py4sci

Source code for Masfunct

''' 
.. module:: Masfunct
.. moduleauthor:: Anna-Lea Lesage   
.. created:: Nov. 2013
.. modified:: Sep. 2015
Astronomical and Mascara dedicated functions, to be called up
in the reduction and analysis functions.
Includes:
    - Class QueueHandler for defining a logging compatible with multiprocessing
    - Class QueueListening for defning a logging compatible with multiprocessing 
    - Astrometric functions:
        - Greenwich_Mean_Sidereal_Time()
        - Earth_Rotation_Angle()
    - Astrometric conversion functions:
        - pol2rect() to transform from polar to rectangular coordinates
        - rect2pol() and transform back
        - rotmat() defining a rotation matrix
    - Astrometric solution function (called by sites.CamCoord.makeAstrometry):
        - makevortex()
        - matchvortex()
        - vortexmatch() a wrapper of the previous two
    - Timing routines:
        - checkdiscontinuity() if there is a time jump in the image -> may require 
                               new astrometric solution
        - astrotiming() defines an interval to run the astrometric solution again        
    - General usage functions:
        - percentile()
        - Gaussian()
        - Gfitting()
        - makePSmid() create mid binned data
        - makePSslow() create slow binned date 
        - image_rotate()
        - rebin()
    - buildTransmissionMap()
    - calendar conversion:
        - calendar2jd()
        - jd2calendar()
        - jd2epoch()
        - epoch2jd()
    - I/O routines:
        - CheckFreeSpace() check how much disk space is free on computer
        - MakeDiskSpace() delete the oldest folder found in a given path
        - FindFiles(), find files in path according to root
        - lstDirectory(), 
        - pickDirectory(), select from a list those who satisfy condition
        - makedir(), wrapper around os.makedirs to create several directories at once
        - cleanup() removes directories forcibly (even if data remains inside!)
        - moveData(), which is more a copy data than a move data
        - tmpdbSave() save dictionary into temporary database
        - tmpdbRead() read entries of database
        - CombineH5() combines several h5 files into one fits file
        - CombinePkl() combines several pkl files into one fits table
        - SaveLCPS() save postage stamps and Light Curves
        - updateLCPS() update existing files
        - LCtable() transform exisitng files in binary fits tables.
        
'''
__version__ = '15.09.22'
### ***  Latest Changes Record  *** ###
### 09.12.2014  Changed in astrocheck the number of decimals to be printed in file. 
###             Increased it from 2 decimals to 4 decimals in order to see the 
###             fluctuation of the astrometry...
### 18.12.2014  Changed in savepng the scaling function: missing ()
### 14.01.2015  Added in astrolog a time input keyword, in order to give the actual
###             observed times to the log instead of the reduction times.
### 03.02.2015  Added the functions loadStellarCatalog and cleanupStellarCatalog
### 02.03.2015  Upgraded the function findOutliers by adding robuststd() which 
###             calculates the standard deviations of a sample without being 
###             biased by outliers.
### 30.03.2015  Added the functions reduction_summary and send_summary which read
###             from the log directory the main features of the reduction (error, warnings...)
### 07.04.2015  Added the function getDarkFilename which read from file the location
###             and name of the last good masterdark.
### 07.04.2015  Added the function updateDarkFilename, which writes to file the
###             location and name of the last good masterdark.
### 06.05.2015  Try to improve the light curves plot. 
### 21.08.2015  Added function statbin
### 22.09.2015  Added to function IndexArray the keyword skip, to give in a list of key entries
###             to skip when creating the header array
### ******************************* ###



import numpy as np
from datetime import datetime, timedelta  
import logging
import threading
import Queue as queue
import pyfits as pf
import warnings
warnings.filterwarnings('ignore')
import pandas as pd
import os

[docs]class QueueHandler(logging.Handler): ''' This class is new in python 3.2 and later but has been copied here for python 2.7. It sends a logging handler to a Queue. It would be used together with a multiprocessing queue to centralise logging to file in a multi-processing configuration and to avoid write contention between processes. '''
[docs] def __init__(self, queue): ''' Initialise the instance using the passed queue. ''' logging.Handler.__init__(self) self.queue = queue
[docs] def enqueue(self, record): ''' Enqueue a record. This implementation uses the no_wait : event are put to queue as soon as possible . ''' self.queue.put_nowait(record)
[docs] def prepare(self, record): ''' Prepares a record for queueing. The object returned by this method is enqueued. ''' self.format(record) record.msg = record.message record.args = None record.exc_info = None return record
[docs] def emit(self, record): ''' Emits a record. Writes the Logrecord to queue, preparing it first. ''' try: self.enqueue(self.prepare(record)) except (KeyboardInterrupt, SystemExit): raise except: self.HandlerError(record)
[docs]class QueueListener(object): ''' Queue listener works in pair with QueueHandler for writing/printing log files using logging in the case of multi-processes. This class implements an internal thread listener which watches for LogRecords being added to Queue, removes them and passes them a list of handler for processing. ''' _sentinel = None
[docs] def __init__(self, queue, *handlers): ''' initialises an instance with the specified queue and handlers ''' self.queue = queue self.handlers = handlers self._stop = threading.Event() self._thread = None
[docs] def dequeue(self, block): ''' dequeue a record, and optionally block it ''' return self.queue.get(block)
[docs] def start(self): ''' Start the listener... This starts up a background thread to monitor the queue for LogRecords to process. ''' self._thread = t = threading.Thread(target = self._monitor) t. setDeamon = True t.start()
[docs] def prepare(self, record): ''' prepares a record for handling. ''' return record
[docs] def handle(self, record): ''' Handle a record. This just loops through the handlers offering them the record to handle. ''' record = self.prepare(record) for handler in self.handlers: handler.handle(record)
[docs] def _monitor(self): ''' Monitors the queue for records and ask the handler to deal with them. This method run on a separate internal thread. The thread will terminate once it sees the sentinel object in the queue. ''' q = self.queue has_task_done = hasattr(q, 'task_done') while not self._stop.isSet(): try: record = self.dequeue(True) if record is self._sentinel: break self.handle(record) if has_task_done: q.task_done() except queue.Empty: pass # there may still be some records in the queue to extract. while True: try: record = self.dequeue(False) if record is self._sentinel: break self.handle(record) if has_task_done: q.task_done() except queue.Empty: break
[docs] def stop(self): ''' stop the listener. This asks the thread to terminate and wait till it does so. Note that if we don't call it before the application exits, there may still be some records inside the queue. ''' self._stop.set() self.queue.put_nowait(self._sentinel) self._thread.join() self._thread = None
[docs]class Stacker: ''' To stack images more to come soon!!! '''
[docs] def __init__(self, site, camera, **kwargs): ''' Initialise the Stacker Class Inputs: - site, a Site instance - camera, a CamCoord instance ''' self.site = site self.camera = camera imageshape = kwargs.get('imageshape', None) if imageshape: self.ny, self.nx = imageshape else: self.nx = self.camera.nx self.ny = self.camera.ny self.sidex = kwargs.get('sidex', 32) self.sidey = kwargs.get('sidey', 32) self.oversize = kwargs.get('oversize', 100) self.texp = 0. self.makeGrid() self.makeStackingImages() return
def end(self): ''' ends the stacker class. Remove everything from memory and bla bla bla ''' self.Bimage = 0 self.referenceImage = 0 self.Nimages = 0 return
[docs] def makeGrid(self): ''' makes the grid used for rotating the images ''' gx, gy = [],[] self.tmpx = self.nx//self.sidex self.tmpy = self.ny//self.sidey resx = self.nx % self.sidex resy = self.ny % self.sidey if (resx%2 > 0) | (resy%2 > 0): print 'Uneven number of discarded pixels, please adjust patch size.' exit() gx = np.arange(self.tmpx)*self.sidex + self.sidex/2. + resx/2. gy = np.arange(self.tmpy)*self.sidey + self.sidey/2. + resy/2. gx, gy = np.meshgrid(gx, gy) self.gx = gx.ravel() self.gy = gy.ravel() self.onx = self.tmpx*self.sidex+self.oversize self.ony = self.tmpy*self.sidey+self.oversize return
def makeStackingImages(self): ## Create output matrix to be filled self.Bimage = np.zeros((self.ony,self.onx), dtype=np.float64) self.referenceImage = np.zeros((self.ony, self.onx), dtype = int) self.Nimages=0 return
[docs] def image_rotate(self, image, currenttime, TimeReference, exposuretime = False): ''' "rotate" image to new reference frame. The image is actually not rotated, but shifted in pieces to the new location for the given TimeReference. Inputs: - image, the image to 'rotate' - currenttime, when this image was taken. Currenttime can be either a datetime object or JD date - TimeReference, the new time reference for 'rotating' the image Keywords: - exposureTime, default false, else precise the exposure time of the image Outputs: cxp, cyp, the grid coordinates at the new ReferenceTime ''' from itertools import izip phobs, thobs, rnobs = self.camera.XYR2PhiThe(self.gx, self.gy) talt, taz = self.camera.PhiThe2Hor(phobs, thobs) #talt, taz = self.camera.ProjectionOnSky(self.gx, self.gy) tra, tec = self.site.Horizontal2Equatorial(talt, taz, currenttime) nalt, naz, nvis = self.site.ApparentStars(tra, tec, TimeReference, \ nutation=False, precession=False, refraction=False, aberration=False) phre, thre, GP = self.camera.Hor2PhiThe(nalt, naz) cxp, cyp, rp = self.camera.PhiThe2XYR(phre, thre) #cxp, cyp, bla = self.camera.VisibleStars(nalt, naz, margin=32) cx, cy = np.around(cxp), np.around(cyp) ones = np.ones(image.shape) for i, j, k, l in izip(self.gy, self.gx, cy, cx): try: self.Bimage[self.oversize/2+k-self.sidey/2:self.oversize/2+k+self.sidey/2+1, \ self.oversize/2+l-self.sidex/2:self.oversize/2+l+self.sidex/2+1] \ += image[i-self.sidey/2:i+self.sidey/2+1,j-self.sidex/2:j+self.sidex/2+1] self.referenceImage[self.oversize/2+k-self.sidey/2:self.oversize/2+k+self.sidey/2+1, \ self.oversize/2+l-self.sidex/2:self.oversize/2+l+self.sidex/2+1] \ += ones[i-self.sidey/2:i+self.sidey/2+1,j-self.sidex/2:j+self.sidex/2+1] except ValueError: continue if exposuretime: if self.Nimages==0: self.texp = 0 else: self.texp += exposuretime self.Nimages+=1 del talt, taz, tra, tec, nalt, naz, nvis, phre, thre, phobs, thobs, cx, cy return cxp, cyp
[docs] def binImage(self, endtimestamp): ''' Bin the actual image Input: - endtimestamp, a datetime object or JD date, which is used to marked the end of this binning sequence Outputs: - binimage, the binned image - nimages, the number of images used for binning ''' output = self.Bimage[self.oversize/2:self.oversize/2+self.ny, \ self.oversize/2:self.oversize/2+self.nx] reference = self.referenceImage[self.oversize/2:self.oversize/2+self.ny, \ self.oversize/2:self.oversize/2+self.nx] output = output/reference self.endtimestamp = endtimestamp Nimages = self.Nimages self.makeStackingImages() output = np.nan_to_num(output) if Nimages>0: self.texp = self.texp/Nimages return output, Nimages ###---------- Astrometric functions -----------###
[docs]def Greenwich_Mean_Sidereal_Time(date, apparent=True): #TESTED WORKING ''' Calculates the GMST at the current observation date. This is the first step towards the calculation of the Local Mean Sidereal Time (LMST). The coefficients are extracted from the circular 179 of USNO. Input: - date, the date in Julian days, or in datetime format Keyword: - Apparent, if set, the program will calculate the apparement sidereal time at the given date, taking into account the Earth's nutation. Output: - the GMST in fractional hours ''' from numpy.polynomial.polynomial import polyval from Masconst import JD2000, JDyr, as2rad from Masfunct import Earthrotangle, calendar2jd from Coordinates import Nutation # Check what is the input format, and convert the date to JD if type(date)==datetime: jd = calendar2jd(date, UTC=True, TZ = None) elif type(date) == np.float64: jd = date #elif type(date) == np.ndarray: # jd = date else: raise SyntaxError('The date format is not recognized') t = (jd - JD2000)/JDyr pgmst = np.array([0.014506, 4612.156534, 1.3915817,-0.00000044, -0.000029956, -0.0000000368]) pgmst = pgmst*as2rad era = Earthrotangle(jd, degree=False) gmst = era + polyval(t, pgmst) if apparent: eps0, dpsi, deps = Nutation(jd) gmst = gmst + dpsi*np.cos(eps0) return (gmst*12/ np.pi)%24
[docs]def Earthrotangle(date, UTC=True, degree= None): # TESTED WORKING ''' Calculates the Earth Rotation Angle (ERA) at the input date. The coefficients and formula are from the circular 179 of the USNO. Input: - date, the date chosen to calculate the ERA. Either a datetime object or JD date Output: - era the earth rotation angle in term of rotation units (rad or degree) ''' from Masconst import JD2000 if type(date)== datetime: jd = calendar2jd(date, UTC=UTC) else: jd=date Du = jd - JD2000 era = (0.7790572732640 + 0.00273781191135448 * Du + (Du%1.0)) %1.0 if degree == None: return era if degree == True: return era/360 if degree == False: return era*2*np.pi ### ---------- Astrometric Conversion functions --------------###
[docs]def pol2rect(ra, dec, degree = True): # TESTED WORKING ''' Convert any type of spherical coordinates in a matrix of cartesian rectangular coordinates to be used for coordinates conversion. Example: To convert (ra-dec)(J2000) to (ra-dec)(to date): >>> X = pol2rect(ra, dec) >>> Xnew = N*P*X # where N and P are the nutation and precession matrices. >>> ranew, decnew = rec2pol(Xnew) Inputs: - the 2 polar coordinates (ra,dec) - (alt-az) (phi theta) in DEGREE Output: -a matrix of cartesian coordinates ''' from Masconst import deg2rad #Convert the inputs into arrays: ra = np.array(ra) dec= np.array(dec) if degree: C = np.cos(dec*deg2rad) x = np.cos(ra*deg2rad) * C y = np.sin(ra*deg2rad) * C z = np.sin(dec*deg2rad) return np.matrix([x,y,z]) else: C = np.cos(dec) x = np.cos(ra)*C y = np.sin(ra)*C z = np.sin(dec) return np.matrix([x,y,z])
[docs]def rect2pol(X, Eq2AltAz=False): '''Conversion from rectangular coordinates to equatorial. Special care has to be taken around the celestial poles Inputs: - x, y, z, scalar or vector of rectangular coordinates Outputs: - ra, dec: the converted coordinates in degrees ''' from Masconst import rad2deg if X.size/len(X) == 1: x = np.ravel(X[0]) y = np.ravel(X[1]) z = np.ravel(X[2]) else: x = np.array(np.ravel(X[0,:]), dtype=float) y = np.array(np.ravel(X[1,:]), dtype=float) z = np.array(np.ravel(X[2,:]), dtype=float) if Eq2AltAz: r = np.sqrt(x**2 + y**2) ra = np.arctan2(y,x) dec = np.arctan2(z,r) else: ra = np.arctan2(y,x) dec = np.arcsin(z) # Convert the angles back in degree ra = ra*rad2deg dec = dec*rad2deg return ra, dec
[docs]def rotmat(angle, axis='x', degree=False): # TESTED WORKING ''' Rotmat generates a 3x3 rotation matrix for a given angle around the requested axis (x, y, or z) Input: - angle, the rotation angle Keywords: - the axis around which the rotation takes place. - degree=True if the input angles are in degree Output: - the rotation matrix ''' if degree: angle = np.radians(angle) if axis == 'x': s = np.sin(angle) c = np.cos(angle) R = np.matrix([[1, 0, 0],[0, c, s],[0, -s, c]]) return R elif axis == 'y': s = np.sin(angle) c = np.cos(angle) R = np.matrix(((c,0,-s),(0,1,0),(s,0,c))) return R elif axis == 'z': s = np.sin(angle) c = np.cos(angle) R = np.matrix(((c,s,0),(-s,c,0),(0,0,1))) return R else: raise SyntaxError('Give an axis for the rotation matrix') ### --------- Astrometric solution functions ---------- ###
[docs]def findOutliers(*args, **kwargs): ''' findOutliers searches for outliers in Y as a function of X. Inputs: - X, array of points - Y, array of points correlated to X - bins, the number of bins to consider, default 5 - sig, a rejection criterion, default 4 Output: - a list of the index of the good pixels ''' if len(args) == 1: ysort = args[0] elif len(args) == 2: x, y = args xsort = np.argsort(x) ysort = y[xsort] else: print ' Only 2 arguments accepted' bins = kwargs.get('bins', 5) sig = kwargs.get('sig', 3.) dim = ysort.size step = dim/bins ## test to verify that : # 1. The bins size is smaller than the sample size: if bins > dim: print 'too many bins for the sample size' return elif step < 3: print 'not enough elements in each bins to do statistics' return res = dim % bins while res > step: bins += 1 res = dim - bins*step goodvalues = np.zeros(dim) for j in xrange(bins): if j == bins-1: tmp = ysort[j*step:] else: tmp = ysort[j*step:(1+j)*step] rstd = robuststd(tmp) ## Mark the good values below threshold if j == bins-1: goodvalues[j*step:] = (np.abs(tmp - np.median(tmp)) < sig * rstd) else: goodvalues[j*step:(j+1)*step] = (np.abs(tmp - np.median(tmp)) < sig * rstd) goodvalues = np.array(goodvalues, dtype=bool) return goodvalues
[docs]def robuststd(X): '''Calculates a robust standard deviation for the distribution X, by removing gradually the outliers. Usefull when a few outliers are skewing the calculation of the standard deviation. Input: - X, array-like Output: - the robust standard deviation of X ''' ## find a "robust" standard deviation dX = X - np.median(X) istd = dX.std() nx = dX.size mm = [max(np.abs(dX)),] dstd = istd - dX[np.abs(dX) < min(mm)].std() gd = (np.abs(dX) < min(mm)).nonzero()[0] while (dstd >= .1) and (dX.size > nx*0.75): dX = X[gd] - np.median(X[gd]) mm.append(max(np.abs(dX))) gd_ = (np.abs(dX) < min(mm)).nonzero()[0] dstd = dX.std() - dX[gd_].std() gd = gd[gd_] robustd = X[gd].std() return robustd
def removeOutliers(*args, **kwargs): ''' removeOutliers searches for outliers values in the distributions X and Y. It removes all points above a N sigma level. Inputs: - X, Y, the distributions to analyse Keywords: - niter, the number of iterations to remove outliers - sig, the N sigma threshold - keep_indices, if True, returns the indices of the good points Output: - X without the outliers ''' sig = kwargs.pop('sig', 3.) niter = kwargs.pop('niter', 5.) keep_indices = kwargs.pop('keep_indices', True) if len(args) == 1: x = args[0] i =0 gd = (np.abs(x-np.median(x)) <= sig * x.std()).nonzero()[0] gd_ = gd.copy() while i <= niter: x = x[gd_] gd_ = (np.abs(x-np.median(x)) <= sig * x.std()).nonzero()[0] i+=1 gd = gd[gd_] if keep_indices: return x, gd else: return x elif len(args) == 2: x, y = args i = 0 gd = ((np.abs(x-np.median(x))<=sig*x.std())*(np.abs(y-np.median(y))<=sig*y.std())).nonzero()[0] gd_ = gd.copy() while i < niter: i+=1 x = x[gd_] y = y[gd_] gd_ = ((np.abs(x-np.median(x)) <= sig * x.std()) * \ (np.abs(y-np.median(y)) <= sig * y.std())).nonzero()[0] gd = gd[gd_] if keep_indices: return x, y, gd else: return x, y else: print 'Wrong number of inputs: maximum 2 arguments expected' return
[docs]def makevortex(x, y): ''' Makevortex uses a Delauney griding method to create from the input coordinates a triangular grid. Each star is matched with its closest neighboor to form a triangle. The method is used in the astrometric solution for identifying the stars together. Inputs: - x, y, 2 vectors of equal size representing the coordinates of the star. Output: - dist, a vector of the possible distances. - index, a array containing the indices of the 2 points forming the distance. ''' from scipy.spatial import Delaunay #rearrange the input coordinates: coords = np.array([x, y]) coords = coords.transpose() tri = Delaunay(coords) dist = [] for ii, jj, kk in tri.simplices: xx = tri.points[ii] yy = tri.points[jj] zz = tri.points[kk] dxy = np.sqrt((xx[0]-yy[0])**2+(xx[1]-yy[1])**2) dyz = np.sqrt((yy[0]-zz[0])**2+(yy[1]-zz[1])**2) dzx = np.sqrt((xx[0]-zz[0])**2+(xx[1]-zz[1])**2) dist.append([dxy,dyz, dzx]) return np.array(dist), tri.simplices
[docs]def matchvortex(distobs, indexobs, mobs, distpre, indexpre, mag, dtol=0.1, mtol=0.25): ''' Matchvortex matches together observed vortices of 3 to 6 points to the predicted stars positions. Inputs: - distobs, a vector containing all the distances computed from the 3 to 6 points. - distpre, a vector containing all the computed predicted distances - indices, an indice vector for distpre (which points makes which distance) - tol, the tolerance for matching the distance together. ''' # What is the number of triangles fed to the function: Ntri = distobs.shape[0] Npre = distpre.shape[0] # go through the Ntriangles found. And get the matching N triangles. match = [] argit = np.argsort(distobs, axis=1) init = np.sort(distobs, axis=1) nexit = np.sort(distpre, axis=1) arget = np.argsort(distpre, axis=1) ddist = np.empty([Ntri, Npre]) scaleratio = np.empty([Ntri]) magdif = np.empty([Ntri,3]) tmpnexit = np.array([nexit[i,:]/np.max(nexit[i,:]) for i in xrange(0,Npre)]) tmpinit = np.array([init[N,:]/init[N,:].max() for N in xrange(0,Ntri)]) for N in xrange(0,Ntri): ddist[N,:] = np.sqrt((tmpinit[N,0]-tmpnexit[:,0])**2 + (tmpinit[N,1]-tmpnexit[:,1])**2) dmindex = np.argmin(ddist[N,:]) scaleratio[N] = init[N,2]/nexit[dmindex,2] magdif[N,:] = mobs[indexobs[N,argit[N,:]]]- mag[indexpre[dmindex,arget[dmindex,:]]] if (np.abs(1.-scaleratio[N])< dtol)*((np.abs(magdif[N,:]) < mtol).all()): match.append([list(indexpre[dmindex, arget[dmindex,:]]), \ list(indexobs[N,argit[N,:]])]) else: order = np.argsort(ddist[N,:]) d2mindex = order[1] scaleratio[N] = init[N,2]/nexit[d2mindex,2] magdif[N,:] = mobs[indexobs[N,argit[N,:]]]-mag[indexpre[d2mindex,arget[d2mindex,:]]] if (np.abs(1.-scaleratio[N])< dtol)*((np.abs(magdif[N,:])<mtol).all()): match.append([list(indexpre[d2mindex, arget[d2mindex,:]]), \ list(indexobs[N, argit[N,:]])]) else: d3mindex = order[2] scaleratio[N] = init[N,2]/nexit[d3mindex,2] magdif[N,:] = mobs[indexobs[N,argit[N,:]]]-mag[indexpre[d3mindex,arget[d3mindex,:]]] if (np.abs(1.-scaleratio[N])< dtol)*((np.abs(magdif[N,:])<mtol).all()): match.append([list(indexpre[d3mindex,arget[d3mindex,:]]), \ list(indexobs[N, argit[N,:]])]) return np.array(match)
[docs]def vortexmatch(*args, **kwargs): ''' vortexmatch takes the coordinates of the found stars, and the coordinates of the catalog stars on the CCD, and matches them together using the vortex method. Inputs: - xpre, ypre, the catalog stellar coordinates on the CCD - mpre, the predicted magnitude - xobs, yobs, the found stellar coordinates on the CCD - fobs, the measured magnitude Keywords: - dtol, the tolerance in the distance (between 0 and 0.5) - mtol, the tolerance in the magnitude ( between 0 and 0.5) - prepointed, if true, it is assumed that the pointing is already very close to the real value. Then all triangles with distances above 20% are rejected Outputs: - matchindex, a matrix giving the correspondance between xpre and xobs ''' try: nargs = len(args) except TypeError: raise 'No inputs given' if nargs != 6: print ' Inputs are xobs, yobs, mobs, xpre, ypre, mpre' return xpre, ypre, mag, xobs, yobs, fobs = args mpre = mag - mag.mean() mobs = -2.5*np.log10(fobs) mobs = mobs - np.median(mobs) dtol = kwargs.pop('dtol', 0.1) mtol = kwargs.pop('mtol', 0.35) prepointed = kwargs.pop('prepointed', True) distobs, indexobs = makevortex(xobs, yobs) distpre, indexpre = makevortex(xpre, ypre) matchindex = matchvortex(distobs, indexobs, mobs, distpre, indexpre, \ mpre, dtol=dtol, mtol=mtol) Nmatch = matchindex.shape[0] if Nmatch > 0: fdist = np.empty([Nmatch, 3]) for i in range(Nmatch): for j in range(3): fdist[i,j]=np.sqrt((xpre[matchindex[i,0,j]]-xobs[matchindex[i,1,j]])**2+\ (ypre[matchindex[i,0,j]]-yobs[matchindex[i,1,j]])**2) if not prepointed: goodmatch = ((fdist.mean(axis=1)< (np.median(fdist)+\ (fdist.std(axis=1)).mean()))*(fdist.mean(axis=1)>(np.median(fdist)-\ (fdist.std(axis=1)).mean()))).nonzero() matchindex = matchindex[goodmatch[0],:] else: goodmatch = (fdist.mean(axis=1) < percentile(fdist.mean(axis=1), 0.25)) matchindex = matchindex[goodmatch,:] return matchindex else: return
def getBadIndices(image, threshold=1500): ''' getBadIndices finds the indices of the pixels which are designated bad. The function defines the standard deviation in X and Y and marks as bad all pixels belonging to a row and a line whose std are above threshold. ''' import bottleneck as bt ### first selection very broad stdX = bt.nanstd(image, axis=0) stdY = bt.nanstd(image, axis=1) dim = image.shape teti = np.zeros(dim) teti[:,stdX>threshold] = 1 condy = stdY > threshold teti[condy,:] = teti[condy,:]+1 badindex = (teti==2).nonzero() tim = np.zeros(dim) tim[badindex] = image[badindex] badindex = (tim > 2.e4).nonzero() tim = 0 return badindex def plotAstrometry(Cam, alt, az, mag, im): ''' plots the goodness of the astrometry for ONE image''' import matplotlib.pyplot as pl xpre, ypre, inCCD = Cam.VisibleStars(alt, az) xf, yf, mf, dist_ = Cam._evaluateastrometry(im, xpre, ypre, rad1=7, hmin=3.) xmag = mag[inCCD] xmagf = xmag[mf] pl.subplot(121) pl.plot(xmagf, dist_, 'k+') pl.plot(xmagf, np.ones(xmagf.size), 'g:') pl.plot(xmagf, np.zeros(xmagf.size), 'r-') pl.ylabel('Distance measured in pixel') pl.xlabel('Magnitude') pl.subplot(122) pl.plot(xmagf, xpre[mf]-xf, 'k+') pl.xlabel('Magnitude') pl.ylabel('Delta X') pl.plot(xmagf, np.zeros(xmagf.size), 'r-') pl.plot(xmagf, np.ones(xmagf.size), 'g:') pl.plot(xmagf, -np.ones(xmagf.size), 'g:') pl.title('Astrometry of Camera:%s'%Cam.name) pl.show() ### ----- TIMING ROUTINES ----- ### # To verify that the time cadence of the image is regular...
[docs]def checkdiscontinuity(obstime, texp): ''' Given an exposure time -texp- and an array containing at least the last 2 dates of observation, checktimescale verifies that the latest exposure is made in the continuity of the previous one. This is relevant in cases of a serie of observations during one night. If the 2 exposures diverge by more than twice the exposure time, than a discontinuity is found, and the function returns True. Input: - obstime, an array containing the dates of observation. - texp, a scalar for the exposure time, and including the read- out time of the detector. Output: - a boolean value, True if a discontinuity is found, else False ''' try: dim = len(obstime) except AttributeError: dim = 1 if dim < 2: return False else: to = jd2calendar(obstime.pop(-2)) ti = jd2calendar(obstime[-1]) tmp = ti-to if tmp.seconds < 2*texp: return False else: return True
def astrotiming(obstime, timeinterval, flagtime): ''' astrotiming returns a boolean True flag when the following condition is true: two observations dates are distant by more than the timeinterval. In other words, the time interval spacing those 2 observations is at least bigger than the input value timeinterval. Input: - obstime, an 1d array filled with the observation dates - timeinterval, a scalar giving in SECONDS the interval between a flaged observation and the latest one Output: - True or False ''' try: dim = obstime.shape[0] except AttributeError: dim = 1. if dim == 1: # First observation, we run the astrometry!! return True, flagtime else: tmp = jd2calendar(obstime[-1]) - jd2calendar(obstime[flagtime]) if tmp.seconds >= timeinterval: return True, dim-1 else: return False, flagtime ########################################### ### ----- General Purpose routine ----- ### ### ---- basic statistics? --- $$$
[docs]def percentile(im, value): ''' Percentile computes the qth percentile of the data. It is based on the numpy version percentile, but adapted for masked arrays. And is faster than the scipy.scoreatpercentile() function. Input: - im, a 2d or 1d array - value, a scalar for the percentile to calculate. Must be between 0 and 1. Output: - the value of the data at the percentile. ''' tmp = np.ravel(im) data = np.sort(tmp) # find the finite values blop = np.isfinite(tmp).nonzero() nfin = blop[0].size return data[value*nfin]
def barycenter(im): ''' returns the center of mass of an array Input: - im, an array-like Outputs: - xc, yc, the coordinates of the center of mass ''' dim = im.shape if len(dim)< 1: print 'input array must have at least one dimension' return elif len(dim) == 1: xdim = np.arange(dim[0]) xc = np.sum(xdim*im)/np.sum(im) return xc elif len(dim) == 2: xdim = np.arange(dim[1]) ydim = np.arange(dim[0]) X, Y = np.meshgrid(xdim, ydim) xc = np.sum(X*im)/np.sum(im) yc = np.sum(Y*im)/np.sum(im) if xc < 0 or xc > dim[1]: xc = dim[1]/2 if yc < 0 or yc > dim[0]: yc = dim[0]/2 return xc, yc
[docs]def gaussian(x, xo, w, A): ''' Returns a gaussian function centered in xo, with a width w and an amplitude A. Inputs: - x, 1d array of the values used for computing the Gaussian - xo, the centre of the gaussian - w, the width of the gaussian - A, the amplitude Output: - G(x) = A exp(-(x-xo)^2/(2W^2)) ''' return A*np.exp(-(x-xo)**2/(2*w**2))
def Gfitting(P, x, y): '''Use for a gaussian fit''' xo, w, a = P['xo'].value, P['w'], P['a'] g = gaussian(x, xo, w, a) E = (y - g)**2 return E def makePSmid(psdata, midbinning, bx): ''' makePSmid computes the mid cadence postage stamp out of a matrix of fast cadence postage stamps. The mid cadence is defined by the midbinning term. The input arrays, a (NxNxM) array, is reshaped into a (NxNxM'xMidbinning) array, and averaged over the last axis. Inputs: - psdata, a 3d array of dimension (NxNxM) for the postage stamps - midbinning, a scalar setting the binning interval. Outputs: - MPS, the binned postage stamp array (NxNxM') ''' dim = psdata.shape nx = dim[0] slowbinning = dim[2] nob = psdata.nonzero() nzcount = psdata[nob].shape[0]/nx**2 mindex = slowbinning - nzcount + (nzcount % midbinning) if mindex == slowbinning: return np.zeros((bx*2, bx*2,0)) mnu = nzcount / midbinning MPS = np.reshape(psdata[:,:,mindex:], (bx*2, bx*2, mnu, midbinning)) MPS = MPS.mean(axis=3) return MPS def makePSslow(psdata, slowbinning): ''' Same as above but for a slow cadence data: binning of 50 images''' dim = psdata.shape nx = dim[0] nob = psdata.nonzero() nzcount = psdata[nob].shape[0]/nx**2 if nzcount < slowbinning*3/4: return np.zeros((16,16)) else: sindex = slowbinning - nzcount SPS = psdata[:,:,sindex:] SPS = SPS.mean(axis=2) return SPS def rotaroundthispoint(output, rota, Xn, Yn): ''' Defines a rotation function for every output. ''' x, y = output[1], output[0] x_j, y_j = Xn - x, Yn-y #rota = rotmat(angle, axis='z') X = np.array([x_j, y_j], dtype=np.float64) X = X.ravel() X_ = np.inner(X,rota) nx = Xn - X_[0] ny = Yn - X_[1] return ny, nx def image_rotate(*args): ''' Wrap-around the scipy rotate function to rotate an image by an arbitary angle and define the location of the rotation center. The array is rotated in its plane (works only for 2D images). OBSOLETE, use the Stacker Class instead!!! Inputs: - data, an 2D - ndarray to rotate - gx, gy, a grid over the CCD - site, a Site instance - camera, the camera instance - t0, the date when the image was taken - t1, the new date to which the image is rotated, - b, a tuple representing the size of a image chunk (e.g (32,32)) - 'Queue' optional, a queue object Output: - the rotate image altN, azN, and camera are used to calculate the center of rotation and the distance for each pixel from this point. ''' from itertools import izip data, gx, gy, site, cam, t0, t1, b = args data = np.asarray(data) oversize= 100 #gxp, gyp = cam.invertCorrection(gx, gy) talt, taz = cam.ProjectionOnSky(gx,gy) tra, tec = site.Horizontal2Equatorial(talt, taz, t0) nalt, naz, nvis = site.ApparentStars(tra, tec, t1, nutation=False, \ precession=False, refrac=False, aberration=False) cx, cy, bla = cam.VisibleStars(nalt, naz, margin=np.max(b)) #cx, cy = cam.CorrectPositions(cpx,cpy) ny, nx = data.shape onx = int(nx/b[0])*b[0]+oversize ony = int(ny/b[1])*b[1]+oversize ## Create output matrix to be filled blati = np.empty((ony,onx), dtype=np.uint16) cx, cy = np.around(cx), np.around(cy) for i,j, k,l in izip(gy, gx, cy, cx): try: blati[oversize/2 + k - b[1]/2:oversize/2 + k + b[1]/2+1, \ oversize/2 + l - b[0]/2:oversize/2 + l + b[0]/2+1] = \ data[i-b[1]/2:i+b[1]/2+1,j-b[0]/2:j+b[0]/2+1] except ValueError: continue output = blati[oversize/2:oversize/2+ny, oversize/2:oversize/2+nx] talt, taz, tra, tec, nalt, naz, nx, ny, onx, ony = 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 blati = None del blati return output
[docs]def rebin(image, binfactor): ''' rebins an image to a new format. Condition: newformat is smaller than the original image format. Inputs: - image, an numpy array - binfactor, the binning factor Output: - binimage ''' dim = image.shape ny = dim[0] nx = 0 ndim = len(dim) if ndim>1: nx = dim[1] newy = ny / binfactor newx = nx / binfactor resy = ny % binfactor resx = nx % binfactor if (resy or resx) and ndim>1: image = image[resy:, resx:] elif resy and ndim == 1: image = image[resy:] if ndim>1: sh = newy,ny//newy, newx, nx//newx return image.reshape(sh).mean(-1).mean(1) elif ndim == 1: sh = newy, ny//newy return image.reshape(sh).mean(1)
def statbin(image, binfactor): ''' statbin returns the mean values and standard deviation of the values inside a bin. Inputs: - image, a numpy array, 1D - binfactor, the binning factor. Has to be smaller than the image size... Outputs: - the mean values of the bins - the std values of the bins ''' nx = image.size newx = nx // binfactor resx = nx % binfactor if newx == 0: return 0., 0. if resx ==0: binmage = image.copy() else: binmage = image[resx/2:-resx/2] sh = newx, binfactor print nx, newx, resx, binmage.size, sh mim = np.nanmean(binmage.reshape(sh), axis=1) sim = np.nanstd(binmage.reshape(sh), axis=1) return mim, sim
[docs]def loadStellarCatalog(**kwargs): ''' loadStellarCatalog load in memory a binary fits table catalog of stellar names and coordinates for further use (for instance predict the visible stars from an observing site). Keywords: - filename: the fits file containing the stellar catalog. By default it's a vizier catalog containing only stellar coordinates. But any other fits.catalog could be loaded. - Vmag, Rmag, Bmag or Jmag, the magnitude range of the stars to load. Default is set from Vmag= 2 to 8, to avoid using too much memory. But any fainter number is fine too. - sID = 'ASCC', 'TYC', 'HIP' or 'HD', the stellar identification - sIDonly, boelan to return only the stellar identifiers. Outputs: - ra, an array of right ascension coordinates, in degree, and in J2000 - dec, an array of declination - vmag, the corresponding stellar visible magnitude - bmag, the B magnitude - sID, the stellar Identifiers - stype, the spectral type ''' filename = kwargs.pop('filename', 'StarCat.fits') if 'Rmag' in kwargs: maglimit = kwargs.get('Rmag', None) elif 'Bmag' in kwargs: maglimit = kwargs.get('Bmag', None) elif 'Jmag' in kwargs: maglimit = kwargs.get('Jmag', None) else: maglimit = kwargs.get('Vmag', (-2., 8.)) try: totcat = pf.getdata(filename, ext=1) except IOError: print 'No fits catalog found ... ' return tmag = totcat['Vmag'] mrange = (tmag >= maglimit[0])*(tmag <= maglimit[1]) if 'sIDonly' in kwargs: if kwargs.get('sID', 'ASCC') == 'TYC': tyc1 = totcat['TYC1'][mrange] tyc2 = totcat['TYC2'][mrange] tyc3 = totcat['TYC3'][mrange] sID = np.array(["".join(["TYC","%d"%i,"%d"%j,"%d" %e]) \ for i,j,e in zip(tyc1,tyc2,tyc3)]) elif kwargs.get('sID', 'ASCC') == 'HD': sID = totcat['HD'][mrange] elif kwargs.get('sID','ASCCC') == 'HIP': sID = totcat['HIP'][mrange] else: sID = totcat['ASCC'][mrange] return sID else: ra = totcat['_RAJ2000'][mrange] dec = totcat['_DEJ2000'][mrange] vmag = tmag[mrange] bmag = totcat['bmag'][mrange] Sptype = totcat['SPTYPE'][mrange] if kwargs.get('sID', 'ASCC') == 'TYC': tyc1 = totcat['TYC1'][mrange] tyc2 = totcat['TYC2'][mrange] tyc3 = totcat['TYC3'][mrange] sID = np.array(["".join(["TYC","%d"%i,"%d"%j,"%d" %e]) \ for i,j,e in zip(tyc1,tyc2,tyc3)]) elif kwargs.get('sID', 'ASCC') == 'HD': sID = totcat['HD'][mrange] elif kwargs.get('sID','ASCCC') == 'HIP': sID = totcat['HIP'][mrange] else: sID = totcat['ASCC'][mrange] return ra, dec, vmag, bmag, sID, Sptype
def selectClosest(ra, dec, mag, start, stop, dist=2.5, degree=True): ''' To select the closest stars''' to_reject = [] flagBlended = np.zeros(ra.size, dtype=int) BlendingValue = np.zeros(ra.size) for k in xrange(start, stop): dra = np.abs(ra - np.roll(ra, k)) ddec = np.abs(dec - np.roll(dec, k)) if degree: closest = ((dra < dist/60.)*(ddec < dist/60.)).nonzero()[0] else: closest = ((dra < dist)*(ddec < dist)).nonzero()[0] for j in closest: ## Mark the indice of the stars to rejects (always the faintest of 2) ## and Flag the other star if mag[j] > mag[j-k]: to_reject.append(j) flagBlended[j-k] = 1 BlendingValue[j-k] = 10**((mag[j-k]-mag[j])/2.5) else: to_reject.append(np.abs(j-k)) flagBlended[j] = 1 BlendingValue[j] = (10**((mag[j]-mag[j-k])/2.5)) return to_reject, flagBlended, BlendingValue
[docs]def cleanupStellarCatalog(ra, dec, mag, dist=2.5): ''' Astrostars is a short routine for selecting stars within a catalog that are suited for astrometry. They are bright (limiting magnitude defined via mag1), and have no close neighbours (define close via dist) of magnitude down to mag2. Inputs: - ra, dec, the stellar coordinates - mag, the corresponding stellar magnitudes. Used for rejected the fainter stars Keyword: - dist = 2.5, the distance in arcmin between 2 stars for considering them blended Outputs: - ra, dec mag of the selected stars ''' ### Make a quick test to check whether the ra is in degree or hours: if ra[-1] > 24: degree=True else: degree=False to_reject = [] flagBlended = np.zeros(ra.size, dtype=int) BlendingValue = np.zeros(ra.size) if ra.size > 10000: print ' The size of the catalog is important: turning multiprocessing mode on...' print ' It may takes several minutes to complete ... ' import multiprocessing NCPU = multiprocessing.cpu_count()-1 start = 1 stop = int(round(ra.size/2.+1)) steps = range(start, stop, 2000) steps.append(stop) pool = multiprocessing.Pool(processes=NCPU) result = [pool.apply_async(selectClosest, args=(ra, dec, mag, i, j )) \ for i,j in zip(steps[0:-1], steps[1:])] for r in result: output = r.get() tr, fg, bd = output to_reject.append(tr) flagBlended += fg BlendingValue += bd pool.close() pool.join() to_reject = [item for sublist in to_reject for item in sublist] flagBlended = np.asarray(flagBlended, dtype=str) flagBlended[flagBlended == '1'] = 'B' flagBlended[flagBlended == '0'] = '' else: flagBlended = np.zeros(ra.size, dtype=str) for k in xrange(1, ra.size-1): dra = np.abs(ra - np.roll(ra, k)) ddec = np.abs(dec - np.roll(dec, k)) if degree: closest = ((dra < dist/60.)*(ddec < dist/60.)).nonzero()[0] else: closest = ((dra < dist)*(ddec < dist/60.)).nonzero()[0] for j in closest: ## Mark the indice of the stars to rejects (always the faintest of 2) ## and Flag the other star if mag[j] > mag[j-k]: to_reject.append(j) flagBlended[j-k] = 'B' BlendingValue[j-k] = 10**((mag[j-k]-mag[j])/2.5) else: to_reject.append(np.abs(j-k)) flagBlended[j] = 'B' BlendingValue[j] = (10**((mag[j]-mag[j-k])/2.5)) to_reject = np.asarray(to_reject) to_reject = np.unique(to_reject) print 'Rejecting %g stars because of blending ' %(to_reject.size) ra = np.delete(ra, to_reject) dec = np.delete(dec, to_reject) mag = np.delete(mag, to_reject) flagBlended = np.delete(flagBlended, to_reject) BlendingValue = np.delete(BlendingValue, to_reject) return ra, dec, mag, to_reject, flagBlended, BlendingValue
def defineReferenceStars(ra, dec, mag, sID, maxdist=1.5, magrange=(4.5, 6), dec_step=2): ''' the function finds pairs of stars of magnitude inside the defined range within the declination steps. Two stars are used as reference if : 1. there are both within the magnitude range 2. there are both inside the same declination band 3. there are also close in RA!! Inputs: - ra, an array of the right ascensions - dec an array of the declinations - mag, an array of magnitude - sID the stellar identifiers. ra, dec and sID MUST have the same size Keywords: - magrange, the magnitude range for the pairs - maxdist, the maximum distance in degree between 2 reference stars. Maxdist is chosen from a maximum acceptable distance in pixel - declination_step, the declination band to probe Output: - referenceID, a chararray of the stellar identifiers listed as reference. referenceID is of the shape (N,2, m) N the number of bands and m the number of reference stars found in that spacific band ''' from numpy.polynomial.polynomial import polyval ### Select the right magnitude range mrange = (mag > magrange[0])*(mag<magrange[1]) ra_, dec_, sID_ = ra[mrange], dec[mrange], sID[mrange] ## Spacing in RA between 2 pairs of stars in a band in degree raspacing = 15. mindist = 0.1 # Minimum distance between 2 stars in degree ### Defines the declinations bands ## the dec_step is the declination band to be used close to the equator, the ## poles require larger bands halfrange = np.arange(0,90,1.) funcdec = np.round(polyval(halfrange, (dec_step, 0., (10.-dec_step)/90**2))) tmp = np.histogram(funcdec, bins=max(funcdec))[0] tmp = tmp[tmp.nonzero()[0]] steps = np.unique(funcdec) tmp = np.round( tmp/ (np.sum(tmp*steps)/90)) onehalf = [] temp = 0 for i, e in enumerate(steps): for j in np.arange(tmp[i]): temp = temp+e onehalf.append(temp) otherhalf = np.array(onehalf) onehalf.insert(0, 0.) onehalf.reverse() onehalf = np.array(onehalf) bands = np.hstack((onehalf*(-1), otherhalf)) referenceID = {} ### Loop for each bands for s,e in zip(bands, np.roll(bands,-1)): print 'Selection of band {} - {}'.format(s,e) ## safety coming from the zip and roll function if s+e == 0: break ## find decs and ras in bands indecband = ((dec_ > s)*(dec_<=e)).nonzero()[0] if len(indecband)>0: thisdec = dec_[indecband] thisra = ra_[indecband] thisID = sID_[indecband] closestinra = (np.abs(thisra -np.roll(thisra,-1))%360 < maxdist) * \ (np.abs(thisra -np.roll(thisra,-1))%360 > mindist) paircandidates_ra = closestinra + np.roll(closestinra,1) print "Number of paircandidates in RA : {}".format(paircandidates_ra.size) thisdec = thisdec[paircandidates_ra] closestindec = (np.abs(thisdec - np.roll(thisdec,1)) < maxdist) * \ (np.abs(thisdec - np.roll(thisdec,1)) > mindist) paircandidates_dec = closestindec+np.roll(closestindec,1) print "Number of paircandidates in DEC : {}".format(paircandidates_dec.size) closeRAID = thisID[paircandidates_ra] closeDEID = closeRAID[paircandidates_dec] thisra = thisra[paircandidates_ra] racandidates = thisra[paircandidates_dec] print 'Some sizes: ', closeDEID.size, racandidates.size racandidates.shape = (racandidates.size/2,2) closeDEID.shape = (racandidates.size/2,2) ## Keep pair which are moderately spaced if declination is middle: if np.abs(e) < 70: spacing = np.abs(racandidates[:,0]-np.roll(racandidates[:,0],-1)) > raspacing racandidates = racandidates[spacing] IDcandidates = closeDEID[spacing] referenceID[e] = (s, e, IDcandidates) return referenceID
[docs]def skyQuad(im, tilesize = 300, aligned=True): ''' skyQuad does nothing with the image itself. Creates tiles for further sky quality analysis. Inputs: - the image itself, to draw out the image dimensions Keywords - tilesize, the size of the tiles. - aligned = True. If True the tiles follow a classical grid pattern. Else the tiles are shifted by half a tile for each row. Outputs: - lx, ly, upx, upy, the corners of the sky Quads. ''' ny, nx = im.shape cx, cy = nx/2, ny/2 if isinstance(tilesize, int): tilesizex = tilesize tilesizey = tilesize else: tilesizex, tilesizey = tilesize tileXnumber = nx/tilesizex tileYnumber = ny/tilesizey lx, ly = [],[] if aligned: for k in np.arange(-tileYnumber/2-1, tileYnumber/2+1): if ((cy+tilesizey/2+k*tilesizey<ny) and (cy+tilesizey/2+k*tilesizey>0)): lx.append([cx+tilesizex/2+tilesizex*j for j in np.arange(-tileXnumber/2, tileXnumber/2+1)\ if (cx+tilesizex/2+(j)*tilesizex<nx)and(cx+tilesizex/2+j*tilesizex>0)]) ly.append([cy+tilesizey/2+tilesizey*k for j in np.arange(-tileXnumber/2, tileXnumber/2+1)\ if (cx+tilesizex/2+(j)*tilesizex<nx)and(cx+tilesizex/2+j*tilesizex>0)]) else: for k in np.arange(-tileYnumber/2-1, tileYnumber/2+1): if ((cy+tilesizey/2+(k+1)*tilesizey<ny) and (cy+tilesizey/2+k*tilesizey>0)): if k%2 == 0: lx.append([cx+tilesizex*j for j in np.arange(-tileXnumber/2, tileXnumber/2+1) \ if (cx+(j+1)*tilesizex<nx)and(cx+j*tilesizex>0)]) ly.append([cy+tilesizey/2+tilesizey*k for j in np.arange(-tileXnumber/2, tileXnumber/2+1) \ if (cx+(j+1)*tilesizex<nx)and(cx+j*tilesizex>0)]) if k%2==1: lx.append([cx+tilesizex/2+tilesizex*j for j in np.arange(-tileXnumber/2, tileXnumber/2+1)\ if (cx+tilesizex/2+(j+1)*tilesizex<nx)and(cx+tilesizex/2+j*tilesizex>0)]) ly.append([cy+tilesizey/2+tilesizey*k for j in np.arange(-tileXnumber/2, tileXnumber/2+1)\ if (cx+tilesizex/2+(j+1)*tilesizex<nx)and(cx+tilesizex/2+j*tilesizex>0)]) lx = [item for sublist in lx for item in sublist] ly = [item for sublist in ly for item in sublist] lx = np.array(lx).ravel() ly = np.array(ly).ravel() upx = lx+tilesizex upy = ly+tilesizey return lx, ly, upx, upy
[docs]def buildTransmissionMap(*args, **kwargs): ''' buildTransmissionMap constructs from existing dictionaries an image of the transmission. Inputs: - dict, a dictionary containing the information for constructing the map: {quad indice: [list of (observed magnitude - apparent magnitude), per aperture]} - nulx, nuly, two scalars giving the dimension of the map Keywords: - dimensions, specify the number of dimensions the map should have. If dimension > 1. - writetofile, boelan. If true, the map(s) are written to disk in fits format ''' from scipy.ndimage import filters dimension = kwargs.pop('dimension', 1) writeto = kwargs.pop('writetofile', True) if writeto: dicto, nulx, nuly, hdr, path, name = args else: dicto, nulx, nuly = args TransMap = np.zeros([dimension, nuly, nulx]) for k, v in dicto.iteritems(): TransMap[:, k/nulx, k%nulx] = np.median(np.array(v), axis=0) if writeto: TransMap = filters.median_filter(TransMap, size=3, mode='nearest') pf.writeto(os.path.join(path, name + '.fits'), TransMap, hdr, clobber=True) else: return TransMap
[docs]def getCentralStars(Cam, xpre, ypre, sptype, spectraltype='F'): ''' getCentralStars returns for a specific spectral type, all the stars which are located at the center of the CCD according to the camera's centre. The latter may differ from the image center itself. Inputs: * Cam, a CamCoord instance which defines the centre of the CCD * xpre, ypre, a set of stellar coordinates inside the CCD * sptype, an array containing the stellar spectral types Keyword: * spectraltype, the spectral type to consider. Output: * incentre, an array of indices in the centre of the CCD ''' ## Keep coordinates of the stars which satisfy spectral criterion sxpre, sypre = xpre[sptype.find(spectraltype)>=0], ypre[sptype.find(spectraltype)>=0] ## Find central stars incentre = (sxpre < Cam.Xo+100)*(sxpre>Cam.Xo-100)*(sypre<Cam.Yo+100)*(sypre>Cam.Yo-100) csptype = sptype[incentre] i = 0 while csptype.shape[0] < 50: incentre = (sxpre < Cam.Xo+100+5*i)*(sxpre>Cam.Xo-100-5*i)*(sypre<Cam.Yo+50+5*i)*(sypre>Cam.Yo-50-5*i) csptype = sptype[incentre] i+=1 return incentre
def getZeroPoint(mag, image, alt, az, Cam, sptype, spectraltype='F', apr=3., skyrad=[5,9]): ''' getZeroPoint calculates the ZeroPoint value of the system as a function of magnitude. Assuming that the centre of the image is cloud free... Inputs: * mag, the catalog magnitude * fobs, the observed magnitudes Outputs: * mfit, a linear law for correcting the observed magnitude to the catalog magnitude. ''' import Masphot from numpy.polynomial.polynomial import polyfit ## Get the stellar coordinates on the CCD xpre, ypre, inCCD = Cam.VisibleStars(alt, az) xmag = mag[inCCD] xsptype = sptype[inCCD] ## Get the central stars of the specified spectral type incentre = getCentralStars(Cam, xpre, ypre, xsptype, spectraltype=spectraltype) cmag = xmag[incentre] cxpre, cypre = xpre[incentre], ypre[incentre] ## Measure for those stars, the flux: fobs = np.array([]) for k in xrange(cxpre.shape[0]): tmp = Masphot.OnaPhot(image, cxpre[k], cypre[k], apr=apr, skyrad=skyrad) fobs = np.hstack((fobs, tmp[1])) ## Get the zero point for the spectral type: mobs = -2.5*np.log10(fobs) mzp = polyfit(cmag, mobs, 1) return mzp, incentre ###### ------------------------------##### ### ------ TIME CONVERSIONS -----------###
[docs]def calendar2jd(*args): # TESTED WORKING!! ''' Calendar to Jd converts a datetime object into a Julian date object Input: - date, a datetime object or a timedelta object or an array of datetime objects Output: - JD, the Julian date in np.float64 ''' if len(args) == 1: date = args[0] # Case 1 it is a datetime object: if type(date) == datetime: yr = date.year mth = date.month day = date.day hr = np.float64(date.hour) mn = np.float64(date.minute) sec = np.float64(date.second + date.microsecond * 1e-6) # Case 2: it's an array of datetime objects: elif type(date) == np.ndarray: getyears = np.vectorize(lambda x:x.year) getmonth = np.vectorize(lambda x:x.month) getday = np.vectorize(lambda x:x.day) gethour = np.vectorize(lambda x:np.float64(x.hour)) getmin = np.vectorize(lambda x:np.float64(x.minute)) getsec = np.vectorize(lambda x:np.float64(x.second + x.microsecond*1e-6)) yr = getyears(date) mth = getmonth(date) day = getday(date) hr = gethour(date) mn = getmin(date) sec = getsec(date) # Case 3: it is a list/tuple with year, month, day, hour, minutes, seconds format elif isinstance(date, timedelta): from Masconst import JDsec sec = date.total_seconds() return sec*JDsec elif len(args) == 6: yr, mth, day, hr, mn, sec = args hr = np.float64(hr) mn = np.float64(mn) sec = np.float64(sec) # Condition for the leap years: m3 = mth < 3 L = np.zeros(np.size(mth), dtype=int) if np.size(L) == 1: if m3: L = -1 else: L[m3] = -1 #if m3: # L = -1 #else: L = 0 thour = hr + mn/60 + sec/3600 jd = np.float64(day - 32075 + 1461*(yr + 4800 +L)/4 + 367*(mth -2 -L*12)/12 - \ 3 *(yr + 4900 + L)/400) + thour/24 - 0.5 return jd
[docs]def jd2calendar(jd): ''' Convert a Julian day date (JD) into a calendar date, using a datetime format. Input: - JD, a scalar or a vector, in float64 precision Output: - date, a datetime object in UTC timezone ''' try: import pytz utc = pytz.utc except ImportError: print 'No timezone information available' if type(jd) != np.float64 and type(jd) != np.ndarray: raise SyntaxError('JD is a float64 scalar...') frac = jd%1 + 0.5 xjd = np.array(jd - jd%1, dtype=long) if np.isscalar(frac) and frac>1: xjd = xjd + 1 frac = frac -1 elif not np.isscalar(frac): xjd[(frac>1).nonzero()] = xjd[(frac>1).nonzero()]+1 frac[(frac>1).nonzero()] = frac[(frac>1).nonzero()]-1 hr = frac*24 l = xjd + 68569 n = (4*l / 146097) l = l - (146097*n + 3) / 4 yr = 4000*(l+1) / 1461001 l = l - 1461*yr / 4 + 31 mth = 80*l / 2447 day = l - 2447*mth / 80 l = mth/11 mth = mth + 2 - 12*l yr = 100*(n-49) + yr + l mn = (hr%1)*60 sec = (mn%1)*60 usec = np.array((sec%1)*1e6, dtype=long) hr = np.array(hr, dtype = long) mn = np.array(mn, dtype=long) sec = np.array(sec, dtype=long) if (hr == 24.0).any(): print 'here?' hr[hr==24] = hr[hr==24] - 24.0 day[hr==24] = day[hr==24] + 1 def todate(year, month, day, hour, mn, sec, usec): return datetime(year, month, day, hour, mn, sec, usec, tzinfo=utc) indate = np.vectorize(todate) date = indate(yr, mth, day, hr, mn, sec, usec) if len(date.shape) > 1: date.shape = date.shape[1] return date
[docs]def jd2epoch(jd, Julian=True): # TESTED WORKING!!! ''' Conversion of a given Julian date to an epoch expressed in decimal years. Input: jd, the Julian date to convert, if jd is None, it takes the current date Keyword: Julian. If not set, the reference epoch is 1900 Output: epoch. The date converted in decimal years. ''' from Masconst import JD2000, JDyr jd = np.array(jd, copy=False) if Julian: epoch = 2000.00 + (jd - JD2000)/(JDyr/100) else: epoch = 1900.00 + (jd-2415020.31352)/365.242198781 return epoch
[docs]def epoch2jd(epoch): '''Convert an epoch into a Julian Date''' print 'do something' return ######################################################## #### File creating/modification/updating routines ####
def readcfg(configfile, keys = ''): ''' Reads a config file and returns the values for the given keywords ''' with open(configfile, 'rb') as f: c = f.read() cl = c.split('\r\n') line = [l for l in cl if not l.startswith('#') if not l.strip()==''] for l in line: if l.find(keys) == 0: return def adapt_array(arr): ''' adapt_array is an adapter for sqlite allowing the storage of numpy arrays in a database-like format. ''' import io out = io.BytesIO() np.save(out, arr) out.seek(0) return buffer(out.read()) def convert_array(text): ''' convert_array converts the informations contained in a sqlite database back into numpy arrays. ''' import io out = io.BytesIO(text) out.seek(0) return np.load(out)
[docs]def tmpdbSave(*args, **kwargs): ''' tmpdbSave saves the content of a dictionary into a database using h5 format. Inputs: - dico, a dictionary - qlog, a Queue object used for logging - (Path, the location path of the database) - (ndb, the database number) ''' import logging import multiprocessing import gc case = len(args) if case ==2: dico, qlog = args logger = logging.getLogger(multiprocessing.current_process().name) #logger.setLevel(logging.DEBUG) to = datetime.now() try: df = pd.DataFrame(dico) df.to_hdf('tmpdb.hdf', 'tmpdb', mode='w') except Exception: logger.error('Failure at creating the table!!!', exc_info=True) dto = (datetime.now() - to).total_seconds() logger.info('Saved %g entries in %g seconds' %(len(dico), dto)) return else: Path, dico, ndb, qlog = args logger = logging.getLogger(multiprocessing.current_process().name) #logger.setLevel(logging.DEBUG) to = datetime.now() try: df = pd.DataFrame(dico) df.convert_objects(convert_dates=True, convert_numeric=True) logger.debug('Transfering dict to pandas in %g' %(datetime.now()-to).total_seconds()) store = pd.HDFStore(Path + 'tmpdb'+str(ndb)+'.h5', 'w') store.put('tmpdb'+str(ndb), df) store.close() logger.debug('Saved Stellar dict') except Exception: logger.error('Failure at creating the table!!!', exc_info=True) store.close() df = None return dto = (datetime.now() - to).total_seconds() logger.info('Saved %g entries in %g seconds' %(len(dico), dto)) del df gc.collect() return
def CombineH5(savepath, listdb, globdic, name = 'LPC', minobs=51): ''' CombineH5 combines several .h5 files into one final file. It goes through the given directory, and merges the files together. ''' import h5py datdic = {} hdrdic = {} ### Keep the reference cnx0 in memory and update it with the new databases for i, ldb in enumerate(listdb): store = pd.HDFStore(ldb, 'r', complevel=5, complib='zlib') itsname = os.path.basename(ldb).split('.')[0] # print 'Reading in {}'.format(itsname) cnx = store.get(itsname) store.close() keys0 = datdic.keys() # print 'Found {} keys in the ref database'.format(len(keys0)) for k, v in cnx.iteritems(): if len(v)==0: continue if k in keys0: d = datdic[k] try: d = np.vstack((d, v[8])) except ValueError: pass datdic[k] = d tmp = hdrdic[k] tmp[8] = d.shape[0] hdrdic[k] = tmp else: datdic[k] = v[8] hdrdic[k] = [v[0], v[1], v[2], v[3],v[4], v[5], v[6], v[7], 1] naper = globdic['naper'] ### Put in indexArray a condition to reject entries which do not have enough points datdic, reject = indexArray(datdic, naper=naper, mode='data', minobs=minobs) for r in reject: datdic.pop(r) hdrdic.pop(r) hdrdic, tmp = indexArray(hdrdic, mode='header', minobs=minobs) jdstart, ra, dec, vmag, bmag, spectype, blend, blendvalue, nobs = tmp #### Save it all to hdf5 file try: myfile = h5py.File(os.path.join(savepath, name+'.hdf5'), 'w') hdrgrp = myfile.create_group('header') for k, v in hdrdic.iteritems(): hdrgrp[k]=v datagrp = myfile.create_group('data') for k,v in datdic.iteritems(): datagrp[k]=v headertab = myfile.create_group('header_table') headertab.create_dataset('ascc', data=hdrdic.keys()) headertab.create_dataset('jdstart', data=jdstart) headertab.create_dataset('ra', data=ra) headertab.create_dataset('dec', data=dec) headertab.create_dataset('bmag', data=bmag) headertab.create_dataset('vmag', data=vmag) headertab.create_dataset('spectype', data=spectype) headertab.create_dataset('blend', data=blend) headertab.create_dataset('blendvalue', data=blendvalue) headertab.create_dataset('nobs', data=nobs) glob = myfile.create_group('global') for k, v in globdic.iteritems(): glob.attrs[k]=v myfile.flush() finally: myfile.close() return def indexArray (cnx, naper = 2, mode='data', minobs=1): ''' indexArray transforms an array into an record array Inputs: - tmpdb, a dictionary of arrays Keywords: - naper, the number of aperture used - mode, 'data' or 'header' - reject, a list of entries to skip ''' if mode=='data': dtypes = ['u1',] # Unsigned integer going from 0 to 255 for the flag for i in range(0, naper*2+3): dtypes.append(np.float64) # Float 64-bits double-precision for the flux, errors and sky dtypes.append('f4') # Float 32-bits single-precision for the X position dtypes.append('f4') # Float "" "" for the Y position dtypes.append('f4') # Float 32-bits "" "" for the altitude dtypes.append('f4') # Float 32-bits "" "" for the azimuth dtypes.append('f4') # Float 32-bits "" "" for the CCD temperature dtypes.append('f8') # Float 64-bits for the exposure time dtypes.append('f8') # Float 64-bits for the Julian dte dtypes.append('f8') # Float 64-bits for the lst in hours dtypes.append('u4') # Unsigned integer 32-bits for the lst-index dtypes.append('u8') # Unsigned integer 64-bits for the lst-sequence #dtypes = [np.float64]*(naper*2+6+8) names = ['flag', ] base = 'flux' erbase = 'eflux' for i in range(0, naper): names.append(base+str(i)) names.append(erbase+str(i)) names.append('sky') names.append('esky') names.append('peak') names.append('x') names.append('y') names.append('alt') 
        names.append('az')
        names.append('ccdtemp')
        names.append('exptime')
        names.append('jdmid')
        names.append('lst')
        names.append('lstidx')
        names.append('lstseq')
        mytypes = zip(names, dtypes)
        ### List of the entries which don't have enough data points:
        reject = []
        ### Loop over all the entries
        for k, arr in cnx.iteritems():
            arlist = []
            dim = arr.shape
            ### Reject the entries with fewer than minobs data points;
            ### nobs is updated accordingly in CombineH5.
            if len(dim) > 1 and dim[0] >= minobs:
                for l in range(arr.shape[1]):
                    arlist.append(arr[:,l])
                rec = np.rec.fromarrays(arlist, dtype=mytypes)
                cnx[k] = rec
            else:
                reject.append(k)
        return cnx, reject
    elif mode == 'header':
        jdstart = np.array([])
        ra = np.array([])
        dec = np.array([])
        vmag = np.array([])
        bmag = np.array([])
        spectype = np.array([], dtype='S9')
        blend = np.array([], dtype='S1')
        blendvalue = np.array([])
        nobs = np.array([])
        tmp = [jdstart, ra, dec, vmag, bmag, spectype, blend, blendvalue, nobs]
        dtypes = [float, float, float, float, float, 'S9', 'S1', float, float]
        names = ['jdstart', 'ra', 'dec', 'vmag', 'bmag', 'spectype', 'blend',
                 'blendvalue', 'nobs']
        mytypes = zip(names, dtypes)
        for k, arr in cnx.iteritems():
            arlist = []
            for i, l in enumerate(arr):
                arlist.append(np.array([l], dtype=dtypes[i]))
                tmp[i] = np.hstack((tmp[i], np.array([l], dtype=dtypes[i])))
            cnx[k] = np.rec.fromarrays(arlist, dtype=mytypes)
        return cnx, tmp

def CombinePkl(Path, savepath, stid):
    ''' CombinePkl combines several .pkl files into one binary fits table.
    It goes through the given directory, and merges the files together.
    '''
    import shutil
    nn = FindFiles(os.path.join(Path, ''), stid+'*', format='.pkl')
    if len(nn) > 0:
        dat = pd.read_pickle(nn[0])
        jddate = dat[0]
        ra, dec, mag, bmag = dat[1], dat[2], dat[3], dat[4]
        sptype = dat[5]
        flagblended = dat[6]
        blendedvalue = dat[7]
        header = pf.Header()
        header = header.fromtextfile(os.path.join(Path, 'Header'+stid+'.txt'))
        header['RAJ2000'] = (ra, 'Right Ascension in deg')
        header['DEJ2000'] = (dec, 'Declination in deg')
        header['VMAG'] = (mag, 'Apparent V magnitude')
        header['BMAG'] = (bmag, 'Apparent B magnitude')
        header['SPTYPE'] = (sptype, 'Spectral Type')
        header['FLAG'] = (flagblended, 'Flag')
        header['BLENDVAL'] = (blendedvalue, 'Blending value')
        header['JDSTART'] = (jddate, 'Start Observation in JD')
        os.remove(os.path.join(Path, 'Header'+stid+'.txt'))
        ## Read from header the number of used apertures.
        naper = header['NAPER']
        flc = dat.get(8)
        for n in nn[1:]:
            dat = pd.read_pickle(n)
            try:
                flc = np.vstack((flc, dat.get(8)))
            except ValueError:
                continue
        for n in nn:
            os.remove(n)
        dim = flc.shape
        if len(dim) == 2:
            fnobs = flc.shape[0]
            ### ATTENTION HERE, flc is (naper*2 + 17, 50)
        elif len(dim) == 1:
            fnobs = 1
            flc = np.reshape(flc, (1, dim[0]))
        header['NOBS'] = (fnobs, 'Number of images in the file')
        base = "flux"
        erbase = "eflux"
        names = ["flag"]
        # ----------------------------------------------------------------
        # The final content of the Photometry array is:
        #   flag, (flux, erflux)*Naperture, sky, ersky, peak, xc, yc,
        #   altitude, azimuth, CCD Temperature,
        #   exposure time, JDdate, lstdate, lpstid, lpstseq
        # Total number of elements: 2*naper + 14
        # ----------------------------------------------------------------
        for i in range(0, naper):
            names.append(base+str(i))
            names.append(erbase+str(i))
        names.append("sky")
        names.append("esky")
        names.append('peak')
        names.append("Xc")
        names.append("Yc")
        names.append("alt")
        names.append("az")
        names.append('ccdtemp')
        names.append("exptime")
        names.append("jdmid")
        names.append("lpst")
        names.append("lpstid")
        names.append("lpstseq")
        names = np.array(names)
        units = ['None']
        for i in range(0, naper*2+2):
            units.append('e-')
        units.append('ADU')
        for i in xrange(2):
            units.append('pixels')
        units.append('degree')
        units.append('degree')
        units.append('Celsius')
        units.append('sec')
        units.append('JD days')
        units.append('hours')
        units.append('LaPalma index image')
        units.append('Sequence index')
        cols = pf.ColDefs([pf.Column(name=nom, format="D", array=flc[:,i], unit=units[i]) \
                           for i, nom in enumerate(names)])
        tbhdu = pf.new_table(cols)
        prihdu = pf.PrimaryHDU(header=header)
        thdulist = pf.HDUList([prihdu, tbhdu])
        thdulist.writeto(savepath+stid+"LC.fits", clobber=True)
        shutil.rmtree(Path)
        thdulist.close()
        return header

def LCtable(savingPath, ID, dat, header):
    """ LCtable saves the inputs into a binary fits table
    Inputs:
        - the saving path
        - the ID, or name to save the file
        - the content of the file
        - the generic header
    """
    jddate = dat[0]
    ra, dec, mag, bmag = dat[1], dat[2], dat[3], dat[4]
    sptype = dat[5]
    flagblended = dat[6]
    blendedvalue = dat[7]
    header['RAJ2000'] = (ra, 'Right Ascension in deg')
    header['DEJ2000'] = (dec, 'Declination in deg')
    header['VMAG'] = (mag, 'Apparent V magnitude')
    header['BMAG'] = (bmag, 'Apparent B magnitude')
    header['SPTYPE'] = (sptype, 'Spectral Type')
    header['FLAG'] = (flagblended, 'Flag')
    header['BLENDVAL'] = (blendedvalue, 'Blending value')
    header['JDSTART'] = (jddate, 'Start Observation in JD')
    ## Read from header the number of used apertures.
    naper = header['NAPER']
    flc = dat.get(8)
    dim = flc.shape
    if len(dim) == 2:
        fnobs = dim[0]
    elif len(dim) == 1:
        fnobs = 1
        flc = np.reshape(flc, (1, dim[0]))
    header['NOBS'] = (fnobs, 'Number of images in the file')
    base = "flux"
    erbase = "eflux"
    names = ["flag"]
    # ----------------------------------------------------------------
    # The final content of the Photometry array is:
    #   flag, (flux, erflux)*Naperture, sky, ersky, xc, yc,
    #   xpre, ypre, altitude, azimuth, phi, theta, radius,
    #   exposure time, JDdate, lstdate, lpstid, lpstseq
    # Total number of elements: 2*naper + 17
    # ----------------------------------------------------------------
    for i in range(0, naper):
        names.append(base+str(i))
        names.append(erbase+str(i))
    names.append("sky")
    names.append("esky")
    names.append("Xc")
    names.append("Yc")
    names.append("Xa")
    names.append("Ya")
    names.append("alt")
    names.append("az")
    names.append("phi")
    names.append("theta")
    names.append("r")
    names.append("exptime")
    names.append("jdmid")
    names.append("lpst")
    names.append("lpstid")
    names.append("lpstseq")
    names = np.array(names)
    units = ['None']
    for i in range(0, naper*2+2):
        units.append('e-')
    for i in xrange(4):
        units.append('pixels')
    units.append('degree')
    units.append('degree')
    units.append('degree')
    units.append('degree')
    units.append('pixels')
    units.append('sec')
    units.append('JD days')
    units.append('hours')
    units.append('LaPalma index image')
    units.append('Sequence index')
    cols = pf.ColDefs([pf.Column(name=nom, format="D", array=flc[:,i], unit=units[i]) \
                       for i, nom in enumerate(names)])
    tbhdu = pf.new_table(cols)
    prihdu = pf.PrimaryHDU(header=header)
    thdulist = pf.HDUList([prihdu, tbhdu])
    thdulist.writeto(savingPath+ID+"LC.fits", clobber=True)
    thdulist.close()
    return
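### Example (illustrative sketch, not part of the original pipeline; the
### paths, the star id and the pickled content are hypothetical):
# >>> dat = pd.read_pickle('/tmp/red/123456_0.pkl')    # [jd, ra, dec, vmag, bmag, sptype, flag, blendval, photometry]
# >>> hdr = pf.Header()
# >>> hdr['NAPER'] = 2                                 # LCtable reads the aperture count from the header
# >>> LCtable('/tmp/red/', '123456', dat, hdr)         # writes /tmp/red/123456LC.fits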
def saveCompFits(path, name, data, header):
    ''' to save an image in a compressed fits.
    Inputs:
        - path, the path for saving the images
        - name, the name of the image
        - data, the data to be saved
        - header, the corresponding header
    The data is saved in a CompImageHDU, using the HCOMPRESS_1 compression
    type. If the data is not in UINT16 format, it is converted before the
    compression (via the uint keyword).
    '''
    ny, nx = data.shape
    #data = np.uint16(data)
    header.set('BITPIX', 16, comment='array data type', before=0)
    header.set('NAXIS', 2, comment='number of array dimensions', after='BITPIX')
    header.set('NAXIS1', nx, after='NAXIS')
    header.set('NAXIS2', ny, after='NAXIS1')
    f = pf.CompImageHDU(data=data, header=header, compression_type='HCOMPRESS_1', \
                        name='COMP_IMAGE', hcomp_scale=1., uint=True)
    f.verify(option='fix')
    f.writeto(os.path.join(path, name+'.fits'), clobber=True)
    return
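### Example (illustrative sketch; the array dimensions, path and header are
### hypothetical):
# >>> img = np.zeros((2672, 4008), dtype=np.uint16)
# >>> hdr = pf.Header()
# >>> saveCompFits('/tmp/images/', 'test_comp', img, hdr)   # writes /tmp/images/test_comp.fits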
def savepng(image, path, name, binfactor=1):
    ''' savepng saves the input image at the given path. The image can be
    rebinned using the binfactor keyword. The scaling is a fixed power-law
    stretch (x**0.1), shifted and mapped onto the 0-255 range.
    Inputs:
        - image, an array
        - path, the saving directory
        - the name of the file (without the .png)
    Keywords:
        - binfactor, if 1, the saved image has the same dimension as the
          original image. Set to a value larger than 1 to reduce the image size.
    '''
    from scipy.misc import imsave
    binmage = rebin(image, binfactor)
    binmage = (binmage**0.1 - 1.5)/1.5*255
    imsave(os.path.join(path, name+'.png'), binmage)
    return
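### Example (illustrative sketch; the image and path are hypothetical):
# >>> img = np.random.poisson(5000., size=(2672, 4008))
# >>> savepng(img, '/tmp/images/', 'overview', binfactor=4)   # rebinned by a factor 4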
def SavePSLC(Path, FPS, MPS, SPS, FLC, MLC, SLC, header, starID):
    '''SavePSLC saves the fast, mid and slow cadenced data of the Postage
    Stamp file (typically called from a worker thread).
    Inputs:
        - Path, a string giving the path-root for the saving directories
        - FPS, MPS, SPS, (n,n,m) arrays filled with postage stamps
        - FLC, MLC, SLC, the corresponding photometry arrays
        - header, an updated pyfits header
        - starID, the identifier of the star
    No outputs, files are written to disk.
    '''
    import pyfits as pf
    import warnings
    warnings.filterwarnings('ignore')
    from sys import platform as _platform
    ## Check the operating system:
    if _platform == 'linux' or _platform == 'linux2':
        PSpath = Path + '/PS/'
        LCpath = Path + '/LC/'
    if _platform == 'win32':
        PSpath = Path + '\\PS\\'
        LCpath = Path + '\\LC\\'
    try:
        nobs = FPS.shape[2]
    except IndexError:
        nobs = 1
    FPS = FPS.transpose()
    header.update('NOBS-F', nobs, 'Number of images in the fast file')
    fp = pf.PrimaryHDU(FPS, header)
    fl = pf.PrimaryHDU(FLC, header)
    hp = pf.HDUList([fp])
    hl = pf.HDUList([fl])
    if MPS.shape[0] > 1:
        try:
            nobs = MPS.shape[2]
        except IndexError:
            nobs = 1
        MPS = MPS.transpose()
        header.update('NOBS-M', nobs, 'Number of images in the mid file')
        hp.append(pf.ImageHDU(MPS, header))
        hl.append(pf.ImageHDU(MLC, header))
        #pf.append(PSpath+starID+'.fits', pf.ImageHDU(MPS), header)
        #pf.append(LCpath+starID+'.fits', pf.ImageHDU(MLC), header)
    if SPS.shape[0] > 1:
        nobs = 1
        header.update('NOBS-S', nobs, 'Number of images in the slow file')
        hp.append(pf.ImageHDU(SPS, header))
        hl.append(pf.ImageHDU(SLC, header))
        #pf.append(PSpath+starID+'.fits', pf.ImageHDU(SPS), header)
        #pf.append(LCpath+starID+'.fits', pf.ImageHDU(SLC), header)
    hp.writeto(PSpath+starID+'.fits', clobber=True)
    hl.writeto(LCpath+starID+'.fits', clobber=True)
    return

def updatePSLC(Path, fps, mps, sps, flc, mlc, slc, starID, qlog):
    ''' updatePSLC updates the existing fits files for the fast, mid and slow
    cadence data for both postage stamp and lightcurve. It uses the update
    mode of pyfits, without rewriting the whole file.
    Inputs:
        - Path, the path root for the reduced data
        - fps, mps, sps, the 3D arrays containing the postage stamps
        - flc, mlc, slc, the 2D arrays for the photometry
    No output, the files are directly updated.
    '''
    import pyfits as pf
    import warnings
    warnings.filterwarnings('ignore')
    from sys import platform as _platform
    import multiprocessing
    import logging
    logger = logging.getLogger(multiprocessing.current_process().name)
    logger.setLevel(logging.DEBUG)
    ## Check the operating system:
    if _platform == 'linux' or _platform == 'linux2':
        PSpath = Path + '/PS/'
        LCpath = Path + '/LC/'
    if _platform == 'win32':
        PSpath = Path + '\\PS\\'
        LCpath = Path + '\\LC\\'
    ## 1. Update the postage stamp files
    f = pf.open(PSpath+starID+'.fits', mode='update')
    header = f[0].header
    # a. The fast extension : primary fits extension
    fo = f[0].data
    psfast = fo     # fall back to the existing data if the concatenation fails
    try:
        fps = fps.transpose()
        psfast = np.concatenate((fo, fps))
    except ValueError:
        logger.error(' still some fo of 16x16', exc_info=True)
    nobs = psfast.shape[0]
    header.update('NOBS-F', nobs, 'Number of images in the fast extension')
    f[0].data = np.ascontiguousarray(psfast)
    # b. if enough images, then also update the mid extension with the mid binned files
    if len(mps.shape) > 2:
        # Case 1, the extension already exists:
        if header['extend']:
            mo = f[1].data
            try:
                if len(mo.shape) == 2:
                    f[1].data = mps.transpose()
                else:
                    mps = mps.transpose()
                    psmid = np.concatenate((mo, mps))
                    f[1].data = np.ascontiguousarray(psmid)
            except Exception:
                logger.error(' Error on mps', exc_info=True)
            header.update('NOBS-M', mps.shape[0], 'Number of images in the mid extension')
        else:
            # case 2. no existing extension => adding one!
            blu = pf.ImageHDU(mps.transpose())
            f.append(blu)
    # c. update the slow binned extension:
    if sps.shape[0] > 1:
        try:
            so = f[2].data
            psslow = np.dstack((so, sps))
            f[2].data = psslow
            header.update('NOBS-S', psslow.shape[2], 'Number of images in the slow extension')
        except IndexError:
            bli = pf.ImageHDU(sps)
            f.append(bli)
    f.flush()
    ## 2. Doing the same for the light curves:
    b = pf.open(LCpath+starID+'.fits', mode='update')
    beader = b[0].header
    bo = b[0].data
    lcfast = np.vstack((bo, flc))
    b[0].data = lcfast
    beader.update('NOBS-F', lcfast.shape[0], 'Number of points in the fast extension')
    # if enough points, update the mid extension:
    if len(mps.shape) > 2:
        if beader['extend']:
            no = b[1].data
            lcmid = np.vstack((no, mlc))
            beader.update('NOBS-M', lcmid.shape[0], 'Number of points in the mid extension')
            b[1].data = lcmid
        else:
            nlu = pf.ImageHDU(mlc)
            b.append(nlu)
    if sps.shape[0] > 1:
        try:
            po = b[2].data
            lcslow = np.vstack((po, slc))
            b[2].data = lcslow
            beader.update('NOBS-S', lcslow.shape[0], 'Number of points in the slow extension')
        except IndexError:
            nlo = pf.ImageHDU(slc)
            b.append(nlo)
    b.flush()
    return

def light_curve_overview(Path, night, filename='LCoverview', mrange=[4.75, 4.8]):
    ''' selects within the given magnitude range the stars which have been
    observed. Saves a plot at the end.
    Input:
        - Path, the location of the light curves
        - night, the name of the night, used in the plot title
    Keywords:
        - filename, the name for saving the image. Default is LCoverview.png
        - mrange, the magnitude range of the stars to be plotted
    '''
    import matplotlib.pyplot as pl
    import itertools
    flc = FindFiles(os.path.join(Path, 'fLC', ''), '*LC')
    slowLC = FindFiles(os.path.join(Path, 'sLC', ''), '*LC')
    pl.figure(1, figsize=(12, 8))
    ax00 = pl.subplot(1, 1, 1)
    pos1 = ax00.get_position()
    pos2 = [pos1.x0-0.02, pos1.y0, pos1.width/1.2, pos1.height/1.1]
    ax00.set_position(pos2)
    marker = itertools.cycle(('+', 'x', '.', '*'))
    smarker = itertools.cycle(('o', 's', 'v', 'd'))
    color = itertools.cycle(('k', 'b', 'g', 'r', 'c', 'y', 'm'))
    for f in flc:
        fname = os.path.basename(f)
        a = fname.split('LC')[0]
        tab = pf.getdata(f)
        col = color.next()
        pl.plot(tab['jdmid'], tab['flux0'], linestyle='', marker=marker.next(),
                color=col, label=a)
        for s in slowLC:
            if s.find(a) >= 0:
                stab = pf.getdata(s)
                pl.plot(stab['jdmid'], stab['flux0'], linestyle='',
                        marker=smarker.next(), mec=col, mfc='w')
    pl.title('Overview of {}'.format(night))
    pl.legend(numpoints=3, title='Stellar ASCC', bbox_to_anchor=(1.05, 1.),
              loc=2, borderaxespad=0.)
    pl.savefig(os.path.join(Path, 'logs', filename+'.png'), dpi=200, facecolor='w', \
               orientation='landscape', papertype='A5')
    pl.close()
    return

def reduction_summary(logpath, site, camera, night, date, format='html'):
    ''' Prepare a quick summary of the reduction.
    Includes the possible errors and warnings.
    Includes the image generated at the end of the night from the astrometry.
    '''
    logfile = FindFiles(logpath, 'reduc*', format='.log')
    if len(logfile) == 0:
        if format == 'html':
            message = '''
            <!DOCTYPE html>
            <html lang="en">
            <head><meta name="format-detection" content="telephone=no">
            <meta name="viewport" content="width=device-width, initial-scale=0.8">
            <style> div {text-align: left; font-size: 14px; padding-left: 20px;} </style>
            </head>
            <body>
            '''
            ### Prepare the main message:
            message += '<div> Hi all, <br></br>'
            message += '{:*^96} <br></br>'.format(' Reduction summary ')
            message += '{:^96} <br></br>'.format(' Site: {}, Camera: {}, Night: {}, finished at {} '.format(\
                        site.name, camera.name, night, date.strftime('%Y-%m-%d %H:%M')))
            message += '{:*^96} <br></br>'.format('')
            ### Prepare the alternate message:
            message_alternate = '{:*^96} \n'.format(' Reduction summary ')
            message_alternate += '{:^96} \n'.format(' Site: {}, camera: {}, Night: {}, finished at {} '.format(\
                        site.name, camera.name, night, date.strftime('%Y-%m-%d %H:%M')))
            message_alternate += '{:*^96} \n \n'.format('')
            message += 'No log file was found for that night <br></br>'
            message_alternate += 'No log file was found for that night '
            return message, message_alternate
        else:
            ## No log file and plain-text output requested:
            return 'No log file was found for that night '
    with open(logfile[0], 'r') as f:
        log = f.read()
    ### Get from the log file the possible errors and warnings
    loglines = log.split('\n')
    errors = []
    warnings = []
    astro = []
    End_of_night = False
    End_of_saving = False
    Saved_FLC = False
    Solved_astro = False
    nimages = 0
    for l in loglines:
        if l.find('ERROR') > 0:
            errors.append(l)
        elif l.find('WARNING') > 0:
            warnings.append(l)
        elif l.find('end_of_night') > 0:
            End_of_night = True
        elif l.find('saved_fast_LC') > 0:
            Saved_FLC = True
        elif l.find('saved_all_LC') > 0:
            End_of_saving = True
        if l.find('Time needed to process') > 0:
            nimages += 1
        if l.find('solved_the_astrometry') > 0:
            astro.append(l)
            Solved_astro = True
    if format == 'html':
        message = '''
        <!DOCTYPE html>
        <html lang="en">
        <head><meta name="format-detection" content="telephone=no">
        <meta name="viewport" content="width=device-width, initial-scale=0.8">
        <style> div {text-align: left; font-size: 14px; padding-left: 20px;} </style>
        </head>
        <body>
        '''
        message += '<div> Hi all, <br></br>'
        message += '{:*^96} <br></br>'.format(' Reduction summary ')
        message += '{:^96} <br></br>'.format(' Site: {}, Camera: {}, Night: {}, finished at {} '.format(\
                    site.name, camera.name, night, date.strftime('%Y-%m-%d %H:%M')))
        message += '{:*^96} <br></br>'.format('')
        ### Prepare the alternate message:
        message_alternate = '{:*^96} \n'.format(' Reduction summary ')
        message_alternate += '{:^96} \n'.format(' Site: {}, camera: {}, Night: {}, finished at {} '.format(\
                    site.name, camera.name, night, date.strftime('%Y-%m-%d %H:%M')))
        message_alternate += '{:*^96} \n \n'.format('')
        if End_of_night:
            message += ' The reduction went well <br></br>'
            message_alternate += ' The reduction went well \n'
            message += ' {} images were reduced this night <br></br>'.format(nimages)
            message_alternate += ' {} images were reduced this night \n'.format(nimages)
        if Saved_FLC:
            message += ' All fast light curves have been saved <br></br>'
            message_alternate += ' All fast light curves have been saved \n'
        if Solved_astro:
            message += ' Solved the astrometry {} times <br></br>'.format(len(astro))
            message_alternate += ' Solved the astrometry {} times \n'.format(len(astro))
        if End_of_saving:
            message += ' All the light curves have been saved <br></br>'
            message_alternate += ' All the light curves have been saved \n'
        else:
            message_alternate += ' No light curve was saved \n'
        message += ' {} errors occurred during the reduction: <br></br>'.format(len(errors))
        message_alternate += ' {} errors occurred during the reduction: \n'.format(len(errors))
        for er in errors:
            message += er + '<br></br>'
            message_alternate += er + '\n'
        message += ' {} warnings occurred during the reduction: <br></br>'.format(len(warnings))
        message_alternate += ' {} warnings occurred during the reduction: \n'.format(len(warnings))
        for war in warnings:
            message += war + '<br></br>'
            message_alternate += war + '\n'
        message += 'Cheers, <br></br>'
        message_alternate += ' Cheers, '
        message += '</div></body></html>'
        return message, message_alternate
    else:
        ### Prepare the plain-text message:
        message_alternate = '{:*^96} \n'.format(' Reduction summary ')
        message_alternate += '{:^96} \n'.format(' Site: {}, Camera: {}, at {} '.format(site.name, \
                    camera.name, date.strftime('%Y-%m-%d %H:%M')))
        message_alternate += '{:*^96} \n \n'.format('')
        if End_of_night:
            message_alternate += ' The reduction went well \n'
        if End_of_saving:
            message_alternate += ' All the light curves have been saved \n'
        else:
            message_alternate += ' No light curve was saved \n'
        message_alternate += ' {} errors occurred during the reduction: \n'.format(len(errors))
        for er in errors:
            message_alternate += er + '\n'
        message_alternate += ' {} warnings occurred during the reduction: \n'.format(len(warnings))
        for war in warnings:
            message_alternate += war + '\n'
        return message_alternate

def send_summary(logpath, message, alternate, camera, site, night):
    ''' sends the summary of the reduction as generated by the function
    reduction_summary under the form of an email with an embedded image
    '''
    import smtplib
    from email.MIMEMultipart import MIMEMultipart
    from email.MIMEText import MIMEText
    from email.MIMEImage import MIMEImage
    import mascara_credentials as creds
    dest = creds.recipients_summary_emails
    #dest = 'lesage@strw.leidenuniv.nl'
    locam = site.sname + camera.sname
    ## 'related' is the MIME subtype for html with embedded cid images:
    msgRoot = MIMEMultipart('related')
    msgRoot['Subject'] = 'MASCARA {} - {} Reduction summary'.format(locam, night)
    msgRoot['From'] = 'MASCARA @ La Palma'
    msgRoot['To'] = ', '.join(dest)
    msgRoot.preamble = 'This is a multi-part message in MIME format'
    msgAlternative = MIMEMultipart('alternative')
    msgRoot.attach(msgAlternative)
    msgText = MIMEText(alternate)
    msgAlternative.attach(msgText)
    if os.path.exists(logpath+'AstroCheck.png'):
        msgText = MIMEText(message+' <br><img src="cid:image1"><br> ' + \
                           ' <br><img src="cid:image2"><br> ', 'html')
        msgAlternative.attach(msgText)
        fp = open(logpath+'AstroCheck.png', 'rb')
        msgImage = MIMEImage(fp.read())
        fp.close()
        msgImage.add_header('Content-ID', '<image1>')
        msgRoot.attach(msgImage)
        imagepath = FindFiles(logpath, 'LCover*', format='.png')
        fp = open(imagepath[0], 'rb')
        msgImage = MIMEImage(fp.read())
        fp.close()
        msgImage.add_header('Content-ID', '<image2>')
        msgRoot.attach(msgImage)
    try:
        mascara_user = creds.mascara_mail_username
        maspwd = creds.mascara_mail_password
        smtpserver = smtplib.SMTP("smtp.gmail.com", 587)
        smtpserver.ehlo()
        smtpserver.starttls()
        smtpserver.login(mascara_user, maspwd)
        #msg = MIMEText(body, 'html')
        #msg['Subject'] = 'MASCARA night summary'
        #msg['From'] = 'MASCARA @ La Palma'
        #msg['To'] = ', '.join(dest)
        smtpserver.sendmail(mascara_user, dest, msgRoot.as_string())
        smtpserver.close()
    except Exception:
        print 'The following email was not sent \n'
        print message

def getDarkFilename(camname, path=''):
    ''' getDarkFilename reads from file the location and name of the last
    valid masterdark saved for a specific camera.
    Inputs:
        - Camera Name, the name of the Camera
        - path, where the file is saved (optional). If not specified, the
          function will look in the current working directory.
    Output:
        the location and name of the last masterdark for that camera.
    '''
    table = os.path.join(path, 'darkTable.txt')   # honour the optional path keyword
    if os.path.exists(table):
        with open(table, 'rb') as f:
            c = f.read()
        lurines = c.split('\n')
        lines = [l for l in lurines if not l.startswith('##') and not l.strip() == '']
        for l in lines:
            if l.find(camname.lower()) >= 0:
                thename, thefilelist = l.split('=')
                thefilelist = thefilelist.split(',')
                darkfilelist = [f.strip() for f in thefilelist]
                return darkfilelist
    else:
        print 'No Look-up Table for the darks found.'
        return

def updateDarkFilename(camname, filename, path=''):
    ''' updateDarkFilename updates the dark look-up table for the given
    camera name.
    Inputs:
        - Camera Name, the name of the Camera
        - the filename of the new valid dark for that camera
        - path, where the file is saved (optional). If not specified, the
          function will look in the current working directory.
    No outputs
    '''
    ## 1. Check that the filename ends with .fits:
    if filename.find('.fits') < 0:
        filename = filename + '.fits'
    table = os.path.join(path, 'darkTable.txt')
    if os.path.exists(table):
        with open(table, 'rb') as f:
            c = f.read()
        lurines = c.split('\n')
        lines = [l for l in lurines if not l.startswith('##') and not l.strip() == '']
        newlines = ['## Name , Location', ]
        for l in lines:
            if l.find(camname.lower()) >= 0:
                filelist = l.split('=')[1].split(',')
                ## Keep a rolling list of at most 5 masterdarks per camera:
                if len(filelist) > 5:
                    filelist.pop(0)
                filelist.append(filename)
                newlines.append('{:<10} = {}'.format(camname.lower(),
                                ', '.join([f.strip() for f in filelist])))
            else:
                newlines.append(l)
        with open(table, 'wb') as f:
            for n in newlines:
                f.write(n + '\n')
        return
    else:
        header = '## Name , Location'
        newline = '{:<10} = {}'.format(camname.lower(), filename)
        with open(table, 'wb') as f:
            f.write(header + '\n')
            f.write(newline)
        return
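### Example (illustrative sketch; the camera name and files are hypothetical).
### darkTable.txt holds one line per camera, e.g.:
###     ## Name , Location
###     lapalmaw   = D:\Darks\masterdark_20150901.fits
# >>> updateDarkFilename('LaPalmaW', 'masterdark_20150902')   # '.fits' is appended
# >>> getDarkFilename('LaPalmaW')
# ['D:\\Darks\\masterdark_20150901.fits', 'masterdark_20150902.fits']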
def FindFiles(path, strm, format='.fits'):
    """List, sorted alphabetically, of the files in the given path directory
    matching the root strm and the given extension (default '.fits')."""
    import glob
    fn = sorted(glob.glob(path + str(strm) + format))
    return fn
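### Example (illustrative sketch; the path is hypothetical). strm may contain
### glob wildcards:
# >>> FindFiles('/data/20150921/', 'LPC*', format='.h5')
# ['/data/20150921/LPC0.h5', '/data/20150921/LPC1.h5']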
def listDirectory(path, root):
    ''' listDirectory works a bit like the os.listdir function, except that
    you can feed it a root and it'll list all directories having that root
    in their names in the given path.
    Inputs:
        - path, a string, giving the location where to look
        - root, a string, giving the root to use for the search
    Output:
        - a list of the directories
    Example:
        >>> dires = listDirectory('D:\\LaPalma\\', '20141114')
    '''
    import subprocess
    import platform
    directories = []
    if platform.system() == 'Windows':
        ps = subprocess.Popen("dir "+path+"*"+root+"* /B", shell=True,
                              stdout=subprocess.PIPE)
        output = ps.stdout.read()
        output = output.split('\r\n')
        for l in output:
            tmp = l.find(root)
            if tmp >= 0:
                directories.append(path+l)
        return directories
    else:
        devnull = open(os.devnull, 'wb')
        ps = subprocess.Popen("ls -d "+path+"*"+root+"*", shell=True,
                              stdout=subprocess.PIPE, stderr=devnull)
        output = ps.stdout.read()
        output = output.split('\n')
        devnull.close()
        if root != '':
            for l in output:
                if l.find(root) > 0:
                    directories.append(l)
            return directories
        else:
            return output
def pickDirectory(dirlist, root):
    ''' picks the right directories from a list using the root as reference
    Inputs:
        - dirlist, a list of directories
        - root, a reference to search for in the list
    '''
    ## With no root to filter on, return the list unchanged:
    if not root:
        return dirlist
    if len(root) >= 2:
        directory = [d for d in dirlist if d.find(root) > 0]
    elif len(root) == 1:
        directory = [d for d in dirlist if d.endswith(root)]
    return directory
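### Example (illustrative sketch; the directory names are hypothetical):
# >>> dires = ['/data/20150920LaPalmaE', '/data/20150920LaPalmaW']
# >>> pickDirectory(dires, 'LaPalmaW')
# ['/data/20150920LaPalmaW']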
def CheckFreeSpace(folder='D:\\'):
    ''' CheckFreeSpace looks how much free space there is in the given
    folder... preferably the hard disk. To be used for overwriting/deleting
    old raw data at the beginning of the night, to avoid getting into
    memory troubles.
    Input:
        folder, a string giving the path of the folder to check
    Output:
        the amount of free space in MB
    '''
    import ctypes as ct
    import platform
    if platform.system() == 'Windows':
        free_bytes = ct.c_ulonglong(0)
        ct.windll.kernel32.GetDiskFreeSpaceExW(ct.c_wchar_p(folder), None, None, \
                                               ct.pointer(free_bytes))
        return free_bytes.value/1024/1024
    else:
        st = os.statvfs(folder)
        return st.f_bavail * st.f_frsize/1024/1024
def MakeDiskSpace(path='D:\\'):
    ''' Removes the oldest folder found in the given path '''
    import shutil
    filelist = [os.path.join(path, f) for f in os.listdir(path)]
    oldest = min(filelist, key=lambda x: os.stat(x).st_ctime)
    shutil.rmtree(oldest)
    return
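### Example (illustrative sketch; the path and threshold are hypothetical).
### Free up the disk until roughly 50 GB are available:
# >>> while CheckFreeSpace('D:\\') < 50000:
# ...     MakeDiskSpace('D:\\')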
def pathexist(*args):
    '''Check the existence of a file or directory at the given path'''
    import os
    # Do we check for a specific file or for a directory?
    if len(args) == 1:
        path = args[0]
        fi = os.path.exists(path)
    if len(args) == 2:
        ## Check for a file inside the given directory (this branch was a
        ## placeholder in earlier versions):
        fi = os.path.exists(os.path.join(args[0], args[1]))
    return fi

def makedir(*args):
    ''' makedir is a substitute for the function os.makedirs, creating,
    if needed, several directories at once.
    Inputs:
        - path, the path root for creating the directories
        - subpaths, the inside directories
    Output:
        - none
    '''
    import os
    path = args[0]
    if len(args) > 1:
        for p in args[1:]:
            if not os.path.exists(p):
                os.makedirs(p)
                print 'Created directory %s' %p
    else:
        if not os.path.exists(path):
            os.makedirs(path)
    return
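### Example (illustrative sketch; the paths are hypothetical). Only the
### directories which do not exist yet are created:
# >>> makedir('/data/20150921/', '/data/20150921/PS/', '/data/20150921/LC/')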
def moveData(frompath, topath, rawpath='D:\\Raw\\', keeprawfiles=False):
    ''' Copy the Data from the directory frompath to the directory topath
    (despite the name, this is a copy: frompath is not deleted here).
    Raw files are diverted to rawpath.
    '''
    import os, shutil
    if keeprawfiles:
        ## shutil.copy cannot copy a directory; copytree also creates topath:
        shutil.copytree(frompath, topath)
    else:
        for r in (os.path.join(frompath, j) for j in os.listdir(frompath)):
            print r
            if r.find('raw') > 0:
                if not os.path.exists(rawpath):
                    os.makedirs(rawpath)
                shutil.copy(r, rawpath)
            else:
                if not os.path.exists(topath):
                    os.makedirs(topath)
                shutil.copy(r, topath)
    return
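### Example (illustrative sketch; the paths are hypothetical). Files with
### 'raw' in their name end up in rawpath, everything else in topath:
# >>> moveData('D:\\tmp\\20150921\\', 'E:\\Reduced\\20150921\\',
# ...          rawpath='E:\\Raw\\20150921\\')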
def cleanup(path, qlog):
    ''' cleanup performs the end of the night operations:
        - delete the temporary LC files so only the Table fits remains
        - move the files to their final destination...
    '''
    import shutil
    import os
    import logging
    import multiprocessing
    logger = logging.getLogger(multiprocessing.current_process().name)
    logger.setLevel(logging.INFO)
    blati = os.listdir(path)
    if len(blati) > 0:
        logger.warning('Some elements remained in tmp directory')
    shutil.rmtree(path)
    return
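### Example (illustrative sketch; the path and the qlog queue are hypothetical):
# >>> cleanup('/tmp/red/tmp/', qlog)   # warns if files remain, then removes the directory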