# Copyright 2014 - 2015 Anna-Lea Lesage for MASCARA
#
# This file is part of the mascara package.
# mascara is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# mascara is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with mascara. If not, see <http://www.gnu.org/licenses/>.
'''
.. module:: masc
.. moduleauthor:: Anna-Lea Lesage
MASCARA specific functions. It includes:
- SiteReference, a mini-class to provide the local sidereal time at the
  MASCARA reference site.
- tmpdbSave, saves a dictionary into a temporary dataframe/h5 file
- CombineH5, combines the temporary h5 files into one big hdf5 file
- indexArray, builds the indexes (record arrays) used for the various arrays
- CombinePkl, combines pickle files into fits files
- LCtable, makes a binary fits table with the light curves
- lstseq2date, computes the observation date back from the lst-sequence index
- buildTransmissionMap, builds an image of the transmission from existing dictionaries
- light_curve_overview, plots a few fits files and saves the plot
- reduction_summary, constructs from the log file a summary of the reduction
- send_summary, sends the summary via email to the mascara team
- getDarkFilename, gets the name and location of the last good masterdark
- updateDarkFilename, writes to file the name and location of the last good masterdark
'''
__version__ = '15.10.02'
import numpy as np
import os
import warnings
warnings.filterwarnings('ignore')
class SiteReference(object):
    ''' SiteReference is a very simplified version of the Site class in :mod:`mascara.observer`.
    Its main purpose is to compute the local sidereal time at the reference site, which
    is located on La Palma (but is not exactly the position of the La Palma mascara station).
    '''
def __init__(self):
''' Initialize the SiteReference
Inputs: None
'''
import pytz
self.__lat = 28.7611
self.__long = -17.8780
self._elev = 2364.0
self.name = "ReferenceSite"
self.sname = "RF"
self.timezone = pytz.timezone("Atlantic/Canary")
    def local_sidereal_time(self, date, texp, lstindex=True, lstsequence=True, lst=True, quick=False):
        ''' Compute the local sidereal time at the reference site.
        Inputs:
            - date, a datetime object, a jd, or an array of either
            - texp, the exposure time (used for computing the index
              and the sequence number)
        Keywords:
            - lstindex, lstsequence, lst, booleans selecting which quantities are returned
            - quick, compute only the lstseq, without passing via the LST;
              the resulting lstseq is correct in 95% of the cases (else it diverges by 1)
        Outputs:
            - lst, the local sidereal time
            - lstidx, the lst index in a day
            - lstseq, the exposure sequence number since 01.01.2013
        '''
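        # A minimal usage sketch (illustrative only; the 6.4 s exposure time is
        # an assumed value, not taken from the MASCARA configuration):
        #
        #     site = SiteReference()
        #     lst, lstidx, lstseq = site.local_sidereal_time(datetime.utcnow(), 6.4)
        #     seq_only = site.local_sidereal_time(datetime.utcnow(), 6.4,
        #                                         lstindex=False, lst=False, quick=True)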
from datetime import datetime, timedelta
from mascara.observer.coordinates import greenwich_mean_sidereal_time
from mascara.funcs.timing import calendar2jd, jd2calendar
__iscalendar = False
if np.isscalar(date):
if type(date) == datetime:
jd = calendar2jd(date)
__iscalendar = True
            elif isinstance(date, (float, np.float64)):
jd = date
else:
raise SyntaxError('The date format is not recognized')
## 3. or is it an array of dates?
elif type(date) == datetime:
jd = calendar2jd(date)
__iscalendar = True
elif type(date) == np.ndarray:
if date.dtype.name == 'object':
jd = calendar2jd(date)
__iscalendar = True
elif date.dtype.name == 'float64':
jd = date
else:
raise SyntaxError('The date format is not recognized')
else:
raise SyntaxError('The date format is not recognized')
if not quick:
if np.size(jd) > 1:
LST = np.ravel(np.array([(greenwich_mean_sidereal_time(j, True) \
+ self.__long/15)%24 for j in jd]))
else:
LST = (greenwich_mean_sidereal_time(jd,True) + (self.__long)/15 ) % 24
if lstindex:
stidx = LST*3600/np.around(texp, decimals=1)
if lstsequence:
from mascara.constants import iniday, lstday
if __iscalendar:
then = date
else:
then = jd2calendar(jd)
oneday = timedelta(hours=24)
onelstday = timedelta(hours=lstday)
indexperday = int(oneday.total_seconds()/np.around(texp, decimals=1))
getdif = np.vectorize(lambda x:(x - iniday).total_seconds())
if not quick:
daylst = getdif(then)/onelstday.total_seconds() // 1
if texp < 1.:
lpstseq = -1
else:
lpstseq = np.array(daylst*indexperday + \
LST*3600/np.around(texp, decimals=1)//1, dtype=long)
else:
daylst = getdif(then)/onelstday.total_seconds()
lpstseq = np.array(daylst*indexperday //1 , dtype=long)
LST = np.zeros(np.size(lpstseq))
        ### If there is only one date as input, transform the outputs into scalars
if np.size(LST) ==1 :
LST = np.asscalar(LST)
if lstindex:
stidx = np.asscalar(stidx)
if lstsequence:
lpstseq = np.asscalar(lpstseq)
### Do output
if lst:
if lstindex and not lstsequence:
return LST, stidx
elif lstsequence and not lstindex:
return LST, lpstseq
elif lstindex and lstsequence:
return LST, stidx, lpstseq
else:
return LST
else:
if lstindex and not lstsequence:
return stidx
elif lstsequence and not lstindex:
return lpstseq
elif lstindex and lstsequence:
return stidx, lpstseq
def tmpdbSave(*args, **kwargs):
    ''' tmpdbSave saves the content of a dictionary into a database (with pandas) in h5 format.
    Inputs:
        - dico, a dictionary
        - qlog, a Queue object used for logging
        - (Path, the location path of the database)
        - (ndb, the database number)
    If only (dico, qlog) are given the data are written to ./tmpdb.hdf; with
    (Path, dico, ndb, qlog) they are written to Path + 'tmpdb<ndb>.h5'.
    '''
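    # A minimal calling sketch (names are hypothetical; 'stars' is a dictionary
    # of per-star arrays and 'qlog' the logging Queue used by the caller):
    #
    #     tmpdbSave(stars, qlog)                    # -> ./tmpdb.hdf
    #     tmpdbSave('/data/tmp/', stars, 3, qlog)   # -> /data/tmp/tmpdb3.h5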
import logging
import multiprocessing
import gc
import pandas as pd
from datetime import datetime
case = len(args)
if case ==2:
dico, qlog = args
logger = logging.getLogger(multiprocessing.current_process().name)
#logger.setLevel(logging.DEBUG)
to = datetime.now()
try:
df = pd.DataFrame(dico)
df.to_hdf('tmpdb.hdf', 'tmpdb', mode='w')
except Exception:
logger.error('Failure at creating the table!!!', exc_info=True)
dto = (datetime.now() - to).total_seconds()
logger.info('Saved %g entries in %g seconds' %(len(dico), dto))
return
else:
Path, dico, ndb, qlog = args
logger = logging.getLogger(multiprocessing.current_process().name)
#logger.setLevel(logging.DEBUG)
to = datetime.now()
try:
df = pd.DataFrame(dico)
            df = df.convert_objects(convert_dates=True, convert_numeric=True)
            logger.debug('Transferring dict to pandas in %g s' %(datetime.now()-to).total_seconds())
store = pd.HDFStore(Path + 'tmpdb'+str(ndb)+'.h5', 'w')
store.put('tmpdb'+str(ndb), df)
store.close()
logger.debug('Saved Stellar dict')
except Exception:
logger.error('Failure at creating the table!!!', exc_info=True)
store.close()
df = None
return
dto = (datetime.now() - to).total_seconds()
logger.info('Saved %g entries in %g seconds' %(len(dico), dto))
del df
gc.collect()
return
def CombineH5(savepath, listdb, globdic, name = 'LPC', minobs=51):
''' CombineH5 combines several .h5 files into one final file.
It goes through the given directory, and merges the files together.
Inputs:
- savepath, the path where to save the hdf5 file once constructed
- listdb, a list of the temporary h5 files (including their location)
        - globdic, a header-like dictionary with global information about the observation,
          for instance the site, the camera, the CCD temperature: things that affect all
          the stars in the same way
Keywords:
- name, the name of the final file
- minobs, the minimal numbers of observation points a star should have to be
saved in the hdf5 file
Outputs:
- None, the file is written to disk
'''
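    # A minimal calling sketch (paths, file names and values are illustrative):
    #
    #     tmpfiles = ['/data/tmp/tmpdb0.h5', '/data/tmp/tmpdb1.h5']
    #     globdic = {'site': 'LaPalma', 'camera': 'LPC', 'naper': 2}
    #     CombineH5('/data/red/', tmpfiles, globdic, name='LPC', minobs=51)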
if np.size(listdb)==0 or listdb is None:
return
import h5py
import pandas as pd
datdic = {}
hdrdic = {}
### Keep the reference cnx0 in memory and update it with the new databases
for i, ldb in enumerate(listdb):
store = pd.HDFStore(ldb, 'r', complevel=5, complib='zlib')
itsname = os.path.basename(ldb).split('.')[0]
cnx = store.get(itsname)
store.close()
keys0 = datdic.keys()
for k, v in cnx.iteritems():
if len(v)==0:
continue
if k in keys0:
d = datdic[k]
try:
d = np.vstack((d, v[8]))
except ValueError:
pass
datdic[k] = d
tmp = hdrdic[k]
tmp[8] = d.shape[0]
hdrdic[k] = tmp
else:
datdic[k] = v[8]
hdrdic[k] = [v[0], v[1], v[2], v[3],v[4], v[5], v[6], v[7], 1]
try:
naper = globdic['naper']
    except (KeyError, TypeError):
naper = 2
### Put in indexArray a condition to reject entries which do not have enough points
datdic, reject = indexArray(datdic, naper=naper, mode='data', minobs=minobs)
for r in reject:
datdic.pop(r)
hdrdic.pop(r)
hdrdic, tmp = indexArray(hdrdic, mode='header', minobs=minobs)
jdstart, ra, dec, vmag, bmag, spectype, blend, blendvalue, nobs = tmp
#### Save it all to hdf5 file
try:
myfile = h5py.File(os.path.join(savepath, name+'.hdf5'), 'w')
hdrgrp = myfile.create_group('header')
for k, v in hdrdic.iteritems():
hdrgrp[k]=v
datagrp = myfile.create_group('data')
for k,v in datdic.iteritems():
datagrp[k]=v
headertab = myfile.create_group('header_table')
headertab.create_dataset('ascc', data=hdrdic.keys())
headertab.create_dataset('jdstart', data=jdstart)
headertab.create_dataset('ra', data=ra)
headertab.create_dataset('dec', data=dec)
headertab.create_dataset('bmag', data=bmag)
headertab.create_dataset('vmag', data=vmag)
headertab.create_dataset('spectype', data=spectype)
headertab.create_dataset('blend', data=blend)
headertab.create_dataset('blendvalue', data=blendvalue)
headertab.create_dataset('nobs', data=nobs)
glob = myfile.create_group('global')
for k, v in globdic.iteritems():
glob.attrs[k]=v
myfile.flush()
finally:
myfile.close()
return
def indexArray(cnx, naper=2, mode='data', minobs=1):
    ''' indexArray transforms the arrays of a dictionary into record arrays.
The entries of the record array are :
- in data mode:
            - flag (as an unsigned integer going from 0 to 255)
- flux0 (as a double float)
- eflux0 (as a double float)
- flux1 ...
- eflux1 ...
- ...
- sky (as a double float)
- esky (as a double float)
- peak (as a double float)
- x (as a single float)
- y (as a single float)
- alt (as a single float)
- az (as a single float)
- ccdtemp (as a single float)
- exptime (as a double float)
- jdmid (as a double float)
- lst (as a double float)
- lstidx (as an unsigned 32-bits integer)
- lstseq (as an unsigned 64-bits integer)
- in header mode:
- jdstart, jd date of start of observation of the star
- ra, Right Ascension of the star
- dec, Declination of the star
- vmag, Visible magnitude of the star
- bmag, Blue magnitude of the star
- spectype, Spectral type of the star
- blend, if the star is blended with another
- blendvalue, the value of the blend (0 if not blended, 1 if overlapping)
- nobs, the number of observations of that star during the night
    Inputs:
        - cnx, a dictionary of arrays
    Keywords:
        - naper, the number of apertures used
        - mode, 'data' or 'header'
        - minobs, the minimal number of observations required to keep an entry
    Outputs:
        - the dictionary with its entries converted to record arrays, and either
          the list of rejected entries ('data' mode) or the stacked header
          columns ('header' mode)
'''
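    # Sketch of the expected usage in 'data' mode (the ASCC number and the array
    # are illustrative); each value of cnx is a 2-D array with one row per
    # exposure and one column per quantity listed above (2*naper + 14 columns):
    #
    #     recs, rejected = indexArray({'807144': arr}, naper=2, mode='data', minobs=51)
    #     recs['807144']['flux0']    # flux in the first aperture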
if mode=='data':
dtypes = ['u1',] # Unsigned integer going from 0 to 255 for the flag
for i in range(0, naper*2+3):
dtypes.append(np.float64) # Float 64-bits double-precision for the flux, errors and sky
dtypes.append('f4') # Float 32-bits single-precision for the X position
dtypes.append('f4') # Float "" "" for the Y position
dtypes.append('f4') # Float 32-bits "" "" for the altitude
dtypes.append('f4') # Float 32-bits "" "" for the azimuth
dtypes.append('f4') # Float 32-bits "" "" for the CCD temperature
dtypes.append('f8') # Float 64-bits for the exposure time
        dtypes.append('f8') # Float 64-bits for the Julian date
dtypes.append('f8') # Float 64-bits for the lst in hours
dtypes.append('u4') # Unsigned integer 32-bits for the lst-index
dtypes.append('u8') # Unsigned integer 64-bits for the lst-sequence
names = ['flag', ]
base = 'flux'
erbase = 'eflux'
for i in range(0, naper):
names.append(base+str(i))
names.append(erbase+str(i))
names.append('sky')
names.append('esky')
names.append('peak')
names.append('x')
names.append('y')
names.append('alt')
names.append('az')
names.append('ccdtemp')
names.append('exptime')
names.append('jdmid')
names.append('lst')
names.append('lstidx')
names.append('lstseq')
mytypes = zip(names, dtypes)
### List of the entries which don't have enough data points:
reject = []
### Loop over all the entries
for k, arr in cnx.iteritems():
arlist = []
dim = arr.shape
### Keep only the entries with more points than minobs
if len(dim)>1 and dim[0] >=minobs:
for l in range(arr.shape[1]):
arlist.append(arr[:,l])
rec = np.rec.fromarrays(arlist, dtype=mytypes)
cnx[k]=rec
else:
reject.append(k)
return cnx, reject
elif mode == 'header':
jdstart = np.array([])
ra = np.array([])
dec = np.array([])
vmag = np.array([])
bmag = np.array([])
spectype = np.array([], dtype='S9')
blend = np.array([],dtype='S1')
blendvalue = np.array([])
nobs = np.array([])
tmp = [jdstart, ra, dec, vmag, bmag, spectype, blend, blendvalue, nobs]
dtypes = [float, float, float, float, float, 'S9', 'S1', float, float]
names = ['jdstart', 'ra', 'dec', 'vmag', 'bmag', 'spectype', 'blend', 'blendvalue','nobs']
mytypes = zip(names, dtypes)
for k, arr in cnx.iteritems():
arlist = []
for i, l in enumerate(arr):
arlist.append(np.array([l], dtype=dtypes[i]))
tmp[i] = np.hstack((tmp[i], np.array([l], dtype=dtypes[i])))
cnx[k] = np.rec.fromarrays(arlist, dtype=mytypes)
return cnx, tmp
def CombinePkl(Path, savepath, stid):
''' CombinePkl combines several .pkl files into one final fits file.
It goes through the given directory, and merges the files together.
Inputs:
        - Path, the path to search for the pkl files
        - savepath, the path where to save the fits file
        - stid, an identifier for the files (to avoid mixing pkl files of 2 different stars together)
'''
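    # A minimal calling sketch (paths and identifier are illustrative); the .pkl
    # files and a Header<stid>.txt file are expected to be present in Path:
    #
    #     hdr = CombinePkl('/data/tmp/807144/', '/data/red/fLC/', '807144')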
import shutil
import pyfits as pf
import pandas as pd
import mascara.funcs.mio as mio
nn = mio.find_files(os.path.join(Path, ''), stid+'*', format='.pkl')
if len(nn)>0:
dat = pd.read_pickle(nn[0])
jddate = dat[0]
ra, dec, mag, bmag = dat[1], dat[2], dat[3], dat[4]
sptype = dat[5]
flagblended = dat[6]
blendedvalue= dat[7]
header = pf.Header()
header = header.fromtextfile(os.path.join(Path,'Header'+stid+'.txt'))
header['RAJ2000'] = (ra, 'Right Ascension in deg')
header['DEJ2000'] = (dec, 'Declination in deg')
header['VMAG'] = (mag, 'Apparent V magnitude')
header['BMAG'] = (bmag, 'Apparent B magnitude')
header['SPTYPE'] = (sptype, 'Spectral Type')
header['FLAG'] = (flagblended, 'Flag')
header['BLENDVAL'] = (blendedvalue, 'Blending value')
header['JDSTART'] = (jddate, 'Start Observation in JD')
os.remove(os.path.join(Path,'Header'+stid+'.txt'))
## Read from header the number of used apertures.
naper = header['NAPER']
flc = dat.get(8)
for n in nn[1:]:
dat = pd.read_pickle(n)
try:
flc = np.vstack((flc, dat.get(8)))
except ValueError:
continue
for n in nn:
os.remove(n)
dim = flc.shape
if len(dim) == 2:
fnobs = flc.shape[0]
### ATTENTION HERE , flc is (naper*2 + 17, 50)
elif len(dim) == 1:
fnobs = 1.
flc = np.reshape(flc, (1, dim[0]))
header['NOBS'] = (fnobs, 'Number of images in the file')
base = "flux"
erbase = "eflux"
names = ["flag"]
# ----------------------------------------------------------------
# The final content of the Photometry array is:
# flag, (flux, erflux)*Naperture, sky, ersky, peak, xc, yc,
# altitude, azimuth, CCD Temperature,
# exposure time, JDdate, lstdate, lpstid, lpstseq
        # Total number of elements: 2*naper + 14
# ----------------------------------------------------------------
for i in range(0, naper):
names.append(base+str(i))
names.append(erbase+str(i))
names.append("sky")
names.append("esky")
names.append('peak')
names.append("Xc")
names.append("Yc")
names.append("alt")
names.append("az")
names.append('ccdtemp')
names.append("exptime")
names.append("jdmid")
names.append("lpst")
names.append("lpstid")
names.append("lpstseq")
names = np.array(names)
units = ['None']
for i in range(0, naper*2+2):
units.append('e-')
units.append('ADU')
for i in xrange(2):
units.append('pixels')
units.append('degree')
units.append('degree')
units.append('Celsius')
units.append('sec')
units.append('JD days')
units.append('hours')
units.append('LaPalma index image')
units.append('Sequence index')
cols = pf.ColDefs([pf.Column(name=nom, format="D",array=flc[:,i], unit=units[i]) \
for i,nom in enumerate(names)])
tbhdu = pf.new_table(cols)
prihdu = pf.PrimaryHDU(header=header)
thdulist = pf.HDUList([prihdu, tbhdu])
thdulist.writeto(savepath+stid + "LC.fits", clobber=True)
shutil.rmtree(Path)
thdulist.close()
return header
def LCtable(savingPath, ID, dat, header):
""" LCtable saves the inputs into a binary fits table
Inputs:
- the saving path
- the ID, or name to save the file
- the content of the file
- the generic header
"""
import pyfits as pf
jddate = dat[0]
ra, dec, mag, bmag = dat[1], dat[2], dat[3], dat[4]
sptype = dat[5]
flagblended = dat[6]
blendedvalue= dat[7]
header['RAJ2000'] = (ra, 'Right Ascension in deg')
header['DEJ2000'] = (dec, 'Declination in deg')
header['VMAG'] = (mag, 'Apparent V magnitude')
header['BMAG'] = (bmag, 'Apparent B magnitude')
header['SPTYPE'] = (sptype, 'Spectral Type')
header['FLAG'] = (flagblended, 'Flag')
header['BLENDVAL'] = (blendedvalue, 'Blending value')
header['JDSTART'] = (jddate, 'Start Observation in JD')
## Read from header the number of used apertures.
naper = header['NAPER']
flc = dat.get(8)
dim = flc.shape
if len(dim) == 2:
fnobs = dim[0]
elif len(dim) == 1:
fnobs = 1.
flc = np.reshape(flc, (1, dim[0]))
header['NOBS'] = (fnobs, 'Number of images in the file')
base = "flux"
erbase = "eflux"
names = ["flag"]
# ----------------------------------------------------------------
# The final content of the Photometry array is:
# flag, (flux, erflux)*Naperture, sky, ersky, xc, yc,
# xpre, ypre, altitude, azimuth, phi, theta, radius,
# exposure time, JDdate, lstdate, lpstid, lpstseq
    # Total number of elements: 2*naper + 17
# ----------------------------------------------------------------
for i in range(0, naper):
names.append(base+str(i))
names.append(erbase+str(i))
names.append("sky")
names.append("esky")
names.append("Xc")
names.append("Yc")
names.append("Xa")
names.append("Ya")
names.append("alt")
names.append("az")
names.append("phi")
names.append("theta")
names.append("r")
names.append("exptime")
names.append("jdmid")
names.append("lpst")
names.append("lpstid")
names.append("lpstseq")
names = np.array(names)
units = ['None']
for i in range(0, naper*2+2):
units.append('e-')
for i in xrange(4):
units.append('pixels')
units.append('degree')
units.append('degree')
units.append('degree')
units.append('degree')
units.append('pixels')
units.append('sec')
units.append('JD days')
units.append('hours')
units.append('LaPalma index image')
units.append('Sequence index')
cols = pf.ColDefs([pf.Column(name=nom, format="D",array=flc[:,i], unit=units[i]) \
for i,nom in enumerate(names)])
tbhdu = pf.new_table(cols)
prihdu = pf.PrimaryHDU(header=header)
thdulist = pf.HDUList([prihdu, tbhdu])
thdulist.writeto(savingPath+ ID + "LC.fits", clobber=True)
thdulist.close()
return
def lstseq2date(lstseq, texp, format='calendar'):
''' lstseq2date reverse computes from the lstseq the observation date.
Inputs:
- lstseq, the sequence number
- texp, the exposure time
Keyword:
- format, 'jd' or 'calendar'
Output:
- the date in JD or datetime format
'''
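    # A minimal usage sketch (the sequence numbers and the 6.4 s exposure time
    # are illustrative); as written the function iterates over its first
    # argument, so a list or array of sequence numbers is expected:
    #
    #     dates = lstseq2date([12345678, 12345679], 6.4, format='calendar')
    #     jds = lstseq2date([12345678, 12345679], 6.4, format='jd')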
from mascara.constants import iniday, lstday
from datetime import datetime, timedelta
## Define the constants
oneday = timedelta(hours=24)
onelstday = timedelta(hours=lstday)
indexperday = int(oneday.total_seconds()/np.around(texp, decimals=1))
## Convert the lstseq into seconds
getseconds = np.vectorize(lambda x:x*onelstday.total_seconds()/indexperday)
times = np.array([timedelta(seconds=seq) + iniday for seq in getseconds(lstseq)])
if format == 'calendar':
return times
elif format == 'jd':
from mascara.funcs.timing import calendar2jd
return calendar2jd(times)
def buildTransmissionMap(*args, **kwargs):
''' buildTransmissionMap constructs from existing dictionaries an image of the
transmission.
Inputs:
        - dicto, a dictionary containing the information for constructing the
          map: {quad index: [list of (observed magnitude - apparent magnitude),
          per aperture]}
        - nulx, nuly, two scalars giving the dimensions of the map
    Keywords:
        - dimension, the number of planes (e.g. one per aperture) the map should
          have. Default is 1.
        - writetofile, boolean. If True, the map(s) are median-filtered and written
          to disk in fits format; the extra positional arguments (hdr, path, name)
          are then required.
'''
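    # A minimal calling sketch (grid size and names are illustrative); with
    # writetofile=True the extra positional arguments (hdr, path, name) are
    # required, with writetofile=False the map is returned instead:
    #
    #     tmap = buildTransmissionMap(dicto, 167, 134, dimension=2, writetofile=False)
    #     buildTransmissionMap(dicto, 167, 134, hdr, '/data/red/', 'transmap', dimension=2)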
    from scipy.ndimage import filters
    import pyfits as pf
dimension = kwargs.pop('dimension', 1)
writeto = kwargs.pop('writetofile', True)
if writeto:
dicto, nulx, nuly, hdr, path, name = args
else:
dicto, nulx, nuly = args
TransMap = np.zeros([dimension, nuly, nulx])
for k, v in dicto.iteritems():
TransMap[:, k/nulx, k%nulx] = np.median(np.array(v), axis=0)
if writeto:
TransMap = filters.median_filter(TransMap, size=3, mode='nearest')
pf.writeto(os.path.join(path, name + '.fits'), TransMap, hdr, clobber=True)
else:
return TransMap
def light_curve_overview(Path, night, filename = 'LCoverview', mrange=[4.75, 4.8]):
''' selects within the given magnitude range the stars which have been observed.
Saves a plot at the end.
    Inputs:
        - Path, the location of the light curves
        - night, the night identifier used in the plot title
    Keywords:
        - filename, the name for saving the image. Default is 'LCoverview'
        - mrange, the magnitude range of the stars to be plotted
'''
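    # A minimal calling sketch (path and night are illustrative); Path is
    # expected to contain 'fLC', 'sLC' and 'logs' sub-directories:
    #
    #     light_curve_overview('/data/red/20150402LPC/', '20150402LPC',
    #                          filename='LCoverview', mrange=[4.75, 4.8])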
import matplotlib.pyplot as pl
import pyfits as pf
import itertools
from mascara.funcs.mio import find_files
flc = find_files(os.path.join(Path,'fLC',''), '*LC')
slowLC = find_files(os.path.join(Path, 'sLC', ''), '*LC')
pl.figure(1, figsize = (12, 8))
ax00 = pl.subplot(1,1,1)
pos1 = ax00.get_position()
pos2 = [pos1.x0-0.02, pos1.y0, pos1.width/1.2, pos1.height/1.1]
ax00.set_position(pos2)
marker = itertools.cycle(('+','x','.', '*'))
smarker = itertools.cycle(('o', 's', 'v', 'd'))
color = itertools.cycle(('k', 'b', 'g', 'r', 'c', 'y', 'm'))
for f in flc:
fname = os.path.basename(f)
a = fname.split('LC')[0]
tab = pf.getdata(f)
col = color.next()
pl.plot(tab['jdmid'], tab['flux0'], linestyle = '', marker=marker.next(), color=col, label = a)
for s in slowLC:
if s.find(a)>=0:
stab = pf.getdata(s)
pl.plot(stab['jdmid'], stab['flux0'], linestyle = '', marker=smarker.next(), mec=col, mfc='w' )
pl.title('Overview of {}'.format(night))
pl.legend(numpoints =3, title='Stellar ASCC', bbox_to_anchor=(1.05, 1.), loc=2, borderaxespad=0.)
pl.savefig(os.path.join(Path,'logs',filename+'.png'), dpi=200, facecolor='w', \
orientation='landscape', papertype='A5')
pl.close()
return
def reduction_summary(logpath, site, camera, night, date, format='html'):
''' Prepare a quick summary of the reduction.
Includes the possible errors and warnings
Includes the image generated at the end of the night from the astrometry
'''
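    # A minimal calling sketch (objects and values are illustrative); 'site' and
    # 'camera' are the mascara site and camera objects used by the reduction,
    # and 'date' is a datetime giving the end of the reduction:
    #
    #     html_msg, plain_msg = reduction_summary('/data/red/logs/', site, camera,
    #                                             '20150402LPC', datetime.now())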
from mascara.funcs.mio import find_files
logfile = find_files(logpath, 'reduc*', format='.log')
if len(logfile) == 0:
if format == 'html':
message = '''
<!DOCTYPE html>
<html lang=\"en\">
<head><meta name=\"format-detection\" content=\"telephone=no\">
<meta name=\"viewport" content=\"width=device-width, initial-scale=0.8\">
<style>
        div {text-align: left;font-size: 14px;padding-left: 20px;}
</style>
</head>
<body>
'''
### Prepare the main message:
message += '<div> Hi all, <br></br>'
message += '{:*^96} <br></br>'.format(' Reduction summary ')
message += '{:^96} <br></br>'.format(' Site: {}, Camera: {}, Night {}, finished at {} '.format(\
site.name, camera.name, night, date.strftime('%Y-%m-%d %H:%M')))
message += '{:*^96} <br></br>'.format('')
### Prepare the alternate message:
message_alternate = '{:*^96} \n'.format(' Reduction summary ')
message_alternate+= '{:^96} \n'.format(' Site: {}, camera: {}, Night: {}, finished at {} '.format(\
site.name, camera.name, night, date.strftime('%Y-%m-%d %H:%M')))
message_alternate+='{:*^96} \n \n'.format('')
message += 'No log file was found for that night <br> </br>'
message_alternate += 'No log file was found for that night '
return message, message_alternate
with open(logfile[0], 'r') as f:
log = f.read()
### Get from the log file the possible errors and warning
loglines = log.split('\n')
errors = []
warnings = []
astro = []
End_of_night = False
End_of_saving = False
Saved_FLC = False
Solved_astro = False
nimages = 0
check = camera.readAstroCheck(make_plot=False)
names = ['xbl', 'xtl', 'xbr', 'xtr', 'ybl', 'ytl', 'ybr', 'ytr']
ptv = np.zeros(8)
for i, name in enumerate(names):
tmp = (check[name] - np.median(check[name]))*check['rate']
ptv[i] = tmp.max() - tmp.min()
goodastro=True
if (ptv > 0.5).nonzero()[0].size >= 4:
goodastro=False
for l in loglines:
if l.find('ERROR')>0:
errors.append(l)
elif l.find('WARNING')>0:
warnings.append(l)
elif l.find('end_of_night')>0:
End_of_night=True
elif l.find('saved_fast_LC')>0:
Saved_FLC = True
elif l.find('saved_all_LC')>0:
End_of_saving = True
if l.find('Time needed to process')>0:
nimages+=1
if l.find('solved_the_astrometry')>0:
astro.append(l)
Solved_astro = True
if format == 'html':
message = '''
<!DOCTYPE html>
<html lang=\"en\">
<head><meta name=\"format-detection\" content=\"telephone=no\">
<meta name=\"viewport" content=\"width=device-width, initial-scale=0.8\">
<style>
        div {text-align: left;font-size: 14px;padding-left: 20px;}
</style>
</head>
<body>
'''
message += '<div> Hi all, <br></br>'
message += '{:*^96} <br></br>'.format(' Reduction summary ')
message += '{:^96} <br></br>'.format(' Site: {}, Camera: {}, Night: {}, finished at {} '.format(\
site.name, camera.name, night, date.strftime('%Y-%m-%d %H:%M')))
message += '{:*^96} <br></br>'.format('')
### Prepare the message:
message_alternate = '{:*^96} \n'.format(' Reduction summary ')
message_alternate+= '{:^96} \n'.format(' Site: {}, camera: {}, Night: {}, finished at {} '.format(\
site.name, camera.name, night, date.strftime('%Y-%m-%d %H:%M')))
message_alternate+='{:*^96} \n \n'.format('')
if End_of_night:
message += ' The reduction went well <br></br>'
message_alternate +=' The reduction went well \n'
message += ' {} images were reduced this night <br></br>'.format(nimages)
message_alternate += ' {} images were reduced this night \n'.format(nimages)
if not goodastro:
message += ' The astrometry did not converge properly <br>'
message_alternate +=' The astrometry did not converge properly \n'
if Solved_astro:
message+=' Solved the astrometry {} times <br> </br>'.format(len(astro))
message_alternate+=' Solved the astrometry {} times \n'.format(len(astro))
if Saved_FLC:
            message += ' All fast light curves have been saved <br></br>'
message_alternate += ' All fast light curves have been saved \n'
if End_of_saving:
message += ' All the light curves have been saved <br></br>'
message_alternate+=' All the light curves have been saved \n'
else:
message_alternate+=' No light curve was saved \n'
        message += ' {} errors occurred during the reduction: <br></br>'.format(len(errors))
        message_alternate+=' {} errors occurred during the reduction: \n'.format(len(errors))
for er in errors:
message += er + '<br>'
message_alternate+=er + '\n'
        message +=' {} warnings occurred during the reduction: <br></br>'.format(len(warnings))
        message_alternate += ' {} warnings occurred during the reduction: \n'.format(len(warnings))
for war in warnings:
message += war + '<br>'
message_alternate+=war + '\n'
message += 'Cheers, <br></br>'
message_alternate += ' Cheers, '
message +='</body>'
return message, message_alternate
else:
### Prepare the message:
message_alternate = '{:*^96} \n'.format(' Reduction summary ')
message_alternate+= '{:^96} \n'.format(' Site: {}, Camera: {}, at {} '.format(site.name, \
camera.name, date.strftime('%Y-%m-%d %H:%M')))
message_alternate+='{:*^96} \n \n'.format('')
if End_of_night:
message_alternate +=' The reduction went well \n'
if End_of_saving:
message_alternate+=' All the light curves have been saved \n'
else:
message_alternate+=' No light curve was saved \n'
        message_alternate+=' {} errors occurred during the reduction: \n'.format(len(errors))
for er in errors:
message_alternate+=er + '\n'
        message_alternate += ' {} warnings occurred during the reduction: \n'.format(len(warnings))
for war in warnings:
message_alternate+=war + '\n'
return message_alternate
def send_summary(logpath, message, alternate, camera, site, night):
''' sends the summary of the reduction as generated by the function reduction_summary
under the form of an email with an embedded image
'''
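    # A minimal calling sketch (illustrative); 'html_msg' and 'plain_msg' are the
    # two strings returned by reduction_summary, and the recipients and mail
    # credentials are taken from mascara.reduction.credentials:
    #
    #     send_summary('/data/red/logs/', html_msg, plain_msg, camera, site, '20150402LPC')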
import smtplib
from email.MIMEMultipart import MIMEMultipart
from email.MIMEText import MIMEText
from email.MIMEImage import MIMEImage
from mascara.reduction import credentials as creds
from mascara.funcs.mio import find_files
dest = creds.recipients_summary_emails
#dest = 'lesage@strw.leidenuniv.nl'
locam = site.sname+camera.sname
msgRoot = MIMEMultipart('main')
msgRoot['Subject'] = 'MASCARA {} - {} Reduction summary'.format(locam, night)
msgRoot['From'] = 'MASCARA @ La Palma'
msgRoot['To'] = ', '.join(dest)
msgRoot.preamble = 'This is a multi-part test message in MIME format'
msgAlternative = MIMEMultipart('alternative')
msgRoot.attach(msgAlternative)
msgText = MIMEText(alternate)
msgAlternative.attach(msgText)
if os.path.exists(logpath+'AstroCheck.png'):
msgText = MIMEText(message+' <br><img src="cid:image1"><br> ' + \
' <br><img src="cid:image2"><br> ', 'html')
msgAlternative.attach(msgText)
fp = open(logpath+'AstroCheck.png', 'rb')
msgImage = MIMEImage(fp.read())
fp.close()
msgImage.add_header('Content-ID', '<image1>')
msgRoot.attach(msgImage)
imagepath = find_files(logpath, 'LCover*', format='.png')
fp = open(imagepath[0], 'rb')
msgImage=MIMEImage(fp.read())
fp.close()
msgImage.add_header('Content-ID', '<image2>')
msgRoot.attach(msgImage)
try:
mascara_user = creds.mascara_mail_username
maspwd = creds.mascara_mail_password
smtpserver = smtplib.SMTP("smtp.gmail.com", 587)
smtpserver.ehlo()
smtpserver.starttls()
smtpserver.login(mascara_user, maspwd)
#msg = MIMEText(body, 'html')
#msg['Subject'] = 'MASCARA night summary'
#msg['From'] = 'MASCARA @ La Palma'
#msg['To'] = ', '.join(dest)
smtpserver.sendmail(mascara_user, dest, msgRoot.as_string())
smtpserver.close()
    except Exception:
print 'The following email was not sent \n'
print message
def getDarkFilename(camname, path=''):
''' getDarkFilename reads from file the location and name of the last valid masterdark
saved for a specific camera.
Inputs:
- Camera Name, the name of the Camera
- path, where the file is saved (optional). If not specified, the
function will look in the current working directory.
Output: the location and name of the last masterdark for that camera.
'''
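    # A minimal usage sketch (camera name and path are illustrative):
    #
    #     darks = getDarkFilename('lpc', path='/data/calib/')
    #     if darks is not None:
    #         lastdark = darks[-1]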
    darktable = os.path.join(path, 'darkTable.txt')
    if os.path.exists(darktable):
        with open(darktable, 'rb') as f:
c = f.read()
lurines = c.split('\n')
lines = [l for l in lurines if not l.startswith('##') if not l.strip()=='']
for l in lines:
if l.find(camname.lower())>=0:
thename, thefilelist= l.split('=')
thefilelist = thefilelist.split(',')
darkfilelist = [f.strip() for f in thefilelist]
return darkfilelist
else:
print 'No Look-up Table for the darks found.'
return
def updateDarkFilename(camname, filename, path=''):
''' updateDarkFilename updates the dark look-up table for the given camera name
Inputs:
- Camera Name, the name of the Camera
- the filename of the new valid dark for that camera
- path, where the file is saved (optional). If not specified, the
function will look in the current working directory.
No outputs
'''
## 1. Check that the filename ends with .fits:
if filename.find('.fits') <0:
filename = filename+ '.fits'
    darktable = os.path.join(path, 'darkTable.txt')
    if os.path.exists(darktable):
        with open(darktable, 'rb') as f:
c = f.read()
lurines = c.split('\n')
lines = [l for l in lurines if not l.startswith('##') if not l.strip()=='']
newlines = ['## Name , Location', ]
for l in lines:
if l.find(camname.lower())>=0:
filelist = l.split('=')[1].split(',')
if len(filelist) >5:
filelist.pop(0)
filelist.append(filename)
newlines.append('{:<10} = {}'.format(camname.lower(),
', '.join(['%s']*len(filelist))%tuple(filelist)))
else:
newlines.append(l)
        with open(darktable, 'wb') as f:
for n in newlines:
f.write(n +'\n')
return
else:
header = '## Name , Location'
newline = '{:<10} = {}'.format(camname.lower(), filename)
        with open(darktable, 'wb') as f:
f.write(header + '\n')
f.write(newline)
return