from astropy.io import fits as pyfits
from astropy.stats import sigma_clipped_stats
import numpy as np
import os
import shutil
from scipy.signal import medfilt
from scipy.ndimage.filters import median_filter
from astropy.time import Time
import math
import sqlite3
import datetime
from stistools.calstis import calstis
from stistools.ocrreject import ocrreject
from stistools.basic2d import basic2d
#--------------------------------------------------------------------------------
def send_email(subject=None, message=None, from_addr=None, to_addr=None):
    '''
    Send an email via SMTP server.

    This will not prompt for login if you are already on the internal network.

    Parameters
    ----------
    subject : str, optional
        subject line; defaults to a message naming this module
    message : str, optional
        body text; defaults to a placeholder
    from_addr : str, optional
        sender address; defaults to <user>@stsci.edu
    to_addr : str, optional
        recipient address; defaults to <user>@stsci.edu
    '''
    import getpass
    import smtplib
    from email.mime.text import MIMEText
    from email.mime.multipart import MIMEMultipart

    users_email = getpass.getuser() + '@stsci.edu'

    if not subject:
        subject = 'Message from %s' % (__file__)
    if not message:
        message = 'You forgot to put a message into me'
    if not from_addr:
        from_addr = users_email
    if not to_addr:
        to_addr = users_email

    svr_addr = 'smtp.stsci.edu'
    msg = MIMEMultipart()
    msg['Subject'] = subject
    msg['From'] = from_addr
    msg['To'] = to_addr
    msg.attach(MIMEText(message))

    s = smtplib.SMTP(svr_addr)
    s.sendmail(from_addr, to_addr, msg.as_string())
    s.quit()

    #-- report the recipient: the original printed from_addr here, which
    #-- mislabeled the sender as the destination
    print('\nEmail sent to %s \n' % (to_addr))
#---------------------------------------------------------------------------
def mjd_to_greg(mjd):
    """Convert a Modified Julian Date to a Gregorian calendar date.

    Uses the Fliegel & Van Flandern integer algorithm (via the USNO Julian
    date form).

    Parameters
    ----------
    mjd : float
        Modified Julian Date

    Returns
    -------
    (Year, Month, Day, fractional_year) : tuple
        calendar date plus the year expressed as a fraction
    """
    #This comes from http://www.usno.navy.mil/USNO/astronomical-applications/astronomical-information-center/julian-date-form
    JD = mjd + 2400000.5
    JD = int(JD)

    #-- every division below must be an *integer* division: under Python 3
    #-- plain '/' returns floats and silently breaks the algorithm
    L = JD + 68569
    N = 4 * L // 146097
    L = L - (146097 * N + 3) // 4
    I = 4000 * (L + 1) // 1461001
    L = L - 1461 * I // 4 + 31
    J = 80 * L // 2447
    K = L - 2447 * J // 80
    L = J // 11
    J = J + 2 - 12 * L
    I = 100 * (N - 49) + I + L

    Year = I
    Month = J
    Day = K

    #-- cumulative day count at the end of the *previous* month (non-leap year)
    month_to_day = {'0': 0, '1': 31, '2': 59, '3': 90, '4': 120, '5': 151,
                    '6': 181, '7': 212, '8': 243, '9': 273, '10': 304,
                    '11': 334, '12': 365}
    tot_day = (month_to_day[str(int(Month) - 1)] + Day)

    day_in_year = 365.0
    #-- NOTE(review): simple mod-4 leap rule (no century correction);
    #-- adequate for the HST era (1990-2099) but not fully general
    if (Month >= 2) & (Year % 4 == 0):  #for leap year
        tot_day = tot_day + 1.0
        day_in_year = 365.0 + 1.0

    frac_day = tot_day / day_in_year
    fractional_year = Year + frac_day

    return (Year, Month, Day, fractional_year)
#------------------------------------------------------------------------
#------------------------------------------------------------------------
def get_start_and_endtimes(input_list):
    """Find the earliest exposure start and latest exposure end in a file list.

    TEXPSTRT and TEXPEND are read from the primary header of each input FITS
    file and the overall extremes returned, both formatted and in MJD.

    Parameters
    ----------
    input_list : list
        FITS files to examine

    Returns
    -------
    start_str, end_str : str
        formatted date strings (see note on ordering below)
    start_mjd, end_mjd : float
        earliest start / latest end times, in MJD
    """
    times = []
    for ifile in input_list:
        times.append(pyfits.getval(ifile, 'texpstrt', 0))
        times.append(pyfits.getval(ifile, 'texpend', 0))
    times.sort()

    start_mjd = times[0]
    end_mjd = times[-1]

    #-- convert the MJD extremes to iso date strings ('yyyy-mm-dd')
    times = Time(times, format = 'mjd', scale = 'utc', out_subfmt = 'date').iso

    start_list = times[0].split('-')
    end_list = times[-1].split('-')

    #-- indices [2]/[1]/[0] of the iso split build dd/mm/yyyy
    #-- NOTE(review): the original comment claimed mm/dd/yyyy, which does not
    #-- match these indices -- confirm which order downstream consumers expect
    start_str = '%s/%s/%s' %( start_list[2], start_list[1], start_list[0])
    end_str = '%s/%s/%s' %(end_list[2], end_list[1], end_list[0])

    return start_str, end_str, start_mjd, end_mjd
#------------------------------------------------------------------------
def make_resicols_image(residual_image, yfrac=1):
    """Collapse a residual image into a column-structure image.

    The first ``yfrac`` fraction of the rows (capped at 1024) is averaged
    along the y axis, and the resulting column profile is replicated row by
    row into a square 2-D image.

    Parameters
    ----------
    residual_image : np.ndarray
        2-D residual image to collapse
    yfrac : float, optional
        fraction of rows (from the bottom) to average; default is all rows

    Returns
    -------
    np.ndarray
        image whose every row is the mean column profile
        (NOTE(review): output is n_cols x n_cols -- assumes a square detector)
    """
    print("Making residual column image")

    row_start = 0
    #-- round-half-up row count, never past row 1024
    row_stop = min(1024, int(np.floor(yfrac * residual_image.shape[0] + .5)))

    column_profile = residual_image[row_start:row_stop].mean(axis=0)
    print(row_start, '-->', row_stop)

    #-- broadcast the 1-D profile down an (n_cols x 1) column of ones
    replicated = np.ones(residual_image.shape[1])[:, np.newaxis] * column_profile
    return replicated
#------------------------------------------------------------------------
def make_residual(mean_bias, kern=(3, 15)):
    """Create residual image.

    Median filter the mean bias with (by default) a 15 x 3 box, shift the
    filtered image so its sigma-clipped mean matches that of the unfiltered
    image, and subtract it from the mean image to produce the residual image.

    Parameters
    ----------
    mean_bias : str
        path to a FITS file with the mean bias in its ('sci', 1) extension
    kern : tuple, optional
        (y, x) size of the median-filter box

    Returns
    -------
    residual_image, median_image : np.ndarray, np.ndarray
        the high-frequency residual and the offset-matched smoothed image
    """
    #-- context manager closes the FITS handle (the original leaked it);
    #-- all work stays inside the block so memmapped data remain valid
    with pyfits.open(mean_bias) as mean_hdu:
        mean_image = mean_hdu[('sci', 1)].data
        median_image = median_filter(mean_image, kern)

        #-- match the clipped means so the subtraction removes only structure,
        #-- not the overall bias level
        medi_mean = sigma_clipped_stats(median_image, sigma=3, maxiters=40)[0]
        mean_mean = sigma_clipped_stats(mean_image, sigma=3, maxiters=40)[0]
        diffmean = mean_mean - medi_mean

        median_image += diffmean
        residual_image = mean_image - median_image

        return residual_image, median_image
#------------------------------------------------------------------------
def normalize_crj(filename):
    """Normalize a cr-rejected image in place by TEXPTIME/ATODGAIN.

    The SCI extension is divided by exptime/gain and the ERR extension by
    its absolute value, after which TEXPTIME is reset to 1 so the file reads
    as a per-second, gain-corrected image.  The file is updated in place and
    flushed on exit.
    """
    with pyfits.open(filename, mode='update') as hdu:
        primary = hdu[0].header
        norm_factor = float(primary['TEXPTIME']) / primary['ATODGAIN']
        print('Normalizing by ', norm_factor)

        hdu[('sci', 1)].data /= norm_factor
        hdu[('err', 1)].data /= abs(norm_factor)

        primary['TEXPTIME'] = 1
#------------------------------------------------------------------------
def msjoin(imset_list, out_name='joined_out.fits'):
    """ Replicate msjoin functionality in pure python

    Concatenate the imsets (SCI/ERR/DQ extension triples) of every file in
    ``imset_list`` into a single FITS file, renumbering EXTVER so the output
    holds one sequential series of imsets.

    Parameters
    ----------
    imset_list : list
        FITS files whose imsets are to be joined; the first file's primary
        header is reused for the output
    out_name : str, optional
        name of the joined output file

    Raises
    ------
    IOError
        if the output file does not exist after writing
    """
    hdu = pyfits.open( imset_list[0] )

    ext_count = 0
    #-- imsets already in the first file; appended imsets number after them
    #-- (3 extensions per imset: SCI, ERR, DQ)
    n_offset = (len( hdu[1:] ) // 3) + 1
    for dataset in imset_list[1:]:
        #-- NOTE(review): the appended HDULists are left open on purpose --
        #-- their (possibly memmapped) data must stay readable until the
        #-- writeto() below
        add_hdu = pyfits.open( dataset )
        for extension in add_hdu[1:]:
            #-- EXTVER advances by one for every completed SCI/ERR/DQ triple
            extension.header['EXTVER'] = (ext_count // 3) + n_offset
            hdu.append( extension )
            ext_count += 1

    hdu[0].header['NEXTEND'] = len( hdu ) - 1
    hdu.writeto(out_name, output_verify='exception', overwrite=True)

    if not os.path.exists(out_name):
        raise IOError('Error in refstis:functions:msjoin() -- output file not written!')
#------------------------------------------------------------------------
def crreject(input_file, workdir=None):
    """Run cosmic-ray rejection (basic2d + ocrreject) on a STIS raw file.

    If CRCORR is already COMPLETE the input is simply renamed to the _crj
    name; otherwise bias-level correction is run (if not yet complete)
    followed by ocrreject.  The cr-rejected image is then divided by the
    number of combined imsets and written to a *_div.fits file.

    Parameters
    ----------
    input_file : str
        path to the raw FITS file to reject
    workdir : str, optional
        unused; kept for backward compatibility

    Returns
    -------
    out_div : str
        path to the cr-rejected, ncombine-normalized output file

    Raises
    ------
    ValueError
        if the file has a single imset and is not already cr-rejected
    Exception
        if basic2d or ocrreject exit with non-zero status
    """
    if not 'oref' in os.environ:
        os.environ['oref'] = '/grp/hst/cdbs/oref/'

    path, name = os.path.split(input_file)
    name, ext = os.path.splitext(name)
    trailerfile = os.path.join(path, name+'_crreject_log.txt')

    output_blev = input_file.replace('.fits','_blev.fits')
    output_crj = input_file.replace('.fits','_crj.fits')

    with pyfits.open(input_file) as hdu:
        #-- integer division: with '/', Python 3 yields a float here and a
        #-- float NRPTEXP would be written back to the header below
        nimset = hdu[0].header['nextend'] // 3
        nrptexp = hdu[0].header['nrptexp']
        crcorr = hdu[0].header['crcorr']
        blevcorr = hdu[0].header['blevcorr']

    if (nimset <= 1 and crcorr != "COMPLETE"):
        print("Sorry, your input image seems to have only 1 imset, but it isn't cr-rejected.")
        print("This task can only handle 'raw' or 'flt images with the NEXTEND keyword equal to 3*N (N > 1).")
        print("Bye now... better luck next time!")
        raise ValueError('nimset <=1 and CRCORR not complete')

    if (crcorr != "COMPLETE"):
        #-- make NRPTEXP consistent with the number of imsets so ocrreject
        #-- combines everything into a single output imset
        if (nrptexp != nimset):
            pyfits.setval(input_file,'NRPTEXP',value=nimset)
            pyfits.setval(input_file,'CRSPLIT',value=1)

        pyfits.setval(input_file, 'CRCORR', value='PERFORM')
        #pyfits.setval(input_file, 'DQICORR', value='PERFORM')
        pyfits.setval(input_file, 'APERTURE', value='50CCD')
        pyfits.setval(input_file, 'APER_FOV', value='50x50')

        if (blevcorr != 'COMPLETE') :
            print('Performing BLEVCORR')
            pyfits.setval(input_file, 'BLEVCORR', value='PERFORM')
            status = basic2d(input_file,
                             output_blev,
                             outblev='',
                             dqicorr='perform',
                             blevcorr='perform',
                             doppcorr='omit',
                             lorscorr='omit',
                             glincorr='omit',
                             lflgcorr='omit',
                             biascorr='omit',
                             darkcorr='omit',
                             flatcorr='omit',
                             photcorr='omit',
                             statflag=False,
                             verbose=False,
                             trailer=trailerfile)
            if status != 0:
                try:
                    #-- echo the calibration trailer so the failure is diagnosable
                    print()
                    with open(trailerfile) as tr:
                        for line in tr.readlines():
                            print(' {}'.format(line.strip()))
                finally:
                    raise Exception('BASIC2D failed to properly reduce {}'.format(input_file))
        else:
            print('Blevcorr already Performed')
            shutil.copy(input_file, output_blev)

        print('Performing OCRREJECT')
        status = ocrreject(input=output_blev,
                           output=output_crj,
                           verbose=False,
                           trailer=trailerfile)
        if status != 0:
            try:
                print()
                with open(trailerfile) as tr:
                    for line in tr.readlines():
                        print(' {}'.format(line.strip()))
            finally:
                raise Exception('OCRREJECT failed to properly reduce {}'.format(output_blev))

    elif (crcorr == "COMPLETE"):
        print("CR rejection already done")
        os.rename(input_file, output_crj)

    pyfits.setval(output_crj, 'FILENAME', value=output_crj)

    with pyfits.open(output_crj) as hdu:
        #-- these keywords are read but unused; kept so a malformed header
        #-- still fails loudly here rather than downstream
        gain = hdu[0].header['atodgain']
        ccdgain = hdu[0].header['ccdgain']
        xsize = hdu[1].header['naxis1']
        ysize = hdu[1].header['naxis2']
        xbin = hdu[0].header['binaxis1']
        ybin = hdu[0].header['binaxis2']

        #-- catch only the missing-keyword case; the original bare except
        #-- would also have hidden unrelated failures
        try:
            ncombine = hdu[0].header['ncombine']
        except KeyError:
            ncombine = hdu[1].header['ncombine']

    print(('Number of combined imsets is '+str(ncombine)+' while number of imsets is '+str(nimset ) ))
    print(('Dividing cosmic-ray-rejected image by '+str(ncombine)+'...'))

    out_div = output_crj.replace('.fits','_div.fits')

    ###this used to be a call to MSARITH, is anything else needed?
    ###modifying the error too (done), etc?
    #-- context manager closes the reopened file (the original leaked it)
    with pyfits.open(output_crj) as hdu:
        hdu[('sci', 1)].data /= ncombine
        hdu[('err', 1)].data /= ncombine
        hdu.writeto(out_div, output_verify='exception', overwrite=True)

    #-- output_blev is only created on the CRCORR != COMPLETE path; guard the
    #-- removal so the already-rejected path does not crash
    if os.path.exists(output_blev):
        os.remove(output_blev)
    os.remove(output_crj)

    return out_div
#------------------------------------------------------------------------
def count_imsets(file_list):
    """Count the total number of imsets in a file list.

    The total number of imsets is counted by dividing the total number
    of extensions (NEXTEND) by 3 (SCI, ERR, DQ).

    Parameters
    ----------
    file_list : list
        list of all files to be counted

    Returns
    -------
    total : int
        number of imsets
    """
    #-- accept a single filename for convenience
    if not isinstance(file_list, list):
        file_list = [file_list]

    total = 0
    for item in file_list:
        #-- integer division: plain '/' under Python 3 would make the
        #-- documented int return value a float
        total += pyfits.getval(item, 'NEXTEND', ext=0) // 3

    return total
#------------------------------------------------------------------------
def get_keyword(file_list, keyword, ext=0):
    """ Return the value of a header keyword shared by a list of files.

    If the value is not consistent across the input files, an
    AssertionError is raised.
    """
    values = {pyfits.getval(item, keyword, ext=ext) for item in file_list}
    assert len(values) == 1, ' multiple values found for kw: % s' % (keyword)
    return values.pop()
#------------------------------------------------------------------------
def get_anneal_month_dates(data_begin, data_end, database_path):
    '''
    This function uses the anneal database to get the dates of the anneal.

    This is written under the assumption that data_begin and data_end fall
    in the same anneal period: an "anneal month" runs from the end of one
    anneal to the start of the next.

    data_begin and data_end are the start and end dates of the data and should
    be in mjd.

    Returns (anneal_month_start, anneal_month_end) as astropy Time objects.
    Raises AssertionError if the inputs are not MJD-like or the data range
    crosses an anneal.
    '''
    assert data_begin > 50000, 'data_begin should be in mjd'
    assert data_end > 50000, 'data_end should be in mjd'

    db = sqlite3.connect(os.path.join(database_path, 'anneal_info.db'))
    c = db.cursor()
    c.execute("""SELECT DISTINCT start, end FROM anneals""")

    rows = [row for row in c]
    anneal_start_date = np.array([row[0] for row in rows])
    anneal_end_date = np.array([row[1] for row in rows])

    #-- last anneal that ended before the data started
    start_indx = np.where(data_begin - anneal_end_date > 0)
    anneal_period_start_indx = start_indx[0][-1] #want to start an anneal month at the end of the anneal
    #-- first anneal that starts after the data ended
    end_indx = np.where(anneal_start_date - data_end > 0)
    anneal_period_end_indx = end_indx[0][0] #want to end an anneal month at the start of the anneal

    anneal_month_start = Time(anneal_end_date[anneal_period_start_indx], format = 'mjd', scale = 'utc')
    anneal_month_end = Time(anneal_start_date[anneal_period_end_indx], format = 'mjd', scale = 'utc')

    #-- consecutive indices means both dates fall in one anneal month
    #-- NOTE(review): Time.val is a legacy astropy attribute (now .value) --
    #-- confirm against the pinned astropy version
    assert anneal_period_end_indx - anneal_period_start_indx == 1, \
        'data cannot cross anneals, data date range [%f - %f], anneal month [%f - %f]' %(data_begin, data_end, anneal_month_start.val, anneal_month_end.val)

    return anneal_month_start, anneal_month_end
#------------------------------------------------------------------------
def divide_anneal_month(data_begin, data_end, database_path, N_period):
    '''
    This function divides an anneal month into anneal weeks and returns
    tuples of the start and end dates of the anneal weeks.

    Parameters
    ----------
    data_begin, data_end : float
        start and end dates of the data, in MJD
    database_path : str
        directory containing the anneal database
    N_period : int
        number of periods ("weeks") to divide the anneal month into

    Returns
    -------
    anneal_weeks : list of (float, float)
        (start, end) MJD pairs covering the whole anneal month
    '''
    anneal_month_start, anneal_month_end = get_anneal_month_dates(data_begin, data_end, database_path)

    total_num_days = anneal_month_end.val - anneal_month_start.val #these are in mjd so you get # of days
    base_num_days = math.floor(total_num_days / N_period)

    #For remaining days, add 1 to each week until all days are gone
    remaining_days = math.floor(total_num_days - base_num_days*N_period)
    remaining_time = total_num_days - base_num_days*N_period - remaining_days

    #-- must be a float array: under Python 3 math.floor() returns an int, so
    #-- an int array here silently truncated the fractional remaining_time
    #-- added to the last period, leaving the weeks short of the month end
    num_days_per_period = np.array([base_num_days for i in range(N_period)], dtype=np.float64)
    num_days_per_period[-1] = num_days_per_period[-1] + remaining_time
    for i, day in enumerate(range(int(remaining_days))):
        num_days_per_period[i] += 1

    #-- walk forward from the month start, one period at a time
    anneal_weeks = []
    wk_end = anneal_month_start.val
    for period in num_days_per_period:
        wk_start = wk_end
        wk_end = wk_start + period
        anneal_weeks.append((wk_start, wk_end))

    return anneal_weeks
#------------------------------------------------------------------------
#------------------------------------------------------------------------
#------------------------------------------------------------------------
def translate_date_string(input_string):
    """Convert a date string like 'Jan 1 2000 00:00:00' to MJD.

    The input must be '<Mon> <day> <year> <HH:MM:SS>' with a three-letter
    English month abbreviation.  The conversion uses the standard
    Gregorian-calendar-to-Julian-Day-Number formula.

    Parameters
    ----------
    input_string : str
        date/time string to convert

    Returns
    -------
    MJD : float
        Modified Julian Date of the given instant
    """
    month_dict = {'Jan': 1,
                  'Feb': 2,
                  'Mar': 3,
                  'Apr': 4,
                  'May': 5,
                  'Jun': 6,
                  'Jul': 7,
                  'Aug': 8,
                  'Sep': 9,
                  'Oct': 10,
                  'Nov': 11,
                  'Dec': 12}

    date_list = input_string.split()
    time_list = date_list[3].split(':')

    month = month_dict[date_list[0]]
    day = int(date_list[1])
    year = int(date_list[2])

    hour = int(time_list[0])
    minute = int(time_list[1])
    second = int(time_list[2])

    #if time_list[3].endswith('PM'):
    #    hour += 12

    #-- Gregorian date -> Julian Day Number: the intermediate terms require
    #-- *integer* division; plain '/' under Python 3 makes a, y, and m floats
    #-- and produces a wrong JDN
    a = (14 - month) // 12
    y = year + 4800 - a
    m = month + 12*a - 3

    JDN = day + (153 * m + 2)//5 + 365*y + y//4 - y//100 + y//400 - 32045
    #-- JDN is the Julian day at *noon*; fold in the time of day
    JD = JDN + (hour - 12)/24.0 + minute/1440.0 + second/86400.0
    MJD = JD - 2400000.5

    return MJD
#------------------------------------------------------------------------
def bd_crreject(joinedfile):
    """ Check if cosmic-ray rejection has been performed on input file

    If cosmic-ray rejection has already been done on the input bias image,
    the file is renamed *_crj and all calstis-related calibration steps can
    be skipped.  Otherwise NRPTEXP/CRSPLIT are made consistent with the
    number of imsets so ocrreject will combine everything.

    Parameters
    ----------
    joinedfile : str
        path to the joined FITS file

    Returns
    -------
    crdone : int
        1 if CR rejection was already complete, 0 otherwise

    Raises
    ------
    ValueError
        if the file holds a single imset that is not cr-rejected
    """
    print(joinedfile)

    #-- context manager closes the file handle (the original leaked it)
    with pyfits.open(joinedfile) as fd:
        #-- integer division: with '/', Python 3 yields a float and a float
        #-- NRPTEXP would be written back to the header below
        nimset = fd[0].header['nextend'] // 3
        nrptexp = fd[0].header['nrptexp']
        crcorr = fd[0].header['crcorr']

    crdone = 0
    if (crcorr == "COMPLETE") :
        crdone = 1
        print('OK, CR rejection already done')
        os.rename(joinedfile, joinedfile.replace('_joined', '_crj') )
    else:
        print(('crcorr found = ' + crcorr))

    if (nimset <= 1 and not crdone):
        print("Sorry, your input image seems to have only 1 imset, but it isn't cr-rejected.")
        print("This task can only handle 'raw' or 'flt images with the NEXTEND keyword equal to 3*N (N > 1).")
        print("Bye now... better luck next time!")
        raise ValueError( 'Something bad happened' )

    if not crdone:
        print('FYI: CR rejection not already done')
        print(('Keyword NRPTEXP = ' + str(nrptexp) + ' while nr. of imsets = ' + str(nimset)))
        if (nrptexp != nimset):
            pyfits.setval( joinedfile,'NRPTEXP',value=nimset)
            pyfits.setval( joinedfile,'CRSPLIT',value=1)

            print(('>>>> Updated keyword NRPTEXP to '+str(nimset) ))
            print(' (and set keyword CRSPLIT to 1)' )
            print((' in ' + joinedfile ))

    return crdone
#------------------------------------------------------------------------
def bd_calstis(joinedfile, thebiasfile=None):
    """ Run CalSTIS on the joined file

    Header keywords will be set for ocrreject to work correctly and not
    flag regions outside the original aperture:
    APERTURE --> 50CCD
    APER_FOV --> '50x50'
    DARKCORR --> 'OMIT'
    FLATCORR --> 'OMIT'

    Parameters
    ----------
    joinedfile : str
        join of multiple input darks
    thebiasfile : str, bool
        the biasfile to be subtracted by basic2d

    Raises
    ------
    Exception
        if calstis exits with non-zero status
    """
    #-- force CR rejection and neutral calibration switches before calstis
    with pyfits.open(joinedfile, 'update') as hdu:
        hdu[0].header['CRCORR'] = 'PERFORM'
        hdu[0].header['APERTURE'] = '50CCD'
        hdu[0].header['APER_FOV'] = '50x50'
        hdu[0].header['DARKCORR'] = 'OMIT'
        hdu[0].header['FLATCORR'] = 'OMIT'

        if thebiasfile:
            hdu[0].header['BIASFILE'] = thebiasfile

    crj_file = joinedfile.replace('.fits', '_crj.fits')

    path, name = os.path.split(joinedfile)
    name, ext = os.path.splitext(name)
    trailerfile = os.path.join(path, name+'_bd_calstis_log.txt')

    #-- remove stale outputs so calstis starts from a clean slate
    if os.path.exists(crj_file):
        print('Deleting old file: %s' % crj_file)
        os.remove(crj_file)

    if os.path.exists(trailerfile):
        print('Deleting old file: %s' % trailerfile)
        os.remove(trailerfile)

    print('Running CalSTIS on %s' % joinedfile)
    print('to create: %s' % crj_file)

    status = calstis(joinedfile,
                     wavecal="",
                     outroot="",
                     savetmp=False,
                     verbose=False,
                     trailer=trailerfile)

    if status != 0:
        try:
            #-- echo the trailer so the calstis failure is diagnosable
            print()
            with open(trailerfile) as tr:
                for line in tr.readlines():
                    print(' {}'.format(line.strip()))
        finally:
            raise Exception('CalSTIS failed to properly reduce {}'.format(joinedfile))

    pyfits.setval(crj_file, 'FILENAME', value=os.path.split(crj_file)[1])
#------------------------------------------------------------------------
def RemoveIfThere(item):
    """Remove a file only if it already exists

    Parameters
    ----------
    item : str
        file to be removed

    Examples
    --------
    >>> RemoveIfThere('/path/to/file.txt')
    """
    #-- guard clause: nothing to do for a path that isn't there
    if not os.path.exists(item):
        return
    os.remove(item)
#------------------------------------------------------------------------
def refaver(reffiles, combined_name):
    """Average two reference files together using itools msarith.

    Each input is divided by 2 and the halves are summed, which is
    equivalent to (a + b) / 2.  All work is done in the inputs' directory;
    the process returns to its starting directory afterwards.

    Parameters
    ----------
    reffiles : list
        List of reference files to be averaged together
    combined_name : str
        Output name of the combined file
    """
    from .msarith import msarith

    print('#-----------------------#')
    print('combining datasets')
    print(reffiles)
    print('into')
    print(combined_name)
    print('#-----------------------#')

    all_paths = {os.path.split(item)[0] for item in reffiles}
    assert len(all_paths) == 1, "More than one path found"

    initial_dir = os.getcwd()
    if list(all_paths)[0] != '':
        os.chdir(list(all_paths)[0])

    #-- restore the starting directory even when msarith fails (the original
    #-- left the process stranded in the data directory on error)
    try:
        all_subfiles = []
        for subfile in reffiles:
            subfile = os.path.split(subfile)[-1]
            outfile = subfile.replace('.fits', '_aver.fits')

            print("Running (itools) msarith / 2 on {}".format(subfile))
            msarith(subfile, '/', 2, outfile, verbose=True)
            all_subfiles.append(outfile)

        assert len(all_subfiles) == 2, 'Length of subfiles doesnt equal 2: {}'.format(all_subfiles)

        if not combined_name.endswith('.fits'):
            combined_name = combined_name + '.fits'

        #-- remove path from output name
        combined_name = os.path.split(combined_name)[-1]

        msarith(all_subfiles[0], '+', all_subfiles[1], combined_name, verbose=True)

        for filename in all_subfiles:
            os.remove(filename)
    finally:
        #-- move back to beginning location
        os.chdir(initial_dir)
#------------------------------------------------------------------------
def apply_dark_correction(filename, expstart):
    """Perform temperature scaling to input dark file

    All science extensions in the input filename will be scaled to the
    reference temperature of 18.0 C; each error extension is scaled by the
    same factor.  TEMPCORR is set to 'COMPLETE' afterwards so the correction
    is only ever applied once.

    Parameters
    ----------
    filename : str
        full path to input FITS file
    expstart : str
        start time in MJD of the dataset (currently unused; kept for
        backward compatibility)
    """
    #-- fractional dark-rate change per degree C and the Side-2 reference
    #-- temperature (STIS TIR 2004-01)
    dark_v_temp = 0.07
    s2ref_temp = 18.0

    with pyfits.open(filename, mode = 'update') as ofile:
        if 'tempcorr' not in ofile[0].header:
            nextend = ofile[0].header['nextend']
            #-- SCI extensions sit at 1, 4, 7, ... (SCI/ERR/DQ triples)
            for ext in np.arange(1, nextend, 3):
                occdhtav = ofile[ext].header['OCCDHTAV']
                factor = 1.0 / (1.0 + dark_v_temp * (float(occdhtav) - s2ref_temp))

                ofile[ext].data = ofile[ext].data * factor
                print('{}, ext {}: Scaling data by '.format(filename, ext), factor, ' for temperature: ', occdhtav)
                ofile[ext+1].data = np.sqrt((ofile[ext+1].data)**2 * (factor**2)) #Modify the error array

                #-- history entry: the data are *divided* by the temperature
                #-- term; the original message incorrectly said 'data *'
                ofile[ext].header.add_history('File scaled for Side-2 temperature uncertainty by data / (1.0 + %f * (%f - %f)) following description in STIS TIR 2004-01' %(dark_v_temp, occdhtav, s2ref_temp))

            ofile[0].header['tempcorr'] = 'COMPLETE'
        else:
            print('TEMPCORR = %s, no temperature correction applied to %s' %(ofile[0].header['tempcorr'], filename))
#-------------------------------------------------------------------------------
def bias_subtract_data(filename, biasfile):
    """Perform bias subtraction on input dataset

    basic2d from calstis will be run on the input dataset with the
    steps DQICORR, BLEVCORR, and BIASCORR set to perform.

    Parameters
    ----------
    filename : str
        full path to input FITS file
    biasfile : str
        full path to the bias FITS file to be subtracted

    Returns
    -------
    filename : str
        full path to the bias subtracted (flt) file, or the input filename
        unchanged when bias subtraction was already complete

    Raises
    ------
    Exception
        if basic2d exits with non-zero status
    """
    #-- skip files that are already bias-corrected
    with pyfits.open(filename) as hdu:
        if (hdu[0].header['BLEVCORR'] == 'COMPLETE') or (hdu[0].header['BIASCORR'] == 'COMPLETE'):
            print("BIAS correction already done for {}".format(filename))
            return filename

    #-- remove stale calibration products so basic2d writes fresh ones
    if os.path.exists(filename.replace('raw', 'flc')):
        os.remove(filename.replace('raw', 'flc'))
    elif os.path.exists(filename.replace('raw', 'flt')):
        os.remove(filename.replace('raw', 'flt'))

    path, name = os.path.split(filename)
    name, ext = os.path.splitext(name)
    trailerfile = os.path.join(path, name + '_bias_subtract_log.txt')

    #-- keep the BIASFILE value short enough for a single FITS keyword
    biasfile = make_path_safe(biasfile)
    pyfits.setval(filename, 'BIASFILE', ext=0, value=biasfile, comment='')

    status = basic2d(filename,
                     dqicorr='perform',
                     blevcorr='perform',
                     biascorr='perform',
                     doppcorr='omit',
                     lorscorr='omit',
                     glincorr='omit',
                     lflgcorr='omit',
                     darkcorr='omit',
                     flatcorr='omit',
                     photcorr='omit',
                     verbose=False,
                     trailer=trailerfile)

    if status != 0:
        try:
            #-- echo the trailer so the failure is diagnosable
            print()
            with open(trailerfile) as tr:
                for line in tr.readlines():
                    print(' {}'.format(line.strip()))
        finally:
            raise Exception('BASIC2D failed to properly reduce {}'.format(filename))

    #-- basic2d writes its result as the flt product
    filename = filename.replace('raw', 'flt')
    return filename
#-------------------------------------------------------------------------------
def make_path_safe(filename):
    """Make a full path to file safe for use in FITS headers.

    Paths shorter than 67 characters are returned unchanged.  Longer paths
    are split: the directory is exported into the environment as 'refdir'
    and 'refdir$filename' is returned.  This will prevent the filename from
    being split accross multiple FITS keywords, and is a convention
    understood by many tasks/pipelines.

    Parameters
    ----------
    filename : str
        Full path + name to the file.

    Returns
    -------
    filename : str
        Safe filename that can fit in a single FITS keyword.

    Examples
    --------
    >>> make_path_safe('/short/path/reference_file.fits')
    /short/path/reference_file.fits

    >>> make_path_safe('/really/really/really/really/really/really/really/really/reference_file.fits')
    refdir$reference_file.fits
    """
    #-- short enough for a single keyword: pass through untouched
    if len(filename) < 67:
        return filename

    directory, basename = os.path.split(filename)

    #-- Calstis wants the '/' at the end
    if not directory.endswith('/'):
        directory = directory + '/'

    os.environ['refdir'] = directory
    return 'refdir$' + basename
#-------------------------------------------------------------------------------