Dataset schema (one row per source file):
repo : string (lengths 2 to 99)
file : string (lengths 13 to 225)
code : string (lengths 0 to 18.3M)
file_length : int64 (0 to 18.3M)
avg_line_length : float64 (0 to 1.36M)
max_line_length : int64 (0 to 4.26M)
extension_type : string (1 class)
repo | file | code | file_length | avg_line_length | max_line_length | extension_type
---|---|---|---|---|---|---
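A minimal sketch for working with a dump in this schema, assuming it is published as a Hugging Face dataset (the dataset id below is a placeholder, not the real one):

from datasets import load_dataset  # hypothetical loading sketch

ds = load_dataset("user/python-code-dump", split="train")  # columns: repo, file, code, ...
short_files = ds.filter(lambda row: row["file_length"] < 10_000)  # keep only short files
print(short_files[0]["repo"], short_files[0]["file"])

The rows below follow this schema; per-file statistics appear after each code block.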
Neural-Stochastic-Control
|
Neural-Stochastic-Control-main/Neural Stochastic Control/hyper_b/plot.py
|
import numpy as np
import matplotlib.pyplot as plt
import torch # needed for torch.load below; previously supplied only via the star imports
from V_plot import *
from u_plot import *
from plot_trajectory import *
# import matplotlib
# matplotlib.rcParams['font.sans-serif'] = 'NSimSun,Times New Roman'
# matplotlib.rcParams['text.usetex'] = True
font_size = 15
A = torch.load('./data/hyper_b/data.pt')[:,9:14,:,:] # pick trajectories corresponding to b = 1.9, 2.0, 2.1, 2.2, 2.3
# print(A.shape)
def plot_grid():
plt.grid(True, which='major', color='gray', alpha=0.6, linestyle='dashdot', lw=1.5) # pass visibility positionally; the 'b=' keyword was removed in matplotlib 3.6
# minor grid lines
plt.minorticks_on()
plt.grid(True, which='minor', color='beige', alpha=0.8, ls='-', lw=1)
def plot_b(b):
L = np.load('./data/hyper_b/b_{}.npy'.format(b))
r_L = np.zeros(1000-len(L))
L = np.concatenate((L,r_L),axis=0)
# np.concatenate((a,b),axis=0)
plt.plot(np.arange(len(L)),L,'b')
plt.ylim(-1.0,25)
plt.title('b = {}'.format(b))
plt.xticks([0,400,800])
plt.yticks([])
for i in range(5):
plt.subplot(4, 5, i+1)
plot_b(1.9+i*0.1)
plot_grid()
if i == 0:
plt.yticks([0,10,20])
plt.ylabel('Loss',fontsize=font_size)
plt.text(-5,5,'Training',rotation=90,fontsize=font_size)
else:
plt.yticks([0, 10, 20], ['', '', ''])
if i == 2:
plt.xlabel('Iterations',fontsize=font_size)
for i in range(5):
plt.subplot(4, 5, 5 + i+1)
plot_trajec(A[0,i,:,0:10000:10],1.9+i*0.1)
plot_grid()
if i == 0:
plt.yticks([-10,-5,0,5,10])
plt.ylabel(r'$\theta$',fontsize=font_size)
plt.text(-1,-5,'Trajectory',rotation=90,fontsize=font_size)
else:
plt.yticks([-10,-5, 0,5, 10], ['', '', '','',''])
if i == 2:
plt.xlabel('Time',fontsize=font_size)
for i in range(5):
plt.subplot(4, 5, 10 + i+1)
drawV(1.9+i*0.1)
if i == 0:
plt.yticks([-5,0,5])
plt.ylabel(r'$\dot{\theta}$',fontsize=font_size)
plt.text(-15,-5,'Lyapunov V',rotation=90,fontsize=font_size)
if i == 2:
plt.xlabel(r'$\theta$',fontsize=font_size)
plt.colorbar()
for i in range(5):
plt.subplot(4, 5, 15 + i+1)
draw(1.9+i*0.1)
if i == 0:
plt.yticks([-5,0,5])
plt.ylabel(r'$\dot{\theta}$',fontsize=font_size)
plt.text(-15,-3,'Control u',rotation=90,fontsize=font_size)
if i == 2:
plt.xlabel(r'$\theta$',fontsize=font_size)
plt.colorbar()
plt.show()
| 2,394 | 26.848837 | 106 |
py
|
Neural-Stochastic-Control
|
Neural-Stochastic-Control-main/Neural Stochastic Control/harmonic/plot_loss.py
|
import numpy as np
import matplotlib.pyplot as plt
import torch
import pylustrator
pylustrator.start()
import seaborn as sns
sns.set_theme(style="whitegrid")
L1 = torch.load('./data/harmonic/loss_icnn.pt')[2:] # drop the first two large values
L2 = torch.load('./data/harmonic/loss_quad.pt')
L3 = torch.load('./data/harmonic/loss_AS.pt')
plt.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=0.5, hspace=0.5)
plt.subplot(231)
plt.plot(np.arange(len(L1)),L1,'b')
plt.ylim(-0.1,13)
plt.title('ES+ICNN')
plt.subplot(232)
plt.plot(np.arange(len(L2)),L2,'b')
plt.ylim(-0.1,13)
plt.title('ES+Quad')
plt.subplot(233)
plt.plot(np.arange(len(L3)),L3,'b')
plt.ylim(-0.1,13)
plt.title('AS')
plt.subplot(234)
plt.plot(np.arange(len(L1)),L1,'b')
plt.ylim(-0.1,1)
plt.subplot(235)
plt.plot(np.arange(len(L2)),L2,'b')
plt.ylim(-0.1,1)
plt.subplot(236)
plt.plot(np.arange(len(L3)),L3,'b')
plt.ylim(-0.1,1)
#% start: automatic generated code from pylustrator
plt.figure(1).ax_dict = {ax.get_label(): ax for ax in plt.figure(1).axes}
import matplotlib as mpl
plt.figure(1).set_size_inches(14.340000/2.54, 6.370000/2.54, forward=True)
plt.figure(1).axes[0].set_xlim(-8.3, 174.3)
plt.figure(1).axes[0].set_xticks([0.0, 50.0, 100.0, 150.0])
plt.figure(1).axes[0].set_xticklabels(["0", "50", "100", "150"], fontsize=11.0, fontweight="normal", color=".15", fontstyle="normal", fontname="Arial", horizontalalignment="center")
plt.figure(1).axes[0].grid(False)
plt.figure(1).axes[0].set_position([0.095838, 0.201885, 0.255811, 0.697830])
plt.figure(1).axes[0].get_xaxis().get_label().set_text("Iterations")
plt.figure(1).axes[0].get_yaxis().get_label().set_text("Loss")
plt.figure(1).axes[1].set_xlim(-7.75, 162.75)
plt.figure(1).axes[1].set_xticks([0.0, 50.0, 100.0, 150.0])
plt.figure(1).axes[1].set_xticklabels(["0", "50", "100", "150"], fontsize=11.0, fontweight="normal", color=".15", fontstyle="normal", fontname="Arial", horizontalalignment="center")
plt.figure(1).axes[1].grid(False)
plt.figure(1).axes[1].set_position([0.409361, 0.201885, 0.255811, 0.697830])
plt.figure(1).axes[1].lines[0].set_color("#4c72b0")
plt.figure(1).axes[1].lines[0].set_markeredgecolor("#4c72b0")
plt.figure(1).axes[1].lines[0].set_markerfacecolor("#4c72b0")
plt.figure(1).axes[1].get_xaxis().get_label().set_text("Iterations")
plt.figure(1).axes[2].set_xlim(-9.200000000000001, 193.2)
plt.figure(1).axes[2].set_xticks([0.0, 50.0, 100.0, 150.0])
plt.figure(1).axes[2].set_xticklabels(["0", "50", "100", "150"], fontsize=11.0, fontweight="normal", color=".15", fontstyle="normal", fontname="Arial", horizontalalignment="center")
plt.figure(1).axes[2].grid(False)
plt.figure(1).axes[2].set_position([0.722885, 0.201885, 0.255811, 0.697830])
plt.figure(1).axes[2].lines[0].set_color("#4c72b0")
plt.figure(1).axes[2].lines[0].set_markeredgecolor("#4c72b0")
plt.figure(1).axes[2].lines[0].set_markerfacecolor("#4c72b0")
plt.figure(1).axes[2].get_xaxis().get_label().set_text("Iterations")
plt.figure(1).axes[3].grid(False)
plt.figure(1).axes[3].set_position([0.198784, 0.478804, 0.152863, 0.321584])
plt.figure(1).axes[3].lines[0].set_color("#4c72b0")
plt.figure(1).axes[3].lines[0].set_markeredgecolor("#4c72b0")
plt.figure(1).axes[3].lines[0].set_markerfacecolor("#4c72b0")
plt.figure(1).axes[4].grid(False)
plt.figure(1).axes[4].set_position([0.512309, 0.478804, 0.152863, 0.321584])
plt.figure(1).axes[4].lines[0].set_color("#4c72b0")
plt.figure(1).axes[4].lines[0].set_markeredgecolor("#4c72b0")
plt.figure(1).axes[4].lines[0].set_markerfacecolor("#4c72b0")
plt.figure(1).axes[5].grid(False)
plt.figure(1).axes[5].set_position([0.828954, 0.463271, 0.149744, 0.337116])
plt.figure(1).axes[5].lines[0].set_color("#4c72b0")
plt.figure(1).axes[5].lines[0].set_markeredgecolor("#4c72b0")
plt.figure(1).axes[5].lines[0].set_markerfacecolor("#4c72b0")
#% end: automatic generated code from pylustrator
plt.show()
| 3,889 | 45.86747 | 181 |
py
|
Neural-Stochastic-Control
|
Neural-Stochastic-Control-main/Neural Stochastic Control/harmonic/AS.py
|
import torch
import torch.nn.functional as F
import timeit
class Net(torch.nn.Module):
def __init__(self,n_input,n_hidden,n_output):
super(Net, self).__init__()
torch.manual_seed(2)
self.layer1 = torch.nn.Linear(n_input, n_hidden)
self.layer2 = torch.nn.Linear(n_hidden,n_hidden)
self.layer3 = torch.nn.Linear(n_hidden,n_output)
def forward(self,x):
relu = torch.nn.ReLU()
h_1 = relu(self.layer1(x))
h_2 = relu(self.layer2(h_1))
out = self.layer3(h_2)
return out
# Drift function
def harmonic(x):
y = []
beta = 0.5
for i in range(0,len(x)):
f = [x[i,1],-x[i,0]-2*beta*x[i,1]]
y.append(f)
y = torch.tensor(y)
return y
# Add control
def harmonic_control(x,u):
y = []
k1,k2 = -3,2.15
for i in range(0,len(x)):
f = [0.0,k1*x[i,0]+k2*x[i,1]]
y.append(f)
y = torch.tensor(y)
y[:,0] = y[:,0] + u[:,0]
y[:,1] = y[:,1] + u[:,1]
return y
'''
For learning
'''
N = 500 # sample size
D_in = 2 # input dimension
H1 = 6 # hidden dimension
D_out = 2 # output dimension
torch.manual_seed(10)
x = torch.Tensor(N, D_in).uniform_(-6, 6)
x_0 = torch.zeros_like(x)
theta = 0.75
out_iters = 0
while out_iters < 1:
# break
start = timeit.default_timer()
model = Net(D_in,H1, D_out)
i = 0
t = 0
max_iters = 200
learning_rate = 0.05
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
L = []
while i < max_iters:
# start = timeit.default_timer()
out = model(x)
u = out*x
f = harmonic(x)
g = harmonic_control(x,u)
# Both loss formulations work; the elementwise version below is used, the matrix form is kept for reference
# loss = (2-theta)*torch.diagonal(torch.mm(x,g.T))**2-torch.diagonal(torch.mm(x,x.T))*torch.diagonal(2*torch.mm(x,f.T)+torch.mm(g,g.T))
loss = (2-theta)*((x*g)**2)-x**2*(2*x*f+g**2)
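# Hedged note on the loss above: with dx = f(x) dt + g(x) dW taken componentwise,
# F.relu(-loss) penalizes violations of
#   (2 - theta) * (x*g)^2 - x^2 * (2*x*f + g^2) >= 0,
# which appears to be the almost-sure (AS) stabilization condition with decay
# parameter theta used throughout this repo.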
Lyapunov_risk = (F.relu(-loss)).mean()
L.append(Lyapunov_risk)
print(i, "Lyapunov Risk=",Lyapunov_risk.item())
optimizer.zero_grad()
Lyapunov_risk.backward()
optimizer.step()
if Lyapunov_risk < 2.0:
optimizer = torch.optim.Adam(model.parameters(), lr=0.01)
else:
optimizer = torch.optim.Adam(model.parameters(), lr=0.1)
if Lyapunov_risk == 0:
break
# stop = timeit.default_timer()
# print('per :', stop-start)
i += 1
stop = timeit.default_timer()
print('\n')
print("Total time: ", stop - start)
out_iters+=1
# torch.save(torch.tensor(L), './data/harmonic/loss_AS.pt')
# torch.save(model.state_dict(), './data/harmonic/algo2_net.pkl')
| 2,766 | 26.39604 | 143 |
py
|
Neural-Stochastic-Control
|
Neural-Stochastic-Control-main/Neural Stochastic Control/harmonic/generate.py
|
import numpy as np
import math
import torch
import timeit
from AS import *
from Control_Nonlinear_Icnn import *
start = timeit.default_timer()
# Harmonic linear oscillator
model = Net(D_in,H1,D_out)
# Generate trajectory with nonlinear AS control
def algo2(z,X,N,dt):
model = Net(D_in,H1,D_out)
model.load_state_dict(torch.load('./data/harmonic/algo2_net.pkl'))
beta = 0.5
for i in range(N):
x = X[i]
with torch.no_grad():
u = model(torch.tensor(x))
# -model((torch.tensor([0.0,0.0])))
x1,x2 = x[0],x[1]
new_x1 = x1 + x2*dt + math.sqrt(dt)*z[i]*u[0]*x1
new_x2 = x2 + (-x1-2*beta*x2)*dt + z[i]*(-3*x1+2.15*x2+u[1]*x2)*math.sqrt(dt)
# new_x1 = x1 + x2*dt + math.sqrt(dt)*z[i]*u[0]
# new_x2 = x2 + (-x1-2*beta*x2)*dt + z[i]*(-3*x1+2.15*x2+u[1])*math.sqrt(dt)
X.append([new_x1,new_x2])
X = torch.tensor(X)
return X
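# The loop above is the Euler--Maruyama discretization of the controlled SDE
# dX = f(X) dt + g(X) dW:
#   X_{n+1} = X_n + f(X_n)*dt + g(X_n)*sqrt(dt)*z_n,   z_n ~ N(0, 1),
# where the learned control u enters through the diffusion coefficient g.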
# Generate trajectory with linear ES(+Quadratic) control
def algo1(z,X,N,dt,a,b,c,d):
beta = 0.5
for i in range(N):
x = X[i]
x1,x2 = x[0],x[1]
new_x1 = x1 + x2*dt + math.sqrt(dt)*z[i]*(a*x1+b*x2)
new_x2 = x2 + (-x1-2*beta*x2)*dt + z[i]*(-3*x1+2.15*x2+c*x1+d*x2)*math.sqrt(dt)
X.append([new_x1,new_x2])
X = torch.tensor(X)
return X
# Generate trajectory with nonlinear ES(+ICNN) control
def algo_icnn(z,X,N,dt):
model2 = ControlNet(D_in,H1,D_out)
model2.load_state_dict(torch.load('./data/harmonic/icnn_net.pkl'))
beta = 0.5
for i in range(N):
x = X[i]
with torch.no_grad():
u = model2(torch.tensor(x))
x1,x2 = x[0],x[1]
new_x1 = x1 + x2*dt + math.sqrt(dt)*z[i]*u[0]*x1
new_x2 = x2 + (-x1-2*beta*x2)*dt + z[i]*(-3*x1+2.15*x2+u[1]*x2)*math.sqrt(dt)
X.append([new_x1,new_x2])
X = torch.tensor(X)
return X
def generate(m,N,dt):
X,Y,Z,W = torch.zeros(m,N+1,2),torch.zeros(m,N+1,2),torch.zeros(m,N+1,2),torch.zeros(m,N+1,2)
for r in range(m):
# x0 = [0.3,0.5] #Fixed initial
x0 = [np.random.uniform(-2,2),np.random.uniform(-2,2)] #random initial
np.random.seed(12*r)
z = np.random.normal(0,1,N)
X[r,:] = algo1(z,[x0],N,dt,0,0,0,0) # Without control
Y[r,:] = algo_icnn(z,[x0],N,dt)
Z[r,:] = algo1(z,[x0],N,dt,1.726,-0.4946,2.0548,0.3159) #Quadratic 2.2867,0.3492,1.593,-0.4191 61.6973088
W[r,:] = algo2(z,[x0],N,dt)
print(r)
return {'X':X,'Y':Y,'Z':Z,'W':W}
# Number of sample trajectories, Euler iterations per trajectory, and time step: 20, 400000, 0.00001
torch.save(generate(20,400000,0.00001),'./data/harmonic/data_long.pt')
torch.save(generate(20,400000,0.00001),'./data/harmonic/data_long_random.pt')
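# Note: as written, both saves above use the random-initial x0 inside generate();
# to reproduce the fixed-initial dataset, swap the commented x0 = [0.3,0.5] line back in.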
stop = timeit.default_timer()
print('total time:',stop-start)
| 2,835 | 33.585366 | 113 |
py
|
Neural-Stochastic-Control
|
Neural-Stochastic-Control-main/Neural Stochastic Control/harmonic/ES_ICNN.py
|
import torch
import torch.nn.functional as F
import timeit
from hessian import hessian
from hessian import jacobian
from Control_Nonlinear_Icnn import *
# Drift function
def harmonic(x):
y = []
beta = 0.5
for i in range(0,len(x)):
f = [x[i,1],-x[i,0]-2*beta*x[i,1]]
y.append(f)
y = torch.tensor(y)
return y
# Add stochastic control
def harmonic_control(x,u):
y = []
k1,k2 = -3,2.15
for i in range(0,len(x)):
f = [0.0,k1*x[i,0]+k2*x[i,1]]
y.append(f)
y = torch.tensor(y)
y[:,0] = y[:,0] + u[:,0]
y[:,1] = y[:,1] + u[:,1]
return y
'''
For learning
'''
N = 500 # sample size
D_in = 2 # input dimension
H1 = 6 # hidden dimension
D_out = 2 # output dimension
torch.manual_seed(10)
x = torch.Tensor(N, D_in).uniform_(-6, 6)
eps = 0.001
out_iters = 0
while out_iters < 1:
# break
start = timeit.default_timer()
model = LyapunovFunction(D_in,H1,D_out,(D_in,),0.1,[6,6,1],eps=eps) # pass eps by keyword: positionally the value would land in the unused lr argument
i = 0
t = 0
max_iters = 200
learning_rate = 0.1
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
L = []
while i < max_iters:
# start = timeit.default_timer()
output, u = model(x)
f = harmonic(x)
g = harmonic_control(x,u)
x = x.clone().detach().requires_grad_(True)
ws = model._icnn._ws
bs = model._icnn._bs
us = model._icnn._us
smooth = model.smooth_relu
input_shape = (D_in,)
V1 = lya(ws,bs,us,smooth,x,input_shape)
V0 = lya(ws,bs,us,smooth,torch.zeros_like(x),input_shape)
num_V = smooth(V1-V0)+eps*x.pow(2).sum(dim=1)
V = torch.sum(smooth(V1-V0)+eps*x.pow(2).sum(dim=1))
Vx = jacobian(V,x)
Vxx = hessian(V,x)
loss = torch.zeros(N)
for r in range(N):
L_V = torch.sum(Vx[0,2*r:2*r+2]*f[r,:]) + 0.5*torch.mm(g[r,:].unsqueeze(0),torch.mm(Vxx[2*r:2*r+2,2*r:2*r+2],g[r,:].unsqueeze(1)))
Vxg = torch.sum(Vx[0,2*r:2*r+2]*g[r,:])
v = num_V[0,r]
loss[r] = Vxg**2/(v**2) - 2.1*L_V/v
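# Hedged reading of the loop above: Vx has shape [1, 2N] and Vxx is [2N, 2N], so the
# 2x2 diagonal blocks give per-sample gradients and Hessians. L_V is the SDE generator
# applied to V, L_V = Vx.f + 0.5 * g^T Vxx g, and the loss penalizes violations of
#   (Vx.g)^2 / V^2 - 2.1 * L_V / V >= 0,
# which appears to be the exponential-stability (ES) condition with rate constant 2.1.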
Lyapunov_risk = (F.relu(-loss)).mean()
L.append(Lyapunov_risk)
print(i, "Lyapunov Risk=",Lyapunov_risk.item())
optimizer.zero_grad()
Lyapunov_risk.backward()
optimizer.step()
if Lyapunov_risk < 2.0:
optimizer = torch.optim.Adam(model.parameters(), lr=0.01)
else:
optimizer = torch.optim.Adam(model.parameters(), lr=0.1)
if Lyapunov_risk < 0.001:
break
# stop = timeit.default_timer()
# print('per:',stop-start)
i += 1
# torch.save(torch.tensor(L),'./data/harmonic/loss_icnn.pt')
# torch.save(model._control.state_dict(),'./data/harmonic/icnn_net.pkl')
stop = timeit.default_timer()
print('\n')
print("Total time: ", stop - start)
out_iters+=1
| 2,972 | 26.527778 | 142 |
py
|
Neural-Stochastic-Control
|
Neural-Stochastic-Control-main/Neural Stochastic Control/harmonic/ES_Quadratic.py
|
import torch
import torch.nn.functional as F
import timeit
from hessian import hessian
from hessian import jacobian
class Net(torch.nn.Module):
def __init__(self,n_input,n_hidden,n_output):
super(Net, self).__init__()
torch.manual_seed(2)
self.layer1 = torch.nn.Linear(n_input, n_hidden)
self.layer2 = torch.nn.Linear(n_hidden,n_output)
self.control = torch.nn.Linear(n_input,2,bias=False)
def forward(self,x):
tanh = torch.nn.Tanh()
h_1 = tanh(self.layer1(x))
out = self.layer2(h_1)
u = self.control(x)
return out,u
def harmonic(x):
y = []
beta = 0.5
for i in range(0,len(x)):
f = [x[i,1],-x[i,0]-2*beta*x[i,1]]
y.append(f)
y = torch.tensor(y)
return y
def harmonic_control(x,u):
y = []
k1,k2 = -3,2.15
for i in range(0,len(x)):
f = [0.0,k1*x[i,0]+k2*x[i,1]]
y.append(f)
y = torch.tensor(y)
y[:,0] = y[:,0] + u[:,0]
y[:,1] = y[:,1] + u[:,1]
return y
'''
For learning
'''
N = 500 # sample size
D_in = 2 # input dimension
H1 = 6 # hidden dimension
D_out = 2 # output dimension
torch.manual_seed(10)
x = torch.Tensor(N, D_in).uniform_(-6, 6)
l = 0.01
x_0 = torch.zeros([1,2])
out_iters = 0
# valid = False
while out_iters < 1:
start = timeit.default_timer()
model = Net(D_in,H1, D_out)
i = 0
max_iters = 200
learning_rate = 0.03
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
L = []
while i < max_iters:
# start = timeit.default_timer()
V_net, u = model(x)
W1 = model.layer1.weight
W2 = model.layer2.weight
B1 = model.layer1.bias
B2 = model.layer2.bias
X0,u0 = model(x_0)
f = harmonic(x)
g = harmonic_control(x,u)
x = x.clone().detach().requires_grad_(True)
output = torch.mm(torch.tanh(torch.mm(x,W1.T)+B1),W2.T)+B2
# V = torch.sum(output)
num_v = torch.sum(l*x*x + ( x*output)**2,1)
# num_v = torch.sum(output,1)
V = torch.sum(l*x*x + (x*output)**2)
Vx = jacobian(V,x)
Vxx = hessian(V,x)
loss = torch.zeros(N)
for r in range(N):
L_V = torch.sum(Vx[0,2*r:2*r+2]*f[r,:]) + 0.5*torch.mm(g[r,:].unsqueeze(0),torch.mm(Vxx[2*r:2*r+2,2*r:2*r+2],g[r,:].unsqueeze(1)))
Vxg = torch.sum(Vx[0,2*r:2*r+2]*g[r,:])
v = num_v[r]
loss[r] = Vxg**2/(v**2) - 2.1*L_V/v
Lyapunov_risk = (F.relu(-loss)).mean()
L.append(Lyapunov_risk)
print(i, "Lyapunov Risk=",Lyapunov_risk.item())
optimizer.zero_grad()
Lyapunov_risk.backward()
optimizer.step()
if Lyapunov_risk < 2.0:
optimizer = torch.optim.Adam(model.parameters(), lr=0.01)
else:
optimizer = torch.optim.Adam(model.parameters(), lr=0.1)
if Lyapunov_risk < 0.001:
break
# stop = timeit.default_timer()
# print('per:',stop-start)
q = model.control.weight.data.numpy()
i += 1
print(q)
stop = timeit.default_timer()
print('\n')
print("Total time: ", stop - start)
out_iters+=1
# torch.save(torch.tensor(L),'./data/harmonic/loss_quad.pt')
| 3,376 | 26.016 | 142 |
py
|
Neural-Stochastic-Control
|
Neural-Stochastic-Control-main/Neural Stochastic Control/harmonic/plot.py
|
import numpy as np
import matplotlib.pyplot as plt
import torch
import matplotlib
matplotlib.rcParams['font.sans-serif'] = 'NSimSun,Times New Roman'
matplotlib.rcParams['text.usetex'] = True
import sys
sys.path.append('./data/harmonic')
'''
Data is a dictionary {'X','Y','Z','W'} holding 20 sample trajectories each for the
original system and for ES+ICNN, ES+Quad, and AS control. The Euler method uses
dt = 1e-5 and N = 400000 steps, so the data size for each system is [20, 400001, 2].
'''
colors = [
[233/256, 110/256, 236/256], # #e96eec
# [0.6, 0.6, 0.2], # olive
# [0.5333333333333333, 0.13333333333333333, 0.3333333333333333], # wine
[255/255, 165/255, 0],
# [0.8666666666666667, 0.8, 0.4666666666666667], # sand
# [223/256, 73/256, 54/256], # #df4936
[107/256, 161/256,255/256], # #6ba1ff
[0.6, 0.4, 0.8], # amethyst
[0.0, 0.0, 1.0], # ao
[0.55, 0.71, 0.0], # applegreen
# [0.4, 1.0, 0.0], # brightgreen
[0.99, 0.76, 0.8], # bubblegum
[0.93, 0.53, 0.18], # cadmiumorange
[11/255, 132/255, 147/255], # deblue
[204/255, 119/255, 34/255], # {ocra}
]
colors = np.array(colors)
alpha = 0.1
methods = ['ES+ICNN', 'ES+Quad', 'AS']
fontsize = 35
fontsize_legend = 17
framealpha = 0.7
legend_loc = "lower right"
shade_color = colors[0]
labelpad=-30
linewidth = 3
sc_step = 10
delt_step = 10 * sc_step
data = torch.load('./data/harmonic/data_long.pt') # fixed initial (0.3,0.5)
# data = torch.load('./data/harmonic/data_long_random.pt') # random initial
X = data['X'][:,0:300001:delt_step,:]#Original system
Y = data['Y'][:,0:300001:delt_step,:]#ES+ICNN
Z = data['Z'][:,0:300001:delt_step,:]#ES+Quad
W = data['W'][torch.tensor([0,1,3,4,5,6,7,8,9,10,11,13,14,15,16,17,18,19]),0:300001:delt_step,:]
# Two trajectories diverge because dt in the Euler method is not small enough; re-running
# them (random seeds 12*2 and 12*12) with dt = 1e-6 makes both converge to zero.
mid_init = 24000 // sc_step # start of the zoomed time window
mid_end = 30000 // sc_step # end of the zoomed time window
target_big_X_lim = [24000.0 / sc_step, 30000.0 /sc_step]
# target_small_X_lim = [-300.0, 6300.0]
target_small_X_lim = [0.0, 6000.0 / sc_step]
x1_ylim = [-20, 20]
def plot_grid():
plt.grid(True, which='major', color='gray', alpha=0.6, linestyle='dashdot', lw=1.5)
# minor grid lines
plt.minorticks_on()
plt.grid(True, which='minor', color='beige', alpha=0.8, ls='-', lw=1)
# plt.grid(b=True, which='both', color='beige', alpha=0.1, ls='-', lw=1)
pass
def plt_x1_ylim():
plt.ylim(x1_ylim[0], x1_ylim[1])
def plt_tick_time_0_to_3():
# time [2.4, 3.0]
plt.xlim(-1500.0 / sc_step, 31500.0 / sc_step)
plt.xticks([0.0, 10000.0 / sc_step, 20000.0 / sc_step, 30000.0 / sc_step], ["$0$", "$1.0$", "$2.0$", "$3.0$"])
# plt.xticks([0.0, 10000.0, 20000.0, 30000.0], ["$0$", "$~$", "$~$", "$3.0$"])
def plt_tick_time_24_to_30(case=1):
# time [2.4, 3.0]
# plt.xlim(-300, 6300)
plt.xlim(0, 6000/sc_step)
plt.xticks([0.0, 2000.0/sc_step, 4000.0/sc_step, 6000.0/sc_step], ["$2.4$", "$2.6$", "$2.8$", "$3.0$"])
# plt.xticks([0.0, 2000.0, 4000.0, 6000.0], ["$2.4$", "$~$", "$~$", "$3.0$"])
if case==1:
# plt.ylim(-0.115, 0.115)
# plt.yticks([-0.1, -0.05, 0, 0.05, 0.1], ["$-0.1$", "$~$", "$0$", "$~$", "$0.1$"])
plt.ylim(-0.23, 0.23)
plt.yticks([-0.2, -0.1, 0, 0.1, 0.2], ["$-0.2$", "$~$", "$0$", "$~$", "$0.2$"])
if case==2:
plt.ylim(-0.23, 0.23)
plt.yticks([-0.2, -0.1, 0, 0.1, 0.2], ["$-0.2$", "$~$", "$0$", "$~$", "$0.2$"])
def plot_x1(subfig=1):
X1 = X[:,:,0]#x1 component of original system
mean_x1 = torch.mean(X1,0)
std_x1 = torch.std(X1,0)
Y1 = Y[:,:,0]#x1 component of ES(+ICNN)
mean_y1 = torch.mean(Y1,0)
std_y1 = torch.std(Y1,0)
Z1 = Z[:,:,0]#x1 component of ES(+Quadratic)
mean_z1 = torch.mean(Z1,0)
std_z1 = torch.std(Z1,0)
W1 = W[:,:,0]#x1 component of AS
mean_w1 = torch.mean(W1,0)
std_w1 = torch.std(W1,0)
# plt.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=0.5, hspace=0.5)
if subfig==1:
# plt.subplot(251)
plt.fill_between(np.arange(X1.size(1)),mean_x1-std_x1,mean_x1+std_x1,color='r',alpha=alpha)
plt.plot(np.arange(X1.size(1)),mean_x1,color='r',alpha=0.5,label=r"$x_1$", linewidth=linewidth)
# plt.title('Original System', fontsize=fontsize)
plt_tick_time_0_to_3()
# plt.ylabel(r"$x_1$", fontsize=fontsize)
# plt.xlabel("Time", fontsize=fontsize)
plt.tick_params(labelsize=fontsize)
if subfig==2:
# plt.subplot(232)
plt.fill_between(np.arange(Y1.size(1)),mean_y1-std_y1,mean_y1+std_y1,color='r',alpha=alpha)
plt.plot(np.arange(Y1.size(1)),mean_y1,color='r',alpha=0.5,label=methods[0], linewidth=linewidth)
plt.fill_between(np.arange(mean_z1.size(0)),mean_z1-std_z1,mean_z1+std_z1,color='b',alpha=alpha)
plt.plot(np.arange(mean_z1.size(0)),mean_z1,color='b',alpha=0.5,label=methods[1], linewidth=linewidth)
plt.fill_between(np.arange(W1.size(1)),mean_w1-std_w1,mean_w1+std_w1,color='g',alpha=alpha)
plt.plot(np.arange(W1.size(1)),mean_w1,color='g',alpha=0.5,label=methods[2], linewidth=linewidth)
plt.legend(fontsize=fontsize_legend, framealpha=framealpha, loc=legend_loc)
# plt.title('ES(ICNN), ES(Quad), AS')
plt.xlabel("Time", fontsize=fontsize)
plt_tick_time_0_to_3()
plt_x1_ylim()
plt.tick_params(labelsize=fontsize)
if subfig==3:
# plt.subplot(233)
#Tune time
mean_y1 = mean_y1[mid_init:mid_end]
std_y1 = std_y1[mid_init:mid_end]
mean_z1 = mean_z1[mid_init:mid_end]
std_z1 = std_z1[mid_init:mid_end]
mean_w1 = mean_w1[mid_init:mid_end]
std_w1 = std_w1[mid_init:mid_end]
plt.fill_between(np.arange(mean_y1.size(0)),mean_y1-std_y1,mean_y1+std_y1,color='r',alpha=alpha)
plt.plot(np.arange(mean_y1.size(0)),mean_y1,color='r',alpha=0.5,label='mean value', linewidth=linewidth)
plt.fill_between(np.arange(mean_z1.size(0)),mean_z1-std_z1,mean_z1+std_z1,color='b',alpha=alpha)
plt.plot(np.arange(mean_z1.size(0)),mean_z1,color='b',alpha=0.5,label='mean value', linewidth=linewidth)
plt.fill_between(np.arange(mean_w1.size(0)),mean_w1-std_w1,mean_w1+std_w1,color='g',alpha=alpha)
plt.plot(np.arange(mean_w1.size(0)),mean_w1,color='g',alpha=0.5,label='mean value', linewidth=linewidth)
# plt.title('Time Magnify : [2.4,3.0]')
plt_tick_time_24_to_30(case=1)
plt.tick_params(labelsize=fontsize)
def plot_x2(subfig=1):
#Plot x2 component
mean_x1 = torch.mean(X[:,:,1],0)
std_x1 = torch.std(X[:,:,1],0)
mean_y1 = torch.mean(Y[:,:,1],0)
std_y1 = torch.std(Y[:,:,1],0)
mean_z1 = torch.mean(Z[:,:,1],0)
std_z1 = torch.std(Z[:,:,1],0)
mean_w1 = torch.mean(W[:,:,1],0)
std_w1 = torch.std(W[:,:,1],0)
if subfig==1:
# plt.subplot(256)
plt.fill_between(np.arange(mean_x1.size(0)),mean_x1-std_x1,mean_x1+std_x1,color='g',alpha=alpha)
plt.plot(np.arange(mean_x1.size(0)),mean_x1,color='g',alpha=0.5,label=r"$x_2$", linewidth=linewidth)
# plt.ylabel(r"$x_2$", fontsize=fontsize)
# plt.xlabel("Time", fontsize=fontsize)
plt_tick_time_0_to_3()
plt.tick_params(labelsize=fontsize)
if subfig==2:
# plt.subplot(235)
plt.fill_between(np.arange(mean_y1.size(0)),mean_y1-std_y1,mean_y1+std_y1,color='r',alpha=alpha)
plt.plot(np.arange(mean_y1.size(0)),mean_y1,color='r',alpha=0.5,label=methods[0], linewidth=linewidth)
plt.fill_between(np.arange(mean_z1.size(0)),mean_z1-std_z1,mean_z1+std_z1,color='b',alpha=alpha)
plt.plot(np.arange(mean_z1.size(0)),mean_z1,color='b',alpha=0.5,label=methods[1], linewidth=linewidth)
plt.fill_between(np.arange(mean_w1.size(0)),mean_w1-std_w1,mean_w1+std_w1,color='g',alpha=alpha)
plt.plot(np.arange(mean_w1.size(0)),mean_w1,color='g',alpha=0.5,label=methods[2], linewidth=linewidth)
plt.xlabel("Time", fontsize=fontsize)
plt_tick_time_0_to_3()
plt.legend(fontsize=fontsize_legend, framealpha=framealpha, loc=legend_loc)
plt_x1_ylim()
plt.tick_params(labelsize=fontsize)
if subfig==3:
# plt.subplot(236)
#Tune time
mean_y1 = mean_y1[mid_init:mid_end]
std_y1 = std_y1[mid_init:mid_end]
mean_z1 = mean_z1[mid_init:mid_end]
std_z1 = std_z1[mid_init:mid_end]
mean_w1 = mean_w1[mid_init:mid_end]
std_w1 = std_w1[mid_init:mid_end]
plt.fill_between(np.arange(mean_y1.size(0)),mean_y1-std_y1,mean_y1+std_y1,color='r',alpha=alpha)
plt.plot(np.arange(mean_y1.size(0)),mean_y1,color='r',alpha=0.5,label='mean value', linewidth=linewidth)
plt.fill_between(np.arange(mean_z1.size(0)),mean_z1-std_z1,mean_z1+std_z1,color='b',alpha=alpha)
plt.plot(np.arange(mean_z1.size(0)),mean_z1,color='b',alpha=0.5,label='mean value', linewidth=linewidth)
plt.fill_between(np.arange(mean_w1.size(0)),mean_w1-std_w1,mean_w1+std_w1,color='g',alpha=alpha)
plt.plot(np.arange(mean_w1.size(0)),mean_w1,color='g',alpha=0.5,label='mean value', linewidth=linewidth)
# plt.xlabel("Time", fontsize=fontsize)
plt_tick_time_24_to_30(case=2)
plt.tick_params(labelsize=fontsize)
from matplotlib.patches import ConnectionPatch
import matplotlib.gridspec as gridspec
gs = gridspec.GridSpec(16, 16)
fig = plt.figure(figsize=(18, 12))
# plt.subplot(231)
plt.subplot(gs[0:7, 0:4])
plot_x1(subfig=1)
plot_x2(subfig=1)
plt.legend(fontsize=fontsize_legend, framealpha=framealpha)
plot_grid()
plt.ylim(-10, 10)
plt.yticks([-10, -5, 0, 5, 10], ["$-10$", "$~$", "$0$", "$~$", "$10$"])
# sub_x1 = plt.subplot(223)
sub_x1 = plt.subplot(gs[9:16, 0:7])
plot_x1(subfig=2)
plt.ylabel('$x_1$', fontsize=fontsize, labelpad=labelpad)
sub_x1.fill_between((target_big_X_lim[0],target_big_X_lim[1]), -20, 30, facecolor=shade_color, alpha=0.2)
plot_grid()
plt.ylim(-20, 20)
plt.yticks([-20, -10, 0, 10, 20], ["$-20$", "$~$", "$0$", "$~$", "$20$"])
a = 0.47
# sub_x1_small = fig.add_axes([a, 0.58, 0.1, 0.1])
# sub_x1_small = plt.subplot(232)
sub_x1_small = plt.subplot(gs[0:7, 6:10])
plt.ylabel('$x_1$', fontsize=fontsize, labelpad=labelpad)
# a, b = 5, 10
# sub_x1_small = plt.subplot(a, b, 2*b + b//2)
plot_x1(subfig=3)
plot_grid()
con1 = ConnectionPatch(xyA=(target_big_X_lim[0], 0), coordsA=sub_x1.transData,
xyB=(target_small_X_lim[0], -0.23/2), coordsB=sub_x1_small.transData, color =shade_color)
fig.add_artist(con1)
con2 = ConnectionPatch(xyA=(target_big_X_lim[1], 0), coordsA=sub_x1.transData,
xyB=(target_small_X_lim[1], -0.23/2), coordsB=sub_x1_small.transData, color =shade_color)
fig.add_artist(con2)
# plt.subplot(256)
# plot_x2(subfig=1)
# sub_x1 = plt.subplot(224)
sub_x1 = plt.subplot(gs[9:16, 9:16])
plot_x2(subfig=2)
sub_x1.fill_between((target_big_X_lim[0],target_big_X_lim[1]), -20, 30, facecolor=shade_color, alpha=0.2)
plot_grid()
plt.ylabel('$x_2$', fontsize=fontsize, labelpad=labelpad)
plt.ylim(-20, 20)
plt.yticks([-20, -10, 0, 10, 20], ["$-20$", "$~$", "$0$", "$~$", "$20$"])
# sub_x1_small = plt.subplot(233)
sub_x1_small = plt.subplot(gs[0:7, 12:16])
plot_x2(subfig=3)
plot_grid()
plt.ylabel('$x_2$', fontsize=fontsize, labelpad=labelpad)
con1 = ConnectionPatch(xyA=(target_big_X_lim[0], 0), coordsA=sub_x1.transData,
xyB=(target_small_X_lim[0], -0.23), coordsB=sub_x1_small.transData, color =shade_color)
fig.add_artist(con1)
con2 = ConnectionPatch(xyA=(target_big_X_lim[1], 0), coordsA=sub_x1.transData,
xyB=(target_small_X_lim[1], -0.23), coordsB=sub_x1_small.transData, color =shade_color)
fig.add_artist(con2)
plt.show()
| 11,933 | 39.317568 | 114 |
py
|
Neural-Stochastic-Control
|
Neural-Stochastic-Control-main/Neural Stochastic Control/harmonic/table1.py
|
import numpy as np
import torch
data = torch.load('./data/harmonic/data_long.pt')
# Compute the entries of Table 1
def L2_norm(st,a):
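# indices 2 and 12 are excluded below: the corresponding trajectories diverge
# because the Euler dt is not small enough (see the note in plot.py)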
Y = data[st][torch.tensor([0,1,3,4,5,6,7,8,9,10,11,13,14,15,16,17,18,19]),:,:]
Y = Y.detach().numpy()
X = np.linalg.norm(Y,axis=2)
Z = np.mean(X,0)
index = np.where(Z<0.05)
print('{} min :'.format(a),np.min(Z))
print('{} convergence time of 0.05:'.format(a), format(index[0][0]*1e-5,'.3f'))
L2_norm('Y','ICNN')
L2_norm('Z','Quad')
L2_norm('W','AS')
| 513 | 24.7 | 83 |
py
|
Neural-Stochastic-Control
|
Neural-Stochastic-Control-main/Neural Stochastic Control/harmonic/Control_Nonlinear_Icnn.py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class ICNN(nn.Module):
def __init__(self, input_shape, layer_sizes, activation_fn):
super(ICNN, self).__init__()
self._input_shape = input_shape
self._layer_sizes = layer_sizes
self._activation_fn = activation_fn
ws = []
bs = []
us = []
prev_layer = input_shape
w = torch.empty(layer_sizes[0], *input_shape)
nn.init.xavier_normal_(w)
ws.append(nn.Parameter(w))
b = torch.empty([layer_sizes[0], 1])
nn.init.xavier_normal_(b)
bs.append(nn.Parameter(b))
for i in range(len(layer_sizes))[1:]:
w = torch.empty(layer_sizes[i], *input_shape)
nn.init.xavier_normal_(w)
ws.append(nn.Parameter(w))
b = torch.empty([layer_sizes[i], 1])
nn.init.xavier_normal_(b)
bs.append(nn.Parameter(b))
u = torch.empty([layer_sizes[i], layer_sizes[i-1]])
nn.init.xavier_normal_(u)
us.append(nn.Parameter(u))
self._ws = nn.ParameterList(ws)
self._bs = nn.ParameterList(bs)
self._us = nn.ParameterList(us)
def forward(self, x):
# x: [batch, data]
if len(x.shape) < 2:
x = x.unsqueeze(0)
else:
data_dims = list(range(1, len(self._input_shape) + 1))
x = x.permute(*data_dims, 0)
z = self._activation_fn(torch.addmm(self._bs[0], self._ws[0], x))
for i in range(len(self._us)):
u = F.softplus(self._us[i])
w = self._ws[i + 1]
b = self._bs[i + 1]
z = self._activation_fn(torch.addmm(b, w, x) + torch.mm(u, z))
return z
class ControlNet(torch.nn.Module):
def __init__(self,n_input,n_hidden,n_output):
super(ControlNet, self).__init__()
torch.manual_seed(2)
self.layer1 = torch.nn.Linear(n_input, n_hidden)
self.layer2 = torch.nn.Linear(n_hidden,n_hidden)
self.layer3 = torch.nn.Linear(n_hidden,n_output)
def forward(self,x):
relu = torch.nn.ReLU()
h_1 = relu(self.layer1(x))
h_2 = relu(self.layer2(h_1))
out = self.layer3(h_2)
return out
class LyapunovFunction(nn.Module):
def __init__(self,n_input,n_hidden,n_output,input_shape,smooth_relu_thresh=0.1,layer_sizes=[64, 64],lr=3e-4,eps=1e-3):
super(LyapunovFunction, self).__init__()
torch.manual_seed(2)
self._d = smooth_relu_thresh
self._icnn = ICNN(input_shape, layer_sizes, self.smooth_relu)
self._eps = eps
self._control = ControlNet(n_input,n_hidden,n_output)
def forward(self, x):
g = self._icnn(x)
g0 = self._icnn(torch.zeros_like(x))
u = self._control(x)
u0 = self._control(torch.zeros_like(x))
return self.smooth_relu(g - g0) + self._eps * x.pow(2).sum(dim=1), u*x
# return self.smooth_relu(g - g0) + self._eps * x.pow(2).sum(dim=1), u-u0
def smooth_relu(self, x):
relu = x.relu()
# TODO: Is there a clean way to avoid computing both of these on all elements?
sq = (2*self._d*relu.pow(3) -relu.pow(4)) / (2 * self._d**3)
lin = x - self._d/2
return torch.where(relu < self._d, sq, lin)
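# smooth_relu is a C^1 smoothing of ReLU with threshold d = self._d:
#   s(x) = (2*d*relu(x)^3 - relu(x)^4) / (2*d^3)  for relu(x) < d,
#   s(x) = x - d/2                                otherwise,
# so the two pieces meet with matching value (d/2) and slope (1) at x = d.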
def lya(ws,bs,us,smooth,x,input_shape):
if len(x.shape) < 2:
x = x.unsqueeze(0)
else:
data_dims = list(range(1, len(input_shape) + 1))
x = x.permute(*data_dims, 0)
z = smooth(torch.addmm(bs[0],ws[0], x))
for i in range(len(us)):
u = F.softplus(us[i])
w = ws[i + 1]
b = bs[i + 1]
z = smooth(torch.addmm(b, w, x) + torch.mm(u, z))
return z
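if __name__ == '__main__':
# Minimal smoke test (not part of the original module); shapes assumed from ES_ICNN.py.
model = LyapunovFunction(2, 6, 2, (2,), 0.1, [6, 6, 1], eps=1e-3)
x = torch.Tensor(5, 2).uniform_(-6, 6)
V, u = model(x)
print(V.shape, u.shape) # V: [1, 5] candidate Lyapunov values, u: [5, 2] control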
| 3,754 | 34.424528 | 122 |
py
|
Neural-Stochastic-Control
|
Neural-Stochastic-Control-main/code_rebuttal/model_free/functions.py
|
import torch
import torch.nn.functional as F
import numpy as np
import timeit
import argparse
import matplotlib.pyplot as plt
colors = [
[233/256, 110/256, 236/256], # #e96eec
# [0.6, 0.6, 0.2], # olive
# [0.5333333333333333, 0.13333333333333333, 0.3333333333333333], # wine
[255/255, 165/255, 0],
# [0.8666666666666667, 0.8, 0.4666666666666667], # sand
# [223/256, 73/256, 54/256], # #df4936
[107/256, 161/256,255/256], # #6ba1ff
[0.6, 0.4, 0.8], # amethyst
[0.0, 0.0, 1.0], # ao
[0.55, 0.71, 0.0], # applegreen
# [0.4, 1.0, 0.0], # brightgreen
[0.99, 0.76, 0.8], # bubblegum
[0.93, 0.53, 0.18], # cadmiumorange
[11/255, 132/255, 147/255], # deblue
[204/255, 119/255, 34/255], # {ocra}
]
colors = np.array(colors)
def plot_grid():
plt.grid(True, which='major', color='gray', alpha=0.6, linestyle='dashdot', lw=1.5)
# minor grid lines
plt.minorticks_on()
plt.grid(True, which='minor', color='beige', alpha=0.8, ls='-', lw=1)
# plt.grid(b=True, which='both', color='beige', alpha=0.1, ls='-', lw=1)
pass
class ControlNet(torch.nn.Module):
def __init__(self, n_input, n_hidden, n_output):
super(ControlNet, self).__init__()
torch.manual_seed(2)
self.layer1 = torch.nn.Linear(n_input, n_hidden)
self.layer2 = torch.nn.Linear(n_hidden, n_hidden)
self.layer3 = torch.nn.Linear(n_hidden, n_output)
def forward(self, data):
data = data[:,1:2] # the control acts on the second state component only
relu = torch.nn.ReLU()
h_1 = relu(self.layer1(data))
h_2 = relu(self.layer2(h_1))
out = self.layer3(h_2)
x = data
# return out*x*torch.tensor([0.0,1.0,1.0,0.0,0.0,0.0])
return out * x
class Net(torch.nn.Module):
def __init__(self,n_input,n_hidden,n_output):
super(Net,self).__init__()
self._scontrol = ControlNet(n_input,n_hidden,n_output)
# self._dcontrol = ControlNet(n_input,n_hidden,n_output)
def forward(self,data):
s_u = self._scontrol(data)
# d_u = self._dcontrol(data)
return s_u
| 2,097 | 32.301587 | 89 |
py
|
Neural-Stochastic-Control
|
Neural-Stochastic-Control-main/code_rebuttal/model_free/run.py
|
import numpy as np
from scipy import integrate
import torch
import matplotlib.pyplot as plt
import math
import timeit
from scipy.integrate import odeint
from functions import *
def f(x,u=0):
a, b, c = 1, 1, 1
U2 = np.array([0.5, 0.74645887, 1.05370735, 0.38154169, 1.68833014, 0.83746371])
x1, x2, x3, x4, x5, x6 = x+U2
dx1 = 0.5 - a * x1
dx2= 5 * x1 / ((1 + x1) * (1 + x3**4)) - b * x2
dx3= 5 * x4 / ((1 + x4) * (1 + x2**4)) - c * x3
dx4 = 0.5 / (1 + x2**4) - a * x4
dx5 = (x1 * x4 / (1 + x1 * x4) + 4 * x3 / (1 + x3)) / (1 + x2**4) - a * x5
dx6 = (x1 * x4 / (1 + x1 * x4) + 4 * x2 / (1 + x2)) / (1 + x3**4) - a * x6
return np.array([dx1,dx2,dx3,dx4,dx5,dx6])
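# Hedged note: U2 appears to be an equilibrium of the uncontrolled six-dimensional
# cell-fate model above, so shifting by U2 places that steady state at x = 0.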
models = ControlNet(1,6,1)
models.load_state_dict(torch.load('./data/node_S_2.pkl'))
# models = ControlNet(2,12,2)
# models.load_state_dict(torch.load('./data/node_S.pkl'))
def run_0(n,dt,case,seed):
np.random.seed(seed)
U2 = np.array([0.5, 0.74645887, 1.05370735, 0.38154169, 1.68833014, 0.83746371])
x0 = np.array([0.5,-0.9,0.6,-0.6,-0.9,0.5])
X = np.zeros([n,6])
DU = np.zeros([n-1,6])
SU = np.zeros([n-1,6])
X[0,:]=x0
z = np.random.normal(0,1,n) # common noise
# z = np.random.normal(0, 1, [n,6]) # common noise
for i in range(n-1):
x = X[i,:]
df = f(x)
if case == 0:
X[i+1,:] = x+df*dt
if case == 'S':
with torch.no_grad():
input = torch.from_numpy(x).to(torch.float32).unsqueeze(0)
u = models(input).detach().numpy()
X[i+1,:]=x+df*dt
# X[i+1:i+2,1:3] += np.sqrt(dt)*z[i]*(u)
X[i + 1:i + 2, 1:2] += np.sqrt(dt) * z[i] * (u)
return X
'''
data generation
'''
seed = 3
n = 50000
# dt = 0.00001
dt = 0.0003
m = 10
# X = np.zeros([11,5000,6])
# X[0,:] = run_0(5000,0.001,0,0)
# for i in range(10):
# X[i+1,:] = run_0(50000,dt,'S',i)[0:50000:10,:]
# print(i)
# np.save('./data/pin_control_2',X)
'''
test
'''
# X = run_0(n,dt,'S',1)
# for i in range(6):
# plt.plot(np.arange(len(X))*dt,X[:,i],label=r'$x_{}$'.format(i))
# plt.legend()
'''
plot
'''
font_size = 20
def subplot(X,xticks1,xticks2,yticks1,yticks2,ylim,title):
alpha = 0.5
mean_x,std_x,mean_y,std_y=np.mean(X[:,:,0],axis=0),np.std(X[:,:,0],axis=0),np.mean(X[:,:,1],axis=0),np.std(X[:,:,1],axis=0)
length = len(mean_x)
plt.fill_between(np.arange(length),mean_x-std_x,mean_x+std_x,color=colors[0],alpha=alpha)
plt.plot(np.arange(length),mean_x,color=colors[0],label=r'$x$')
plt.fill_between(np.arange(length),mean_y-std_y,mean_y+std_y,color=colors[1],alpha=alpha)
plt.plot(np.arange(length),mean_y,color=colors[1],label=r'$y$')
plot_grid()
plt.legend(fontsize=font_size)
plt.xticks(xticks1,xticks2,fontsize=font_size)
plt.yticks(yticks1,yticks2,fontsize=font_size)
plt.ylim(ylim)
plt.title('{}'.format(title),fontsize=font_size)
plt.xlabel('Time',fontsize=font_size)
def plot(alpha=0.5):
data = np.load('./data/pin_control_2.npy')
plt.subplot(121)
X=data[0,:]
for i in range(6):
plt.plot(np.arange(len(X))*0.001,X[:,i],color=colors[i],label=r'$x_{}$'.format(i))
# plt.legend(fontsize=font_size*0.7,ncol=3)
plt.ylabel('State variables',fontsize=font_size)
plt.xlabel('Time', fontsize=font_size)
plt.yticks([-2, 0, 2],fontsize=font_size)
plt.xticks([0, 2.5, 5.0], fontsize=font_size)
plot_grid()
# plt.legend(fontsize=font_size*0.7 , ncol=6, bbox_to_anchor=(1.5, 1.1))
plt.subplot(122)
X=data[1:,:]
for i in range(6):
x = X[:,:,i]
mean_x = np.mean(x,axis=0)
std_x = np.std(x,axis=0) # bug fix: was np.mean, which made the shaded band collapse onto the mean
length = len(mean_x)
plt.fill_between(np.arange(length)*0.003, mean_x - std_x, mean_x + std_x, color=colors[i], alpha=alpha)
plt.plot(np.arange(length)*0.003, mean_x, color=colors[i], label=r'$x_{}$'.format(i))
plt.xticks([0,15],fontsize=font_size)
plt.yticks([-2,0,2],fontsize=font_size)
plt.ylim(-2,2)
# plt.ylabel('state variables',fontsize=font_size)
plt.xlabel('Time', fontsize=font_size)
plot_grid()
plot()
plt.show()
| 4,155 | 29.335766 | 127 |
py
|
Neural-Stochastic-Control
|
Neural-Stochastic-Control-main/code_rebuttal/model_free/NODE.py
|
# import sys
# sys.path.append('./neural_sde/NODE')
import argparse
import time
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
parser = argparse.ArgumentParser('ODE demo')
parser.add_argument('--method', type=str, choices=['dopri5', 'adams'], default='dopri5')
parser.add_argument('--data_size', type=int, default=1000)
parser.add_argument('--batch_time', type=int, default=10)
parser.add_argument('--batch_size', type=int, default=20)
parser.add_argument('--niters', type=int, default=2000)
parser.add_argument('--test_freq', type=int, default=20)
parser.add_argument('--viz', action='store_true')
parser.add_argument('--gpu', type=int, default=0)
parser.add_argument('--adjoint', action='store_true')
args = parser.parse_args()
if args.adjoint:
from torchdiffeq import odeint_adjoint as odeint
else:
from torchdiffeq import odeint
# device = torch.device('cuda:' + str(args.gpu) if torch.cuda.is_available() else 'cpu')
device = torch.device('cpu')
true_y0 = torch.tensor([[0.5, -0.9, 0.6, -0.6, -0.9, 0.5]]).to(device)
# true_y0 = torch.Tensor(10,6).uniform_(-2,2).to(device)
t = torch.linspace(0., 15., args.data_size).to(device)
class Cell_Fate_ODEFunc(nn.Module):
dim = 6
a, b, c = 1, 1, 1
def forward(self, t, x):
# x shape: [1, 6]
dx = torch.zeros_like(x)
U2 = torch.tensor([[0.5, 0.74645887, 1.05370735, 0.38154169, 1.68833014, 0.83746371]])
x = x + U2
x1, x2, x3, x4, x5, x6 = x[:, 0], x[:, 1], x[:, 2], x[:, 3], x[:, 4], x[:, 5]
dx[:, 0] = 0.5 - self.a * x1
dx[:, 1] = 5 * x1 / ((1 + x1) * (1 + x3**4)) - self.b * x2
dx[:, 2] = 5 * x4 / ((1 + x4) * (1 + x2**4)) - self.c * x3
dx[:, 3] = 0.5 / (1 + x2**4) - self.a * x4
dx[:, 4] = (x1 * x4 / (1 + x1 * x4) + 4 * x3 / (1 + x3)) / (1 + x2**4) - self.a * x5
dx[:, 5] = (x1 * x4 / (1 + x1 * x4) + 4 * x2 / (1 + x2)) / (1 + x3**4) - self.a * x6
return dx
with torch.no_grad():
# true_y = odeint(Lambda(), true_y0, t, method='dopri5')
true_y = odeint(Cell_Fate_ODEFunc(), true_y0, t)
def get_batch():
s = torch.from_numpy(
np.random.choice(np.arange(args.data_size - args.batch_time, dtype=np.int64), args.batch_size, replace=False))
batch_y0 = true_y[s] # (M, D)
batch_t = t[:args.batch_time] # (T)
batch_y = torch.stack([true_y[s + i] for i in range(args.batch_time)], dim=0) # (T, M, D)
return batch_y0.to(device), batch_t.to(device), batch_y.to(device)
class ODEFunc(nn.Module):
def __init__(self):
super(ODEFunc, self).__init__()
self.net = nn.Sequential(
nn.Linear(6, 50),
nn.Tanh(),
nn.Linear(50, 6),
)
for m in self.net.modules():
if isinstance(m, nn.Linear):
nn.init.normal_(m.weight, mean=0, std=0.1)
nn.init.constant_(m.bias, val=0)
def forward(self, t, y):
return self.net(y)
class RunningAverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self, momentum=0.99):
self.momentum = momentum
self.reset()
def reset(self):
self.val = None
self.avg = 0
def update(self, val):
if self.val is None:
self.avg = val
else:
self.avg = self.avg * self.momentum + val * (1 - self.momentum)
self.val = val
if __name__ == '__main__':
ii = 0
func = ODEFunc().to(device)
# optimizer = optim.RMSprop(func.parameters(), lr=1e-3)
optimizer = optim.Adam(func.parameters(), lr=1e-2)
end = time.time()
time_meter = RunningAverageMeter(0.97)
loss_meter = RunningAverageMeter(0.97)
for itr in range(1, args.niters + 1):
optimizer.zero_grad()
batch_y0, batch_t, batch_y = get_batch()
pred_y = odeint(func, batch_y0, batch_t).to(device)
loss = torch.mean(torch.abs(pred_y - batch_y))
loss.backward()
optimizer.step()
time_meter.update(time.time() - end)
loss_meter.update(loss.item())
print(itr, loss)
# if itr % args.test_freq == 0:
# with torch.no_grad():
# pred_y = odeint(func, true_y0, t)
# loss = torch.mean(torch.abs(pred_y - true_y))
# print('Iter {:04d} | Total Loss {:.6f}'.format(itr, loss.item()))
# visualize(true_y, pred_y, func, ii)
# ii += 1
# torch.save(func.state_dict(),'./neural_sde/NODE/symmetry.pkl')
end = time.time()
data = func(1.0, true_y)
torch.save(data[:,0,:], './data/node1.pt')
print(data.shape)
| 4,670 | 31.213793 | 118 |
py
|
Neural-Stochastic-Control
|
Neural-Stochastic-Control-main/code_rebuttal/model_free/NSC_train.py
|
import torch
import torch.nn.functional as F
import numpy as np
import timeit
import argparse
parser = argparse.ArgumentParser('ODE demo')
parser.add_argument('--N', type=float, default=1000)
parser.add_argument('--num', type=float, default=6)
parser.add_argument('--lr', type=float, default=0.05)
args = parser.parse_args()
class ControlNet(torch.nn.Module):
def __init__(self, n_input, n_hidden, n_output):
super(ControlNet, self).__init__()
torch.manual_seed(2)
self.layer1 = torch.nn.Linear(n_input, n_hidden)
self.layer2 = torch.nn.Linear(n_hidden, n_hidden)
self.layer3 = torch.nn.Linear(n_hidden, n_output)
def forward(self, data):
data = data[:,1:2] # the control acts on the second state component only
relu = torch.nn.ReLU()
h_1 = relu(self.layer1(data))
h_2 = relu(self.layer2(h_1))
out = self.layer3(h_2)
x = data
# return out*x*torch.tensor([0.0,1.0,1.0,0.0,0.0,0.0])
return out * x
class Net(torch.nn.Module):
def __init__(self,n_input,n_hidden,n_output):
super(Net,self).__init__()
self._scontrol = ControlNet(n_input,n_hidden,n_output)
# self._dcontrol = ControlNet(n_input,n_hidden,n_output)
def forward(self,data):
s_u = self._scontrol(data)
# d_u = self._dcontrol(data)
return s_u
def f_(data):
a, b, c = 1, 1, 1
z = torch.zeros_like(data)
U2 = torch.tensor([[0.5, 0.74645887, 1.05370735, 0.38154169, 1.68833014, 0.83746371]])
x = data + U2
for i in range(len(data)):
x1, x2, x3, x4, x5, x6 = x[i,:]
z[i, 0] = 0.5 - a * x1
z[i, 1] = 5 * x1 / ((1 + x1) * (1 + x3 ** 4)) - b * x2
z[i, 2] = 5 * x4 / ((1 + x4) * (1 + x2 ** 4)) - c * x3
z[i, 3] = 0.5 / (1 + x2 ** 4) - a * x4
z[i, 4] = (x1 * x4 / (1 + x1 * x4) + 4 * x3 / (1 + x3)) / (1 + x2 ** 4) - a * x5
z[i, 5] = (x1 * x4 / (1 + x1 * x4) + 4 * x2 / (1 + x2)) / (1 + x3 ** 4) - a * x6
# x,y=data[i,:]
# z[i,:] = torch.tensor([y,G*np.sin(x)/L +(-b*y)/(m*L**2)])#+u[i]
return z
def g_(data,u):
z = torch.zeros_like(data)
for i in range(len(data)):
z[i,:] = 0.0+u[i]
return z
'''
For learning
'''
N = args.N # sample size
D_in = 1 # input dimension
H1 = 6 * D_in # hidden dimension
D_out = 1 # output dimension
torch.manual_seed(10)
# Data = torch.Tensor(N,6).uniform_(-5,5)
Data = torch.load('./data/node1.pt')
# print(Data.shape)
theta = 0.9
out_iters = 0
while out_iters < 1:
# break
start = timeit.default_timer()
model = Net(D_in, H1, D_out)
i = 0
t = 0
max_iters = 200
learning_rate = args.lr
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
while i < max_iters:
s_u = model(Data)
f = f_(Data)[:,1:2]
# g = g_(Data,s_u)[:,1:3]
g = s_u
x = Data[:,1:2]
# loss = (2-theta)*torch.diagonal(torch.mm(x, g.T))**2-torch.diagonal(torch.mm(x,x.T))*torch.diagonal(
# 2*torch.mm(x,f.T)+torch.mm(g,g.T))
loss = (2-theta)*((x*g)**2)-x**2*(2*x*f+g**2)
# L_B = 2*(v-M/2)*f[:,3:4]/h(v)**2+g[:,3:4]**2/h(v)**2+4*g[:,3:4]**2*(v-M/2)**2/h(v)**3 - gamma*torch.log(1+torch.abs(h(v))) # barrier function 1
# L_B = (2*(v-M/2)*f[:,3:4]/h(v)**2+g[:,3:4]**2/h(v)**2+4*g[:,3:4]**2*(v-M/2)**2/h(v)**3)
# lossB = 2*L_B/h(v)-(1-theta)*(2*(v-M/2)*g[:,3:4])**2/h(v)**4
AS_loss = (F.relu(-loss)).mean()
print(i, "AS loss=", AS_loss.item())
optimizer.zero_grad()
AS_loss.backward()
optimizer.step()
if AS_loss < 1e-8:
break
# if AS_loss<0.5:
# optimizer=torch.optim.Adam(model.parameters(),lr=0.005)
i += 1
stop = timeit.default_timer()
print('\n')
print("Total time: ", stop - start)
print("Verified time: ", t)
out_iters += 1
torch.save(model._scontrol.state_dict(),'./data/node_S_2.pkl')
# torch.save(model._dcontrol.state_dict(),'./data/D.pkl')
| 4,004 | 31.04 | 153 |
py
|
Neural-Stochastic-Control
|
Neural-Stochastic-Control-main/code_rebuttal/multiple_k/AS.py
|
import torch
import torch.nn.functional as F
import timeit
import math
class Net(torch.nn.Module):
def __init__(self,n_input,n_hidden,n_output):
super(Net, self).__init__()
torch.manual_seed(2)
self.layer1 = torch.nn.Linear(n_input, n_hidden)
self.layer2 = torch.nn.Linear(n_hidden,n_output)
def forward(self,x):
relu = torch.nn.ReLU()
h_1 = relu(self.layer1(x))
out = self.layer2(h_1)
return out
def f_value(x):
y = []
for i in range(0,len(x)):
f = [x[i]*math.log(1+abs(x[i]))]
y.append(f)
y = torch.tensor(y)
return y
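# Hedged note: the drift x*log(1+|x|) grows faster than linearly, a standard 1-d
# example for stabilization by multiplicative noise; the controlled SDE is
#   dx = x*log(1+|x|) dt + k(x)*x dW,
# and the network below learns the state-dependent gain k(x) = out(x).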
'''
For learning
'''
N = 4000 # sample size
D_in = 1 # input dimension
H1 = 6 # hidden dimension
D_out = 1 # output dimension
torch.manual_seed(10)
x = torch.Tensor(N, D_in).uniform_(0,50)
theta = 0.9
out_iters = 0
while out_iters < 1:
start = timeit.default_timer()
model = Net(D_in,H1, D_out)
i = 0
t = 0
max_iters = 50
learning_rate = 0.1
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
while i < max_iters:
out = model(x)
g = out*x
f = f_value(x)
loss = (2-theta)*((x*g)**2)-x**2*(2*x*f+g**2)
Lyapunov_risk = (F.relu(-loss)).mean()
print(i, "Lyapunov Risk=",Lyapunov_risk.item())
optimizer.zero_grad()
Lyapunov_risk.backward()
optimizer.step()
i += 1
stop = timeit.default_timer()
print('\n')
print("Total time: ", stop - start)
print("Verified time: ", t)
out_iters+=1
torch.save(model.state_dict(), './data/theta0.9_1d_log_net_100.pkl')
| 1,716 | 21.012821 | 72 |
py
|
Neural-Stochastic-Control
|
Neural-Stochastic-Control-main/code_rebuttal/multiple_k/functions.py
|
import numpy as np
import math
import torch
import timeit
from scipy import integrate
import matplotlib.pyplot as plt
start = timeit.default_timer()
np.random.seed(1)
class Net(torch.nn.Module):
def __init__(self, n_input, n_hidden, n_output):
super(Net, self).__init__()
torch.manual_seed(2)
self.layer1 = torch.nn.Linear(n_input, n_hidden)
self.layer2 = torch.nn.Linear(n_hidden, n_output)
def forward(self, x):
relu = torch.nn.ReLU()
h_1 = relu(self.layer1(x))
out = self.layer2(h_1)
return out
log_model = Net(1,6,1)
log_model.load_state_dict(torch.load('./data/theta0.9_1d_log_net.pkl'))
N = 100000
dt = 0.00001
m = 20
T = 50
def k_list(N,dt,k,m):
x0 = [20.0]
data = torch.zeros([N+1,m])
for r in range(m):
np.random.seed(r * 4 + 1)
X = []
X.append(x0)
z = np.random.normal(0,1,N)
for i in range(N):
x = X[i][0]
new_x = x + x*math.log(1+abs(x))*dt + k*x*math.sqrt(dt)*z[i]
X.append([new_x])
X = torch.tensor(X)
data[:,r] = X[:,0]
return data
def learning_control(N,dt,m):
x0 = [20.0]
data = torch.zeros([N+1,m])
for r in range(m):
X = []
X.append(x0)
np.random.seed(r*4+1)
z = np.random.normal(0,1,N)
for i in range(N):
x = X[i][0]
k = log_model(torch.tensor([X[i]]))
new_x = x + x*math.log(1+abs(x))*dt + k[0]*x*math.sqrt(dt)*z[i]
X.append([new_x])
X = torch.tensor(X)
data[:,r] = X[:,0]
print(r)
return data
def multiple_k(T,N,dt,m):
x0 = [50.0]
data = torch.zeros([T, N + 1, m])
def generate(k):
data = torch.zeros([N+1,m])
for r in range(m):
X = []
X.append(x0)
np.random.seed(r * 4 + 1)
z = np.random.normal(0, 1, N)
for i in range(N):
x = X[i][0]
new_x = x + x * math.log(1 + abs(x)) * dt + k * x * math.sqrt(dt) * z[i]
X.append([new_x])
X = torch.tensor(X)
data[:, r] = X[:, 0]
print(r)
return data
for j in range(T):
k = 0.2*(j+1)
data[j,:]=generate(k)
torch.save(data,'./data/k_table_x0_20.pt')
return data
def stopping_time(j):
data = torch.load('./data/k_table_x0_20_100.pt').numpy()
X = data[j,:]
t_x = 0.0
dt = 0.0001
for i in range(20):
norm_x = np.abs(X[:, i])
ind = np.where(norm_x < 0.1)[0][0] if np.min(norm_x) < 0.1 else int(len(X)) - 1
t_x += ind*dt
print(t_x/20)
return t_x/20
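# single_k_energy (below) estimates the mean control energy for gain k = 0.2*(j+1):
# it integrates |g(x)|^2 = (k*x)^2 along each trajectory with the trapezoidal rule,
# up to the first time |x| < 0.1 (or the full horizon if that never happens), and
# also averages the per-trajectory maximum of |g(x)|.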
def single_k_energy(j):
data = torch.load('./data/k_table_x0_20_100.pt').numpy()
# data = Data['data']
# X = data[i,:75001,:]
# N = int(len(X))-1
X = data[j,:]
# dt = 0.00001
dt = 0.00001
k = ((j+1)*0.2)**2
gx = k*X**2
# a = np.linspace(0, dt*N, N+1)
v_x = 0
max_norm = 0.0
for i in range(20):
norm_x = np.abs(X[:, i])
ind = np.where(norm_x < 0.1)[0][0] if np.min(norm_x) < 0.1 else int(len(X))-1
a = np.linspace(0, dt * ind, ind + 1)
g_x = gx[:,i]
v_x += integrate.trapz(g_x[0:ind + 1], a)
max_norm += np.sqrt(np.max(g_x)) # bug fix: np.max(gx) took the max over all trajectories, not trajectory i
# v_x += integrate.trapz(np.array(g_x), a)
# print(i)
print(v_x/20,max_norm/20)
return v_x/20
'''
generate energy_list for different k
'''
T = 50
energy_list = np.zeros(T)
# time_list = np.zeros(T)
for i in range(T):
energy_list[i] = single_k_energy(i)
# time_list[i] = stopping_time(i)
# np.save('./data/energy_list',energy_list)
# np.save('./data/time_list',time_list)
# energy_list = np.load('./data/energy_list.npy')
plt.plot(np.arange(T),np.log(energy_list))
# plt.axhline(np.log(1438))
# plt.axhline(0.38)
# plt.show()
# Data = torch.load('./data/20seed_learning_control.pt')
# data = Data['data'].detach().numpy()
# Y = data[0,:][:,np.delete(np.arange(20),15)] # drop the diverging trajectory (the Euler dt is not small enough)
# max_norm = 0.0
# for i in range(19):
# g_y = (log_model(torch.tensor(Y[:, i]).unsqueeze(1))[:, 0].detach().numpy() * Y[:, i])**2
# max_norm+=np.sqrt(np.max(g_y))
# print(max_norm)
def k_data():
endpoint = torch.zeros(T)
Data = torch.zeros(T,N+1,m)
for i in range(T):
k = i*0.2+0.2
data = k_list(N,dt,k,m)
endpoint[i] = data[-1].mean()
Data[i,:] = data
print(i)
torch.save({'data':Data,'end':endpoint},'./data/k_table_x0_20.pt')
def learning_data():
# data = learning_control(200000,dt,10)
data = learning_control(100000,dt,20)
# torch.save({'data':data},'./neural_sde/Energy/20_learning_control.pt')
torch.save({'data':data},'./data/20seed_learning_control.pt')
def k_energy_cost():
Data = torch.load('k_table.pt')
data = Data['data']
X = data[29,:75001,:]
N = 75000
dt = 0.00001
gx = 6*2*X**2
a = np.linspace(0, dt*N, N+1)
print(a.shape)
v_x = 0
for i in range(20):
g_x = gx[:,i]
v_x += integrate.trapz(np.array(g_x), a)
print(i)
print(v_x/20)
def energy_cost():
Data = torch.load('./data/20seed_learning_control.pt')
data = Data['data'].detach().numpy()
X = data[1,:]
Y = data[0,:][:,np.delete(np.arange(20),15)] # drop the diverging trajectory (the Euler dt is not small enough)
N = 100000
dt = 0.00001
v_x = 0
v_y = 0
# a = np.linspace(0, dt*N, N+1)
for i in range(Y.shape[1]):
g_x = 36*X[:,i]**2
g_y = (log_model(torch.tensor(Y[:,i]).unsqueeze(1))[:,0].detach().numpy()*Y[:,i])**2
norm_x = np.abs(X[:,i])
norm_y = np.abs(Y[:,i])
ind1 = np.where(norm_x<0.1)[0][0]
ind2 = np.where(norm_y<0.1)[0][0]
a1 = np.linspace(0, dt*ind1, ind1+1)
a2 = np.linspace(0, dt*ind2, ind2+1)
v_x += integrate.trapz(g_x[0:ind1+1], a1)
v_y += integrate.trapz(g_y[0:ind2+1], a2)
print(i)
print(v_x/20,v_y/19)
# X = multiple_k(T,n,dt,m) # generate data
# k_energy_cost()
# energy_cost()
# learning_data()
# k_data()
# learning_data()
stop= timeit.default_timer()
print('time:',stop-start)
| 6,281 | 26.432314 | 129 |
py
|
Neural-Stochastic-Control
|
Neural-Stochastic-Control-main/code_rebuttal/multiple_k/plot_appendix.py
|
import numpy as np
import matplotlib.pyplot as plt
import torch
# import matplotlib
# matplotlib.rcParams['font.sans-serif'] = 'NSimSun,Times New Roman'
# matplotlib.rcParams['text.usetex'] = True
def plot_grid():
plt.grid(True, which='major', color='gray', alpha=0.6, linestyle='dashdot', lw=1.5)
# minor grid lines
plt.minorticks_on()
plt.grid(True, which='minor', color='beige', alpha=0.8, ls='-', lw=1)
# plt.grid(b=True, which='both', color='beige', alpha=0.1, ls='-', lw=1)
pass
energy = np.load('./data/energy_list.npy')
dt = 0.00001*10
# dt = 0.0001
fontsize = 15
data = torch.load('./data/k_table_x0_20.pt')
print(data.shape)
for i in range(5):
plt.subplot(1,6,i+1)
k=(i+1)*2
X=data[10*(i+1)-1,0:50000:10,:]
mean_data = torch.mean(X,1)
std_data = torch.std(X,1)
plt.fill_between(np.arange(len(X)) * dt,mean_data-std_data,mean_data+std_data,color='r',alpha=0.2)
plt.plot(np.arange(len(X)) * dt,mean_data,color='r',alpha=0.9,label='k={}'.format(k))
# plt.title('ME:{}'.format(38418))
plt.ylim([-100, 200])
plt.xlabel(r'Time', fontsize=fontsize)
if i == 0:
plt.ylabel(r'$x$', fontsize=fontsize)
plt.xticks([0, 0.125, 0.25, 0.375, 0.5],
["$0$", "$~$","$0.25$","$~$", "$0.5$"]
)
plt.yticks([-100, 0, 100, 200])
plt.legend(fontsize=fontsize)
plot_grid()
plt.title('ME:{}'.format(int(energy[10*(i+1)-1])))
plt.tick_params(labelsize=fontsize)
Data = torch.load('./data/20seed_learning_control.pt')
data = Data['data'].detach().numpy()
dt = 0.00001
fig3 = plt.subplot(166)
Y = data[0,:]
Y = Y[:14000,:]
mean_data = np.mean(Y,1)
std_data = np.std(Y,1)
plt.fill_between(np.arange(len(Y))*dt,mean_data-std_data,mean_data+std_data,color='g',alpha=0.2)
plt.plot(np.arange(len(Y))*dt,mean_data,color='g',alpha=0.9,label='Learned control')
# plt.ylim([-100, 200])
plt.xlabel(r'Time', fontsize=fontsize)
plt.xticks([0, 0.075/2, 0.075, (0.075 + 0.15)/2, 0.15],
["$0$", "$~$","$0.075$", "$~$", "$0.15$"]
)
plt.ylabel(r'$x$', fontsize=fontsize)
plt.yticks([-20, 0, 20, 40],
["0", "0.05","0.1", "0.15"]
)
plt.legend(fontsize=fontsize * 0.7)
plot_grid()
plt.tick_params(labelsize=fontsize)
plt.title('ME:{}'.format(1438))
plt.show()
| 2,306 | 30.60274 | 102 |
py
|
Neural-Stochastic-Control
|
Neural-Stochastic-Control-main/code_rebuttal/mixed_control/functions.py
|
import numpy as np
from scipy import integrate
import torch
import torch.nn as nn
import matplotlib.pyplot as plt
import math
import timeit
from scipy.integrate import odeint
colors = [
[233/256, 110/256, 236/256], # #e96eec
# [0.6, 0.6, 0.2], # olive
# [0.5333333333333333, 0.13333333333333333, 0.3333333333333333], # wine
[255/255, 165/255, 0],
# [0.8666666666666667, 0.8, 0.4666666666666667], # sand
# [223/256, 73/256, 54/256], # #df4936
[107/256, 161/256,255/256], # #6ba1ff
[0.6, 0.4, 0.8], # amethyst
[0.0, 0.0, 1.0], # ao
[0.55, 0.71, 0.0], # applegreen
# [0.4, 1.0, 0.0], # brightgreen
[0.99, 0.76, 0.8], # bubblegum
[0.93, 0.53, 0.18], # cadmiumorange
[11/255, 132/255, 147/255], # deblue
[204/255, 119/255, 34/255], # {ocra}
]
colors = np.array(colors)
def plot_grid():
plt.grid(True, which='major', color='gray', alpha=0.6, linestyle='dashdot', lw=1.5)
# minor grid lines
plt.minorticks_on()
plt.grid(True, which='minor', color='beige', alpha=0.8, ls='-', lw=1)
# plt.grid(b=True, which='both', color='beige', alpha=0.1, ls='-', lw=1)
pass
class Net(torch.nn.Module):
def __init__(self, n_input, n_hidden, n_output):
super(Net, self).__init__()
torch.manual_seed(2)
self.layer1 = torch.nn.Linear(n_input, n_hidden)
self.layer2 = torch.nn.Linear(n_hidden, n_hidden)
self.layer3 = torch.nn.Linear(n_hidden, n_output)
def forward(self, data):
relu = torch.nn.ReLU()
h_1 = relu(self.layer1(data))
h_2 = relu(self.layer2(h_1))
out = self.layer3(h_2)
x = data[:,0:4]
return out*x
class ControlNet(torch.nn.Module):
def __init__(self, n_input, n_hidden, n_output):
super(ControlNet, self).__init__()
# torch.manual_seed(2)
self.net = nn.Sequential(
nn.Linear(n_input, n_hidden),
nn.ReLU(),
nn.Linear(n_hidden, n_hidden),
nn.ReLU(),
nn.Linear(n_hidden,n_output)
)
for m in self.net.modules():
if isinstance(m, nn.Linear):
nn.init.normal_(m.weight, mean=0, std=0.001)
nn.init.constant_(m.bias, val=0)
def forward(self, x):
return self.net(x)
| 2,307 | 31.507042 | 89 |
py
|
Neural-Stochastic-Control
|
Neural-Stochastic-Control-main/code_rebuttal/mixed_control/run.py
|
import numpy as np
from scipy import integrate
import torch
import matplotlib.pyplot as plt
import math
import timeit
from scipy.integrate import odeint
from functions import *
from cvxopt import solvers,matrix
def f(x,u=0):
    # pendulum drift term; the unpacked angle is named theta so it does not
    # shadow the (unused) control argument u
    theta, v = x
    G = 9.81  # gravity
    L = 0.5  # length of the pole
    m = 0.15  # ball mass
    b = 0.1  # friction
    return np.array([v, G*np.sin(theta)/L + (-b*v)/(m*L**2)])
models = Net(2,6,2)
models.load_state_dict(torch.load('./data/S.pkl'))
modeld = Net(2,6,2)
modeld.load_state_dict(torch.load('./data/D.pkl'))
modelmd = Net(2,6,2)
modelmd.load_state_dict(torch.load('./data/MD.pkl'))
modelms = Net(2,6,2)
modelms.load_state_dict(torch.load('./data/MS.pkl'))
def run_0(n,dt,case,seed):
np.random.seed(seed)
x0 = np.array([3.0,-4.0])
X = np.zeros([n,2])
DU = np.zeros([n-1,2])
SU = np.zeros([n-1,2])
X[0,:]=x0
z = np.random.normal(0,1,n) # common noise
# z = np.random.normal(0,1,[n,4]) # uncorrelated noise
for i in range(n-1):
x = X[i,:]
df = f(x)
if case == 0:
X[i+1,:] = x+df*dt#+()*(dt*z[i]**2-dt)/(2*np.sqrt(dt))
if case == 'S':
with torch.no_grad():
input = torch.from_numpy(x).to(torch.float32).unsqueeze(0)
u = models(input).detach().numpy()
X[i+1,:]=x+df*dt+np.sqrt(dt)*z[i]*(u)
SU[i,:] = u
if case == 'D':
with torch.no_grad():
input = torch.from_numpy(x).to(torch.float32).unsqueeze(0)
u = modeld(input).detach().numpy()
X[i + 1, :] = x + (df+u) * dt
DU[i, :] = u
if case == 'M':
with torch.no_grad():
input = torch.from_numpy(x).to(torch.float32).unsqueeze(0)
d_u = modelmd(input).detach().numpy()
s_u = modelms(input).detach().numpy()
X[i+1,:]=x+(df+d_u)*dt+np.sqrt(dt)*z[i]*(s_u)
DU[i,:] = d_u
SU[i,:] = s_u
return X,DU,SU
'''
data generate
'''
seed = 3
n = 50000
dt = 0.00001
m = 10
# X,DU,SU = np.zeros([m,n,2]),np.zeros([m,n-1,2]),np.zeros([m,n-1,2])
# for i in range(m):
# X[i,:],DU[i,:],SU[i,:] = run_0(n,dt,'D',2*i+1)
# print(i)
# np.save('./data/S.npy',{'X':X,'DU':DU,'SU':SU}) # (5000,0.0001)
# np.save('./data/M.npy',{'X':X,'DU':DU,'SU':SU}) # throw out 2nd trajectory (5000,0.0001)
# np.save('./data/D.npy',{'X':X,'DU':DU,'SU':SU})
def energy(U,n=5000,dt=0.0001):
n = n-1
a=np.linspace(0,dt*(n-1),n)
e = 0.0
for i in range(len(U)):
e += integrate.trapz(np.array(np.sum(U[i,:]**2,axis=1)),a)
return e/float(len(U))
def stop_time(X,delta=0.001,dt=0.0001):
time = 0
for i in range(len(X)):
norm_x = np.sqrt(X[i,:,0]**2+X[i,:,1]**2)
index = np.where(norm_x<delta)
time += index[0][0]
return time/float(len(X))*dt
def minima(X):
min_x = 0
for i in range(len(X)):
norm_x = np.sqrt(X[i,:,0]**2+X[i,:,1]**2)
min_x += np.min(norm_x)
print(i,np.min(norm_x))
return min_x/float(len(X))
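
# Quick self-check of the trapezoidal energy integral above (our addition,
# hypothetical): a constant control u = (1, 1) on the default grid should
# give mean energy |u|^2 * dt * (n - 2) = 0.9996.
assert abs(energy(np.ones([3, 4999, 2])) - 2 * 0.0001 * 4998) < 1e-8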
'''
plot
'''
font_size = 20
def subplot(X,xticks1,xticks2,yticks1,yticks2,ylim,title):
alpha = 0.5
mean_x,std_x,mean_y,std_y=np.mean(X[:,:,0],axis=0),np.std(X[:,:,0],axis=0),np.mean(X[:,:,1],axis=0),np.std(X[:,:,1],axis=0)
length = len(mean_x)
plt.fill_between(np.arange(length),mean_x-std_x,mean_x+std_x,color=colors[0],alpha=alpha)
plt.plot(np.arange(length),mean_x,color=colors[0],label=r'$x$')
plt.fill_between(np.arange(length),mean_y-std_y,mean_y+std_y,color=colors[1],alpha=alpha)
plt.plot(np.arange(length),mean_y,color=colors[1],label=r'$y$')
plot_grid()
plt.legend(fontsize=font_size)
plt.xticks(xticks1,xticks2,fontsize=font_size)
plt.yticks(yticks1,yticks2,fontsize=font_size)
plt.ylim(ylim)
plt.title('{}'.format(title),fontsize=font_size)
plt.xlabel('Time',fontsize=font_size)
def plot():
plt.subplot(131)
data = np.load('./data/D.npy',allow_pickle=True).item()
X,DU,SU = data['X'],data['DU'],data['SU']
X = X[:, 0:n:10, :]
subplot(X,[0,2000,4000],[0,0.2,0.4],[-2,0,2,4],[-2,0,2,4],[-2,5],'deterministic')
plt.ylabel('state variables',fontsize=font_size)
plt.title('ME:{}'.format(int(energy(DU+SU,n,dt))),fontsize=font_size)
plt.subplot(132)
data = np.load('./data/M.npy',allow_pickle=True).item()
X,DU,SU = data['X'],data['DU'],data['SU']
X = X[:,0:31000:10,:]
subplot(X,[0,1500,3000],[0,0.15,0.3],[0,1,2],[0,'',2],[-0.2,2.5],'mix')
plt.title('ME:{}'.format(int(energy(DU+SU,n,dt))),fontsize=font_size)
plt.subplot(133)
data = np.load('./data/S.npy',allow_pickle=True).item()
X,DU,SU = data['X'],data['DU'],data['SU']
X = X[:,0:31000:10,:]
subplot(X,[0,1500,3000],[0,0.15,0.3],[0,1,2,3],[0,1,2,3],[-0.2,2.5],'stochastic')
plt.title('ME:{}'.format(int(energy(DU+SU,n,dt))),fontsize=font_size)
plot()
plt.show()
| 4,956 | 29.598765 | 127 |
py
|
Neural-Stochastic-Control
|
Neural-Stochastic-Control-main/code_rebuttal/mixed_control/NSC_train.py
|
import torch
import torch.nn.functional as F
import numpy as np
import timeit
import argparse
parser = argparse.ArgumentParser('ODE demo')
parser.add_argument('--N', type=int, default=1000)  # must be an int: used as a tensor size below
parser.add_argument('--num', type=float, default=2)
parser.add_argument('--lr', type=float, default=0.05)
args = parser.parse_args()
class ControlNet(torch.nn.Module):
def __init__(self, n_input, n_hidden, n_output):
super(ControlNet, self).__init__()
torch.manual_seed(2)
self.layer1 = torch.nn.Linear(n_input, n_hidden)
self.layer2 = torch.nn.Linear(n_hidden, n_hidden)
self.layer3 = torch.nn.Linear(n_hidden, n_output)
    def forward(self, data):
        relu = torch.nn.ReLU()
        h_1 = relu(self.layer1(data))
        h_2 = relu(self.layer2(h_1))
        out = self.layer3(h_2)
        # scale by the state so the control is zero at the equilibrium
        x = data
        return out * x
class Net(torch.nn.Module):
def __init__(self,n_input,n_hidden,n_output):
super(Net,self).__init__()
self._scontrol = ControlNet(n_input,n_hidden,n_output)
self._dcontrol = ControlNet(n_input,n_hidden,n_output)
def forward(self,data):
s_u = self._scontrol(data)
d_u = self._dcontrol(data)
return d_u,s_u
def f_(data,u):
G = 9.81 # gravity
L = 0.5 # length of the pole
m = 0.15 # ball mass
b = 0.1 # friction
z = torch.zeros_like(data)
for i in range(len(data)):
x,y=data[i,:]
z[i,:] = torch.tensor([y,G*np.sin(x)/L +(-b*y)/(m*L**2)])#+u[i]
return z
def g_(data,u):
z = torch.zeros_like(data)
for i in range(len(data)):
z[i,:] = 0.0+u[i]
return z
'''
For learning
'''
N = args.N # sample size
D_in = 2 # input dimension
H1 = 3 * D_in # hidden dimension
D_out = 2 # output dimension
torch.manual_seed(10)
Data = torch.Tensor(N,2).uniform_(-10,10)
theta = 0.8
out_iters = 0
while out_iters < 1:
# break
start = timeit.default_timer()
model = Net(D_in, H1, D_out)
i = 0
t = 0
max_iters = 200
learning_rate = args.lr
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
while i < max_iters:
d_u,s_u = model(Data)
f = f_(Data,d_u)
g = g_(Data,s_u)
x = Data
# loss = (2-theta)*torch.diagonal(torch.mm(x, g.T))**2-torch.diagonal(torch.mm(x,x.T))*torch.diagonal(
# 2*torch.mm(x,f.T)+torch.mm(g,g.T))
loss = (2-theta)*((x*g)**2)-x**2*(2*x*f+g**2)
# L_B = 2*(v-M/2)*f[:,3:4]/h(v)**2+g[:,3:4]**2/h(v)**2+4*g[:,3:4]**2*(v-M/2)**2/h(v)**3 - gamma*torch.log(1+torch.abs(h(v))) # barrier function 1
# L_B = (2*(v-M/2)*f[:,3:4]/h(v)**2+g[:,3:4]**2/h(v)**2+4*g[:,3:4]**2*(v-M/2)**2/h(v)**3)
# lossB = 2*L_B/h(v)-(1-theta)*(2*(v-M/2)*g[:,3:4])**2/h(v)**4
AS_loss = (F.relu(-loss)).mean()
print(i, "AS loss=", AS_loss.item())
optimizer.zero_grad()
AS_loss.backward()
optimizer.step()
if AS_loss < 1e-8:
break
# if AS_loss<0.5:
# optimizer=torch.optim.Adam(model.parameters(),lr=0.005)
i += 1
stop = timeit.default_timer()
print('\n')
print("Total time: ", stop - start)
print("Verified time: ", t)
out_iters += 1
torch.save(model._scontrol.state_dict(),'./data/node_S.pkl')
# torch.save(model._dcontrol.state_dict(),'./data/D.pkl')
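
# Post-training sanity check (our addition; a hypothetical sketch): the
# exponential-stability condition optimized above should be non-negative on
# fresh samples from the training region.
with torch.no_grad():
    test_x = torch.Tensor(500, 2).uniform_(-10, 10)
    d_u, s_u = model(test_x)
    f_test = f_(test_x, d_u)
    g_test = g_(test_x, s_u)
    cond = (2 - theta) * ((test_x * g_test) ** 2) \
        - test_x ** 2 * (2 * test_x * f_test + g_test ** 2)
    print('min ES condition value on 500 fresh samples:', cond.min().item())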
| 3,379 | 28.137931 | 153 |
py
|
Neural-Stochastic-Control
|
Neural-Stochastic-Control-main/code_rebuttal/comparison/lqr.py
|
import numpy as np
from cvxopt import solvers,matrix
import matplotlib.pyplot as plt
import torch
def harmonic(n,dt):
    # Euler-Maruyama simulation of the noisy harmonic oscillator in closed
    # loop; the extra linear terms appear to be precomputed LQR feedback
    # gains, and the sqrt(dt)*z term is the multiplicative diffusion.
x0 = np.array([2.0,2.0])
X = np.zeros([n,2])
X[0,:]=x0
z = np.random.normal(0, 1, n)
for i in range(n-1):
x1,x2 = X[i,:]
X[i+1,0] = x1 + (x2-4.45*x1-0.09*x2)*dt
X[i+1,1] = x2 + (-x1-x2-0.09*x1-3.6*x2)*dt+(-3*x1+2.15*x2)*np.sqrt(dt)*z[i]
return X
n = 6000
dt = 0.0001
X = np.zeros([10,n,2])
for i in range(10):
np.random.seed(20*i)
X[i,:] = harmonic(n,dt)
np.save('lqr.npy',X)
# X = harmonic(n,dt)
# plt.plot(np.arange(len(X)),X[:,0])
# plt.plot(np.arange(len(X)),X[:,1])
# plt.show()
| 662 | 21.1 | 83 |
py
|
Neural-Stochastic-Control
|
Neural-Stochastic-Control-main/code_rebuttal/comparison/riccati.py
|
import numpy as np
from matplotlib import pyplot as plt
import seaborn as sns
# system matrices defined by the problem
A = np.matrix([[0, 1.0], [-1.0, -1.0]])
AT = np.matrix([[0,-1.0], [1, -1.0]])
B = np.matrix([[1.0,0.0], [0.0,1.0]])
BT = np.matrix([[1.0,0.0], [0.0,1.0]])
F = np.matrix([[1, 0], [0, 2]])
Q = np.matrix([[20.0, 0.0], [0.0, 20.0]])
R = np.matrix([[1.0, 0.0], [0.0, 1.0]])
RN = 2
# values given by the boundary condition
step_num = 200
t = 3
step = -t / step_num
P = F
# define the Riccati differential equation
def Ricatti_P(t, P):
f = -(P * A + A.T * P - P * B * R.I * B.T * P + Q)
return f
ys_0, ys_1, ys_2, ys_3 = [], [], [], []
ts = []
while t > 0:
    t += step
    # classic RK4 step for the backward Riccati ODE; each k_i already
    # includes the factor `step`, so the intermediate states use P + k_i/2
    k1 = step * Ricatti_P(t, P)
    k2 = step * Ricatti_P(t + step * 0.5, P + k1 * 0.5)
    k3 = step * Ricatti_P(t + step * 0.5, P + k2 * 0.5)
    k4 = step * Ricatti_P(t + step, P + k3)
    P = P + (k1 + k2 * 2 + k3 * 2 + k4)/6
    P = np.array(P)
ts.append(t)
ys_0.append(P[0][0])
ys_1.append(P[0][1])
ys_2.append(P[1][0])
ys_3.append(P[1][1])
# print(ys_0)
print(P)
# P = np.matrix([[3, 1], [1, 3]])
# print(P * A + A.T * P)
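
# Cross-check (our addition; a hypothetical sketch): for a horizon long
# relative to the closed-loop dynamics, the backward-integrated P should be
# close to the stabilizing solution of the continuous algebraic Riccati
# equation computed by scipy.
from scipy.linalg import solve_continuous_are
P_inf = solve_continuous_are(np.asarray(A), np.asarray(B), np.asarray(Q), np.asarray(R))
print('max |P - P_inf|:', np.max(np.abs(np.asarray(P) - P_inf)))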
| 1,075 | 22.911111 | 62 |
py
|
Neural-Stochastic-Control
|
Neural-Stochastic-Control-main/code_rebuttal/comparison/run.py
|
import numpy as np
from cvxopt import solvers,matrix
import matplotlib.pyplot as plt
import torch
import seaborn as sns
class ControlNet(torch.nn.Module):
def __init__(self,n_input,n_hidden,n_output):
super(ControlNet,self).__init__()
torch.manual_seed(2)
self.layer1=torch.nn.Linear(n_input,n_hidden)
self.layer2=torch.nn.Linear(n_hidden,n_hidden)
self.layer3=torch.nn.Linear(n_hidden,n_output)
    def forward(self,x):
        relu=torch.nn.ReLU()
        h_1=relu(self.layer1(x))
        h_2=relu(self.layer2(h_1))
        out=self.layer3(h_2)
        return out
def qp(x1,x2,epi=0.1,p=10.0):
P = matrix(np.diag([2.0,2.0,2*p]))
q = matrix([0.0,0.0,0.0])
G = matrix(np.array([[x1,x2,-1.0]]))
    h = matrix([(-3.0*x1+2.15*x2)**2/2-x2**2-(x1**2+x2**2)/(2*epi)]) # include the V/epi decay term in the generator (Lie derivative) bound
# h = matrix([(-3.0*x1+2.15*x2)**2/2-x2**2])
solvers.options['show_progress']=False
    sol=solvers.qp(P,q,G,h)  # solve the QP with cvxopt's solvers.qp
u =np.array(sol['x'])
return u
def osqp(x1,x2,epi=0.1,p=10.0):
P = matrix(np.diag([2.0,2.0,2*p]))
q = matrix([0.0,0.0,0.0])
G = matrix(np.array([[3*x1+x2,x1+3*x2,-1.0]]))
h = matrix([x1**2+x1*x2+2*x2**2-(3*x1**2+2*x1*x2+3*x2**2)/(2*epi)-3*(-3.0*x1+2.15*x2)**2/2])
solvers.options['show_progress']=False
    sol=solvers.qp(P,q,G,h)  # solve the QP with cvxopt's solvers.qp
u =np.array(sol['x'])
return u
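
# Feasibility check for the CLF-QP above (our addition; a hypothetical
# helper, not part of the original comparison): the minimizer (u1, u2, d)
# returned by qp() must satisfy the encoded constraint G u <= h.
def qp_feasibility_check(x1=1.0, x2=-0.5, epi=0.1):
    u = qp(x1, x2, epi=epi)
    lhs = x1 * u[0, 0] + x2 * u[1, 0] - u[2, 0]
    rhs = (-3.0 * x1 + 2.15 * x2) ** 2 / 2 - x2 ** 2 - (x1 ** 2 + x2 ** 2) / (2 * epi)
    assert lhs <= rhs + 1e-5, (lhs, rhs)
    return u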
model = ControlNet(2,6,2)
model.load_state_dict(torch.load('icnn_net.pkl'))
def harmonic(n,dt,case):
x0 = np.array([-2.0,2.0])
X = np.zeros([n,2])
X[0,:]=x0
z = np.random.normal(0,1,n)
for i in range(n-1):
x1,x2 = X[i,:]
if case != 3:
if case == 0:
u1,u2,d = np.zeros(3)
if case == 1:
u1,u2,d = qp(x1,x2)
if case == 2:
u1,u2,d=osqp(x1,x2)
X[i+1,0] = x1 + (x2+u1)*dt
X[i+1,1] = x2 + (-x1-x2+u2)*dt+(-3*x1+2.15*x2)*np.sqrt(dt)*z[i]
if case == 3:
with torch.no_grad():
u = model(torch.from_numpy(X[i,:]).to(torch.float32))
u = u.detach().numpy()
u1,u2 = u[0],u[1]
X[i+1,0]=x1+(x2)*dt + np.sqrt(dt)*z[i]*u1*x1
X[i+1,1]=x2+(-x1-x2)*dt+(-3*x1+2.15*x2+u2*x2)*np.sqrt(dt)*z[i]
if i%3000 == 0:
print(i,u1,u2)
return X
n = 4000
dt = 0.00001
font_size=20
X = np.zeros([10,n,2])
# for i in range(10):
# np.random.seed(20*i)
# X[i,:] = harmonic(n,dt,3)
# # np.save('qp.npy',X)
# # X = np.load('ES.npy')
# plt.plot(np.arange(n),np.mean(X[:,:,0],axis=0))
# plt.plot(np.arange(n),np.mean(X[:,:,1],axis=0))
def plot_grid():
plt.grid(b=True, which='major', color='gray', alpha=0.6, linestyle='dashdot', lw=1.5)
# minor grid lines
plt.minorticks_on()
plt.grid(b=True, which='minor', color='beige', alpha=0.8, ls='-', lw=1)
# plt.grid(b=True, which='both', color='beige', alpha=0.1, ls='-', lw=1)
pass
colors = [
[233/256, 110/256, 236/256], # #e96eec
# [0.6, 0.6, 0.2], # olive
# [0.5333333333333333, 0.13333333333333333, 0.3333333333333333], # wine
[255/255, 165/255, 0],
# [0.8666666666666667, 0.8, 0.4666666666666667], # sand
# [223/256, 73/256, 54/256], # #df4936
[107/256, 161/256,255/256], # #6ba1ff
[0.6, 0.4, 0.8], # amethyst
[0.0, 0.0, 1.0], # ao
[0.55, 0.71, 0.0], # applegreen
# [0.4, 1.0, 0.0], # brightgreen
[0.99, 0.76, 0.8], # bubblegum
[0.93, 0.53, 0.18], # cadmiumorange
[11/255, 132/255, 147/255], # deblue
[204/255, 119/255, 34/255], # {ocra}
]
colors = np.array(colors)
def plot1(alpha=0.1):
X1 = np.load('ES.npy')
X1 = X1[:, 0:40000:10, :]
X2 = np.load('qp.npy')[:, :4000, :]
X3 = np.load('osqp.npy')[:, :4000, :]
X4 = np.load('lqr.npy')[:, :4000, :]
plt.subplot(144)
plt.fill_between(np.arange(n), np.mean(X1[:, :, 0], 0) - np.std(X1[:, :, 0], 0),
np.mean(X1[:, :, 0], 0) + np.std(X1[:, :, 0], 0),
color='r', alpha=alpha)
plt.plot(np.arange(n), np.mean(X1[:, :, 0], axis=0), color='r', label=r'$x_1$')
plt.fill_between(np.arange(n), np.mean(X1[:, :, 1], 0) - np.std(X1[:, :, 1], 0),
np.mean(X1[:, :, 1], 0) + np.std(X1[:, :, 1], 0),
color='r', alpha=alpha)
plt.plot(np.arange(n), np.mean(X1[:, :, 1], axis=0), color='orange', label=r'$x_2$')
plt.xticks([0, 2000, 4000], [0, 0.2, 0.4])
plt.xlabel(r'$t$', fontsize=font_size)
plt.ylabel(r'$x_1$', fontsize=font_size)
plt.ylim(-4, 4.0)
plt.legend(loc=4)
plt.title('ES+ICNN', fontsize=font_size)
plot_grid()
plt.subplot(142)
plt.fill_between(np.arange(n), np.mean(X2[:, :, 0], 0) - np.std(X2[:, :, 0], 0),
np.mean(X2[:, :, 0], 0) + np.std(X2[:, :, 0], 0),
color='b', alpha=alpha)
plt.plot(np.arange(n), np.mean(X2[:, :, 0], axis=0), color='r', label=r'$x_1$')
plt.fill_between(np.arange(n), np.mean(X2[:, :, 1], 0) - np.std(X2[:, :, 1], 0),
np.mean(X2[:, :, 1], 0) + np.std(X2[:, :, 1], 0),
color='b', alpha=alpha)
plt.plot(np.arange(n), np.mean(X2[:, :, 1], axis=0), color='orange', label=r'$x_2$')
plt.xticks([0, 2000, 4000], [0, 0.2, 0.4])
plt.xlabel(r'$t$', fontsize=font_size)
plt.ylabel(r'$x_1$', fontsize=font_size)
plt.ylim(-4, 4.0)
plt.legend(loc=4)
plt.title('HDSCLF',fontsize=font_size)
plot_grid()
plt.subplot(143)
plt.fill_between(np.arange(n), np.mean(X3[:, :, 0], 0) - np.std(X3[:, :, 0], 0),
np.mean(X3[:, :, 0], 0) + np.std(X3[:, :, 0], 0),
color='g', alpha=alpha)
plt.plot(np.arange(n), np.mean(X3[:, :, 0], axis=0), color='r', label=r'$x_1$')
plt.fill_between(np.arange(n), np.mean(X3[:, :, 1], 0) - np.std(X3[:, :, 1], 0),
np.mean(X3[:, :, 1], 0) + np.std(X3[:, :, 1], 0),
color='g', alpha=alpha)
plt.plot(np.arange(n), np.mean(X3[:, :, 1], axis=0), color='orange', label=r'$x_2$')
plt.xticks([0, 2000, 4000], [0, 0.2, 0.4])
plt.xlabel(r'$t$', fontsize=font_size)
plt.ylabel(r'$x_1$', fontsize=font_size)
plt.ylim(-4, 4.0)
plt.legend(loc=4)
plt.title('BALSA', fontsize=font_size)
plot_grid()
plt.subplot(141)
plt.fill_between(np.arange(n), np.mean(X4[:, :, 0], 0) - np.std(X4[:, :, 0], 0),
np.mean(X4[:, :, 0], 0) + np.std(X4[:, :, 0], 0),
color='orange', alpha=alpha)
plt.plot(np.arange(n), np.mean(X4[:, :, 0], axis=0), color='r', label=r'$x_1$')
plt.fill_between(np.arange(n), np.mean(X4[:, :, 1], 0) - np.std(X4[:, :, 1], 0),
np.mean(X4[:, :, 1], 0) + np.std(X4[:, :, 1], 0),
color='orange', alpha=alpha)
plt.plot(np.arange(n), np.mean(X4[:, :, 1], axis=0), color='orange', label=r'$x_2$')
plt.xticks([0, 2000, 4000], [0, 0.2, 0.4])
plt.xlabel(r'$t$', fontsize=font_size)
plt.ylabel(r'$x_1$', fontsize=font_size)
plt.ylim(-4, 4.0)
plt.legend(loc=4)
plt.title('LQR', fontsize=font_size)
plot_grid()
def plot2(alpha=0.1):
X1 = np.load('ES.npy')
X1 = X1[:,0:40000:10,:]
X2 = np.load('qp.npy')[:,:4000,:]
X3 = np.load('osqp.npy')[:,:4000,:]
X4 = np.load('lqr.npy')[:,:4000,:]
plt.subplot(121)
plt.fill_between(np.arange(n),np.mean(X1[:,:,0],0)-np.std(X1[:,:,0],0),np.mean(X1[:,:,0],0)+np.std(X1[:,:,0],0),
color=colors[0],alpha=alpha)
plt.plot(np.arange(n),np.mean(X1[:,:,0],axis=0),color=colors[0],label='ES+ICNN')
plt.fill_between(np.arange(n),np.mean(X2[:,:,0],0)-np.std(X2[:,:,0],0),np.mean(X2[:,:,0],0)+np.std(X2[:,:,0],0),
color=colors[1],alpha=alpha)
plt.plot(np.arange(n),np.mean(X2[:,:,0],axis=0),color=colors[1],label='HDSCLF')
plt.fill_between(np.arange(n),np.mean(X3[:,:,0],0)-np.std(X3[:,:,0],0),np.mean(X3[:,:,0],0)+np.std(X3[:,:,0],0),
color=colors[2],alpha=alpha)
plt.plot(np.arange(n),np.mean(X3[:,:,0],axis=0),color=colors[2],label='BALSA')
plt.fill_between(np.arange(n),np.mean(X4[:,:,0],0)-np.std(X4[:,:,0],0),np.mean(X4[:,:,0],0)+np.std(X4[:,:,0],0),
color=colors[5],alpha=alpha)
plt.plot(np.arange(n),np.mean(X4[:,:,0],axis=0),color=colors[5],label='LQR')
plt.xticks([0,2000,4000],[0,0.2,0.4], fontsize=font_size)
plt.xlabel('Time',fontsize=font_size)
plt.ylabel(r'$x_1$',fontsize=font_size)
plt.yticks([-3,0,3],fontsize=font_size)
plt.ylim(-3,3.0)
# plt.legend(loc=4, fontsize=font_size*0.6,)
# plt.legend(fontsize=font_size * 0.7, ncol=4, bbox_to_anchor=(1.5, 1.1))
plot_grid()
plt.subplot(122)
plt.fill_between(np.arange(n),np.mean(X1[:,:,1],0)-np.std(X1[:,:,1],0),np.mean(X1[:,:,1],0)+np.std(X1[:,:,1],0),
color=colors[0],alpha=alpha)
plt.plot(np.arange(n),np.mean(X1[:,:,1],axis=0),color=colors[0],label='ES+ICNN')
plt.fill_between(np.arange(n),np.mean(X2[:,:,1],0)-np.std(X2[:,:,1],0),np.mean(X2[:,:,1],0)+np.std(X2[:,:,1],0),
color=colors[1],alpha=alpha)
plt.plot(np.arange(n),np.mean(X2[:,:,1],axis=0),color=colors[1],label='HDSCLF')
plt.fill_between(np.arange(n),np.mean(X3[:,:,1],0)-np.std(X3[:,:,1],0),np.mean(X3[:,:,1],0)+np.std(X3[:,:,1],0),
color=colors[2],alpha=alpha)
plt.plot(np.arange(n),np.mean(X3[:,:,1],axis=0),color=colors[2],label='BALSA')
plt.fill_between(np.arange(n),np.mean(X4[:,:,1],0)-np.std(X4[:,:,1],0),np.mean(X4[:,:,1],0)+np.std(X4[:,:,1],0),
color=colors[5],alpha=alpha)
plt.plot(np.arange(n),np.mean(X4[:,:,1],axis=0),color=colors[5],label='LQR')
plt.xticks([0,2000,4000],[0,0.2,0.4], fontsize=font_size)
# plt.legend(loc=1, fontsize=font_size*0.6)
plt.xlabel('Time',fontsize=font_size)
plt.ylabel(r'$x_2$',fontsize=font_size)
plt.yticks([ 0, 6], fontsize=font_size)
plt.ylim(-1,6)
plot_grid()
# plot1()
plot2()
plt.show()
| 10,101 | 40.572016 | 116 |
py
|
gsdmm
|
gsdmm-master/setup.py
|
from setuptools import setup
VERSION = '0.1'
INSTALL_REQUIRES = [
'numpy'
]
setup(
name='gsdmm',
packages=['gsdmm'],
    version=VERSION,
url='https://www.github.com/rwalk/gsdmm',
author='Ryan Walker',
author_email='[email protected]',
description='GSDMM: Short text clustering ',
license='MIT',
install_requires=INSTALL_REQUIRES
)
| 363 | 18.157895 | 48 |
py
|
gsdmm
|
gsdmm-master/gsdmm/mgp.py
|
from numpy.random import multinomial
from numpy import log, exp
from numpy import argmax
import json
class MovieGroupProcess:
def __init__(self, K=8, alpha=0.1, beta=0.1, n_iters=30):
'''
A MovieGroupProcess is a conceptual model introduced by Yin and Wang 2014 to
        describe their Gibbs sampling algorithm for a Dirichlet Mixture Model for
        clustering short text documents.
Reference: http://dbgroup.cs.tsinghua.edu.cn/wangjy/papers/KDD14-GSDMM.pdf
Imagine a professor is leading a film class. At the start of the class, the students
are randomly assigned to K tables. Before class begins, the students make lists of
        their favorite films. The teacher reads the roll n_iters times. When
a student is called, the student must select a new table satisfying either:
1) The new table has more students than the current table.
OR
2) The new table has students with similar lists of favorite movies.
:param K: int
            Upper bound on the number of possible clusters. Typically many fewer clusters are used in practice.
:param alpha: float between 0 and 1
Alpha controls the probability that a student will join a table that is currently empty
When alpha is 0, no one will join an empty table.
:param beta: float between 0 and 1
Beta controls the student's affinity for other students with similar interests. A low beta means
that students desire to sit with students of similar interests. A high beta means they are less
concerned with affinity and are more influenced by the popularity of a table
        :param n_iters: int
            Number of Gibbs sampling iterations.
'''
self.K = K
self.alpha = alpha
self.beta = beta
self.n_iters = n_iters
# slots for computed variables
self.number_docs = None
self.vocab_size = None
self.cluster_doc_count = [0 for _ in range(K)]
self.cluster_word_count = [0 for _ in range(K)]
self.cluster_word_distribution = [{} for i in range(K)]
@staticmethod
def from_data(K, alpha, beta, D, vocab_size, cluster_doc_count, cluster_word_count, cluster_word_distribution):
'''
Reconstitute a MovieGroupProcess from previously fit data
:param K:
:param alpha:
:param beta:
:param D:
:param vocab_size:
:param cluster_doc_count:
:param cluster_word_count:
:param cluster_word_distribution:
:return:
'''
mgp = MovieGroupProcess(K, alpha, beta, n_iters=30)
mgp.number_docs = D
mgp.vocab_size = vocab_size
mgp.cluster_doc_count = cluster_doc_count
mgp.cluster_word_count = cluster_word_count
mgp.cluster_word_distribution = cluster_word_distribution
return mgp
@staticmethod
def _sample(p):
'''
Sample with probability vector p from a multinomial distribution
:param p: list
List of probabilities representing probability vector for the multinomial distribution
:return: int
index of randomly selected output
'''
return [i for i, entry in enumerate(multinomial(1, p)) if entry != 0][0]
def fit(self, docs, vocab_size):
'''
Cluster the input documents
:param docs: list of list
list of lists containing the unique token set of each document
        :param vocab_size: total vocabulary size of the corpus
:return: list of length len(doc)
cluster label for each document
'''
alpha, beta, K, n_iters, V = self.alpha, self.beta, self.K, self.n_iters, vocab_size
D = len(docs)
self.number_docs = D
self.vocab_size = vocab_size
# unpack to easy var names
m_z, n_z, n_z_w = self.cluster_doc_count, self.cluster_word_count, self.cluster_word_distribution
cluster_count = K
d_z = [None for i in range(len(docs))]
# initialize the clusters
for i, doc in enumerate(docs):
# choose a random initial cluster for the doc
z = self._sample([1.0 / K for _ in range(K)])
d_z[i] = z
m_z[z] += 1
n_z[z] += len(doc)
for word in doc:
if word not in n_z_w[z]:
n_z_w[z][word] = 0
n_z_w[z][word] += 1
for _iter in range(n_iters):
total_transfers = 0
for i, doc in enumerate(docs):
                # remove the doc from its current cluster
z_old = d_z[i]
m_z[z_old] -= 1
n_z[z_old] -= len(doc)
for word in doc:
n_z_w[z_old][word] -= 1
# compact dictionary to save space
if n_z_w[z_old][word] == 0:
del n_z_w[z_old][word]
# draw sample from distribution to find new cluster
p = self.score(doc)
z_new = self._sample(p)
# transfer doc to the new cluster
if z_new != z_old:
total_transfers += 1
d_z[i] = z_new
m_z[z_new] += 1
n_z[z_new] += len(doc)
for word in doc:
if word not in n_z_w[z_new]:
n_z_w[z_new][word] = 0
n_z_w[z_new][word] += 1
cluster_count_new = sum([1 for v in m_z if v > 0])
print("In stage %d: transferred %d clusters with %d clusters populated" % (
_iter, total_transfers, cluster_count_new))
if total_transfers == 0 and cluster_count_new == cluster_count and _iter>25:
print("Converged. Breaking out.")
break
cluster_count = cluster_count_new
self.cluster_word_distribution = n_z_w
return d_z
def score(self, doc):
'''
Score a document
Implements formula (3) of Yin and Wang 2014.
http://dbgroup.cs.tsinghua.edu.cn/wangjy/papers/KDD14-GSDMM.pdf
:param doc: list[str]: The doc token stream
:return: list[float]: A length K probability vector where each component represents
the probability of the document appearing in a particular cluster
'''
alpha, beta, K, V, D = self.alpha, self.beta, self.K, self.vocab_size, self.number_docs
m_z, n_z, n_z_w = self.cluster_doc_count, self.cluster_word_count, self.cluster_word_distribution
p = [0 for _ in range(K)]
# We break the formula into the following pieces
# p = N1*N2/(D1*D2) = exp(lN1 - lD1 + lN2 - lD2)
# lN1 = log(m_z[z] + alpha)
        # lD1 = log(D - 1 + K*alpha)
# lN2 = log(product(n_z_w[w] + beta)) = sum(log(n_z_w[w] + beta))
# lD2 = log(product(n_z[d] + V*beta + i -1)) = sum(log(n_z[d] + V*beta + i -1))
lD1 = log(D - 1 + K * alpha)
doc_size = len(doc)
for label in range(K):
lN1 = log(m_z[label] + alpha)
lN2 = 0
lD2 = 0
for word in doc:
lN2 += log(n_z_w[label].get(word, 0) + beta)
for j in range(1, doc_size +1):
lD2 += log(n_z[label] + V * beta + j - 1)
p[label] = exp(lN1 - lD1 + lN2 - lD2)
# normalize the probability vector
pnorm = sum(p)
pnorm = pnorm if pnorm>0 else 1
return [pp/pnorm for pp in p]
def choose_best_label(self, doc):
'''
Choose the highest probability label for the input document
:param doc: list[str]: The doc token stream
:return:
'''
p = self.score(doc)
return argmax(p),max(p)
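
if __name__ == '__main__':
    # Example usage (our addition; a minimal illustrative sketch, not part of
    # the library API): cluster a handful of toy documents and query a label.
    docs = [d.split() for d in
            ['cat dog pet', 'dog mouse pet', 'stock market money', 'market crash money']]
    vocab_size = len(set(w for d in docs for w in d))
    mgp = MovieGroupProcess(K=4, alpha=0.1, beta=0.1, n_iters=15)
    labels = mgp.fit(docs, vocab_size)
    print('labels:', labels)
    print('best label for "cat mouse":', mgp.choose_best_label('cat mouse'.split()))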
| 7,818 | 37.141463 | 115 |
py
|
gsdmm
|
gsdmm-master/gsdmm/__init__.py
|
from .mgp import MovieGroupProcess
| 34 | 34 | 34 |
py
|
gsdmm
|
gsdmm-master/test/__init__.py
| 0 | 0 | 0 |
py
|
|
gsdmm
|
gsdmm-master/test/test_gsdmm.py
|
from unittest import TestCase
from gsdmm.mgp import MovieGroupProcess
import numpy
class TestGSDMM(TestCase):
    '''Tests for the GSDMM MovieGroupProcess short-text clustering model'''
def setUp(self):
numpy.random.seed(47)
def tearDown(self):
numpy.random.seed(None)
def compute_V(self, texts):
V = set()
for text in texts:
for word in text:
V.add(word)
return len(V)
def test_grades(self):
grades = list(map(list, [
"A",
"A",
"A",
"B",
"B",
"B",
"B",
"C",
"C",
"C",
"C",
"C",
"C",
"C",
"C",
"C",
"C",
"D",
"D",
"F",
"F",
"P",
"W"
]))
grades = grades + grades + grades + grades + grades
mgp = MovieGroupProcess(K=100, n_iters=100, alpha=0.001, beta=0.01)
y = mgp.fit(grades, self.compute_V(grades))
self.assertEqual(len(set(y)), 7)
for words in mgp.cluster_word_distribution:
self.assertTrue(len(words) in {0,1}, "More than one grade ended up in a cluster!")
def test_simple_example(self):
# example from @spattanayak1
docs=[['house',
'burning',
'need',
'fire',
'truck',
'ml',
'hindu',
'response',
'christian',
'conversion',
'alm']]
mgp = MovieGroupProcess(K=10, alpha=0.1, beta=0.1, n_iters=30)
vocab = set(x for doc in docs for x in doc)
n_terms = len(vocab)
n_docs = len(docs)
y = mgp.fit(docs, n_terms)
def test_short_text(self):
# there is no perfect segmentation of this text data:
texts = [
"where the red dog lives",
"red dog lives in the house",
"blue cat eats mice",
"monkeys hate cat but love trees",
"green cat eats mice",
"orange elephant never forgets",
"orange elephant must forget",
"monkeys eat banana",
"monkeys live in trees",
"elephant",
"cat",
"dog",
"monkeys"
]
texts = [text.split() for text in texts]
V = self.compute_V(texts)
mgp = MovieGroupProcess(K=30, n_iters=100, alpha=0.2, beta=0.01)
y = mgp.fit(texts, V)
self.assertTrue(len(set(y))<10)
self.assertTrue(len(set(y))>3)
| 2,638 | 24.621359 | 94 |
py
|
MixLacune
|
MixLacune-main/process-lacunes.py
|
# -*- coding: utf-8 -*-
import os
import torch
import torchvision
import numpy as np
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms, utils
import SimpleITK as sitk
import glob
import torch.nn as nn
import nibabel as nib
import shutil
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
torch.cuda.set_device(0)
test_data_path = glob.glob(f'input_data/**/')
for x in range(len(test_data_path)):
    t1_path = glob.glob(test_data_path[x]+'/*T1*')
    t2_path = glob.glob(test_data_path[x]+'/*T2*')
    flair_path = glob.glob(test_data_path[x]+'/*_FLAIR*')
    sub_no = str(t1_path[0])
    sub_no = sub_no.rsplit('/', 1)[-1][0:7]
print("Loading: T1, T2, Flair\n")
im = sitk.ReadImage(t1_path[0])
#-------------------Functions------------------------------
def zscore_normalize(img, mask=None):
"""
normalize a target image by subtracting the mean of the whole brain
and dividing by the standard deviation
Args:
img (nibabel.nifti1.Nifti1Image): target MR brain image
mask (nibabel.nifti1.Nifti1Image): brain mask for img
Returns:
normalized (nibabel.nifti1.Nifti1Image): img with WM mean at norm_value
"""
img_data = img.get_fdata()
if mask is not None and not isinstance(mask, str):
mask_data = mask.get_fdata()
elif mask == 'nomask':
mask_data = img_data == img_data
else:
mask_data = img_data > img_data.mean()
logical_mask = mask_data > 0. # force the mask to be logical type
mean = img_data[logical_mask].mean()
std = img_data[logical_mask].std()
normalized = nib.Nifti1Image((img_data - mean) / std, img.affine, img.header)
return normalized
def read_img(path):
nib_img = nib.load(path)
normal = zscore_normalize(nib_img)
normal = normal.get_fdata()
normal = normal.astype(np.float32)
img_as_tensor = torch.from_numpy(normal)
img_as_tensor = img_as_tensor.permute(2,1,0)
img_as_tensor = img_as_tensor.unsqueeze(1)
return img_as_tensor
def extract_patches_2d(img,patch_shape,step=[1.0,1.0],batch_first=False):
patch_H, patch_W = patch_shape[0], patch_shape[1]
if(img.size(2)<patch_H):
num_padded_H_Top = (patch_H - img.size(2))//2
num_padded_H_Bottom = patch_H - img.size(2) - num_padded_H_Top
padding_H = nn.ConstantPad2d((0,0,num_padded_H_Top,num_padded_H_Bottom),0)
img = padding_H(img)
if(img.size(3)<patch_W):
num_padded_W_Left = (patch_W - img.size(3))//2
num_padded_W_Right = patch_W - img.size(3) - num_padded_W_Left
padding_W = nn.ConstantPad2d((num_padded_W_Left,num_padded_W_Right,0,0),0)
img = padding_W(img)
step_int = [0,0]
step_int[0] = int(patch_H*step[0]) if(isinstance(step[0], float)) else step[0]
step_int[1] = int(patch_W*step[1]) if(isinstance(step[1], float)) else step[1]
patches_fold_H = img.unfold(2, patch_H, step_int[0])
if((img.size(2) - patch_H) % step_int[0] != 0):
patches_fold_H = torch.cat((patches_fold_H,img[:,:,-patch_H:,].permute(0,1,3,2).unsqueeze(2)),dim=2)
patches_fold_HW = patches_fold_H.unfold(3, patch_W, step_int[1])
if((img.size(3) - patch_W) % step_int[1] != 0):
patches_fold_HW = torch.cat((patches_fold_HW,patches_fold_H[:,:,:,-patch_W:,:].permute(0,1,2,4,3).unsqueeze(3)),dim=3)
patches = patches_fold_HW.permute(2,3,0,1,4,5)
patches = patches.reshape(-1,img.size(0),img.size(1),patch_H,patch_W)
#patches = patches[:,0,:,:,:]
if(batch_first):
patches = patches.permute(1,0,2,3,4)
patches = patches[0,:,:,:,:]
#patches = patches[0,:,:,:,:]
return patches
def reconstruct_from_patches_2d(patches,img_shape,step=[1.0,1.0],batch_first=False):
patches = patches.unsqueeze(1)
if(batch_first):
patches = patches.permute(1,0,2,3,4)
patch_H, patch_W = patches.size(3), patches.size(4)
img_size = (patches.size(1), patches.size(2),max(img_shape[0], patch_H), max(img_shape[1], patch_W))
step_int = [0,0]
step_int[0] = int(patch_H*step[0]) if(isinstance(step[0], float)) else step[0]
step_int[1] = int(patch_W*step[1]) if(isinstance(step[1], float)) else step[1]
nrow, ncol = 1 + (img_size[-2] - patch_H)//step_int[0], 1 + (img_size[-1] - patch_W)//step_int[1]
r_nrow = nrow + 1 if((img_size[2] - patch_H) % step_int[0] != 0) else nrow
r_ncol = ncol + 1 if((img_size[3] - patch_W) % step_int[1] != 0) else ncol
patches = patches.reshape(r_nrow,r_ncol,img_size[0],img_size[1],patch_H,patch_W)
img = torch.zeros(img_size, device = patches.device)
overlap_counter = torch.zeros(img_size, device = patches.device)
for i in range(nrow):
for j in range(ncol):
img[:,:,i*step_int[0]:i*step_int[0]+patch_H,j*step_int[1]:j*step_int[1]+patch_W] += patches[i,j,]
overlap_counter[:,:,i*step_int[0]:i*step_int[0]+patch_H,j*step_int[1]:j*step_int[1]+patch_W] += 1
if((img_size[2] - patch_H) % step_int[0] != 0):
for j in range(ncol):
img[:,:,-patch_H:,j*step_int[1]:j*step_int[1]+patch_W] += patches[-1,j,]
overlap_counter[:,:,-patch_H:,j*step_int[1]:j*step_int[1]+patch_W] += 1
if((img_size[3] - patch_W) % step_int[1] != 0):
for i in range(nrow):
img[:,:,i*step_int[0]:i*step_int[0]+patch_H,-patch_W:] += patches[i,-1,]
overlap_counter[:,:,i*step_int[0]:i*step_int[0]+patch_H,-patch_W:] += 1
if((img_size[2] - patch_H) % step_int[0] != 0 and (img_size[3] - patch_W) % step_int[1] != 0):
img[:,:,-patch_H:,-patch_W:] += patches[-1,-1,]
overlap_counter[:,:,-patch_H:,-patch_W:] += 1
img /= overlap_counter
if(img_shape[0]<patch_H):
num_padded_H_Top = (patch_H - img_shape[0])//2
num_padded_H_Bottom = patch_H - img_shape[0] - num_padded_H_Top
img = img[:,:,num_padded_H_Top:-num_padded_H_Bottom,]
if(img_shape[1]<patch_W):
num_padded_W_Left = (patch_W - img_shape[1])//2
num_padded_W_Right = patch_W - img_shape[1] - num_padded_W_Left
img = img[:,:,:,num_padded_W_Left:-num_padded_W_Right]
return img
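
# Round-trip sanity check (our addition, hypothetical): with the default
# non-overlapping step, extract followed by reconstruct must reproduce the
# input exactly when the image dimensions are multiples of the patch size.
_chk = torch.arange(64 * 96, dtype=torch.float32).reshape(1, 1, 64, 96)
_chk_rec = reconstruct_from_patches_2d(
    extract_patches_2d(_chk, [32, 32], batch_first=True), [64, 96])
assert torch.allclose(_chk, _chk_rec)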
m = nn.Upsample(scale_factor=4, mode='nearest')
d = nn.Upsample(scale_factor=0.25, mode='nearest')
#-------------------Load volume------------------------------
t1 = read_img(t1_path[0])
t2 = read_img(t2_path[0])
flair = read_img(flair_path[0])
height = t1.shape[2]
width = t1.shape[3]
tensor = torch.cat(( t1,t2,flair),1)
print("Volume created\n")
#-------------------Prevalence map------------------------------
print("Starting the registration\n")
def register():
import os
import elastix
import imageio
import elastix
import numpy as np
import imageio
import os
import SimpleITK as sitk
def change_parameter(input_path, old_text, new_text, output_path):
"""
replaces the old_text to the next_text in parameter files
Parameters
----------
input_path : str
parameter file path to be changed.
old_text : str
old text.
new_text : str
new text.
output_path : str
            path of the changed parameter file.
Returns
-------
None.
"""
#check if input_path exists
if not os.path.exists(input_path):
print(input_path + ' does not exist.')
a_file = open(input_path)
list_of_lines = a_file.readlines()
for line in range(0,len(list_of_lines)):
if (list_of_lines[line] == old_text):
list_of_lines[line] = new_text
a_file = open(output_path, 'w')
a_file.writelines(list_of_lines)
a_file.close()
# IMPORTANT: these paths may differ on your system, depending on where
# Elastix has been installed. Please set accordingly.
#ELASTIX_PATH = os.path.join('elastix-5.0.1-linux/bin/elastix')
#TRANSFORMIX_PATH = os.path.join('elastix-5.0.1-linux/bin/transformix')
ELASTIX_PATH = os.path.join('elastix-5.0.1-linux/bin/elastix')
TRANSFORMIX_PATH = os.path.join('elastix-5.0.1-linux/bin/transformix')
if not os.path.exists(ELASTIX_PATH):
raise IOError('Elastix cannot be found, please set the correct ELASTIX_PATH.')
if not os.path.exists(TRANSFORMIX_PATH):
raise IOError('Transformix cannot be found, please set the correct TRANSFORMIX_PATH.')
# Make a results directory if non exists
if os.path.exists('results') is False:
os.mkdir('results')
# Define the paths to the two images you want to register
target_dir = os.path.join(t1_path[0])
moving_dir = os.path.join( 'example_data', 'mni.nii')
moving_mask_dir = os.path.join('example_data', 'Prevalence_map-csv.nii.gz')
output_dir='results'
# Define a new elastix object 'el' with the correct path to elastix
el = elastix.ElastixInterface(elastix_path=ELASTIX_PATH)
    # Register the moving image to the target image
el.register(
fixed_image=target_dir,
moving_image=moving_dir,
parameters=[os.path.join( 'example_data', 'affine.txt'), os.path.join('example_data', 'bspline.txt')],
output_dir=os.path.join('results'))
# NOTE: two TransformParameters files will come out of this. Check which one to use for transformix. One file calls the other, so only provide one.
# Find the results
transform_path = os.path.join(output_dir, 'TransformParameters.1.txt')
result_path = os.path.join(output_dir, 'result.1.nii')
param_path=transform_path
for i in range(len(param_path)):
old_text = '(FinalBSplineInterpolationOrder 3)\n'
new_text = '(FinalBSplineInterpolationOrder 0)\n'
change_parameter(param_path , old_text, new_text, param_path)
    # Feed the transform parameters from the registration into transformix
tr = elastix.TransformixInterface(parameters=transform_path,
transformix_path=TRANSFORMIX_PATH)
tr.transform_image(moving_mask_dir, output_dir=r'results')
    # Apply the transform to the moving prevalence-map mask
transformed_image_path = tr.transform_image(moving_mask_dir, output_dir=r'results')
moving_img_mask = sitk.GetArrayFromImage(sitk.ReadImage(transformed_image_path))
#print(moving_img_mask)
img1= sitk.ReadImage('results/result.nii')
Im = img1
BinThreshImFilt = sitk.BinaryThresholdImageFilter()
BinThreshImFilt.SetLowerThreshold(1)
BinThreshImFilt.SetOutsideValue(0)
BinThreshImFilt.SetInsideValue(1)
BinIm = BinThreshImFilt.Execute(Im)
sitk.WriteImage(BinIm, 'results/prevalence_map.nii.gz')
register()
print("Registration done\n")
map_path = 'results/prevalence_map.nii.gz'
prev_map_itk = sitk.ReadImage(map_path)
prev_map_arr = sitk.GetArrayFromImage(prev_map_itk)
#-------------------Prediction RCNN------------------------------
model = torch.load('model_RCNN.pt', map_location=device)
model.to(device)
print("Model Mask RCNN loaded\n")
print("Predicting with Mask RCNN......\n")
# Do prediction on all 64 pacthes == 1 slice
def pred_patches(upsample):
    # run Mask R-CNN on every upsampled patch of one slice and fuse the
    # per-instance masks into binary patch predictions
    patch_pred = torch.zeros(0,1,256,256)
for f in range(len(upsample)):
#for f in range(36):
one_patch = upsample[f,:,:,:]
model.eval()
with torch.no_grad():
prediction = model([one_patch.to(device)])
mask = prediction[0]['masks']
mask = mask.cpu()
threshold, upper, lower = 0.1, 1, 0
bmask=np.where(mask>threshold, upper, lower)
        if len(mask) !=0:
            # fuse all predicted instance masks into a single map
            mm0 = np.zeros_like(bmask[0, :, :, :])
            for k in range(len(bmask)):
                mm0 = mm0 + bmask[k, :, :, :]
#binarize
threshold, upper, lower = 0.1, 1, 0
fuse=np.where(mm0>threshold, upper, lower)
fuse = torch.from_numpy(fuse)
fuse = fuse.unsqueeze(0)
#print(fuse.shape)
elif len(mask) == 0:
fuse = torch.zeros(1,256,256)
fuse = fuse.unsqueeze(0)
patch_pred = torch.cat((patch_pred,fuse),0)
downsample = d(patch_pred)
vol = reconstruct_from_patches_2d(downsample, [height,width], batch_first=False)
return vol
slices = torch.zeros(0,1,height,width)
for f in range(len(tensor)):
one_slice = tensor[f,:,:,:]
one_slice = one_slice.unsqueeze(0)
patches = extract_patches_2d(one_slice, [64,64], batch_first=True)
m = nn.Upsample(scale_factor=4, mode='nearest')
upsample = m(patches)
slice_pred = pred_patches(upsample)
slices = torch.cat((slices,slice_pred),0)
print("Prediction done\n")
foo = slices.squeeze(1)
it_img = sitk.GetImageFromArray(foo)
it_img.CopyInformation(im)
sitk.WriteImage(it_img, 'results/rcnn_pred-script.nii.gz')
rcnn_pred_itk = it_img
rcnn_pred_arr = foo
#-------------------Prediction - map------------------------------
print("Prediction from Mask RCNN - Prevalence map in progress\n")
im = sitk.ReadImage('results/rcnn_pred-script.nii.gz')
arr = sitk.GetArrayFromImage(im)
im2 = sitk.ReadImage('results/prevalence_map.nii.gz')
arr2 = sitk.GetArrayFromImage(im2)
#arr = rcnn_pred_arr
#arr2 = prev_map_arr
out_arr = arr + arr2
out_im = sitk.GetImageFromArray(out_arr)
out_im.CopyInformation(im)
Im = out_im
BinThreshImFilt = sitk.BinaryThresholdImageFilter()
BinThreshImFilt.SetLowerThreshold(1.1)
BinThreshImFilt.SetUpperThreshold(2)
BinThreshImFilt.SetOutsideValue(0)
BinThreshImFilt.SetInsideValue(1)
BinIm = BinThreshImFilt.Execute(Im)
sitk.WriteImage(BinIm, 'results/rcnn_pred-map.nii.gz')
rcnn_pred_map_itk = BinIm
rcnn_pred_map_arr = sitk.GetArrayFromImage(rcnn_pred_map_itk)
#-------------------Prediction UNet ------------------------------
print("Prediction with Unet\n")
from torchvision.models import resnext50_32x4d
class ConvRelu(nn.Module):
def __init__(self, in_channels, out_channels, kernel, padding):
super().__init__()
self.convrelu = nn.Sequential(
nn.Conv2d(in_channels, out_channels, kernel, padding=padding),
nn.ReLU(inplace=True)
)
def forward(self, x):
x = self.convrelu(x)
return x
class DecoderBlock(nn.Module):
def __init__(self, in_channels, out_channels):
super().__init__()
self.conv1 = ConvRelu(in_channels, in_channels // 4, 1, 0)
self.deconv = nn.ConvTranspose2d(in_channels // 4, in_channels // 4, kernel_size=4,
stride=2, padding=1, output_padding=0)
self.conv2 = ConvRelu(in_channels // 4, out_channels, 1, 0)
def forward(self, x):
x = self.conv1(x)
x = self.deconv(x)
x = self.conv2(x)
return x
class ResNeXtUNet(nn.Module):
def __init__(self, n_classes):
super().__init__()
self.base_model = resnext50_32x4d(pretrained=True)
self.base_layers = list(self.base_model.children())
filters = [4*64, 4*128, 4*256, 4*512]
# Down
self.encoder0 = nn.Sequential(*self.base_layers[:3])
self.encoder1 = nn.Sequential(*self.base_layers[4])
self.encoder2 = nn.Sequential(*self.base_layers[5])
self.encoder3 = nn.Sequential(*self.base_layers[6])
self.encoder4 = nn.Sequential(*self.base_layers[7])
# Up
self.decoder4 = DecoderBlock(filters[3], filters[2])
self.decoder3 = DecoderBlock(filters[2], filters[1])
self.decoder2 = DecoderBlock(filters[1], filters[0])
self.decoder1 = DecoderBlock(filters[0], filters[0])
# Final Classifier
self.last_conv0 = ConvRelu(256, 128, 3, 1)
self.last_conv1 = nn.Conv2d(128, n_classes, 3, padding=1)
def forward(self, x):
# Down
x = self.encoder0(x)
e1 = self.encoder1(x)
e2 = self.encoder2(e1)
e3 = self.encoder3(e2)
e4 = self.encoder4(e3)
# Up + sc
d4 = self.decoder4(e4) + e3
d3 = self.decoder3(d4) + e2
d2 = self.decoder2(d3) + e1
d1 = self.decoder1(d2)
#print(d1.shape)
# final classifier
out = self.last_conv0(d1)
out = self.last_conv1(out)
out = torch.sigmoid(out)
return out
rx50 = torch.load('model_UNet32.pt', map_location=device)
rx50.to(device)
print("Model rx50 loaded\n")
mask_path = sitk.ReadImage('results/rcnn_pred-map.nii.gz')
mask_img = sitk.GetArrayFromImage(mask_path)
mask = torch.from_numpy(mask_img)
#mask = torch.from_numpy(rcnn_pred_map_arr)
mask = mask.unsqueeze(1)
volume = torch.cat((tensor, mask),1)
print("Predicting with UNet rx50\n")
# Do prediction on all 256 pacthes == 1 slice
def pred_patches_UNet(patches):
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
torch.cuda.set_device(0)
train_dataloader = DataLoader(patches, batch_size=1, num_workers=0, shuffle=False)
inp_tensor = torch.zeros(0,1,32,32)
for i, (data) in enumerate(train_dataloader):
if data[:,3,:,:].max()==0:
data = data[:,3,:,:]
data = data.unsqueeze(0)
inp_tensor = torch.cat((inp_tensor,data),0)
# LAcunes are here
elif data[:,3,:,:].max()!=0:
mask = data[:,3,:,:]
x = data[:,:3,:,:]
bla2 = x / 255
pred = rx50(bla2.to(device))
pred = pred.detach().cpu().numpy()[0,0,:,:]
pred_tensor = torch.from_numpy(pred)
pred_tensor = pred_tensor.unsqueeze(0)
pred_tensor = pred_tensor.unsqueeze(0)
## Apply thresholding
inp_tensor = torch.cat((inp_tensor,pred_tensor),0)
return inp_tensor
slices = torch.zeros(0,1,height,width)
for f in range(len(volume)):
one_slice = volume[f,:,:,:]
one_slice = one_slice.unsqueeze(0)
patches = extract_patches_2d(one_slice, [32,32], batch_first=True)
bla = pred_patches_UNet(patches)
vol = reconstruct_from_patches_2d(bla, [height,width], batch_first=False)
slices = torch.cat((slices,vol),0)
#a = np.array(slices)
#threshold, upper, lower = 0.7, 1, 0
#mask=np.where(a>threshold, upper, lower)
foo = slices.squeeze(1)
#foo = mask.squeeze(1)
it_img = sitk.GetImageFromArray(foo)
it_img.CopyInformation(im)
sitk.WriteImage(it_img, 'results/unet_pred.nii.gz')
unet_pred_itk = it_img
unet_pred_arr = foo
print("Done\n")
#-------------------UNet pred - Map ------------------------------
print("Prediction from UNet - Prevalence map.....\n")
im = sitk.ReadImage('results/unet_pred.nii.gz')
arr = sitk.GetArrayFromImage(im)
#arr = unet_pred_arr
im2 = sitk.ReadImage('results/prevalence_map.nii.gz')
arr2 = sitk.GetArrayFromImage(im2)
out_arr = arr + arr2
out_im = sitk.GetImageFromArray(out_arr)
out_im.CopyInformation(im)
Im = out_im
BinThreshImFilt = sitk.BinaryThresholdImageFilter()
BinThreshImFilt.SetLowerThreshold(1.1)
#BinThreshImFilt.SetUpperThreshold(2)
BinThreshImFilt.SetOutsideValue(0)
BinThreshImFilt.SetInsideValue(1)
BinIm = BinThreshImFilt.Execute(Im)
end = '/'+ sub_no + '_space-T1_binary_prediction.nii.gz'
pred_path = os.path.join('output_data' + end)
sitk.WriteImage(BinIm, pred_path)
print("final prediction done \n")
rem_path = ('results')
shutil.rmtree(rem_path)
print("results removed \n")
| 21,262 | 36.173077 | 155 |
py
|
MixLacune
|
MixLacune-main/process.py
|
import SimpleITK
import numpy as np
from evalutils import SegmentationAlgorithm
from evalutils.validators import UniqueImagesValidator
# added imports
#import tensorflow as tf
from typing import Tuple, List
from pathlib import Path
import re
import subprocess
from evalutils.io import (ImageLoader, SimpleITKLoader)
class MixLacune(SegmentationAlgorithm):
def __init__(self):
self.input_modalities = ['T1', 'T2', 'FLAIR']
self.first_modality = self.input_modalities[0]
self.flag_save_uncertainty = True
super().__init__(
# (Skip UniquePathIndicesValidator, because this will error when there are multiple images
# for the same subject)
validators=dict(input_image=(UniqueImagesValidator(),)),
# Indicate with regex which image to load as input, e.g. T1 scan
file_filters={'input_image':
re.compile("/input/sub-.*_space-.*_desc-masked_%s.nii.gz" % self.first_modality)}
)
print("==> Initializing model")
# --> Load model
# TODO add code to load model
print("==> Model loaded")
def _load_input_image(self, *, case) -> Tuple[List[SimpleITK.Image], List[Path]]:
input_image_file_path = case["path"]
input_image_file_loader = self._file_loaders["input_image"]
if not isinstance(input_image_file_loader, ImageLoader):
raise RuntimeError(
"The used FileLoader was not of subclass ImageLoader"
)
input_images = []
input_path_list = []
# Load the image(s) for this case
for modality in self.input_modalities:
# Load all input images, e.g. T1, T2 and FLAIR
scan_name = Path(input_image_file_path.name.replace('%s.nii.gz' % self.first_modality,
'%s.nii.gz' % modality))
modality_path = input_image_file_path.parent / scan_name
input_images.append(input_image_file_loader.load_image(modality_path))
input_path_list.append(modality_path)
# Check that it is the expected image
if input_image_file_loader.hash_image(input_images[0]) != case["hash"]:
raise RuntimeError("Image hashes do not match")
return input_images, input_path_list
def process_case(self, *, idx, case):
# Load and test the image for this case
input_images, input_path_list = self._load_input_image(case=case)
# Segment case
list_results = self.predict(input_images=input_images)
if self.flag_save_uncertainty:
assert len(list_results) == 2, "Error, predict function should return a list containing 2 images, " \
"the predicted segmentation and the predicted uncertainty map. " \
"Or change flag_save_uncertainty to False"
else:
assert len(list_results) == 1, "Error, predict function should return a list containing 1 image, " \
"only the predicted segmentation. " \
"Or change flag_save_uncertainty to True"
# Write resulting segmentation to output location
if not self._output_path.exists():
self._output_path.mkdir()
save_description = ['prediction', 'uncertaintymap']
output_path_list = []
for i, outimg in enumerate(list_results):
output_name = Path(input_path_list[0].name.split("desc-masked_")[0] + "%s.nii.gz" % save_description[i])
segmentation_path = self._output_path / output_name
print(segmentation_path)
output_path_list.append(segmentation_path)
SimpleITK.WriteImage(outimg, str(segmentation_path), True)
input_name_list = [p.name for p in input_path_list]
output_name_list = [p.name for p in output_path_list]
# Write segmentation file path to result.json for this case
return {
"outputs": [
dict(type="metaio_image", filename=output_name_list)
],
"inputs": [
dict(type="metaio_image", filename=input_name_list)
],
"error_messages": [],
}
def predict(self, *, input_images: List[SimpleITK.Image]) -> List[SimpleITK.Image]:
print("==> Running prediction")
# Process-lacunes.py assumes images are in a sub-folder and the first seven characters are the subject ID
for i in range(len(self.input_modalities)):
SimpleITK.WriteImage(input_images[i], '/home/input_data/lacunes/lacunes_'+self.input_modalities[i]+'.nii.gz')
subprocess.run(["sh", "/home/run.sh"])
prediction_image = SimpleITK.ReadImage('/home/output_data/lacunes_space-T1_binary_prediction.nii.gz')
# Compute a (fake) uncertainty image. Uncertainty is usually at the lesion boundaries. By using dilation,
# a 1-pixel boundary at the border of every segmentation (donut-shape) is created. This is done in
# 2D (3x3x0 kernel), because most uncertainty between raters is within-slice and not through-slice.
# Only dilation, because our method tends to under-segment and thus errors are on the outside.
#
# I feel dirty doing this, I'm sorry.
dilated_prediction_image = SimpleITK.BinaryDilate(prediction_image, [1, 1, 0])
uncertainty_image = SimpleITK.Subtract(dilated_prediction_image, prediction_image)
print("==> Prediction done")
return [prediction_image, uncertainty_image]
if __name__ == "__main__":
MixLacune().process()
| 5,806 | 40.776978 | 121 |
py
|
slfrank
|
slfrank-master/tests/test_linop.py
|
import unittest
import numpy as np
import numpy.testing as npt
from linop import DiagSum
class TestLinop(unittest.TestCase):
def test_DiagSum(self):
X = np.array([[1, 2], [3, 4]])
A = DiagSum(2)
npt.assert_allclose(A(X), [3, 5, 2])
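
    # Dot-product test (our addition; a hypothetical extra check): a SigPy
    # Linop and its adjoint should satisfy <A(X), y> == <X, A.H(y)>.
    def test_DiagSum_adjoint(self):
        rng = np.random.RandomState(0)
        X = rng.randn(3, 3)
        y = rng.randn(5)
        A = DiagSum(3)
        npt.assert_allclose(np.vdot(A(X), y), np.vdot(X, A.H(y)))

if __name__ == '__main__':
    unittest.main()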
| 311 | 18.5 | 44 |
py
|
slfrank
|
slfrank-master/slfrank/design.py
|
import numpy as np
import cvxpy as cp
import sigpy as sp
import sigpy.mri.rf as rf
import scipy.sparse
from . import linop, prox, transform
def design_rf(n=64, tb=4, ptype='ex', d1=0.01, d2=0.01, phase='linear',
oversamp=15, lamda=None, solver='PDHG',
max_iter=None, sigma=None, verbose=True):
"""Design single-band RF pulse using SLfRank.
Args:
n (int): length of RF pulse.
tb (float): time-bandwidth product.
ptype (str): pulse type. Can be
'ex' for excitation,
'sat' for saturation,
'inv' for inversion,
'se' for spin-echo refocussing.
d1 (float): pass-band ripple.
d2 (float): stop-band ripple.
        phase (str): phase type. Can be
            'linear' for linear phase,
            'min' for minimum phase,
            'max' for maximum phase.
oversamp (int): oversampling for number of points in frequency domain.
lamda (float or None): minimum phase parameter.
solver (str): solver name. Can be
'PDHG' for primal-dual hybrid gradient using SigPy,
'SCS' or 'MOSEK' for solvers in CVXPy.
        max_iter (int or None): maximum number of iterations for PDHG.
sigma (float or None): step-size parameter for PDHG.
verbose (bool): verbose print-out.
Return:
array: length-n RF pulse.
"""
dinf = rf.dinf(d1, d2)
w = dinf / tb
bands = [[-np.pi, -(1 + w) * tb / n * np.pi],
[-(1 - w) * tb / n * np.pi, (1 - w) * tb / n * np.pi],
[(1 + w) * tb / n * np.pi, np.pi]]
if ptype == 'ex' and phase == 'linear':
m_xy_vals = [0, lambda omega: np.exp(-1j * omega * n / 2), 0]
m_xy_deltas = [d2, d1, d2]
m_z_vals = [1, 0, 1]
m_z_deltas = [1 - (1 - d2**2)**0.5, (1 - (1 - d1)**2)**0.5, 1 - (1 - d2**2)**0.5]
beta_vals = []
beta_deltas = []
if lamda is None:
lamda = 0
if solver == 'PDHG':
if sigma is None:
sigma = 1000
if max_iter is None:
max_iter = 3000
elif ptype == 'ex' and phase in ['min', 'max']:
m_xy_vals = [0, 0, 0]
m_xy_deltas = [d2, 1, d2]
m_z_vals = [1, 0, 1]
m_z_deltas = [1 - (1 - d2**2)**0.5, (1 - (1 - d1)**2)**0.5, 1 - (1 - d2**2)**0.5]
beta_vals = []
beta_deltas = []
if lamda is None:
lamda = 1
if sigma is None:
sigma = 100
if max_iter is None:
max_iter = 20000
elif ptype == 'sat' and phase in ['min', 'max']:
m_xy_vals = [0, 0, 0]
m_xy_deltas = [(1 - (1 - d2)**2)**0.5, 1, (1 - (1 - d2)**2)**0.5]
m_z_vals = [1, 0, 1]
m_z_deltas = [d2, d1, d2]
beta_vals = []
beta_deltas = []
if lamda is None:
lamda = 1
if sigma is None:
sigma = 1000
if max_iter is None:
max_iter = 10000
elif ptype == 'inv' and phase in ['min', 'max']:
m_xy_vals = [0, 0, 0]
m_xy_deltas = [(1 - (1 - d2)**2)**0.5, (1 - (1 - d1)**2)**0.5, (1 - (1 - d2)**2)**0.5]
m_z_vals = [1, -1, 1]
m_z_deltas = [d2, d1, d2]
beta_vals = []
beta_deltas = []
if lamda is None:
lamda = 1
if solver == 'PDHG':
if sigma is None:
sigma = 1000
if max_iter is None:
max_iter = 20000
elif ptype == 'se' and phase == 'linear':
m_xy_vals = []
m_xy_deltas = []
m_z_vals = []
m_z_deltas = []
beta_vals = [0, lambda omega: np.exp(-1j * omega * (n - 1) / 2), 0]
beta_deltas = [d2**0.5, (1 - (1 - d1)**0.5) / 2, d2**0.5]
if lamda is None:
lamda = 0
if solver == 'PDHG':
if sigma is None:
sigma = 1000
if max_iter is None:
max_iter = 3000
else:
raise ValueError(f'ptype={ptype} and phase={phase} not implemented.')
m = n * oversamp
omega = 2 * np.pi * (np.arange(m) - m // 2) / m
m_xy, d_xy = bands_to_arrays(omega, bands, m_xy_vals, m_xy_deltas)
m_z, d_z = bands_to_arrays(omega, bands, m_z_vals, m_z_deltas)
beta, d_beta = bands_to_arrays(omega, bands, beta_vals, beta_deltas)
if solver == 'PDHG':
a, b = DesignPaulynomials(
n, omega, m_xy, d_xy, m_z, d_z, beta, d_beta,
max_iter=max_iter, sigma=sigma, lamda=lamda).run()
else:
a, b = design_paulynomials(
n, omega, m_xy, d_xy, m_z, d_z, beta, d_beta,
lamda=lamda, solver=solver, verbose=verbose)
b1 = transform.inverse_slr(a, b * 1j)
if phase == 'max':
b1 = b1[::-1]
return b1
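
def _example_design():
    # Illustrative usage (our addition; a hypothetical helper, not part of
    # the published API): design a linear-phase excitation pulse with the
    # default SLfRank settings and return it.
    pulse = design_rf(n=64, tb=4, ptype='ex', d1=0.01, d2=0.01,
                      phase='linear', solver='PDHG')
    assert len(pulse) == 64
    return pulse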
def bands_to_arrays(omega, bands, vals, deltas):
"""Convert magnetization band specifications to arrays.
Args:
omega (array): array of frequency values between -pi to pi.
bands (list of bands): list of frequency bands, specified by
starting and end points in radians between -pi to pi.
vals (list of floats or functions):
desired magnitude response for each band.
Must have the same length as bands. For example,
[0, 1, 0].
deltas (list of floats or functions):
maximum deviation for each band.
Return:
(array, array): magnetization array and maximum deviation array
evaluated on omega.
"""
m = len(omega)
    v = np.zeros(m, dtype=complex)
d = np.ones(m)
for band, val, delta, in zip(bands, vals, deltas):
i = (omega >= band[0]) & (omega <= band[1])
if np.isscalar(val):
v[i] = val
else:
v[i] = val(omega[i])
if np.isscalar(delta):
d[i] = delta
else:
d[i] = delta(omega[i])
return v, d
class DesignPaulynomials(sp.app.App):
"""Design Shinnar-Le-Roux polynomials given desired profiles using SigPy.
Args:
n (int): number of hard pulses.
omega (array): array of frequency values between -pi to pi.
m_xy (array): desired transverse magnetization evaluated on omega.
d_xy (array): desired transverse ripple size evaluated on omega.
m_z (array): desired longitudinal magnetization evaluated on omega.
        d_z (array): desired longitudinal ripple size evaluated on omega.
        beta (array): desired beta parameter evaluated on omega.
        d_beta (array): desired beta parameter ripple size evaluated on omega.
sigma (float): dual step-size.
lamda (float): minimum phase pulse parameter.
max_iter (int): maximum number of iterations.
Returns:
array, array: Cayley-Klein polynomials of length n.
"""
def __init__(self, n, omega, m_xy, d_xy, m_z, d_z, beta, d_beta,
sigma=100, lamda=0, max_iter=10000):
m = len(m_xy)
# Create linear operators
A_11 = sp.linop.Slice((2 * n + 1, 2 * n + 1), (slice(1), slice(1)))
S_b = sp.linop.Slice((2 * n + 1, 2 * n + 1), (slice(n + 1, 2 * n + 1), 0))
S_aa = sp.linop.Slice((2 * n + 1, 2 * n + 1), (slice(1, n + 1), slice(1, n + 1)))
S_ba = sp.linop.Slice((2 * n + 1, 2 * n + 1), (slice(n + 1, 2 * n + 1), slice(1, n + 1)))
S_bb = sp.linop.Slice(
(2 * n + 1, 2 * n + 1), (slice(n + 1, 2 * n + 1), slice(n + 1, 2 * n + 1)))
D = linop.DiagSum(n)
F = np.exp(-1j * np.outer(omega, np.arange(-n + 1, n)))
F_b = np.exp(-1j * np.outer(omega, np.arange(n)))
A_b = sp.linop.Reshape((m, ), (m, 1)) * sp.linop.MatMul((n, 1), F_b) * sp.linop.Reshape((n, 1), (n, )) * S_b
A_xy = sp.linop.Reshape((m, ), (m, 1)) * sp.linop.MatMul((2 * n - 1, 1), F) * sp.linop.Reshape((2 * n - 1, 1), (2 * n - 1, )) * D * (2 * S_ba)
A_z = sp.linop.Reshape((m, ), (m, 1)) * sp.linop.MatMul((2 * n - 1, 1), F) * sp.linop.Reshape((2 * n - 1, 1), (2 * n - 1, )) * D * (S_aa - S_bb)
A_I = D * (S_aa + S_bb)
As = [A_11, A_b, A_xy, A_z, A_I]
A = sp.linop.Vstack(As)
# Create proximal operators
dirac = np.zeros(2 * n - 1)
dirac[n - 1] = 1
proxf_1 = sp.prox.LInfProj([1], 0, 1)
proxf_b = sp.prox.LInfProj(A_b.oshape, d_beta, beta)
proxf_xy = sp.prox.LInfProj(A_xy.oshape, d_xy, m_xy)
proxf_z = sp.prox.LInfProj(A_z.oshape, d_z, m_z)
proxf_I = sp.prox.LInfProj(A_I.oshape, 0, dirac)
proxf = sp.prox.Stack([
proxf_1, proxf_b, proxf_xy, proxf_z, proxf_I])
proxfc = sp.prox.Conj(proxf)
proxg = prox.Objective((2 * n + 1, 2 * n + 1), lamda)
# Get step-size
sigma_1 = sigma
sigma_b = sigma / m
sigma_xy = sigma / (4 * n * m)
sigma_z = sigma / (2 * n * m)
sigma_I = sigma / (2 * n)
tau = 1 / sigma
sigma = np.concatenate(
[[sigma_1],
np.full(A_b.oshape, sigma_b),
np.full(A_xy.oshape, sigma_xy),
np.full(A_z.oshape, sigma_z),
np.full(A_I.oshape, sigma_I)])
# Get variables
        self.X = np.zeros((2 * n + 1, 2 * n + 1), dtype=complex)
self.X[0, 0] = 1
self.a = self.X[1:n + 1, 0]
self.b = self.X[n + 1:, 0]
        u = np.zeros(A.oshape, dtype=complex)
alg = sp.alg.PrimalDualHybridGradient(
proxfc, proxg, A, A.H, self.X, u, tau, sigma,
max_iter=max_iter)
self.m_xy = m_xy
self.m_z = m_z
self.beta = beta
self.d_xy = d_xy
self.d_z = d_z
self.d_beta = d_beta
self.As = As
self.lamda = lamda
self.dirac = dirac
super().__init__(alg)
def _summarize(self):
if self.show_pbar:
A_11, A_b, A_xy, A_z, A_I = self.As
err_beta = np.max(
np.clip(np.abs(A_b(self.X) - self.beta) - self.d_beta, 0, None))
err_xy = np.max(
np.clip(np.abs(A_xy(self.X) - self.m_xy) - self.d_xy, 0, None))
err_z = np.max(
np.clip(np.abs(A_z(self.X) - self.m_z) - self.d_z, 0, None))
err_I = np.max(np.abs(A_I(self.X) - self.dirac))
w, v = np.linalg.eigh(self.X)
err_rank1 = np.sum(w[:-1])
n = (len(self.X) + 1) // 2
obj = self.X[1, 0]
obj += self.X[0, 1]
obj += self.lamda * self.X[0, n + 1]
obj += self.lamda * self.X[n + 1, 0]
obj = np.real(obj)
self.pbar.set_postfix(err_rank1='{0:.2E}'.format(err_rank1),
err_beta='{0:.2E}'.format(err_beta),
err_I='{0:.2E}'.format(err_I),
err_xy='{0:.2E}'.format(err_xy),
err_z='{0:.2E}'.format(err_z),
obj='{0:.2E}'.format(obj))
def _output(self):
return self.a, self.b
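# Usage sketch (illustrative, not part of the original module): the App above
# is run like any other SigPy App. Assuming `app` was constructed with the
# profile arrays described in the docstring:
#
#     a, b = app.run()  # Cayley-Klein polynomials of length n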
def design_paulynomials(n, omega, m_xy, d_xy, m_z, d_z, beta, d_beta,
verbose=False, lamda=0, solver='SCS'):
"""Design Shinnar-Le-Roux polynomials given desired profiles using CVXPy.
Args:
n (int): number of hard pulses.
omega (array): array of frequency values between -pi to pi.
m_xy (array): desired transverse magnetization evaluated on omega.
d_xy (array): desired transverse ripple size evaluated on omega.
m_z (array): desired longitudinal magnetization evaluated on omega.
        d_z (array): desired longitudinal ripple size evaluated on omega.
        beta (array): desired beta parameter evaluated on omega.
        d_beta (array): desired beta parameter ripple size evaluated on omega.
verbose (bool): verbose print-out.
lamda (float): minimum phase pulse parameter.
solver (str): solver name.
Returns:
array, array: Cayley-Klein polynomials of length n.
"""
# Initialize variables
X = cp.Variable((2 * n + 1, 2 * n + 1), complex=True)
X = (X + cp.conj(X.T)) / 2
a = X[1:n + 1, 0]
b = X[n + 1:, 0]
P = X[1:, 1:]
aa = P[:n, :n]
ba = P[n:2 * n, :n]
bb = P[n:2 * n, n:2 * n]
# Get constraints
constraints = [X >> 0, X[0, 0] == 1]
# Trace equals to 1
    dirac = np.zeros(2 * n - 1, dtype=complex)
dirac[n - 1] = 1
constraints.append(diag_sum(aa + bb) == dirac)
if m_xy is not None:
A = np.exp(-1j * np.outer(omega, np.arange(-n + 1, n)))
constraints.append(
cp.abs(m_xy - 2 * A @ diag_sum(ba)) <= d_xy)
if m_z is not None:
A = np.exp(-1j * np.outer(omega, np.arange(-n + 1, n)))
constraints.append(
cp.abs(m_z - A @ diag_sum(aa - bb)) <= d_z)
if beta is not None:
A = np.exp(-1j * np.outer(omega, np.arange(n)))
constraints.append(cp.abs(beta - A @ b) <= d_beta)
# Set up problem
objective = cp.Maximize(cp.real(a[0] + lamda * b[0]))
prob = cp.Problem(objective, constraints)
prob.solve(verbose=verbose, solver=solver)
if verbose:
w, v = np.linalg.eigh(X.value)
print(f'Rank-1 approximation error: {np.sum(w[:-1])}')
if prob.status == 'infeasible':
raise ValueError('Infeasible: try relaxing constraints.')
return a.value, b.value
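# Usage sketch (illustrative, not part of the original module). The profile
# arrays are normally generated from band edges and ripple specs, with a
# transition ("don't-care") region between bands to keep the problem feasible.
# Constraints for m_z and beta can be dropped by passing None:
#
#     a, b = design_paulynomials(n, omega, m_xy, d_xy, m_z=None, d_z=None,
#                                beta=None, d_beta=None, solver='SCS')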
def diag_sum(X):
n = X.shape[0] - 1
si = np.zeros((n + 1)**2)
sj = np.zeros((n + 1)**2)
ss = np.ones((n + 1)**2)
c = 0
for i in range(n + 1):
for j in range(n + 1):
si[c] = i - j + n
sj[c] = i + j * (n + 1)
c = c + 1
shape = (2 * n + 1, (n + 1)**2)
A = scipy.sparse.coo_matrix((ss, (si, sj)), shape=shape)
return A @ cp.vec(X)
| 13,848 | 35.253927 | 152 |
py
|
slfrank
|
slfrank-master/slfrank/linop.py
|
import sigpy as sp
import numpy as np
import numba as nb
class DiagSum(sp.linop.Linop):
"""A Linop that sums along the diagonals of a matrix.
Args:
n (int): width of matrix.
"""
def __init__(self, n):
self.n = n
super().__init__((2 * n - 1, ), (n, n))
def _apply(self, input):
return diag_sum(input)
def _adjoint_linop(self):
return DiagEmbed(self.n)
class DiagEmbed(sp.linop.Linop):
"""A Linop that embeds an array along the diagonals of a matrix.
Args:
n (int): width of matrix.
"""
def __init__(self, n):
self.n = n
super().__init__((n, n), (2 * n - 1, ))
def _apply(self, input):
return diag_embed(input)
def _adjoint_linop(self):
return DiagSum(self.n)
@nb.jit(cache=True)
def diag_sum(input):
n = input.shape[0]
output = np.zeros(2 * n - 1, dtype=input.dtype)
for i in range(n):
for j in range(n):
output[n - 1 + i - j] += input[i, j]
return output
@nb.jit(cache=True)
def diag_embed(input):
n = (input.shape[0] + 1) // 2
output = np.empty((n, n), dtype=input.dtype)
for i in range(n):
for j in range(n):
output[i, j] = input[n - 1 + i - j]
return output
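# Quick check (illustrative, not part of the module): diag_sum groups entries
# with constant i - j, i.e. it sums along diagonals parallel to the main
# diagonal, and diag_embed is its adjoint.
#
#     X = np.arange(9, dtype=float).reshape(3, 3)
#     assert diag_sum(X)[2] == np.trace(X)  # middle bin collects the trace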
| 1,294 | 18.923077 | 68 |
py
|
slfrank
|
slfrank-master/slfrank/transform.py
|
import numpy as np
def hard_pulse(a, b, b1):
"""Apply hard pulse rotation to input magnetization.
    Args:
        a (array): alpha Cayley-Klein polynomial coefficients.
        b (array): beta Cayley-Klein polynomial coefficients.
        b1 (complex float): complex B1 value in radians.
    Returns:
        array, array: Cayley-Klein coefficients after the hard pulse
            rotation, in representation consistent with the input.
"""
c = np.cos(abs(b1) / 2)
if abs(b1) == 0:
s = 0
else:
s = 1j * b1 / abs(b1) * np.sin(abs(b1) / 2)
return c * a - s.conjugate() * b, s * a + c * b
def inverse_hard_pulse(a, b):
"""
"""
phi = 2 * np.arctan2(np.abs(b[0]), np.abs(a[0]))
theta = np.angle(-1j * b[0] * a[0].conjugate())
b1 = phi * np.exp(1j * theta)
c = np.cos(abs(b1) / 2)
if abs(b1) == 0:
s = 0
else:
s = 1j * b1 / abs(b1) * np.sin(abs(b1) / 2)
return c * a + s.conjugate() * b, -s * a + c * b, b1
def precess(a, b):
"""Simulate precess to input magnetization.
Args:
p (array): magnetization array.
omega (array): off-resonance.
dt (float): free induction decay duration.
Returns:
array: magnetization array after hard pulse rotation,
in representation consistent with input.
"""
a = np.concatenate((a, [0]))
b = np.concatenate(([0], b))
return a, b
def inverse_precess(a, b):
"""Simulate precess to input magnetization.
Args:
p (array): magnetization array.
omega (array): off-resonance.
dt (float): free induction decay duration.
Returns:
array: magnetization array after hard pulse rotation,
in representation consistent with input.
"""
return a[:-1], b[1:]
def forward_slr(b1):
"""Shinnar Le Roux forward evolution.
    The function uses the hard pulse approximation. Given an array of
    B1 complex amplitudes, it simulates a sequence of precessions, each
    followed by a hard pulse rotation.
    Args:
        b1 (array): complex B1 array in radians (rotation per hard pulse).
    Returns:
        array, array: Cayley-Klein polynomials, each of length n.
    Examples:
        Simulating an on-resonant spin under a 90 degree pulse,
        discretized into 1000 hard pulses.
        >>> n = 1000
        >>> b1 = np.pi / 2 / n * np.ones(n)
        >>> a, b = forward_slr(b1)
"""
a, b = hard_pulse(np.array([1]), np.array([0]), b1[0])
for b1_i in b1[1:]:
a, b = precess(a, b)
a, b = hard_pulse(a, b, b1_i)
return a, b
def inverse_slr(a, b):
"""Shinnar Le Roux inverse evolution.
    The function uses the hard pulse approximation. Given the Cayley-Klein
    polynomials a and b, it recovers the sequence of complex B1 hard pulse
    amplitudes that produced them.
    Args:
        a (array): alpha polynomial of length n.
        b (array): beta polynomial of length n.
    Returns:
        array: complex B1 array of length n.
"""
n = len(a)
b1 = []
for i in range(n):
a, b, b1_i = inverse_hard_pulse(a, b)
a, b = inverse_precess(a, b)
b1 = [b1_i] + b1
return np.array(b1)
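# Round-trip check (illustrative, not part of the module): inverse_slr
# recovers the hard pulse amplitudes that forward_slr was given.
#
#     b1 = np.pi / 2 / 8 * np.ones(8)
#     a, b = forward_slr(b1)
#     assert np.allclose(inverse_slr(a, b), b1)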
| 3,163 | 23.71875 | 70 |
py
|
slfrank
|
slfrank-master/slfrank/plot.py
|
import numpy as np
import matplotlib.pyplot as plt
import sigpy.mri.rf as rf
from . import transform
from . import design
def plot_slr_pulses(pulse_slr, pulse_slfrank,
m=1000, ptype='ex', phase='linear',
omega_range=[-np.pi, np.pi],
tb=4, d1=0.01, d2=0.01,
fontsize='x-large', labelsize='large'):
n = len(pulse_slr)
dinf = rf.dinf(d1, d2)
w = dinf / tb
bands = [[max(-np.pi, omega_range[0]), -(1 + w) * tb / n * np.pi],
[-(1 - w) * tb / n * np.pi, (1 - w) * tb / n * np.pi],
[(1 + w) * tb / n * np.pi, min(np.pi, omega_range[1])]]
boundaries = [-(1 + w) * tb / n * np.pi,
-(1 - w) * tb / n * np.pi,
(1 - w) * tb / n * np.pi,
(1 + w) * tb / n * np.pi,
]
fig, axs = plt.subplots(2, 2)
axs[0][0].plot(pulse_slr.real,
linewidth=0.5,
label='SLR',
color='tab:orange')
axs[0][0].plot(pulse_slfrank.real,
linewidth=0.5,
label='SLfRank',
color='tab:blue')
axs[0][0].set_title(r'$B_{1}$')
axs[0][0].set_xlabel('Time')
axs[0][0].legend()
omega = np.linspace(omega_range[0], omega_range[1], m)
psi_z = np.exp(-1j * np.outer(omega, np.arange(n)))
a_slr, b_slr = transform.forward_slr(pulse_slr)
alpha_slr = psi_z @ a_slr
beta_slr = psi_z @ b_slr
a_slfrank, b_slfrank = transform.forward_slr(pulse_slfrank)
alpha_slfrank = psi_z @ a_slfrank
beta_slfrank = psi_z @ b_slfrank
if ptype == 'se':
m_xy_slr = beta_slr**2
m_xy_slr *= np.exp(1j * omega * (n - 1))
m_z_slr = 2 * np.imag(alpha_slr * beta_slr)
m_xy_slfrank = beta_slfrank**2
m_xy_slfrank *= np.exp(1j * omega * (n - 1))
m_z_slfrank = 2 * np.imag(alpha_slfrank * beta_slfrank)
axs[1][0].set_title(r'$M_{\mathrm{x}}$')
axs[1][0].set_xlabel(r'$\omega$ [radian]')
axs[1][0].vlines(x=boundaries,
ymin=min(np.amin(np.real(m_xy_slr)), np.amin(np.real(m_xy_slfrank))),
ymax=max(np.amax(np.real(m_xy_slr)), np.amax(np.real(m_xy_slfrank))),
colors='gray',
linestyle='dotted',
linewidth=0.5,
label='Band Boundaries')
axs[1][0].plot(omega, np.real(m_xy_slr),
linewidth=0.5,
label=r'SLR',
color='tab:orange')
axs[1][0].plot(omega, np.real(m_xy_slfrank),
linewidth=0.5,
label='SLfRank',
color='tab:blue')
axs[1][1].set_title(r'$M_{\mathrm{y}}$')
axs[1][1].set_xlabel(r'$\omega$ [radian]')
axs[1][1].vlines(x=boundaries,
ymin=min(np.amin(np.imag(m_xy_slr)), np.amin(np.imag(m_xy_slfrank))),
ymax=max(np.amax(np.imag(m_xy_slr)), np.amax(np.imag(m_xy_slfrank))),
colors='gray',
linestyle='dotted',
linewidth=0.5,
label='Band Boundaries')
axs[1][1].plot(omega, np.imag(m_xy_slr),
linewidth=0.5,
label=r'SLR',
color='tab:orange')
axs[1][1].plot(omega, np.imag(m_xy_slfrank),
linewidth=0.5,
label='SLfRank',
color='tab:blue')
axs[0][1].set_title(r'$M_{\mathrm{z}}$')
axs[0][1].set_xlabel(r'$\omega$ [radian]')
axs[0][1].vlines(x=boundaries,
ymin=min(np.amin(m_z_slr), np.amin(m_z_slfrank)),
ymax=max(np.amax(m_z_slr), np.amax(m_z_slfrank)),
colors='gray',
linestyle='dotted',
linewidth=0.5,
label='Band Boundaries')
axs[0][1].plot(omega, m_z_slr,
linewidth=0.5,
label=r'SLR',
color='tab:orange')
axs[0][1].plot(omega, m_z_slfrank,
linewidth=0.5,
label='SLfRank',
color='tab:blue')
else:
m_xy_slr = 2 * alpha_slr.conjugate() * beta_slr
m_z_slr = np.abs(alpha_slr)**2 - np.abs(beta_slr)**2
m_xy_slfrank = 2 * alpha_slfrank.conjugate() * beta_slfrank
m_z_slfrank = np.abs(alpha_slfrank)**2 - np.abs(beta_slfrank)**2
if phase == 'linear':
m_xy_slr *= np.exp(1j * omega * n / 2)
m_xy_slfrank *= np.exp(1j * omega * n / 2)
axs[1][0].set_title(r'$|M_{\mathrm{xy}}|$')
axs[1][0].set_xlabel(r'$\omega$ [radian]')
axs[1][0].vlines(x=boundaries,
ymin=min(np.amin(np.abs(m_xy_slr)), np.amin(np.abs(m_xy_slfrank))),
ymax=max(np.amax(np.abs(m_xy_slr)), np.amax(np.abs(m_xy_slfrank))),
colors='gray',
linestyle='dotted',
linewidth=0.5,
label='Band Boundaries')
axs[1][0].plot(omega, np.abs(m_xy_slr),
linewidth=0.5,
label=r'SLR',
color='tab:orange')
axs[1][0].plot(omega, np.abs(m_xy_slfrank),
linewidth=0.5,
label=r'SLfRank',
color='tab:blue')
axs[1][1].set_title(r'$\angle M_{\mathrm{xy}}$')
axs[1][1].set_xlabel(r'$\omega$ [radian]')
axs[1][1].plot(omega, np.angle(m_xy_slr),
linewidth=0.5,
label=r'SLR',
color='tab:orange')
axs[1][1].plot(omega, np.angle(m_xy_slfrank),
linewidth=0.5,
label=r'SLfRank',
color='tab:blue')
axs[0][1].set_title(r'$M_{\mathrm{z}}$')
axs[0][1].set_xlabel(r'$\omega$ [radian]')
axs[0][1].vlines(x=boundaries,
ymin=min(np.amin(m_z_slr), np.amin(m_z_slfrank)),
ymax=max(np.amax(m_z_slr), np.amax(m_z_slfrank)),
colors='gray',
linestyle='dotted',
linewidth=0.5,
label='Band Boundaries')
axs[0][1].plot(omega, m_z_slr,
linewidth=0.5,
label=r'SLR',
color='tab:orange')
axs[0][1].plot(omega, m_z_slfrank,
linewidth=0.5,
label=r'SLfRank',
color='tab:blue')
return fig
| 6,895 | 40.293413 | 94 |
py
|
slfrank
|
slfrank-master/slfrank/__init__.py
|
from .design import *
from .linop import *
from .prox import *
from .transform import *
from .plot import *
| 108 | 17.166667 | 24 |
py
|
slfrank
|
slfrank-master/slfrank/prox.py
|
import sigpy as sp
class Objective(sp.prox.Prox):
def __init__(self, shape, lamda):
self.lamda = lamda
super().__init__(shape)
def _prox(self, alpha, input):
xp = sp.get_array_module(input)
n = (len(input) - 1) // 2
output = input.copy()
output[1, 0] += alpha
output[n + 1, 0] += alpha * self.lamda
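        # The steps above take a gradient step on the linear objective terms
        # (the a_0 and lamda * b_0 entries); the eigendecomposition below
        # then projects onto the positive semidefinite cone by zeroing
        # negative eigenvalues.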
w, v = xp.linalg.eigh(output)
w[w < 0] = 0
return (v * w) @ v.conjugate().T
| 468 | 22.45 | 46 |
py
|
stistools
|
stistools-master/stistools/r_util.py
|
import os
import os.path
import copy
NOT_APPLICABLE = 'n/a'
def expandFileName(filename):
"""Expand environment variable in a file name.
If the input file name begins with either a Unix-style or IRAF-style
environment variable (e.g. $lref/name_dqi.fits or lref$name_dqi.fits
respectively), this routine expands the variable and returns a complete
path name for the file.
Parameters
-----------
filename : str
A file name, possibly including an environment variable.
Returns
--------
fullname : str
The file name with environment variable expanded.
"""
n = filename.find("$")
if n == 0:
if filename != NOT_APPLICABLE:
# Unix-style file name.
filename = os.path.expandvars(filename)
elif n > 0:
# IRAF-style file name.
temp = "$" + filename[0:n] + os.sep + filename[n+1:]
filename = os.path.expandvars(temp)
# If filename contains "//", delete all of them.
double_sep = os.sep + os.sep
filename = filename.replace(double_sep, os.sep)
return filename
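# Example (illustrative): with the environment variable lref set to
# /grp/hst/cdbs/lref, both naming styles expand to the same path:
#
#     expandFileName("$lref/name_dqi.fits")  # Unix-style
#     expandFileName("lref$name_dqi.fits")   # IRAF-style
#     # both -> /grp/hst/cdbs/lref/name_dqi.fits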
def interpolate(x, values, xp):
"""Interpolate.
    Linear interpolation is used. If the specified independent variable
value xp is outside the range of the array x, the first (or last)
value in values will be returned.
Parameters
-----------
x : a sequence object, e.g. an array, int or float
Array of independent variable values.
values : a sequence object, e.g. an array (not character)
Array of dependent variable values.
xp : int or float
Independent variable value at which to interpolate.
Returns
-------
interp_vals : the same type as one element of values
Linearly interpolated value.
"""
nvalues = len(values)
if nvalues == 1 or xp <= x.item(0):
value = copy.deepcopy(values[0])
elif xp >= x.item(nvalues-1):
value = copy.deepcopy(values[nvalues-1])
else:
# search for independent variable values that bracket the specified xp
for i in range(nvalues-1):
x0 = x.item(i)
x1 = x.item(i+1)
if xp >= x0 and xp <= x1:
if x0 == x1:
value = copy.deepcopy(values[i])
else:
p = float(x1 - xp)
q = float(xp - x0)
value = (p * values[i] + q * values[i+1]) / (p + q)
break
return value
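# Example (illustrative, assuming numpy is imported as np):
#
#     x = np.array([0.0, 1.0, 2.0])
#     values = np.array([0.0, 10.0, 20.0])
#     interpolate(x, values, 0.5)   # -> 5.0
#     interpolate(x, values, -1.0)  # -> 0.0 (clamped to the first value)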
| 2,505 | 27.804598 | 78 |
py
|
stistools
|
stistools-master/stistools/basic2d.py
|
#! /usr/bin/env python
import os
import sys
import getopt
import glob
import subprocess
from stsci.tools import parseinput, teal
__doc__ = """
Perform basic 2-D calibration of STIS data.
Examples
--------
In Python without TEAL:
>>> import stistools
>>> stistools.basic2d.basic2d("o66p01020_raw.fits", verbose=True,
... trailer="o66p01020.trl")
In Python with TEAL:
>>> from stistools import basic2d
>>> from stsci.tools import teal
>>> teal.teal("basic2d")
From command line::
% ./basic2d.py -v o66p01020_raw.fits o66p01020_flt.fits
% ./basic2d.py -r
"""
__taskname__ = "basic2d"
__version__ = "3.4"
__vdate__ = "13-November-2013"
__author__ = "Phil Hodge, STScI, November 2013."
def main(args):
if len(args) < 1:
prtOptions()
print("At least a raw file name must be specified.")
sys.exit()
    try:
        # None of -r, -t, -v take a value, so no ":" suffixes are needed.
        (options, pargs) = getopt.getopt(args, "rtv",
                                         ["version"])
    except Exception as error:
        print(str(error))
        prtOptions()
        sys.exit()
output = ""
outblev = ""
verbose = False
timestamps = False
for i in range(len(options)):
if options[i][0] == "--version":
status = subprocess.call(["cs1.e", "--version"])
return 0
if options[i][0] == "-r":
status = subprocess.call(["cs1.e", "-r"])
return 0
if options[i][0] == "-v":
verbose = True
if options[i][0] == "-t":
timestamps = True
nargs = len(pargs)
if nargs < 1 or nargs > 2:
prtOptions()
sys.exit()
input = pargs[0]
if nargs == 2:
output = pargs[1]
status = basic2d(input, output=output, outblev=outblev,
verbose=verbose, timestamps=timestamps)
sys.exit(status)
def prtOptions():
"""Print a list of command-line options and arguments."""
print("The command-line options are:")
print(" --version (print the version number and exit)")
print(" -r (print the full version string and exit)")
print(" -v (verbose)")
print(" -t (print timestamps)")
print("")
print("Following the options, list one or more input raw file names,")
print(" enclosed in quotes if more than one file name is specified")
print(" and/or if wildcards are used.")
print("One or more output file names may be specified (the same number")
print(" as the input file names).")
def basic2d(input, output="", outblev="",
dqicorr="perform", atodcorr="omit", blevcorr="perform",
doppcorr="perform",
lorscorr="perform", glincorr="perform", lflgcorr="perform",
biascorr="perform", darkcorr="perform", flatcorr="perform",
shadcorr="omit", photcorr="perform", statflag=True,
darkscale="",
verbose=False, timestamps=False,
trailer="", print_version=False, print_revision=False):
"""Perform basic 2-D calibration of STIS raw data.
Some calibration steps are relevant only for CCD or only for MAMA, and
since an output file of calstis or basic2d may be used as the input,
some steps may have already been done. Most calibration steps will not
be done if they are not relevant or if they have already been done,
regardless of the value of the calibration switch (e.g. flatcorr).
Parameters
----------
input: str
Name of the input raw file.
output: str
Name of the output file, or "" (the default). If no name was
specified, the output name will be constructed from the input name.
outblev: str
Name of the output text file for blev info, or "" (the default).
dqicorr: str
If "perform", update the DQ array.
atodcorr: str
The analog-to-digital correction is ignored because it was never
implemented.
blevcorr: str
If "perform", subtract a bias level based on the overscan values.
(CCD only.)
doppcorr: str
If "perform", convolve reference files (bpixtab, darkfile,
flatfile) as needed with the Doppler shift offset throughout the
exposure, if Doppler correction was done on-board. (MAMA only,
because for the CCD Doppler correction is not done on-board.)
lorscorr: str
If "perform", bin high-res data to lo-res. (MAMA only.)
glincorr: str
If "perform", correct for global non-linearity. (MAMA only.)
lflgcorr: str
If "perform", flag local non-linearity. (MAMA only.)
biascorr: str
If "perform", subtract the bias image. (CCD only.)
darkcorr: str
If "perform", subtract the dark image, scaled by the exposure time
and possibly also a temperature-dependent factor.
flatcorr: str
If "perform", divide by the flat field image.
shadcorr: str
The shutter shading correction is ignored because it was never
implemented.
photcorr: str
If "perform", determine the photometric parameters and populate
keywords PHOTFLAM, PHOTZPT, PHOTPLAM and PHOTBW. (Imaging only.)
statflag: bool
If True, compute statistics for image arrays and update keywords.
darkscale: str
This may be used to override the time and/or temperature dependent
scale factor that would normally be applied to the dark image
before subtracting from the raw data. It's a string rather than
a float in order to accept a different scale factor for each
image set in the input data. calstis reads the value or values
(separated by blanks) from the string, and if the value is greater
than zero, it will be used instead of the value determined from
the temperature and time. (CCD or NUV-MAMA only.)
verbose: bool
If True, calstis will print more info.
timestamps: bool
If True, calstis will print the date and time at various points
during processing.
trailer: str
If specified, the standard output and standard error will be
written to this file instead of to the terminal. Note, however,
that if print_version or print_revision is specified, the value
will be printed to the terminal, and any name given for the
trailer will be ignored.
print_version: bool
If True, calstis will print the version number (a string) and
then return 0.
print_revision: bool
If True, calstis will print the full version string and then
return 0.
Returns
-------
status: int
0 is OK.
1 is returned if cs1.e (the calstis host executable) returned a
non-zero status. If verbose is True, the value returned by cs1.e
will be printed.
2 is returned if the specified input file or files were not found,
or if there is a mismatch between the number of input, output,
and/or outblev files specified.
"""
if print_version:
status = subprocess.call(["cs1.e", "--version"])
return 0
if print_revision:
status = subprocess.call(["cs1.e", "-r"])
return 0
cumulative_status = 0
# infiles may include one or more file names, separated by blanks
# or commas (or both), and any name may include wildcards.
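    # For example (illustrative), input="o66p01020_raw.fits,o66p01030_raw.fits"
    # and input="o66p010*_raw.fits" both expand to a list of matching files.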
infiles = []
input1 = input.split()
for in1 in input1:
input2 = in1.split(",")
for in2 in input2:
files = glob.glob(in2)
infiles.extend(files)
if input1 and not infiles:
print("No file name matched the string '{}'".format(input))
return 2
if output:
outfiles = []
output1 = output.split()
for out1 in output1:
if out1:
output2 = out1.split(",")
for out2 in output2:
if out2:
outfiles.append(out2)
else:
outfiles = None
if outblev:
outblev_txt = []
outblev1 = outblev.split()
for out1 in outblev1:
if out1:
outblev2 = out1.split(",")
for out2 in outblev2:
if out2:
outblev_txt.append(out2)
else:
outblev_txt = None
same_length = True # optimistic initial value
n_infiles = len(infiles)
if outfiles and len(outfiles) != n_infiles:
same_length = False
print("You specified {} input files but {} output files.".format(
n_infiles, len(outfiles)))
print("The number of input and output files must be the same.")
if outblev_txt and len(outblev_txt) != n_infiles:
same_length = False
print("The number of input and outblev files must be the same.")
if not same_length:
return 2
if trailer:
if verbose and os.access(trailer, os.F_OK):
print("Appending to trailer file {}".format(trailer))
f_trailer = open(trailer, "a")
fd_trailer = f_trailer.fileno()
else:
f_trailer = None
fd_trailer = None
for (i, infile) in enumerate(infiles):
arglist = ["cs1.e"]
if verbose:
arglist.append("-v")
if timestamps:
arglist.append("-t")
if darkscale:
arglist.append("-dscl")
arglist.append("%s" % darkscale)
arglist.append(infile)
if outfiles:
arglist.append(outfiles[i])
else:
arglist.append('')
if outblev_txt:
arglist.append(outblev_txt[i])
if dqicorr == "perform":
arglist.append("-dqi")
if blevcorr == "perform":
arglist.append("-blev")
if doppcorr == "perform":
arglist.append("-dopp")
if lorscorr == "perform":
arglist.append("-lors")
if glincorr == "perform":
arglist.append("-glin")
if lflgcorr == "perform":
arglist.append("-lflg")
if biascorr == "perform":
arglist.append("-bias")
if darkcorr == "perform":
arglist.append("-dark")
if flatcorr == "perform":
arglist.append("-flat")
if photcorr == "perform":
arglist.append("-phot")
if statflag:
arglist.append("-stat")
if verbose:
print("Running basic2d on {}".format(infile))
print(" {}".format(str(arglist)))
status = subprocess.call(arglist, stdout=fd_trailer,
stderr=subprocess.STDOUT)
if status:
cumulative_status = 1
if verbose:
print("Warning: status = {}".format(status))
if f_trailer is not None:
f_trailer.close()
return cumulative_status
#-------------------------#
# Interfaces used by TEAL #
#-------------------------#
def getHelpAsString(fulldoc=True):
"""Return documentation on the basic2d function."""
return basic2d.__doc__
def run(configobj=None):
"""TEAL interface for the basic2d function."""
basic2d(input=configobj["input"],
output=configobj["output"],
outblev=configobj["outblev"],
dqicorr=configobj["dqicorr"],
atodcorr=configobj["atodcorr"],
blevcorr=configobj["blevcorr"],
doppcorr=configobj["doppcorr"],
lorscorr=configobj["lorscorr"],
glincorr=configobj["glincorr"],
lflgcorr=configobj["lflgcorr"],
biascorr=configobj["biascorr"],
darkcorr=configobj["darkcorr"],
flatcorr=configobj["flatcorr"],
shadcorr=configobj["shadcorr"],
photcorr=configobj["photcorr"],
statflag=configobj["statflag"],
darkscale=configobj["darkscale"],
verbose=configobj["verbose"],
timestamps=configobj["timestamps"],
trailer=configobj["trailer"],
print_version=configobj["print_version"],
print_revision=configobj["print_revision"])
if __name__ == "__main__":
main(sys.argv[1:])
| 12,134 | 30.437824 | 76 |
py
|
stistools
|
stistools-master/stistools/calstis.py
|
#! /usr/bin/env python
import os
import sys
import getopt
import glob
import subprocess
from stsci.tools import parseinput, teal
__doc__ = """
Calibrate STIS data.
The input raw files should be in the default directory. This is not
always necessary, but it will always work. For spectroscopic data, if
a path is specified for the input file, the wavecal file may not be
found unless the wavecal file name (including path) was explicitly
specified.
Examples
--------
In Python without TEAL:
>>> import stistools
>>> stistools.calstis.calstis("o66p01020_raw.fits", verbose=True,
... trailer="o66p01020.trl")
In Python with TEAL:
>>> from stistools import calstis
>>> from stsci.tools import teal
>>> teal.teal("calstis")
From command line::
% ./calstis.py -v -s o66p01020_raw.fits out/
% ./calstis.py -r
"""
__taskname__ = "calstis"
__version__ = "3.4"
__vdate__ = "13-November-2013"
__author__ = "Phil Hodge, STScI, November 2013."
def main(args):
if len(args) < 1:
prtOptions()
print("At least a raw file name must be specified.")
sys.exit()
try:
(options, pargs) = getopt.getopt(args, "srtvw:",
["version"])
    except Exception as error:
        print(str(error))
        prtOptions()
        sys.exit()
outroot = ""
wavecal = ""
verbose = False
timestamps = False
savetmp = False
for i in range(len(options)):
if options[i][0] == "--version":
status = subprocess.call(["cs0.e", "--version"])
return 0
if options[i][0] == "-r":
status = subprocess.call(["cs0.e", "-r"])
return 0
if options[i][0] == "-v":
verbose = True
if options[i][0] == "-t":
timestamps = True
if options[i][0] == "-s":
savetmp = True
if options[i][0] == "-w":
wavecal = options[i][1]
nargs = len(pargs)
if nargs < 1 or nargs > 2:
prtOptions()
sys.exit()
input = pargs[0]
if nargs == 2:
outroot = pargs[1]
status = calstis(input, wavecal=wavecal, outroot=outroot,
savetmp=savetmp,
verbose=verbose, timestamps=timestamps)
sys.exit(status)
def prtOptions():
"""Print a list of command-line options and arguments."""
print("The command-line options are:")
print(" --version (print the version number and exit)")
print(" -r (print the full version string and exit)")
print(" -v (verbose)")
print(" -t (print timestamps)")
print(" -s (save temporary files)")
print(" -w wavecal")
print("")
print("Following the options, list one or more input raw file names,")
print(" enclosed in quotes if more than one file name is specified")
print(" and/or if wildcards are used.")
print("An output directory (include a trailing '/') or a root name for")
print(" the output files may be specified.")
def calstis(input, wavecal="", outroot="", savetmp=False,
verbose=False, timestamps=False,
trailer="", print_version=False, print_revision=False):
"""Calibrate STIS data.
Parameters
----------
input: str
Name of the input file.
wavecal: str
Name of the input wavecal file, or "" (the default). This is
only needed if the name is not the "normal" name
(rootname_wav.fits).
outroot: str
Root name for the output files, or "" (the default). This can
be a directory name, in which case the string must end in '/'.
savetmp: bool
True if calstis should not delete temporary files.
verbose: bool
If True, calstis will print more info.
timestamps: bool
If True, calstis will print the date and time at various points
during processing.
trailer: str
If specified, the standard output and standard error will be
written to this file instead of to the terminal. Note, however,
that if print_version or print_revision is specified, the value
will be printed to the terminal, and any name given for the
trailer will be ignored.
print_version: bool
If True, calstis will print the version number (a string) and
then return 0.
print_revision: bool
If True, calstis will print the full version string and then
return 0.
Returns
-------
status: int
0 is OK.
1 is returned if cs0.e (the calstis host executable) returned a
non-zero status. If verbose is True, the value returned by cs0.e
will be printed.
2 is returned if the specified input file or files were not found.
"""
if print_version:
status = subprocess.call(["cs0.e", "--version"])
return 0
if print_revision:
status = subprocess.call(["cs0.e", "-r"])
return 0
cumulative_status = 0
# infiles may include one or more file names, separated by blanks
# or commas (or both), and any name may include wildcards.
infiles = []
input1 = input.split()
for in1 in input1:
input2 = in1.split(",")
for in2 in input2:
files = glob.glob(in2)
infiles.extend(files)
if input1 and not infiles:
print("No file name matched the string '{}'".format(input))
return 2
if trailer:
if verbose and os.access(trailer, os.F_OK):
print("Appending to trailer file {}".format(trailer))
f_trailer = open(trailer, "a")
fd_trailer = f_trailer.fileno()
else:
f_trailer = None
fd_trailer = None
for infile in infiles:
arglist = ["cs0.e"]
if verbose:
arglist.append("-v")
if timestamps:
arglist.append("-t")
if savetmp:
arglist.append("-s")
arglist.append(infile)
if outroot:
arglist.append(outroot)
if wavecal:
arglist.append("-w")
arglist.append("%s" % wavecal)
if verbose:
print("Running calstis on {}".format(infile))
print(" {}".format(str(arglist)))
status = subprocess.call(arglist, stdout=fd_trailer,
stderr=subprocess.STDOUT)
if status:
cumulative_status = 1
if verbose:
print("Warning: status = {}".format(status))
if f_trailer is not None:
f_trailer.close()
return cumulative_status
#-------------------------#
# Interfaces used by TEAL #
#-------------------------#
def getHelpAsString(fulldoc=True):
"""Return documentation on the calstis function."""
return calstis.__doc__
def run(configobj=None):
"""TEAL interface for the calstis function."""
calstis(input=configobj["input"],
wavecal=configobj["wavecal"],
outroot=configobj["outroot"],
savetmp=configobj["savetmp"],
verbose=configobj["verbose"],
timestamps=configobj["timestamps"],
trailer=configobj["trailer"],
print_version=configobj["print_version"],
print_revision=configobj["print_revision"])
if __name__ == "__main__":
main(sys.argv[1:])
| 7,287 | 27.46875 | 76 |
py
|
stistools
|
stistools-master/stistools/tastis.py
|
#! /usr/bin/env python
from math import modf, sqrt
import os
import argparse
from astropy.io import fits
import numpy as np
__doc__ = """
Analyze STIS target acquisition images. :func:`tastis` will print general
information about each input target acquisition image, and will analyze both
types of STIS target acquisitions: ACQs and ACQ/PEAKs
ACQ procedure is described in "STIS Onboard CCD Target Acquisitions" in the
STIS Instrument Handbook. The ACQ/PEAK procedure is described in "Onboard
Target-Acquisition Peakups (ACQ/PEAK)" also in the STIS Instrument Handbook.
Target positions in global and local (subarray) coordinates and the total flux
of the target in the maximum checkbox during both acquisition phases (coarse
and fine) are displayed.
If update=True, keywords are added to the header to make problems easier to
locate in batch processing. Warnings are given if the spt file is not present
when :func:`tastis` is run.
Examples
--------
:func:`tastis` with the default of update=False:
>>> import stistools
>>> stistools.tastis.tastis("ocmv0lw6q_raw.fits")
===============================================================================
oc7w11viq HST/STIS G430L 0.3X0.05ND ACQ/PEAK-UP
prop: 13465 visit: 11 line: 3 target: HD128621-2
obs date, time: 2014-07-24 22:05:06 exposure time: 0.10
dom GS/FGS: S7QX000330F1 sub-dom GS/FGS: S7QX000694F2
ACQ params: bias sub: 1510 method: RETURN-TO-BRIGHTEST
subarray (axis1,axis2): size=(1022,32) corner=(25,500)
-------------------------------------------------------------------------------
Scan type: LINEARAXIS2 Step size (mas): 250
[210 753 0]
axis1 axis2 axis1 axis2 V2 V3
(pixels) (arcsec) (arcsec)
Estimated slew: 0.0 -0.1 0.000 -0.005 -0.004 0.004
Flux in post-slew confirmation image (751752) - Pedestal (748587) = 3165 DN
-------------------------------------------------------------------------------
The flux in the confirmation image is 320% greater than the maximum flux
in the ACQ/PEAK scan. An excess greater than 100% indicates
problems in the ACQ/PEAK.
The flux in the confirmation image is 16% of the recommended minimum
of 20000 DN for a dispersed-light ACQ/PEAK. The signal-to-noise in
the ACQ/PEAK may be inadequate for an accurate centering.
===============================================================================
:func:`tastis` with update=True:
>>> import stistools
>>> stistools.tastis.tastis("ocmv0lw6q_raw.fits", update=True)
===============================================================================
ocmv0lw6q HST/STIS MIRVIS F25ND3 ACQ/POINT
prop: 13760 visit: 0L line: 1 target: CD-59D3300
obs date, time: 2016-09-29 23:43:50 exposure time: 1.10
dom GS/FGS: S4B0000993F2 sub-dom GS/FGS: S4B0000953F1
ACQ params: bias sub: 1510 checkbox: 3 method: FLUX CENTROID
subarray (axis1,axis2): size=(100,100) corner=(487,466)
-------------------------------------------------------------------------------
Coarse locate phase: Target flux in max checkbox (DN): 1560
global local
axis1 axis2 axis1 axis2
Target location: 534.2 507.0 48.2 42.0
axis1 axis2 axis1 axis2 V2 V3
(pixels) (arcsec) (arcsec)
Estimated slew: -1.5 -9.0 -0.079 -0.457 -0.379 0.268
-------------------------------------------------------------------------------
Fine locate phase: Target flux in max checkbox (DN): 1559
global local
axis1 axis2 axis1 axis2
Target location: 534.2 516.8 48.2 51.8
Ref ap location: 537.5 517.0 19.5 17.0
axis1 axis2 axis1 axis2 V2 V3
(pixels) (arcsec) (arcsec)
Estimated slew: -2.1 -0.2 -0.104 -0.010 -0.081 -0.067
-------------------------------------------------------------------------------
Total est. slew: -3.6 -9.2 -0.183 -0.467 -0.460 0.201
-------------------------------------------------------------------------------
Your ACQ appears to have succeeded, as the fluxes in the coarse
and fine stages agree within 25% and the fine slews were less than
4 pixels as expected
===============================================================================
"""
__taskname__ = "tastis"
__version__ = "1.0"
__vdate__ = "14-June-2018"
__author__ = "Python: Sara Ogaz. C code: R. Katsanis, R. Downes, " \
"Phil Hodge. Original IDL code: George Hartig"
# These are possible values for the bit mask badacq.
BAD_ACQ = 1 # any problem
BAD_SLEW = 2 # ACQ only
BAD_LAMP_LOW = 4 # ACQ only
BAD_RATIO_HIGH = 8 # ACQ or ACQ/PEAK
BAD_RATIO_LOW = 16 # ACQ or ACQ/PEAK
BAD_SATURATED = 32 # ACQ or ACQ/PEAK
BAD_FLUX = 64 # ACQ/PEAK only
BAD_END = 128 # ACQ/PEAK only
BAD_TDF = 256 # take-data flag was down
FATAL_ERROR = 2
LOW_FLUX_CUTOFF = 0.8
HIGH_FLUX_CUTOFF = 2.0
MAX_GOODMAX = 32000. # higher would be saturated */
MIN_GOODMAX = 1900. # lower implies lamp was not on */
MIN_IMAGING_FLUX = 1250.
MIN_SPECTROSCOPIC_FLUX = 20000.
PLATESCALE = 0.0508
COSTHETA = 0.70711
SINTHETA = 0.70711
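# COSTHETA/SINTHETA encode the 45 degree rotation between the STIS detector
# axes and the telescope (V2, V3) frame; see _v2coord and _v3coord below.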
def tastis(raw_filename, update=False):
"""
Analyze STIS target acquisition images.
Parameters
----------
raw_filename: str
Name of the input raw file. For some raw files you will need a copy of
the spt file in the same directory.
update: bool
If True, keywords associated with tastis checks will be updated.
Default values is False.
"""
# filename can take wildcards, worry about this later
# checks for matching files exits if it can't find any
# loops over files
# build string for raw and spt file
spt_filename = raw_filename.replace("raw", "spt")
spt_exists = os.path.exists(spt_filename)
# open file
head = fits.getheader(raw_filename)
obsmode = head['OBSMODE']
if obsmode == "ACQ" or obsmode == "ACQ/PEAK":
# Check for spt file if obsmode is ACQ/PEAK
if not spt_exists and obsmode == "ACQ/PEAK":
            raise FileNotFoundError("Can't find {} (required for ACQ/PEAK), "
                                    "exiting".format(spt_filename))
keywords = _read_keywords(raw_filename, spt_filename, spt_exists)
_calculate_slews(keywords)
# Read dominant & subdominant FGS from keywords DGESTAR & SGESTAR
# in primary header of spt file.
if spt_exists:
spt_head = fits.getheader(spt_filename)
keywords['domfgs'] = spt_head['dgestar']
keywords['subfgs'] = spt_head['sgestar']
else:
keywords['domfgs'] = ""
keywords['subfgs'] = ""
badacq = _print_output(keywords, spt_exists)
# Update keywords in the input primary header to indicate
# which tests succeeded and which failed.
if update:
with fits.open(raw_filename, mode='update') as raw_hdulist:
if badacq:
raw_hdulist[0].header['acqstat'] = "FAILED"
else:
raw_hdulist[0].header['acqstat'] = "OK"
if keywords['obsmode'] == "ACQ":
# Bad ratio
if badacq & BAD_RATIO_HIGH:
raw_hdulist[0].header['acq_rat'] = "HIRATIO"
elif badacq & BAD_RATIO_LOW:
raw_hdulist[0].header['acq_rat'] = "LORATIO"
else:
raw_hdulist[0].header['acq_rat'] = "OKRATIO"
# Bad slew
if badacq & BAD_SLEW:
raw_hdulist[0].header['acq_slew'] = "BIGSLEW"
else:
raw_hdulist[0].header['acq_slew'] = "OK_SLEW"
# Saturation
if badacq & BAD_SATURATED:
raw_hdulist[0].header['acq_sat'] = "SAT"
else:
raw_hdulist[0].header['acq_sat'] = "UNSAT"
# Bad Lamp
if badacq & BAD_LAMP_LOW:
raw_hdulist[0].header['acq_lamp'] = "LO_LAMP"
else:
raw_hdulist[0].header['acq_lamp'] = "OK_LAMP"
# for ACQ/PEAK
else:
# Bad ratio
if badacq & BAD_RATIO_HIGH:
raw_hdulist[0].header['acqp_rat'] = "HIRATIO"
elif badacq & BAD_RATIO_LOW:
raw_hdulist[0].header['acqp_rat'] = "LORATIO"
else:
raw_hdulist[0].header['acqp_rat'] = "OKRATIO"
# Bad flux
if badacq & BAD_FLUX:
raw_hdulist[0].header['acqp_flx'] = "LO_FLUX"
else:
raw_hdulist[0].header['acqp_flx'] = "OK_FLUX"
# Saturation
if badacq & BAD_SATURATED:
raw_hdulist[0].header['acqp_sat'] = "SAT"
else:
raw_hdulist[0].header['acqp_sat'] = "UNSAT"
# Bad end
if badacq & BAD_END:
raw_hdulist[0].header['acqp_end'] = "HI_END"
else:
raw_hdulist[0].header['acqp_end'] = "OK_END"
# Bad tdf
if badacq & BAD_TDF:
raw_hdulist[0].header['dataflag'] = "TDFDown"
elif not spt_exists:
raw_hdulist[0].header['dataflag'] = "UNKNOWN"
else:
raw_hdulist[0].header['dataflag'] = "TDF_Up"
def _read_keywords(raw_filename, spt_filename, spt_exists):
"""
Read in raw and spt FITS file header keywords used in :func:`tastis`, and
store the results in a dictionary returned by the function.
Parameters
----------
raw_filename: str
Name of the FITS input raw file.
spt_filename: str
Name of the FITS spt file associated with the raw file.
spt_exists: bool
If True, the `spt_filename` exists in the same directory as the raw
file.
Returns
-------
keywords: dict
dictionary containing all keywords and other data needed by
:func:`tastis`.
"""
keywords = {}
with fits.open(raw_filename) as raw_hdulist:
# Read universal primary header keywords
prim_header_keywords = ['rootname', 'obsmode', 'obstype', 'proposid',
'sizaxis1', 'sizaxis2', 'texptime', 'biaslev',
'targname', 'tdateobs', 'ttimeobs', 'linenum',
'centera1', 'centera2']
for key in prim_header_keywords:
keywords[key] = raw_hdulist[0].header[key]
keywords['optelem'] = raw_hdulist[0].header['opt_elem']
# For the aperture name, use PROPAPER if its value ends in "E1"
# or "D1"; otherwise, use APERTURE.
propaper = raw_hdulist[0].header['propaper']
if propaper[-2:] in ['E1', 'D1']:
keywords['aperture'] = propaper
else:
keywords['aperture'] = raw_hdulist[0].header['aperture']
# grab from 1st ext (but not into dict) ngoodpix, goodmean
ngoodpix = raw_hdulist[1].header['ngoodpix']
goodmean = raw_hdulist[1].header['goodmean']
# Obsmode dependent header pulls
if keywords['obsmode'] == 'ACQ':
# 0th header
keywords['acqtype'] = raw_hdulist[0].header['acqtype']
keywords['box_step'] = raw_hdulist[0].header['checkbox']
# 1th header
keywords['counts1'] = raw_hdulist[1].header['maxchcnt']
keywords['targax1'] = raw_hdulist[1].header['targa1']
keywords['targay1'] = raw_hdulist[1].header['targa2']
# 4th header
keywords['counts2'] = raw_hdulist[4].header['maxchcnt']
keywords['goodmax2'] = raw_hdulist[4].header['goodmax']
keywords['targax4'] = raw_hdulist[4].header['targa1']
keywords['targay4'] = raw_hdulist[4].header['targa2']
# 7th header
keywords['goodmax3'] = raw_hdulist[7].header['goodmax']
keywords['apera1'] = raw_hdulist[7].header['apera1']
keywords['apera2'] = raw_hdulist[7].header['apera2']
keywords['aperlka1'] = raw_hdulist[7].header['aperlka1']
keywords['aperlka2'] = raw_hdulist[7].header['aperlka2']
# set pedestal and goodmax1 to 0, not used for "ACQ"
keywords['pedestal'] = 0
keywords['goodmax1'] = 0
if keywords['acqtype'] == "POINT":
keywords['search'] = "FLUX CENTROID"
else:
keywords['search'] = raw_hdulist[0].header['centmeth']
# Read keywords from the ACQ/PEAK primary header and from the
# spt extension header.
if keywords['obsmode'] == "ACQ/PEAK":
keywords['peakcent'] = raw_hdulist[0].header['peakcent']
keywords['search'] = raw_hdulist[0].header['pksearch']
keywords['box_step'] = raw_hdulist[0].header['numsteps']
keywords['peakstep'] = raw_hdulist[0].header['peakstep']
keywords['pedestal'] = raw_hdulist[0].header['pedestal']
keywords['goodmax1'] = raw_hdulist[1].header['goodmax']
# From spt file 1st header
spt_head = fits.getheader(spt_filename, ext=1)
keywords['otaslwa1'] = spt_head['otaslwa1']
keywords['otaslwa2'] = spt_head['otaslwa2']
# Calculate post-slew flux, get pedestal (from raw file) & dwell
# fluxes. For ACQ/PEAKs only.
keywords['counts2'] = 0. # not used for ACQ/PEAK
keywords['goodmax2'] = 0. # not used for ACQ/PEAK
keywords['goodmax3'] = 0. # not used for ACQ/PEAK
keywords['counts1'] = ngoodpix * goodmean
# Read dwell fluxes from 4th extension of raw file.
        # (This pulls the dwell data array and its dimensions from the
        # 4th image extension of the raw file.)
keywords['naxis1'] = raw_hdulist[4].data.shape[1]
keywords['naxis2'] = raw_hdulist[4].data.shape[0]
keywords['dwell'] = raw_hdulist[4].data
raw_hdulist.close()
# check for spt file
if spt_exists:
spt_head = fits.getheader(spt_filename, ext=1)
keywords['ocstdfx'] = spt_head['ocstdfx']
else:
keywords['ocstdfx'] = "unknown"
# Extract visit & expnum from linenum before period goes into keyword dict
# as visit. if this is the end of the string, fill expnum in the keyword
# dict with 0, otherwise fill expnum with rest of linenum value after
# period converted to float
split_linenum = keywords['linenum'].split(".")
keywords['visit'] = split_linenum[0]
if len(split_linenum) == 1:
keywords['expnum'] = 0
else:
keywords['expnum'] = float(split_linenum[1])
# Calculate corner from 'centera' & sizaxis'.
keywords['corner1'] = keywords['centera1'] - keywords['sizaxis1']/2
keywords['corner2'] = keywords['centera2'] - keywords['sizaxis2']/2
# Calculate coarse, fine local axis & reference aperture locations.
# For ACQs only.
if keywords['obsmode'] == 'ACQ':
keywords['coarse1'] = keywords['targax1'] - (keywords['corner1'] - 1) + 1
keywords['coarse2'] = keywords['targay1'] - (keywords['corner2'] - 1) + 1
keywords['fine1'] = keywords['targax4'] - (keywords['corner1'] - 1) + 1
keywords['fine2'] = keywords['targay4'] - (keywords['corner2'] - 1) + 1
keywords['refaper1'] = keywords['apera1'] - (keywords['corner1'] + 31) +1
keywords['refaper2'] = keywords['apera2'] - (keywords['corner2'] + 34) +1
if keywords['box_step'] > 3:
offset = (keywords['box_step'] + 1)/2
keywords['refaper1'] -= offset
keywords['refaper2'] -= offset
return keywords
def _calculate_slews(keywords):
"""
Calculate slew information used by :func:`tastis` using input data
dictionary. THIS FUNCTION EDITS THE DICTIONARY IN PLACE.
Parameters
----------
keywords: dict
dictionary containing all keywords and other data needed for slew
calculation. DICTIONARY IS EDITED IN PLACE.
"""
# Slew calculation for ACQs.
if keywords['obsmode'] == 'ACQ':
# Define all possible apertures for ACQs.
aperture_acq_dict = {"F25NDQ1": -1.24840,
"F25NDQ2": -1.24840,
"F25NDQ3": -1.24840,
"F25NDQ4": -1.24840,
"F28X50LP": -1.26850,
"F28X50OIII": -1.26850,
"F28X50OII": -1.31570,
"F25ND3": -1.24840,
"F25ND5": -1.24840}
if keywords['aperture'] in aperture_acq_dict:
offset = aperture_acq_dict[keywords['aperture']]
else:
offset = 0.0
# Slews in pixels.
keywords['a1coarse_pix'] = keywords['targax1'] - offset - keywords['aperlka1'] + 1
keywords['a2coarse_pix'] = keywords['targay1'] - keywords['aperlka2'] + 1
keywords['a1fine_pix'] = keywords['targax4'] - offset - keywords['apera1']
keywords['a2fine_pix'] = keywords['targay4'] - keywords['apera2']
keywords['a1total_pix'] = keywords['a1coarse_pix'] + keywords['a1fine_pix']
keywords['a2total_pix'] = keywords['a2coarse_pix'] + keywords['a2fine_pix']
# Slews in arcseconds.
keywords['a1coarse_arc'] = _arcseconds(keywords['a1coarse_pix'])
keywords['a2coarse_arc'] = _arcseconds(keywords['a2coarse_pix'])
keywords['a1fine_arc'] = _arcseconds(keywords['a1fine_pix'])
keywords['a2fine_arc'] = _arcseconds(keywords['a2fine_pix'])
keywords['a1total_arc'] = _arcseconds(keywords['a1total_pix'])
keywords['a2total_arc'] = _arcseconds(keywords['a2total_pix'])
keywords['V2coarse'] = _v2coord(keywords['a2coarse_arc'],
keywords['a1coarse_arc'])
keywords['V3coarse'] = _v3coord(keywords['a1coarse_arc'],
keywords['a2coarse_arc'])
keywords['V2fine'] = _v2coord(keywords['a2fine_arc'],
keywords['a1fine_arc'])
keywords['V3fine'] = _v3coord(keywords['a1fine_arc'],
keywords['a2fine_arc'])
keywords['V2total'] = _v2coord(keywords['a2total_arc'],
keywords['a1total_arc'])
keywords['V3total'] = _v3coord(keywords['a1total_arc'],
keywords['a2total_arc'])
else:
# Slew calculations for ACQ/PEAKs.
if keywords['search'] == "LINEARAXIS2":
finalx = int(keywords['box_step']/2) * \
keywords['peakstep']/(PLATESCALE*1000.0)
finaly = 0.0
elif keywords['search'] == "LINEARAXIS1":
finalx = 0.0
finaly = int(keywords['box_step']/2) * \
keywords['peakstep']/(PLATESCALE*1000.0)
elif keywords['search'] == 'SPIRAL':
x, finaly = modf(sqrt(keywords['box_step']) / 2)
finaly = -1 * finaly * keywords['peakstep'] / (PLATESCALE*1000.0)
x, finalx = modf(sqrt(keywords['box_step']) / 2)
finalx = -1 * finalx * keywords['peakstep'] / (PLATESCALE*1000.0)
# Final slews in pixels.
keywords['a1total_pix'] = keywords['otaslwa1']/10.0 + finaly
keywords['a2total_pix'] = keywords['otaslwa2']/10.0 + finalx
if keywords['search'] == "SPIRAL":
if abs(keywords['a2total_pix']) < 0.05:
keywords['a2total_pix'] = 0.0
if abs(keywords['a1total_pix']) < 0.05:
keywords['a1total_pix'] = 0.0
        # Round the pixel values to one decimal place.
keywords['a1total_pix'] = _ndec(keywords['a1total_pix'])
keywords['a2total_pix'] = _ndec(keywords['a2total_pix'])
# Slews in arcseconds.
keywords['a1total_arc'] = _arcseconds(keywords['a1total_pix'])
keywords['a2total_arc'] = _arcseconds(keywords['a2total_pix'])
keywords['V2total'] = _v2coord(keywords['a2total_arc'],
keywords['a1total_arc'])
keywords['V3total'] = _v3coord(keywords['a1total_arc'],
keywords['a2total_arc'])
def _print_output(keywords, spt_exists):
"""
Print analysis output to stdout for :func:`tastis` report.
Parameters
----------
keywords: dict
dictionary containing all keywords and other data needed by
:func:`tastis`.
spt_exists: bool
If True, the `spt_filename` exists in the same directory as the raw
file.
Returns
-------
badacq: integer (bit flag)
Bit flag integer containing :func:`tastis` error flags.
"""
# Print to stdout
print('=' * 79)
if keywords['obsmode'] == "ACQ":
print("{:>8} HST/STIS MIRVIS {:>7} "
"ACQ/{}".format(keywords['rootname'], keywords['aperture'],
keywords['acqtype']))
else:
print("{:>8} HST/STIS {} {:>7} "
"ACQ/PEAK-UP".format(keywords['rootname'], keywords['optelem'],
keywords['aperture']))
print("prop: {:4d} visit: {} line: {:.0f} target: {}".format(
keywords['proposid'], keywords['visit'], keywords['expnum'],
keywords['targname']))
print("obs date, time: {:>8} {:>8} exposure time: {:5.2f}".format(
keywords['tdateobs'], keywords['ttimeobs'], keywords['texptime']))
if keywords['domfgs'] != "" or keywords['subfgs'] != "":
print("dom GS/FGS: {} sub-dom GS/FGS: {}".
format(keywords['domfgs'], keywords['subfgs']))
if keywords['obsmode'] == "ACQ":
print("ACQ params: bias sub: {:.0f} checkbox: {:d} method: "
"{}".format(keywords['biaslev'], keywords['box_step'],
keywords['search']))
else:
print("ACQ params: bias sub: {:.0f} "
"method: {}".format(keywords['biaslev'], keywords['peakcent']))
print("subarray (axis1,axis2): size=({:d},{:d}) "
"corner=({:d},{:d})".format(int(keywords['sizaxis1']),
int(keywords['sizaxis2']),
int(keywords['corner1']),
int(keywords['corner2'])))
print('-' * 79)
# Print rest of output according to data type: ACQ or ACQ/PEAK.
if keywords['obsmode'] == "ACQ":
print("Coarse locate phase: Target flux in max checkbox "
"(DN): {:.0f}\n".format(keywords['counts1']))
print(" global local")
print(" axis1 axis2 axis1 axis2")
print("Target location: {:4.1f} {:4.1f} {:4.1f} {:4.1f}\n".
format(keywords['corner1'] + keywords['coarse1'] - 1,
keywords['corner2'] + keywords['coarse2'] - 1,
keywords['coarse1'], keywords['coarse2']))
print(" axis1 axis2 axis1 axis2 V2 "
" V3")
print(" (pixels) (arcsec) "
"(arcsec)")
print("Estimated slew: {:4.1f} {:4.1f} {:6.3f} {:6.3f} "
"{:6.3f} {:6.3f}".format(keywords['a1coarse_pix'],
keywords['a2coarse_pix'],
keywords['a1coarse_arc'],
keywords['a2coarse_arc'],
keywords['V2coarse'],
keywords['V3coarse']))
# Print slews
print('-' * 79)
print("Fine locate phase: Target flux in max checkbox (DN):"
" {:.0f}\n".format(keywords['counts2']))
print(" global local")
print(" axis1 axis2 axis1 axis2")
print("Target location: {:4.1f} {:4.1f} {:4.1f} {:4.1f}".
format(keywords['corner1'] + keywords['fine1'] - 1,
keywords['corner2'] + keywords['fine2'] - 1,
keywords['fine1'], keywords['fine2']))
print("Ref ap location: {:4.1f} {:4.1f} {:4.1f} {:4.1f}\n".
format(keywords['apera1'] + 1, keywords['apera2'] + 1,
keywords['refaper1'], keywords['refaper2']))
print(" axis1 axis2 axis1 axis2 "
"V2 V3")
print(" (pixels) (arcsec) "
"(arcsec)")
print("Estimated slew: {:4.1f} {:4.1f} {:6.3f} {:6.3f} "
"{:6.3f} {:6.3f}".format(keywords['a1fine_pix'],
keywords['a2fine_pix'],
keywords['a1fine_arc'],
keywords['a2fine_arc'],
keywords['V2fine'],
keywords['V3fine']))
# Print slews
print('-' * 79)
print("Total est. slew: {:4.1f} {:4.1f} {:6.3f} {:6.3f} "
"{:6.3f} {:6.3f}". format(keywords['a1total_pix'],
keywords['a2total_pix'],
keywords['a1total_arc'],
keywords['a2total_arc'],
keywords['V2total'],
keywords['V3total']))
print('-' * 79)
badacq = _print_warnings(keywords, spt_exists)
else:
print("Scan type: {} Step size (mas): {:.0f}".format(
keywords['search'], keywords['peakstep']))
if keywords['search'] == "SPIRAL":
print("axis 1 -->, axis 2 ^\n")
# Print here the dwell point values
if keywords['search'] == "LINEARAXIS2":
            # NOTE: a transpose may be needed here to match the scan order.
print("\n", keywords['dwell'].flatten())
else:
            # NOTE: a transpose may be needed here as well (at least for
            # one of the search types).
print("\n", keywords['dwell'].flatten())
print("")
print(" axis1 axis2 axis1 axis2 V2 "
"V3")
print(" (pixels) (arcsec) "
"(arcsec)")
print("Estimated slew: {:4.1f} {:4.1f} {:6.3f} {:6.3f} "
"{:6.3f} {:6.3f}".format(keywords['a1total_pix'],
keywords['a2total_pix'],
keywords['a1total_arc'],
keywords['a2total_arc'],
keywords['V2total'],
keywords['V3total']))
print("Flux in post-slew confirmation image ({:.0f}) - Pedestal "
"({:.0f}) = {:.0f} DN".
format(keywords['counts1'], keywords['pedestal'],
keywords['counts1'] - keywords['pedestal']))
print('-' * 79)
badacq = _print_warnings(keywords, spt_exists)
print('=' * 79)
return badacq
def _print_warnings(keywords, spt_exists):
"""
Print warnings output to stdout for :func:`tastis` report.
Parameters
----------
keywords: dict
dictionary containing all keywords and other data needed by
:func:`tastis`.
spt_exists: bool
If True, the `spt_filename` exists in the same directory as the raw
file.
Returns
-------
badacq: integer (bit flag)
Bit flag integer containing :func:`tastis` error flags.
"""
badacq = 0
max_at_end = 0 # initial value
if keywords['ocstdfx'] == "TDFDown":
print("Telemetry indicates that the intended exposures may not have\n"
"been performed. Check the images for signal.\n")
badacq |= BAD_TDF
if not spt_exists:
print("This output lacks some information because the spt.fits file\n"
"is not present in the directory.\n")
# ACQ warnings.
if keywords['obsmode'] == "ACQ":
if abs(keywords['a1fine_pix']) > 4.0 or \
abs(keywords['a2fine_pix']) > 4.0:
print("The fine slew (to center the target in the reference "
"aperture) is larger\nthan 4 pixels. This may indicate a "
"problem with your acquisition.\n")
badacq |= BAD_SLEW
# Ratio of flux in max checkbox in fine & coarse stages.
ratio = keywords['counts2'] / keywords['counts1']
if (ratio < 0.75) or (ratio > 1.25):
print("The fluxes in the maximum checkbox in the fine and coarse "
"stages differ\nby more than 25%. This may indicate a "
"problem with your acquisition.\n")
if ratio < 0.75:
badacq |= BAD_RATIO_LOW
else:
badacq |= BAD_RATIO_HIGH
if keywords['goodmax2'] > MAX_GOODMAX:
badacq |= BAD_SATURATED
print("Saturation of pixels in the second image may have affected"
"\nthe final centering.\n")
if keywords['goodmax3'] < MIN_GOODMAX:
badacq |= BAD_LAMP_LOW
print("The flux in the third image of the ACQ is lower than the "
"typical value for\n)the lamp; the image should be checked "
"to see if the lamp was illuminated.\n")
if badacq == 0:
print("Your ACQ appears to have succeeded, as the fluxes in the "
"coarse\nand fine stages agree within 25% and the fine "
"slews were less than\n4 pixels as expected\n")
# ACQ/PEAK warnings.
if keywords['obsmode'] == "ACQ/PEAK":
# Calculate maximum flux in the peakup
max_final = 0.0
i_max = -1
j_max = -1
        # Guard against the (unlikely) case of an all-negative dwell array,
        # replicating the original code's behaviour; note the index order
        # returned by np.where below (row, then column).
if max(keywords['dwell'].flatten()) > max_final:
max_final = max(keywords['dwell'].flatten())
max_indexs = np.where(keywords['dwell'] ==
max(keywords['dwell'].flatten()))
i_max = max_indexs[1][0]
j_max = max_indexs[0][0]
# subtract pedestal
flux = keywords['counts1'] - keywords['pedestal']
flux_ratio = flux / max_final
if flux_ratio < LOW_FLUX_CUTOFF:
print("The flux in the confirmation image is only {:2.0f}% of the "
"maximum flux\nin the ACQ/PEAK scan. Percentages below "
"{:2.0f}% often indicate problems\nin the ACQ/PEAK.\n".
format(flux_ratio*100, LOW_FLUX_CUTOFF*100))
badacq |= BAD_RATIO_LOW
if flux_ratio > HIGH_FLUX_CUTOFF:
print("The flux in the confirmation image is {:2.0f}% greater than"
" the maximum flux\nin the ACQ/PEAK scan. An excess greater"
" than {:3.0f}% indicates\nproblems in the ACQ/PEAK.\n".
format((flux_ratio-1)*100, (HIGH_FLUX_CUTOFF-1)*100))
badacq |= BAD_RATIO_HIGH
if keywords['goodmax1'] > MAX_GOODMAX:
badacq |= BAD_SATURATED
print("Some pixels in the confirmation image were saturated. "
"If saturation also\noccurred in any of the peakup steps, "
"it may have affected the centering.\n")
# Check that the flux level (above pedestal) in the confirmation
# image is above a minimum value.
if keywords['obstype'] == "IMAGING":
if flux < MIN_IMAGING_FLUX:
print("The flux in the confirmation image is {:2.0f}% of the "
"recommended minimum\nof {:.0f} DN for a direct-light "
"ACQ/PEAK. The signal-to-noise in the\nACQ/PEAK may be "
"inadequate for an accurate centering.\n".
format(flux/MIN_IMAGING_FLUX*100, MIN_IMAGING_FLUX))
badacq |= BAD_FLUX
else:
if flux < MIN_SPECTROSCOPIC_FLUX:
print("The flux in the confirmation image is {:2.0f}% of the "
"recommended minimum\nof {:.0f} DN for a dispersed-light"
" ACQ/PEAK. The signal-to-noise in\nthe ACQ/PEAK may be"
" inadequate for an accurate centering.\n".
format(flux/MIN_SPECTROSCOPIC_FLUX*100,
MIN_SPECTROSCOPIC_FLUX))
badacq |= BAD_FLUX
# Search for the word FAILED in keyword PEAKCENT.
# This will check if flux test failed.
if "FAILED" in keywords['peakcent']:
print("The ACQ/PEAK flux test failed, which means that no point in"
" the peakup\nscan has a flux that is at least 30% higher "
"than any other point. The\nACQ/PEAK has failed, and the "
"telescope has returned to the initial\nposition of the "
"ACQ/PEAK\n")
badacq |= BAD_ACQ
# If first & last flux values in LINEAR scans are 0.
if keywords['search'] == "LINEARAXIS1":
if i_max == 0 or i_max == (keywords['naxis1']-1):
max_at_end = 1
badacq |= BAD_END
elif keywords['search'] == "LINEARAXIS2":
if j_max == 0 or j_max == (keywords['naxis2']-1):
max_at_end = 1
badacq |= BAD_END
if max_at_end:
print("The maximum flux in the sequence occurred at one end.\n"
"This may indicate that the target was beyond that end\n"
"or that a neighboring object affected the acquisition.")
if badacq == 0:
print("The confirmation image has a flux between {:3.1f} and "
"{:3.1f} times the\nmaximum flux in the peakup, which is "
"typical of a successful ACQ/PEAK.".format(LOW_FLUX_CUTOFF,
HIGH_FLUX_CUTOFF))
return badacq
def _arcseconds(x):
"""
Translate pixel values to arcsecond units
Parameters
----------
x: int, float
Input pixel value.
Returns
-------
arcsec: float
Pixel translated to arcsecond unit.
"""
return x * PLATESCALE
def _v2coord(x, y):
"""
Translate arcsecond values to v2coordinate system.
Parameters
----------
x: int, float
Input arcsecond value.
y: int, float
Input arcsecond value.
Returns
-------
v2coord: float
v2coord value.
"""
return COSTHETA * x + SINTHETA * y
def _v3coord(x, y):
"""
Translate arcsecond values to v3coordinate system.
Parameters
----------
x: int, float
Input arcsecond value.
y: int, float
Input arcsecond value.
Returns
-------
v3coord: float
v3coord value.
"""
return COSTHETA * x - SINTHETA * y
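# A minimal sketch of how the three helpers above chain together, using the
# module-level PLATESCALE, COSTHETA, and SINTHETA constants (offset values
# here are illustrative only):
#     dx_arcsec = _arcseconds(1.5)    # detector offset in pixels -> arcsec
#     dy_arcsec = _arcseconds(-0.8)
#     v2 = _v2coord(dx_arcsec, dy_arcsec)   # rotate into the V2/V3 frame
#     v3 = _v3coord(dx_arcsec, dy_arcsec)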
def _ndec(x):
"""
Round the input value to the nearest tenth (half away from zero).
Parameters
----------
x: float
Input value to be rounded.
Returns
-------
rounded: float
Input value rounded to the nearest tenth (half away from zero).
"""
if x > 0:
return int(x*10 + 0.5) / 10.0
else:
return int(x*10 - 0.5) / 10.0
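# For example (illustrative values):
#     _ndec(1.26)  ->  1.3
#     _ndec(-1.26) -> -1.3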
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Analyze STIS target acquisition images. :func:`tastis` "
"will print general information about each input target "
"acquisition image, and will analyze both types of STIS "
"target acquisitions: ACQs and ACQ/PEAKs")
parser.add_argument('filename', type=str,
help="Name of the input raw file. For some raw files "
"you will need a copy of the spt file in the "
"same directory.")
parser.add_argument('--update', '-u', action='store_true',
help='update header')
args = vars(parser.parse_args())
tastis(args['filename'], args['update'])
| 37,270 | 39.119483 | 90 |
py
|
stistools
|
stistools-master/stistools/inttag.py
|
#! /usr/bin/env python
import numpy as np
from astropy.io import fits
import astropy.stats
from astropy import units as u
from astropy.time import Time
from datetime import datetime as dt
__doc__ = """
The task :func:`inttag` converts an events table of TIME-TAG mode STIS data into a raw, time-integrated ACCUM
image. By default, :func:`inttag` only integrates over the good time intervals (GTI), though the user can
choose to integrate over the entire exposure time by setting allevents=True. The output image can be
calibrated as any other raw image.
The input file for :func:`inttag` is an event stream table of TIME-TAG mode produced by generic conversion.
The data will be Doppler corrected (as required for medium and high resolution spectroscopic modes). This file
will consist of a primary header with no data, and two binary table extensions. The primary header is identical
in structure to the primary header of an ACCUM mode image. The first binary table (EXTNAME=EVENTS) contains a
list of the events themselves (i.e. science data as an event stream), and the second binary table (EXTNAME=GTI)
contains a list of good time intervals for the TIMETAG exposure. Columns "TIME", "AXIS1", and "AXIS2" in the
EVENTS table are read. Columns "START" and "STOP" in the GTI table are read.
The output image is a time integrated (ACCUM mode) image with the same structure as any other STIS MAMA raw
image (i.e. primary header followed by a single or series of triplet extensions: SCI, ERR, DQ). The number of
triplets is determined by the value of rcount. The time interval in the Nth triplet covers from
(starttime + (N-1)*increment) to (starttime + N*increment). The exposure time in each interval need not be
identical, because events are included in the image only if they occur during "good time intervals" (as
determined by the GTI extension table). The keyword OBSMODE in the primary header of the output image will
still be set to "TIME-TAG".
The output science image is ready to be calibrated (see :func:`calstis`, :func:`crreject`, :func:`basic2d`,
:func:`x2d`, :func:`x1d`).
Examples
--------
:func:`inttag` with default values:
>>> import stistools
>>> stistools.inttag.inttag("oddv01050_tag.fits", "oddv01050_raw.fits")
:func:`inttag` with highres output:
>>> import stistools
>>> stistools.inttag.inttag("oddv01050_tag.fits", "oddv01050_raw.fits", highres=True)
:func:`inttag` with multiple output imsets (5 count regions of 200s each):
>>> import stistools
>>> stistools.inttag.inttag("oddv01050_tag.fits", "oddv01050_raw.fits", rcount = 5, increment = 200)
"""
__taskname__ = "inttag"
__version__ = "1.0"
__vdate__ = "13-November-2018"
__author__ = "Python: Doug Branton, C code: R. Katsanis, N. Zarate, Phil Hodge"
def inttag(tagfile, output, starttime=None, increment=None,
rcount=1, highres=False, allevents=False, verbose=True):
"""Convert an events table of TIMETAG into an integrated ACCUM image.
Parameters
----------
tagfile: str
input file that contains TIMETAG event stream. This is ordinarily a
FITS file containing two tables. The TIMETAG data are in the table
with EXTNAME = "EVENTS", and the "good time intervals" are in the
table with EXTNAME = "GTI". If the GTI table is missing or empty,
all times will be considered "good".
output: str
Name of the output FITS file.
starttime: float
Start time for integrating events, in units of seconds since the
beginning of the exposure. The default value of None means that
the start time will be set to the first START time in the GTI table.
increment: float
Time interval in seconds. The default value of None means the span
from the first START to the last STOP time in the GTI table, divided
by rcount.
rcount: int
Repeat count, the number of output image sets to create. If rcount is
greater than 1 and increment is not specified, the total exposure
time will be subdivided into rcount equal intervals.
highres: bool
Create a high resolution output image? Default is False.
allevents: bool
If allevents is set to True, all events in the input EVENTS table will
be accumulated into the output image. The TIME column in the EVENTS
table will only be used to determine the exposure time, and the GTI
table will be ignored.
verbose: bool
Print additional info?
Returns
-------
"""
# Open Input File (_tag.fits)
with fits.open(tagfile) as tag_hdr:
# Read in TIME-TAG data (Events data and GTI data)
events_data = tag_hdr[1].data
if allevents: # If allevents, ignore GTI and generate gti_data based on the time of the first and last event
gti_data = np.rec.array([(events_data['TIME'][0], events_data['TIME'][-1])],
formats=">f8,>f8", names='START, STOP')
else: # Otherwise, retrieve the GTIs from the GTI extension
gti_data = tag_hdr['GTI'].data
# Read in relevant header info
tag_pri_hdr = tag_hdr[0].header
cenx = tag_pri_hdr['CENTERA1'] # xcenter in c code
ceny = tag_pri_hdr['CENTERA2'] # ycenter in c code
siz_axx = tag_pri_hdr['SIZAXIS1'] # nx in c code
siz_axy = tag_pri_hdr['SIZAXIS2'] # ny in c code
tag_sci_hdr = tag_hdr[1].header
tzero_mjd = tag_sci_hdr['EXPSTART'] # MJD zero point
# Determine start and stop times for counting events
gti_start = gti_data['START'][0]
gti_stop = gti_data['STOP'][-1]
# Calculate corners from array size and centers
xcorner = ((cenx - siz_axx / 2.) - 1) * 2
ycorner = ((ceny - siz_axy / 2.) - 1) * 2
# Adjust axis sizes for highres, determine binning
bin_n = 2
if highres:
siz_axx *= 2
siz_axy *= 2
bin_n = 1
ltvx = ((bin_n - 2.) / 2. - xcorner) / bin_n
ltvy = ((bin_n - 2.) / 2. - ycorner) / bin_n
ltm = 2. / bin_n
# Read in start and stop time parameters
if starttime is None or starttime < gti_start:
starttime = gti_start # The first START time in the GTI (or first event)
if increment is None:
increment = (gti_stop - gti_start)/rcount
stoptime = starttime + increment
imset_hdr_ver = 0 # output header value corresponding to imset
texptime = 0 # total exposure time
hdu_list = []
for imset in range(rcount):
# Truncate stoptime at last available event time (GTI or allevents) if it exceeds that
if stoptime > gti_stop:
stoptime = gti_stop
# Get Exposure Times
exp_time, expstart, expstop, good_events = exp_range(starttime, stoptime, events_data, gti_data, tzero_mjd)
if len(good_events) == 0:
if verbose:
print("Skipping imset, due to no overlap with GTI\n", starttime, stoptime)
starttime = stoptime
stoptime += increment
continue
imset_hdr_ver += 1
if imset_hdr_ver == 1: # If first science header, texpstart keyword value is expstart
texpstart = expstart
texpend = expstop # texpend will be expstop of last imset
if verbose:
print("imset: {}, start: {}, stop: {}, exposure time: {}".format(imset_hdr_ver, starttime, stoptime,
exp_time))
# Convert events table to accum image
accum = events_to_accum(good_events, siz_axx, siz_axy, highres)
# Calculate errors from accum image
# Note: C version takes the square root of the counts, inttag.py uses a more robust confidence interval
conf_int = astropy.stats.poisson_conf_interval(accum, interval='sherpagehrels', sigma=1)
err = conf_int[1] - accum # error is the difference between upper confidence boundary and the data
# Copy EVENTS extension header to SCI, ERR, DQ extensions
sci_hdu = fits.ImageHDU(data=accum, header=tag_sci_hdr.copy(), name='SCI')
err_hdu = fits.ImageHDU(data=err, header=tag_sci_hdr.copy(), name='ERR')
dq_hdu = fits.ImageHDU(header=tag_sci_hdr.copy(), name='DQ')
# Generate datetime for 'DATE' header keyword
dtstr = str(dt.utcnow())
date, h, m, s = [dtstr.split()[0], dtstr.split()[1].split(':')[0], dtstr.split()[1].split(':')[1],
str(round(float(dtstr.split()[1].split(':')[-1])))]
if len(s) == 1:
s = '0' + s
dtval = date + 'T' + h + ':' + m + ':' + s
# Populate extensions
for idx, hdu in enumerate([sci_hdu, err_hdu, dq_hdu]):
hdu.header['EXPTIME'] = exp_time
hdu.header['EXPSTART'] = expstart
hdu.header['EXPEND'] = expstop
date_obs, time_obs = Time(float(expstart), format='mjd').isot.split('T')
hdu.header['DATE-OBS'] = date_obs
hdu.header['TIME-OBS'] = time_obs
hdu.header['EXTVER'] = imset_hdr_ver
hdu.header['DATE'] = (dtval, "Date FITS file was generated")
hdu.header['ORIGIN'] = "stistools inttag.py"
# Check if image-specific WCS keywords already exist in the tag file (older tag files do)
keyword_list = list(hdu.header.keys())
if not any("CTYPE" in keyword for keyword in keyword_list):
n, k = [keyword[-1] for keyword in keyword_list if "TCTYP" in keyword]
# Rename keywords
for val, i in zip([n, k], ['1', '2']):
hdu.header.rename_keyword('TCTYP' + val, 'CTYPE' + i)
hdu.header.rename_keyword('TCRPX' + val, 'CRPIX' + i)
hdu.header.rename_keyword('TCRVL' + val, 'CRVAL' + i)
hdu.header.rename_keyword('TCUNI' + val, 'CUNIT' + i)
hdu.header.rename_keyword('TC{}_{}'.format(n, n), 'CD{}_{}'.format(1, 1))
hdu.header.rename_keyword('TC{}_{}'.format(n, k), 'CD{}_{}'.format(1, 2))
hdu.header.rename_keyword('TC{}_{}'.format(k, n), 'CD{}_{}'.format(2, 1))
hdu.header.rename_keyword('TC{}_{}'.format(k, k), 'CD{}_{}'.format(2, 2))
# Time tag events table keywords
hdu.header['WCSAXES'] = 2
hdu.header['LTM1_1'] = ltm
hdu.header['LTM2_2'] = ltm
hdu.header['LTV1'] = ltvx
hdu.header['LTV2'] = ltvy
# Convert keyword values to lowres scale if not highres
if not highres:
hdu.header['CD1_1'] *= 2
hdu.header['CD1_2'] *= 2
hdu.header['CD2_1'] *= 2
hdu.header['CD2_2'] *= 2
hdu.header['CRPIX1'] = (hdu.header['CRPIX1'] + 0.5) / 2.
hdu.header['CRPIX2'] = (hdu.header['CRPIX2'] + 0.5) / 2.
# Populate DQ header with dq specific keywords
if idx == 2:
hdu.header['NPIX1'] = siz_axx
hdu.header['NPIX2'] = siz_axy
hdu.header['PIXVALUE'] = 0 # Fixes issue with calstis not running on raw output files
# Append imset extensions to header list
hdu_list.append(sci_hdu)
hdu_list.append(err_hdu)
hdu_list.append(dq_hdu)
# Prepare start and stop times for next image in imset
starttime = stoptime
stoptime += increment
texptime += exp_time
# Copy tag file primary header to output header
pri_hdu = fits.PrimaryHDU(header=tag_pri_hdr.copy())
# Add/Modify primary header keywords
pri_hdu.header['NEXTEND'] = imset_hdr_ver * 3 # Three extensions per imset (SCI, ERR, DQ)
pri_hdu.header['NRPTEXP'] = imset_hdr_ver
pri_hdu.header['TEXPSTRT'] = texpstart
pri_hdu.header['TEXPEND'] = texpend
pri_hdu.header['TEXPTIME'] = texptime
pri_hdu.header['BINAXIS1'] = bin_n
pri_hdu.header['BINAXIS2'] = bin_n
pri_hdu.header['FILENAME'] = output.split('/')[-1]
pri_hdu.header['DATE'] = (dtval, "Date FITS file was generated")
pri_hdu.header['ORIGIN'] = "stistools inttag.py"
if not highres:
pri_hdu.header['LORSCORR'] = "COMPLETE" # Corr flag detailing MAMA data conversion to low res
# Write output file
hdu_list = [pri_hdu] + hdu_list
out_hdul = fits.HDUList(hdu_list)
out_hdul.writeto(output, overwrite=True)
def exp_range(starttime, stoptime, events_data, gti_data, tzero_mjd):
"""Calculate exposure time, expstart, and expstop and mask imset
Parameters
----------
starttime: float
Start time of the imset in seconds
stoptime: float
Stop time of the imset in seconds
events_data: record array
Record array of timetag events.
gti_data: record array
Record array of good time intervals (GTIs).
tzero_mjd: float
Modified Julian Date (MJD) corresponding to the beginning of the exposure
Returns
-------
exp_time: float
Total exposure time in seconds for the given imset. This number accounts for any exposure time lost
to non-GTI time (if the user is not using allevents).
expstart: float
Start time of the imset exposure
expstop: float
Stop time of the imset exposure
good_events: record array
The events list within the imset exposure time and within the GTIs.
"""
sec_per_day = (1*u.day).to(u.second).value
imset_events = events_data[(events_data['TIME'] > starttime) * (events_data['TIME'] < stoptime)] # within imset
if len(imset_events) == 0: # No events in imset
exp_time = 0
expstart = tzero_mjd
expstop = tzero_mjd
return exp_time, expstart, expstop, imset_events
# Mask events in imset if there are any lapses in GTI
gti_mask = np.array([False] * len(imset_events)) # Start by assuming all events are outside all GTIs
for gti in gti_data:
# Create mask of events within GTI
mask = (imset_events['TIME'] > gti[0]) * (imset_events['TIME'] < gti[1])
gti_mask = np.logical_or(gti_mask, mask) # OR global gti mask with local gti mask
good_events = imset_events[gti_mask] # All events in the imset within the GTI(s)
if len(good_events) == 0:
exp_time = 0
expstart = tzero_mjd
expstop = tzero_mjd
return exp_time, expstart, expstop, good_events
expstart = tzero_mjd + good_events['TIME'][0] / sec_per_day # exposure start in MJD for imset
expstop = tzero_mjd + good_events['TIME'][-1] / sec_per_day # exposure stop in MJD for imset
# Determine GTI gap regions
gaps = []
if len(gti_data) > 1:
for i, gti in enumerate(gti_data):
if i == 0:
continue
gaps.append((gti_data[i - 1][1], gti[0]))
# Calculate exposure time lost due to non-GTI overlap
exptime_loss = 0
for gap in gaps:
if gap[1] <= stoptime and gap[0] >= starttime:
exptime_loss += gap[1] - gap[0]
elif gap[1] >= stoptime and gap[0] <= stoptime:
exptime_loss += stoptime - gap[0]
elif gap[1] >= starttime and gap[0] <= starttime:
exptime_loss += gap[1] - starttime
else:
continue
exp_time = stoptime - starttime - exptime_loss # exposure time in seconds
return exp_time, expstart, expstop, good_events
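# A worked sketch of the gap accounting above (hypothetical times, in
# seconds): with GTIs [(0, 100), (150, 300)] and an imset spanning
# starttime=50 to stoptime=200, the single gap (100, 150) lies entirely
# inside the imset, so exptime_loss = 50 and
# exp_time = 200 - 50 - 50 = 100.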
def events_to_accum(events_data, size_x, size_y, highres):
"""Map timetag events to a 2d accum image array.
Parameters
----------
events_data: record array
Record array of timetag events.
size_x: int
Number of pixels on axis 1 of the detector.
size_y: int
Number of pixels on axis 2 of the detector.
highres: bool
Boolean value indicating whether the output accum image is in high or low resolution.
Returns
-------
accum: array
2d image of all events in the imset on the detector.
"""
# Extract (x,y) event locations from events_table
axis1 = events_data['AXIS1']
axis2 = events_data['AXIS2']
# Determine resolution-appropriate binning
if highres:
range_y = size_y
range_x = size_x
else:
range_y = size_y * 2
range_x = size_x * 2
# Map events to an accum image using a 2d histogram
accum, xedges, yedges = np.histogram2d(axis2, axis1, bins=[size_y, size_x], range=[[1, range_y], [1, range_x]])
return accum
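# A minimal sketch with synthetic events (hypothetical values, 2x2 highres
# grid):
#     fake = np.rec.array([(1, 1), (2, 2), (2, 2)],
#                         formats=">i4,>i4", names="AXIS1, AXIS2")
#     events_to_accum(fake, size_x=2, size_y=2, highres=True)
# returns [[1., 0.], [0., 2.]]: one count in bin (0, 0) and two in
# bin (1, 1).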
| 16,741 | 39.148681 | 117 |
py
|
stistools
|
stistools-master/stistools/stisnoise.py
|
#!/usr/bin/env python
import math
from astropy.io import fits
import numpy
import numpy.fft as fft
from scipy import ndimage
from scipy import signal
__version__ = '5.6 (2016-Mar-02)'
def _median(arg):
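# Order-statistic "median": for even-length input this returns the upper
# of the two middle values rather than their mean.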
return numpy.sort(arg)[arg.shape[0]//2]
def medianfilter(time_series, width):
tlen = time_series.shape[0]
res = time_series.copy()
res[:] = 0
res[0] = time_series[0]
beg, end = width//2, (width+1)//2
for j in range(beg):
res[j] = _median(time_series[:j+end])
for j in range(beg, tlen-end):
res[j] = _median(time_series[j-beg:j+end])
for j in range(tlen-end, tlen):
res[j] = _median(time_series[j-beg:])
return res
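# A worked sketch (hypothetical data): with width=3 the end points use
# truncated windows and _median's upper-middle convention, so
#     medianfilter(numpy.array([1., 5., 2., 8., 3.]), 3)
# returns [5., 2., 5., 3., 8.].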
def wipefilter(time_series, image_type, sst, freqmin, freqmax, scale):
ntime = time_series.shape[0]
# if ntime is a prime number the fft will take forever, so make
# it factorable with small prime factors (not as quick as power
# of 2 but still much quicker).
# Note that padding data out to next power of two is too much
# padding (number of elements is just a bit over 2^20 for STIS
# data)
if image_type == 'raw':
ntimep = ntime+14
else:
ntimep = ntime+7
t2 = numpy.zeros(ntimep, numpy.float64)
t2[:ntime] = time_series
freq = numpy.arange(ntimep)/(ntimep*sst*1.0e-6)
freq[ntimep//2+1:ntimep] = freq[1:ntimep//2][::-1]
tran = fft.fft(t2) / float(len(t2))
# apply filter
ind = numpy.nonzero((freq > freqmin)*(freq < freqmax))
tran[ind] = tran[ind]*scale
# inverse transform
time_series = fft.ifft(tran).real[:ntime+2]
time_series *= time_series.shape[0]
return time_series
def gauss(x, x0, dx, ymax):
if dx > 0.:
arg = numpy.clip(numpy.abs((x-x0)/dx), 0., 9.)
y = numpy.exp(-arg*arg/2.)*(arg < 9.)
else:
y = (0.*x)*(x != x0)+(x == x0)
return y*ymax
def windowfilter(time_series, image_type, sst, freqpeak, width, taper):
ntime = time_series.shape[0]
# if ntime is a prime number the fft will take forever, so make
# it factorable with small prime factors (not as quick as power
# of 2 but still much quicker).
# Note that padding data out to next power of two is too much
# padding (number of elements is just a bit over 2^20 for STIS
# data)
if image_type == 'raw':
ntimep = ntime+14
else:
ntimep = ntime+7
t2 = numpy.zeros(ntimep, numpy.float64)
t2[:ntime] = time_series
freq = numpy.arange(ntimep, dtype=numpy.float64) / (ntimep*sst*1.0e-6)
freq[ntimep//2+1:ntimep] = freq[1:ntimep//2][::-1]
tran = fft.fft(t2) / float(len(t2))
# apply filter
filter = numpy.ones(ntimep, numpy.float64)
ind = numpy.nonzero((freq > (freqpeak - width / 2.0)) *
(freq < (freqpeak + width / 2.0)))
filter[ind] = 0.0
freqstep = 1.0 / (ntimep * sst * 1.0e-6)
width = taper / freqstep # specify window width in freq steps
sigma = width/2.354820044 # convert FWHM to sigma; 2.3548... = 2*sqrt(2*ln 2)
kernw = int(5*sigma) # make kernel have width of 5 sigma
if kernw % 2 == 0:
kernw = kernw + 1 # make kernel odd
kernx = numpy.arange(kernw)
kerny = gauss(kernx, kernw//2, sigma, 1.0) # gaussian kernel
kerny = kerny/numpy.sum(kerny)
filterc = signal.correlate(filter, kerny, mode='same')
tran = tran * filterc
# inverse transform
time_series = fft.ifft(tran).real[:ntime+2]
time_series *= time_series.shape[0]
return time_series
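# A minimal usage sketch (hypothetical filter settings, not a recommended
# recipe): notch out a noise line near 15 kHz with a 2 kHz-wide stop band,
# tapered by a 500 Hz FWHM Gaussian, for raw data with sst = 22.0 us:
#     ts = windowfilter(ts, 'raw', 22.0, 15000., 2000., 500.)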
def stisnoise(infile, exten=1, outfile=None, dc=1, verbose=1,
boxcar=0, wipe=None, window=None):
""" Computes an FFT on STIS CCD frames to evaluate fixed pattern noise.
Fixed pattern noise is most obvious in a FFT of bias
frames. Optional filtering to correct the fixed pattern noise is
provided through keywords boxcar, wipe, and window. Filtered data
can be saved as an output file.
Parameters
-----------
infile : string
STIS FITS file
exten : int, optional
fits extension to be read
dc : int, optional
if 1 (the default), the power in the first frequency bin (the DC
term) is set to zero for better plotting of the power spectrum.
verbose : int, optional [Default: 1]
set to 0 if you do not want brief information about each image.
boxcar : int
width of boxcar smoothing to be applied.
wipe : ndarray
a 3-element array, specifying how to modify the data in
frequency space. If set, the image is converted to a 1-D time
series, fourier transformed to frequency space, modified, inverse
transformed back to time space, and converted back to a 2-D image.
The first and second elements specify the range in frequencies to
be scaled (in hz), and the third element specifies the scaling
factor (should be 0-1).
window : ndarray
a 3 element array, specifying how to modify the data in
frequency space. The first element is the center of the window
(in hz). The second element is the width of the window (in hz).
The third element controls the tapering of the window - it is the
scale (in hz) of the tapering width. Specifically, a square
bandstop is convolved with a gaussian having the FWHM given by the
third parameter.
outfile : string,optional
name of filtered image file
Returns
-------
noise_terms : tuple of arrays
A tuple containing two arrays::
freq = frequency in power spectrum (hz)
magn = magnitude in power spectrum
Notes
---------
Authors:
- Original algorithm: Thomas M. Brown (STScI)
- Python version: Paul Barrett (STScI)
"""
# history:
# 11/5/2001 TMB - version 1. Basic idea comes from ACS analysis software
# used for analyzing read noise
# (dino.pro; W.J. McCann & G. Hartig)
# 11/6/2001 TMB - version 2 added other amps, error checking
# 11/6/2001 TMB - version 2.1 added check on sci ext
# 11/6/2001 TMB - version 3 added various filter options
# 11/6/2001 TMB - version 3.1 added ability to read from STIS IDT DB
# 11/7/2001 TMB - version 4 added scale filter and output images
# 11/9/2001 TMB - version 4.1 added new parameter to scale routine,
# changed
# output to a file with header preservation.
# 11/20/2001 TMB - version 5.0 added window routine, which does the
# filtering of "scale" with a more gradual scaling
# 11/26/2001 TMB - version 5.1 cleaned up the code comments.
# 02/25/2002 JAV - version 5.2 added verbose option & output header
# float type spec.
# 05/21/2002 PEB - version 5.3 padded extra pixel with median of row.
# 02/15/2007 PEH - version 5.4 convert from numarray to numpy
# 04/27/2010 PEH - version 5.5 changed '/' to '//' for integer division;
# used explicit float() in some other cases. Added:
# from __future__ import division
# Check filter options
if ((boxcar > 0) + (wipe is not None) + (window is not None)) > 1:
raise ValueError('conflicting filter options')
# Define physical characteristics of STIS CCD
pst = 640.0 # parallel shift time (us)
sst = 22.0 # serial shift period (us)
nc0 = 1062 # number of columns in raw data
nr0 = 1044 # number of rows in raw data
fltxy = 1024 # number of columns and rows in calibrated data
nos = 19 # number of physical overscan columns
pps = pst/sst # number of serial shift intervals in parallel interval
# Retrieve exposure information from header
fin = fits.open(infile)
extname = fin[exten].header['EXTNAME']
inimage = fin[exten].data
himage = fin[0].data
amp = fin[0].header['CCDAMP']
if verbose == 1:
print('Target: {}, Amp: {}, Gain: {}'.format(
fin[0].header['TARGNAME'], amp, fin[0].header['CCDGAIN']))
# Check to ensure the SCI extension is being used
if extname != 'SCI':
raise RuntimeError(
'You should only run this on a SCI extension, not %s.' % extname)
nr, nc = inimage.shape
if (nr, nc) == (nr0, nc0):
image_type = 'raw'
elif (nr, nc) == (fltxy, fltxy):
image_type = 'flt'
else:
raise RuntimeError('This program should be run on 1062x1044 '
'or 1024x1024 data only.')
# Pad data with fake "OVERSCAN" if data have been overscan trimmed
if image_type == 'flt':
temp = numpy.zeros((fltxy, nc0), numpy.float32)
for row in range(fltxy):
temp[row, :] = _median(inimage[row, :])
temp[:, nos:nc0-nos] = inimage
nc = nc0
else:
temp = inimage
# Translate frame so that it is in readout order
if amp == 'A':
image = temp # amp A data -> leave as is
elif amp == 'B':
image = temp[::-1, :] # amp B data -> reverse rows (vertical flip)
elif amp == 'C':
image = temp[:, ::-1] # amp C data -> reverse columns (horizontal flip)
elif amp == 'D':
image = temp[::-1, ::-1] # amp D data -> rotate by 180 degrees
else:
raise RuntimeError('No amplifier given in header.')
# Convert 2-D array to 1-D time series
nx = nc + pps
time_series = numpy.zeros(int(nx*nr), numpy.float64)
ds = numpy.zeros(int(pps), numpy.float64)
for i in range(nr):
k = int(i*nx)
# (note that non-integer nx prevents phase wandering)
time_series[k:k+nc] = image[i, :]
# pad dead-time
medval = _median(image[i, :])
time_series[k+nc:int(k+nc+pps)] = ds + medval
if int((i+1)*nx) != int(k+nc+pps):
time_series[int((i+1)*nx)-1] = medval
# Begin filtering options ***************
# if median is not None:
# time_series = medianfilter(time_series, median)
if boxcar > 0:
boxcar_filter = signal.boxcar(boxcar) / boxcar
time_series = ndimage.convolve(time_series, boxcar_filter)
elif wipe is not None:
time_series = wipefilter(time_series, image_type, sst,
wipe[0], wipe[1], wipe[2])
elif window is not None:
time_series = windowfilter(time_series, image_type, sst,
window[0], window[1], window[2])
# End filtering options ***************
# Recreate 2-D image from time series
outimage = numpy.zeros((nr, nc), numpy.float32)
for i in range(nr):
outimage[i, :] = time_series[int(i*nx):int(i*nx+nc)]
if image_type == 'flt':
outimage = outimage[:, nos:(nc0-nos)]
# Restore original image orientation
if amp == 'A':
pass # amp A data -> leave as is
elif amp == 'B':
outimage = outimage[::-1, :] # amp B data -> reverse rows (vertical flip)
elif amp == 'C':
outimage = outimage[:, ::-1] # amp C data -> reverse columns (horizontal flip)
elif amp == 'D':
outimage = outimage[::-1, ::-1] # amp D data -> rotate by 180 degrees
# Trim vector to power of 2 for FFT
# (this is the fastest fft calculation but it doesn't preserve all
# data, as needed in scale routine above)
p2 = int(math.log(nx*nr)/math.log(2))
n_ts = 2**p2
time_series = time_series[:n_ts]
# Perform FFT and return first half
fft_output = fft.fft(time_series) / float(len(time_series))
magnitude = numpy.abs(fft_output)[:n_ts//2]
freq = numpy.arange(n_ts//2, dtype=numpy.float64) / (n_ts*sst*1.0e-6)
if dc == 1:
# set first bin in power spectrum to zero if dc == 1
magnitude[0] = 0
if outfile:
# write primary header then append ext
fout = fits.HDUList()
fout.append(fits.PrimaryHDU(header=fin[0].header))
fout.append(fits.ImageHDU(header=fin[1].header, data=outimage))
fout.writeto(outfile)
return freq, magnitude
| 12,097 | 36.80625 | 79 |
py
|
stistools
|
stistools-master/stistools/ctestis.py
|
from astropy.io import fits
import numpy as np
__doc__ = """
The purpose of this ctestis task is to correct signal levels of point-like
sources in photometry tables measured from STIS CCD images for charge loss
due to imperfect Charge Transfer Efficiency (CTE). The algorithm used to
correct for CTE-induced signal loss is based on the equations published in
Goudfrooij, Bohlin, Maiz-Apellaniz, & Kimble, PASP, October 2006 edition
(astro-ph/0608349). The values of CTE loss derived using this algorithm
should be accurate to about 3% RMS (tested for data taken between March
1997 and August 2004). No significant differences in CTE loss were found
for different aperture sizes, although this has been verified only for a
limited range of aperture sizes (2, 3, and 5 pixel radii). The algorithm
was derived from measurements of point sources in a relatively sparse
field (the outskirts of a Galactic globular cluster), as detailed in the
PASP paper mentioned above.
The function also computes the shift in the Y centroid of point sources
due to distortions in the steller PDF cause by CTE trails. the algorithm
is taken from the Equation 9 of Goodfrooij et al. (2006). Note, however,
that the equation has been multiplied by -1, so that the resulting
correction may be ADDED to measured Y centriod of each star.
The code takes inputs of net counts for a source (background subtracted), a
sky-background estimate, and the source Y-position on the detector (since
CTI effects worsen furthest from the readout). The single-pixel
sky-background estimate should be measured from individual cosmic-ray (CR)
split, bias- and dark-subtracted, and flat-fielded images (flt.fits) that
have not had any sky subtracted. This can determined with random sampling
and/or iterative sigma-clipping of sky pixels (e.g., Goudfrooij et al.
2006). The net counts measured from the science images (summed, sky-
subtracted exposures) should then be scaled to the exposure time of the CR
split FLT image (e.g., if CRSPLIT=5, the net counts are divided by five).
Note that not all extensions composing an FLT file necessarily have equal
exposure times, so a fractional scaling of the CR split to total exposure
time of the CR-combined (e.g., CRJ) science image should be
used to scale the counts. The magnitude correction (dmagc) measured using
single-CR split parameters can be added to the magnitude derived from the
total exposure time science image with no further scaling.
If working with CRSPLIT scaled sky and net counts values, the filename
(stisimage) should not be provided to avoid pulling incorrect information
from the image headers. The following parameters therefore should be set
manually: nread=1 (indicating it is just one CRSPLIT exposures), gain,
mjd (start date), and ybin (BINAXIS2). If image is an sx2.fits file, set
sx2=True.
Examples
--------
:func:`ctestis` with ycol set to 182, net set to 5,000 and sky set to 150.
>>> from stistools.ctestis import ctestis
>>> fluxc, dmagc, dyc = ctestis(182., 5000., 150., stisimage='o4qp9g010_crj.fits')
mjd: 50893.30
nread: 2
ybin: 1
gain: 1.0
amp: D
tt0: -2.3865942
lcts: -0.67595399
bck: 75.0
lbck: 2.317577
cti: 1.7314006e-05
fluxc: 2536.7133
dmagc: -0.015828427
cti10000: 0.17314006
dy512: 0.0043051192
dyc: 0.007079903
net: 5000.0
sky: 150.0
ycol: 182.0
fluxc: 2536.7133
dmagc: -0.015828427
dyc: 0.007079903
"""
__taskname__ = "ctestis"
__version__ = "1.0"
__vdate__ = "25-January-2019"
__author__ = "Python version (2018): Sara Ogaz, " \
"IDL version (2015) : Sean Lockwood (edits from John Biretta), " \
"CL version (2006) : P. Goudfrooij (edits from V. Dixon)"
def ctestis(ycol, net, sky, stisimage=None, mjd=None, nread=None,
ybin=None, gain=None, amp='D', sx2=False):
"""
Calculate the STIS empirical correction to magnitude and astrometric shift,
given photometry results.
Parameters
----------
ycol : arr
Y-column # of object
net : arr
Net photometric counts (background subtracted) measured from science image
and scaled to cosmic-ray split exposure time (from which sky is measured)
sky : arr
Single-pixel sky-background estimate measured from individual cosmic-ray
split, bias- and dark-subtracted, flat-fielded images (flt.fits) with no
sky subtraction
stisimage : str, optional
The name of the SX2 file from which to pull the header keywords.
mjd : float, optional
Modified julian date corresponding to the start time of the 1st
exposure, corresponds to the TEXPSTRT keyword. If stisimage file is
defined TEXPSTRT keyword will overwrite any provided mjd value.
nread : int, optional
Number of image sets combined during CR rejection, corresponds to the
NCOMBINE keyword. If stisimage file is defined NCOMBINE keyword will
overwrite any provided nread value.
ybin : int , optional
Axis2 data bin size in unbinned detector pixels, corresponds to the
BINAXIS2 keyword. If stisimage file is defined BINAXIS2 keyword will
overwrite any provided ybin value.
gain : float, optional
The image gain, corresponds to the CCDGAIN keyword. If stisimage file
is defined the CCDGAIN keyword will overwrite any provided gain value.
If the gain is 4.0, it will be updated to 4.08.
amp : str, optional
The amplifier used for the observation (default 'D'). Ignored if
stisimage is provided.
sx2 : bool, optional
Force the procedure to remove the top/bottom 38 rows. This is
automatically done if the file in stisname contains '_sx2'. Default
values is False
Returns
-------
fluxc : arr
The empirically-corrected flux (counts)
dmagc : arr
The empirical photometric correction (delta mag)
dyc : arr
The empirical astrometric correction (delta pixels)
"""
# convert any iterable input to a numpy array
ycol = np.asarray(ycol)
net = np.asarray(net)
sky = np.asarray(sky)
if stisimage is not None:
hdulist = fits.open(stisimage)
mjd = hdulist[0].header['TEXPSTRT']
nread = hdulist[1].header['NCOMBINE']
ybin = hdulist[0].header['BINAXIS2']
gain = hdulist[0].header['CCDGAIN']
amp = hdulist[0].header['CCDAMP'].strip().upper()
# not sure if I really need this rootname splitter or not
else:
opt_inputs = {'mjd': mjd, 'nread': nread, 'ybin': ybin, 'gain': gain}
for key, eleme in opt_inputs.items():
if eleme is None:
raise ValueError("If no image filename is specified, mjd, "
"nread, ybin, and gain must be specified. {} "
"is missing".format(key))
amp = amp.upper()
# check that amp makes sense
if amp not in ['A', 'B', 'C', 'D']:
raise ValueError("Amplifier must be 'A', 'B', 'C', or 'D'. {} is "
"note a recognized value".format(amp))
print("MJD = ", mjd)
print("NREAD = ", nread)
print("YBIN = ", ybin)
print("GAIN = ", gain, " --> 4.08" if gain == 4.0 else "")
if gain == 4.0:
gain = 4.08
# cte equation constants from table 8
a = 0.000133
b = 0.54
c = 0.205
d = 0.05
e = 0.82
f = 3.6
g = 0.21
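# In the notation below, Equation 8 of Goudfrooij et al. (2006) reads:
#     CTI = a * exp(-b * lcts) * (1 + c * tt0)
#           * [d * exp(-e * lbck) + (1 - d) * exp(-f * (bck/cts)**g)]
# and is evaluated as cti1 * cti2.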
# equation 8 inputs
tt0 = (mjd - 51765) / 365.25
cts = np.maximum((net * gain / nread), 1)
bck = np.maximum((sky * gain / nread), 0)
lcts = np.log(cts) - 8.5
lbck = np.log(np.sqrt(bck * bck + 1)) - 2
# equation 8 for image cti
cti1 = a * np.exp(-b * lcts) * (c * tt0 + 1)
cti2 = d * np.exp(-e * lbck) + (1 - d) * np.exp(-f * ((bck / cts) ** g))
cti = cti1 * cti2
# equation 9 for the shift at the central row
cti10000 = cti * 10000
dy512 = 0.025 * cti10000 - (0.00078 * cti10000 * cti10000)
# prep ycol
if sx2 or ((stisimage is not None) and ("_sx2" in stisimage)):
remove_buffer = 38
else:
remove_buffer = 0
ycol -= remove_buffer # remove the bottom 38 pixels from _SX2 files
# Double check direction for other amplifiers
if amp in ['B', 'D']:
ycol_dir = ycol
else:
ycol_dir = 1024. / ybin - ycol
# Outputs
# equation 11 for corrected counts
fluxc = cts / ((1 - cti) ** (1024. - ycol_dir * ybin))
# mag correction
dmagc = 2.5 * np.log10(cts / fluxc)
# scale central row shift for other rows
dyc = dy512 * ((1024. - ycol_dir) / 512)
# print results
print("")
print("mjd: {:8.2f}\n"
"nread: {:6}\n"
"ybin: {:6}\n"
"gain: {:4.2}\n"
"amp: {}\n".
format(mjd, str(nread), str(ybin), float(gain), amp))
if type(lcts) is np.ndarray:
print("tt0: {:.8}\n"
"lcts: {}\n"
"bck: {}\n"
"lbck: {}\n"
"cti: {}\n"
"fluxc: {}\n"
"dmagc: {}\n"
"cti10000: {}\n"
"dy512: {}\n"
"dyc: {}\n".
format(tt0, lcts, bck, lbck, cti, fluxc, dmagc, cti10000,
dy512, dyc))
print('net: {}\n'
'sky: {}\n'
'ycol: {}\n'
'fluxc: {}\n'
'dmagc: {}\n'
'dyc: {}\n'.format(net, sky, ycol, fluxc, dmagc, dyc))
else:
print("tt0: {:.8}\n"
"lcts: {:.8}\n"
"bck: {:.8}\n"
"lbck: {:.8}\n"
"cti: {:.8}\n"
"fluxc: {:.8}\n"
"dmagc: {:.8}\n"
"cti10000: {:.8}\n"
"dy512: {:.8}\n"
"dyc: {:.8}\n".
format(tt0, lcts, bck, lbck, cti, fluxc, dmagc, cti10000,
dy512, dyc))
print('net: {:.8}\n'
'sky: {:.8}\n'
'ycol: {:.8}\n'
'fluxc: {:.8}\n'
'dmagc: {:.8}\n'
'dyc: {:.8}\n'.format(net, sky, ycol, fluxc, dmagc, dyc))
return fluxc, dmagc, dyc
| 10,161 | 35.163701 | 82 |
py
|
stistools
|
stistools-master/stistools/observation.py
|
from astropy.io import fits
def initObservation(input, instrument, sci_num):
"""Construct an Observation object for the current mode.
Parameters
----------
input: str
The name of an input file.
instrument: str
Value of keyword INSTRUME, should be "COS" or "STIS"
Returns
-------
obs: an Observation object
Information about the observation, mostly from header keywords.
"""
instrument = instrument.upper()
if instrument == "STIS":
obs = Observation(input, sci_num)
else:
raise RuntimeError("instrument '{}' is not supported".
format(instrument))
return obs
class Observation(object):
"""Get information about an observation from its headers."""
def __init__(self, input, sci_ext=1):
"""Invoked by a subclass.
Parameters
----------
input: str
The name of an input file.
"""
self.input = input
self.sci_ext = sci_ext
self.ra_targ = None
self.dec_targ = None
self.cenwave = None
self.expstart = None
self.expend = None
self.dispersion = None
def getInfo(self):
"""Get information about the exposure."""
fd = fits.open(self.input, mode="readonly")
phdr = fd[0].header
hdr = fd['sci', self.sci_ext].header
self.ra_targ = phdr["ra_targ"]
self.dec_targ = phdr["dec_targ"]
self.cenwave = phdr.get("cenwave", default=0)
if self.cenwave <= 0:
raise ValueError("CENWAVE = %d" % self.cenwave)
if phdr["detector"] == "CCD":
highres_factor = 1.0
else:
highres_factor = 2.0 # either MAMA detector
self.expstart = hdr["expstart"]
self.expend = hdr["expend"]
cd1_1 = hdr.get("cd1_1", 1.)
ltm1_1 = hdr.get("ltm1_1", 1.)
self.dispersion = cd1_1 * ltm1_1 / highres_factor
if self.dispersion == 0.:
raise ValueError("dispersion is zero")
fd.close()
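# A minimal usage sketch (hypothetical file name):
#     obs = initObservation("o66p01020_flt.fits", "STIS", 1)
#     obs.getInfo()
#     print(obs.expstart, obs.expend, obs.dispersion)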
| 2,082 | 24.096386 | 71 |
py
|
stistools
|
stistools-master/stistools/gettable.py
|
import math
import numpy as np
from astropy.io import fits
STRING_WILDCARD = "ANY"
INT_WILDCARD = -1
def getTable(table, filter, sortcol=None,
exactly_one=False, at_least_one=False):
"""Return row(s) of a table that match the filter.
Rows that match every item in the filter (a dictionary of
column_name=value) will be returned. If the value in the table is
STRING_WILDCARD or INT_WILDCARD (depending on the data type of the
column), that value is considered to match the filter for that column.
Also, for a given filter key, if the corresponding value in the filter
is STRING_WILDCARD, the test on filter will be skipped for that key
(i.e. a wildcard filter element matches any row).
If more than one row matches the filter, there is an option to sort
these rows based on the values of one of the table columns.
It is an error if exactly_one or at_least_one is True but no row
matches the filter. A warning will be printed if exactly_one is True
but more than one row matches the filter.
Parameters
-----------
table : string
name of the reference table
filter : dict
each key is a column name, and the corresponding value
is a possible table value in that column
sortcol : string
the name of a column on which to sort the table rows
(if there is more than one matching row), or None to disable sorting
exactly_one : bool
set this to True if there must be one and only one
matching row
at_least_one : bool
set this to True if there must be at least one
matching row
Returns
-------
match_rows : rec_array
an array of the rows of the table that match the filter;
note that if only one row matches the filter, the function value
will still be an array
"""
fd = fits.open(table, mode="readonly")
data = fd[1].data
# There will be one element of select_arrays for each non-trivial
# selection criterion. Each element of select_arrays is an array
# of flags, true if the row matches the criterion.
select_arrays = []
for key in filter.keys():
if filter[key] == STRING_WILDCARD:
continue
column = data.field(key)
if len(column) == 0:
return None
selected = (column == filter[key])
# Test for wildcards in the table.
wild = None
if isinstance(column, np.chararray):
wild = (column == STRING_WILDCARD)
elif isinstance(column[0], np.integer):
wild = (column == INT_WILDCARD)
if wild is not None:
selected = np.logical_or(selected, wild)
select_arrays.append(selected)
if len(select_arrays) > 0:
selected = select_arrays[0]
for sel_i in select_arrays[1:]:
selected = np.logical_and(selected, sel_i)
newdata = data[selected]
else:
newdata = fd[1].data.copy()
fd.close()
nselect = len(newdata)
if nselect < 1:
newdata = None
if (exactly_one or at_least_one) and nselect < 1:
message = "Table has no matching row;\n" + \
"table name is " + table + "\n" + \
"row selection is " + repr(filter)
raise RuntimeError(message)
if exactly_one and nselect > 1:
print("Table has more than one matching row;")
print("table name is", table)
print("row selection is", repr(filter))
print("only the first will be used.")
if newdata is not None and len(newdata) > 1 and sortcol is not None:
newdata = sortrows(newdata, sortcol)
return newdata
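# A minimal usage sketch (hypothetical table name, column names, and
# values):
#     rows = getTable("my_reference_apt.fits",
#                     {"APERTURE": "52X0.2", "OPT_ELEM": "G430L"},
#                     at_least_one=True)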
def sortrows(rowdata, sortcol, ascend=True):
"""Return a copy of rowdata, sorted on sortcol."""
if len(rowdata) <= 1:
return rowdata
column = rowdata.field(sortcol)
index = column.argsort()
if not ascend:
ind = list(index)
ind.reverse()
index = np.array(ind)
return rowdata[index]
def rotateTrace(trace_info, expstart):
"""Rotate a2displ, if MJD and DEGPERYR are in the trace table.
Parameters
-----------
trace_info : rec_array
an array of the relevant rows of the table;
the A2DISPL column will be modified in-place if the MJD and
DEGPERYR columns are present
expstart : float
exposure start time (MJD)
"""
if expstart < 0:
return
# If these columns are not in the table, just return.
names = []
for name in trace_info.names:
names.append(name.lower())
if "degperyr" in names and "mjd" in names:
degperyr = trace_info.field("degperyr")
mjd = trace_info.field("mjd")
else:
return
a2displ = trace_info.field("a2displ")
nelem = trace_info.field("nelem")
for i in range(len(trace_info)):
angle = (degperyr[i] * (expstart - mjd[i]) / 365.25)
tan_angle = math.tan(angle * math.pi / 180.)
x = np.arange(nelem[i], dtype=np.float64)
x -= (nelem[i] // 2)
a2displ[i][:] -= (x * tan_angle)
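# Schematically, each matching trace row is tilted by
#     angle [deg] = DEGPERYR * (expstart - MJD) / 365.25
# and A2DISPL(x) is decremented by x * tan(angle), with x measured from the
# middle element of the trace.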
| 5,092 | 30.245399 | 76 |
py
|
stistools
|
stistools-master/stistools/wavecal.py
|
#! /usr/bin/env python
import os
import sys
import getopt
import glob
import subprocess
import numpy.random as rn # used by mkRandomNameW
from astropy.io import fits
from stsci.tools import parseinput, teal
"""
Perform wavelength calibration of STIS data.
Examples
--------
In Python without TEAL:
>>> import stistools
>>> stistools.wavecal.wavecal("o66p01020_flt.fits", "o66p01020_wav.fits",
... verbose=True, trailer="o66p01020.trl")
In Python with TEAL:
>>> from stistools import wavecal
>>> from stsci.tools import teal
>>> teal.teal("wavecal")
In Pyraf:
>>> import stistools
>>> teal wavecal
From command line::
% ./wavecal.py -v -s o66p01020_flt.fits o66p01020_wav.fits
% ./wavecal.py -v -s o66p01020_flt.fits o66p01020_w2d_tmp.fits
% ./wavecal.py -r
"""
__taskname__ = "wavecal"
__version__ = "3.4"
__vdate__ = "13-November-2013"
__author__ = "Phil Hodge, STScI, November 2013."
# MJD after which the external shutter was closed for CCD HITM wavecals.
SH_CLOSED = 51126.0
def main(args):
if len(args) < 2:
prtOptions()
print("Specify at least a calibrated science file and its wavecal.")
sys.exit()
try:
(options, pargs) = getopt.getopt(args, "srtv:",
["version"])
except Exception as error:
print(error)
prtOptions()
sys.exit()
input = ""
inwave = ""
savetmp = False
verbose = False
timestamps = False
rn.seed() # used by mkRandomNameW
for i in range(len(options)):
if options[i][0] == "--version":
status = subprocess.call(["cs4.e", "--version"])
return 0
if options[i][0] == "-r":
status = subprocess.call(["cs4.e", "-r"])
return 0
elif options[i][0] == "-v":
verbose = True
elif options[i][0] == "-t":
timestamps = True
elif options[i][0] == "-s":
savetmp = True
nargs = len(pargs)
if nargs < 1 or nargs > 2:
prtOptions()
sys.exit()
input = pargs[0]
if nargs == 2:
inwave = pargs[1]
status = wavecal(input, wavecal=inwave, debugfile="",
savetmp=savetmp,
option="linear", angle=None,
verbose=verbose, timestamps=timestamps)
sys.exit(status)
def prtOptions():
"""Print a list of command-line options and arguments."""
print("The command-line options are:")
print(" --version (print the version number and exit)")
print(" -r (print the full version string and exit)")
print(" -v (verbose)")
print(" -t (print timestamps)")
print(" -s (save temporary files)")
print("")
print("Following the options, list the input flt file names and")
print(" the associated raw (or calibrated) wavecal file names.")
def wavecal(input, wavecal, debugfile="", savetmp=False,
option="linear", angle=None,
verbose=False, timestamps=False,
trailer="", print_version=False, print_revision=False):
"""Perform wavecal processing for STIS data.
Parameters
----------
input: str
Names of the flt or crj file or files for the science exposure.
The SHIFTA1 and SHIFTA2 keywords will be updated in these files,
based on the results of processing the wavecal file(s).
wavecal: str
Names of the associated wavecal file or files (either raw or
calibrated). If this is a raw file, it will first be calibrated
using cs1.e (basic2d), then with cs7.e (x2d) for first-order
grating data. These calibrated files are regarded as temporary,
and (unless savetmp) they will be deleted when processing has
been completed.
debugfile: str
If specified, debugging information will be written to a file with
this name. For echelle data this will be a FITS file, but for
first-order data it will be a text file (and possibly a FITS file
as well).
savetmp: bool
If wavecal is a raw wavecal file, some calibration will be
performed, depending on mode. If savetmp is False (the default),
the calibrated wavecal files will be deleted after wavecal
processing is complete.
option: str
If the wavecal file contains more than one image set, the shifts
will be interpolated between wavecal exposures that bracket the
science exposure. This argument gives the interpolation option,
either "linear" (the default) or "nearest". If the science
exposure was before the first or after the last exposure in the
wavecal file, the shifts will be copied from the first or last
exposure respectively.
angle: float or None
This argument is only relevant for echelle data for which the
wavecal was taken with a long slit (e.g. 6X0.2). The angles have
not been measured accurately; they vary from one grating to
another, and they even vary depending on location on the detector.
This argument specifies the slit angle, in degrees measured
clockwise from the Y axis. Here are some approximate values:
- E230M: 0.9 to 1.2
- E230H: 4.9 to 6.9
- E140H: -3.8 to -5.8
verbose: bool
If True, calstis will print more info.
timestamps: bool
If True, calstis will print the date and time at various points
during processing.
trailer: str
If specified, the standard output and standard error will be
written to this file instead of to the terminal. Note, however,
that if print_version or print_revision is specified, the value
will be printed to the terminal, and any name given for the
trailer will be ignored.
print_version: bool
If True, calstis will print the version number (a string) and
then return 0, without checking any other argument.
print_revision: bool
If True, calstis will print the full version string and then
return 0.
Returns
-------
status: int
0 is OK.
2 is returned if the specified input file or files were not found,
or if the numbers of input and wavecal files (or of debugfiles) are
not the same.
A RuntimeError is raised if any of the calstis executables (cs1.e,
cs11.e, cs7.e, cs4.e, or cs12.e) returns a non-zero status.
"""
if print_version:
status = subprocess.call(["cs4.e", "--version"])
return 0
if print_revision:
status = subprocess.call(["cs4.e", "-r"])
return 0
# infiles may include one or more file names, separated by blanks
# or commas (or both), and any name may include wildcards.
infiles = []
input1 = input.split()
for in1 in input1:
input2 = in1.split(",")
for in2 in input2:
files = glob.glob(in2)
infiles.extend(files)
if input1 and not infiles:
print("No file name matched the string '{}'".format(input))
return 2
wavecal_files = []
wavecal1 = wavecal.split()
for wav1 in wavecal1:
if wav1:
wavecal2 = wav1.split(",")
for wav2 in wavecal2:
if wav2:
wavecal_files.append(wav2)
dbgfiles = []
if debugfile:
dbgfiles1 = debugfile.split()
for dbg1 in dbgfiles1:
dbgfiles2 = dbg1.split(",")
for dbg2 in dbgfiles2:
dbgfiles.append(dbg2)
same_length = True # optimistic initial value
n_infiles = len(infiles)
if wavecal_files and len(wavecal_files) != n_infiles:
same_length = False
print("You specified {} input files but {} wavecal files.".format
(n_infiles, len(wavecal_files)))
print("The number of input and wavecal files must be the same.")
if dbgfiles and len(dbgfiles) != n_infiles:
same_length = False
print("The number of input and debugfile files must be the same.")
if not same_length:
return 2
if trailer:
if verbose and os.access(trailer, os.F_OK):
print("Appending to trailer file {}".format(trailer))
f_trailer = open(trailer, "a")
fd_trailer = f_trailer.fileno()
else:
f_trailer = None
fd_trailer = None
for (i, infile) in enumerate(infiles):
tempfnames = []
# Run cs1.e on the wavecal file.
(flag, fwv_file) = runBasic2d(wavecal_files[i], tempfnames,
verbose, timestamps, fd_trailer)
# Subtract a fraction of the infile (flt or crj) from the wavecal,
# if the exposure was taken long enough ago.
(flag, cwv_file) = runCs11(fwv_file, infile, tempfnames,
verbose, timestamps, fd_trailer)
# Run cs7.e on the wavecal flt file (except for echelle or prism).
(flag, w2d_file) = runX2d(cwv_file, angle, tempfnames,
verbose, timestamps, fd_trailer)
# Now run cs4.e on the w2d_file to find the shifts.
if dbgfiles:
dbg = dbgfiles[i]
else:
dbg = None
runWavecal(w2d_file, dbg, angle, verbose, timestamps, fd_trailer)
# Run cs12.e to copy the shifts to infile.
runCs12(w2d_file, infile, option, verbose, timestamps, fd_trailer)
if not savetmp:
for tmp_file in tempfnames:
if verbose:
print(" ... deleting temporary file {}".format(tmp_file))
try:
os.remove(tmp_file)
except OSError:
print("Warning: couldn't delete temporary file {}.".format
(tmp_file))
if f_trailer is not None:
f_trailer.close()
return 0
def mkRandomNameW(prefix="wavecal_", suffix="_tmp.fits", n=100000000):
MAX_TRIES = 100
done = False
k = 0
while not done:
i = rn.randint(0, n, 1)[0]
filename = "%s%d%s" % (prefix, i, suffix)
k += 1
if not os.access(filename, os.F_OK):
done = True
if k > MAX_TRIES:
break
if done:
return filename
else:
return None
def runBasic2d(wavecal, tempfnames, verbose, timestamps, fd_trailer):
flag = False # initial value
# First check whether the wavecal file is already calibrated.
fd = fits.open(wavecal)
dqicorr = fd[0].header.get("dqicorr", default="missing")
blevcorr = fd[0].header.get("blevcorr", default="missing")
darkcorr = fd[0].header.get("darkcorr", default="missing")
flatcorr = fd[0].header.get("flatcorr", default="missing")
detector = fd[0].header.get("detector", default="missing")
fd.close()
if dqicorr == "COMPLETE" or blevcorr == "COMPLETE" or \
darkcorr == "COMPLETE" or flatcorr == "COMPLETE":
# wavecal is already calibrated.
fwv_file = wavecal
flag = False
else:
# Create pseudo-random fwv_file name.
prefix = "wavecal_"
suffix = "_fwv_tmp.fits"
fwv_file = mkRandomNameW(prefix, suffix)
if fwv_file is None:
raise RuntimeError("Couldn't create temp file name"
" %s<digits>%s" % (prefix, suffix))
arglist = ["cs1.e"]
if verbose:
arglist.append("-v")
if timestamps:
arglist.append("-t")
arglist.append(wavecal)
arglist.append(fwv_file)
arglist.append("-dqi")
if detector == "CCD":
arglist.append("-blev")
arglist.append("-bias")
else:
arglist.append("-lors")
arglist.append("-dark")
arglist.append("-flat")
if verbose:
print("Running cs1.e on {}".format(wavecal))
print(" {}".format(arglist))
status = subprocess.call(arglist, stdout=fd_trailer,
stderr=subprocess.STDOUT)
if status:
raise RuntimeError("status = {} from cs1.e".format(status))
tempfnames.append(fwv_file)
flag = True
return flag, fwv_file
def runCs11(fwv_file, infile, tempfnames, verbose, timestamps, fd_trailer):
"""Subtract a fraction of the science image from the wavecal image."""
# Check whether we need to run cs11.e.
fd = fits.open(fwv_file)
sclamp = fd[0].header.get("sclamp", default="missing")
detector = fd[0].header.get("detector", default="missing")
texpstrt = fd[0].header.get("texpstrt", default="missing")
fd.close()
if detector == "CCD" and sclamp.startswith("HITM") and \
texpstrt <= SH_CLOSED:
# Create pseudo-random cwv_file name.
prefix = "wavecal_"
suffix = "_cwv_tmp.fits"
cwv_file = mkRandomNameW(prefix, suffix)
if cwv_file is None:
raise RuntimeError("Couldn't create temp file name"
" %s<digits>%s" % (prefix, suffix))
arglist = ["cs11.e"]
if verbose:
arglist.append("-v")
if timestamps:
arglist.append("-t")
arglist.append(fwv_file)
arglist.append(infile)
arglist.append(cwv_file)
if verbose:
print("Running cs11.e")
print(" {}".format(arglist))
status = subprocess.call(arglist, stdout=fd_trailer,
stderr=subprocess.STDOUT)
if status:
raise RuntimeError("status = {} from cs11.e".format(status))
tempfnames.append(cwv_file)
flag = True
else:
cwv_file = fwv_file
flag = False
return flag, cwv_file
def runX2d(cwv_file, angle, tempfnames,
verbose, timestamps, fd_trailer):
flag = False # initial value
fd = fits.open(cwv_file)
opt_elem = fd[0].header.get("opt_elem", default="missing")
x2dcorr = fd[0].header.get("x2dcorr", default="missing")
fd.close()
# Skip 2-D rectification for echelle or prism data.
skip_it = opt_elem.startswith("E") or opt_elem == "PRISM"
if skip_it:
w2d_file = cwv_file
flag = False
elif x2dcorr == "COMPLETE":
# The wavecal is already fully calibrated.
w2d_file = cwv_file
flag = False
else:
# Create pseudo-random w2d_file name.
prefix = "wavecal_"
suffix = "_w2d_tmp.fits"
w2d_file = mkRandomNameW(prefix, suffix)
if w2d_file is None:
raise RuntimeError("Couldn't create temp file name"
" %s<digits>%s" % (prefix, suffix))
arglist = ["cs7.e"]
if verbose:
arglist.append("-v")
if timestamps:
arglist.append("-t")
# Specify -x2d and no other calibration switch so that fluxcorr
# will not be done.
arglist.append("-x2d")
arglist.append(cwv_file)
arglist.append(w2d_file)
if angle is not None:
arglist.append("-angle")
arglist.append("%.20g" % angle)
if verbose:
print("Running x2d on {}".format(cwv_file))
print(" {}".format(arglist))
status = subprocess.call(arglist, stdout=fd_trailer,
stderr=subprocess.STDOUT)
if status:
raise RuntimeError("status = %d from cs7.e" % status)
tempfnames.append(w2d_file)
flag = True
return flag, w2d_file
def runWavecal(w2d_file, dbg, angle, verbose, timestamps, fd_trailer):
arglist = ["cs4.e"]
if verbose:
arglist.append("-v")
if timestamps:
arglist.append("-t")
arglist.append(w2d_file)
if angle is not None:
arglist.append("-angle")
arglist.append("%.20g" % angle)
if dbg: # text file for debug output
arglist.append("-d")
arglist.append(dbg)
if verbose:
print("Running cs4.e on {}".format(w2d_file))
print(" {}".format(arglist))
status = subprocess.call(arglist, stdout=fd_trailer,
stderr=subprocess.STDOUT)
if status:
raise RuntimeError("status = %d from cs4.e" % status)
def runCs12(w2d_file, infile, option, verbose, timestamps, fd_trailer):
arglist = ["cs12.e"]
if verbose:
arglist.append("-v")
if timestamps:
arglist.append("-t")
arglist.append(w2d_file)
arglist.append(infile)
arglist.append(option)
if verbose:
print("Running cs12.e")
print(" {}".format(arglist))
status = subprocess.call(arglist, stdout=fd_trailer,
stderr=subprocess.STDOUT)
if status:
raise RuntimeError("status = %d from cs12.e" % status)
#-------------------------#
# Interfaces used by TEAL #
#-------------------------#
def getHelpAsString(fulldoc=True):
"""Return documentation on the wavecal function."""
return wavecal.__doc__
def run(configobj=None):
"""TEAL interface for the wavecal function."""
wavecal(input=configobj["input"],
wavecal=configobj["wavecal"],
debugfile=configobj["debugfile"],
savetmp=configobj["savetmp"],
option=configobj["option"],
angle=configobj["angle"],
verbose=configobj["verbose"],
timestamps=configobj["timestamps"],
trailer=configobj["trailer"],
print_version=configobj["print_version"],
print_revision=configobj["print_revision"])
if __name__ == "__main__":
main(sys.argv[1:])
| 17,785 | 30.817531 | 79 |
py
|
stistools
|
stistools-master/stistools/doppinfo.py
|
#! /usr/bin/env python
import sys
import math
import numpy as np
from astropy.io import fits
from . import observation
from . import orbit
__doc__ = """
This class computes Doppler shift information for each imset of a dataset.
Keywords will be read from the science file and from the support file. The
Doppler shift information is printed to the standard output by default, but
this can be turned off by setting quiet to True. Three task parameters will be
updated by doppinfo; these allow the user to compute the Doppler shift in
high-res pixels or km/s at any time close to the time of the exposure.
The printed information will be in one of two formats, depending on the value
of dt. If dt is zero, the average and extreme values of the Doppler shift
during the exposure will be printed. If dt is greater than zero, the actual
Doppler shift and radial velocity will be printed at the beginning of the
exposure and at every dt seconds thereafter until the end of the exposure.
Both the printed value of the Doppler shift and the doppmag parameter will
be in high-res pixels.
Most of the Doppler shift information is computed directly from the orbital
elements of HST, as given in the support file primary header. Some information,
however, is computed based on the approximation of a circular orbit. This
approximation is used for the average Doppler shift during the exposure
(printed if dt is zero) and for the task parameters doppzero, doppmag
and radvel that are updated by doppinfo. These parameters are applied as
terms in a sine function, which inherently involves a circular-orbit
approximation.
The parameters for the circular orbit are determined as follows. The HST
orbital elements are gotten from the primary header of the support file. The
target position is taken from the keywords RA_TARG and DEC_TARG in the science
file primary header. The velocity of HST is computed from the orbital elements
at 64 equally spaced times throughout an orbit, centered on the midpoint of the
exposure, (EXPSTART + EXPEND) / 2, and the component of this velocity in the
direction away from the target (i.e. the radial velocity) is taken. A sine
function is fit to these radial velocities; the amplitude is radvel, and the
amplitude and phase are used to compute doppmag and doppzero.
Examples
--------
:class:`Doppinfo` with dt of 100:
>>> import stistools
>>> stistools.doppinfo.Doppinfo("ocb6o2020_raw.fits", dt=100, spt="ocb6o2020_spt.fits")
# orbitper doppzero doppmag doppmag_v file
5728.67 56752.114170 11.68643135 7.40391177 ocb6o2020_raw.fits[sci,1]
# time (MJD) shift radvel
56752.165175 -11.59 -7.345
56752.166333 -11.37 -7.203
56752.167490 -11.01 -6.975
56752.168647 -10.52 -6.663
56752.169805 -9.90 -6.272
56752.170962 -9.16 -5.805
56752.172120 -8.32 -5.269
56752.173277 -7.37 -4.669
56752.174434 -6.34 -4.014
56752.175592 -5.23 -3.311
56752.176749 -4.05 -2.568
56752.177907 -2.83 -1.794
56752.179064 -1.58 -0.998
56752.180222 -0.30 -0.190
# orbitper doppzero doppmag doppmag_v file
5728.67 56752.180505 11.68734454 7.40449032 ocb6o2020_raw.fits[sci,2]
# time (MJD) shift radvel
56752.181784 1.42 0.902
56752.182941 2.68 1.700
56752.184099 3.91 2.477
56752.185256 5.09 3.225
56752.186413 6.21 3.935
56752.187571 7.26 4.598
56752.188728 8.22 5.205
56752.189886 9.08 5.750
56752.191043 9.83 6.227
56752.192200 10.46 6.628
56752.193358 10.97 6.950
56752.194515 11.35 7.189
56752.195673 11.59 7.342
56752.196830 11.69 7.406
"""
__version__ = "3.0"
__vdate__ = "2018-09-14"
# multiply by DEG_RAD to convert from degrees to radians
DEG_RAD = (math.pi / 180.0)
TWOPI = (math.pi * 2.0)
SPEED_OF_LIGHT = 299792.458 # km / s
SEC_PER_DAY = 86400.0
# number of points for computing circular-orbit parameters
NPTS = 64
NPTS_D = 64.
class Doppinfo(object):
"""Compute Doppler parameters and information from HST orbital elements.
This class previously supported both COS and STIS data, but now only
supports STIS data. The class will print doppler shift information for all
imsets contained in the input image.
Results will be printed to standard out. To have the DOPPZERO, DOPPMAG,
and DOPPPMAGV keywords inserted/updated in the header you can set update to
True."""
def __init__(self, input, spt=None, dt=0., update=False, quiet=False):
"""Compute Doppler parameters.
Parameters
----------
input: str
The name of an input file; Only STIS data is supported.
spt: str or None
The name of the support (_spt.fits) file to use for getting
the orbital parameters; if not specified, the spt file name
will be constructed from the input file name by truncating
after "raw", "corrtag", "flt", "counts" or "x1d" and appending
"spt.fits".
dt: float
Time interval (seconds) for printing Doppler shift throughout
the orbit.
update: boolean
By default, Doppinfo just prints the Doppler parameter values.
If you specify update=True, the input files will be modified
(in-place) by updating the keywords ORBITPER, DOPPZERO,
DOPPMAG, and DOPPMAGV in the first extension.
quiet: boolean
If True, values of Doppler parameters and other info will not
be printed.
"""
self.input = input
self.update = update
self.quiet = quiet
self.obs = {}
# unit vector (3-element list) pointing toward the target
self.target = None
instrument, nextend = self._getInitInfo()
self.sci_num = nextend // 3
dt /= SEC_PER_DAY # convert dt to days
for sci_ext in np.arange(self.sci_num)+1:
self.obs = observation.initObservation(input, instrument, sci_ext)
self.obs.getInfo()
if spt is None:
spt = self._findSptName()
self.orbit = orbit.HSTOrbit(spt)
self._getDoppParam()
if not quiet:
print("# orbitper doppzero doppmag doppmag_v "
"file")
print(" {:.7g} {:.6f} {:.8f} {:.8f} {}[sci,{}]".
format(self.orbitper, self.doppzero, self.doppmag,
self.doppmag_v, self.input.split("/")[-1],
sci_ext))
self.printDopplerShift(dt)
# Maybe shouldn't be in loop
if update:
self._updateKeywords(input, sci_ext)
def _getInitInfo(self):
"""
Get nextend and instrument.
"""
fd = fits.open(self.input, mode="readonly")
instrument = fd[0].header.get("instrume", "missing")
nextend = fd[0].header['nextend']
fd.close()
return instrument, nextend
def _findSptName(self):
"""Get the name of the support file.
Returns
-------
spt: str
Name of the support (_spt.fits) file.
"""
i = self.input.rfind("raw")
if i < 0:
i = self.input.rfind("corrtag")
if i < 0:
i = self.input.rfind("flt")
if i < 0:
i = self.input.rfind("counts")
if i < 0:
i = self.input.rfind("x1d")
if i >= 0:
spt = self.input[0:i] + "spt.fits"
else:
raise RuntimeError("Don't understand input file name '{}'".
format(self.input))
return spt
def _getDoppParam(self):
"""Compute Doppler parameters.
The following attributes will be assigned:
orbitper: orbital period (seconds)
doppzero: time (MJD) when the Doppler shift is zero and increasing
doppmag_v: magnitude of the Doppler shift in km/s
doppmag: magnitude of the Doppler shift in pixels
"""
# Convert target ra,dec to rectangular coordinates (unit radius).
self.target = self._sph_rec(self.obs.ra_targ * DEG_RAD,
self.obs.dec_targ * DEG_RAD)
self.orbitper = self.orbit.getOrbitper() # seconds
orbit_period = self.orbitper / SEC_PER_DAY # days
expmiddle = (self.obs.expstart + self.obs.expend) / 2.0
# Compute Fourier coefficients based on NPTS points in an orbit,
# centered on the middle of the exposure.
sum_sin = 0.0
sum_cos = 0.0
t_origin = expmiddle - orbit_period / 2.0
for i in range(NPTS):
delt = i * (orbit_period / NPTS_D)
time = t_origin + delt
radvel = self._get_rv(time)
sum_sin += radvel * math.sin(TWOPI * delt / orbit_period)
sum_cos += radvel * math.cos(TWOPI * delt / orbit_period)
# Normalize by dividing by the sum of (sin**2) at NPTS equally spaced
# times in one orbit.
acoeff = sum_sin / (NPTS_D/2.0)
bcoeff = sum_cos / (NPTS_D/2.0)
"""Find doppzero and doppmag, assuming a circular orbit.
Assume that the radial velocity has this form:
radvel = acoeff * sin(arg) + bcoeff * cos(arg)
where:
arg = (time - t_origin) * 2*pi/P
time is in MJD
t_origin = MJD at the middle of the exposure - P/2
        P is the orbital period of HST
Write:
acoeff = doppmag_v * cos(theta)
bcoeff = doppmag_v * sin(theta)
theta will give us doppzero, as explained below.
Then the expression for radial velocity vs time is:
radvel = doppmag_v * sin(arg + theta)
From the definitions of doppmag_v and doppzero:
radvel = doppmag_v * sin((time - doppzero) * 2*pi/P)
so:
arg + theta = (time - doppzero) * 2*pi/P
(time - t_origin) * 2*pi/P + theta = (time - doppzero) * 2*pi/P
theta = [(time - doppzero) - (time - t_origin)] * 2*pi/P
= (t_origin - doppzero) * 2*pi/P
and:
doppzero = -theta * P/(2*pi) + t_origin
We have already computed acoeff and bcoeff, so:
theta = atan2(bcoeff, acoeff)
"""
self.doppzero = -math.atan2(bcoeff, acoeff) \
* orbit_period / TWOPI + t_origin
self.doppmag_v = math.sqrt(acoeff*acoeff + bcoeff*bcoeff)
self.doppmag = self._rvToPixels(self.doppmag_v)
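        # Sanity check (a sketch, not executed by the pipeline): if the
        # radial velocity were exactly A * sin(2*pi*(t - t0)/P), the sums
        # above would give acoeff = A*cos(theta) and bcoeff = A*sin(theta)
        # with theta = 2*pi*(t_origin - t0)/P, so doppmag_v recovers A and
        # doppzero recovers t0 (modulo one orbital period).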
def _sph_rec(self, ra_targ, dec_targ):
"""Convert from RA & Dec to rectangular coordinates.
Parameters
----------
ra_targ: float
Right ascension (radians) of the target.
dec_targ: float
Declination (radians) of the target.
Returns
-------
target: list of three floats
Unit vector (rectangular coords.) pointing toward the target.
"""
target = [0., 0., 0.]
target[0] = math.cos(dec_targ) * math.cos(ra_targ)
target[1] = math.cos(dec_targ) * math.sin(ra_targ)
target[2] = math.sin(dec_targ)
return target
def _get_rv(self, time):
"""Compute the radial velocity.
Parameters
----------
time: float
A particular time (MJD).
Returns
-------
radial velocity: float
The component of the velocity away from the target.
"""
(x, v) = self.orbit.getPos(time)
# This is the component of velocity toward the target.
dot_product = self.target[0] * v[0] + self.target[1] * v[1] + \
self.target[2] * v[2]
# Change the sign to get the component away from the target.
return -dot_product
def _rvToPixels(self, radvel):
"""Convert radial velocity to Doppler shift in pixels.
Parameters
----------
radvel: float
Radial velocity in km/s.
Returns
-------
        doppmag: float
            Doppler shift in pixels corresponding to the given radial
            velocity.
"""
doppmag = radvel * self.obs.cenwave / \
(SPEED_OF_LIGHT * self.obs.dispersion)
return doppmag
def _pixelsToRv(self, doppmag):
"""Convert Doppler shift in pixels to radial velocity.
Parameters
----------
doppmag: float
            Doppler shift in pixels.
Returns
-------
radvel: float
Radial velocity in km/s.
"""
radvel = doppmag * SPEED_OF_LIGHT * self.obs.dispersion / \
self.obs.cenwave
return radvel
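        # Note: this is the exact inverse of _rvToPixels, so
        # _pixelsToRv(_rvToPixels(v)) == v for any radial velocity v.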
def printDopplerShift(self, dt):
"""Compute and print the Doppler shift at intervals of dt.
Parameters
----------
dt: float
Time interval (seconds) for printing Doppler shift throughout
the orbit, or if dt is zero print the min and max Doppler shift
during the orbit.
"""
expstart = self.obs.expstart
expend = self.obs.expend
expmiddle = (expstart + expend) / 2.
doppzero = self.doppzero
orbit_period = self.orbitper / SEC_PER_DAY # days
if dt > 0.:
print("# time (MJD) shift radvel")
# Add 1.e-4 to expend to include end of interval, in case
# increment divides exposure time evenly.
done = False
time = expstart
while not done:
if time <= expend+1.e-4:
radvel = self._get_rv(time)
doppmag = self._rvToPixels(radvel)
print("{:12.6f} {:7.2f} {:8.3f}".
format(time, doppmag, radvel))
time += dt
else:
done = True
else:
# Use the radial velocity at the middle of the exposure
# as an initial value for finding min & max radial velocity.
mid_radvel = self._get_rv(expmiddle)
min_radvel = mid_radvel
max_radvel = mid_radvel
t_min = expmiddle
t_max = expmiddle
delta = 1.e-4
done = False
time = expstart + delta
while not done:
if time <= expend:
radvel = self._get_rv(time)
if radvel < min_radvel:
min_radvel = radvel
t_min = time
if radvel > max_radvel:
max_radvel = radvel
t_max = time
time += delta
else:
done = True
# Explicitly check the radial velocities at the endpoints.
min_at_end = False # initial values
max_at_end = False
radvel = self._get_rv(expstart)
if radvel < min_radvel:
min_radvel = radvel
t_min = expstart
min_at_end = True
if radvel > max_radvel:
max_radvel = radvel
t_max = expstart
max_at_end = True
radvel = self._get_rv(expend)
if radvel < min_radvel:
min_radvel = radvel
t_min = expend
min_at_end = True
if radvel > max_radvel:
max_radvel = radvel
t_max = expend
max_at_end = True
# Improve the values of min and max radial velocity.
rv = [0., 0., 0.]
if not min_at_end:
rv[0] = self._get_rv(t_min-delta)
rv[1] = self._get_rv(t_min)
rv[2] = self._get_rv(t_min+delta)
time = self._peakQuadratic(rv, t_min, delta)
time = max(time, expstart)
time = min(time, expend)
min_radvel = self._get_rv(time)
if not max_at_end:
rv[0] = self._get_rv(t_max-delta)
rv[1] = self._get_rv(t_max)
rv[2] = self._get_rv(t_max+delta)
time = self._peakQuadratic(rv, t_max, delta)
time = max(time, expstart)
time = min(time, expend)
max_radvel = self._get_rv(time)
# Compute the average radial velocity. Note that this
# assumes a circular orbit.
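            # Averaging doppmag * sin(2*pi*(t - doppzero)/P) over
            # [expstart, expend] gives the closed form used below:
            # doppmag * (P / (2*pi)) *
            #     (cos(2*pi*(expstart - doppzero)/P)
            #      - cos(2*pi*(expend - doppzero)/P)) / (expend - expstart)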
if expend == expstart:
avg_radvel = self._get_rv(expmiddle)
else:
avg_dopp = self.doppmag * \
(math.cos(TWOPI * (expstart - doppzero) / orbit_period) -
math.cos(TWOPI * (expend - doppzero) / orbit_period)) * \
orbit_period / TWOPI / (expend - expstart)
avg_radvel = self._pixelsToRv(avg_dopp)
mid_dopp = self._rvToPixels(mid_radvel)
avg_dopp = self._rvToPixels(avg_radvel)
min_dopp = self._rvToPixels(min_radvel)
max_dopp = self._rvToPixels(max_radvel)
print("# midpoint midpoint Doppler average Doppler "
"minimum Doppler maximum Doppler")
print("# MJD pixels km/s pixels km/s "
"pixels km/s pixels km/s")
print("{:12.6f} {:8.2f} {:6.3f} {:8.2f} {:6.3f} "
"{:8.2f} {:6.3f} {:8.2f} {:6.3f} {}".
format(expmiddle, mid_dopp, mid_radvel, avg_dopp, avg_radvel,
min_dopp, min_radvel, max_dopp, max_radvel,
self.input.split("/")[-1]))
print("")
def _peakQuadratic(self, y, x_middle, spacing):
"""Get the location of the maximum (or minimum) of a quadratic.
Parameters
----------
y: array_like
Values of a function at three uniformly spaced points.
x_middle: float
Independent variable at the middle point.
spacing: float
Increment in the independent variable between elements of `y`.
Returns
-------
x: float
Independent variable of the maximum (or minimum) of the
quadratic that passes through the three uniformly spaced
points.
"""
denominator = y[0] - 2.0 * y[1] + y[2]
if denominator == 0.0:
return x_middle
dx = (y[0] - y[2]) / (2.0 * denominator)
return dx * spacing + x_middle
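        # Worked example: y = [0.0, 0.9, 1.2] lie on the parabola
        # -0.3*x**2 + 0.6*x + 0.9 (sampled at x = -1, 0, 1), whose vertex
        # is at x = 1; here denominator = -0.6 and dx = 1.0, so the
        # function returns x_middle + spacing.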
def _updateKeywords(self, input, sci_ext):
"""Update keywords in the first extension header.
Parameters
----------
input: str
The name of an input file (modified in-place).
sci_ext: int
The number of the science extension.
"""
fd = fits.open(input, mode="update")
hdr = fd['sci', sci_ext].header
old_orbitper = hdr.get("orbitper", -999)
old_doppzero = hdr.get("doppzero", -999)
old_doppmag = hdr.get("doppmag", -999)
old_doppmag_v = hdr.get("doppmagv", -999)
hdr["orbitper"] = self.orbitper
hdr["doppzero"] = self.doppzero
hdr["doppmag"] = self.doppmag
hdr["doppmagv"] = self.doppmag_v
fd.close()
if not self.quiet:
print("{}[sci,{}] has been updated as follows:".
format(input.split("/")[-1], sci_ext))
if old_orbitper == -999:
print("orbitper: {:.4f} (added)".format(self.orbitper))
else:
print("orbitper: {:.4f} --> {:.4f}".
format(old_orbitper, self.orbitper))
if old_doppzero == -999:
print("doppzero: {:.7f} (added)".format(self.doppzero))
else:
print("doppzero: {:.7f} --> {:.7f}".
format(old_doppzero, self.doppzero))
if old_doppmag == -999:
print("doppmag: {:.6f} (added)".format(self.doppmag))
else:
print("doppmag: {:.6f} --> {:.6f}".
format(old_doppmag, self.doppmag))
if old_doppmag_v == -999:
print("doppmagv: {:.6f} (added)".format(self.doppmag_v))
else:
print("doppmagv: {:.6f} --> {:.6f}".
format(old_doppmag_v, self.doppmag_v))
print("")
if __name__ == "__main__":
main(sys.argv[1:])
| 20,721 | 34.422222 | 87 |
py
|
stistools
|
stistools-master/stistools/x1d.py
|
#! /usr/bin/env python
import os
import sys
import getopt
import glob
import subprocess
from stsci.tools import parseinput, teal
__doc__ = """
Extract 1-D spectrum.
Examples
--------
In Python without TEAL:
>>> import stistools
>>> stistools.x1d.x1d("o66p01020_flt.fits", output="test_x1d.fits",
... verbose=True, trailer="o66p01020.trl")
In Python with TEAL:
>>> from stistools import x1d
>>> from stsci.tools import teal
>>> teal.teal("x1d")
From command line::
% ./x1d.py -v o66p01020_flt.fits o66p01020_x1d.fits
% ./x1d.py -r
"""
__taskname__ = "x1d"
__version__ = "3.4"
__vdate__ = "13-November-2013"
__author__ = "Phil Hodge, STScI, November 2013."
def main(args):
if len(args) < 1:
prtOptions()
print("At least an input file name must be specified.")
sys.exit()
try:
        # "-v" is a flag (it takes no argument), so the option string
        # must be "rtv", not "rtv:".
        (options, pargs) = getopt.getopt(args, "rtv",
                                         ["version"])
    except Exception as error:
        print(error)
        prtOptions()
        sys.exit()
output = ""
verbose = False
timestamps = False
for i in range(len(options)):
if options[i][0] == "--version":
status = subprocess.call(["cs6.e", "--version"])
return 0
if options[i][0] == "-r":
status = subprocess.call(["cs6.e", "-r"])
return 0
if options[i][0] == "-v":
verbose = True
if options[i][0] == "-t":
timestamps = True
nargs = len(pargs)
if nargs < 1 or nargs > 2:
prtOptions()
sys.exit()
input = pargs[0]
if nargs == 2:
output = pargs[1]
status = x1d(input, output=output,
verbose=verbose, timestamps=timestamps)
sys.exit(status)
def prtOptions():
"""Print a list of command-line options and arguments."""
print("The command-line options are:")
print(" --version (print the version number and exit)")
print(" -r (print the full version string and exit)")
print(" -v (verbose)")
print(" -t (print timestamps)")
print("")
print("Following the options, list one or more flt or crj file names,")
print(" enclosed in quotes if more than one file name is specified")
print(" and/or if wildcards are used.")
print("One or more output file names may be specified (the same number")
print(" as the input file names).")
def x1d(input, output="",
backcorr="perform", ctecorr="perform", dispcorr="perform",
helcorr="perform", fluxcorr="perform",
sporder=None, a2center=None, maxsrch=None,
globalx=False, extrsize=None,
bk1size=None, bk2size=None, bk1offst=None, bk2offst=None, bktilt=None,
backord=None, bksmode="median", bksorder=3,
blazeshift=None, algorithm="unweighted", xoffset=None,
verbose=False, timestamps=False, trailer="",
print_version=False, print_revision=False):
"""Extract a 1-D spectrum from an flt or crj file.
Parameters
----------
input: str
Name of the input raw file.
output: str
Name of the output file, or "" (the default). If no name was
specified, the output name will be constructed from the input name.
backcorr: str
If "perform", subtract the background.
ctecorr: str
If "perform", apply CTE correction (CCD only).
dispcorr: str
If "perform", compute wavelengths from the dispersion relation.
helcorr: str
If "perform", correct for heliocentric Doppler shift.
fluxcorr: str
If "perform", convert to absolute flux.
sporder: int or None
The number of the spectral order to extract.
    a2center: float or None
        Cross-dispersion (Y) position at which to extract the spectrum;
        None means use the position from the reference table.
    maxsrch: float or None
        Maximum search range (pixels) in the cross-dispersion direction
        when locating the spectrum; None means use the default.
globalx: bool
If True, use the global cross correlation offset (i.e. average for
all orders) for all spectral orders.
extrsize: float or None
Size of extraction box. None means extrsize is not specified.
bk1size: float or None
Size of first background region. None means bk1size is not specified.
bk2size: float or None
Size of second background region. None means bk2size is not specified.
bk1offst: float or None
Offset of first background region. None means bk1offst is not
specified.
bk2offst: float or None
        Offset of second background region. None means bk2offst is not
specified.
bktilt: float or None
Background tilt. None means bktilt is not specified.
backord: int or None
Background order (0 or 1). None means backord is not specified.
bksmode: str
Background smoothing mode ("off", "median" (the default), or
"average").
bksorder: int
Background smoothing polynomial order (default is 3).
blazeshift: float or None
Blaze shift (in pixels). None means blazeshift is not specified.
algorithm: str
Extraction algorithm ("unweighted" (the default) or "sc2d")
xoffset: float
Offset in X for slitless extraction.
verbose: bool
If True, calstis will print more info.
timestamps: bool
If True, calstis will print the date and time at various points
during processing.
trailer: str
If specified, the standard output and standard error will be
written to this file instead of to the terminal. Note, however,
that if print_version or print_revision is specified, the value
will be printed to the terminal, and any name given for the
trailer will be ignored.
print_version: bool
If True, calstis will print the version number (a string) and
then return 0.
print_revision: bool
If True, calstis will print the full version string and then
return 0.
Returns
-------
status: int
0 is OK.
1 is returned if cs6.e (the calstis host executable) returned a
non-zero status. If verbose is True, the value returned by cs6.e
will be printed.
2 is returned if the specified input file or files were not found,
or the numbers of input and output files (if the latter was
specified) are not the same.
"""
if print_version:
status = subprocess.call(["cs6.e", "--version"])
return 0
if print_revision:
status = subprocess.call(["cs6.e", "-r"])
return 0
cumulative_status = 0
# infiles may include one or more file names, separated by blanks
# or commas (or both), and any name may include wildcards.
infiles = []
input1 = input.split()
for in1 in input1:
input2 = in1.split(",")
for in2 in input2:
files = glob.glob(in2)
infiles.extend(files)
if input1 and not infiles:
print("No file name matched the string '{}'".format(input))
return 2
if output:
outfiles = []
output1 = output.split()
for out1 in output1:
if out1:
output2 = out1.split(",")
for out2 in output2:
if out2:
outfiles.append(out2)
else:
outfiles = None
n_infiles = len(infiles)
if outfiles and len(outfiles) != n_infiles:
print("You specified {} input files but {} output files.".format
(n_infiles, len(outfiles)))
print("The number of input and output files must be the same.")
return 2
if trailer:
if verbose and os.access(trailer, os.F_OK):
print("Appending to trailer file {}".format(trailer))
f_trailer = open(trailer, "a")
fd_trailer = f_trailer.fileno()
else:
f_trailer = None
fd_trailer = None
for (i, infile) in enumerate(infiles):
arglist = ["cs6.e"]
arglist.append(infile)
if outfiles:
arglist.append(outfiles[i])
if verbose:
arglist.append("-v")
if timestamps:
arglist.append("-t")
if globalx:
arglist.append("-g")
switch_was_set = False
if backcorr == "perform":
arglist.append("-back")
switch_was_set = True
if ctecorr == "perform":
arglist.append("-cte")
switch_was_set = True
if dispcorr == "perform":
arglist.append("-disp")
switch_was_set = True
if helcorr == "perform":
arglist.append("-hel")
switch_was_set = True
if fluxcorr == "perform":
arglist.append("-flux")
switch_was_set = True
if not switch_was_set:
arglist.append("-x1d")
if sporder is not None:
arglist.append("-s")
arglist.append("%d" % sporder)
if a2center is not None:
arglist.append("-c")
arglist.append("%.10g" % a2center)
if maxsrch is not None:
arglist.append("-r")
arglist.append("%.10g" % maxsrch)
if extrsize is not None:
arglist.append("-x")
arglist.append("%.10g" % extrsize)
if bk1size is not None:
arglist.append("-b1")
arglist.append("%.10g" % bk1size)
if bk2size is not None:
arglist.append("-b2")
arglist.append("%.10g" % bk2size)
if bk1offst is not None:
arglist.append("-o1")
arglist.append("%.10g" % bk1offst)
if bk2offst is not None:
arglist.append("-o2")
arglist.append("%.10g" % bk2offst)
if bktilt is not None:
arglist.append("-k")
arglist.append("%.10g" % bktilt)
if backord is not None:
arglist.append("-n")
arglist.append("%d" % backord)
if blazeshift is not None:
arglist.append("-bs")
arglist.append("%.10g" % blazeshift)
if bksmode:
if bksmode == "off":
arglist.append("-bn")
elif bksmode == "median":
arglist.append("-bm")
arglist.append("-bo")
arglist.append("%d" % bksorder)
elif bksmode == "average":
arglist.append("-bb")
arglist.append("-bo")
arglist.append("%d" % bksorder)
else:
raise RuntimeError("bksmode must be one of 'off',"
" 'median', 'average'; you specified '%s'" % bksmode)
if algorithm:
if algorithm == "unweighted":
arglist.append("-a")
arglist.append("unweighted")
elif algorithm == "sc2d":
arglist.append("-a")
arglist.append("unweighted")
arglist.append("-idt")
else:
raise RuntimeError("algorithm must be either 'unweighted'"
" or 'sc2d'; you specified '%s'" % algorithm)
if xoffset is not None:
arglist.append("-st")
arglist.append("%.10g" % xoffset)
if verbose:
print("Running x1d on {}".format(infile))
print(" {}".format(arglist))
status = subprocess.call(arglist, stdout=fd_trailer,
stderr=subprocess.STDOUT)
if status:
cumulative_status = 1
if verbose:
print("Warning: status = {}".format(status))
if f_trailer is not None:
f_trailer.close()
return cumulative_status
#-------------------------#
# Interfaces used by TEAL #
#-------------------------#
def getHelpAsString(fulldoc=True):
"""Return documentation on the x1d function."""
return x1d.__doc__
def run(configobj=None):
"""TEAL interface for the x1d function."""
x1d(input=configobj["input"],
output=configobj["output"],
backcorr=configobj["backcorr"],
ctecorr=configobj["ctecorr"],
dispcorr=configobj["dispcorr"],
helcorr=configobj["helcorr"],
fluxcorr=configobj["fluxcorr"],
sporder=configobj["sporder"],
a2center=configobj["a2center"],
maxsrch=configobj["maxsrch"],
globalx=configobj["globalx"],
extrsize=configobj["extrsize"],
bk1size=configobj["bk1size"],
bk2size=configobj["bk2size"],
bk1offst=configobj["bk1offst"],
bk2offst=configobj["bk2offst"],
bktilt=configobj["bktilt"],
backord=configobj["backord"],
bksmode=configobj["bksmode"],
bksorder=configobj["bksorder"],
blazeshift=configobj["blazeshift"],
algorithm=configobj["algorithm"],
xoffset=configobj["xoffset"],
verbose=configobj["verbose"],
timestamps=configobj["timestamps"],
trailer=configobj["trailer"],
print_version=configobj["print_version"],
print_revision=configobj["print_revision"])
if __name__ == "__main__":
main(sys.argv[1:])
| 13,036 | 28.697039 | 88 |
py
|
stistools
|
stistools-master/stistools/ocrreject.py
|
#! /usr/bin/env python
import os
import sys
import getopt
import glob
import subprocess
from stsci.tools import parseinput, teal
__doc__ = """
Add STIS exposures, rejecting cosmic rays.
Examples
--------
In Python without TEAL:
>>> import stistools
>>> stistools.ocrreject.ocrreject("o3tt02020_flt.fits",
... "o3tt02020_crj.fits", verbose=True, trailer="o3tt02020.trl")
In Python with TEAL:
>>> from stistools import ocrreject
>>> from stsci.tools import teal
>>> teal.teal("ocrreject")
From command line::
% ./ocrreject.py -v -s o3tt02020_flt.fits o3tt02020_crj.fits
% ./ocrreject.py -r
"""
__taskname__ = "ocrreject"
__version__ = "3.4"
__vdate__ = "13-November-2013"
__author__ = "Phil Hodge, STScI, November 2013."
def main(args):
if len(args) < 2:
prtOptions()
print("At least input and output file names must be specified.")
sys.exit()
try:
        # "-v" is a flag (it takes no argument), so the option string
        # must be "rtv", not "rtv:".
        (options, pargs) = getopt.getopt(args, "rtv",
                                         ["version"])
    except Exception as error:
        print(error)
        prtOptions()
        sys.exit()
verbose = False
timestamps = False
for i in range(len(options)):
if options[i][0] == "--version":
status = subprocess.call(["cs2.e", "--version"])
return 0
if options[i][0] == "-r":
status = subprocess.call(["cs2.e", "-r"])
return 0
elif options[i][0] == "-v":
verbose = True
elif options[i][0] == "-t":
timestamps = True
nargs = len(pargs)
if nargs != 2:
prtOptions()
sys.exit()
input = pargs[0]
output = pargs[1]
status = ocrreject(input, output,
verbose=verbose, timestamps=timestamps)
sys.exit(status)
def prtOptions():
"""Print a list of command-line options and arguments."""
print("The command-line options are:")
print(" --version (print the version number and exit)")
print(" -r (print the full version string and exit)")
print(" -v (verbose)")
print(" -t (print timestamps)")
print("")
print("Following the options, list one or more input files")
print(" (enclosed in quotes if more than one file name is specified")
print(" and/or if wildcards are used) and one output file name.")
def ocrreject(input, output,
all=True, crrejtab="", scalense="", initgues="",
skysub="", crsigmas="",
crradius=None, crthresh=None, badinpdq=None,
crmask="",
verbose=False, timestamps=False,
trailer="", print_version=False, print_revision=False):
"""Find and reject cosmic rays in STIS data.
Parameters
----------
input: str
Name of the input file or files.
output: str
Name of the output file. See all for further information.
all: bool
If True (the default), combine all input files into one output
file. In this case, output should just be one file name. If
False, the number of input and output file names must be the same.
crrejtab: str
This argument may be used to override the CRREJTAB value in the
primary headers of the input files.
scalense: str
If specified, this overrides SCALENSE in the CRREJTAB.
initgues: str
If specified, this overrides INITGUES in the CRREJTAB. The
allowed values are "min" and "med" and "".
skysub: str
If specified, this overrides SKYSUB in the CRREJTAB. The
allowed values are "none", "mode" and "".
crsigmas: str
If specified, this overrides CRSIGMAS in the CRREJTAB. The
value should be a comma-separated string of one or more
integer or float values. For each such value, calstis will
perform one cosmic-ray-rejection cycle, with the sigma taken
from the numerical value that was specified.
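        For example, crsigmas="8,6,4" requests three rejection cycles
        with sigma values of 8, 6 and 4 in turn.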
crradius: float or None
If not None, this overrides CRRADIUS in the CRREJTAB. This is
the rejection propagation radius in pixels (e.g. 1.5). After
finding an outlier (a cosmic ray hit), adjacent pixels can also
be flagged and excluded. Neighboring pixels will be rejected if
their values are discrepant by more than crthresh * sigmas * noise,
where noise is based on the noise model (i.e. Poisson noise and
readout noise).
crthresh: float or None
If not None, this overrides CRTHRESH in the CRREJTAB. This is the
rejection propagation threshold (e.g. 0.8). If crthresh = 0 then
all adjacent pixels (see crradius) will be rejected.
badinpdq: int or None
If specified, this overrides BADINPDQ in the CRREJTAB. This is a
data quality flag (or bitwise OR of flags) to allow rejection of
pixels in the input images when forming the "guess" image (the
image with which to compare the input images when looking for
outliers).
crmask: str
If specified, this overrides CRMASK in the CRREJTAB. crmask =
"yes" means that the cosmic rays that are detected should be
flagged in the DQ (data quality) extensions of the input files.
verbose: bool
If True, calstis will print more info.
timestamps: bool
If True, calstis will print the date and time at various points
during processing.
trailer: str
If specified, the standard output and standard error will be
written to this file instead of to the terminal. Note, however,
that if print_version or print_revision is specified, the value
will be printed to the terminal, and any name given for the
trailer will be ignored.
print_version: bool
If True, calstis will print the version number (a string) and
then return 0.
print_revision: bool
If True, calstis will print the full version string and then
return 0.
Returns
-------
status: int
0 is OK.
1 is returned if cs2.e (the calstis host executable) returned a
non-zero status. If verbose is True, the value returned by cs2.e
will be printed.
"""
if print_version:
status = subprocess.call(["cs2.e", "--version"])
return 0
if print_revision:
status = subprocess.call(["cs2.e", "-r"])
return 0
# infiles may include one or more file names, separated by blanks
# or commas (or both), and any name may include wildcards.
infiles = []
input1 = input.split()
for in1 in input1:
input2 = in1.split(",")
for in2 in input2:
files = glob.glob(in2)
infiles.extend(files)
if input1 and not infiles:
print("No file name matched the string '{}'".format(input))
return 2
outfiles = []
output1 = output.split()
for out1 in output1:
if out1:
output2 = out1.split(",")
for out2 in output2:
if out2:
outfiles.append(out2)
n_outfiles = len(outfiles)
if all:
if n_outfiles != 1:
print("You specified {} output files; when all is True,".format(
n_outfiles))
print("output must be exactly one file name.")
return 2
else:
n_infiles = len(infiles)
if n_outfiles != n_infiles:
print("You specified {} input files but {} output files;".format(
n_infiles, n_outfiles))
print("the number of input and output files must be the same.")
return 2
if trailer:
if verbose and os.access(trailer, os.F_OK):
print("Appending to trailer file {}".format(trailer))
f_trailer = open(trailer, "a")
fd_trailer = f_trailer.fileno()
else:
f_trailer = None
fd_trailer = None
optional_args = []
if crrejtab:
optional_args.append("-table")
optional_args.append(crrejtab)
if scalense:
optional_args.append("-scale")
optional_args.append(scalense)
if initgues:
optional_args.append("-init")
optional_args.append(initgues)
if skysub:
optional_args.append("-sky")
optional_args.append(skysub)
if crsigmas:
optional_args.append("-sigmas")
optional_args.append(crsigmas)
if crradius:
optional_args.append("-radius")
optional_args.append("%.10g" % crradius)
if crthresh:
optional_args.append("-thresh")
optional_args.append("%.10g" % crthresh)
if badinpdq:
optional_args.append("-pdq")
optional_args.append("%d" % badinpdq)
if crmask:
if crmask == "yes":
optional_args.append("-crmask")
optional_args.append("yes")
elif crmask == "no":
optional_args.append("-crmask")
optional_args.append("no")
else:
raise RuntimeError("crmask = %s, must be yes or no." % crmask)
if all:
arglist = ["cs2.e"]
infilestr = "%s" % infiles[0]
n_infiles = len(infiles)
for i in range(1, n_infiles):
infilestr += " %s" % infiles[i]
arglist.append(infilestr)
arglist.append(output)
if verbose:
arglist.append("-v")
if timestamps:
arglist.append("-t")
arglist.extend(optional_args)
if verbose:
print("'{}'".format(str(arglist)))
print("Running ocrreject on {}".format(infilestr))
del infilestr
status = subprocess.call(arglist, stdout=fd_trailer,
stderr=subprocess.STDOUT)
if status and verbose:
print("Warning: status = {}".format(status))
cumulative_status = status
else:
cumulative_status = 0
for (i, infile) in enumerate(infiles):
arglist = ["cs2.e"]
arglist.append(infile)
arglist.append(outfiles[i])
if verbose:
arglist.append("-v")
if timestamps:
arglist.append("-t")
arglist.extend(optional_args)
if verbose:
print("Running ocrreject on {}".format(infile))
print(" {}".format(str(arglist)))
status = subprocess.call(arglist, stdout=fd_trailer,
stderr=subprocess.STDOUT)
if status:
cumulative_status = 1
if verbose:
print("Warning: status = {}".format(status))
if f_trailer is not None:
f_trailer.close()
return cumulative_status
#-------------------------#
# Interfaces used by TEAL #
#-------------------------#
def getHelpAsString(fulldoc=True):
"""Return documentation on the ocrreject function."""
return ocrreject.__doc__
def run(configobj=None):
"""TEAL interface for the ocrreject function."""
ocrreject(input=configobj["input"],
output=configobj["output"],
all=configobj["all"],
crrejtab=configobj["crrejtab"],
scalense=configobj["scalense"],
initgues=configobj["initgues"],
skysub=configobj["skysub"],
crsigmas=configobj["crsigmas"],
crradius=configobj["crradius"],
crthresh=configobj["crthresh"],
badinpdq=configobj["badinpdq"],
crmask=configobj["crmask"],
verbose=configobj["verbose"],
timestamps=configobj["timestamps"],
trailer=configobj["trailer"],
print_version=configobj["print_version"],
print_revision=configobj["print_revision"])
if __name__ == "__main__":
main(sys.argv[1:])
| 11,774 | 30.483957 | 77 |
py
|
stistools
|
stistools-master/stistools/__init__.py
|
from __future__ import absolute_import
from .version import *
from . import calstis
from . import basic2d
from . import ocrreject
from . import wavecal
from . import x1d
from . import x2d
from . import mktrace
from . import sshift
from . import stisnoise
from . import wx2d
from . import inttag
from . import doppinfo
from . import tastis
from . import ctestis
from . import defringe
# These lines allow TEAL to print out the names of TEAL-enabled tasks
# upon importing this package.
import os
from stsci.tools import teal
teal.print_tasknames(__name__, os.path.dirname(__file__))
| 583 | 23.333333 | 69 |
py
|
stistools
|
stistools-master/stistools/radialvel.py
|
import numpy as N
DEG_RAD = N.pi / 180. # degrees to radians
ARCSEC_RAD = N.pi / (180.*3600.) # arcseconds to radians
REFDATE = 51544.5 # MJD for 2000 Jan 1, 12h UT
KM_AU = 1.4959787e8 # kilometers per astronomical unit
SEC_DAY = 86400. # seconds per day
def radialVel(ra_targ, dec_targ, mjd):
"""Compute the heliocentric velocity of the Earth.
This function computes the radial velocity of a target based on the
Earth's orbital velocity around the Sun. The space motion of the
target is not taken into account. That is, the radial velocity is
just the negative of the component of the Earth's orbital velocity
in the direction toward the target.
Parameters
-----------
ra_targ : float
right ascension of the target (degrees)
dec_targ : float
declination of the target (degrees)
mjd : float
Modified Julian Date at the time of observation
Returns
--------
radial_vel : float
the radial velocity in km/s
"""
# Convert target position to rectangular coordinate unit vector.
ra = ra_targ * DEG_RAD
dec = dec_targ * DEG_RAD
target = N.zeros(3, dtype=N.float64)
target[0] = N.cos(dec) * N.cos(ra)
target[1] = N.cos(dec) * N.sin(ra)
target[2] = N.sin(dec)
# Precess the target coordinates from J2000 to the current date.
target = precess(mjd, target)
# Get the Earth's velocity vector (km/sec).
velocity = earthVel(mjd)
# Dot product.
vel_r = N.dot(velocity, target)
return -vel_r
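    # Example usage (hypothetical coordinates and date):
    #     v = radialVel(201.3, -43.0, 55500.0)
    # v is positive when the Earth's orbital motion carries it away from
    # the target at that epoch.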
def earthVel(mjd):
"""Compute and return the velocity of the Earth at the specified time.
This function computes the Earth's orbital velocity around the Sun
in celestial rectangular coordinates. The expressions are from the
Astronomical Almanac, p C24, which gives low precision formulas for
the Sun's coordinates. We'll apply these formulas directly to get
the velocity of the Sun relative to the Earth, then we'll convert to
km per sec and change the sign to get the velocity of the Earth.
Notes
-----
We get the velocity of the Sun relative to the Earth as follows:
The velocity in the ecliptic plane with the X-axis aligned with the
radius vector is:
- Vx = radius_dot,
- Vy = radius * elong_dot,
- Vz = 0
where:
- radius is the radial distance from Earth to Sun
- elong is the ecliptic longitude of the Sun
- eps is the obliquity of the ecliptic
- _dot means the time derivative
Rotate in the XY-plane by elong to get the velocity in ecliptic
coordinates::
radius_dot * cos (elong) - radius * elong_dot * sin (elong)
radius_dot * sin (elong) + radius * elong_dot * cos (elong)
0
Rotate in the YZ-plane by eps to get the velocity in equatorial
coordinates::
radius_dot * cos (elong) - radius * elong_dot * sin (elong)
(radius_dot * sin (elong) + radius * elong_dot * cos (elong)) * cos (eps)
(radius_dot * sin (elong) + radius * elong_dot * cos (elong)) * sin (eps)
Parameters
----------
mjd : float
time, Modified Julian Date
Returns
-------
vel : ndarray
the velocity vector of the Earth around the Sun, in
celestial coordinates (shape=(3,),ndtype=float64)
"""
# All angular values are in radians.
# g = mean anomaly
# L = mean longitude, corrected for aberration
# elong = ecliptic longitude
# radius = distance to Sun in AU
# eps = obliquity of ecliptic
# time in days since JD 2451545.0
tdays = mjd - REFDATE
g_dot = 0.9856003 * DEG_RAD
L_dot = 0.9856474 * DEG_RAD
eps = (23.439 - 0.0000004 * tdays) * DEG_RAD
g = ((357.528 + 0.9856003 * tdays) % 360.) * DEG_RAD
L = ((280.461 + 0.9856474 * tdays) % 360.) * DEG_RAD
# 1.915 degrees 0.02 degree
elong = L + 0.033423 * N.sin(g) + 0.000349 * N.sin(2.*g)
elong_dot = L_dot + \
0.033423 * N.cos(g) * g_dot + \
0.000349 * N.cos(2.*g) * 2.*g_dot
radius = 1.00014 - 0.01671 * N.cos(g) - 0.00014 * N.cos(2.*g)
radius_dot = 0.01671 * N.sin(g) * g_dot + \
0.00014 * N.sin(2.*g) * 2.*g_dot
x_dot = radius_dot * N.cos(elong) - \
radius * N.sin(elong) * elong_dot
y_dot = radius_dot * N.cos(eps) * N.sin(elong) + \
radius * N.cos(eps) * N.cos(elong) * elong_dot
z_dot = radius_dot * N.sin(eps) * N.sin(elong) + \
radius * N.sin(eps) * N.cos(elong) * elong_dot
# Convert to km/sec with Sun as origin.
velocity = N.zeros(3, dtype=N.float64)
velocity[0] = -x_dot * KM_AU / SEC_DAY
velocity[1] = -y_dot * KM_AU / SEC_DAY
velocity[2] = -z_dot * KM_AU / SEC_DAY
return velocity
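    # Quick check: the magnitude of the returned vector,
    # N.sqrt(N.sum(earthVel(mjd)**2)), should be close to the Earth's
    # mean orbital speed of about 29.8 km/s for any mjd.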
def precess(mjd, target):
"""Precess target coordinates from J2000 to the date mjd.
Notes
-----
target can be a single vector, e.g. [x0, y0, z0], or it can be
a 2-D array; in the latter case, the shape should be (n,3)::
target = [[x0, x1, x2, x3, x4],
[y0, y1, y2, y3, y4],
[z0, z1, z2, z3, z4]]
The algorithm used in this function was based on [1]_ and [2]_.
References
----------
.. [1] Lieske, et al. 1976, Astron & Astrophys vol 58, p 1.
.. [2] J.H. Lieske, 1979, Astron & Astrophys vol 73, 282-284.
Parameters
-----------
mjd : float
time, Modified Julian Date
target : array_like object
unit vector pointing toward the target, J2000 coordinates
Returns
-------
vector : ndarray
the target vector (or matrix) precessed to mjd as an
array object of type float64 and the same shape as target,
i.e. either (3,) or (n,3)
"""
target_j2000 = N.array(target, dtype=N.float64)
target_mjd = target_j2000.copy()
dt = (mjd - REFDATE) / 36525.
dt2 = dt**2
dt3 = dt**3
zeta = (2306.2181 * dt + 0.30188 * dt2 + 0.017998 * dt3) * ARCSEC_RAD
z = (2306.2181 * dt + 1.09468 * dt2 + 0.018203 * dt3) * ARCSEC_RAD
theta = (2004.3109 * dt - 0.42665 * dt2 - 0.041833 * dt3) * ARCSEC_RAD
cos_zeta = N.cos(zeta)
sin_zeta = N.sin(zeta)
cos_z = N.cos(z)
sin_z = N.sin(z)
cos_theta = N.cos(theta)
sin_theta = N.sin(theta)
# Create the rotation matrix.
a = N.identity(3, dtype=N.float64)
a[0, 0] = cos_z * cos_theta * cos_zeta - sin_z * sin_zeta
a[0, 1] = -cos_z * cos_theta * sin_zeta - sin_z * cos_zeta
a[0, 2] = -cos_z * sin_theta
a[1, 0] = sin_z * cos_theta * cos_zeta + cos_z * sin_zeta
a[1, 1] = -sin_z * cos_theta * sin_zeta + cos_z * cos_zeta
a[1, 2] = -sin_z * sin_theta
a[2, 0] = sin_theta * cos_zeta
a[2, 1] = -sin_theta * sin_zeta
a[2, 2] = cos_theta
# Convert to matrix objects.
m_a = N.matrix(a)
m_target_j2000 = N.matrix(target_j2000)
# The prefix "m_" indicates that the product is actually a matrix.
m_target_mjd = m_a * m_target_j2000.T
# Return a simple array (rather than a matrix).
return m_target_mjd.T.A[0]
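    # Since precession is a pure rotation, the result has the same length
    # as the input; for a unit vector v, N.dot(precess(mjd, v),
    # precess(mjd, v)) stays 1 to rounding error.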
| 7,261 | 30.034188 | 80 |
py
|
stistools
|
stistools-master/stistools/x2d.py
|
#! /usr/bin/env python
import os
import sys
import getopt
import glob
import subprocess
from stsci.tools import parseinput, teal
__doc__ = """
Rectify 2-D STIS spectral data.
Examples
--------
In Python without TEAL:
>>> import stistools
>>> stistools.x2d.x2d("o66p01020_flt.fits", output="test_x2d.fits",
... verbose=True, trailer="o66p01020.trl")
In Python with TEAL:
>>> from stistools import x2d
>>> from stsci.tools import teal
>>> teal.teal("x2d")
From command line::
% ./x2d.py -v o66p01020_flt.fits o66p01020_x2d.fits
% ./x2d.py -r
"""
__taskname__ = "x2d"
__version__ = "3.4"
__vdate__ = "13-November-2013"
__author__ = "Phil Hodge, STScI, November 2013."
def main(args):
if len(args) < 1:
prtOptions()
print("At least an input file name must be specified.")
sys.exit()
try:
        # "-v" is a flag (it takes no argument), so the option string
        # must be "rtv", not "rtv:".
        (options, pargs) = getopt.getopt(args, "rtv",
                                         ["version"])
    except Exception as error:
        print(error)
        prtOptions()
        sys.exit()
output = ""
blazeshift = None
verbose = False
timestamps = False
for i in range(len(options)):
if options[i][0] == "--version":
status = subprocess.call(["cs7.e", "--version"])
return 0
if options[i][0] == "-r":
status = subprocess.call(["cs7.e", "-r"])
return 0
if options[i][0] == "-v":
verbose = True
if options[i][0] == "-t":
timestamps = True
nargs = len(pargs)
if nargs < 1 or nargs > 2:
prtOptions()
sys.exit()
input = pargs[0]
if nargs == 2:
output = pargs[1]
status = x2d(input, output=output,
verbose=verbose, timestamps=timestamps)
sys.exit(status)
def prtOptions():
"""Print a list of command-line options and arguments."""
print("The command-line options are:")
print(" --version (print the version number and exit)")
print(" -r (print the full version string and exit)")
print(" -v (verbose)")
print(" -t (print timestamps)")
print("")
print("Following the options, list one or more input file names,")
print(" enclosed in quotes if more than one file name is specified")
print(" and/or if wildcards are used.")
print("One or more output file names may be specified (the same number")
print(" as the input file names).")
def x2d(input, output="",
helcorr="perform", fluxcorr="perform", statflag=True,
center=False, blazeshift=None, err_alg="wgt_var",
verbose=False, timestamps=False, trailer="",
print_version=False, print_revision=False):
"""Rectify 2-D STIS spectral data.
Parameters
----------
input: str
Name of the input raw file.
output: str
Name of the output file, or "" (the default). If no name was
specified, the output name will be constructed from the input name.
helcorr: str
If "perform", correct for heliocentric Doppler shift.
fluxcorr: str
If "perform", convert to absolute flux.
statflag: bool
If True, compute statistics for image arrays and update keywords.
center: bool
If True, center the target spectrum in the cross-dispersion
direction. For G140L and G140M spectra, the target has at
different times been offset to a location either above or below
the middle of the detector, to avoid the repeller wire. This
argument allows more convenient comparison of data taken at
widely different times.
blazeshift: float or None
Blaze shift (in pixels). None means blazeshift is not specified.
err_alg: str
Algorithm for computing error estimates. The default is "wgt_var",
which means that the weight (for bilinear interpolation) is applied
to the variances of the input pixels. The alternative is
"wgt_err", to specify that the weight should be applied to the
errors of the input pixels.
verbose: bool
If True, calstis will print more info.
timestamps: bool
If True, calstis will print the date and time at various points
during processing.
trailer: str
If specified, the standard output and standard error will be
written to this file instead of to the terminal. Note, however,
that if print_version or print_revision is specified, the value
will be printed to the terminal, and any name given for the
trailer will be ignored.
print_version: bool
If True, calstis will print the version number (a string) and
then return 0.
print_revision: bool
If True, calstis will print the full version string and then
return 0.
Returns
-------
status: int
0 is OK.
1 is returned if cs7.e (the calstis host executable) returned a
non-zero status. If verbose is True, the value returned by cs7.e
will be printed.
2 is returned if the specified input file or files were not found,
or the numbers of input and output files (if the latter was
specified) are not the same.
"""
if print_version:
status = subprocess.call(["cs7.e", "--version"])
return 0
if print_revision:
status = subprocess.call(["cs7.e", "-r"])
return 0
cumulative_status = 0
# infiles may include one or more file names, separated by blanks
# or commas (or both), and any name may include wildcards.
infiles = []
input1 = input.split()
for in1 in input1:
input2 = in1.split(",")
for in2 in input2:
files = glob.glob(in2)
infiles.extend(files)
if input1 and not infiles:
print("No file name matched the string '{}'".format(input))
return 2
if output:
outfiles = []
output1 = output.split()
for out1 in output1:
if out1:
output2 = out1.split(",")
for out2 in output2:
if out2:
outfiles.append(out2)
else:
outfiles = None
n_infiles = len(infiles)
if outfiles and len(outfiles) != n_infiles:
print("You specified {} input files but {} output files.".format
(n_infiles, len(outfiles)))
print("The number of input and output files must be the same.")
return 2
if trailer:
if verbose and os.access(trailer, os.F_OK):
print("Appending to trailer file {}".format(trailer))
f_trailer = open(trailer, "a")
fd_trailer = f_trailer.fileno()
else:
f_trailer = None
fd_trailer = None
for (i, infile) in enumerate(infiles):
arglist = ["cs7.e"]
arglist.append(infile)
if outfiles is not None:
arglist.append(outfiles[i])
if verbose:
arglist.append("-v")
if timestamps:
arglist.append("-t")
switch_was_set = False
if helcorr == "perform":
arglist.append("-hel")
switch_was_set = True
if fluxcorr == "perform":
arglist.append("-flux")
switch_was_set = True
if not switch_was_set:
arglist.append("-x2d")
if err_alg:
if err_alg == "wgt_err":
arglist.append("-wgt_err")
elif err_alg != "wgt_var":
raise RuntimeError("err_alg must be either 'wgt_err'"
" or 'wgt_var'; you specified '%s'" % err_alg)
if blazeshift is not None:
arglist.append("-b")
arglist.append("%.10g" % blazeshift)
if verbose:
print("Running x2d on {}".format(infile))
print(" {}".format(arglist))
status = subprocess.call(arglist, stdout=fd_trailer,
stderr=subprocess.STDOUT)
if status:
cumulative_status = 1
if verbose:
print("Warning: status = {}".format(status))
if f_trailer is not None:
f_trailer.close()
return cumulative_status
#-------------------------#
# Interfaces used by TEAL #
#-------------------------#
def getHelpAsString(fulldoc=True):
"""Return documentation on the x2d function."""
return x2d.__doc__
def run(configobj=None):
"""TEAL interface for the x2d function."""
x2d(input=configobj["input"],
output=configobj["output"],
helcorr=configobj["helcorr"],
fluxcorr=configobj["fluxcorr"],
statflag=configobj["statflag"],
center=configobj["center"],
blazeshift=configobj["blazeshift"],
err_alg=configobj["err_alg"],
verbose=configobj["verbose"],
timestamps=configobj["timestamps"],
trailer=configobj["trailer"],
print_version=configobj["print_version"],
print_revision=configobj["print_revision"])
if __name__ == "__main__":
main(sys.argv[1:])
| 9,046 | 28.469055 | 81 |
py
|
stistools
|
stistools-master/stistools/wx2d.py
|
#! /usr/bin/env python
import sys
import os
import os.path
import math
import numpy as N
from scipy import signal as convolve
from astropy.io import fits
from . import gettable
from . import wavelen
from . import r_util
__version__ = "1.3 (2016 Feb 24)"
def wx2d(input, output, wavelengths=None, helcorr="",
algorithm="wavelet",
trace=None, order=7, subdiv=8, psf_width=0., rows=None,
subsampled=None, convolved=None):
"""Resample the input, correcting for geometric distortion.
Parameters
----------
input : string
name of input file containing an image set
output : string
name of the output file
wavelengths : string, optional [Default: None]
name of the output file for wavelengths
helcorr : string
specify "perform" or "omit" to override header keyword
algorithm : {'wavelet', 'kd'}
algorithm to use in resampling the input
trace : string or array, or None
trace array, or name of FITS table containing trace(s)
order : int [Default: 7]
polynomial order (an odd number, e.g. 5 or 7)
subdiv : int [Default: 8]
number of subpixels (a power of 2, e.g. 8 or 16)
psf_width : float [Default: 0.]
width of PSF for convolution (e.g. 1.3);
0 means no convolution
rows : tuple, optional [Default: None]
a tuple giving the slice of rows to process; output values
in all other rows will be set to zero.
The default of None means all rows, same as (0, 1024)
subsampled : string, optional [Default: None]
name of the output file with the subsampled image
convolved : string, optional [Default: None]
name of the output file with the convolved image
"""
if algorithm != "wavelet" and algorithm != "kd":
raise ValueError("algorithm can only be 'wavelet' or 'kd'")
if psf_width <= 0. and convolved is not None:
print("Warning: 'convolved' will be ignored because psf_width=0")
convolved = None
if algorithm != "wavelet" \
and (subsampled is not None or convolved is not None):
raise ValueError(
"cannot specify 'subsampled' or 'convolved' if algorithm='kd'")
helcorr = helcorr.upper()
ft = fits.open(input)
nextend = ft[0].header.get("nextend", default=3)
n_imsets = nextend // 3 # number of image sets
if ft[1].header.get("ltm2_2", default=1.0) != 1.0:
raise RuntimeError("can't handle data binned in the Y direction")
phdu = ft[0] # primary header/data unit
# If trace was not specified, get the file name and selection keywords
# from the header.
tracefile = trace_name(trace, phdu.header)
# Update the primary header, in preparation for writing to a new file.
phdu.header.set("WX2DCORR", "COMPLETE", "this file is output from wx2d",
before="X2DCORR")
is_an_array = isinstance(tracefile, N.ndarray)
if is_an_array:
phdu.header.add_history("trace array was specified explicitly")
else:
phdu.header.add_history("trace file " + tracefile)
phdu.header.add_history("order = " + repr(order))
phdu.header.add_history("subdiv = " + repr(subdiv))
phdu.header.add_history("psf_width = " + repr(psf_width))
# Write the primary header to the output file(s);
# we'll append each extension in wx2d_imset.
phdu.header.set("nextend", 0) # no extensions yet
phdu.header.set("filename", os.path.basename(output))
phdu.writeto(output)
if wavelengths is not None:
phdu.header.set("filename", os.path.basename(wavelengths))
phdu.writeto(wavelengths)
if subsampled is not None:
phdu.header.set("filename", os.path.basename(subsampled))
phdu.writeto(subsampled)
if convolved is not None:
phdu.header.set("filename", os.path.basename(convolved))
phdu.writeto(convolved)
for imset0 in range(n_imsets):
wx2d_imset(ft, imset0+1, output, wavelengths, helcorr,
algorithm,
tracefile, order, subdiv, psf_width, rows,
subsampled, convolved)
ft.close()
def wx2d_imset(ft, imset, output, wavelengths, helcorr,
algorithm,
tracefile, order, subdiv, psf_width, rows,
subsampled, convolved):
"""Resample one image set, and append to output file(s).
Parameters
----------
ft : HDUList
Fits HDUList object for the input file.
imset : int
one-indexed image set number
output : string
name of the output file
wavelengths : string or None
name of the output file for wavelengths
helcorr : {'perform', 'omit'}
specify "perform" or "omit" to override header keyword
algorithm : {"wavelet","kd"}
algorithm to use to process input
tracefile : string or array
trace array, or name of FITS table containing trace(s)
order : int
polynomial order
subdiv : int
number of subpixels
psf_width : float
width of PSF for convolution
rows : tuple
a tuple giving the slice of rows to process
subsampled : string, or None
name of the output file with the subsampled image
convolved : string, or None
name of the output file with the convolved image
"""
hdu = ft[("SCI", imset)]
errhdu = ft[("ERR", imset)]
header = hdu.header
nrows, ncols = hdu.data.shape
original_nrows = nrows
if rows is None:
rows = (0, nrows)
else:
rows = (max(rows[0], 0), min(rows[1], nrows))
nrows = rows[1] - rows[0]
if original_nrows > nrows and imset == 1:
ft[0].header.add_history("rows from %d to %d" % (rows[0]+1, rows[1]))
# extract the subset
if original_nrows > nrows:
img = hdu.data[rows[0]:rows[1]]
errimg = errhdu.data[rows[0]:rows[1]]
else:
img = hdu.data
errimg = errhdu.data
# offset of a row from nominal
shifta2 = header.get("shifta2", default=0.)
# offset to be applied to the trace array
offset = rows[0] - header.get("ltv2", default=0.) + shifta2
# Read the array of spectral traces (a2displ), and bin if the input
# data are binned in the dispersion direction.
(a2center, a2displ) = get_trace(tracefile, ft[0].header, header)
if algorithm == "wavelet":
(hdu.data, errhdu.data) = wavelet_resampling(hdu, img, errimg,
original_nrows, nrows, ncols, rows,
a2center, a2displ, offset, shifta2,
imset, order, subdiv, psf_width,
subsampled, convolved)
else:
(hdu.data, errhdu.data) = kd_resampling(img, errimg,
original_nrows, nrows, ncols, rows,
a2center, a2displ, offset, shifta2)
del img
# Write the SCI and ERR HDUs to the output file.
ofd = fits.open(output, mode="update")
ofd.append(hdu)
ofd.append(errhdu)
ofd.close()
if wavelengths is not None:
# Write an array of wavelengths.
wl_hdu = fits.ImageHDU(header=hdu.header)
if not helcorr:
helcorr = ft[0].header.get("helcorr", default="OMIT").upper()
if ft[0].header.get("sclamp", default="NONE") != "NONE":
helcorr = "OMIT"
wl_hdu.data = wavelen.compute_wavelengths((original_nrows, ncols),
ft[0].header, header, helcorr)
ofd = fits.open(wavelengths, mode="update")
ofd[0].header.set("nextend", imset)
if helcorr == "PERFORM":
ofd[0].header.set("helcorr", "COMPLETE")
else:
ofd[0].header.set("helcorr", "OMIT")
ofd.append(wl_hdu)
ofd.close()
# Create data quality array.
hdu = ft[("DQ", imset)]
im3 = hdu.data[rows[0]:rows[1]]
nrows, ncols = im3.shape
image3 = N.zeros((subdiv*nrows, ncols), dtype=im3.dtype)
for j in range(subdiv):
image3[j::subdiv, :] = im3
del im3
hdu.data[:, :] = 4 # "bad detector pixel or beyond aperture"
hdu.data[rows[0]:rows[1]] = \
apply_trace(image3, a2center, a2displ, subdiv, offset, shifta2, "DQ")
del image3
# Write the DQ HDU to the output file.
ofd = fits.open(output, mode="update")
ofd[0].header.set("nextend", imset*3)
ofd.append(hdu)
ofd.close()
def wavelet_resampling(hdu, img, errimg,
original_nrows, nrows, ncols, rows,
a2center, a2displ, offset, shifta2,
imset, order, subdiv, psf_width,
subsampled, convolved):
"""Resample img and errimg using wavelets.
Parameters
----------
hdu : fits header/data unit object
header/data unit for a SCI extension
img : ndarray
SCI image array (could be a subset of full image)
errimg : ndarray
ERR image array (could be a subset of full image)
original_nrows : int
number of image lines (NAXIS2) in input image
nrows : int
number of image lines in subset
ncols : int
number of image columns (NAXIS1)
rows : tuple
tuple giving the slice of rows to process
a2center : ndarray
1-D array of Y locations
a2displ : ndarray
array of traces, one for each a2center; the length of each
trace must be the same as the number of columns in the input image
offset : float
offset of the first row in 'image' from the beginning of
the data block in the original file, needed for trace
shifta2 : float
offset of the row from nominal (from shifta2 keyword)
imset : int
number of the current image set (keyword EXTVER)
order : int
polynomial order
subdiv : int
number of subpixels per input pixel
psf_width : float
width of PSF for convolution (e.g. 1.3);
subsampled : string or None
name of the output file with the subsampled image
convolved : string or None
name of the output file with the convolved image
Returns
--------
img_arr: tuple of ndarrays
the image and error arrays (to replace the input img and errimg)
"""
sub5 = N.zeros((nrows*subdiv, ncols), dtype=N.float32)
err5 = N.zeros((nrows*subdiv, ncols), dtype=N.float32)
for j in range(0, subdiv):
sub5[j::subdiv] = img
err5[j::subdiv] = errimg
# Wavelet interpolation loop
# With each iteration, the number of rows is doubled and the values
# in the tmp array (the subsampled SCI data) are halved; the SCI data
# will later be extracted by summing subsampled values along a column.
# In contrast, the ERR data will be extracted by taking the square root
# of the average of the squares of the err array, so the err array
# values should remain approximately constant rather than halving with
# each iteration; this is the explanation for the extra factor of 2.
# For CCD data the minimum value in the errimg array should be the
# readout noise divided by the square root of the number of images
# that have been combined, but for MAMA data it could be 0.
# minerr is this minimum value, but set a lower limit of 1.
minerr = max(1., errimg.min())
step = subdiv
while step > 1:
tmp = N.zeros((nrows*2*subdiv//step, ncols), dtype=N.float32)
err = tmp.copy()
tmp[::2] = sub5[::step]
tmp[1::2] = inv_avg_interp(order, tmp[::2])
tmp = inv_haar(tmp)
# for computing the error, use minerr as the minimum flux
err_tmp0 = N.maximum(tmp[0::2], minerr)
err_tmp1 = N.maximum(tmp[1::2], minerr)
err[0::2] = N.sqrt(2.*err_tmp0 / (err_tmp0 + err_tmp1)) * err5[::step]
err[1::2] = N.sqrt(2.*err_tmp1 / (err_tmp0 + err_tmp1)) * err5[::step]
        step //= 2
        # the j = 0 pass below already assigns sub5[::step] = tmp and
        # err5[::step] = err, so no separate assignment is needed
        for j in range(step):
            sub5[j::step] = tmp
            err5[j::step] = err
del (err_tmp0, err_tmp1)
if subsampled is not None:
hdu.data = sub5.copy()
ofd = fits.open(subsampled, mode="update")
ofd[0].header.set("nextend", imset)
ofd.append(hdu)
ofd.close()
# Optional PSF convolution
if psf_width > 0.:
cnv = N.zeros(sub5.shape, dtype=N.float32)
krn = N.array([stis_psf(float(j), psf_width*subdiv)
for j in range(-32, 32)])
krn = krn / N.sum(krn)
for j in range(sub5.shape[1]):
cnv[:, j] = convolve.convolve(sub5[:, j], krn, mode='same') # same size
if convolved is not None:
hdu.data = cnv.copy()
ofd = fits.open(convolved, mode="update")
ofd[0].header.set("nextend", imset)
ofd.append(hdu)
ofd.close()
else:
cnv = sub5
if original_nrows > nrows:
result = N.zeros((original_nrows, ncols), dtype=N.float32)
err_result = N.zeros((original_nrows, ncols), dtype=N.float32)
result[rows[0]:rows[1]] = apply_trace(cnv, a2center, a2displ,
subdiv, offset, shifta2, "SCI")
err_result[rows[0]:rows[1]] = apply_trace(err5, a2center, a2displ,
subdiv, offset, shifta2, "ERR")
else:
result = apply_trace(cnv, a2center, a2displ,
subdiv, offset, shifta2, "SCI")
err_result = apply_trace(err5, a2center, a2displ,
subdiv, offset, shifta2, "ERR")
return result, err_result
def kd_resampling(img, errimg,
original_nrows, nrows, ncols, rows,
a2center, a2displ, offset, shifta2):
"""Apply Kris Davidson's resampling method.
Parameters
----------
img : ndarray
SCI image array (could be a subset of full image)
errimg : ndarray
ERR image array (could be a subset of full image)
original_nrows : int
number of image lines (NAXIS2) in input image
nrows : int
number of image lines in subset
ncols : int
number of image columns (NAXIS1)
rows : tuple
tuple giving the slice of rows to process
a2center : ndarray
1-D array of Y locations
a2displ : ndarray
array of traces, one for each a2center; the length of each
trace must be the same as the number of columns in the input image
offset : float
offset of the first row in 'image' from the beginning of
the data block in the original file, needed for trace
shifta2 : float
offset of the row from nominal (from shifta2 keyword)
Returns
-------
img_arr : tuple
the image and error arrays (to replace the input img and errimg)
"""
# image2 is for the error image
subdiv = 8
image2 = N.zeros((subdiv*nrows, ncols), dtype=N.float32)
for j in range(subdiv):
image2[j::subdiv, :] = errimg
if original_nrows > nrows:
result = N.zeros((original_nrows, ncols), dtype=N.float32)
err_result = N.zeros((original_nrows, ncols), dtype=N.float32)
result[rows[0]:rows[1]] = kd_apply_trace(img, a2center, a2displ,
offset, shifta2)
err_result[rows[0]:rows[1]] = apply_trace(image2, a2center, a2displ,
subdiv, offset, shifta2, "ERR")
else:
result = kd_apply_trace(img, a2center, a2displ, offset, shifta2)
err_result = apply_trace(image2, a2center, a2displ,
subdiv, offset, shifta2, "ERR")
return result, err_result
def kd_apply_trace(image, a2center, a2displ, offset=0., shifta2=0.):
"""Kris Davidson's resampling algorithm, following the trace.
Parameters
----------
image : ndarray
input 2-D image array
a2center : ndarray
array of Y locations
a2displ : ndarray
array of traces, one for each a2center; the length of
each trace must be the same as the number of columns in 'image'
offset : float
offset of the first row in 'image' from the beginning
of the data block in the original file, needed for trace
shifta2 : float
offset of the row from nominal (from shifta2 keyword)
Returns
-------
x2d : ndarray
2-D array containing the resampled image
"""
shape = image.shape
x2d_shape = N.array(shape)
x2d = N.zeros(x2d_shape, dtype=N.float32)
total = shape[0] * shape[1]
flat_im = N.ravel(image)
for i in range(x2d_shape[0]):
# y is the location in the cross-dispersion direction.
trace = interpolate_trace(a2center, a2displ, float(i) + offset,
x2d_shape[1])
y = float(i) + shifta2 + trace
nint_y = N.around(y)
s = y - nint_y
n = nint_y.astype(N.int32)
col_range = N.arange(x2d_shape[1], dtype=N.int32)
# These are indices into the flattened image.
nm2 = (n-2) * x2d_shape[1] + col_range
nm1 = (n-1) * x2d_shape[1] + col_range
n0 = n * x2d_shape[1] + col_range
np1 = (n+1) * x2d_shape[1] + col_range
np2 = (n+2) * x2d_shape[1] + col_range
nm2 = N.where(nm2 < 0, 0, nm2)
nm1 = N.where(nm1 < 0, 0, nm1)
n0 = N.where(n0 < 0, 0, n0)
np1 = N.where(np1 < 0, 0, np1)
np2 = N.where(np2 < 0, 0, np2)
nm2 = N.where(nm2 >= total, total-1, nm2)
nm1 = N.where(nm1 >= total, total-1, nm1)
n0 = N.where(n0 >= total, total-1, n0)
np1 = N.where(np1 >= total, total-1, np1)
np2 = N.where(np2 >= total, total-1, np2)
a = - 0.050 * flat_im[nm2] + 0.165 * flat_im[nm1] + 0.77 * flat_im[n0] \
+ 0.165 * flat_im[np1] - 0.050 * flat_im[np2]
b = 0.08 * flat_im[nm2] - 0.66 * flat_im[nm1] \
+ 0.66 * flat_im[np1] - 0.08 * flat_im[np2]
c = 0.04 * flat_im[nm2] + 0.34 * flat_im[nm1] - 0.76 * flat_im[n0] \
+ 0.34 * flat_im[np1] + 0.04 * flat_im[np2]
x2d[i] = a + b * s + c * s**2
return x2d
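# A note on the five-point weights above (not stated in the original
# comments, but easy to verify by plugging in constant, linear and quadratic
# test profiles): the 'a' weights sum to 1 and estimate the (slightly
# smoothed) flux at the nearest row, 'b' estimates its first derivative, and
# 'c' half its second derivative, so x2d[i] = a + b*s + c*s**2 evaluates a
# local quadratic at the fractional offset s from that row.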
def stis_psf(x, a):
"""Evaluate the cross-dispersion PSF at x.
Parameters
----------
x : float
offset in pixels from the center of the profile
a : float
a measure of the width of the PSF
Returns
-------
val : float
the PSF evaluated at x
"""
return (1. + (x/float(a))**2)**-2
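# For example, the profile peaks at 1.0 and falls to a quarter of the peak
# where |x| equals the width parameter:
#     >>> stis_psf(0., 1.3)
#     1.0
#     >>> stis_psf(1.3, 1.3)
#     0.25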
def apply_trace(image, a2center, a2displ, subdiv,
offset=0., shifta2=0., extname="SCI"):
"""Add together 'subdiv' rows of 'image', following the trace.
Parameters
----------
image : ndarray
input 2-D image array, oversampled by 'subdiv' in axis 0
a2center : ndarray
1-D array of Y locations
a2displ : ndarray
array of traces, one for each a2center; the length of each
trace must be the same as the number of columns in the input image
subdiv : int
number of rows to add together
offset : float
offset of the first row in 'image' from the beginning of
the data block in the original file, needed for trace
shifta2 : float
offset of the row from nominal (from shifta2 keyword)
extname : string
which type of extension (SCI, ERR, DQ)?
Returns
---------
x2d : ndarray
resampled 2-D image array
Notes
-------
The function value is a 2-D array containing the resampled image.
This is binned by subdiv in Y (axis 0), after shifting by trace
(multiplied by subdiv).
For extname = "ERR" the result differs in these ways:
(1) fractions of pixels at the endpoints of the extraction region
are not included
(2) the values are combined as the average of the sum of the squares
For extname = "DQ" the result differs in these ways:
(1) the output is type int16
    (2) the output values are nominally the same as the input, while
    for SCI the output values are subdiv times larger than the input
(3) fractions of pixels at the endpoints of the extraction region
are not included
(4) the values are combined via bitwise OR rather than an average or sum
"""
shape = image.shape
x2d_shape = N.array(shape)
x2d_shape[0] //= subdiv
if extname == "DQ":
x2d = N.zeros(x2d_shape, dtype=N.int16)
else:
x2d = N.zeros(x2d_shape, dtype=N.float32)
for i in range(x2d_shape[0]):
# y is the location in the output, binned image (x2d), while
# locn is the location in the input, oversampled image.
trace = interpolate_trace(a2center, a2displ, float(i) + offset,
x2d_shape[1])
y = float(i) + shifta2 + trace
locn = y * subdiv + (subdiv - 1.) / 2.
if extname == "SCI":
x2d[i] = extract(image, locn, subdiv)
elif extname == "ERR":
x2d[i] = extract_err(image, locn, subdiv)
else:
x2d[i] = extract_i16(image, locn, subdiv)
return x2d
def extract(image, locn, subdiv):
"""Add together 'subdiv' rows of 'image', centered on 'locn'.
Parameters
----------
image : ndarray
input array, oversampled by 'subdiv' in axis 0
locn : ndarray
a 1-D array giving the location at which to extract; an
integer value corresponds to the center of the pixel. The length
must be the same as the number of columns in the input image.
subdiv : int
number of rows to add together
Returns
--------
spec : ndarray
a 1-D array containing the extracted row
"""
# Shift the zero point so the edges of a pixel have integer coordinates.
locn0 = locn + 0.5
shape = image.shape
hw = subdiv // 2
fhw = float(hw)
# floating point locations of upper and lower edges
fhigh = locn0 + fhw
flow = locn0 - fhw
# integer endpoints of range of whole pixels
high = N.floor(fhigh).astype(N.int32)
low = N.ceil(flow).astype(N.int32)
# fractions of pixels at upper and lower edges
dhigh = fhigh - high
dlow = low - flow
spec = N.zeros(shape[1], dtype=N.float32)
for j in range(shape[1]):
s_low = low.item(j)
s_high = high.item(j)
if s_low < 1 or s_high > shape[0]-1:
continue
spec[j] = N.sum(image[s_low: s_high, j])
spec[j] += image.item(s_high, j) * dhigh.item(j)
spec[j] += image.item(s_low-1, j) * dlow.item(j)
return spec
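# A small worked example (hypothetical data): summing subdiv=4 rows of an
# 8-row image of ones, centered on row 4.0, picks up three whole rows plus
# two half rows at the edges, i.e. 3 + 0.5 + 0.5 = 4:
#     >>> im = N.ones((8, 1), dtype=N.float32)
#     >>> extract(im, N.array([4.0]), 4)
#     array([4.], dtype=float32)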
def extract_err(image, locn, subdiv):
"""Average 'subdiv' rows of 'image', centered on 'locn'.
Parameters
----------
image : ndarray
input array, oversampled by 'subdiv' in axis 0
locn : ndarray
a 1-D array giving the location at which to extract; an
integer value corresponds to the center of the pixel
subdiv : int
number of rows to add together
Returns
-------
spec : ndarray
a 1-D array containing the extracted row
Notes
-------
This takes the square root of the average of the squares, intended to be
used for interpolating the ERR array. Fractions of pixels at the upper
and lower edges are excluded.
"""
# Shift the zero point so the edges of a pixel have integer coordinates.
locn0 = locn + 0.5
shape = image.shape
hw = subdiv // 2
fhw = float(hw)
# floating point locations of upper and lower edges
fhigh = locn0 + fhw
flow = locn0 - fhw
# integer endpoints of range of whole pixels
high = N.floor(fhigh).astype(N.int32)
low = N.ceil(flow).astype(N.int32)
spec = N.zeros(shape[1], dtype=N.float32)
for j in range(shape[1]):
s_low = low.item(j)
s_high = high.item(j)
if s_low < 1 or s_high > shape[0]-1:
continue
        sumsq = 0.
        for i in range(s_low, s_high+1):
            s_image = image.item(i, j)
            sumsq += s_image**2
        sumsq /= (s_high - s_low + 1.)
        spec[j] = math.sqrt(sumsq)
return spec
def extract_i16(image, locn, subdiv):
"""Bitwise OR 'subdiv' rows of 'image', centered on 'locn'.
Parameters
----------
image : ndarray
input array, oversampled by 'subdiv' in axis 0
locn : ndarray
a 1-D array giving the location at which to extract; an
integer value corresponds to the center of the pixel
subdiv : int
number of rows to add together
Returns
--------
spec : ndarray
a 1-D array containing the extracted row
"""
# Shift the zero point so the edges of a pixel have integer coordinates.
locn0 = locn + 0.5
shape = image.shape
hw = subdiv // 2
fhw = float(hw)
# floating point locations of upper and lower edges
fhigh = locn0 + fhw
flow = locn0 - fhw
# integer endpoints of range of whole pixels
high = N.floor(fhigh).astype(N.int32)
low = N.ceil(flow).astype(N.int32)
spec = N.zeros(shape[1], dtype=N.int16)
for j in range(shape[1]):
s_low = low.item(j)
s_high = high.item(j)
if s_low < 1 or s_high > shape[0]-1:
continue
        flags = 0
        for i in range(s_low, s_high+1):
            flags |= image.item(i, j)
        spec[j] = flags
return spec
def interpolate_trace(a2center, a2displ, y, length):
"""Interpolate within the array of traces, and return a trace.
Parameters
----------
a2center : ndarray
array of Y locations
a2displ : ndarray
array of traces, one trace for each element of a2center
y : float
Y location on the detector
    length : int
        length of a trace; needed only if a2displ is empty

    Returns
    -------
    trace : ndarray
        the trace interpolated to y, or an array of zeros if a2displ
        is empty
    """
if len(a2displ) < 1:
trace = N.zeros(length, dtype=N.float32)
else:
# interpolate to get the trace at y
trace = r_util.interpolate(a2center, a2displ, y)
return trace
def trace_name(trace, phdr):
"""Return the 1dt table name or array.
Parameters
-----------
trace : string or array or None
if trace is None the header keyword SPTRCTAB will be
gotten from phdr; else if this is a string it should be the name
of a trace file (possibly using an environment variable); otherwise,
it should be a trace, in which case it will be returned unchanged
phdr : fits Header object
primary header, used only if trace is None
Returns
--------
tracefile : string or array
name of a trace file (with environment variable expanded),
or an actual trace array
"""
if trace is None:
try:
tracefile = phdr["sptrctab"]
except KeyError:
raise ValueError("Keyword SPTRCTAB not found; specify trace explicitly.")
tracefile = r_util.expandFileName(tracefile)
elif isinstance(trace, str):
tracefile = r_util.expandFileName(trace)
else:
# trace could already be an array object
tracefile = trace
return tracefile
def get_trace(tracefile, phdr, hdr):
"""Read 1-D traces from the 1dt table (sptrctab).
Parameters
----------
tracefile : string or array
either a trace array or the name of a FITS 1dt table
phdr : fits Header object
primary header of input file
hdr : fits Header object
extension header of input image (for binning info and
time of exposure)
Returns
--------
trace_arrays : tuple of 2 arrays
a pair of arrays, one is the Y location at the middle column,
and the other is an array of trace arrays
Notes
-----
If 'tracefile' is already a trace array, it will just be returned,
together with an arbitrary Y location of 0 (because that will
always be within the image).
opt_elem and cenwave are criteria for selecting the relevant rows
from the 1dt table. There will normally be several rows that match,
and they should have different values of the Y location; the output
list will be sorted on Y location.
"""
# If the input is already an array object, return a2center
# (arbitrarily set to 0) and the trace array.
is_an_array = isinstance(tracefile, N.ndarray)
if is_an_array:
a2center = [0.]
a2displ = [tracefile]
return a2center, a2displ
# These are the row-selection values.
opt_elem = phdr.get("opt_elem", default="")
cenwave = phdr.get("cenwave", default=0)
# These describe the binning in the dispersion direction.
ltm = hdr.get("ltm1_1", default=1.)
ltv = hdr.get("ltv1", default=0.) # one indexing
binaxis1 = int(round(1. / ltm))
if binaxis1 not in [1, 2, 4, 8]:
raise ValueError("LTM1_1 should be either 1., 0.5, 0.25, or 0.125")
filter = {"opt_elem": opt_elem, "cenwave": cenwave}
trace_info = gettable.getTable(tracefile, filter,
sortcol="a2center", at_least_one=True)
expstart = hdr.get("expstart", default=-1.)
gettable.rotateTrace(trace_info, expstart)
a2center = trace_info.field("a2center") - 1. # zero indexing
a2displ = trace_info.field("a2displ")
if binaxis1 > 1 and len(a2displ) > 0:
a2displ = bin_traces(a2displ, binaxis1, ltv)
return a2center, a2displ
def bin_traces(a2displ, binaxis1, ltv):
"""bin the traces by the factor binaxis1
Parameters
----------
a2displ : ndarray
an array of one or more arrays of Y displacements (traces)
binaxis1 : int
binning factor in the dispersion axis
ltv : float
offset in the dispersion axis (one indexing)
Returns
-------
a2displ : ndarray
an array of traces (a2displ), but with the trace arrays binned
and shorter by the factor binaxis1
"""
if len(a2displ) < 1 or binaxis1 == 1:
return a2displ
oldlen = len(a2displ[0])
if binaxis1 == 2:
newlen = 511
elif binaxis1 == 4:
newlen = 255
elif binaxis1 == 8:
newlen = 127
else:
newlen = oldlen
newtraces = []
# Note: one-indexed pixels are used for determining the offset.
# The left edge of the first image pixel is pixel number 0.5; convert
# that point in the binned image to reference pixel coordinates.
# Then compare with the left edge of the first reference pixel to get
# the offset of our binned image (in reference pixel units).
left_edge = (0.5 - ltv) * binaxis1
offset = left_edge - 0.5 # should be an integer
offset = int(round(offset))
for trace in a2displ:
newtrace = N.zeros(newlen, dtype=N.float32)
for i in range(binaxis1):
# this slice can be longer than newlen
temp = trace[i+offset:oldlen:binaxis1]
newtrace += temp[0:newlen]
newtrace /= float(binaxis1)
newtraces.append(newtrace)
return N.array(newtraces)
def inv_haar(image):
    """One inverse Haar step, in place: even rows hold coarse (sum) values,
    odd rows hold predicted differences; the interleaved finer samples
    replace them, with pairwise sums reproducing the coarse values."""
    image[0::2] = (image[0::2] + image[1::2]) / 2.
    image[1::2] = (image[0::2] - image[1::2])
    return image
def inv_avg_interp(order, image):
    """Predict the difference (detail) rows for the inverse wavelet step by
    polynomial interpolation of windowed running sums of the input rows."""
    side, j0, j1 = (order-1)//2, (order-1)//2, (order+1)//2
rows, cols = image.shape
order_2 = float(order) / 2.
n = order + 1
x = N.arange(n, dtype=N.float64)
y = N.zeros((n, cols), dtype=N.float64)
d = N.zeros((rows, cols), dtype=N.float32) # single precision result
for j in range(side, rows-side):
y[1:] = N.cumsum(image[j-side:j+side+1], axis=0)
d[j] = -((y[j1] + y[j0]) - 2. * polynomial(x, y, order_2, n))
return d
def polynomial(x, y, z, n):
"""used for interpolation
Parameters
----------
x : ndarray
the integer values from 0 through n-1 inclusive (but float64)
y : ndarray
a 2-D array, axis 0 of length n
z : float
n / 2.
n : int
1 + order of polynomial fit
"""
t = n*[0.]
for k in range(n):
t[k] = y[k]
for j in range(k-1, -1, -1):
t[j] = t[j+1] + (t[j+1] - t[j]) / \
((z - x.item(j)) / (z - x.item(k)) - 1.)
return t[0]
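# Example (hypothetical values): interpolating through the points (0, 0)
# and (1, 2) and evaluating at z = 0.5 gives 1.0 per column:
#     >>> polynomial(N.arange(2, dtype=N.float64),
#     ...            N.array([[0.], [2.]]), 0.5, 2)
#     array([1.])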
if __name__ == "__main__":
# Note that the command-line options do not include all of the
# wx2d function arguments.
if len(sys.argv) < 3 or len(sys.argv) > 7:
print("Syntax: wx2d.py input output "
"[wavelengths trace minrow maxrow]")
sys.exit()
input = sys.argv[1]
output = sys.argv[2]
wavelengths = None
trace = None
if len(sys.argv) > 3:
if sys.argv[3] != "None":
wavelengths = sys.argv[3]
if len(sys.argv) > 4:
if sys.argv[4] != "None":
trace = sys.argv[4]
if len(sys.argv) == 7:
row0 = int(sys.argv[5]) - 1
row1 = int(sys.argv[6])
        wx2d(input, output, wavelengths=wavelengths, trace=trace,
             rows=(row0, row1))
    else:
        wx2d(input, output, wavelengths=wavelengths, trace=trace)
| 33,439 | 31.720157 | 88 |
py
|
stistools
|
stistools-master/stistools/mktrace.py
|
#!/usr/bin/env python
import numpy as np
from astropy.io import fits
import os.path
from scipy import signal
from scipy import ndimage as ni
from stsci.tools import gfit, linefit
from stsci.tools import fileutil as fu
__doc__ = """
Refine a STIS trace table.
- A trace is generated from the science file and a trace
center is computed.
- The two traces bracketing the trace center are extracted
from the trace table and interpolated
- The correction is computed as the difference between the
linear fit to the science and interpolated traces
- The correction is applied to all traces in the trace file
for that particular OPT_ELEM and CENWAVE
- A new trace table is written to the current directory and
the relevant keywords are updated in the header of the input file.
Examples
--------
Simple example of running mktrace on a STIS file named 'file.fits':
>>> import mktrace
>>> mktrace.mktrace('file.fits', tracecen=509.4, weights=[(x1, x2), (x3, x4)])
:Authors:
- Author (IDL): Linda Dressel
- Python version: Nadia Dencheva
"""
__version__ = '2.0.0'
__vdate__ = '2017-03-20'
def mktrace(fname, tracecen=0.0, weights=None):
"""
Refine a stis spectroscopic trace.
"""
try:
hdulist = fits.open(fname)
except IOError:
print("\nUNABLE TO OPEN FITS FILE: {} \n".format(fname))
return
data = hdulist[1].data
hdr0 = hdulist[0].header
hdr1 = hdulist[1].header
hdulist.close()
kwinfo = getKWInfo(hdr0, hdr1)
if kwinfo['instrument'] != 'STIS':
print("This trace tool works only on STIS spectroscopic observations.\n")
print("Not processing file {}.\n".format(fname))
return
sizex, sizey = data.shape
if weights is None:
wei = np.ones(sizey)
else:
if not iterable(weights) or not iterable(weights[0]):
print("Weights must be a list of tuples, for example:\n")
print("weights=[(23, 45),(300, 670)] \n")
return
wei = np.zeros(sizey)
for i in np.arange(len(weights)):
for j in np.arange(weights[i][0], weights[i][1]):
wei[j] = 1
# wind are weights indices in the image frame which may be a subarray
wind = np.nonzero(wei)[0]
tr = Trace(fname, kwinfo)
a2center, trace1024 = tr.generateTrace(data, kwinfo, tracecen=tracecen, wind=wind)
# compute the full frame a2center
ffa2center = a2center*kwinfo['binaxis2']
tr_ind, a2disp_ind = tr.getTraceInd(ffa2center)
tr2 = tr.readTrace(tr_ind)
if tr_ind != a2disp_ind[0]:
tr1 = tr.readTrace(tr_ind - 1)
interp_trace = trace_interp(tr1, tr2, ffa2center)
else:
        # only one bracketing trace is available; zero-center its
        # a2displ at column 512, consistent with trace_interp
        interp_trace = tr2['a2displ'] - tr2['a2displ'][512]
# convert the weights array into full frame
ind = np.nonzero(wei)[0] * kwinfo['binaxis1']
w = np.zeros(1024)
w[ind] = 1
    X = np.arange(1024).astype(np.float64)
sparams = linefit.linefit(X, trace1024, weights=w)
rparams = linefit.linefit(X, interp_trace, weights=w)
sciline = sparams[0] + sparams[1] * X
refline = rparams[0] + rparams[1] * X
deltaline = sciline - refline
# create a complete trace similar to a row in a _1dt file
# used only for debugging
tr._a2displ = trace1024
    # tr2 (not tr1) is used here because tr1 is defined only when the
    # trace center falls between two table rows
    tr._a1center = tr2['a1center']
    tr._a2center = a2center
    tr._nelem = tr2['nelem']
    tr._pedigree = tr2['pedigree']
    tr._snr_thresh = tr2['snr_thresh']
tr.writeTrace(fname, sciline, refline, interp_trace,
trace1024, tr_ind, a2disp_ind)
#print 'time', time.time()-start
    # the minus sign is for consistency with the way x2d reports the rotation
print("Traces were rotated by {:.10f} degrees \n".format((-(sparams[1]-rparams[1])*180 / np.pi)))
print('trace is centered on row {:.10f}'.format(tr._a2center))
return tr
def iterable(v):
try:
len(v)
return True
except TypeError:
return False
def interp(y, n):
"""
Given a 1D array of size m, interpolates it to a size n (m < n).
"""
m = float(len(y))
x = np.arange(m)
    i = np.arange(n, dtype=np.float64)
xx = i * (m-1)/n
xind = np.searchsorted(x, xx)-1
yy = y[xind]+(xx-x[xind])*(y[xind+1]-y[xind])/(x[xind+1]-x[xind])
return yy
def trace_interp(tr1, tr2, cen):
a2disp1 = tr1['a2displ']
a2disp2 = tr2['a2displ']
za2disp1 = a2disp1 - a2disp1[512]
za2disp2 = a2disp2 - a2disp2[512]
high = tr2['a2center']
low = tr1['a2center']
n2 = (cen - low) / (high - low)
n1 = 1.0 - n2
interp_trace = n1 * za2disp1 + n2 * za2disp2
return interp_trace
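# For instance (hypothetical values), with cen exactly midway between
# tr1['a2center'] and tr2['a2center'], n1 = n2 = 0.5 and the result is the
# straight average of the two traces after each has been zero-centered at
# column 512.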
def getKWInfo(hdr0, hdr1):
kwinfo = {}
kwinfo['instrument'] = hdr0['INSTRUME']
kwinfo['detector'] = hdr0['DETECTOR']
if kwinfo['detector'] == "CCD":
kwinfo['binaxis2'] = hdr0['BINAXIS2']
kwinfo['binaxis1'] = hdr0['BINAXIS1']
else:
kwinfo['binaxis2'] = 1
kwinfo['binaxis1'] = 1
kwinfo['crpix2'] = hdr1['CRPIX2']
kwinfo['ltv2'] = hdr1['LTV2']
kwinfo['sizaxis2'] = hdr0['sizaxis2']
kwinfo['opt_elem'] = hdr0['OPT_ELEM']
kwinfo['cenwave'] = hdr0['CENWAVE']
kwinfo['sporder'] = hdr1['SPORDER']
kwinfo['sptrctab'] = hdr0['SPTRCTAB']
return kwinfo
class Trace:
""" Trace class for a crj or flt file.
Notes
-----
tr=Trace(file)
file is a crj or flt file.
opt_elem, cenwave, sporder are read from the header of the science file
a2center is a2center of the trace generated from the science file
tr_ind= tr.getTraceInd(a2center)
    tr_ind is the index of the first row in the trace file whose A2CENTER
    lies above the a2center computed from the generated trace
tr.readTrace(tr_ind)
a2center = tr.generateTrace(...)
"""
def __init__(self, file, kwinfo):
self._opt_elem = kwinfo['opt_elem']
self._cenwave = kwinfo['cenwave']
self._sporder = kwinfo['sporder']
self._nelem = None
self._a2displ = None
self._a1center = None
self._a2center = None
self._snr_thresh = None
self._pedigree = None
self.sptrctabname = kwinfo['sptrctab']
self.sptrctab = self.openTraceFile(fu.osfn(self.sptrctabname))
def openTraceFile(self, filename):
"""
Returns a spectrum trace table
"""
if filename is not None:
try:
f = fits.open(filename)
except IOError:
print("Could not open file {}.\n".format(filename))
return
tab = f[1].data
f.close()
return tab
else:
print("A valid 1-D SPECTRUM TRACE TABLE is required.\n")
return None
def getTraceInd(self, a2center):
"""
Finds the first trace in the trace table whose A2CENTER is larger
than the specified a2center.
"""
opt_ind = self.sptrctab.field('OPT_ELEM') == self._opt_elem
cen_ind = self.sptrctab.field('CENWAVE') == self._cenwave
sp_ind = self.sptrctab.field('SPORDER') == self._sporder
a2disp_ind = opt_ind & cen_ind & sp_ind
ind = np.nonzero(a2disp_ind)
i = np.nonzero(self.sptrctab[ind].field('A2CENTER') > a2center)[0][0] + ind[0][0]
return i, a2disp_ind
def readTrace(self, tr_ind):
"""
reads the specified row from the 1dttab.fits
"""
tr = {}
tr['nelem'] = self.sptrctab[tr_ind].field('NELEM')
tr['a2displ'] = self.sptrctab[tr_ind].field('A2DISPL')
tr['a1center'] = self.sptrctab[tr_ind].field('A1CENTER')
tr['a2center'] = self.sptrctab[tr_ind].field('A2CENTER')
tr['snr_thresh'] = self.sptrctab[tr_ind].field('SNR_THRESH')
tr['pedigree'] = self.sptrctab[tr_ind].field('PEDIGREE')
return tr
def writeTrace(self, fname, sciline, refline, interp_trace, trace1024,
tr_ind, a2disp_ind):
"""
The 'writeTrace' method performs the following steps:
        - Adds sciline-refline to all traces with the relevant OPT_ELEM,
CENWAVE and SPORDER.
- Writes the new trace table to the current directory.
- Updates the SPTRCTAB keyword in the header to point to the new table.
- Writes out fits files with the
- science trace - '_sci'
- the fit to the science trace - '_scifit'
- the interpolated trace - '_interp'
- the linear fit to the interpolated trace - '_interpfit'
"""
fpath = fu.osfn(self.sptrctabname)
infile = fname.split('.')
newname = infile[0] + '_1dt.' + infile[1]
# refine all traces for this CENWAVE, OPT_ELEM
fu.copyFile(fpath, newname)
hdulist = fits.open(newname, mode='update')
tab = hdulist[1].data
ind = np.nonzero(a2disp_ind)[0]
for i in np.arange(ind[0], ind[-1] + 1):
tab[i].setfield('A2DISPL', tab[i].field('A2DISPL') + (sciline - refline))
if 'DEGPERYR' in tab.names:
for i in np.arange(ind[0], ind[-1] + 1):
tab[i].setfield('DEGPERYR', 0.0)
hdulist.flush()
hdulist.close()
# update SPTRCTAB keyword in the science file primary header
hdulist = fits.open(fname, mode='update')
hdr0 = hdulist[0].header
hdr0['SPTRCTAB'] = newname
hdulist.close()
# write out the fit to the interpolated trace ('_interpfit' file)
refhdu = fits.PrimaryHDU(refline)
refname = infile[0] + '_1dt_interpfit.' + infile[1]
if os.path.exists(refname):
os.remove(refname)
refhdu.writeto(refname)
# write out the interpolated trace ('_interp' file)
inthdu = fits.PrimaryHDU(interp_trace)
intname = infile[0] + '_1dt_interp.' + infile[1]
if os.path.exists(intname):
os.remove(intname)
inthdu.writeto(intname)
        # write out the fit to the science trace ('_scifit' file)
scihdu = fits.PrimaryHDU(sciline)
sciname = infile[0] + '_1dt_scifit.' + infile[1]
if os.path.exists(sciname):
os.unlink(sciname)
scihdu.writeto(sciname)
# write out the science trace ('_sci' file)
trhdu = fits.PrimaryHDU(trace1024)
trname = infile[0] + '_1dt_sci.' + infile[1]
if os.path.exists(trname):
os.unlink(trname)
trhdu.writeto(trname)
def generateTrace(self, data, kwinfo, tracecen=0.0, wind=None):
"""
Generates a trace from a science file.
"""
if kwinfo['sizaxis2'] is not None and kwinfo['sizaxis2'] < 1023:
subarray = True
else:
subarray = False
if tracecen == 0:
if subarray:
_tracecen = kwinfo['sizaxis2'] / 2.0
else:
_tracecen = kwinfo['crpix2']
else:
_tracecen = tracecen
sizex, sizey = data.shape
subim_size = 40
y1 = int(_tracecen - subim_size/2.)
y2 = int(_tracecen + subim_size/2.)
if y1 < 0:
y1 = 0
if y2 > (sizex -1):
y2 = sizex - 1
specimage = data[y1:y2+1, :]
smoytrace = self.gFitTrace(specimage, y1, y2)
yshift = int(np.median(smoytrace) - 20)
y1 = y1 + yshift
y2 = y2 + yshift
if y1 < 0:
y1 = 0
if y2 > sizex:
y2 = sizex
specimage = data[y1:y2+1, :]
smoytrace = self.gFitTrace(specimage, y1, y2)
med11smoytrace = ni.median_filter(smoytrace, 11)
med11smoytrace[0] = med11smoytrace[2]
diffmed = abs(smoytrace - med11smoytrace)
        tolerance = 3 * np.median(abs(smoytrace[wind] - med11smoytrace[wind]))
        if tolerance < 0.1:
            tolerance = 0.1
        badpoint = np.where(diffmed > tolerance)[0]
if len(badpoint) != 0:
np.put(smoytrace, badpoint, med11smoytrace[badpoint])
# Convolve with a gaussian to smooth it.
fwhm = 10.
sigma = fwhm / 2.355
gaussconvxsmoytrace = ni.gaussian_filter1d(smoytrace, sigma)
# Compute the trace center as the median of the pixels
# with nonzero weights.
tracecen = np.median(gaussconvxsmoytrace[wind])
gaussconvxsmoytrace = gaussconvxsmoytrace - tracecen
trace1024 = interp(gaussconvxsmoytrace, 1024) * kwinfo['binaxis2']
tracecen = tracecen + y1 + 1.0
if subarray:
tracecen = tracecen - kwinfo['ltv2']
self.trace1024 = trace1024
return tracecen, trace1024
def gFitTrace(self, specimage, y1, y2):
"""
Fit a gaussian to each column of an image.
"""
sizex, sizey = specimage.shape
        smoytrace = np.zeros(sizey).astype(np.float64)
        boxcar_kernel = signal.windows.boxcar(3) / 3.0
for c in np.arange(sizey):
col = specimage[:, c]
col = col - np.median(col)
            smcol = ni.convolve(col, boxcar_kernel).astype(np.float64)
fit = gfit.gfit1d(smcol, quiet=1, maxiter=15)
smoytrace[c] = fit.params[1]
return np.array(smoytrace)
| 13,184 | 30.694712 | 101 |
py
|
stistools
|
stistools-master/stistools/add_stis_s_region.py
|
#!/usr/bin/env python
import glob
import math
import os
import sys
import argparse
from astropy.io import fits
from astropy.wcs import WCS
import numpy as np
import logging
import pysiaf
__doc__ = """
This script will calculate an S_REGION string for STIS data and assign it
to the S_REGION keyword in the science data header. If no S_REGION keyword
exists, one will be added to each SCI header, after the existing PA_APER
keyword. The S_REGION string can be calculated and printed without changing
the science data header by using the --dry_run keyword.
The script uses the pysiaf interface to the HST SIAF database table. The
PROPAPER keyword and detector are used to look up the entry in the SIAF database
using the correspondence table originally documented in STIS 95-008D ("STIS
Science Apertures Revision D").
The aperture is compared with the extent calculated by using
the science data array and existing WCS and the smaller extent used to calculate
the S_REGION footprint, so that if a subarray was used it will ensure that the
S_REGION footprint doesn't incorrectly specify any sky extent outside the field
of view that is read out.
When used from the command line, the script will calculate and add the S_REGION
keyword to all _raw.fits and _tag.fits files matching the given rootnames in
the current directory.
usage: Add S_REGION value to raw data headers [-h] [--dry_run]
                                              rootnames [rootnames ...]
positional arguments:
  rootnames   Rootnames to be processed
options:
  -h, --help  show this help message and exit
  --dry_run   Calculate S_REGION value, but don't write to data header[s]
"""
DEGREESTORADIANS = math.pi / 180.0
RADIANTOARCSEC = 180.0 / math.pi * 3600.0
logging.basicConfig()
log = logging.getLogger(__name__)
log.setLevel(logging.DEBUG)
STIS_APERTURE_LOOKUP = {'0.05X29': '050X29',
'0.09X29': '090X29',
'0.2X29': '200X29',
'0.05X31NDA': '050X31A',
'0.05X31NDB': '050X31B',
'0.1X0.03': '100X030',
'0.1X0.06': '100X060',
'0.1X0.09': '100X090',
'0.1X0.2': '100X200',
'0.2X0.05ND': '200X050',
'0.2X0.06': '200X060',
'0.2X0.06FPA': '200X60A',
'0.2X0.06FPB': '200X60B',
'0.2X0.06FPC': '200X60C',
'0.2X0.06FPD': '200X60D',
'0.2X0.06FPE': '200X60E',
'0.2X0.09': '200X090',
'0.2X0.2': '200X200',
'0.2X0.2FPA': '200A',
'0.2X0.2FPB': '200B',
'0.2X0.2FPC': '200C',
'0.2X0.2FPD': '200D',
'0.2X0.2FPE': '200E',
'0.2X0.5': '200X500',
'0.3X0.05ND': '330X050',
'0.3X0.06': '330X060',
'0.3X0.09': '330X090',
'0.3X0.2': '330X200',
'0.5X0.5': '500X500',
'1X0.06': '1X060',
'1X0.2': '1X200',
'25MAMA': '25',
'25MAMAD1': '25D1',
'2X2': '2X2',
'31X0.05NDA': '31X050A',
'31X0.05NDB': '31X050B',
'31X0.05NDC': '31X050C',
'36X0.05P45': '36X050P',
'36X0.05N45': '36X050N',
'36X0.6P45': '36X600P',
'36X0.6N45': '36X600N',
'50CCD': '50',
'50CORON': '50COR',
'52X0.05': 'L050',
'52X0.05D1': 'L050D1',
'52X0.05E1': 'L050E1',
'52X0.05F1': 'L050F1',
'52X0.05F1-R': 'L050R1',
'52X0.05F2': 'L050F2',
'52X0.05F2-R': 'L050R2',
'52X0.1': 'L100',
'52X0.1D1': 'L100D1',
'52X0.1E1': 'L100E1',
'52X0.1F1': 'L100F1',
'52X0.1F1-R': 'L100R1',
'52X0.1F2': 'L100F2',
'52X0.1F2-R': 'L100R2',
'52X0.1B1.0': 'LBAR1',
'52X0.1B1.0-R': 'LBARR1',
'52X0.1B0.5': 'LBAR2',
'52X0.1B0.5-R': 'LBARR2',
'52X0.1B3.0': 'LBAR3',
'52X0.1B3.0-R': 'LBARR3',
'52X0.2': 'L200',
'52X0.2D1': 'L200D1',
'52X0.2E1': 'L200E1',
'52X0.2E2': 'L200E2',
'52X0.2F1': 'L200F1',
'52X0.2F1-R': 'L200R1',
'52X0.2F2': 'L200F2',
'52X0.2F2-R': 'L200R2',
'52X0.5': 'L500',
'52X0.5D1': 'L500D1',
'52X0.5E1': 'L500E1',
'52X0.5E2': 'L500E2',
'52X0.5F1': 'L500F1',
'52X0.5F1-R': 'L500R1',
'52X0.5F2': 'L500F2',
'52X0.5F2-R': 'L500R2',
'52X2': 'L2',
'52X2D1': 'L2D1',
'52X2E1': 'L2E1',
'52X2E2': 'L2E2',
'52X2F1': 'L2F1',
'52X2F1-R': 'L2R1',
'52X2F2': 'L2F2',
'52X2F2-R': 'L2R2',
'6X0.06':'6X060',
'6X0.2': '6X200',
'6X0.5': '6X500',
'6X6': '6X6',
'BAR5': 'BAR5',
'BAR10': 'BAR10',
'F25CIII': '25C3',
'F25CN182': '25CN182',
'F25CN270': '25CN270',
'F25LYA': '25LYA',
'F25MGII': '25MG2',
'F25ND3': '25ND3',
'F25ND5': '25ND5',
'F25NDQ1': '25NDQ1',
'F25NDQ2': '25NDQ2',
'F25NDQ3': '25NDQ3',
'F25NDQ4': '25NDQ4',
'F25QTZ': '25QTZ',
'F25QTZD1': '25QTZD1',
'F25SRF2': '25SRF2',
'F25SRF2D1': '25SRF2D1',
'F28X50LP': '28X50LP',
'F28X50OII': '28X50O2',
'F28X50OIII': '28X50O3',
'WEDGEA0.6': 'WGA06',
'WEDGEA1.0': 'WGA10',
'WEDGEA1.8': 'WGA18',
'WEDGEA2.0': 'WGA20',
'WEDGEA2.5': 'WGA25',
'WEDGEA2.8': 'WGA28',
'WEDGEB1.0': 'WGB10',
'WEDGEB1.8': 'WGB18',
'WEDGEB2.0': 'WGB20',
'WEDGEB2.5': 'WGB25',
'WEDGEB2.8': 'WGB28'}
def get_files_to_process(rootnames):
"""Create a list of files to process from the list of rootnames
"""
endings = ['_raw.fits',
'_tag.fits']
file_list = []
for rootname in rootnames:
if os.path.basename(rootname) != rootname:
log.warning("{}: rootnames should refer to files in the working directory".format(rootname))
fitslist = glob.glob(rootname.lower() + '*.fits')
appended = False
for input_file in fitslist:
for ending in endings:
if input_file.endswith(ending):
appended = True
file_list.append(input_file)
if not appended:
log.warning("No files selected for rootname {}".format(rootname))
if len(file_list) == 0:
log.error("No rootnames selected")
file_list.sort()
return file_list
def add_s_region(stisfile, hst_siaf, dry_run=False):
"""Calculate the S_REGION keyword for a single STIS file. If the
dry_run parameter is False, set the S_REGION in the SCI extensions with
the calculated value. If keyword isn't present, add it
"""
open_mode = 'readonly' if dry_run else 'update'
with fits.open(stisfile, mode=open_mode) as f1:
log.info('Processing file {}'.format(stisfile))
hdr0 = f1[0].header
detector = hdr0['DETECTOR']
aperture = hdr0['APERTURE']
propaper = hdr0['PROPAPER']
if propaper.upper() not in STIS_APERTURE_LOOKUP.keys():
log.info('PROPAPER keyword {} not in lookup table, using APERTURE keyword instead'.format(propaper))
if aperture.upper() not in STIS_APERTURE_LOOKUP.keys():
log.warning('No match for APERTURE keyword either')
else:
propaper = aperture
siaf_entry = get_siaf_entry(hst_siaf, propaper, detector)
for ext in f1[1:]:
if ext.header['EXTNAME'] in ['SCI', 'EVENTS']:
hdr1 = ext.header
ra_aper = hdr1['RA_APER']
dec_aper = hdr1['DEC_APER']
extname = hdr1['EXTNAME']
extver = hdr1['EXTVER']
if siaf_entry is not None:
pa_aper = hdr1['PA_APER']
pa_aper = pa_aper * DEGREESTORADIANS
x, y = siaf_entry.closed_polygon_points('idl')
wcslimits = get_wcs_limits(f1)
siaflimits = get_siaf_limits(x, y)
x, y = smallest_size(wcslimits, siaflimits)
# This is to get the parity right
x = x * -1.0
costheta = math.cos(pa_aper)
sintheta = math.sin(pa_aper)
dra = x * costheta + y * sintheta
dra = dra / math.cos(dec_aper * DEGREESTORADIANS) / 3600.0
ddec = (-x * sintheta + y * costheta) / 3600.0
ra_corners = ra_aper + dra
dec_corners = dec_aper + ddec
s_region = 'POLYGON ICRS'
for ra, dec in zip(ra_corners, dec_corners):
s_region = s_region + ' {} {}'.format(ra, dec)
else:
log.warning("S_REGION set to 10 arcsec diameter circle centered on (RA_APER, DEC_APER)")
radius = 5.0 / 3600.0
s_region = 'CIRCLE ICRS {0:.8f} {1:.7f} {2:.8f}'.format(ra_aper, dec_aper, radius)
log.info("{}[{}, {}] with aperture {} has S_REGION = {}".format(stisfile,
extname, extver, propaper, s_region))
if not dry_run:
write_keyword_to_header(ext, s_region)
else:
log.info('Dry-run - no changes made to science header')
return
def write_keyword_to_header(extension, s_region):
"""Write the S_REGION keyword to the header, creating the keyword if it
doesn't exist"""
if extension.header.get('S_REGION'):
# Keyword exists
extension.header['S_REGION'] = s_region
log.info('Existing S_REGION keyword updated')
else:
# Keyword doesn't exist, need to add it
extension.header.set('S_REGION', s_region, 'Spatial extent of the observation',
after='PA_APER')
log.info('New S_REGION keyword added and updated')
return
def get_siaf_entry(hst_siaf, aperture, detector):
"""The SIAF entry aperture name doesn't correspond to the STIS
APERTURE keyword value. Construct the entry from the aperture and
detector (here PROPAPER is used as the aperture keyword)
"""
# All entries start with uppercase o
entry = 'O'
# Second letter depends on detector
detector_letters = {"CCD": "V",
"NUV-MAMA": "N",
"FUV-MAMA": "F"}
entry = entry + detector_letters[detector]
# The rest depends on the aperture
    try:
        lookup = STIS_APERTURE_LOOKUP[aperture]
    except KeyError:
        log.warning("No match for aperture {}".format(aperture))
        return None
    entry = entry + lookup
try:
siaf_entry = hst_siaf[entry]
except KeyError:
log.warning("Unable to get SIAF data for entry {}".format(entry))
log.warning("Trying other wavebands")
success = False
for letter in "VNF":
entry = 'O' + letter + STIS_APERTURE_LOOKUP[aperture]
try:
siaf_entry = hst_siaf[entry]
success = True
log.info("Succeeded with {}".format(entry))
break
except KeyError:
success = False
        if not success:
            log.error("No matching aperture found")
            # return None rather than raising KeyError below; callers
            # already check for a None SIAF entry
            return None
    return hst_siaf[entry]
def get_wcs_limits(f1):
    # For imaging data, any subarray will sometimes limit the observable
    # footprint. Calculate the limits of the field of view using the WCS;
    # this will be compared with the limits from projecting the aperture
    hdr1 = f1[1].header
    extname = hdr1['EXTNAME']
    dispersed_x = False
    if extname == 'SCI':
        crpix1 = hdr1['CRPIX1']
        crpix2 = hdr1['CRPIX2']
        nx = hdr1['NAXIS1']
        ny = hdr1['NAXIS2']
        dispersed_x = hdr1['CTYPE1'] == 'WAVE'
    elif extname == 'EVENTS':
        crpix1 = hdr1['TCRPX2']
        crpix2 = hdr1['TCRPX3']
        nx = hdr1['AXLEN1']
        ny = hdr1['AXLEN2']
        dispersed_x = hdr1['TCTYP2'] == 'WAVE'
    else:
        log.error('No SCI or EVENTS extension to get WCS information')
    # Calculate pixel scales in arcsec/pixel in the X (row) and Y (column)
    # direction
    cdelt1, cdelt2 = get_pixel_scales(f1)
    xmin = cdelt1 * crpix1 * -1.0
    xmax = cdelt1 * (nx - crpix1)
    ymin = cdelt2 * crpix2 * -1.0
    ymax = cdelt2 * (ny - crpix2)
    # If the x-axis is dispersed, don't use the WCS limits in X.  The None
    # sentinel is set here, after the computation above, so that it is not
    # overwritten before smallest_size can see it.
    if dispersed_x:
        xmin = None
        xmax = None
    return (xmin, xmax, ymin, ymax)
def get_pixel_scales(f1):
"""Calculate the pixel scales in the X (row) and Y (column) directions
from the WCS CD matrix, in arcsec/pixel
"""
hdr = f1[1].header
extname = hdr['EXTNAME']
if extname == 'SCI':
cd1_1 = hdr['CD1_1']
cd1_2 = hdr['CD1_2']
cd2_1 = hdr['CD2_1']
cd2_2 = hdr['CD2_2']
elif extname == 'EVENTS':
cd1_1 = hdr['TC2_2']
cd1_2 = hdr['TC2_3']
cd2_1 = hdr['TC3_2']
cd2_2 = hdr['TC3_3']
else:
log.error('No SCI or EVENTS extension to get WCS information')
cdelt1 = math.sqrt(cd1_1*cd1_1 + cd2_1*cd2_1)
cdelt2 = math.sqrt(cd1_2*cd1_2 + cd2_2*cd2_2)
cdelt1 = cdelt1 * 3600.0
cdelt2 = cdelt2 * 3600.0
return cdelt1, cdelt2
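# Worked example (hypothetical CD matrix): a diagonal CD matrix with
# CD1_1 = CD2_2 = 1.41e-5 deg/pix and zero cross terms gives
# cdelt1 = cdelt2 = 1.41e-5 * 3600 ~= 0.0508 arcsec/pix, close to the
# STIS CCD plate scale.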
def get_siaf_limits(x, y):
"""Given arrays of X and Y values from the SIAF in ideal coordinates
(relative to the aperture reference point), return the X and Y limits
"""
xmin = x.min()
xmax = x.max()
ymin = y.min()
ymax = y.max()
return (xmin, xmax, ymin, ymax)
def smallest_size(wcslimits, siaflimits):
"""Find the limiting values of X and Y (ideal coords) given
the limits from the SIAF projected aperture and those calculated from the
WCS applied to the limits of the data array
"""
    if wcslimits[0] is None:
xmin = siaflimits[0]
xmax = siaflimits[1]
else:
xmin = max(wcslimits[0], siaflimits[0])
xmax = min(wcslimits[1], siaflimits[1])
ymin = max(wcslimits[2], siaflimits[2])
ymax = min(wcslimits[3], siaflimits[3])
return (np.array([xmin, xmin, xmax, xmax, xmin]),
np.array([ymin, ymax, ymax, ymin, ymin]))
def coords_from_s_region(s_region):
"""Helper function to extract the RA and DEC as lists of floating
point numbers from the S_REGION string"""
first_coordinate = 0
coords = s_region.split()
while coords[first_coordinate] in ['POLYGON', 'ICRS']:
first_coordinate = first_coordinate + 1
ra = coords[first_coordinate::2]
ra = [float(x) for x in ra]
dec = coords[first_coordinate+1::2]
dec = [float(x) for x in dec]
return (ra, dec)
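# A quick example (hypothetical coordinates):
#     >>> coords_from_s_region('POLYGON ICRS 10.0 20.0 10.1 20.0 10.1 20.1')
#     ([10.0, 10.1, 10.1], [20.0, 20.0, 20.1])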
def write_region_file(input_file, include_mast=True):
"""Convenience function to write a DS9 regions file from the S_REGION
value in the header of a file. If include_mast is set to True, will
also get the existing S_REGION value from the mast catalog and include
it in the region file. The region file will include the keyword S_REGION
in blue, the RA_APER and DEC_APER in red, and the mast catalog value of the
S_REGION in green, if include_mast is True"""
f1 = fits.open(input_file)
rootname = f1[0].header['ROOTNAME']
ra_aper = f1[1].header['RA_APER']
dec_aper = f1[1].header['DEC_APER']
s_region = f1[1].header['S_REGION']
f1.close()
f_region = open(rootname+'.reg', mode='w')
text = 'icrs; polygon'
ra, dec = coords_from_s_region(s_region)
for x, y in zip(ra, dec):
text = text + ' {} {}'.format(x, y)
text = text + " # color=blue"
f_region.write(text)
f_region.write('\n')
    text = 'icrs; point' + ' {} {}'.format(ra_aper, dec_aper) + ' # color=red \n'
    # the aperture position is written regardless of include_mast
    f_region.write(text)
    # Get existing S_REGION value from MAST
    if include_mast:
        from astroquery.mast import Observations
        row = Observations.query_criteria(obs_id=rootname)
        existing_s_region = row['s_region'][0]
text = 'icrs; polygon'
ra, dec = coords_from_s_region(existing_s_region)
for x, y in zip(ra, dec):
text = text + ' {} {}'.format(x, y)
text = text + " # color=green"
f_region.write(text)
f_region.write('\n')
f_region.close()
return
def main(rootnames, dry_run=False):
if rootnames is None:
log.error("No rootnames specified")
return
files_to_process = get_files_to_process(rootnames)
hst_siaf = pysiaf.Siaf('HST')
for input_file in files_to_process:
add_s_region(input_file, hst_siaf, dry_run=dry_run)
return
def call_main():
parser = argparse.ArgumentParser(
"""Add S_REGION value to raw data headers"""
)
parser.add_argument('rootnames', nargs='+',
help='Rootnames to be processed')
parser.add_argument(
'--dry_run', action='store_true',
help="Calculate S_REGION value, but don't write to data header[s]")
args = parser.parse_args()
# if '--version' in args:
# print(__version__)
# sys.exit(0)
main(args.rootnames, dry_run=args.dry_run)
if __name__ == '__main__':
call_main()
| 18,923 | 37.938272 | 112 |
py
|
stistools
|
stistools-master/stistools/wavelen.py
|
import numpy as N
from . import evaldisp
from . import gettable
from . import radialvel
from . import r_util
DEG_RAD = N.pi / 180. # degrees to radians
SPEED_OF_LIGHT = 299792.458 # km / s
def compute_wavelengths(shape, phdr, hdr, helcorr):
"""Compute a 2-D array of wavelengths, one value for each image pixel.
Parameters
----------
shape : tuple of two ints
the number of rows and columns in the output image
phdr : fits Header object
primary header
hdr : fits Header object
extension header
helcorr : string
"PERFORM" if heliocentric correction should be done
Returns
-------
wavelengths : ndarray of float64
an array of wavelengths, of the same shape (nrows, ncols) as
the output image
"""
REF_ANGLE = 0.315 # degrees
PSEUDOAPERTURES = ["E1", "E2", "D1"] # currently defined pseudo-aps
(nrows, ncols) = shape
opt_elem = phdr.get("opt_elem", default="keyword_missing")
cenwave = phdr.get("cenwave", default=0)
aperture = phdr.get("aperture", default="keyword_missing")
propaper = phdr.get("propaper", default="keyword_missing")
sclamp = phdr.get("sclamp", default="NONE")
disptab = phdr.get("disptab", default="keyword_missing")
apdestab = phdr.get("apdestab", default="keyword_missing")
inangtab = phdr.get("inangtab", default="keyword_missing")
ra_targ = phdr.get("ra_targ", default="keyword_missing")
dec_targ = phdr.get("dec_targ", default="keyword_missing")
expstart = hdr.get("expstart", default=0.)
expend = hdr.get("expend", default=0.)
crpix2 = hdr.get("crpix2", default=0.)
ltm = hdr.get("ltm1_1", default=1.)
ltv1 = hdr.get("ltv1", default=0.)
ltv2 = hdr.get("ltv2", default=0.)
shifta1 = hdr.get("shifta1", default=0.)
shifta2 = hdr.get("shifta2", default=0.)
disptab = r_util.expandFileName(disptab)
apdestab = r_util.expandFileName(apdestab)
inangtab = r_util.expandFileName(inangtab)
# Modify ltv and crpix2 for zero-indexed pixels.
ltv1 += (ltm - 1.)
crpix2 -= 1.
binaxis1 = round(1. / ltm) # should be 1, 2, 4 or 8
# These offsets have not been converted from one-indexing to
# zero-indexing, but that's OK because the input image must not
# be binned in the cross-dispersion (axis 2) direction.
offset = shifta2 - ltv2
if sclamp != "NONE":
nchar = len(propaper)
ending = propaper[nchar-2:nchar]
if ending in PSEUDOAPERTURES:
aperture = aperture + ending
if helcorr == "PERFORM":
v_helio = radialvel.radialVel(ra_targ, dec_targ,
(expstart + expend) / 2.)
hfactor = (1. - v_helio / SPEED_OF_LIGHT)
else:
hfactor = 1.
# Get the dispersion relation. This will be in the form of
# a set of coefficients at several positions along the slit, i.e.
# disp_coeff[i] is the set of coefficients at position a2center[i].
filter = {"opt_elem": opt_elem, "cenwave": cenwave}
disp_info = gettable.getTable(disptab, filter,
sortcol="a2center", at_least_one=True)
ref_aper = disp_info.field("ref_aper")[0] # name of reference aperture
a2center = disp_info.field("a2center") - 1. # zero indexing
ncoeff = disp_info.field("ncoeff")[0] # same for all rows
disp_coeff = disp_info.field("coeff")
delta_offset1 = get_delta_offset1(apdestab, aperture, ref_aper)
apdes_info = gettable.getTable(apdestab, {"aperture": aperture},
exactly_one=True)
# Check whether ANGLE is a column in this table.
names = []
for name in apdes_info.names:
names.append(name.lower())
if "angle" in names:
angle = apdes_info.field("angle")[0]
else:
print("Warning: Column ANGLE not found in", apdestab)
angle = REF_ANGLE
del names
delta_tan = N.tan(angle * DEG_RAD) - N.tan(REF_ANGLE * DEG_RAD)
# Note: this assumes a first-order spectrum, but at the time of
# writing there's actually no distinction in any of the iac tables.
filter = {"opt_elem": opt_elem, "cenwave": cenwave, "sporder": 1}
inang_info = gettable.getTable(inangtab, filter, exactly_one=True)
wavelengths = N.zeros((nrows, ncols), dtype=N.float64)
image_pixels = N.arange(ncols, dtype=N.float64)
# Convert from image pixels to reference pixels (but zero indexed).
pixels = (image_pixels - ltv1) * binaxis1
for j in range(nrows):
row = float(j) + offset # account for possible subarray
# Interpolate to get dispersion relation for current (0-indexed) row.
coeff = r_util.interpolate(a2center, disp_coeff, row)
# Apply corrections.
adjust_disp(ncoeff, coeff, delta_offset1, shifta1, inang_info,
delta_tan, row-crpix2, binaxis1)
# Compute wavelength from pixel numbers.
wl = evaldisp.newton(pixels, coeff, cenwave)
wl *= hfactor
wavelengths[j] = wl.copy()
return wavelengths
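# Minimal usage sketch (the file name is hypothetical):
#     from astropy.io import fits
#     with fits.open('o1234567q_flt.fits') as ft:
#         wl = compute_wavelengths(ft[1].data.shape, ft[0].header,
#                                  ft[1].header, 'PERFORM')
# wl then holds one heliocentric-corrected wavelength per image pixel.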
def get_delta_offset1(apdestab, aperture, ref_aper):
"""Get the incidence angle offset.
Parameters
----------
apdestab : string
name of the aperture description table
aperture : string
aperture (slit) name
ref_aper : string
name of the reference aperture, the one that was
used to calculate the dispersion relation
Returns
-------
angle : float
incidence angle offset in degrees
"""
# Get the offset for the aperture that was used for the observation.
apdes_info = gettable.getTable(apdestab, {"aperture": aperture},
exactly_one=True)
aperture_offset1 = apdes_info.field("offset1")[0]
# Get the offset for the aperture that was used for creating the
# dispersion relation.
apdes_info = gettable.getTable(apdestab, {"aperture": ref_aper},
exactly_one=True)
ref_aper_offset1 = apdes_info.field("offset1")[0]
return aperture_offset1 - ref_aper_offset1
def adjust_disp(ncoeff, coeff, delta_offset1, shifta1, inang_info,
delta_tan, delta_row, binaxis1):
"""Adjust the dispersion coefficients.
The changes to the coefficients are for the incidence angle
correction, the offset from the SHIFTA1 keyword, and the tilt
of the slit. The coefficients will be modified in-place.
Parameters
----------
ncoeff : int
number of dispersion coefficients
coeff : ndarray of float64
array of dispersion coefficients, modified in-place
delta_offset1 : float
incidence angle offset in degrees
shifta1 : float
MSM offset (ref. pixels) in the dispersion direction
delta_tan : float
difference in tangents of slit angle and ref angle
delta_row : float
difference between current row number and CRPIX2
binaxis1 : float
binning factor in dispersion direction
inang_info : rec_array
rows from the incidence-angle table
"""
iac_ncoeff1 = inang_info.field("ncoeff1")[0]
iac_coeff1 = inang_info.field("coeff1")[0]
iac_ncoeff2 = inang_info.field("ncoeff2")[0]
iac_coeff2 = inang_info.field("coeff2")[0]
for i in range(iac_ncoeff1):
coeff[i] += iac_coeff1[i] * delta_offset1
if iac_ncoeff2 > 0:
coeff[0] += iac_coeff2[0] * delta_offset1
if iac_ncoeff2 > 1:
coeff[1] += iac_coeff2[1] * delta_offset1**2
# Correct for MSM shift.
coeff[0] += shifta1
# Correct for slit tilt.
coeff[0] += (delta_tan * delta_row * binaxis1)
| 7,760 | 34.438356 | 78 |
py
|
stistools
|
stistools-master/stistools/sshift.py
|
#!/usr/bin/env python
"""
A Python module for aligning the spectra in different flat-fielded
images of an IMSET. These files can then be combined with
along-the-slit dithering to reject hot pixels and cosmic rays. The
POSTARG2 keyword is used to determine the number of rows to be
shifted.
"""
from astropy.io import fits
__version__ = '1.7 (2010-Apr-27)'
def shiftimage(infile, outfile, shift=0):
"""
Shift each image extension of an input file by N rows and write the
new image extension to the output file.
"""
fin = fits.open(infile) # flat-fielded file
fout = fits.HDUList() # shifted flat-field file
phdr = fin[0].header
phdr.add_history('SSHIFT complete ...')
phdr.add_history(' all extensions were shifted by %d rows' % shift)
fout.append(fits.PrimaryHDU(header=phdr))
for exten in fin[1:]:
image = exten.data.copy()
image[:, :] = 0
if shift > 0:
image[shift:] = exten.data[:-shift]
elif shift < 0:
image[:shift] = exten.data[-shift:]
else:
image[:] = exten.data[:]
fout.append(fits.ImageHDU(header=exten.header, data=image))
fout.writeto(outfile)
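# Minimal usage sketch (file names are hypothetical):
#     shiftimage('obs1_flt.fits', 'obs1_sfl.fits', shift=5)
# copies every extension of obs1_flt.fits shifted up by 5 rows, with the
# 5 vacated rows at the bottom filled with zeros.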
def sshift(input, output=None, shifts=None, platescale=None,
tolerance=None):
""" Align spectra from different images of an imset.
Parameters
----------
input : list
A list of input filenames. These must be STIS flat-
fielded (_flt) image FITS files. This argument will accept a
single filename or a list of filenames.
shifts : list, optional
A list of integers indicating the number of rows to shift
each image of each file in the cross-dispersion (Y-) direction.
platescale : float, optional
The size of a pixel in arcseconds. Used to convert
the value of the POSTARG2 keyword to pixels.
tolerance : float, optional
The allowed difference between calculated shifts and
integer pixel shifts (fraction of pixel).
    output : list, optional
        A list of output filenames. The number of output
        filenames must match the number of input filenames. If no output
        is given, then the _flt substring of the input file is replaced by
        the _sfl substring to create an output file. This option will
        accept a single filename or a list of filenames.
Notes
------
Author:
- Paul Barrett (STScI)
"""
# History:
# 2003/09/22 PEB - version 1.0
# 2003/11/05 PEB - version 1.1
# 2003/11/10 PEB - version 1.2 - add postarg1/2, wavecal checks
# 2003/11/17 PEB - version 1.3 - add history cards
# 2004/09/13 PEB - version 1.4 - set PLATESC, default:0.0507
# add tolerance keyword, default: 0.1
# 2004/09/23 PEB - version 1.5 - check for mixed dataset
# - fixed integer shift bug
# removed wavecorr step
# check for binned data and non-integral
# shifts.
# 2004/09/24 PEB - version 1.6 - add keyword consistency checks
# Setup input and output filename lists, so iteration can be done
# over a list of zipped filenames.
if not isinstance(input, list):
input = [input]
elif not input:
raise ValueError(
'No input files found. Possibly using wrong directory.')
if output is None:
output = len(input)*[None]
elif not isinstance(output, list):
output = [output]
if shifts is None:
pass
elif not isinstance(shifts, list):
shifts = [shifts]
if tolerance is None:
tolerance = 0.1
if len(input) != len(output):
raise ValueError(
'number of output files is not equal to number input files')
if shifts is not None:
for shift in shifts:
if not isinstance(shift, int):
raise ValueError('shift value must be an integer')
xposs, yposs, xpos0, ypos0 = [], [], None, None
proposid, obset_id, targname = None, None, None
propaper, opt_elem, cenwave = None, None, None
binaxis1, binaxis2 = None, None
for infile in input:
# Read the POSTARG2 keyword (in the primary header) and the
# CRPIX2 keyword (in the 1st extension) to determine the
# relative shift of each input file. Choose a reference
# position that is closest to Y-pixel 512.
infil = fits.open(infile)
phdr = infil[0].header
if platescale is None:
# platescale = phdr['PLATESC']
platescale = 0.05077
else:
platescale = float(platescale)
if phdr['FLATCORR'].upper() != 'COMPLETE':
raise ValueError(
                'Input file has not been flat-field corrected.')
# Check that TARGNAME is the same.
if targname is None:
targname = phdr['TARGNAME']
elif targname != phdr['TARGNAME']:
raise ValueError('Not all exposures are for the same target.')
# Check that all PROPOSID and OBSET_ID values are the same.
if proposid is None:
proposid = phdr['PROPOSID']
obset_id = phdr['OBSET_ID']
elif proposid != phdr['PROPOSID'] or obset_id != phdr['OBSET_ID']:
raise ValueError(' Not all exposures are from the same visit;'
' placement of the spectrum on the detector will'
' differ.')
# Check that PROPAPER, OPT_ELEM, CENWAVE are the same.
if propaper is None:
propaper = phdr['PROPAPER']
opt_elem = phdr['OPT_ELEM']
cenwave = phdr['CENWAVE']
elif propaper != phdr['PROPAPER'] or opt_elem != phdr['OPT_ELEM'] or \
cenwave != phdr['CENWAVE']:
raise ValueError('Different observing configurations have been used.')
# Check that BINAXIS1 and BINAXIS2 are the same.
if binaxis1 is None:
binaxis1 = phdr['BINAXIS1']
binaxis2 = phdr['BINAXIS2']
elif binaxis1 != phdr['BINAXIS1'] or binaxis2 != phdr['BINAXIS2']:
raise ValueError('Different binnings have been used.')
# Check that all POSTARG1 values are the same (within reason).
xpos = phdr['POSTARG1']
if xpos0 is None:
xpos0 = xpos
elif abs(xpos - xpos0) > 0.05:
raise ValueError('POSTARG1 values of input files are not equal.')
# Get the POSTARG2 values and the one that is nearest to row 512.
ypos = phdr['POSTARG2'] / platescale
ypix = infil[1].header['CRPIX2'] - 512
if ypos0 is None or abs(ypix + ypos) < abs(ypix + ypos0):
ypos0 = ypos
yposs.append(ypos)
# Check for non-integral POSTARG2 values and calculate array of
# pixel shifts.
if shifts is None:
shifts = []
for ypos in yposs:
dypos = ypos - ypos0
if abs(abs(dypos) - int(abs(dypos)+0.5)) > tolerance:
raise ValueError("POSTARG2 shift not within the specified "
"tolerance {} pix of integer-pixel shift".format(tolerance))
# 'POSTARG2 shift greater than specified tolerance: %d' % tolerance
# 'non-integral POSTARG2 value or incorrect plate scale.'
if dypos < 0.:
ishift = -int(dypos-0.5)
else:
ishift = -int(dypos+0.5)
if ishift % binaxis2:
raise ValueError('Non-integral pixel shift for binned data')
shifts.append(ishift//binaxis2)
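    # Worked example (hypothetical values): with the default plate scale of
    # 0.05077 arcsec/pix, POSTARG2 values of 0.0 and 0.25385 arcsec give
    # dypos = 5.0 pix for the second file, hence a shift of -5 rows.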
# Process each file using corresponding pixel shift.
print('input-file pixel-shift')
for infile, outfile, npixel in zip(input, output, shifts):
fin = fits.open(infile)
# Use default output file name.
if outfile is None:
import re
            outfile = re.sub(r'flt\.', 'sfl.', infile, count=1)
if binaxis2 == 1:
print('{:>18}: {:3}'.format(infile, npixel))
else:
print('{:>18}: {:3} binned'.format(infile, npixel))
shiftimage(infile, outfile, shift=npixel)
fin.close()
if __name__ == '__main__':
import sys
import getopt
output, shifts, scale, toler = None, None, None, None
short_opts = 'o:s:p:t:h'
long_opts = ['output=', 'shifts=', 'platescale=', 'tolerance=', 'help']
opts, args = getopt.getopt(sys.argv[1:], short_opts, long_opts)
for opt in opts:
if opt[0] == '-o' or opt[0] == '--output':
output = opt[1].split(',')
elif opt[0] == '-s' or opt[0] == '--shifts':
shifts = [int(v) for v in opt[1].split(',')]
elif opt[0] == '-p' or opt[0] == '--platescale':
            scale = float(opt[1])
elif opt[0] == '-t' or opt[0] == '--tolerance':
            toler = float(opt[1])
elif opt[0] == '-h' or opt[0] == '--help':
print(sshift.__doc__)
sys.exit()
if len(args) > 0:
sshift(args, output=output, shifts=shifts, platescale=scale,
tolerance=toler)
else:
print("""Usage: sshift [-o|--output 'files'] [-s|--shifts 'shifts']
[-p|--platescale scale] [-t|--tolerance tol] [-h|--help] input-files""")
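# --- Illustrative programmatic call (added sketch; the rootnames below are
# invented, and the import path assumes this module is stistools/sshift.py) ---
#
#     from stistools.sshift import sshift
#     sshift(['o8p1a1010_flt.fits', 'o8p1a1020_flt.fits'],
#            platescale=0.05077, tolerance=0.1)
#
# Each input is shifted to the reference POSTARG2 position and, absent
# explicit output names, written with the 'sfl' suffix in place of 'flt'.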
| 9,359 | 34.589354 | 93 |
py
|
stistools
|
stistools-master/stistools/evaldisp.py
|
def newton(x, coeff, cenwave, niter=4):
"""Return the wavelength corresponding to pixel x.
The dispersion solution is evaluated iteratively, and the slope
(dispersion) for Newton's method is determined numerically, using a
difference in wavelength of one Angstrom. Note that the evalDisp
in this file assumes that the grating is first order.
Parameters
-----------
x : float or ndarray
a single pixel number or an array of pixel numbers
coeff : array_like object
a list of eight elements containing the
dispersion coefficients as read from a STIS _dsp.fits table
cenwave : int or float
central wavelength, in Angstroms
niter : int
number of iterations
Returns
-------
wavelength : float or ndarray
        a single wavelength or an array (ndarray) of wavelengths,
in Angstroms
"""
wl = cenwave
x0 = evalDisp(coeff, wl)
delta_wl = 1. # one Angstrom
for i in range(niter):
x1 = evalDisp(coeff, wl+delta_wl)
dispersion = delta_wl / (x1 - x0)
wl += dispersion * (x - x0)
x0 = evalDisp(coeff, wl)
return wl
def evalDisp(coeff, wl):
"""Return the pixel corresponding to wavelength wl.
Notes
-----
The expression in the calstis code is::
x = coeff[0] +
coeff[1] * m * wl +
coeff[2] * m**2 * wl**2 +
coeff[3] * m +
coeff[4] * wl +
coeff[5] * m**2 * wl +
coeff[6] * m * wl**2 +
coeff[7] * m**3 * wl**3
This version of the function to evaluate the dispersion relation
assumes that the grating is first order, i.e. m = 1. The dispersion
coefficients give one-indexed pixel coordinates (reference pixels),
but this function converts to zero-indexed pixels.
Parameters
-----------
coeff : array_like object
a list of eight elements containing the
dispersion coefficients as read from a STIS _dsp.fits table
wl : float or ndarray
        a single wavelength or an array (ndarray) of wavelengths,
in Angstroms
Returns
--------
pix_number : float or ndarray
the pixel number (or array of pixel numbers) corresponding
to the input wavelength(s); note that these are zero indexed
"""
c = [0., 0., 0., 0.]
c[0] = coeff[0] + coeff[3]
c[1] = coeff[1] + coeff[4] + coeff[5]
c[2] = coeff[2] + coeff[6]
c[3] = coeff[7]
# x = c[0] + c[1] * wl + c[2] * wl**2 + c[3] * wl**3
x = c[3] * wl
x = (c[2] + x) * wl
x = (c[1] + x) * wl
x = c[0] + x
return x - 1. # zero indexed
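# --- Illustrative usage (added sketch): the coefficients below are invented,
# not taken from a real STIS _dsp.fits table, and serve only to show the
# forward/inverse round trip between evalDisp() and newton().
def _example_round_trip():
    """Map a wavelength to a pixel with evalDisp(), then invert the relation
    with newton(); the result should match the input wavelength closely."""
    coeff = [500.0, 0.1, 1.0e-7, 0.0, 0.0, 0.0, 0.0, 0.0]
    cenwave = 7751.0
    pixel = evalDisp(coeff, cenwave)
    return newton(pixel, coeff, cenwave)  # ~7751.0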
| 2,679 | 27.510638 | 72 |
py
|
stistools
|
stistools-master/stistools/orbit.py
|
import math
from astropy.io import fits
TWOPI = (math.pi * 2.0)
SEC_PER_DAY = 86400.0
class HSTOrbit(object):
"""Orbital parameters.
The public methods are getOrbitper and getPos.
"""
def __init__(self, spt):
"""Orbital parameters.
Parameters
----------
spt: str
Name of the support file (rootname_spt.fits).
"""
self.orb = {}
self._readOrbitalParameters(spt)
def _readOrbitalParameters(self, spt):
"""Get the orbital parameters from the spt primary header.
Parameters
----------
spt: str
Name of the support file (rootname_spt.fits)
"""
fd = fits.open(spt, mode="readonly")
phdr = fd[0].header
self.orb = {}
self.orb["argperig"] = phdr["argperig"]
self.orb["cirveloc"] = phdr["cirveloc"]
self.orb["cosincli"] = phdr["cosincli"]
self.orb["ecbdx3"] = phdr["ecbdx3"]
self.orb["eccentry"] = phdr["eccentry"]
self.orb["eccentx2"] = phdr["eccentx2"]
self.orb["ecbdx4d3"] = phdr["ecbdx4d3"]
self.orb["epchtime"] = phdr["epchtime"]
self.orb["esqdx5d2"] = phdr["esqdx5d2"]
self.orb["fdmeanan"] = phdr["fdmeanan"]
self.orb["hsthorb"] = phdr["hsthorb"]
self.orb["meananom"] = phdr["meananom"]
self.orb["rascascn"] = phdr["rascascn"]
self.orb["rcargper"] = phdr["rcargper"]
self.orb["rcascnrv"] = phdr["rcascnrv"]
self.orb["sdmeanan"] = phdr["sdmeanan"]
self.orb["semilrec"] = phdr["semilrec"]
self.orb["sineincl"] = phdr["sineincl"]
fd.close()
def getOrbitper(self):
"""Return the orbital period.
Returns
-------
orbit period: float
Orbital period in seconds.
"""
return 2. * self.orb["hsthorb"]
def getPos(self, mjd):
"""Get position and velocity at a given time.
# S. Hulbert, Oct 91 Original
# PEH, 2008 Oct 3 Converted from SPP to Python
Parameters
----------
mjd: float
Time (Modified Julian Date).
Returns
-------
(x_hst, v_hst): tuple of two vectors (3-element lists)
Position and velocity at the specified time.
"""
# These will be returned, after assigning the actual values.
x_hst = [0., 0., 0.]
v_hst = [0., 0., 0.]
argperig = self.orb["argperig"]
cirveloc = self.orb["cirveloc"]
cosincli = self.orb["cosincli"]
ecbdx3 = self.orb["ecbdx3"]
eccentry = self.orb["eccentry"]
eccentx2 = self.orb["eccentx2"]
ecbdx4d3 = self.orb["ecbdx4d3"]
epchtime = self.orb["epchtime"]
esqdx5d2 = self.orb["esqdx5d2"]
fdmeanan = self.orb["fdmeanan"]
meananom = self.orb["meananom"]
rascascn = self.orb["rascascn"]
rcargper = self.orb["rcargper"]
rcascnrv = self.orb["rcascnrv"]
sdmeanan = self.orb["sdmeanan"]
semilrec = self.orb["semilrec"]
sineincl = self.orb["sineincl"]
# convert time from MJD to seconds since 1985 Jan 1
sec85 = (mjd - 46066.0) * SEC_PER_DAY
# calculate time difference between observation and epoch time
deltim = sec85 - epchtime
# mean anomaly
temp2 = fdmeanan * deltim
temp3 = 0.5 * sdmeanan * deltim*deltim
m = meananom + TWOPI * (temp2 + temp3)
sin_m = math.sin(m)
cos_m = math.cos(m)
# true anomaly (equation of the center)
v = m + sin_m * (eccentx2 + ecbdx3 * cos_m * cos_m -
ecbdx4d3 * sin_m * sin_m + esqdx5d2 * cos_m)
sin_v = math.sin(v)
cos_v = math.cos(v)
# distance
r = semilrec / (1.0 + eccentry * cos_v)
# argument of perigee
wsmall = TWOPI * (argperig + rcargper * deltim)
# longitude of the ascending node
wbig = TWOPI * (rascascn + rcascnrv * deltim)
sin_wbig = math.sin(wbig)
cos_wbig = math.cos(wbig)
# calculate the rectangular coordinates
# (see Smart, Spherical Astronomy, section 75, page 122-124)
f = wsmall + v
sin_f = math.sin(f)
cos_f = math.cos(f)
x_hst[0] = r * (cos_wbig * cos_f - cosincli * sin_wbig * sin_f)
x_hst[1] = r * (sin_wbig * cos_f + cosincli * cos_wbig * sin_f)
x_hst[2] = r * sineincl * sin_f
a0 = cirveloc * eccentry * sin_v / r
a1 = cirveloc * (1.0 + eccentry * cos_v) + \
TWOPI * rcargper * r
v_hst[0] = a0 * x_hst[0] - \
a1 * (cos_wbig * sin_f + cosincli * sin_wbig * cos_f) - \
TWOPI * rcascnrv * x_hst[1]
v_hst[1] = a0 * x_hst[1] - \
a1 * (sin_wbig * sin_f - cosincli * cos_wbig * cos_f) + \
TWOPI * rcascnrv * x_hst[0]
v_hst[2] = a0 * x_hst[2] + a1 * sineincl * cos_f
# Convert from meters to kilometers.
for i in range(3):
x_hst[i] /= 1000.0
v_hst[i] /= 1000.0
return x_hst, v_hst
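# --- Illustrative usage (added sketch; the support-file name below is a
# placeholder for a real rootname_spt.fits, and the MJD is arbitrary) ---
def _example_orbit(spt_file="oabc01010_spt.fits", mjd=58000.0):
    """Read the orbital parameters from a support file, print the orbital
    period, and return HST's position and velocity vectors (km, km/s)."""
    orbit = HSTOrbit(spt_file)
    print("orbital period: {:.1f} s".format(orbit.getOrbitper()))
    return orbit.getPos(mjd)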
| 5,144 | 29.443787 | 71 |
py
|
stistools
|
stistools-master/stistools/defringe/_fit1d.py
|
#
# This module reproduces much of the behaviour of IRAF 1-d fitting routines
import math
import numpy as np
from scipy.interpolate import LSQUnivariateSpline
def fit1d(x, y, weights=None,
naverage=1,
function="spline3",
order=3,
low_reject=3.0,
high_reject=3.0,
niterate=0,
grow=0.0):
"""Main 1-d fitting function. The parameters can be used exactly like they are in IRAF tasks
like fit1d, response, etc. The default behaviour is to fit a cubic spline with order 3
(3 equal regions, 2 knots) with no sigma clipping.
Parameters
----------
x: 1-d ndarray
The array of independent values
y: 1-d ndarray
The array of dependent values. Must have same length as x, or function returns None
weights: 1-d ndarray or None
The array of weights. If set to None, the points are given equal weight of 1.0. If not
set to None, must have the same length as x and y, or the function returns None
naverage: int
The number of adjacent elements of x, y, and weight that are averaged before
fitting is done. If naverage=1, no averaging is done.
function: str
Fitting function. Currently only "spline1" and "spline3" are supported
order: int
        Order of fitting function. The number of knots in the spline fit is (order-1)
low_reject: float
Points with y values less than fit - low_reject*(rms deviation of data from fit) are rejected
when the fit is iterated. Ignored if niterate = 0.
    high_reject: float
        Points with y values greater than fit + high_reject*(rms deviation of data from fit)
        are rejected when the fit is iterated. Ignored if niterate = 0.
niterate: int
Number of times sigma-clipping iterations are performed. niterate=0 corresponds to no
sigma clipping
grow: float
Used to calculate how many elements adjacent to reject pixels are also rejected.
Not currently implemented, but included to duplicate the IRAF fitting parameter.
Returns
-------
fit: scipy.interpolate.fitpack2.LSQUnivariateSpline object
The result of the fit. It can be used to calculate the fitted
values for the input x values:
fitted_values = fit(x)
"""
nx = len(x)
ny = len(y)
if nx != ny:
print("X and Y vectors have different lengths, aborting fit1d")
return None
if weights is not None:
nw = len(weights)
if nw != nx:
print("Weight vector has different length to data vectors, aborting fit1d")
print("Use weight=None to assign equal weights")
return None
else:
weights = np.ones(nx)
xfit, yfit, wfit = wtrebin(x, y, weights, naverage)
fitted = fit_with_rejection(xfit, yfit, wfit, function, order, low_reject, high_reject,
niterate, grow)
return fitted
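# --- Illustrative usage (added sketch): fit synthetic noisy data with the
# IRAF-style interface above; the sample values are made up for demonstration.
def _example_fit1d():
    """Fit a noisy sine curve with a cubic spline (5 regions, 2 rejection
    iterations) and return the fitted values evaluated at the input points."""
    x = np.arange(100, dtype=np.float64)
    y = np.sin(x / 15.0) + np.random.normal(0.0, 0.05, 100)
    fit = fit1d(x, y, function="spline3", order=5,
                low_reject=3.0, high_reject=3.0, niterate=2)
    return fit(x)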
def get_knots(x, number_of_knots):
"""Calculate equally-spaced knots for an array, given the array and the number of knots
to calculate. The number of knots is 1 fewer than the order of the fit.
Parameters
----------
x: 1-d ndarray
The input array for which knots are to be calculated
number_of_knots: int
The number of knots to calculate
Returns
-------
knots: 1-d ndarray
The array of equally-spaced knots
"""
interval = x[-1] - x[0]
#
# If there are n knots, we are dividing the interval into (n+1) regions
#
subinterval = interval/float(number_of_knots + 1)
knots = x[0] + subinterval*np.arange(number_of_knots + 1)
#
# First knot is at x[0], we don't need that one
return knots[1:]
def fit_once(x, y, weights, function, order):
"""Do a single fit of function to (x, y) data.
Parameters
----------
x: 1-d ndarray
Input array of x-values
y: 1-d array
Input array of y-values
weights: 1-d array
Input array of weights
function: str
Function to be fitted. Currently only "spline1" and "spline3" are supported
order: int
Order of function to be fitted. Since only splines are currently supported,
the order is the same as the number of equal reqions the data domain is split into,
and 1 more than the number of equally-spaced knots.
Returns
-------
fit: scipy.interpolate.fitpack2.LSQUnivariateSpline object
This can be used to calculate the fitted values for the input x values:
fitted_values = fit(x)
"""
nx = len(x)
if function[:6] == "spline":
#
# Number of knots is 1 fewer than the order
knots = get_knots(x, order-1)
if function == "spline3":
k = 3
elif function == "spline1":
k = 1
else:
print("Spline fitting only valid for spline1 and spline3 functions")
return None
fitted = LSQUnivariateSpline(x, y, knots, w=weights, k=k)
else:
print("Not implemented yet")
return None
return fitted
def fit_with_rejection(x, y, weights, function, order, low_reject, high_reject, niterate, grow):
"""Fit with sigma-clipping
Parameters
----------
x: 1-d ndarray
The array of independent values
y: 1-d ndarray
The array of dependent values. Must have same length as x, or function returns None
weights: 1-d ndarray or None
The array of weights. If set to None, the points are given equal weight of 1.0. If not
set to None, must have the same length as x and y, or the function returns None
function: str
Fitting function. Currently only "spline1" and "spline3" are supported
order: int
        Order of fitting function. The number of knots in the spline fit is (order-1)
low_reject: float
Points with y values less than fit - low_reject*(rms deviation of data from fit) are rejected
when the fit is iterated. Ignored if niterate = 0.
    high_reject: float
        Points with y values greater than fit + high_reject*(rms deviation of data from fit)
        are rejected when the fit is iterated. Ignored if niterate = 0.
niterate: int
Number of times sigma-clipping iterations are performed. niterate=0 corresponds to no
sigma clipping
grow: float
Used to calculate how many elements adjacent to reject pixels are also rejected.
Not currently implemented, but included to duplicate the IRAF fitting parameter.
Returns
-------
fit: scipy.interpolate.fitpack2.LSQUnivariateSpline object
The result of the fit. It can be used to calculate the fitted
values for the input x values:
fitted_values = fit(x)
"""
fitted = fit_once(x, y, weights, function, order)
if niterate <= 0:
return fitted
npts = len(x)
rej_weights = np.ones(npts)
    for _ in range(niterate):
rms_deviation = calc_rms_deviation(x, y, weights*rej_weights, fitted)
deviation = (y - fitted(x))*rej_weights
above_hi_rej = np.where(deviation > high_reject*rms_deviation)
rej_weights[above_hi_rej] = 0.0
nhigh = len(above_hi_rej[0])
below_low_rej = np.where(deviation < -low_reject*rms_deviation)
rej_weights[below_low_rej] = 0.0
nlow = len(below_low_rej[0])
nbad = nhigh + nlow
        if nbad == 0:
            break
fitted = fit_once(x, y, weights*rej_weights, function, order)
return fitted
def calc_rms_deviation(x, y, weights, fitted):
"""Calculate the weighted RMS deviation between y and fitted(x)
Parameters
----------
x: 1-d ndarray
Input array of x-values
y: 1-d array
Input array of y-values
weights: 1-d array
Input array of weights
fitted: scipy.interpolate.fitpack2.LSQUnivariateSpline object
The result from running the fitting routine
Returns
-------
rms_deviation: float
The rms deviation
"""
    deviation = y - fitted(x)
    devsquared = weights*deviation*deviation
    sumsq = devsquared.sum()
    sumweights = weights.sum()
    rms_deviation = np.sqrt(sumsq/sumweights)
return rms_deviation
def wtrebin(x, y, weights=None, nbin=1):
"""Like the IRAF rg_wtbin. Bin the x, y, and weight data by averaging nbin values.
Parameters
----------
x: 1-d ndarray
The array of independent values
y: 1-d ndarray
The array of dependent values. Must have same length as x, or function
returns (None, None, None)
weights: 1-d ndarray or None
The array of weights. If set to None, the points are given equal weight of 1.0. If not
set to None, must have the same length as x and y, or the function
returns (None, None, None)
nbin: int
The number of adjacent elements of x, y, and weight that are averaged before
fitting is done. If nbin=1, no averaging is done and the input arrays are returned
Returns
-------
(xout, yout, wout): 3-tuple of ndarray objects
The binned/averaged x, y, and weight arrays
"""
if nbin == 1:
return x, y, weights
nx = len(x)
ny = len(y)
if nx != ny:
print("Lengths of x and y vectors different, aborting")
return None, None, None
    if weights is None:
weights = np.ones(nx)
nw = len(weights)
if nx != nw:
print("Lengths of data and weight vectors different, aborting")
return None, None, None
nout = int(math.ceil(nx/float(nbin)))
xout_temp = np.zeros((nbin, nout))
yout_temp = np.zeros((nbin, nout))
wtout_temp = np.zeros((nbin, nout))
for i in range(nbin):
ntemp = len(x[i::nbin])
xout_temp[i, :ntemp] = x[i::nbin]*weights[i::nbin]
yout_temp[i, :ntemp] = y[i::nbin]*weights[i::nbin]
wtout_temp[i, :ntemp] = weights[i::nbin]
xout = xout_temp.sum(axis=0)/wtout_temp.sum(axis=0)
yout = yout_temp.sum(axis=0)/wtout_temp.sum(axis=0)
wtout = wtout_temp.sum(axis=0)/float(nbin)
return xout, yout, wtout
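# --- Illustrative usage (added sketch): rebin a short made-up series by pairs
# to show what the naverage parameter of fit1d() does internally.
def _example_wtrebin():
    x = np.arange(6, dtype=np.float64)
    y = x ** 2
    # With nbin=2, each output point is the weighted average of two inputs.
    return wtrebin(x, y, nbin=2)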
| 10,262 | 30.194529 | 104 |
py
|
stistools
|
stistools-master/stistools/defringe/defringe.py
|
#! /usr/bin/env python
import argparse
from astropy.io import fits
import datetime
import os
import textwrap
import re
import numpy as np
from ..r_util import expandFileName
# 4 bad detector pixel or beyond aperture
# 8 data masked by occulting bar
# 512 bad pixel in reference file
sdqflags = 4 + 8 + 512 # "serious" data quality flags
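# A brief illustration (added note): a pixel is treated as "serious" when any
# of these bits is set in its DQ value, i.e. in NumPy terms
#     bad = np.bitwise_and(dq_array, sdqflags) > 0
# which is exactly how the fringe-flat DQ array is folded into the mask below.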
def defringe(science_file, fringe_flat, overwrite=True, verbose=True):
# dark_file=None, pixel_flat=None
"""Defringe by dividing the science spectrum by the fringe flat.
Based on the PyRAF `stsdas.hst_calib.stis.defringe` task.
Parameters
----------
science_file: str
The name of the input science file.
fringe_flat: str
The name of the input fringe flat file. This is the output from
`mkfringeflat`.
overwrite: bool
The name of the output file will be constructed from the name of the
input science file (`science_file`) by replacing the suffix with
'drj' or 's2d'. If the input name are the same a RuntimeError will
be raised, rather than modifying the input in-place.
If there is an existing file with the same name as the output name,
the existing file will be overwritten if `overwrite` is True (the
default is True).
verbose: bool
If True (the default), print more info.
Returns
-------
drj_filename: str
The name of the output file. This will have suffix '_drj' if the
input is G750L data, and the output name will have suffix '_s2d'
if the input is G750M.
"""
if science_file.endswith("_raw.fits"):
print('Warning: The science file name ends with "_raw.fits". If this '
'really is raw data, this script will fail.', flush=True)
# Define new filename:
science_file = os.path.normpath(expandFileName(science_file)) # Expand IRAF and UNIX $VARS
# Set the suffix to '_s2d' for G750M data, or to '_drj' for G750L data.
with fits.open(science_file) as fd:
opt_elem = fd[0].header['OPT_ELEM'].upper()
if opt_elem.endswith('M'):
suffix = "_s2d"
else:
suffix = "_drj"
sci_dir, sci_filename = os.path.split(science_file)
    sci_root = re.split(r'\.fits.*', sci_filename, flags=re.IGNORECASE)[0].rsplit('_', 1)[0]
drj_filename = os.path.join(sci_dir, sci_root + suffix + '.fits')
if science_file == drj_filename:
raise RuntimeError('The input and output file names cannot be the same.')
# Get the data from the fringe flat file:
fringe_flat = os.path.normpath(expandFileName(fringe_flat)) # Expand IRAF and UNIX $VARS
with fits.open(fringe_flat) as fringe_hdu:
# We assume that either the fringe flat data are in the primary HDU,
# or there is one imset containing the data.
if len(fringe_hdu) == 1:
fringe_data = fringe_hdu[0].data
fringe_dq = None
if verbose:
print('Fringe flat data were read from the primary HDU')
else:
fringe_data = fringe_hdu[("sci",1)].data
fringe_dq = fringe_hdu[("dq",1)].data
if verbose:
if fringe_dq is None:
print('Fringe flat data were read from the first imset')
else:
print('Fringe flat data and DQ were read from the first imset')
if fringe_data is None:
raise RuntimeError('There is no data in the fringe flat.')
# Since we're going to divide by fringe_data, make sure there aren't any
# pixels where it's zero. There shouldn't be any negative values, either.
fringe_mask = (fringe_data <= 0.)
if fringe_dq is not None:
temp = np.bitwise_and(fringe_dq, sdqflags)
fringe_dq_mask = (temp > 0)
del temp
fringe_mask = np.logical_or(fringe_mask, fringe_dq_mask)
# For pixels that are bad in the fringe flat, we will not make any change
# to the science data.
n_fringe_mask = fringe_mask.sum()
if n_fringe_mask > 0:
if verbose:
print('{} pixels in the fringe flat were less than or equal to 0'
.format(n_fringe_mask))
fringe_data[fringe_mask] = 1.
# Correct the data in the science file:
with fits.open(science_file) as science_hdu:
n_hdu = len(science_hdu)
# Get a list of all EXTVER values in the science file
extver = []
for hdunum in range(1, n_hdu):
hdr = science_hdu[hdunum].header
if 'extver' in hdr:
extver.append(int(hdr['extver']))
# Remove duplicates, and sort
imsets = set(extver)
# Apply the fringe flat to all image sets.
for extver in imsets:
try:
science_data = science_hdu[('sci', extver)].data
except KeyError:
print('Warning: HDU ("SCI", {}) not found'.format(extver))
science_data = None
try:
science_dq = science_hdu[('dq', extver)].data
except KeyError:
print('Warning: HDU ("DQ", {}) not found'.format(extver))
science_dq = None
try:
# Note that science_err will be None if this HDU has no data array.
science_err = science_hdu[('err', extver)].data
except KeyError:
print('Warning: HDU ("ERR", {}) not found'.format(extver))
science_err = None
# Divide science data and error arrays by fringe flat:
if science_data is not None:
science_data /= fringe_data
if science_err is not None:
science_err /= fringe_data
# Update the DQ array in the science file.
if science_dq is None:
science_dq = np.zeros(science_data.shape, dtype=np.int16)
if n_fringe_mask > 0:
# Flag pixels that had fringe flat data <= 0.
science_dq[fringe_mask] |= 512 # bad pixel in ref file
if fringe_dq is not None:
# Combine fringe flat DQ with science DQ.
science_dq = np.bitwise_or(fringe_dq, science_dq)
science_hdu[('dq', extver)].data = science_dq
if verbose:
print('Imset {} done'.format(extver))
# Update primary header:
science_hdu[0].header.add_history(' ')
science_hdu[0].header.add_history('DEFRINGING complete ...')
science_hdu[0].header.add_history(' reference fringe flat {}'.format(os.path.basename(fringe_flat)))
science_hdu[0].header.add_history(' {}'.format(str(datetime.datetime.now()).rsplit('.')[0]))
science_hdu[0].header.add_history(' science and error arrays divided by fringe flat.')
# Write to a new FITS file
# Remove old version, if it exists:
if os.path.exists(drj_filename) and overwrite:
print('Removing and recreating {}'.format(drj_filename))
os.remove(drj_filename)
science_hdu.writeto(drj_filename)
print('Defringed science saved to {}'.format(drj_filename))
return drj_filename
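# --- Illustrative call (added sketch; the rootnames are invented) ---
#
#     from stistools.defringe import defringe
#     drj = defringe('o8p1a1010_crj.fits', 'o8p1a1010_frr.fits')
#
# The returned path ends in '_drj.fits' for G750L input or '_s2d.fits' for
# G750M input.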
def parse_args():
parser = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter,
epilog=textwrap.dedent('''\
Steps
--------------------------------------------------------------
1. Get the data from the science and fringe flat files
2. Replace zero or negative values in the fringe flat with 1
3. Divide science data and error arrays by fringe flat
4. Flag DQ with 512 where the fringe flat was <= 0
5. Added HISTORY entries in the header
6. Write to a new fits file
Output
----------------------------------------------------------------
Outputs a file that has the same rootname as the input science file,
but with '_drj.fits' or '_s2d.fits' at the end. This is the final
defringed data.
    '''), description='Script to defringe STIS CCD spectra by dividing out the fringe flat')
parser.add_argument('science',
type=str,
help='file containing the calibrated science data. This should be '
'the output from prepspec')
parser.add_argument('fringeflat',
type=str,
help='file containing the fringe flat. This should be the output '
'from mkfringeflat')
return parser.parse_args()
if __name__ == '__main__':
args = parse_args()
defringe(args.science, args.fringeflat)
| 8,739 | 39.841121 | 109 |
py
|
stistools
|
stistools-master/stistools/defringe/normspflat.py
|
#! /usr/bin/env python
import os
import numpy as np
import warnings
from astropy.io import fits
from ..r_util import expandFileName
from ..calstis import calstis
from ._fit1d import fit1d
# Keyword choices for calstis reduction:
PERFORM = {
'ALL': ['DQICORR', 'BLEVCORR', 'BIASCORR', 'DARKCORR', 'FLATCORR', 'CRCORR'],
'G750M': ['WAVECORR', 'HELCORR', 'X2DCORR'],
'G750L': [],}
OMIT_G750L = PERFORM['G750M']
def normspflat(inflat, outflat='.', do_cal=True, biasfile=None, darkfile=None,
pixelflat=None, wavecal=None):
"""Normalize STIS CCD fringe flat.
Based on the PyRAF `stsdas.hst_calib.stis.normspflat` task.
Parameters
----------
inflat: str
Name of input fringe flat
outflat: str
Name of normalized fringe flat output or directory location. (Default=".")
do_cal: bool
Perform bias and dark subtraction and CR rejection? (Default=True)
biasfile: str or None
Name of superbias image. If None, use BIASFILE in main header of the inflat.
darkfile: str or None
Name of superdark image. If None, use DARKFILE in main header of the inflat.
pixelflat: str or None
Name of pixel-to-pixel flat. If None, use PFLTFILE in main header of the inflat.
wavecal: str or None
Name of wavecal file [ONLY FOR G750M SPECTRA]. If None, use WAVECAL in main
header of the inflat.
Returns
-------
outname: str
Fully qualified name of the outflat
"""
# These notes are based on the old STSDAS algorithm.
# Determine properties of inflat (assumed to be {RAW, CRJ, SX2, X2D, clff}):
inflat = os.path.abspath(inflat)
if not os.access(inflat, os.R_OK|os.W_OK):
raise FileNotFoundError('Cannot access inflat: {}'.format(inflat))
hdr0 = fits.getheader(inflat, ext=0)
rootname = hdr0['ROOTNAME'] # *** Or use something derived from os.path.basename(inflat)? ***
opt_elem = hdr0['OPT_ELEM'].upper()
if opt_elem not in ['G750L', 'G750M']:
raise ValueError('Unsupported opt_elem="{}"!'.format(opt_elem))
# *** TO DO: Confirm inflat has >= 2 SCI exts to allow CR-rejection *** line 185 in cl
# *** TO DO: Handle NRPTEXP keyword *** line 194 in cl
# Identify wavecal:
if opt_elem == 'G750M':
wavecal = wavecal or expandFileName(hdr0['WAVECAL'])
if not os.access(wavecal, os.R_OK):
raise FileNotFoundError('Cannot find G750M wavecal: {}'.format(wavecal))
if wavecal in [None, 'N/A', 'n/a', '']:
wavecal = '' # Format for non-specified wavecals going into calstis API
else:
wavecal = os.path.abspath(wavecal)
# Handle outflat filename formatting:
outflat = os.path.abspath(outflat)
if os.path.isdir(outflat):
outflat = os.path.join(outflat, '{}_nsp.fits'.format(rootname))
if not os.access(os.path.dirname(outflat), os.W_OK):
raise IOError('Do not have permission to write to: {}'.format(
os.path.dirname(outflat)))
perform = PERFORM['ALL'] + PERFORM[opt_elem]
if do_cal:
# Resolve reference files from supplied variables or header values:
ref_types = {
'BIASFILE': os.path.abspath(biasfile or expandFileName(hdr0['BIASFILE'])),
'DARKFILE': os.path.abspath(darkfile or expandFileName(hdr0['DARKFILE'])),
'PFLTFILE': os.path.abspath(pixelflat or expandFileName(hdr0['PFLTFILE'])),}
# Populate/repopulate the inflat header accordingly:
for i, (ref_type, ref) in enumerate(ref_types.items()):
if not os.access(ref, os.F_OK):
raise FileNotFoundError('Cannot access reference file: {}'.format(ref))
# Handle reference file paths via environment variables:
ref_var = 'reff{:.0f}'.format(i+1)
os.environ[ref_var] = os.path.abspath(os.path.dirname(ref)) + os.sep
# Keep $oref where it's the same:
if os.path.normpath(os.environ[ref_var]) == os.path.normpath(os.environ['oref']):
ref_var = 'oref'
#os.environ['oref'] = os.path.abspath(os.environ['oref']) # for when we cd
with fits.open(inflat, 'update') as f:
if ref_var == 'oref':
f[0].header[ref_type] = '{}${}'.format(ref_var, os.path.basename(ref))
else:
f[0].header[ref_type] = '${}/{}'.format(ref_var, os.path.basename(ref))
# Update calibration flags prior to calling calstis:
with fits.open(inflat, 'update') as f:
for keyword in perform:
if f[0].header[keyword] != 'COMPLETE':
f[0].header[keyword] = 'PERFORM'
if opt_elem == 'G750M':
output_suffix = 'sx2' # geometrically-corrected
elif opt_elem == 'G750L':
output_suffix = 'crj' # not geometrically-corrected
for keyword in OMIT_G750L:
if f[0].header[keyword] == 'PERFORM':
f[0].header[keyword] = 'OMIT'
# Call calstis:
old_cwd = os.getcwd() # outflat and ref_vars are abs paths
try:
os.chdir(os.path.dirname(inflat)) # Must be in same directory to pick up EPC file
trailer = os.path.join(os.path.dirname(outflat),
'{}_calstis.log'.format(rootname))
outroot = os.path.dirname(outflat) + os.sep
outname = os.path.join(os.path.dirname(outflat),
'{}_{}.fits'.format(rootname, output_suffix))
# Remove files from previous iterations of this script:
for old_file in [outname, trailer]:
if os.access(old_file, os.F_OK):
os.remove(old_file)
res = calstis(os.path.basename(inflat), wavecal=wavecal, outroot=outroot,
trailer=trailer)
if res != 0:
raise Exception('CalSTIS returned non-zero code: {}\n'
'See log for more details: {}'.format(res, trailer))
print('File written: {}'.format(outname))
finally:
os.chdir(old_cwd)
else: # not do_cal
outname = inflat
# Check if user-supplied inflat has all the recommended calibrations performed:
keyword_warnings = []
for keyword in perform:
if hdr0[keyword] != 'COMPLETE':
keyword_warnings.append(keyword)
if keyword_warnings:
warnings.warn('These calibration steps should be COMPLETE:\n{}'.format(
', '.join(keyword_warnings)))
# Read in the calibrated flat data:
data = fits.getdata(outname, ext=1)
numrows, numcols = np.shape(data)
# Do a line-by-line cubic spline fit to the fringe flat to remove the lamp function...
with fits.open(outname) as hdulist:
aperture = hdulist[0].header['APERTURE']
bincols = hdulist[0].header['BINAXIS1']
binrows = hdulist[0].header['BINAXIS2']
cenwave = hdulist[0].header['CENWAVE']
# If short-slit aperture (does not start with "52X"):
if aperture[0:3] != "52X":
flatdata = hdulist[1].data
# Find the row with max counts in the short-slit fringe flat
# Search between the middle 10% of rows and the middle 60% of columns
t_row = round(0.45 * numrows) # iraf: nint(0.45*numlines)+1
b_row = round(0.55 * numrows)
# Uses central 60% of column
l_col = round(0.2 * numcols) - 1 # iraf: nint(0.2*numcols)
r_col = round(0.8 * numcols)
row_avgs = np.array([np.average(row) for row in flatdata[t_row:b_row, l_col:r_col]])
max_row_idx = np.where(row_avgs == np.max(abs(row_avgs)))[0][0] + t_row # CL does an absolute value here
max_row = flatdata[max_row_idx] # xxx should this be used?
# Does some flux-filtering -- I think this is for determining the max rows
# Set rows (startrow, lastrow) to be fit according to aperture name (and possibly OPT_ELEM):
# '0.3X0.09', '0.2X0.06', '52X...'
if aperture == "0.3X0.09":
startrow = max_row_idx - round(4./binrows+0.25) - 1
lastrow = max_row_idx + round(3./binrows+0.25)
elif aperture == "0.2X0.06":
startrow = max_row_idx - round(3. / binrows + 0.25) - 1
lastrow = max_row_idx + round(2. / binrows + 0.25)
        elif aperture[0:3] != "52X":
            raise ValueError("Not able to understand APERTURE keyword: "
                             "{}".format(aperture))
elif opt_elem == "G750M":
startrow = round(92./binrows)
lastrow = startrow + round(1024./binrows) - 1
else:
startrow = 0
lastrow = numrows
# Details of spline fit determined according to OPT_ELEM + CENWAVE.
# G750M (various CENWAVEs), G750L (i.e. CENWAVE == 7751; various binning), other (never used?)
if opt_elem == "G750M":
fitted = np.ones(data.shape, dtype=data.dtype)
if cenwave < 9800:
for idx, row in enumerate(data):
if (idx >= startrow) and (idx < lastrow):
fit_data = row[85:1109]
xrange = np.arange(0, len(fit_data), 1.)
spl = fit1d(xrange, fit_data, naverage=2, function="spline3",
order=1, low_reject=5.0, high_reject=5.0, niterate=2)
row_fit = fit_data/spl(xrange)
row_fit[np.where(row_fit == 0.0)] = 1.0 # avoid zeros in output flat
fitted[idx, 85:1109] = row_fit.copy()
elif cenwave == 9851:
for idx, row in enumerate(data):
if (idx >= startrow) and (idx < lastrow):
fit_data = row[85:1109]
xrange = np.arange(0, len(fit_data), 1.)
spl = fit1d(xrange, fit_data, naverage=2, function="spline1",
order=2, low_reject=5.0, high_reject=5.0, niterate=2)
row_fit = fit_data/spl(xrange)
row_fit[np.where(row_fit == 0.0)] = 1.0 # avoid zeros in output flat
fitted[idx, 85:1109] = row_fit.copy()
else: # This applies to cenwave 9806 and 10363
for idx, row in enumerate(data):
if (idx >= startrow) and (idx < lastrow):
fit_data = row[85:1109]
xrange = np.arange(0, len(fit_data), 1.)
spl = fit1d(xrange, fit_data, naverage=2, function="spline3",
order=2, low_reject=5.0, high_reject=5.0, niterate=2)
row_fit = fit_data/spl(xrange)
row_fit[np.where(row_fit == 0.0)] = 1.0 # avoid zeros in output flat
fitted[idx, 85:1109] = row_fit.copy()
else:
fitted[idx, :] = 1.
# Write to the output file
hdulist[1].data = fitted.copy()
        outflat_path = os.path.splitext(outflat)[0] + ".fits"
        hdulist.writeto(outflat_path, overwrite=True)
        return outflat_path
elif cenwave == 7751: # G750L
if bincols == 1:
startcol = 590 # iraf: 591
endcol = 640
highorder = 60
loworder = 12
elif bincols == 2:
startcol = 294 # iraf: 295
endcol = 320
highorder = 50
loworder = 12
elif bincols == 4:
startcol = 144 # iraf: 145
endcol = 160
highorder = 50
loworder = 12
# Fit both the high order and low order splines
fitted_highorder = np.ones(data.shape, dtype=data.dtype)
fitted_loworder = np.ones(data.shape, dtype=data.dtype)
for row_idx in range(numrows):
fit_data = data[row_idx, :]
xrange = np.arange(0, len(fit_data), 1.)
# High Order Fit
spl = fit1d(xrange, fit_data, naverage=2, function="spline3",
order=highorder, low_reject=5.0, high_reject=5.0, niterate=2)
fitted_highorder[row_idx, :] = spl(xrange).copy()
# Low Order Fit
spl = fit1d(xrange, fit_data, naverage=2, function="spline3",
order=loworder, low_reject=5.0, high_reject=5.0, niterate=2)
fitted_loworder[row_idx, :] = spl(xrange).copy()
# Divide both spline fits off the science data
resp_highorder = data/fitted_highorder
resp_loworder = data/fitted_loworder
# Get absolute difference and ratio of the two fits to the data
fit_absdiff = abs(fitted_highorder - fitted_loworder)
fit_ratio = fitted_highorder / fitted_loworder
# Iterate through the rows from startrow to lastrow
fitted = np.ones(data.shape, dtype=data.dtype)
for row_idx in np.arange(startrow, lastrow, 1):
# Find the min pixel location between startcol and endcol
min_idx = np.argmin(fit_absdiff[row_idx, startcol:endcol]) + startcol # the idx in the full array
# Use the ratio between the two fits at the minimum point as the scale factor between the two fits
scale_factor = fit_ratio[row_idx, min_idx]
# Generate the flat using the high order flat for any column left of the min pixel and the low order
# flat times the scale factor for anything right of the min pixel
fitted[row_idx, :min_idx+1] = resp_highorder[row_idx, :min_idx+1]
fitted[row_idx, min_idx+1:] = resp_loworder[row_idx, min_idx+1:] * scale_factor
# Write to the output file
hdulist[1].data = fitted.copy()
        outflat_path = os.path.splitext(outflat)[0] + ".fits"
        hdulist.writeto(outflat_path, overwrite=True)
        return outflat_path
else: # There isn't a current mode/cenwave that uses this path
fitted = np.ones(data.shape, dtype=data.dtype)
for row_idx in range(numrows):
row = data[row_idx, :]
xrange = np.arange(0, len(row), 1.)
spl = fit1d(xrange, row, naverage=2, function="spline3",
order=20, low_reject=3.0, high_reject=3.0, niterate=2)
                row_fit = row/spl(xrange)
fitted[row_idx, :] = row_fit
# Write to the output file
hdulist[1].data = fitted.copy()
            outflat_path = os.path.splitext(outflat)[0] + ".fits"
            hdulist.writeto(outflat_path, overwrite=True)
            return outflat_path
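# --- Illustrative call (added sketch; file names are invented) ---
#
#     from stistools.defringe import normspflat
#     nsp = normspflat('o8p1a1020_raw.fits', outflat='o8p1a1020_nsp.fits',
#                      do_cal=True)
#
# The normalized fringe flat is then matched to the science data with
# mkfringeflat before being divided out by defringe.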
def call_normspflat():
"""Command line entry point for normspflat().
"""
import argparse
parser = argparse.ArgumentParser(
description='Normalize STIS CCD fringe flat')
parser.add_argument('inflat', type=str, help='Name of input fringe flat')
parser.add_argument('--outflat', '-o', type=str, default='.',
help='Name of normalized fringe flat output or directory location (default=".")')
parser.add_argument('--skip_cal', '-s', dest='do_cal', action='store_false',
help='Skip bias and dark subtraction and CR rejection?')
parser.add_argument('--biasfile', '-b', type=str,
help='Name of superbias image. If omitted, use BIASFILE in main header of the '
'inflat.')
parser.add_argument('--darkfile', '-d', type=str,
help='Name of superdark image. If omitted, use DARKFILE in main header of the '
'inflat.')
parser.add_argument('--pixelflat', '-p', type=str,
help='Name of pixel-to-pixel flat. If omitted, use PFLTFILE in main header of '
'the inflat.')
parser.add_argument('--wavecal', '-w', type=str,
help='Name of wavecal file [ONLY FOR G750M SPECTRA]. If omitted, use WAVECAL in '
'main header of the inflat.')
args = vars(parser.parse_args())
normspflat(**args)
if __name__ == '__main__':
call_normspflat()
| 16,395 | 45.05618 | 117 |
py
|
stistools
|
stistools-master/stistools/defringe/_findloc.py
|
import numpy as np
from astropy.modeling import models, fitting
# from lines 280 through 344 of mkfringeflat.cl
def find_loc(input, low_frac=0.2, high_frac=0.8, low_line_frac=0.4):
"""Find the cross-dispersion location of the target spectrum.
Parameters
----------
input: ndarray
The input science data array for the current image set.
low_frac: float
Fraction of image width for the start of a slice.
high_frac: float
Fraction of image width for the end of a slice.
low_line_frac: float
Fraction of image height for the start of a slice for limiting the
region over which to search for the maximum of the slit profile.
Returns
-------
target_locn: float or None
The location (zero based pixel coordinate) of the target in the
cross-dispersion direction.
None will be returned if the quadratic fit to the cross-dispersion
profile has zero curvature.
"""
shape = input.shape
# first column in a slice
fcol = int(round(low_frac * shape[1]))
# last column in a slice
lcol = int(round(high_frac * shape[1])) + 1
# first line in a slice
fline = int(round(low_line_frac * shape[0]**2 / 1024.)) + 1
# last line in a slice
    lline = shape[0] - fline
slit_prof = input[:, fcol:lcol].mean(axis=1, dtype=np.float64)
subset_slit_prof = slit_prof[fline:lline]
med = np.median(subset_slit_prof)
sigma = subset_slit_prof.std()
low_lim = med + 3. * sigma
mask = np.where(subset_slit_prof >= low_lim)
# The independent variable is the pixel index in the cross-dispersion
# direction (the vertical direction).
x_array = mask[0]
# The dependent variable is the slit profile data value.
y_array = subset_slit_prof[mask]
# IMHO, this is not sensible, but it's what mkfringeflat.cl does.
# tcalc ("temp_slitprof.tab", "data", \
# "if data < 0.0 then data * (-1.) else data", datatype="real")
y_array = np.where(y_array < 0., -y_array, y_array)
index = y_array.argsort()
# The zero point for the index array is fline.
maxrow = x_array[index[-1]] + fline
ffrow = maxrow - 2
flrow = maxrow + 3
fit = fitting.LinearLSQFitter()
x = np.arange(len(slit_prof), dtype=np.float64)
quad = fit(models.Polynomial1D(2), x[ffrow:flrow], slit_prof[ffrow:flrow])
if quad.c2.value == 0.:
target_locn = None
else:
target_locn = -0.5 * (quad.c1.value / quad.c2.value)
return target_locn
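# --- Illustrative self-check (added sketch): build a synthetic image whose
# cross-dispersion profile is a narrow Gaussian at row 512 and confirm that
# find_loc() recovers a location close to it.
def _example_find_loc():
    rows = np.arange(1024, dtype=np.float64)
    profile = 100.0 * np.exp(-0.5 * ((rows - 512.0) / 2.0) ** 2)
    # Replicate the profile across 1024 columns to make a 2-D "spectrum".
    image = np.tile(profile[:, np.newaxis], (1, 1024))
    return find_loc(image)  # ~512.0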
| 2,537 | 31.961039 | 78 |
py
|
stistools
|
stistools-master/stistools/defringe/mkfringeflat.py
|
#! /usr/bin/env python
from astropy.io import fits
from astropy.nddata.blocks import block_reduce
import numpy as np
import math
import os
from scipy.ndimage import shift
from ._findloc import find_loc
from ._response import response
__version__ = 0.1
def mkfringeflat(inspec, inflat, outflat, do_shift=True, beg_shift=-0.5, end_shift=0.5,
shift_step=0.1, do_scale=True, beg_scale=0.8, end_scale=1.2, scale_step=0.04,
extrloc=None, extrsize=None, opti_spreg=None, rms_region=None):
"""Takes an input science spectrum and a fringe flat that has been normalized using
the task `normspflat`. The fringe flat is shifted and scaled to produce the minimum
RMS when divided into the science data.
Based on the PyRAF `stsdas.hst_calib.stis.mkfringeflat` task.
In `mkfringeflat`, the user can specify a range of shifts and scales for the routine
to test creating an optimal fringe flat. `mkfringeflat` will go through the shift and
scale dimensions separately and calculate the RMS using the following steps:
1. For each shift step, apply the shift to the input flat field
2. Divide the science data by the shifted flat
3. Divide out the large-scale SED from the science image using a spline fit in order
to isolate the fringing pattern (this is called the response image)
4. Sum the response image along the columns within the RMS region
5. Calculate the mean and standard deviation of the summed columns of the response
image
6. The RMS value for that shift is given by the standard deviation divided by the mean
found in step 5
7. Fit the RMS values with a quadratic polynomial weighted by the inverse RMS to find
the optimal RMS value
8. Apply the best shift determined in step 7 to the data and repeat steps 1-7 with the
scale values to find the best scaling
The RMS values are printed out for each scale and shift but the final best shift and
best scale values do not necessarily correspond to the printed values. This is
because the routine is calculating the RMS values based on a fit of the data at each
scale and shift, rather than being calculated at each discrete step.
Parameters
----------
inspec: str
Name of input science spectrum datafile
inflat: str
Name of input fringe flat file (usually the output from `normspflat`)
outflat: str
Name of output fringe flat to be used in the defringe task
do_shift: bool
Controls whether the shift between fringe flat and science data is
to be calculated
beg_shift: float
Initial shift to apply to fringe flat
end_shift: float
Final shift to apply to fringe flat
shift_step: float
Step-size between shifts to be applied to fringe flat
do_scale: bool
Controls whether the scaling between fringe flat and science
data is to be calculated
beg_scale: float
Initial scaling to apply to fringe flat
end_scale: float
        Final scaling to apply to fringe flat
scale_step: float
Step-size between scaling values to be applied to fringe flat
extrloc: float or None
Extraction location. If set to None, this will be calculated by
parabolic interpolation of the peak of the cross-dispersion
spectral sum
extrsize: float or None
Extraction size in pixels. If set to None, this will be set to a
reasonable value by this routine
opti_spreg: list or array-like or None
A list or array representing the section to be used in normalizing the spectrum
of the science target before it is divided by the shifted/scaled fringe flat.
If set to None, a reasonable range is chosen by this routine. Should be
specified like a Python slice, zero indexed.
rms_region: list or array-like or None
A list or array representing the section to be used in the rms calculation. If set
to None, a reasonable range is chosen by this routine. Should be specified
like a Python slice, zero indexed.
"""
print("mkfringeflat.py version {}".format(__version__))
print(" - matching fringes in a flatfield to those in science data")
sci_hdulist = fits.open(inspec)
flt_hdulist = fits.open(inflat)
scidata = sci_hdulist[1].data
sci_prihdr = sci_hdulist[0].header
sci_hdr = sci_hdulist[1].header
if len(flt_hdulist) == 1:
fltdata = flt_hdulist[0].data
flt_hdr = flt_hdulist[0].header
else:
fltdata = flt_hdulist[1].data
flt_hdr = flt_hdulist[1].header
flt_prihdr = flt_hdulist[0].header
try:
shifted = flt_prihdr['shifted']
except KeyError:
shifted = ""
if "YE" in shifted:
print(" ")
print(" NOTE: Input flat was already shifted")
opt_elem = sci_prihdr['opt_elem']
nrows, ncols = scidata.shape
bincols = sci_prihdr['binaxis1']
binlines = sci_prihdr['binaxis2']
fltbincols = flt_prihdr['binaxis1']
fltbinlines = flt_prihdr['binaxis2']
ltv1 = sci_hdr['ltv1']
if opt_elem == "G750M":
centera2 = sci_prihdr['centera2']
sizaxis2 = sci_prihdr['sizaxis2']
ltv2 = 1 - centera2 + sizaxis2/2.0
flt_centera2 = flt_prihdr['centera2']
flt_sizaxis2 = flt_prihdr['sizaxis2']
flt_ltv2 = 1 - flt_centera2 + flt_sizaxis2/2.0
else:
ltv2 = sci_hdr['ltv2']
flt_ltv2 = flt_hdr['ltv2']
sax0 = round(-ltv1) + 1
sax1 = sax0 + ncols - 1
say0 = int(round(flt_ltv2 - ltv2)) + 1
say1 = say0 + nrows - 1
#
# convert IRAF section limits to Python slice indices
shiftrowstart = say0 - 1
shiftrowstop = say1
shiftcolstart = sax0 - 1
shiftcolstop = sax1
aperture = sci_prihdr['aperture']
if extrsize is None:
if "0.3X0.09" in aperture:
numextrows = int(round(9.0 / binlines + 0.25))
elif "0.2X0.06" in aperture:
numextrows = int(round(7.0 / binlines + 0.25))
elif "52X" not in aperture:
print("ERROR: not able to understand APERTURE keyword")
return
else:
numextrows = 11/binlines
apername = " [Aperture: " + aperture + "]"
else:
numextrows = extrsize
apername = ""
if extrloc is None:
maxrow = int(round(find_loc(scidata)))
else:
        maxrow = int(round(extrloc))
print(" Extraction center: row {}".format(maxrow))
print(" Extraction size: {} pixels {}".format(numextrows, apername))
fline = maxrow - int(round((numextrows - 0.49999)/2.0))
lline = maxrow + int(round((numextrows - 0.49999)/2.0)) + 1
if opti_spreg is None:
if "G750M" in opt_elem:
colstart = int(83/bincols)
colstop = int(1106/bincols)
else:
colstart = int(5/bincols) - 1
colstop = int(1020/bincols)
else:
colstart, colstop = opti_spreg
colstart, colstop = int(colstart), int(colstop)
if rms_region is None:
rms_start = int(725/bincols) - 1
rms_stop = int(900/bincols)
else:
rms_start, rms_stop = rms_region
rms_start, rms_stop = int(rms_start), int(rms_stop)
print("Range to be normalized: [{}:{},{}:{}]".format(fline, lline, colstart, colstop))
shifted_flat = None
if do_shift:
print("")
print("Determining best shift for fringe flat")
print("")
expo = int(round(math.log10(shift_step)-0.49999999))
fshift = int(round(beg_shift/(10**(expo))))
lshift = int(round(end_shift/(10**(expo))))
step = int(round(shift_step/(10**(expo))))
        flt_blk = block_reduce(fltdata, (binlines//fltbinlines, bincols//fltbincols),
func=np.mean)
nshifts = (lshift - fshift)//step + 1
rmsvalues = np.zeros(nshifts)
current_shift = np.zeros(nshifts)
for i in range(nshifts):
current_shift[i] = beg_shift + i*shift_step
shifted_flat = shift(flt_blk[shiftrowstart:shiftrowstop, shiftcolstart:shiftcolstop],
(0, current_shift[i]), order=1, mode='nearest')
star_cont_shift = scidata / shifted_flat
if "G750M" in opt_elem:
norm_star_cont_shift = response(star_cont_shift[fline:lline,colstart:colstop],
star_cont_shift[fline:lline,colstart:colstop],
threshold=1.0e-20,
function="spline3",
sample="*",
naverage=2,
order=1,
low_reject=3.0,
high_reject=3.0,
niterate=2,
grow=0.0)
elif "G750L" in opt_elem:
norm_star_cont_shift = response(star_cont_shift[fline:lline,colstart:colstop],
star_cont_shift[fline:lline,colstart:colstop],
threshold=1.0e-20,
function="spline3",
sample="*",
naverage=2,
order=15,
low_reject=3.0,
high_reject=3.0,
niterate=2,
grow=0.0)
summed_line = norm_star_cont_shift.sum(axis=0)
mean = np.mean(summed_line[rms_start:rms_stop], dtype=np.float64)
sigma = np.std(summed_line[rms_start:rms_stop], dtype=np.float64)
rmsvalues[i] = sigma/mean
print("shift = {:10.3f}, rms = {:8.4f}".format(current_shift[i], rmsvalues[i]))
#
# Determine shift that delivers the best RMS by an inverse rms weighted average
weight = 1.0/(rmsvalues*rmsvalues)
        rownum = np.arange(nshifts)
weighted_rownum = weight*rownum
minrms = rmsvalues.min()
min_row = np.where(rmsvalues == minrms)[0][0]
if min_row == 0 or min_row == nshifts - 1:
print(" ")
print("WARNING: Best shift found on the edge of the specified shift range.")
print("You are advised to try again after adjusting the shift range accordingly")
theshift = current_shift[min_row]
else:
if min_row >= 2 and min_row <= nshifts - 3:
first_row = min_row - 2
last_row = min_row + 3
elif min_row == 1 or min_row == nshifts - 2:
first_row = min_row - 1
last_row = min_row + 2
w_shift_av = weighted_rownum[first_row:last_row].sum()
weight_av = weight[first_row:last_row].sum()
theshift = w_shift_av/weight_av
theshift = theshift*shift_step + beg_shift
print(" ")
print(" Best shift : {:10.3f} pixels".format(theshift))
# Apply the best shift and create output array
        flt_blk = block_reduce(fltdata, (binlines//fltbinlines, bincols//fltbincols),
func=np.mean)
shifted_flat = shift(flt_blk[shiftrowstart:shiftrowstop, shiftcolstart:shiftcolstop],
(0, theshift), order=1, mode='nearest')
# Write out file
fitspos = inflat.find('.fits')
output_filename = inflat[:fitspos] + '_sh.fits'
if os.path.exists(output_filename):
os.remove(output_filename)
        fits.writeto(output_filename, data=shifted_flat)
        with fits.open(output_filename, mode='update') as f:
            f[0].header['SHIFTED'] = 'YES'
print(" Shifted flat : {}".format(output_filename))
print(" (Can be used as input flat for next iteration)")
if do_scale:
print("")
print("Determining best scaling of amplitude of fringes in flat")
print("")
ilow = int(round(beg_scale*100.0))
iupp = int(round(end_scale*100.0))
istep = int(round(scale_step*100.0))
fltdata = get_flat_data(inflat, shifted_flat)
flat_mean = fltdata[fline:lline, colstart:colstop].mean()
nscales = int(round((end_scale - beg_scale)/scale_step)) + 1
rmsvalues = np.zeros(nscales)
current_scale = np.zeros(nscales)
for i in range(nscales):
current_scale[i] = beg_scale + i*scale_step
flat_scaled = fltdata.copy()
flat_scaled[:, colstart+1:colstop-1] = (flat_scaled[:, colstart+1:colstop-1] - flat_mean) * current_scale[i] + \
flat_mean
star_cont_scale = scidata / flat_scaled
if "G750M" in opt_elem:
norm_star_cont_scale = response(star_cont_scale[fline:lline,colstart:colstop],
star_cont_scale[fline:lline,colstart:colstop],
threshold=1.0e-20,
function="spline3",
sample="*",
naverage=2,
order=1,
low_reject=3.0,
high_reject=3.0,
niterate=2,
grow=0.0)
elif "G750L" in opt_elem:
norm_star_cont_scale = response(star_cont_scale[fline:lline,colstart:colstop],
star_cont_scale[fline:lline,colstart:colstop],
threshold=1.0e-20,
function="spline3",
sample="*",
naverage=2,
order=15,
low_reject=3.0,
high_reject=3.0,
niterate=2,
grow=0.0)
summed_line = norm_star_cont_scale.sum(axis=0)
mean = np.mean(summed_line[rms_start:rms_stop], dtype=np.float64)
sigma = np.std(summed_line[rms_start:rms_stop], dtype=np.float64)
rmsvalues[i] = sigma/mean
print("Fringes scaled {:10.3f}: RMS = {:8.4f}".format(current_scale[i], rmsvalues[i]))
#
# Determine scale factor that delivers the best RMS by an inverse-weighted average
weight = 1.0/(rmsvalues*rmsvalues)
        rownum = np.arange(nscales)
weighted_rownum = weight*rownum
minrms = rmsvalues.min()
min_row = np.where(rmsvalues == minrms)[0][0]
if min_row == 0 or min_row == nscales - 1:
print(" ")
print("WARNING: Best scale found on the edge of the specified scale range.")
print("You are advised to try again after adjusting the scale range accordingly")
thescale = current_scale[min_row]
else:
if min_row >= 2 and min_row <= nscales - 3:
first_row = min_row - 2
last_row = min_row + 3
elif min_row == 1 or min_row == nscales - 2:
first_row = min_row - 1
last_row = min_row + 2
w_shift_av = weighted_rownum[first_row:last_row].sum()
weight_av = weight[first_row:last_row].sum()
thescale = w_shift_av/weight_av
thescale = thescale * scale_step + beg_scale
print(" ")
print(" Best scale : {:10.3f}".format(thescale))
# Apply the best scale and create output array
flat_scaled = fltdata.copy()
flat_scaled[:, colstart+1:colstop-1] = (flat_scaled[:, colstart+1:colstop-1] - flat_mean) * thescale + \
flat_mean
# Write out file
        fits.writeto(outflat, data=flat_scaled, overwrite=True)
print("Output flat : {}".format(outflat))
print(" (to be used as input to task 'defringe.py')")
return
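# --- Illustrative call (added sketch; the rootnames are invented) ---
#
#     from stistools.defringe import mkfringeflat
#     mkfringeflat('o8p1a1010_crj.fits', 'o8p1a1020_nsp.fits',
#                  'o8p1a1020_frr.fits', beg_shift=-0.5, end_shift=0.5,
#                  shift_step=0.1)
#
# The output '_frr' flat is the one passed to defringe().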
def get_flat_data(inflat, shifted_flat):
if shifted_flat is not None:
return shifted_flat
else:
f1 = fits.open(inflat)
nextend = len(f1)
        # len(f1) counts all HDUs, so a file with only a primary HDU has length 1.
        if nextend == 1:
return f1[0].data
else:
return f1[1].data
def call_mkfringeflat():
"""Command line entry point for mkfringeflat().
"""
import argparse
parser = argparse.ArgumentParser(
description='Match fringes in STIS fringe flat to those in science data')
parser.add_argument('inspec', type=str, help='Name of input science spectrum')
parser.add_argument('inflat', type=str,
help='Name of input normalized fringe flat from normspflat')
parser.add_argument('outflat', type=str, help='Name of output [final] fringe flat')
parser.add_argument('--skip_shift', dest='do_shift', action='store_false',
help='Skip shifting the flat to match fringes in spectrum?')
parser.add_argument('--shift_range', type=float, default=[-0.5, 0.5], nargs=2,
metavar='FLOAT', help='Range for shift determination (default=[-0.5, 0.5])')
parser.add_argument('--shift_step', type=float, default=0.1, metavar='FLOAT',
help='Step size used when calculating shifts (default=0.1)')
parser.add_argument('--skip_scale', dest='do_scale', action='store_false',
help='Skip scaling the fringe amplitude to match science spectrum?')
parser.add_argument('--scale_range', type=float, default=[0.8, 1.2], nargs=2,
metavar='FLOAT', help='Range for scale determination (default=[0.8, 1.2])')
parser.add_argument('--scale_step', type=float, default=0.04, metavar='FLOAT',
help='Step size used when calculating scale factors (default=0.04)')
parser.add_argument('--extrloc', '-l', type=float, default=None, metavar='FLOAT',
help='Central line (row) to be extracted from 2-D spectrum (zero-indexed). '
'If omitted, ...')
parser.add_argument('--extrsize', '-s', type=float, default=None, metavar='INT',
help='Number of lines to be extracted from 2-D spectrum. If omitted, ...')
parser.add_argument('--opti_spreg', type=int, nargs=2, default=None, metavar='INT',
help='Spectral range used in normalizing the spectrum, as specified via '
'zero-indexed pixel numbers. If omitted, ...')
parser.add_argument('--rmsregion', type=int, nargs=2, default=None, metavar='INT',
help='Spectral range used in measuring RMSs, as specified via zero-indexed pixel '
'numbers. If omitted, ...')
    args = vars(parser.parse_args())
    # Map the two-element range options onto the function's keyword arguments:
    args['beg_shift'], args['end_shift'] = args.pop('shift_range')
    args['beg_scale'], args['end_scale'] = args.pop('scale_range')
    args['rms_region'] = args.pop('rmsregion')
    mkfringeflat(**args)
if __name__ == '__main__':
call_mkfringeflat()
| 19,503 | 38.885481 | 124 |
py
|
stistools
|
stistools-master/stistools/defringe/__init__.py
|
from .prepspec import prepspec
from .normspflat import normspflat
from .mkfringeflat import mkfringeflat
from .defringe import defringe
__doc__ = """
.. HST/STIS CCD Defringing Tools
-----------------------------
- `prepspec` — Calibrate STIS CCD G750L or G750M spectrum before defringing
- `normspflat` — Normalize STIS CCD fringe flat
- `mkfringeflat` — Match fringes in STIS fringe flat to those in science data
- `defringe` — Defringe by dividing the science spectrum by the fringe flat
.. See the stistools documentation on readthedocs for the latest information and user guides:
https://stistools.readthedocs.io/
.. note::
See Section 3.5.5 of the `STIS Data Handbook (DHB)
<http://www.stsci.edu/files/live/sites/www/files/home/hst/instrumentation/stis/documentation/_documents/stis_dhb.pdf>`_
for more details on the defringing process.
.. warning::
These routines are based on PyRAF `stsdas.hst_calib.stis` defringing tasks, though
users should expect numerical discrepancies between these two implementations.
"""
| 1,073 | 38.777778 | 123 |
py
|
stistools
|
stistools-master/stistools/defringe/prepspec.py
|
#! /usr/bin/env python
import os
import shutil
import stat
import re
from astropy.io import fits
from tempfile import mkdtemp
import warnings
from ..r_util import expandFileName
from ..calstis import calstis
def prepspec(inspec, outroot='./', darkfile=None, pixelflat=None, initguess=None):
"""Calibrate STIS CCD G750L or G750M spectrum before defringing.
Based on the PyRAF `stsdas.hst_calib.stis.prepspec` task.
Parameters
----------
inspec: str
Name of input 'raw' science spectrum
outroot: str
Root for output file name. (Default='./')
darkfile: str or None
Name of superdark image. If None, use DARKFILE in main header of input spectrum.
pixelflat: str or None
Name of pixel-to-pixel flat. If None, use PIXELFLAT in main header of input
spectrum.
initguess: str or None
Method for initial value estimate for `ocrreject`: {None, 'minimum', 'median'}.
(Default=None; Use the value in the CRREJTAB.)
Returns
-------
outname: str
Fully qualified name of prepared spectrum (CRJ or SX2 file)
"""
# These notes are based on the old STSDAS algorithm. It may make sense to change the
# order or processing when converting to Python.
# Note that stistools.basic2d.basic2d(), stistools.ocrreject.ocrreject(), and
# stistools.x2d.x2d() likely ignore ext=0 calibration flags.
# Check inputs:
# - inspec: HST/STIS, G750M/G750L, correct filetype, not already corrected
# - outroot: handle dir vs filename; ".fits" in name or not
# - don't allow inspec to be of type {crj, sx2}
# - print/log name of resolved reference files
# - read in reference file data
# Check if we can we CR-combine the data:
# - Check inspec has more than 1 {SCI, ERR, DQ} ext groups (NEXTEND//3).
# - Handle CR-rejection header keywords when NRPTEXP != NEXTEND//3.
# perform only {DQICORR, BLEVCORR} via stistools.basic2d.basic2d() --> Produces temporary file
# perform CRCORR via stistools.ocrreject.ocrreject() --> Produces CRJ file
# perform only {BIASCORR, DARKCORR, FLATCORR, PHOTCORR; STATFLAG} via stistools.basic2d.basic2d()
# If G750L:
# return CRJ file
# If G750M:
# perform only {HELCORR, X2DCORR} via stistools.x2d.x2d()
# return SX2 file
science_data = os.path.abspath(expandFileName(inspec))
    sci_root = re.split(r'\.fits.*', os.path.basename(science_data),
flags=re.IGNORECASE)[0].rsplit('_', 1)[0]
opt_elem = fits.getval(science_data, 'OPT_ELEM').strip().upper()
if (fits.getval(science_data, ext=0, keyword='DETECTOR').strip().upper() != 'CCD') or \
(fits.getval(science_data, ext=0, keyword='INSTRUME').strip() != 'STIS') or \
(opt_elem not in ['G750L', 'G750M']):
raise ValueError('prepspec: Intended for use on STIS/CCD G750L & G750M data!')
if (initguess is not None) and (initguess.lower() not in ['minimum', 'median']):
raise ValueError('initguess must be in {None, "minimum", "median"}!')
if os.path.isdir(outroot):
outroot = os.path.normpath(outroot) + os.sep
if not os.access(os.path.dirname(outroot), os.W_OK):
raise IOError('Cannot write to: {}'.format(os.path.dirname(outroot)))
#if (not os.path.isdir(outroot)) and os.access(outroot, os.F_OK):
# raise FileExistsError('Previous outroot already exists: {}'.format(outroot))
# Make sure the necessary header keywords are set to PERFORM:
with fits.open(science_data, 'update') as f:
f[0].header['STATFLAG'] = True
for keyword in ['DQICORR', 'BLEVCORR', 'BIASCORR', 'DARKCORR', 'FLATCORR', 'CRCORR']:
if not f[0].header[keyword].upper().startswith('COMPLETE'):
f[0].header[keyword] = 'PERFORM'
# A few extra calibration steps if G750M data:
if opt_elem == 'G750M':
for keyword in ['HELCORR', 'X2DCORR']:
if not f[0].header[keyword].upper().startswith('COMPLETE'):
f[0].header[keyword] = 'PERFORM'
# For comparison with PyRAF/IRAF products:
if f[0].header['WAVECORR'].upper().startswith('PERFORM'):
f[0].header['WAVECORR'] = 'OMIT'
ref_types = {
'DARKFILE': os.path.abspath(darkfile or expandFileName(f[0].header['DARKFILE'])),
'PFLTFILE': os.path.abspath(pixelflat or expandFileName(f[0].header['PFLTFILE'])),}
# Handle non-default CR-rejection initial guesses:
if initguess:
# Copy the old CRREJTAB to a temporary location:
crrejtab_tmp_dir = mkdtemp(prefix='crrejtab_')
orig_crrejtab_name = expandFileName(f[0].header['CRREJTAB'])
try:
shutil.copy(orig_crrejtab_name, crrejtab_tmp_dir)
except FileNotFoundError:
warnings.warn('\nUsing CRREJTAB in $oref instead of ext=0 CRREJTAB="{}"'.format(f[0].header['CRREJTAB']))
orig_crrejtab_name = os.path.join(os.environ['oref'], os.path.basename(orig_crrejtab_name))
shutil.copy(orig_crrejtab_name, crrejtab_tmp_dir)
new_crrejtab_name = os.path.join(crrejtab_tmp_dir, os.path.basename(orig_crrejtab_name))
os.chmod(new_crrejtab_name, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR)
# Update header of science_data file:
ref_types['CRREJTAB'] = os.path.abspath(new_crrejtab_name)
# Modify the temporary CRREJTAB:
with fits.open(new_crrejtab_name, 'update') as crrejtab:
crrejtab[1].data['INITGUES'] = initguess.lower()
else:
crrejtab_tmp_dir = None
        # Populate/repopulate the science file header accordingly:
for i, (ref_type, ref) in enumerate(ref_types.items()):
if not os.access(ref, os.F_OK):
raise FileNotFoundError('Cannot access reference file: {}'.format(ref))
# Handle reference file paths via environment variables:
ref_var = 'reff{:.0f}'.format(i+1)
os.environ[ref_var] = os.path.abspath(os.path.dirname(ref)) + os.sep
# Keep $oref where it's the same:
if os.path.normpath(os.environ[ref_var]) == os.path.normpath(os.environ['oref']):
ref_var = 'oref'
f[0].header[ref_type] = '{}${}'.format(ref_var, os.path.basename(ref))
else:
f[0].header[ref_type] = '${}/{}'.format(ref_var, os.path.basename(ref))
# Calibrate with calstis:
trl_file = '{}_trl.txt'.format(sci_root) # Output log goes here
cwd = os.getcwd()
try:
# Run calstis from within the directory with the data to find EPC files properly:
os.chdir(os.path.dirname(science_data))
res = calstis(os.path.basename(science_data), outroot=outroot, trailer=trl_file)
finally:
os.chdir(cwd)
if crrejtab_tmp_dir:
shutil.rmtree(crrejtab_tmp_dir) # Temporary CRREJTAB
# Print out calstis log:
trl_file = os.path.join(os.path.dirname(science_data), trl_file)
with open(trl_file) as trl:
for line in trl:
            print(' ' + line.rstrip())
# Raise exception on bad calstis exit code:
if res != 0:
raise Exception('CalSTIS exited with code {}'.format(res))
def call_prepspec():
"""Command line entry point for prepspec().
"""
import argparse
parser = argparse.ArgumentParser(
description='Correct STIS CCD G750L or G750M spectrum for fringing')
parser.add_argument('inspec', type=str, help='Name of input "raw" science spectrum')
    parser.add_argument('outroot', type=str, nargs='?', default='./',
                        help='Root for output file name. (Default="./")')
parser.add_argument('--darkfile', '-d', type=str, default=None,
help='Name of superdark image. If omitted, use DARKFILE in main header of input '
'spectrum.')
parser.add_argument('--pixelflat', '-f', type=str, default=None,
help='Name of pixel-to-pixel flat. If omitted, use PIXELFLAT in main header of '
'input spectrum.')
parser.add_argument('--initguess', '-i', type=str, default=None,
choices=['None', 'minimum', 'median'],
help='Method for initial value estimate for ocrreject (Default=None; use value from CRREJTAB)')
args = vars(parser.parse_args())
if isinstance(args['initguess'], str) and args['initguess'].lower() == 'none':
args['initguess'] = None
prepspec(**args)
if __name__ == '__main__':
call_prepspec()
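

# Command-line usage sketch (the input file name is hypothetical). Because
# this module defines a ``__main__`` guard above, it can be run directly:
#
#     python -m stistools.defringe.prepspec od1a01abq_raw.fits ./out/ \
#         --darkfile mydark.fits --initguess median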
| 8,610 | 43.61658 | 121 |
py
|
stistools
|
stistools-master/stistools/defringe/_response.py
|
import math
import numpy as np
from astropy.io import fits
from ._fit1d import fit1d
def response(calibration_array, normalization_array,
threshold=None,
function="spline3",
sample='*',
naverage=2,
order=10,
low_reject=3.0,
high_reject=3.0,
niterate=2,
grow=0.0):
"""
RESPONSE -- Determine the response function for 2D spectra.
A calibration image is divided by a normalization spectrum to form
a response image. The normalization spectrum is derived by averaging
the normalization image across dispersion. The normalization spectrum
is then smoothed by curve fitting. The smoothed normalization
spectrum is divided into the calibration image to form the response
function image. The curve fitting may be performed interactively
using the icfit package. A response function is determined for each
input image. Image sections in the calibration image may be used to determine
the response for only part of an image such as with multiple slits.
The images are given by image templates. The number of images must
in each list must match. Image sections are allowed in the calibration
image.
lpar response:
calibration = "star_cont_sh1.fits[5:1020,505:515]" Longslit calibration images
normalizatio = "star_cont_sh1.fits[5:1020,505:515]" Normalization spectrum images
response = "norm_star_cont_sh1.fits" Response function images
(interactive = yes) Fit normalization spectrum interactively?
(threshold = INDEF) Response threshold
(sample = "*") Sample of points to use in fit
(naverage = 2) Number of points in sample averaging
(function = "spline3") Fitting function
(order = 15) Order of fitting function
(low_reject = 3.0) Low rejection in sigma of fit
(high_reject = 3.0) High rejection in sigma of fit
(niterate = 2) Number of rejection iterations
(grow = 0.0) Rejection growing radius
(graphics = "stdgraph") Graphics output device
(cursor = "") Graphics cursor input
(mode = "al")
"""
    if sample != "*":
        print('Only sample="*" is currently supported for this version of response')
    # Use a distinct local name so the function's own name is not shadowed:
    resp = make_response(calibration_array, normalization_array, threshold, naverage,
                         function, order, low_reject, high_reject, niterate, grow)
    return resp
def make_response(calibration_array, normalization_array, threshold, naverage,
function, order, low_reject, high_reject, niterate, grow):
"""Like re_make in response.x
"""
dispaxis = get_dispaxis(calibration_array)
# Get the normalization spectrum
nrows, ncols = normalization_array.shape
average_spectrum = normalization_array.sum(axis=0) / float(nrows)
x = np.arange(ncols) + 1
fitted = fit1d(x, average_spectrum, weights=None, function=function, order=order,
naverage=naverage, low_reject=low_reject, high_reject=high_reject,
niterate=niterate, grow=grow)
fitted_spectrum = fitted(x)
    resp = normalise_response(calibration_array, dispaxis, threshold, fitted_spectrum)
    return resp
def get_dispaxis(calibration_array):
"""For completeness, include a function to get this. For our purposes, dispaxis is
always 1
"""
return 1
def normalise_response(calibration_array, dispaxis, threshold, fitted_spectrum):
"""Like re_normalize in response.x
"""
nrows, ncols = calibration_array.shape
response = calibration_array / fitted_spectrum
if threshold is not None:
low_spectrum = np.where(fitted_spectrum < threshold)
for i in low_spectrum[0]:
response[:,i] = 1.0
low_cal_array = np.where(calibration_array < threshold)
response[low_cal_array] = 1.0
return response
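

# Minimal usage sketch for response(), mirroring the lpar example in the
# docstring above (the file name and image section are hypothetical):
#
#     from astropy.io import fits
#     from stistools.defringe._response import response
#
#     cal = fits.getdata('star_cont_sh1.fits', ext=1)[504:515, 4:1020]
#     resp = response(cal, cal, threshold=None, function='spline3', order=15)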
| 4,070 | 36.694444 | 90 |
py
|
stistools
|
stistools-master/tests/resources.py
|
"""HSTCAL regression test helpers."""
from six.moves import urllib
import getpass
import os
import sys
import math
from io import StringIO
import shutil
import datetime
from os.path import splitext
from difflib import unified_diff
import pytest
import requests
from astropy.io import fits
from astropy.io.fits import FITSDiff
from astropy.table import Table
from astropy.utils.data import conf
from .helpers.io import get_bigdata, upload_results
__all__ = ['download_crds',
'ref_from_image', 'raw_from_asn', 'BaseACS',
'BaseSTIS', 'BaseWFC3IR', 'BaseWFC3UVIS', 'BaseWFPC2']
def _download_file(url, filename, filemode='wb', timeout=None):
"""Generic remote data download."""
if url.startswith('http'):
r = requests.get(url, timeout=timeout)
with open(filename, filemode) as fout:
fout.write(r.content)
elif url.startswith('ftp'): # TODO: Support filemode and timeout.
urllib.request.urlretrieve(url, filename=filename)
else: # pragma: no cover
raise ValueError('Unsupported protocol for {}'.format(url))
def download_crds(refdir, refname, timeout=None):
"""Download a CRDS file from HTTP or FTP to current directory."""
# CRDS file for given name never changes, so no need to re-download.
if os.path.exists(refname):
return
try:
url = 'http://ssb.stsci.edu/cdbs/{}/{}'.format(refdir, refname)
local_file = os.path.abspath(refname)
print("Downloading CRDS file: {}".format(local_file))
_download_file(url, refname, timeout=timeout)
except Exception: # Fall back to FTP
url = 'ftp://ftp.stsci.edu/cdbs/{}/{}'.format(refdir, refname)
_download_file(url, refname, timeout=timeout)
def _get_reffile(hdr, key):
"""Get ref file from given key in given FITS header."""
ref_file = None
if key in hdr: # Keyword might not exist
ref_file = hdr[key].strip()
        if ref_file.upper() == 'N/A':  # Not every ref file is defined
ref_file = None
return ref_file
def ref_from_image(input_image):
"""
Return a list of reference filenames, as defined in the primary
header of the given input image, necessary for calibration; i.e.,
only those associated with ``*CORR`` set to ``PERFORM`` will be
considered.
"""
# NOTE: Add additional mapping as needed.
# Map mandatory CRDS reference file for instrument/detector combo.
    reffile_lookup = ['BPIXTAB', 'DARKFILE', 'PFLTFILE', 'LFLTFILE', 'PHOTTAB',
                      'IMPHTTAB', 'APERTAB', 'CCDTAB', 'BIASFILE', 'CRREJTAB',
                      'IDCTAB', 'TDSTAB', 'SPTRCTAB', 'SDCTAB',
                      'PCTAB', 'TDCTAB', 'MLINTAB', 'GACTAB', 'WCPTAB',
                      'LAMPTAB', 'APDESTAB', 'XTRACTAB', 'DISPTAB', 'INANGTAB',
                      'CDSTAB', 'ECHSCTAB', 'EXSTAB', 'HALOTAB', 'TELTAB',
                      'RIPTAB', 'SRWTAB']
ref_files = []
hdr = fits.getheader(input_image, ext=0)
for reffile in reffile_lookup:
s = _get_reffile(hdr, reffile)
if s is not None:
ref_files.append(s)
return ref_files
def raw_from_asn(asn_file, suffix='_raw.fits'):
"""Return a list of RAW input files in a given ASN."""
raw_files = []
tab = Table.read(asn_file, format='fits')
for row in tab:
if row['MEMTYPE'].startswith('PROD'):
continue
pfx = row['MEMNAME'].lower().strip().replace('\x00', '')
raw_files.append(pfx + suffix)
return raw_files
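
# Usage sketch tying the helpers above together (the exposure name is
# hypothetical; the '$' handling mirrors BaseCal.get_input_file below):
#
#     for ref in ref_from_image('o6ih10060_raw.fits'):
#         if '$' in ref:                  # e.g. 'oref$h4s1350lo_bia.fits'
#             refdir, refname = ref.split('$')
#             download_crds(refdir, refname, timeout=30)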
# Base classes for actual tests.
# NOTE: Named in a way so pytest will not pick them up here.
#@pytest.mark.bigdata
class BaseCal(object):
prevdir = os.getcwd()
use_ftp_crds = True
timeout = 30 # seconds
tree = ''
results_root = 'datb-stistools/results'
# Numpy default for allclose comparison
rtol = 5e-7
atol = 0
# To be defined by instrument
refstr = ''
prevref = ''
input_loc = ''
ref_loc = ''
ignore_keywords = []
# To be defined by individual test
subdir = ''
@pytest.fixture(autouse=True)
def setup_class(self, tmpdir, envopt):
"""
Run test in own dir so we can keep results separate from
other tests.
"""
if not tmpdir.ensure(self.subdir, dir=True):
p = tmpdir.mkdir(self.subdir).strpath
else:
p = tmpdir.join(self.subdir).strpath
os.chdir(p)
# NOTE: This could be explicitly controlled using pytest fixture
# but too many ways to do the same thing would be confusing.
# Refine this logic if using pytest fixture.
# HSTCAL cannot open remote CRDS on FTP but central storage is okay.
# So use central storage if available to avoid FTP.
if self.prevref is None or self.prevref.startswith(('ftp', 'http')):
os.environ[self.refstr] = p + os.sep
self.use_ftp_crds = True
# Turn off Astrometry updates
os.environ['ASTROMETRY_STEP_CONTROL'] = 'OFF'
# This controls astropy.io.fits timeout
conf.remote_timeout = self.timeout
# Update tree to point to correct environment
self.tree = envopt
def teardown_class(self):
"""Reset path and variables."""
conf.reset('remote_timeout')
os.chdir(self.prevdir)
if self.use_ftp_crds and self.prevref is not None:
os.environ[self.refstr] = self.prevref
def get_data(self, *args):
"""
Download `filename` into working directory using
`helpers/io/get_bigdata`. This will then return the full path to
the local copy of the file.
"""
local_file = get_bigdata(self.tree, self.input_loc, *args)
return local_file
def get_input_file(self, *args, refsep='$'):
"""
Download or copy input file (e.g., RAW) into the working directory.
The associated CRDS reference files in ``refstr`` are also
downloaded, if necessary.
"""
filename = self.get_data(*args)
print(filename)
ref_files = ref_from_image(filename)
print("Looking for REF_FILES: {}".format(ref_files))
for ref_file in ref_files:
if ref_file.strip() == '':
continue
if refsep not in ref_file: # Local file
refname = self.get_data('customRef', ref_file)
else: # Download from FTP, if applicable
s = ref_file.split(refsep)
refdir = s[0]
refname = s[1]
if self.use_ftp_crds:
download_crds(refdir, refname, timeout=self.timeout)
return filename
def compare_outputs(self, outputs, raise_error=True, delete_history=False):
"""
Compare output with "truth" using appropriate
diff routine; namely,
        ``fitsdiff`` for FITS file comparisons and
        ``unified_diff`` for ASCII products.
Parameters
----------
outputs : list of tuple
A list of tuples, each containing filename (without path)
of CALXXX output and truth, in that order.
        raise_error : bool
            Raise ``AssertionError`` if difference is found.

        delete_history : bool
            If `True`, delete HISTORY keywords from the primary FITS headers
            before comparison.
Returns
-------
report : str
Report from ``fitsdiff``.
This is part of error message if ``raise_error=True``.
"""
all_okay = True
creature_report = ''
# Create instructions for uploading results to artifactory for use
# as new comparison/truth files
testpath, testname = os.path.split(os.path.abspath(os.curdir))
# organize results by day test was run...could replace with git-hash
whoami = getpass.getuser() or 'nobody'
dt = datetime.datetime.now().strftime("%d%b%YT")
ttime = datetime.datetime.now().strftime("%H_%M_%S")
user_tag = 'NOT_CI_{}_{}'.format(whoami, ttime)
build_tag = os.environ.get('BUILD_TAG', user_tag)
build_suffix = os.environ.get('BUILD_MATRIX_SUFFIX', 'standalone')
testdir = "{}_{}_{}".format(testname, build_tag, build_suffix)
tree = os.path.join(self.results_root, self.input_loc,
dt, testdir) + os.sep
updated_outputs = []
for actual, desired in outputs:
# Get "truth" image
s = self.get_data('truth', desired)
if s is not None:
desired = s
if actual.endswith('fits'):
# Working with FITS files...
if delete_history is True:
actual = fits.open(actual)
desired = fits.open(desired)
if 'HISTORY' in actual[0].header:
del actual[0].header['HISTORY']
if 'HISTORY' in desired[0].header:
del desired[0].header['HISTORY']
fdiff = FITSDiff(actual, desired, rtol=self.rtol,
atol=self.atol,
ignore_keywords=self.ignore_keywords)
if delete_history is True:
actual.close()
desired.close()
creature_report += fdiff.report()
if not fdiff.identical:
# Only keep track of failed results which need to
# be used to replace the truth files (if OK).
updated_outputs.append((actual, desired))
if not fdiff.identical and all_okay:
all_okay = False
else:
# Try ASCII-based diff
with open(actual) as afile:
actual_lines = afile.readlines()
with open(desired) as dfile:
desired_lines = dfile.readlines()
udiff = unified_diff(actual_lines, desired_lines,
fromfile=actual, tofile=desired)
old_stdout = sys.stdout
udiffIO = StringIO()
sys.stdout = udiffIO
sys.stdout.writelines(udiff)
sys.stdout = old_stdout
udiff_report = udiffIO.getvalue()
creature_report += udiff_report
if len(udiff_report) > 2 and all_okay:
all_okay = False
if len(udiff_report) > 2:
# Only keep track of failed results which need to
# be used to replace the truth files (if OK).
updated_outputs.append((actual, desired))
if not all_okay:
# Write out JSON file to enable retention of different results
new_truths = [os.path.abspath(i[1]) for i in updated_outputs]
for files in updated_outputs:
print("Renaming {} as new 'truth' file: {}".format(
files[0], files[1]))
shutil.move(files[0], files[1])
log_pattern = [os.path.join(os.path.dirname(x), '*.log')
for x in new_truths]
upload_results(pattern=new_truths + log_pattern,
testname=testname,
target=tree)
if not all_okay and raise_error:
raise AssertionError(os.linesep + creature_report)
return creature_report
class BaseSTIS(BaseCal):
refstr = 'oref'
prevref = os.environ.get(refstr)
input_loc = ''
ref_loc = '/ref'
ignore_keywords = ['date', 'filename', 'iraf-tlm', 'fitsdate', 'history']
    # 'cal_ver'
def read_image(self, filename):
"""
Read the image from a fits file
"""
hdu = fits.open(filename)
image = hdu[1].data
hdu.close()
return image
def add_suffix(fname, suffix, range=None):
"""Add suffix to file name
Parameters
----------
fname: str
The file name to add the suffix to
suffix: str
        The suffix to add
range: range
If specified, the set of indexes will be added to the
outputs.
Returns
-------
fname, fname_with_suffix
2-tuple of the original file name and name with suffix.
If `range` is defined, `fname_with_suffix` will be a list.
"""
fname_root, fname_ext = splitext(fname)
if range is None:
with_suffix = ''.join([
fname_root,
'_',
suffix,
fname_ext
])
else:
with_suffix = []
for idx in range:
with_suffix.append(''.join([
fname_root,
'_',
str(idx),
'_',
suffix,
fname_ext
]))
return fname, with_suffix
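

# Quick sanity examples for add_suffix (results follow directly from the
# string operations above):
#
#     >>> add_suffix('o6ih10060_crj.fits', 'fcrj')
#     ('o6ih10060_crj.fits', 'o6ih10060_crj_fcrj.fits')
#     >>> add_suffix('o6ih10060_crj.fits', 'fcrj', range=range(2))
#     ('o6ih10060_crj.fits', ['o6ih10060_crj_0_fcrj.fits', 'o6ih10060_crj_1_fcrj.fits'])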
| 12,883 | 33.449198 | 79 |
py
|
stistools
|
stistools-master/tests/test_tastis.py
|
import pytest
from stistools.tastis import tastis
from .resources import BaseSTIS
@pytest.mark.bigdata
@pytest.mark.slow
class TestTastis(BaseSTIS):
input_loc = 'tastis'
ref_loc = 'tastis'
def test_header_update1(self, capsys):
"""
oc7w11viq # ACQ/PEAK-UP, RETURN-TO-BRIGHTEST
"""
# Prepare input files.
self.get_data("input", "oc7w11viq_raw.fits")
self.get_data("input", "oc7w11viq_spt.fits")
capsys.readouterr()
tastis('oc7w11viq_raw.fits', update=True)
captured = capsys.readouterr()
assert captured.out == "===============================================================================\n" \
"oc7w11viq HST/STIS G430L 0.3X0.05ND ACQ/PEAK-UP\n" \
"prop: 13465 visit: 11 line: 3 target: HD128621-2\n" \
"obs date, time: 2014-07-24 22:05:06 exposure time: 0.10\n" \
"dom GS/FGS: S7QX000330F1 sub-dom GS/FGS: S7QX000694F2\n" \
"ACQ params: bias sub: 1510 method: RETURN-TO-BRIGHTEST\n" \
"subarray (axis1,axis2): size=(1022,32) corner=(25,500)\n" \
"-------------------------------------------------------------------------------\n" \
"Scan type: LINEARAXIS2 Step size (mas): 250\n" \
"\n" \
" [210 753 0]\n" \
"\n" \
" axis1 axis2 axis1 axis2 V2 V3\n" \
" (pixels) (arcsec) (arcsec)\n" \
"Estimated slew: 0.0 -0.1 0.000 -0.005 -0.004 0.004\n" \
"Flux in post-slew confirmation image (751752) - Pedestal (748587) = 3165 DN\n" \
"-------------------------------------------------------------------------------\n" \
"The flux in the confirmation image is 320% greater than the maximum flux\n" \
"in the ACQ/PEAK scan. An excess greater than 100% indicates\n" \
"problems in the ACQ/PEAK.\n" \
"\n" \
"The flux in the confirmation image is 16% of the recommended minimum\n" \
"of 20000 DN for a dispersed-light ACQ/PEAK. The signal-to-noise in\n" \
"the ACQ/PEAK may be inadequate for an accurate centering.\n" \
"\n" \
"===============================================================================\n"
# Compare results
outputs = [("oc7w11viq_raw.fits", "oc7w11viq_raw_ref.fits")]
self.compare_outputs(outputs)
def test_header_update2(self, capsys):
"""
octka7jeq # ACQ/PEAK-UP, flux error
"""
# Prepare input files.
self.get_data("input", "octka7jeq_raw.fits")
self.get_data("input", "octka7jeq_spt.fits")
capsys.readouterr()
tastis('octka7jeq_raw.fits', update=True)
captured = capsys.readouterr()
assert captured.out == "===============================================================================\n" \
"octka7jeq HST/STIS G430L 0.2X0.09 ACQ/PEAK-UP\n" \
"prop: 14161 visit: A7 line: 2 target: HD-84937\n" \
"obs date, time: 2016-05-09 23:15:29 exposure time: 0.20\n" \
"dom GS/FGS: N6U6000023F2 sub-dom GS/FGS: N6U7000178F1\n" \
"ACQ params: bias sub: 1510 method: MAX-FLUX-CENTROID\n" \
"subarray (axis1,axis2): size=(1022,32) corner=(26,500)\n" \
"-------------------------------------------------------------------------------\n" \
"Scan type: LINEARAXIS1 Step size (mas): 69\n" \
"\n" \
" [ 0 16309 83580 21884 8029]\n" \
"\n" \
" axis1 axis2 axis1 axis2 V2 V3\n" \
" (pixels) (arcsec) (arcsec)\n" \
"Estimated slew: 0.2 0.0 0.010 0.000 0.007 0.007\n" \
"Flux in post-slew confirmation image (852814) - Pedestal (791686) = 61128 DN\n" \
"-------------------------------------------------------------------------------\n" \
"The flux in the confirmation image is only 73% of the maximum flux\n" \
"in the ACQ/PEAK scan. Percentages below 80% often indicate problems\n" \
"in the ACQ/PEAK.\n" \
"\n" \
"===============================================================================\n"
# Compare results
outputs = [("octka7jeq_raw.fits", "octka7jeq_raw_ref.fits")]
self.compare_outputs(outputs)
def test_header_update3(self, capsys):
"""
octr11hrq # ACQ/PEAK-UP, flux errors
"""
# Prepare input files.
self.get_data("input", "octr11hrq_raw.fits")
self.get_data("input", "octr11hrq_spt.fits")
capsys.readouterr()
tastis('octr11hrq_raw.fits', update=True)
captured = capsys.readouterr()
assert captured.out == "===============================================================================\n" \
"octr11hrq HST/STIS G430M 31X0.05NDA ACQ/PEAK-UP\n" \
"prop: 14341 visit: 11 line: 9 target: HD128621-2\n" \
"obs date, time: 2016-08-28 22:33:14 exposure time: 0.10\n" \
"dom GS/FGS: S7QX000303F1 sub-dom GS/FGS: S7QX000751F2\n" \
"ACQ params: bias sub: 1510 method: MAX-FLUX-CENTROID\n" \
"subarray (axis1,axis2): size=(1022,32) corner=(25,500)\n" \
"-------------------------------------------------------------------------------\n" \
"Scan type: LINEARAXIS1 Step size (mas): 39\n" \
"\n" \
" [5478 0 798 3264 4796 1923 4876]\n" \
"\n" \
" axis1 axis2 axis1 axis2 V2 V3\n" \
" (pixels) (arcsec) (arcsec)\n" \
"Estimated slew: 0.2 0.0 0.010 0.000 0.007 0.007\n" \
"Flux in post-slew confirmation image (882661) - Pedestal (871184) = 11477 DN\n" \
"-------------------------------------------------------------------------------\n" \
"The flux in the confirmation image is 110% greater than the maximum flux\n" \
"in the ACQ/PEAK scan. An excess greater than 100% indicates\n" \
"problems in the ACQ/PEAK.\n" \
"\n" \
"The flux in the confirmation image is 57% of the recommended minimum\n" \
"of 20000 DN for a dispersed-light ACQ/PEAK. The signal-to-noise in\n" \
"the ACQ/PEAK may be inadequate for an accurate centering.\n" \
"\n" \
"The maximum flux in the sequence occurred at one end.\n" \
"This may indicate that the target was beyond that end\n" \
"or that a neighboring object affected the acquisition.\n" \
"===============================================================================\n"
# Compare results
outputs = [("octr11hrq_raw.fits", "octr11hrq_raw_ref.fits")]
self.compare_outputs(outputs)
def test_header_update4(self, capsys):
"""
ocui04xeq # ACQ/PEAK-UP, flux errors
"""
# Prepare input files.
self.get_data("input", "ocui04xeq_raw.fits")
self.get_data("input", "ocui04xeq_spt.fits")
capsys.readouterr()
tastis('ocui04xeq_raw.fits', update=True)
captured = capsys.readouterr()
assert captured.out == "===============================================================================\n" \
"ocui04xeq HST/STIS MIRVIS 52X0.1E1 ACQ/PEAK-UP\n" \
"prop: 14086 visit: 04 line: 2 target: M62-VLA1\n" \
"obs date, time: 2016-07-22 06:10:30 exposure time: 20.00\n" \
"dom GS/FGS: S8ES000684F2 sub-dom GS/FGS: S8ES000207F1\n" \
"ACQ params: bias sub: 1510 method: MAX-FLUX-CENTROID\n" \
"subarray (axis1,axis2): size=(32,32) corner=(524,883)\n" \
"-------------------------------------------------------------------------------\n" \
"Scan type: LINEARAXIS1 Step size (mas): 75\n" \
"\n" \
" [17007 5446 1717 993 0]\n" \
"\n" \
" axis1 axis2 axis1 axis2 V2 V3\n" \
" (pixels) (arcsec) (arcsec)\n" \
"Estimated slew: -2.6 0.0 -0.132 0.000 -0.093 -0.093\n" \
"Flux in post-slew confirmation image (56705) - Pedestal (43530) = 13175 DN\n" \
"-------------------------------------------------------------------------------\n" \
"The flux in the confirmation image is only 77% of the maximum flux\n" \
"in the ACQ/PEAK scan. Percentages below 80% often indicate problems\n" \
"in the ACQ/PEAK.\n" \
"\n" \
"The maximum flux in the sequence occurred at one end.\n" \
"This may indicate that the target was beyond that end\n" \
"or that a neighboring object affected the acquisition.\n" \
"===============================================================================\n"
# Compare results
outputs = [("ocui04xeq_raw.fits", "ocui04xeq_raw_ref.fits")]
self.compare_outputs(outputs)
def test_header_update5(self, capsys):
"""
ocyw05afq # ACQ/PEAK-UP, LINEARAXIS2, no errors
"""
# Prepare input files.
self.get_data("input", "ocyw05afq_raw.fits")
self.get_data("input", "ocyw05afq_spt.fits")
capsys.readouterr()
tastis('ocyw05afq_raw.fits', update=True)
captured = capsys.readouterr()
assert captured.out == "===============================================================================\n" \
"ocyw05afq HST/STIS G430L 0.2X0.09 ACQ/PEAK-UP\n" \
"prop: 14084 visit: 05 line: 2 target: BD-11D916\n" \
"obs date, time: 2016-09-22 08:33:17 exposure time: 1.80\n" \
"dom GS/FGS: S2AE000156F1 sub-dom GS/FGS: S2AE000086F2\n" \
"ACQ params: bias sub: 1510 method: MAX-FLUX-CENTROID\n" \
"subarray (axis1,axis2): size=(1022,32) corner=(26,500)\n" \
"-------------------------------------------------------------------------------\n" \
"Scan type: LINEARAXIS2 Step size (mas): 150\n" \
"\n" \
" [ 5139 67252 0]\n" \
"\n" \
" axis1 axis2 axis1 axis2 V2 V3\n" \
" (pixels) (arcsec) (arcsec)\n" \
"Estimated slew: 0.0 0.0 0.000 0.000 0.000 0.000\n" \
"Flux in post-slew confirmation image (907707) - Pedestal (838752) = 68955 DN\n" \
"-------------------------------------------------------------------------------\n" \
"The confirmation image has a flux between 0.8 and 2.0 times the\n" \
"maximum flux in the peakup, which is typical of a successful ACQ/PEAK.\n" \
"===============================================================================\n"
# Compare results
outputs = [("ocyw05afq_raw.fits", "ocyw05afq_raw_ref.fits")]
self.compare_outputs(outputs)
def test_header_update6(self, capsys):
"""
ocmv0lw6q # ACQ/IMAGE, F25ND3, no errors
"""
# Prepare input files.
self.get_data("input", "ocmv0lw6q_raw.fits")
self.get_data("input", "ocmv0lw6q_spt.fits")
capsys.readouterr()
tastis('ocmv0lw6q_raw.fits', update=True)
captured = capsys.readouterr()
assert captured.out == "===============================================================================\n" \
"ocmv0lw6q HST/STIS MIRVIS F25ND3 ACQ/POINT\n" \
"prop: 13760 visit: 0L line: 1 target: CD-59D3300\n" \
"obs date, time: 2016-09-29 23:43:50 exposure time: 1.10\n" \
"dom GS/FGS: S4B0000993F2 sub-dom GS/FGS: S4B0000953F1\n" \
"ACQ params: bias sub: 1510 checkbox: 3 method: FLUX CENTROID\n" \
"subarray (axis1,axis2): size=(100,100) corner=(487,466)\n" \
"-------------------------------------------------------------------------------\n" \
"Coarse locate phase: Target flux in max checkbox (DN): 1560\n" \
"\n" \
" global local\n" \
" axis1 axis2 axis1 axis2\n" \
"Target location: 534.2 507.0 48.2 42.0\n" \
"\n" \
" axis1 axis2 axis1 axis2 V2 V3\n" \
" (pixels) (arcsec) (arcsec)\n" \
"Estimated slew: -1.5 -9.0 -0.079 -0.457 -0.379 0.268\n" \
"-------------------------------------------------------------------------------\n" \
"Fine locate phase: Target flux in max checkbox (DN): 1559\n" \
"\n" \
" global local\n" \
" axis1 axis2 axis1 axis2\n" \
"Target location: 534.2 516.8 48.2 51.8\n" \
"Ref ap location: 537.5 517.0 19.5 17.0\n" \
"\n" \
" axis1 axis2 axis1 axis2 V2 V3\n" \
" (pixels) (arcsec) (arcsec)\n" \
"Estimated slew: -2.1 -0.2 -0.104 -0.010 -0.081 -0.067\n" \
"-------------------------------------------------------------------------------\n" \
"Total est. slew: -3.6 -9.2 -0.183 -0.467 -0.460 0.201\n" \
"-------------------------------------------------------------------------------\n" \
"Your ACQ appears to have succeeded, as the fluxes in the coarse\n" \
"and fine stages agree within 25% and the fine slews were less than\n" \
"4 pixels as expected\n" \
"\n" \
"===============================================================================\n"
# Compare results
outputs = [("ocmv0lw6q_raw.fits", "ocmv0lw6q_raw_ref.fits")]
self.compare_outputs(outputs)
def test_header_update7(self, capsys):
"""
octr11h4q # ACQ/IMAGE, F25ND5, large fine slew error
"""
# Prepare input files.
self.get_data("input", "octr11h4q_raw.fits")
self.get_data("input", "octr11h4q_spt.fits")
capsys.readouterr()
tastis('octr11h4q_raw.fits', update=True)
captured = capsys.readouterr()
assert captured.out == "===============================================================================\n" \
"octr11h4q HST/STIS MIRVIS F25ND5 ACQ/POINT\n" \
"prop: 14341 visit: 11 line: 1 target: HD128620\n" \
"obs date, time: 2016-08-28 19:57:49 exposure time: 0.30\n" \
"dom GS/FGS: S7QX000303F1 sub-dom GS/FGS: S7QX000751F2\n" \
"ACQ params: bias sub: 1510 checkbox: 3 method: FLUX CENTROID\n" \
"subarray (axis1,axis2): size=(100,100) corner=(487,466)\n" \
"-------------------------------------------------------------------------------\n" \
"Coarse locate phase: Target flux in max checkbox (DN): 278\n" \
"\n" \
" global local\n" \
" axis1 axis2 axis1 axis2\n" \
"Target location: 557.0 473.0 71.0 8.0\n" \
"\n" \
" axis1 axis2 axis1 axis2 V2 V3\n" \
" (pixels) (arcsec) (arcsec)\n" \
"Estimated slew: 21.3 -43.0 1.080 -2.184 -0.781 2.308\n" \
"-------------------------------------------------------------------------------\n" \
"Fine locate phase: Target flux in max checkbox (DN): 280\n" \
"\n" \
" global local\n" \
" axis1 axis2 axis1 axis2\n" \
"Target location: 547.0 564.0 61.0 99.0\n" \
"Ref ap location: 537.6 517.3 19.6 17.3\n" \
"\n" \
" axis1 axis2 axis1 axis2 V2 V3\n" \
" (pixels) (arcsec) (arcsec)\n" \
"Estimated slew: 10.6 46.7 0.541 2.372 2.060 -1.295\n" \
"-------------------------------------------------------------------------------\n" \
"Total est. slew: 31.9 3.7 1.621 0.188 1.279 1.013\n" \
"-------------------------------------------------------------------------------\n" \
"The fine slew (to center the target in the reference aperture) is larger\n" \
"than 4 pixels. This may indicate a problem with your acquisition.\n" \
"\n" \
"===============================================================================\n"
# Compare results
outputs = [("octr11h4q_raw.fits", "octr11h4q_raw_ref.fits")]
self.compare_outputs(outputs)
def test_header_update8(self, capsys):
"""
ocu252cmq # ACQ/IMAGE, F28X50OII, flux error
"""
# Prepare input files.
self.get_data("input", "ocu252cmq_raw.fits")
self.get_data("input", "ocu252cmq_spt.fits")
capsys.readouterr()
tastis('ocu252cmq_raw.fits', update=True)
captured = capsys.readouterr()
assert captured.out == "===============================================================================\n" \
"ocu252cmq HST/STIS MIRVIS F28X50OII ACQ/POINT\n" \
"prop: 14143 visit: 52 line: 1 target: BD+41-3306\n" \
"obs date, time: 2016-06-06 08:30:05 exposure time: 2.10\n" \
"dom GS/FGS: N2JU001340F2 sub-dom GS/FGS: N2K1001229F1\n" \
"ACQ params: bias sub: 1510 checkbox: 3 method: FLUX CENTROID\n" \
"subarray (axis1,axis2): size=(100,100) corner=(487,466)\n" \
"-------------------------------------------------------------------------------\n" \
"Coarse locate phase: Target flux in max checkbox (DN): 1442\n" \
"\n" \
" global local\n" \
" axis1 axis2 axis1 axis2\n" \
"Target location: 527.8 513.1 41.8 48.1\n" \
"\n" \
" axis1 axis2 axis1 axis2 V2 V3\n" \
" (pixels) (arcsec) (arcsec)\n" \
"Estimated slew: -7.9 -2.9 -0.400 -0.147 -0.387 -0.179\n" \
"-------------------------------------------------------------------------------\n" \
"Fine locate phase: Target flux in max checkbox (DN): 611\n" \
"\n" \
" global local\n" \
" axis1 axis2 axis1 axis2\n" \
"Target location: 534.1 516.1 48.1 51.1\n" \
"Ref ap location: 537.5 516.5 19.5 16.5\n" \
"\n" \
" axis1 axis2 axis1 axis2 V2 V3\n" \
" (pixels) (arcsec) (arcsec)\n" \
"Estimated slew: -2.1 -0.4 -0.106 -0.020 -0.089 -0.061\n" \
"-------------------------------------------------------------------------------\n" \
"Total est. slew: -10.0 -3.3 -0.506 -0.168 -0.477 -0.239\n" \
"-------------------------------------------------------------------------------\n" \
"The fluxes in the maximum checkbox in the fine and coarse stages differ\n" \
"by more than 25%. This may indicate a problem with your acquisition.\n" \
"\n" \
"===============================================================================\n"
# Compare results
outputs = [("ocu252cmq_raw.fits", "ocu252cmq_raw_ref.fits")]
self.compare_outputs(outputs)
def test_tastis_zero_divide(self):
"""
o4er06llq # ACQ/PEAK-UP, empty
"""
# Prepare input files.
self.get_data("input", "o4er06llq_raw.fits")
self.get_data("input", "o4er06llq_spt.fits")
with pytest.raises(ZeroDivisionError):
tastis('o4er06llq_raw.fits')
def test_tastis_output_geometric_center(self, capsys):
"""
ocoa03q2q # ACQ/IMAGE, GEOMETRIC-CENTER
"""
self.get_data("input", "ocoa03q2q_raw.fits")
self.get_data("input", "ocoa03q2q_spt.fits")
capsys.readouterr()
tastis('ocoa03q2q_raw.fits')
captured = capsys.readouterr()
assert captured.out == "===============================================================================\n" \
"ocoa03q2q HST/STIS MIRVIS F28X50LP ACQ/DIFFUSE\n" \
"prop: 13693 visit: 03 line: 1 target: CERES-2\n" \
"obs date, time: 2015-08-16 16:34:18 exposure time: 1.10\n" \
"dom GS/FGS: SCHN000911F2 sub-dom GS/FGS: SCHK000675F1\n" \
"ACQ params: bias sub: 1510 checkbox: 3 method: GEOMETRIC-CENTER\n" \
"subarray (axis1,axis2): size=(104,104) corner=(485,464)\n" \
"-------------------------------------------------------------------------------\n" \
"Coarse locate phase: Target flux in max checkbox (DN): 87956\n" \
"\n" \
" global local\n" \
" axis1 axis2 axis1 axis2\n" \
"Target location: 528.0 515.0 44.0 52.0\n" \
"\n" \
" axis1 axis2 axis1 axis2 V2 V3\n" \
" (pixels) (arcsec) (arcsec)\n" \
"Estimated slew: -7.7 -1.0 -0.393 -0.051 -0.314 -0.242\n" \
"-------------------------------------------------------------------------------\n" \
"Fine locate phase: Target flux in max checkbox (DN): 87849\n" \
"\n" \
" global local\n" \
" axis1 axis2 axis1 axis2\n" \
"Target location: 534.0 517.0 50.0 54.0\n" \
"Ref ap location: 537.3 515.4 21.3 17.4\n" \
"\n" \
" axis1 axis2 axis1 axis2 V2 V3\n" \
" (pixels) (arcsec) (arcsec)\n" \
"Estimated slew: -2.0 1.6 -0.103 0.081 -0.015 -0.130\n" \
"-------------------------------------------------------------------------------\n" \
"Total est. slew: -9.8 0.6 -0.496 0.030 -0.329 -0.372\n" \
"-------------------------------------------------------------------------------\n" \
"Your ACQ appears to have succeeded, as the fluxes in the coarse\n" \
"and fine stages agree within 25% and the fine slews were less than\n" \
"4 pixels as expected\n" \
"\n" \
"===============================================================================\n"
def test_tastis_output_linearaxis1(self, capsys):
"""
od3v01bfq # ACQ/PEAK-UP, LINEARAXIS1, no errors
"""
self.get_data("input", "od3v01bfq_raw.fits")
self.get_data("input", "od3v01bfq_spt.fits")
capsys.readouterr()
tastis('od3v01bfq_raw.fits')
captured = capsys.readouterr()
assert captured.out == "===============================================================================\n" \
"od3v01bfq HST/STIS MIRVIS 52X0.05 ACQ/PEAK-UP\n" \
"prop: 14493 visit: 01 line: 5 target: 2MASS-J23062928-0502285\n" \
"obs date, time: 2016-09-26 04:17:57 exposure time: 10.00\n" \
"dom GS/FGS: SB5F000135F1 sub-dom GS/FGS: SB5F000156F2\n" \
"ACQ params: bias sub: 1510 method: MAX-FLUX-CENTROID\n" \
"subarray (axis1,axis2): size=(32,32) corner=(521,500)\n" \
"-------------------------------------------------------------------------------\n" \
"Scan type: LINEARAXIS1 Step size (mas): 26\n" \
"\n" \
" [ 0 848 3069 3432 1912 555 228]\n" \
"\n" \
" axis1 axis2 axis1 axis2 V2 V3\n" \
" (pixels) (arcsec) (arcsec)\n" \
"Estimated slew: -0.2 0.0 -0.010 0.000 -0.007 -0.007\n" \
"Flux in post-slew confirmation image (40210) - Pedestal (35982) = 4228 DN\n" \
"-------------------------------------------------------------------------------\n" \
"The confirmation image has a flux between 0.8 and 2.0 times the\n" \
"maximum flux in the peakup, which is typical of a successful ACQ/PEAK.\n" \
"===============================================================================\n"
| 30,805 | 61.869388 | 116 |
py
|
stistools
|
stistools-master/tests/conftest.py
|
"""Project default for pytest"""
import os
import pytest
import re
import crds
from astropy.tests.helper import enable_deprecations_as_exceptions
# Treat all DeprecationWarnings as exceptions
enable_deprecations_as_exceptions()
def pytest_addoption(parser):
# Add option to run slow tests
parser.addoption(
"--runslow",
action="store_true",
help="run slow tests"
)
parser.addoption(
"--slow",
action="store_true",
help="run slow tests"
)
# Add option to use big data sets
parser.addoption(
"--bigdata",
action="store_true",
help="use big data sets (intranet)"
)
parser.addoption(
"--env",
choices=['dev', 'stable', ''],
default='',
help="specify what environment to test"
)
@pytest.fixture(scope='function', autouse=True)
def _jail(tmpdir):
""" Perform test in a pristine temporary working directory
"""
os.chdir(tmpdir.strpath)
yield
@pytest.fixture
def envopt(request):
return request.config.getoption("env")
def require_crds_context(required_context):
"""Ensure CRDS context is a certain level
Parameters
----------
level: int
The minimal level required
Returns
-------
pytest.mark.skipif decorator
"""
current_context_string = crds.get_context_name('jwst')
    match = re.match(r'jwst_(\d\d\d\d)\.pmap', current_context_string)
current_context = int(match.group(1))
return pytest.mark.skipif(
current_context < required_context,
reason='CRDS context {} less than required context {}'.format(
current_context_string, required_context
)
)
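

# Usage sketch for require_crds_context (the minimum context number is
# hypothetical):
#
#     @require_crds_context(1100)
#     def test_needs_recent_context():
#         ...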
def pytest_configure(config):
config.addinivalue_line("markers", "slow: mark test as slow to run")
config.addinivalue_line("markers", "bigdata: use big data sets (intranet)")
config.addinivalue_line("markers", "not_under_travis: mark as test to skip if running under a TravisCI")
def pytest_collection_modifyitems(config, items):
if config.getoption("--runslow"):
# --runslow given in cli: do not skip slow tests
return
skip_slow = pytest.mark.skip(reason="need --runslow option to run")
skip_bigdata = pytest.mark.skip(reason="need --bigdata option to run")
skip_travis = pytest.mark.skip(reason="temporarily disabled due to performance issues")
for item in items:
if "slow" not in item.keywords:
item.add_marker(skip_slow)
if "bigdata" not in item.keywords:
item.add_marker(skip_bigdata)
if "TRAVIS" in os.environ and os.environ["TRAVIS"] == "true":
item.add_marker(skip_travis)
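

# Typical invocations given the options registered above (paths are
# illustrative):
#
#     pytest --runslow --bigdata tests/
#     pytest --runslow --bigdata --env dev tests/test_calstis.py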
| 2,737 | 27.821053 | 108 |
py
|
stistools
|
stistools-master/tests/test_wx2d.py
|
import pytest
from stistools.wx2d import wx2d
from .resources import BaseSTIS
@pytest.mark.bigdata
@pytest.mark.slow
class TestWx2d(BaseSTIS):
input_loc = 'wx2d'
ref_loc = 'wx2d'
def test_wx2d_t1(self):
"""
Test for wx2d using rows parameter
"""
# Prepare input files.
self.get_input_file("../stisnoise/input", "o6ih10060_crj.fits")
# Run wx2d
wx2d("o6ih10060_crj.fits", "o6ih10060_wx2d.fits",
wavelengths="o6ih10060_wl.fits", helcorr="perform",
rows=(843, 942))
# Compare results
outputs = [("o6ih10060_wx2d.fits", "o6ih10060_wx2d_ref.fits"),
("o6ih10060_wl.fits", "o6ih10060_wl_ref.fits")]
self.compare_outputs(outputs, delete_history=True)
def test_wx2d_t2(self):
"""
Test for wx2d
"""
# Prepare input files.
self.get_input_file("input", "o4d301030_crj.fits")
# Run wx2d
wx2d("o4d301030_crj.fits", "o4d301030_wx2d.fits", helcorr="perform")
# Compare results
outputs = [("o4d301030_wx2d.fits", "o4d301030_wx2d_ref.fits")]
self.compare_outputs(outputs, delete_history=True)
| 1,251 | 24.04 | 76 |
py
|
stistools
|
stistools-master/tests/test_calstis.py
|
from stistools.calstis import calstis
from .resources import BaseSTIS
import pytest
@pytest.mark.bigdata
@pytest.mark.slow
class TestCalstis(BaseSTIS):
input_loc = 'calstis'
ref_loc = 'calstis/ref'
def test_ccd_imaging(self):
"""
This test is for calstis on CCD imaging data
"""
# Prepare input files.
self.get_input_file("input", "oci402010_raw.fits")
self.get_data("input", "oci402dnj_epc.fits")
self.get_data("input", "oci402doj_epc.fits")
outroot = "stis_test1"
outfile = outroot + "_flt.fits"
outcrj = outroot + "_crj.fits"
outsx2 = outroot + "_sx2.fits"
reffile_flt = 'reference_stis_01_flt.fits'
reffile_crj = 'reference_stis_01_crj.fits'
reffile_sx2 = 'reference_stis_01_sx2.fits'
calstis("oci402010_raw.fits", outroot=outroot)
# Compare results
outputs = [(outfile, reffile_flt), (outcrj, reffile_crj),
(outsx2, reffile_sx2)]
self.compare_outputs(outputs)
# def test1_lev1_FUV(self):
# """
# This test is for level 1? FUV data
# """
#
# # Prepare input files.
# self.get_input_file("input", "o5cl02040_raw.fits")
# self.get_input_file("input", "o5cl02040_wav.fits")
# outroot = "calstis_lev1_FUVspec"
#
# # Run test
# calstis("o5cl02040_raw.fits", outroot=outroot)
#
# # Compare results
# outputs = [(outroot+"_flt.fits", outroot+"_flt_ref.fits"),
# (outroot+"_x1d.fits", outroot+"_x1d_ref.fits")]
# self.compare_outputs(outputs)
def test2_lev1_FUV(self):
"""
This test is for level 1? FUV data
"""
# Prepare input files.
self.get_input_file("input", "odj102010_raw.fits")
self.get_input_file("input", "odj102010_wav.fits")
outroot = "calstis_1lev1_FUVspec"
# Run test
calstis("odj102010_raw.fits", outroot=outroot)
# Compare results
outputs = [(outroot+"_flt.fits", outroot+"_flt_ref.fits"),
(outroot+"_x1d.fits", outroot+"_x1d_ref.fits")]
self.compare_outputs(outputs)
def test_lev2_CCD(self):
"""
This test is for level 2 CCD data
"""
# Prepare input files.
self.get_input_file("input", "o3wd01060_raw.fits")
self.get_input_file("input", "o3wd01060_wav.fits")
self.get_data("input", "o3wd01060_spt.fits")
outroot = "calstis_lev2_CCD"
# Run test
calstis("o3wd01060_raw.fits", outroot=outroot)
# Compare results
outputs = [(outroot + "_flt.fits", outroot + "_flt_ref.fits"),
(outroot + "_crj.fits", outroot + "_crj_ref.fits"),
(outroot + "_sx1.fits", outroot + "_sx1_ref.fits"),
(outroot + "_sx2.fits", outroot + "_sx2_ref.fits")]
self.compare_outputs(outputs)
def test_lev3_FUV(self):
"""
This test is for level 3 FUV data
"""
# Prepare input files.
self.get_input_file("input", "o5in01tnq_raw.fits")
outroot = "calstis_lev3_FUV"
# Run test
calstis("o5in01tnq_raw.fits", outroot=outroot)
# Compare results
outputs = [(outroot + "_flt.fits", outroot + "_flt_ref.fits"),
(outroot + "_x2d.fits", outroot + "_x2d_ref.fits")]
self.compare_outputs(outputs)
def test_lev3_NUV(self):
"""
This test is for level 3 NUV data
"""
# Prepare input files.
self.get_input_file("input", "o6d806030_raw.fits")
self.get_data("input", "o6d806030_wav.fits")
outroot = "calstis_lev3_NUV"
# Run test
calstis("o6d806030_raw.fits", outroot=outroot)
# Compare results
outputs = [(outroot + "_flt.fits", outroot + "_flt_ref.fits"),
(outroot + "_x1d.fits", outroot + "_x1d_ref.fits"),
(outroot + "_x2d.fits", outroot + "_x2d_ref.fits")]
self.compare_outputs(outputs)
| 4,115 | 30.419847 | 70 |
py
|
stistools
|
stistools-master/tests/test_basic2d.py
|
from stistools.basic2d import basic2d
from .resources import BaseSTIS
import pytest
@pytest.mark.bigdata
@pytest.mark.slow
class TestBasic2d(BaseSTIS):
input_loc = 'basic2d'
ref_loc = 'basic2d/ref'
def test_basic2d_lev1a(self):
"""
BASIC2D - stsdas/hst_calib/stis/basic2d: level 1a
This regression test for this level of this task is a running of the
task with ALL parameters blazing (equal to 'perform'). This includes
the additional creation of an output test file for bias levels.
        The input data for this test is a STIS NUV-MAMA image.
"""
# Prepare input files.
self.get_input_file("input", "o6d806030_raw.fits")
# Run basic2d
basic2d("o6d806030_raw.fits", output="basic2d_lev1a_flt.fits")
# Compare results
outputs = [("basic2d_lev1a_flt.fits", "basic2d_lev1a_flt_ref.fits")]
self.compare_outputs(outputs)
def test_basic2d_lev3a_blev(self):
"""
The regression test for this level of this task is a running of the
        task with ONLY the bias level parameter set (equal to
'perform'). This includes the additional creation of an output test
file for bias levels. The input data for this test is a STIS CCD image.
"""
# Prepare input files.
self.get_input_file("input", "o3wd01060_raw.fits")
self.get_data("input", "o3wd01060_wav.fits")
self.get_data("input", "o3wd01060_spt.fits")
# Run basic2d
basic2d('o3wd01060_raw.fits', output="basic2d_lev3a_blev.fits",
outblev="basic2d_lev3a_blev.dat", dqicorr="omit",
doppcorr="omit", lorscorr="omit", glincorr="omit",
lflgcorr="omit", biascorr="omit", darkcorr="omit",
flatcorr="omit", photcorr="omit")
# Compare results
outputs = [("basic2d_lev3a_blev.fits", "basic2d_lev3a_blev_ref.fits"),
("basic2d_lev3a_blev.dat", "basic2d_lev3a_blev_ref.dat")]
self.compare_outputs(outputs)
def test_basic2d_lev3b_blev(self):
"""
The regression test for this level of this task is a running of the
task with the bias level parameter and Doppler smoothing correction
parameters set (equal to 'perform'). This includes the additional
creation of an output test file for bias levels. The input data for
this test is a STIS CCD image.
"""
# Prepare input files.
self.get_input_file("input", "o3wd01060_raw.fits")
self.get_data("input", "o3wd01060_wav.fits")
self.get_data("input", "o3wd01060_spt.fits")
# Run basic2d
basic2d('o3wd01060_raw.fits', output="basic2d_lev3b_blev.fits",
outblev="basic2d_lev3b_blev.dat", dqicorr="omit",
lorscorr="omit", glincorr="omit", lflgcorr="omit",
biascorr="omit", darkcorr="omit", flatcorr="omit",
photcorr="omit")
# Compare results
outputs = [("basic2d_lev3b_blev.fits", "basic2d_lev3b_blev_ref.fits"),
("basic2d_lev3b_blev.dat", "basic2d_lev3b_blev_ref.dat")]
self.compare_outputs(outputs)
def test_basic2d_lev3c_blev(self):
"""
The regression test for this level of this task is a running of the
task with the bias level, Doppler smoothing correction and BIAS image
sub. parameters set (equal to 'perform'). This includes the additional
creation of an output test file for bias levels. The input data for
this test is a STIS CCD image.
"""
# Prepare input files.
self.get_input_file("input", "o3wd01060_raw.fits")
self.get_data("input", "o3wd01060_wav.fits")
self.get_data("input", "o3wd01060_spt.fits")
# Run basic2d
basic2d('o3wd01060_raw.fits', output="basic2d_lev3c_blev.fits",
outblev="basic2d_lev3c_blev.dat", dqicorr="omit",
lorscorr="omit", glincorr="omit", lflgcorr="omit",
darkcorr="omit", flatcorr="omit", photcorr="omit")
# Compare results
outputs = [("basic2d_lev3c_blev.fits", "basic2d_lev3c_blev_ref.fits"),
("basic2d_lev3c_blev.dat", "basic2d_lev3c_blev_ref.dat")]
self.compare_outputs(outputs)
def test_basic2d_lev3d_blev(self):
"""
The regression test for this level of this task is a running of the
task with the bias level, Doppler smoothing correction, BIAS image
subtraction, and dark subtraction parameters set (equal to 'perform').
This includes the additional creation of an output test file for bias
levels. The input data for this test is a STIS CCD image.
"""
# Prepare input files.
self.get_input_file("input", "o3wd01060_raw.fits")
self.get_data("input", "o3wd01060_wav.fits")
self.get_data("input", "o3wd01060_spt.fits")
# Run basic2d
basic2d('o3wd01060_raw.fits', output="basic2d_lev3d_blev.fits",
outblev="basic2d_lev3d_blev.dat", dqicorr="omit",
lorscorr="omit", glincorr="omit", lflgcorr="omit",
flatcorr="omit", photcorr="omit")
# Compare results
outputs = [("basic2d_lev3d_blev.fits", "basic2d_lev3d_blev_ref.fits"),
("basic2d_lev3d_blev.dat", "basic2d_lev3d_blev_ref.dat")]
self.compare_outputs(outputs)
| 5,491 | 40.606061 | 79 |
py
|
stistools
|
stistools-master/tests/test_doppinfo.py
|
from stistools.doppinfo import Doppinfo
from .resources import BaseSTIS
import pytest
@pytest.mark.bigdata
@pytest.mark.slow
class TestDoppinfo(BaseSTIS):
input_loc = 'doppinfo'
ref_loc = 'doppinfo'
def test_doppinfo_basic(self, capsys):
"""
        Test stistools.doppinfo.Doppinfo with defaults, no updating
Data info: CENWAVE -- 2263.000
FILTER -- E230H
ocb6o2020 # IMAGE
"""
# Prepare input files.
self.get_data("input", "ocb6o2020_raw.fits")
self.get_data("input", "ocb6o2020_spt.fits")
capsys.readouterr()
# Run doppinfo
Doppinfo("ocb6o2020_raw.fits", dt=100, spt="ocb6o2020_spt.fits")
# Compare results
captured = capsys.readouterr()
assert captured.out == "# orbitper doppzero doppmag doppmag_v file\n" \
" 5728.67 56752.114170 11.68643135 7.40391177 ocb6o2020_raw.fits[sci,1]\n" \
"# time (MJD) shift radvel\n" \
"56752.165175 -11.59 -7.345\n" \
"56752.166333 -11.37 -7.203\n" \
"56752.167490 -11.01 -6.975\n" \
"56752.168647 -10.52 -6.663\n" \
"56752.169805 -9.90 -6.272\n" \
"56752.170962 -9.16 -5.805\n" \
"56752.172120 -8.32 -5.269\n" \
"56752.173277 -7.37 -4.669\n" \
"56752.174434 -6.34 -4.014\n" \
"56752.175592 -5.23 -3.311\n" \
"56752.176749 -4.05 -2.568\n" \
"56752.177907 -2.83 -1.794\n" \
"56752.179064 -1.58 -0.998\n" \
"56752.180222 -0.30 -0.190\n" \
"\n" \
"# orbitper doppzero doppmag doppmag_v file\n" \
" 5728.67 56752.180505 11.68734454 7.40449032 ocb6o2020_raw.fits[sci,2]\n" \
"# time (MJD) shift radvel\n" \
"56752.181784 1.42 0.902\n" \
"56752.182941 2.68 1.700\n" \
"56752.184099 3.91 2.477\n" \
"56752.185256 5.09 3.225\n" \
"56752.186413 6.21 3.935\n" \
"56752.187571 7.26 4.598\n" \
"56752.188728 8.22 5.205\n" \
"56752.189886 9.08 5.750\n" \
"56752.191043 9.83 6.227\n" \
"56752.192200 10.46 6.628\n" \
"56752.193358 10.97 6.950\n" \
"56752.194515 11.35 7.189\n" \
"56752.195673 11.59 7.342\n" \
"56752.196830 11.69 7.406\n" \
"\n"
def test_doppinfo_update(self, capsys):
"""
This tests stis.doppinfo.Doppinfo with updating.
Data info: CENWAVE -- 7283.000
FILTER -- G750M
"""
# Prepare input files.
self.get_data("input", "oac6010a0_raw.fits")
self.get_data("input", "oac6010a0_spt.fits")
capsys.readouterr()
# Run doppinfo
Doppinfo("oac6010a0_raw.fits", dt=2, spt="oac6010a0_spt.fits",
update=True)
# Compare results
captured = capsys.readouterr()
assert captured.out == "# orbitper doppzero doppmag doppmag_v file\n" \
" 5749.288 55011.984418 0.14207078 3.23985005 oac6010a0_raw.fits[sci,1]\n" \
"# time (MJD) shift radvel\n" \
"55012.034045 -0.14 -3.241\n" \
"55012.034068 -0.14 -3.241\n" \
"55012.034092 -0.14 -3.242\n" \
"55012.034115 -0.14 -3.242\n" \
"55012.034138 -0.14 -3.242\n" \
"55012.034161 -0.14 -3.242\n" \
"55012.034184 -0.14 -3.242\n" \
"55012.034207 -0.14 -3.242\n" \
"55012.034230 -0.14 -3.242\n" \
"55012.034254 -0.14 -3.242\n" \
"\n" \
"oac6010a0_raw.fits[sci,1] has been updated as follows:\n" \
"orbitper: 5749.2879 (added)\n" \
"doppzero: 55011.9844183 (added)\n" \
"doppmag: 0.142071 (added)\n" \
"doppmagv: 3.239850 (added)\n" \
"\n" \
"# orbitper doppzero doppmag doppmag_v file\n" \
" 5749.288 55011.984418 0.14208142 3.24009286 oac6010a0_raw.fits[sci,2]\n" \
"# time (MJD) shift radvel\n" \
"55012.034393 -0.14 -3.242\n" \
"55012.034416 -0.14 -3.242\n" \
"55012.034439 -0.14 -3.242\n" \
"55012.034462 -0.14 -3.242\n" \
"55012.034485 -0.14 -3.242\n" \
"55012.034508 -0.14 -3.242\n" \
"55012.034532 -0.14 -3.242\n" \
"55012.034555 -0.14 -3.242\n" \
"55012.034578 -0.14 -3.242\n" \
"55012.034601 -0.14 -3.241\n" \
"\n" \
"oac6010a0_raw.fits[sci,2] has been updated as follows:\n" \
"orbitper: 5749.2879 (added)\n" \
"doppzero: 55011.9844183 (added)\n" \
"doppmag: 0.142081 (added)\n" \
"doppmagv: 3.240093 (added)\n" \
"\n"
outputs = [("oac6010a0_raw.fits", "oac6010a0_raw_ref.fits")]
self.compare_outputs(outputs)
| 6,932 | 51.522727 | 118 |
py
|
stistools
|
stistools-master/tests/test_ctestis.py
|
import numpy as np
from stistools.ctestis import ctestis
from .resources import BaseSTIS
import pytest
@pytest.mark.bigdata
@pytest.mark.slow
class TestCtestis(BaseSTIS):
"""
We need to add more tests for this
"""
input_loc = 'ctestis'
ref_loc = 'ctestis'
def test_single_value_ctestis(self, capsys):
"""
Test ctestis with a single input set.
"""
# Prepare input files.
self.get_data("input", "o4qp9g010_crj.fits")
self.get_data("input", "o4qp9g010_spt.fits")
capsys.readouterr()
# Run ctestis
fluxc, dmagc, dyc = ctestis(182., 5000., 150.,
stisimage='o4qp9g010_crj.fits')
captured = capsys.readouterr()
assert captured.out == "\n" \
"mjd: 50893.30\n" \
"nread: 2 \n" \
"ybin: 1 \n" \
"gain: 1.0\n" \
"amp: D\n" \
"\n" \
"tt0: -2.3865942\n" \
"lcts: -0.67595399\n" \
"bck: 75.0\n" \
"lbck: 2.317577\n" \
"cti: 1.7314006e-05\n" \
"fluxc: 2536.7133\n" \
"dmagc: -0.015828427\n" \
"cti10000: 0.17314006\n" \
"dy512: 0.0043051192\n" \
"dyc: 0.007079903\n" \
"\n" \
"net: 5000.0\n" \
"sky: 150.0\n" \
"ycol: 182.0\n" \
"fluxc: 2536.7133\n" \
"dmagc: -0.015828427\n" \
"dyc: 0.007079903\n" \
"\n"
assert np.allclose([2536.7133], [fluxc])
assert np.allclose([-0.015828427], [dmagc])
assert np.allclose([0.007079903], [dyc])
def test_list_values_ctestis(self, capsys):
"""
Test ctestis with a list of values.
"""
# Prepare input files.
self.get_data("input", "o4qp9g010_crj.fits")
self.get_data("input", "o4qp9g010_spt.fits")
capsys.readouterr()
# Run ctestis
fluxc, dmagc, dyc = ctestis([182., 182.], [5000., 1000.], [150., 150.],
stisimage='o4qp9g010_crj.fits')
captured = capsys.readouterr()
assert captured.out == "\n" \
"mjd: 50893.30\n" \
"nread: 2 \n" \
"ybin: 1 \n" \
"gain: 1.0\n" \
"amp: D\n" \
"\n" \
"tt0: -2.3865942\n" \
"lcts: [-0.67595399 -2.2853919 ]\n" \
"bck: [75. 75.]\n" \
"lbck: [2.31757699 2.31757699]\n" \
"cti: [1.73140064e-05 2.15163301e-05]\n" \
"fluxc: [2536.71326136 509.14102614]\n" \
"dmagc: [-0.01582843 -0.01967022]\n" \
"cti10000: [0.17314006 0.2151633 ]\n" \
"dy512: [0.00430512 0.00534297]\n" \
"dyc: [0.0070799 0.00878668]\n" \
"\n" \
"net: [5000. 1000.]\n" \
"sky: [150. 150.]\n" \
"ycol: [182. 182.]\n" \
"fluxc: [2536.71326136 509.14102614]\n" \
"dmagc: [-0.01582843 -0.01967022]\n" \
"dyc: [0.0070799 0.00878668]\n" \
"\n"
assert np.allclose([2536.71326136, 509.14102614], [fluxc])
assert np.allclose([-0.01582843, -0.01967022], [dmagc])
assert np.allclose([0.0070799, 0.00878668], [dyc])
| 4,270 | 38.546296 | 79 |
py
|
stistools
|
stistools-master/tests/test_mktrace.py
|
from stistools.mktrace import mktrace
from .resources import BaseSTIS
import pytest
@pytest.mark.bigdata
@pytest.mark.slow
class TestMktrace(BaseSTIS):
input_loc = 'mktrace'
ref_loc = 'mktrace'
atol = 1e-14
def test_mktrace_t1(self, capsys):
"""
This tests a basic usage of stis.mktrace. To plot the results run
plot*.py
"""
# Prepare input files.
self.get_input_file("input", "o45a03010_crj.fits")
capsys.readouterr()
# Run mktrace
mktrace('o45a03010_crj.fits')
# Compare results
captured = capsys.readouterr()
assert captured.out == "Traces were rotated by 0.0066273929 degrees \n" \
"\n" \
"trace is centered on row 509.2475494293\n"
outputs = [("o45a03010_crj_1dt.fits", "o45a03010_crj_1dt_ref.fits"),
("o45a03010_crj_1dt_interp.fits", "o45a03010_crj_1dt_interp_ref.fits"),
("o45a03010_crj_1dt_interpfit.fits", "o45a03010_crj_1dt_interpfit_ref.fits"),
("o45a03010_crj_1dt_sci.fits", "o45a03010_crj_1dt_sci_ref.fits"),
("o45a03010_crj_1dt_scifit.fits", "o45a03010_crj_1dt_scifit_ref.fits")]
self.compare_outputs(outputs)
def test_mktrace_t2(self, capsys):
"""
        This test uses the E1 aperture position and places the
        target near row 900. Originally there was a problem
        with the interpolated trace due to incorrect determination
        of the index of the trace in the trace table.
"""
# Prepare input files.
self.get_input_file("input", "o8pp31020_crj.fits")
capsys.readouterr()
# Run mktrace
mktrace('o8pp31020_crj.fits')
# Compare results
captured = capsys.readouterr()
assert captured.out == "Traces were rotated by 0.0317233207 degrees \n" \
"\n" \
"trace is centered on row 893.7059688464\n"
outputs = [("o8pp31020_crj_1dt.fits", "o8pp31020_crj_1dt_ref.fits"),
("o8pp31020_crj_1dt_interp.fits", "o8pp31020_crj_1dt_interp_ref.fits"),
("o8pp31020_crj_1dt_interpfit.fits", "o8pp31020_crj_1dt_interpfit_ref.fits"),
("o8pp31020_crj_1dt_sci.fits", "o8pp31020_crj_1dt_sci_ref.fits"),
("o8pp31020_crj_1dt_scifit.fits", "o8pp31020_crj_1dt_scifit_ref.fits")]
self.compare_outputs(outputs)
| 2,525 | 35.085714 | 96 |
py
|
stistools
|
stistools-master/tests/test_stisnoise.py
|
from stistools.stisnoise import stisnoise
from .resources import BaseSTIS
import pytest
@pytest.mark.bigdata
@pytest.mark.slow
class TestStisnoise(BaseSTIS):
input_loc = 'stisnoise'
ref_loc = 'stisnoise'
# Really should add some output and return array checks for these tests
def test_stisnoise_t1(self, capsys):
"""
        Test stisnoise with boxcar smoothing and check the output.
"""
# Prepare input files.
self.get_input_file("input", "o6ih10060_crj.fits")
capsys.readouterr()
# Run stisnoise
stisnoise('o6ih10060_crj.fits', outfile='o6ih10060_fcrj1.fits',
boxcar=5)
# Compare results
captured = capsys.readouterr()
assert captured.out == "Target: V1016-CYG, Amp: D, Gain: 1\n"
outputs = [("o6ih10060_fcrj1.fits", "o6ih10060_fcrj1_ref.fits")]
self.compare_outputs(outputs)
def test_stisnoise_t2(self):
"""
stisnoise with window test
"""
# Prepare input files.
self.get_input_file("input", "o6ih10060_crj.fits")
# Run stisnoise
stisnoise('o6ih10060_crj.fits', outfile='o6ih10060_fcrj2.fits',
window=[0.5, 0.5, 0.1])
# Compare results
outputs = [("o6ih10060_fcrj2.fits", "o6ih10060_fcrj2_ref.fits")]
self.compare_outputs(outputs)
| 1,364 | 25.764706 | 75 |
py
|
stistools
|
stistools-master/tests/test_ocrreject.py
|
from stistools.ocrreject import ocrreject
from .resources import BaseSTIS
import pytest
@pytest.mark.bigdata
@pytest.mark.slow
class TestOcrreject(BaseSTIS):
input_loc = 'ocrreject'
ref_loc = 'ocrreject/ref'
input_list = ["o58i01q7q_flt.fits", "o58i01q8q_flt.fits",
"o58i01q9q_flt.fits", "o58i01qcq_flt.fits",
"o58i01qdq_flt.fits", "o58i01qeq_flt.fits",
"o58i01qhq_flt.fits", "o58i01qiq_flt.fits",
"o58i01qjq_flt.fits", "o58i01qmq_flt.fits",
"o58i01qnq_flt.fits", "o58i01qoq_flt.fits",
"o58i01qrq_flt.fits", "o58i01qsq_flt.fits"]
# Make input file string
input_file_string = ", ".join(input_list)
def test_ocrrject_lev2(self):
"""
        This regression test for this level of this task is different from
        level three in two ways. Two parameters are set to give an initial
        guess for the sky value and define a sky subtraction method. It also
removes cosmic rays from 14 STIS/CCD images and creates a single
'clean' image which is compared to a reference file using 'FITSDIFF'.
"""
# Prepare input files.
for filename in self.input_list:
self.get_input_file("input", filename)
# Run ocrreject
ocrreject(self.input_file_string, output="ocrreject_lev2_crj.fits",
initgues="med", skysub="mode")
# Compare results
outputs = [("ocrreject_lev2_crj.fits", "ocrreject_lev2_crj_ref.fits")]
self.compare_outputs(outputs)
def test_ocrrject_lev3(self):
"""
        This regression test for this level of this task is a simple default
parameter execution of the task. It attempts to remove cosmic rays
from 14 STIS/CCD images. The resulting calibration is compared to a
reference file using 'FITSDIFF'.
"""
# Prepare input files.
for filename in self.input_list:
self.get_input_file("input", filename)
# Run ocrreject
ocrreject(self.input_file_string, output="ocrreject_lev3_crj.fits")
# Compare results
outputs = [("ocrreject_lev3_crj.fits", "ocrreject_lev3_crj_ref.fits")]
self.compare_outputs(outputs)
| 2,283 | 35.253968 | 78 |
py
|
stistools
|
stistools-master/tests/test_defringe.py
|
from stistools.defringe import normspflat, prepspec, mkfringeflat, defringe
from .resources import BaseSTIS
import pytest
@pytest.mark.bigdata
@pytest.mark.slow
class TestDefringe(BaseSTIS):
input_loc = 'defringe'
def test_normspflat_g750l(self):
"""Compare normspflat output for a g750l spectrum"""
sci_file = 'o49x18010'
flat_file = 'o49x18020'
output = flat_file+"_nsp_out.fits"
for input_file in [flat_file+'_raw.fits', sci_file+'_wav.fits']:
self.get_input_file("input", input_file)
normspflat(flat_file+"_raw.fits",output,
do_cal=True,wavecal=sci_file+"_wav.fits")
outputs = [(output, flat_file+"_nsp.fits")]
self.compare_outputs(outputs)
def test_prepspec_g750l(self):
"""Compare prepspec output for a g750l spectrum"""
sci_file = 'o49x18010'
flat_file = 'o49x18020'
output = sci_file+"_crj.fits"
for input_file in [sci_file+'_raw.fits', sci_file+'_wav.fits']:
self.get_input_file("input", input_file)
prepspec(sci_file+"_raw.fits")
outputs = [(output, sci_file+"_crj.fits")]
self.compare_outputs(outputs)
def test_mkfringeflat_g750l(self):
"""compare mkfringeflat output for a g750l spectrum"""
sci_file = 'o49x18010'
flat_file = 'o49x18020'
output = flat_file+"_frr_out.fits"
for input_file in [sci_file+'_crj.fits', flat_file+'_nsp.fits']:
self.get_input_file("input", input_file)
mkfringeflat(sci_file+"_crj.fits", flat_file+"_nsp.fits", output)
outputs = [(output, flat_file+"_frr.fits")]
self.compare_outputs(outputs)
def test_defringe_g750l(self):
"""compare defringe output for a g750l spectrum"""
sci_file = 'o49x18010'
flat_file = 'o49x18020'
for input_file in [sci_file+'_crj.fits', flat_file+'_frr.fits']:
self.get_input_file("input", input_file)
output = defringe(f"{sci_file}_crj.fits",f"{flat_file}_frr.fits")
outputs = [(output, sci_file+"_drj.fits")]
self.compare_outputs(outputs)
def test_normspflat_g750m(self):
"""compare normspflat output for a g750m spectrum"""
sci_file = "oe36m10g0"
flat_file = "oe36m10j0"
output = flat_file+"_frr_out.fits"
for input_file in [flat_file+'_raw.fits', sci_file+'_wav.fits']:
self.get_input_file("input", input_file)
normspflat(flat_file+"_raw.fits", output,
do_cal=True, wavecal=sci_file+"_wav.fits")
outputs = [(output, flat_file+"_nsp.fits")]
self.compare_outputs(outputs)
def test_prepspec_g750m(self):
"""Compare prepspec output for a g750m spectrum"""
sci_file = "oe36m10g0"
flat_file = "oe36m10j0"
output = sci_file+"_sx2.fits"
for input_file in [sci_file+'_raw.fits', sci_file+'_wav.fits']:
self.get_input_file("input", input_file)
prepspec(sci_file+"_raw.fits")
outputs = [(output, sci_file+"_sx2.fits")]
self.compare_outputs(outputs)
def test_mkfringeflat_g750m(self):
"""compare mkfringeflat output for a g750m spectrum"""
sci_file = "oe36m10g0"
flat_file = "oe36m10j0"
output = flat_file+"_frr_out.fits"
for input_file in [sci_file+'_sx2.fits', flat_file+'_nsp.fits']:
self.get_input_file("input", input_file)
mkfringeflat(sci_file+"_sx2.fits", flat_file+"_nsp.fits", output,
beg_shift=-1.0, end_shift=0.5, shift_step=0.1,
beg_scale=0.8, end_scale=1.5, scale_step=0.04)
outputs = [(output, flat_file+"_frr.fits")]
self.compare_outputs(outputs)
def test_defringe_g750m(self):
"""compare defringe output for a g750m spectrum"""
sci_file = "oe36m10g0"
flat_file = "oe36m10j0"
for input_file in [sci_file+'_sx2.fits', flat_file+'_frr.fits']:
self.get_input_file("input", input_file)
output = defringe(f"{sci_file}_sx2.fits", f"{flat_file}_frr.fits")
outputs = [(output, sci_file+"_s2d.fits")]
self.compare_outputs(outputs)
| 4,310 | 29.359155 | 75 |
py
|
stistools
|
stistools-master/tests/__init__.py
| 0 | 0 | 0 |
py
|
|
stistools
|
stistools-master/tests/test_inttag.py
|
from stistools.inttag import inttag
from .resources import BaseSTIS
import pytest
@pytest.mark.bigdata
@pytest.mark.slow
class TestInttag(BaseSTIS):
input_loc = 'inttag'
def test_accum_lores(self):
"""Compare accum image output for a single lowres imset"""
self.get_data("input", "oddv01050_tag.fits")
output = "inttag_accum_lores_out.fits"
inttag("oddv01050_tag.fits", output, highres=False)
outputs = [(output, "inttag_accum_lores.fits")]
self.compare_outputs(outputs)
def test_accum_hires(self):
"""Compare accum image output for a single highres imset"""
self.get_data("input", "oddv01050_tag.fits")
output = "inttag_accum_hires_out.fits"
inttag("oddv01050_tag.fits", output, highres=True)
outputs = [(output, "inttag_accum_hires.fits")]
self.compare_outputs(outputs)
def test_accum_gtigap(self):
"""Compare accum image output for a single imset with a GTI gap"""
self.get_data("input", "gtigap_tag.fits")
output = "inttag_accum_gtigap_out.fits"
inttag("gtigap_tag.fits", output, highres=False)
outputs = [(output, "inttag_accum_gtigap.fits")]
self.compare_outputs(outputs)
def test_accum_allevents(self):
"""Compare accum image output in allevents mode"""
self.get_data("input", "gtigap_tag.fits")
output = "inttag_accum_allevents_out.fits"
inttag("gtigap_tag.fits", output, highres=False, allevents=True)
outputs = [(output, "inttag_accum_allevents.fits")]
self.compare_outputs(outputs)
def test_accum_rcount(self):
"""Compare accum image multi-imset output"""
self.get_data("input", "gtigap_tag.fits")
output = "inttag_accum_rcount_out.fits"
inttag("gtigap_tag.fits", output, starttime=700, increment=200, rcount=5, highres=False)
outputs = [(output, "inttag_accum_rcount.fits")]
self.compare_outputs(outputs)
def test_primary_hdr_nogap(self):
"""Compare generated primary header keywords (with no gti gap in data)"""
self.get_data("input", "ob3001xqq_tag.fits")
output = "inttag_prihdr_nogap_out.fits"
inttag("ob3001xqq_tag.fits", output)
outputs = [(output, "inttag_prihdr_nogap.fits")]
self.compare_outputs(outputs)
def test_primary_hdr_gap(self):
"""Compare generated primary header keywords (with some gti gap in data)"""
self.get_data("input", "gtigap_tag.fits")
output = "inttag_prihdr_gap_out.fits"
inttag("gtigap_tag.fits", output)
outputs = [(output, "inttag_prihdr_gap.fits")]
self.compare_outputs(outputs)
def test_exptime_truncation(self):
"""Check if inttag handles input rcount sizes correctly by truncating at the last event"""
self.get_data("input", "od7s08010_tag.fits")
output = "inttag_exptime_trunc_out.fits"
inttag("od7s08010_tag.fits", output, increment=1675., rcount=2, starttime=0., allevents=False)
outputs = [(output, "inttag_exptime_trunc.fits")]
self.compare_outputs(outputs)
| 3,152 | 36.535714 | 102 |
py
|
stistools
|
stistools-master/tests/Test_x1d.py
|
from stistools.x1d import x1d
from .resources import BaseSTIS
import pytest
@pytest.mark.bigdata
@pytest.mark.slow
class TestX1d(BaseSTIS):
input_loc = 'x1d'
ref_loc = 'x1d/ref'
def test_x1d(self):
"""
Basic x1d test, mostly using default parameters
"""
# Prepare input files.
self.get_input_file("input", "o56j02020_flt.fits")
# Run basic2d
x1d("o56j02020_flt.fits", output="x1d_lev3.fits", ctecorr="omit")
# Compare results
outputs = [("x1d_lev3.fits", "x1d_lev3_ref.fits")]
self.compare_outputs(outputs)
| 606 | 21.481481 | 73 |
py
|
stistools
|
stistools-master/tests/helpers/utils.py
|
import os
import re
import requests
from astropy.io import fits
__all__ = ['cmp_fitshdr', 'cmp_gen_hdrkeywords',
'word_precision_check', 'abspath',
'download', 'check_url']
RE_URL = re.compile(r'\w+://\S+')
default_compare = dict(
ignore_keywords=['DATE', 'CAL_VER', 'CAL_VCS', 'CRDS_VER', 'CRDS_CTX'],
keys=['primary', 'sci', 'dq'],
rtol=0.000001,
)
def cmp_fitshdr(left, right, **kwargs):
"""Compare FITS header values using keywords
Parameters
----------
left, right: str
The FITS files to compare
keys: list
The header keywords to compare `left` and `right`
(See `defaults_compare` for initial values)
rtol: float
The relative difference to allow when comparing two float values either
in header values, image arrays, or table columns.
(See `defaults_compare` for initial values)
no_assert: boolean
Return result object instead of asserting
kwargs: dict
Additional arguments to be passed to `FITSDiff`
Returns
-------
None
Assert left and right are identical
"""
assert isinstance(left, str)
assert isinstance(right, str)
    # Pop the recognized options so they are not passed to FITSDiff twice
    # through **kwargs below.
    keys = kwargs.pop('keys', default_compare['keys'])
    rtol = kwargs.pop('rtol', default_compare['rtol'])
    ignore_keywords = kwargs.pop('ignore_keywords',
                                 default_compare['ignore_keywords'])
    no_assert = kwargs.pop('no_assert', False)
assert isinstance(keys, list)
assert isinstance(rtol, float)
assert isinstance(ignore_keywords, list)
with fits.open(left) as a:
with fits.open(right) as b:
result = fits.diff.FITSDiff(fits.HDUList([a[kw] for kw in keys]),
fits.HDUList([b[kw] for kw in keys]),
ignore_keywords=ignore_keywords,
rtol=rtol,
**kwargs)
if no_assert:
return result
assert result.identical, result.report()
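# Illustrative usage sketch (not part of the original test suite; the file
# names are hypothetical). Compare only the primary and SCI headers and get
# the FITSDiff result back instead of asserting:
#
#     result = cmp_fitshdr('obs_new.fits', 'obs_ref.fits',
#                          keys=['primary', 'sci'], no_assert=True)
#     print(result.report())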
def cmp_gen_hdrkeywords(fitsobj, base, keywords, limit=0, start=0):
"""Generate list of FITS header elements to compare
TODO: Use correct terminology
Parameters
----------
fitsobj: HDUList
TODO
base: str
Primary keyword
keywords: list
Additional keywords to use
limit: int
Number of extensions
Note: 1-indexed
start: int
Start with extension number
Returns
-------
output: list
Keywords to compare
"""
assert isinstance(fitsobj, fits.HDUList)
assert isinstance(base, str)
assert isinstance(keywords, list)
    output = [fitsobj[base]]  # start the list with the base HDU
if limit and not start:
start += 1
for idx in range(start, limit + 1):
for key in keywords:
if not limit:
output.append(fitsobj[key])
else:
output.append(fitsobj[key, idx])
return output
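# Illustrative usage sketch (hypothetical file name): collect the primary HDU
# plus the 'sci' and 'dq' HDUs of extensions 1..2 for a comparison list.
#
#     with fits.open('obs.fits') as hdul:
#         hdus = cmp_gen_hdrkeywords(hdul, 'primary', ['sci', 'dq'],
#                                    limit=2, start=1)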
# Check strings based on words using length precision
def word_precision_check(str1, str2, length=5):
"""Check to strings word-by-word based for word length
The strings are checked word for word, but only for the first
`length` characters
Parameters
----------
str1, str2: str
The strings to compare
length: int
The number of characters in each word to check.
Returns
-------
match: boolean
True if the strings match
"""
words1 = str1.split()
words2 = str2.split()
if len(words1) != len(words2):
return False
for w1, w2 in zip(words1, words2):
if w1[:length] != w2[:length]:
break
else:
return True
return False
def test_word_precision_check():
"""Test word_precision_check"""
s1 = "a b c"
s2 = "aa bb cc"
s3 = "aa bb cc dd"
s4 = "aazz bbzz cczz"
assert word_precision_check(s1, s1)
assert not word_precision_check(s1, s2)
assert word_precision_check(s1, s2, length=1)
assert not word_precision_check(s2, s3)
assert word_precision_check(s2, s4, length=2)
def abspath(filepath):
"""Get the absolute file path"""
return os.path.abspath(os.path.expanduser(os.path.expandvars(filepath)))
def download(url, dest):
"""Simple http/https downloader
"""
dest = os.path.abspath(dest)
with requests.get(url, stream=True) as r:
with open(dest, 'w+b') as data:
for chunk in r.iter_content(chunk_size=0x4000):
data.write(chunk)
return dest
def check_url(url):
""" Determine if `url` can be resolved without error
"""
if RE_URL.match(url) is None:
return False
r = requests.head(url, allow_redirects=True)
if r.status_code >= 400:
return False
return True
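# Illustrative usage sketch (hypothetical URL): only attempt the download
# when the URL resolves cleanly.
#
#     url = 'https://example.org/data/sample.fits'
#     if check_url(url):
#         local_path = download(url, 'sample.fits')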
def add_suffix(fname, suffix, range=None):
"""Add suffix to file name
Parameters
----------
fname: str
The file name to add the suffix to
suffix: str
The suffix to add_suffix
range: range
If specified, the set of indexes will be added to the
outputs.
Returns
-------
fname, fname_with_suffix
2-tuple of the original file name and name with suffix.
If `range` is defined, `fname_with_suffix` will be a list.
"""
    fname_root, fname_ext = os.path.splitext(fname)
if range is None:
with_suffix = ''.join([
fname_root,
'_',
suffix,
fname_ext
])
else:
with_suffix = []
for idx in range:
with_suffix.append(''.join([
fname_root,
'_',
str(idx),
'_',
suffix,
fname_ext
]))
return fname, with_suffix
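# Worked examples for add_suffix (hypothetical file names):
#     add_suffix('obs_raw.fits', 'flt')
#         -> ('obs_raw.fits', 'obs_raw_flt.fits')
#     add_suffix('obs_raw.fits', 'flt', range=range(2))
#         -> ('obs_raw.fits', ['obs_raw_0_flt.fits', 'obs_raw_1_flt.fits'])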
| 5,942 | 25.650224 | 79 |
py
|
stistools
|
stistools-master/tests/helpers/__init__.py
|
from .io import *
from .utils import *
| 39 | 12.333333 | 20 |
py
|
stistools
|
stistools-master/tests/helpers/io.py
|
import copy
import json
import os
import shutil
from .utils import check_url, download
UPLOAD_SCHEMA = {"files": [
{"pattern": "",
"target": "",
"props": None,
"recursive": "false",
"flat": "true",
"regexp": "false",
"explode": "false",
"excludePatterns": []
}
]
}
__all__ = ['BigdataError', 'get_bigdata', 'upload_results']
BIGDATA_PATHS = [
os.environ.get('TEST_BIGDATA', '/srv/rt/betadrizzle'),
'https://bytesalad.stsci.edu/artifactory/datb-stistools'
]
class BigdataError(Exception):
pass
def _select_bigdata():
""" Find and returns the path to the nearest big datasets
"""
for path in BIGDATA_PATHS:
if os.path.exists(path) or check_url(path):
return path
return None
def get_bigdata(*args):
""" Acquire requested data from a managed resource
Usage:
filename = get_bigdata('abc', '123', 'sample.fits')
with open(filename, 'rb') as data:
example = data.read()
Returns:
Absolute path to local copy of data (i.e. /path/to/example.fits)
"""
    base = _select_bigdata()
    if base is None:
        raise BigdataError('No big data source is available')
    src = os.path.join(base, *args)
filename = os.path.basename(src)
dest = os.path.abspath(os.path.join(os.curdir, filename))
if os.path.exists(src):
if src == dest:
raise BigdataError('Source and destination paths are identical: '
'{}'.format(src))
shutil.copy2(src, dest)
elif check_url(src):
download(src, dest)
else:
raise BigdataError('Failed to retrieve data: {}'.format(src))
return dest
def upload_results(**kwargs):
"""Write out JSON file to upload results from test to storage area.
This function relies on the JFROG JSON schema for uploading data into
artifactory using the Jenkins plugin. Docs can be found at::
https://www.jfrog.com/confluence/display/RTF/Using+File+Specs
Parameters
----------
pattern : str or list of strings
Specifies the local file system path to test results which should be
uploaded to Artifactory. You can specify multiple artifacts by using
wildcards or a regular expression as designated by the regexp property.
target : str
Specifies the target path in Artifactory in the following format:
[repository_name]/[repository_path]
testname : str
Name of test that generate the results. This will be used to create the
name of the JSON file to enable these results to be uploaded to Artifactory.
recursive : bool, optional
Specify whether or not to identify files listed in sub-directories
for uploading. Default: False
"""
# Interpret mandatory inputs
pattern = kwargs.get("pattern")
target = kwargs.get("target")
testname = kwargs.get("testname")
# Finish interpreting inputs
jsonfile = "{}_results.json".format(testname)
recursive = repr(kwargs.get("recursive", False)).lower()
if isinstance(pattern, list):
# Populate schema for this test's data
upload_schema = {"files": []}
for p in pattern:
temp_schema = copy.deepcopy(UPLOAD_SCHEMA["files"][0])
temp_schema.update({"pattern": p, "target": target, "recursive": recursive})
upload_schema["files"].append(temp_schema)
else:
# Populate schema for this test's data
upload_schema = copy.deepcopy(UPLOAD_SCHEMA)
upload_schema["files"][0].update({"pattern": pattern, "target": target, "recursive": recursive})
# Write out JSON file with description of test results
with open(jsonfile, 'w') as outfile:
json.dump(upload_schema, outfile)
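# Illustrative usage sketch (the repository path and test name are
# hypothetical): write 'test_x1d_results.json' describing an upload of all
# FITS products of a test.
#
#     upload_results(pattern='*.fits',
#                    target='datb-stistools/results/',
#                    testname='test_x1d')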
| 3,882 | 31.630252 | 104 |
py
|
stistools
|
stistools-master/doc/source/conf.py
|
# -*- coding: utf-8 -*-
#
# stistools documentation build configuration file, created by
# Warren Hack on Mon Oct 1 2012.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
sys.path.insert(0, os.path.abspath('../../stistools'))
#from stsci.sphinxext.conf import *
import stsci_rtd_theme
# Check Sphinx version
#import sphinx
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0,os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.imgmath','numpydoc',
'sphinx.ext.intersphinx', 'sphinx.ext.coverage',
'sphinx.ext.autosummary',
'sphinx.ext.doctest']
#extensions += ['parameter_anchor']
# Add any paths that contain templates here, relative to this directory.
#templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'stistools'
copyright = u'2018, Warren Hack, Nadia Dencheva, Chris Sontag, Megan Sosey, Michael Droettboom'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.3.0'
# The full version, including alpha/beta/rc tags.
release = '1.3.0 (03-March-2019)'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = []
# The reST default role (used for this markup: `text`) to use for all documents.
default_role = 'autolink'
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = False
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'stsci_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = [stsci_rtd_theme.get_html_theme_path()]
# Turn off smart quotes on HTML pages:
smartquotes = False
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
#html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_use_modindex = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'stistoolsdoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'stistools.tex', u'stistools Documentation',
u'Warren Hack, Nadia Dencheva, Chris Sontag, Megan Sosey, Michael Droettboom', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
def setup(app):
app.add_css_file("stsci.css")
| 6,974 | 31.746479 | 95 |
py
|
sst-macro
|
sst-macro-master/sstmac/sst_core/sstmacro.py
|
# Load module function in Python is changed
# to look for a libmacro.so in LD_LIBRARY_PATH
import sst
import sst.macro
smallLatency = "1ps"
def getParam(params, paramName, paramNS=None):
if not paramName in params:
import sys
    if paramNS:
      sys.stderr.write("Missing parameter '%s' in namespace '%s'\n" % (paramName, paramNS))
    else:
      sys.stderr.write("Missing parameter '%s'\n" % paramName)
raise Exception("failed configuring SST/macro")
return params[paramName]
def getParamNamespace(params, ns, parentNs=None):
if not ns in params:
import sys
if parentNs:
sys.stderr.write("Missing parameter namespace '%s' in namespace '%s'\n" % (ns, parentNS))
else:
sys.stderr.write("Missing parameter namespace '%s'\n" % (ns))
raise Exception("failed configuring SST/macro")
return params[ns]
def getNestedParamNamespace(params, *xargs):
nestedNs = ""
nextParams = params
for entry in xargs:
if not entry in nextParams:
sys.stderr.write("Missing parameter namespace %s in params %s\n" % (entry, nestedNs))
raise Exception("failed configuring SST/macro")
nextParams = nextParams[entry]
nestedNs += "%s." % entry
return nextParams
def makeUniLink(linkType,srcComp,srcId,srcPort,dstComp,dstId,dstPort,outLat=None,inLat=None):
  import sys
  if not outLat: outLat = inLat
if not inLat: inLat = outLat
if not outLat: sys.exit("must specify at least one latency for link")
linkName = "%s%d:%d->%d:%d" % (linkType,srcId,srcPort,dstId,dstPort)
link = sst.Link(linkName)
portName = "output%d" % (srcPort)
srcComp.addLink(link,portName,outLat)
portName = "input%d" % (dstPort)
dstComp.addLink(link,portName,inLat)
def makeBiLink(linkType,comp1,id1,port1,comp2,id2,port2,outLat=None,inLat=None):
makeUniLink(linkType,comp1,id1,port1,comp2,id2,port2,outLat,inLat)
makeUniLink(linkType,comp2,id2,port2,comp1,id1,port1,outLat,inLat)
def makeUniNetworkLink(srcComp,srcId,srcPort,dstComp,dstId,dstPort,outLat=None,inLat=None):
makeUniLink("network",srcComp,srcId,srcPort,dstComp,dstId,dstPort,outLat,inLat)
def makeBiNetworkLink(comp1,id1,port1,comp2,id2,port2,outLat=None,inLat=None):
makeBiLink("network",comp1,id1,port1,comp2,id2,port2,outLat,inLat)
def addNew(prefix, kw, newDict, oldDict):
name = "%s.%s" % (prefix, kw)
newDict[name] = oldDict[kw]
def addParams(prefix, dict, *xargs, **kwargs):
for entry in xargs:
if isinstance(entry, type({})):
for kw in entry:
addNew(prefix, kw, dict, entry)
else: #function
entry(prefix, dict)
for kw in kwargs:
addNew(prefix, kw, dict, kwargs)
def addSubParams(oldPrefix, newPrefix, dict, *xargs, **kwargs):
prefix = "%s.%s" % (oldPrefix, newPrefix)
addParams(prefix, dict, *xargs, **kwargs)
def subParams(prefix, *xargs, **kwargs):
return lambda x,y: addSubParams(x, prefix, y, *xargs, **kwargs)
def redoSubParams_impl(nsArr, theDict, allParams):
for key in theDict:
val = theDict[key]
if isinstance(val, dict):
newNsArr = nsArr[:]
newNsArr.append(key)
redoSubParams_impl(newNsArr, val, allParams)
else:
paramArr = nsArr[:]
paramArr.append(key)
newParam = ".".join(paramArr)
allParams.append((newParam, val))
def macroToCoreParams(theDict):
allParams = []
redoSubParams_impl([], theDict, allParams)
newDict = {}
for key, val in allParams:
newDict[key] = val
  return newDict
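# Worked example: macroToCoreParams flattens nested parameter namespaces into
# the dotted keys that sst-core components expect.
#
#     macroToCoreParams({"nic": {"injection": {"latency": "1us"}}})
#     # -> {"nic.injection.latency": "1us"}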
class Interconnect:
def __init__(self, params):
self.params = params
self.system = sst.macro.System(params)
self.num_nodes = self.system.numNodes()
self.num_switches = self.system.numSwitches()
self.switches = [0]*self.num_switches
self.nodes = [0]*self.num_nodes
def numNodes(self):
return self.num_nodes
def numSwitches(self):
return self.num_switches
def defaultEpFxn(self, nodeID):
nodeParams = getParamNamespace(self.params, "node")
topParams = getParamNamespace(self.params,"topology")
compName = getParam(nodeParams, "name", "node").lower()
if not compName.endswith("_node"):
compName += "_node"
node = sst.Component("Node %d" % nodeID, "macro.%s" % compName)
node.addParams(macroToCoreParams(nodeParams))
node.addParams(macroToCoreParams(topParams))
node.addParam("id", nodeID)
return node
def buildSwitches(self):
for i in range(self.num_switches):
switchParams = getParamNamespace(self.params, "switch")
compName = getParam(switchParams, "name", "switch").lower()
if not compName.endswith("_switch"):
compName += "_switch"
switch = sst.Component("Switch %d" % i, "macro.%s" % compName)
switch.addParams(macroToCoreParams(switchParams))
switch.addParam("id", i)
self.switches[i] = (switch, switchParams)
def buildEndpoints(self, epFxn):
for i in range(self.num_nodes):
self.nodes[i] = epFxn(i)
def latency(self, params):
if "latency" in params:
return params["latency"]
else:
import sys
sys.exit("need link latency in parameters")
def latencyAsFloat(self, params):
    import re
    import sys
    lat = self.latency(params)
    match = re.compile(r"(\d+[.]?\d*)(.*)").search(lat)
if not match:
sys.exit("improperly formatted latency %s" % lat)
num, units = match.groups()
num = float(num)
units = units.strip().lower()
if units == "ms":
num *= 1e-3
elif units == "us":
num *= 1e-6
elif units == "ns":
num *= 1e-9
elif units == "ps":
num *= 1e-12
return num
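  # Worked example: latencyAsFloat({"latency": "100ns"}) returns 1e-07,
  # i.e. the numeric part scaled by the parsed unit suffix.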
def connectSwitches(self):
switchParams = getParamNamespace(self.params, "switch")
for i in range(self.num_switches):
linkParams = getParamNamespace(switchParams, "link", "switch")
connections = self.system.switchConnections(i)
srcSwitch, params = self.switches[i]
lat = self.latency(linkParams)
for srcId, dstId, srcOutport, dstInport in connections:
dstSwitch, dstParams = self.switches[dstId]
makeUniNetworkLink(srcSwitch,srcId,srcOutport,
dstSwitch,dstId,dstInport,
lat)
def connectEndpoints(self):
lat = ""
latNs = getNestedParamNamespace(self.params,"node","nic","injection")
lat = getParam(latNs, "latency")
for swId in range(self.num_switches):
connections = self.system.injectionConnections(swId)
for epId, switchPort, injPort in connections:
ep = self.nodes[epId]
injSwitchComp, params = self.switches[swId]
makeUniLink("injection",ep,epId,injPort,injSwitchComp,swId,switchPort,lat)
connections = self.system.ejectionConnections(swId)
for epId, switchPort, ejPort, in connections:
ep = self.nodes[epId]
ejSwitchComp, params = self.switches[swId]
makeUniLink("ejection",ejSwitchComp,swId,switchPort,ep,epId,ejPort,
outLat=lat,inLat=smallLatency)
# Construct LogP short circuit network for small messages
# sst-macro uses one LogP switch per simulation rank, but using
# a single-switch "star" topology here since elements aren't supposed to
# know anything about simulation parallelism and it greatly simplifies
# sst-core support. We may want to revisit this decision if it proves
# to be a performance bottleneck for MPI parallel simulations.
def buildLogPNetwork(self):
import re
nproc = sst.getMPIRankCount() * sst.getThreadCount()
switchParams = self.params["switch"]
if "logp" in switchParams:
switchParams = switchParams["logp"]
lat = switchParams["out_in_latency"]
switch = sst.Component("LogP 0", "macro.logp_switch")
switch.addParams(macroToCoreParams(switchParams))
switch.addParam("id", 0)
for i in range(self.num_nodes):
ep = self.nodes[i]
linkName = "logPinjection%d->%d" % (i, 0)
#print("configuring link %s" % linkName)
link = sst.Link(linkName)
portName = "output%d" % (sst.macro.NICLogPInjectionPort)
ep.addLink(link, portName, smallLatency) #put no latency here
portName = "input%d" % i
switch.addLink(link, portName, smallLatency)
for i in range(self.num_nodes):
ep = self.nodes[i]
linkName = "logPejection%d->%d" % (0, i)
#print("configuring link %s" % linkName)
link = sst.Link(linkName)
portName = "output%d" % i
switch.addLink(link, portName, lat)
portName = "input%d" % (sst.macro.NICLogPInjectionPort)
ep.addLink(link, portName, lat)
def buildFull(self, epFxn):
self.buildSwitches()
self.buildEndpoints(epFxn)
self.connectSwitches()
self.connectEndpoints()
self.buildLogPNetwork()
def buildLogP(self, epFxn):
self.buildEndpoints(epFxn)
self.buildLogPNetwork()
def build(self, epFxn=None):
if epFxn == None:
epFxn = self.defaultEpFxn
if self.system.isLogP():
self.buildLogP(epFxn)
else:
self.buildFull(epFxn)
def readCmdLineParams():
import sys
return sst.macro.readParams(sys.argv)
def setupDeprecatedParams(params, debugList=[]):
nodeParams = getParamNamespace(params, "node")
swParams = getParamNamespace(params, "switch")
builtinApps = [
"apitest",
"global_test",
"hello_world",
"mpi_coverage",
"mpi_ping_all",
"mpi_print_nodes",
"mpi_topology",
"parsedumpi",
"parseotf2",
"sstmac_mpi_testall",
"traffic_matrix",
"UserAppCxxEmptyMain",
"UserAppCxxFullMain",
]
for i in range(10):
ns = "app%d" % i
if ns in params:
appParams = params[ns]
nodeParams[ns] = appParams
appName = appParams["name"]
if not appName in builtinApps:
cmd = "import sst.%s" % appName
exec(cmd)
del params[ns]
icParams = {}
topParams = getParamNamespace(params,"topology")
icParams["topology"] = topParams
nodeParams["interconnect"] = icParams
nodeParams["topology"] = topParams
if debugList:
nodeParams["debug"] = "[" + ",".join(debugList) + "]"
swParams["topology"] = topParams
#move every param in the global namespace
#into the individual namespaces
for ns in "node", "switch":
nsParams = params[ns]
for key in params:
val = params[key]
if isinstance(val, str):
if not key in nsParams:
nsParams[key] = val
ic = Interconnect(params)
ic.build()
return ic
def setupDeprecated():
print ("setupDeprecated")
import sys
sst.setProgramOption("timebase", "100as")
params = readCmdLineParams()
debugList = []
if "debug" in params:
debugList = params["debug"].strip().split()
for i in range(len(sys.argv)):
if sys.argv[i] == "-d" or sys.argv[i] == "--debug":
debugList.extend(sys.argv[i+1].split(","))
return setupDeprecatedParams(params, debugList)
| 10,780 | 31.570997 | 95 |
py
|
sst-macro
|
sst-macro-master/sstmac/skeletons/offered_load/traffic.py
|
def getVals(model):
fileName = "%s.out" % model
import re
text = open(fileName).read()
  regexp = re.compile(r"throughput=\s+(\d+[.]\d+)")
  matches = regexp.findall(text)
  return [float(m) for m in matches]  # a list, so callers can index it repeatedly
def absError(lvals, rvals):
length = len(lvals)
err = 0
for i in range(length):
r = lvals[i]
l = rvals[i]
err += abs(l-r)
return err / length
def error(lvals, rvals):
length = len(lvals)
err = 0
for i in range(length):
r = lvals[i]
l = rvals[i]
err += l-r
return err / length
def avgTput(vals):
summer = 0
for v in vals:
summer += v
return summer / len(vals)
def scatter(vals):
delta = 0
mean = avgTput(vals)
for v in vals:
delta += abs(mean-v)
return delta / len(vals)
def run(model, **kwargs):
import os
args = [
'-p network_bandwidth=2.5GB/s',
]
for argName in kwargs:
argVal = kwargs[argName]
args.append("-p %s=%s" % (argName, argVal))
if model == "amm1":
args.append('-p congestion_model=simple')
else:
args.append('-p congestion_model=pisces')
args.append('-p amm_model=%s' % model)
args = " ".join(args)
cmd = "./runtraffic %s >& %s.out" % (args, model)
os.system(cmd)
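# Illustrative driver sketch (the model names and the injection_rate
# parameter are hypothetical): sweep both congestion models and report the
# mean throughput and its scatter.
#
#     for model in ("amm1", "amm4"):
#         run(model, injection_rate="80%")
#         vals = getVals(model)
#         print(model, avgTput(vals), scatter(vals))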
| 1,195 | 19.271186 | 52 |
py
|
sst-macro
|
sst-macro-master/examples/snappr.py
|
import sst
sst.setProgramOption("timebase", "100as")
import sst.macro
from sst.macro import Interconnect
swParams = {
"name" : "snappr",
"router" : {
"seed" : "42",
"name" : "dragonfly_minimal",
},
"link" : {
"bandwidth" : "1.0GB/s",
"latency" : "100ns",
"credits" : "8KB",
},
"logp" : {
"bandwidth" : "1GB/s",
"hop_latency" : "100ns",
"out_in_latency" : "100ns",
}
}
appParams = {
"allocation" : "first_available",
"indexing" : "block",
"name" : "mpi_ping_all",
"launch_cmd" : "aprun -n 80 -N 2",
"sleep_time" : "1us",
"message_size" : "2KB",
}
memParams = {
"name" : "snappr",
"channel_bandwidth" : "0.7GB/s",
"num_channels" : "2",
"latency" : "10ns",
}
nicParams = {
"name" : "snappr",
"injection" : {
"bandwidth" : "1.0GB/s",
"latency" : "50ns",
"mtu" : "1024",
"credits" : "12KB",
},
"ejection" : {
"latency" : "50ns",
}
}
nodeParams = {
"memory" : memParams,
"nic" : nicParams,
"app1" : appParams,
"name" : "simple",
"proc" : {
"frequency" : "2GHz",
"ncores" : "4",
}
}
topoParams = {
"name" : "dragonfly",
"geometry" : "[4,3]",
"h" : "6",
"inter_group" : "circulant",
"concentration" : "4",
}
params = {
"node" : nodeParams,
"switch" : swParams,
"topology" : topoParams,
}
ic = Interconnect(params)
ic.build()
| 1,348 | 15.8625 | 41 |
py
|
sst-macro
|
sst-macro-master/examples/macrels.py
|
import sst
sst.setProgramOption("timebase", "100as")
import sst.macro
from sst.macro import Interconnect
swParams = {
"name" : "logp",
"out_in_latency" : "2us",
}
appParams = {
"allocation" : "first_available",
"indexing" : "block",
"name" : "mpi_ping_all",
"launch_cmd" : "aprun -n 80 -N 2",
}
memParams = {
"name" : "logp",
"latency" : "10ns",
"bandwidth" : "10GB/s",
}
nicParams = {
"name" : "logp",
"injection" : {
"bandwidth" : "2.0GB/s",
"latency" : "1us",
}
}
nodeParams = {
"memory" : memParams,
"nic" : nicParams,
"app1" : appParams,
"name" : "simple",
"proc" : {
"frequency" : "2GHz",
"ncores" : "4",
}
}
topoParams = {
"name" : "dragonfly",
"geometry" : "[4,3]",
"h" : "6",
"concentration" : "4",
}
params = {
"node" : nodeParams,
"switch" : swParams,
"topology" : topoParams,
}
ic = Interconnect(params)
ic.build()
| 894 | 14.431034 | 41 |
py
|
sst-macro
|
sst-macro-master/examples/pisces.py
|
import sst
sst.setProgramOption("timebase", "100as")
import sst.macro
from sst.macro import Interconnect
swParams = {
"name" : "pisces",
"arbitrator" : "cut_through",
"mtu" : "4096",
"router" : {
"seed" : "42",
"name" : "dragonfly_minimal",
},
"link" : {
"bandwidth" : "1.0GB/s",
"latency" : "100ns",
"credits" : "64KB",
},
"xbar" : {
"bandwidth" : "10GB/s",
},
"logp" : {
"bandwidth" : "1GB/s",
"hop_latency" : "100ns",
"out_in_latency" : "100ns",
}
}
appParams = {
"allocation" : "first_available",
"indexing" : "block",
"name" : "mpi_ping_all",
"launch_cmd" : "aprun -n 80 -N 2",
"start" : "0ms",
"sleep_time" : "1us",
"message_size" : "1KB"
}
memParams = {
"name" : "pisces",
"total_bandwidth" : "10GB/s",
"latency" : "10ns",
"max_single_bandwidth" : "10GB/s",
}
nicParams = {
"name" : "pisces",
"injection" : {
"mtu" : "4096",
"arbitrator" : "cut_through",
"bandwidth" : "1.0GB/s",
"latency" : "50ns",
"credits" : "64KB",
},
"ejection" : {
"latency" : "50ns",
}
}
nodeParams = {
"memory" : memParams,
"nic" : nicParams,
"app1" : appParams,
"name" : "simple",
"proc" : {
"frequency" : "2GHz",
"ncores" : "4",
}
}
topoParams = {
"name" : "dragonfly",
"geometry" : "[4,3]",
"h" : "6",
"inter_group" : "circulant",
"concentration" : "4",
}
params = {
"node" : nodeParams,
"switch" : swParams,
"topology" : topoParams,
}
ic = Interconnect(params)
ic.build()
| 1,507 | 16.333333 | 41 |
py
|
sst-macro
|
sst-macro-master/examples/sculpin.py
|
import sst
sst.setProgramOption("timebase", "100as")
import sst.macro
from sst.macro import Interconnect
swParams = {
"name" : "sculpin",
"router" : {
"seed" : "42",
"name" : "dragonfly_minimal",
},
"link" : {
"bandwidth" : "1.0GB/s",
"latency" : "100ns",
"credits" : "4KB",
},
"logp" : {
"bandwidth" : "1GB/s",
"hop_latency" : "100ns",
"out_in_latency" : "100ns",
}
}
appParams = {
"allocation" : "first_available",
"indexing" : "block",
"name" : "mpi_ping_all",
"launch_cmd" : "aprun -n 80 -N 2",
"sleep_time" : "1us",
"message_size" : "1KB",
}
memParams = {
"name" : "logp",
"bandwidth" : "10GB/s",
"latency" : "10ns",
}
nicParams = {
"name" : "sculpin",
"injection" : {
"bandwidth" : "1.0GB/s",
"latency" : "50ns",
"mtu" : "1024",
},
"ejection" : {
"latency" : "50ns",
}
}
nodeParams = {
"memory" : memParams,
"nic" : nicParams,
"app1" : appParams,
"name" : "simple",
"proc" : {
"frequency" : "2GHz",
"ncores" : "4",
}
}
topoParams = {
"name" : "dragonfly",
"geometry" : "[4,3]",
"h" : "6",
"inter_group" : "circulant",
"concentration" : "4",
}
params = {
"node" : nodeParams,
"switch" : swParams,
"topology" : topoParams,
}
ic = Interconnect(params)
ic.build()
| 1,292 | 15.576923 | 41 |
py
|
sst-macro
|
sst-macro-master/python/snappr.py
|
# Load module function in Python is changed
# to look for a libmacro.so in LD_LIBRARY_PATH
import sst
import sst.macro
sst.setProgramOption("timebase", "100as")
mtu = "4KB"
small_latency = "1ps"
nic_latency = "50ns"
nic_bandwidth = "1.0GB/s"
link_latency = "100ns"
link_bandwidth = "1.0GB/s"
topo_params = dict(
name = "dragonfly",
geometry = "[4,3]",
h = "6",
inter_group = "circulant",
concentration = "4"
)
router_name = "dragonfly_minimal"
system = sst.macro.System(topo_params)
num_switches = system.numSwitches()
num_nodes = system.numNodes()
switches = [None]*num_switches
nodes = [None]*num_nodes
for i in range(num_switches):
switch = sst.Component("Switch %d" % i, "macro.snappr_switch")
switch.addParams({
"router.name" : router_name,
"credits" : "8KB",
"id" : i,
})
switches[i] = switch
for i in range(num_nodes):
node = sst.Component("Node %d" % i, "macro.simple_node")
node.addParams({
"proc.frequency" : "2GHz",
"app1.name" : "mpi_ping_all",
"app1.launch_cmd" : "aprun -n 4 -N 1",
"id" : i,
})
nodes[i] = node
#build the NIC
nic = node.setSubComponent("nic", "macro.snappr_nic")
nic.addParams(dict(
mtu=mtu,
bandwidth=nic_bandwidth,
latency=nic_latency,
credits="8KB",
))
inj_port = nic.setSubComponent("outport", "macro.snappr_outport")
inj_port.addParams(dict(
bandwidth=nic_bandwidth,
latency=link_latency,
credits="8KB",
mtu=mtu
))
#build the memory system
mem = node.setSubComponent("memory", "macro.snappr_memory")
mem.addParams(dict(
channel_bandwidth="1.0GB/s",
num_channels=8
))
for i in range(num_switches):
connections = system.switchConnections(i)
switch = switches[i]
for src_id, dst_id, src_outport, dst_inport in connections:
port = switch.setSubComponent("outport%d" % src_outport, "macro.snappr_outport")
port.addParams(dict(
bandwidth=link_bandwidth,
latency=link_latency,
mtu=mtu
))
link_name = "network%d:%d->%d:%d" % (src_id,src_outport,dst_id,dst_inport)
link = sst.Link(link_name)
port_name = "output%d" % (src_outport)
switch.addLink(link,port_name,link_latency)
print("Connect %s on switch %d" % (port_name,i))
dst_switch = switches[dst_id]
port_name = "input%d" % (dst_inport)
dst_switch.addLink(link,port_name,link_latency)
for sw_id in range(num_switches):
connections = system.ejectionConnections(sw_id)
for ep_id, switch_port, ej_port in connections:
ep = nodes[ep_id]
switch = switches[sw_id]
link_name = "ejection%d:%d->%d:%d" % (ep_id,ej_port,sw_id,switch_port)
link = sst.Link(link_name)
port_name = "output%d" % (switch_port)
switch.addLink(link,port_name,link_latency)
port_name = "input%d" % (ej_port)
ep.addLink(link,port_name,link_latency)
  connections = system.injectionConnections(sw_id)
  for ep_id, switch_port, inj_port in connections:
    ep = nodes[ep_id]
    switch = switches[sw_id]
    link_name = "injection%d:%d->%d:%d" % (ep_id,inj_port,sw_id,switch_port)
    link = sst.Link(link_name)
    port_name = "input%d" % (switch_port)
    switch.addLink(link,port_name,link_latency)
    port_name = "output%d" % (inj_port)
    ep.addLink(link,port_name,link_latency)
nproc = sst.getMPIRankCount() * sst.getThreadCount()
logp_switches = [None]*nproc
for i in range(nproc):
switch = sst.Component("LogP %d" % i, "macro.logp_switch")
switch.addParams(dict(
bandwidth=link_bandwidth,
hop_latency="100ns",
out_in_latency="2us"
))
logp_switches[i] = switch
for i in range(num_nodes):
ep = nodes[i]
for p in range(nproc):
sw = logp_switches[p]
inj_sw = system.nodeToLogPSwitch(i)
#use inj_sw to set a no-cut
link_name = "logPinjection%d->%d" % (i, p)
link = sst.Link(link_name)
port_name = "output%d" % (sst.macro.NICLogPInjectionPort)
ep.addLink(link, port_name, small_latency) #put no latency here
port_name = "input%d" % (i)
sw.addLink(link, port_name, small_latency)
link_name = "logPejection%d->%d" % (p, i)
link = sst.Link(link_name)
port_name = "input%d" % (sst.macro.NICLogPInjectionPort)
ep.addLink(link, port_name, small_latency) #put no latency here
port_name = "output%d" % (i)
sw.addLink(link, port_name, small_latency)
sst.macro.debug("mpi")
| 4,372 | 26.161491 | 86 |
py
|
sst-macro
|
sst-macro-master/python/jobScheduler.py
|
from sst.merlin import *
from sst.macro import *
import sst.macro
mtu = 1204
arb = "cut_through"
buffer_size = "64KB"
topo = topoTorus()
params = sst.merlin._params
params["torus:shape"] = "2x2x2";
params["torus:width"] = "1x1x1";
params["flit_size"] = "8B"
params["link_bw"] = "10GB/s"
params["link_lat"] = "100ns"
params["xbar_bw"] = "15GB/s"
params["input_latency"] = "100ns"
params["output_latency"] = "100ns"
params["input_buf_size"] = buffer_size
params["output_buf_size"] = buffer_size
params["num_peers"] = 8
params["num_dims"] = 3
params["torus:local_ports"] = 1
memParams = {
"latency" : "10ns",
"bandwidth" : "10GB/s",
}
procParams = {
"frequency" : "2.1GHz",
"ncores" : 1,
}
nicParams = {
"name" : "pisces",
"mtu" : mtu,
"arbitrator" : arb,
"packetizer" : "merlin",
"injection" : {
"latency" : "1us",
"bandwidth" : "10GB/s",
"credits" : buffer_size,
},
"module" : "merlin.linkcontrol",
}
app1Params = {
"name" : "mpi_coverage",
"start" : "0ms",
"launch_cmd" : "aprun -n 8 -N 1",
"indexing" : "block",
"allocation" : "cartesian",
"cart_sizes" : "[2,2,2]",
}
app2Params = {
"name" : "mpi_coverage",
"start" : "1ms",
"launch_cmd" : "aprun -n 8 -N 1",
"indexing" : "round_robin",
"allocation" : "first_available",
}
nodeParams = {
"nic" : nicParams,
"memory" : memParams,
"proc" : procParams,
"topology" : {
"name" : "torus",
"geometry" : "[2,2,2]",
}
}
jobParams = {
"JobLauncher" : "exclusive",
"app1" : app1Params,
"app2" : app2Params,
}
topo.prepParams()
nodeParams = macroToCoreParams(nodeParams)
jobParams = macroToCoreParams(jobParams)
class TestEP(EndPoint):
def build( self, nodeID, extraKeys ):
node = sst.Component( "node" + str(nodeID), "macro.simple_node" )
node.addParams(extraKeys)
node.addParams(nodeParams)
node.addParam("id", nodeID)
if nodeID == 0:
node.addParams(jobParams)
return (node, "rtr", params["link_lat"])
ep = TestEP()
topo.setEndPoint(ep)
topo.build()
| 2,023 | 18.09434 | 69 |
py
|
sst-macro
|
sst-macro-master/python/arielShadowPuppet.py
|
import sst
import os
sst.setProgramOption("timebase", "100as")
next_core_id = 0
next_network_id = 0
next_memory_ctrl_id = 0
clock = "2660MHz"
memory_clock = "200MHz"
coherence_protocol = "MESI"
cores_per_group = 2
active_cores_per_group = 2
memory_controllers_per_group = 1
groups = 4
os.environ["OMP_NUM_THREADS"]=str(groups * cores_per_group)
l3cache_blocks_per_group = 5
l3cache_block_size = "1MB"
ring_latency = "50ps"
ring_bandwidth = "85GB/s"
ring_flit_size = "72B"
memory_network_bandwidth = "85GB/s"
mem_interleave_size = 4096 # Do 4K page level interleaving
memory_capacity = 16384 # Size of memory in MBs
streamN = 1000000
l1_prefetch_params = {
}
l2_prefetch_params = {
"prefetcher": "cassini.StridePrefetcher",
"reach": 16,
"detect_range" : 1
}
ringstop_params = {
"torus:shape" : groups * (cores_per_group + memory_controllers_per_group + l3cache_blocks_per_group),
"output_latency" : "100ps",
"xbar_bw" : ring_bandwidth,
"input_buf_size" : "2KB",
"input_latency" : "100ps",
"num_ports" : "3",
"debug" : "0",
"torus:local_ports" : "1",
"flit_size" : ring_flit_size,
"output_buf_size" : "2KB",
"link_bw" : ring_bandwidth,
"torus:width" : "1",
"topology" : "merlin.torus"
}
l1_params = {
"coherence_protocol": coherence_protocol,
"cache_frequency": clock,
"replacement_policy": "lru",
"cache_size": "32KB",
"maxRequestDelay" : "1000000",
"associativity": 8,
"cache_line_size": 64,
"access_latency_cycles": 4,
"L1": 1,
"debug": 0
}
l2_params = {
"coherence_protocol": coherence_protocol,
"cache_frequency": clock,
"replacement_policy": "lru",
"cache_size": "256KB",
"associativity": 8,
"cache_line_size": 64,
"access_latency_cycles": 8,
"mshr_num_entries" : 16,
"mshr_latency_cycles" : 2,
"debug": 0,
}
l3_params = {
"debug" : "0",
"access_latency_cycles" : "6",
"cache_frequency" : "2GHz",
"replacement_policy" : "lru",
"coherence_protocol" : coherence_protocol,
"associativity" : "4",
"cache_line_size" : "64",
"debug_level" : "10",
"cache_size" : "128 KB",
"mshr_num_entries" : "4096",
"mshr_latency_cycles" : 2,
"num_cache_slices" : str(groups * l3cache_blocks_per_group),
"slice_allocation_policy" : "rr"
}
memctrl_params = {
"backing" : "none",
"clock" : memory_clock,
}
memory_params = {
"access_time" : "30ns",
"mem_size" : str(memory_capacity / (groups * memory_controllers_per_group)) + "MiB",
}
dc_params = {
"coherence_protocol": coherence_protocol,
"memNIC.network_bw": memory_network_bandwidth,
"memNIC.interleave_size": str(mem_interleave_size) + "B",
"memNIC.interleave_step": str((groups * memory_controllers_per_group) * mem_interleave_size) + "B",
"entry_cache_size": 256*1024*1024, #Entry cache size of mem/blocksize
"clock": memory_clock,
"debug": 1,
}
print "Configuring Ariel processor model (" + str(groups * cores_per_group) + " cores)..."
fd = "ariel_tester6"
import sst.macro
sst.macro.debug("os", "timestamp")
ncores = groups * cores_per_group
sstmac = sst.Component("node", "macro.simple_node")
sstmac.addParams({
"nic.name" : "logp",
"nic.topology.name" : "torus",
"nic.topology.geometry" : [2,2],
"memory.name" : "logp",
"memory.bandwidth" : "1GB/s",
"proc.frequency" : "2GHz",
"proc.ncores" : ncores,
"app1.name" : "ariel_test",
"app1.size" : 1,
})
# "os.tunnel" : fd,
ariel = sst.Component("A0", "ariel.ariel")
ariel.addParams({
"verbose" : "0",
"maxcorequeue" : "256",
"maxtranscore" : "16",
"maxissuepercycle" : "2",
"pipetimeout" : "0",
"executable" : "bin/ariel_test",
"arielinterceptcalls" : "1",
"argv" : [fd],
"arielmode" : "1",
"corecount" : ncores,
"clock" : str(clock),
"autolaunch" : "native",
"pass_name" : "true",
})
for i in range(ncores):
notify_link = sst.Link("notify%d" % i)
src = ariel, "notify_empty_link%d" % i, "1ps"
dst = sstmac, "unblock%d" % i, "1ps"
notify_link.connect(src,dst)
memmgr = ariel.setSubComponent("memmgr", "ariel.MemoryManagerSimple")
memmgr.addParams({
"memmgr.pagecount0" : "1048576",
})
router_map = {}
print "Configuring ring network..."
for next_ring_stop in range((cores_per_group + memory_controllers_per_group + l3cache_blocks_per_group) * groups):
ring_rtr = sst.Component("rtr." + str(next_ring_stop), "merlin.hr_router")
ring_rtr.addParams(ringstop_params)
ring_rtr.addParams({
"id" : next_ring_stop
})
router_map["rtr." + str(next_ring_stop)] = ring_rtr
for next_ring_stop in range((cores_per_group + memory_controllers_per_group + l3cache_blocks_per_group) * groups):
if next_ring_stop == 0:
rtr_link_positive = sst.Link("rtr_pos_" + str(next_ring_stop))
rtr_link_positive.connect( (router_map["rtr.0"], "port0", ring_latency), (router_map["rtr.1"], "port1", ring_latency) )
rtr_link_negative = sst.Link("rtr_neg_" + str(next_ring_stop))
rtr_link_negative.connect( (router_map["rtr.0"], "port1", ring_latency), (router_map["rtr." + str(((cores_per_group + memory_controllers_per_group + l3cache_blocks_per_group) * groups) - 1)], "port0", ring_latency) )
elif next_ring_stop == ((cores_per_group + memory_controllers_per_group + l3cache_blocks_per_group) * groups) - 1:
rtr_link_positive = sst.Link("rtr_pos_" + str(next_ring_stop))
rtr_link_positive.connect( (router_map["rtr." + str(next_ring_stop)], "port0", ring_latency), (router_map["rtr.0"], "port1", ring_latency) )
rtr_link_negative = sst.Link("rtr_neg_" + str(next_ring_stop))
rtr_link_negative.connect( (router_map["rtr." + str(next_ring_stop)], "port1", ring_latency), (router_map["rtr." + str(next_ring_stop-1)], "port0", ring_latency) )
else:
rtr_link_positive = sst.Link("rtr_pos_" + str(next_ring_stop))
rtr_link_positive.connect( (router_map["rtr." + str(next_ring_stop)], "port0", ring_latency), (router_map["rtr." + str(next_ring_stop+1)], "port1", ring_latency) )
rtr_link_negative = sst.Link("rtr_neg_" + str(next_ring_stop))
rtr_link_negative.connect( (router_map["rtr." + str(next_ring_stop)], "port1", ring_latency), (router_map["rtr." + str(next_ring_stop-1)], "port0", ring_latency) )
for next_group in range(groups):
print "Configuring core and memory controller group " + str(next_group) + "..."
for next_active_core in range(active_cores_per_group):
print "Creating active core " + str(next_active_core) + " in group " + str(next_group)
l1 = sst.Component("l1cache_" + str(next_core_id), "memHierarchy.Cache")
l1.addParams(l1_params)
l1.addParams(l1_prefetch_params)
l2 = sst.Component("l2cache_" + str(next_core_id), "memHierarchy.Cache")
l2.addParams(l2_params)
l2.addParams(l1_prefetch_params)
ariel_cache_link = sst.Link("ariel_cache_link_" + str(next_core_id))
ariel_cache_link.connect( (ariel, "cache_link_" + str(next_core_id), ring_latency), (l1, "high_network_0", ring_latency) )
l2_core_link = sst.Link("l2cache_" + str(next_core_id) + "_link")
l2_core_link.connect((l1, "low_network_0", ring_latency), (l2, "high_network_0", ring_latency))
l2_ring_link = sst.Link("l2_ring_link_" + str(next_core_id))
l2_ring_link.connect((l2, "cache", ring_latency), (router_map["rtr." + str(next_network_id)], "port2", ring_latency))
next_network_id = next_network_id + 1
next_core_id = next_core_id + 1
for next_inactive_core in range(cores_per_group - active_cores_per_group):
print "Creating inactive core: " + str(next_inactive_core) + " in group " + str(next_group)
l1 = sst.Component("l1cache_" + str(next_core_id), "memHierarchy.Cache")
l1.addParams(l1_params)
l1.addParams(l1_prefetch_params)
l2 = sst.Component("l2cache_" + str(next_core_id), "memHierarchy.Cache")
l2.addParams(l2_params)
l2.addParams(l2_prefetch_params)
ariel_cache_link = sst.Link("ariel_cache_link_" + str(next_core_id))
ariel_cache_link.connect( (ariel, "cache_link_" + str(next_core_id), ring_latency), (l1, "high_network_0", ring_latency) )
l2_core_link = sst.Link("l2cache_" + str(next_core_id) + "_link")
l2_core_link.connect((l1, "low_network_0", ring_latency), (l2, "high_network_0", ring_latency))
l2_ring_link = sst.Link("l2_ring_link_" + str(next_core_id))
l2_ring_link.connect((l2, "cache", ring_latency), (router_map["rtr." + str(next_network_id)], "port2", ring_latency))
next_network_id = next_network_id + 1
next_core_id = next_core_id + 1
for next_l3_cache_block in range(l3cache_blocks_per_group):
print "Creating L3 cache block: " + str(next_l3_cache_block) + " in group: " + str(next_group)
l3cache = sst.Component("l3cache" + str((next_group * l3cache_blocks_per_group) + next_l3_cache_block), "memHierarchy.Cache")
l3cache.addParams(l3_params)
l3cache.addParams({
"slice_id" : str((next_group * l3cache_blocks_per_group) + next_l3_cache_block)
})
l3_ring_link = sst.Link("l3_ring_link_" + str((next_group * l3cache_blocks_per_group) + next_l3_cache_block))
l3_ring_link.connect( (l3cache, "directory", ring_latency), (router_map["rtr." + str(next_network_id)], "port2", ring_latency) )
next_network_id = next_network_id + 1
for next_mem_ctrl in range(memory_controllers_per_group):
        local_size = memory_capacity // (groups * memory_controllers_per_group)
memctrl = sst.Component("memory_" + str(next_memory_ctrl_id), "memHierarchy.MemController")
memctrl.addParams(memctrl_params)
memory = memctrl.setSubComponent("backend", "memHierarchy.simpleMem")
memory.addParams(memory_params)
dc = sst.Component("dc_" + str(next_memory_ctrl_id), "memHierarchy.DirectoryController")
dc.addParams({
"memNIC.addr_range_start" : next_memory_ctrl_id * mem_interleave_size,
"memNIC.addr_range_end" : (memory_capacity * 1024 * 1024) - (groups * memory_controllers_per_group * mem_interleave_size) + (next_memory_ctrl_id * mem_interleave_size)
})
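        # Added note (an assumption, not in the original file): these bounds,
        # together with the interleave settings expected in dc_params, stripe
        # the physical address space across all directory controllers in
        # mem_interleave_size chunks.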
dc.addParams(dc_params)
memLink = sst.Link("mem_link_" + str(next_memory_ctrl_id))
memLink.connect((memctrl, "direct_link", ring_latency), (dc, "memory", ring_latency))
netLink = sst.Link("dc_link_" + str(next_memory_ctrl_id))
netLink.connect((dc, "network", ring_latency), (router_map["rtr." + str(next_network_id)], "port2", ring_latency))
next_network_id = next_network_id + 1
next_memory_ctrl_id = next_memory_ctrl_id + 1
# Enable SST Statistics Outputs for this simulation
print "Completed configuring the SST Sandy Bridge model"
| 11,126 | 37.237113 | 232 |
py
|
sst-macro
|
sst-macro-master/python/emberDefaultParams.py
|
debug = 0
netConfig = {
}
networkParams = {
"packetSize" : "2048B",
"link_bw" : "4GB/s",
"link_lat" : "40ns",
"input_latency" : "50ns",
"output_latency" : "50ns",
"flitSize" : "8B",
"buffer_size" : "14KB",
}
nicParams = {
"detailedCompute.name" : "thornhill.SingleThread",
"module" : "merlin.linkcontrol",
"packetSize" : networkParams['packetSize'],
"link_bw" : networkParams['link_bw'],
"buffer_size" : networkParams['buffer_size'],
"rxMatchDelay_ns" : 100,
"txDelay_ns" : 50,
"nic2host_lat" : "150ns",
}
emberParams = {
"os.module" : "firefly.hades",
"os.name" : "hermesParams",
"api.0.module" : "firefly.hadesMP",
"verbose" : 0,
}
hermesParams = {
"hermesParams.detailedCompute.name" : "thornhill.SingleThread",
"hermesParams.memoryHeapLink.name" : "thornhill.MemoryHeapLink",
"hermesParams.nicModule" : "firefly.VirtNic",
"hermesParams.functionSM.defaultEnterLatency" : 30000,
"hermesParams.functionSM.defaultReturnLatency" : 30000,
"hermesParams.ctrlMsg.shortMsgLength" : 12000,
"hermesParams.ctrlMsg.matchDelay_ns" : 150,
"hermesParams.ctrlMsg.txSetupMod" : "firefly.LatencyMod",
"hermesParams.ctrlMsg.txSetupModParams.range.0" : "0-:130ns",
"hermesParams.ctrlMsg.rxSetupMod" : "firefly.LatencyMod",
"hermesParams.ctrlMsg.rxSetupModParams.range.0" : "0-:100ns",
"hermesParams.ctrlMsg.txMemcpyMod" : "firefly.LatencyMod",
"hermesParams.ctrlMsg.txMemcpyModParams.op" : "Mult",
"hermesParams.ctrlMsg.txMemcpyModParams.range.0" : "0-:344ps",
"hermesParams.ctrlMsg.rxMemcpyMod" : "firefly.LatencyMod",
"hermesParams.ctrlMsg.txMemcpyModParams.op" : "Mult",
"hermesParams.ctrlMsg.rxMemcpyModParams.range.0" : "0-:344ps",
"hermesParams.ctrlMsg.sendAckDelay_ns" : 0,
"hermesParams.ctrlMsg.regRegionBaseDelay_ns" : 3000,
"hermesParams.ctrlMsg.regRegionPerPageDelay_ns" : 100,
"hermesParams.ctrlMsg.regRegionXoverLength" : 4096,
"hermesParams.loadMap.0.start" : 0,
"hermesParams.loadMap.0.len" : 2,
}
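
# Minimal usage sketch (added; mirrors how the ember driver scripts consume
# this module, e.g. emberMacro.py below):
#
#   config = __import__('emberDefaultParams', fromlist=[''])
#   epParams = {}
#   epParams.update(config.emberParams)
#   epParams.update(config.hermesParams)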
| 2,076 | 30 | 66 |
py
|
sst-macro
|
sst-macro-master/python/emberLoadInfo.py
|
import sst
import copy
import sys    # used by the sys.exit calls further down
def calcNetMapId( nodeId, nidList ):
if nidList == 'Null':
return -1
pos = 0
a = nidList.split(',')
for b in a:
c = b.split('-')
start = int(c[0])
stop = start
if 2 == len(c):
stop = int(c[1])
if nodeId >= start and nodeId <= stop:
return pos + (nodeId - start)
pos = pos + ((stop - start) + 1)
return -1
def calcNetMapSize( nidList ):
if nidList == 'Null':
return 0
pos = 0
a = nidList.split(',')
for b in a:
c = b.split('-')
xx = 1
if 2 == len(c):
xx = int(c[1]) - int(c[0]) + 1
pos += xx
return pos
def calcMaxNode( nidList ):
if nidList == 'Null':
return 0
    highest = 0
    a = nidList.split(',')
    for b in a:
        c = b.split('-')
        tmp = int(c[0])
        if 2 == len(c):
            tmp = int(c[1])
        if tmp > highest:
            highest = tmp
    return highest + 1
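# Hedged examples (added, not in the original file) of the nid-list helpers
# above, assuming comma-separated ranges such as "0-3,8-11":
#
#   calcNetMapSize('0-3,8-11')   -> 8   (4 nids + 4 nids)
#   calcNetMapId(9, '0-3,8-11')  -> 5   (position of nid 9 in the flat map)
#   calcMaxNode('0-3,8-11')      -> 12  (highest nid + 1)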
class EmberEP:
def __init__( self, jobId, driverParams, nicParams, numCores, ranksPerNode, statNodes, nidList, motifLogNodes, detailedModel ): # added motifLogNodes here
self.driverParams = driverParams
self.nicParams = nicParams
self.numCores = numCores
self.driverParams['jobId'] = jobId
self.statNodes = statNodes
self.nidList = nidList
self.numNids = calcNetMapSize( self.nidList )
# in order to create motifLog files only for the desired nodes of a job
self.motifLogNodes = motifLogNodes
self.detailedModel = detailedModel
def getName( self ):
return "EmberEP"
def prepParams( self ):
pass
def build( self, nodeID, extraKeys ):
nic = sst.Component( "nic" + str(nodeID), "firefly.nic" )
nic.addParams( self.nicParams )
nic.addParams( extraKeys)
nic.addParam( "nid", nodeID )
retval = (nic, "rtr", "100ns")
built = False
if self.detailedModel:
built = self.detailedModel.build( nodeID, self.numCores )
memory = None
if built:
nic.addLink( self.detailedModel.getNicLink( ), "detailed0", "1ps" )
memory = sst.Component("memory" + str(nodeID), "thornhill.MemoryHeap")
memory.addParam( "nid", nodeID )
#memory.addParam( "verboseLevel", 1 )
loopBack = sst.Component("loopBack" + str(nodeID), "firefly.loopBack")
loopBack.addParam( "numCores", self.numCores )
# Create a motifLog only for one core of the desired node(s)
logCreatedforFirstCore = False
# end
for x in xrange(self.numCores):
ep = sst.Component("nic" + str(nodeID) + "core" + str(x) +
"_EmberEP", "ember.EmberEngine")
if built:
links = self.detailedModel.getThreadLinks( x )
cpuNum = 0
for link in links:
ep.addLink(link,"detailed"+str(cpuNum),"1ps")
cpuNum = cpuNum + 1
# Create a motif log only for the desired list of nodes (endpoints)
# Delete the 'motifLog' parameter from the param list of other endpoints
if 'motifLog' in self.driverParams:
if self.driverParams['motifLog'] != '':
if (self.motifLogNodes):
for id in self.motifLogNodes:
if nodeID == int(id) and logCreatedforFirstCore == False:
#print str(nodeID) + " " + str(self.driverParams['jobId']) + " " + str(self.motifLogNodes)
#print "Create motifLog for node {0}".format(id)
logCreatedforFirstCore = True
ep.addParams(self.driverParams)
else:
tempParams = copy.copy(self.driverParams)
del tempParams['motifLog']
ep.addParams(tempParams)
else:
tempParams = copy.copy(self.driverParams)
del tempParams['motifLog']
ep.addParams(tempParams)
else:
ep.addParams(self.driverParams)
else:
ep.addParams(self.driverParams)
# end
# Original version before motifLog
#ep.addParams(self.driverParams)
for id in self.statNodes:
if nodeID == id:
print "printStats for node {0}".format(id)
ep.addParams( {'motif1.printStats': 1} )
ep.addParams( {'hermesParams.netId': nodeID } )
ep.addParams( {'hermesParams.netMapId': calcNetMapId( nodeID, self.nidList ) } )
ep.addParams( {'hermesParams.netMapSize': self.numNids } )
nicLink = sst.Link( "nic" + str(nodeID) + "core" + str(x) +
"_Link" )
nicLink.setNoCut()
loopLink = sst.Link( "loop" + str(nodeID) + "core" + str(x) +
"_Link" )
loopLink.setNoCut()
ep.addLink(nicLink, "nic", self.nicParams["nic2host_lat"] )
nic.addLink(nicLink, "core" + str(x), self.nicParams["nic2host_lat"] )
ep.addLink(loopLink, "loop", "1ns")
loopBack.addLink(loopLink, "core" + str(x), "1ns")
if built:
memoryLink = sst.Link( "memory" + str(nodeID) + "core" + str(x) + "_Link" )
memoryLink.setNoCut()
ep.addLink(memoryLink, "memoryHeap", "0 ps")
memory.addLink(memoryLink, "detailed" + str(x), "0 ns")
return retval
class LoadInfo:
def __init__(self, nicParams, epParams, numNodes, numCores, numNics, detailedModel = None ):
self.nicParams = nicParams
self.epParams = epParams
self.numNodes = int(numNodes)
self.numCores = int(numCores)
self.numNics = int(numNics)
self.detailedModel = detailedModel
self.nicParams["num_vNics"] = numCores
self.map = []
nullMotif = [{
'cmd' : "-nidList=Null Null",
'printStats' : 0,
'api': "",
'spyplotmode': 0
}]
self.nullEP, nidlist = self.foo( -1, self.readWorkList( nullMotif ), [] )
self.nullEP.prepParams()
def foo( self, jobId, x, statNodes, detailedModel = None ):
nidList, ranksPerNode, params = x
# In order to pass the motifLog parameter to only desired nodes of a job
# Here we choose the first node in the nidList
motifLogNodes = []
if (nidList != 'Null' and 'motifLog' in self.epParams):
tempnidList = nidList
if '-' in tempnidList:
tempnidList = tempnidList.split('-')
else:
tempnidList = tempnidList.split(',')
motifLogNodes.append(tempnidList[0])
# end
numNodes = calcMaxNode( nidList )
if numNodes > self.numNics:
            sys.exit('Error: Requested max nodes ' + str(numNodes) +\
                ' is greater than available NICs ' + str(self.numNics) )
params.update( self.epParams )
ep = EmberEP( jobId, params, self.nicParams, self.numCores, ranksPerNode, statNodes, nidList, motifLogNodes, detailedModel ) # added motifLogNodes here
ep.prepParams()
return (ep, nidList)
def getWorkListFromFile( self, filename, defaultParams ):
stage1 = []
for line in open(filename, 'r'):
line = line.strip()
if line:
if line[:1] == '[':
stage1.append(line)
elif line[:1] == '#':
                    continue
else:
stage1[-1] += ' ' + line
tmp = []
        nidlist = ''
        for item in stage1:
            tag, rest = item.split(' ', 1)
            if tag == '[JOB_ID]':
                tmp.append([])
                tmp[-1].append( rest )
            elif tag == '[NID_LIST]':
                nidlist = rest
                tmp[-1].append( [] )
            elif tag == '[MOTIF]':
                tmp[-1][-1].append( dict.copy(defaultParams) )
                tmp[-1][-1][-1]['cmd'] = '-nidList=' + nidlist + ' ' + rest
return tmp
def initFile(self, defaultParams, fileName, statNodeList ):
work = self.getWorkListFromFile( fileName, defaultParams )
for item in work:
jobid, motifs = item
self.map.append( self.foo( jobid, self.readWorkList( motifs ), statNodeList, self.detailedModel ) )
self.verifyLoadInfo()
def initWork(self, workList, statNodes ):
for jobid, work in workList:
            print "EMBER: initWork job", jobid, work
self.map.append( self.foo( jobid, self.readWorkList( work ), statNodes, self.detailedModel ) )
self.verifyLoadInfo()
def readWorkList(self, workList ):
tmp = {}
tmp['motif_count'] = len(workList)
for i, work in enumerate( workList ) :
cmdList = work['cmd'].split()
del work['cmd']
ranksPerNode = self.numCores
nidList = []
while len(cmdList):
if "-" != cmdList[0][0]:
break
o, a = cmdList.pop(0).split("=")
if "-ranksPerNode" == o:
ranksPerNode = int(a)
elif "-nidList" == o:
nidList = a
else:
sys.exit("bad argument")
if 0 == len(nidList):
nidList = "0-" + str(self.numNodes-1)
if "Null" != cmdList[0]:
print "EMBER: Job: -nidList={0} -ranksPerNode={1} {2}".format( nidList, ranksPerNode, cmdList )
if ranksPerNode > self.numCores:
sys.exit("Error: " + str(ranksPerNode) + " ranksPerNode is greater than "+
str(self.numCores) + " coresPerNode")
motif = self.parseCmd( "ember.", "Motif", cmdList, i )
for x in work.items():
motif[ 'motif' + str(i) + '.' + x[0] ] = x[1]
tmp.update( motif )
return ( nidList, ranksPerNode, tmp )
def parseCmd(self, motifPrefix, motifSuffix, cmdList, cmdNum ):
motif = {}
tmp = 'motif' + str(cmdNum) + '.name'
motif[ tmp ] = motifPrefix + cmdList[0] + motifSuffix
cmdList.pop(0)
for x in cmdList:
y = x.split("=")
tmp = 'motif' + str(cmdNum) + '.arg.' + y[0]
motif[ tmp ] = y[1]
return motif
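    # Hedged example (added, not in the original): for cmdList
    # ['AllPingPong', 'iterations=10', 'messageSize=20000'] and cmdNum 1,
    # parseCmd('ember.', 'Motif', ...) returns, per the logic above:
    #
    #   { 'motif1.name': 'ember.AllPingPongMotif',
    #     'motif1.arg.iterations': '10',
    #     'motif1.arg.messageSize': '20000' }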
def verifyLoadInfo(self):
#print "verifyLoadInfo", "numNodes", self.numNodes, "numCores", self.numCores
#for ep,nidList in self.map:
#print nidList
return True
def inRange( self, nid, start, end ):
if nid >= start:
if nid <= end:
return True
return False
def setNode(self,nodeId):
        #print self.map
for ep, nidList in self.map:
x = nidList.split(',')
for y in x:
tmp = y.split('-')
                #print tmp
if 1 == len(tmp):
if nodeId == int( tmp[0] ):
return ep
else:
if self.inRange( nodeId, int(tmp[0]), int(tmp[1]) ):
return ep
return self.nullEP
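
# Minimal usage sketch (added; mirrors how emberMacro.py drives this class):
#
#   loadInfo = LoadInfo( nicParams, epParams, numNodes, numCores, numNics )
#   loadInfo.initWork( workList, statNodeList )
#   ep = loadInfo.setNode( nodeId )   # EmberEP for that node, or the null EP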
| 10,427 | 28.047354 | 158 |
py
|
sst-macro
|
sst-macro-master/python/emberMacro.py
|
import sys,getopt
import sst
from sst.merlin import *
from sst.macro import *
#debug("simple_network")
import emberLoadInfo
from emberLoadInfo import *
import random
topoParams = {
"name" : "torus",
"geometry" : "2 2 2",
}
injLat = "1us"
mtu="1KB"
bufSize = "64KB"
arb = "cut_through"
macroParams = {
"topology" : topoParams,
"injection_latency" : injLat,
"switch" : {
"router" : "torus_minimal",
"arbitrator" : arb,
"mtu" : mtu,
"name" : "pisces",
"buffer_size" : bufSize,
"ejection" : {
"latency" : injLat,
"bandwidth" : "10GB/s",
},
"link" : {
"sendLatency" : "100ns",
"creditLatency" : "1ns",
"bandwidth" : "10GB/s",
},
"xbar" : {
"sendLatency" : "1ns",
"creditLatency" : "100ns",
"bandwidth" : "10GB/s",
},
},
}
emberNicParams = {
"mtu" : mtu,
"bandwidth" : "10GB/s",
"latency" : "1us",
"credits" : bufSize,
"arbitrator" : arb,
}
debug = 0
emberVerbose = 10
embermotifLog = ''
emberrankmapper = ''
statNodeList = []
jobid = 0
loadFile = ''
workList = []
workFlow = []
motifDefaults = {
'cmd' : "",
'printStats' : 0,
'api': "HadesMP",
'spyplotmode': 0
}
motifs = [
"Init",
"AllPingPong iterations=10 messageSize=20000",
"Fini"
]
for entry in motifs:
motif = dict.copy(motifDefaults)
motif['cmd'] = entry
workFlow.append(motif)
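# Illustrative note (added): after this loop each workFlow entry is a copy of
# motifDefaults with its 'cmd' filled in, e.g.
#
#   { 'cmd': 'AllPingPong iterations=10 messageSize=20000',
#     'printStats': 0, 'api': 'HadesMP', 'spyplotmode': 0 }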
numCores = 1
netTopo = "torus"
netShape = "2x2x2"
platform = 'emberDefault'
netFlitSize = ''
netBW = ''
netPktSize = ''
netInspect = ''
rtrArb = ''
platParams = ""
if workFlow:
workList.append( [jobid, workFlow] )
jobid += 1
model = None
print "EMBER: platform: {0}".format( platform )
if not platParams:
platParams = platform + 'Params'
try:
config = __import__( platParams, fromlist=[''] )
except ImportError:
sys.exit('Failed: could not import `{0}`'.format(platParams) )
nicParams = config.nicParams
nicParams["module"] = "macro.pisces"
#nicParams["module"] = "merlin.linkcontrol"
networkParams = config.networkParams
hermesParams = config.hermesParams
emberParams = config.emberParams
platNetConfig = config.netConfig
emptyNids = []
XXX = []
nicParams['verboseLevel'] = debug
nicParams['verboseMask'] = 1
hermesParams['hermesParams.verboseLevel'] = debug
hermesParams['hermesParams.nicParams.verboseLevel'] = debug
hermesParams['hermesParams.functionSM.verboseLevel'] = debug
hermesParams['hermesParams.ctrlMsg.verboseLevel'] = debug
emberParams['verbose'] = emberVerbose
if embermotifLog:
emberParams['motifLog'] = embermotifLog
if emberrankmapper:
emberParams['rankmapper'] = emberrankmapper
epParams = {}
epParams.update(emberParams)
epParams.update(hermesParams)
def buildFxn(ID):
params = emberNicParams.copy()
params["id"] = ID
return loadInfo.setNode(ID).build(ID,params)[0]
ic = Interconnect(macroParams)
numNodes = ic.numNodes()
loadInfo = LoadInfo( nicParams, epParams, numNodes, numCores, ic.numNodes(), model )
loadInfo.initWork( workList, statNodeList )
ic.build(buildFxn)
| 2,968 | 17.32716 | 84 |
py
|
sst-macro
|
sst-macro-master/python/plotSwitches.py
|
import sys
import os
import numpy as np
import matplotlib
from mpl_toolkits.mplot3d import Axes3D
from mpl_toolkits.mplot3d.art3d import Poly3DCollection, Line3DCollection
import matplotlib.pyplot as plt
def genFaces(Z):
"""should be numbered like so
Z[0] = corner[:]
Z[1] = corner + yDelta
Z[2] = corner + zDelta
Z[3] = Z[2] + yDelta
Z[4] = corner + xDelta
Z[5] = Z[4] + yDelta
Z[6] = Z[4] + zDelta
Z[7] = Z[6] + yDelta
"""
# list of sides' polygons of figure
faces = [[Z[0],Z[1],Z[3],Z[2]],
[Z[0],Z[1],Z[5],Z[4]],
[Z[0],Z[2],Z[6],Z[4]],
[Z[4],Z[5],Z[7],Z[6]],
[Z[2],Z[6],Z[7],Z[3]],
[Z[1],Z[5],Z[7],Z[3]]]
return faces
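# Hedged sketch (added; `boxVerts` is a hypothetical helper, not part of the
# original file): building the eight corners in the order genFaces expects,
# for an axis-aligned box at `corner` with the given edge vectors.
def boxVerts(corner, xDelta, yDelta, zDelta):
  Z = [None]*8
  Z[0] = corner[:]
  Z[1] = [a+b for a,b in zip(corner, yDelta)]
  Z[2] = [a+b for a,b in zip(corner, zDelta)]
  Z[3] = [a+b for a,b in zip(Z[2], yDelta)]
  Z[4] = [a+b for a,b in zip(corner, xDelta)]
  Z[5] = [a+b for a,b in zip(Z[4], yDelta)]
  Z[6] = [a+b for a,b in zip(Z[4], zDelta)]
  Z[7] = [a+b for a,b in zip(Z[6], yDelta)]
  return Z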
def addPoly(ax, verts, color=None, alpha=0.5, rgb=None):
faces = genFaces(verts)
# plot sides
poly = Poly3DCollection(faces, facecolors=color, linewidths=0.01, edgecolors='black')
alpha_tuple = []
if not rgb and not color: color = "cyan" #okay to default for now
if color:
color_tuple = matplotlib.colors.to_rgb(color)
for entry in color_tuple: #copy to allow assignment
alpha_tuple.append(entry)
elif rgb:
for entry in rgb:
alpha_tuple.append(entry)
else:
sys.exit("Must give either color name or RGB tuple")
alpha_tuple.append(alpha)
poly.set_facecolor(alpha_tuple)
ax.add_collection3d(poly)
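
# Usage sketch (added, hypothetical; uses the boxVerts helper above):
#
#   verts = boxVerts([0,0,0], [1,0,0], [0,1,0], [0,0,1])
#   addPoly(ax, verts, color="cyan", alpha=0.5)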
if len(sys.argv) != 2:
sys.exit("./plot <sst-macro.xyz file>")
path = sys.argv[1]
if not os.path.isfile(path):
sys.exit("%s is not a valid input file" % path)
text = open(path).read()
def get_xyz(coord):
  return np.array(map(float, coord.split(",")))
mins = [ 100000, 100000, 100000 ]
maxs = [-100000,-100000,-100000 ]
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
for line in text.strip().splitlines():
splitLine = line.split(";")
geometry = splitLine[0]
attributes = splitLine[1:]
geomSplit = geometry.split("->")
if len(geomSplit) != 8:
sys.exit("line is not a valid geometry: %s" % line)
color = None
rgb = None
alpha = 0.5
for attr in attributes:
split = attr.split("=")
if len(split) == 2:
name, value = map(lambda x: x.strip().lower(), attr.split("="))
if name == "color":
color = value
elif name == "alpha":
alpha = float(value)
elif name == "rgb":
rgb = map(float, value.split(","))
else:
sys.stderr.write("Bad key,value pair: %s\n" % attr)
verts = map(get_xyz, geomSplit)
for v in verts:
for i in range(3):
mins[i] = min(mins[i], v[i])
maxs[i] = max(maxs[i], v[i])
addPoly(ax, verts, color=color, alpha=alpha, rgb=rgb)
ax.set_xlim(mins[0], maxs[0])
ax.set_ylim(mins[1], maxs[1])
ax.set_zlim(mins[2], maxs[2])
#ax.set_xlabel('X')
#ax.set_ylabel('Y')
#ax.set_zlabel('Z')
#ax.set_xlim(0,nboxes)
#ax.set_ylim(0,nboxes)
#ax.set_zlim(0,nboxes)
plt.show()
| 2,832 | 22.608333 | 87 |
py
|
sst-macro
|
sst-macro-master/python/merlin.py
|
from sst.merlin import *
from sst.macro import *
import sst.macro
mtu = 1204
arb = "cut_through"
params = sst.merlin._params
buffer_size = "64KB"
topo = topoTorus()
params["torus:shape"] = "2x2x2";
params["torus:width"] = "1x1x1";
params["flit_size"] = "8B"
params["link_bw"] = "10GB/s"
params["link_lat"] = "100ns"
params["xbar_bw"] = "15GB/s"
params["input_latency"] = "100ns"
params["output_latency"] = "100ns"
params["input_buf_size"] = buffer_size
params["output_buf_size"] = buffer_size
params["num_peers"] = 8
params["num_dims"] = 3
params["torus:local_ports"] = 1
memParams = {
"latency" : "10ns",
"bandwidth" : "10GB/s",
}
procParams = {
"frequency" : "2.1GHz",
"ncores" : 1,
}
nicParams = {
"name" : "pisces",
"mtu" : mtu,
"arbitrator" : arb,
"packetizer" : "merlin",
"injection" : {
"latency" : "1us",
"bandwidth" : "10GB/s",
"credits" : buffer_size,
},
"module" : "merlin.linkcontrol",
}
appParams = {
"name" : "mpi_coverage",
"start" : "0ms",
"launch_cmd" : "aprun -n 8 -N 1",
}
nodeParams = {
"nic" : nicParams,
"memory" : memParams,
"proc" : procParams,
"topology" : {
"name" : "merlin",
"num_nodes" : 8,
"num_switches" : 8,
},
"app1" : appParams,
}
topo.prepParams()
nodeParams = macroToCoreParams(nodeParams)
class TestEP(EndPoint):
def build( self, nodeID, extraKeys ):
node = sst.Component( "node" + str(nodeID), "macro.SimpleNode" )
node.addParams(extraKeys)
node.addParams(nodeParams)
node.addParam("id", nodeID)
return (node, "rtr", params["link_lat"])
ep = TestEP()
topo.setEndPoint(ep)
topo.build()
#sst.macro.debug("mpi", "timestamp")
#sst.macro.debug("packetizer")
| 1,699 | 17.085106 | 68 |
py
|