def runCCBlade(self, Uhub, ptfm_pitch=0, yaw_misalign=0):
    '''Performs a single CCBlade evaluation at the specified conditions.

    ptfm_pitch
        mean platform pitch angle to be included in rotor tilt angle [rad]
    yaw_misalign
        turbine yaw misalignment angle [deg]
    '''

    # find the turbine operating point at the provided wind speed
    Omega_rpm = np.interp(Uhub, self.Uhub, self.Omega_rpm)  # rotor speed [rpm]
    pitch_deg = np.interp(Uhub, self.Uhub, self.pitch_deg)  # blade pitch angle [deg]

    # adjust rotor angles based on provided info (I think this intervention in CCBlade should work...)
    self.ccblade.tilt = np.deg2rad(self.shaft_tilt) + ptfm_pitch
    self.ccblade.yaw  = np.deg2rad(yaw_misalign)

    # evaluate aero loads and derivatives with CCBlade
    loads, derivs = self.ccblade.evaluate(Uhub, Omega_rpm, pitch_deg, coefficients=True)

    # organize and save the relevant outputs
    self.U_case      = Uhub
    self.Omega_case  = Omega_rpm
    self.aero_torque = loads["Q"][0]
    self.aero_power  = loads["P"][0]
    self.pitch_case  = pitch_deg

    outputs = {}
    outputs["P"]   = loads["P"]
    outputs["Mb"]  = loads["Mb"]
    outputs["CP"]  = loads["CP"]
    outputs["CMb"] = loads["CMb"]
    outputs["Fhub"]  = np.array([loads["T" ][0], loads["Y" ][0], loads["Z" ][0]])
    outputs["Mhub"]  = np.array([loads["Q" ][0], loads["My" ][0], loads["Mz" ][0]])
    outputs["CFhub"] = np.array([loads["CT"][0], loads["CY" ][0], loads["CZ" ][0]])
    outputs["CMhub"] = np.array([loads["CQ"][0], loads["CMy"][0], loads["CMz"][0]])

    print(f"Wind speed: {Uhub} m/s, Aerodynamic power coefficient: {loads['CP'][0]:4.3f}")

    J = {}  # Jacobian/derivatives

    dP = derivs["dP"]
    J["P", "r"] = dP["dr"]
    # (additional power derivatives could be stored here if needed, e.g.
    #  dP["dchord"], dP["dtheta"], dP["dRhub"], dP["dRtip"], dP["dhubHt"],
    #  dP["dprecone"], dP["dtilt"], dP["dyaw"], dP["dshear"], dP["dUinf"],
    #  dP["dOmega"], dP["dpitch"], dP["dprecurve"], dP["dprecurveTip"],
    #  dP["dpresweep"], dP["dpresweepTip"])

    dQ = derivs["dQ"]
    J["Q", "Uhub"]      = np.atleast_1d(np.diag(dQ["dUinf"]))
    J["Q", "pitch_deg"] = np.atleast_1d(np.diag(dQ["dpitch"]))
    J["Q", "Omega_rpm"] = np.atleast_1d(np.diag(dQ["dOmega"]))

    dT = derivs["dT"]
    J["T", "Uhub"]      = np.atleast_1d(np.diag(dT["dUinf"]))
    J["T", "pitch_deg"] = np.atleast_1d(np.diag(dT["dpitch"]))
    J["T", "Omega_rpm"] = np.atleast_1d(np.diag(dT["dOmega"]))

    # (the corresponding hub force derivatives could similarly be stored in
    #  J["Fhub", ...] entries, with row 0 for thrust, 1 for y, and 2 for z)

    self.J = J

    return loads, derivs

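# A minimal sketch (not part of RAFT) of how the stored derivatives can be used:
# linearize thrust about the operating point as
#   dT = (dT/dU)*du + (dT/dOmega)*dOmega + (dT/dpitch)*dpitch.
# The derivative values below are made up purely for illustration.
import numpy as np

dT_dU, dT_dOm, dT_dPi = 8.0e4, 5.0e5, -9.0e5  # hypothetical derivatives [N/(m/s)], [N/(rad/s)], [N/rad]
du, dOm, dPi = 0.5, 0.01, np.deg2rad(0.2)     # small perturbations about the operating point

dT = dT_dU*du + dT_dOm*dOm + dT_dPi*dPi       # first-order thrust perturbation [N]
print(f"Linearized thrust change: {dT/1e3:.1f} kN")
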
def calcAeroServoContributions(self, case, ptfm_pitch=0, display=0):
    '''Calculates stiffness, damping, added mass, and excitation coefficients
    from rotor aerodynamics coupled with turbine controls.
    Results are w.r.t. the nonrotating hub reference frame.
    Currently returns 6-DOF mean loads, but the other terms are just hub fore-aft scalars.

    ptfm_pitch
        mean platform pitch angle to be included in rotor tilt angle [rad]
    '''

    loads, derivs = self.runCCBlade(case['wind_speed'], ptfm_pitch=ptfm_pitch, yaw_misalign=case['yaw_misalign'])

    Uinf = case['wind_speed']  # inflow wind speed (m/s) <<< eventually should be consistent with rest of RAFT

    # extract derivatives of interest
    dT_dU  = np.atleast_1d(np.diag(derivs["dT"]["dUinf"]))
    dT_dOm = np.atleast_1d(np.diag(derivs["dT"]["dOmega"])) / rpm2radps
    dT_dPi = np.atleast_1d(np.diag(derivs["dT"]["dpitch"])) * rad2deg
    dQ_dU  = np.atleast_1d(np.diag(derivs["dQ"]["dUinf"]))
    dQ_dOm = np.atleast_1d(np.diag(derivs["dQ"]["dOmega"])) / rpm2radps
    dQ_dPi = np.atleast_1d(np.diag(derivs["dQ"]["dpitch"])) * rad2deg

    # pitch control gains at Uinf; flip the sign to translate the ROSCO convention to this one
    self.kp_beta = -np.interp(Uinf, self.Uhub, self.kp_0)
    self.ki_beta = -np.interp(Uinf, self.Uhub, self.ki_0)

    # torque control gains, only active when the pitch gains are zero (below-rated operation)
    kp_tau = self.kp_tau * (self.kp_beta == 0)  # e.g. -38609162.66552 ! VS_KP - proportional gain for generator PI torque controller [Nm/(rad/s)] (only used in the transitional 2.5 region if VS_ControlMode /= 2)
    ki_tau = self.ki_tau * (self.kp_beta == 0)  # e.g. -4588245.18720  ! VS_KI - integral gain for generator PI torque controller

    a_aer = np.zeros_like(self.w)
    b_aer = np.zeros_like(self.w)
    C  = np.zeros_like(self.w, dtype=complex)
    C2 = np.zeros_like(self.w, dtype=complex)
    D  = np.zeros_like(self.w, dtype=complex)
    E  = np.zeros_like(self.w, dtype=complex)

    # roots of the characteristic equation, helps with debugging
    p = np.array([-self.I_drivetrain, (dQ_dOm + self.kp_beta * dQ_dPi - self.Ng * kp_tau), self.ki_beta * dQ_dPi - self.Ng * ki_tau])
    r = np.roots(p)

    for iw, omega in enumerate(self.w):
        # denominator of the control transfer function
        D[iw] = self.I_drivetrain * omega**2 + (dQ_dOm + self.kp_beta * dQ_dPi - self.Ng * kp_tau) * 1j * omega + self.ki_beta * dQ_dPi - self.Ng * ki_tau
        # control transfer function
        C[iw] = 1j * omega * (dQ_dU - self.k_float * dQ_dPi / self.Zhub) / D[iw]
        # thrust transfer function
        E[iw] = (dT_dOm + self.kp_beta * dT_dPi) * 1j * omega + self.ki_beta * dT_dPi
        # alternative form for debugging
        C2[iw] = C[iw] / (1j * omega)
        # complex aero damping
        T = 1j * omega * (dT_dU - self.k_float * dT_dPi / self.Zhub) - E[iw] * C[iw]
        # aerodynamic coefficients
        a_aer[iw] = -(1/omega**2) * np.real(T)
        b_aer[iw] =  (1/omega)    * np.imag(T)

    # save transfer functions required for output
    self.C = C

    # calculate steady aero forces and moments
    F_aero0 = np.array([loads["T" ][0], loads["Y" ][0], loads["Z" ][0],
                        loads["My"][0], loads["Q" ][0], loads["Mz"][0]])

    # calculate wind excitation force/moment spectra
    _, _, _, S_rot = self.IECKaimal(case)

    self.V_w = np.sqrt(S_rot)  # convert from power spectral density to complex amplitudes (FFT)

    T_0  = loads["T"][0]
    T_w1 = dT_dU * self.V_w
    T_w2 = (E * C * self.V_w) / (1j * self.w) * (-1)  # mhall: think this needs the sign reversal
    T_ext = T_w1 + T_w2

    if display > 1:
        # (self.V_w and np.abs(T_ext) could also be plotted on log axes for debugging)
        fig, ax = plt.subplots(4, 1, sharex=True)
        ax[0].plot(self.w/2.0/np.pi, self.V_w);               ax[0].set_ylabel('U (m/s)')
        ax[1].plot(self.w/2.0/np.pi, T_w1);                   ax[1].set_ylabel('T_w1')
        ax[2].plot(self.w/2.0/np.pi, np.real(T_w2), 'k')
        ax[2].plot(self.w/2.0/np.pi, np.imag(T_w2), 'k:');    ax[2].set_ylabel('T_w2')
        ax[3].plot(self.w/2.0/np.pi, np.real(T_w1+T_w2), 'k')
        ax[3].plot(self.w/2.0/np.pi, np.imag(T_w1+T_w2), 'k:'); ax[3].set_ylabel('T_w1+T_w2')
        ax[3].set_xlabel('f (Hz)')

    f_aero = T_ext  # wind thrust force excitation spectrum

    return F_aero0, f_aero, a_aer, b_aer

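# A standalone numeric sketch (all scalar values made up, not RAFT's) of the
# frequency-domain algebra above: form D(w), C(w), and E(w) at one frequency,
# then split the complex thrust term T into an added mass a_aer and damping b_aer.
import numpy as np

I_dt, Ng = 4.0e8, 1.0                       # hypothetical drivetrain inertia [kg-m^2] and gear ratio
dQ_dU, dQ_dOm, dQ_dPi = 2.0e6, -3.0e7, -6.0e7
dT_dU, dT_dOm, dT_dPi = 8.0e4, 5.0e5, -9.0e5
kp_beta, ki_beta = -1.2, -0.3               # pitch gains (ROSCO signs already flipped)
kp_tau = ki_tau = 0.0                       # torque control inactive above rated
k_float, Zhub = 0.0, 150.0                  # no floating feedback in this sketch

omega = 0.2                                 # a single frequency [rad/s]
D = I_dt*omega**2 + (dQ_dOm + kp_beta*dQ_dPi - Ng*kp_tau)*1j*omega + ki_beta*dQ_dPi - Ng*ki_tau
C = 1j*omega*(dQ_dU - k_float*dQ_dPi/Zhub) / D
E = (dT_dOm + kp_beta*dT_dPi)*1j*omega + ki_beta*dT_dPi
T = 1j*omega*(dT_dU - k_float*dT_dPi/Zhub) - E*C

a_aer = -np.real(T)/omega**2                # aerodynamic added mass [kg]
b_aer =  np.imag(T)/omega                   # aerodynamic damping [N-s/m]
print(f"a_aer = {a_aer:.3e} kg, b_aer = {b_aer:.3e} N-s/m")
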
def plot(self, ax, r_ptfm=[0,0,0], R_ptfm=np.eye(3), azimuth=0, color='k'):
    '''Draws the rotor on the passed axes, considering optional platform
    offset and rotation matrix, and rotor azimuth angle.'''

    # ----- blade geometry ----------
    m = len(self.ccblade.chord)

    # lists to be filled with coordinates for plotting
    X = []
    Y = []
    Z = []

    # generic airfoil shape for now (should add real airfoil shapes, and twist)
    afx = np.array([ 0.0 , -0.16, 0.0 ,  0.0 ])
    afy = np.array([-0.25,  0.  , 0.75, -0.25])
    npts = len(afx)

    for i in range(m):
        for j in range(npts):
            X.append(self.ccblade.chord[i]*afx[j])
            Y.append(self.ccblade.chord[i]*afy[j])
            Z.append(self.ccblade.r[i])

    P = np.array([X, Y, Z])

    # ----- rotation matrices -----
    # (blade pitch would be a negative rotation about the local z axis)
    R_precone = rotationMatrix(0, -self.ccblade.precone, 0)
    R_azimuth = [rotationMatrix(azimuth + azi, 0, 0) for azi in 2*np.pi/3.*np.arange(3)]
    R_tilt    = rotationMatrix(0, np.deg2rad(self.shaft_tilt), 0)  # define x as along the shaft, downwind; y is the same as platform y

    # ----- transform coordinates -----
    for ib in range(3):  # one blade at a time
        P2 = np.matmul(R_precone, P)
        P2 = np.matmul(R_azimuth[ib], P2)
        P2 = np.matmul(R_tilt, P2)
        P2 = P2 + np.array([-self.overhang, 0, self.Zhub])[:,None]  # PRP to tower-shaft intersection point
        P2 = np.matmul(R_ptfm, P2) + np.array(r_ptfm)[:,None]

        # draw the blade outline (the individual airfoil sections could also be drawn)
        ax.plot(P2[0, 0:-1:npts], P2[1, 0:-1:npts], P2[2, 0:-1:npts], color=color)  # leading edge
        ax.plot(P2[0, 2:-1:npts], P2[1, 2:-1:npts], P2[2, 2:-1:npts], color=color)  # trailing edge

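# rotationMatrix() comes from RAFT's helper module; for reference, a minimal
# stand-in consistent with how it is called above (rotations about the x, y,
# and z axes) might look like the sketch below. The composition order is an
# assumption for illustration, not necessarily RAFT's exact convention.
import numpy as np

def rotationMatrix_sketch(rotx, roty, rotz):
    '''Returns a 3x3 rotation matrix for rotations about the x, y, and z axes [rad].'''
    cx, sx = np.cos(rotx), np.sin(rotx)
    cy, sy = np.cos(roty), np.sin(roty)
    cz, sz = np.cos(rotz), np.sin(rotz)
    Rx = np.array([[1, 0, 0], [0, cx, -sx], [0, sx, cx]])   # roll
    Ry = np.array([[cy, 0, sy], [0, 1, 0], [-sy, 0, cy]])   # pitch
    Rz = np.array([[cz, -sz, 0], [sz, cz, 0], [0, 0, 1]])   # yaw
    return Rz @ Ry @ Rx   # assumed order: x rotation first, then y, then z
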
def IECKaimal(self, case):
    '''Calculates the rotor-averaged turbulent wind spectrum based on the input
    turbulence intensity or IEC turbine class.'''
    # TODO: expand commenting, confirm that Rot is a power spectrum, skip V,W calcs if not used

    # set inputs (f, V_ref, HH, Class, Categ, TurbMod, R)
    f = self.w / 2 / np.pi  # frequency in Hz
    HH = self.Zhub
    R = self.R_rot
    V_ref = case['wind_speed']

    ###### Initialize IEC Wind parameters #######
    iec_wind = pyIECWind_extreme()
    iec_wind.z_hub = HH

    if isinstance(case['turbulence'], str):
        # if a string, the turbine class options are I, II, III, IV
        Class = ''
        for char in case['turbulence']:
            if char == 'I' or char == 'V':
                Class += char
            else:
                break

        if not Class:
            raise Exception("Turbulence class must start with I, II, III, or IV, but you wrote " + case['turbulence'])
        else:
            Categ = char  # the character after the class is the turbulence category (e.g. A, B, or C)
            iec_wind.Turbulence_Class = Categ

        try:
            TurbMod = case['turbulence'].split('_')[1]
        except IndexError:
            raise Exception("Error reading the turbulence model. You wrote " + case['turbulence'])

        iec_wind.Turbine_Class = Class

    # set things up (use default values if not specified in the above)
    iec_wind.setup()

    # iec_wind.I_ref can also be set here directly, in which case the NTM is used
    if isinstance(case['turbulence'], int):
        case['turbulence'] = float(case['turbulence'])
    if isinstance(case['turbulence'], float):
        iec_wind.I_ref = case['turbulence']  # this overwrites the value set in the setup method
        TurbMod = 'NTM'

    # compute the wind turbulence standard deviation (invariant with height)
    if TurbMod == 'NTM':
        sigma_1 = iec_wind.NTM(V_ref)
    elif TurbMod == 'ETM':
        sigma_1 = iec_wind.ETM(V_ref)
    elif TurbMod == 'EWM':
        sigma_1 = iec_wind.EWM(V_ref)
    else:
        raise Exception("Wind model must be either NTM, ETM, or EWM, but you wrote " + TurbMod)

    # compute the turbulence scale parameter, Annex C3 of IEC 61400-1-2019
    if HH <= 60:
        L_1 = 0.7 * HH
    else:
        L_1 = 42.

    # longitudinal
    sigma_u = sigma_1
    L_u = 8.1 * L_1
    # lateral
    sigma_v = 0.8 * sigma_1
    L_v = 2.7 * L_1
    # upward
    sigma_w = 0.5 * sigma_1
    L_w = 0.66 * L_1

    U = (4*L_u/V_ref)*sigma_u**2 / ((1 + 6*f*L_u/V_ref)**(5./3.))
    V = (4*L_v/V_ref)*sigma_v**2 / ((1 + 6*f*L_v/V_ref)**(5./3.))
    W = (4*L_w/V_ref)*sigma_w**2 / ((1 + 6*f*L_w/V_ref)**(5./3.))

    kappa = 12 * np.sqrt((f/V_ref)**2 + (0.12 / L_u)**2)

    # rotor-averaged spectrum
    Rot = (2*U / (R*kappa)**3) * \
          (modstruve(1, 2*R*kappa) - iv(1, 2*R*kappa) - 2/np.pi + \
           R*kappa * (-2*modstruve(-2, 2*R*kappa) + 2*iv(2, 2*R*kappa) + 1))

    # set NaNs to 0
    Rot[np.isnan(Rot)] = 0

    # (the point spectra from Section 6.3 of IEC 61400-1-2019 could be used as a
    #  cross-check: S_1(f) = 0.05*sigma_1^2*(L_1/V_hub)^(-2/3)*f^(-5/3), with
    #  S_2(f) = S_3(f) = (4/3)*S_1(f), and sigma_k = sqrt(trapz(S_1, f)))

    return U, V, W, Rot

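# A self-contained sketch of the spectra above for sample values (hub height
# 150 m, 10 m/s mean wind, 15% turbulence intensity, all made up); this
# bypasses pyIECWind_extreme by setting sigma_1 directly.
import numpy as np
from scipy.special import modstruve, iv

f = np.linspace(0.005, 0.1, 100)   # frequency [Hz]
V_ref, HH, R = 10.0, 150.0, 120.0  # mean wind speed [m/s], hub height [m], rotor radius [m]
sigma_1 = 0.15 * V_ref             # longitudinal turbulence standard deviation [m/s]

L_1 = 0.7*HH if HH <= 60 else 42.0
L_u = 8.1 * L_1
U = (4*L_u/V_ref)*sigma_1**2 / (1 + 6*f*L_u/V_ref)**(5/3)   # point Kaimal spectrum [(m/s)^2/Hz]

kappa = 12*np.sqrt((f/V_ref)**2 + (0.12/L_u)**2)
Rot = (2*U/(R*kappa)**3) * (modstruve(1, 2*R*kappa) - iv(1, 2*R*kappa) - 2/np.pi
      + R*kappa*(-2*modstruve(-2, 2*R*kappa) + 2*iv(2, 2*R*kappa) + 1))
Rot[np.isnan(Rot)] = 0

# the rotor-averaged spectrum should roll off faster than the point spectrum
print(f"at f={f[-1]:.2f} Hz: point PSD = {U[-1]:.3f}, rotor-averaged PSD = {Rot[-1]:.3f}")
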
def analyzeUnloaded(self):
    '''Calculates the system properties under unloaded conditions: equilibrium positions, natural frequencies, etc.'''

    # calculate the system's constant properties
    #self.calcSystemConstantProps()
    for fowt in self.fowtList:
        fowt.calcStatics()
        #fowt.calcBEM()
        fowt.calcHydroConstants(dict(wave_spectrum='still', wave_heading=0))

    # get mooring system characteristics about the undisplaced platform position (useful for baseline and verification)
    try:
        self.C_moor0 = self.ms.getCoupledStiffness(lines_only=True)  # this method accounts for equilibrium of free objects in the system
        self.F_moor0 = self.ms.getForces(DOFtype="coupled", lines_only=True)
    except Exception as e:
        raise RuntimeError('An error occurred when getting linearized mooring properties in the undisplaced state: ' + str(e))

    self.results['properties'] = {}  # signal that this data is available by adding a section to the results dictionary

    # calculate platform offsets and mooring system equilibrium state
    self.calcMooringAndOffsets()
    self.results['properties']['offset_unloaded'] = self.fowtList[0].Xi0

    # TODO: add printing of summary info here - mass, stiffnesses, etc.

def calcMooringAndOffsets(self):
    '''Calculates mean offsets and linearized mooring properties for the current load case.
    setEnv and calcSystemProps must be called first. This will ultimately become a method
    for solving the mean operating point.
    '''

    # apply any mean aerodynamic and hydrodynamic loads
    F_PRP = self.fowtList[0].F_aero0  # + self.fowtList[0].F_hydro0 <<< hydro load would be nice here eventually
    self.ms.bodyList[0].f6Ext = np.array(F_PRP)

    # now find the static equilibrium offsets of the platform and get the mooring properties about that point
    # (this assumes some loads have been applied)
    #self.ms.display = 2
    try:
        self.ms.solveEquilibrium3(DOFtype="both", tol=0.01)  #, rmsTol=1.0E-5)  # get the system to its equilibrium
    except Exception as e:  # mp.MoorPyError
        print('An error occurred when solving system equilibrium: ' + str(e))
        #raise RuntimeError('An error occurred when solving unloaded equilibrium: ' + str(e))

    # ::: a loop could be added here for an array :::
    fowt = self.fowtList[0]

    r6eq = self.ms.bodyList[0].r6
    fowt.Xi0 = np.array(r6eq)  # save the current mean offsets for the FOWT

    #self.ms.plot()
    print(f"Found mean offsets with surge = {r6eq[0]:.2f} m and pitch = {r6eq[4]*180/np.pi:.2f} deg.")

    try:
        C_moor, J_moor = self.ms.getCoupledStiffness(lines_only=True, tensions=True)  # get stiffness matrix and tension Jacobian
        F_moor = self.ms.getForces(DOFtype="coupled", lines_only=True)  # get net forces and moments from mooring lines on the body
        T_moor = self.ms.getTensions()
    except Exception as e:
        raise RuntimeError('An error occurred when getting linearized mooring properties in the offset state: ' + str(e))

    # add any additional yaw stiffness that isn't included in the MoorPy model (e.g. if a bridle isn't modeled)
    C_moor[5,5] += fowt.yawstiff

    self.C_moor = C_moor
    self.J_moor = J_moor  # Jacobian of mooring line tensions w.r.t. the coupled DOFs
    self.F_moor = F_moor
    self.T_moor = T_moor

    # store results
    self.results['means'] = {}  # signal that this data is available by adding a section to the results dictionary
    self.results['means']['aero force'       ] = self.fowtList[0].F_aero0
    self.results['means']['platform offset'  ] = r6eq
    self.results['means']['mooring force'    ] = F_moor
    self.results['means']['fairlead tensions'] = np.array([np.linalg.norm(self.ms.pointList[id-1].getForces()) for id in self.ms.bodyList[0].attachedP])

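# A hedged sketch of how the linearized surge stiffness from getCoupledStiffness
# could be spot-checked by finite differencing the mooring forces about the
# equilibrium. It assumes a MoorPy body setPosition method (hinted at in the
# plotting code elsewhere here) and that solveEquilibrium3 accepts
# DOFtype="free"; both are assumptions about the MoorPy API.
import numpy as np

def check_surge_stiffness(ms, r6eq, dx=0.1):
    '''Finite-difference estimate of C[0,0]: -dF_surge/dx about r6eq.'''
    r_plus  = np.array(r6eq); r_plus[0]  += dx
    r_minus = np.array(r6eq); r_minus[0] -= dx

    ms.bodyList[0].setPosition(r_plus)
    ms.solveEquilibrium3(DOFtype="free")   # re-equilibrate only the free objects (assumed option)
    F_plus = ms.getForces(DOFtype="coupled", lines_only=True)[0]

    ms.bodyList[0].setPosition(r_minus)
    ms.solveEquilibrium3(DOFtype="free")
    F_minus = ms.getForces(DOFtype="coupled", lines_only=True)[0]

    return -(F_plus - F_minus) / (2*dx)    # stiffness = -dF/dx [N/m]
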
def calcOutputs(self):
    '''Calculates various output quantities of interest based on the already-solved system response.'''

    fowt = self.fowtList[0]  # just using a single turbine for now

    # ----- system properties outputs -----------------------------
    # all values are about the platform reference point (z=0) unless otherwise noted
    if 'properties' in self.results:
        self.results['properties']['tower mass']        = fowt.mtower
        self.results['properties']['tower CG']          = fowt.rCG_tow
        self.results['properties']['substructure mass'] = fowt.msubstruc
        self.results['properties']['substructure CG']   = fowt.rCG_sub
        self.results['properties']['shell mass']        = fowt.mshell
        self.results['properties']['ballast mass']      = fowt.mballast
        self.results['properties']['ballast densities'] = fowt.pb
        self.results['properties']['total mass']        = fowt.M_struc[0,0]
        self.results['properties']['total CG']          = fowt.rCG_TOT
        self.results['properties']['roll inertia at subCG']  = fowt.M_struc_subCM[3,3]
        self.results['properties']['pitch inertia at subCG'] = fowt.M_struc_subCM[4,4]
        self.results['properties']['yaw inertia at subCG']   = fowt.M_struc_subCM[5,5]
        self.results['properties']['Buoyancy (pgV)']         = fowt.rho_water*fowt.g*fowt.V  # rho*g*V
        self.results['properties']['Center of Buoyancy']     = fowt.rCB
        self.results['properties']['C stiffness matrix']     = fowt.C_hydro

        # unloaded equilibrium <<<
        self.results['properties']['F_lines0'] = self.F_moor0
        self.results['properties']['C_lines0'] = self.C_moor0

        # 6DOF matrices for the support structure (everything but the turbine) including mass, hydrostatics, and mooring reactions
        self.results['properties']['M support structure'] = fowt.M_struc_subCM                         # mass matrix
        self.results['properties']['A support structure'] = fowt.A_hydro_morison + fowt.A_BEM[:,:,-1]  # hydrodynamic added mass (currently using the highest BEM frequency)
        self.results['properties']['C support structure'] = fowt.C_struc_sub + fowt.C_hydro + self.C_moor0  # stiffness

    # ----- response outputs (always in standard units) ---------------------------------------
    if 'response' in self.results:
        RAOmag = abs(self.Xi / fowt.zeta)  # magnitudes of the motion RAOs

        self.results['response']['frequencies']    = self.w/2/np.pi  # Hz
        self.results['response']['wave elevation'] = fowt.zeta
        self.results['response']['Xi']             = self.Xi

        # DOF order is surge, sway, heave, roll, pitch, yaw
        self.results['response']['surge RAO'] = RAOmag[0,:]
        self.results['response'][ 'sway RAO'] = RAOmag[1,:]
        self.results['response']['heave RAO'] = RAOmag[2,:]
        self.results['response'][ 'roll RAO'] = RAOmag[3,:]
        self.results['response']['pitch RAO'] = RAOmag[4,:]
        self.results['response'][  'yaw RAO'] = RAOmag[5,:]

        # save dynamic derived quantities
        #self.results['response']['mooring tensions'] = ...
        self.results['response']['nacelle acceleration'] = self.w**2 * (self.Xi[0] + self.Xi[4]*fowt.hHub)

    return self.results

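# A small numeric sketch of the response post-processing above: the RAO is the
# motion amplitude per unit wave amplitude, and the nacelle acceleration
# combines surge with pitch-induced motion at hub height (all values made up).
import numpy as np

omega = 0.5                        # frequency [rad/s]
zeta  = 1.0 + 0.0j                 # wave elevation amplitude [m]
Xi_surge = 0.8 - 0.2j              # complex surge response [m]
Xi_pitch = (0.5 - 0.1j)*np.pi/180  # complex pitch response [rad]
hHub  = 150.0                      # hub height above the reference point [m]

surge_RAO = abs(Xi_surge/zeta)                  # [m/m]
a_nac = omega**2 * (Xi_surge + Xi_pitch*hHub)   # complex nacelle acceleration [m/s^2]
print(f"surge RAO = {surge_RAO:.2f} m/m, |nacelle accel| = {abs(a_nac):.3f} m/s^2")
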
def plotResponses(self):
    '''Plots the power spectral densities of the available response channels for each case.'''

    fig, ax = plt.subplots(6, 1, sharex=True)

    metrics = self.results['case_metrics']
    nCases = len(metrics['surge_avg'])

    for iCase in range(nCases):
        ax[0].plot(self.w/TwoPi, TwoPi*metrics['surge_PSD'][iCase,:])  # surge
        ax[1].plot(self.w/TwoPi, TwoPi*metrics['heave_PSD'][iCase,:])  # heave
        ax[2].plot(self.w/TwoPi, TwoPi*metrics['pitch_PSD'][iCase,:])  # pitch [deg]
        ax[3].plot(self.w/TwoPi, TwoPi*metrics['AxRNA_PSD'][iCase,:])  # nacelle acceleration
        ax[4].plot(self.w/TwoPi, TwoPi*metrics['Mbase_PSD'][iCase,:])  # tower base bending moment (using FAST's kN-m)
        ax[5].plot(self.w/TwoPi, TwoPi*metrics['wave_PSD' ][iCase,:], label=f'case {iCase+1}')  # wave spectrum

    # a variable number of subplots would be needed for the mooring lines, e.g.
    #ax2[3].plot(model.w/2/np.pi, TwoPi*metrics['Tmoor_PSD'][0,3,:])   # fairlead tension

    ax[0].set_ylabel('surge \n'+r'(m$^2$/Hz)')
    ax[1].set_ylabel('heave \n'+r'(m$^2$/Hz)')
    ax[2].set_ylabel('pitch \n'+r'(deg$^2$/Hz)')
    ax[3].set_ylabel('nac. acc. \n'+r'((m/s$^2$)$^2$/Hz)')
    ax[4].set_ylabel('twr. bend \n'+r'((Nm)$^2$/Hz)')
    ax[5].set_ylabel('wave elev.\n'+r'(m$^2$/Hz)')

    ax[-1].set_xlabel('frequency (Hz)')
    ax[-1].legend()

    fig.suptitle('RAFT power spectral densities')

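# The TwoPi factors above convert PSDs from per-(rad/s) to per-Hz so that the
# area under the curve (the variance) is preserved when plotting against
# frequency in Hz. A quick self-contained check with an arbitrary spectrum:
import numpy as np

TwoPi = 2*np.pi
w = np.linspace(0.01, 3, 500)            # frequencies [rad/s]
S_w = np.exp(-(w - 1.0)**2 / 0.1)        # some PSD in units^2/(rad/s)

var_rad = np.trapz(S_w, w)               # variance from the rad/s form
var_hz  = np.trapz(TwoPi*S_w, w/TwoPi)   # variance from the Hz form
print(f"{var_rad:.6f} == {var_hz:.6f}")  # identical by construction
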
def preprocess_HAMS(self, dw=0, wMax=0, dz=0, da=0):
    '''Generates a mesh for the platform, runs a BEM analysis on it
    using pyHAMS, and writes .1 and .3 output files for use with OpenFAST.
    The input parameters are useful for multifidelity applications where
    different levels have different accuracy demands for the HAMS analysis.
    The mesh is only made for non-intersecting members flagged with potMod=1.

    PARAMETERS
    ----------
    dw : float
        Optional specification of a custom frequency increment (rad/s).
    wMax : float
        Optional specification of the maximum frequency for the BEM analysis (rad/s).
        Will only be used if it is greater than the maximum frequency used in RAFT.
    dz : float
        desired longitudinal panel size for the potential flow BEM analysis (m)
    da : float
        desired azimuthal panel size for the potential flow BEM analysis (m)
    '''
    self.fowtList[0].calcBEM(dw=dw, wMax=wMax, dz=dz, da=da)

def plot(self, ax=None, hideGrid=False, color='k', nodes=0):
    '''Plots the whole model, including FOWTs and the mooring system.'''

    # for now, start the plot via the mooring system, since MoorPy doesn't
    # yet know how to draw on other codes' plots
    #self.ms.bodyList[0].setPosition(np.zeros(6))
    #self.ms.initialize()

    # if axes were not passed in, make a new figure
    if ax is None:
        fig, ax = self.ms.plot(color=color)
    else:
        fig = ax.get_figure()
        self.ms.plot(ax=ax, color=color)

    # plot each FOWT
    for fowt in self.fowtList:
        fowt.plot(ax, color=color, nodes=nodes)

    if hideGrid:
        ax.set_xticks([])  # hide axis ticks
        ax.set_yticks([])
        ax.set_zticks([])
        ax.grid(False)     # hide grid lines
        ax.axis('off')
        ax.set_frame_on(False)

    return fig, ax

def runRAFT(input_file, turbine_file=""):
    '''Sets up and runs RAFT based on a YAML input file.'''

    # open the design YAML file and parse it into a dictionary for passing to RAFT
    print("Loading RAFT input file: " + input_file)
    with open(input_file) as file:
        design = yaml.load(file, Loader=yaml.FullLoader)

    print(f"'{design['name']}'")

    depth = float(design['mooring']['water_depth'])

    # for now, turn off potMod in the design dictionary to avoid BEM analysis
    #design['platform']['potModMaster'] = 1

    # read in turbine data and combine it in
    #if len(turbine_file) > 0:
    #    turbine = convertIEAturbineYAML2RAFT(turbine_file)
    #    design['turbine'].update(turbine)

    # create and run the model
    print(" --- making model ---")
    model = raft.Model(design)
    print(" --- analyzing unloaded ---")
    model.analyzeUnloaded()
    print(" --- analyzing cases ---")
    model.analyzeCases()

    model.plot()
    model.plotResponses()
    #model.preprocess_HAMS("testHAMSoutput", dw=0.1, wMax=10)
    plt.show()

    return model

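# Example usage (the YAML file name here is hypothetical; any RAFT design file
# with 'name', 'mooring', 'platform', and 'turbine' sections should work):
if __name__ == "__main__":
    model = runRAFT("example_design.yaml")
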
def calcStatics(self):
    '''Fills in the static quantities of the FOWT and its matrices.
    Also adds some dynamic parameters that are constant, e.g. BEM coefficients and steady thrust loads.'''

    rho = self.rho_water
    g   = self.g

    # structure-related arrays
    self.M_struc = np.zeros([6,6])      # structure/static mass/inertia matrix [kg, kg-m, kg-m^2]
    self.B_struc = np.zeros([6,6])      # structure damping matrix [N-s/m, N-s, N-s-m] (may not be used)
    self.C_struc = np.zeros([6,6])      # structure effective stiffness matrix [N/m, N, N-m]
    self.W_struc = np.zeros([6])        # static weight vector [N, N-m]
    self.C_struc_sub = np.zeros([6,6])  # substructure effective stiffness matrix [N/m, N, N-m]

    # hydrostatic arrays
    self.C_hydro = np.zeros([6,6])      # hydrostatic stiffness matrix [N/m, N, N-m]
    self.W_hydro = np.zeros(6)          # buoyancy force/moment vector [N, N-m] <<<<< not used yet

    # --------------- add in (i.e. load) linear hydrodynamic coefficients here if applicable --------------------

    # --------------- get general geometry properties including hydrostatics ------------------------

    # initialize some variables for running totals
    VTOT = 0.       # total underwater volume of all members combined [m^3]
    mTOT = 0.       # total mass of all members [kg]
    AWP_TOT = 0.    # total waterplane area of all members [m^2]
    IWPx_TOT = 0    # total waterplane moment of inertia of all members about the x axis [m^4]
    IWPy_TOT = 0    # total waterplane moment of inertia of all members about the y axis [m^4]
    Sum_V_rCB = np.zeros(3)     # sum of each member's buoyancy times its center of buoyancy [m^4]
    Sum_AWP_rWP = np.zeros(2)   # sum of each member's waterplane area times the area's center point [m^3]
    Sum_M_center = np.zeros(3)  # sum of each member's mass times its center of mass [kg-m] (only considers the shell mass right now)

    self.msubstruc = 0                     # total mass of just the members that make up the substructure [kg]
    self.M_struc_subPRP = np.zeros([6,6])  # total mass matrix of just the substructure, about the PRP
    msubstruc_sum = 0                      # sum of each substructure member's mass times its CG, used to find the total substructure CG [kg-m]
    self.mshell = 0                        # total mass of the shells/steel of the members in the substructure [kg]
    mballast = []                          # mass of the ballast in each substructure member [kg]
    pballast = []                          # density of the ballast in each substructure member [kg/m^3]

    # loop through each member
    for mem in self.memberList:

        # calculate the member's orientation information (needed for later steps)
        mem.calcOrientation()

        # ---------------------- get the member's mass and inertia properties ------------------------------
        mass, center, mshell, mfill, pfill = mem.getInertia()

        # calculate the mass matrix of the FOWT about the PRP
        self.W_struc += translateForce3to6DOF(np.array([0, 0, -g*mass]), center)  # weight vector
        self.M_struc += mem.M_struc   # mass/inertia matrix about the PRP
        Sum_M_center += center*mass   # mass-times-center sum, for the total center of mass [kg-m]

        # tower calculations
        if mem.type <= 1:  # <<<<<<<<<<<< maybe find a better way to do the if condition
            self.mtower  = mass    # mass of the tower [kg]
            self.rCG_tow = center  # center of mass of the tower relative to the PRP [m]
        # substructure calculations
        if mem.type > 1:
            self.msubstruc += mass              # mass of the substructure
            self.M_struc_subPRP += mem.M_struc  # mass matrix of the substructure about the PRP
            msubstruc_sum += center*mass        # sum of substructure member masses times their centers of mass [kg-m]
            self.mshell += mshell               # mass of the substructure shell material [kg]
            mballast.extend(mfill)              # ballast masses in each substructure member [kg]
            pballast.extend(pfill)              # ballast densities in each substructure member [kg/m^3]

        # -------------------- get each member's buoyancy/hydrostatic properties -----------------------
        Fvec, Cmat, V_UW, r_CB, AWP, IWP, xWP, yWP = mem.getHydrostatics(self.rho_water, self.g)

        # now add to the global vectors/matrices, which are all about the PRP (platform reference point)
        self.W_hydro += Fvec  # buoyancy vector
        self.C_hydro += Cmat  # hydrostatic stiffness matrix

        VTOT     += V_UW  # add to the total underwater volume of all members combined
        AWP_TOT  += AWP
        IWPx_TOT += IWP + AWP*yWP**2
        IWPy_TOT += IWP + AWP*xWP**2
        Sum_V_rCB   += r_CB*V_UW
        Sum_AWP_rWP += np.array([xWP, yWP])*AWP

    # ------------------------- include RNA properties -----------------------------
    # Here we could initialize first versions of the structure matrix components.
    # These might be iterated on later to deal with mean- or amplitude-dependent terms.
    #self.M_struc += structural.M_lin(q0, self.turbineParams)           # linear mass matrix
    #self.B_struc += structural.C_lin(q0, qd0, self.turbineParams, u0)  # linear damping matrix
    #self.C_struc += structural.K_lin(q0, qd0, self.turbineParams, u0)  # linear stiffness matrix
    #self.W_struc += structural.B_lin(q0, qd0, self.turbineParams, u0)  # linear RHS

    # below are temporary placeholders; for now, the turbine RNA is specified by simple lumped properties
    Mmat = np.diag([self.mRNA, self.mRNA, self.mRNA, self.IxRNA, self.IrRNA, self.IrRNA])  # mass/inertia matrix
    center = np.array([self.xCG_RNA, 0, self.hHub])  # RNA center of mass location

    # now convert everything to be about the PRP and add to the global vectors/matrices
    self.W_struc += translateForce3to6DOF(np.array([0, 0, -g*self.mRNA]), center)  # weight vector
    self.M_struc += translateMatrix6to6DOF(Mmat, center)                           # mass/inertia matrix
    Sum_M_center += center*self.mRNA

    # ----------- process inertia-related totals ----------------
    mTOT = self.M_struc[0,0]     # total mass of all the members
    rCG_TOT = Sum_M_center/mTOT  # total CG of all the members
    self.rCG_TOT = rCG_TOT

    self.rCG_sub = msubstruc_sum/self.msubstruc  # CG of just the substructure
    # translate the substructure mass matrix to be about the substructure's CM
    # (rCG_sub is negated because translateMatrix6to6DOF takes the vector from
    #  the desired reference point (the CM) to the current reference point (the PRP))
    self.M_struc_subCM = translateMatrix6to6DOF(self.M_struc_subPRP, -self.rCG_sub)

    # solve for the total mass of each type of ballast in the substructure
    self.pb = []  # list of the unique ballast densities
    for i in range(len(pballast)):
        if pballast[i] != 0:                     # if the value in pballast is not zero
            if self.pb.count(pballast[i]) == 0:  # and if that value is not already in pb
                self.pb.append(pballast[i])      # store that ballast density value

    self.mballast = np.zeros(len(self.pb))  # total mass of each ballast type
    for i in range(len(self.pb)):           # for each ballast density
        for j in range(len(mballast)):      # loop through each ballast mass
            if float(pballast[j]) == float(self.pb[i]):  # if this ballast mass's density matches the current value of pb
                self.mballast[i] += mballast[j]          # add that ballast mass to the correct index of mballast

    # ----------- process key hydrostatic-related totals for use in the static equilibrium solution ------------------
    self.V = VTOT  # save the total underwater volume

    if VTOT == 0:  # if only working with members above the water plane, e.g. modeling just the wind turbine
        rCB_TOT = np.zeros(3)
        zMeta = 0
    else:
        rCB_TOT = Sum_V_rCB/VTOT            # location of the center of buoyancy on the platform
        zMeta = rCB_TOT[2] + IWPx_TOT/VTOT  # z elevation of the metacenter: zCB + BM, with BM = I/V [m] (one direction of IWP has to be picked)
    self.rCB = rCB_TOT

    self.C_struc[3,3] = -mTOT*g*rCG_TOT[2]
    self.C_struc[4,4] = -mTOT*g*rCG_TOT[2]
    self.C_struc_sub[3,3] = -self.msubstruc*g*self.rCG_sub[2]
    self.C_struc_sub[4,4] = -self.msubstruc*g*self.rCG_sub[2]

    # add the relevant properties to this turbine's MoorPy Body
    # >>> should double check proper handling of mean weight and buoyancy forces throughout the model <<<
    self.body.m   = mTOT
    self.body.v   = VTOT
    self.body.rCG = rCG_TOT
    self.body.AWP = AWP_TOT
    self.body.rM  = np.array([0, 0, zMeta])
    # is there any risk of additional moments due to an offset CB, since MoorPy assumes the CB is at the ref point? <<<

'''Fills in the static quantities of the FOWT and its matrices.
Also adds some dynamic parameters that are constant, e.g. BEM coefficients and steady thrust loads.'''
rho = self.rho_water
g = self.g
# structure-related arrays
self.M_struc = np.zeros([6,6]) # structure/static mass/inertia matrix [kg, kg-m, kg-m^2]
self.B_struc = np.zeros([6,6]) # structure damping matrix [N-s/m, N-s, N-s-m] (may not be used)
self.C_struc = np.zeros([6,6]) # structure effective stiffness matrix [N/m, N, N-m]
self.W_struc = np.zeros([6]) # static weight vector [N, N-m]
self.C_struc_sub = np.zeros([6,6]) # substructure effective stiffness matrix [N/m, N, N-m]
# hydrostatic arrays
self.C_hydro = np.zeros([6,6]) # hydrostatic stiffness matrix [N/m, N, N-m]
self.W_hydro = np.zeros(6) # buoyancy force/moment vector [N, N-m] <<<<< not used yet
# --------------- add in linear hydrodynamic coefficients here if applicable --------------------
#[as in load them] <<<<<<<<<<<<<<<<<<<<<
# --------------- Get general geometry properties including hydrostatics ------------------------
# initialize some variables for running totals
VTOT = 0. # Total underwater volume of all members combined
mTOT = 0. # Total mass of all members [kg]
AWP_TOT = 0. # Total waterplane area of all members [m^2]
IWPx_TOT = 0 # Total waterplane moment of inertia of all members about x axis [m^4]
IWPy_TOT = 0 # Total waterplane moment of inertia of all members about y axis [m^4]
Sum_V_rCB = np.zeros(3) # product of each member's buoyancy multiplied by center of buoyancy [m^4]
Sum_AWP_rWP = np.zeros(2) # product of each member's waterplane area multiplied by the area's center point [m^3]
Sum_M_center = np.zeros(3) # product of each member's mass multiplied by its center of mass [kg-m] (Only considers the shell mass right now)
self.msubstruc = 0 # total mass of just the members that make up the substructure [kg]
self.M_struc_subPRP = np.zeros([6,6]) # total mass matrix of just the substructure about the PRP
msubstruc_sum = 0 # product of each substructure member's mass and CG, to be used to find the total substructure CG [kg-m]
self.mshell = 0 # total mass of the shells/steel of the members in the substructure [kg]
mballast = [] # list to store the mass of the ballast in each of the substructure members [kg]
pballast = [] # list to store the density of ballast in each of the substructure members [kg/m^3]
'''
I44list = [] # list to store the I44 MoI about the PRP of each substructure member
I55list = [] # list to store the I55 MoI about the PRP of each substructure member
I66list = [] # list to store the I66 MoI about the PRP of each substructure member
masslist = [] # list to store the mass of each substructure member
'''
# loop through each member
for mem in self.memberList:
# calculate member's orientation information (needed for later steps)
mem.calcOrientation()
# ---------------------- get member's mass and inertia properties ------------------------------
mass, center, mshell, mfill, pfill = mem.getInertia() # calls the getInertia method to calculate values
# Calculate the mass matrix of the FOWT about the PRP
self.W_struc += translateForce3to6DOF( np.array([0,0, -g*mass]), center ) # weight vector
self.M_struc += mem.M_struc # mass/inertia matrix about the PRP
Sum_M_center += center*mass # product sum of the mass and center of mass to find the total center of mass [kg-m]
# Tower calculations
if mem.type <= 1: # <<<<<<<<<<<< maybe find a better way to do the if condition
self.mtower = mass # mass of the tower [kg]
self.rCG_tow = center # center of mass of the tower from the PRP [m]
# Substructure calculations
if mem.type > 1:
self.msubstruc += mass # mass of the substructure
self.M_struc_subPRP += mem.M_struc # mass matrix of the substructure about the PRP
msubstruc_sum += center*mass # product sum of the substructure members and their centers of mass [kg-m]
self.mshell += mshell # mass of the substructure shell material [kg]
mballast.extend(mfill) # list of ballast masses in each substructure member (list of lists) [kg]
pballast.extend(pfill) # list of ballast densities in each substructure member (list of lists) [kg/m^3]
'''
# Store substructure moment of inertia terms
I44list.append(mem.M_struc[3,3])
I55list.append(mem.M_struc[4,4])
I66list.append(mem.M_struc[5,5])
masslist.append(mass)
'''
# -------------------- get each member's buoyancy/hydrostatic properties -----------------------
Fvec, Cmat, V_UW, r_CB, AWP, IWP, xWP, yWP = mem.getHydrostatics(self.rho_water, self.g) # call to Member method for hydrostatic calculations
# now convert everything to be about PRP (platform reference point) and add to global vectors/matrices <<<<< needs updating (already about PRP)
self.W_hydro += Fvec # translateForce3to6DOF( np.array([0,0, Fz]), mem.rA ) # buoyancy vector
self.C_hydro += Cmat # translateMatrix6to6DOF(Cmat, mem.rA) # hydrostatic stiffness matrix
VTOT += V_UW # add to total underwater volume of all members combined
AWP_TOT += AWP
IWPx_TOT += IWP + AWP*yWP**2
IWPy_TOT += IWP + AWP*xWP**2
Sum_V_rCB += r_CB*V_UW
Sum_AWP_rWP += np.array([xWP, yWP])*AWP
# ------------------------- include RNA properties -----------------------------
# Here we could initialize first versions of the structure matrix components.
# These might be iterated on later to deal with mean- or amplitude-dependent terms.
#self.M_struc += structural.M_lin(q0, self.turbineParams) # Linear Mass Matrix
#self.B_struc += structural.C_lin(q0, qd0, self.turbineParams, u0) # Linear Damping Matrix
#self.C_struc += structural.K_lin(q0, qd0, self.turbineParams, u0) # Linear Stiffness Matrix
#self.W_struc += structural.B_lin(q0, qd0, self.turbineParams, u0) # Linear RHS
# below are temporary placeholders
# for now, turbine RNA is specified by some simple lumped properties
Mmat = np.diag([self.mRNA, self.mRNA, self.mRNA, self.IxRNA, self.IrRNA, self.IrRNA]) # create mass/inertia matrix
center = np.array([self.xCG_RNA, 0, self.hHub]) # RNA center of mass location
# now convert everything to be about PRP (platform reference point) and add to global vectors/matrices
self.W_struc += translateForce3to6DOF(np.array([0,0, -g*self.mRNA]), center ) # weight vector
self.M_struc += translateMatrix6to6DOF(Mmat, center) # mass/inertia matrix
Sum_M_center += center*self.mRNA
# ----------- process inertia-related totals ----------------
mTOT = self.M_struc[0,0] # total mass of all the members
rCG_TOT = Sum_M_center/mTOT # total CG of all the members
self.rCG_TOT = rCG_TOT
self.rCG_sub = msubstruc_sum/self.msubstruc # solve for just the substructure mass and CG
self.M_struc_subCM = translateMatrix6to6DOF(self.M_struc_subPRP, -self.rCG_sub) # the mass matrix of the substructure about the substruc's CM
# need to make rCG_sub negative here because translateMatrix6to6DOF takes a vector that goes from where you want the ref point to be (CM) to the current ref point (PRP)
'''
self.I44 = 0 # moment of inertia in roll due to roll of the substructure about the substruc's CG [kg-m^2]
self.I44B = 0 # moment of inertia in roll due to roll of the substructure about the PRP [kg-m^2]
self.I55 = 0 # moment of inertia in pitch due to pitch of the substructure about the substruc's CG [kg-m^2]
self.I55B = 0 # moment of inertia in pitch due to pitch of the substructure about the PRP [kg-m^2]
self.I66 = 0 # moment of inertia in yaw due to yaw of the substructure about the substruc's centerline [kg-m^2]
# Use the parallel axis theorem to move each substructure's MoI to the substructure's CG
x = np.linalg.norm([self.rCG_sub[1],self.rCG_sub[2]]) # the normalized distance between the x and x' axes
y = np.linalg.norm([self.rCG_sub[0],self.rCG_sub[2]]) # the normalized distance between the y and y' axes
z = np.linalg.norm([self.rCG_sub[0],self.rCG_sub[1]]) # the normalized distance between the z and z' axes
for i in range(len(I44list)):
self.I44 += I44list[i] - masslist[i]*x**2
self.I44B += I44list[i]
self.I55 += I55list[i] - masslist[i]*y**2
self.I55B += I55list[i]
self.I66 += I66list[i] - masslist[i]*z**2
'''
# Solve for the total mass of each type of ballast in the substructure
self.pb = [] # empty list to store the unique ballast densities
for i in range(len(pballast)):
if pballast[i] != 0: # if the value in pballast is not zero
if self.pb.count(pballast[i]) == 0: # and if that value is not already in pb
self.pb.append(pballast[i]) # store that ballast density value
self.mballast = np.zeros(len(self.pb)) # make an empty mballast list with len=len(pb)
for i in range(len(self.pb)): # for each ballast density
for j in range(len(mballast)): # loop through each ballast mass
if float(pballast[j]) == float(self.pb[i]): # but only if this ballast's density matches the current unique density value
self.mballast[i] += mballast[j] # add that ballast mass to the correct index of mballast
# ----------- process key hydrostatic-related totals for use in static equilibrium solution ------------------
self.V = VTOT # save the total underwater volume
rCB_TOT = Sum_V_rCB/VTOT # location of center of buoyancy on platform
self.rCB = rCB_TOT
if VTOT==0: # if you're only working with members above the platform, like modeling the wind turbine
zMeta = 0
else:
zMeta = rCB_TOT[2] + IWPx_TOT/VTOT # add center of buoyancy and BM = I/V to get z elevation of metacenter [m] (have to pick one direction for IWP)
self.C_struc[3,3] = -mTOT*g*rCG_TOT[2]
self.C_struc[4,4] = -mTOT*g*rCG_TOT[2]
self.C_struc_sub[3,3] = -self.msubstruc*g*self.rCG_sub[2]
self.C_struc_sub[4,4] = -self.msubstruc*g*self.rCG_sub[2]
# add relevant properties to this turbine's MoorPy Body
# >>> should double check proper handling of mean weight and buoyancy forces throughout model <<<
self.body.m = mTOT
self.body.v = VTOT
self.body.rCG = rCG_TOT
self.body.AWP = AWP_TOT
self.body.rM = np.array([0,0,zMeta])
# is there any risk of additional moments due to offset CB since MoorPy assumes CB at ref point? <<<
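# A minimal standalone sketch (an assumption about the helper's behavior, not
# necessarily RAFT's implementation) of what a translateForce3to6DOF-style call
# does: a 3-DOF force applied at position r becomes a 6-DOF force/moment vector
# about the reference point.
import numpy as np

def translate_force_3_to_6dof_sketch(F, r):
    """Return [Fx, Fy, Fz, Mx, My, Mz] for force F applied at position r."""
    F6 = np.zeros(6)
    F6[:3] = F                # force components carry over unchanged
    F6[3:] = np.cross(r, F)   # moment about the reference point: M = r x F
    return F6

# e.g. the weight accumulation above is equivalent in spirit to
# W_struc += translate_force_3_to_6dof_sketch(np.array([0, 0, -g*mass]), center)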
def calcBEM(self, dw=0, wMax=0, wInf=10.0, dz=0, da=0):
'''This generates a mesh for the platform and runs a BEM analysis on it
using pyHAMS. It can also write adjusted .1 and .3 output files suitable
for use with OpenFAST.
The mesh is only made for non-intersecting members flagged with potMod=1.
PARAMETERS
----------
dw : float
Optional specification of custom frequency increment (rad/s).
wMax : float
Optional specification of maximum frequency for BEM analysis (rad/s). Will only be
used if it is greater than the maximum frequency used in RAFT.
wInf : float
Optional specification of large frequency to use as approximation for infinite
frequency in pyHAMS analysis (rad/s).
dz : float
desired longitudinal panel size for potential flow BEM analysis (m)
da : float
desired azimuthal panel size for potential flow BEM analysis (m)
'''
# go through members to be modeled with BEM and calculate their node and panel lists
nodes = []
panels = []
vertices = np.zeros([0,3]) # for GDF output
dz = self.dz_BEM if dz==0 else dz # allow override if provided
da = self.da_BEM if da==0 else da
for mem in self.memberList:
if mem.potMod==True:
pnl.meshMember(mem.stations, mem.d, mem.rA, mem.rB,
dz_max=dz, da_max=da, savedNodes=nodes, savedPanels=panels)
# for GDF output
vertices_i = pnl.meshMemberForGDF(mem.stations, mem.d, mem.rA, mem.rB, dz_max=dz, da_max=da)
vertices = np.vstack([vertices, vertices_i]) # append the member's vertices to the master list
# only try to save a mesh and run HAMS if some members DO have potMod=True
if len(panels) > 0:
meshDir = os.path.join(os.getcwd(), 'BEM')
pnl.writeMesh(nodes, panels, oDir=os.path.join(meshDir,'Input')) # generate a mesh file in the HAMS .pnl format
#pnl.writeMeshToGDF(vertices) # also a GDF for visualization
ph.create_hams_dirs(meshDir) #
ph.write_hydrostatic_file(meshDir) # HAMS needs a hydrostatics file, but it's unused for .1 and .3, so write a blank one
# prepare frequency settings for HAMS
dw_HAMS = self.dw_BEM if dw==0 else dw # frequency increment - allow override if provided
wMax_HAMS = max(wMax, max(self.w)) # make sure the HAMS run includes both the RAFT and export frequency extents
nw_HAMS = int(np.ceil(wMax_HAMS/dw_HAMS)) # ensure the upper frequency of the HAMS analysis is large enough
ph.write_control_file(meshDir, waterDepth=self.depth, incFLim=1, iFType=3, oFType=4, # inputs are in rad/s, outputs in s
numFreqs=-nw_HAMS, minFreq=dw_HAMS, dFreq=dw_HAMS)
# Note about zero/infinite frequencies from WAMIT-formatted output files (as per WAMIT v7 manual):
# The limiting values of the added-mass coefficients may be evaluated for zero or infinite
# period by specifying the values PER= 0:0 and PER< 0:0, respectively. These special values are always
# associated with the wave period, irrespective of the value of IPERIN and the corresponding
# interpretation of the positive elements of the array PER
# execute the HAMS analysis
ph.run_hams(meshDir)
# read the HAMS WAMIT-style output files
addedMass, damping, w1 = ph.read_wamit1(os.path.join(meshDir,'Output','Wamit_format','Buoy.1'), TFlag=True) # first two entries in frequency dimension are expected to be zero-frequency then infinite frequency
M, P, R, I, w3, heads = ph.read_wamit3(os.path.join(meshDir,'Output','Wamit_format','Buoy.3'), TFlag=True)
# interpolate to the frequencies RAFT is using
addedMassInterp = interp1d(np.hstack([w1[2:], 0.0]), np.dstack([addedMass[:,:,2:], addedMass[:,:,0]]), assume_sorted=False, axis=2)(self.w)
dampingInterp = interp1d(np.hstack([w1[2:], 0.0]), np.dstack([ damping[:,:,2:], np.zeros([6,6]) ]), assume_sorted=False, axis=2)(self.w)
fExRealInterp = interp1d(w3, R , assume_sorted=False )(self.w)
fExImagInterp = interp1d(w3, I , assume_sorted=False )(self.w)
# copy results over to the FOWT's coefficient arrays
self.A_BEM = self.rho_water * addedMassInterp
self.B_BEM = self.rho_water * dampingInterp
self.X_BEM = self.rho_water * self.g * (fExRealInterp + 1j*fExImagInterp)
# HAMS results error checks >>> any more we should have? <<<
if np.isnan(self.A_BEM).any():
#print("NaN values detected in HAMS calculations for added mass. Check the geometry.")
#breakpoint()
raise Exception("NaN values detected in HAMS calculations for added mass. Check the geometry.")
if np.isnan(self.B_BEM).any():
#print("NaN values detected in HAMS calculations for damping. Check the geometry.")
#breakpoint()
raise Exception("NaN values detected in HAMS calculations for damping. Check the geometry.")
if np.isnan(self.X_BEM).any():
#print("NaN values detected in HAMS calculations for excitation. Check the geometry.")
#breakpoint()
raise Exception("NaN values detected in HAMS calculations for excitation. Check the geometry.")
# TODO: add support for multiple wave headings <<<
# note: RAFT will only be using finite-frequency potential flow coefficients
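# Toy 1-DOF sketch of the frequency-splicing trick used above: a WAMIT-style .1
# file lists the zero- and infinite-frequency limits first, so the zero-frequency
# added mass is re-attached at w=0 and interp1d sorts the axis itself
# (assume_sorted=False). All values below are made up for illustration.
import numpy as np
from scipy.interpolate import interp1d

w1 = np.array([0.0, -1.0, 0.5, 1.0, 1.5])    # zero freq, "infinite" freq, then finite freqs
A1 = np.array([2.0, 1.0, 1.8, 1.5, 1.2])     # added mass at those entries
A_of_w = interp1d(np.hstack([w1[2:], 0.0]),  # finite frequencies plus the w=0 point
                  np.hstack([A1[2:], A1[0]]),
                  assume_sorted=False)
print(A_of_w(np.array([0.25, 0.75])))        # added mass at two query frequencies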
def calcTurbineConstants(self, case, ptfm_pitch=0):
'''This computes turbine linear terms
case
dictionary of case information
ptfm_pitch
mean pitch angle of the platform [rad]
'''
#self.rotor.runCCBlade(case['wind_speed'], ptfm_pitch=ptfm_pitch, yaw_misalign=case['yaw_misalign'])
# initialize arrays (can remain zero if aerodynamics are disabled)
self.A_aero = np.zeros([6,6,self.nw]) # frequency-dependent aero-servo added mass matrix
self.B_aero = np.zeros([6,6,self.nw]) # frequency-dependent aero-servo damping matrix
self.F_aero = np.zeros([6, self.nw], dtype=complex) # dynamic excitation force and moment amplitude spectra
self.F_aero0 = np.zeros([6]) # mean aerodynamic forces and moments
# only compute the aerodynamics if enabled and windspeed is nonzero
if self.aeroMod > 0 and case['wind_speed'] > 0.0:
F_aero0, f_aero, a_aero, b_aero = self.rotor.calcAeroServoContributions(case, ptfm_pitch=ptfm_pitch) # get values about hub
# hub reference frame relative to PRP <<<<<<<<<<<<<<<<<
rHub = np.array([0, 0, self.hHub])
#rotMatHub = rotationMatrix(0, 0.01, 0)
# convert coefficients to platform reference frame
for i in range(self.nw):
self.A_aero[:,:,i] = translateMatrix3to6DOF( np.diag([a_aero[i], 0, 0]), rHub)
self.B_aero[:,:,i] = translateMatrix3to6DOF( np.diag([b_aero[i], 0, 0]), rHub)
#self.C_aero = translateMatrix6to6DOF( rotateMatrix6(C_aero, rotMatHub), rHub)
# convert forces to platform reference frame
self.F_aero0 = transformForce(F_aero0, offset=rHub) # mean forces and moments
for iw in range(self.nw):
#self.F_aero[:,iw] = transformForce(F_aero[:,iw], offset=rHub, orientation=rotMatHub)
self.F_aero[:,iw] = translateForce3to6DOF(np.array([f_aero[iw], 0, 0]), rHub)
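# Sketch of one way to implement translateMatrix3to6DOF (an assumed formula, not
# confirmed from the RAFT source): a 3x3 coefficient matrix acting at point r is
# expanded to a 6x6 matrix about the origin via the skew-symmetric matrix H with
# H @ v = r x v.
import numpy as np

def translate_matrix_3_to_6dof_sketch(M3, r):
    H = np.array([[   0., -r[2],  r[1]],
                  [ r[2],    0., -r[0]],
                  [-r[1],  r[0],    0.]])
    M6 = np.zeros((6, 6))
    M6[:3, :3] = M3
    M6[:3, 3:] = M3 @ H.T      # force response to rotational motion
    M6[3:, :3] = H @ M3        # moment response to translational motion
    M6[3:, 3:] = H @ M3 @ H.T  # moment response to rotational motion
    return M6

# e.g. a surge-only aero damping b at hub height z couples surge and pitch:
# translate_matrix_3_to_6dof_sketch(np.diag([b, 0, 0]), np.array([0, 0, z]))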
def calcHydroConstants(self, case):
'''This computes the linear strip-theory-hydrodynamics terms, including wave excitation for a specific case.'''
# set up sea state
self.beta = case['wave_heading']
# make wave spectrum
if case['wave_spectrum'] == 'unit':
self.zeta = np.tile(1, self.nw)
S = np.tile(1, self.nw)
elif case['wave_spectrum'] == 'JONSWAP':
S = JONSWAP(self.w, case['wave_height'], case['wave_period'])
self.zeta = np.sqrt(S) # wave elevation amplitudes (these are easiest to use)
elif case['wave_spectrum'] in ['none','still']:
self.zeta = np.zeros(self.nw)
S = np.zeros(self.nw)
else:
raise ValueError(f"Wave spectrum input '{case['wave_spectrum']}' not recognized.")
rho = self.rho_water
g = self.g
#print(f"significant wave height: {4*np.sqrt(np.sum(S)*self.dw):5.2f} = {4*getRMS(self.zeta, self.dw):5.2f}") # << temporary <<<
# TODO: consider current and viscous drift
# ----- calculate potential-flow wave excitation force -----
self.F_BEM = self.X_BEM * self.zeta # wave excitation force (will be zero if HAMS wasn't run)
# --------------------- get constant hydrodynamic values along each member -----------------------------
self.A_hydro_morison = np.zeros([6,6]) # hydrodynamic added mass matrix, from only Morison equation [kg, kg-m, kg-m^2]
self.F_hydro_iner = np.zeros([6,self.nw],dtype=complex) # inertia excitation force/moment complex amplitudes vector [N, N-m]
# loop through each member
for mem in self.memberList:
circ = mem.shape=='circular' # convenience boolean for circular vs. rectangular cross sections
# print(mem.name)
# loop through each node of the member
for il in range(mem.ns):
# print(il)
# only process hydrodynamics if this node is submerged
if mem.r[il,2] < 0:
# print("underwater")
# get wave kinematics spectra given a certain wave spectrum and location
mem.u[il,:,:], mem.ud[il,:,:], mem.pDyn[il,:] = getWaveKin(self.zeta, self.beta, self.w, self.k, self.depth, mem.r[il,:], self.nw)
# only compute inertial loads and added mass for members that aren't modeled with potential flow
if mem.potMod==False:
# interpolate coefficients for the current strip
Ca_q = np.interp( mem.ls[il], mem.stations, mem.Ca_q )
Ca_p1 = np.interp( mem.ls[il], mem.stations, mem.Ca_p1 )
Ca_p2 = np.interp( mem.ls[il], mem.stations, mem.Ca_p2 )
Ca_End = np.interp( mem.ls[il], mem.stations, mem.Ca_End)
# ----- compute side effects ---------------------------------------------------------
if circ:
v_i = 0.25*np.pi*mem.ds[il]**2*mem.dls[il]
else:
v_i = mem.ds[il,0]*mem.ds[il,1]*mem.dls[il] # member volume assigned to this node
if mem.r[il,2] + 0.5*mem.dls[il] > 0: # if member extends out of water # <<< may want a better approach for this...
v_i = v_i * (0.5*mem.dls[il] - mem.r[il,2]) / mem.dls[il] # scale volume by the portion that is under water
# added mass
Amat = rho*v_i *( Ca_q*mem.qMat + Ca_p1*mem.p1Mat + Ca_p2*mem.p2Mat ) # local added mass matrix
# print(f"Member side added mass diagonals are {Amat[0,0]:6.2e} {Amat[1,1]:6.2e} {Amat[2,2]:6.2e}")
self.A_hydro_morison += translateMatrix3to6DOF(Amat, mem.r[il,:]) # add to global added mass matrix for Morison members
# inertial excitation - Froude-Krylov (axial term explicitly excluded here - we aren't dealing with chains)
Imat = rho*v_i *( (1.+Ca_p1)*mem.p1Mat + (1.+Ca_p2)*mem.p2Mat ) # local inertial excitation matrix (note: the 1 is the Cp, dynamic pressure, term)
#Imat = rho*v_i *( (1.+Ca_q)*mem.qMat + (1.+Ca_p1)*mem.p1Mat + (1.+Ca_p2)*mem.p2Mat ) # local inertial excitation matrix
for i in range(self.nw): # for each wave frequency...
mem.F_exc_iner[il,:,i] = np.matmul(Imat, mem.ud[il,:,i]) # add to global excitation vector (frequency dependent)
self.F_hydro_iner[:,i] += translateForce3to6DOF(mem.F_exc_iner[il,:,i], mem.r[il,:]) # add to global excitation vector (frequency dependent)
# ----- add axial/end effects for added mass, and excitation including dynamic pressure ------
# note : v_a and a_i work out to zero for non-tapered sections or non-end sections
if circ:
v_i = np.pi/12.0 * abs((mem.ds[il]+mem.drs[il])**3 - (mem.ds[il]-mem.drs[il])**3) # volume assigned to this end surface
a_i = np.pi*mem.ds[il] * mem.drs[il] # signed end area (positive facing down) = mean diameter of strip * radius change of strip
else:
v_i = np.pi/12.0 * ((np.mean(mem.ds[il]+mem.drs[il]))**3 - (np.mean(mem.ds[il]-mem.drs[il]))**3) # so far just using sphere eqn and taking mean of side lengths as d
a_i = (mem.ds[il,0]+mem.drs[il,0])*(mem.ds[il,1]+mem.drs[il,1]) - (mem.ds[il,0]-mem.drs[il,0])*(mem.ds[il,1]-mem.drs[il,1])
# >>> should support different coefficients or reference volumes for rectangular cross sections <<<
# added mass
AmatE = rho*v_i * Ca_End*mem.qMat # local added mass matrix
# print(f"Member END added mass diagonals are {AmatE[0,0]:6.2e} {AmatE[1,1]:6.2e} {AmatE[2,2]:6.2e}")
self.A_hydro_morison += translateMatrix3to6DOF(AmatE, mem.r[il,:]) # add to global added mass matrix for Morison members
# inertial excitation
ImatE = rho*v_i * Ca_End*mem.qMat # local inertial excitation matrix (note, there is no 1 added to Ca_End because dynamic pressure is handled separately)
#ImatE = rho*v_i * (1+Ca_End)*mem.qMat # local inertial excitation matrix
for i in range(self.nw): # for each wave frequency...
#F_exc_iner_temp = np.matmul(ImatE, mem.ud[il,:,i]) # local inertial excitation force complex amplitude in x,y,z
mem.F_exc_a[il,:,i] = np.matmul(ImatE, mem.ud[il,:,i]) # local inertial excitation force complex amplitude in x,y,z
# >>> may want to add a separate dynamic pressure input <<<
mem.F_exc_p[il,:,i] = mem.pDyn[il,i]*a_i *mem.q # add dynamic pressure - positive with q if end A - determined by sign of a_i
#F_exc_iner_temp += mem.pDyn[il,i]*a_i *mem.q # add dynamic pressure - positive with q if end A - determined by sign of a_i
F_exc_iner_temp = mem.F_exc_a[il,:,i] + mem.F_exc_p[il,:,i]
mem.F_exc_iner[il,:,i] += F_exc_iner_temp # add to stored member force vector
self.F_hydro_iner[:,i] += translateForce3to6DOF(F_exc_iner_temp, mem.r[il,:])
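# Sketch of the direction decomposition assumed above: qMat/p1Mat/p2Mat are taken
# to be outer-product projection matrices built from the member's axial unit
# vector q and transverse unit vectors p1 and p2, so each added-mass coefficient
# acts only on its own component of the fluid acceleration.
import numpy as np

q = np.array([0.0, 0.0, 1.0])    # vertical member axis
p1 = np.array([1.0, 0.0, 0.0])
p2 = np.cross(q, p1)
qMat, p1Mat, p2Mat = np.outer(q, q), np.outer(p1, p1), np.outer(p2, p2)

rho, v_i, Ca_q, Ca_p1, Ca_p2 = 1025.0, 2.0, 0.0, 1.0, 1.0  # illustrative values
Amat = rho*v_i*(Ca_q*qMat + Ca_p1*p1Mat + Ca_p2*p2Mat)
ud = np.array([0.3, 0.1, 0.05])  # fluid acceleration at the node
print(Amat @ ud)                 # only the transverse components load the strip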
def calcLinearizedTerms(self, Xi):
'''The iteration step of the FOWT's dynamics solution. This calculates the amplitude-dependent linearized coefficients.
Xi : complex array
system response (just for this FOWT) - displacement and rotation complex amplitudes [m, rad]
'''
rho = self.rho_water
g = self.g
# The linearized coefficients to be calculated
B_hydro_drag = np.zeros([6,6]) # hydrodynamic damping matrix (just linearized viscous drag for now) [N-s/m, N-s, N-s-m]
F_hydro_drag = np.zeros([6,self.nw],dtype=complex) # excitation force/moment complex amplitudes vector [N, N-m]
# loop through each member
for mem in self.memberList:
circ = mem.shape=='circular' # convenience boolean for circular vs. rectangular cross sections
# loop through each node of the member
for il in range(mem.ns):
# node displacement, velocity, and acceleration (each [3 x nw])
drnode, vnode, anode = getVelocity(mem.r[il,:], Xi, self.w) # get node complex velocity spectrum based on platform motions and relative position from PRP
# only process hydrodynamics if this node is submerged
if mem.r[il,2] < 0:
# interpolate coefficients for the current strip
Cd_q = np.interp( mem.ls[il], mem.stations, mem.Cd_q )
Cd_p1 = np.interp( mem.ls[il], mem.stations, mem.Cd_p1 )
Cd_p2 = np.interp( mem.ls[il], mem.stations, mem.Cd_p2 )
Cd_End = np.interp( mem.ls[il], mem.stations, mem.Cd_End)
# ----- compute side effects ------------------------
# member acting area assigned to this node in each direction
a_i_q = np.pi*mem.ds[il]*mem.dls[il] if circ else 2*(mem.ds[il,0]+mem.ds[il,1])*mem.dls[il] # circumference/perimeter times strip length (rectangular case uses both side lengths)
a_i_p1 = mem.ds[il]*mem.dls[il] if circ else mem.ds[il,0] *mem.dls[il]
a_i_p2 = mem.ds[il]*mem.dls[il] if circ else mem.ds[il,1] *mem.dls[il]
# water relative velocity over node (complex amplitude spectrum) [3 x nw]
vrel = mem.u[il,:] - vnode
# break out velocity components in each direction relative to member orientation [nw]
vrel_q = vrel*mem.q[ :,None] # (the ,None is for broadcasting q across all frequencies in vrel)
vrel_p1 = vrel*mem.p1[:,None]
vrel_p2 = vrel*mem.p2[:,None]
# get RMS of relative velocity component magnitudes (real-valued)
vRMS_q = getRMS(vrel_q , self.dw)
vRMS_p1 = getRMS(vrel_p1, self.dw)
vRMS_p2 = getRMS(vrel_p2, self.dw)
#print(f" {vRMS_q:5.2f} {vRMS_p1:5.2f} {vRMS_p2:5.2f}")
# linearized damping coefficients in each direction relative to member orientation [not explicitly frequency dependent...] (this goes into damping matrix)
Bprime_q = np.sqrt(8/np.pi) * vRMS_q * 0.5*rho * a_i_q * Cd_q
Bprime_p1 = np.sqrt(8/np.pi) * vRMS_p1 * 0.5*rho * a_i_p1 * Cd_p1
Bprime_p2 = np.sqrt(8/np.pi) * vRMS_p2 * 0.5*rho * a_i_p2 * Cd_p2
Bmat = Bprime_q*mem.qMat + Bprime_p1*mem.p1Mat + Bprime_p2*mem.p2Mat # damping matrix for the node based on linearized drag coefficients
B_hydro_drag += translateMatrix3to6DOF(Bmat, mem.r[il,:]) # add to global damping matrix for Morison members
for i in range(self.nw):
mem.F_exc_drag[il,:,i] = np.matmul(Bmat, mem.u[il,:,i]) # get local 3d drag excitation force complex amplitude for each frequency [3 x nw]
F_hydro_drag[:,i] += translateForce3to6DOF(mem.F_exc_drag[il,:,i], mem.r[il,:]) # add to global excitation vector (frequency dependent)
# ----- add end/axial effects for added mass, and excitation including dynamic pressure ------
# note : v_a and a_i work out to zero for non-tapered sections or non-end sections
# end/axial area (removing sign for use as drag)
if circ:
a_i = np.abs(np.pi*mem.ds[il]*mem.drs[il])
else:
a_i = np.abs((mem.ds[il,0]+mem.drs[il,0])*(mem.ds[il,1]+mem.drs[il,1]) - (mem.ds[il,0]-mem.drs[il,0])*(mem.ds[il,1]-mem.drs[il,1]))
Bprime_End = np.sqrt(8/np.pi)*vRMS_q*0.5*rho*a_i*Cd_End
#print(f" {a_i:5.2f} {vRMS_q:5.2f} {Bprime_End:5.2f}")
Bmat = Bprime_End*mem.qMat #
B_hydro_drag += translateMatrix3to6DOF(Bmat, mem.r[il,:]) # add to global damping matrix for Morison members
for i in range(self.nw): # for each wave frequency...
F_exc_drag_temp = np.matmul(Bmat, mem.u[il,:,i]) # local drag excitation force complex amplitude in x,y,z
mem.F_exc_drag[il,:,i] += F_exc_drag_temp # add to stored member force vector
F_hydro_drag[:,i] += translateForce3to6DOF(F_exc_drag_temp, mem.r[il,:]) # add to global excitation vector (frequency dependent)
# save the arrays internally in case there's ever a need for the FOWT to solve its own latest dynamics
self.B_hydro_drag = B_hydro_drag
self.F_hydro_drag = F_hydro_drag
# return the linearized coefficients
return B_hydro_drag, F_hydro_drag
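# Sketch of the stochastic (Borgman-type) drag linearization used above: the
# quadratic drag 0.5*rho*Cd*A*|v|*v is replaced by an equivalent linear damping
# B' = sqrt(8/pi) * sigma_v * 0.5*rho*Cd*A, where sigma_v is the RMS relative
# velocity. The amplitude-to-RMS convention of getRMS is an assumption here.
import numpy as np

rho, Cd, A = 1025.0, 1.0, 1.0                      # illustrative values
v_amp = np.array([0.2+0.1j, 0.4-0.2j, 0.1+0.0j])   # complex velocity amplitudes
sigma_v = np.sqrt(0.5*np.sum(np.abs(v_amp)**2))    # assumed amplitude convention
Bprime = np.sqrt(8/np.pi) * sigma_v * 0.5*rho*Cd*A
print(Bprime)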
def is_valid_path(path: str) -> Path:
"""
Returns a Path object if a file or directory exists at the specified path.
Raises exception otherwise.
"""
path = Path(path).resolve()
if not path.exists():
raise FileNotFoundError(path, "is not a valid file or directory")
return path
"""
Returns a Path object if a file or directory exists at the specified path.
Raises exception otherwise.
"""
path = Path(path).resolve()
if not path.exists():
raise FileNotFoundError(path, "is not a valid file or directory")
return path |
def timestamp_zip(string: str) -> str:
"""
Adds the current date and a .zip ending to the given string.
Example:
> timestamp("foo")
"foo-1997-09-14-1253.zip"
"""
timestamp = datetime.today().strftime("%Y-%m-%d-%H%M")
return f"{string}-{timestamp}.zip" | def timestamp_zip(string: str) -> str:
"""
Adds the current date and a .zip ending to the given string.
Example:
> timestamp("foo")
"foo-1997-09-14-1253.zip"
"""
timestamp = datetime.today().strftime("%Y-%m-%d-%H%M")
return f"{string}-{timestamp}.zip" |
def backup_file(path: Path, save_dir: Path) -> None:
"""
Given the path to a file or directory, creates a zip backup at save_dir (the full path of the output zip file).
"""
archive = zipfile.ZipFile(save_dir, "w", zipfile.ZIP_DEFLATED)
restore_path = str(path.parent.absolute())
archive.writestr("restore_path.txt", restore_path)
# handle files and directories; strip paths of files from names
if path.is_file():
archive.write(path, str(path)[len(restore_path):])
else:
for p in path.rglob("*"):
archive.write(p, str(p)[len(restore_path):])
archive.close()
"""
Given the path to a file or directory, creates a zip backup to the given location.
"""
archive = zipfile.ZipFile(save_dir, "w", zipfile.ZIP_DEFLATED)
restore_path = str(path.parent.absolute())
archive.writestr("restore_path.txt", restore_path)
# handle files and directories; strip paths of files from names
if path.is_file():
archive.write(path, str(path)[len(restore_path):])
else:
for p in path.rglob("*"):
archive.write(p, str(p)[len(restore_path):])
archive.close() |
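# Hypothetical usage sketch (paths are examples, not from the source): back up
# ./world into ./backups/world-<timestamp>.zip.
# save_dir = Path("backups")
# save_dir.mkdir(exist_ok=True)
# backup_file(Path("world"), save_dir / timestamp_zip("world"))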
def backup_files(paths: list, save_dir: Path) -> None:
"""
Given the list of file paths, creates zip backups to the given location.
"""
for f in paths:
save_path = save_dir / timestamp_zip(f.name)
backup_file(f, save_path)
"""
Given the list of file paths, creates zip backups to the given location.
"""
for f in paths:
save_path = save_dir / timestamp_zip(f.name)
backup_file(f, save_path) |
def restore_file(path: Path) -> None:
"""
Restores the file at the given path to its original location based on restore_path.txt.
"""
archive = zipfile.ZipFile(path, "r", zipfile.ZIP_DEFLATED)
restore_path = archive.read("restore_path.txt").decode() # decode bytes to str (replaces the fragile str(...)[2:-1] slicing hack)
for p in archive.namelist():
if p != "restore_path.txt":
archive.extract(p, restore_path)
archive.close()
"""
Restores the file at the given path to its original location based on restore_path.txt.
"""
archive = zipfile.ZipFile(path, "r", zipfile.ZIP_DEFLATED)
restore_path = str(archive.read("restore_path.txt"))[2:-1]
for p in archive.namelist():
if p != "restore_path.txt":
archive.extract(p, restore_path)
archive.close() |
def restore_files(paths: list) -> None:
"""
Restores the files at the given paths to their original locations based on restore_path.txt.
"""
if not paths:
print("No files selected. Are you sure there are files at the given time?")
for p in paths:
restore_file(p)
"""
Restores the files at the given paths to their original locations based on restore_path.txt.
"""
if not paths:
print("No files selected. Are you sure there are files at the given time?")
for p in paths:
restore_file(p) |
def is_valid_date(string: str) -> bool:
"""
Returns whether the date is valid or not.
"""
return bool(re.compile(r"^(\d{4}|\d{4}-\d{2}|\d{4}-\d{2}-\d{2}|\d{4}-\d{2}-\d{2}-\d{2}|\d{4}-\d{2}-\d{2}-\d{4})$").match(string))
"""
Returns whether the date is valid or not.
"""
return re.compile(r"^(\d{4}|\d{4}-\d{2}|\d{4}-\d{2}-\d{2}|\d{4}-\d{2}-\d{2}-\d{2}|\d{4}-\d{2}-\d{2}-\d{4})$").match(string) |
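# Formats accepted by the regex above: year, year-month, year-month-day, and
# day plus a 2- or 4-digit time component, e.g.:
# for s in ("2021", "2021-03", "2021-03-14", "2021-03-14-12", "2021-03-14-1230"):
#     assert is_valid_date(s)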
def filter_by_date(date: str, save_dir: Path) -> list:
"""
Returns list of paths that match the given date range, only including the latest entries per file.
"""
# remove ambiguity between year and time; I assure you, dear reader, this was the lesser evil
if len(date) == 4:
date += "-"
# iterate through all files which match the date requirement
# since alphabetical happens to be chronological when list is sorted, grab the last backup from each file
filtered_paths = []
last = None
for curr in sorted([p for p in save_dir.glob("*") if date in str(p.stem)[-15:]]):
if last and str(last.stem)[:-15] != str(curr.stem)[:-15]:
filtered_paths.append(last)
last = curr
if last:
filtered_paths.append(last)
return filtered_paths
"""
Returns list of paths that match the given date range, only including the latest entries per file.
"""
# remove ambiguity between year and time; I assure you, dear reader, this was the lesser evil
if len(date) == 4:
date += "-"
# iterate through all files which match the date requirement
# since alphabetical happens to be chronological when list is sorted, grab the last backup from each file
filtered_paths = []
last = None
for curr in sorted([p for p in save_dir.glob("*") if date in str(p.stem)[-15:]]):
if last and str(last.stem)[:-15] != str(curr.stem)[:-15]:
filtered_paths.append(last)
last = curr
if last:
filtered_paths.append(last)
return filtered_paths |
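# Hypothetical usage sketch (directory name is an example): restore the latest
# backup of each file from March 2021.
# latest = filter_by_date("2021-03", Path("backups"))
# restore_files(latest)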
def find_world_zips(paths: list) -> list:
"""
Returns input list with only world zips
"""
world_zips = []
world_checker = re.compile(r"^[^/]+/level\.dat$") # match level.dat exactly one directory deep (dot escaped)
for p in paths:
archive = zipfile.ZipFile(p, "r", zipfile.ZIP_DEFLATED)
for z in archive.namelist():
if world_checker.match(z):
world_zips.append(p)
break
archive.close()
return world_zips
"""
Returns input list with only world zips
"""
world_zips = []
world_checker = re.compile(r"^[^\/]+/level.dat$")
for p in paths:
archive = zipfile.ZipFile(p, "r", zipfile.ZIP_DEFLATED)
for z in archive.namelist():
if world_checker.match(z):
world_zips.append(p)
break
archive.close()
return world_zips |
def find_setting_zips(paths: list) -> list:
"""
Returns input list with only setting zips
"""
return list(set(paths) - set(find_world_zips(paths)))
"""
Returns input list with only setting zips
"""
return list(set(paths) - set(find_world_zips(paths))) |
def create_folder(self, name, parent_folder_id=0):
"""Create a folder
If the folder exists, a BoxError will be raised.
Args:
name (str): Name of the folder.
parent_folder_id (int): ID of the folder where to create the new one.
Returns:
dict. Response from Box.
Raises:
BoxError: An error response is returned from Box (status_code >= 400).
BoxHttpResponseError: Response from Box is malformed.
requests.exceptions.*: Any connection related problem.
"""
return self.__request("POST", "folders",
data={ "name": name,
"parent": {"id": str(parent_folder_id)} }) | def create_folder(self, name, parent_folder_id=0):
"""Create a folder
If the folder exists, a BoxError will be raised.
Args:
folder_id (int): Name of the folder.
parent_folder_id (int): ID of the folder where to create the new one.
Returns:
dict. Response from Box.
Raises:
BoxError: An error response is returned from Box (status_code >= 400).
BoxHttpResponseError: Response from Box is malformed.
requests.exceptions.*: Any connection related problem.
"""
return self.__request("POST", "folders",
data={ "name": name,
"parent": {"id": str(parent_folder_id)} }) |
def obj_ref(obj):
"""Returns a tuple identifying a category/event/contrib/subcontrib"""
from indico_livesync.models.queue import EntryType
if isinstance(obj, Category):
ref = {'type': EntryType.category, 'category_id': obj.id}
elif isinstance(obj, Event):
ref = {'type': EntryType.event, 'event_id': obj.id}
elif isinstance(obj, Session):
ref = {'type': EntryType.session, 'session_id': obj.id}
elif isinstance(obj, Contribution):
ref = {'type': EntryType.contribution, 'contrib_id': obj.id}
elif isinstance(obj, SubContribution):
ref = {'type': EntryType.subcontribution, 'subcontrib_id': obj.id}
else:
raise ValueError('Unexpected object: {}'.format(obj.__class__.__name__))
return ImmutableDict(ref)
"""Returns a tuple identifying a category/event/contrib/subcontrib"""
from indico_livesync.models.queue import EntryType
if isinstance(obj, Category):
ref = {'type': EntryType.category, 'category_id': obj.id}
elif isinstance(obj, Event):
ref = {'type': EntryType.event, 'event_id': obj.id}
elif isinstance(obj, Session):
ref = {'type': EntryType.session, 'session_id': obj.id}
elif isinstance(obj, Contribution):
ref = {'type': EntryType.contribution, 'contrib_id': obj.id}
elif isinstance(obj, SubContribution):
ref = {'type': EntryType.subcontribution, 'subcontrib_id': obj.id}
else:
raise ValueError('Unexpected object: {}'.format(obj.__class__.__name__))
return ImmutableDict(ref) |
def obj_deref(ref):
"""Returns the object identified by `ref`"""
from indico_livesync.models.queue import EntryType
if ref['type'] == EntryType.category:
return Category.get_or_404(ref['category_id'])
elif ref['type'] == EntryType.event:
return Event.get_or_404(ref['event_id'])
elif ref['type'] == EntryType.session:
return Session.get_or_404(ref['session_id'])
elif ref['type'] == EntryType.contribution:
return Contribution.get_or_404(ref['contrib_id'])
elif ref['type'] == EntryType.subcontribution:
return SubContribution.get_or_404(ref['subcontrib_id'])
else:
raise ValueError('Unexpected object type: {}'.format(ref['type']))
"""Returns the object identified by `ref`"""
from indico_livesync.models.queue import EntryType
if ref['type'] == EntryType.category:
return Category.get_or_404(ref['category_id'])
elif ref['type'] == EntryType.event:
return Event.get_or_404(ref['event_id'])
elif ref['type'] == EntryType.session:
return Session.get_or_404(ref['session_id'])
elif ref['type'] == EntryType.contribution:
return Contribution.get_or_404(ref['contrib_id'])
elif ref['type'] == EntryType.subcontribution:
return SubContribution.get_or_404(ref['subcontrib_id'])
else:
raise ValueError('Unexpected object type: {}'.format(ref['type'])) |
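# obj_deref is the inverse of obj_ref; a hypothetical round trip (the ID value
# is an example) assuming the referenced event exists:
# ref = obj_ref(event)            # {'type': EntryType.event, 'event_id': 42}
# assert obj_deref(ref) == event  # resolves back to the same object (else 404)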
def clean_old_entries():
"""Deletes obsolete entries from the queues"""
from indico_livesync.plugin import LiveSyncPlugin
from indico_livesync.models.queue import LiveSyncQueueEntry
queue_entry_ttl = LiveSyncPlugin.settings.get('queue_entry_ttl')
if not queue_entry_ttl:
return
expire_threshold = now_utc() - timedelta(days=queue_entry_ttl)
LiveSyncQueueEntry.find(LiveSyncQueueEntry.processed,
LiveSyncQueueEntry.timestamp < expire_threshold).delete(synchronize_session='fetch')
"""Deletes obsolete entries from the queues"""
from indico_livesync.plugin import LiveSyncPlugin
from indico_livesync.models.queue import LiveSyncQueueEntry
queue_entry_ttl = LiveSyncPlugin.settings.get('queue_entry_ttl')
if not queue_entry_ttl:
return
expire_threshold = now_utc() - timedelta(days=queue_entry_ttl)
LiveSyncQueueEntry.find(LiveSyncQueueEntry.processed,
LiveSyncQueueEntry.timestamp < expire_threshold).delete(synchronize_session='fetch') |
def compound_id(obj):
"""Generate a hierarchical compound ID, separated by dots."""
if isinstance(obj, (Category, Session)):
raise TypeError('Compound IDs are not supported for this entry type')
elif isinstance(obj, Event):
return unicode(obj.id)
elif isinstance(obj, Contribution):
return '{}.{}'.format(obj.event_id, obj.id)
elif isinstance(obj, SubContribution):
return '{}.{}.{}'.format(obj.contribution.event_id, obj.contribution_id, obj.id)
"""Generate a hierarchical compound ID, separated by dots."""
if isinstance(obj, (Category, Session)):
raise TypeError('Compound IDs are not supported for this entry type')
elif isinstance(obj, Event):
return unicode(obj.id)
elif isinstance(obj, Contribution):
return '{}.{}'.format(obj.event_id, obj.id)
elif isinstance(obj, SubContribution):
return '{}.{}.{}'.format(obj.contribution.event_id, obj.contribution_id, obj.id) |
def distance(point_x, point_y):
'''
To calculate the distance between point_x and point_y
'''
x2 = (point_x.x-point_y.x)**2
y2 = (point_x.y-point_y.y)**2
z2 = (point_x.z-point_y.z)**2
return math.sqrt(x2+y2+z2)
def unit(point_z):
'''
To calculate the unit vector of point_z
'''
# print (point_z)
magnitude = math.sqrt(point_z.x**2 + point_z.y**2 + point_z.z**2)
if magnitude:
point_z.x = point_z.x/magnitude
point_z.y = point_z.y/magnitude
point_z.z = point_z.z/magnitude
return point_z,magnitude
else:
return point_z,magnitude
def check_distance(r,point_x,point_y,check_T_points=0):
'''
Return True if point_x and point_y are at least r apart (within a small tolerance); optionally log near-threshold "T point" pairs when check_T_points is set
'''
distance = math.sqrt((point_x.x-point_y.x)**2+(point_x.y-point_y.y)**2+(point_x.z-point_y.z)**2)
#print (distance)# 52.076992648 52.0769926525
if distance >= (r-0.001):
if check_T_points:
if distance < 52.1 and (point_y.index-1)//2 != point_x.index:
global T_points_file
T_points_file.write(str(point_y)+"\n")
T_points_file.write(str(point_x)+"\n")
global T_y_points_array
T_y_points_array.append(point_y)
global T_x_points_array
T_x_points_array.append(point_x)
global T_points_count
T_points_count += 1
else:
return True
else:
        return False
Python | def check_overlap(r,point_x,point_y):
'''
    Check whether the distance between the centers of two trimers is at most r prime (71.4); returns 1 if so, else 0
'''
distance = math.sqrt((point_x.x-point_y.x)**2+(point_x.y-point_y.y)**2+(point_x.z-point_y.z)**2)
if distance <= 71.4:
return 1
else:
        return 0
Python | def check_everydistance(r,point_x,K_keepers):
'''
    Check the distance between each point in K_keepers and point_x
'''
for i in K_keepers:
if not check_distance(r,i,point_x):
return False
for i in K_keepers:
check_distance(r,i, point_x, check_T_points=1)
count = 0
for i in K_keepers:
count += check_overlap(r,i, point_x)
if count != 1:
return False
    return True
Python | def plot_eigs(eigs, **kwargs):
"""
Plot the provided eigenvalues (of the dynamics operator A).
Args:
        eigs (:obj:`ndarray` of complex): Eigenvalues to plot.
**kwargs: kwargs of matplotlib.pyplot.subplots
Returns:
(tuple): Tuple containing:
fig: figure object
ax: axes object
"""
xlim = kwargs.pop('xlim', [-1.1, 1.1])
    ylim = kwargs.pop('ylim', [-1.1, 1.1])
fig, ax = plt.subplots(1, **kwargs)
ax.set_aspect('equal'), ax.set_xlim(xlim), ax.set_ylim(ylim)
ax.scatter(eigs.real, eigs.imag)
ax.add_artist(plt.Circle((0, 0), 1, color='k', linestyle='--', fill=False))
    return fig, ax
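# Usage sketch (added for illustration; assumes numpy as np and matplotlib.pyplot as
# plt are imported, as in the function above): plot the eigenvalues of a random 4x4
# matrix against the unit circle to judge discrete-time stability.
# A = np.random.randn(4, 4) / 2
# fig, ax = plot_eigs(np.linalg.eigvals(A), figsize=(4, 4))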
Python | def delay_embed(X, shift):
"""Delay-embed the matrix X with measurements from future times.
Args:
X (:obj:`ndarray` of float): Data matrix with columns storing states at sequential time measurements.
shift (int): Number of future times copies to augment to the current time state.
Returns:
        (:obj:`ndarray` of float): The function maps (d, t) to (shift+1, d, t-shift), which is then stacked
            into ((shift+1)*d, t-shift).
"""
if X.ndim != 2:
raise ValueError('In delay_embed, invalid X matrix shape of ' + str(X.shape))
_, T = X.shape
    return np.vstack([X[:, i:(T - shift) + i] for i in range(shift + 1)])
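# Shape check (added for illustration; assumes numpy as np): delay-embedding a (d, T)
# matrix with `shift` future copies yields ((shift + 1) * d, T - shift).
# X = np.arange(10).reshape(2, 5)   # d=2 measurements, T=5 time samples
# delay_embed(X, 2).shape           # -> (6, 3)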
Python | def dst_from_cts(cA, cB, dt):
"""
Convert constant continuous state space matrices to discrete
matrices with time step dt using:
exp(dt*[[cA, cB], = [[dA, dB],
[0, 0 ]]) [0, 1 ]]
Require cA in R^(na x na) and cB in R^(na x nb). The zero and
identity components make the matrix square.
Args:
cA (:obj:`ndarray` of float): Continuous A.
cB (:obj:`ndarray` of float): Continuous B.
dt (float): Time step.
Returns:
(tuple): tuple containing:
(:obj:`ndarray` of float): discrete A.
(:obj:`ndarray` of float): discrete B.
"""
na, _ = cA.shape
_, nb = cB.shape
cM = np.block([[cA, cB],
[np.zeros([nb, na]), np.zeros([nb, nb])]])
dM = expm(cM * dt)
    return dM[:na, :na], dM[:na, na:]
Python | def cts_from_dst(dA, dB, dt):
"""
Convert discrete state space matrices with time step dt to
continuous matrices by inverting
exp(dt*[[cA, cB], = [[dA, dB],
[0, 0 ]]) [0, 1 ]]
Require dA in R^(na x na) and dB in R^(na x nb). The zero and
identity components make the matrix square.
Args:
dA (:obj:`ndarray` of float): discrete A.
dB (:obj:`ndarray` of float): discrete B.
dt (float): Time step.
Returns:
(tuple): tuple containing:
(:obj:`ndarray` of float): Continuous A.
(:obj:`ndarray` of float): Continuous B.
"""
na, _ = dA.shape
_, nb = dB.shape
dM = np.block([[dA, dB],
[np.zeros([nb, na]), np.identity(nb)]])
cM = logm(dM)/dt
    return cM[:na, :na], cM[:na, na:]
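# Round-trip sketch (added for illustration; assumes numpy as np and expm/logm from
# scipy.linalg, as used above): discretizing a double integrator and converting back
# recovers the continuous pair.
# cA = np.array([[0., 1.], [0., 0.]]); cB = np.array([[0.], [1.]])
# dA, dB = dst_from_cts(cA, cB, 0.1)    # dA = [[1, 0.1], [0, 1]], dB = [[0.005], [0.1]]
# cA2, cB2 = cts_from_dst(dA, dB, 0.1)  # equals cA, cB up to numerical error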
Python | def time_spectrum(self, ts, system='discrete'):
"""Returns a continuous approximation to the time dynamics of A.
Note that A_dst = e^(A_cts dt). Suppose (operator, eigs) pairs are denoted (A_dst, Y) for the discrete case
        and (A_cts, Omega) for the continuous case. The eigenvalue correspondence is Omega = log(Y)/dt.
Args:
ts (:obj:`ndarray` of float): Times.
system ({'continuous', 'discrete'}): default 'discrete'.
Returns:
:obj:`ndarray` of float: Evaluations of modes at ts.
"""
if np.isscalar(ts):
# Cast eigs to complex numbers for logarithm
if system == 'discrete':
omega = np.log(self.eigs + 0j) / self.dt
elif system == 'continuous':
omega = self.eigs + 0j
else:
raise ValueError('In time_spectrum, invalid system value.')
return np.exp(omega * (ts - self.t0))
else:
        return np.array([self.time_spectrum(it, system=system) for it in ts]).T
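# Standalone check of the eigenvalue correspondence above (added for illustration;
# assumes numpy as np): for a discrete eigenvalue y = e^(omega*dt), log(y)/dt
# recovers omega on the principal branch (|Im(omega)*dt| < pi), so e^(omega*t)
# matches the discrete powers y^n at t = n*dt.
# dt, omega = 0.1, -0.5 + 2j
# np.allclose(np.log(np.exp(omega * dt)) / dt, omega)   # -> True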
Python | def predict_dst(self, ts=None, x0=None):
"""Predict the future state using continuous approximation to the discrete A.
Args:
ts (:obj:`ndarray` of float): Array of time-steps to predict. default self.orig_timesteps.
x0 (:obj:`ndarray` of float): The initial value. default self.x0.
Returns:
:obj:`ndarray` of float: Predicted state for each control input.
"""
x0 = self.X1[:, 0] if x0 is None else x0
ts = self.orig_timesteps if ts is None else ts
    return self._predict(ts, x0, 'discrete')
Python | def predict_cts(self, ts=None, x0=None):
"""Predict the future state using the continuous operator A.
Args:
ts (:obj:`ndarray` of float): Array of time-steps to predict. default self.orig_timesteps.
x0 (:obj:`ndarray` of float): The initial value. default self.x0.
Returns:
:obj:`ndarray` of float: Predicted state for each control input.
"""
x0 = self.X1[:, 0] if x0 is None else x0
ts = self.orig_timesteps if ts is None else ts
    return self._predict(ts, x0, 'continuous')
Python | def predict_dst(self, control=None, x0=None):
""" Predict the future state using discrete evolution.
Evolve the system from X0 as long as control is available, using
the discrete evolution X_2 = A X_1 + B u_1.
    Default behavior (control=None) is to use the original control. (If the response of the underlying A
    alone is desired, pass a zeros_like control array spanning the desired time.)
Args:
control (:obj:`ndarray` of float): The control signal.
x0 (:obj:`ndarray` of float): The initial value.
Returns:
:obj:`ndarray` of float: Predicted state for each control input.
"""
U = self.U if control is None else control
xt = self.X1[:, 0] if x0 is None else x0
res = [xt]
for ut in U[:, :-1].T:
xt_1 = self.A @ xt + self.B @ ut
xt = xt_1
res.append(xt_1)
    return np.array(res).T
Python | def predict_cts(self, control=None, x0=None, dt=None):
""" Predict the future state using continuous evolution.
Evolve the system from X0 as long as control is available, using
the continuous evolution while u is constant,
X_dot = A X + B u
x(t+dt) = e^{dt A}(x(t) + dt B u(t))
    Default behavior (control=None) is to use the original control. (If the response of the underlying A
    alone is desired, pass a zeros_like control array spanning the desired time.) Be sure that dt matches the training dt if
using delay embeddings.
Args:
control (:obj:`ndarray` of float): The control signal.
A zero-order hold is assumed between time steps.
The dt must match the training data if time-delays are used.
x0 (:obj:`ndarray` of float): The initial value.
dt (float): The time-step between control inputs.
Returns:
:obj:`ndarray` of float: Predicted state for each control input.
"""
U = self.U if control is None else control
dt = self.dt if dt is None else dt
xt = self.X1[:, 0] if x0 is None else x0
res = [xt]
for ut in U[:, :-1].T:
xt_1 = expm(dt * self.A) @ (xt + dt * self.B @ ut)
xt = xt_1
res.append(xt_1)
    return np.array(res).T
Python | def predict_dst(self, control=None, x0=None):
""" Predict the future state using discrete evolution.
Evolve the system from X0 as long as control is available, using
the discrete evolution:
x_1 = A x_0 + B (u.x_0)
= [A B] [x_0, u.x_0]^T
Args:
control (:obj:`ndarray` of float): The control signal.
x0 (): The initial value.
Returns:
:obj:`ndarray` of float: Predicted state for each control input.
"""
control = self.U if control is None else control
xt = self.X1[:, 0] if x0 is None else x0 # Flat array
res = [xt]
for t in range(control.shape[1] - 1):
# Outer product then flatten to correctly combine the different
# times present due to time-delays. That is, make sure that
# u(t)'s multiply x(t)'s
# _ct => (time-delays + 1) x (number of controls)
# _xt => (time-delays + 1) x (measured dimensions)
# _ups_t => (time-delays + 1) x (controls) x (measurements)
# Flatten to get the desired vector.
_ct = control[:, t].reshape(self.shift + 1, -1)
_xt = xt.reshape(self.shift + 1, -1)
ups_t = np.einsum('sc,sm->scm', _ct, _xt).flatten()
xt_1 = self.A @ xt + self.B @ ups_t
xt = xt_1
res.append(xt_1)
    return np.array(res).T
Python | def predict_cts(self, control=None, x0=None, dt=None):
""" Predict the future state using continuous evolution.
Evolve the system from X0 as long as control is available, using
the continuous evolution while u is constant,
x_{t+1} = e^{A dt + u B dt } x_t
Args:
control (:obj:`ndarray` of float): The control signal.
A zero-order hold is assumed between time steps.
The dt must match the training data if time-delays are used.
x0 (:obj:`ndarray` of float): The initial value.
dt (float): The time-step between control inputs.
Returns:
:obj:`ndarray` of float: Predicted state for each control input.
"""
control = self.U if control is None else control
dt = self.dt if dt is None else dt
xt = self.X1[:, 0] if x0 is None else x0 # Flat array
# store useful dimensions
delay_dim = self.shift + 1
control_dim = self.U.shape[0] // delay_dim
measure_1_dim = self.X1.shape[0] // delay_dim
to_dim = self.X2.shape[0]
res = [xt]
for t in range(control.shape[1] - 1):
# Correctly combine u(t) and B(t)
# Initial:
# B <= (time-delays+1 x measurements_2) x (time-delays+1 x controls x measurements_1)
# Reshape:
# B => (time-delays+1 x measurements_2) x (time-delays+1) x (controls) x (measurements_1)
# _ct => (time-delays+1) x (controls)
# _uBt => (time-delays+1 x measurements_2) x (time-delays+1) x (measurements_1)
# => (time-delays+1 x measurements_2) x (time-delays+1 x measurements_1)
# Notice that _uBt is formed by a sum over all controls in order to act on the
# state xt which has dimensions of (delays x measurements_1).
_uBt = np.einsum('ascm,sc->asm',
self.B.reshape(to_dim, delay_dim, control_dim, measure_1_dim),
control[:, t].reshape(delay_dim, control_dim)
).reshape(to_dim, delay_dim * measure_1_dim)
xt_1 = expm((self.A + _uBt) * dt) @ xt
xt = xt_1
res.append(xt_1)
    return np.array(res).T
Python | def predict_dst(self, control=None, x0=None):
""" Predict the future state using discrete evolution.
Evolve the system from X0 as long as control is available, using
the discrete evolution,
x_1 = A x_0 + B (u*x_0) + D u
= [A B D] [x_0, u*x_0, u ]^T
Args:
control (:obj:`ndarray` of float): The control signal.
x0 (): The initial value.
Returns:
:obj:`ndarray` of float: Predicted state for each control input.
"""
control = self.U if control is None else control
xt = self.X1[:, 0] if x0 is None else x0 # Flat array
res = [xt]
for t in range(control.shape[1] - 1):
# Outer product then flatten to correctly combine the different
# times present due to time-delays. That is, make sure that
# u(t)'s multiply x(t)'s
# _ct => (time-delays + 1) x (number of controls)
# _xt => (time-delays + 1) x (measured dimensions)
# _ups_t => (time-delays + 1) x (controls) x (measurements)
# Flatten to get the desired vector.
_ct = control[:, t].reshape(self.shift + 1, -1)
_xt = xt.reshape(self.shift + 1, -1)
ups_t = np.einsum('sc,sm->scm', _ct, _xt).flatten()
xt_1 = self.A @ xt + self.B @ ups_t + self.D @ control[:, t]
xt = xt_1
res.append(xt_1)
    return np.array(res).T
Python | def predict_cts(self, control=None, x0=None, dt=None):
""" Predict the future state using continuous evolution.
Evolve the system from X0 as long as control is available, using
the continuous evolution while u is constant,
        x_{t+1} = e^{A dt + u B dt} (x_t + dt * D u_t)
Args:
control (:obj:`ndarray` of float): The control signal.
A zero-order hold is assumed between time steps.
The dt must match the training data if time-delays are used.
x0 (:obj:`ndarray` of float): The initial value.
dt (float): The time-step between control inputs.
Returns:
:obj:`ndarray` of float: Predicted state for each control input.
"""
control = self.U if control is None else control
dt = self.dt if dt is None else dt
xt = self.X1[:, 0] if x0 is None else x0 # Flat array
# store useful dimensions
delay_dim = self.shift + 1
control_dim = self.U.shape[0] // delay_dim
measure_1_dim = self.X1.shape[0] // delay_dim
to_dim = self.X2.shape[0]
res = [xt]
for t in range(control.shape[1] - 1):
# Correctly combine u(t) and B(t)
# Initial:
# B <= (time-delays+1 x measurements_2) x (time-delays+1 x controls x measurements_1)
# Reshape:
# B => (time-delays+1 x measurements_2) x (time-delays+1) x (controls) x (measurements_1)
# _ct => (time-delays+1) x (controls)
# _uBt => (time-delays+1 x measurements_2) x (time-delays+1) x (measurements_1)
# => (time-delays+1 x measurements_2) x (time-delays+1 x measurements_1)
# Notice that _uBt is formed by a sum over all controls in order to act on the
# state xt which has dimensions of (delays x measurements_1).
_uBt = np.einsum('ascm,sc->asm',
self.B.reshape(to_dim, delay_dim, control_dim, measure_1_dim),
control[:, t].reshape(delay_dim, control_dim)
).reshape(to_dim, delay_dim * measure_1_dim)
xt_1 = expm(dt * (self.A + _uBt)) @ (xt + dt * self.D @ control[:, t])
xt = xt_1
res.append(xt_1)
    return np.array(res).T
Python | def evaluate_network_multiple(self,inputs):
'''
Evaluate network using multiple sets of inputs
Args:
inputs (numpy array of floats): Array of network inputs to be evaluated.
Returns:
(numpy array of floats): Network outputs for each set of inputs
'''
numLayers = self.numLayers
inputSize = self.inputSize
outputSize = self.outputSize
biases = self.biases
weights = self.weights
inputs = np.array(inputs).T
# Prepare the inputs to the neural network
numInputs = inputs.shape[1]
inputsNorm = np.zeros((inputSize,numInputs))
for i in range(inputSize):
for j in range(numInputs):
if inputs[i,j]<self.mins[i]:
inputsNorm[i,j] = (self.mins[i]-self.means[i])/self.ranges[i]
elif inputs[i,j] > self.maxes[i]:
inputsNorm[i,j] = (self.maxes[i]-self.means[i])/self.ranges[i]
else:
inputsNorm[i,j] = (inputs[i,j]-self.means[i])/self.ranges[i]
# Evaluate the neural network
for layer in range(numLayers-1):
inputsNorm = np.maximum(np.dot(weights[layer],inputsNorm)+biases[layer].reshape((len(biases[layer]),1)),0)
outputs = np.dot(weights[-1],inputsNorm)+biases[-1].reshape((len(biases[-1]),1))
# Undo output normalization
for i in range(outputSize):
for j in range(numInputs):
outputs[i,j] = outputs[i,j]*self.ranges[-1]+self.means[-1]
    return outputs.T
Python | def is_word(w):
'''Return False if a word should be excluded'''
if w.isdigit(): return False
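    # Note (added): the \p{P} punctuation class below is supported by the third-party
    # 'regex' module, not the stdlib 're', so `match` is assumed to come from 'regex'.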
if all([match(r"\p{P}|`", l) for l in w]): return False
if len(w) < 3 and w not in short_words: return False
    return True
Python | def mad(x):
"""Calculate Mean Absolute Deviation
Not to be confused with commonly found 'Median' Absolute Deviation.
Parameters
----------
x : array_like
Input array or object that can be converted to an array.
Returns
-------
mad : scalar
Mean Absolute Deviation of x
"""
mean = np.mean(x)
deviation = x-mean
absolute_deviation = np.abs(deviation)
mad = np.mean(absolute_deviation)
    return mad
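# Worked example (added for illustration): for x = [1, 2, 3, 4] the mean is 2.5, the
# absolute deviations are [1.5, 0.5, 0.5, 1.5], so mad(x) returns their mean, 1.0.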
Python | def object_to_byte_tensor(obj, max_size=4094):
"""
Encode Python objects to PyTorch byte tensors
"""
assert max_size <= MAX_SIZE_LIMIT
byte_tensor = torch.zeros(max_size, dtype=torch.uint8)
obj_enc = pickle.dumps(obj)
obj_size = len(obj_enc)
if obj_size > max_size:
raise Exception(
f"objects too large: object size {obj_size}, max size {max_size}"
)
byte_tensor[0] = obj_size // 256
byte_tensor[1] = obj_size % 256
byte_tensor[2 : 2 + obj_size] = torch.ByteTensor(list(obj_enc))
    return byte_tensor
Python | def byte_tensor_to_object(byte_tensor, max_size=MAX_SIZE_LIMIT):
"""
Decode PyTorch byte tensors to Python objects
"""
assert max_size <= MAX_SIZE_LIMIT
obj_size = byte_tensor[0].item() * 256 + byte_tensor[1].item()
obj_enc = bytes(byte_tensor[2 : 2 + obj_size].tolist())
obj = pickle.loads(obj_enc)
    return obj
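# Round-trip sketch (added for illustration; assumes torch and pickle are imported
# and that MAX_SIZE_LIMIT is defined elsewhere in the module). The two-byte length
# header written by object_to_byte_tensor caps the pickled payload at 65535 bytes.
# payload = {"step": 7, "loss": 0.123}
# assert byte_tensor_to_object(object_to_byte_tensor(payload)) == payload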
Python | def suppress_output(is_master):
"""Suppress printing on the current device. Force printing with `force=True`."""
import builtins as __builtin__
builtin_print = __builtin__.print
def print(*args, **kwargs):
force = kwargs.pop("force", False)
if is_master or force:
builtin_print(*args, **kwargs)
__builtin__.print = print
import warnings
builtin_warn = warnings.warn
def warn(*args, **kwargs):
force = kwargs.pop("force", False)
if is_master or force:
builtin_warn(*args, **kwargs)
# Log warnings only once
warnings.warn = warn
warnings.simplefilter("once", UserWarning) | def suppress_output(is_master):
"""Suppress printing on the current device. Force printing with `force=True`."""
import builtins as __builtin__
builtin_print = __builtin__.print
def print(*args, **kwargs):
force = kwargs.pop("force", False)
if is_master or force:
builtin_print(*args, **kwargs)
__builtin__.print = print
import warnings
builtin_warn = warnings.warn
def warn(*args, **kwargs):
force = kwargs.pop("force", False)
if is_master or force:
builtin_warn(*args, **kwargs)
# Log warnings only once
warnings.warn = warn
warnings.simplefilter("once", UserWarning) |
Python | def classify(image_file, top_matches: int):
"""
Uses the neural net to classify the given image
:param image_file: the path to the image file
:param top_matches: the number of predictions to return
:return: a dict containing lists of class names, class ids and probabilities
"""
# Use network to get top predictions
category_name_list, class_id_list, probabilities_list = nw.predict(image_file, top_matches)
return {
'class_names': category_name_list,
'class_ids': class_id_list,
'probabilities': probabilities_list
    }
Python | def convert_file_path_to_url(file):
"""
Gets a file url given a path to an image file.
:param file: the path to the image file
:return: a URL friendly string that Flask can use to link to the image
"""
# Remove static prefix and update file path separators to URL style
file = file[len('static/'):].replace('\\', '/')
    return file
Python | def predict(self, image_path, topk=5):
"""
Predict the class (or classes) of an image using a trained deep learning model.
:param image_path:
:param topk: the number of classes to return
:return: tuple containing [0]class_name_list, [1]class_id_list, [2]probabilities_list
"""
# Setup Cuda
device = torch.device("cuda:0" if self.gpu and torch.cuda.is_available() else "cpu")
self.model.to(device)
# Make sure model is in eval mode
self.model.eval()
# Process image into numpy image, then convert to torch tensor
np_image = self.process_image(image_path)
torch_image = torch.from_numpy(np_image)
torch_image = torch_image.to(device)
with torch.no_grad():
output = self.model(torch_image.unsqueeze_(0))
probabilities = torch.exp(output)
kprobs, kindex = probabilities.topk(topk)
probabilities_list = kprobs[0].cpu().numpy().tolist()
indexes_list = kindex[0].cpu().numpy().tolist()
# For every kindex value, look up the class and return it instead of the index
idx_to_class = {v: k for k, v in self.class_to_idx_items}
class_id_list = [idx_to_class[idx] for idx in indexes_list]
class_name_list = [self.cat_to_name[image_class].title() for image_class in class_id_list]
    return class_name_list, class_id_list, probabilities_list
Python | def process_image(image_path):
"""
Given a path to a file, pre-process that image in preparation for making a prediction.
:param image_path: the path to the image file
:return: the image represented by a flattened numpy array
"""
im_transforms = TRANSFORM_TEST_VALIDATION
# Open image
im = Image.open(image_path)
# Transform it: creates pytorch tensor
im_transformed_tensor = im_transforms(im)
# Return np array
np_image = np.array(im_transformed_tensor)
    return np_image
Python | def match_brackets(self, string) -> str:
"""Automatically closes bracket at the end
Note that the expected order of operations may change because the bracket is added only at the end
Useful when user forgets to close all brackets"""
    num_of_open_brackets = string.count("(")
    num_of_closed_brackets = string.count(")")
    difference = num_of_open_brackets - num_of_closed_brackets
return string + ")" * difference | def match_brackets(self, string) -> str:
"""Automatically closes bracket at the end
Note that the expected order of operations may change because the bracket is added only at the end
Useful when user forgets to close all brackets"""
characters = [char for char in string]
num_of_open_brackets = characters.count("(")
num_of_closed_brackets = characters.count(")")
difference = num_of_open_brackets - num_of_closed_brackets
return string + ")" * difference |
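# Usage sketch (added for illustration; `calc` is a hypothetical instance of the
# owning class): unmatched opening brackets are closed at the end, which can change
# the intended grouping.
# calc.match_brackets("(1+(2*3")   # -> "(1+(2*3))"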
Python | def resolve_transition(self, tr, data):
"""Return the label, the start and the end states of a transition"""
start_conn = data['connectors'][tr['start']]
end_conn = data['connectors'][tr['end']]
start_state = start_conn['parent']
end_state = end_conn['parent']
event = tr['label']
    return start_state, end_state, event
Python | def add_transitions_to_states(self, states, data):
"""Go through the transitions in the state machine, and add them
to the start states"""
for tr in data['transitions'].values():
start, target, label = self.resolve_transition(tr, data)
if states[start]['type'] == 'initial':
start = states[start]['parent']
states[start]['initial'] = target
signal = self.str_to_signal(label, target=target, target_title=states[target]['title'], initial=True)
else:
signal = self.str_to_signal(label, target, target_title=states[target]['title'], initial=False)
        self.add_signal_to_state(states[start], signal)
Python | def subject_questions(self):
"""
Returns a Queryset of all questions added to the participant/demographic data form.
"""
if self.pk:
return Question.objects.filter(experiment=self.pk).order_by('position')
else:
        return None
Python | def consent_questions(self):
"""
Returns a Queryset of all questions added to the consent form.
"""
if self.pk:
return ConsentQuestion.objects.filter(experiment=self.pk).order_by('position')
else:
        return None
Python | def filename(self):
"""
Returns webcam/audio file name
"""
    return os.path.basename(self.webcam_file.name)
Python | def delete_file(sender, instance, *args, **kwargs):
"""
Deletes webcam file on `post_delete`
"""
if instance.webcam_file.name:
        _delete_file(os.path.join(settings.WEBCAM_ROOT, instance.webcam_file.name))
Python | def validate_list(value):
"""
    Takes a comma-separated text value and verifies that it contains at least two non-empty items.
"""
# split by comma and remove empty strings
values = list(filter(None, [x.strip() for x in value.split(',')]))
if len(values) < 2:
        raise ValidationError({'choices':('The selected question type requires an associated list of choices. Choices must contain more than one item.')})
Python | def validate_range(value):
"""
Takes a text value and verifies that the min and max values are valid.
"""
try:
# split by comma and remove empty strings
values = list(filter(None, [x.strip() for x in value.split(',')]))
if len(values) != 2:
raise ValidationError({'choices':('The selected question type requires a minimum and a maximum value.')})
# convert list to integers
min_max = list(map(int, values))
if min_max[0] >= min_max[1]:
            raise ValidationError({'choices':('The minimum value must be less than the maximum value.')})
except ValueError:
        raise ValidationError({'choices':('The values can only be integers.')})
Python | def login_required(function=None, next=None, redirect_field_name=REDIRECT_FIELD_NAME, login_url=None):
"""
Decorator for views that checks that the user is logged in, redirecting
to the log-in page if necessary.
"""
actual_decorator = user_passes_test(
lambda u: u.is_authenticated,
next=next,
login_url=login_url,
redirect_field_name=redirect_field_name,
)
if function:
return actual_decorator(function)
    return actual_decorator
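# Usage sketch (added for illustration; `experiment_list` is a hypothetical Django
# view): this customized decorator works like Django's stock login_required but also
# threads a `next` parameter through user_passes_test.
# @login_required
# def experiment_list(request):
#     ...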
Python | def write_trial_columns(self, worksheet):
"""
Writes column headers to worksheet.
"""
for index, column in enumerate(self.trial_columns):
        worksheet.write(0, index, column)
Python | def calc_trial_duration(self, t1, t2):
"""
Calculates trial duration based on the start and end times.
"""
if t1 and t2:
return str(t2 - t1)
    return ''
Python | def calc_area_clicked(self, result):
"""
Determines row and col clicked based on grid size defined for the trial.
"""
if 'mouse' in result.key_pressed and (result.trialitem.grid_row != 1 or result.trialitem.grid_col != 1):
width = result.resolution_w
height = result.resolution_h
boundaries_r = list(range(0, height, int(height/result.trialitem.grid_row)))
boundaries_r.append(height)
boundaries_c = list(range(0, width, int(width/result.trialitem.grid_col)))
boundaries_c.append(width)
coords = list(map(int, re.findall(r'\d+', result.key_pressed)))
if len(coords) == 2:
col_num = next(i for i,c in enumerate(boundaries_c) if c > coords[0])
row_num = next(i for i,r in enumerate(boundaries_r) if r > coords[1])
return '({row_num},{col_num})'.format(row_num=row_num, col_num=col_num)
    return ''
Python | def create_subject_worksheet(self, worksheet, subject):
"""
Creates a worksheet for each participant containing the data
obtained from the consent form and the demographic/participant data form.
"""
gcd = self.gcd(subject.resolution_w, subject.resolution_h)
if gcd == 0:
gcd = 1
try:
subject_data = {
'Report Date': datetime.datetime.now().strftime("%d.%m.%Y %H:%M:%S"),
'Experiment Name': subject.experiment.exp_name,
'Global Timeout': subject.listitem.global_timeout if subject.listitem else '',
'List': subject.listitem.list_name if subject.listitem else '',
'Participant Number': subject.participant_id,
'Participant UUID': subject.id,
'Participation Date': subject.created.strftime("%d.%m.%Y %H:%M:%S"),
'Aspect Ratio': '{}:{}'.format(int(subject.resolution_h / gcd),
int(subject.resolution_w / gcd)),
'Resolution': '{}x{}'.format(subject.resolution_w, subject.resolution_h),
'Consent Questions': '',
}
consent_questions = ConsentQuestion.objects.filter(experiment_id=subject.experiment.id)
for consent_question in consent_questions:
subject_data[consent_question.text] = 'Y'
answer_bases = AnswerBase.objects.filter(subject_data_id=subject.id)
subject_data['Participant Form Responses'] = ''
for answer_base in answer_bases:
value = ''
if answer_base.question.question_type == Question.TEXT:
value = str(AnswerText.objects.get(pk=answer_base.pk).body)
elif answer_base.question.question_type == Question.INTEGER or answer_base.question.question_type == Question.NUM_RANGE or answer_base.question.question_type == Question.AGE:
value = str(AnswerInteger.objects.get(pk=answer_base.pk).body)
elif answer_base.question.question_type == Question.RADIO or answer_base.question.question_type == Question.SEX:
value = str(AnswerRadio.objects.get(pk=answer_base.pk).body)
elif answer_base.question.question_type == Question.SELECT:
value = str(AnswerSelect.objects.get(pk=answer_base.pk).body)
elif answer_base.question.question_type == Question.SELECT_MULTIPLE:
value = str(AnswerSelectMultiple.objects.get(pk=answer_base.pk).body)
subject_data[answer_base.question.text] = value
cdi_results = CdiResult.objects.filter(subject=subject.id)
subject_data['CDI estimate'] = subject.cdi_estimate
subject_data['CDI'] = subject.experiment.instrument.instr_name if subject.experiment.instrument else ''
for cdi_result in cdi_results:
subject_data[cdi_result.given_label] = cdi_result.response
except ObjectDoesNotExist as e:
logger.exception('Object does not exist: ' + str(e))
finally:
current_row = 0
for key in subject_data:
worksheet.write(current_row, 0, key)
worksheet.write(current_row, 1, subject_data[key])
            current_row += 1
Python | def create_trial_worksheet(self, worksheet, subject):
"""
Creates a worksheet per participant containing the trial results
and adds the corresponding webcam/audio files to the final zip file.
"""
self.write_trial_columns(worksheet)
current_row = 1
outer_blocks_pk = list(OuterBlockItem.objects.filter(listitem__pk=subject.listitem.pk).values_list('id',flat=True))
blocks = BlockItem.objects.filter(outerblockitem__pk__in=outer_blocks_pk)
for block in blocks:
trial_results = TrialResult.objects.filter(trialitem__blockitem__pk=block.pk, subject_id=subject.id).order_by('trial_number', 'pk')
for result in trial_results:
worksheet.write(current_row, 0, block.outerblockitem.outer_block_name)
worksheet.write(current_row, 1, block.label)
worksheet.write(current_row, 2, block.randomise_trials)
worksheet.write(current_row, 3, result.trial_number)
worksheet.write(current_row, 4, result.trialitem.label)
worksheet.write(current_row, 5, result.trialitem.code)
worksheet.write(current_row, 6, result.trialitem.visual_onset)
worksheet.write(current_row, 7, result.trialitem.audio_onset)
worksheet.write(current_row, 8, result.start_time.strftime("%H:%M:%S") if result.start_time else '')
worksheet.write(current_row, 9, result.end_time.strftime("%H:%M:%S") if result.end_time else '')
worksheet.write(current_row, 10, result.trialitem.visual_file.filename)
audio_file = result.trialitem.audio_file
worksheet.write(current_row, 11, audio_file.filename if audio_file else '')
worksheet.write(current_row, 12, result.trialitem.max_duration)
worksheet.write(current_row, 13, result.trialitem.user_input)
worksheet.write(current_row, 14, result.key_pressed)
worksheet.write(current_row, 15, self.calc_area_clicked(result))
worksheet.write(current_row, 16, self.calc_trial_duration(result.start_time, result.end_time))
worksheet.write(current_row, 17, block.outerblockitem.listitem.experiment.recording_option != 'NON' and result.trialitem.record_media)
worksheet.write(current_row, 18, result.webcam_file.name)
worksheet.write(current_row, 19, result.resolution_w)
worksheet.write(current_row, 20, result.resolution_h)
# Add webcam file to zip
self.zip_file.write(os.path.join("webcam", result.webcam_file.name),
result.webcam_file.name)
            current_row += 1

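# For reference: xlsxwriter's write(row, col, value) is zero-indexed, which is
# why the header row (presumably written by write_trial_columns) occupies row 0
# and trial data starts at current_row = 1. Minimal standalone sketch; the file
# name and cell values are illustrative only.
import xlsxwriter

workbook = xlsxwriter.Workbook('example.xlsx')
worksheet = workbook.add_worksheet('Trials')
worksheet.write(0, 0, 'Outer block name')  # header row
worksheet.write(1, 0, 'Block A')           # first data row
workbook.close()
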
def create_report(self):
"""
Creates a zip file containing all participants' results and webcam/audio files for an experiment.
"""
# For each subject
subjects = SubjectData.objects.filter(experiment__pk=self.experiment.pk)
for subject in subjects:
# Create excel report
workbook_file = str(subject.participant_id) + '_' + \
self.experiment.exp_name + '_' + subject.created.strftime('%Y%m%d') + \
            '_' + str(subject.id) + '.xlsx'  # cast: subject ids are UUIDs, not str
workbook_file = get_valid_filename(workbook_file)
workbook = xlsxwriter.Workbook(os.path.join(self.output_folder,
self.tmp_folder, workbook_file))
# Create subject data worksheet
subject_worksheet = workbook.add_worksheet('Participant')
self.create_subject_worksheet(subject_worksheet, subject)
if subject.listitem:
# Create trial data worksheet
trials_worksheet = workbook.add_worksheet('Trials')
self.create_trial_worksheet(trials_worksheet, subject)
# Close and store excel report
workbook.close()
self.zip_file.write(os.path.join(self.output_folder,
self.tmp_folder, workbook_file),
workbook_file)
# Close zip
self.zip_file.close()
# Remove tmp folder
shutil.rmtree(os.path.join(self.output_folder, self.tmp_folder))
    return os.path.join(self.output_folder, self.output_file)

def clean_sharing_groups(self):
"""
Checks that at least one group is selected when experiment is to be shared with groups.
"""
sharing_option = self.cleaned_data.get('sharing_option')
groups = self.cleaned_data.get('sharing_groups')
if sharing_option == 'GRP':
if not groups:
raise ValidationError('Please select at least one group.')
    return groups

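# A minimal form sketch around the validator above; the field names mirror the
# cleaned_data keys, the 'PUB'/'GRP' codes come from has_change_permission
# below, and everything else (including the 'OWN' choice) is an assumption.
# clean_sharing_groups would sit on this form.
from django import forms
from django.contrib.auth.models import Group

class SharingOptionsForm(forms.Form):
    sharing_option = forms.ChoiceField(
        choices=[('PUB', 'Public'), ('GRP', 'Selected groups'), ('OWN', 'Only me')])
    sharing_groups = forms.ModelMultipleChoiceField(
        queryset=Group.objects.all(), required=False)
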
def has_change_permission(self, request, obj=None):
"""
Checks if user has permission to change an experiment.
"""
user_groups = request.user.groups.values_list('id')
if not obj:
return True
    elif obj.sharing_option == 'PUB' or (obj.sharing_option == 'GRP' and user_groups.intersection(
            obj.sharing_groups.values_list('id'))):
return True
else:
        return obj.user == request.user or request.user.is_superuser

def experiment_buttons(self, obj):
"""
Displays action buttons for the Experiment admin interface.
"""
return format_html(
'<a class="grp-button" href="{url_exp}">Go to Experiment</a> '
+ '<a class="grp-button" href="{url_report}">Download Results</a> '
+ '<a class="grp-button" href="{url_export}">Export Experiment</a>',
url_exp=reverse('experiments:informationPage', args=[obj.id]),
url_report=reverse('experiments:experimentReport', args=[obj.id]),
        url_export=reverse('experiments:experimentExport', args=[obj.id]))

def exportToJSON(experiment_id):
"""
Creates a JSON object of the experiment to be exported.
"""
# Get all data
experiment = Experiment.objects.filter(pk=experiment_id)
lists = ListItem.objects.filter(experiment=experiment_id)
outerblocks = OuterBlockItem.objects.filter(listitem__experiment=experiment_id)
innerblocks = BlockItem.objects.filter(outerblockitem__listitem__experiment=experiment_id)
trials = TrialItem.objects.filter(blockitem__outerblockitem__listitem__experiment=experiment_id)
questions = Question.objects.filter(experiment=experiment_id)
consentquestions = ConsentQuestion.objects.filter(experiment=experiment_id)
# Serialize into JSON object
json_data = {}
json_data['experiment'] = json.loads(serializers.serialize("json", experiment))
json_data['lists'] = json.loads(serializers.serialize("json", lists))
json_data['outerblocks'] = json.loads(serializers.serialize("json", outerblocks))
json_data['innerblocks'] = json.loads(serializers.serialize("json", innerblocks))
json_data['trials'] = json.loads(serializers.serialize("json", trials))
json_data['questions'] = json.loads(serializers.serialize("json", questions))
json_data['consentquestions'] = json.loads(serializers.serialize("json", consentquestions))
    return json_data

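# The admin buttons above reverse 'experiments:experimentExport'; a view behind
# that URL could serve this payload as a download. Only the URL name is taken
# from the snippets above; the view body and filename are a sketch.
from django.http import JsonResponse

def experimentExport(request, experiment_id):
    response = JsonResponse(exportToJSON(experiment_id))
    response['Content-Disposition'] = 'attachment; filename="experiment.json"'
    return response
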
def importFromJSON(request, data):
"""
Imports an experiment from JSON file data.
"""
json_data = data.decode("utf-8")
# Import experiment
for experiment in serializers.deserialize("json", json.dumps(json.loads(json_data)['experiment'])):
old_primary_key = str(experiment.object.id)
experiment.object.created_on = datetime.now()
experiment.object.user = request.user
experiment.object.id = None
# Save as new experiment
experiment.save()
new_primary_key = str(experiment.object.id)
# Replace all experiment ids
json_data = json_data.replace(old_primary_key, new_primary_key)
# Import lists
for listItem in serializers.deserialize("json", json.dumps(json.loads(json_data)['lists'])):
old_primary_key = str(listItem.object.id)
listItem.object.id = None
# Save as new list
listItem.save()
new_primary_key = str(listItem.object.id)
# Replace all list ids
json_data = json_data.replace('"listitem": {},'.format(old_primary_key), '"listitem": {},'.format(new_primary_key))
# Import outer blocks
for outerBlockItem in serializers.deserialize("json", json.dumps(json.loads(json_data)['outerblocks'])):
old_primary_key = str(outerBlockItem.object.id)
outerBlockItem.object.id = None
# Save as new outer block
outerBlockItem.save()
new_primary_key = str(outerBlockItem.object.id)
# Replace all outer block ids
json_data = json_data.replace('"outerblockitem": {},'.format(old_primary_key), '"outerblockitem": {},'.format(new_primary_key))
# Import inner blocks
for innerBlockItem in serializers.deserialize("json", json.dumps(json.loads(json_data)['innerblocks'])):
old_primary_key = str(innerBlockItem.object.id)
innerBlockItem.object.id = None
# Save as new inner block
innerBlockItem.save()
new_primary_key = str(innerBlockItem.object.id)
# Replace all inner block ids
json_data = json_data.replace('"blockitem": {},'.format(old_primary_key), '"blockitem": {},'.format(new_primary_key))
# Import trials
for trialItem in serializers.deserialize("json", json.dumps(json.loads(json_data)['trials'])):
old_primary_key = str(trialItem.object.id)
trialItem.object.id = None
# Save as new trial
trialItem.save()
new_primary_key = str(trialItem.object.id)
# Import questions
for question in serializers.deserialize("json", json.dumps(json.loads(json_data)['questions'])):
old_primary_key = str(question.object.id)
question.object.id = None
# Save as new question
question.save()
new_primary_key = str(question.object.id)
# Import consent questions
for consentQuestion in serializers.deserialize("json", json.dumps(json.loads(json_data)['consentquestions'])):
old_primary_key = str(consentQuestion.object.id)
consentQuestion.object.id = None
# Save as new consent question
consentQuestion.save()
        new_primary_key = str(consentQuestion.object.id)

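# Hypothetical upload view feeding importFromJSON; the form field name and the
# redirect target are assumptions. request.FILES[...].read() yields the bytes
# that importFromJSON decodes.
from django.http import Http404, HttpResponseRedirect
from django.urls import reverse

def experimentImport(request):
    if request.method == 'POST' and request.FILES.get('import_file'):
        importFromJSON(request, request.FILES['import_file'].read())
        return HttpResponseRedirect(reverse('experiments:index'))
    raise Http404('Page not found.')
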
def has_change_permission(self, request, obj=None):
"""
Checks if user has permission to change subject data.
"""
user_groups = request.user.groups.values_list('id')
if not obj:
return True
    elif obj.experiment.sharing_option == 'PUB' or (obj.experiment.sharing_option == 'GRP' and user_groups.intersection(
            obj.experiment.sharing_groups.values_list('id'))):
return True
else:
        return obj.experiment.user == request.user or request.user.is_superuser

def sort_items(item_params):
"""
Returns ndarray of indices of items sorted by maximum item information.
"""
    return (-inf_hpc(max_info_hpc(item_params), item_params)).argsort()

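# Hypothetical usage, assuming inf_hpc/max_info_hpc are the catsim.irt helpers
# and item_params follows the catsim convention of an (n_items, 4) array of
# a/b/c/d IRT parameters. Values are illustrative.
import numpy as np

rng = np.random.default_rng(0)
n_items = 20
item_params = np.column_stack([
    rng.lognormal(0.0, 0.3, n_items),   # discrimination a > 0
    rng.normal(0.0, 1.0, n_items),      # difficulty b
    rng.uniform(0.0, 0.25, n_items),    # guessing c
    np.ones(n_items),                   # upper asymptote d
])
most_informative_first = sort_items(item_params)
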
def estimateCDI(run_uuid):
"""
Computes CDI estimates based on Mayor and Mani (2019)
"""
subject_data = get_object_or_404(SubjectData, pk=run_uuid)
experiment = get_object_or_404(Experiment, pk=subject_data.experiment.pk)
instrument = get_object_or_404(Instrument, pk=experiment.instrument.pk)
estimate = 0
# get the latest response for duplicate (i.e., modified) responses
cdi_results = CdiResult.objects.filter(subject=run_uuid).order_by('given_label', '-id').distinct('given_label')
try:
words_list = instrument.words_list
# parse instrument word list
        all_words = {}
        with open(os.path.join(settings.MEDIA_ROOT, instrument.words_list.path), mode='r', encoding='utf-8-sig') as words_file:
            # context manager ensures the word list file is closed after parsing
            for row in csv.DictReader(words_file, delimiter=','):
                all_words[row['word']] = int(row['word_id'])
# get child's age and sex
age = (AnswerInteger.objects.filter(subject_data=subject_data, question__question_type='age').first()).body
sex = (AnswerRadio.objects.filter(subject_data=subject_data, question__question_type='sex').first()).body
choices = (Question.objects.filter(experiment=experiment, question_type='sex').first()).choices
choices = list(filter(None, [x.strip() for x in choices.split(',')]))
# get lookup files for child's sex
if sex.strip().lower() == choices[0].lower(): # choices0 = female
lm_np_mean = pd.read_csv(os.path.join(settings.MEDIA_ROOT, instrument.f_lm_np_mean.path))
lm_np_sd = pd.read_csv(os.path.join(settings.MEDIA_ROOT, instrument.f_lm_np_sd.path))
lm_p_mean = pd.read_csv(os.path.join(settings.MEDIA_ROOT, instrument.f_lm_p_mean.path))
lm_p_sd = pd.read_csv(os.path.join(settings.MEDIA_ROOT, instrument.f_lm_p_sd.path))
bmin = pd.read_csv(os.path.join(settings.MEDIA_ROOT, instrument.f_bmin.path))
slope = pd.read_csv(os.path.join(settings.MEDIA_ROOT, instrument.f_slope.path))
else: # choices1 = male
lm_np_mean = pd.read_csv(os.path.join(settings.MEDIA_ROOT, instrument.m_lm_np_mean.path))
lm_np_sd = pd.read_csv(os.path.join(settings.MEDIA_ROOT, instrument.m_lm_np_sd.path))
lm_p_mean = pd.read_csv(os.path.join(settings.MEDIA_ROOT, instrument.m_lm_p_mean.path))
lm_p_sd = pd.read_csv(os.path.join(settings.MEDIA_ROOT, instrument.m_lm_p_sd.path))
bmin = pd.read_csv(os.path.join(settings.MEDIA_ROOT, instrument.m_bmin.path))
slope = pd.read_csv(os.path.join(settings.MEDIA_ROOT, instrument.m_slope.path))
instr_num_words = len(lm_np_mean.index)
basis = np.ones(instr_num_words+1)
min_score = np.ones(instr_num_words+1)
max_score = np.ones(instr_num_words+1)
x_values = np.arange(instr_num_words+1)
for cr in cdi_results:
# retrieve row number via word_id, assuming row numbers are the same across all data files
word_idx = lm_np_mean[lm_np_mean['word_id'] == all_words.get(cr.given_label)].index[0]
if cr.response: # if can produce/comprehend word
basis = basis + np.log(norm.pdf(x_values, loc=lm_p_mean.at[word_idx,str(age)], scale=lm_p_sd.at[word_idx,str(age)]))
else: # cannot produce/comprehend word
basis = basis + np.log(norm.pdf(x_values, loc=lm_np_mean.at[word_idx,str(age)], scale=lm_np_sd.at[word_idx,str(age)]))
min_score = min_score + np.log(norm.pdf(x_values, loc=lm_np_mean.at[word_idx,str(age)], scale=lm_np_sd.at[word_idx,str(age)]))
max_score = max_score + np.log(norm.pdf(x_values, loc=lm_p_mean.at[word_idx,str(age)], scale=lm_p_sd.at[word_idx,str(age)]))
# get index of max value in basis
B = np.where(basis == np.amax(basis))
B = int(B[0][0])
estimate = (B-bmin.at[0,str(age)])/slope.at[0,str(age)]
# store CDI estimate in subject_data
subject_data.cdi_estimate = estimate
subject_data.save()
except KeyError as e:
logger.exception('Failed to estimate CDI score: ' + str(e))
return HttpResponseRedirect(reverse('experiments:experimentError', args=(run_uuid,)))
else:
logger.info('CDI estimate: ' + str(estimate))
        return estimate

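# Toy sketch of the scoring step above: each response adds a log-likelihood
# curve over candidate raw scores, and the raw estimate is the argmax (before
# the bmin/slope rescaling). All numbers here are illustrative only.
import numpy as np
from scipy.stats import norm

n_words = 5
x = np.arange(n_words + 1)                         # candidate raw scores 0..n_words
basis = np.ones(n_words + 1)
basis += np.log(norm.pdf(x, loc=4.0, scale=1.0))   # a "knows the word" response
basis += np.log(norm.pdf(x, loc=1.0, scale=1.0))   # a "does not know" response
B = int(np.argmax(basis))                          # most likely raw score
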
def cdiSubmit(request, run_uuid):
"""
Stores item response as CdiResult.
"""
subject_data = get_object_or_404(SubjectData, pk=run_uuid)
experiment = get_object_or_404(Experiment, pk=subject_data.experiment.pk)
form = VocabularyChecklistForm(request.POST, cdi_form=experiment)
# store current response as CdiResult and add to request.responses
if form.is_valid():
responses = request.session.get('responses')
for key, value in request.POST.items():
if key.startswith('word_'):
cdiresult = CdiResult()
cdiresult.subject = subject_data
cdiresult.given_label = key[5:]
                if value.lower() == 'on':
                    cdiresult.response = True
                    responses.append(True)
                else:
                    cdiresult.response = False
                    responses.append(False)
cdiresult.save()
request.session['responses'] = responses
irt_run = request.session.get('irt_run')
# count unique items
count_unique = CdiResult.objects.filter(subject=run_uuid).order_by('given_label').distinct('given_label').count()
logger.info('unique count: ' + str(count_unique))
if count_unique < experiment.num_words:
request.session['irt_run'] = irt_run + 1
# generate subsequent item
return cdiGenerateNextItem(request, run_uuid)
else: # proceed to experiment or end page
estimateCDI(run_uuid)
if ListItem.objects.filter(experiment=experiment):
return proceedToExperiment(experiment, run_uuid)
else:
return HttpResponseRedirect(reverse('experiments:experimentEnd', args=(run_uuid,)))
t = Template(experiment.cdi_page_tpl)
c = RequestContext(request, {'subject_data': subject_data, 'cdi_form':form, 'experiment': experiment,})
    return HttpResponse(t.render(c))

def webcam_upload(request, run_uuid):
"""
Receives upload requests for video/audio chunks during the experiment and
merges these chunks into a file.
"""
fs = FileSystemStorage(location=settings.WEBCAM_ROOT)
# Upload request
if request.method == 'POST' and request.FILES.get('file'):
webcam_file = request.FILES.get('file')
# webcam_file_type = request.POST.get('type')
# Delete existing file
        # Use the same sanitized name for the existence check, delete, and save
        webcam_filename = get_valid_filename(webcam_file.name)
        if fs.exists(webcam_filename):
            fs.delete(webcam_filename)
        fs.save(webcam_filename, webcam_file)
logger.info('Received upload request of %s.' % webcam_file.name)
return HttpResponse(status=204)
# Merge request
elif request.method == 'POST' and request.POST.get('trialResultId'):
# Get base filename, by removing chunk number at the end
base_filename = request.POST.get('filename')
base_filename = get_valid_filename(base_filename)
logger.info('Received last file of %s, merge files.' % base_filename)
# Find and merge individual chunks
webcam_files = find_files(base_filename)
merge_files(base_filename + '.webm', webcam_files)
# Delete chunks
for webcam_file in webcam_files:
fs.delete(webcam_file)
# Add filename to trial result
trial_result_id = 0
try:
trial_result_id = int(request.POST.get('trialResultId'))
except ValueError as e:
logger.exception('Failed to retrieve trial result ID: ' + str(e))
raise Http404('Invalid trialResultId.')
trial_result = get_object_or_404(TrialResult, pk=trial_result_id, subject=run_uuid)
trial_result.webcam_file = base_filename + '.webm'
trial_result.save()
logger.info('Successfully saved webcam file to trial result.')
return HttpResponse(status=204)
else:
logger.error('Failed to upload webcam file.')
        raise Http404('Page not found.')

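# Hypothetical client-side flow for the two POST branches above; the endpoint
# URL is an assumption, while the field names ('file', 'filename',
# 'trialResultId') mirror what the view reads from request.FILES/POST.
import requests

upload_url = 'https://example.org/webcam_upload/<run_uuid>/'  # URL layout assumed
chunks = [b'part0', b'part1']  # stand-ins for recorded media chunks
for i, chunk in enumerate(chunks):
    requests.post(upload_url, files={'file': (f'run1_trial3_{i}', chunk)})
# Final request asks the server to merge the chunks into run1_trial3.webm:
requests.post(upload_url, data={'trialResultId': '42', 'filename': 'run1_trial3'})
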
def merge_files(target, files):
"""
Merges chunks into a file.
"""
fs = FileSystemStorage(location=settings.WEBCAM_ROOT)
destination_file = os.path.join(
settings.WEBCAM_ROOT, target)
# Delete any existing file
if fs.exists(target):
fs.delete(target)
# Merge
with open(destination_file, 'wb') as outfile:
for fname in files:
with open(os.path.join(settings.WEBCAM_ROOT, fname), 'rb') as infile:
while True:
data = infile.read(65536)
if not data:
break
                    outfile.write(data)

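# Hypothetical call, assuming the chunk files already sit in WEBCAm_ROOT's
# directory on disk; names follow the chunk-suffix convention used above.
merge_files('run1_trial3.webm', ['run1_trial3_0', 'run1_trial3_1'])
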
def all_episodes():
"""
contains playable podcasts listed as just-in
"""
soup = thisislove.get_soup(URL)
playable_podcast = thisislove.get_playable_podcast(soup)
items = thisislove.compile_playable_podcast(playable_podcast)
    return items

def all_episodes1():
"""
contains playable podcasts listed as just-in
"""
soup = thisislove.get_soup(URL)
playable_podcast1 = thisislove.get_playable_podcast1(soup)
items = thisislove.compile_playable_podcast1(playable_podcast1)
    return items

async def approvepm(apprvpm):
""" For .approve command, give someone the permissions to PM you. """
try:
from userbot.modules.sql_helper.pm_permit_sql import approve
except AttributeError:
return await apprvpm.edit("`Running on Non-SQL mode!`")
if apprvpm.reply_to_msg_id:
reply = await apprvpm.get_reply_message()
replied_user = await apprvpm.client.get_entity(reply.from_id)
aname = replied_user.id
name0 = str(replied_user.first_name)
uid = replied_user.id
else:
aname = await apprvpm.client.get_entity(apprvpm.chat_id)
name0 = str(aname.first_name)
uid = apprvpm.chat_id
try:
approve(uid)
except IntegrityError:
        return await apprvpm.edit("`User ini mungkin sudah di tag dalam transaksi.`")  # "This user may already be tagged in a transaction."
    await apprvpm.edit(f"`#SenturyBot: ...!`\n\n"
                       f"`Sedang Melakukan Transaksi Dengan `[{name0}](tg://user?id={uid})")  # "Conducting a transaction with <user>"
async for message in apprvpm.client.iter_messages(apprvpm.chat_id,
from_user='me',
search=UNAPPROVED_MSG):
await message.delete()
if BOTLOG:
await apprvpm.client.send_message(
BOTLOG_CHATID,
"#SedangTransaksi\n" + "Dengan User: " + f"[{name0}](tg://user?id={uid})",
        )

def target_distributionDouble(model, target_network, next_states, rewards, terminals,
cumulative_gamma):
"""Builds the Quantile target distribution as per Dabney et al. (2017).
Args:
target_network: Jax Module used for the target network.
next_states: numpy array of batched next states.
rewards: numpy array of batched rewards.
terminals: numpy array of batched terminals.
cumulative_gamma: float, cumulative gamma to use (static_argnum).
Returns:
The target distribution from the replay.
"""
is_terminal_multiplier = 1. - terminals.astype(jnp.float32)
# Incorporate terminal state to discount factor.
gamma_with_terminal = cumulative_gamma * is_terminal_multiplier
next_state_target_outputs = model(next_states)
q_values = jnp.squeeze(next_state_target_outputs.q_values)
next_qt_argmax = jnp.argmax(q_values)
next_dist = target_network(next_states)
logits = jnp.squeeze(next_dist.logits)
next_logits = logits[next_qt_argmax]
    return jax.lax.stop_gradient(rewards + gamma_with_terminal * next_logits)

def target_distribution(target_network, next_states, rewards, terminals,
cumulative_gamma):
"""Builds the Quantile target distribution as per Dabney et al. (2017).
Args:
target_network: Jax Module used for the target network.
next_states: numpy array of batched next states.
rewards: numpy array of batched rewards.
terminals: numpy array of batched terminals.
cumulative_gamma: float, cumulative gamma to use (static_argnum).
Returns:
The target distribution from the replay.
"""
is_terminal_multiplier = 1. - terminals.astype(jnp.float32)
# Incorporate terminal state to discount factor.
gamma_with_terminal = cumulative_gamma * is_terminal_multiplier
next_state_target_outputs = target_network(next_states)
q_values = jnp.squeeze(next_state_target_outputs.q_values)
next_qt_argmax = jnp.argmax(q_values)
logits = jnp.squeeze(next_state_target_outputs.logits)
next_logits = logits[next_qt_argmax]
    return jax.lax.stop_gradient(rewards + gamma_with_terminal * next_logits)

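# The only difference between target_distributionDouble and target_distribution
# is which network picks the greedy next action: the double variant selects with
# the online model and evaluates with the target network, while the plain
# variant does both with the target network. A minimal sketch of the decoupling,
# assuming online/target are callables returning objects with .q_values and
# .logits as above:
import jax.numpy as jnp

def double_target_logits(online, target, next_states):
    a_star = jnp.argmax(jnp.squeeze(online(next_states).q_values))  # online net selects
    return jnp.squeeze(target(next_states).logits)[a_star]          # target net evaluates
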
def _build_replay_buffer(self):
"""Creates the replay buffer used by the agent."""
if self._replay_scheme not in ['uniform', 'prioritized']:
raise ValueError('Invalid replay scheme: {}'.format(self._replay_scheme))
# Both replay schemes use the same data structure, but the 'uniform' scheme
# sets all priorities to the same value (which yields uniform sampling).
return prioritized_replay_buffer.OutOfGraphPrioritizedReplayBuffer(
observation_shape=self.observation_shape,
stack_size=self.stack_size,
update_horizon=self.update_horizon,
gamma=self.gamma,
        observation_dtype=self.observation_dtype)

def _train_step(self):
"""Runs a single training step.
Runs training if both:
(1) A minimum number of frames have been added to the replay buffer.
(2) `training_steps` is a multiple of `update_period`.
Also, syncs weights from online_network to target_network if training steps
is a multiple of target update period.
"""
if self._replay.add_count > self.min_replay_history:
if self.training_steps % self.update_period == 0:
self._sample_from_replay_buffer()
if self._replay_scheme == 'prioritized':
# The original prioritized experience replay uses a linear exponent
# schedule 0.4 -> 1.0. Comparing the schedule to a fixed exponent of
# 0.5 on 5 games (Asterix, Pong, Q*Bert, Seaquest, Space Invaders)
# suggested a fixed exponent actually performs better, except on Pong.
probs = self.replay_elements['sampling_probabilities']
loss_weights = 1.0 / jnp.sqrt(probs + 1e-10)
loss_weights /= jnp.max(loss_weights)
else:
loss_weights = jnp.ones(self.replay_elements['state'].shape[0])
self.optimizer, loss, mean_loss = train(
self.network_def,
self.target_network_params,
self.optimizer,
self.replay_elements['state'],
self.replay_elements['action'],
self.replay_elements['next_state'],
self.replay_elements['reward'],
self.replay_elements['terminal'],
loss_weights,
self._kappa,
self._num_atoms,
self.cumulative_gamma,
self._double_dqn,
self._rng)
if self._replay_scheme == 'prioritized':
# Rainbow and prioritized replay are parametrized by an exponent
# alpha, but in both cases it is set to 0.5 - for simplicity's sake we
# leave it as is here, using the more direct sqrt(). Taking the square
# root "makes sense", as we are dealing with a squared loss. Add a
# small nonzero value to the loss to avoid 0 priority items. While
# technically this may be okay, setting all items to 0 priority will
# cause troubles, and also result in 1.0 / 0.0 = NaN correction terms.
self._replay.set_priority(self.replay_elements['indices'],
jnp.sqrt(loss + 1e-10))
if self.summary_writer is not None:
summary = tf.compat.v1.Summary(value=[
tf.compat.v1.Summary.Value(tag='QuantileLoss',
simple_value=mean_loss)])
self.summary_writer.add_summary(summary, self.training_steps)
if self.training_steps % self.target_update_period == 0:
self._sync_weights()
    self.training_steps += 1

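# Standalone sketch of the importance weights computed above: inverse square
# root of the sampling probabilities (i.e. a fixed exponent of 0.5, per the
# comment in the code), normalized so the largest weight is 1. The
# probabilities here are illustrative.
import numpy as np

probs = np.array([0.5, 0.25, 0.125, 0.125])
loss_weights = 1.0 / np.sqrt(probs + 1e-10)
loss_weights /= loss_weights.max()
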
def _store_transition(self,
last_observation,
action,
reward,
is_terminal,
priority=None):
"""Stores a transition when in training mode.
Stores the following tuple in the replay buffer (last_observation, action,
reward, is_terminal, priority).
Args:
last_observation: Last observation, type determined via observation_type
parameter in the replay_memory constructor.
action: An integer, the action taken.
reward: A float, the reward.
is_terminal: Boolean indicating if the current state is a terminal state.
priority: Float. Priority of sampling the transition. If None, the default
priority will be used. If replay scheme is uniform, the default priority
is 1. If the replay scheme is prioritized, the default priority is the
maximum ever seen [Schaul et al., 2015].
"""
if priority is None:
if self._replay_scheme == 'uniform':
priority = 1.
else:
priority = self._replay.sum_tree.max_recorded_priority
if not self.eval_mode:
        self._replay.add(last_observation, action, reward, is_terminal, priority)

def _train_step(self):
"""Runs a single training step.
Runs training if both:
(1) A minimum number of frames have been added to the replay buffer.
(2) `training_steps` is a multiple of `update_period`.
Also, syncs weights from online_network to target_network if training steps
is a multiple of target update period.
"""
if self._replay.add_count > self.min_replay_history:
if self.training_steps % self.update_period == 0:
self._sample_from_replay_buffer()
if self._replay_scheme == 'prioritized':
# The original prioritized experience replay uses a linear exponent
# schedule 0.4 -> 1.0. Comparing the schedule to a fixed exponent of
# 0.5 on 5 games (Asterix, Pong, Q*Bert, Seaquest, Space Invaders)
# suggested a fixed exponent actually performs better, except on Pong.
probs = self.replay_elements['sampling_probabilities']
# Weight the loss by the inverse priorities.
loss_weights = 1.0 / jnp.sqrt(probs + 1e-10)
loss_weights /= jnp.max(loss_weights)
else:
loss_weights = jnp.ones(self.replay_elements['state'].shape[0])
self.optimizer, loss, mean_loss = train(
self.network_def,
self.target_network_params,
self.optimizer,
self.replay_elements['state'],
self.replay_elements['action'],
self.replay_elements['next_state'],
self.replay_elements['reward'],
self.replay_elements['terminal'],
loss_weights,
self._support,
self.cumulative_gamma,
self._double_dqn,
self._rng)
if self._replay_scheme == 'prioritized':
# Rainbow and prioritized replay are parametrized by an exponent
# alpha, but in both cases it is set to 0.5 - for simplicity's sake we
# leave it as is here, using the more direct sqrt(). Taking the square
# root "makes sense", as we are dealing with a squared loss. Add a
# small nonzero value to the loss to avoid 0 priority items. While
# technically this may be okay, setting all items to 0 priority will
# cause troubles, and also result in 1.0 / 0.0 = NaN correction terms.
self._replay.set_priority(self.replay_elements['indices'],
jnp.sqrt(loss + 1e-10))
if self.summary_writer is not None:
summary = tf.compat.v1.Summary(value=[
tf.compat.v1.Summary.Value(tag='CrossEntropyLoss',
simple_value=mean_loss)])
self.summary_writer.add_summary(summary, self.training_steps)
if self.training_steps % self.target_update_period == 0:
self._sync_weights()
    self.training_steps += 1

def begin_episode(self, observation):
"""Returns the agent's first action for this episode.
Args:
observation: numpy array, the environment's initial observation.
Returns:
int, the selected action.
"""
self._reset_state()
self._record_observation(observation)
if not self.eval_mode:
self._train_step()
self._rng, self.action = select_action(self.network_def,
self.online_params,
self.state,
self._rng,
self.num_quantile_samples,
self.num_actions,
self.eval_mode,
self.epsilon_eval,
self.epsilon_train,
self.epsilon_decay_period,
self.training_steps,
self.min_replay_history,
self.epsilon_fn,
self._tau,
self.optimizer)
self.action = onp.asarray(self.action)
    return self.action

def step(self, reward, observation):
"""Records the most recent transition and returns the agent's next action.
We store the observation of the last time step since we want to store it
with the reward.
Args:
reward: float, the reward received from the agent's most recent action.
observation: numpy array, the most recent observation.
Returns:
int, the selected action.
"""
self._last_observation = self._observation
self._record_observation(observation)
if not self.eval_mode:
self._store_transition(self._last_observation, self.action, reward, False)
self._train_step()
self._rng, self.action = select_action(self.network_def,
self.online_params,
self.state,
self._rng,
self.num_quantile_samples,
self.num_actions,
self.eval_mode,
self.epsilon_eval,
self.epsilon_train,
self.epsilon_decay_period,
self.training_steps,
self.min_replay_history,
self.epsilon_fn,
self._tau,
self.optimizer)
self.action = onp.asarray(self.action)
    return self.action

def _train_step(self):
"""Runs a single training step.
Runs training if both:
(1) A minimum number of frames have been added to the replay buffer.
(2) `training_steps` is a multiple of `update_period`.
Also, syncs weights from online_network to target_network if training steps
is a multiple of target update period.
"""
if self._replay.add_count > self.min_replay_history:
if self.training_steps % self.update_period == 0:
self._sample_from_replay_buffer()
if self._replay_scheme == 'prioritized':
# The original prioritized experience replay uses a linear exponent
# schedule 0.4 -> 1.0. Comparing the schedule to a fixed exponent of
# 0.5 on 5 games (Asterix, Pong, Q*Bert, Seaquest, Space Invaders)
# suggested a fixed exponent actually performs better, except on Pong.
probs = self.replay_elements['sampling_probabilities']
loss_weights = 1.0 / jnp.sqrt(probs + 1e-10)
loss_weights /= jnp.max(loss_weights)
else:
loss_weights = jnp.ones(self.replay_elements['state'].shape[0])
self._rng, self.optimizer, loss, mean_loss= train(
self.network_def,
self.target_network_params,
self.optimizer,
self.replay_elements['state'],
self.replay_elements['action'],
self.replay_elements['next_state'],
self.replay_elements['reward'],
self.replay_elements['terminal'],
loss_weights,
self._target_opt,
self.num_tau_samples,
self.num_tau_prime_samples,
self.num_quantile_samples,
self.cumulative_gamma,
self.double_dqn,
self.kappa,
self._tau,
self._alpha,
self._clip_value_min,
self._num_actions,
self._rng)
if self._replay_scheme == 'prioritized':
# Rainbow and prioritized replay are parametrized by an exponent
# alpha, but in both cases it is set to 0.5 - for simplicity's sake we
# leave it as is here, using the more direct sqrt(). Taking the square
# root "makes sense", as we are dealing with a squared loss. Add a
# small nonzero value to the loss to avoid 0 priority items. While
# technically this may be okay, setting all items to 0 priority will
# cause troubles, and also result in 1.0 / 0.0 = NaN correction terms.
self._replay.set_priority(self.replay_elements['indices'],
jnp.sqrt(loss + 1e-10))
if (self.summary_writer is not None and
self.training_steps > 0 and
self.training_steps % self.summary_writing_frequency == 0):
summary = tf.compat.v1.Summary(value=[
tf.compat.v1.Summary.Value(tag='ImplicitLoss',
simple_value=mean_loss)])
self.summary_writer.add_summary(summary, self.training_steps)
if self.training_steps % self.target_update_period == 0:
self._sync_weights()
    self.training_steps += 1

def trello_model_id_put(model, id):  # noqa: E501
"""Updates the models currently in db.
# noqa: E501
:param model:
:type model: str
:param id:
:type id: str
:rtype: UniversalResource
"""
    return 'do some magic!'

def trello_model_id_put(self, model, id, **kwargs):  # noqa: E501
"""Updates the models currently in db. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.trello_model_id_put(model, id, async=True)
>>> result = thread.get()
:param async bool
:param str model: (required)
:param str id: (required)
:return: UniversalResource
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.trello_model_id_put_with_http_info(model, id, **kwargs) # noqa: E501
else:
(data) = self.trello_model_id_put_with_http_info(model, id, **kwargs) # noqa: E501
        return data