| code (string, 2k–1.04M chars) | repo_path (string, 5–517 chars) | parsed_code (string, 0–1.04M chars; identical to code in the entries below) | quality_prob (float64, 0.02–0.95) | learning_prob (float64, 0.02–0.93) |
import f16lib.models.f16 as f16
import f16lib.models.llc as llc
import f16lib.models.autopilot as auto
import f16lib.models.autoairspeed as autoair
import f16lib.models.autoaltitude as autoalt
import f16lib.models.autowaypoint as awaypoint
import f16lib.models.switch as switch
import f16lib.models.autoacas as acas
import f16lib.models.monitor_ap as monitor
import f16lib.models.acas_switch as aswitch
import f16lib.models.dummy_predictor as predictor
import f16lib.models.nnllc as nnllc
from f16lib.messages import *
from csaf import ContinuousComponent, DiscreteComponent
import typing
f16_gcas_scen = [540.0,
0.037027160081059704,
0.0,
0.7853981633974483,
-1.2566370614359172,
-0.7853981633974483,
0.0,
0.0,
0.0,
0.0,
0.0,
3600.0,
9.0]
f16_xequil = [502.0,
0.03887505597600522,
0.0,
0.0,
0.03887505597600522,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
1000.0,
9.05666543872074]
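# Both vectors above appear to be 13-element F-16 state vectors in the usual
# Stevens/Lewis ordering [vt, alpha, beta, phi, theta, psi, P, Q, R, pn, pe, alt, power]:
# f16_xequil is a trim condition at 502 ft/s and 1000 ft, while f16_gcas_scen is a
# rolled, nose-down initial condition at 3600 ft used for the GCAS scenario.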
class F16PlantComponent(ContinuousComponent):
name = "F16 Plant Model"
sampling_frequency = 30.0
default_parameters = {
"s": 300.0,
"b": 30.0,
"cbar": 11.32,
"rm": 1.57e-3,
"xcgref": 0.35,
"xcg": 0.35,
"he": 160.0,
"c1": -0.770,
"c2": 0.02755,
"c3": 1.055e-4,
"c4": 1.642e-6,
"c5": 0.9604,
"c6": 1.759e-2,
"c7": 1.792e-5,
"c8": -0.7336,
"c9": 1.587e-5,
"rtod": 57.29578,
"g": 32.17,
"xcg_mult": 1,
"cxt_mult": 1,
"cyt_mult": 1,
"czt_mult": 1,
"clt_mult": 1,
"cmt_mult": 1,
"cnt_mult": 1,
"model": "morelli"
}
inputs = (
("inputs", F16ControllerOutputMessage),
)
outputs = (
("outputs", F16PlantOutputMessage),
)
states = F16PlantStateMessage
default_initial_values = {
"states": f16_xequil,
"inputs": [0.0, 0.0, 0.0, 0.0]
}
flows = {
"outputs": f16.model_output,
"states": f16.model_state_update
}
class F16LlcComponent(ContinuousComponent):
name = "F16 Low Level Controller"
sampling_frequency = 30.0
default_parameters = {
"lqr_name": "lqr_original",
"throttle_max": 1,
"throttle_min": 0,
"elevator_max": 25,
"elevator_min": -25,
"aileron_max": 21.5,
"aileron_min": -21.5,
"rudder_max": 30.0,
"rudder_min": -30.0
}
inputs = (
("inputs_pstates", F16PlantStateMessage),
("inputs_poutputs", F16PlantOutputMessage),
("inputs_coutputs", F16ControllerOutputMessage)
)
outputs = (
("outputs", F16ControllerOutputMessage),
)
states = F16LlcStateMessage
default_initial_values = {
"states": [0.0, 0.0, 0.0],
"inputs_pstates": f16_xequil,
"inputs_poutputs": [0.0, 0.0, 0.0, 0.0],
"inputs_coutputs": [0.0, 0.0, 0.0, 0.0]
}
flows = {
"outputs": llc.model_output,
"states": llc.model_state_update
}
initialize = llc.model_init
class F16NNLlcComponent(F16LlcComponent):
name = "F16 NN Low Level Controller"
flows = {
"outputs": nnllc.model_output,
"states": nnllc.model_state_update
}
initialize = nnllc.model_init
class F16AutopilotComponent(DiscreteComponent):
name = ""
sampling_frequency = 10.0
inputs = (
("inputs_pstates", F16PlantStateMessage),
("inputs_poutputs", F16PlantOutputMessage),
)
class F16GcasComponent(F16AutopilotComponent):
name = "F16 GCAS Autopilot"
default_parameters = {
"NzMax": 9.0,
"vt_des": 502.0
}
states = F16AutopilotOutputMessage
default_initial_values = {
"states": ["Waiting"],
"inputs_pstates": f16_xequil,
"inputs_poutputs": [0.0, 0.0, 0.0, 0.0],
}
outputs = (
("outputs", F16ControllerOutputMessage),
("fdas", F16AutopilotOutputMessage)
)
flows = {
"outputs": auto.model_output,
"fdas": auto.model_info,
"states": auto.model_state_update
}
class F16AutoAirspeedComponent(F16AutopilotComponent):
name = "F16 Airspeed Autopilot"
default_parameters = {
"setpoint": 800.0, # setpoint in airspeed (ft/s)
"p_gain": 0.01, # P controller gain value
"xequil": [502.0, 0.03887505597600522, 0.0, 0.0, 0.03887505597600522, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1000.0,
9.05666543872074]
}
states = EmptyMessage
default_initial_values = {
"states": [],
"inputs_pstates": f16_xequil,
"inputs_poutputs": [0.0, 0.0, 0.0, 0.0],
}
outputs = (
("outputs", F16ControllerOutputMessage),
)
flows = {
"outputs": autoair.model_output,
}
class F16AutoAltitudeComponent(F16AutopilotComponent):
name = "F16 Altitude Autopilot"
default_parameters = {
"setpoint": 2500,
"xequil": f16_xequil
}
states = EmptyMessage
default_initial_values = {
"states": [],
"inputs_pstates": f16_xequil,
"inputs_poutputs": [0.0, 0.0, 0.0, 0.0],
}
outputs = (
("outputs", F16ControllerOutputMessage),
)
flows = {
"outputs": autoalt.model_output
}
class F16MonitorComponent(DiscreteComponent):
name = "F16 Autopilot Monitor"
sampling_frequency = 10.0
default_parameters: typing.Dict[str, typing.Any] = {
}
default_initial_values = {
"states": [],
"inputs_pstates": f16_xequil,
"inputs_poutputs": [0.0, 0.0, 0.0, 0.0],
"inputs_gcas": ["Waiting"]
}
states = EmptyMessage
inputs = (
("inputs_pstates", F16PlantStateMessage),
("inputs_poutputs", F16PlantOutputMessage),
("inputs_gcas", F16AutopilotOutputMessage),
)
outputs = (
("outputs", F16MonitorOutputMessage),
)
flows = {
"outputs": monitor.model_output
}
class F16SwitchComponent(DiscreteComponent):
name = "F16 Autopilot Selector"
sampling_frequency = 10.0
default_initial_values = {
"inputs_0": [0.0, 0.0, 0.0, 0.0],
"inputs_1": [0.0, 0.0, 0.0, 0.0],
"inputs_2": [0.0, 0.0, 0.0, 0.0],
"inputs_monitors": ["gcas"],
"states": []
}
default_parameters = {
"mapper": ["gcas", "altitude", "airspeed"]
}
states = EmptyMessage
inputs = (
("inputs_0", F16ControllerOutputMessage),
("inputs_1", F16ControllerOutputMessage),
("inputs_2", F16ControllerOutputMessage),
("inputs_monitors", F16MonitorOutputMessage)
)
outputs = (
("outputs", F16ControllerOutputMessage),
)
flows = {
"outputs": switch.model_output
}
def create_collision_predictor(nagents: int) -> typing.Type[DiscreteComponent]:
class _F16CollisionPredictor(DiscreteComponent):
name = "F16 Collision Predictor"
sampling_frequency = 10.0
default_parameters: typing.Dict[str, typing.Any] = {
"intruder_waypoints" : ((0.0, 0.0, 1000.0),),
"own_waypoints" : ((0.0, 0.0, 1000.0),)
}
inputs = (
("inputs_own", F16PlantStateMessage),
*[(f"inputs_intruder{idx}", F16PlantStateMessage) for idx in range(nagents)]
)
outputs = (
("outputs", PredictorOutputMessage),
)
states = EmptyMessage
default_initial_values = {
"inputs_own": f16_xequil,
"states": [],
**{f"inputs_intruder{idx}": f16_xequil for idx in range(nagents)}
}
flows = {
"outputs": predictor.model_output
}
initialize = predictor.model_init
return _F16CollisionPredictor
def create_nagents_acas_xu(nagents: int) -> typing.Type[DiscreteComponent]:
class _F16AcasComponent(DiscreteComponent):
name = "F16 Acas Xu Controller"
sampling_frequency = 10.0
default_parameters = {
"roll_rates": (0, -1.5, 1.5, -3.0, 3.0),
"gains": "nominal",
"setpoint": 2500.0,
"xequil": [502.0, 0.03887505597600522, 0.0, 0.0, 0.03887505597600522, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1000.0,
9.05666543872074]
}
inputs = (
("inputs_own", F16PlantStateMessage),
*[(f"inputs_intruder{idx}", F16PlantStateMessage) for idx in range(nagents)]
)
outputs = (
("outputs", F16ControllerOutputMessage),
)
states = F16AutopilotOutputMessage
default_initial_values = {
"states": ['clear'],
"inputs_own": f16_xequil,
**{f"inputs_intruder{idx}": f16_xequil for idx in range(nagents)}
}
flows = {
"outputs": acas.model_output,
"states": acas.model_state_update
}
initialize = acas.model_init
return _F16AcasComponent
def switch_model_output(*args):
inputs = args[-1]
if inputs[-1] == "clear":
return inputs[:4]
else:
return inputs[4:8]
def switch_model_state(*args):
inputs = args[-1]
return [inputs[-1]]
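# Behaviour of the two selector flows above, shown with hypothetical argument values
# (the last positional argument is the flattened input vector: two 4-element commands
# followed by the monitor/predictor state string):
#
#   switch_model_output(None, 0.0, [], [1., 0., 0., 0., .5, 0., 0., 0., "clear"])
#   # -> [1., 0., 0., 0.]   (primary command passed through while "clear")
#   switch_model_output(None, 0.0, [], [1., 0., 0., 0., .5, 0., 0., 0., "recovery"])
#   # -> [.5, 0., 0., 0.]   (recovery command selected otherwise)
#   switch_model_state(None, 0.0, [], [1., 0., 0., 0., .5, 0., 0., 0., "recovery"])
#   # -> ["recovery"]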
class F16AcasRecoverySwitchComponent(DiscreteComponent):
name = "F16 Acas Recovery Selector"
sampling_frequency = 10.0
default_parameters = {
"mapper": ["acas", "acas_recovery"]
}
inputs = (
("inputs", F16ControllerOutputMessage),
("inputs_state", F16MonitorOutputMessage),
("inputs_recovery", F16PlantOutputMessage),
("inputs_recovery_state", F16MonitorOutputMessage),
("inputs_select", PredictorOutputMessage),
)
outputs = (
("outputs", F16ControllerOutputMessage),
("outputs_state", F16MonitorOutputMessage)
)
states = EmptyMessage
default_initial_values = {
"inputs": [0.0, ] * 4,
"inputs_recovery": [0.0, ] * 4,
"states": [],
"inputs_select": [False],
"inputs_state": ["clear"],
"inputs_recovery_state": ["clear"]
}
flows = {
"outputs": aswitch.model_output,
"outputs_state": aswitch.model_output_state
}
initialize = None
class F16CollisionPredictor(DiscreteComponent):
name = "F16 Collision Predictor"
sampling_frequency = 10.0
default_parameters: typing.Dict[str, typing.Any] = {
}
inputs = (
("inputs_own", F16PlantStateMessage),
("inputs_intruder0", F16PlantStateMessage),
)
outputs = (
("outputs", PredictorOutputMessage),
)
states = EmptyMessage
default_initial_values = {
"inputs_own": f16_xequil,
"inputs_intruder0": f16_xequil,
"states": []
}
flows = {
"outputs": predictor.model_output
}
initialize = None
class F16AutoWaypointComponent(F16AutopilotComponent):
name = "F16 Waypoint Autopilot"
default_parameters = {
"waypoints": [(5000.0, -1000.0, 1000.0)],
"airspeed": None
}
states = F16AutopilotOutputMessage
default_initial_values = {
"states": ['Waiting 1'],
"inputs_pstates": f16_xequil,
"inputs_poutputs": [0.0, 0.0, 0.0, 0.0],
}
outputs = (
("outputs", F16ControllerOutputMessage),
)
flows = {
"outputs": awaypoint.model_output,
"states": awaypoint.model_state_update
}
initialize = awaypoint.model_init
class StaticObject(DiscreteComponent):
name = "Static Object"
sampling_frequency = 1.0
default_parameters: typing.Dict[str, typing.Any] = {}
inputs = ()
outputs = ()
states = F16PlantStateMessage
default_initial_values = {
"states": f16_xequil
}
flows = {
"states": lambda m, t, s, i: s
}
class F16AcasSwitchComponent(DiscreteComponent):
name = "F16 Acas Monitor"
sampling_frequency = 10.0
default_initial_values = {
"inputs": [0.0, 0.0, 0.0, 0.0],
"inputs_recovery": [0.0, 0.0, 0.0, 0.0],
"inputs_select": ["clear"],
"states": []
}
default_parameters = {
"mapper": ["gcas", "altitude", "airspeed"]
}
states = EmptyMessage
inputs = (
("inputs", F16ControllerOutputMessage),
("inputs_recovery", F16ControllerOutputMessage),
("inputs_select", F16MonitorOutputMessage)
)
outputs = (
("outputs", F16ControllerOutputMessage),
("outputs_state", F16MonitorOutputMessage),
)
flows = {
"outputs": switch_model_output,
"outputs_state": switch_model_state
}
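# A minimal sketch of using the factory functions defined earlier in this file
# (assumes the f16lib model modules imported at the top are available):
#
#   AcasXu2 = create_nagents_acas_xu(2)
#   Predictor2 = create_collision_predictor(2)
#   # each generated class exposes one "inputs_intruder{i}" port per intruder
#   assert any(name == "inputs_intruder1" for name, _ in AcasXu2.inputs)
#   assert "inputs_intruder1" in Predictor2.default_initial_values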
| repo_path: new_csaf/f16lib/components.py | quality_prob: 0.637369 | learning_prob: 0.232169 |
from tkinter import *
from tkinter import messagebox
window=Tk()
window.title("CALCULATOR")
f=StringVar()
s=StringVar()
def calsum():
fs=int(f.get())
ss=int(s.get())
r=fs+ss
messagebox.showinfo("output","result="+str(r))
messagebox.showinfo("confirmation page","done !!")
def calsub():
fs=int(f.get())
ss=int(s.get())
r=fs-ss
messagebox.showinfo("output","result="+str(r))
messagebox.showinfo("done","result="+str(r))
messagebox.showinfo("confirmation page","done !!")
def calmul():
fs=int(f.get())
ss=int(s.get())
r=fs*ss
messagebox.showinfo("output","result="+str(r))
messagebox.showinfo("confirmation page","done !!")
def caldiv():
fs=int(f.get())
ss=int(s.get())
r=fs/ss
messagebox.showinfo("output","result="+str(r))
messagebox.showinfo("confirmation page","done !!")
def calsqr():
fs=int(f.get())
ss=int(s.get())
    r=fs**(1/ss)  # ss-th root of fs; with the second number set to 2 this is the square root
messagebox.showinfo("output","result="+str(r))
messagebox.showinfo("confirmation page","done !!")
def calsq():
fs=int(f.get())
ss=int(s.get())
r=fs**ss
messagebox.showinfo("output","result="+str(r))
messagebox.showinfo("confirmation page","done !!")
add=Button(window, text='add', command=calsum, width=15)
add.grid(column=1, row=4)
sub=Button(window, text='subtract', command=calsub, width=15)
sub.grid(column=2, row=4)
mul=Button(window, text='multiply', command=calmul, width=15)
mul.grid(column=1, row=5)
div=Button(window, text='divide', command=caldiv, width=15)
div.grid(column=2, row=5)
sq=Button(window, text='square', command=calsq, width=15)
sq.grid(column=2, row=6)
sqr=Button(window, text='square root', command=calsqr, width=15)
sqr.grid(column=1, row=6)
cancel=Button(window, text='cancel', command=window.destroy, width=15)
cancel.grid(column=1,row=7,columnspan=2)
lb1=Label(window, text='Calculation')
lb1.grid(column=1, row=1, columnspan=2)
fs=Label(window, text='Enter the Number')
fs.grid(column=1,row=2)
ss=Label(window, text='Enter the Number')
ss.grid(column=1,row=3)
# note that f and s are rebound here from StringVar to the Entry widgets themselves;
# the callbacks above still work because Entry.get() also returns the field text
f=Entry(window, width=25, textvariable=f)
f.grid(column=2,row=2)
s=Entry(window, width=25, textvariable=s)
s.grid(column=2,row=3)
window.mainloop()
| repo_path: day5/calculator.py | quality_prob: 0.266262 | learning_prob: 0.085901 |
import functools
from typing import Sequence
from absl.testing import absltest
from acme.utils import tree_utils
import numpy as np
import tree
TEST_SEQUENCE = [
{
'action': np.array([1.0]),
'observation': (np.array([0.0, 1.0, 2.0]),),
'reward': np.array(1.0),
},
{
'action': np.array([0.5]),
'observation': (np.array([1.0, 2.0, 3.0]),),
'reward': np.array(0.0),
},
{
'action': np.array([0.3]),
'observation': (np.array([2.0, 3.0, 4.0]),),
'reward': np.array(0.5),
},
]
class SequenceStackTest(absltest.TestCase):
"""Tests for various tree utilities."""
def test_stack_sequence_fields(self):
"""Tests that `stack_sequence_fields` behaves correctly on nested data."""
stacked = tree_utils.stack_sequence_fields(TEST_SEQUENCE)
# Check that the stacked output has the correct structure.
tree.assert_same_structure(stacked, TEST_SEQUENCE[0])
# Check that the leaves have the correct array shapes.
self.assertEqual(stacked['action'].shape, (3, 1))
self.assertEqual(stacked['observation'][0].shape, (3, 3))
self.assertEqual(stacked['reward'].shape, (3,))
# Check values.
self.assertEqual(stacked['observation'][0].tolist(), [
[0., 1., 2.],
[1., 2., 3.],
[2., 3., 4.],
])
self.assertEqual(stacked['action'].tolist(), [[1.], [0.5], [0.3]])
self.assertEqual(stacked['reward'].tolist(), [1., 0., 0.5])
def test_unstack_sequence_fields(self):
"""Tests that `unstack_sequence_fields(stack_sequence_fields(x)) == x`."""
stacked = tree_utils.stack_sequence_fields(TEST_SEQUENCE)
batch_size = len(TEST_SEQUENCE)
unstacked = tree_utils.unstack_sequence_fields(stacked, batch_size)
tree.map_structure(np.testing.assert_array_equal, unstacked, TEST_SEQUENCE)
def test_fast_map_structure_with_path(self):
structure = {
'a': {
'b': np.array([0.0])
},
'c': (np.array([1.0]), np.array([2.0])),
'd': [np.array(3.0), np.array(4.0)],
}
def map_fn(path: Sequence[str], x: np.ndarray, y: np.ndarray):
return x + y + len(path)
single_arg_map_fn = functools.partial(map_fn, y=np.array([0.0]))
expected_mapped_structure = (
tree.map_structure_with_path(single_arg_map_fn, structure))
mapped_structure = (
tree_utils.fast_map_structure_with_path(single_arg_map_fn, structure))
self.assertEqual(mapped_structure, expected_mapped_structure)
expected_double_mapped_structure = (
tree.map_structure_with_path(map_fn, structure, mapped_structure))
double_mapped_structure = (
tree_utils.fast_map_structure_with_path(map_fn, structure,
mapped_structure))
self.assertEqual(double_mapped_structure, expected_double_mapped_structure)
if __name__ == '__main__':
absltest.main()
| repo_path: acme/utils/tree_utils_test.py | quality_prob: 0.89915 | learning_prob: 0.756785 |
from CommonServerPython import *
import traceback
SECTIONS_TO_KEEP = ('Threat Hunting', 'Mitigation', 'Remediation', 'Eradication')
HEADER_TRANSFORM = {'id': 'Task ID', 'name': 'Task Name', 'state': 'Task State', 'completedDate': 'Completion Time'}
''' COMMAND FUNCTION '''
def add_url_to_tasks(tasks: List[Dict], workplan_url: str) -> List[Dict]:
tasks = tasks.copy()
for task in tasks:
task_id = task.get('id')
task_url = os.path.join(workplan_url, task_id)
task['id'] = f"[{task_id}]({task_url})"
return tasks
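# Example of the transformation above (hypothetical task id and workplan URL):
#   add_url_to_tasks([{"id": "3"}], "https://server/WorkPlan/42")
#   # -> [{"id": "[3](https://server/WorkPlan/42/3)"}]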
def set_incident_with_count(all_tasks: List[Dict[str, str]]):
completed_tasks = list(filter(lambda x: x['state'] == 'Completed', all_tasks))
hunting_completed_tasks = list(filter(lambda x: 'hunting' in x['section'].lower(), completed_tasks))
mitigation_completed_tasks = list(filter(lambda x: 'mitigation' in x['section'].lower(), completed_tasks))
remediation_completed_tasks = list(filter(lambda x: 'remediation' in x['section'].lower(), completed_tasks))
eradication_completed_tasks = list(filter(lambda x: 'eradication' in x['section'].lower(), completed_tasks))
number_of_total_tasks = len(all_tasks)
number_of_completed_tasks = len(completed_tasks)
number_of_remaining_tasks = number_of_total_tasks - number_of_completed_tasks
number_of_completed_hunting_tasks = len(hunting_completed_tasks)
number_of_completed_mitigation_tasks = len(mitigation_completed_tasks)
number_of_completed_remediation_tasks = len(remediation_completed_tasks)
number_of_completed_eradication_tasks = len(eradication_completed_tasks)
incident_id = demisto.incident().get('id')
incident = {'id': incident_id, 'customFields': {'totaltaskcount': number_of_total_tasks,
'completedtaskcount': number_of_completed_tasks,
'remainingtaskcount': number_of_remaining_tasks,
'eradicationtaskcount': number_of_completed_eradication_tasks,
'huntingtaskcount': number_of_completed_hunting_tasks,
'mitigationtaskcount': number_of_completed_mitigation_tasks,
'remediationtaskcount': number_of_completed_remediation_tasks}}
res = demisto.executeCommand('setIncident', incident)
if isError(res[0]):
raise DemistoException('Command setIncident was not successful')
def create_markdown_tasks() -> CommandResults:
urls = demisto.demistoUrls() # works in multi tenant env as well
workplan_url = urls.get('workPlan')
res = demisto.executeCommand('GetTasksWithSections', {})
if isError(res[0]):
raise DemistoException('Command GetTasksWithSections was not successful')
tasks_nested_results = demisto.get(res[0], 'Contents')
all_tasks, md = get_tasks_and_readable(tasks_nested_results, workplan_url)
set_incident_with_count(all_tasks)
return CommandResults(readable_output=md)
def get_tasks_and_readable(tasks_nested_results: Dict[str, Dict], workplan_url: Optional[str] = None):
# This will keep only wanted keys and sort them by their order
tasks_nested_results = get_sorted_sections(tasks_nested_results)
all_tasks: List[Dict] = []
headers = ['id', 'name', 'state', 'completedDate']
md_lst = []
for section in SECTIONS_TO_KEEP:
md_lst.append(f"## {section}")
v1 = tasks_nested_results.get(section)
if v1 is None:
md_lst.append('**No tasks found**')
continue
if 'tasks' in v1.keys():
tasks = v1.get('tasks')
all_tasks.extend(tasks) # type: ignore
tasks = add_url_to_tasks(tasks, workplan_url) if workplan_url else tasks # type: ignore
md_lst.append(
tableToMarkdown('', tasks, headers=headers, headerTransform=lambda x: HEADER_TRANSFORM.get(x)))
else:
for k2, v2 in v1.items():
tasks = v2.get('tasks')
all_tasks.extend(tasks) # type: ignore
tasks = add_url_to_tasks(tasks, workplan_url) if workplan_url else tasks # type: ignore
md_lst.append(
tableToMarkdown(k2, tasks, headers=headers, headerTransform=lambda x: HEADER_TRANSFORM.get(x)))
md = '\n'.join(md_lst)
return all_tasks, md
def get_sorted_sections(tasks_nested_results):
tasks_nested_results = {key: value for key, value in tasks_nested_results.items() if key in SECTIONS_TO_KEEP}
tasks_nested_results = {key: value for key, value in sorted(
tasks_nested_results.items(), key=lambda x: SECTIONS_TO_KEEP.index(x[0]))}
return tasks_nested_results
''' MAIN FUNCTION '''
def main():
try:
return_results(
create_markdown_tasks()
)
except Exception as ex:
demisto.error(traceback.format_exc()) # print the traceback
return_error(f'Failed to execute SetIRProceduresMarkdown. Error: {str(ex)}')
''' ENTRY POINT '''
if __name__ in ('__main__', '__builtin__', 'builtins',):
main()
| repo_path: Packs/DemistoRESTAPI/Scripts/SetIRProceduresMarkdown/SetIRProceduresMarkdown.py | quality_prob: 0.341692 | learning_prob: 0.298696 |
import arrow
from flask import request, render_template, redirect, url_for, flash, session, g
from flask_login import login_user
from flask_wtf import FlaskForm
from wtforms import StringField, validators
from app.auth.base import auth_bp
from app.config import MFA_USER_ID
from app.db import Session
from app.email_utils import send_invalid_totp_login_email
from app.extensions import limiter
from app.log import LOG
from app.models import User, RecoveryCode
class RecoveryForm(FlaskForm):
code = StringField("Code", validators=[validators.DataRequired()])
@auth_bp.route("/recovery", methods=["GET", "POST"])
@limiter.limit(
"10/minute", deduct_when=lambda r: hasattr(g, "deduct_limit") and g.deduct_limit
)
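# Note: deduct_when means a request only counts against the 10/minute limit when
# g.deduct_limit is set below (wrong or already-used codes); successful recoveries
# are not rate limited.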
def recovery_route():
# passed from login page
user_id = session.get(MFA_USER_ID)
# user access this page directly without passing by login page
if not user_id:
flash("Unknown error, redirect back to main page", "warning")
return redirect(url_for("auth.login"))
user = User.get(user_id)
if not user.two_factor_authentication_enabled():
flash("Only user with MFA enabled should go to this page", "warning")
return redirect(url_for("auth.login"))
recovery_form = RecoveryForm()
next_url = request.args.get("next")
if recovery_form.validate_on_submit():
code = recovery_form.code.data
recovery_code = RecoveryCode.get_by(user_id=user.id, code=code)
if recovery_code:
if recovery_code.used:
# Trigger rate limiter
g.deduct_limit = True
flash("Code already used", "error")
else:
del session[MFA_USER_ID]
login_user(user)
flash(f"Welcome back!", "success")
recovery_code.used = True
recovery_code.used_at = arrow.now()
Session.commit()
# User comes to login page from another page
if next_url:
LOG.d("redirect user to %s", next_url)
return redirect(next_url)
else:
LOG.d("redirect user to dashboard")
return redirect(url_for("dashboard.index"))
else:
# Trigger rate limiter
g.deduct_limit = True
flash("Incorrect code", "error")
send_invalid_totp_login_email(user, "recovery")
return render_template("auth/recovery.html", recovery_form=recovery_form)
| repo_path: app/auth/views/recovery.py | quality_prob: 0.311741 | learning_prob: 0.057335 |
from __future__ import annotations
from functools import partial
from colour.characterisation import RGB_DisplayPrimaries
from colour.hints import Dict
from colour.utilities import LazyCaseInsensitiveMapping
__author__ = "Colour Developers"
__copyright__ = "Copyright (C) 2013-2022 - Colour Developers"
__license__ = "New BSD License - https://opensource.org/licenses/BSD-3-Clause"
__maintainer__ = "Colour Developers"
__email__ = "<EMAIL>"
__status__ = "Production"
__all__ = [
"DATA_DISPLAY_PRIMARIES_CRT",
"MSDS_DISPLAY_PRIMARIES_CRT",
]
DATA_DISPLAY_PRIMARIES_CRT: Dict = {
"Typical CRT Brainard 1997": {
380.0: (0.0025, 0.0018, 0.0219),
385.0: (0.0017, 0.0016, 0.0336),
390.0: (0.0017, 0.0020, 0.0524),
395.0: (0.0011, 0.0021, 0.0785),
400.0: (0.0017, 0.0025, 0.1130),
405.0: (0.0028, 0.0030, 0.1624),
410.0: (0.0037, 0.0043, 0.2312),
415.0: (0.0046, 0.0059, 0.3214),
420.0: (0.0064, 0.0079, 0.4263),
425.0: (0.0079, 0.0104, 0.5365),
430.0: (0.0094, 0.0126, 0.6296),
435.0: (0.0105, 0.0147, 0.6994),
440.0: (0.0113, 0.0170, 0.7470),
445.0: (0.0115, 0.0191, 0.7654),
450.0: (0.0113, 0.0220, 0.7519),
455.0: (0.0113, 0.0267, 0.7151),
460.0: (0.0115, 0.0340, 0.6619),
465.0: (0.0164, 0.0462, 0.5955),
470.0: (0.0162, 0.0649, 0.5177),
475.0: (0.0120, 0.0936, 0.4327),
480.0: (0.0091, 0.1345, 0.3507),
485.0: (0.0119, 0.1862, 0.2849),
490.0: (0.0174, 0.2485, 0.2278),
495.0: (0.0218, 0.3190, 0.1809),
500.0: (0.0130, 0.3964, 0.1408),
505.0: (0.0123, 0.4691, 0.1084),
510.0: (0.0260, 0.5305, 0.0855),
515.0: (0.0242, 0.5826, 0.0676),
520.0: (0.0125, 0.6195, 0.0537),
525.0: (0.0119, 0.6386, 0.0422),
530.0: (0.0201, 0.6414, 0.0341),
535.0: (0.0596, 0.6348, 0.0284),
540.0: (0.0647, 0.6189, 0.0238),
545.0: (0.0251, 0.5932, 0.0197),
550.0: (0.0248, 0.5562, 0.0165),
555.0: (0.0325, 0.5143, 0.0143),
560.0: (0.0199, 0.4606, 0.0119),
565.0: (0.0161, 0.3993, 0.0099),
570.0: (0.0128, 0.3297, 0.0079),
575.0: (0.0217, 0.2719, 0.0065),
580.0: (0.0693, 0.2214, 0.0057),
585.0: (0.1220, 0.1769, 0.0051),
590.0: (0.1861, 0.1407, 0.0047),
595.0: (0.2173, 0.1155, 0.0043),
600.0: (0.0777, 0.0938, 0.0029),
605.0: (0.0531, 0.0759, 0.0023),
610.0: (0.2434, 0.0614, 0.0036),
615.0: (0.5812, 0.0522, 0.0061),
620.0: (0.9354, 0.0455, 0.0088),
625.0: (1.6054, 0.0437, 0.0141),
630.0: (0.6464, 0.0278, 0.0060),
635.0: (0.1100, 0.0180, 0.0015),
640.0: (0.0322, 0.0136, 0.0008),
645.0: (0.0207, 0.0107, 0.0006),
650.0: (0.0194, 0.0085, 0.0006),
655.0: (0.0196, 0.0067, 0.0007),
660.0: (0.0166, 0.0055, 0.0006),
665.0: (0.0173, 0.0044, 0.0005),
670.0: (0.0220, 0.0039, 0.0006),
675.0: (0.0186, 0.0033, 0.0005),
680.0: (0.0377, 0.0030, 0.0007),
685.0: (0.0782, 0.0028, 0.0010),
690.0: (0.0642, 0.0023, 0.0010),
695.0: (0.1214, 0.0028, 0.0016),
700.0: (0.7169, 0.0078, 0.0060),
705.0: (1.1098, 0.0113, 0.0094),
710.0: (0.3106, 0.0039, 0.0030),
715.0: (0.0241, 0.0011, 0.0007),
720.0: (0.0180, 0.0009, 0.0009),
725.0: (0.0149, 0.0008, 0.0008),
730.0: (0.0108, 0.0009, 0.0011),
735.0: (0.0097, 0.0011, 0.0010),
740.0: (0.0091, 0.0009, 0.0010),
745.0: (0.0093, 0.0010, 0.0012),
750.0: (0.0083, 0.0011, 0.0013),
755.0: (0.0073, 0.0013, 0.0012),
760.0: (0.0081, 0.0015, 0.0016),
765.0: (0.0067, 0.0018, 0.0015),
770.0: (0.0070, 0.0021, 0.0028),
775.0: (0.0073, 0.0015, 0.0046),
780.0: (0.0066, 0.0018, 0.0058),
}
}
MSDS_DISPLAY_PRIMARIES_CRT: LazyCaseInsensitiveMapping = (
LazyCaseInsensitiveMapping(
{
"Typical CRT Brainard 1997": partial(
RGB_DisplayPrimaries,
DATA_DISPLAY_PRIMARIES_CRT["Typical CRT Brainard 1997"],
name="Typical CRT Brainard 1997",
)
}
)
)
"""
Primaries multi-spectral distributions of *CRT* displays.
References
----------
:cite:`Machado2010a`
"""
| repo_path: colour/characterisation/datasets/displays/crt/primaries.py | quality_prob: 0.816772 | learning_prob: 0.185929 |
from bleach import clean
import pandas as pd
import argparse
from sklearn.model_selection import train_test_split
import preprocessor as p # forming a separate feature for cleaned tweets
from nlpaug.augmenter.word.synonym import SynonymAug
from nlpaug.augmenter.word.back_translation import BackTranslationAug
SPLIT_PROP = 0.25
parser = argparse.ArgumentParser(description='Arguments for preprocessing the data.')
parser.add_argument('-data_path', type=str, default='../datasets/tweet_emotions.csv',
help='path to where the data is stored.')
parser.add_argument('-augmentation', type=int, default=0,
help='Whether to augment the data or not.')
parser.add_argument('-last_k', type=int, default=6,
help='Which least populated columns to augment.')
parser.add_argument('-augmenter', type=str, default='synonym',
help='Which augmenter to use.')
def clean_tweets(df: pd.DataFrame) -> pd.DataFrame:
"""
Preprocess the tweets
"""
df.drop(['tweet_id'],axis=1,inplace=True)
df['content'] = df.content.apply(lambda x: p.clean(x))
return df
def augment(df:pd.DataFrame,last_k:int,augmenter='synonym')->pd.DataFrame:
"""
    Word-level augmentation of the string data in a DataFrame
    (synonym replacement or back-translation)
"""
#create the augmenter
if augmenter=='synonym':
augmenter = SynonymAug(aug_p=0.2,aug_min=1,aug_max=4)
else:
#instantiate the backwards translation
augmenter = BackTranslationAug()
#loop over columns and add their augmented versions
    for value in df.sentiment.value_counts().index.to_list()[-last_k:]:
        df_part=df[df['sentiment']==value].copy()
        # assign the result back; without the assignment the augmented text is discarded
        # and concatenating df_part merely duplicates the original rows
        df_part['content']=df_part.content.apply(lambda x: augmenter.augment(x,num_thread=4))
        df=pd.concat([df,df_part])
    return df  # TODO evaluate model and choose which features to keep
# TODO ADD requirements at the end
if __name__ == '__main__':
args = parser.parse_args()
df=pd.read_csv(args.data_path)
df=clean_tweets(df)
if args.augmentation:
df=augment(df,args.last_k,augmenter=args.augmenter)#TODO augment only the train set
X,y = df["content"], df["sentiment"]
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=42,stratify = y)
df_train = pd.concat([X_train,y_train],axis = 1)
df_test = pd.concat([X_test,y_test],axis = 1)
df_test.to_csv('../datasets/test_preprocessed.csv',index=False)
df_train.to_csv('../datasets/train_preprocessed.csv',index=False)
| repo_path: preprocessing/data_prep.py | quality_prob: 0.348091 | learning_prob: 0.183484 |
import os
import importlib
import json
from typing import Optional, Union
from flask import Response
from flask.testing import Client
import pytest
@pytest.fixture
def app(service_module_name):
# Ensure that test configuration will be loaded
os.environ["SERVER_ENVIRONMENT"] = "test"
service_module = importlib.import_module(service_module_name)
service_module.application.testing = True
service_module.application.config["PROPAGATE_EXCEPTIONS"] = False
return service_module.application
def assert_201(response: Response, expected_location: str) -> str:
"""
Assert that status code is 201.
201 stands for Created, meaning that location header is expected as well.
Assert that location header is containing the expected location (hostname trimmed for tests)
:param response: response object from service to be asserted
:param expected_location: Expected location starting from server root (eg: /xxx)
:return Location from server root.
"""
assert response.status_code == 201
actual_location = response.headers["location"].replace("http://localhost", "")
assert expected_location == actual_location
return actual_location
def assert_file(response: Response, expected_file_path: str):
"""
Assert that response is containing the bytes contained in expected file.
:param response: Received query response.
:param expected_file_path: Path to the file containing expected bytes.
"""
with open(expected_file_path, "rb") as expected_file:
assert response.data == expected_file.read()
def post_json(
client: Client, url: str, json_body: Union[dict, list], **kwargs
) -> Response:
"""
Send a POST request to this URL.
:param client: Flask test client.
:param url: Relative server URL (starts with /).
:param json_body: Python structure corresponding to the JSON to be sent.
:return: Received response.
"""
return client.post(
url, data=json.dumps(json_body), content_type="application/json", **kwargs
)
def post_file(
client: Client,
url: str,
file_name: str,
file_path: str,
    additional_json: Optional[dict] = None,
**kwargs,
) -> Response:
"""
Send a POST request to this URL.
:param client: Flask test client.
:param url: Relative server URL (starts with /).
:param file_name: Name of the parameter corresponding to the file to be sent.
:param file_path: Path to the file that should be sent.
:param additional_json: Additional JSON to be sent in body.
:return: Received response.
"""
with open(file_path, "rb") as file:
data = {file_name: (file, file_name)}
if additional_json:
data.update(additional_json)
return client.post(url, data=data, **kwargs)
def put_json(
client: Client, url: str, json_body: Union[dict, list], **kwargs
) -> Response:
"""
Send a PUT request to this URL.
:param client: Flask test client.
:param url: Relative server URL (starts with /).
:param json_body: Python structure corresponding to the JSON to be sent.
:return: Received response.
"""
return client.put(
url, data=json.dumps(json_body), content_type="application/json", **kwargs
)
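# Minimal usage sketch for the helpers above (not part of the original module). It assumes a
# hypothetical service module "my_service" exposing a Flask `application` and a hypothetical
# POST endpoint "/items"; adapt the names to the service under test.
#
#   import pytest
#   from pytest_layab.flask import app, assert_201, post_json
#
#   @pytest.fixture
#   def service_module_name():
#       return "my_service"
#
#   @pytest.fixture
#   def client(app):
#       with app.test_client() as test_client:
#           yield test_client
#
#   def test_create_item(client):
#       response = post_json(client, "/items", {"name": "first item"})
#       assert_201(response, "/items/1")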
|
pytest_layab/flask.py
|
| 0.802556 | 0.422624 |
import itertools as it
import re
import types
from plato_pylib.shared.ucell_class import UnitCell
from plato_pylib.shared.energies_class import EnergyVals
from . import parse_xyz_files as parseXyzHelp
from ..shared import custom_errors as errorHelp
from ..shared import unit_convs as uConvHelp
import pycp2k
RYD_TO_EV = uConvHelp.RYD_TO_EV
HART_TO_EV = uConvHelp.HA_TO_EV
def parseGeomFromCpInputFile(inpFile):
""" Gets a plato_pylib UnitCell object when passed a path to cp2k input file
Args:
inpFile: (str) Path to cp2k input file
Returns
outCell: (plato_pylib) UnitCell object containing the geometry in the input file
IMPORTANT:
Current implementation only works if the unit cell is represented by cell vectors in the input file
Also currently no attempt to deal with units; you get whatever units you express the geometry in
"""
parser = pycp2k.inputparser.CP2KInputParser()
inpObj = pycp2k.CP2K()
pyCp2kObj = parser.parse(inpObj, inpFile)
cellVects = _getCellVectsFromPyCp2kObj(pyCp2kObj)
useFractCoords, coords = _getCoordsFromPyCp2kObj(pyCp2kObj)
outCell = UnitCell.fromLattVects(cellVects)
if useFractCoords:
outCell.fractCoords = coords
else:
outCell.cartCoords = coords
return outCell
def _getCellVectsFromPyCp2kObj(pyCp2kObj):
cellSection = pyCp2kObj.CP2K_INPUT.FORCE_EVAL_list[-1].SUBSYS.CELL
rawVects = [getattr(cellSection,x) for x in ["A","B","C"]]
outVects = list()
for rawVect in rawVects:
        outVect = [float(x) for x in re.sub(r'\[[A-Z,a-z]*\]', '', rawVect).strip().split()]
outVects.append(outVect)
return outVects
def _getCoordsFromPyCp2kObj(pyCp2kObj):
coordSection = pyCp2kObj.CP2K_INPUT.FORCE_EVAL_list[-1].SUBSYS.COORD
scaled = coordSection.Scaled
outCoords = list()
for currAtom in coordSection.Default_keyword:
currCoord = [float(x) for x in currAtom.strip().split()[1:]]
currCoord.append( currAtom.strip().split()[0] )
outCoords.append(currCoord)
return scaled,outCoords
def parseCpout(outFile, ThrowIfTerminateFlagMissing=True):
fileAsList = _getFileAsListFromInpFile(outFile)
parser = _getStandardCpoutParser()
#TODO: Some way to maintain the ACTUAL terminate flag may be nice
if ThrowIfTerminateFlagMissing is False:
def _finalSetTerminateFlagToTrue(instance):
instance.outDict["terminate_flag_found"] = True
parser.finalStepsFunctions.append(_finalSetTerminateFlagToTrue)
try:
outDict = parser.getOutDictFromFileAsList(fileAsList)
except Exception as e:
raise errorHelp.PlatoPylibParseFileError("Something went wrong when parsing the current CP2K output file {}".format(outFile)) from e
return outDict
def _getStandardCpoutParser():
outParser = CpoutFileParser()
_addSearchWordAndFunctToParserObj("OVERLAP MATRIX CONDITION NUMBER AT GAMMA POINT", _parseOverlapCondSection, outParser)
_addSearchWordAndFunctToParserObj("BSSE RESULTS", _parseBSSESection, outParser)
_addSearchWordAndFunctToParserObj("Core Hamiltonian energy", _parseEnergiesSection, outParser)
_addSearchWordAndFunctToParserObj("T I M I N G", _parseTimingSection, outParser)
_addSearchWordAndFunctToParserObj("Total number of message passing", _parseNumbProcsSection, outParser)
_addSearchWordAndFunctToParserObj("CP2K| version string", _parseCompileInfoSection, outParser)
_addSearchWordAndFunctToParserObj("BSSE CALCULATION", _parseBSSEFragmentsInfo, outParser, handleParsedDictFunct=_handleParsedBSSEFragsInfo)
_addSearchWordAndFunctToParserObj("Hirshfeld Charges", _parseHirshfeldChargesSection, outParser, handleParsedDictFunct=_handleHirshfeldChargesInfo)
_addSearchWordAndFunctToParserObj("Mulliken Population Analysis", _parseHirshfeldChargesSection, outParser, handleParsedDictFunct=_handleMullikenChargesInfo)
_addSearchWordAndFunctToParserObj("ATOMIC FORCES in [a.u.]", _parseAtomicForcesSection, outParser, handleParsedDictFunct=_handleAtomicForcesSection)
outParser.finalStepsFunctions.append(_parseBSSEFragmentsFinalStepFunct)
return outParser
def _getFileAsListFromInpFile(inpFile):
with open(inpFile,"rt") as f:
fileAsList = f.readlines()
return fileAsList
def _addSearchWordAndFunctToParserObj(searchWord, funct, parserObj, handleParsedDictFunct=None):
decoObj = getDecoToAttachSectionParserToCpoutParser(searchWord, funct, handleParsedDictFunct=handleParsedDictFunct)
decoObj(parserObj)
#Want to introduce a way to add a new section to parse without directly modifying the parse source code
#(justified by open-closed principle)
def getDecoToAttachSectionParserToCpoutParser(pattern, parseFunction, handleParsedDictFunct=None):
""" Attaches a function to inpCls (which should be CpoutFileParser INSTANCE) for parsing a section of the output file
Args:
pattern: (str) The pattern to search for in a single line of a cpout file. Finding the pattern should trigger the parse function
parseFunction: Function with interface parsedDict, lineIdx = parseFunction(fileAsList, lineIdx). lineIdx is the index in the file where the initial arg is passed (when inp-arg) and where the section is over (when it appears as outArg). ParsedDict is simply a dictionary containing key:val pairs for this section; this is used to update the main dictionary the parser outputs.
handleParsedDictFunct: f(instance, parsedDict) Default of None means we simply update the output dict with the dict parsed from this section (usual desired behaviour). But setting this function explicitly allows for things such as parsing a series of values (e.g. temperature at each MD step) and saving ALL of them into the outptu dict (instance.outDict)
Returns
parseSectionDeco: Decorator for attaching this section parser to the overall CpoutFileParser. After calling parseSectionDeco(CpoutFileParser) any parser instances should be apply "parseFunction" upon finding "pattern" in any file its passed. Thus, this is essentially acting as a hook function for the parser behaviour.
"""
def decoFunct(inpCls):
inpCls.extraSingleLinePatterns.append(pattern)
inpCls.extraFunctsToParseFromSingleLine.append(parseFunction)
inpCls.extraHandleParsedOutputFuncts.append(handleParsedDictFunct)
return decoFunct
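# Illustrative sketch (not part of the original module): registering a custom section parser
# with the decorator above. The search pattern and the "total_energy_custom" key are assumptions
# made for this example; the interface follows the docstring of getDecoToAttachSectionParserToCpoutParser.
def _exampleParseTotalEnergyLine(fileAsList, lineIdx):
    outDict = {"total_energy_custom": float(fileAsList[lineIdx].strip().split()[-1])}
    return outDict, lineIdx + 1

def _getParserWithExampleSection():
    outParser = _getStandardCpoutParser()
    decoFunct = getDecoToAttachSectionParserToCpoutParser("ENERGY| Total FORCE_EVAL", _exampleParseTotalEnergyLine)
    decoFunct(outParser)
    return outParser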
#Parse from single line has the interface outDict, lineIdx = parseSectionStartFromLine(fileAsList, lineIdx)
class CpoutFileParser():
"""Class used to parse CP2K files; NOT meant to be called directly in code; At time of writing _getStandardCpoutParser() is the most sensible way to create this object while the parseCpout function is the best way to parse a CP2K output file
"""
def __init__(self):
self.extraSingleLinePatterns = list() #Search strings that trigger us to parse a section
self.extraFunctsToParseFromSingleLine = list() #Functions to parse the relevant sections and return a dictionary AND lineIdx (so we dont re-read lines in this section)
self.extraHandleParsedOutputFuncts = list() #These functions map the parsed-dicts to the "global" self.outDict. If set to None then we simply do self.outDict.update(parsedDict) for each section.
self.finalStepsFunctions = list()
def getOutDictFromFileAsList(self, fileAsList):
try:
outDict = self._getOutDictFromFileAsList(fileAsList)
except Exception as e:
raise errorHelp.PlatoPylibParseFileError("Something went wrong when parsing the current CP2K output file") from e
return outDict
def _getOutDictFromFileAsList(self, fileAsList):
self.outDict = self._getInitCp2kOutDict() #Attach to class so we can access it with hook functions
lineIdx=0
while lineIdx < len(fileAsList):
currLine = fileAsList[lineIdx].strip()
if currLine.find("CELL|") != -1:
self.outDict["unitCell"], lineIdx = parseCellSectionCpout(fileAsList,lineIdx)
elif currLine.find("Number of atoms:") != -1:
self.outDict["numbAtoms"] += int( currLine.split()[-1] )
lineIdx += 1
elif currLine.find("PROGRAM STARTED AT") !=-1: #Reset certain counters every time we find a new start of file
self.outDict = self._getInitCp2kOutDict()
lineIdx += 1
elif currLine.find("OPTIMIZATION STEP") != -1:
self.outDict["multiple_geom_present"] = True
lineIdx += 1
elif currLine.find("PROGRAM ENDED") != -1:
self.outDict["terminate_flag_found"] = True
lineIdx += 1
elif self._patternInExtraSingleLinePatterns(currLine):
lineIdx = self._updateDictBasedOnFindingSingleLinePatterns(fileAsList, lineIdx, self.outDict)
else:
lineIdx +=1
self._applyFinalStepsFunctions()
if self.outDict["terminate_flag_found"] is False:
raise ValueError("Termination flag not found in current cp2k output file")
return self.outDict
def _applyFinalStepsFunctions(self):
for funct in self.finalStepsFunctions:
funct(self)
def _patternInExtraSingleLinePatterns(self, currLine):
for x in self.extraSingleLinePatterns:
if currLine.find(x) != -1:
return True
return False
#TODO: Add the ability to change the update function from outside (needed for getting lists)
#Should work with multiple parse-functions on the same input pattern; though unlikely that would ever be a good idea (and returned lineIdx will just be that of the LAST matching pattern)
def _updateDictBasedOnFindingSingleLinePatterns(self, fileAsList, lineIdx, inpDict):
outLineIdx = lineIdx
for funct,pattern,handleFunct in it.zip_longest(self.extraFunctsToParseFromSingleLine,self.extraSingleLinePatterns, self.extraHandleParsedOutputFuncts):
if fileAsList[lineIdx].find(pattern) != -1:
updateDict, outLineIdx = funct(fileAsList, lineIdx)
if handleFunct is None:
inpDict.update(updateDict)
else:
handleFunct(self, updateDict)
return outLineIdx
def _getInitCp2kOutDict(self):
outDict = dict()
outDict["numbAtoms"] = 0
outDict["multiple_geom_present"] = False #Probably actually a useless output
outDict["terminate_flag_found"] = False
return outDict
def parseCellSectionCpout(fileAsList, lineIdx):
lattParams = list()
lattAngles = list()
while lineIdx < len(fileAsList):
currLine = fileAsList[lineIdx].strip()
if currLine.find("CELL|") == -1:
break
elif currLine.find("Vector")!=-1 and currLine.find("angstrom")!=-1:
lattParams.append( currLine.split()[-1] )
lineIdx += 1
elif currLine.find("Angle")!=-1 and currLine.find("degree")!=-1:
lattAngles.append( currLine.split()[-1] )
lineIdx += 1
else:
lineIdx += 1
unitCell = UnitCell(lattParams=lattParams,lattAngles=lattAngles)
return unitCell,lineIdx
def parseMOInfo(inpFile:"normal cpout but print MO keyword must be used"):
with open(inpFile,"rt") as f:
fileAsList = f.readlines()
lineIdx = 0
startSectStr = "MO EIGENVALUES AND MO OCCUPATION NUMBERS"
notInStartSectStr = "MO EIGENVALUES AND MO OCCUPATION NUMBERS AFTER SCF STEP 0"
outDict = _getInitDictForParseMO()
while lineIdx < len(fileAsList):
currLine = fileAsList[lineIdx]
if (startSectStr in currLine) and (notInStartSectStr not in currLine):
lineIdx = _parseSingleMoKpointSection(fileAsList, lineIdx, outDict)
else:
lineIdx += 1
#Change lists to None if empty
return outDict
def _getInitDictForParseMO():
outDict = {"eigenvals":list(),
"occvals": list(),
"efermi":None}
return outDict
def _parseSingleMoKpointSection(fileAsList, lineIdx, inpDict):
    allEigs, allOccs = list(), list()
    eFermi = None  # guard against the Fermi energy line being absent for this section
sectStartStr = "MO index"
sectEndStr = "Sum"
sectStart = False
while lineIdx < len(fileAsList):
currLine = fileAsList[lineIdx]
if sectStartStr in currLine:
sectStart = True
elif sectEndStr in currLine:
sectStart = False
elif sectStart:
splitLine = currLine.strip().split()
allEigs.append( float(splitLine[1])*HART_TO_EV ), allOccs.append( float(splitLine[2]) )
elif "Fermi energy" in currLine:
eFermi = float( currLine.strip().split()[-1] ) * HART_TO_EV
break
lineIdx += 1
inpDict["eigenvals"].append(allEigs)
inpDict["occvals"].append(allOccs)
inpDict["efermi"] = eFermi
return lineIdx
def _parseOverlapCondSection(fileAsList, lineIdx):
outDict = dict()
outDict["overlap_condition_number"] = None
retOutObj = False
outObj = types.SimpleNamespace( estimate=types.SimpleNamespace(oneNorm=None),
diag=types.SimpleNamespace(oneNorm=None, twoNorm=None) )
endStr = "Number of electrons"
while endStr not in fileAsList[lineIdx]:
if "1-Norm Condition Number (Estimate)" in fileAsList[lineIdx]:
lineIdx += 1
outObj.estimate.oneNorm = float( fileAsList[lineIdx].split("=")[1].strip().split()[0] )
retOutObj = True
elif "Condition Numbers using Diagonalization" in fileAsList[lineIdx]:
lineIdx += 1
outObj.diag.oneNorm = float( fileAsList[lineIdx].split("=")[-1].strip().split()[0] )
lineIdx += 1
outObj.diag.twoNorm = float( fileAsList[lineIdx].split("=")[-1].strip().split()[0] )
retOutObj = True
else:
lineIdx += 1
if retOutObj:
outDict["overlap_condition_number"] = outObj
return outDict,lineIdx-1
#This is the RESULTS section, which comes last
def _parseBSSESection(fileAsList, lineIdx):
outDict = dict()
outDict["bsse"] = None
retOutObj = False
outObj = types.SimpleNamespace(cpCorrectedTotalEnergy=None)
endStr = "BSSE-free interaction energy"
while (endStr not in fileAsList[lineIdx]) and (lineIdx<len(fileAsList)):
if "CP-corrected Total energy" in fileAsList[lineIdx]:
corrE = float( fileAsList[lineIdx].strip().split()[-2] ) * HART_TO_EV
outObj.cpCorrectedTotalEnergy = corrE
retOutObj = True
lineIdx += 1
if retOutObj:
outDict["bsse"] = outObj
return outDict, lineIdx-1
def _handleParsedBSSEFragsInfo(parserInstance, outDict):
if parserInstance.outDict.get("bsse_fragments",None) is None:
parserInstance.outDict["bsse_fragments"] = list()
parserInstance.outDict["bsse_fragments"].append(outDict)
if parserInstance.outDict.get("energies",None) is not None:
parserInstance.outDict["bsse_fragments"][-2]["energies"] = parserInstance.outDict["energies"]
def _parseBSSEFragmentsInfo(fileAsList, lineIdx):
outDict = dict()
endStr = "-----------------------------"
while (endStr not in fileAsList[lineIdx]) and (lineIdx<len(fileAsList)):
currLine = fileAsList[lineIdx]
if "FRAGMENT CONF:" in currLine:
outDict["conf"] = currLine.strip().split()[5]
outDict["frag_sub_conf"] = currLine.strip().split()[8]
elif "CHARGE" in currLine:
outDict["charge"] = int( currLine.strip().split()[3] )
outDict["multiplicity"] = int( currLine.strip().split()[6] )
elif "ATOM INDEX" in currLine:
lineIdx += 2
atomIndices, atomKinds = list(), list()
while (endStr not in fileAsList[lineIdx]) and (lineIdx<len(fileAsList)):
currLine = fileAsList[lineIdx]
atomIndices.append( int(currLine.strip().split()[1]) )
atomKinds.append( currLine.strip().split()[-2] )
lineIdx+=1
break
lineIdx += 1
outDict["indices"], outDict["kinds"] = atomIndices, atomKinds
return outDict, lineIdx
def _parseBSSEFragmentsFinalStepFunct(parserInstance):
if parserInstance.outDict.get("bsse_fragments",None) is not None:
parserInstance.outDict["bsse_fragments"][-1]["energies"] = parserInstance.outDict["energies"]
def _parseEnergiesSection(fileAsList, lineIdx):
outDict = dict()
dftTotalElectronic, dispVal, entropy, fermiE = None, None, None, None
endStr = "Total energy:"
while (endStr not in fileAsList[lineIdx]) and (lineIdx<len(fileAsList)):
if "Electronic entropic energy" in fileAsList[lineIdx]:
entropy = float( fileAsList[lineIdx].split()[-1] ) * HART_TO_EV
if "Dispersion energy" in fileAsList[lineIdx]:
dispVal = float( fileAsList[lineIdx].split()[-1] ) * HART_TO_EV
if "Fermi energy:" in fileAsList[lineIdx]:
pass
fermiE = float( fileAsList[lineIdx].split()[-1] ) * HART_TO_EV
lineIdx += 1
dftTotalElectronic = float( fileAsList[lineIdx].split()[-1] ) * HART_TO_EV
lineIdx += 1
outDict["energies"] = EnergyVals(dispersion=dispVal, entropy=entropy, dftTotalElectronic=dftTotalElectronic)
outDict["energy"] = dftTotalElectronic
if fermiE is not None:
outDict["fermi_energy"] = fermiE
return outDict,lineIdx
def _parseTimingSection(fileAsList, lineIdx):
outDict = dict()
endStr = "The number of warnings"
timingDict = dict()
subroutineTotals = dict()
while (endStr not in fileAsList[lineIdx]) and (lineIdx<len(fileAsList)):
if "CP2K " in fileAsList[lineIdx]:
timingDict["CP2K_total"] = float(fileAsList[lineIdx].strip().split()[-1])
if "-" not in fileAsList[lineIdx]:
line = fileAsList[lineIdx]
if ("SUBROUTINE" not in line) and ("MAXIMUM" not in line) and (line.strip()!=""):
currKey = line.strip().split()[0]
currVal = float(line.strip().split()[-1])
subroutineTotals[currKey] = currVal
lineIdx+=1
timingDict["subroutineTotals"] = subroutineTotals
outDict["timings"] = types.SimpleNamespace(**timingDict)
return outDict, lineIdx-1
def _parseNumbProcsSection(fileAsList, lineIdx):
outDict = dict()
endStr = "This output is from"
while (endStr not in fileAsList[lineIdx]) and (lineIdx<len(fileAsList)):
currLine = fileAsList[lineIdx]
if "Total number of message passing processes" in currLine:
outDict["nMPI"] = int(currLine.strip().split()[-1])
elif "Number of threads" in currLine:
outDict["nThreads"] = int(currLine.strip().split()[-1])
lineIdx +=1
return outDict, lineIdx
#NOTE: This probably works for ALL charges
def _parseHirshfeldChargesSection(fileAsList, lineIdx):
outDict = dict()
endStr = "!-----"
lineIdx += 1
outCharges = list()
parseCharges = True #Flag invented to deal with annoying case of Mulliken charges being mixed with orbital population
while (endStr not in fileAsList[lineIdx]) and (lineIdx<len(fileAsList)):
currLine = fileAsList[lineIdx]
if currLine.strip() == "":
pass
elif "Orbital" in currLine:
parseCharges = False #Dont try to parse anything now; but cant break since i want to get to the endStr symbol first
elif "Atom" in currLine:
pass
elif ("Total Charge".lower() in currLine.lower()) and parseCharges:
outDict["total"] = float( currLine.strip().split()[-1] )
elif parseCharges:
currCharge = float( currLine.strip().split()[-1] )
outCharges.append(currCharge)
lineIdx += 1
if parseCharges:
outDict["charges"] = outCharges
return outDict, lineIdx
def _handleHirshfeldChargesInfo(parserInstance, outDict):
parserInstance.outDict["hirshfeld_charges_final"] = outDict
def _handleMullikenChargesInfo(parserInstance, outDict):
parserInstance.outDict["mulliken_charges_final"] = outDict
def _parseAtomicForcesSection(fileAsList, lineIdx):
outDict = dict()
endStr = "SUM OF ATOMIC FORCES"
lineIdx+=3
outForces = list()
while (lineIdx<len(fileAsList)) and (endStr not in fileAsList[lineIdx]):
currLine = fileAsList[lineIdx]
splitLine = currLine.strip().split()
currVals = [float(x) for x in splitLine[-3:]]
outForces.append(currVals)
lineIdx+=1
outDict["forces"] = outForces
return outDict, lineIdx
def _handleAtomicForcesSection(parserInstance, outDict):
parserInstance.outDict["forces_final"] = outDict["forces"]
def _parseCompileInfoSection(fileAsList, lineIdx):
outDict = dict()
endStr = "is freely available from"
while (endStr not in fileAsList[lineIdx]) and (lineIdx<len(fileAsList)):
currLine = fileAsList[lineIdx]
if "CP2K| version string:" in currLine:
outDict["version_string"] = currLine.replace("CP2K| version string:","").strip()
if "CP2K| source code revision number:" in currLine:
outDict["source_code_number"] = currLine.replace("CP2K| source code revision number:","").strip()
if "CP2K| cp2kflags:" in currLine:
tempDict, lineIdx = _parseCp2kCompileFlags(fileAsList, lineIdx)
outDict.update(tempDict)
lineIdx += 1
return {"cp2k_compile_info":outDict}, lineIdx
def _parseCp2kCompileFlags(fileAsList, lineIdx):
outStr = ""
endStr = "is freely available from"
startIdx = fileAsList[lineIdx].find("CP2K| cp2kflags: ") + len("CP2K| cp2kflags: ")
while (endStr not in fileAsList[lineIdx]) and (lineIdx<len(fileAsList)):
outStr += fileAsList[lineIdx][startIdx:].strip("\n")
lineIdx += 1
return {"cp2kflags":outStr},lineIdx-1
def parseXyzFromGeomOpt(inpFile, startGeomIdx=1):
""" Description of function
Args:
inpFile: Path to xyz file
startGeomIdx: (int, optional) The index which denotes the FIRST step in a geometry optimisation. This is 1 for geo_opt but 0 in nudged elastic band calculations. The list of out geoms resets when this index is found (such that we ONLY parse results from the most recent optimisation contained in the file)
Returns
outDict: Contains "all_geoms" key which contains a list of geometries
Raises:
Errors
"""
outFileStr = _getFileStrFromInpFile(inpFile)
fileAsList = [x for x in outFileStr.split("\n") if x.strip()!='']
#Step 1 is to split the file up into individual strings for an xyz parser
lineIdx = 0
startLines, endLines = list(), list()
while lineIdx < len(fileAsList):
nAtomsLine, commentLine = fileAsList[lineIdx], fileAsList[lineIdx+1]
commentLine = commentLine.replace(","," ")
nAtoms = int( nAtomsLine.strip() )
geomIdx = int(commentLine.strip().split()[2])
if (geomIdx==startGeomIdx): #This means we only take geometries from the current job; not previous jobs of the same name
startLines, endLines = list(), list()
startLines.append(lineIdx)
endLines.append(lineIdx+nAtoms+1)
lineIdx += nAtoms + 2 #1 line per atom, 1 for comment line and we then need 1 more to move onto the next step
#Get the parsed xyz dicts for each
parsedGeoms = list()
for start,end in it.zip_longest(startLines,endLines):
currXyzAsList = fileAsList[start:end+1]
parsedGeoms.append( parseXyzHelp._parseStandardXyzFile(currXyzAsList) )
#Convert into the outDict format we want
outDict = dict()
outDict["all_geoms"] = parsedGeoms
return outDict
def _getFileStrFromInpFile(inpFile):
with open(inpFile,"rt") as f:
outStr = f.read()
return outStr
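# Minimal usage sketch (not part of the original module). The file names "geom_opt.out" and
# "geom_opt-pos-1.xyz" are placeholder assumptions; it simply ties together the public entry
# points defined above.
if __name__ == "__main__":
    parsedRun = parseCpout("geom_opt.out", ThrowIfTerminateFlagMissing=False)
    print(parsedRun["energy"], parsedRun["numbAtoms"])
    allGeoms = parseXyzFromGeomOpt("geom_opt-pos-1.xyz")["all_geoms"]
    print("Number of geometries parsed: {}".format(len(allGeoms)))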
|
plato_pylib/parseOther/parse_cp2k_files.py
|
| 0.452052 | 0.204223 |
from copy import copy
from openpyxl.cell import Cell
from openpyxl.worksheet import Worksheet
import openpyxl
from wysiwygtemplate.dictcontext import DictExcelTemplateContext
from wysiwygtemplate.looptemplate import ExcelArchetectureTemplate
from wysiwygtemplate.pyevaluator import PyEvaluator, EmbeddedPyEvaluator
from wysiwygtemplate.replacetemplate import ExcelRelpaceTemplate
from wysiwygtemplate.templatebase import ExcelProcessor
def borderfix(workbook:openpyxl.Workbook):
for sheet in workbook.worksheets:
for merged_cell in sheet.merged_cells:
border = copy(sheet.cell(merged_cell.min_row, merged_cell.min_col).border)
for row in range(merged_cell.min_row, merged_cell.max_row+1):
for col in range(merged_cell.min_col, merged_cell.max_col+1):
sheet.cell(row, col).border = copy(border)
pass
def copyCellFormat(cellsrc:Cell, sheetSrc:Worksheet, celldest:Cell, sheetDes:Worksheet):
celldest.fill = copy(cellsrc.fill)
celldest.font = copy(cellsrc.font)
celldest.border = copy(cellsrc.border)
celldest.alignment = copy(cellsrc.alignment)
celldest.number_format = copy(cellsrc.number_format)
celldest.protection = copy(cellsrc.protection)
for merged_cell in sheetSrc.merged_cells:
if merged_cell.min_col==cellsrc.col_idx and merged_cell.min_row==cellsrc.row:
sheetDes.merge_cells(start_row= celldest.row, end_row= celldest.row,
start_column= merged_cell.min_col, end_column= merged_cell.max_col)
break
class OpenpyXlExcelProcessor(ExcelProcessor):
def __init__(self, xlsfilepath, sheetnameorindex) -> None:
super().__init__(xlsfilepath)
self.workbook = openpyxl.load_workbook(xlsfilepath)
if isinstance(sheetnameorindex, int):
self.worksheet = self.workbook.worksheets[sheetnameorindex]
elif isinstance(sheetnameorindex, str):
self.worksheet = self.workbook.get_sheet_by_name(sheetnameorindex)
def bound(self) -> tuple:
s:Worksheet = self.worksheet
return s.min_row, s.max_row, s.min_column, s.max_column
def insertTemplate(self, template, context, row, col):
super().insertTemplate(template, context, row, col)
def readCellValue(self, row, col):
return self.worksheet.cell(row, col).value
def writeCellValue(self, row, col, val):
self.worksheet.cell(row, col).value = val
def insertRowBefore(self, rowdata, row):
sheet: Worksheet = self.worksheet
sheet.insert_rows(row, amount= len(rowdata))
for i in range(len(rowdata)):
for c in range(sheet.min_column,sheet.max_column+1):
cell = sheet.cell(row + i, c)
copyCellFormat(sheet.cell(row+len(rowdata), c), sheet, cell, sheet)
for i in range(len(rowdata)):
rowitem = rowdata[i]
for c in range(sheet.min_column,sheet.max_column+1):
if c-sheet.min_column>=len(rowitem): continue
sheet.cell(row+i, c, rowitem[c-sheet.min_column])
for i in range(len(rowdata)):
sheet.row_dimensions[row + len(rowdata) + i].height = sheet.row_dimensions[row + i].height if sheet.row_dimensions[row + i].height is not None else 15
for i in range(1, len(rowdata)):
sheet.row_dimensions[row + i].height = sheet.row_dimensions[row].height if sheet.row_dimensions[row].height is not None else 15
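        # Added note: openpyxl's insert_rows does not move existing merged ranges, so the block
        # below rebuilds them: a merged range that spans the insertion row is extended by
        # len(rowdata), a range lying at or below it is shifted down by len(rowdata), and each
        # remapped range is unmerged and then re-merged at its new coordinates.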
mergedmap = {}
for r in sheet.merged_cells.ranges:
if r.min_row< row:
if r.max_row>= row:
mergedmap[(r.min_row, r.max_row, r.min_col, r.max_col)] = (r.min_row, r.max_row+len(rowdata), r.min_col, r.max_col)
else:
mergedmap[(r.min_row, r.max_row, r.min_col, r.max_col)] = (r.min_row+len(rowdata), r.max_row + len(rowdata), r.min_col, r.max_col)
for min_row, max_row, min_col, max_col in mergedmap.keys():
sheet.unmerge_cells(start_row= min_row, start_column=min_col, end_row=max_row, end_column= max_col)
for min_row, max_row, min_col, max_col in mergedmap.values():
sheet.merge_cells(start_row= min_row, start_column=min_col, end_row=max_row, end_column= max_col)
pass
def insertRowAfter(self, rowdata, row):
pass
def insertColumnBefore(self, coldata, row):
super().insertColumnBefore(coldata, row)
def insertColumnAfter(self, coldata, row):
super().insertColumnAfter(coldata, row)
def deleteRow(self, row):
sheet: Worksheet = self.worksheet
sheet.delete_rows(row- sheet.min_row, 1)
removelist = []
map = {}
for r in sheet.merged_cells.ranges:
if r.min_row<= row:
if r.min_row==r.max_row==row: removelist.append(r)
elif r.max_row>= row:
map[(r.min_row, r.max_row, r.min_col, r.max_col)] = (r.min_row, r.max_row-1, r.min_col, r.max_col)
elif r.min_row> row:
map[(r.min_row, r.max_row, r.min_col, r.max_col)] = (r.min_row - 1, r.max_row - 1, r.min_col, r.max_col)
for r in removelist:
sheet.unmerge_cells(start_row=r.min_row, start_column=r.min_col, end_row=r.max_row, end_column=r.max_col)
for min_row, max_row, min_col, max_col in map.keys():
sheet.unmerge_cells(start_row= min_row, start_column=min_col, end_row=max_row, end_column= max_col)
for min_row, max_row, min_col, max_col in map.values():
sheet.merge_cells(start_row= min_row, start_column=min_col, end_row=max_row, end_column= max_col)
for i in range(row, sheet.max_row+1):
sheet.row_dimensions[i].height = sheet.row_dimensions[i+1].height if sheet.row_dimensions[i+1].height is not None else 15
def mergeCell(self, startRow, endRow, startCol, endCol):
sheet: Worksheet = self.worksheet
sheet.merge_cells(start_row = startRow, end_row= endRow, start_column= startCol, end_column= endCol)
def save(self, path):
borderfix(self.workbook)
self.workbook.save(path)
if __name__=='__main__':
processor = OpenpyXlExcelProcessor('xlstemplates/template2.xlsx','Sheet1')
evaluator = PyEvaluator()
embeddedEvaluator = EmbeddedPyEvaluator(evaluator)
context = DictExcelTemplateContext({
'parts': [
{'name': 'p1', 'items': [
{'itemname': 'p11', 'v': 'sdf1'},
{'itemname': 'p12', 'v': 'sdf2'},
{'itemname': 'p13', 'v': 'sdf3'},
{'itemname': 'p14', 'v': 'sdf3'},
]},
{'name': 'p2', 'items': [
{'itemname': 'p21', 'v': 'sdd1'},
]},
{'name': 'p3', 'items': [
{'itemname': 'p31', 'v': 'sdf1'},
{'itemname': 'p32', 'v': 'sdf2'},
{'itemname': 'p33', 'v': 'sdf3'},
]},
]
})
archtemplate = ExcelArchetectureTemplate(processor, context, evaluator)
rptemplate = ExcelRelpaceTemplate(processor, context, embeddedEvaluator)
archtemplate.process()
rptemplate.process()
processor.save('out5.xlsx')
pass
|
wysiwygtemplate/openpyprocessor.py
|
| 0.302082 | 0.308789 |
import collections
import os
import random
from typing import Deque
import gym
import numpy as np
from cartPoleDqn import DQN
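# Note on the DQN wrapper imported above (inferred from how it is used below, not a
# documented contract): the class is expected to be callable on a batch of states and
# to expose fit(), update_model() for syncing weights from another DQN, save_model()
# and load_model().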
PROJECT_PATH = os.path.abspath("C:/Users/Jan/Dropbox/_Coding/UdemyAI")
MODELS_PATH = os.path.join(PROJECT_PATH, "models")
MODEL_PATH = os.path.join(MODELS_PATH, "dqn_cartpole.h5")
class Agent:
def __init__(self, env: gym.Env):
# DQN Env Variables
self.env = env
self.observations = self.env.observation_space.shape
self.actions = self.env.action_space.n
# DQN Agent Variables
self.replay_buffer_size = 50_000
self.train_start = 1_000
self.memory: Deque = collections.deque(maxlen=self.replay_buffer_size)
self.gamma = 0.95
self.epsilon = 1.0
self.epsilon_min = 0.01
self.epsilon_decay = 0.995
# DQN Network Variables
self.state_shape = self.observations
self.learning_rate = 1e-3
self.dqn = DQN(self.state_shape, self.actions, self.learning_rate)
self.target_dqn = DQN(self.state_shape, self.actions, self.learning_rate)
self.target_dqn.update_model(self.dqn)
self.batch_size = 32
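    # The agent keeps two networks: `dqn` is trained every step, while `target_dqn`
    # provides the bootstrap targets in replay() and is re-synced from `dqn` at the
    # end of each episode (see train()). This is the standard DQN stabilisation trick.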
def get_action(self, state: np.ndarray):
if np.random.rand() <= self.epsilon:
return np.random.randint(self.actions)
else:
return np.argmax(self.dqn(state))
def train(self, num_episodes: int):
last_rewards: Deque = collections.deque(maxlen=5)
best_reward_mean = 0.0
for episode in range(1, num_episodes + 1):
total_reward = 0.0
state = self.env.reset()
state = np.reshape(state, newshape=(1, -1)).astype(np.float32)
while True:
action = self.get_action(state)
next_state, reward, done, _ = self.env.step(action)
next_state = np.reshape(next_state, newshape=(1, -1)).astype(np.float32)
if done and total_reward < 499:
reward = -100.0
self.remember(state, action, reward, next_state, done)
self.replay()
total_reward += reward
state = next_state
if done:
if total_reward < 500:
total_reward += 100.0
self.target_dqn.update_model(self.dqn)
print(f"Episode: {episode} Reward: {total_reward} Epsilon: {self.epsilon}")
last_rewards.append(total_reward)
current_reward_mean = np.mean(last_rewards)
if current_reward_mean > best_reward_mean:
best_reward_mean = current_reward_mean
self.dqn.save_model(MODEL_PATH)
print(f"New best mean: {best_reward_mean}")
break
def remember(self, state, action, reward, next_state, done):
self.memory.append((state, action, reward, next_state, done))
if self.epsilon > self.epsilon_min:
self.epsilon *= self.epsilon_decay
def replay(self):
if len(self.memory) < self.train_start:
return
minibatch = random.sample(self.memory, self.batch_size)
states, actions, rewards, states_next, dones = zip(*minibatch)
states = np.concatenate(states).astype(np.float32)
states_next = np.concatenate(states_next).astype(np.float32)
q_values = self.dqn(states)
q_values_next = self.target_dqn(states_next)
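        # Bellman backup: terminal transitions keep only the immediate reward, otherwise
        # the target is r + gamma * max_a' Q_target(s', a'). Example: reward=1.0,
        # gamma=0.95, max target Q of the next state=10.0 gives 1.0 + 0.95 * 10.0 = 10.5
        # as the target for the taken action.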
for i in range(self.batch_size):
a = actions[i]
done = dones[i]
if done:
q_values[i][a] = rewards[i]
else:
q_values[i][a] = rewards[i] + self.gamma * np.max(q_values_next[i])
self.dqn.fit(states, q_values)
def play(self, num_episodes: int, render: bool = True):
self.dqn.load_model(MODEL_PATH)
for episode in range(1, num_episodes + 1):
total_reward = 0.0
state = self.env.reset()
state = np.reshape(state, newshape=(1, -1)).astype(np.float32)
while True:
if render:
self.env.render()
action = self.get_action(state)
next_state, reward, done, _ = self.env.step(action)
next_state = np.reshape(next_state, newshape=(1, -1)).astype(np.float32)
total_reward += reward
state = next_state
if done:
print(f"Episode: {episode} Reward: {total_reward}")
break
if __name__ == "__main__":
env = gym.make("CartPole-v1")
agent = Agent(env)
# agent.train(num_episodes=200)
# input("Play?")
agent.play(num_episodes=30, render=True)
|
Chapter10_DeepQNetworks/5_Finish/cartPoleDqnAgent.py
|
| 0.684053 | 0.249659 |
import math
import os
import sys
import glob
import gc
import threading
from pathlib import Path
from simplygon9 import simplygon_loader
from simplygon9 import Simplygon
def LoadScene(sg: Simplygon.ISimplygon, path: str):
# Create scene importer
sgSceneImporter = sg.CreateSceneImporter()
sgSceneImporter.SetImportFilePath(path)
# Run scene importer.
importResult = sgSceneImporter.RunImport()
if not importResult:
raise Exception('Failed to load scene.')
sgScene = sgSceneImporter.GetScene()
return sgScene
def SaveScene(sg: Simplygon.ISimplygon, sgScene:Simplygon.spScene, path: str):
# Create scene exporter.
sgSceneExporter = sg.CreateSceneExporter()
sgSceneExporter.SetExportFilePath(path)
sgSceneExporter.SetScene(sgScene)
# Run scene exporter.
exportResult = sgSceneExporter.RunExport()
if not exportResult:
raise Exception('Failed to save scene.')
def CheckLog(sg: Simplygon.ISimplygon):
# Check if any errors occurred.
hasErrors = sg.ErrorOccurred()
if hasErrors:
errors = sg.CreateStringArray()
sg.GetErrorMessages(errors)
errorCount = errors.GetItemCount()
if errorCount > 0:
print("Errors:")
for errorIndex in range(errorCount):
errorString = errors.GetItem(errorIndex)
print(errorString)
sg.ClearErrorMessages()
else:
print("No errors.")
# Check if any warnings occurred.
hasWarnings = sg.WarningOccurred()
if hasWarnings:
warnings = sg.CreateStringArray()
sg.GetWarningMessages(warnings)
warningCount = warnings.GetItemCount()
if warningCount > 0:
print("Warnings:")
for warningIndex in range(warningCount):
warningString = warnings.GetItem(warningIndex)
print(warningString)
sg.ClearWarningMessages()
else:
print("No warnings.")
def RunGeometryDataCasting(sg: Simplygon.ISimplygon):
# Load scene to process.
sgScene = LoadScene(sg, "../../../Assets/SimplygonMan/SimplygonMan.obj")
# Create the remeshing processor.
sgRemeshingProcessor = sg.CreateRemeshingProcessor()
sgRemeshingProcessor.SetScene( sgScene )
sgRemeshingSettings = sgRemeshingProcessor.GetRemeshingSettings()
sgMappingImageSettings = sgRemeshingProcessor.GetMappingImageSettings()
# Set on-screen size target for remeshing.
sgRemeshingSettings.SetOnScreenSize( 300 )
# Generates a mapping image which is used after the remeshing to cast new materials to the new
# remeshed object.
sgMappingImageSettings.SetGenerateMappingImage( True )
sgMappingImageSettings.SetApplyNewMaterialIds( True )
sgMappingImageSettings.SetGenerateTangents( True )
sgMappingImageSettings.SetUseFullRetexturing( True )
sgOutputMaterialSettings = sgMappingImageSettings.GetOutputMaterialSettings(0)
# Setting the size of the output material for the mapping image. This will be the output size of the
# textures when we do material casting in a later stage.
sgOutputMaterialSettings.SetTextureWidth( 2048 )
sgOutputMaterialSettings.SetTextureHeight( 2048 )
# Start the remeshing process.
print("Start the remeshing process.")
sgRemeshingProcessor.RunProcessing()
# Setup and run the geometry data caster casting Coords to a texture.
print("Setup and run the geometry data caster casting Coords to a texture.")
sgGeometryData_CoordsCaster = sg.CreateGeometryDataCaster()
sgGeometryData_CoordsCaster.SetMappingImage( sgRemeshingProcessor.GetMappingImage() )
sgGeometryData_CoordsCaster.SetSourceMaterials( sgScene.GetMaterialTable() )
sgGeometryData_CoordsCaster.SetSourceTextures( sgScene.GetTextureTable() )
sgGeometryData_CoordsCaster.SetOutputFilePath( 'GeometryData_CoordsTexture' )
sgGeometryData_CoordsCasterSettings = sgGeometryData_CoordsCaster.GetGeometryDataCasterSettings()
sgGeometryData_CoordsCasterSettings.SetMaterialChannel( 'GeometryData_Coords' )
sgGeometryData_CoordsCasterSettings.SetOutputImageFileFormat( Simplygon.EImageOutputFormat_PNG )
sgGeometryData_CoordsCasterSettings.SetOutputPixelFormat( Simplygon.EPixelFormat_R16G16B16 )
sgGeometryData_CoordsCasterSettings.SetFillMode( Simplygon.EAtlasFillMode_NoFill )
sgGeometryData_CoordsCasterSettings.SetGeometryDataFieldType( Simplygon.EGeometryDataFieldType_Coords )
sgGeometryData_CoordsCasterSettings.SetGeometryDataFieldIndex( 0 )
sgGeometryData_CoordsCaster.RunProcessing()
geometrydata_coordsTextureFilePath = sgGeometryData_CoordsCaster.GetOutputFilePath()
# Setup and run the geometry data caster casting Normals to a texture.
print("Setup and run the geometry data caster casting Normals to a texture.")
sgGeometryData_NormalsCaster = sg.CreateGeometryDataCaster()
sgGeometryData_NormalsCaster.SetMappingImage( sgRemeshingProcessor.GetMappingImage() )
sgGeometryData_NormalsCaster.SetSourceMaterials( sgScene.GetMaterialTable() )
sgGeometryData_NormalsCaster.SetSourceTextures( sgScene.GetTextureTable() )
sgGeometryData_NormalsCaster.SetOutputFilePath( 'GeometryData_NormalsTexture' )
sgGeometryData_NormalsCasterSettings = sgGeometryData_NormalsCaster.GetGeometryDataCasterSettings()
sgGeometryData_NormalsCasterSettings.SetMaterialChannel( 'GeometryData_Normals' )
sgGeometryData_NormalsCasterSettings.SetOutputImageFileFormat( Simplygon.EImageOutputFormat_PNG )
sgGeometryData_NormalsCasterSettings.SetOutputPixelFormat( Simplygon.EPixelFormat_R16G16B16 )
sgGeometryData_NormalsCasterSettings.SetFillMode( Simplygon.EAtlasFillMode_NoFill )
sgGeometryData_NormalsCasterSettings.SetGeometryDataFieldType( Simplygon.EGeometryDataFieldType_Normals )
sgGeometryData_NormalsCasterSettings.SetGeometryDataFieldIndex( 0 )
sgGeometryData_NormalsCaster.RunProcessing()
geometrydata_normalsTextureFilePath = sgGeometryData_NormalsCaster.GetOutputFilePath()
# Setup and run the geometry data caster casting MaterialIds to a texture.
print("Setup and run the geometry data caster casting MaterialIds to a texture.")
sgGeometryData_MaterialIdsCaster = sg.CreateGeometryDataCaster()
sgGeometryData_MaterialIdsCaster.SetMappingImage( sgRemeshingProcessor.GetMappingImage() )
sgGeometryData_MaterialIdsCaster.SetSourceMaterials( sgScene.GetMaterialTable() )
sgGeometryData_MaterialIdsCaster.SetSourceTextures( sgScene.GetTextureTable() )
sgGeometryData_MaterialIdsCaster.SetOutputFilePath( 'GeometryData_MaterialIdsTexture' )
sgGeometryData_MaterialIdsCasterSettings = sgGeometryData_MaterialIdsCaster.GetGeometryDataCasterSettings()
sgGeometryData_MaterialIdsCasterSettings.SetMaterialChannel( 'GeometryData_MaterialIds' )
sgGeometryData_MaterialIdsCasterSettings.SetOutputImageFileFormat( Simplygon.EImageOutputFormat_PNG )
sgGeometryData_MaterialIdsCasterSettings.SetOutputPixelFormat( Simplygon.EPixelFormat_R8 )
sgGeometryData_MaterialIdsCasterSettings.SetFillMode( Simplygon.EAtlasFillMode_NoFill )
sgGeometryData_MaterialIdsCasterSettings.SetGeometryDataFieldType( Simplygon.EGeometryDataFieldType_MaterialIds )
sgGeometryData_MaterialIdsCasterSettings.SetGeometryDataFieldIndex( 0 )
sgGeometryData_MaterialIdsCaster.RunProcessing()
geometrydata_materialidsTextureFilePath = sgGeometryData_MaterialIdsCaster.GetOutputFilePath()
# Update scene with new casted textures.
sgMaterialTable = sg.CreateMaterialTable()
sgTextureTable = sg.CreateTextureTable()
sgMaterial = sg.CreateMaterial()
sgGeometryData_CoordsTexture = sg.CreateTexture()
sgGeometryData_CoordsTexture.SetName( 'GeometryData_Coords' )
sgGeometryData_CoordsTexture.SetFilePath( geometrydata_coordsTextureFilePath )
sgTextureTable.AddTexture( sgGeometryData_CoordsTexture )
sgGeometryData_CoordsTextureShadingNode = sg.CreateShadingTextureNode()
sgGeometryData_CoordsTextureShadingNode.SetTexCoordLevel( 0 )
sgGeometryData_CoordsTextureShadingNode.SetTextureName( 'GeometryData_Coords' )
sgMaterial.AddMaterialChannel( 'GeometryData_Coords' )
sgMaterial.SetShadingNetwork( 'GeometryData_Coords', sgGeometryData_CoordsTextureShadingNode )
sgGeometryData_NormalsTexture = sg.CreateTexture()
sgGeometryData_NormalsTexture.SetName( 'GeometryData_Normals' )
sgGeometryData_NormalsTexture.SetFilePath( geometrydata_normalsTextureFilePath )
sgTextureTable.AddTexture( sgGeometryData_NormalsTexture )
sgGeometryData_NormalsTextureShadingNode = sg.CreateShadingTextureNode()
sgGeometryData_NormalsTextureShadingNode.SetTexCoordLevel( 0 )
sgGeometryData_NormalsTextureShadingNode.SetTextureName( 'GeometryData_Normals' )
sgMaterial.AddMaterialChannel( 'GeometryData_Normals' )
sgMaterial.SetShadingNetwork( 'GeometryData_Normals', sgGeometryData_NormalsTextureShadingNode )
sgGeometryData_MaterialIdsTexture = sg.CreateTexture()
sgGeometryData_MaterialIdsTexture.SetName( 'GeometryData_MaterialIds' )
sgGeometryData_MaterialIdsTexture.SetFilePath( geometrydata_materialidsTextureFilePath )
sgTextureTable.AddTexture( sgGeometryData_MaterialIdsTexture )
sgGeometryData_MaterialIdsTextureShadingNode = sg.CreateShadingTextureNode()
sgGeometryData_MaterialIdsTextureShadingNode.SetTexCoordLevel( 0 )
sgGeometryData_MaterialIdsTextureShadingNode.SetTextureName( 'GeometryData_MaterialIds' )
sgMaterial.AddMaterialChannel( 'GeometryData_MaterialIds' )
sgMaterial.SetShadingNetwork( 'GeometryData_MaterialIds', sgGeometryData_MaterialIdsTextureShadingNode )
sgMaterialTable.AddMaterial( sgMaterial )
sgScene.GetTextureTable().Clear()
sgScene.GetMaterialTable().Clear()
sgScene.GetTextureTable().Copy(sgTextureTable)
sgScene.GetMaterialTable().Copy(sgMaterialTable)
# Save processed scene.
print("Save processed scene.")
SaveScene(sg, sgScene, "Output.fbx")
# Check log for any warnings or errors.
print("Check log for any warnings or errors.")
CheckLog(sg)
if __name__ == '__main__':
sg = simplygon_loader.init_simplygon()
if sg is None:
exit(Simplygon.GetLastInitializationError())
    RunGeometryDataCasting(sg)
sg = None
gc.collect()
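# The three caster blocks above differ only in the material channel name, the output
# pixel format and the geometry data field type. A possible consolidation (a sketch,
# not part of the original sample; the helper name and parameters are illustrative,
# the Simplygon calls are exactly the ones already used above):
#
#     def cast_geometry_data(sg, mapping_image, scene, channel, pixel_format, field_type):
#         caster = sg.CreateGeometryDataCaster()
#         caster.SetMappingImage(mapping_image)
#         caster.SetSourceMaterials(scene.GetMaterialTable())
#         caster.SetSourceTextures(scene.GetTextureTable())
#         caster.SetOutputFilePath(channel + 'Texture')
#         settings = caster.GetGeometryDataCasterSettings()
#         settings.SetMaterialChannel(channel)
#         settings.SetOutputImageFileFormat(Simplygon.EImageOutputFormat_PNG)
#         settings.SetOutputPixelFormat(pixel_format)
#         settings.SetFillMode(Simplygon.EAtlasFillMode_NoFill)
#         settings.SetGeometryDataFieldType(field_type)
#         settings.SetGeometryDataFieldIndex(0)
#         caster.RunProcessing()
#         return caster.GetOutputFilePath()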
|
Src/Python/GeometryDataCasting/GeometryDataCasting.py
|
| 0.415136 | 0.193566 |
"""Events that fire if messages are sent/updated/deleted."""
from __future__ import annotations
__all__: typing.List[str] = [
"MessageEvent",
"MessageCreateEvent",
"MessageUpdateEvent",
"MessageDeleteEvent",
"GuildMessageCreateEvent",
"GuildMessageUpdateEvent",
"GuildMessageDeleteEvent",
"GuildBulkMessageDeleteEvent",
"DMMessageCreateEvent",
"DMMessageUpdateEvent",
"DMMessageDeleteEvent",
]
import abc
import typing
import attr
from hikari import channels
from hikari import intents
from hikari import snowflakes
from hikari import traits
from hikari import undefined
from hikari.events import base_events
from hikari.events import shard_events
from hikari.internal import attr_extensions
if typing.TYPE_CHECKING:
from hikari import embeds as embeds_
from hikari import guilds
from hikari import messages
from hikari import users
from hikari.api import shard as shard_
@base_events.requires_intents(intents.Intents.DM_MESSAGES, intents.Intents.GUILD_MESSAGES)
class MessageEvent(shard_events.ShardEvent, abc.ABC):
"""Any event that concerns manipulation of messages."""
__slots__: typing.Sequence[str] = ()
@property
@abc.abstractmethod
def channel_id(self) -> snowflakes.Snowflake:
"""ID of the channel that this event concerns.
Returns
-------
hikari.snowflakes.Snowflake
The ID of the channel that this event concerns.
"""
@property
@abc.abstractmethod
def message_id(self) -> snowflakes.Snowflake:
"""ID of the message that this event concerns.
Returns
-------
hikari.snowflakes.Snowflake
The ID of the message that this event concerns.
"""
@base_events.requires_intents(intents.Intents.DM_MESSAGES, intents.Intents.GUILD_MESSAGES)
class MessageCreateEvent(MessageEvent, abc.ABC):
"""Event that is fired when a message is created."""
__slots__: typing.Sequence[str] = ()
@property
def app(self) -> traits.RESTAware:
# <<inherited docstring from Event>>.
return self.message.app
@property
def author(self) -> users.User:
"""User that sent the message.
Returns
-------
hikari.users.User
The user that sent the message.
"""
return self.message.author
@property
def author_id(self) -> snowflakes.Snowflake:
"""ID of the author of the message this event concerns.
Returns
-------
hikari.snowflakes.Snowflake
The ID of the author.
"""
return self.author.id
@property
def channel_id(self) -> snowflakes.Snowflake:
# <<inherited docstring from MessageEvent>>
return self.message.channel_id
@property
def content(self) -> typing.Optional[str]:
"""Content of the message.
Returns
-------
typing.Optional[builtins.str]
The content of the message, if present. This may be `builtins.None`
or an empty string (or any falsy value) if no content is present
(e.g. if only an embed was sent).
"""
return self.message.content
@property
def embeds(self) -> typing.Sequence[embeds_.Embed]:
"""Sequence of embeds in the message.
Returns
-------
typing.Sequence[hikari.embeds.Embed]
The embeds in the message.
"""
return self.message.embeds
@property
def is_bot(self) -> bool:
"""Return `builtins.True` if the message is from a bot.
Returns
-------
builtins.bool
`builtins.True` if from a bot, or `builtins.False` otherwise.
"""
return self.message.author.is_bot
@property
def is_human(self) -> bool:
"""Return `builtins.True` if the message was created by a human.
Returns
-------
builtins.bool
`builtins.True` if from a human user, or `builtins.False` otherwise.
"""
# Not second-guessing some weird edge case will occur in the future with this,
# so I am being safe rather than sorry.
return not self.message.author.is_bot and self.message.webhook_id is None
@property
def is_webhook(self) -> bool:
"""Return `builtins.True` if the message was created by a webhook.
Returns
-------
builtins.bool
`builtins.True` if from a webhook, or `builtins.False` otherwise.
"""
return self.message.webhook_id is not None
@property
@abc.abstractmethod
def message(self) -> messages.Message:
"""Message that was sent in the event.
Returns
-------
hikari.messages.Message
The message object that was sent with this event.
"""
@property
def message_id(self) -> snowflakes.Snowflake:
"""ID of the message that this event concerns.
Returns
-------
hikari.snowflakes.Snowflake
The ID of the message that this event concerns.
"""
return self.message.id
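# A minimal listener sketch for the event above (assumes a configured hikari
# GatewayBot instance named `bot`; only properties defined in this module are used):
#
#     @bot.listen(MessageCreateEvent)
#     async def on_message(event: MessageCreateEvent) -> None:
#         if event.is_human and event.content:
#             print(f"{event.author_id}: {event.content}")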
@attr_extensions.with_copy
@attr.define(kw_only=True, weakref_slot=False)
@base_events.requires_intents(intents.Intents.GUILD_MESSAGES)
class GuildMessageCreateEvent(MessageCreateEvent):
"""Event that is fired when a message is created within a guild.
This contains the full message in the internal `message` attribute.
"""
message: messages.Message = attr.field()
# <<inherited docstring from MessageCreateEvent>>
shard: shard_.GatewayShard = attr.field(metadata={attr_extensions.SKIP_DEEP_COPY: True})
# <<inherited docstring from ShardEvent>>
@property
def author(self) -> users.User:
"""User object of the user that sent the message.
Returns
-------
hikari.users.User
The user object of the user that sent the message.
"""
return self.message.author
@property
def member(self) -> typing.Optional[guilds.Member]:
"""Member object of the user that sent the message.
Returns
-------
typing.Optional[hikari.guilds.Member]
The member object of the user that sent the message or
`builtins.None` if sent by a webhook.
"""
return self.message.member
@property
def guild_id(self) -> snowflakes.Snowflake:
"""ID of the guild that this event occurred in.
Returns
-------
hikari.snowflakes.Snowflake
The ID of the guild that this event occurred in.
"""
guild_id = self.message.guild_id
# Always present on guild events
assert isinstance(guild_id, snowflakes.Snowflake), "no guild_id attribute set"
return guild_id
def get_channel(self) -> typing.Optional[channels.TextableGuildChannel]:
"""Channel that the message was sent in, if known.
Returns
-------
typing.Optional[hikari.channels.TextableGuildChannel]
The channel that the message was sent in, if known and cached,
otherwise, `builtins.None`.
"""
if not isinstance(self.app, traits.CacheAware):
return None
channel = self.app.cache.get_guild_channel(self.channel_id)
assert channel is None or isinstance(
channel, channels.TextableGuildChannel
), f"Cached channel ID is not a TextableGuildChannel, but a {type(channel).__name__}!"
return channel
def get_guild(self) -> typing.Optional[guilds.GatewayGuild]:
"""Get the cached guild that this event occurred in, if known.
!!! note
This will require the `GUILDS` intent to be specified on start-up
in order to be known.
Returns
-------
typing.Optional[hikari.guilds.GatewayGuild]
The guild that this event occurred in, if cached. Otherwise,
`builtins.None` instead.
"""
if not isinstance(self.app, traits.CacheAware):
return None
return self.app.cache.get_guild(self.guild_id)
def get_member(self) -> typing.Optional[guilds.Member]:
"""Get the member that sent this message from the cache if available.
Returns
-------
typing.Optional[hikari.guilds.Member]
Cached object of the member that sent the message if found.
"""
if isinstance(self.app, traits.CacheAware):
return self.app.cache.get_member(self.guild_id, self.message.author.id)
return None
@attr_extensions.with_copy
@attr.define(kw_only=True, weakref_slot=False)
@base_events.requires_intents(intents.Intents.DM_MESSAGES)
class DMMessageCreateEvent(MessageCreateEvent):
"""Event that is fired when a message is created within a DM.
This contains the full message in the internal `message` attribute.
"""
message: messages.Message = attr.field()
# <<inherited docstring from MessageCreateEvent>>
shard: shard_.GatewayShard = attr.field(metadata={attr_extensions.SKIP_DEEP_COPY: True})
# <<inherited docstring from ShardEvent>>
@base_events.requires_intents(intents.Intents.DM_MESSAGES, intents.Intents.GUILD_MESSAGES)
class MessageUpdateEvent(MessageEvent, abc.ABC):
"""Event that is fired when a message is updated.
!!! note
Less information will be available here than in the creation event
due to Discord limitations.
"""
__slots__: typing.Sequence[str] = ()
@property
def app(self) -> traits.RESTAware:
# <<inherited docstring from Event>>.
return self.message.app
@property
def author(self) -> undefined.UndefinedOr[users.User]:
"""User that sent the message.
This will be `hikari.undefined.UNDEFINED` in some cases such as when Discord
updates a message with an embed URL preview.
"""
return self.message.author
@property
def author_id(self) -> undefined.UndefinedOr[snowflakes.Snowflake]:
"""ID of the author that triggered this event.
This will be `hikari.undefined.UNDEFINED` in some cases such as when Discord
updates a message with an embed URL preview.
"""
author = self.message.author
return author.id if author is not undefined.UNDEFINED else undefined.UNDEFINED
@property
def channel_id(self) -> snowflakes.Snowflake:
# <<inherited docstring from MessageEvent>>.
return self.message.channel_id
@property
def content(self) -> undefined.UndefinedNoneOr[str]:
"""Content of the message.
Returns
-------
hikari.undefined.UndefinedNoneOr[builtins.str]
The content of the message, if present. This may be `builtins.None`
or an empty string (or any falsy value) if no content is present
(e.g. if only an embed was sent). If not part of the update, then
this will be `hikari.undefined.UNDEFINED` instead.
"""
return self.message.content
@property
def embeds(self) -> undefined.UndefinedOr[typing.Sequence[embeds_.Embed]]:
"""Sequence of embeds in the message.
Returns
-------
hikari.undefined.UndefinedOr[typing.Sequence[hikari.embeds.Embed]]
The embeds in the message. If the embeds were not changed in this
event, then this may instead be `hikari.undefined.UNDEFINED`.
"""
return self.message.embeds
@property
def is_bot(self) -> undefined.UndefinedOr[bool]:
"""Return `builtins.True` if the message is from a bot.
Returns
-------
        hikari.undefined.UndefinedOr[builtins.bool]
`builtins.True` if from a bot, or `builtins.False` otherwise.
If the author is not known, due to the update event being caused
by Discord adding an embed preview to accompany a URL, then this
will return `hikari.undefined.UNDEFINED` instead.
"""
if (author := self.message.author) is not undefined.UNDEFINED:
return author.is_bot
return undefined.UNDEFINED
@property
def is_human(self) -> undefined.UndefinedOr[bool]:
"""Return `builtins.True` if the message was created by a human.
Returns
-------
        hikari.undefined.UndefinedOr[builtins.bool]
`builtins.True` if from a human user, or `builtins.False` otherwise.
If the author is not known, due to the update event being caused
by Discord adding an embed preview to accompany a URL, then this
may return `hikari.undefined.UNDEFINED` instead.
"""
# Not second-guessing some weird edge case will occur in the future with this,
# so I am being safe rather than sorry.
if (webhook_id := self.message.webhook_id) is not undefined.UNDEFINED:
return webhook_id is None
if (author := self.message.author) is not undefined.UNDEFINED:
return not author.is_bot
return undefined.UNDEFINED
@property
def is_webhook(self) -> undefined.UndefinedOr[bool]:
"""Return `builtins.True` if the message was created by a webhook.
Returns
-------
        hikari.undefined.UndefinedOr[builtins.bool]
            `builtins.True` if from a webhook, or `builtins.False` otherwise.
            If the webhook status is not known, this will be
            `hikari.undefined.UNDEFINED` instead.
"""
if (webhook_id := self.message.webhook_id) is not undefined.UNDEFINED:
return webhook_id is not None
return undefined.UNDEFINED
@property
@abc.abstractmethod
def message(self) -> messages.PartialMessage:
"""Partial message that was sent in the event.
Returns
-------
hikari.messages.PartialMessage
The partial message object that was sent with this event.
"""
@property
def message_id(self) -> snowflakes.Snowflake:
"""ID of the message that this event concerns.
Returns
-------
hikari.snowflakes.Snowflake
The ID of the message that this event concerns.
"""
return self.message.id
@attr_extensions.with_copy
@attr.define(kw_only=True, weakref_slot=False)
@base_events.requires_intents(intents.Intents.GUILD_MESSAGES)
class GuildMessageUpdateEvent(MessageUpdateEvent):
"""Event that is fired when a message is updated in a guild.
!!! note
Less information will be available here than in the creation event
due to Discord limitations.
"""
old_message: typing.Optional[messages.PartialMessage] = attr.field()
"""The old message object.
    This will be `builtins.None` if the message was not found in the cache.
"""
message: messages.PartialMessage = attr.field()
# <<inherited docstring from MessageUpdateEvent>>
shard: shard_.GatewayShard = attr.field(metadata={attr_extensions.SKIP_DEEP_COPY: True})
# <<inherited docstring from ShardEvent>>
@property
def member(self) -> undefined.UndefinedNoneOr[guilds.Member]:
"""Member that sent the message if provided by the event.
If the message is not in a guild, this will be `builtins.None`.
This will also be `hikari.undefined.UNDEFINED` in some cases such as when Discord
updates a message with an embed URL preview.
"""
return self.message.member
def get_member(self) -> typing.Optional[guilds.Member]:
"""Get the member that sent this message from the cache if available.
Returns
-------
typing.Optional[hikari.guilds.Member]
Cached object of the member that sent the message if found.
"""
if self.message.author is not undefined.UNDEFINED and isinstance(self.app, traits.CacheAware):
return self.app.cache.get_member(self.guild_id, self.message.author.id)
return None
@property
def guild_id(self) -> snowflakes.Snowflake:
"""ID of the guild that this event occurred in.
Returns
-------
hikari.snowflakes.Snowflake
The ID of the guild that this event occurred in.
"""
guild_id = self.message.guild_id
# Always present on guild events
assert isinstance(guild_id, snowflakes.Snowflake), f"expected guild_id, got {guild_id}"
return guild_id
def get_channel(self) -> typing.Optional[channels.TextableGuildChannel]:
"""Channel that the message was sent in, if known.
Returns
-------
typing.Optional[hikari.channels.TextableGuildChannel]
The channel that the message was sent in, if known and cached,
otherwise, `builtins.None`.
"""
if not isinstance(self.app, traits.CacheAware):
return None
channel = self.app.cache.get_guild_channel(self.channel_id)
assert channel is None or isinstance(
channel, channels.TextableGuildChannel
), f"Cached channel ID is not a TextableGuildChannel, but a {type(channel).__name__}!"
return channel
def get_guild(self) -> typing.Optional[guilds.GatewayGuild]:
"""Get the cached guild that this event occurred in, if known.
!!! note
This will require the `GUILDS` intent to be specified on start-up
in order to be known.
Returns
-------
typing.Optional[hikari.guilds.GatewayGuild]
The guild that this event occurred in, if cached. Otherwise,
`builtins.None` instead.
"""
if not isinstance(self.app, traits.CacheAware):
return None
return self.app.cache.get_guild(self.guild_id)
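# A minimal listener sketch for guild message edits (assumes a GatewayBot named `bot`
# with the message cache enabled so `old_message` can be populated):
#
#     @bot.listen(GuildMessageUpdateEvent)
#     async def on_edit(event: GuildMessageUpdateEvent) -> None:
#         before = event.old_message.content if event.old_message else None
#         print(f"{event.message_id}: {before!r} -> {event.content!r}")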
@attr_extensions.with_copy
@attr.define(kw_only=True, weakref_slot=False)
@base_events.requires_intents(intents.Intents.DM_MESSAGES)
class DMMessageUpdateEvent(MessageUpdateEvent):
"""Event that is fired when a message is updated in a DM.
!!! note
Less information will be available here than in the creation event
due to Discord limitations.
"""
old_message: typing.Optional[messages.PartialMessage] = attr.field()
"""The old message object.
    This will be `builtins.None` if the message was not found in the cache.
"""
message: messages.PartialMessage = attr.field()
# <<inherited docstring from MessageUpdateEvent>>
shard: shard_.GatewayShard = attr.field(metadata={attr_extensions.SKIP_DEEP_COPY: True})
# <<inherited docstring from ShardEvent>>
@base_events.requires_intents(intents.Intents.GUILD_MESSAGES, intents.Intents.DM_MESSAGES)
class MessageDeleteEvent(MessageEvent, abc.ABC):
"""Special event that is triggered when a message gets deleted.
!!! note
Due to Discord limitations, most message information is unavailable
during deletion events.
"""
__slots__: typing.Sequence[str] = ()
@property
@abc.abstractmethod
def message_id(self) -> snowflakes.Snowflake:
"""ID of the message that was deleted."""
@property
@abc.abstractmethod
def old_message(self) -> typing.Optional[messages.Message]:
"""Object of the message that was deleted.
Will be `None` if the message was not found in the cache.
"""
@attr_extensions.with_copy
@attr.define(kw_only=True, weakref_slot=False)
@base_events.requires_intents(intents.Intents.GUILD_MESSAGES)
class GuildMessageDeleteEvent(MessageDeleteEvent):
"""Event that is triggered if a message is deleted in a guild.
!!! note
Due to Discord limitations, most message information is unavailable
during deletion events.
"""
app: traits.RESTAware = attr.field(metadata={attr_extensions.SKIP_DEEP_COPY: True})
# <<inherited docstring from Event>>
channel_id: snowflakes.Snowflake = attr.field()
# <<inherited docstring from MessageEvent>>
guild_id: snowflakes.Snowflake = attr.field()
"""ID of the guild that this event occurred in."""
message_id: snowflakes.Snowflake = attr.field()
# <<inherited docstring from MessageDeleteEvent>>
old_message: typing.Optional[messages.Message] = attr.field()
# <<inherited docstring from MessageDeleteEvent>>
shard: shard_.GatewayShard = attr.field(metadata={attr_extensions.SKIP_DEEP_COPY: True})
# <<inherited docstring from ShardEvent>>
def get_channel(self) -> typing.Optional[channels.TextableGuildChannel]:
"""Get the cached channel the message were sent in, if known.
Returns
-------
typing.Optional[hikari.channels.TextableGuildChannel]
            The channel the message was sent in, or `builtins.None` if not
known/cached.
"""
if not isinstance(self.app, traits.CacheAware):
return None
channel = self.app.cache.get_guild_channel(self.channel_id)
assert channel is None or isinstance(
channel, channels.TextableGuildChannel
), f"Cached channel ID is not a TextableGuildChannel, but a {type(channel).__name__}!"
return channel
def get_guild(self) -> typing.Optional[guilds.GatewayGuild]:
"""Get the cached guild this event corresponds to, if known.
!!! note
You will need `hikari.intents.Intents.GUILDS` enabled to receive this
information.
Returns
-------
hikari.guilds.GatewayGuild
The gateway guild that this event corresponds to, if known and
cached.
"""
if not isinstance(self.app, traits.CacheAware):
return None
return self.app.cache.get_guild(self.guild_id)
@attr_extensions.with_copy
@attr.define(kw_only=True, weakref_slot=False)
@base_events.requires_intents(intents.Intents.DM_MESSAGES)
class DMMessageDeleteEvent(MessageDeleteEvent):
"""Event that is triggered if a message is deleted in a DM.
!!! note
Due to Discord limitations, most message information is unavailable
during deletion events.
"""
app: traits.RESTAware = attr.field(metadata={attr_extensions.SKIP_DEEP_COPY: True})
# <<inherited docstring from Event>>
channel_id: snowflakes.Snowflake = attr.field()
# <<inherited docstring from MessageEvent>>
message_id: snowflakes.Snowflake = attr.field()
# <<inherited docstring from MessageDeleteEvent>>
old_message: typing.Optional[messages.Message] = attr.field()
# <<inherited docstring from MessageDeleteEvent>>
shard: shard_.GatewayShard = attr.field(metadata={attr_extensions.SKIP_DEEP_COPY: True})
# <<inherited docstring from ShardEvent>>
@attr_extensions.with_copy
@attr.define(kw_only=True, weakref_slot=False)
@base_events.requires_intents(intents.Intents.GUILD_MESSAGES)
class GuildBulkMessageDeleteEvent(shard_events.ShardEvent):
"""Event that is triggered when a bulk deletion is triggered in a guild.
!!! note
Due to Discord limitations, most message information is unavailable
during deletion events.
"""
app: traits.RESTAware = attr.field(metadata={attr_extensions.SKIP_DEEP_COPY: True})
# <<inherited docstring from Event>>
channel_id: snowflakes.Snowflake = attr.field()
"""ID of the channel that this event concerns."""
guild_id: snowflakes.Snowflake = attr.field()
"""ID of the guild that this event occurred in."""
message_ids: typing.AbstractSet[snowflakes.Snowflake] = attr.field()
"""Set of message IDs that were bulk deleted."""
old_messages: typing.Mapping[snowflakes.Snowflake, messages.Message] = attr.field()
"""Mapping of a snowflake to the deleted message object.
    If a message was not found in the cache, it will be missing from the mapping.
"""
shard: shard_.GatewayShard = attr.field(metadata={attr_extensions.SKIP_DEEP_COPY: True})
# <<inherited docstring from ShardEvent>>
def get_channel(self) -> typing.Optional[channels.TextableGuildChannel]:
"""Get the cached channel the messages were sent in, if known.
Returns
-------
typing.Optional[hikari.channels.TextableGuildChannel]
The channel the messages were sent in, or `builtins.None` if not
known/cached.
"""
if not isinstance(self.app, traits.CacheAware):
return None
channel = self.app.cache.get_guild_channel(self.channel_id)
assert channel is None or isinstance(
channel, channels.TextableGuildChannel
), f"Cached channel ID is not a TextableGuildChannel, but a {type(channel).__name__}!"
return channel
def get_guild(self) -> typing.Optional[guilds.GatewayGuild]:
"""Get the cached guild this event corresponds to, if known.
!!! note
You will need `hikari.intents.Intents.GUILDS` enabled to receive this
information.
Returns
-------
hikari.guilds.GatewayGuild
The gateway guild that this event corresponds to, if known and
cached.
"""
if not isinstance(self.app, traits.CacheAware):
return None
return self.app.cache.get_guild(self.guild_id)
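# A minimal listener sketch for bulk deletions (assumes a GatewayBot named `bot`; the
# cache lookups follow the `old_messages` mapping documented above):
#
#     @bot.listen(GuildBulkMessageDeleteEvent)
#     async def on_bulk_delete(event: GuildBulkMessageDeleteEvent) -> None:
#         for message_id in event.message_ids:
#             cached = event.old_messages.get(message_id)
#             print(message_id, cached.content if cached else "<not cached>")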
|
hikari/events/message_events.py
|
"""Events that fire if messages are sent/updated/deleted."""
from __future__ import annotations
__all__: typing.List[str] = [
"MessageEvent",
"MessageCreateEvent",
"MessageUpdateEvent",
"MessageDeleteEvent",
"GuildMessageCreateEvent",
"GuildMessageUpdateEvent",
"GuildMessageDeleteEvent",
"GuildBulkMessageDeleteEvent",
"DMMessageCreateEvent",
"DMMessageUpdateEvent",
"DMMessageDeleteEvent",
]
import abc
import typing
import attr
from hikari import channels
from hikari import intents
from hikari import snowflakes
from hikari import traits
from hikari import undefined
from hikari.events import base_events
from hikari.events import shard_events
from hikari.internal import attr_extensions
if typing.TYPE_CHECKING:
from hikari import embeds as embeds_
from hikari import guilds
from hikari import messages
from hikari import users
from hikari.api import shard as shard_
@base_events.requires_intents(intents.Intents.DM_MESSAGES, intents.Intents.GUILD_MESSAGES)
class MessageEvent(shard_events.ShardEvent, abc.ABC):
"""Any event that concerns manipulation of messages."""
__slots__: typing.Sequence[str] = ()
@property
@abc.abstractmethod
def channel_id(self) -> snowflakes.Snowflake:
"""ID of the channel that this event concerns.
Returns
-------
hikari.snowflakes.Snowflake
The ID of the channel that this event concerns.
"""
@property
@abc.abstractmethod
def message_id(self) -> snowflakes.Snowflake:
"""ID of the message that this event concerns.
Returns
-------
hikari.snowflakes.Snowflake
The ID of the message that this event concerns.
"""
@base_events.requires_intents(intents.Intents.DM_MESSAGES, intents.Intents.GUILD_MESSAGES)
class MessageCreateEvent(MessageEvent, abc.ABC):
"""Event that is fired when a message is created."""
__slots__: typing.Sequence[str] = ()
@property
def app(self) -> traits.RESTAware:
# <<inherited docstring from Event>>.
return self.message.app
@property
def author(self) -> users.User:
"""User that sent the message.
Returns
-------
hikari.users.User
The user that sent the message.
"""
return self.message.author
@property
def author_id(self) -> snowflakes.Snowflake:
"""ID of the author of the message this event concerns.
Returns
-------
hikari.snowflakes.Snowflake
The ID of the author.
"""
return self.author.id
@property
def channel_id(self) -> snowflakes.Snowflake:
# <<inherited docstring from MessageEvent>>
return self.message.channel_id
@property
def content(self) -> typing.Optional[str]:
"""Content of the message.
Returns
-------
typing.Optional[builtins.str]
The content of the message, if present. This may be `builtins.None`
or an empty string (or any falsy value) if no content is present
(e.g. if only an embed was sent).
"""
return self.message.content
@property
def embeds(self) -> typing.Sequence[embeds_.Embed]:
"""Sequence of embeds in the message.
Returns
-------
typing.Sequence[hikari.embeds.Embed]
The embeds in the message.
"""
return self.message.embeds
@property
def is_bot(self) -> bool:
"""Return `builtins.True` if the message is from a bot.
Returns
-------
builtins.bool
`builtins.True` if from a bot, or `builtins.False` otherwise.
"""
return self.message.author.is_bot
@property
def is_human(self) -> bool:
"""Return `builtins.True` if the message was created by a human.
Returns
-------
builtins.bool
`builtins.True` if from a human user, or `builtins.False` otherwise.
"""
# Not second-guessing some weird edge case will occur in the future with this,
# so I am being safe rather than sorry.
return not self.message.author.is_bot and self.message.webhook_id is None
@property
def is_webhook(self) -> bool:
"""Return `builtins.True` if the message was created by a webhook.
Returns
-------
builtins.bool
`builtins.True` if from a webhook, or `builtins.False` otherwise.
"""
return self.message.webhook_id is not None
@property
@abc.abstractmethod
def message(self) -> messages.Message:
"""Message that was sent in the event.
Returns
-------
hikari.messages.Message
The message object that was sent with this event.
"""
@property
def message_id(self) -> snowflakes.Snowflake:
"""ID of the message that this event concerns.
Returns
-------
hikari.snowflakes.Snowflake
The ID of the message that this event concerns.
"""
return self.message.id
@attr_extensions.with_copy
@attr.define(kw_only=True, weakref_slot=False)
@base_events.requires_intents(intents.Intents.GUILD_MESSAGES)
class GuildMessageCreateEvent(MessageCreateEvent):
"""Event that is fired when a message is created within a guild.
This contains the full message in the internal `message` attribute.
"""
message: messages.Message = attr.field()
# <<inherited docstring from MessageCreateEvent>>
shard: shard_.GatewayShard = attr.field(metadata={attr_extensions.SKIP_DEEP_COPY: True})
# <<inherited docstring from ShardEvent>>
@property
def author(self) -> users.User:
"""User object of the user that sent the message.
Returns
-------
hikari.users.User
The user object of the user that sent the message.
"""
return self.message.author
@property
def member(self) -> typing.Optional[guilds.Member]:
"""Member object of the user that sent the message.
Returns
-------
typing.Optional[hikari.guilds.Member]
The member object of the user that sent the message or
`builtins.None` if sent by a webhook.
"""
return self.message.member
@property
def guild_id(self) -> snowflakes.Snowflake:
"""ID of the guild that this event occurred in.
Returns
-------
hikari.snowflakes.Snowflake
The ID of the guild that this event occurred in.
"""
guild_id = self.message.guild_id
# Always present on guild events
assert isinstance(guild_id, snowflakes.Snowflake), "no guild_id attribute set"
return guild_id
def get_channel(self) -> typing.Optional[channels.TextableGuildChannel]:
"""Channel that the message was sent in, if known.
Returns
-------
typing.Optional[hikari.channels.TextableGuildChannel]
The channel that the message was sent in, if known and cached,
otherwise, `builtins.None`.
"""
if not isinstance(self.app, traits.CacheAware):
return None
channel = self.app.cache.get_guild_channel(self.channel_id)
assert channel is None or isinstance(
channel, channels.TextableGuildChannel
), f"Cached channel ID is not a TextableGuildChannel, but a {type(channel).__name__}!"
return channel
def get_guild(self) -> typing.Optional[guilds.GatewayGuild]:
"""Get the cached guild that this event occurred in, if known.
!!! note
This will require the `GUILDS` intent to be specified on start-up
in order to be known.
Returns
-------
typing.Optional[hikari.guilds.GatewayGuild]
The guild that this event occurred in, if cached. Otherwise,
`builtins.None` instead.
"""
if not isinstance(self.app, traits.CacheAware):
return None
return self.app.cache.get_guild(self.guild_id)
def get_member(self) -> typing.Optional[guilds.Member]:
"""Get the member that sent this message from the cache if available.
Returns
-------
typing.Optional[hikari.guilds.Member]
Cached object of the member that sent the message if found.
"""
if isinstance(self.app, traits.CacheAware):
return self.app.cache.get_member(self.guild_id, self.message.author.id)
return None
@attr_extensions.with_copy
@attr.define(kw_only=True, weakref_slot=False)
@base_events.requires_intents(intents.Intents.DM_MESSAGES)
class DMMessageCreateEvent(MessageCreateEvent):
"""Event that is fired when a message is created within a DM.
This contains the full message in the internal `message` attribute.
"""
message: messages.Message = attr.field()
# <<inherited docstring from MessageCreateEvent>>
shard: shard_.GatewayShard = attr.field(metadata={attr_extensions.SKIP_DEEP_COPY: True})
# <<inherited docstring from ShardEvent>>
@base_events.requires_intents(intents.Intents.DM_MESSAGES, intents.Intents.GUILD_MESSAGES)
class MessageUpdateEvent(MessageEvent, abc.ABC):
"""Event that is fired when a message is updated.
!!! note
Less information will be available here than in the creation event
due to Discord limitations.
"""
__slots__: typing.Sequence[str] = ()
@property
def app(self) -> traits.RESTAware:
# <<inherited docstring from Event>>.
return self.message.app
@property
def author(self) -> undefined.UndefinedOr[users.User]:
"""User that sent the message.
This will be `hikari.undefined.UNDEFINED` in some cases such as when Discord
updates a message with an embed URL preview.
"""
return self.message.author
@property
def author_id(self) -> undefined.UndefinedOr[snowflakes.Snowflake]:
"""ID of the author that triggered this event.
This will be `hikari.undefined.UNDEFINED` in some cases such as when Discord
updates a message with an embed URL preview.
"""
author = self.message.author
return author.id if author is not undefined.UNDEFINED else undefined.UNDEFINED
@property
def channel_id(self) -> snowflakes.Snowflake:
# <<inherited docstring from MessageEvent>>.
return self.message.channel_id
@property
def content(self) -> undefined.UndefinedNoneOr[str]:
"""Content of the message.
Returns
-------
hikari.undefined.UndefinedNoneOr[builtins.str]
The content of the message, if present. This may be `builtins.None`
or an empty string (or any falsy value) if no content is present
(e.g. if only an embed was sent). If not part of the update, then
this will be `hikari.undefined.UNDEFINED` instead.
"""
return self.message.content
@property
def embeds(self) -> undefined.UndefinedOr[typing.Sequence[embeds_.Embed]]:
"""Sequence of embeds in the message.
Returns
-------
hikari.undefined.UndefinedOr[typing.Sequence[hikari.embeds.Embed]]
The embeds in the message. If the embeds were not changed in this
event, then this may instead be `hikari.undefined.UNDEFINED`.
"""
return self.message.embeds
@property
def is_bot(self) -> undefined.UndefinedOr[bool]:
"""Return `builtins.True` if the message is from a bot.
Returns
-------
hikari.undefined.UndefinedOr[builtins.bool]
`builtins.True` if from a bot, or `builtins.False` otherwise.
If the author is not known, due to the update event being caused
by Discord adding an embed preview to accompany a URL, then this
will return `hikari.undefined.UNDEFINED` instead.
"""
if (author := self.message.author) is not undefined.UNDEFINED:
return author.is_bot
return undefined.UNDEFINED
@property
def is_human(self) -> undefined.UndefinedOr[bool]:
"""Return `builtins.True` if the message was created by a human.
Returns
-------
hikari.undefined.UndefinedOr[builtins.bool]
`builtins.True` if from a human user, or `builtins.False` otherwise.
If the author is not known, due to the update event being caused
by Discord adding an embed preview to accompany a URL, then this
may return `hikari.undefined.UNDEFINED` instead.
"""
# Not second-guessing some weird edge case will occur in the future with this,
# so I am being safe rather than sorry.
if (webhook_id := self.message.webhook_id) is not undefined.UNDEFINED:
return webhook_id is None
if (author := self.message.author) is not undefined.UNDEFINED:
return not author.is_bot
return undefined.UNDEFINED
@property
def is_webhook(self) -> undefined.UndefinedOr[bool]:
"""Return `builtins.True` if the message was created by a webhook.
Returns
-------
hikari.undefined.UndefinedOr[builtins.bool]
`builtins.True` if from a webhook, or `builtins.False` otherwise. If the
webhook ID was not part of the update, then this will be
`hikari.undefined.UNDEFINED` instead.
"""
if (webhook_id := self.message.webhook_id) is not undefined.UNDEFINED:
return webhook_id is not None
return undefined.UNDEFINED
@property
@abc.abstractmethod
def message(self) -> messages.PartialMessage:
"""Partial message that was sent in the event.
Returns
-------
hikari.messages.PartialMessage
The partial message object that was sent with this event.
"""
@property
def message_id(self) -> snowflakes.Snowflake:
"""ID of the message that this event concerns.
Returns
-------
hikari.snowflakes.Snowflake
The ID of the message that this event concerns.
"""
return self.message.id
@attr_extensions.with_copy
@attr.define(kw_only=True, weakref_slot=False)
@base_events.requires_intents(intents.Intents.GUILD_MESSAGES)
class GuildMessageUpdateEvent(MessageUpdateEvent):
"""Event that is fired when a message is updated in a guild.
!!! note
Less information will be available here than in the creation event
due to Discord limitations.
"""
old_message: typing.Optional[messages.PartialMessage] = attr.field()
"""The old message object.
This will be `builtins.None` if the message is missing from the cache.
"""
message: messages.PartialMessage = attr.field()
# <<inherited docstring from MessageUpdateEvent>>
shard: shard_.GatewayShard = attr.field(metadata={attr_extensions.SKIP_DEEP_COPY: True})
# <<inherited docstring from ShardEvent>>
@property
def member(self) -> undefined.UndefinedNoneOr[guilds.Member]:
"""Member that sent the message if provided by the event.
If the message is not in a guild, this will be `builtins.None`.
This will also be `hikari.undefined.UNDEFINED` in some cases such as when Discord
updates a message with an embed URL preview.
"""
return self.message.member
def get_member(self) -> typing.Optional[guilds.Member]:
"""Get the member that sent this message from the cache if available.
Returns
-------
typing.Optional[hikari.guilds.Member]
Cached object of the member that sent the message if found.
"""
if self.message.author is not undefined.UNDEFINED and isinstance(self.app, traits.CacheAware):
return self.app.cache.get_member(self.guild_id, self.message.author.id)
return None
@property
def guild_id(self) -> snowflakes.Snowflake:
"""ID of the guild that this event occurred in.
Returns
-------
hikari.snowflakes.Snowflake
The ID of the guild that this event occurred in.
"""
guild_id = self.message.guild_id
# Always present on guild events
assert isinstance(guild_id, snowflakes.Snowflake), f"expected guild_id, got {guild_id}"
return guild_id
def get_channel(self) -> typing.Optional[channels.TextableGuildChannel]:
"""Channel that the message was sent in, if known.
Returns
-------
typing.Optional[hikari.channels.TextableGuildChannel]
The channel that the message was sent in, if known and cached,
otherwise, `builtins.None`.
"""
if not isinstance(self.app, traits.CacheAware):
return None
channel = self.app.cache.get_guild_channel(self.channel_id)
assert channel is None or isinstance(
channel, channels.TextableGuildChannel
), f"Cached channel ID is not a TextableGuildChannel, but a {type(channel).__name__}!"
return channel
def get_guild(self) -> typing.Optional[guilds.GatewayGuild]:
"""Get the cached guild that this event occurred in, if known.
!!! note
This will require the `GUILDS` intent to be specified on start-up
in order to be known.
Returns
-------
typing.Optional[hikari.guilds.GatewayGuild]
The guild that this event occurred in, if cached. Otherwise,
`builtins.None` instead.
"""
if not isinstance(self.app, traits.CacheAware):
return None
return self.app.cache.get_guild(self.guild_id)
@attr_extensions.with_copy
@attr.define(kw_only=True, weakref_slot=False)
@base_events.requires_intents(intents.Intents.DM_MESSAGES)
class DMMessageUpdateEvent(MessageUpdateEvent):
"""Event that is fired when a message is updated in a DM.
!!! note
Less information will be available here than in the creation event
due to Discord limitations.
"""
old_message: typing.Optional[messages.PartialMessage] = attr.field()
"""The old message object.
This will be `builtins.None` if the message is missing from the cache.
"""
message: messages.PartialMessage = attr.field()
# <<inherited docstring from MessageUpdateEvent>>
shard: shard_.GatewayShard = attr.field(metadata={attr_extensions.SKIP_DEEP_COPY: True})
# <<inherited docstring from ShardEvent>>
@base_events.requires_intents(intents.Intents.GUILD_MESSAGES, intents.Intents.DM_MESSAGES)
class MessageDeleteEvent(MessageEvent, abc.ABC):
"""Special event that is triggered when a message gets deleted.
!!! note
Due to Discord limitations, most message information is unavailable
during deletion events.
"""
__slots__: typing.Sequence[str] = ()
@property
@abc.abstractmethod
def message_id(self) -> snowflakes.Snowflake:
"""ID of the message that was deleted."""
@property
@abc.abstractmethod
def old_message(self) -> typing.Optional[messages.Message]:
"""Object of the message that was deleted.
Will be `None` if the message was not found in the cache.
"""
@attr_extensions.with_copy
@attr.define(kw_only=True, weakref_slot=False)
@base_events.requires_intents(intents.Intents.GUILD_MESSAGES)
class GuildMessageDeleteEvent(MessageDeleteEvent):
"""Event that is triggered if a message is deleted in a guild.
!!! note
Due to Discord limitations, most message information is unavailable
during deletion events.
"""
app: traits.RESTAware = attr.field(metadata={attr_extensions.SKIP_DEEP_COPY: True})
# <<inherited docstring from Event>>
channel_id: snowflakes.Snowflake = attr.field()
# <<inherited docstring from MessageEvent>>
guild_id: snowflakes.Snowflake = attr.field()
"""ID of the guild that this event occurred in."""
message_id: snowflakes.Snowflake = attr.field()
# <<inherited docstring from MessageDeleteEvent>>
old_message: typing.Optional[messages.Message] = attr.field()
# <<inherited docstring from MessageDeleteEvent>>
shard: shard_.GatewayShard = attr.field(metadata={attr_extensions.SKIP_DEEP_COPY: True})
# <<inherited docstring from ShardEvent>>
def get_channel(self) -> typing.Optional[channels.TextableGuildChannel]:
"""Get the cached channel the message were sent in, if known.
Returns
-------
typing.Optional[hikari.channels.TextableGuildChannel]
The channel the messages were sent in, or `builtins.None` if not
known/cached.
"""
if not isinstance(self.app, traits.CacheAware):
return None
channel = self.app.cache.get_guild_channel(self.channel_id)
assert channel is None or isinstance(
channel, channels.TextableGuildChannel
), f"Cached channel ID is not a TextableGuildChannel, but a {type(channel).__name__}!"
return channel
def get_guild(self) -> typing.Optional[guilds.GatewayGuild]:
"""Get the cached guild this event corresponds to, if known.
!!! note
You will need `hikari.intents.Intents.GUILDS` enabled to receive this
information.
Returns
-------
hikari.guilds.GatewayGuild
The gateway guild that this event corresponds to, if known and
cached.
"""
if not isinstance(self.app, traits.CacheAware):
return None
return self.app.cache.get_guild(self.guild_id)
@attr_extensions.with_copy
@attr.define(kw_only=True, weakref_slot=False)
@base_events.requires_intents(intents.Intents.DM_MESSAGES)
class DMMessageDeleteEvent(MessageDeleteEvent):
"""Event that is triggered if a message is deleted in a DM.
!!! note
Due to Discord limitations, most message information is unavailable
during deletion events.
"""
app: traits.RESTAware = attr.field(metadata={attr_extensions.SKIP_DEEP_COPY: True})
# <<inherited docstring from Event>>
channel_id: snowflakes.Snowflake = attr.field()
# <<inherited docstring from MessageEvent>>
message_id: snowflakes.Snowflake = attr.field()
# <<inherited docstring from MessageDeleteEvent>>
old_message: typing.Optional[messages.Message] = attr.field()
# <<inherited docstring from MessageDeleteEvent>>
shard: shard_.GatewayShard = attr.field(metadata={attr_extensions.SKIP_DEEP_COPY: True})
# <<inherited docstring from ShardEvent>>
@attr_extensions.with_copy
@attr.define(kw_only=True, weakref_slot=False)
@base_events.requires_intents(intents.Intents.GUILD_MESSAGES)
class GuildBulkMessageDeleteEvent(shard_events.ShardEvent):
"""Event that is triggered when a bulk deletion is triggered in a guild.
!!! note
Due to Discord limitations, most message information is unavailable
during deletion events.
"""
app: traits.RESTAware = attr.field(metadata={attr_extensions.SKIP_DEEP_COPY: True})
# <<inherited docstring from Event>>
channel_id: snowflakes.Snowflake = attr.field()
"""ID of the channel that this event concerns."""
guild_id: snowflakes.Snowflake = attr.field()
"""ID of the guild that this event occurred in."""
message_ids: typing.AbstractSet[snowflakes.Snowflake] = attr.field()
"""Set of message IDs that were bulk deleted."""
old_messages: typing.Mapping[snowflakes.Snowflake, messages.Message] = attr.field()
"""Mapping of a snowflake to the deleted message object.
If the message was not found in the cache it will be missing from the mapping.
"""
shard: shard_.GatewayShard = attr.field(metadata={attr_extensions.SKIP_DEEP_COPY: True})
# <<inherited docstring from ShardEvent>>
def get_channel(self) -> typing.Optional[channels.TextableGuildChannel]:
"""Get the cached channel the messages were sent in, if known.
Returns
-------
typing.Optional[hikari.channels.TextableGuildChannel]
The channel the messages were sent in, or `builtins.None` if not
known/cached.
"""
if not isinstance(self.app, traits.CacheAware):
return None
channel = self.app.cache.get_guild_channel(self.channel_id)
assert channel is None or isinstance(
channel, channels.TextableGuildChannel
), f"Cached channel ID is not a TextableGuildChannel, but a {type(channel).__name__}!"
return channel
def get_guild(self) -> typing.Optional[guilds.GatewayGuild]:
"""Get the cached guild this event corresponds to, if known.
!!! note
You will need `hikari.intents.Intents.GUILDS` enabled to receive this
information.
Returns
-------
hikari.guilds.GatewayGuild
The gateway guild that this event corresponds to, if known and
cached.
"""
if not isinstance(self.app, traits.CacheAware):
return None
return self.app.cache.get_guild(self.guild_id)
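# ---------------------------------------------------------------------------
# Usage sketch (illustrative only): how the message events above are usually
# consumed from a hikari gateway bot.  The token and the intent set are
# placeholders and the listener bodies are examples rather than part of the
# library; the event classes are assumed to be re-exported from the top-level
# `hikari` package, which is the library's convention.
import hikari

bot = hikari.GatewayBot(token="...", intents=hikari.Intents.GUILD_MESSAGES)


@bot.listen(hikari.GuildMessageCreateEvent)
async def on_guild_message(event: hikari.GuildMessageCreateEvent) -> None:
    if not event.is_human:
        # Skip bots and webhooks.
        return
    print(f"{event.author_id} said {event.content!r} in channel {event.channel_id}")


@bot.listen(hikari.GuildMessageDeleteEvent)
async def on_guild_message_delete(event: hikari.GuildMessageDeleteEvent) -> None:
    # `old_message` is only populated if the deleted message was cached.
    print(f"message {event.message_id} deleted (cached copy: {event.old_message})")


if __name__ == "__main__":
    bot.run()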
from django.http import Http404
from django.utils.decorators import method_decorator
from django.views.decorators.cache import cache_page
import django_filters.rest_framework
from rest_framework import response, status
from rest_framework.filters import OrderingFilter, SearchFilter
from rest_framework.generics import ListAPIView, RetrieveAPIView
from web3 import Web3

from safe_transaction_service.history.services import (
    BalanceServiceProvider, CollectiblesServiceProvider)

from . import filters, serializers
from .models import Token


class TokenView(RetrieveAPIView):
    serializer_class = serializers.TokenInfoResponseSerializer
    lookup_field = 'address'
    queryset = Token.objects.all()

    @method_decorator(cache_page(60 * 60))  # Cache 1 hour, this should never change
    def get(self, request, *args, **kwargs):
        address = self.kwargs['address']
        if not Web3.isChecksumAddress(address):
            return response.Response(status=status.HTTP_422_UNPROCESSABLE_ENTITY,
                                     data={'code': 1,
                                           'message': 'Invalid ethereum address',
                                           'arguments': [address]})
        try:
            return super().get(request, *args, **kwargs)
        except Http404 as exc:  # Try to get info about the token
            token_info = (BalanceServiceProvider().get_token_info(address)
                          or CollectiblesServiceProvider().get_token_info(address))  # TODO Refactor
            if not token_info:
                raise exc
            # If token was found it will be added to database, so we try again
            return super().get(request, *args, **kwargs)


class TokensView(ListAPIView):
    serializer_class = serializers.TokenInfoResponseSerializer
    filter_backends = (django_filters.rest_framework.DjangoFilterBackend, SearchFilter, OrderingFilter)
    filterset_class = filters.TokenFilter
    search_fields = ('name', 'symbol')
    ordering_fields = '__all__'
    ordering = ('name',)
    queryset = Token.objects.all()

    @method_decorator(cache_page(60 * 15))  # Cache 15 minutes
    def get(self, request, *args, **kwargs):
        return super().get(request, *args, **kwargs)
safe_transaction_service/tokens/views.py
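# ---------------------------------------------------------------------------
# Usage sketch (illustrative only): one way the two views above might be wired
# into a urls.py.  The "tokens/" prefix and the route names are assumptions;
# only the `<str:address>` kwarg is fixed, because TokenView uses
# `lookup_field = 'address'`.
from django.urls import path

from . import views

urlpatterns = [
    path('tokens/', views.TokensView.as_view(), name='tokens'),
    path('tokens/<str:address>/', views.TokenView.as_view(), name='token'),
]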
import sys
import os, os.path
import re
import shutil
from optparse import OptionParser
def get_num_of_cpu():
''' The build process can be accelerated by running multiple concurrent job processes using the -j-option.
'''
try:
platform = sys.platform
if platform == 'win32':
if 'NUMBER_OF_PROCESSORS' in os.environ:
return int(os.environ['NUMBER_OF_PROCESSORS'])
else:
return 1
else:
from numpy.distutils import cpuinfo
return cpuinfo.cpu._getNCPUs()
except Exception:
print "Can't know cpuinfo, use default 1 cpu"
return 1
def check_environment_variables():
''' Checking the environment NDK_ROOT, which will be used for building
'''
try:
NDK_ROOT = os.environ['NDK_ROOT']
except Exception:
print "NDK_ROOT not defined. Please define NDK_ROOT in your environment"
sys.exit(1)
return NDK_ROOT
def select_toolchain_version(ndk_root):
ret_version = "4.8"
version_file_path = os.path.join(ndk_root, "RELEASE.TXT")
try:
versionFile = open(version_file_path)
lines = versionFile.readlines()
versionFile.close()
version_num = None
version_char = None
pattern = r'^[a-zA-Z]+(\d+)(\w)'
for line in lines:
str_line = line.lstrip()
match = re.match(pattern, str_line)
if match:
version_num = int(match.group(1))
version_char = match.group(2)
break
if version_num is None:
print("Parse NDK version from file %s failed." % version_file_path)
else:
version_char = version_char.lower()
if version_num > 10 or (version_num == 10 and cmp(version_char, 'c') >= 0):
ret_version = "4.9"
except:
print("Parse NDK version from file %s failed." % version_file_path)
print("NDK_TOOLCHAIN_VERSION: %s" % ret_version)
if ret_version == "4.8":
print(
"Your application may crash when using c++ 11 regular expression with NDK_TOOLCHAIN_VERSION %s" % ret_version)
return ret_version
def do_build(cocos_root, ndk_root, app_android_root, ndk_build_param,sdk_root,build_mode):
ndk_path = os.path.join(ndk_root, "ndk-build")
ndk_toolchain_version = select_toolchain_version(ndk_root)
# windows should use ";" to separate module paths
platform = sys.platform
if platform == 'win32':
ndk_module_path = 'NDK_MODULE_PATH=%s/..;%s;%s/external;%s/cocos NDK_TOOLCHAIN_VERSION=%s' % (cocos_root, cocos_root, cocos_root, cocos_root, ndk_toolchain_version)
else:
ndk_module_path = 'NDK_MODULE_PATH=%s/..:%s:%s/external:%s/cocos NDK_TOOLCHAIN_VERSION=%s' % (cocos_root, cocos_root, cocos_root, cocos_root, ndk_toolchain_version)
num_of_cpu = get_num_of_cpu()
if ndk_build_param == None:
command = '%s -j%d -C %s NDK_DEBUG=%d %s' % (ndk_path, num_of_cpu, app_android_root, build_mode=='debug', ndk_module_path)
else:
command = '%s -j%d -C %s NDK_DEBUG=%d %s %s' % (ndk_path, num_of_cpu, app_android_root, build_mode=='debug', ndk_build_param, ndk_module_path)
print command
if os.system(command) != 0:
raise Exception("Build dynamic library for project [ " + app_android_root + " ] fails!")
def copy_files(src, dst):
for item in os.listdir(src):
path = os.path.join(src, item)
# Android can not package the file that ends with ".gz"
if not item.startswith('.') and not item.endswith('.gz') and os.path.isfile(path):
shutil.copy(path, dst)
if os.path.isdir(path):
new_dst = os.path.join(dst, item)
os.mkdir(new_dst)
copy_files(path, new_dst)
def copy_resources(app_android_root):
# remove app_android_root/assets if it exists
assets_dir = os.path.join(app_android_root, "assets")
if os.path.isdir(assets_dir):
shutil.rmtree(assets_dir)
# copy resources
os.mkdir(assets_dir)
assets_res_dir = assets_dir + "/res";
assets_scripts_dir = assets_dir + "/src";
assets_jsb_dir = assets_dir + "/script";
os.mkdir(assets_res_dir);
os.mkdir(assets_scripts_dir);
os.mkdir(assets_jsb_dir);
shutil.copy(os.path.join(app_android_root, "../../../main.js"), assets_dir)
shutil.copy(os.path.join(app_android_root, "../../../project.json"), assets_dir)
resources_dir = os.path.join(app_android_root, "../../../res")
copy_files(resources_dir, assets_res_dir)
resources_dir = os.path.join(app_android_root, "../../../src")
copy_files(resources_dir, assets_scripts_dir)
resources_dir = os.path.join(app_android_root, "../../../frameworks/js-bindings/bindings/script")
copy_files(resources_dir, assets_jsb_dir)
def build(targets,ndk_build_param,build_mode):
ndk_root = check_environment_variables()
sdk_root = None
project_root = os.path.dirname(os.path.realpath(__file__))
cocos_root = os.path.join(project_root, "..", "..", "..", "frameworks/js-bindings/cocos2d-x")
print cocos_root
if build_mode is None:
build_mode = 'debug'
elif build_mode != 'release':
build_mode = 'debug'
copy_resources(project_root)
do_build(cocos_root, ndk_root, project_root,ndk_build_param,sdk_root,build_mode)
# -------------- main --------------
if __name__ == '__main__':
parser = OptionParser()
parser.add_option("-n", "--ndk", dest="ndk_build_param",
help='Parameter for ndk-build')
parser.add_option("-b", "--build", dest="build_mode",
help='The build mode for NDK project, debug or release')
(opts, args) = parser.parse_args()
try:
build(args, opts.ndk_build_param,opts.build_mode)
except Exception as e:
print e
sys.exit(1)
samples/MoonWarriors/frameworks/runtime-src/proj.android/build_native.py
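# ---------------------------------------------------------------------------
# Usage sketch (illustrative only): the script above is a Python 2 CLI tool
# driven by OptionParser and expects NDK_ROOT to be set.  The NDK path, the
# `python2` executable name and the working directory (taken from the path
# listed above) are placeholders for a real setup.
import os
import subprocess

env = dict(os.environ, NDK_ROOT='/path/to/android-ndk')  # placeholder NDK path
subprocess.check_call(
    ['python2', 'build_native.py', '-b', 'release'],
    cwd='samples/MoonWarriors/frameworks/runtime-src/proj.android',
    env=env,
)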
__author__ = '<NAME>'
import pika
import json
from pydispatch import dispatcher
VISUAL_FACE_DETECTION = 'VISUAL_FACE_DETECTION'
VISUAL_FACE_RECOGNITION ='VISUAL_FACE_RECOGNITION'
VISUAL_FACE_TRACKING = 'VISUAL_FACE_TRACKING'
VISUAL_HUMAN_TRACKING = 'VISUAL_HUMAN_TRACKING'
AUDIO_SPEECH_RECOGNITION = 'AUDIO_SPEECH_RECOGNITION'
AUDIO_TEXT_TO_SPEECH = 'AUDIO_TEXT_TO_SPEECH'
AUDIO_GENDER_RECOGNITION = 'AUDIO_GENDER_RECOGNITION'
AVATAR_DATA_TACTILE = 'AVATAR_DATA_TACTILE'
class DataHandler(object):
'class to control connection'
credential = pika.PlainCredentials('lumen', 'lumen')
isConnected = None
def __init__(self):
try:
self.connection = pika.SelectConnection(parameters=pika.ConnectionParameters('localhost', 5672, '/', DataHandler.credential),on_open_callback=self.on_connected)
DataHandler.isConnected = True
except RuntimeError as e:
print 'unable to connect', e
pass
def start(self):
self.connection.ioloop.start()
pass
def on_connected(self,connection):
connection.channel(self.on_channel_open,channel_number=1)
connection.channel(self.on_channel_open,channel_number=2)
#connection.channel(self.on_channel_open,channel_number=3)
#connection.channel(self.on_channel_open,channel_number=4)
#connection.channel(self.on_channel_open,channel_number=5)
#connection.channel(self.on_channel_open,channel_number=6)
#connection.channel(self.on_channel_open,channel_number=7)
connection.channel(self.on_channel_open,channel_number=8)
pass
def on_channel_open(self,channel):
if channel.channel_number ==1:
self.channelVisualFaceDetection = channel
self.channelVisualFaceDetection.queue_declare(self.on_queue_declareOk,queue='lumen.visual.face.detection',durable=True,exclusive=False,auto_delete=True)
elif channel.channel_number==2:
self.channelVisualFaceRecognition = channel
self.channelVisualFaceRecognition.queue_declare(self.on_queue_declareOk,queue='lumen.visual.face.recognition',durable=True,exclusive=False,auto_delete=True)
elif channel.channel_number==3:
self.channelVisualFaceTracking = channel
self.channelVisualFaceTracking.queue_declare(self.on_queue_declareOk,queue='lumen.visual.face.tracking',durable=True,exclusive=False,auto_delete=True)
elif channel.channel_number==4:
self.channelVisualHumanDetection = channel
self.channelVisualHumanDetection.queue_declare(self.on_queue_declareOk,queue='lumen.visual.human.detection',durable=True,exclusive=False,auto_delete=True)
elif channel.channel_number==5:
self.channelAudioSpeechRecognition = channel
self.channelAudioSpeechRecognition.queue_declare(self.on_queue_declareOk,queue='lumen.audio.speech.recognition',durable=True,exclusive=False,auto_delete=True)
elif channel.channel_number==6:
self.channelAudioTextToSpeech = channel
self.channelAudioTextToSpeech.queue_declare(self.on_queue_declareOk,queue='lumen.audio.text.to.speech',durable=True,exclusive=False,auto_delete=True)
elif channel.channel_number==7:
self.channelAudioGenderRecognition = channel
self.channelAudioGenderRecognition.queue_declare(self.on_queue_declareOk,queue='lumen.audio.gender.recognition',durable=True,exclusive=False,auto_delete=True)
elif channel.channel_number==8:
self.channelAvatarDataTactile = channel
self.channelAvatarDataTactile.queue_declare(self.on_queue_declareOk,queue='avatar.NAO.data.tactile',durable=True,exclusive=False,auto_delete=True)
else:
print 'print do nothing'
pass
pass
def on_queue_declareOk(self,workQueue):
if workQueue.channel_number == 1:
self.channelVisualFaceDetection.queue_bind(self.on_bindOK,queue=workQueue.method.queue,exchange='amq.topic',routing_key=workQueue.method.queue)
elif workQueue.channel_number == 2:
self.channelVisualFaceRecognition.queue_bind(self.on_bindOK,queue=workQueue.method.queue,exchange='amq.topic',routing_key=workQueue.method.queue)
elif workQueue.channel_number == 3:
self.channelVisualFaceTracking.queue_bind(self.on_bindOK,queue=workQueue.method.queue,exchange='amq.topic',routing_key=workQueue.method.queue)
elif workQueue.channel_number == 4:
self.channelVisualHumanDetection.queue_bind(self.on_bindOK,queue=workQueue.method.queue,exchange='amq.topic',routing_key=workQueue.method.queue)
elif workQueue.channel_number == 5:
self.channelAudioSpeechRecognition.queue_bind(self.on_bindOK,queue=workQueue.method.queue,exchange='amq.topic',routing_key=workQueue.method.queue)
elif workQueue.channel_number == 6:
self.channelAudioTextToSpeech.queue_bind(self.on_bindOK,queue=workQueue.method.queue,exchange='amq.topic',routing_key=workQueue.method.queue)
elif workQueue.channel_number == 7:
self.channelAudioGenderRecognition.queue_bind(self.on_bindOK,queue=workQueue.method.queue,exchange='amq.topic',routing_key=workQueue.method.queue)
elif workQueue.channel_number == 8:
self.channelAvatarDataTactile.queue_bind(self.on_bindOK,queue=workQueue.method.queue,exchange='amq.topic',routing_key=workQueue.method.queue)
else:
pass
pass
def on_bindOK(self,frame):
if frame.channel_number == 1:
self.channelVisualFaceDetection.basic_consume(self.faceDetectionCallback,queue='lumen.visual.face.detection',no_ack=True)
elif frame.channel_number==2:
self.channelVisualFaceRecognition.basic_consume(self.faceRecognitionCallback,queue='lumen.visual.face.recognition',no_ack=True)
elif frame.channel_number==3:
self.channelVisualFaceTracking.basic_consume(self.faceTrackingCallback,queue='lumen.visual.face.tracking',no_ack=True)
elif frame.channel_number==4:
self.channelVisualHumanDetection.basic_consume(self.humanDetectionCallback,queue='lumen.visual.human.detection',no_ack=True)
elif frame.channel_number==5:
self.channelAudioSpeechRecognition.basic_consume(self.speechRecognitionCallback,queue='lumen.audio.speech.recognition',no_ack=True)
elif frame.channel_number==6:
self.channelAudioTextToSpeech.basic_consume(self.textToSpeechCallback,queue='lumen.audio.text.to.speech',no_ack=True)
elif frame.channel_number==7:
self.channelAudioGenderRecognition.basic_consume(self.genderRecognitionCallback,queue='lumen.audio.gender.recognition',no_ack=True)
elif frame.channel_number==8:
self.channelAvatarDataTactile.basic_consume(self.tactileDataCallback,queue='avatar.NAO.data.tactile',no_ack=True)
else:
pass
pass
# definition of the event handlers
def faceDetectionCallback(self,ch, method, property, body):
result = json.loads(body)
faceLocation = [result['x'],result['y']]
dispatcher.send(signal=VISUAL_FACE_DETECTION,sender=self,result=faceLocation)
pass
def faceRecognitionCallback(self,ch, method, property, body):
result = json.loads(body)
faceName = result['name']
dispatcher.send(signal=VISUAL_FACE_RECOGNITION,sender=self,result = faceName)
pass
def faceTrackingCallback(self,ch, method, property, body):
dispatcher.send(signal=VISUAL_FACE_TRACKING,sender=self,result = body)
pass
def humanDetectionCallback(self,ch, method, property, body):
result = json.loads(body)
humanLocation = [result['x'],result['y']]
dispatcher.send(signal=VISUAL_HUMAN_TRACKING,sender=self,result = humanLocation)
pass
def speechRecognitionCallback(self,ch, method, property, body):
result = json.loads(body)
recognizedWord = result['result']
dispatcher.send(signal=AUDIO_SPEECH_RECOGNITION,sender=self,result = recognizedWord)
pass
def textToSpeechCallback(self,ch, method, property, body):
result = json.loads(body)
sound = result['sound']
dispatcher.send(signal=AUDIO_TEXT_TO_SPEECH,sender=self,result = sound)
pass
def genderRecognitionCallback(self,ch, method, property, body):
result = json.loads(body)
gender = result['gender']
dispatcher.send(signal=AUDIO_GENDER_RECOGNITION,sender=self,result = gender)
pass
def tactileDataCallback(self,ch, method, property, body):
result = json.loads(body)
value = result['value']
dispatcher.send(signal=AVATAR_DATA_TACTILE,sender=self,result = value)
pass
pass
Data.py
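# ---------------------------------------------------------------------------
# Usage sketch (illustrative only): consuming the pydispatch signals published
# by DataHandler above.  It assumes a RabbitMQ broker on localhost:5672 with
# the 'lumen'/'lumen' credentials used by the class, and that the module is
# importable as `Data` (per the file name listed above).
from pydispatch import dispatcher

from Data import DataHandler, VISUAL_FACE_DETECTION


def on_face_detected(result):
    # `result` is the [x, y] list sent by faceDetectionCallback.
    print('face detected at %s' % result)


dispatcher.connect(on_face_detected,
                   signal=VISUAL_FACE_DETECTION,
                   sender=dispatcher.Any)

handler = DataHandler()
handler.start()  # blocks inside pika's SelectConnection ioloop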
import os
import sys
from functools import lru_cache


@lru_cache(maxsize=None)
def _get_test_folder():
    """Get path of the main test folder.

    Path is assumed to be located somewhere above this file. This computation
    is cached as the absolute directory of the cache isn't expected to change.

    Returns:
        path to test folder (root for tests)

    Raises:
        FileNotFoundError: If tests folder could not be found.
        RuntimeError: If more than one tests folder is found at the same time.

    """
    path = os.path.abspath(os.path.dirname(__file__))
    while len(path) > 1:
        last_path = path
        find_test_dir = [
            d
            for d in os.listdir(path)
            if os.path.isdir(os.path.join(path, d)) and d == "tests"
        ]
        if len(find_test_dir) == 1:
            return os.path.join(path, find_test_dir[0])
        elif len(find_test_dir) > 1:
            raise RuntimeError("Found more than one tests directory")
        else:
            path = os.path.dirname(path)
        if path == last_path:
            break
    raise FileNotFoundError("Could not find tests directory in path")


def get_file_path(data_folder, file_name):
    """Get the path to a file inside of tests.

    Useful for paths to static files, such as data or configs.

    Args:
        data_folder: Identifies where the file is stored.
        file_name: Name of file.

    Returns:
        path to file

    """
    test_path = _get_test_folder()
    return os.path.join(test_path, data_folder, file_name)


def debug():  # noqa: D202 # pragma: no cover
    """Add pdb debugger on import.

    Utility to add pdb debugging to an entire file so that on error, the pdb
    utility is opened.

    """

    def _info(type, value, tb):
        # Source: https://stackoverflow.com/questions/242485/starting-python-debugger-automatically-on-error # noqa
        if hasattr(sys, "ps1") or not sys.stderr.isatty():
            sys.__excepthook__(type, value, tb)
        else:
            import traceback
            import pdb

            traceback.print_exception(type, value, tb)
            pdb.post_mortem(tb)

    sys.excepthook = _info


def dynamic_import(attribute, module_path):
    """Import attribute from module found at module_path at runtime.

    Args:
        attribute: the attribute of the module to import (class, function, ...)
        module_path: the path to the module.

    Returns:
        attribute from module_path.

    """
    from importlib import import_module

    mod = import_module(module_path)
    return getattr(mod, attribute)


def import_init_transformer(
    transformer_class,
    path="foreshadow.transformers.concrete",
    instantiate=True,
    params=None,
):
    """Import and init a transformer from a specified path.

    Args:
        transformer_class (str): The transformer class to import
        path (str): The import path to import from, default is
            `foreshadow.transformers.concrete`
        instantiate (bool): Whether or not to instantiate the class
        params (dict): A param dictionary

    Returns:
        object: an initialized version of the transformer

    """
    if instantiate:
        if params is not None:
            return dynamic_import(transformer_class, path)(**params)
        else:
            return dynamic_import(transformer_class, path)()
    else:
        return dynamic_import(transformer_class, path)
foreshadow/utils/testing.py
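# ---------------------------------------------------------------------------
# Usage sketch (illustrative only): exercising the helpers above.  The import
# path follows the file name listed above; the data folder, file name and the
# sklearn transformer are placeholders chosen only to show the call shapes.
from foreshadow.utils.testing import (
    dynamic_import,
    get_file_path,
    import_init_transformer,
)

# Resolve a class at runtime, then build an instance with parameters.
scaler_cls = dynamic_import("StandardScaler", "sklearn.preprocessing")
scaler = import_init_transformer(
    "StandardScaler",
    path="sklearn.preprocessing",
    params={"with_mean": False},
)

# Resolve a static test asset under the tests/ tree (hypothetical file).
csv_path = get_file_path("data", "example.csv")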
from .conf import *
from gym_electric_motor.physical_systems import *
from gym_electric_motor.utils import make_module, set_state_array
from gym_electric_motor import ReferenceGenerator, RewardFunction, PhysicalSystem, ElectricMotorVisualization, \
ConstraintMonitor
from gym_electric_motor.physical_systems import PowerElectronicConverter, MechanicalLoad, ElectricMotor, OdeSolver, \
VoltageSupply, NoiseGenerator
import gym_electric_motor.physical_systems.converters as cv
from gym_electric_motor.physical_systems.physical_systems import SCMLSystem
import numpy as np
from gym.spaces import Box, Discrete
from scipy.integrate import ode
from tests.conf import system, jacobian, permex_motor_parameter
from gym_electric_motor.utils import instantiate
from gym_electric_motor.core import Callback
# region first version
def setup_physical_system(motor_type, converter_type, subconverters=None, three_phase=False):
"""
Function to set up a physical system with test parameters
:param motor_type: motor name (string)
:param converter_type: converter name (string)
:param three_phase: if True, then a synchronous motor system will be instantiated
:return: instantiated physical system
"""
# get test parameter
tau = converter_parameter['tau']
u_sup = test_motor_parameter[motor_type]['motor_parameter']['u_sup']
motor_parameter = test_motor_parameter[motor_type]['motor_parameter'] # dict
nominal_values = test_motor_parameter[motor_type]['nominal_values'] # dict
limit_values = test_motor_parameter[motor_type]['limit_values'] # dict
# setup load
load = PolynomialStaticLoad(load_parameter=load_parameter['parameter'])
# setup voltage supply
voltage_supply = IdealVoltageSupply(u_sup)
# setup converter
if motor_type == 'DcExtEx':
if 'Disc' in converter_type:
double_converter = 'Disc-Multi'
else:
double_converter = 'Cont-Multi'
converter = make_module(PowerElectronicConverter, double_converter,
subconverters=[converter_type, converter_type],
tau=converter_parameter['tau'],
dead_time=converter_parameter['dead_time'],
interlocking_time=converter_parameter['interlocking_time'])
else:
converter = make_module(PowerElectronicConverter, converter_type,
subconverters=subconverters,
tau=converter_parameter['tau'],
dead_time=converter_parameter['dead_time'],
interlocking_time=converter_parameter['interlocking_time'])
# setup motor
motor = make_module(ElectricMotor, motor_type, motor_parameter=motor_parameter, nominal_values=nominal_values,
limit_values=limit_values)
# setup solver
solver = ScipySolveIvpSolver(method='RK45')
# combine all modules to a physical system
if three_phase:
if motor_type == "SCIM":
physical_system = SquirrelCageInductionMotorSystem(converter=converter, motor=motor, ode_solver=solver,
supply=voltage_supply, load=load, tau=tau)
elif motor_type == "DFIM":
physical_system = DoublyFedInductionMotor(converter=converter, motor=motor, ode_solver=solver,
supply=voltage_supply, load=load, tau=tau)
else:
physical_system = SynchronousMotorSystem(converter=converter, motor=motor, ode_solver=solver,
supply=voltage_supply, load=load, tau=tau)
else:
physical_system = DcMotorSystem(converter=converter, motor=motor, ode_solver=solver,
supply=voltage_supply, load=load, tau=tau)
return physical_system
def setup_reference_generator(reference_type, physical_system, reference_state='omega'):
"""
Function to set up the reference generator
:param reference_type: name of reference generator
:param physical_system: instantiated physical system
:param reference_state: referenced state name (string)
:return: instantiated reference generator
"""
reference_generator = make_module(ReferenceGenerator, reference_type, reference_state=reference_state)
reference_generator.set_modules(physical_system)
reference_generator.reset()
return reference_generator
def setup_reward_function(reward_function_type, physical_system, reference_generator, reward_weights, observed_states):
reward_function = make_module(RewardFunction, reward_function_type, observed_states=observed_states,
reward_weights=reward_weights)
reward_function.set_modules(physical_system, reference_generator)
return reward_function
def setup_dc_converter(conv, motor_type, subconverters=None):
"""
This function initializes the converter.
It differentiates between single and double converter and can be used for discrete and continuous converters.
:param conv: converter name (string)
:param motor_type: motor name (string)
:return: initialized converter
"""
if motor_type == 'DcExtEx':
# setup double converter
if 'Disc' in conv:
double_converter = 'Disc-Multi'
else:
double_converter = 'Cont-Multi'
converter = make_module(PowerElectronicConverter, double_converter,
interlocking_time=converter_parameter['interlocking_time'],
dead_time=converter_parameter['dead_time'],
subconverters=[make_module(PowerElectronicConverter, conv,
tau=converter_parameter['tau'],
dead_time=converter_parameter['dead_time'],
interlocking_time=converter_parameter['interlocking_time']),
make_module(PowerElectronicConverter, conv,
tau=converter_parameter['tau'],
dead_time=converter_parameter['dead_time'],
interlocking_time=converter_parameter['interlocking_time'])])
else:
# setup single converter
converter = make_module(PowerElectronicConverter, conv,
subconverters=subconverters,
tau=converter_parameter['tau'],
dead_time=converter_parameter['dead_time'],
interlocking_time=converter_parameter['interlocking_time'])
return converter
# endregion
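# ---------------------------------------------------------------------------
# Usage sketch (illustrative only): combining the helpers above inside a test.
# 'DcExtEx' is a motor key the helpers handle explicitly; the converter key
# 'Disc-4QC', the reference generator key 'SinusReference', the reward
# function key 'WSE' and the use of 'omega' as the observed/rewarded state are
# assumptions that must match the installed gym_electric_motor registry and
# the test configuration in tests.conf.
physical_system = setup_physical_system('DcExtEx', 'Disc-4QC')
reference_generator = setup_reference_generator(
    'SinusReference', physical_system, reference_state='omega'
)
reward_function = setup_reward_function(
    'WSE',
    physical_system,
    reference_generator,
    reward_weights={'omega': 1},
    observed_states=['omega'],
)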
# region second version
instantiate_dict = {}
def mock_instantiate(superclass, key, **kwargs):
# Instantiate the object and log the passed and returned values to validate correct function calls
instantiate_dict[superclass] = {}
instantiate_dict[superclass]['key'] = key
inst = instantiate(superclass, key, **kwargs)
instantiate_dict[superclass]['instance'] = inst
return inst
class DummyReferenceGenerator(ReferenceGenerator):
reference_space = Box(0, 1, shape=(1,))
_reset_counter = 0
def __init__(self, reference_observation=np.array([1]), reference_state='dummy_state_0', **kwargs):
self.kwargs = kwargs
self.closed = False
self.physical_system = None
self.get_reference_state = None
self.get_reference_obs_state = None
self.trajectory = np.sin(np.linspace(0, 50, 100))
self._reference_state = reference_state
self.reference_observation = reference_observation
self.reference_array = None
self.kwargs = kwargs
def set_modules(self, physical_system):
self.physical_system = physical_system
self.reference_array = np.ones_like(physical_system.state_names).astype(float)
super().set_modules(physical_system)
self._referenced_states = set_state_array(
{self._reference_state: 1}, physical_system.state_names
).astype(bool)
def reset(self, initial_state=None, initial_reference=None):
self._reset_counter += 1
res = super().reset(initial_state, initial_reference)
return res[0], res[1], self.trajectory
def get_reference(self, state, *_, **__):
self.get_reference_state = state
return self.reference_array
def get_reference_observation(self, state, *_, **__):
self.get_reference_obs_state = state
return self.reference_observation
def close(self):
self.closed = True
super().close()
class DummyRewardFunction(RewardFunction):
def __init__(self, **kwargs):
self.last_state = None
self.last_reference = None
self.last_action = None
self.last_time_step = None
self.closed = False
self.done = False
self.kwargs = kwargs
super().__init__()
def reset(self, initial_state=None, initial_reference=None):
self.last_state = initial_state
self.last_reference = initial_reference
super().reset(initial_state, initial_reference)
def reward(self, state, reference, k=None, action=None, violation_degree=0.0):
self.last_state = state
self.last_reference = reference
self.last_action = action
self.last_time_step = k
return -1 if violation_degree == 1 else 1
def close(self):
self.closed = True
super().close()
def _limit_violation_reward(self, state):
pass
def _reward(self, state, reference, action):
pass
class DummyPhysicalSystem(PhysicalSystem):
@property
def limits(self):
"""
Returns:
ndarray(float): An array containing the maximum allowed physical values for each state variable.
"""
return self._limits
@property
def nominal_state(self):
"""
Returns:
ndarray(float): An array containing the nominal values for each state variable.
"""
return self._nominal_values
def __init__(self, state_length=1, state_names='dummy_state', **kwargs):
super().__init__(
Box(-1, 1, shape=(1,)), Box(-1, 1, shape=(state_length,)),
[f'{state_names}_{i}' for i in range(state_length)], 1
)
self._limits = np.array([10 * (i + 1) for i in range(state_length)])
self._nominal_values = np.array([(i + 1) for i in range(state_length)])
self.action = None
self.state = None
self.closed = False
self.kwargs = kwargs
def reset(self, initial_state=None):
self.state = np.array([0] * len(self._state_names))
return self.state
def simulate(self, action):
self.action = action
self.state = np.array([action * (i + 1) for i in range(len(self._state_names))])
return self.state
def close(self):
self.closed = True
super().close()
class DummyVisualization(ElectricMotorVisualization):
def __init__(self, **kwargs):
self.closed = False
self.state = None
self.reference = None
self.reward = None
self.reference_trajectory = None
self.physical_system = None
self.reference_generator = None
self.reward_function = None
self.kwargs = kwargs
super().__init__()
def step(self, state, reference, reward, *_, **__):
self.state = state
self.reference = reference
self.reward = reward
def reset(self, reference_trajectories=None, *_, **__):
self.reference_trajectory = reference_trajectories
def set_modules(self, physical_system, reference_generator, reward_function):
self.physical_system = physical_system
self.reference_generator = reference_generator
self.reward_function = reward_function
class DummyVoltageSupply(VoltageSupply):
def __init__(self, u_nominal=560, tau=1e-4, **kwargs):
super().__init__(u_nominal, tau=tau)
self.i_sup = None
self.t = None
self.reset_counter = 0
self.args = None
self.kwargs = kwargs
self.get_voltage_counter = 0
def reset(self):
self.reset_counter += 1
return super().reset()
def get_voltage(self, i_sup, t, *args, **kwargs):
self.get_voltage_counter += 1
self.i_sup = i_sup
self.t = t
self.args = args
self.kwargs = kwargs
return [self._u_nominal]
class DummyConverter(PowerElectronicConverter):
voltages = Box(0, 1, shape=(1,))
currents = Box(-1, 1, shape=(1,))
action_space = Discrete(4)
def __init__(self, tau=2E-4, dead_time=False, interlocking_time=0, action_space=None, voltages=None, currents=None, **kwargs):
super().__init__(tau, dead_time, interlocking_time)
self.action_space = action_space or self.action_space
self.voltages = voltages or self.voltages
self.currents = currents or self.currents
self.reset_counter = 0
self.convert_counter = 0
self.switching_times = [tau]
self.action = None
self.action_set_time = None
self.i_out = None
self.last_i_out = None
self.t = None
self.kwargs = kwargs
self.u_in = None
def i_sup(self, i_out):
self.last_i_out = i_out
return i_out[0]
def set_switching_times(self, switching_times):
self.switching_times = switching_times
def set_action(self, action, t):
self.action_set_time = t
self.action = action
return [t + self._tau / 2, t + self._tau]
def reset(self):
self.reset_counter += 1
return [0.0] * self.voltages.shape[0]
def convert(self, i_out, t):
self.i_out = i_out
self.t = t
self.convert_counter += 1
self.u_in = [self.action] if type(self.action_space) is Discrete else self.action
return self.u_in
class DummyElectricMotor(ElectricMotor):
# defined test values
_default_motor_parameter = permex_motor_parameter['motor_parameter']
_default_limits = dict(omega=16, torque=26, u=15, i=26, i_0=26, i_1=21, u_0=15)
_default_nominal_values = dict(omega=14, torque=20, u=15, i=22, i_0=22, i_1=20)
HAS_JACOBIAN = True
electrical_jac_return = None
CURRENTS_IDX = [0, 1]
CURRENTS = ['i_0', 'i_1']
VOLTAGES = ['u_0']
def __init__(self, tau=1e-5, **kwargs):
self.kwargs = kwargs
self.reset_counter = 0
self.u_in = None
super().__init__(tau=tau, **kwargs)
def electrical_ode(self, state, u_in, omega, *_):
self.u_in = u_in
return state - u_in
def reset(self, state_space, state_positions):
self.reset_counter += 1
return super().reset(state_space, state_positions)
def torque(self, currents):
return np.prod(currents)
def i_in(self, state):
return [np.sum(state)]
def electrical_jacobian(self, state, u_in, omega, *_):
return self.electrical_jac_return
class PowerElectronicConverterWrapper(cv.PowerElectronicConverter):
def __init__(self, subconverter, **kwargs):
super().__init__(**kwargs)
self._converter = subconverter
self.action_space = self._converter.action_space
self.currents = self._converter.currents
self.voltages = self._converter.voltages
self.reset_calls = 0
self.set_action_calls = 0
self.last_action = None
self.last_t = None
self.last_i_out = None
self.last_u = None
self.last_i_sup = None
def reset(self):
self.reset_calls += 1
return self._converter.reset()
def set_action(self, action, t):
self.last_action = action
self.last_t = t
return self._converter.set_action(action, t)
def convert(self, i_out, t):
self.last_i_out = i_out
self.last_t = t
self.last_u = self._converter.convert(i_out, t)
return self.last_u
def i_sup(self, i_out):
self.last_i_out = i_out
self.last_i_sup = self._converter.i_sup(i_out)
return self.last_i_sup
class DummyScipyOdeSolver(ode):
"""
Dummy class for ScipyOdeSolver
"""
# defined test values
_kwargs = {'nsteps': 5}
_integrator = 'dop853'
_y = np.zeros(2)
_y_init = np.array([1, 6])
_t = 0
_tau = 1e-3
_t_init = 0.1
jac = None
# counter
_init_counter = 0
_set_integrator_counter = 0
_set_initial_value_counter = 0
_set_f_params_counter = 0
_set_jac_params_counter = 0
_integrate_counter = 0
def __init__(self, system_equation, jacobian_):
self._init_counter += 1
assert system_equation == system
assert jacobian_ == jacobian
super().__init__(system_equation, jacobian_)
def set_integrator(self, integrator, **args):
self._set_integrator_counter += 1
assert integrator == self._integrator
assert args == self._kwargs
return super().set_integrator(integrator, **args)
def set_initial_value(self, y, t=0.0):
self._set_initial_value_counter += 1
assert all(y == self._y_init)
assert t == self._t_init
def set_f_params(self, *args):
self._set_f_params_counter += 1
assert args == (2,)
super().set_f_params(2)
def set_jac_params(self, *args):
self._set_jac_params_counter += 1
assert args == (2,)
super().set_jac_params(*args)
def integrate(self, t, *_):
self._integrate_counter += 1
assert t == self._t_init + self._tau
return self._y_init * 2
class DummyLoad(MechanicalLoad):
"""
dummy class for mechanical load
"""
state_names = ['omega', 'position']
limits = dict(omega=15, position=10)
nominal_values = dict(omega=15, position=10)
mechanical_state = None
t = None
mechanical_ode_return = None
mechanical_jac_return = None
omega_range = None
HAS_JACOBIAN = True
def __init__(self, tau=1e-4, **kwargs):
self.kwargs = kwargs
self.reset_counter = 0
super().__init__(tau=tau, **kwargs)
def reset(self, state_space, state_positions, nominal_state, *_, **__):
self.reset_counter += 1
return np.zeros(2)
def mechanical_ode(self, t, mechanical_state, torque):
self.mechanical_state = mechanical_state
self.t = t
self.mechanical_ode_return = np.array([torque, -torque])
return self.mechanical_ode_return
def mechanical_jacobian(self, t, mechanical_state, torque):
self.mechanical_state = mechanical_state
self.t = t
self.mechanical_ode_return = np.array([torque, -torque])
return self.mechanical_jac_return
def get_state_space(self, omega_range):
self.omega_range = omega_range
return {'omega': 0, 'position': -1}, {'omega': 1, 'position': -1}
class DummyNoise(NoiseGenerator):
"""
dummy class for noise generator
"""
def __init__(self, **kwargs):
self.kwargs = kwargs
self.reset_counter = 0
super().__init__()
def reset(self):
return np.ones_like(self._state_variables, dtype=float) * 0.36
def noise(self, *_, **__):
return np.ones_like(self._state_variables, dtype=float) * 0.42
class DummyOdeSolver(OdeSolver):
"""
Dummy class for ode solver
"""
def __init__(self, **kwargs):
self.kwargs = kwargs
super().__init__()
def integrate(self, t):
self.last_y = self._y
self._y = self._y + t - self._t
self._t = t
return self._y
class DummyConstraint(Constraint):
def __init__(self, violation_degree=0.0):
super().__init__()
self.modules_set = False
self.violation_degree = violation_degree
def __call__(self, state):
return self.violation_degree
def set_modules(self, ps):
super().set_modules(ps)
self.modules_set = True
class DummyConstraintMonitor(ConstraintMonitor):
def __init__(self, no_of_dummy_constraints=1):
constraints = [DummyConstraint() for _ in range(no_of_dummy_constraints)]
super().__init__(additional_constraints=constraints)
class DummySCMLSystem(SCMLSystem):
"""
dummy class for SCMLSystem
"""
# defined test values
OMEGA_IDX = 0
TORQUE_IDX = 1
CURRENTS_IDX = []
VOLTAGES_IDX = []
U_SUP_IDX = -1
_limits = {}
_nominal_state = {}
_supply = None
_converter = None
_electrical_motor = None
_mechanical_load = None
_state_names = ['omega_me', 'torque', 'u', 'i', 'u_sup']
_state_length = 5
# counter
_set_limits_counter = 0
_set_nominal_state_counter = 0
def _set_limits(self):
self._set_limits_counter += 1
def _set_nominal_state(self):
self._set_nominal_state_counter += 1
def _build_state_space(self, state_names):
assert state_names == self._state_names
return None
def _build_state_names(self):
return self._state_names
def _set_indices(self):
pass
def simulate(self, action, *_, **__):
return np.ones(self._state_length) * 0.46
def _system_equation(self, t, state, u_in, **__):
return np.ones(self._state_length) * 0.87
def reset(self, *_):
return np.ones(self._state_length) * 0.12
def _forward_transform(self, quantities, motor_state):
return quantities
def _build_state(self, motor_state, torque, u_in, u_sup):
pass
def _action_transformation(self, action):
return action
class DummyRandom:
_expected_low = None
_expected_high = None
_expected_left = None
_expected_mode = None
_expected_right = None
_expected_values = None
_expected_probabilities = None
_expected_loc = None
_expected_scale = None
_expected_size = None
# counter
_monkey_random_rand_counter = 0
_monkey_random_triangular_counter = 0
_monkey_random_randint_counter = 0
_monkey_random_choice_counter = 0
_monkey_random_normal_counter = 0
def __init__(self, exp_low=None, exp_high=None, exp_left=None, exp_right=None, exp_mode=None, exp_values=None,
exp_probabilities=None, exp_loc=None, exp_scale=None, exp_size=None):
"""
set expected values
:param exp_low: expected lower value
:param exp_high: expected upper value
:param exp_mode: expected mode value
:param exp_right: expected right value
:param exp_left: expected left value
:param exp_values: expected values for choice
:param exp_probabilities: expected probabilities for choice
:param exp_loc: expected loc value
:param exp_scale: expected scale value
:param exp_size: expected size value
"""
self._expected_low = exp_low
self._expected_high = exp_high
self._expected_mode = exp_mode
self._expected_left = exp_left
self._expected_right = exp_right
self._expected_values = exp_values
self._expected_probabilities = exp_probabilities
self._expected_loc = exp_loc
self._expected_scale = exp_scale
self._expected_size = exp_size
    def monkey_random_rand(self):
        """
        mock function for np.random.rand()
        :return: fixed test value
        """
        self._monkey_random_rand_counter += 1
        return 0.25
    def monkey_random_triangular(self, left, mode, right):
        """
        mock function for np.random.triangular()
        :return: fixed test value
        """
        self._monkey_random_triangular_counter += 1
        if self._expected_left is not None:
            assert left == self._expected_left
        if self._expected_right is not None:
            assert right == self._expected_right
        if self._expected_mode is not None:
            assert mode == self._expected_mode
        return 0.45
    def monkey_random_randint(self, low, high):
        """
        mock function for random.randint()
        :param low: lower bound passed by the caller
        :param high: upper bound passed by the caller
        :return: fixed test value
        """
        if self._expected_low is not None:
            assert low == self._expected_low
        if self._expected_high is not None:
            assert high == self._expected_high
        self._monkey_random_randint_counter += 1
        return 7
def monkey_random_choice(self, a, p):
self._monkey_random_choice_counter += 1
assert len(a) == len(p)
if self._expected_values is not None:
assert a == self._expected_values
if self._expected_probabilities is not None:
assert p == self._expected_probabilities
return a[0]
def monkey_random_normal(self, loc=0, scale=1.0, size=None):
if self._expected_loc is not None:
assert loc == self._expected_loc
if self._expected_scale is not None:
assert scale == self._expected_scale
if self._expected_size is not None:
assert size == self._expected_size
else:
size = 1
self._monkey_random_normal_counter += 1
result = np.array([0.1, -0.2, 0.6, 0.1, -0.5, -0.3, -1.7, 0.1, -0.2, 0.4])
return result[:size]
class DummyElectricMotorEnvironment(ElectricMotorEnvironment):
"""Dummy environment to test pre implemented callbacks. Extend for further testing cases"""
def __init__(self, reference_generator=None, callbacks=(), **kwargs):
reference_generator = reference_generator or DummyReferenceGenerator()
super().__init__(DummyPhysicalSystem(), reference_generator, DummyRewardFunction(), callbacks=callbacks)
def step(self):
self._call_callbacks('on_step_begin', 0, 0)
self._call_callbacks('on_step_end', 0, 0, 0, 0, 0)
def reset(self):
self._call_callbacks('on_reset_begin')
self._call_callbacks('on_reset_end', 0, 0)
    def close(self):
        self._call_callbacks('on_close')
class DummyCallback(Callback):
def __init__(self):
super().__init__()
self.reset_begin = 0
self.reset_end = 0
self.step_begin = 0
self.step_end = 0
self.close = 0
def on_reset_begin(self):
self.reset_begin += 1
def on_reset_end(self, *_):
self.reset_end += 1
def on_step_begin(self, *_):
self.step_begin += 1
def on_step_end(self, *_):
self.step_end += 1
def on_close(self):
self.close += 1
# endregion
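# Hedged usage sketch (illustration only): DummyElectricMotorEnvironment and
# DummyCallback defined above are meant to be combined to check that each
# callback hook is dispatched exactly once per call; whether the base
# ElectricMotorEnvironment wires callbacks exactly this way depends on the
# installed gym-electric-motor version.
def _example_callback_dispatch():
    callback = DummyCallback()
    env = DummyElectricMotorEnvironment(callbacks=(callback,))
    env.reset()
    env.step()
    env.close()
    assert callback.reset_begin == callback.reset_end == 1
    assert callback.step_begin == callback.step_end == 1
    assert callback.close == 1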
|
tests/testing_utils.py
| 0.858259 | 0.435181 |
from threading import Timer
from olo.database import BaseDataBase, MySQLCursor
from olo.libs.class_proxy import ClassProxy
from olo.libs.pool import Pool, ConnProxy
def create_conn(host, port, user, password, dbname, charset):
try:
from MySQLdb import connect
conn = connect( # pragma: no cover
host=host, port=port, user=user, passwd=password, db=dbname,
charset=charset,
)
except ImportError:
try:
from pymysql import connect
except ImportError: # pragma: no cover
raise Exception( # pragma: no cover
                'Cannot find pymysql, please install it: pip install PyMySQL'
)
conn = connect(
host=host, port=port, user=user, password=password, db=dbname,
charset=charset,
)
return conn
class MySQLConnProxy(ConnProxy):
def __init__(self, conn, pool):
super(MySQLConnProxy, self).__init__(
conn, pool
)
self.modified_cursors = set()
self.waiting_for_close = False
def __str__(self):
return '<{} {}>'.format( # pragma: no cover
self.__class__.__name__,
super(MySQLConnProxy, self).__str__()
)
def cursor(self):
cur = self.conn.cursor()
cur = CursorProxy(cur, self)
return cur
def close(self):
if self.modified_cursors:
self.waiting_for_close = True # pragma: no cover
Timer(60, self._close).start() # pragma: no cover
return # pragma: no cover
self.waiting_for_close = False
self._close()
def _close(self):
super(MySQLConnProxy, self).close()
for cur in self.modified_cursors:
cur.close() # pragma: no cover
self.modified_cursors.clear()
def add_modified_cursor(self, cur):
self.modified_cursors.add(cur)
def remove_modified_cursor(self, cur):
if cur in self.modified_cursors:
self.modified_cursors.remove(cur)
if self.waiting_for_close:
self.close() # pragma: no cover
def ping(self):
return self.conn.ping()
class CursorProxy(ClassProxy):
def __init__(self, raw, conn):
super(CursorProxy, self).__init__(raw)
self.conn = conn
self._is_modified = False
@property
def is_modified(self):
return self._is_modified
@is_modified.setter
def is_modified(self, item):
self._is_modified = item
if self.is_modified:
self.conn.add_modified_cursor(self)
else:
self.conn.remove_modified_cursor(self)
def __iter__(self):
return iter(self._raw)
def __str__(self):
return '<CursorProxy {}>'.format(self._raw) # pragma: no cover
def close(self):
self._raw.close() # pragma: no cover
if not self.conn.is_closed: # pragma: no cover
self.conn.remove_modified_cursor(self) # pragma: no cover
def execute(self, sql, params=None, **kwargs):
if (
params is not None and
not isinstance(params, (list, tuple, dict))
):
params = (params,)
return self._raw.execute(sql, params, **kwargs)
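# Note on CursorProxy.execute above: a scalar parameter is wrapped into a
# 1-tuple before being forwarded to the raw DB-API cursor. Illustration only
# (the table and column names are made up):
#     cur.execute('SELECT * FROM `user` WHERE `id` = %s', 1)
# is forwarded as
#     raw_cursor.execute('SELECT * FROM `user` WHERE `id` = %s', (1,))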
class MySQLDataBase(BaseDataBase):
def __init__(self, host, port, user, password, dbname,
charset='utf8mb4',
beansdb=None, autocommit=True,
report=lambda *args, **kwargs: None,
pool_size=5,
pool_timeout=30,
pool_recycle=60*60,
pool_max_overflow=10):
super(MySQLDataBase, self).__init__(
beansdb=beansdb,
autocommit=autocommit,
report=report
)
self.pool = Pool(
lambda: create_conn(
host, port, user, password, dbname, charset
),
conn_proxy_cls=MySQLConnProxy,
size=pool_size,
timeout=pool_timeout,
recycle=pool_recycle,
max_overflow=pool_max_overflow,
)
def get_conn(self):
return self.pool.acquire_conn()
def get_cursor(self): # pylint: disable=W
assert self.in_transaction(), 'db.get_cursor must in transaction!'
tran = self.get_last_transaction()
conn = tran.conn
if conn is None:
conn = tran.conn = self.get_conn()
cur = conn.cursor()
return MySQLCursor(cur, self)
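# Hedged usage sketch (illustration only; the connection details are made up):
# MySQLDataBase wires create_conn into a Pool of MySQLConnProxy objects, so a
# typical setup only needs the connection parameters plus optional pool tuning.
def _example_create_db():
    db = MySQLDataBase(
        host='127.0.0.1', port=3306,
        user='root', password='', dbname='test',
        charset='utf8mb4',
        pool_size=5, pool_timeout=30,
        pool_recycle=60 * 60, pool_max_overflow=10,
    )
    conn = db.get_conn()  # borrows a MySQLConnProxy from the pool
    return db, conn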
|
olo/database/mysql.py
| 0.521959 | 0.099426 |
import torch
import torch.nn as nn
from .network_builder import NetworkBuilder, A2CBuilder
from ..modules.pointnet_modules import pointnet
class A2CPNBuilder(A2CBuilder):
class Network(NetworkBuilder.BaseNetwork):
def __init__(self, params, **kwargs):
actions_num = kwargs.pop('actions_num')
input_shape = kwargs.pop('input_shape')
print(f"Input shape of PNBuilder: {input_shape}")
assert "pointcloud" in input_shape
assert "pointnet" in params
input_shape_pc = input_shape["pointcloud"]
# input_shape_pc = (input_shape_pc[0], input_shape_pc[2], input_shape_pc[1]) # (B, N, 3) -> (B, 3, N)
            # fall back to an empty extra-state shape when no "obs" vector is provided
            input_shape_state = input_shape.get("obs", (0,))
self.num_seqs = num_seqs = kwargs.pop('num_seqs', 1)
self.value_size = kwargs.pop('value_size', 1)
NetworkBuilder.BaseNetwork.__init__(self)
self.load(params)
pn_local_shape = params["pointnet"]["local_units"]
pn_global_shape = params["pointnet"]["global_units"]
pn_output_shape = pn_global_shape[-1]
in_mlp_shape = pn_output_shape + input_shape_state[0]
if len(self.units) == 0:
out_size = in_mlp_shape
else:
out_size = self.units[-1]
# Build PointNet
self.pointnet = pointnet.PointNet(in_channels=3, local_channels=pn_local_shape,
global_channels=pn_global_shape)
if self.has_rnn:
if not self.is_rnn_before_mlp:
rnn_in_size = out_size
out_size = self.rnn_units
else:
rnn_in_size = in_mlp_shape
in_mlp_shape = self.rnn_units
self.rnn = self._build_rnn(self.rnn_name, rnn_in_size, self.rnn_units, self.rnn_layers)
# self.layer_norm = torch.nn.LayerNorm(self.rnn_units)
mlp_args = {
'input_size': in_mlp_shape,
'units': self.units,
'activation': self.activation,
'norm_func_name': self.normalization,
'dense_func': torch.nn.Linear
}
self.mlp = self._build_mlp(**mlp_args)
self.value = torch.nn.Linear(out_size, self.value_size)
self.value_act = self.activations_factory.create(self.value_activation)
self.flatten_act = self.activations_factory.create(self.activation)
if self.is_discrete:
self.logits = torch.nn.Linear(out_size, actions_num)
if self.is_continuous:
self.mu = torch.nn.Linear(out_size, actions_num)
self.mu_act = self.activations_factory.create(self.space_config['mu_activation'])
mu_init = self.init_factory.create(**self.space_config['mu_init'])
self.sigma_act = self.activations_factory.create(self.space_config['sigma_activation'])
sigma_init = self.init_factory.create(**self.space_config['sigma_init'])
if self.space_config['fixed_sigma']:
self.sigma = nn.Parameter(torch.zeros(actions_num, requires_grad=True, dtype=torch.float32),
requires_grad=True)
else:
self.sigma = torch.nn.Linear(out_size, actions_num)
# Initialization
mlp_init = self.init_factory.create(**self.initializer)
for m in self.mlp:
if isinstance(m, nn.Linear):
mlp_init(m.weight)
if self.is_discrete:
mlp_init(self.logits.weight)
if self.is_continuous:
mu_init(self.mu.weight)
if self.space_config['fixed_sigma']:
sigma_init(self.sigma)
else:
sigma_init(self.sigma.weight)
mlp_init(self.value.weight)
def forward(self, obs_dict):
obs_dict = obs_dict['obs']
obs = obs_dict['pointcloud']
obs = obs.permute((0, 2, 1))
states = obs_dict.get('rnn_states', None)
seq_length = obs_dict.get('seq_length', 1)
out = obs
out = self.pointnet(out)["feature"]
out = out.flatten(1)
if self.has_rnn:
if not self.is_rnn_before_mlp:
out = self.mlp(out)
batch_size = out.size()[0]
num_seqs = batch_size // seq_length
out = out.reshape(num_seqs, seq_length, -1)
if len(states) == 1:
states = states[0]
out, states = self.rnn(out, states)
out = out.contiguous().reshape(out.size()[0] * out.size()[1], -1)
if type(states) is not tuple:
states = (states,)
if self.is_rnn_before_mlp:
for l in self.mlp:
out = l(out)
else:
# Modification.
if "obs" in obs_dict:
out = torch.cat([out, obs_dict["obs"]], dim=-1)
for l in self.mlp:
out = l(out)
value = self.value_act(self.value(out))
if self.is_discrete:
logits = self.logits(out)
return logits, value, states
if self.is_continuous:
mu = self.mu_act(self.mu(out))
if self.space_config['fixed_sigma']:
sigma = self.sigma_act(self.sigma)
else:
sigma = self.sigma_act(self.sigma(out))
return mu, mu * 0 + sigma, value, states
def load(self, params):
self.separate = params['separate']
self.units = params['mlp']['units']
self.activation = params['mlp']['activation']
self.initializer = params['mlp']['initializer']
self.is_discrete = 'discrete' in params['space']
self.is_continuous = 'continuous' in params['space']
self.is_multi_discrete = 'multi_discrete' in params['space']
self.value_activation = params.get('value_activation', 'None')
self.normalization = params.get('normalization', None)
if self.is_continuous:
self.space_config = params['space']['continuous']
elif self.is_discrete:
self.space_config = params['space']['discrete']
elif self.is_multi_discrete:
self.space_config = params['space']['multi_discrete']
self.has_rnn = 'rnn' in params
if self.has_rnn:
self.rnn_units = params['rnn']['units']
self.rnn_layers = params['rnn']['layers']
self.rnn_name = params['rnn']['name']
self.is_rnn_before_mlp = params['rnn'].get('before_mlp', False)
def is_rnn(self):
return self.has_rnn
def get_default_rnn_state(self):
num_layers = self.rnn_layers
if self.rnn_name == 'lstm':
return (torch.zeros((num_layers, self.num_seqs, self.rnn_units)),
torch.zeros((num_layers, self.num_seqs, self.rnn_units)))
else:
                return (torch.zeros((num_layers, self.num_seqs, self.rnn_units)),)  # 1-tuple, consistent with the lstm branch
def build(self, name, **kwargs):
net = A2CPNBuilder.Network(self.params, **kwargs)
return net
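# Hedged configuration sketch (illustration only): the `params` dict consumed
# by A2CPNBuilder.Network.load() above is expected to look roughly like this.
# Concrete layer sizes, activation names and initializer names follow common
# rl_games conventions and are assumptions, not values taken from this repo.
_EXAMPLE_PN_PARAMS = {
    'separate': False,
    'space': {
        'continuous': {
            'mu_activation': 'None',
            'sigma_activation': 'None',
            'mu_init': {'name': 'default'},
            'sigma_init': {'name': 'const_initializer', 'val': 0.0},
            'fixed_sigma': True,
        },
    },
    'mlp': {
        'units': [256, 128],
        'activation': 'elu',
        'initializer': {'name': 'default'},
    },
    'pointnet': {
        'local_units': [64, 128],
        'global_units': [256],
    },
    # optional: 'rnn': {'name': 'lstm', 'units': 128, 'layers': 1, 'before_mlp': False},
}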
|
rl_games/algos_torch/pn_network_builder.py
| 0.795777 | 0.320715 |
import numpy as np
import pytest
from peleffy.forcefield.parameters import OPLS2005ParameterWrapper
from simtk import unit
FORCEFIELD_NAME = 'openff_unconstrained-1.2.0.offxml'
METHANE_OPLS_PARAMETERS = OPLS2005ParameterWrapper({
'atom_names': [' C1 ', ' H2 ', ' H3 ', ' H4 ', ' H5 '],
'atom_types': ['CT', 'HC', 'HC', 'HC', 'HC'],
'charges': [unit.Quantity(-0.24, unit.elementary_charge),
unit.Quantity(0.06, unit.elementary_charge),
unit.Quantity(0.06, unit.elementary_charge),
unit.Quantity(0.06, unit.elementary_charge),
unit.Quantity(0.06, unit.elementary_charge)],
'sigmas': [unit.Quantity(3.5, unit.angstrom),
unit.Quantity(2.5, unit.angstrom),
unit.Quantity(2.5, unit.angstrom),
unit.Quantity(2.5, unit.angstrom),
unit.Quantity(2.5, unit.angstrom)],
'epsilons': [unit.Quantity(0.066, unit.kilocalorie / unit.mole),
unit.Quantity(0.03, unit.kilocalorie / unit.mole),
unit.Quantity(0.03, unit.kilocalorie / unit.mole),
unit.Quantity(0.03, unit.kilocalorie / unit.mole),
unit.Quantity(0.03, unit.kilocalorie / unit.mole)],
'bonds': [{'atom1_idx': 0, 'atom2_idx': 1,
'spring_constant': unit.Quantity(
340.0, unit.kilocalorie / (unit.angstrom ** 2
* unit.mole)),
'eq_dist': unit.Quantity(1.09, unit.angstrom)},
{'atom1_idx': 0, 'atom2_idx': 2,
'spring_constant': unit.Quantity(
340.0, unit.kilocalorie / (unit.angstrom ** 2
* unit.mole)),
'eq_dist': unit.Quantity(1.09, unit.angstrom)},
{'atom1_idx': 0,
'atom2_idx': 3,
'spring_constant': unit.Quantity(340.0, unit.kilocalorie
/ (unit.angstrom ** 2
* unit.mole)),
'eq_dist': unit.Quantity(1.09, unit.angstrom)},
{'atom1_idx': 0, 'atom2_idx': 4,
'spring_constant': unit.Quantity(340.0, unit.kilocalorie
/ (unit.angstrom ** 2
* unit.mole)),
'eq_dist': unit.Quantity(1.09, unit.angstrom)}],
'angles': [{'atom1_idx': 1, 'atom2_idx': 0, 'atom3_idx': 2,
'spring_constant': unit.Quantity(
33.0, unit.kilocalorie / (unit.mole
* unit.radian ** 2)),
'eq_angle': unit.Quantity(107.8, unit.degree)},
{'atom1_idx': 1, 'atom2_idx': 0, 'atom3_idx': 3,
'spring_constant': unit.Quantity(
33.0, unit.kilocalorie / (unit.mole
* unit.radian ** 2)),
'eq_angle': unit.Quantity(107.8, unit.degree)},
{'atom1_idx': 1, 'atom2_idx': 0, 'atom3_idx': 4,
'spring_constant': unit.Quantity(
33.0, unit.kilocalorie / (unit.mole
* unit.radian ** 2)),
'eq_angle': unit.Quantity(107.8, unit.degree)},
{'atom1_idx': 2, 'atom2_idx': 0, 'atom3_idx': 3,
'spring_constant': unit.Quantity(
33.0, unit.kilocalorie / (unit.mole
* unit.radian ** 2)),
'eq_angle': unit.Quantity(107.8, unit.degree)},
{'atom1_idx': 2, 'atom2_idx': 0, 'atom3_idx': 4,
'spring_constant': unit.Quantity(
33.0, unit.kilocalorie / (unit.mole
* unit.radian ** 2)),
'eq_angle': unit.Quantity(107.8, unit.degree)},
{'atom1_idx': 3, 'atom2_idx': 0, 'atom3_idx': 4,
'spring_constant': unit.Quantity(
33.0, unit.kilocalorie / (unit.mole
* unit.radian ** 2)),
'eq_angle': unit.Quantity(107.8, unit.degree)}],
'SGB_radii': [unit.Quantity(1.975, unit.angstrom),
unit.Quantity(1.425, unit.angstrom),
unit.Quantity(1.425, unit.angstrom),
unit.Quantity(1.425, unit.angstrom),
unit.Quantity(1.425, unit.angstrom)],
'vdW_radii': [unit.Quantity(1.75, unit.angstrom),
unit.Quantity(1.25, unit.angstrom),
unit.Quantity(1.25, unit.angstrom),
unit.Quantity(1.25, unit.angstrom),
unit.Quantity(1.25, unit.angstrom)],
'gammas': [0.005, 0.00859824, 0.00859824, 0.00859824, 0.00859824],
'alphas': [-0.74168571, 0.268726247, 0.268726247, 0.268726247,
0.268726247]})
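# Illustration only (not used by the tests below): every physical value in the
# fixture above is stored as a simtk.unit.Quantity, so unit-safe comparisons go
# through value_in_unit(); e.g. the 1.09 angstrom C-H equilibrium distance in
# nanometers:
#     unit.Quantity(1.09, unit.angstrom).value_in_unit(unit.nanometer)  # ~0.109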
class TestSchrodingerToolkitWrapper(object):
"""
    It wraps all tests that check the SchrodingerToolkitWrapper
    class.
"""
def test_get_Schrodinger_parameters(self):
"""
It tests the standard methods to obtain Schrodinger parameters
        from a peleffy Molecule.
"""
from peleffy.topology import Molecule
from peleffy.forcefield import OPLS2005ForceField
from peleffy.utils.toolkits import ToolkitUnavailableException
# Load benzene ring
molecule = Molecule(smiles='c1ccccc1', hydrogens_are_explicit=False)
# Load OPLS2005 force field
opls2005 = OPLS2005ForceField()
# Ensure SCHRODINGER is not in the environment
import os
if 'SCHRODINGER' in os.environ:
del os.environ['SCHRODINGER']
with pytest.raises(ToolkitUnavailableException):
opls2005.parameterize(molecule, charge_method='gasteiger')
class TestRDKitToolkitWrapper(object):
"""
It wraps all tests that check the RDKitToolkitWrapper class.
"""
def test_conformer_setter(self):
"""It checks the conformer setter of the RDKit toolkit"""
from peleffy.topology import Molecule
from rdkit import Chem
from copy import deepcopy
from peleffy.utils import get_data_file_path
# Load molecule
mol = Molecule(get_data_file_path('ligands/propionic_acid.pdb'))
# Choose a dihedral to track
dihedral = (0, 1, 2, 3)
# Get initial dihedral's theta
conformer = mol.rdkit_molecule.GetConformer()
initial_theta = Chem.rdMolTransforms.GetDihedralDeg(conformer,
*dihedral)
if initial_theta < -179:
initial_theta += 180.0
elif initial_theta > 179:
initial_theta -= 180.0
assert abs(initial_theta - -0.002) < 10e-3, \
'Unexpected initial theta value'
# Get a copy of the rdkit's molecule representation
rdkit_mol = deepcopy(mol.rdkit_molecule)
# Modify its conformer
conformer = rdkit_mol.GetConformer()
Chem.rdMolTransforms.SetDihedralDeg(conformer, *dihedral, 90)
new_theta = Chem.rdMolTransforms.GetDihedralDeg(conformer,
*dihedral)
assert abs(new_theta - 89.999) < 10e-3, \
'Unexpected new theta value'
# Set new conformer to peleffy molecule
mol.set_conformer(conformer)
# Check new set theta value
conformer = mol.rdkit_molecule.GetConformer()
new_set_theta = Chem.rdMolTransforms.GetDihedralDeg(conformer,
*dihedral)
assert abs(new_set_theta - 89.999) < 10e-3, \
'Unexpected new set theta value'
def test_atom_degrees(self):
"""It checks that the atom degree getter works well."""
from peleffy.topology import Molecule
from peleffy.utils.toolkits import RDKitToolkitWrapper
from peleffy.utils import get_data_file_path
wrapper = RDKitToolkitWrapper()
pdb_path = get_data_file_path('ligands/methane.pdb')
m = Molecule(pdb_path)
degree_by_name = dict(zip(wrapper.get_atom_names(m),
wrapper.get_atom_degrees(m)))
assert degree_by_name == {' C1 ': 4, ' H1 ': 1, ' H2 ': 1,
' H3 ': 1, ' H4 ': 1}, \
'Unexpected pairing between atom names and degrees'
pdb_path = get_data_file_path('ligands/ethylene.pdb')
m = Molecule(pdb_path)
degree_by_name = dict(zip(wrapper.get_atom_names(m),
wrapper.get_atom_degrees(m)))
assert degree_by_name == {' C1 ': 3, ' C2 ': 3, ' H1 ': 1,
' H2 ': 1, ' H3 ': 1, ' H4 ': 1}, \
'Unexpected pairing between atom names and degrees'
pdb_path = get_data_file_path('ligands/acetylene.pdb')
m = Molecule(pdb_path)
degree_by_name = dict(zip(wrapper.get_atom_names(m),
wrapper.get_atom_degrees(m)))
assert degree_by_name == {' C1 ': 2, ' C2 ': 2, ' H1 ': 1,
' H2 ': 1}, \
'Unexpected pairing between atom names and degrees'
pdb_path = get_data_file_path('ligands/propionic_acid.pdb')
m = Molecule(pdb_path)
degree_by_name = dict(zip(wrapper.get_atom_names(m),
wrapper.get_atom_degrees(m)))
assert degree_by_name == {' C1 ': 4, ' C2 ': 4, ' C3 ': 3,
' O1 ': 1, ' O2 ': 2, ' H1 ': 1,
' H2 ': 1, ' H3 ': 1, ' H4 ': 1,
' H5 ': 1, ' H6 ': 1}, \
'Unexpected pairing between atom names and degrees'
pdb_path = get_data_file_path('ligands/trimethylglycine.pdb')
m = Molecule(pdb_path)
degree_by_name = dict(zip(wrapper.get_atom_names(m),
wrapper.get_atom_degrees(m)))
assert degree_by_name == {' C1 ': 4, ' N1 ': 4, ' C2 ': 4,
' C3 ': 4, ' C4 ': 4, ' C5 ': 3,
' O1 ': 1, ' O2 ': 1, ' H1 ': 1,
' H2 ': 1, ' H3 ': 1, ' H4 ': 1,
' H5 ': 1, ' H6 ': 1, ' H7 ': 1,
' H8 ': 1, ' H9 ': 1, ' H10': 1,
' H11': 1}, \
'Unexpected pairing between atom names and degrees'
pdb_path = get_data_file_path('ligands/malonate.pdb')
m = Molecule(pdb_path)
degree_by_name = dict(zip(wrapper.get_atom_names(m),
wrapper.get_atom_degrees(m)))
assert degree_by_name == {' O1 ': 1, ' C1 ': 3, ' O2 ': 1,
' C2 ': 4, ' C3 ': 3, ' O3 ': 2,
' O4 ': 1, ' H1 ': 1, ' H2 ': 1,
' H3 ': 1}, \
'Unexpected pairing between atom names and degrees'
def test_pdb_parsers(self):
"""It checks that the PDB parsers from RDKit are correctly working."""
from rdkit.Chem.rdmolfiles import MolToPDBBlock
from peleffy.utils.toolkits import RDKitToolkitWrapper
from peleffy.utils import get_data_file_path
wrapper = RDKitToolkitWrapper()
pdb_path = get_data_file_path('ligands/benzene.pdb')
with open(pdb_path) as f:
pdb_block = f.read()
rdkit_mol1 = wrapper.from_pdb(pdb_path)
rdkit_mol2 = wrapper.from_pdb_block(pdb_block)
block1 = MolToPDBBlock(rdkit_mol1)
block2 = MolToPDBBlock(rdkit_mol2)
assert block1 == block2, 'Unexpected pair of RDKit molecules'
def test_dihedral_angle(self):
"""It checks that the dihedral angle calculator works well."""
from peleffy.topology import Molecule
from peleffy.utils.toolkits import RDKitToolkitWrapper
from peleffy.utils import get_data_file_path
wrapper = RDKitToolkitWrapper()
pdb_path = get_data_file_path('ligands/trimethylglycine.pdb')
m = Molecule(pdb_path)
dihedral_degrees = wrapper.get_dihedral(m, 2, 1, 4, 5, units="degrees")
dihedral_rad = wrapper.get_dihedral(m, 2, 1, 4, 5)
np.testing.assert_almost_equal(dihedral_degrees, -176.348, decimal=2)
np.testing.assert_almost_equal(dihedral_degrees, np.rad2deg(dihedral_rad), decimal=3)
def test_dihedral_angle_2(self):
"""It checks that the dihedral angle calculator works well."""
from peleffy.topology import Molecule
from peleffy.utils.toolkits import RDKitToolkitWrapper
from peleffy.utils import get_data_file_path
wrapper = RDKitToolkitWrapper()
pdb_path = get_data_file_path('ligands/trimethylglycine.pdb')
m = Molecule(pdb_path)
dihedral_degrees = wrapper.get_dihedral(m, 17, 4, 5, 6, units="degrees")
dihedral_rad = wrapper.get_dihedral(m, 17, 4, 5, 6)
np.testing.assert_almost_equal(dihedral_degrees, 54.828, decimal=2)
np.testing.assert_almost_equal(dihedral_degrees, np.rad2deg(dihedral_rad), decimal=3)
def test_rmsd(self):
"""It checks that the rmsd calculator works well."""
from peleffy.topology import Molecule
from peleffy.utils.toolkits import RDKitToolkitWrapper
from peleffy.utils import get_data_file_path
wrapper = RDKitToolkitWrapper()
pdb_path = get_data_file_path('ligands/trimethylglycine.pdb')
m = Molecule(pdb_path)
pdb_path2 = get_data_file_path('ligands/trimethylglycine_moved.pdb')
m2 = Molecule(pdb_path2)
np.testing.assert_almost_equal(wrapper.get_rmsd(m, m2), 0.3346, decimal=3)
|
peleffy/tests/test_toolkits.py
| 0.775095 | 0.628464 |
import xml.etree.ElementTree as ET
from typing import Callable, Dict, List, Mapping, Sequence, TYPE_CHECKING
from chb.app.BasicBlock import BasicBlock
import chb.util.fileutil as UF
from chb.arm.ARMDictionary import ARMDictionary
from chb.arm.ARMInstruction import ARMInstruction
from chb.invariants.XXpr import XXpr
if TYPE_CHECKING:
from chb.arm.ARMFunction import ARMFunction
class ARMBlock(BasicBlock):
def __init__(
self,
armf: "ARMFunction",
xnode: ET.Element) -> None:
BasicBlock.__init__(self, xnode)
self._armf = armf
self._instructions: Dict[str, ARMInstruction] = {}
@property
def armfunction(self) -> "ARMFunction":
return self._armf
@property
def armdictionary(self) -> ARMDictionary:
return self.armfunction.armdictionary
@property
def instructions(self) -> Mapping[str, ARMInstruction]:
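# Lazily parse the <i> child elements on first access, indexing each ARMInstruction by its instruction address ("ia").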
if len(self._instructions) == 0:
for n in self.xnode.findall("i"):
iaddr = n.get("ia")
if iaddr is None:
raise UF.CHBError("ARM Instruction without address in xml")
self._instructions[iaddr] = ARMInstruction(self, n)
return self._instructions
@property
def call_instructions(self) -> Sequence[ARMInstruction]:
result: List[ARMInstruction] = []
for (ia, instr) in sorted(self.instructions.items()):
if instr.is_call_instruction:
result.append(instr)
return result
@property
def store_instructions(self) -> Sequence[ARMInstruction]:
result: List[ARMInstruction] = []
for (ia, instr) in sorted(self.instructions.items()):
if instr.is_store_instruction:
result.append(instr)
return result
def to_string(
self,
bytes: bool = False,
opcodetxt: bool = True,
opcodewidth: int = 40,
sp: bool = True) -> str:
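# Render the block's instructions in address order, each prefixed by its right-justified address.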
lines: List[str] = []
for (ia, instr) in sorted(self.instructions.items()):
pinstr = instr.to_string(
bytes=bytes,
opcodetxt=opcodetxt,
opcodewidth=opcodewidth,
sp=sp)
lines.append(str(ia).rjust(10) + " " + pinstr)
return "\n".join(lines)
|
chb/arm/ARMBlock.py
| 0.814348 | 0.201951 |
import functools
import itertools
import jmespath
from c7n.actions import BaseAction
from c7n.filters import ValueFilter
from c7n.filters.kms import KmsRelatedFilter
from c7n.manager import resources
from c7n.query import QueryResourceManager, TypeInfo
from c7n.tags import universal_augment
from c7n.exceptions import PolicyValidationError, PolicyExecutionError
from c7n.utils import get_retry, local_session, type_schema, chunks
from c7n.filters.iamaccess import CrossAccountAccessFilter
from c7n.resolver import ValuesFrom
import c7n.filters.vpc as net_filters
@resources.register('workspaces')
class Workspace(QueryResourceManager):
class resource_type(TypeInfo):
service = 'workspaces'
enum_spec = ('describe_workspaces', 'Workspaces', None)
arn_type = 'workspace'
name = id = dimension = 'WorkspaceId'
universal_taggable = True
cfn_type = 'AWS::WorkSpaces::Workspace'
def augment(self, resources):
return universal_augment(self, resources)
@Workspace.filter_registry.register('connection-status')
class WorkspaceConnectionStatusFilter(ValueFilter):
"""Filter Workspaces based on user connection information
:example:
.. code-block:: yaml
policies:
- name: workspaces-abandoned
resource: workspaces
filters:
- type: connection-status
value_type: age
key: LastKnownUserConnectionTimestamp
op: ge
value: 90
- name: workspaces-expensive-zombies
resource: workspaces
filters:
- "WorkspaceProperties.RunningMode": ALWAYS_ON
- type: connection-status
value_type: age
key: LastKnownUserConnectionTimestamp
op: ge
value: 30
"""
schema = type_schema('connection-status', rinherit=ValueFilter.schema)
schema_alias = False
permissions = ('workspaces:DescribeWorkspacesConnectionStatus',)
annotation_key = 'c7n:ConnectionStatus'
def get_connection_status(self, client, workspace_ids):
connection_status_chunk = self.manager.retry(
client.describe_workspaces_connection_status,
WorkspaceIds=workspace_ids
)['WorkspacesConnectionStatus']
return connection_status_chunk
def process(self, resources, event=None):
client = local_session(self.manager.session_factory).client('workspaces')
annotate_map = {r['WorkspaceId']: r for r in resources if self.annotation_key not in r}
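# Query connection status only for workspaces that are not yet annotated, in batches of 25 ids on a small thread pool, then annotate each resource before filtering.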
with self.executor_factory(max_workers=2) as w:
self.log.debug(
'Querying connection status for %d workspaces' % len(annotate_map))
for status in itertools.chain(*w.map(
functools.partial(self.get_connection_status, client),
chunks(annotate_map.keys(), 25)
)):
annotate_map[status['WorkspaceId']][self.annotation_key] = status
return list(filter(self, resources))
def get_resource_value(self, k, i):
return super(WorkspaceConnectionStatusFilter, self).get_resource_value(
k, i[self.annotation_key])
@Workspace.filter_registry.register('kms-key')
class KmsFilter(KmsRelatedFilter):
RelatedIdsExpression = 'VolumeEncryptionKey'
@Workspace.action_registry.register('terminate')
class TerminateWorkspace(BaseAction):
"""
Terminates a Workspace
:example:
.. code-block:: yaml
policies:
- name: delete-workspace
resource: workspaces
filters:
- "tag:DeleteMe": present
actions:
- terminate
"""
schema = type_schema('terminate')
permissions = ('workspaces:TerminateWorkspaces',)
valid_origin_states = (
'PENDING',
'AVAILABLE',
'IMPAIRED',
'UNHEALTHY',
'REBOOTING',
'STARTING',
'REBUILDING',
'RESTORING',
'MAINTENANCE',
'ADMIN_MAINTENANCE',
'UPDATING',
'STOPPING',
'STOPPED',
'ERROR'
)
def process(self, resources):
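# Act only on workspaces whose current state allows termination, sending requests in batches of 25.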
resources = self.filter_resources(resources, 'State', self.valid_origin_states)
client = local_session(self.manager.session_factory).client('workspaces')
for resource_set in chunks(resources, size=25):
ids = [{'WorkspaceId': w['WorkspaceId']} for w in resource_set]
client.terminate_workspaces(TerminateWorkspaceRequests=ids)
@resources.register('workspaces-image')
class WorkspaceImage(QueryResourceManager):
class resource_type(TypeInfo):
service = 'workspaces'
enum_spec = ('describe_workspace_images', 'Images', None)
arn_type = 'workspaceimage'
name = id = 'ImageId'
universal_taggable = True
augment = universal_augment
@WorkspaceImage.filter_registry.register('cross-account')
class WorkspaceImageCrossAccount(CrossAccountAccessFilter):
schema = type_schema(
'cross-account',
# white list accounts
whitelist_from=ValuesFrom.schema,
whitelist={'type': 'array', 'items': {'type': 'string'}})
permissions = ('workspaces:DescribeWorkspaceImagePermissions',)
def process(self, resources, event=None):
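# Flag images shared with any account outside the allow list, recording the offending account ids under c7n:CrossAccountViolations.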
client = local_session(self.manager.session_factory).client('workspaces')
allowed_accounts = set(self.get_accounts())
results = []
for r in resources:
found = False
try:
accts = client.describe_workspace_image_permissions(
ImageId=r['ImageId']).get('ImagePermissions')
for a in accts:
account_id = a['SharedAccountId']
if (account_id not in allowed_accounts):
r.setdefault('c7n:CrossAccountViolations', []).append(account_id)
found = True
if found:
results.append(r)
except client.exceptions.ResourceNotFoundException:
continue
return results
@WorkspaceImage.action_registry.register('delete')
class DeleteWorkspaceImage(BaseAction):
"""
Deletes a Workspace Image
:example:
.. code-block:: yaml
policies:
- name: delete-workspace-img
resource: workspaces-image
filters:
- "tag:DeleteMe": present
actions:
- delete
"""
schema = type_schema('delete')
permissions = ('workspaces:DeleteWorkspaceImage',)
valid_origin_states = ('AVAILABLE', 'ERROR',)
def process(self, resources):
resources = self.filter_resources(resources, 'State', self.valid_origin_states)
client = local_session(self.manager.session_factory).client('workspaces')
for r in resources:
try:
client.delete_workspace_image(ImageId=r['ImageId'])
except client.exceptions.InvalidResourceStateException as e:
self.log.error(f"Error deleting workspace image: {r['ImageId']} error: {e}")
continue
except client.exceptions.ResourceAssociatedException as e:
self.log.error(f"Error deleting workspace image: {r['ImageId']} error: {e}")
continue
except client.exceptions.ResourceNotFoundException:
continue
@resources.register('workspaces-directory')
class WorkspaceDirectory(QueryResourceManager):
class resource_type(TypeInfo):
service = 'workspaces'
enum_spec = ('describe_workspace_directories', 'Directories', None)
arn_type = 'directory'
id = 'DirectoryId'
name = 'DirectoryName'
universal_taggable = True
augment = universal_augment
@WorkspaceDirectory.filter_registry.register('security-group')
class WorkspacesDirectorySG(net_filters.SecurityGroupFilter):
RelatedIdsExpression = ""
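# The expression is intentionally empty; security group ids are gathered from both candidate paths in get_related_ids below.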
expressions = ("WorkspaceSecurityGroupId", "WorkspaceCreationProperties.CustomSecurityGroupId")
def get_related_ids(self, resources):
sg_ids = set()
for r in resources:
for exp in self.expressions:
id = jmespath.search(exp, r)
if id:
sg_ids.add(id)
return list(sg_ids)
@WorkspaceDirectory.filter_registry.register('subnet')
class WorkSpacesDirectorySg(net_filters.SubnetFilter):
RelatedIdsExpression = "SubnetIds[]"
@WorkspaceDirectory.filter_registry.register('client-properties')
class WorkspacesDirectoryClientProperties(ValueFilter):
"""Filter workspace directories based off workspace client properties.
:example:
.. code-block:: yaml
policies:
- name: workspace-client-credentials
resource: aws.workspaces-directory
filters:
- type: client-properties
key: ReconnectEnabled
value: ENABLED
"""
permissions = ('workspaces:DescribeClientProperties',)
schema = type_schema('client-properties', rinherit=ValueFilter.schema)
annotation_key = 'c7n:client-properties'
def process(self, directories, event=None):
client = local_session(self.manager.session_factory).client('workspaces')
results = []
for directory in directories:
if self.annotation_key not in directory:
try:
client_properties = client.describe_client_properties(
ResourceIds=[directory['DirectoryId']]).get(
'ClientPropertiesList')[0].get('ClientProperties')
except client.exceptions.ResourceNotFoundException:
continue
directory[self.annotation_key] = client_properties
if self.match(directory[self.annotation_key]):
results.append(directory)
return results
@WorkspaceDirectory.action_registry.register('modify-client-properties')
class ModifyClientProperties(BaseAction):
"""Action to enable/disable credential caching for Workspaces client.
:example:
.. code-block:: yaml
policies:
- name: workspace-directories-credentials-cache
resource: aws.workspaces-directory
filters:
- type: client-properties
key: ReconnectEnabled
value: ENABLED
actions:
- type: modify-client-properties
attributes:
ClientProperties:
ReconnectEnabled: DISABLED
"""
schema = type_schema(
'modify-client-properties',
required=['attributes'],
attributes={
'type': 'object',
'additionalProperties': False,
'properties': {
'ClientProperties': {
'type': 'object',
'additionalProperties': False,
'required': ['ReconnectEnabled'],
'properties': {
'ReconnectEnabled': {'enum': ['DISABLED', 'ENABLED']}
}
}
}})
permissions = ('workspaces:ModifyClientProperties',)
def validate(self):
for f in self.manager.iter_filters():
if isinstance(f, WorkspacesDirectoryClientProperties):
return self
raise PolicyValidationError(
'`modify-client-properties` may only be used in '
'conjunction with `client-properties` filter on %s' % (self.manager.data,))
def process(self, directories):
client = local_session(self.manager.session_factory).client('workspaces')
for directory in directories:
try:
client.modify_client_properties(
ResourceId=directory['DirectoryId'], **self.data['attributes'])
except client.exceptions.ResourceNotFoundException:
continue
@WorkspaceDirectory.action_registry.register('deregister')
class DeregisterWorkspaceDirectory(BaseAction):
"""
Deregisters a workspace
:example:
.. code-block:: yaml
policies:
- name: deregister-workspace
resource: aws.workspaces-directory
filters:
- "tag:Deregister": present
actions:
- deregister
"""
schema = type_schema('deregister')
permissions = ('workspaces:DeregisterWorkspaceDirectory',)
def process(self, directories):
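# Retry transient InvalidResourceStateException errors, skip directories that no longer exist, and collect the ids that cannot be deregistered so a single error reports them all.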
exceptions = []
retry = get_retry(('InvalidResourceStateException',))
client = local_session(self.manager.session_factory).client('workspaces')
for d in directories:
try:
retry(client.deregister_workspace_directory, DirectoryId=d['DirectoryId'],
ignore_err_codes=('ResourceNotFoundException',))
except client.exceptions.OperationNotSupportedException as e:
self.log.error(f"Error deregistering workspace: {d['DirectoryId']} error: {e}")
exceptions.append(d['DirectoryId'])
if exceptions:
raise PolicyExecutionError(
'The following directories must be removed from WorkSpaces '
'and cannot be deregistered: %s' % ', '.join(map(str, exceptions))
)
|
c7n/resources/workspaces.py
| 0.489259 | 0.098599 |
import datetime
import decimal
import io
import os
from pathlib import Path
from tempfile import NamedTemporaryFile
import numpy as np
import pytest
import matplotlib as mpl
from matplotlib import dviread, pyplot as plt, checkdep_usetex, rcParams
from matplotlib.backends.backend_pdf import PdfPages
from matplotlib.testing.decorators import check_figures_equal, image_comparison
needs_usetex = pytest.mark.skipif(
not checkdep_usetex(True),
reason="This test needs a TeX installation")
@image_comparison(['pdf_use14corefonts.pdf'])
def test_use14corefonts():
rcParams['pdf.use14corefonts'] = True
rcParams['font.family'] = 'sans-serif'
rcParams['font.size'] = 8
rcParams['font.sans-serif'] = ['Helvetica']
rcParams['pdf.compression'] = 0
text = '''A three-line text positioned just above a blue line
and containing some French characters and the euro symbol:
"Merci pépé pour les 10 €"'''
fig, ax = plt.subplots()
ax.set_title('Test PDF backend with option use14corefonts=True')
ax.text(0.5, 0.5, text, horizontalalignment='center',
verticalalignment='bottom',
fontsize=14)
ax.axhline(0.5, linewidth=0.5)
def test_type42():
rcParams['pdf.fonttype'] = 42
fig, ax = plt.subplots()
ax.plot([1, 2, 3])
fig.savefig(io.BytesIO())
def test_multipage_pagecount():
with PdfPages(io.BytesIO()) as pdf:
assert pdf.get_pagecount() == 0
fig, ax = plt.subplots()
ax.plot([1, 2, 3])
fig.savefig(pdf, format="pdf")
assert pdf.get_pagecount() == 1
pdf.savefig()
assert pdf.get_pagecount() == 2
def test_multipage_properfinalize():
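# The multipage file should be finalized exactly once: a single startxref entry and a file size well under 40 kB.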
pdfio = io.BytesIO()
with PdfPages(pdfio) as pdf:
for i in range(10):
fig, ax = plt.subplots()
ax.set_title('This is a long title')
fig.savefig(pdf, format="pdf")
s = pdfio.getvalue()
assert s.count(b'startxref') == 1
assert len(s) < 40000
def test_multipage_keep_empty():
# test empty pdf files
# test that an empty pdf is left behind with keep_empty=True (default)
with NamedTemporaryFile(delete=False) as tmp:
with PdfPages(tmp) as pdf:
filename = pdf._file.fh.name
assert os.path.exists(filename)
os.remove(filename)
# test that an empty pdf deletes itself with keep_empty=False
with PdfPages(filename, keep_empty=False) as pdf:
pass
assert not os.path.exists(filename)
# test pdf files with content, they should never be deleted
fig, ax = plt.subplots()
ax.plot([1, 2, 3])
# test that a non-empty pdf is left behind with keep_empty=True (default)
with NamedTemporaryFile(delete=False) as tmp:
with PdfPages(tmp) as pdf:
filename = pdf._file.fh.name
pdf.savefig()
assert os.path.exists(filename)
os.remove(filename)
# test that a non-empty pdf is left behind with keep_empty=False
with NamedTemporaryFile(delete=False) as tmp:
with PdfPages(tmp, keep_empty=False) as pdf:
filename = pdf._file.fh.name
pdf.savefig()
assert os.path.exists(filename)
os.remove(filename)
def test_composite_image():
# Test that figures can be saved with and without combining multiple images
# (on a single set of axes) into a single composite image.
X, Y = np.meshgrid(np.arange(-5, 5, 1), np.arange(-5, 5, 1))
Z = np.sin(Y ** 2)
fig, ax = plt.subplots()
ax.set_xlim(0, 3)
ax.imshow(Z, extent=[0, 1, 0, 1])
ax.imshow(Z[::-1], extent=[2, 3, 0, 1])
plt.rcParams['image.composite_image'] = True
with PdfPages(io.BytesIO()) as pdf:
fig.savefig(pdf, format="pdf")
assert len(pdf._file._images) == 1
plt.rcParams['image.composite_image'] = False
with PdfPages(io.BytesIO()) as pdf:
fig.savefig(pdf, format="pdf")
assert len(pdf._file._images) == 2
def test_savefig_metadata(monkeypatch):
pikepdf = pytest.importorskip('pikepdf')
monkeypatch.setenv('SOURCE_DATE_EPOCH', '0')
fig, ax = plt.subplots()
ax.plot(range(5))
md = {
'Author': 'me',
'Title': 'Multipage PDF',
'Subject': 'Test page',
'Keywords': 'test,pdf,multipage',
'ModDate': datetime.datetime(
1968, 8, 1, tzinfo=datetime.timezone(datetime.timedelta(0))),
'Trapped': 'True'
}
buf = io.BytesIO()
fig.savefig(buf, metadata=md, format='pdf')
with pikepdf.Pdf.open(buf) as pdf:
info = {k: str(v) for k, v in pdf.docinfo.items()}
assert info == {
'/Author': 'me',
'/CreationDate': 'D:19700101000000Z',
'/Creator': f'Matplotlib v{mpl.__version__}, https://matplotlib.org',
'/Keywords': 'test,pdf,multipage',
'/ModDate': 'D:19680801000000Z',
'/Producer': f'Matplotlib pdf backend v{mpl.__version__}',
'/Subject': 'Test page',
'/Title': 'Multipage PDF',
'/Trapped': '/True',
}
def test_invalid_metadata():
fig, ax = plt.subplots()
with pytest.warns(UserWarning,
match="Unknown infodict keyword: 'foobar'."):
fig.savefig(io.BytesIO(), format='pdf', metadata={'foobar': 'invalid'})
with pytest.warns(UserWarning,
match='not an instance of datetime.datetime.'):
fig.savefig(io.BytesIO(), format='pdf',
metadata={'ModDate': '1968-08-01'})
with pytest.warns(UserWarning,
match='not one of {"True", "False", "Unknown"}'):
fig.savefig(io.BytesIO(), format='pdf', metadata={'Trapped': 'foo'})
with pytest.warns(UserWarning, match='not an instance of str.'):
fig.savefig(io.BytesIO(), format='pdf', metadata={'Title': 1234})
def test_multipage_metadata(monkeypatch):
pikepdf = pytest.importorskip('pikepdf')
monkeypatch.setenv('SOURCE_DATE_EPOCH', '0')
fig, ax = plt.subplots()
ax.plot(range(5))
md = {
'Author': 'me',
'Title': 'Multipage PDF',
'Subject': 'Test page',
'Keywords': 'test,pdf,multipage',
'ModDate': datetime.datetime(
1968, 8, 1, tzinfo=datetime.timezone(datetime.timedelta(0))),
'Trapped': 'True'
}
buf = io.BytesIO()
with PdfPages(buf, metadata=md) as pdf:
pdf.savefig(fig)
pdf.savefig(fig)
with pikepdf.Pdf.open(buf) as pdf:
info = {k: str(v) for k, v in pdf.docinfo.items()}
assert info == {
'/Author': 'me',
'/CreationDate': 'D:19700101000000Z',
'/Creator': f'Matplotlib v{mpl.__version__}, https://matplotlib.org',
'/Keywords': 'test,pdf,multipage',
'/ModDate': 'D:19680801000000Z',
'/Producer': f'Matplotlib pdf backend v{mpl.__version__}',
'/Subject': 'Test page',
'/Title': 'Multipage PDF',
'/Trapped': '/True',
}
def test_text_urls():
pikepdf = pytest.importorskip('pikepdf')
test_url = 'https://test_text_urls.matplotlib.org/'
fig = plt.figure(figsize=(2, 1))
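# The figure is 1 inch tall, so text placed at fractional height y should yield a link annotation whose lower edge sits at y * 72 points (checked below).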
fig.text(0.1, 0.1, 'test plain 123', url=f'{test_url}plain')
fig.text(0.1, 0.4, 'test mathtext $123$', url=f'{test_url}mathtext')
with io.BytesIO() as fd:
fig.savefig(fd, format='pdf')
with pikepdf.Pdf.open(fd) as pdf:
annots = pdf.pages[0].Annots
for y, fragment in [('0.1', 'plain'), ('0.4', 'mathtext')]:
annot = next(
(a for a in annots if a.A.URI == f'{test_url}{fragment}'),
None)
assert annot is not None
# Positions in points (72 per inch.)
assert annot.Rect[1] == decimal.Decimal(y) * 72
@needs_usetex
def test_text_urls_tex():
pikepdf = pytest.importorskip('pikepdf')
test_url = 'https://test_text_urls.matplotlib.org/'
fig = plt.figure(figsize=(2, 1))
fig.text(0.1, 0.7, 'test tex $123$', usetex=True, url=f'{test_url}tex')
with io.BytesIO() as fd:
fig.savefig(fd, format='pdf')
with pikepdf.Pdf.open(fd) as pdf:
annots = pdf.pages[0].Annots
annot = next(
(a for a in annots if a.A.URI == f'{test_url}tex'),
None)
assert annot is not None
# Positions in points (72 per inch.)
assert annot.Rect[1] == decimal.Decimal('0.7') * 72
def test_pdfpages_fspath():
with PdfPages(Path(os.devnull)) as pdf:
pdf.savefig(plt.figure())
@image_comparison(['hatching_legend.pdf'])
def test_hatching_legend():
"""Test for correct hatching on patches in legend"""
fig = plt.figure(figsize=(1, 2))
a = plt.Rectangle([0, 0], 0, 0, facecolor="green", hatch="XXXX")
b = plt.Rectangle([0, 0], 0, 0, facecolor="blue", hatch="XXXX")
fig.legend([a, b, a, b], ["", "", "", ""])
@image_comparison(['grayscale_alpha.pdf'])
def test_grayscale_alpha():
"""Masking images with NaN did not work for grayscale images"""
x, y = np.ogrid[-2:2:.1, -2:2:.1]
dd = np.exp(-(x**2 + y**2))
dd[dd < .1] = np.nan
fig, ax = plt.subplots()
ax.imshow(dd, interpolation='none', cmap='gray_r')
ax.set_xticks([])
ax.set_yticks([])
# This test tends to hit a TeX cache lock on AppVeyor.
@pytest.mark.flaky(reruns=3)
@needs_usetex
def test_missing_psfont(monkeypatch):
"""An error is raised if a TeX font lacks a Type-1 equivalent"""
def psfont(*args, **kwargs):
return dviread.PsFont(texname='texfont', psname='Some Font',
effects=None, encoding=None, filename=None)
monkeypatch.setattr(dviread.PsfontsMap, '__getitem__', psfont)
rcParams['text.usetex'] = True
fig, ax = plt.subplots()
ax.text(0.5, 0.5, 'hello')
with NamedTemporaryFile() as tmpfile, pytest.raises(ValueError):
fig.savefig(tmpfile, format='pdf')
@pytest.mark.style('default')
@check_figures_equal(extensions=["pdf", "eps"])
def test_pdf_eps_savefig_when_color_is_none(fig_test, fig_ref):
ax_test = fig_test.add_subplot()
ax_test.set_axis_off()
ax_test.plot(np.sin(np.linspace(-5, 5, 100)), "v", c="none")
ax_ref = fig_ref.add_subplot()
ax_ref.set_axis_off()
@needs_usetex
def test_failing_latex():
"""Test failing latex subprocess call"""
plt.xlabel("$22_2_2$", usetex=True) # This fails with "Double subscript"
with pytest.raises(RuntimeError):
plt.savefig(io.BytesIO(), format="pdf")
def test_empty_rasterized():
# Check that empty figures that are rasterised save to pdf files fine
fig, ax = plt.subplots()
ax.plot([], [], rasterized=True)
fig.savefig(io.BytesIO(), format="pdf")
@image_comparison(['kerning.pdf'])
def test_kerning():
fig = plt.figure()
s = "AVAVAVAVAVAVAVAV€AAVV"
fig.text(0, .25, s, size=5)
fig.text(0, .75, s, size=20)
|
venv/Lib/site-packages/matplotlib/tests/test_backend_pdf.py
|
import datetime
import decimal
import io
import os
from pathlib import Path
from tempfile import NamedTemporaryFile
import numpy as np
import pytest
import matplotlib as mpl
from matplotlib import dviread, pyplot as plt, checkdep_usetex, rcParams
from matplotlib.backends.backend_pdf import PdfPages
from matplotlib.testing.decorators import check_figures_equal, image_comparison
needs_usetex = pytest.mark.skipif(
not checkdep_usetex(True),
reason="This test needs a TeX installation")
@image_comparison(['pdf_use14corefonts.pdf'])
def test_use14corefonts():
rcParams['pdf.use14corefonts'] = True
rcParams['font.family'] = 'sans-serif'
rcParams['font.size'] = 8
rcParams['font.sans-serif'] = ['Helvetica']
rcParams['pdf.compression'] = 0
text = '''A three-line text positioned just above a blue line
and containing some French characters and the euro symbol:
"Merci pépé pour les 10 €"'''
fig, ax = plt.subplots()
ax.set_title('Test PDF backend with option use14corefonts=True')
ax.text(0.5, 0.5, text, horizontalalignment='center',
verticalalignment='bottom',
fontsize=14)
ax.axhline(0.5, linewidth=0.5)
def test_type42():
rcParams['pdf.fonttype'] = 42
fig, ax = plt.subplots()
ax.plot([1, 2, 3])
fig.savefig(io.BytesIO())
def test_multipage_pagecount():
with PdfPages(io.BytesIO()) as pdf:
assert pdf.get_pagecount() == 0
fig, ax = plt.subplots()
ax.plot([1, 2, 3])
fig.savefig(pdf, format="pdf")
assert pdf.get_pagecount() == 1
pdf.savefig()
assert pdf.get_pagecount() == 2
def test_multipage_properfinalize():
pdfio = io.BytesIO()
with PdfPages(pdfio) as pdf:
for i in range(10):
fig, ax = plt.subplots()
ax.set_title('This is a long title')
fig.savefig(pdf, format="pdf")
s = pdfio.getvalue()
assert s.count(b'startxref') == 1
assert len(s) < 40000
def test_multipage_keep_empty():
# test empty pdf files
# test that an empty pdf is left behind with keep_empty=True (default)
with NamedTemporaryFile(delete=False) as tmp:
with PdfPages(tmp) as pdf:
filename = pdf._file.fh.name
assert os.path.exists(filename)
os.remove(filename)
# test if an empty pdf is deleting itself afterwards with keep_empty=False
with PdfPages(filename, keep_empty=False) as pdf:
pass
assert not os.path.exists(filename)
# test pdf files with content, they should never be deleted
fig, ax = plt.subplots()
ax.plot([1, 2, 3])
# test that a non-empty pdf is left behind with keep_empty=True (default)
with NamedTemporaryFile(delete=False) as tmp:
with PdfPages(tmp) as pdf:
filename = pdf._file.fh.name
pdf.savefig()
assert os.path.exists(filename)
os.remove(filename)
# test that a non-empty pdf is left behind with keep_empty=False
with NamedTemporaryFile(delete=False) as tmp:
with PdfPages(tmp, keep_empty=False) as pdf:
filename = pdf._file.fh.name
pdf.savefig()
assert os.path.exists(filename)
os.remove(filename)
def test_composite_image():
# Test that figures can be saved with and without combining multiple images
# (on a single set of axes) into a single composite image.
X, Y = np.meshgrid(np.arange(-5, 5, 1), np.arange(-5, 5, 1))
Z = np.sin(Y ** 2)
fig, ax = plt.subplots()
ax.set_xlim(0, 3)
ax.imshow(Z, extent=[0, 1, 0, 1])
ax.imshow(Z[::-1], extent=[2, 3, 0, 1])
plt.rcParams['image.composite_image'] = True
with PdfPages(io.BytesIO()) as pdf:
fig.savefig(pdf, format="pdf")
assert len(pdf._file._images) == 1
plt.rcParams['image.composite_image'] = False
with PdfPages(io.BytesIO()) as pdf:
fig.savefig(pdf, format="pdf")
assert len(pdf._file._images) == 2
def test_savefig_metadata(monkeypatch):
pikepdf = pytest.importorskip('pikepdf')
monkeypatch.setenv('SOURCE_DATE_EPOCH', '0')
fig, ax = plt.subplots()
ax.plot(range(5))
md = {
'Author': 'me',
'Title': 'Multipage PDF',
'Subject': 'Test page',
'Keywords': 'test,pdf,multipage',
'ModDate': datetime.datetime(
1968, 8, 1, tzinfo=datetime.timezone(datetime.timedelta(0))),
'Trapped': 'True'
}
buf = io.BytesIO()
fig.savefig(buf, metadata=md, format='pdf')
with pikepdf.Pdf.open(buf) as pdf:
info = {k: str(v) for k, v in pdf.docinfo.items()}
assert info == {
'/Author': 'me',
'/CreationDate': 'D:19700101000000Z',
'/Creator': f'Matplotlib v{mpl.__version__}, https://matplotlib.org',
'/Keywords': 'test,pdf,multipage',
'/ModDate': 'D:19680801000000Z',
'/Producer': f'Matplotlib pdf backend v{mpl.__version__}',
'/Subject': 'Test page',
'/Title': 'Multipage PDF',
'/Trapped': '/True',
}
def test_invalid_metadata():
fig, ax = plt.subplots()
with pytest.warns(UserWarning,
match="Unknown infodict keyword: 'foobar'."):
fig.savefig(io.BytesIO(), format='pdf', metadata={'foobar': 'invalid'})
with pytest.warns(UserWarning,
match='not an instance of datetime.datetime.'):
fig.savefig(io.BytesIO(), format='pdf',
metadata={'ModDate': '1968-08-01'})
with pytest.warns(UserWarning,
match='not one of {"True", "False", "Unknown"}'):
fig.savefig(io.BytesIO(), format='pdf', metadata={'Trapped': 'foo'})
with pytest.warns(UserWarning, match='not an instance of str.'):
fig.savefig(io.BytesIO(), format='pdf', metadata={'Title': 1234})
def test_multipage_metadata(monkeypatch):
pikepdf = pytest.importorskip('pikepdf')
monkeypatch.setenv('SOURCE_DATE_EPOCH', '0')
fig, ax = plt.subplots()
ax.plot(range(5))
md = {
'Author': 'me',
'Title': 'Multipage PDF',
'Subject': 'Test page',
'Keywords': 'test,pdf,multipage',
'ModDate': datetime.datetime(
1968, 8, 1, tzinfo=datetime.timezone(datetime.timedelta(0))),
'Trapped': 'True'
}
buf = io.BytesIO()
with PdfPages(buf, metadata=md) as pdf:
pdf.savefig(fig)
pdf.savefig(fig)
with pikepdf.Pdf.open(buf) as pdf:
info = {k: str(v) for k, v in pdf.docinfo.items()}
assert info == {
'/Author': 'me',
'/CreationDate': 'D:19700101000000Z',
'/Creator': f'Matplotlib v{mpl.__version__}, https://matplotlib.org',
'/Keywords': 'test,pdf,multipage',
'/ModDate': 'D:19680801000000Z',
'/Producer': f'Matplotlib pdf backend v{mpl.__version__}',
'/Subject': 'Test page',
'/Title': 'Multipage PDF',
'/Trapped': '/True',
}
def test_text_urls():
pikepdf = pytest.importorskip('pikepdf')
test_url = 'https://test_text_urls.matplotlib.org/'
fig = plt.figure(figsize=(2, 1))
fig.text(0.1, 0.1, 'test plain 123', url=f'{test_url}plain')
fig.text(0.1, 0.4, 'test mathtext $123$', url=f'{test_url}mathtext')
with io.BytesIO() as fd:
fig.savefig(fd, format='pdf')
with pikepdf.Pdf.open(fd) as pdf:
annots = pdf.pages[0].Annots
for y, fragment in [('0.1', 'plain'), ('0.4', 'mathtext')]:
annot = next(
(a for a in annots if a.A.URI == f'{test_url}{fragment}'),
None)
assert annot is not None
# Positions in points (72 per inch.)
assert annot.Rect[1] == decimal.Decimal(y) * 72
@needs_usetex
def test_text_urls_tex():
pikepdf = pytest.importorskip('pikepdf')
test_url = 'https://test_text_urls.matplotlib.org/'
fig = plt.figure(figsize=(2, 1))
fig.text(0.1, 0.7, 'test tex $123$', usetex=True, url=f'{test_url}tex')
with io.BytesIO() as fd:
fig.savefig(fd, format='pdf')
with pikepdf.Pdf.open(fd) as pdf:
annots = pdf.pages[0].Annots
annot = next(
(a for a in annots if a.A.URI == f'{test_url}tex'),
None)
assert annot is not None
# Positions in points (72 per inch.)
assert annot.Rect[1] == decimal.Decimal('0.7') * 72
def test_pdfpages_fspath():
with PdfPages(Path(os.devnull)) as pdf:
pdf.savefig(plt.figure())
@image_comparison(['hatching_legend.pdf'])
def test_hatching_legend():
"""Test for correct hatching on patches in legend"""
fig = plt.figure(figsize=(1, 2))
a = plt.Rectangle([0, 0], 0, 0, facecolor="green", hatch="XXXX")
b = plt.Rectangle([0, 0], 0, 0, facecolor="blue", hatch="XXXX")
fig.legend([a, b, a, b], ["", "", "", ""])
@image_comparison(['grayscale_alpha.pdf'])
def test_grayscale_alpha():
"""Masking images with NaN did not work for grayscale images"""
x, y = np.ogrid[-2:2:.1, -2:2:.1]
dd = np.exp(-(x**2 + y**2))
dd[dd < .1] = np.nan
fig, ax = plt.subplots()
ax.imshow(dd, interpolation='none', cmap='gray_r')
ax.set_xticks([])
ax.set_yticks([])
# This test tends to hit a TeX cache lock on AppVeyor.
@pytest.mark.flaky(reruns=3)
@needs_usetex
def test_missing_psfont(monkeypatch):
"""An error is raised if a TeX font lacks a Type-1 equivalent"""
def psfont(*args, **kwargs):
return dviread.PsFont(texname='texfont', psname='Some Font',
effects=None, encoding=None, filename=None)
monkeypatch.setattr(dviread.PsfontsMap, '__getitem__', psfont)
rcParams['text.usetex'] = True
fig, ax = plt.subplots()
ax.text(0.5, 0.5, 'hello')
with NamedTemporaryFile() as tmpfile, pytest.raises(ValueError):
fig.savefig(tmpfile, format='pdf')
@pytest.mark.style('default')
@check_figures_equal(extensions=["pdf", "eps"])
def test_pdf_eps_savefig_when_color_is_none(fig_test, fig_ref):
ax_test = fig_test.add_subplot()
ax_test.set_axis_off()
ax_test.plot(np.sin(np.linspace(-5, 5, 100)), "v", c="none")
ax_ref = fig_ref.add_subplot()
ax_ref.set_axis_off()
@needs_usetex
def test_failing_latex():
"""Test failing latex subprocess call"""
plt.xlabel("$22_2_2$", usetex=True) # This fails with "Double subscript"
with pytest.raises(RuntimeError):
plt.savefig(io.BytesIO(), format="pdf")
def test_empty_rasterized():
# Check that empty figures that are rasterised save to pdf files fine
fig, ax = plt.subplots()
ax.plot([], [], rasterized=True)
fig.savefig(io.BytesIO(), format="pdf")
@image_comparison(['kerning.pdf'])
def test_kerning():
fig = plt.figure()
s = "AVAVAVAVAVAVAVAV€AAVV"
fig.text(0, .25, s, size=5)
fig.text(0, .75, s, size=20)
| 0.517327 | 0.439868 |
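The tests above cover matplotlib's multipage PDF writer (PdfPages) and its document metadata handling. A minimal standalone sketch of that pattern follows; the output filename and the metadata values are illustrative only, not taken from the test suite.

import datetime
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages

# Illustrative metadata; PdfPages writes it into the PDF's document info.
md = {
    'Author': 'me',
    'Title': 'Multipage PDF',
    'ModDate': datetime.datetime(1968, 8, 1, tzinfo=datetime.timezone.utc),
}

with PdfPages('report.pdf', metadata=md) as pdf:
    for _ in range(2):
        fig, ax = plt.subplots()
        ax.plot(range(5))
        pdf.savefig(fig)   # each savefig call appends one page
        plt.close(fig)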
from typing import List
from fastapi_utils.api_model import APIMessage
from fastapi_utils.cbv import cbv
from fastapi_utils.inferring_router import InferringRouter
from fastapi import Depends, status, Response
from odmantic import ObjectId
from app.schema import DatasetPostSchema, DatasetGetSortQuery, \
DatasetToken, ImageAnnotationsData, DatasetPatchSchema
from app.models import Dataset, Label, Project, FastToken
from app.security import get_project, get_dataset_token
from app.services.datasets import DatasetService, DatasetExportingStatus, DatasetExportFormat
router = InferringRouter(
tags=["datasets"],
)
class DatasetsViewBase:
@staticmethod
async def get_dataset_by_id(id: ObjectId, project_id) -> Dataset:
return await DatasetService.get_dataset_by_id(id, project_id)
@staticmethod
async def get_annotations_by_dataset_id(id: ObjectId, project_id: ObjectId) -> List[ImageAnnotationsData]:
return await DatasetService.get_annotations_by_dataset_id(id, project_id)
@staticmethod
async def get_datasets(project_id: ObjectId, name: str = None,
sort: DatasetGetSortQuery = None) -> List[Dataset]:
return await DatasetService.get_datasets(project_id, name, sort)
@staticmethod
async def create_dataset(dataset: DatasetPostSchema, project_id: ObjectId) -> Dataset:
return await DatasetService.create_dataset(dataset, project_id)
@staticmethod
async def replace_dataset(id: ObjectId, body: DatasetPostSchema, project_id: ObjectId) -> Dataset:
dataset = await DatasetsViewBase.get_dataset_by_id(id, project_id)
return await DatasetService.update_dataset(dataset, body)
@staticmethod
async def update_dataset(id: ObjectId, body: DatasetPatchSchema, project_id: ObjectId) -> Dataset:
dataset = await DatasetsViewBase.get_dataset_by_id(id, project_id)
return await DatasetService.update_dataset(dataset, body)
@staticmethod
async def delete_dataset(id: ObjectId, project_id: ObjectId):
await DatasetService.delete_dataset(id, project_id)
return APIMessage(detail=f"Deleted dataset {id}")
@staticmethod
async def get_dataset_labels(id: ObjectId, project_id: ObjectId) -> List[Label]:
dataset = await DatasetsViewBase.get_dataset_by_id(id, project_id)
return await DatasetService.get_dataset_labels(dataset)
@staticmethod
    async def create_dataset_access_token(id: ObjectId, project_id: ObjectId) -> DatasetToken:
dataset = await DatasetsViewBase.get_dataset_by_id(id, project_id)
return await DatasetService.create_access_token(dataset)
@staticmethod
async def download_dataset(id: ObjectId, project_id: ObjectId,
format: DatasetExportFormat, response: Response) -> APIMessage:
dataset = await DatasetService.get_dataset_by_id(id, project_id)
export_status = await DatasetService.download_dataset(dataset, format)
if export_status == DatasetExportingStatus.STARTED:
response.status_code = status.HTTP_202_ACCEPTED
return APIMessage(detail='Starting dataset exporting')
elif export_status == DatasetExportingStatus.QUEUED:
response.status_code = status.HTTP_202_ACCEPTED
return APIMessage(detail='Dataset exporting in progress')
elif export_status == DatasetExportingStatus.FINISHED:
url = await DatasetService.get_dataset_download_url(dataset, format)
return APIMessage(detail=url)
@staticmethod
def get_export_formats() -> List[str]:
return [x.value for x in DatasetExportFormat]
@cbv(router)
class DatasetsView:
project: Project = Depends(get_project)
@router.get("/dataset/{id}")
async def get_dataset_by_id(self, id: ObjectId) -> Dataset:
return await DatasetsViewBase.get_dataset_by_id(id, self.project.id)
@router.get("/dataset/{id}/annotations")
async def get_annotations_by_dataset_id(self, id: ObjectId) -> List[ImageAnnotationsData]:
return await DatasetsViewBase.get_annotations_by_dataset_id(id, self.project.id)
@router.get("/dataset")
async def get_datasets(self, name: str = None, include_all_revisions: bool = False,
sort: DatasetGetSortQuery = None) -> List[Dataset]:
return await DatasetService.get_datasets(
self.project.id, name, include_all_revisions, sort)
@router.post("/dataset")
async def create_dataset(self, dataset: DatasetPostSchema) -> Dataset:
return await DatasetsViewBase.create_dataset(dataset, self.project.id)
@router.put("/dataset/{id}")
async def replace_dataset(self, id: ObjectId, dataset: DatasetPostSchema) -> Dataset:
return await DatasetsViewBase.replace_dataset(id, dataset, self.project.id)
@router.patch("/dataset/{id}")
async def update_dataset(self, id: ObjectId, dataset: DatasetPatchSchema) -> Dataset:
return await DatasetsViewBase.update_dataset(id, dataset, self.project.id)
@router.delete("/dataset/{id}")
async def delete_dataset(self, id: ObjectId):
return await DatasetsViewBase.delete_dataset(id, self.project.id)
@router.get("/dataset/{id}/labels")
async def get_dataset_labels(self, id: ObjectId) -> List[Label]:
return await DatasetsViewBase.get_dataset_labels(id, self.project.id)
@router.post("/dataset/{id}/token")
async def create_dataset_access_token(self, id: ObjectId) -> DatasetToken:
dataset = await self.get_dataset_by_id(id)
return await DatasetService.create_access_token(dataset)
@router.get("/dataset/{id}/download")
async def download_dataset(self, id: ObjectId, format: DatasetExportFormat, response: Response) -> APIMessage:
return await DatasetsViewBase.download_dataset(id, self.project.id, format, response)
@router.get("/dataset/{id}/revisions")
    async def get_dataset_revisions(self, id: ObjectId) -> List[Dataset]:
dataset = await self.get_dataset_by_id(id)
return await DatasetService.get_dataset_revisions(dataset)
@router.get("/meta/dataset/formats")
def get_export_formats(self) -> List[str]:
return DatasetsViewBase.get_export_formats()
@cbv(router)
class DatasetsSharedView:
dataset_token: FastToken = Depends(get_dataset_token)
@router.get("/dataset_shared")
async def get_dataset(self) -> Dataset:
return await DatasetsViewBase.get_dataset_by_id(
self.dataset_token.dataset_id, self.dataset_token.project_id)
@router.get("/dataset_shared/annotations")
async def get_annotations_by_dataset_id(self) -> List[ImageAnnotationsData]:
return await DatasetsViewBase.get_annotations_by_dataset_id(
self.dataset_token.dataset_id, self.dataset_token.project_id
)
@router.get("/dataset_shared/labels")
async def get_dataset_labels(self) -> List[Label]:
return await DatasetsViewBase.get_dataset_labels(
self.dataset_token.dataset_id, self.dataset_token.project_id
)
@router.get("/dataset_shared/download")
async def download_dataset(self, format: DatasetExportFormat, response: Response) -> APIMessage:
return await DatasetsViewBase.download_dataset(
self.dataset_token.dataset_id, self.dataset_token.project_id, format, response
)
@router.get("/meta/dataset_shared/formats")
def get_export_formats(self) -> List[str]:
return DatasetsViewBase.get_export_formats()
|
app/views/datasets.py
| 0.704872 | 0.287749 |
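The module above only declares the router and its class-based views. A minimal sketch of wiring it into an application is shown below; the import path follows the repo path (app/views/datasets.py), while the FastAPI instance and the "/api/v1" prefix are assumptions, not taken from the project.

from fastapi import FastAPI
from app.views.datasets import router as datasets_router  # assumed import path

app = FastAPI()
# include_router registers every endpoint declared on the InferringRouter above.
app.include_router(datasets_router, prefix="/api/v1")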
import random
from .vbc_class import Player
from .vbc_base import compare_value, base_push, base_push_index, remain_rank_sort
from functools import cmp_to_key
from .vbc_value_probability import final_push_value, final_answer_probability
def final_set(players, set_number):
players_number = 3
winners_number = 1
    assert len(players) == players_number, 'There must be exactly ' + str(players_number) + ' players'
for player in players:
player.point = 0
player.miss = 0
player.win = False
player.lose = False
player.result_str = ''
winner_index = -1
losers = []
while winner_index == -1 and len(losers) < players_number - winners_number:
answer_data = base_push(players, final_push_value(set_number), final_answer_probability(set_number))
lamp_index = answer_data[0]
correct = answer_data[1]
if lamp_index == -1:
            print('Pass (no answer)')
continue
if correct:
players[lamp_index].point += 1
print(players[lamp_index].name + '○')
players[lamp_index].result_str += '○'
if players[lamp_index].point == 7:
players[lamp_index].win = True
winner_index = lamp_index
else:
players[lamp_index].miss += 1
print(players[lamp_index].name + '×')
players[lamp_index].result_str += 'x'
if players[lamp_index].miss == set_number:
losers.append(players[lamp_index])
players[lamp_index].lose = True
if winner_index == -1:
for i, player in enumerate(players):
            if not player.win and not player.lose:
winner_index = i
player.win = True
print([player.name + ' ' + player.result_str for player in players])
players[winner_index].final_sets += 1
return winner_index, players
def final_set_string(sets):
if sets == 0:
return '0Set'
elif sets == 1:
return '1Set'
else:
return str(sets) + 'Sets'
|
module/vbc_F.py
| 0.151122 | 0.156105 |
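final_set awards a point per correct answer and a strike per miss: seven points wins the set outright, set_number strikes eliminates a player, and once all but one player are eliminated the survivor wins. A stripped-down sketch of that bookkeeping, independent of the module's Player, base_push and probability helpers ('events' is a hypothetical stand-in for the buzz/answer results):

def score_final_set(events, set_number, names=('A', 'B', 'C')):
    # Simplified illustration of the 7-points / set_number-strikes rule.
    points = {n: 0 for n in names}
    misses = {n: 0 for n in names}
    eliminated = set()
    for name, correct in events:
        if name in eliminated:
            continue
        if correct:
            points[name] += 1
            if points[name] == 7:           # seven correct answers wins the set
                return name
        else:
            misses[name] += 1
            if misses[name] == set_number:  # too many misses: eliminated
                eliminated.add(name)
        if len(eliminated) == len(names) - 1:
            # only one player left standing, so they win by default
            return next(n for n in names if n not in eliminated)
    return None

# With set_number=2, 'B' wins by default after 'A' and 'C' each miss twice.
print(score_final_set([('A', False), ('A', False), ('C', False), ('C', False)], 2))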
import sys
sys.path.append('..')
import specrel.geom as geom
import specrel.graphics.companim as canim
import specrel.spacetime.physical as phy
import specrel.visualize as vis
# Planets
origin = 0
planetdist = 1
x_planet = origin + planetdist
earth = phy.MovingObject(origin,
draw_options={'color': 'blue', 'markersize': 20, 'label': 'Earth'})
planet = phy.MovingObject(x_planet,
draw_options={'color': 'purple', 'markersize': 15, 'label': 'Planet'})
# Rocket
v = 3/5
rocket_forward_alltime = phy.MovingObject(origin, velocity=v)
t_turnaround = rocket_forward_alltime.time_for_left_pos(x_planet)
rocket_forward = geom.line_segment((0, 0), (t_turnaround, x_planet),
draw_options={
'color': 'cyan',
'marker': '>',
'markersize': 5,
'linestyle': ':',
'label': 'Traveler (forward)',
}
)
rocket_backward_alltime = phy.MovingObject(origin + 2*planetdist, velocity=-v)
t_return = rocket_backward_alltime.time_for_left_pos(origin)
rocket_backward = geom.line_segment((t_turnaround, x_planet), (t_return, 0),
draw_options={
'color': 'darkorange',
'marker': '<',
'markersize': 5,
'linestyle': ':',
'label': 'Traveler (backward)',
}
)
# Mark events
turnaround_event = geom.STVector(t_turnaround, x_planet,
draw_options={
'color': 'green',
'marker': '*',
'markersize': 10,
'label': 'Turning around',
}
)
return_event = geom.STVector(t_return, origin,
draw_options={
'color': 'red',
'marker': '*',
'markersize': 10,
'label': 'Arrive home',
}
)
# Collect scene
scene = geom.Collection([
earth, planet,
rocket_forward, rocket_backward,
turnaround_event, return_event,
])
# Plot the scene
tlim = (0, geom.STVector.gamma_factor(v)*t_return)
pad = planetdist/5
xlim = (geom.lorentz_transformed(return_event, v).x - pad,
geom.lorentz_transformed(return_event, -v).x + pad)
# From Earth's point of view
current_time_color = 'limegreen'
instant_pause_time = 0.5
fps = 100
legend = True
earth_fname = '9-twinparadox_earth.mp4'
anim_earth = vis.stanimate_with_worldline(scene,
tlim_anim=(0, return_event.t), tlim_worldline=tlim, xlim=xlim,
legend=legend, legend_loc='upper left', fps=fps,
title="Twin paradox (Earth's POV)",
current_time_color=current_time_color,
instant_pause_time=instant_pause_time)
anim_earth.save(earth_fname)
# Rewind
rew_fname = '9-twinparadox_rewind.mp4'
anim_rew = canim.Rewinder(anim_earth, rewind_rate=5)
anim_rew.save(rew_fname)
# Transformation
lt_fname = '9-twinparadox_transform.mp4'
anim_lt = vis.animate_lt_worldline_and_realspace(scene, v,
tlim=tlim, xlim=xlim, legend=legend, fps=fps,
    title='Transforming frames...',
current_time_color=current_time_color)
anim_lt.save(lt_fname)
# From the traveler's point of view during the first half of the journey
scene.lorentz_transform(v)
forward_fname = '9-twinparadox_forward.mp4'
anim_forward = vis.stanimate_with_worldline(scene,
tlim_anim=(0, turnaround_event.t), tlim_worldline=tlim, xlim=xlim,
legend=legend, legend_loc='upper right', fps=fps,
title="Twin paradox (traveler's POV)",
current_time_color=current_time_color,
instant_pause_time=instant_pause_time)
anim_forward.save(forward_fname)
# Change directions mid-travel. Set the origin to the twin's current point, so
# that it doesn't change mid-acceleration.
dv = geom.lorentz_transformed(rocket_backward_alltime, v).velocity()
# Time value of the turnaround, within the time resolution of a frame
tval = round(turnaround_event.t * fps) / fps
accel_fname = '9-twinparadox_accel.mp4'
anim_accel = vis.animate_lt_worldline_and_realspace(scene, dv,
origin=turnaround_event, tlim=tlim, xlim=xlim, legend=legend, fps=fps,
title=f'Changing direction...\nTime = {tval:.3f}',
current_time_color='limegreen', time=turnaround_event.t,
display_current_velocity=False)
anim_accel.save(accel_fname)
# From the traveler's point of view during the second half of the journey
scene.lorentz_transform(dv, origin=turnaround_event)
backward_fname = '9-twinparadox_backward.mp4'
anim_backward = vis.stanimate_with_worldline(scene,
tlim_anim=(turnaround_event.t, return_event.t), tlim_worldline=tlim,
xlim=xlim, legend=legend, legend_loc='upper left', fps=fps,
title="Twin paradox (traveler's POV)",
current_time_color=current_time_color,
instant_pause_time=instant_pause_time)
anim_backward.save(backward_fname)
# Glue all the parts together
canim.concat_demuxer([earth_fname, rew_fname, lt_fname,
forward_fname, accel_fname, backward_fname], '9-twinparadox.mp4')
|
examples/9-twinparadox.py
| 0.538983 | 0.338241 |
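For the parameters used above (v = 3/5, planetdist = 1), the Earth-frame round trip lasts 2/v = 10/3 ≈ 3.33 time units, while the traveler's proper time is only (10/3)/γ = 8/3 ≈ 2.67, with γ = 1.25. A quick numeric check of those figures, independent of the specrel helpers:

import math

v = 3 / 5
planetdist = 1.0

gamma = 1 / math.sqrt(1 - v**2)   # Lorentz factor: 1.25 for v = 3/5
t_turnaround = planetdist / v     # Earth time to reach the planet: 5/3
t_return = 2 * t_turnaround       # Earth time for the round trip: 10/3
traveler_time = t_return / gamma  # traveler's proper time: 8/3

print(f"gamma = {gamma:.3f}")                        # 1.250
print(f"Earth round trip = {t_return:.3f}")          # 3.333
print(f"traveler round trip = {traveler_time:.3f}")  # 2.667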
import os
import itertools
import logging as L
import numpy as np
from perf_compare import execute
L.basicConfig(format='%(levelname)s:%(message)s', level=L.DEBUG)
class Autotune():
def __init__(self, template_list, key_values, cmd):
"""
template_list: ['GroupCOOSparseMatrix.h.t', 'cnnBench2.cu.t']
key_values: {'$COLUMN_PER_GROUP$': [2, 4, 8],
'$BS$': [32, 64]}
"""
self.template_list = template_list
self.key_values = key_values
self.cmd = cmd
def _compile(self):
L.info('Compiling ...')
os.system('./make.sh')
def _gen_unrolling_src(self, cpg):
template = {}
c = ['float c%d =0.0; '%i for i in range(cpg)]
c_definition = ''.join(c)
template['$c_definition$'] = c_definition
c_unroll_write = []
for i in range(cpg):
if i == 0:
s = 'if (index == 0) c0 += a*b; '
else:
s = 'else if (index == %d) c%d += a*b; '%(i, i)
c_unroll_write.append(s)
        template['$c_unroll_write$'] = ''.join(c_unroll_write)
        c_unroll_write_to_C = []
        for i in range(cpg):
            s = 'if (Cj0+%d < wB) C[Ci * wB + Cj0 + %d] = c%d; ' % (i, i, i)
            c_unroll_write_to_C.append(s)
        template['$c_unroll_write_to_C$'] = ''.join(c_unroll_write_to_C)
return template
def _replace_src(self, kv):
L.info('Generate source codes with configured values ...')
for template in self.template_list:
with open(template, 'r') as f:
content = f.read()
#print content
cpg = int(kv['$COLUMN_PER_GROUP$'])
unrolling_src = self._gen_unrolling_src(cpg)
kv.update(unrolling_src)
for k in kv:
v = kv[k]
content = content.replace(k, str(v))
new_filename = template[0:-2]
with open(new_filename, 'w') as newf:
newf.write(content)
def run(self):
        keys = list(self.key_values.keys())
all_values = [self.key_values[k] for k in keys]
experiments = list(itertools.product(*all_values))
exps = []
for e in experiments:
ed = {}
for i, v in enumerate(e):
ed[keys[i]] = v
exps.append(ed)
results = []
for ed in exps:
self._replace_src(ed)
self._compile()
#os.system(self.cmd)
try:
ms = execute(self.cmd)
            except Exception:
ms = 10000000.0
results.append(ms)
        min_ms = np.min(np.array(results))
        minidx = np.argmin(np.array(results))
        L.info('exps: {}'.format(exps))
        L.info('results: {}'.format(results))
        with open('result.log', 'a+') as f:
            f.write('%s\n%s: %f\n'%(self.cmd, exps[minidx], min_ms))
if __name__ == '__main__':
template_list = ['constants.h.t', 'group_spgemm_kernels.cu.t']
#key_values = {'$COLUMN_PER_GROUP$': [1, 2, 4, 8, 16, 32],
# '$BS$': [32, 64, 128, 256, 512]}
#key_values = {'$COLUMN_PER_GROUP$': [4],
# '$BS$': [32, 64, 128, 256, 512, 1024]}
key_values = {'$COLUMN_PER_GROUP$': [4],
'$BS$': [128]}
with open('bc.conf', 'r') as f:
ls = f.readlines()
for l in ls:
cmd = l[0:-1]
        print(cmd)
at = Autotune(template_list, key_values, cmd)
at.run()
|
src/autotune.py
| 0.13852 | 0.190667 |
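Autotune.run expands key_values into the cartesian product of every configured value before building, running and timing each combination. A small self-contained sketch of just that expansion step:

import itertools

key_values = {'$COLUMN_PER_GROUP$': [2, 4], '$BS$': [32, 64]}

keys = list(key_values.keys())
experiments = [dict(zip(keys, values))
               for values in itertools.product(*(key_values[k] for k in keys))]

print(experiments)
# [{'$COLUMN_PER_GROUP$': 2, '$BS$': 32}, {'$COLUMN_PER_GROUP$': 2, '$BS$': 64},
#  {'$COLUMN_PER_GROUP$': 4, '$BS$': 32}, {'$COLUMN_PER_GROUP$': 4, '$BS$': 64}]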
from collections import OrderedDict
import pytest
from fiona_settings import CRS, Settings, Driver, Geometry, Type
class Collection:
def __init__(
self,
driver: Driver = Driver.GeoJSON,
schema: dict = None,
crs: CRS = CRS.WGS84,
encoding: str = "utf-8",
):
self.driver = driver.value
self.schema = schema
self.crs = crs.value
self.encoding = encoding
@pytest.fixture
def settings():
return Settings()
@pytest.fixture
def schema():
return {'properties': {'column1': 'str'}, 'geometry': 'Point'}
def test_from_collection(schema):
c = Collection(schema=schema)
settings = Settings.from_collection(c)
assert settings.schema == c.schema
assert settings.driver == c.driver
assert settings.crs == c.crs
assert settings.encoding == c.encoding
def test_from_collection_with_override(schema):
c = Collection(schema=schema)
settings = Settings.from_collection(c, driver=Driver.GeoPackage)
assert settings.schema == c.schema
assert settings.driver == 'GPKG'
assert settings.crs == c.crs
assert settings.encoding == c.encoding
def test_properties_and_setters(settings):
settings.crs = CRS.WGS84
assert settings.crs == CRS.WGS84.value
settings.driver = Driver.GeoJSON
assert settings.driver == Driver.GeoJSON.value
settings.encoding = 'latin1'
assert settings.encoding == 'latin1'
settings.geometry = Geometry.LineString
settings.properties['column1'] = 'str'
assert settings.schema == {
'geometry': 'LineString',
'properties': OrderedDict(
column1='str'
)
}
def test_add_inplace(settings):
assert len(settings.properties) == 0
settings += ('column1', Type.str(width=25))
assert len(settings.properties) == 1
assert settings.schema == {
'geometry': None,
'properties': OrderedDict(
column1='str:25'
)
}
def test_add_inplace_wrong_type(settings):
with pytest.raises(TypeError) as exc:
settings += ('column1',)
assert exc.match("operand for __iadd__ must be a 2-tuple of \(column_name, column_type\)")
def test_subtract_in_place_no_column(settings):
with pytest.raises(KeyError) as exc:
settings -= 'column'
assert exc.match("Property 'column' does not exist.")
def test_subtract_inplace(settings):
settings += ('column1', Type.str())
settings += ('column2', Type.str(width=50))
assert len(settings.properties) == 2
settings -= 'column1'
assert len(settings.properties) == 1
assert settings.properties == OrderedDict(
column2='str:50'
)
def test_add(settings):
s2 = settings + ('column1', Type.str())
assert s2 != settings
assert len(s2.properties) == 1
def test_subtract(settings):
settings += ('column1', Type.str())
s2 = settings - 'column1'
assert s2 != settings
assert len(s2.properties) == 0
assert len(settings.properties) == 1
|
test/test_settings.py
| 0.808219 | 0.441854 |
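The tests above define the Settings API: properties for driver, crs, geometry and encoding, plus += / -= for adding and removing schema columns. A short usage sketch based only on that API; the fiona.open() call and the 'points.geojson' path are assumptions about intended use rather than something the tests demonstrate.

import fiona
from fiona_settings import CRS, Driver, Geometry, Settings, Type

settings = Settings()
settings.driver = Driver.GeoJSON
settings.crs = CRS.WGS84
settings.geometry = Geometry.Point
settings += ('name', Type.str(width=25))   # adds 'name': 'str:25' to the schema

# Hand the accumulated settings to fiona (assumed usage).
with fiona.open('points.geojson', 'w',
                driver=settings.driver,
                schema=settings.schema,
                crs=settings.crs,
                encoding=settings.encoding) as dst:
    dst.write({
        'geometry': {'type': 'Point', 'coordinates': (0.0, 0.0)},
        'properties': {'name': 'origin'},
    })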
from unittest import mock
import pytest
from h_matchers import Any
from h.traversal.group import GroupContext
from h.views.admin import groups
from h.views.admin.groups import GroupCreateViews, GroupEditViews
class FakeForm:
appstruct = None
def set_appstruct(self, appstruct):
self.appstruct = appstruct
def render(self):
return self.appstruct
@pytest.mark.usefixtures("group_service")
class TestIndex:
def test_it_paginates_results(self, pyramid_request, paginate):
groups.groups_index(None, pyramid_request)
paginate.assert_called_once_with(pyramid_request, Any(), Any())
def test_it_filters_groups_with_name_param(self, pyramid_request, group_service):
pyramid_request.params["q"] = "fingers"
groups.groups_index(None, pyramid_request)
group_service.filter_by_name.assert_called_once_with(name="fingers")
@pytest.fixture
def paginate(self, patch):
return patch("h.views.admin.groups.paginator.paginate")
@pytest.mark.usefixtures(
"group_create_service",
"group_members_service",
"list_organizations_service",
"routes",
"user_service",
"organization_service",
)
class TestGroupCreateView:
def test_get_sets_form(self, pyramid_request):
view = GroupCreateViews(pyramid_request)
response = view.get()
assert "form" in response
def test_init_fetches_all_organizations(
self, pyramid_request, list_organizations_service
):
GroupCreateViews(pyramid_request)
list_organizations_service.organizations.assert_called_with()
def test_init_binds_schema_with_organizations(
self, pyramid_request, organization, AdminGroupSchema
):
GroupCreateViews(pyramid_request)
schema = AdminGroupSchema.return_value
(_, call_kwargs) = schema.bind.call_args
assert call_kwargs["organizations"] == {organization.pubid: organization}
def test_post_handles_form_submission(
self, pyramid_request, handle_form_submission
):
view = GroupCreateViews(pyramid_request)
view.post()
handle_form_submission.assert_called_once_with(
view.request,
view.form,
Any.function(),
view._template_context, # pylint:disable=protected-access
)
def test_post_redirects_to_list_view_on_success(
self, pyramid_request, matchers, handle_form_submission, base_appstruct
):
def call_on_success( # pylint:disable=unused-argument
request, form, on_success, on_failure
):
return on_success(base_appstruct)
handle_form_submission.side_effect = call_on_success
view = GroupCreateViews(pyramid_request)
response = view.post()
expected_location = pyramid_request.route_url("admin.groups")
assert response == matchers.Redirect302To(expected_location)
def test_post_creates_open_group_on_success(
self,
pyramid_request,
group_create_service,
handle_form_submission,
organization,
user_service,
base_appstruct,
):
def call_on_success( # pylint:disable=unused-argument
request, form, on_success, on_failure
):
base_appstruct["group_type"] = "open"
return on_success(base_appstruct)
handle_form_submission.side_effect = call_on_success
view = GroupCreateViews(pyramid_request)
view.post()
group_create_service.create_open_group.assert_called_with(
name="My New Group",
userid=user_service.fetch.return_value.userid,
description=None,
scopes=["http://example.com"],
organization=organization,
enforce_scope=True,
)
def test_post_creates_restricted_group_on_success(
self,
pyramid_request,
group_create_service,
handle_form_submission,
organization,
user_service,
base_appstruct,
):
def call_on_success( # pylint:disable=unused-argument
request, form, on_success, on_failure
):
base_appstruct["group_type"] = "restricted"
return on_success(base_appstruct)
handle_form_submission.side_effect = call_on_success
view = GroupCreateViews(pyramid_request)
view.post()
group_create_service.create_restricted_group.assert_called_with(
name="My New Group",
userid=user_service.fetch.return_value.userid,
description=None,
scopes=["http://example.com"],
organization=organization,
enforce_scope=True,
)
def test_post_adds_members_on_success(
self,
factories,
pyramid_request,
group_create_service,
group_members_service,
handle_form_submission,
user_service,
base_appstruct,
):
user = factories.User()
user_service.fetch.return_value = user
def call_on_success( # pylint:disable=unused-argument
request, form, on_success, on_failure
):
base_appstruct["members"] = ["someusername"]
return on_success(base_appstruct)
handle_form_submission.side_effect = call_on_success
view = GroupCreateViews(pyramid_request)
view.post()
group_members_service.add_members.assert_called_once_with(
group_create_service.create_restricted_group.return_value, [user.userid]
)
@pytest.fixture
def base_appstruct(self, pyramid_request, organization):
return {
"name": "My New Group",
"group_type": "restricted",
"creator": pyramid_request.user.username,
"description": None,
"members": [],
"organization": organization.pubid,
"scopes": ["http://example.com"],
"enforce_scope": True,
}
@pytest.mark.usefixtures(
"routes",
"user_service",
"group_service",
"group_create_service",
"group_update_service",
"group_members_service",
"list_organizations_service",
)
class TestGroupEditViews:
def test_it_binds_schema(
self,
pyramid_request,
group,
user_service,
organization,
AdminGroupSchema,
):
GroupEditViews(GroupContext(group), pyramid_request)
schema = AdminGroupSchema.return_value
schema.bind.assert_called_with(
request=pyramid_request,
group=group,
user_svc=user_service,
organizations={organization.pubid: organization},
)
def test_read_renders_form(self, pyramid_request, factories, group):
factories.Annotation(groupid=group.pubid)
factories.Annotation(groupid=group.pubid)
view = GroupEditViews(GroupContext(group), pyramid_request)
response = view.read()
assert response["form"] == self._expected_form(group)
assert response["pubid"] == group.pubid
assert response["group_name"] == group.name
assert response["member_count"] == len(group.members)
assert response["annotation_count"] == 2
def test_read_renders_form_if_group_has_no_creator(self, pyramid_request, group):
group.creator = None
view = GroupEditViews(GroupContext(group), pyramid_request)
response = view.read()
assert response["form"] == self._expected_form(group)
def test_read_lists_organizations_in_groups_authority(
self,
pyramid_request,
group,
organization,
AdminGroupSchema,
list_organizations_service,
):
GroupEditViews(GroupContext(group), pyramid_request)
list_organizations_service.organizations.assert_called_with(group.authority)
schema = AdminGroupSchema.return_value
(_, call_kwargs) = schema.bind.call_args
assert call_kwargs["organizations"] == {organization.pubid: organization}
def test_update_proxies_to_update_service_on_success(
self,
factories,
pyramid_request,
user_service,
list_organizations_service,
handle_form_submission,
group_update_service,
group,
GroupScope,
):
fetched_user = factories.User()
user_service.fetch.return_value = fetched_user
updated_org = factories.Organization()
list_organizations_service.organizations.return_value.append(updated_org)
def call_on_success( # pylint:disable=unused-argument
request, form, on_success, on_failure
):
return on_success(
{
"creator": fetched_user.username,
"description": "New description",
"group_type": "open",
"name": "Updated group",
"organization": updated_org.pubid,
"scopes": ["http://somewhereelse.com", "http://www.gladiolus.org"],
"members": [],
"enforce_scope": False,
}
)
handle_form_submission.side_effect = call_on_success
view = GroupEditViews(GroupContext(group), pyramid_request)
response = view.update()
group_update_service.update.assert_called_once_with(
group,
organization=updated_org,
creator=fetched_user,
description="New description",
name="Updated group",
scopes=[
GroupScope(scope=scope)
for scope in ["http://somewhereelse.com", "http://www.gladiolus.org"]
],
enforce_scope=False,
)
assert response["form"] == self._expected_form(group)
def test_update_updates_group_members_on_success(
self,
factories,
pyramid_request,
user_service,
group_members_service,
handle_form_submission,
list_organizations_service,
):
group = factories.RestrictedGroup(
pubid="testgroup", organization=factories.Organization()
)
list_organizations_service.organizations.return_value = [group.organization]
fetched_user = factories.User()
user_service.fetch.return_value = fetched_user
def call_on_success( # pylint:disable=unused-argument
request, form, on_success, on_failure
):
return on_success(
{
"authority": pyramid_request.default_authority,
"creator": fetched_user.username,
"description": "a desc",
"group_type": "restricted",
"name": "a name",
"members": ["phil", "sue"],
"organization": group.organization.pubid,
"scopes": ["http://www.example.com"],
"enforce_scope": group.enforce_scope,
}
)
handle_form_submission.side_effect = call_on_success
view = GroupEditViews(GroupContext(group), pyramid_request)
view.update()
group_members_service.update_members.assert_any_call(
group, [fetched_user.userid, fetched_user.userid]
)
def test_delete_deletes_group(self, group, delete_group_service, pyramid_request):
view = GroupEditViews(GroupContext(group), pyramid_request)
view.delete()
delete_group_service.delete.assert_called_once_with(group)
def _expected_form(self, group):
return {
"creator": group.creator.username if group.creator else "",
"description": group.description or "",
"group_type": group.type,
"name": group.name,
"members": [m.username for m in group.members],
"organization": group.organization.pubid,
"scopes": [s.scope for s in group.scopes],
"enforce_scope": group.enforce_scope,
}
@pytest.fixture
def group(self, factories):
return factories.OpenGroup(
pubid="testgroup", organization=factories.Organization()
)
@pytest.fixture
def GroupScope(self, patch):
return patch("h.views.admin.groups.GroupScope")
@pytest.fixture
def authority():
return "foo.com"
@pytest.fixture
def pyramid_request(pyramid_request, factories, authority):
pyramid_request.session = mock.Mock(spec_set=["flash", "get_csrf_token"])
pyramid_request.user = factories.User(authority=authority)
pyramid_request.create_form.return_value = FakeForm()
return pyramid_request
@pytest.fixture
def routes(pyramid_config):
pyramid_config.add_route("admin.groups", "/admin/groups")
pyramid_config.add_route("admin.groups_create", "/admin/groups/new")
pyramid_config.add_route("group_read", "/groups/{pubid}/{slug}")
@pytest.fixture
def list_organizations_service(list_organizations_service, organization):
list_organizations_service.organizations.return_value = [organization]
return list_organizations_service
@pytest.fixture
def organization(factories):
return factories.Organization()
@pytest.fixture
def handle_form_submission(patch):
return patch("h.views.admin.groups.form.handle_form_submission")
@pytest.fixture
def AdminGroupSchema(patch):
schema = mock.Mock(spec_set=["bind"])
AdminGroupSchema = patch("h.views.admin.groups.AdminGroupSchema")
AdminGroupSchema.return_value = schema
return AdminGroupSchema
|
tests/h/views/admin/groups_test.py
|
from unittest import mock
import pytest
from h_matchers import Any
from h.traversal.group import GroupContext
from h.views.admin import groups
from h.views.admin.groups import GroupCreateViews, GroupEditViews
class FakeForm:
appstruct = None
def set_appstruct(self, appstruct):
self.appstruct = appstruct
def render(self):
return self.appstruct
@pytest.mark.usefixtures("group_service")
class TestIndex:
def test_it_paginates_results(self, pyramid_request, paginate):
groups.groups_index(None, pyramid_request)
paginate.assert_called_once_with(pyramid_request, Any(), Any())
def test_it_filters_groups_with_name_param(self, pyramid_request, group_service):
pyramid_request.params["q"] = "fingers"
groups.groups_index(None, pyramid_request)
group_service.filter_by_name.assert_called_once_with(name="fingers")
@pytest.fixture
def paginate(self, patch):
return patch("h.views.admin.groups.paginator.paginate")
@pytest.mark.usefixtures(
"group_create_service",
"group_members_service",
"list_organizations_service",
"routes",
"user_service",
"organization_service",
)
class TestGroupCreateView:
def test_get_sets_form(self, pyramid_request):
view = GroupCreateViews(pyramid_request)
response = view.get()
assert "form" in response
def test_init_fetches_all_organizations(
self, pyramid_request, list_organizations_service
):
GroupCreateViews(pyramid_request)
list_organizations_service.organizations.assert_called_with()
def test_init_binds_schema_with_organizations(
self, pyramid_request, organization, AdminGroupSchema
):
GroupCreateViews(pyramid_request)
schema = AdminGroupSchema.return_value
(_, call_kwargs) = schema.bind.call_args
assert call_kwargs["organizations"] == {organization.pubid: organization}
def test_post_handles_form_submission(
self, pyramid_request, handle_form_submission
):
view = GroupCreateViews(pyramid_request)
view.post()
handle_form_submission.assert_called_once_with(
view.request,
view.form,
Any.function(),
view._template_context, # pylint:disable=protected-access
)
def test_post_redirects_to_list_view_on_success(
self, pyramid_request, matchers, handle_form_submission, base_appstruct
):
def call_on_success( # pylint:disable=unused-argument
request, form, on_success, on_failure
):
return on_success(base_appstruct)
handle_form_submission.side_effect = call_on_success
view = GroupCreateViews(pyramid_request)
response = view.post()
expected_location = pyramid_request.route_url("admin.groups")
assert response == matchers.Redirect302To(expected_location)
def test_post_creates_open_group_on_success(
self,
pyramid_request,
group_create_service,
handle_form_submission,
organization,
user_service,
base_appstruct,
):
def call_on_success( # pylint:disable=unused-argument
request, form, on_success, on_failure
):
base_appstruct["group_type"] = "open"
return on_success(base_appstruct)
handle_form_submission.side_effect = call_on_success
view = GroupCreateViews(pyramid_request)
view.post()
group_create_service.create_open_group.assert_called_with(
name="My New Group",
userid=user_service.fetch.return_value.userid,
description=None,
scopes=["http://example.com"],
organization=organization,
enforce_scope=True,
)
def test_post_creates_restricted_group_on_success(
self,
pyramid_request,
group_create_service,
handle_form_submission,
organization,
user_service,
base_appstruct,
):
def call_on_success( # pylint:disable=unused-argument
request, form, on_success, on_failure
):
base_appstruct["group_type"] = "restricted"
return on_success(base_appstruct)
handle_form_submission.side_effect = call_on_success
view = GroupCreateViews(pyramid_request)
view.post()
group_create_service.create_restricted_group.assert_called_with(
name="My New Group",
userid=user_service.fetch.return_value.userid,
description=None,
scopes=["http://example.com"],
organization=organization,
enforce_scope=True,
)
def test_post_adds_members_on_success(
self,
factories,
pyramid_request,
group_create_service,
group_members_service,
handle_form_submission,
user_service,
base_appstruct,
):
user = factories.User()
user_service.fetch.return_value = user
def call_on_success( # pylint:disable=unused-argument
request, form, on_success, on_failure
):
base_appstruct["members"] = ["someusername"]
return on_success(base_appstruct)
handle_form_submission.side_effect = call_on_success
view = GroupCreateViews(pyramid_request)
view.post()
group_members_service.add_members.assert_called_once_with(
group_create_service.create_restricted_group.return_value, [user.userid]
)
@pytest.fixture
def base_appstruct(self, pyramid_request, organization):
return {
"name": "My New Group",
"group_type": "restricted",
"creator": pyramid_request.user.username,
"description": None,
"members": [],
"organization": organization.pubid,
"scopes": ["http://example.com"],
"enforce_scope": True,
}
@pytest.mark.usefixtures(
"routes",
"user_service",
"group_service",
"group_create_service",
"group_update_service",
"group_members_service",
"list_organizations_service",
)
class TestGroupEditViews:
def test_it_binds_schema(
self,
pyramid_request,
group,
user_service,
organization,
AdminGroupSchema,
):
GroupEditViews(GroupContext(group), pyramid_request)
schema = AdminGroupSchema.return_value
schema.bind.assert_called_with(
request=pyramid_request,
group=group,
user_svc=user_service,
organizations={organization.pubid: organization},
)
def test_read_renders_form(self, pyramid_request, factories, group):
factories.Annotation(groupid=group.pubid)
factories.Annotation(groupid=group.pubid)
view = GroupEditViews(GroupContext(group), pyramid_request)
response = view.read()
assert response["form"] == self._expected_form(group)
assert response["pubid"] == group.pubid
assert response["group_name"] == group.name
assert response["member_count"] == len(group.members)
assert response["annotation_count"] == 2
def test_read_renders_form_if_group_has_no_creator(self, pyramid_request, group):
group.creator = None
view = GroupEditViews(GroupContext(group), pyramid_request)
response = view.read()
assert response["form"] == self._expected_form(group)
def test_read_lists_organizations_in_groups_authority(
self,
pyramid_request,
group,
organization,
AdminGroupSchema,
list_organizations_service,
):
GroupEditViews(GroupContext(group), pyramid_request)
list_organizations_service.organizations.assert_called_with(group.authority)
schema = AdminGroupSchema.return_value
(_, call_kwargs) = schema.bind.call_args
assert call_kwargs["organizations"] == {organization.pubid: organization}
def test_update_proxies_to_update_service_on_success(
self,
factories,
pyramid_request,
user_service,
list_organizations_service,
handle_form_submission,
group_update_service,
group,
GroupScope,
):
fetched_user = factories.User()
user_service.fetch.return_value = fetched_user
updated_org = factories.Organization()
list_organizations_service.organizations.return_value.append(updated_org)
def call_on_success( # pylint:disable=unused-argument
request, form, on_success, on_failure
):
return on_success(
{
"creator": fetched_user.username,
"description": "New description",
"group_type": "open",
"name": "Updated group",
"organization": updated_org.pubid,
"scopes": ["http://somewhereelse.com", "http://www.gladiolus.org"],
"members": [],
"enforce_scope": False,
}
)
handle_form_submission.side_effect = call_on_success
view = GroupEditViews(GroupContext(group), pyramid_request)
response = view.update()
group_update_service.update.assert_called_once_with(
group,
organization=updated_org,
creator=fetched_user,
description="New description",
name="Updated group",
scopes=[
GroupScope(scope=scope)
for scope in ["http://somewhereelse.com", "http://www.gladiolus.org"]
],
enforce_scope=False,
)
assert response["form"] == self._expected_form(group)
def test_update_updates_group_members_on_success(
self,
factories,
pyramid_request,
user_service,
group_members_service,
handle_form_submission,
list_organizations_service,
):
group = factories.RestrictedGroup(
pubid="testgroup", organization=factories.Organization()
)
list_organizations_service.organizations.return_value = [group.organization]
fetched_user = factories.User()
user_service.fetch.return_value = fetched_user
def call_on_success( # pylint:disable=unused-argument
request, form, on_success, on_failure
):
return on_success(
{
"authority": pyramid_request.default_authority,
"creator": fetched_user.username,
"description": "a desc",
"group_type": "restricted",
"name": "a name",
"members": ["phil", "sue"],
"organization": group.organization.pubid,
"scopes": ["http://www.example.com"],
"enforce_scope": group.enforce_scope,
}
)
handle_form_submission.side_effect = call_on_success
view = GroupEditViews(GroupContext(group), pyramid_request)
view.update()
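        # Note (added for clarity): user_service.fetch is mocked to return the
        # same user for every username, so both "phil" and "sue" resolve to
        # fetched_user.userid in the expected call below.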
group_members_service.update_members.assert_any_call(
group, [fetched_user.userid, fetched_user.userid]
)
def test_delete_deletes_group(self, group, delete_group_service, pyramid_request):
view = GroupEditViews(GroupContext(group), pyramid_request)
view.delete()
delete_group_service.delete.assert_called_once_with(group)
def _expected_form(self, group):
return {
"creator": group.creator.username if group.creator else "",
"description": group.description or "",
"group_type": group.type,
"name": group.name,
"members": [m.username for m in group.members],
"organization": group.organization.pubid,
"scopes": [s.scope for s in group.scopes],
"enforce_scope": group.enforce_scope,
}
@pytest.fixture
def group(self, factories):
return factories.OpenGroup(
pubid="testgroup", organization=factories.Organization()
)
@pytest.fixture
def GroupScope(self, patch):
return patch("h.views.admin.groups.GroupScope")
@pytest.fixture
def authority():
return "foo.com"
@pytest.fixture
def pyramid_request(pyramid_request, factories, authority):
pyramid_request.session = mock.Mock(spec_set=["flash", "get_csrf_token"])
pyramid_request.user = factories.User(authority=authority)
pyramid_request.create_form.return_value = FakeForm()
return pyramid_request
@pytest.fixture
def routes(pyramid_config):
pyramid_config.add_route("admin.groups", "/admin/groups")
pyramid_config.add_route("admin.groups_create", "/admin/groups/new")
pyramid_config.add_route("group_read", "/groups/{pubid}/{slug}")
@pytest.fixture
def list_organizations_service(list_organizations_service, organization):
list_organizations_service.organizations.return_value = [organization]
return list_organizations_service
@pytest.fixture
def organization(factories):
return factories.Organization()
@pytest.fixture
def handle_form_submission(patch):
return patch("h.views.admin.groups.form.handle_form_submission")
@pytest.fixture
def AdminGroupSchema(patch):
schema = mock.Mock(spec_set=["bind"])
AdminGroupSchema = patch("h.views.admin.groups.AdminGroupSchema")
AdminGroupSchema.return_value = schema
return AdminGroupSchema
| 0.773302 | 0.341322 |
import numpy as np
class Graph():
    def __init__(self, path, graph=None, circle=None):
        # int64 avoids overflow for large node ids (int16 caps at 32767).
        self.data = np.loadtxt(path, delimiter=",", dtype=np.int64)
        self.data = self.data[np.argsort(self.data[:, 0])]
        # Use None defaults instead of mutable default arguments.
        self.graph = graph if graph is not None else dict()
        self.circlePath = circle if circle is not None else list()
        self.tempPath = list()
        self.hasUsed = dict()
        self.stateKeys = list()
        self.stateValues = list()
    # The graph is a dict; each key -> value list encodes the direction of the edges.
    def creatGraph(self):
        for i in range(self.data.shape[0]):
            if self.graph.get(self.data[i, 0]) is not None:
                self.graph[self.data[i, 0]].append(self.data[i, 1])
                self.graph[self.data[i, 0]].sort()
            else:
                self.graph[self.data[i, 0]] = [self.data[i, 1]]
    # Depth-first search over the graph, collecting its cycles.
def searchGraph(self):
for i in self.graph.keys():
self.tempPath = [i]
self.rootNode = i
self.stateKeys = []
self.stateValues = []
self._searchRecursive(i)
self.hasUsed[i] = True
# print(self.circlePath)
newList = []
for i in range(3 , 8):
for j in self.circlePath:
if len(j) == i:
newList.append(j)
# print("****"*20)
# print(newList)
# print(len(newList))
with open("/projects/student/result.txt" , "w") as f:
f.write(str(len(newList)) + "\n")
for i in newList:
for j in range(len(i)):
if j == len(i) - 1:
f.write(str(i[j])+"\n")
else:
f.write(str(i[j])+",")
    # Depth-first search: the only forward move is deeper recursion; every other case backtracks.
def _searchRecursive(self,fatherNode):
fatherNodeLen = len(self.graph[fatherNode])
for i in range(fatherNodeLen):
nowNode = self.graph[fatherNode][i]
# print(nowNode)
if not self.hasUsed.get(nowNode) == True:
# self.tempPath.append(nowNode)
# print("now path:",self.tempPath)
# print(fatherNode in self.stateKeys)
if fatherNode in self.stateKeys:
del self.stateValues[self.stateKeys.index(fatherNode)]
del self.stateKeys[self.stateKeys.index(fatherNode)]
if i < fatherNodeLen - 1:
self.stateKeys.append(fatherNode)
self.stateValues.append(1)
else:
self.stateKeys.append(fatherNode)
self.stateValues.append(-1)
# print("now visit node from {} to {}:".format(fatherNode ,nowNode))
# print(self.stateKeys)
# print(self.stateValues)
if nowNode not in self.tempPath[1:-1]:
if nowNode == self.rootNode:
# print("\n****************************hava found a path*************************************:\n{}\n".format( self.tempPath) )
if 3 <= len(self.tempPath) <=7:
self.circlePath.append((self.tempPath.copy()))
# print(self.circlePath)
self._backNode()
elif not self.graph.get(nowNode) == None:
self.tempPath.append(nowNode)
# print("did not found path , now go deep")
self._searchRecursive(nowNode)
else:
self._backNode()
else:
self._backNode()
else:
self._backNode()
    # Backtrack to the nearest node that still has unvisited branches.
def _backNode(self):
keys = self.stateKeys[::-1]
values = self.stateValues[::-1]
if 1 in values:
del self.tempPath[self.tempPath.index(keys[values.index(1)])+1:]
# print("back to:",self.tempPath[self.tempPath.index(keys[values.index(1)])])
if __name__ == "__main__":
path = "/data/test_data.txt"
graph = Graph(path)
print(graph.data)
graph.creatGraph()
graph.searchGraph()
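# Hedged usage sketch (not part of the original script). Assuming the input is a
# comma-separated edge list with the source id in the first column and the target
# id in the second, the three lines
#
#     1,2
#     2,3
#     3,1
#
# describe the single directed 3-cycle 1 -> 2 -> 3 -> 1. Loading that file,
# calling creatGraph() and then searchGraph() would leave [[1, 2, 3]] in
# circlePath and write "1" followed by "1,2,3" to the result file.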
|
Main.py
|
| 0.048869 | 0.121607 |
import os
import re
import test.support
import time
import unittest
import urllib.request
from http.cookiejar import (
    time2isoz, http2time, iso2time, time2netscape, parse_ns_headers,
    join_header_words, split_header_words, Cookie, CookieJar,
    DefaultCookiePolicy, LWPCookieJar, MozillaCookieJar, LoadError,
    lwp_cookie_str, DEFAULT_HTTP_PORT, escape_path, reach, is_HDN,
    domain_match, user_domain_match, request_path, request_port, request_host)
class DateTimeTests(unittest.TestCase):
def test_time2isoz(self):
base = 1019227000
day = 24 * 3600
self.assertEqual(time2isoz(base), '2002-04-19 14:36:40Z')
self.assertEqual(time2isoz(base + day), '2002-04-20 14:36:40Z')
self.assertEqual(time2isoz(base + 2 * day), '2002-04-21 14:36:40Z')
self.assertEqual(time2isoz(base + 3 * day), '2002-04-22 14:36:40Z')
az = time2isoz()
bz = time2isoz(500000)
for text in (az, bz):
self.assertRegex(text,
'^\\d{4}-\\d\\d-\\d\\d \\d\\d:\\d\\d:\\d\\dZ$',
'bad time2isoz format: %s %s' % (az, bz))
def test_time2netscape(self):
base = 1019227000
day = 24 * 3600
self.assertEqual(time2netscape(base), 'Fri, 19-Apr-2002 14:36:40 GMT')
self.assertEqual(time2netscape(base + day),
'Sat, 20-Apr-2002 14:36:40 GMT')
self.assertEqual(time2netscape(base + 2 * day),
'Sun, 21-Apr-2002 14:36:40 GMT')
self.assertEqual(time2netscape(base + 3 * day),
'Mon, 22-Apr-2002 14:36:40 GMT')
az = time2netscape()
bz = time2netscape(500000)
for text in (az, bz):
self.assertRegex(text,
'[a-zA-Z]{3}, \\d{2}-[a-zA-Z]{3}-\\d{4} \\d{2}:\\d{2}:\\d{2} GMT$'
, 'bad time2netscape format: %s %s' % (az, bz))
def test_http2time(self):
def parse_date(text):
return time.gmtime(http2time(text))[:6]
self.assertEqual(parse_date('01 Jan 2001'), (2001, 1, 1, 0, 0, 0.0))
self.assertEqual(parse_date('03-Feb-20'), (2020, 2, 3, 0, 0, 0.0))
self.assertEqual(parse_date('03-Feb-98'), (1998, 2, 3, 0, 0, 0.0))
def test_http2time_formats(self):
tests = ['Thu, 03 Feb 1994 00:00:00 GMT',
'Thursday, 03-Feb-94 00:00:00 GMT',
'Thursday, 03-Feb-1994 00:00:00 GMT',
'03 Feb 1994 00:00:00 GMT', '03-Feb-94 00:00:00 GMT',
'03-Feb-1994 00:00:00 GMT', '03-Feb-1994 00:00 GMT',
'03-Feb-1994 00:00', '02-Feb-1994 24:00', '03-Feb-94',
'03-Feb-1994', '03 Feb 1994', ' 03 Feb 1994 0:00 ',
' 03-Feb-1994 ']
test_t = 760233600
result = time2isoz(test_t)
expected = '1994-02-03 00:00:00Z'
self.assertEqual(result, expected, "%s => '%s' (%s)" % (test_t,
result, expected))
for s in tests:
self.assertEqual(http2time(s), test_t, s)
self.assertEqual(http2time(s.lower()), test_t, s.lower())
self.assertEqual(http2time(s.upper()), test_t, s.upper())
def test_http2time_garbage(self):
for test in ['', 'Garbage', 'Mandag 16. September 1996',
'01-00-1980', '01-13-1980', '00-01-1980', '32-01-1980',
'01-01-1980 25:00:00', '01-01-1980 00:61:00',
'01-01-1980 00:00:62', '08-Oct-3697739', '08-01-3697739',
'09 Feb 19942632 22:23:32 GMT', 'Wed, 09 Feb 1994834 22:23:32 GMT'
]:
self.assertIsNone(http2time(test),
'http2time(%s) is not None\nhttp2time(test) %s' % (test,
http2time(test)))
def test_iso2time(self):
def parse_date(text):
return time.gmtime(iso2time(text))[:6]
self.assertEqual(parse_date('19940203T141529Z'), (1994, 2, 3, 14,
15, 29))
self.assertEqual(parse_date('1994-02-03 07:15:29 -0700'), (1994, 2,
3, 14, 15, 29))
self.assertEqual(parse_date('1994-02-03 19:45:29 +0530'), (1994, 2,
3, 14, 15, 29))
def test_iso2time_formats(self):
tests = ['1994-02-03 00:00:00 -0000', '1994-02-03 00:00:00 +0000',
'1994-02-03 00:00:00', '1994-02-03', '1994-02-03T00:00:00',
'19940203', '1994-02-02 24:00:00', '19940203T000000Z',
' 1994-02-03 ', ' 1994-02-03T00:00:00 ']
test_t = 760233600
for s in tests:
self.assertEqual(iso2time(s), test_t, s)
self.assertEqual(iso2time(s.lower()), test_t, s.lower())
self.assertEqual(iso2time(s.upper()), test_t, s.upper())
def test_iso2time_garbage(self):
for test in ['', 'Garbage', 'Thursday, 03-Feb-94 00:00:00 GMT',
'1980-00-01', '1980-13-01', '1980-01-00', '1980-01-32',
'1980-01-01 25:00:00', '1980-01-01 00:61:00',
'01-01-1980 00:00:62', '01-01-1980T00:00:62',
'19800101T250000Z1980-01-01 00:00:00 -2500']:
self.assertIsNone(iso2time(test),
'iso2time(%s) is not None\niso2time(test) %s' % (test,
iso2time(test)))
class HeaderTests(unittest.TestCase):
def test_parse_ns_headers(self):
expected = [[('foo', 'bar'), ('expires', 2209069412), ('version', '0')]
]
for hdr in ['foo=bar; expires=01 Jan 2040 22:23:32 GMT',
'foo=bar; expires="01 Jan 2040 22:23:32 GMT"']:
self.assertEqual(parse_ns_headers([hdr]), expected)
def test_parse_ns_headers_version(self):
expected = [[('foo', 'bar'), ('version', '1')]]
for hdr in ['foo=bar; version="1"', 'foo=bar; Version="1"']:
self.assertEqual(parse_ns_headers([hdr]), expected)
def test_parse_ns_headers_special_names(self):
hdr = 'expires=01 Jan 2040 22:23:32 GMT'
expected = [[('expires', '01 Jan 2040 22:23:32 GMT'), ('version', '0')]
]
self.assertEqual(parse_ns_headers([hdr]), expected)
def test_join_header_words(self):
joined = join_header_words([[('foo', None), ('bar', 'baz')]])
self.assertEqual(joined, 'foo; bar=baz')
self.assertEqual(join_header_words([[]]), '')
def test_split_header_words(self):
tests = [('foo', [[('foo', None)]]), ('foo=bar', [[('foo', 'bar')]]
), (' foo ', [[('foo', None)]]), (' foo= ', [[('foo',
'')]]), (' foo=', [[('foo', '')]]), (' foo= ; ', [[('foo',
'')]]), (' foo= ; bar= baz ', [[('foo', ''), ('bar', 'baz')
]]), ('foo=bar bar=baz', [[('foo', 'bar'), ('bar', 'baz')]]), (
'foo= bar=baz', [[('foo', 'bar=baz')]]), ('foo=bar;bar=baz', [[
('foo', 'bar'), ('bar', 'baz')]]), ('foo bar baz', [[('foo',
None), ('bar', None), ('baz', None)]]), ('a, b, c', [[('a',
None)], [('b', None)], [('c', None)]]), (
'foo; bar=baz, spam=, foo="\\,\\;\\"", bar= ', [[('foo', None),
('bar', 'baz')], [('spam', '')], [('foo', ',;"')], [('bar', '')]])]
for arg, expect in tests:
try:
result = split_header_words([arg])
            except Exception:
import traceback, io
f = io.StringIO()
traceback.print_exc(None, f)
result = '(error -- traceback follows)\n\n%s' % f.getvalue()
self.assertEqual(result, expect,
"""
When parsing: '%s'
Expected: '%s'
Got: '%s'
"""
% (arg, expect, result))
def test_roundtrip(self):
tests = [('foo', 'foo'), ('foo=bar', 'foo=bar'), (' foo ',
'foo'), ('foo=', 'foo=""'), ('foo=bar bar=baz',
'foo=bar; bar=baz'), ('foo=bar;bar=baz', 'foo=bar; bar=baz'), (
'foo bar baz', 'foo; bar; baz'), ('foo="\\"" bar="\\\\"',
'foo="\\""; bar="\\\\"'), ('foo,,,bar', 'foo, bar'), (
'foo=bar,bar=baz', 'foo=bar, bar=baz'), (
'text/html; charset=iso-8859-1',
'text/html; charset="iso-8859-1"'), (
'foo="bar"; port="80,81"; discard, bar=baz',
'foo=bar; port="80,81"; discard, bar=baz'), (
'Basic realm="\\"foo\\\\\\\\bar\\""',
'Basic; realm="\\"foo\\\\\\\\bar\\""')]
for arg, expect in tests:
input = split_header_words([arg])
res = join_header_words(input)
self.assertEqual(res, expect,
"""
When parsing: '%s'
Expected: '%s'
Got: '%s'
Input was: '%s'
"""
% (arg, expect, res, input))
class FakeResponse:
    def __init__(self, headers=(), url=None):
"""
headers: list of RFC822-style 'Key: value' strings
"""
import email
self._headers = email.message_from_string('\n'.join(headers))
self._url = url
def info(self):
return self._headers
def interact_2965(cookiejar, url, *set_cookie_hdrs):
return _interact(cookiejar, url, set_cookie_hdrs, 'Set-Cookie2')
def interact_netscape(cookiejar, url, *set_cookie_hdrs):
return _interact(cookiejar, url, set_cookie_hdrs, 'Set-Cookie')
def _interact(cookiejar, url, set_cookie_hdrs, hdr_name):
"""Perform a single request / response cycle, returning Cookie: header."""
req = urllib.request.Request(url)
cookiejar.add_cookie_header(req)
cookie_hdr = req.get_header('Cookie', '')
headers = []
for hdr in set_cookie_hdrs:
headers.append('%s: %s' % (hdr_name, hdr))
res = FakeResponse(headers, url)
cookiejar.extract_cookies(res, req)
return cookie_hdr
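# Illustrative sketch of the helpers above (not part of the original test suite):
# each interact_* call first returns what the jar would currently send for the
# URL, then feeds the supplied Set-Cookie header(s) back into the jar.
#
#     jar = CookieJar()
#     interact_netscape(jar, "http://example.com/", "sid=abc123")  # returns ''
#     interact_netscape(jar, "http://example.com/")                # returns 'sid=abc123'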
class FileCookieJarTests(unittest.TestCase):
def test_lwp_valueless_cookie(self):
filename = test.support.TESTFN
c = LWPCookieJar()
interact_netscape(c, 'http://www.acme.com/', 'boo')
self.assertEqual(c._cookies['www.acme.com']['/']['boo'].value, None)
try:
c.save(filename, ignore_discard=True)
c = LWPCookieJar()
c.load(filename, ignore_discard=True)
finally:
try:
os.unlink(filename)
except OSError:
pass
self.assertEqual(c._cookies['www.acme.com']['/']['boo'].value, None)
def test_bad_magic(self):
filename = test.support.TESTFN
for cookiejar_class in (LWPCookieJar, MozillaCookieJar):
c = cookiejar_class()
try:
c.load(filename=
'for this test to work, a file with this filename should not exist'
)
except OSError as exc:
self.assertIsNot(exc.__class__, LoadError)
else:
self.fail('expected OSError for invalid filename')
try:
with open(filename, 'w') as f:
f.write('oops\n')
for cookiejar_class in (LWPCookieJar, MozillaCookieJar):
c = cookiejar_class()
self.assertRaises(LoadError, c.load, filename)
finally:
try:
os.unlink(filename)
except OSError:
pass
class CookieTests(unittest.TestCase):
def test_domain_return_ok(self):
pol = DefaultCookiePolicy()
for url, domain, ok in [('http://foo.bar.com/', 'blah.com', False),
('http://foo.bar.com/', 'rhubarb.blah.com', False), (
'http://foo.bar.com/', 'rhubarb.foo.bar.com', False), (
'http://foo.bar.com/', '.foo.bar.com', True), (
'http://foo.bar.com/', 'foo.bar.com', True), (
'http://foo.bar.com/', '.bar.com', True), (
'http://foo.bar.com/', 'com', True), ('http://foo.com/',
'rhubarb.foo.com', False), ('http://foo.com/', '.foo.com', True
), ('http://foo.com/', 'foo.com', True), ('http://foo.com/',
'com', True), ('http://foo/', 'rhubarb.foo', False), (
'http://foo/', '.foo', True), ('http://foo/', 'foo', True), (
'http://foo/', 'foo.local', True), ('http://foo/', '.local', True)
]:
request = urllib.request.Request(url)
r = pol.domain_return_ok(domain, request)
if ok:
self.assertTrue(r)
else:
self.assertFalse(r)
def test_missing_value(self):
filename = test.support.TESTFN
c = MozillaCookieJar(filename)
interact_netscape(c, 'http://www.acme.com/', 'eggs')
interact_netscape(c, 'http://www.acme.com/', '"spam"; path=/foo/')
cookie = c._cookies['www.acme.com']['/']['eggs']
self.assertIsNone(cookie.value)
self.assertEqual(cookie.name, 'eggs')
cookie = c._cookies['www.acme.com']['/foo/']['"spam"']
self.assertIsNone(cookie.value)
self.assertEqual(cookie.name, '"spam"')
self.assertEqual(lwp_cookie_str(cookie),
'"spam"; path="/foo/"; domain="www.acme.com"; path_spec; discard; version=0'
)
old_str = repr(c)
c.save(ignore_expires=True, ignore_discard=True)
try:
c = MozillaCookieJar(filename)
c.revert(ignore_expires=True, ignore_discard=True)
finally:
os.unlink(c.filename)
self.assertEqual(repr(c), re.sub('path_specified=%s' % True,
'path_specified=%s' % False, old_str))
self.assertEqual(interact_netscape(c, 'http://www.acme.com/foo/'),
'"spam"; eggs')
def test_rfc2109_handling(self):
for rfc2109_as_netscape, rfc2965, version in [(None, False, 0), (
None, True, 1), (False, False, None), (False, True, 1), (True,
False, 0), (True, True, 0)]:
policy = DefaultCookiePolicy(rfc2109_as_netscape=
rfc2109_as_netscape, rfc2965=rfc2965)
c = CookieJar(policy)
interact_netscape(c, 'http://www.example.com/', 'ni=ni; Version=1')
try:
cookie = c._cookies['www.example.com']['/']['ni']
except KeyError:
self.assertIsNone(version)
else:
self.assertEqual(cookie.version, version)
interact_2965(c, 'http://www.example.com/',
'foo=bar; Version=1')
if rfc2965:
cookie2965 = c._cookies['www.example.com']['/']['foo']
self.assertEqual(cookie2965.version, 1)
def test_ns_parser(self):
c = CookieJar()
interact_netscape(c, 'http://www.acme.com/',
'spam=eggs; DoMain=.acme.com; port; blArgh="feep"')
interact_netscape(c, 'http://www.acme.com/', 'ni=ni; port=80,8080')
interact_netscape(c, 'http://www.acme.com:80/', 'nini=ni')
interact_netscape(c, 'http://www.acme.com:80/', 'foo=bar; expires=')
interact_netscape(c, 'http://www.acme.com:80/',
'spam=eggs; expires="Foo Bar 25 33:22:11 3022"')
interact_netscape(c, 'http://www.acme.com/', 'fortytwo=')
interact_netscape(c, 'http://www.acme.com/', '=unladenswallow')
interact_netscape(c, 'http://www.acme.com/', 'holyhandgrenade')
cookie = c._cookies['.acme.com']['/']['spam']
self.assertEqual(cookie.domain, '.acme.com')
self.assertTrue(cookie.domain_specified)
self.assertEqual(cookie.port, DEFAULT_HTTP_PORT)
self.assertFalse(cookie.port_specified)
self.assertTrue(cookie.has_nonstandard_attr('blArgh'))
self.assertFalse(cookie.has_nonstandard_attr('blargh'))
cookie = c._cookies['www.acme.com']['/']['ni']
self.assertEqual(cookie.domain, 'www.acme.com')
self.assertFalse(cookie.domain_specified)
self.assertEqual(cookie.port, '80,8080')
self.assertTrue(cookie.port_specified)
cookie = c._cookies['www.acme.com']['/']['nini']
self.assertIsNone(cookie.port)
self.assertFalse(cookie.port_specified)
foo = c._cookies['www.acme.com']['/']['foo']
        spam = c._cookies['www.acme.com']['/']['spam']
self.assertIsNone(foo.expires)
self.assertIsNone(spam.expires)
cookie = c._cookies['www.acme.com']['/']['fortytwo']
self.assertIsNotNone(cookie.value)
self.assertEqual(cookie.value, '')
cookie = c._cookies['www.acme.com']['/']['holyhandgrenade']
self.assertIsNone(cookie.value)
def test_ns_parser_special_names(self):
c = CookieJar()
interact_netscape(c, 'http://www.acme.com/', 'expires=eggs')
interact_netscape(c, 'http://www.acme.com/', 'version=eggs; spam=eggs')
cookies = c._cookies['www.acme.com']['/']
self.assertIn('expires', cookies)
self.assertIn('version', cookies)
def test_expires(self):
c = CookieJar()
future = time2netscape(time.time() + 3600)
interact_netscape(c, 'http://www.acme.com/',
'spam="bar"; expires=%s' % future)
self.assertEqual(len(c), 1)
now = time2netscape(time.time() - 1)
interact_netscape(c, 'http://www.acme.com/',
'foo="eggs"; expires=%s' % now)
h = interact_netscape(c, 'http://www.acme.com/')
self.assertEqual(len(c), 1)
self.assertIn('spam="bar"', h)
self.assertNotIn('foo', h)
interact_netscape(c, 'http://www.acme.com/',
'eggs="bar"; expires=%s' % future)
interact_netscape(c, 'http://www.acme.com/',
'bar="bar"; expires=%s' % future)
self.assertEqual(len(c), 3)
interact_netscape(c, 'http://www.acme.com/',
'eggs="bar"; expires=%s; max-age=0' % future)
interact_netscape(c, 'http://www.acme.com/',
'bar="bar"; max-age=0; expires=%s' % future)
h = interact_netscape(c, 'http://www.acme.com/')
self.assertEqual(len(c), 1)
interact_netscape(c, 'http://www.rhubarb.net/', 'whum="fizz"')
self.assertEqual(len(c), 2)
c.clear_session_cookies()
self.assertEqual(len(c), 1)
self.assertIn('spam="bar"', h)
cookie = Cookie(0, 'name', 'value', None, False, 'www.python.org',
True, False, '/', False, False, '1444312383.018307', False,
None, None, {})
self.assertEqual(cookie.expires, 1444312383)
def test_default_path(self):
pol = DefaultCookiePolicy(rfc2965=True)
c = CookieJar(pol)
interact_2965(c, 'http://www.acme.com/', 'spam="bar"; Version="1"')
self.assertIn('/', c._cookies['www.acme.com'])
c = CookieJar(pol)
interact_2965(c, 'http://www.acme.com/blah', 'eggs="bar"; Version="1"')
self.assertIn('/', c._cookies['www.acme.com'])
c = CookieJar(pol)
interact_2965(c, 'http://www.acme.com/blah/rhubarb',
'eggs="bar"; Version="1"')
self.assertIn('/blah/', c._cookies['www.acme.com'])
c = CookieJar(pol)
interact_2965(c, 'http://www.acme.com/blah/rhubarb/',
'eggs="bar"; Version="1"')
self.assertIn('/blah/rhubarb/', c._cookies['www.acme.com'])
c = CookieJar()
interact_netscape(c, 'http://www.acme.com/', 'spam="bar"')
self.assertIn('/', c._cookies['www.acme.com'])
c = CookieJar()
interact_netscape(c, 'http://www.acme.com/blah', 'eggs="bar"')
self.assertIn('/', c._cookies['www.acme.com'])
c = CookieJar()
interact_netscape(c, 'http://www.acme.com/blah/rhubarb', 'eggs="bar"')
self.assertIn('/blah', c._cookies['www.acme.com'])
c = CookieJar()
interact_netscape(c, 'http://www.acme.com/blah/rhubarb/', 'eggs="bar"')
self.assertIn('/blah/rhubarb', c._cookies['www.acme.com'])
def test_default_path_with_query(self):
cj = CookieJar()
uri = 'http://example.com/?spam/eggs'
value = 'eggs="bar"'
interact_netscape(cj, uri, value)
self.assertIn('/', cj._cookies['example.com'])
self.assertEqual(interact_netscape(cj, uri), value)
def test_escape_path(self):
cases = [('/foo%2f/bar', '/foo%2F/bar'), ('/foo%2F/bar',
'/foo%2F/bar'), ('/foo%%/bar', '/foo%%/bar'), ('/fo%19o/bar',
'/fo%19o/bar'), ('/fo%7do/bar', '/fo%7Do/bar'), ('/foo/bar&',
'/foo/bar&'), ('/foo//bar', '/foo//bar'), ('~/foo/bar',
'~/foo/bar'), ('/foo\x19/bar', '/foo%19/bar'), ('/}foo/bar',
'/%7Dfoo/bar'), ('/foo/barü', '/foo/bar%C3%BC'), ('/foo/barꯍ',
'/foo/bar%EA%AF%8D')]
for arg, result in cases:
self.assertEqual(escape_path(arg), result)
def test_request_path(self):
req = urllib.request.Request(
'http://www.example.com/rheum/rhaponticum;foo=bar;sing=song?apples=pears&spam=eggs#ni'
)
self.assertEqual(request_path(req),
'/rheum/rhaponticum;foo=bar;sing=song')
req = urllib.request.Request(
'http://www.example.com/rheum/rhaponticum?apples=pears&spam=eggs#ni'
)
self.assertEqual(request_path(req), '/rheum/rhaponticum')
req = urllib.request.Request('http://www.example.com')
self.assertEqual(request_path(req), '/')
def test_request_port(self):
req = urllib.request.Request('http://www.acme.com:1234/', headers={
'Host': 'www.acme.com:4321'})
self.assertEqual(request_port(req), '1234')
req = urllib.request.Request('http://www.acme.com/', headers={
'Host': 'www.acme.com:4321'})
self.assertEqual(request_port(req), DEFAULT_HTTP_PORT)
def test_request_host(self):
req = urllib.request.Request('http://1.1.1.1/', headers={'Host':
'www.acme.com:80'})
self.assertEqual(request_host(req), '1.1.1.1')
req = urllib.request.Request('http://www.acme.com/', headers={
'Host': 'irrelevant.com'})
self.assertEqual(request_host(req), 'www.acme.com')
req = urllib.request.Request('http://www.acme.com:2345/resource.html',
headers={'Host': 'www.acme.com:5432'})
self.assertEqual(request_host(req), 'www.acme.com')
def test_is_HDN(self):
self.assertTrue(is_HDN('foo.bar.com'))
self.assertTrue(is_HDN('1foo2.3bar4.5com'))
self.assertFalse(is_HDN('192.168.1.1'))
self.assertFalse(is_HDN(''))
self.assertFalse(is_HDN('.'))
self.assertFalse(is_HDN('.foo.bar.com'))
self.assertFalse(is_HDN('..foo'))
self.assertFalse(is_HDN('foo.'))
def test_reach(self):
self.assertEqual(reach('www.acme.com'), '.acme.com')
self.assertEqual(reach('acme.com'), 'acme.com')
self.assertEqual(reach('acme.local'), '.local')
self.assertEqual(reach('.local'), '.local')
self.assertEqual(reach('.com'), '.com')
self.assertEqual(reach('.'), '.')
self.assertEqual(reach(''), '')
self.assertEqual(reach('192.168.0.1'), '192.168.0.1')
def test_domain_match(self):
self.assertTrue(domain_match('192.168.1.1', '192.168.1.1'))
self.assertFalse(domain_match('192.168.1.1', '.168.1.1'))
self.assertTrue(domain_match('x.y.com', 'x.Y.com'))
self.assertTrue(domain_match('x.y.com', '.Y.com'))
self.assertFalse(domain_match('x.y.com', 'Y.com'))
self.assertTrue(domain_match('a.b.c.com', '.c.com'))
self.assertFalse(domain_match('.c.com', 'a.b.c.com'))
self.assertTrue(domain_match('example.local', '.local'))
self.assertFalse(domain_match('blah.blah', ''))
self.assertFalse(domain_match('', '.rhubarb.rhubarb'))
self.assertTrue(domain_match('', ''))
self.assertTrue(user_domain_match('acme.com', 'acme.com'))
self.assertFalse(user_domain_match('acme.com', '.acme.com'))
self.assertTrue(user_domain_match('rhubarb.acme.com', '.acme.com'))
self.assertTrue(user_domain_match('www.rhubarb.acme.com', '.acme.com'))
self.assertTrue(user_domain_match('x.y.com', 'x.Y.com'))
self.assertTrue(user_domain_match('x.y.com', '.Y.com'))
self.assertFalse(user_domain_match('x.y.com', 'Y.com'))
self.assertTrue(user_domain_match('y.com', 'Y.com'))
self.assertFalse(user_domain_match('.y.com', 'Y.com'))
self.assertTrue(user_domain_match('.y.com', '.Y.com'))
self.assertTrue(user_domain_match('x.y.com', '.com'))
self.assertFalse(user_domain_match('x.y.com', 'com'))
self.assertFalse(user_domain_match('x.y.com', 'm'))
self.assertFalse(user_domain_match('x.y.com', '.m'))
self.assertFalse(user_domain_match('x.y.com', ''))
self.assertFalse(user_domain_match('x.y.com', '.'))
self.assertTrue(user_domain_match('192.168.1.1', '192.168.1.1'))
self.assertFalse(user_domain_match('192.168.1.1', '.168.1.1'))
self.assertFalse(user_domain_match('192.168.1.1', '.'))
self.assertFalse(user_domain_match('192.168.1.1', ''))
def test_wrong_domain(self):
c = CookieJar()
interact_2965(c, 'http://www.nasty.com/',
'foo=bar; domain=friendly.org; Version="1"')
self.assertEqual(len(c), 0)
def test_strict_domain(self):
cp = DefaultCookiePolicy(strict_domain=True)
cj = CookieJar(policy=cp)
interact_netscape(cj, 'http://example.co.uk/', 'no=problemo')
interact_netscape(cj, 'http://example.co.uk/',
'okey=dokey; Domain=.example.co.uk')
self.assertEqual(len(cj), 2)
for pseudo_tld in ['.co.uk', '.org.za', '.tx.us', '.name.us']:
interact_netscape(cj, 'http://example.%s/' % pseudo_tld,
'spam=eggs; Domain=.co.uk')
self.assertEqual(len(cj), 2)
def test_two_component_domain_ns(self):
c = CookieJar()
interact_netscape(c, 'http://foo.net/', 'ns=bar')
self.assertEqual(len(c), 1)
self.assertEqual(c._cookies['foo.net']['/']['ns'].value, 'bar')
self.assertEqual(interact_netscape(c, 'http://foo.net/'), 'ns=bar')
self.assertEqual(interact_netscape(c, 'http://www.foo.net/'), 'ns=bar')
        pol = DefaultCookiePolicy(
            strict_ns_domain=DefaultCookiePolicy.DomainStrictNonDomain)
c.set_policy(pol)
self.assertEqual(interact_netscape(c, 'http://www.foo.net/'), '')
interact_netscape(c, 'http://foo.net/foo/',
'spam1=eggs; domain=foo.net')
interact_netscape(c, 'http://foo.net/foo/bar/',
'spam2=eggs; domain=.foo.net')
self.assertEqual(len(c), 3)
self.assertEqual(c._cookies['.foo.net']['/foo']['spam1'].value, 'eggs')
self.assertEqual(c._cookies['.foo.net']['/foo/bar']['spam2'].value,
'eggs')
self.assertEqual(interact_netscape(c, 'http://foo.net/foo/bar/'),
'spam2=eggs; spam1=eggs; ns=bar')
interact_netscape(c, 'http://foo.net/', 'nini="ni"; domain=.net')
self.assertEqual(len(c), 3)
interact_netscape(c, 'http://foo.co.uk', 'nasty=trick; domain=.co.uk')
self.assertEqual(len(c), 4)
def test_two_component_domain_rfc2965(self):
pol = DefaultCookiePolicy(rfc2965=True)
c = CookieJar(pol)
interact_2965(c, 'http://foo.net/', 'foo=bar; Version="1"')
self.assertEqual(len(c), 1)
self.assertEqual(c._cookies['foo.net']['/']['foo'].value, 'bar')
self.assertEqual(interact_2965(c, 'http://foo.net/'),
'$Version=1; foo=bar')
self.assertEqual(interact_2965(c, 'http://www.foo.net/'), '')
interact_2965(c, 'http://foo.net/foo',
'spam=eggs; domain=foo.net; path=/foo; Version="1"')
self.assertEqual(len(c), 1)
self.assertEqual(interact_2965(c, 'http://foo.net/foo'),
'$Version=1; foo=bar')
interact_2965(c, 'http://www.foo.net/foo/',
'spam=eggs; domain=foo.net; Version="1"')
self.assertEqual(c._cookies['.foo.net']['/foo/']['spam'].value, 'eggs')
self.assertEqual(len(c), 2)
self.assertEqual(interact_2965(c, 'http://foo.net/foo/'),
'$Version=1; foo=bar')
self.assertEqual(interact_2965(c, 'http://www.foo.net/foo/'),
'$Version=1; spam=eggs; $Domain="foo.net"')
interact_2965(c, 'http://foo.net/',
'ni="ni"; domain=".net"; Version="1"')
self.assertEqual(len(c), 2)
interact_2965(c, 'http://foo.co.uk/',
'nasty=trick; domain=.co.uk; Version="1"')
self.assertEqual(len(c), 3)
def test_domain_allow(self):
c = CookieJar(policy=DefaultCookiePolicy(blocked_domains=[
'acme.com'], allowed_domains=['www.acme.com']))
req = urllib.request.Request('http://acme.com/')
headers = ['Set-Cookie: CUSTOMER=WILE_E_COYOTE; path=/']
res = FakeResponse(headers, 'http://acme.com/')
c.extract_cookies(res, req)
self.assertEqual(len(c), 0)
req = urllib.request.Request('http://www.acme.com/')
res = FakeResponse(headers, 'http://www.acme.com/')
c.extract_cookies(res, req)
self.assertEqual(len(c), 1)
req = urllib.request.Request('http://www.coyote.com/')
res = FakeResponse(headers, 'http://www.coyote.com/')
c.extract_cookies(res, req)
self.assertEqual(len(c), 1)
req = urllib.request.Request('http://www.coyote.com/')
res = FakeResponse(headers, 'http://www.coyote.com/')
cookies = c.make_cookies(res, req)
c.set_cookie(cookies[0])
self.assertEqual(len(c), 2)
c.add_cookie_header(req)
self.assertFalse(req.has_header('Cookie'))
def test_domain_block(self):
pol = DefaultCookiePolicy(rfc2965=True, blocked_domains=['.acme.com'])
c = CookieJar(policy=pol)
headers = ['Set-Cookie: CUSTOMER=WILE_E_COYOTE; path=/']
req = urllib.request.Request('http://www.acme.com/')
res = FakeResponse(headers, 'http://www.acme.com/')
c.extract_cookies(res, req)
self.assertEqual(len(c), 0)
p = pol.set_blocked_domains(['acme.com'])
c.extract_cookies(res, req)
self.assertEqual(len(c), 1)
c.clear()
req = urllib.request.Request('http://www.roadrunner.net/')
res = FakeResponse(headers, 'http://www.roadrunner.net/')
c.extract_cookies(res, req)
self.assertEqual(len(c), 1)
req = urllib.request.Request('http://www.roadrunner.net/')
c.add_cookie_header(req)
self.assertTrue(req.has_header('Cookie'))
self.assertTrue(req.has_header('Cookie2'))
c.clear()
pol.set_blocked_domains(['.acme.com'])
c.extract_cookies(res, req)
self.assertEqual(len(c), 1)
req = urllib.request.Request('http://www.acme.com/')
res = FakeResponse(headers, 'http://www.acme.com/')
cookies = c.make_cookies(res, req)
c.set_cookie(cookies[0])
self.assertEqual(len(c), 2)
c.add_cookie_header(req)
self.assertFalse(req.has_header('Cookie'))
def test_secure(self):
for ns in (True, False):
for whitespace in (' ', ''):
c = CookieJar()
if ns:
pol = DefaultCookiePolicy(rfc2965=False)
int = interact_netscape
vs = ''
else:
pol = DefaultCookiePolicy(rfc2965=True)
int = interact_2965
vs = '; Version=1'
c.set_policy(pol)
url = 'http://www.acme.com/'
int(c, url, 'foo1=bar%s%s' % (vs, whitespace))
int(c, url, 'foo2=bar%s; secure%s' % (vs, whitespace))
self.assertFalse(c._cookies['www.acme.com']['/']['foo1'].
secure, 'non-secure cookie registered secure')
self.assertTrue(c._cookies['www.acme.com']['/']['foo2'].
secure, 'secure cookie registered non-secure')
def test_quote_cookie_value(self):
c = CookieJar(policy=DefaultCookiePolicy(rfc2965=True))
interact_2965(c, 'http://www.acme.com/', 'foo=\\b"a"r; Version=1')
h = interact_2965(c, 'http://www.acme.com/')
self.assertEqual(h, '$Version=1; foo=\\\\b\\"a\\"r')
def test_missing_final_slash(self):
url = 'http://www.acme.com'
c = CookieJar(DefaultCookiePolicy(rfc2965=True))
interact_2965(c, url, 'foo=bar; Version=1')
req = urllib.request.Request(url)
self.assertEqual(len(c), 1)
c.add_cookie_header(req)
self.assertTrue(req.has_header('Cookie'))
def test_domain_mirror(self):
pol = DefaultCookiePolicy(rfc2965=True)
c = CookieJar(pol)
url = 'http://foo.bar.com/'
interact_2965(c, url, 'spam=eggs; Version=1')
h = interact_2965(c, url)
self.assertNotIn('Domain', h,
'absent domain returned with domain present')
c = CookieJar(pol)
url = 'http://foo.bar.com/'
interact_2965(c, url, 'spam=eggs; Version=1; Domain=.bar.com')
h = interact_2965(c, url)
self.assertIn('$Domain=".bar.com"', h, 'domain not returned')
c = CookieJar(pol)
url = 'http://foo.bar.com/'
interact_2965(c, url, 'spam=eggs; Version=1; Domain=bar.com')
h = interact_2965(c, url)
self.assertIn('$Domain="bar.com"', h, 'domain not returned')
def test_path_mirror(self):
pol = DefaultCookiePolicy(rfc2965=True)
c = CookieJar(pol)
url = 'http://foo.bar.com/'
interact_2965(c, url, 'spam=eggs; Version=1')
h = interact_2965(c, url)
self.assertNotIn('Path', h, 'absent path returned with path present')
c = CookieJar(pol)
url = 'http://foo.bar.com/'
interact_2965(c, url, 'spam=eggs; Version=1; Path=/')
h = interact_2965(c, url)
self.assertIn('$Path="/"', h, 'path not returned')
def test_port_mirror(self):
pol = DefaultCookiePolicy(rfc2965=True)
c = CookieJar(pol)
url = 'http://foo.bar.com/'
interact_2965(c, url, 'spam=eggs; Version=1')
h = interact_2965(c, url)
self.assertNotIn('Port', h, 'absent port returned with port present')
c = CookieJar(pol)
url = 'http://foo.bar.com/'
interact_2965(c, url, 'spam=eggs; Version=1; Port')
h = interact_2965(c, url)
self.assertRegex(h, '\\$Port([^=]|$)',
'port with no value not returned with no value')
c = CookieJar(pol)
url = 'http://foo.bar.com/'
interact_2965(c, url, 'spam=eggs; Version=1; Port="80"')
h = interact_2965(c, url)
self.assertIn('$Port="80"', h,
'port with single value not returned with single value')
c = CookieJar(pol)
url = 'http://foo.bar.com/'
interact_2965(c, url, 'spam=eggs; Version=1; Port="80,8080"')
h = interact_2965(c, url)
self.assertIn('$Port="80,8080"', h,
'port with multiple values not returned with multiple values')
def test_no_return_comment(self):
c = CookieJar(DefaultCookiePolicy(rfc2965=True))
url = 'http://foo.bar.com/'
interact_2965(c, url,
'spam=eggs; Version=1; Comment="does anybody read these?"; CommentURL="http://foo.bar.net/comment.html"'
)
h = interact_2965(c, url)
self.assertNotIn('Comment', h,
'Comment or CommentURL cookie-attributes returned to server')
def test_Cookie_iterator(self):
cs = CookieJar(DefaultCookiePolicy(rfc2965=True))
interact_2965(cs, 'http://blah.spam.org/',
'foo=eggs; Version=1; Comment="does anybody read these?"; CommentURL="http://foo.bar.net/comment.html"'
)
interact_netscape(cs, 'http://www.acme.com/blah/', 'spam=bar; secure')
interact_2965(cs, 'http://www.acme.com/blah/',
'foo=bar; secure; Version=1')
interact_2965(cs, 'http://www.acme.com/blah/',
'foo=bar; path=/; Version=1')
interact_2965(cs, 'http://www.sol.no',
'bang=wallop; version=1; domain=".sol.no"; port="90,100, 80,8080"; max-age=100; Comment = "Just kidding! (\\"|\\\\\\\\) "'
)
versions = [1, 1, 1, 0, 1]
names = ['bang', 'foo', 'foo', 'spam', 'foo']
domains = ['.sol.no', 'blah.spam.org', 'www.acme.com',
'www.acme.com', 'www.acme.com']
paths = ['/', '/', '/', '/blah', '/blah/']
for i in range(4):
i = 0
for c in cs:
self.assertIsInstance(c, Cookie)
self.assertEqual(c.version, versions[i])
self.assertEqual(c.name, names[i])
self.assertEqual(c.domain, domains[i])
self.assertEqual(c.path, paths[i])
i = i + 1
def test_parse_ns_headers(self):
self.assertEqual(parse_ns_headers(['foo=bar; path=/; domain']), [[(
'foo', 'bar'), ('path', '/'), ('domain', None), ('version', '0')]])
self.assertEqual(parse_ns_headers([
'foo=bar; expires=Foo Bar 12 33:22:11 2000']), [[('foo', 'bar'),
('expires', None), ('version', '0')]])
self.assertEqual(parse_ns_headers(['foo']), [[('foo', None), (
'version', '0')]])
self.assertEqual(parse_ns_headers(['foo=bar; expires']), [[('foo',
'bar'), ('expires', None), ('version', '0')]])
self.assertEqual(parse_ns_headers(['foo=bar; version']), [[('foo',
'bar'), ('version', None)]])
self.assertEqual(parse_ns_headers(['']), [])
def test_bad_cookie_header(self):
def cookiejar_from_cookie_headers(headers):
c = CookieJar()
req = urllib.request.Request('http://www.example.com/')
r = FakeResponse(headers, 'http://www.example.com/')
c.extract_cookies(r, req)
return c
future = time2netscape(time.time() + 3600)
for headers in [['Set-Cookie: '], ['Set-Cookie2: '], [
'Set-Cookie2: a=foo; path=/; Version=1; domain'], [
'Set-Cookie: b=foo; max-age=oops'], [
'Set-Cookie: b=foo; version=spam'], ['Set-Cookie:; Expires=%s' %
future]]:
c = cookiejar_from_cookie_headers(headers)
self.assertEqual(len(c), 0)
headers = ['Set-Cookie: c=foo; expires=Foo Bar 12 33:22:11 2000']
c = cookiejar_from_cookie_headers(headers)
cookie = c._cookies['www.example.com']['/']['c']
self.assertIsNone(cookie.expires)
class LWPCookieTests(unittest.TestCase):
def test_netscape_example_1(self):
year_plus_one = time.localtime()[0] + 1
headers = []
c = CookieJar(DefaultCookiePolicy(rfc2965=True))
req = urllib.request.Request('http://www.acme.com:80/', headers={
'Host': 'www.acme.com:80'})
headers.append(
'Set-Cookie: CUSTOMER=WILE_E_COYOTE; path=/ ; expires=Wednesday, 09-Nov-%d 23:12:40 GMT'
% year_plus_one)
res = FakeResponse(headers, 'http://www.acme.com/')
c.extract_cookies(res, req)
req = urllib.request.Request('http://www.acme.com/')
c.add_cookie_header(req)
self.assertEqual(req.get_header('Cookie'), 'CUSTOMER=WILE_E_COYOTE')
self.assertEqual(req.get_header('Cookie2'), '$Version="1"')
headers.append('Set-Cookie: PART_NUMBER=ROCKET_LAUNCHER_0001; path=/')
res = FakeResponse(headers, 'http://www.acme.com/')
c.extract_cookies(res, req)
req = urllib.request.Request('http://www.acme.com/foo/bar')
c.add_cookie_header(req)
h = req.get_header('Cookie')
self.assertIn('PART_NUMBER=ROCKET_LAUNCHER_0001', h)
self.assertIn('CUSTOMER=WILE_E_COYOTE', h)
headers.append('Set-Cookie: SHIPPING=FEDEX; path=/foo')
res = FakeResponse(headers, 'http://www.acme.com')
c.extract_cookies(res, req)
req = urllib.request.Request('http://www.acme.com/')
c.add_cookie_header(req)
h = req.get_header('Cookie')
self.assertIn('PART_NUMBER=ROCKET_LAUNCHER_0001', h)
self.assertIn('CUSTOMER=WILE_E_COYOTE', h)
self.assertNotIn('SHIPPING=FEDEX', h)
req = urllib.request.Request('http://www.acme.com/foo/')
c.add_cookie_header(req)
h = req.get_header('Cookie')
self.assertIn('PART_NUMBER=ROCKET_LAUNCHER_0001', h)
self.assertIn('CUSTOMER=WILE_E_COYOTE', h)
self.assertTrue(h.startswith('SHIPPING=FEDEX;'))
def test_netscape_example_2(self):
c = CookieJar()
headers = []
req = urllib.request.Request('http://www.acme.com/')
headers.append('Set-Cookie: PART_NUMBER=ROCKET_LAUNCHER_0001; path=/')
res = FakeResponse(headers, 'http://www.acme.com/')
c.extract_cookies(res, req)
req = urllib.request.Request('http://www.acme.com/')
c.add_cookie_header(req)
self.assertEqual(req.get_header('Cookie'),
'PART_NUMBER=ROCKET_LAUNCHER_0001')
headers.append('Set-Cookie: PART_NUMBER=RIDING_ROCKET_0023; path=/ammo'
)
res = FakeResponse(headers, 'http://www.acme.com/')
c.extract_cookies(res, req)
req = urllib.request.Request('http://www.acme.com/ammo')
c.add_cookie_header(req)
self.assertRegex(req.get_header('Cookie'),
'PART_NUMBER=RIDING_ROCKET_0023;\\s*PART_NUMBER=ROCKET_LAUNCHER_0001'
)
def test_ietf_example_1(self):
c = CookieJar(DefaultCookiePolicy(rfc2965=True))
cookie = interact_2965(c, 'http://www.acme.com/acme/login',
'Customer="WILE_E_COYOTE"; Version="1"; Path="/acme"')
self.assertFalse(cookie)
cookie = interact_2965(c, 'http://www.acme.com/acme/pickitem',
'Part_Number="Rocket_Launcher_0001"; Version="1"; Path="/acme"')
self.assertRegex(cookie,
'^\\$Version="?1"?; Customer="?WILE_E_COYOTE"?; \\$Path="/acme"$')
cookie = interact_2965(c, 'http://www.acme.com/acme/shipping',
'Shipping="FedEx"; Version="1"; Path="/acme"')
self.assertRegex(cookie, '^\\$Version="?1"?;')
self.assertRegex(cookie,
'Part_Number="?Rocket_Launcher_0001"?;\\s*\\$Path="\\/acme"')
self.assertRegex(cookie,
'Customer="?WILE_E_COYOTE"?;\\s*\\$Path="\\/acme"')
cookie = interact_2965(c, 'http://www.acme.com/acme/process')
self.assertRegex(cookie, 'Shipping="?FedEx"?;\\s*\\$Path="\\/acme"')
self.assertIn('WILE_E_COYOTE', cookie)
def test_ietf_example_2(self):
c = CookieJar(DefaultCookiePolicy(rfc2965=True))
interact_2965(c, 'http://www.acme.com/acme/ammo/specific',
'Part_Number="Rocket_Launcher_0001"; Version="1"; Path="/acme"',
'Part_Number="Riding_Rocket_0023"; Version="1"; Path="/acme/ammo"')
cookie = interact_2965(c, 'http://www.acme.com/acme/ammo/...')
self.assertRegex(cookie, 'Riding_Rocket_0023.*Rocket_Launcher_0001')
cookie = interact_2965(c, 'http://www.acme.com/acme/parts/')
self.assertIn('Rocket_Launcher_0001', cookie)
self.assertNotIn('Riding_Rocket_0023', cookie)
def test_rejection(self):
pol = DefaultCookiePolicy(rfc2965=True)
c = LWPCookieJar(policy=pol)
max_age = 'max-age=3600'
cookie = interact_2965(c, 'http://www.acme.com',
'foo=bar; domain=".com"; version=1')
self.assertFalse(c)
cookie = interact_2965(c, 'http://www.acme.com',
'ping=pong; domain="acme.com"; version=1')
self.assertEqual(len(c), 1)
cookie = interact_2965(c, 'http://www.a.acme.com',
'whiz=bang; domain="acme.com"; version=1')
self.assertEqual(len(c), 1)
cookie = interact_2965(c, 'http://www.a.acme.com',
'wow=flutter; domain=".a.acme.com"; version=1')
self.assertEqual(len(c), 2)
cookie = interact_2965(c, 'http://125.125.125.125',
'zzzz=ping; domain="125.125.125"; version=1')
self.assertEqual(len(c), 2)
cookie = interact_2965(c, 'http://www.sol.no',
'blah=rhubarb; domain=".sol.no"; path="/foo"; version=1')
self.assertEqual(len(c), 2)
cookie = interact_2965(c, 'http://www.sol.no/foo/bar',
'bing=bong; domain=".sol.no"; path="/foo"; version=1')
self.assertEqual(len(c), 3)
cookie = interact_2965(c, 'http://www.sol.no',
'whiz=ffft; domain=".sol.no"; port="90,100"; version=1')
self.assertEqual(len(c), 3)
cookie = interact_2965(c, 'http://www.sol.no',
'bang=wallop; version=1; domain=".sol.no"; port="90,100, 80,8080"; max-age=100; Comment = "Just kidding! (\\"|\\\\\\\\) "'
)
self.assertEqual(len(c), 4)
cookie = interact_2965(c, 'http://www.sol.no',
'foo9=bar; version=1; domain=".sol.no"; port; max-age=100;')
self.assertEqual(len(c), 5)
cookie = interact_2965(c, 'http://www.sol.no/<oo/',
'foo8=bar; version=1; path="/%3coo"')
self.assertEqual(len(c), 6)
filename = test.support.TESTFN
try:
c.save(filename, ignore_discard=True)
old = repr(c)
c = LWPCookieJar(policy=pol)
c.load(filename, ignore_discard=True)
finally:
try:
os.unlink(filename)
except OSError:
pass
self.assertEqual(old, repr(c))
def test_url_encoding(self):
c = CookieJar(DefaultCookiePolicy(rfc2965=True))
interact_2965(c,
'http://www.acme.com/foo%2f%25/%3c%3c%0Anew%C3%A5/%C3%A5',
'foo = bar; version = 1')
cookie = interact_2965(c,
'http://www.acme.com/foo%2f%25/<<%0anewå/æøå',
'bar=baz; path="/foo/"; version=1')
version_re = re.compile('^\\$version=\\"?1\\"?', re.I)
self.assertIn('foo=bar', cookie)
self.assertRegex(cookie, version_re)
cookie = interact_2965(c, 'http://www.acme.com/foo/%25/<<%0anewå/æøå')
self.assertFalse(cookie)
cookie = interact_2965(c, 'http://www.acme.com/ü')
def test_mozilla(self):
year_plus_one = time.localtime()[0] + 1
filename = test.support.TESTFN
        c = MozillaCookieJar(filename, policy=DefaultCookiePolicy(rfc2965=True))
interact_2965(c, 'http://www.acme.com/',
'foo1=bar; max-age=100; Version=1')
interact_2965(c, 'http://www.acme.com/',
'foo2=bar; port="80"; max-age=100; Discard; Version=1')
interact_2965(c, 'http://www.acme.com/', 'foo3=bar; secure; Version=1')
expires = 'expires=09-Nov-%d 23:12:40 GMT' % (year_plus_one,)
interact_netscape(c, 'http://www.foo.com/', 'fooa=bar; %s' % expires)
interact_netscape(c, 'http://www.foo.com/',
'foob=bar; Domain=.foo.com; %s' % expires)
interact_netscape(c, 'http://www.foo.com/',
'fooc=bar; Domain=www.foo.com; %s' % expires)
def save_and_restore(cj, ignore_discard):
try:
cj.save(ignore_discard=ignore_discard)
new_c = MozillaCookieJar(filename, DefaultCookiePolicy(
rfc2965=True))
new_c.load(ignore_discard=ignore_discard)
finally:
try:
os.unlink(filename)
except OSError:
pass
return new_c
new_c = save_and_restore(c, True)
self.assertEqual(len(new_c), 6)
self.assertIn("name='foo1', value='bar'", repr(new_c))
new_c = save_and_restore(c, False)
self.assertEqual(len(new_c), 4)
self.assertIn("name='foo1', value='bar'", repr(new_c))
def test_netscape_misc(self):
c = CookieJar()
headers = []
req = urllib.request.Request('http://foo.bar.acme.com/foo')
headers.append('Set-Cookie: Customer=WILE_E_COYOTE; domain=.acme.com')
res = FakeResponse(headers, 'http://www.acme.com/foo')
c.extract_cookies(res, req)
headers.append('Set-Cookie: PART_NUMBER=3,4; domain=foo.bar.acme.com')
res = FakeResponse(headers, 'http://www.acme.com/foo')
c.extract_cookies(res, req)
req = urllib.request.Request('http://foo.bar.acme.com/foo')
c.add_cookie_header(req)
self.assertIn('PART_NUMBER=3,4', req.get_header('Cookie'))
self.assertIn('Customer=WILE_E_COYOTE', req.get_header('Cookie'))
def test_intranet_domains_2965(self):
c = CookieJar(DefaultCookiePolicy(rfc2965=True))
interact_2965(c, 'http://example/',
'foo1=bar; PORT; Discard; Version=1;')
cookie = interact_2965(c, 'http://example/',
'foo2=bar; domain=".local"; Version=1')
self.assertIn('foo1=bar', cookie)
interact_2965(c, 'http://example/', 'foo3=bar; Version=1')
cookie = interact_2965(c, 'http://example/')
self.assertIn('foo2=bar', cookie)
self.assertEqual(len(c), 3)
def test_intranet_domains_ns(self):
c = CookieJar(DefaultCookiePolicy(rfc2965=False))
interact_netscape(c, 'http://example/', 'foo1=bar')
cookie = interact_netscape(c, 'http://example/',
'foo2=bar; domain=.local')
self.assertEqual(len(c), 2)
self.assertIn('foo1=bar', cookie)
cookie = interact_netscape(c, 'http://example/')
self.assertIn('foo2=bar', cookie)
self.assertEqual(len(c), 2)
def test_empty_path(self):
c = CookieJar(DefaultCookiePolicy(rfc2965=True))
headers = []
req = urllib.request.Request('http://www.ants.com/')
headers.append('Set-Cookie: JSESSIONID=ABCDERANDOM123; Path=')
res = FakeResponse(headers, 'http://www.ants.com/')
c.extract_cookies(res, req)
req = urllib.request.Request('http://www.ants.com/')
c.add_cookie_header(req)
self.assertEqual(req.get_header('Cookie'), 'JSESSIONID=ABCDERANDOM123')
self.assertEqual(req.get_header('Cookie2'), '$Version="1"')
req = urllib.request.Request('http://www.ants.com:8080')
c.add_cookie_header(req)
self.assertEqual(req.get_header('Cookie'), 'JSESSIONID=ABCDERANDOM123')
self.assertEqual(req.get_header('Cookie2'), '$Version="1"')
def test_session_cookies(self):
year_plus_one = time.localtime()[0] + 1
req = urllib.request.Request('http://www.perlmeister.com/scripts')
headers = []
headers.append('Set-Cookie: s1=session;Path=/scripts')
headers.append(
'Set-Cookie: p1=perm; Domain=.perlmeister.com;Path=/;expires=Fri, 02-Feb-%d 23:24:20 GMT'
% year_plus_one)
headers.append(
'Set-Cookie: p2=perm;Path=/;expires=Fri, 02-Feb-%d 23:24:20 GMT' %
year_plus_one)
headers.append(
'Set-Cookie: s2=session;Path=/scripts;Domain=.perlmeister.com')
headers.append('Set-Cookie2: s3=session;Version=1;Discard;Path="/"')
res = FakeResponse(headers, 'http://www.perlmeister.com/scripts')
c = CookieJar()
c.extract_cookies(res, req)
counter = {'session_after': 0, 'perm_after': 0, 'session_before': 0,
'perm_before': 0}
for cookie in c:
key = '%s_before' % cookie.value
counter[key] = counter[key] + 1
c.clear_session_cookies()
for cookie in c:
key = '%s_after' % cookie.value
counter[key] = counter[key] + 1
self.assertEqual(counter['perm_after'], counter['perm_before'])
self.assertEqual(counter['session_after'], 0)
self.assertNotEqual(counter['session_before'], 0)
def test_main(verbose=None):
test.support.run_unittest(DateTimeTests, HeaderTests, CookieTests,
FileCookieJarTests, LWPCookieTests)
if __name__ == '__main__':
test_main(verbose=True)
|
code/tmp_rtrip/test/test_http_cookiejar.py
|
import os
import re
import test.support
import time
import unittest
import urllib.request
from http.cookiejar import time2isoz, http2time, iso2time, time2netscape, parse_ns_headers, join_header_words, split_header_words, Cookie, CookieJar, DefaultCookiePolicy, LWPCookieJar, MozillaCookieJar, LoadError, lwp_cookie_str, DEFAULT_HTTP_PORT, escape_path, reach, is_HDN, domain_match, user_domain_match, request_path, request_port, request_host
class DateTimeTests(unittest.TestCase):
def test_time2isoz(self):
base = 1019227000
day = 24 * 3600
self.assertEqual(time2isoz(base), '2002-04-19 14:36:40Z')
self.assertEqual(time2isoz(base + day), '2002-04-20 14:36:40Z')
self.assertEqual(time2isoz(base + 2 * day), '2002-04-21 14:36:40Z')
self.assertEqual(time2isoz(base + 3 * day), '2002-04-22 14:36:40Z')
az = time2isoz()
bz = time2isoz(500000)
for text in (az, bz):
self.assertRegex(text,
'^\\d{4}-\\d\\d-\\d\\d \\d\\d:\\d\\d:\\d\\dZ$',
'bad time2isoz format: %s %s' % (az, bz))
def test_time2netscape(self):
base = 1019227000
day = 24 * 3600
self.assertEqual(time2netscape(base), 'Fri, 19-Apr-2002 14:36:40 GMT')
self.assertEqual(time2netscape(base + day),
'Sat, 20-Apr-2002 14:36:40 GMT')
self.assertEqual(time2netscape(base + 2 * day),
'Sun, 21-Apr-2002 14:36:40 GMT')
self.assertEqual(time2netscape(base + 3 * day),
'Mon, 22-Apr-2002 14:36:40 GMT')
az = time2netscape()
bz = time2netscape(500000)
for text in (az, bz):
self.assertRegex(text,
'[a-zA-Z]{3}, \\d{2}-[a-zA-Z]{3}-\\d{4} \\d{2}:\\d{2}:\\d{2} GMT$'
, 'bad time2netscape format: %s %s' % (az, bz))
def test_http2time(self):
def parse_date(text):
return time.gmtime(http2time(text))[:6]
self.assertEqual(parse_date('01 Jan 2001'), (2001, 1, 1, 0, 0, 0.0))
self.assertEqual(parse_date('03-Feb-20'), (2020, 2, 3, 0, 0, 0.0))
self.assertEqual(parse_date('03-Feb-98'), (1998, 2, 3, 0, 0, 0.0))
def test_http2time_formats(self):
tests = ['Thu, 03 Feb 1994 00:00:00 GMT',
'Thursday, 03-Feb-94 00:00:00 GMT',
'Thursday, 03-Feb-1994 00:00:00 GMT',
'03 Feb 1994 00:00:00 GMT', '03-Feb-94 00:00:00 GMT',
'03-Feb-1994 00:00:00 GMT', '03-Feb-1994 00:00 GMT',
'03-Feb-1994 00:00', '02-Feb-1994 24:00', '03-Feb-94',
'03-Feb-1994', '03 Feb 1994', ' 03 Feb 1994 0:00 ',
' 03-Feb-1994 ']
test_t = 760233600
result = time2isoz(test_t)
expected = '1994-02-03 00:00:00Z'
self.assertEqual(result, expected, "%s => '%s' (%s)" % (test_t,
result, expected))
for s in tests:
self.assertEqual(http2time(s), test_t, s)
self.assertEqual(http2time(s.lower()), test_t, s.lower())
self.assertEqual(http2time(s.upper()), test_t, s.upper())
def test_http2time_garbage(self):
for test in ['', 'Garbage', 'Mandag 16. September 1996',
'01-00-1980', '01-13-1980', '00-01-1980', '32-01-1980',
'01-01-1980 25:00:00', '01-01-1980 00:61:00',
'01-01-1980 00:00:62', '08-Oct-3697739', '08-01-3697739',
'09 Feb 19942632 22:23:32 GMT', 'Wed, 09 Feb 1994834 22:23:32 GMT'
]:
self.assertIsNone(http2time(test),
'http2time(%s) is not None\nhttp2time(test) %s' % (test,
http2time(test)))
def test_iso2time(self):
def parse_date(text):
return time.gmtime(iso2time(text))[:6]
self.assertEqual(parse_date('19940203T141529Z'), (1994, 2, 3, 14,
15, 29))
self.assertEqual(parse_date('1994-02-03 07:15:29 -0700'), (1994, 2,
3, 14, 15, 29))
self.assertEqual(parse_date('1994-02-03 19:45:29 +0530'), (1994, 2,
3, 14, 15, 29))
def test_iso2time_formats(self):
tests = ['1994-02-03 00:00:00 -0000', '1994-02-03 00:00:00 +0000',
'1994-02-03 00:00:00', '1994-02-03', '1994-02-03T00:00:00',
'19940203', '1994-02-02 24:00:00', '19940203T000000Z',
' 1994-02-03 ', ' 1994-02-03T00:00:00 ']
test_t = 760233600
for s in tests:
self.assertEqual(iso2time(s), test_t, s)
self.assertEqual(iso2time(s.lower()), test_t, s.lower())
self.assertEqual(iso2time(s.upper()), test_t, s.upper())
def test_iso2time_garbage(self):
for test in ['', 'Garbage', 'Thursday, 03-Feb-94 00:00:00 GMT',
'1980-00-01', '1980-13-01', '1980-01-00', '1980-01-32',
'1980-01-01 25:00:00', '1980-01-01 00:61:00',
'01-01-1980 00:00:62', '01-01-1980T00:00:62',
'19800101T250000Z1980-01-01 00:00:00 -2500']:
self.assertIsNone(iso2time(test),
'iso2time(%s) is not None\niso2time(test) %s' % (test,
iso2time(test)))
class HeaderTests(unittest.TestCase):
def test_parse_ns_headers(self):
expected = [[('foo', 'bar'), ('expires', 2209069412), ('version', '0')]
]
for hdr in ['foo=bar; expires=01 Jan 2040 22:23:32 GMT',
'foo=bar; expires="01 Jan 2040 22:23:32 GMT"']:
self.assertEqual(parse_ns_headers([hdr]), expected)
def test_parse_ns_headers_version(self):
expected = [[('foo', 'bar'), ('version', '1')]]
for hdr in ['foo=bar; version="1"', 'foo=bar; Version="1"']:
self.assertEqual(parse_ns_headers([hdr]), expected)
def test_parse_ns_headers_special_names(self):
hdr = 'expires=01 Jan 2040 22:23:32 GMT'
expected = [[('expires', '01 Jan 2040 22:23:32 GMT'), ('version', '0')]
]
self.assertEqual(parse_ns_headers([hdr]), expected)
def test_join_header_words(self):
joined = join_header_words([[('foo', None), ('bar', 'baz')]])
self.assertEqual(joined, 'foo; bar=baz')
self.assertEqual(join_header_words([[]]), '')
def test_split_header_words(self):
tests = [('foo', [[('foo', None)]]), ('foo=bar', [[('foo', 'bar')]]
), (' foo ', [[('foo', None)]]), (' foo= ', [[('foo',
'')]]), (' foo=', [[('foo', '')]]), (' foo= ; ', [[('foo',
'')]]), (' foo= ; bar= baz ', [[('foo', ''), ('bar', 'baz')
]]), ('foo=bar bar=baz', [[('foo', 'bar'), ('bar', 'baz')]]), (
'foo= bar=baz', [[('foo', 'bar=baz')]]), ('foo=bar;bar=baz', [[
('foo', 'bar'), ('bar', 'baz')]]), ('foo bar baz', [[('foo',
None), ('bar', None), ('baz', None)]]), ('a, b, c', [[('a',
None)], [('b', None)], [('c', None)]]), (
'foo; bar=baz, spam=, foo="\\,\\;\\"", bar= ', [[('foo', None),
('bar', 'baz')], [('spam', '')], [('foo', ',;"')], [('bar', '')]])]
for arg, expect in tests:
try:
result = split_header_words([arg])
except:
import traceback, io
f = io.StringIO()
traceback.print_exc(None, f)
result = '(error -- traceback follows)\n\n%s' % f.getvalue()
self.assertEqual(result, expect,
"""
When parsing: '%s'
Expected: '%s'
Got: '%s'
"""
% (arg, expect, result))
def test_roundtrip(self):
tests = [('foo', 'foo'), ('foo=bar', 'foo=bar'), (' foo ',
'foo'), ('foo=', 'foo=""'), ('foo=bar bar=baz',
'foo=bar; bar=baz'), ('foo=bar;bar=baz', 'foo=bar; bar=baz'), (
'foo bar baz', 'foo; bar; baz'), ('foo="\\"" bar="\\\\"',
'foo="\\""; bar="\\\\"'), ('foo,,,bar', 'foo, bar'), (
'foo=bar,bar=baz', 'foo=bar, bar=baz'), (
'text/html; charset=iso-8859-1',
'text/html; charset="iso-8859-1"'), (
'foo="bar"; port="80,81"; discard, bar=baz',
'foo=bar; port="80,81"; discard, bar=baz'), (
'Basic realm="\\"foo\\\\\\\\bar\\""',
'Basic; realm="\\"foo\\\\\\\\bar\\""')]
for arg, expect in tests:
input = split_header_words([arg])
res = join_header_words(input)
self.assertEqual(res, expect,
"""
When parsing: '%s'
Expected: '%s'
Got: '%s'
Input was: '%s'
"""
% (arg, expect, res, input))
class FakeResponse:
def __init__(self, headers=[], url=None):
"""
headers: list of RFC822-style 'Key: value' strings
"""
import email
self._headers = email.message_from_string('\n'.join(headers))
self._url = url
def info(self):
return self._headers
def interact_2965(cookiejar, url, *set_cookie_hdrs):
return _interact(cookiejar, url, set_cookie_hdrs, 'Set-Cookie2')
def interact_netscape(cookiejar, url, *set_cookie_hdrs):
return _interact(cookiejar, url, set_cookie_hdrs, 'Set-Cookie')
def _interact(cookiejar, url, set_cookie_hdrs, hdr_name):
"""Perform a single request / response cycle, returning Cookie: header."""
req = urllib.request.Request(url)
cookiejar.add_cookie_header(req)
cookie_hdr = req.get_header('Cookie', '')
headers = []
for hdr in set_cookie_hdrs:
headers.append('%s: %s' % (hdr_name, hdr))
res = FakeResponse(headers, url)
cookiejar.extract_cookies(res, req)
return cookie_hdr
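# Editorial sketch (not part of the original test module): the helpers above form the
# request/response harness used throughout these tests. A typical exchange, assuming a
# plain Netscape-style cookie and the illustrative names cj / sid below, looks roughly like:
#
#     cj = CookieJar()
#     interact_netscape(cj, 'http://www.example.com/', 'sid=abc123; path=/')
#     sent = interact_netscape(cj, 'http://www.example.com/')   # -> 'sid=abc123'
#
# i.e. the first call stores the Set-Cookie header in the jar and the second call returns
# the Cookie header the jar would attach to a follow-up request to the same URL.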
class FileCookieJarTests(unittest.TestCase):
def test_lwp_valueless_cookie(self):
filename = test.support.TESTFN
c = LWPCookieJar()
interact_netscape(c, 'http://www.acme.com/', 'boo')
self.assertEqual(c._cookies['www.acme.com']['/']['boo'].value, None)
try:
c.save(filename, ignore_discard=True)
c = LWPCookieJar()
c.load(filename, ignore_discard=True)
finally:
try:
os.unlink(filename)
except OSError:
pass
self.assertEqual(c._cookies['www.acme.com']['/']['boo'].value, None)
def test_bad_magic(self):
filename = test.support.TESTFN
for cookiejar_class in (LWPCookieJar, MozillaCookieJar):
c = cookiejar_class()
try:
c.load(filename=
'for this test to work, a file with this filename should not exist'
)
except OSError as exc:
self.assertIsNot(exc.__class__, LoadError)
else:
self.fail('expected OSError for invalid filename')
try:
with open(filename, 'w') as f:
f.write('oops\n')
for cookiejar_class in (LWPCookieJar, MozillaCookieJar):
c = cookiejar_class()
self.assertRaises(LoadError, c.load, filename)
finally:
try:
os.unlink(filename)
except OSError:
pass
class CookieTests(unittest.TestCase):
def test_domain_return_ok(self):
pol = DefaultCookiePolicy()
for url, domain, ok in [('http://foo.bar.com/', 'blah.com', False),
('http://foo.bar.com/', 'rhubarb.blah.com', False), (
'http://foo.bar.com/', 'rhubarb.foo.bar.com', False), (
'http://foo.bar.com/', '.foo.bar.com', True), (
'http://foo.bar.com/', 'foo.bar.com', True), (
'http://foo.bar.com/', '.bar.com', True), (
'http://foo.bar.com/', 'com', True), ('http://foo.com/',
'rhubarb.foo.com', False), ('http://foo.com/', '.foo.com', True
), ('http://foo.com/', 'foo.com', True), ('http://foo.com/',
'com', True), ('http://foo/', 'rhubarb.foo', False), (
'http://foo/', '.foo', True), ('http://foo/', 'foo', True), (
'http://foo/', 'foo.local', True), ('http://foo/', '.local', True)
]:
request = urllib.request.Request(url)
r = pol.domain_return_ok(domain, request)
if ok:
self.assertTrue(r)
else:
self.assertFalse(r)
def test_missing_value(self):
filename = test.support.TESTFN
c = MozillaCookieJar(filename)
interact_netscape(c, 'http://www.acme.com/', 'eggs')
interact_netscape(c, 'http://www.acme.com/', '"spam"; path=/foo/')
cookie = c._cookies['www.acme.com']['/']['eggs']
self.assertIsNone(cookie.value)
self.assertEqual(cookie.name, 'eggs')
cookie = c._cookies['www.acme.com']['/foo/']['"spam"']
self.assertIsNone(cookie.value)
self.assertEqual(cookie.name, '"spam"')
self.assertEqual(lwp_cookie_str(cookie),
'"spam"; path="/foo/"; domain="www.acme.com"; path_spec; discard; version=0'
)
old_str = repr(c)
c.save(ignore_expires=True, ignore_discard=True)
try:
c = MozillaCookieJar(filename)
c.revert(ignore_expires=True, ignore_discard=True)
finally:
os.unlink(c.filename)
self.assertEqual(repr(c), re.sub('path_specified=%s' % True,
'path_specified=%s' % False, old_str))
self.assertEqual(interact_netscape(c, 'http://www.acme.com/foo/'),
'"spam"; eggs')
def test_rfc2109_handling(self):
for rfc2109_as_netscape, rfc2965, version in [(None, False, 0), (
None, True, 1), (False, False, None), (False, True, 1), (True,
False, 0), (True, True, 0)]:
policy = DefaultCookiePolicy(rfc2109_as_netscape=
rfc2109_as_netscape, rfc2965=rfc2965)
c = CookieJar(policy)
interact_netscape(c, 'http://www.example.com/', 'ni=ni; Version=1')
try:
cookie = c._cookies['www.example.com']['/']['ni']
except KeyError:
self.assertIsNone(version)
else:
self.assertEqual(cookie.version, version)
interact_2965(c, 'http://www.example.com/',
'foo=bar; Version=1')
if rfc2965:
cookie2965 = c._cookies['www.example.com']['/']['foo']
self.assertEqual(cookie2965.version, 1)
def test_ns_parser(self):
c = CookieJar()
interact_netscape(c, 'http://www.acme.com/',
'spam=eggs; DoMain=.acme.com; port; blArgh="feep"')
interact_netscape(c, 'http://www.acme.com/', 'ni=ni; port=80,8080')
interact_netscape(c, 'http://www.acme.com:80/', 'nini=ni')
interact_netscape(c, 'http://www.acme.com:80/', 'foo=bar; expires=')
interact_netscape(c, 'http://www.acme.com:80/',
'spam=eggs; expires="Foo Bar 25 33:22:11 3022"')
interact_netscape(c, 'http://www.acme.com/', 'fortytwo=')
interact_netscape(c, 'http://www.acme.com/', '=unladenswallow')
interact_netscape(c, 'http://www.acme.com/', 'holyhandgrenade')
cookie = c._cookies['.acme.com']['/']['spam']
self.assertEqual(cookie.domain, '.acme.com')
self.assertTrue(cookie.domain_specified)
self.assertEqual(cookie.port, DEFAULT_HTTP_PORT)
self.assertFalse(cookie.port_specified)
self.assertTrue(cookie.has_nonstandard_attr('blArgh'))
self.assertFalse(cookie.has_nonstandard_attr('blargh'))
cookie = c._cookies['www.acme.com']['/']['ni']
self.assertEqual(cookie.domain, 'www.acme.com')
self.assertFalse(cookie.domain_specified)
self.assertEqual(cookie.port, '80,8080')
self.assertTrue(cookie.port_specified)
cookie = c._cookies['www.acme.com']['/']['nini']
self.assertIsNone(cookie.port)
self.assertFalse(cookie.port_specified)
foo = c._cookies['www.acme.com']['/']['foo']
        spam = c._cookies['www.acme.com']['/']['spam']
self.assertIsNone(foo.expires)
self.assertIsNone(spam.expires)
cookie = c._cookies['www.acme.com']['/']['fortytwo']
self.assertIsNotNone(cookie.value)
self.assertEqual(cookie.value, '')
cookie = c._cookies['www.acme.com']['/']['holyhandgrenade']
self.assertIsNone(cookie.value)
def test_ns_parser_special_names(self):
c = CookieJar()
interact_netscape(c, 'http://www.acme.com/', 'expires=eggs')
interact_netscape(c, 'http://www.acme.com/', 'version=eggs; spam=eggs')
cookies = c._cookies['www.acme.com']['/']
self.assertIn('expires', cookies)
self.assertIn('version', cookies)
def test_expires(self):
c = CookieJar()
future = time2netscape(time.time() + 3600)
interact_netscape(c, 'http://www.acme.com/',
'spam="bar"; expires=%s' % future)
self.assertEqual(len(c), 1)
now = time2netscape(time.time() - 1)
interact_netscape(c, 'http://www.acme.com/',
'foo="eggs"; expires=%s' % now)
h = interact_netscape(c, 'http://www.acme.com/')
self.assertEqual(len(c), 1)
self.assertIn('spam="bar"', h)
self.assertNotIn('foo', h)
interact_netscape(c, 'http://www.acme.com/',
'eggs="bar"; expires=%s' % future)
interact_netscape(c, 'http://www.acme.com/',
'bar="bar"; expires=%s' % future)
self.assertEqual(len(c), 3)
interact_netscape(c, 'http://www.acme.com/',
'eggs="bar"; expires=%s; max-age=0' % future)
interact_netscape(c, 'http://www.acme.com/',
'bar="bar"; max-age=0; expires=%s' % future)
h = interact_netscape(c, 'http://www.acme.com/')
self.assertEqual(len(c), 1)
interact_netscape(c, 'http://www.rhubarb.net/', 'whum="fizz"')
self.assertEqual(len(c), 2)
c.clear_session_cookies()
self.assertEqual(len(c), 1)
self.assertIn('spam="bar"', h)
cookie = Cookie(0, 'name', 'value', None, False, 'www.python.org',
True, False, '/', False, False, '1444312383.018307', False,
None, None, {})
self.assertEqual(cookie.expires, 1444312383)
def test_default_path(self):
pol = DefaultCookiePolicy(rfc2965=True)
c = CookieJar(pol)
interact_2965(c, 'http://www.acme.com/', 'spam="bar"; Version="1"')
self.assertIn('/', c._cookies['www.acme.com'])
c = CookieJar(pol)
interact_2965(c, 'http://www.acme.com/blah', 'eggs="bar"; Version="1"')
self.assertIn('/', c._cookies['www.acme.com'])
c = CookieJar(pol)
interact_2965(c, 'http://www.acme.com/blah/rhubarb',
'eggs="bar"; Version="1"')
self.assertIn('/blah/', c._cookies['www.acme.com'])
c = CookieJar(pol)
interact_2965(c, 'http://www.acme.com/blah/rhubarb/',
'eggs="bar"; Version="1"')
self.assertIn('/blah/rhubarb/', c._cookies['www.acme.com'])
c = CookieJar()
interact_netscape(c, 'http://www.acme.com/', 'spam="bar"')
self.assertIn('/', c._cookies['www.acme.com'])
c = CookieJar()
interact_netscape(c, 'http://www.acme.com/blah', 'eggs="bar"')
self.assertIn('/', c._cookies['www.acme.com'])
c = CookieJar()
interact_netscape(c, 'http://www.acme.com/blah/rhubarb', 'eggs="bar"')
self.assertIn('/blah', c._cookies['www.acme.com'])
c = CookieJar()
interact_netscape(c, 'http://www.acme.com/blah/rhubarb/', 'eggs="bar"')
self.assertIn('/blah/rhubarb', c._cookies['www.acme.com'])
def test_default_path_with_query(self):
cj = CookieJar()
uri = 'http://example.com/?spam/eggs'
value = 'eggs="bar"'
interact_netscape(cj, uri, value)
self.assertIn('/', cj._cookies['example.com'])
self.assertEqual(interact_netscape(cj, uri), value)
def test_escape_path(self):
cases = [('/foo%2f/bar', '/foo%2F/bar'), ('/foo%2F/bar',
'/foo%2F/bar'), ('/foo%%/bar', '/foo%%/bar'), ('/fo%19o/bar',
'/fo%19o/bar'), ('/fo%7do/bar', '/fo%7Do/bar'), ('/foo/bar&',
'/foo/bar&'), ('/foo//bar', '/foo//bar'), ('~/foo/bar',
'~/foo/bar'), ('/foo\x19/bar', '/foo%19/bar'), ('/}foo/bar',
'/%7Dfoo/bar'), ('/foo/barü', '/foo/bar%C3%BC'), ('/foo/barꯍ',
'/foo/bar%EA%AF%8D')]
for arg, result in cases:
self.assertEqual(escape_path(arg), result)
def test_request_path(self):
req = urllib.request.Request(
'http://www.example.com/rheum/rhaponticum;foo=bar;sing=song?apples=pears&spam=eggs#ni'
)
self.assertEqual(request_path(req),
'/rheum/rhaponticum;foo=bar;sing=song')
req = urllib.request.Request(
'http://www.example.com/rheum/rhaponticum?apples=pears&spam=eggs#ni'
)
self.assertEqual(request_path(req), '/rheum/rhaponticum')
req = urllib.request.Request('http://www.example.com')
self.assertEqual(request_path(req), '/')
def test_request_port(self):
req = urllib.request.Request('http://www.acme.com:1234/', headers={
'Host': 'www.acme.com:4321'})
self.assertEqual(request_port(req), '1234')
req = urllib.request.Request('http://www.acme.com/', headers={
'Host': 'www.acme.com:4321'})
self.assertEqual(request_port(req), DEFAULT_HTTP_PORT)
def test_request_host(self):
req = urllib.request.Request('http://1.1.1.1/', headers={'Host':
'www.acme.com:80'})
self.assertEqual(request_host(req), '1.1.1.1')
req = urllib.request.Request('http://www.acme.com/', headers={
'Host': 'irrelevant.com'})
self.assertEqual(request_host(req), 'www.acme.com')
req = urllib.request.Request('http://www.acme.com:2345/resource.html',
headers={'Host': 'www.acme.com:5432'})
self.assertEqual(request_host(req), 'www.acme.com')
def test_is_HDN(self):
self.assertTrue(is_HDN('foo.bar.com'))
self.assertTrue(is_HDN('1foo2.3bar4.5com'))
self.assertFalse(is_HDN('192.168.1.1'))
self.assertFalse(is_HDN(''))
self.assertFalse(is_HDN('.'))
self.assertFalse(is_HDN('.foo.bar.com'))
self.assertFalse(is_HDN('..foo'))
self.assertFalse(is_HDN('foo.'))
def test_reach(self):
self.assertEqual(reach('www.acme.com'), '.acme.com')
self.assertEqual(reach('acme.com'), 'acme.com')
self.assertEqual(reach('acme.local'), '.local')
self.assertEqual(reach('.local'), '.local')
self.assertEqual(reach('.com'), '.com')
self.assertEqual(reach('.'), '.')
self.assertEqual(reach(''), '')
self.assertEqual(reach('192.168.0.1'), '192.168.0.1')
def test_domain_match(self):
self.assertTrue(domain_match('192.168.1.1', '192.168.1.1'))
self.assertFalse(domain_match('192.168.1.1', '.168.1.1'))
self.assertTrue(domain_match('x.y.com', 'x.Y.com'))
self.assertTrue(domain_match('x.y.com', '.Y.com'))
self.assertFalse(domain_match('x.y.com', 'Y.com'))
self.assertTrue(domain_match('a.b.c.com', '.c.com'))
self.assertFalse(domain_match('.c.com', 'a.b.c.com'))
self.assertTrue(domain_match('example.local', '.local'))
self.assertFalse(domain_match('blah.blah', ''))
self.assertFalse(domain_match('', '.rhubarb.rhubarb'))
self.assertTrue(domain_match('', ''))
self.assertTrue(user_domain_match('acme.com', 'acme.com'))
self.assertFalse(user_domain_match('acme.com', '.acme.com'))
self.assertTrue(user_domain_match('rhubarb.acme.com', '.acme.com'))
self.assertTrue(user_domain_match('www.rhubarb.acme.com', '.acme.com'))
self.assertTrue(user_domain_match('x.y.com', 'x.Y.com'))
self.assertTrue(user_domain_match('x.y.com', '.Y.com'))
self.assertFalse(user_domain_match('x.y.com', 'Y.com'))
self.assertTrue(user_domain_match('y.com', 'Y.com'))
self.assertFalse(user_domain_match('.y.com', 'Y.com'))
self.assertTrue(user_domain_match('.y.com', '.Y.com'))
self.assertTrue(user_domain_match('x.y.com', '.com'))
self.assertFalse(user_domain_match('x.y.com', 'com'))
self.assertFalse(user_domain_match('x.y.com', 'm'))
self.assertFalse(user_domain_match('x.y.com', '.m'))
self.assertFalse(user_domain_match('x.y.com', ''))
self.assertFalse(user_domain_match('x.y.com', '.'))
self.assertTrue(user_domain_match('192.168.1.1', '192.168.1.1'))
self.assertFalse(user_domain_match('192.168.1.1', '.168.1.1'))
self.assertFalse(user_domain_match('192.168.1.1', '.'))
self.assertFalse(user_domain_match('192.168.1.1', ''))
def test_wrong_domain(self):
c = CookieJar()
interact_2965(c, 'http://www.nasty.com/',
'foo=bar; domain=friendly.org; Version="1"')
self.assertEqual(len(c), 0)
def test_strict_domain(self):
cp = DefaultCookiePolicy(strict_domain=True)
cj = CookieJar(policy=cp)
interact_netscape(cj, 'http://example.co.uk/', 'no=problemo')
interact_netscape(cj, 'http://example.co.uk/',
'okey=dokey; Domain=.example.co.uk')
self.assertEqual(len(cj), 2)
for pseudo_tld in ['.co.uk', '.org.za', '.tx.us', '.name.us']:
interact_netscape(cj, 'http://example.%s/' % pseudo_tld,
'spam=eggs; Domain=.co.uk')
self.assertEqual(len(cj), 2)
def test_two_component_domain_ns(self):
c = CookieJar()
interact_netscape(c, 'http://foo.net/', 'ns=bar')
self.assertEqual(len(c), 1)
self.assertEqual(c._cookies['foo.net']['/']['ns'].value, 'bar')
self.assertEqual(interact_netscape(c, 'http://foo.net/'), 'ns=bar')
self.assertEqual(interact_netscape(c, 'http://www.foo.net/'), 'ns=bar')
pol = DefaultCookiePolicy(strict_ns_domain=DefaultCookiePolicy.
DomainStrictNonDomain)
c.set_policy(pol)
self.assertEqual(interact_netscape(c, 'http://www.foo.net/'), '')
interact_netscape(c, 'http://foo.net/foo/',
'spam1=eggs; domain=foo.net')
interact_netscape(c, 'http://foo.net/foo/bar/',
'spam2=eggs; domain=.foo.net')
self.assertEqual(len(c), 3)
self.assertEqual(c._cookies['.foo.net']['/foo']['spam1'].value, 'eggs')
self.assertEqual(c._cookies['.foo.net']['/foo/bar']['spam2'].value,
'eggs')
self.assertEqual(interact_netscape(c, 'http://foo.net/foo/bar/'),
'spam2=eggs; spam1=eggs; ns=bar')
interact_netscape(c, 'http://foo.net/', 'nini="ni"; domain=.net')
self.assertEqual(len(c), 3)
interact_netscape(c, 'http://foo.co.uk', 'nasty=trick; domain=.co.uk')
self.assertEqual(len(c), 4)
def test_two_component_domain_rfc2965(self):
pol = DefaultCookiePolicy(rfc2965=True)
c = CookieJar(pol)
interact_2965(c, 'http://foo.net/', 'foo=bar; Version="1"')
self.assertEqual(len(c), 1)
self.assertEqual(c._cookies['foo.net']['/']['foo'].value, 'bar')
self.assertEqual(interact_2965(c, 'http://foo.net/'),
'$Version=1; foo=bar')
self.assertEqual(interact_2965(c, 'http://www.foo.net/'), '')
interact_2965(c, 'http://foo.net/foo',
'spam=eggs; domain=foo.net; path=/foo; Version="1"')
self.assertEqual(len(c), 1)
self.assertEqual(interact_2965(c, 'http://foo.net/foo'),
'$Version=1; foo=bar')
interact_2965(c, 'http://www.foo.net/foo/',
'spam=eggs; domain=foo.net; Version="1"')
self.assertEqual(c._cookies['.foo.net']['/foo/']['spam'].value, 'eggs')
self.assertEqual(len(c), 2)
self.assertEqual(interact_2965(c, 'http://foo.net/foo/'),
'$Version=1; foo=bar')
self.assertEqual(interact_2965(c, 'http://www.foo.net/foo/'),
'$Version=1; spam=eggs; $Domain="foo.net"')
interact_2965(c, 'http://foo.net/',
'ni="ni"; domain=".net"; Version="1"')
self.assertEqual(len(c), 2)
interact_2965(c, 'http://foo.co.uk/',
'nasty=trick; domain=.co.uk; Version="1"')
self.assertEqual(len(c), 3)
def test_domain_allow(self):
c = CookieJar(policy=DefaultCookiePolicy(blocked_domains=[
'acme.com'], allowed_domains=['www.acme.com']))
req = urllib.request.Request('http://acme.com/')
headers = ['Set-Cookie: CUSTOMER=WILE_E_COYOTE; path=/']
res = FakeResponse(headers, 'http://acme.com/')
c.extract_cookies(res, req)
self.assertEqual(len(c), 0)
req = urllib.request.Request('http://www.acme.com/')
res = FakeResponse(headers, 'http://www.acme.com/')
c.extract_cookies(res, req)
self.assertEqual(len(c), 1)
req = urllib.request.Request('http://www.coyote.com/')
res = FakeResponse(headers, 'http://www.coyote.com/')
c.extract_cookies(res, req)
self.assertEqual(len(c), 1)
req = urllib.request.Request('http://www.coyote.com/')
res = FakeResponse(headers, 'http://www.coyote.com/')
cookies = c.make_cookies(res, req)
c.set_cookie(cookies[0])
self.assertEqual(len(c), 2)
c.add_cookie_header(req)
self.assertFalse(req.has_header('Cookie'))
def test_domain_block(self):
pol = DefaultCookiePolicy(rfc2965=True, blocked_domains=['.acme.com'])
c = CookieJar(policy=pol)
headers = ['Set-Cookie: CUSTOMER=WILE_E_COYOTE; path=/']
req = urllib.request.Request('http://www.acme.com/')
res = FakeResponse(headers, 'http://www.acme.com/')
c.extract_cookies(res, req)
self.assertEqual(len(c), 0)
p = pol.set_blocked_domains(['acme.com'])
c.extract_cookies(res, req)
self.assertEqual(len(c), 1)
c.clear()
req = urllib.request.Request('http://www.roadrunner.net/')
res = FakeResponse(headers, 'http://www.roadrunner.net/')
c.extract_cookies(res, req)
self.assertEqual(len(c), 1)
req = urllib.request.Request('http://www.roadrunner.net/')
c.add_cookie_header(req)
self.assertTrue(req.has_header('Cookie'))
self.assertTrue(req.has_header('Cookie2'))
c.clear()
pol.set_blocked_domains(['.acme.com'])
c.extract_cookies(res, req)
self.assertEqual(len(c), 1)
req = urllib.request.Request('http://www.acme.com/')
res = FakeResponse(headers, 'http://www.acme.com/')
cookies = c.make_cookies(res, req)
c.set_cookie(cookies[0])
self.assertEqual(len(c), 2)
c.add_cookie_header(req)
self.assertFalse(req.has_header('Cookie'))
def test_secure(self):
for ns in (True, False):
for whitespace in (' ', ''):
c = CookieJar()
if ns:
pol = DefaultCookiePolicy(rfc2965=False)
int = interact_netscape
vs = ''
else:
pol = DefaultCookiePolicy(rfc2965=True)
int = interact_2965
vs = '; Version=1'
c.set_policy(pol)
url = 'http://www.acme.com/'
int(c, url, 'foo1=bar%s%s' % (vs, whitespace))
int(c, url, 'foo2=bar%s; secure%s' % (vs, whitespace))
self.assertFalse(c._cookies['www.acme.com']['/']['foo1'].
secure, 'non-secure cookie registered secure')
self.assertTrue(c._cookies['www.acme.com']['/']['foo2'].
secure, 'secure cookie registered non-secure')
def test_quote_cookie_value(self):
c = CookieJar(policy=DefaultCookiePolicy(rfc2965=True))
interact_2965(c, 'http://www.acme.com/', 'foo=\\b"a"r; Version=1')
h = interact_2965(c, 'http://www.acme.com/')
self.assertEqual(h, '$Version=1; foo=\\\\b\\"a\\"r')
def test_missing_final_slash(self):
url = 'http://www.acme.com'
c = CookieJar(DefaultCookiePolicy(rfc2965=True))
interact_2965(c, url, 'foo=bar; Version=1')
req = urllib.request.Request(url)
self.assertEqual(len(c), 1)
c.add_cookie_header(req)
self.assertTrue(req.has_header('Cookie'))
def test_domain_mirror(self):
pol = DefaultCookiePolicy(rfc2965=True)
c = CookieJar(pol)
url = 'http://foo.bar.com/'
interact_2965(c, url, 'spam=eggs; Version=1')
h = interact_2965(c, url)
self.assertNotIn('Domain', h,
'absent domain returned with domain present')
c = CookieJar(pol)
url = 'http://foo.bar.com/'
interact_2965(c, url, 'spam=eggs; Version=1; Domain=.bar.com')
h = interact_2965(c, url)
self.assertIn('$Domain=".bar.com"', h, 'domain not returned')
c = CookieJar(pol)
url = 'http://foo.bar.com/'
interact_2965(c, url, 'spam=eggs; Version=1; Domain=bar.com')
h = interact_2965(c, url)
self.assertIn('$Domain="bar.com"', h, 'domain not returned')
def test_path_mirror(self):
pol = DefaultCookiePolicy(rfc2965=True)
c = CookieJar(pol)
url = 'http://foo.bar.com/'
interact_2965(c, url, 'spam=eggs; Version=1')
h = interact_2965(c, url)
self.assertNotIn('Path', h, 'absent path returned with path present')
c = CookieJar(pol)
url = 'http://foo.bar.com/'
interact_2965(c, url, 'spam=eggs; Version=1; Path=/')
h = interact_2965(c, url)
self.assertIn('$Path="/"', h, 'path not returned')
def test_port_mirror(self):
pol = DefaultCookiePolicy(rfc2965=True)
c = CookieJar(pol)
url = 'http://foo.bar.com/'
interact_2965(c, url, 'spam=eggs; Version=1')
h = interact_2965(c, url)
self.assertNotIn('Port', h, 'absent port returned with port present')
c = CookieJar(pol)
url = 'http://foo.bar.com/'
interact_2965(c, url, 'spam=eggs; Version=1; Port')
h = interact_2965(c, url)
self.assertRegex(h, '\\$Port([^=]|$)',
'port with no value not returned with no value')
c = CookieJar(pol)
url = 'http://foo.bar.com/'
interact_2965(c, url, 'spam=eggs; Version=1; Port="80"')
h = interact_2965(c, url)
self.assertIn('$Port="80"', h,
'port with single value not returned with single value')
c = CookieJar(pol)
url = 'http://foo.bar.com/'
interact_2965(c, url, 'spam=eggs; Version=1; Port="80,8080"')
h = interact_2965(c, url)
self.assertIn('$Port="80,8080"', h,
'port with multiple values not returned with multiple values')
def test_no_return_comment(self):
c = CookieJar(DefaultCookiePolicy(rfc2965=True))
url = 'http://foo.bar.com/'
interact_2965(c, url,
'spam=eggs; Version=1; Comment="does anybody read these?"; CommentURL="http://foo.bar.net/comment.html"'
)
h = interact_2965(c, url)
self.assertNotIn('Comment', h,
'Comment or CommentURL cookie-attributes returned to server')
def test_Cookie_iterator(self):
cs = CookieJar(DefaultCookiePolicy(rfc2965=True))
interact_2965(cs, 'http://blah.spam.org/',
'foo=eggs; Version=1; Comment="does anybody read these?"; CommentURL="http://foo.bar.net/comment.html"'
)
interact_netscape(cs, 'http://www.acme.com/blah/', 'spam=bar; secure')
interact_2965(cs, 'http://www.acme.com/blah/',
'foo=bar; secure; Version=1')
interact_2965(cs, 'http://www.acme.com/blah/',
'foo=bar; path=/; Version=1')
interact_2965(cs, 'http://www.sol.no',
'bang=wallop; version=1; domain=".sol.no"; port="90,100, 80,8080"; max-age=100; Comment = "Just kidding! (\\"|\\\\\\\\) "'
)
versions = [1, 1, 1, 0, 1]
names = ['bang', 'foo', 'foo', 'spam', 'foo']
domains = ['.sol.no', 'blah.spam.org', 'www.acme.com',
'www.acme.com', 'www.acme.com']
paths = ['/', '/', '/', '/blah', '/blah/']
for i in range(4):
i = 0
for c in cs:
self.assertIsInstance(c, Cookie)
self.assertEqual(c.version, versions[i])
self.assertEqual(c.name, names[i])
self.assertEqual(c.domain, domains[i])
self.assertEqual(c.path, paths[i])
i = i + 1
def test_parse_ns_headers(self):
self.assertEqual(parse_ns_headers(['foo=bar; path=/; domain']), [[(
'foo', 'bar'), ('path', '/'), ('domain', None), ('version', '0')]])
self.assertEqual(parse_ns_headers([
'foo=bar; expires=Foo Bar 12 33:22:11 2000']), [[('foo', 'bar'),
('expires', None), ('version', '0')]])
self.assertEqual(parse_ns_headers(['foo']), [[('foo', None), (
'version', '0')]])
self.assertEqual(parse_ns_headers(['foo=bar; expires']), [[('foo',
'bar'), ('expires', None), ('version', '0')]])
self.assertEqual(parse_ns_headers(['foo=bar; version']), [[('foo',
'bar'), ('version', None)]])
self.assertEqual(parse_ns_headers(['']), [])
def test_bad_cookie_header(self):
def cookiejar_from_cookie_headers(headers):
c = CookieJar()
req = urllib.request.Request('http://www.example.com/')
r = FakeResponse(headers, 'http://www.example.com/')
c.extract_cookies(r, req)
return c
future = time2netscape(time.time() + 3600)
for headers in [['Set-Cookie: '], ['Set-Cookie2: '], [
'Set-Cookie2: a=foo; path=/; Version=1; domain'], [
'Set-Cookie: b=foo; max-age=oops'], [
'Set-Cookie: b=foo; version=spam'], ['Set-Cookie:; Expires=%s' %
future]]:
c = cookiejar_from_cookie_headers(headers)
self.assertEqual(len(c), 0)
headers = ['Set-Cookie: c=foo; expires=Foo Bar 12 33:22:11 2000']
c = cookiejar_from_cookie_headers(headers)
cookie = c._cookies['www.example.com']['/']['c']
self.assertIsNone(cookie.expires)
class LWPCookieTests(unittest.TestCase):
def test_netscape_example_1(self):
year_plus_one = time.localtime()[0] + 1
headers = []
c = CookieJar(DefaultCookiePolicy(rfc2965=True))
req = urllib.request.Request('http://www.acme.com:80/', headers={
'Host': 'www.acme.com:80'})
headers.append(
'Set-Cookie: CUSTOMER=WILE_E_COYOTE; path=/ ; expires=Wednesday, 09-Nov-%d 23:12:40 GMT'
% year_plus_one)
res = FakeResponse(headers, 'http://www.acme.com/')
c.extract_cookies(res, req)
req = urllib.request.Request('http://www.acme.com/')
c.add_cookie_header(req)
self.assertEqual(req.get_header('Cookie'), 'CUSTOMER=WILE_E_COYOTE')
self.assertEqual(req.get_header('Cookie2'), '$Version="1"')
headers.append('Set-Cookie: PART_NUMBER=ROCKET_LAUNCHER_0001; path=/')
res = FakeResponse(headers, 'http://www.acme.com/')
c.extract_cookies(res, req)
req = urllib.request.Request('http://www.acme.com/foo/bar')
c.add_cookie_header(req)
h = req.get_header('Cookie')
self.assertIn('PART_NUMBER=ROCKET_LAUNCHER_0001', h)
self.assertIn('CUSTOMER=WILE_E_COYOTE', h)
headers.append('Set-Cookie: SHIPPING=FEDEX; path=/foo')
res = FakeResponse(headers, 'http://www.acme.com')
c.extract_cookies(res, req)
req = urllib.request.Request('http://www.acme.com/')
c.add_cookie_header(req)
h = req.get_header('Cookie')
self.assertIn('PART_NUMBER=ROCKET_LAUNCHER_0001', h)
self.assertIn('CUSTOMER=WILE_E_COYOTE', h)
self.assertNotIn('SHIPPING=FEDEX', h)
req = urllib.request.Request('http://www.acme.com/foo/')
c.add_cookie_header(req)
h = req.get_header('Cookie')
self.assertIn('PART_NUMBER=ROCKET_LAUNCHER_0001', h)
self.assertIn('CUSTOMER=WILE_E_COYOTE', h)
self.assertTrue(h.startswith('SHIPPING=FEDEX;'))
def test_netscape_example_2(self):
c = CookieJar()
headers = []
req = urllib.request.Request('http://www.acme.com/')
headers.append('Set-Cookie: PART_NUMBER=ROCKET_LAUNCHER_0001; path=/')
res = FakeResponse(headers, 'http://www.acme.com/')
c.extract_cookies(res, req)
req = urllib.request.Request('http://www.acme.com/')
c.add_cookie_header(req)
self.assertEqual(req.get_header('Cookie'),
'PART_NUMBER=ROCKET_LAUNCHER_0001')
headers.append('Set-Cookie: PART_NUMBER=RIDING_ROCKET_0023; path=/ammo'
)
res = FakeResponse(headers, 'http://www.acme.com/')
c.extract_cookies(res, req)
req = urllib.request.Request('http://www.acme.com/ammo')
c.add_cookie_header(req)
self.assertRegex(req.get_header('Cookie'),
'PART_NUMBER=RIDING_ROCKET_0023;\\s*PART_NUMBER=ROCKET_LAUNCHER_0001'
)
def test_ietf_example_1(self):
c = CookieJar(DefaultCookiePolicy(rfc2965=True))
cookie = interact_2965(c, 'http://www.acme.com/acme/login',
'Customer="WILE_E_COYOTE"; Version="1"; Path="/acme"')
self.assertFalse(cookie)
cookie = interact_2965(c, 'http://www.acme.com/acme/pickitem',
'Part_Number="Rocket_Launcher_0001"; Version="1"; Path="/acme"')
self.assertRegex(cookie,
'^\\$Version="?1"?; Customer="?WILE_E_COYOTE"?; \\$Path="/acme"$')
cookie = interact_2965(c, 'http://www.acme.com/acme/shipping',
'Shipping="FedEx"; Version="1"; Path="/acme"')
self.assertRegex(cookie, '^\\$Version="?1"?;')
self.assertRegex(cookie,
'Part_Number="?Rocket_Launcher_0001"?;\\s*\\$Path="\\/acme"')
self.assertRegex(cookie,
'Customer="?WILE_E_COYOTE"?;\\s*\\$Path="\\/acme"')
cookie = interact_2965(c, 'http://www.acme.com/acme/process')
self.assertRegex(cookie, 'Shipping="?FedEx"?;\\s*\\$Path="\\/acme"')
self.assertIn('WILE_E_COYOTE', cookie)
def test_ietf_example_2(self):
c = CookieJar(DefaultCookiePolicy(rfc2965=True))
interact_2965(c, 'http://www.acme.com/acme/ammo/specific',
'Part_Number="Rocket_Launcher_0001"; Version="1"; Path="/acme"',
'Part_Number="Riding_Rocket_0023"; Version="1"; Path="/acme/ammo"')
cookie = interact_2965(c, 'http://www.acme.com/acme/ammo/...')
self.assertRegex(cookie, 'Riding_Rocket_0023.*Rocket_Launcher_0001')
cookie = interact_2965(c, 'http://www.acme.com/acme/parts/')
self.assertIn('Rocket_Launcher_0001', cookie)
self.assertNotIn('Riding_Rocket_0023', cookie)
def test_rejection(self):
pol = DefaultCookiePolicy(rfc2965=True)
c = LWPCookieJar(policy=pol)
max_age = 'max-age=3600'
cookie = interact_2965(c, 'http://www.acme.com',
'foo=bar; domain=".com"; version=1')
self.assertFalse(c)
cookie = interact_2965(c, 'http://www.acme.com',
'ping=pong; domain="acme.com"; version=1')
self.assertEqual(len(c), 1)
cookie = interact_2965(c, 'http://www.a.acme.com',
'whiz=bang; domain="acme.com"; version=1')
self.assertEqual(len(c), 1)
cookie = interact_2965(c, 'http://www.a.acme.com',
'wow=flutter; domain=".a.acme.com"; version=1')
self.assertEqual(len(c), 2)
cookie = interact_2965(c, 'http://125.125.125.125',
'zzzz=ping; domain="125.125.125"; version=1')
self.assertEqual(len(c), 2)
cookie = interact_2965(c, 'http://www.sol.no',
'blah=rhubarb; domain=".sol.no"; path="/foo"; version=1')
self.assertEqual(len(c), 2)
cookie = interact_2965(c, 'http://www.sol.no/foo/bar',
'bing=bong; domain=".sol.no"; path="/foo"; version=1')
self.assertEqual(len(c), 3)
cookie = interact_2965(c, 'http://www.sol.no',
'whiz=ffft; domain=".sol.no"; port="90,100"; version=1')
self.assertEqual(len(c), 3)
cookie = interact_2965(c, 'http://www.sol.no',
'bang=wallop; version=1; domain=".sol.no"; port="90,100, 80,8080"; max-age=100; Comment = "Just kidding! (\\"|\\\\\\\\) "'
)
self.assertEqual(len(c), 4)
cookie = interact_2965(c, 'http://www.sol.no',
'foo9=bar; version=1; domain=".sol.no"; port; max-age=100;')
self.assertEqual(len(c), 5)
cookie = interact_2965(c, 'http://www.sol.no/<oo/',
'foo8=bar; version=1; path="/%3coo"')
self.assertEqual(len(c), 6)
filename = test.support.TESTFN
try:
c.save(filename, ignore_discard=True)
old = repr(c)
c = LWPCookieJar(policy=pol)
c.load(filename, ignore_discard=True)
finally:
try:
os.unlink(filename)
except OSError:
pass
self.assertEqual(old, repr(c))
def test_url_encoding(self):
c = CookieJar(DefaultCookiePolicy(rfc2965=True))
interact_2965(c,
'http://www.acme.com/foo%2f%25/%3c%3c%0Anew%C3%A5/%C3%A5',
'foo = bar; version = 1')
cookie = interact_2965(c,
'http://www.acme.com/foo%2f%25/<<%0anewå/æøå',
'bar=baz; path="/foo/"; version=1')
version_re = re.compile('^\\$version=\\"?1\\"?', re.I)
self.assertIn('foo=bar', cookie)
self.assertRegex(cookie, version_re)
cookie = interact_2965(c, 'http://www.acme.com/foo/%25/<<%0anewå/æøå')
self.assertFalse(cookie)
cookie = interact_2965(c, 'http://www.acme.com/ü')
def test_mozilla(self):
year_plus_one = time.localtime()[0] + 1
filename = test.support.TESTFN
c = MozillaCookieJar(filename, policy=DefaultCookiePolicy(rfc2965=True)
)
interact_2965(c, 'http://www.acme.com/',
'foo1=bar; max-age=100; Version=1')
interact_2965(c, 'http://www.acme.com/',
'foo2=bar; port="80"; max-age=100; Discard; Version=1')
interact_2965(c, 'http://www.acme.com/', 'foo3=bar; secure; Version=1')
expires = 'expires=09-Nov-%d 23:12:40 GMT' % (year_plus_one,)
interact_netscape(c, 'http://www.foo.com/', 'fooa=bar; %s' % expires)
interact_netscape(c, 'http://www.foo.com/',
'foob=bar; Domain=.foo.com; %s' % expires)
interact_netscape(c, 'http://www.foo.com/',
'fooc=bar; Domain=www.foo.com; %s' % expires)
def save_and_restore(cj, ignore_discard):
try:
cj.save(ignore_discard=ignore_discard)
new_c = MozillaCookieJar(filename, DefaultCookiePolicy(
rfc2965=True))
new_c.load(ignore_discard=ignore_discard)
finally:
try:
os.unlink(filename)
except OSError:
pass
return new_c
new_c = save_and_restore(c, True)
self.assertEqual(len(new_c), 6)
self.assertIn("name='foo1', value='bar'", repr(new_c))
new_c = save_and_restore(c, False)
self.assertEqual(len(new_c), 4)
self.assertIn("name='foo1', value='bar'", repr(new_c))
def test_netscape_misc(self):
c = CookieJar()
headers = []
req = urllib.request.Request('http://foo.bar.acme.com/foo')
headers.append('Set-Cookie: Customer=WILE_E_COYOTE; domain=.acme.com')
res = FakeResponse(headers, 'http://www.acme.com/foo')
c.extract_cookies(res, req)
headers.append('Set-Cookie: PART_NUMBER=3,4; domain=foo.bar.acme.com')
res = FakeResponse(headers, 'http://www.acme.com/foo')
c.extract_cookies(res, req)
req = urllib.request.Request('http://foo.bar.acme.com/foo')
c.add_cookie_header(req)
self.assertIn('PART_NUMBER=3,4', req.get_header('Cookie'))
self.assertIn('Customer=WILE_E_COYOTE', req.get_header('Cookie'))
def test_intranet_domains_2965(self):
c = CookieJar(DefaultCookiePolicy(rfc2965=True))
interact_2965(c, 'http://example/',
'foo1=bar; PORT; Discard; Version=1;')
cookie = interact_2965(c, 'http://example/',
'foo2=bar; domain=".local"; Version=1')
self.assertIn('foo1=bar', cookie)
interact_2965(c, 'http://example/', 'foo3=bar; Version=1')
cookie = interact_2965(c, 'http://example/')
self.assertIn('foo2=bar', cookie)
self.assertEqual(len(c), 3)
def test_intranet_domains_ns(self):
c = CookieJar(DefaultCookiePolicy(rfc2965=False))
interact_netscape(c, 'http://example/', 'foo1=bar')
cookie = interact_netscape(c, 'http://example/',
'foo2=bar; domain=.local')
self.assertEqual(len(c), 2)
self.assertIn('foo1=bar', cookie)
cookie = interact_netscape(c, 'http://example/')
self.assertIn('foo2=bar', cookie)
self.assertEqual(len(c), 2)
def test_empty_path(self):
c = CookieJar(DefaultCookiePolicy(rfc2965=True))
headers = []
req = urllib.request.Request('http://www.ants.com/')
headers.append('Set-Cookie: JSESSIONID=ABCDERANDOM123; Path=')
res = FakeResponse(headers, 'http://www.ants.com/')
c.extract_cookies(res, req)
req = urllib.request.Request('http://www.ants.com/')
c.add_cookie_header(req)
self.assertEqual(req.get_header('Cookie'), 'JSESSIONID=ABCDERANDOM123')
self.assertEqual(req.get_header('Cookie2'), '$Version="1"')
req = urllib.request.Request('http://www.ants.com:8080')
c.add_cookie_header(req)
self.assertEqual(req.get_header('Cookie'), 'JSESSIONID=ABCDERANDOM123')
self.assertEqual(req.get_header('Cookie2'), '$Version="1"')
def test_session_cookies(self):
year_plus_one = time.localtime()[0] + 1
req = urllib.request.Request('http://www.perlmeister.com/scripts')
headers = []
headers.append('Set-Cookie: s1=session;Path=/scripts')
headers.append(
'Set-Cookie: p1=perm; Domain=.perlmeister.com;Path=/;expires=Fri, 02-Feb-%d 23:24:20 GMT'
% year_plus_one)
headers.append(
'Set-Cookie: p2=perm;Path=/;expires=Fri, 02-Feb-%d 23:24:20 GMT' %
year_plus_one)
headers.append(
'Set-Cookie: s2=session;Path=/scripts;Domain=.perlmeister.com')
headers.append('Set-Cookie2: s3=session;Version=1;Discard;Path="/"')
res = FakeResponse(headers, 'http://www.perlmeister.com/scripts')
c = CookieJar()
c.extract_cookies(res, req)
counter = {'session_after': 0, 'perm_after': 0, 'session_before': 0,
'perm_before': 0}
for cookie in c:
key = '%s_before' % cookie.value
counter[key] = counter[key] + 1
c.clear_session_cookies()
for cookie in c:
key = '%s_after' % cookie.value
counter[key] = counter[key] + 1
self.assertEqual(counter['perm_after'], counter['perm_before'])
self.assertEqual(counter['session_after'], 0)
self.assertNotEqual(counter['session_before'], 0)
def test_main(verbose=None):
test.support.run_unittest(DateTimeTests, HeaderTests, CookieTests,
FileCookieJarTests, LWPCookieTests)
if __name__ == '__main__':
test_main(verbose=True)
| 0.421076 | 0.272164 |
import pandas as pd
import numpy as np
import keras
from keras.models import Sequential
from keras.layers import Dense
from sklearn.model_selection import train_test_split  # sklearn.cross_validation was removed in newer scikit-learn
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
from keras.utils import np_utils
import pickle
data = pd.read_csv("C:/project/Manual-Data/Training.csv")
data_alpha=pd.read_csv("C:/project/Manual-Data/Training.csv")
data_alpha=data_alpha.drop('prognosis', axis=1)
data_alpha= data_alpha.reindex(sorted(data_alpha.columns), axis=1)
x=data_alpha.iloc[:,:133]
y=data.iloc[:,132:]
num_classes=41
y1=np.ravel(y)
symptom=data_alpha.columns
disease=data['prognosis'].unique()
label_prognosis=LabelEncoder()
y_integer_encoded=label_prognosis.fit_transform(y1)
y_integer_encoded = y_integer_encoded.reshape(len(y_integer_encoded), 1)
y_new = keras.utils.to_categorical(y_integer_encoded, num_classes)
dummy_y = np_utils.to_categorical(y_integer_encoded)
x_train, x_test, y_train, y_test = train_test_split(x, y_new, test_size=0.33, random_state=42)
model=Sequential()
model.add(Dense(input_dim=132,units=132,kernel_initializer='uniform', activation='relu'))
model.add(Dense(units=153, kernel_initializer='uniform', activation='relu'))
model.add(Dense(units=153, kernel_initializer='uniform', activation='relu'))
model.add(Dense(units=num_classes, activation='softmax'))
model.summary()
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
model.fit(x_train,y_train, batch_size=100, epochs=10)
predict=model.predict(x_test)
from numpy import argmax
from sklearn import preprocessing
lb = preprocessing.MultiLabelBinarizer()
lb.fit([symptom])
lb1=lb.transform([('watering_from_eyes','chills','shivering','continuous_sneezing',)])
#lb5=[('vomiting','watering_from_eyes','weakness_in_limbs','weakness_of_one_body_side','weight_gain','weight_loss','yellow_crust_ooze','yellow_urine','yellowing_of_eyes','yellowish_skin')]
lb4=np.array(lb1)
lb2=lb4  # lb1/lb4 is already the binarized symptom vector; transforming it again would yield all zeros
print(lb2)
lb1=lb.transform([('abdominal_pain','abnormal_menstruation')])
symptoms=lb.classes_
symptoms=np.array(symptoms)
predict_encoded=model.predict(lb2)
predict_encoded
inverted = label_prognosis.inverse_transform(argmax(predict_encoded[:,0:42]))
print(inverted)
percent=argmax(predict_encoded[:,0:42])
inverted_new = label_prognosis.inverse_transform(percent)
symptom
with open('model_pickle','wb') as f:
pickle.dump(model,f)
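# Editorial sketch (not in the original script): reloading the pickled model for a later
# prediction would look roughly like the following, assuming the same LabelEncoder /
# MultiLabelBinarizer objects (label_prognosis, lb) are still available in the session:
#
#     with open('model_pickle', 'rb') as f:
#         loaded_model = pickle.load(f)
#     encoded = lb.transform([('chills', 'shivering')])
#     pred = loaded_model.predict(encoded)
#     print(label_prognosis.inverse_transform([argmax(pred)]))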
|
final_running_model.py
|
import pandas as pd
import numpy as np
import keras
from keras.models import Sequential
from keras.layers import Dense
from sklearn.model_selection import train_test_split  # sklearn.cross_validation was removed in newer scikit-learn
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
from keras.utils import np_utils
import pickle
data = pd.read_csv("C:/project/Manual-Data/Training.csv")
data_alpha=pd.read_csv("C:/project/Manual-Data/Training.csv")
data_alpha=data_alpha.drop('prognosis', axis=1)
data_alpha= data_alpha.reindex(sorted(data_alpha.columns), axis=1)
x=data_alpha.iloc[:,:133]
y=data.iloc[:,132:]
num_classes=41
y1=np.ravel(y)
symptom=data_alpha.columns
disease=data['prognosis'].unique()
label_prognosis=LabelEncoder()
y_integer_encoded=label_prognosis.fit_transform(y1)
y_integer_encoded = y_integer_encoded.reshape(len(y_integer_encoded), 1)
y_new = keras.utils.to_categorical(y_integer_encoded, num_classes)
dummy_y = np_utils.to_categorical(y_integer_encoded)
x_train, x_test, y_train, y_test = train_test_split(x, y_new, test_size=0.33, random_state=42)
model=Sequential()
model.add(Dense(input_dim=132,units=132,kernel_initializer='uniform', activation='relu'))
model.add(Dense(units=153, kernel_initializer='uniform', activation='relu'))
model.add(Dense(units=153, kernel_initializer='uniform', activation='relu'))
model.add(Dense(units=num_classes, activation='softmax'))
model.summary()
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
model.fit(x_train,y_train, batch_size=100, epochs=10)
predict=model.predict(x_test)
from numpy import argmax
from sklearn import preprocessing
lb = preprocessing.MultiLabelBinarizer()
lb.fit([symptom])
lb1=lb.transform([('watering_from_eyes','chills','shivering','continuous_sneezing',)])
#lb5=[('vomiting','watering_from_eyes','weakness_in_limbs','weakness_of_one_body_side','weight_gain','weight_loss','yellow_crust_ooze','yellow_urine','yellowing_of_eyes','yellowish_skin')]
lb4=np.array(lb1)
lb2=lb4  # lb1/lb4 is already the binarized symptom vector; transforming it again would yield all zeros
print(lb2)
lb1=lb.transform([('abdominal_pain','abnormal_menstruation')])
symptoms=lb.classes_
symptoms=np.array(symptoms)
predict_encoded=model.predict(lb2)
predict_encoded
inverted = label_prognosis.inverse_transform(argmax(predict_encoded[:,0:42]))
print(inverted)
percent=argmax(predict_encoded[:,0:42])
inverted_new = label_prognosis.inverse_transform(percent)
symptom
with open('model_pickle','wb') as f:
pickle.dump(model,f)
| 0.531453 | 0.181155 |
from datetime import datetime, timedelta
import pandas as pd
class TuDataModel:
"""
    Class Terminal Unit. Never modify any member variable directly; everything is managed through the functions below.
"""
def __init__(self, name):
"""
        Constructor of the Tu as a holder for all the TG Tu data; in the function descriptions we refer to it as Tu.
        Please never change parameters directly; use the functions, setters and getters instead.
:param name: str, name of the instanced unit
"""
# name of the widget
self.name = name
# beginning of time from 1st connection
self.first_connection = -1
# counter for disconnections
self.disc_counter = 0
# management of disconnections event
self.disconnection_start = None # start of event
self.disconnection_end = None # end of event
self.connection_state = False # state of the connection
self.disconnection_last_event_time = timedelta(seconds=0) # the total time of last disconnection
# total disconnection time
self.disconnection_total_time = timedelta(seconds=0)
# total actual availability
self.availability = 100.0
# Variables to capture and show
self.local_sector = None
self.rssi = None
self.snr = None
self.rx_mcs = None
self.tx_mcs = None
self.rx_speed_num = None
self.tx_speed_num = None
self.rx_mcs_dr = None
self.tx_mcs_dr = None
self.tx_power_index = None
# disconnections dataframe to have the list
# we need to append with a pd.Series(data_in_a_dict, name=datetime.now()/or time)
self.disconnections = pd.DataFrame(columns=['Time End', 'Disconnection #', 'Downtime', 'Availability'])
self.parameters_df = pd.DataFrame(columns=['Power Index', 'RSSI', 'SNR', 'MCS-RX', 'MCS-TX', 'MCS-DR-RX',
'MCS-DR-TX', 'Local Sector'])
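        # e.g. (illustrative only, matching the pattern used in create_end below):
        #   row = pd.Series({'Time End': None, 'Disconnection #': 1,
        #                    'Downtime': '0:00:05', 'Availability': 99.9},
        #                   name=datetime.now())
        #   self.disconnections = self.disconnections.append(row)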
# setters and getters for the internal variables
def get_local_sector(self):
"""
Returns the local sector antenna index of the connected unit
:return: int, index of connected unit
"""
return self.local_sector
def set_local_sector(self, ls):
"""
Sets the local sector index
:param ls: int, local sector index of connected unit
:return: None
"""
self.local_sector = ls
def get_rssi(self):
"""
Gets the rssi value of the connected unit
:return: int, rssi value of the connected unit
"""
return self.rssi
def set_rssi(self, rssi):
"""
Sets the rssi
:param rssi: int, rssi to set to the object
:return: None
"""
self.rssi = rssi
def get_snr(self):
"""
Gets the CINR of the connected unit
:return: int, CINR of the connected unit
"""
return self.snr
def set_snr(self, snr):
"""
Sets the CINR value of the connected unit
:param snr: int, CINR value
:return: None
"""
self.snr = snr
def get_rxmcs(self):
"""
Gets the Rx Modulation Coding Scheme of the connected Unit
:return: int, Rx MCS value
"""
return self.rx_mcs
def set_rxmcs(self, rxmcs):
"""
Sets the Rx Modulation Coding Scheme of the connected Unit
:param rxmcs: int, Rx MCS value
:return: None
"""
self.rx_mcs = rxmcs
def get_txmcs(self):
"""
Gets the Tx Modulation Coding Scheme of the connected Unit
:return: int, Tx MCS value
"""
return self.tx_mcs
def set_txmcs(self, txmcs):
"""
Sets the Tx Modulation Coding Scheme of the connected Unit
:param txmcs: int, Tx MCS value
:return: None
"""
self.tx_mcs = txmcs
def get_rxspeednum(self):
"""
Gets the Rx capacity currently going in the Tu in Mbps
:return: float, Rx In capacity in Mbps
"""
return self.rx_speed_num
def set_rxspeednum(self, rxspeednum):
"""
Sets the Rx capacity currently going in the Tu in Mbps
:param rxspeednum: float, Rx In capacity in Mbps
:return: None
"""
self.rx_speed_num = rxspeednum
def get_txspeednum(self):
"""
Gets the Tx capacity currently going in the Tu in Mbps
:return: float, Tx In capacity in Mbps
"""
return self.tx_speed_num
def set_txspeednum(self, txspeednum):
"""
Sets the Tx capacity currently going in the Tu in Mbps
:param txspeednum: float, Rx In capacity in Mbps
:return: None
"""
self.tx_speed_num = txspeednum
def get_rxmcsdr(self):
"""
Gets the Rx Over the Air Data Rate
:return: int, Rx OTA DR
"""
return self.rx_mcs_dr
def set_rxmcsdr(self):
"""
        Sets the Rx Over the Air Data Rate, based on the current Rx MCS
:return: None
"""
value_rx = self.get_rxmcs()
if value_rx == '0':
self.rx_mcs_dr = '0'
elif value_rx == '2':
self.rx_mcs_dr = '620'
elif value_rx == '3':
self.rx_mcs_dr = '780'
elif value_rx == '4':
self.rx_mcs_dr = '950'
elif value_rx == '7':
self.rx_mcs_dr = '1580'
elif value_rx == '8':
self.rx_mcs_dr = '1900'
elif value_rx == '9':
self.rx_mcs_dr = '2050'
elif value_rx == '10':
self.rx_mcs_dr = '2500'
elif value_rx == '11':
self.rx_mcs_dr = '3150'
elif value_rx == '12':
self.rx_mcs_dr = '3800'
else:
self.rx_mcs_dr = '0'
def get_txmcsdr(self):
"""
Gets the Tx Over the Air Data Rate
:return: int, Tx OTA DR
"""
return self.tx_mcs_dr
def set_txmcsdr(self):
"""
        Sets the Tx Over the Air Data Rate, based on the current Tx MCS
:return: None
"""
value_tx = self.get_txmcs()
if value_tx == '0':
self.tx_mcs_dr = '0'
elif value_tx == '2':
self.tx_mcs_dr = '620'
elif value_tx == '3':
self.tx_mcs_dr = '780'
elif value_tx == '4':
self.tx_mcs_dr = '950'
elif value_tx == '7':
self.tx_mcs_dr = '1580'
elif value_tx == '8':
self.tx_mcs_dr = '1900'
elif value_tx == '9':
self.tx_mcs_dr = '2050'
elif value_tx == '10':
self.tx_mcs_dr = '2500'
elif value_tx == '11':
self.tx_mcs_dr = '3150'
elif value_tx == '12':
self.tx_mcs_dr = '3800'
else:
self.tx_mcs_dr = '0'
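    # Editorial note (sketch, not part of the original class): the MCS -> over-the-air
    # data-rate chains in set_rxmcsdr / set_txmcsdr could be collapsed into one lookup
    # table with identical behaviour, e.g.:
    #
    #     MCS_DATA_RATE = {'0': '0', '2': '620', '3': '780', '4': '950', '7': '1580',
    #                      '8': '1900', '9': '2050', '10': '2500', '11': '3150', '12': '3800'}
    #     self.tx_mcs_dr = MCS_DATA_RATE.get(self.get_txmcs(), '0')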
def get_power_index(self):
"""
Gets the Power Index
:return: int, Power Index
"""
return self.tx_power_index
def set_power_index(self, power_index_):
"""
Sets the Power Index
:return: int, Power Index
"""
self.tx_power_index = power_index_
def get_availability(self):
"""
Gets the Availability
:return: float, calculated availability value
"""
return self.availability
def get_disconnection_counter(self):
return self.disc_counter
def get_disconnection_ldt(self):
return self.disconnection_last_event_time
def get_disconnection_lds(self):
return self.disconnection_start
def get_disconnection_tdt(self):
return self.disconnection_total_time
def get_connection_status(self):
return self.connection_state
# Automated behaviour of the object for connections and disconnections
def disconnected(self, time_disc):
"""
Function that sets the start of a disconnection. It will get a datetime time
:param time_disc: datetime, will set the time
:return: None
"""
if self.connection_state: # the Tu was connected and we will disconnect it
self.connection_state = False # Set the connection flag down
self.disconnection_start = time_disc # record the time of the disconnection time
self.disc_counter = self.increment_disconnections(self.disc_counter) # increment the counter of disconn.
# We update parameters to reflect the disconnection:
self.set_rssi(-100)
self.set_snr(0)
self.set_rxmcs(0)
self.set_txmcs(0)
else: # we enter the disconnected state but the unit was already disconnected
pass
def connected(self, time_con):
if not self.connection_state and self.first_connection != -1: # the Tu was disconnected and it got connected
self.disconnection_end = time_con # record the time the disconnection time ended
# calculate the total time of the disconnection
self.disconnection_last_event_time = self.calculate_disconnection_time(self.disconnection_start, time_con)
# calculate the total time of disconnection
self.disconnection_total_time = self.update_total_time(self.disconnection_total_time,
self.disconnection_last_event_time)
# calculate availability
availability = self.calculate_availability(self.disconnection_total_time, self.first_connection, time_con)
self.availability = availability
# update the disconnections dataframe
# update 1 : update time end
self.disconnections = self.update_record(self.disconnections, self.disconnection_start, 'Time End', time_con)
            # update 2: update duration of the disconnection
self.disconnections = self.update_record(self.disconnections, self.disconnection_start, 'Downtime',
f'{self.disconnection_last_event_time}')
# update 3: update of the availability
self.disconnections = self.update_record(self.disconnections, self.disconnection_start, 'Availability',
availability)
# update 4: update of disconnection#
self.disconnections = self.update_record(self.disconnections, self.disconnection_start, 'Disconnection #',
self.disc_counter)
self.connection_state = True # change flag to connected
elif self.first_connection == -1: # the Tu was first connected
self.first_connection = time_con
self.connection_state = True
else:
# calculate availability
availability = self.calculate_availability(self.disconnection_total_time, self.first_connection, time_con)
self.availability = availability
@staticmethod
def calculate_availability(time_span, start_t, time_t):
"""
        Calculate the availability percentage given total accumulated downtime over the window [start_t, time_t]
        :param time_span: timedelta, total accumulated downtime
:param start_t: datetime, start time to calculate availability
:param time_t: datetime, time to calculate availability
:return: float, availability
"""
if start_t == -1: # the unit was never connected
return 0
return (1 - (time_span / (time_t - start_t))) * 100
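    # Worked example (editorial, assuming the formula above): 90 s of accumulated downtime
    # over a 3600 s observation window gives
    #     (1 - 90/3600) * 100 = 97.5  -> 97.5 % availability.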
@staticmethod
def update_total_time(total_time_counter, update):
"""
Updates the total_time_counter by update
        :param total_time_counter: timedelta, the current accumulated total time
        :param update: timedelta, the value to add to the total time
:return: total_time_counter + update
"""
return total_time_counter + update
@staticmethod
def calculate_disconnection_time(start, end):
"""
Calculates the total time of disconnection end - start
:param start: datetime, start time of the event
:param end: datetime, end time of the event
:return: end - start
"""
return end - start
@staticmethod
def update_record(df, find_variable, field, update_data):
df.loc[find_variable, field] = update_data
return df
def create_end(self, end_time):
end_ = pd.Series(
            {'Time End': end_time, 'Disconnection #': self.disc_counter,
'Downtime': f'{self.disconnection_total_time}',
'Availability': self.availability}, name='Total')
self.disconnections = self.disconnections.append(end_)
# Change type of columns to print in excel to proper value
self.disconnections['Disconnection #'] = self.disconnections['Disconnection #'].astype(int)
self.disconnections['Availability'] = self.disconnections['Availability'].astype(float)
@staticmethod
def increment_disconnections(counter):
"""
Function that will add counter + 1 and return it
:param counter: int, disconnections counter
:return: int, counter + 1
"""
return counter + 1
@staticmethod
def seconds_to_split(time_split):
"""
        Function that takes a timedelta and converts it to days, hours, minutes and seconds. The value is truncated
        to whole days, hours, minutes and seconds. If the input is not a timedelta it raises an exception.
:return: days (int), hours (int), minutes (int), seconds (int)
"""
# validation that the time is timedelta
        if isinstance(time_split, timedelta):
total_seconds = time_split.seconds
days = time_split.days
hours = total_seconds // 3600
total_seconds_wo_hours = total_seconds - (hours * 3600)
minutes = total_seconds_wo_hours // 60
seconds = total_seconds_wo_hours - (minutes * 60)
            return days, hours, minutes, seconds
else:
raise ValueError(f'The input to the function is not timedelta, it is {type(time_split)}')
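    # Example (editorial): TuDataModel.seconds_to_split(timedelta(seconds=3725))
    # returns (0, 1, 2, 5), i.e. 0 days, 1 hour, 2 minutes and 5 seconds.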
def print(self):
print('*****Tu instance*****')
print(f'- name: {self.name}')
print(f'- first connected: {self.first_connection}')
        print(f'-------connection status------------')
print(f'connection: {self.connection_state}')
print(f'-------disconnection info----------')
        print(f'- disconnections: {self.disc_counter}')
print(f'- disconnection event-start: {self.disconnection_start}')
print(f'- disconnection event-end: {self.disconnection_end}')
print(f'- disconnection event time: {self.disconnection_last_event_time}')
print(f'----disconnection total time-------')
print(f'- total time disconnected: {self.disconnection_total_time}')
print(f'-----total availability at the time of print----')
print(f'- availability: {self.calculate_availability(self.disconnection_total_time, self.first_connection, datetime.now())}')
print(f'--------operation parameters-------')
print(f'- local sector: {self.local_sector}')
print(f'- rssi: {self.rssi}')
        print(f'- snr: {self.snr}')
print(f'- rx_mcs: {self.rx_mcs}')
print(f'- tx_mcs: {self.tx_mcs}')
print(f'- rx_speed_num: {self.rx_speed_num}')
print(f'- tx_speed_num: {self.tx_speed_num}')
print(f'- rx_mcs_dr: {self.rx_mcs_dr}')
print(f'- tx_mcs_dr: {self.tx_mcs_dr}')
print(f'- power_index: {self.tx_power_index}')
print(f'------------events dataframe-------------')
print(f'{self.disconnections}')
if __name__ == '__main__':
import time
# options to display the whole dataframe for checks
pd.set_option('display.max_rows', None)
pd.set_option('display.max_columns', None)
pd.set_option('display.width', None)
pd.set_option('display.max_colwidth', -1)
print(f'Creating object: Test Tu No. 1')
test_tu = TuDataModel('Test Tu No. 1')
# test_tu.print()
# Testing connecting for the first time
print(f'Connection the unit to the network for the first time')
start_time = datetime.now()
print(start_time)
test_tu.connected(start_time)
# test_tu.print()
# Have connection 10 seconds
print(f'emulating time for 10 seconds')
time.sleep(10)
# after 10 seconds disconnect
print('dropping for 5 seconds...')
test_tu.disconnected(datetime.now())
# test_tu.print()
time.sleep(5)
print('reconnecting')
test_tu.connected(datetime.now())
print(f'emulating time for 3 seconds')
time.sleep(3)
    print('dropping for 2 seconds...')
test_tu.disconnected(datetime.now())
time.sleep(2)
print('reconnecting emulating time for 120 seconds')
test_tu.connected(datetime.now())
time.sleep(120)
print('printing')
print(datetime.now())
test_tu.print()
|
tu_data_model.py
|
from datetime import datetime, timedelta
import pandas as pd
class TuDataModel:
"""
    Class Terminal Unit. Never modify any member variable directly; everything is managed through the functions below.
"""
def __init__(self, name):
"""
        Constructor of the Tu as a holder for all the TG Tu data; in the function descriptions we refer to it as Tu.
        Please never change parameters directly; use the functions, setters and getters instead.
:param name: str, name of the instanced unit
"""
# name of the widget
self.name = name
# beginning of time from 1st connection
self.first_connection = -1
# counter for disconnections
self.disc_counter = 0
        # management of disconnection events
self.disconnection_start = None # start of event
self.disconnection_end = None # end of event
self.connection_state = False # state of the connection
self.disconnection_last_event_time = timedelta(seconds=0) # the total time of last disconnection
# total disconnection time
self.disconnection_total_time = timedelta(seconds=0)
# total actual availability
self.availability = 100.0
# Variables to capture and show
self.local_sector = None
self.rssi = None
self.snr = None
self.rx_mcs = None
self.tx_mcs = None
self.rx_speed_num = None
self.tx_speed_num = None
self.rx_mcs_dr = None
self.tx_mcs_dr = None
self.tx_power_index = None
# disconnections dataframe to have the list
# we need to append with a pd.Series(data_in_a_dict, name=datetime.now()/or time)
self.disconnections = pd.DataFrame(columns=['Time End', 'Disconnection #', 'Downtime', 'Availability'])
self.parameters_df = pd.DataFrame(columns=['Power Index', 'RSSI', 'SNR', 'MCS-RX', 'MCS-TX', 'MCS-DR-RX',
'MCS-DR-TX', 'Local Sector'])
# setters and getters for the internal variables
def get_local_sector(self):
"""
Returns the local sector antenna index of the connected unit
:return: int, index of connected unit
"""
return self.local_sector
def set_local_sector(self, ls):
"""
Sets the local sector index
:param ls: int, local sector index of connected unit
:return: None
"""
self.local_sector = ls
def get_rssi(self):
"""
Gets the rssi value of the connected unit
:return: int, rssi value of the connected unit
"""
return self.rssi
def set_rssi(self, rssi):
"""
Sets the rssi
:param rssi: int, rssi to set to the object
:return: None
"""
self.rssi = rssi
def get_snr(self):
"""
Gets the CINR of the connected unit
:return: int, CINR of the connected unit
"""
return self.snr
def set_snr(self, snr):
"""
Sets the CINR value of the connected unit
:param snr: int, CINR value
:return: None
"""
self.snr = snr
def get_rxmcs(self):
"""
Gets the Rx Modulation Coding Scheme of the connected Unit
:return: int, Rx MCS value
"""
return self.rx_mcs
def set_rxmcs(self, rxmcs):
"""
Sets the Rx Modulation Coding Scheme of the connected Unit
:param rxmcs: int, Rx MCS value
:return: None
"""
self.rx_mcs = rxmcs
def get_txmcs(self):
"""
Gets the Tx Modulation Coding Scheme of the connected Unit
:return: int, Tx MCS value
"""
return self.tx_mcs
def set_txmcs(self, txmcs):
"""
Sets the Tx Modulation Coding Scheme of the connected Unit
:param txmcs: int, Tx MCS value
:return: None
"""
self.tx_mcs = txmcs
def get_rxspeednum(self):
"""
Gets the Rx capacity currently going in the Tu in Mbps
:return: float, Rx In capacity in Mbps
"""
return self.rx_speed_num
def set_rxspeednum(self, rxspeednum):
"""
Sets the Rx capacity currently going in the Tu in Mbps
:param rxspeednum: float, Rx In capacity in Mbps
:return: None
"""
self.rx_speed_num = rxspeednum
def get_txspeednum(self):
"""
Gets the Tx capacity currently going in the Tu in Mbps
:return: float, Tx In capacity in Mbps
"""
return self.tx_speed_num
def set_txspeednum(self, txspeednum):
"""
Sets the Tx capacity currently going in the Tu in Mbps
        :param txspeednum: float, Tx In capacity in Mbps
:return: None
"""
self.tx_speed_num = txspeednum
def get_rxmcsdr(self):
"""
Gets the Rx Over the Air Data Rate
:return: int, Rx OTA DR
"""
return self.rx_mcs_dr
def set_rxmcsdr(self):
"""
        Sets the Rx Over the Air Data Rate, derived from the current Rx MCS value
:return: None
"""
value_rx = self.get_rxmcs()
if value_rx == '0':
self.rx_mcs_dr = '0'
elif value_rx == '2':
self.rx_mcs_dr = '620'
elif value_rx == '3':
self.rx_mcs_dr = '780'
elif value_rx == '4':
self.rx_mcs_dr = '950'
elif value_rx == '7':
self.rx_mcs_dr = '1580'
elif value_rx == '8':
self.rx_mcs_dr = '1900'
elif value_rx == '9':
self.rx_mcs_dr = '2050'
elif value_rx == '10':
self.rx_mcs_dr = '2500'
elif value_rx == '11':
self.rx_mcs_dr = '3150'
elif value_rx == '12':
self.rx_mcs_dr = '3800'
else:
self.rx_mcs_dr = '0'
def get_txmcsdr(self):
"""
Gets the Tx Over the Air Data Rate
:return: int, Tx OTA DR
"""
return self.tx_mcs_dr
def set_txmcsdr(self):
"""
        Sets the Tx Over the Air Data Rate, derived from the current Tx MCS value
:return: None
"""
value_tx = self.get_txmcs()
if value_tx == '0':
self.tx_mcs_dr = '0'
elif value_tx == '2':
self.tx_mcs_dr = '620'
elif value_tx == '3':
self.tx_mcs_dr = '780'
elif value_tx == '4':
self.tx_mcs_dr = '950'
elif value_tx == '7':
self.tx_mcs_dr = '1580'
elif value_tx == '8':
self.tx_mcs_dr = '1900'
elif value_tx == '9':
self.tx_mcs_dr = '2050'
elif value_tx == '10':
self.tx_mcs_dr = '2500'
elif value_tx == '11':
self.tx_mcs_dr = '3150'
elif value_tx == '12':
self.tx_mcs_dr = '3800'
else:
self.tx_mcs_dr = '0'
def get_power_index(self):
"""
Gets the Power Index
:return: int, Power Index
"""
return self.tx_power_index
def set_power_index(self, power_index_):
"""
Sets the Power Index
        :param power_index_: int, Power Index to set
        :return: None
"""
self.tx_power_index = power_index_
def get_availability(self):
"""
Gets the Availability
:return: float, calculated availability value
"""
return self.availability
def get_disconnection_counter(self):
return self.disc_counter
def get_disconnection_ldt(self):
return self.disconnection_last_event_time
def get_disconnection_lds(self):
return self.disconnection_start
def get_disconnection_tdt(self):
return self.disconnection_total_time
def get_connection_status(self):
return self.connection_state
# Automated behaviour of the object for connections and disconnections
def disconnected(self, time_disc):
"""
Function that sets the start of a disconnection. It will get a datetime time
:param time_disc: datetime, will set the time
:return: None
"""
if self.connection_state: # the Tu was connected and we will disconnect it
self.connection_state = False # Set the connection flag down
self.disconnection_start = time_disc # record the time of the disconnection time
self.disc_counter = self.increment_disconnections(self.disc_counter) # increment the counter of disconn.
# We update parameters to reflect the disconnection:
self.set_rssi(-100)
self.set_snr(0)
self.set_rxmcs(0)
self.set_txmcs(0)
else: # we enter the disconnected state but the unit was already disconnected
pass
def connected(self, time_con):
if not self.connection_state and self.first_connection != -1: # the Tu was disconnected and it got connected
self.disconnection_end = time_con # record the time the disconnection time ended
# calculate the total time of the disconnection
self.disconnection_last_event_time = self.calculate_disconnection_time(self.disconnection_start, time_con)
# calculate the total time of disconnection
self.disconnection_total_time = self.update_total_time(self.disconnection_total_time,
self.disconnection_last_event_time)
# calculate availability
availability = self.calculate_availability(self.disconnection_total_time, self.first_connection, time_con)
self.availability = availability
# update the disconnections dataframe
# update 1 : update time end
self.disconnections = self.update_record(self.disconnections, self.disconnection_start, 'Time End', time_con)
            # update 2: update duration of the disconnection
self.disconnections = self.update_record(self.disconnections, self.disconnection_start, 'Downtime',
f'{self.disconnection_last_event_time}')
# update 3: update of the availability
self.disconnections = self.update_record(self.disconnections, self.disconnection_start, 'Availability',
availability)
# update 4: update of disconnection#
self.disconnections = self.update_record(self.disconnections, self.disconnection_start, 'Disconnection #',
self.disc_counter)
self.connection_state = True # change flag to connected
elif self.first_connection == -1: # the Tu was first connected
self.first_connection = time_con
self.connection_state = True
else:
# calculate availability
availability = self.calculate_availability(self.disconnection_total_time, self.first_connection, time_con)
self.availability = availability
@staticmethod
def calculate_availability(time_span, start_t, time_t):
"""
Calculate availability of time_span from start to time
:param time_span: datetime, time where we can to calculate availability
:param start_t: datetime, start time to calculate availability
:param time_t: datetime, time to calculate availability
:return: float, availability
"""
if start_t == -1: # the unit was never connected
return 0
return (1 - (time_span / (time_t - start_t))) * 100
@staticmethod
def update_total_time(total_time_counter, update):
"""
Updates the total_time_counter by update
:param total_time_counter: datetime, has the current total time
:param update: datetime, the value to update the total time
:return: total_time_counter + update
"""
return total_time_counter + update
@staticmethod
def calculate_disconnection_time(start, end):
"""
Calculates the total time of disconnection end - start
:param start: datetime, start time of the event
:param end: datetime, end time of the event
:return: end - start
"""
return end - start
@staticmethod
def update_record(df, find_variable, field, update_data):
df.loc[find_variable, field] = update_data
return df
def create_end(self, end_time):
end_ = pd.Series(
{'Time End': datetime.now(), 'Disconnection #': self.disc_counter,
'Downtime': f'{self.disconnection_total_time}',
'Availability': self.availability}, name='Total')
self.disconnections = self.disconnections.append(end_)
# Change type of columns to print in excel to proper value
self.disconnections['Disconnection #'] = self.disconnections['Disconnection #'].astype(int)
self.disconnections['Availability'] = self.disconnections['Availability'].astype(float)
@staticmethod
def increment_disconnections(counter):
"""
Function that will add counter + 1 and return it
:param counter: int, disconnections counter
:return: int, counter + 1
"""
return counter + 1
@staticmethod
def seconds_to_split(time_split):
"""
        Function that takes a timedelta and converts it to days, hours, minutes and seconds, truncating
        to whole units. If the input is not a timedelta it will raise an exception
:return: days (int), hours (int), minutes (int), seconds (int)
"""
        # validate that the input is a timedelta
        if isinstance(time_split, timedelta):
            total_seconds = time_split.seconds
            days = time_split.days
            hours = total_seconds // 3600
            total_seconds_wo_hours = total_seconds - (hours * 3600)
            minutes = total_seconds_wo_hours // 60
            seconds = total_seconds_wo_hours - (minutes * 60)
            return days, hours, minutes, seconds
        else:
            raise ValueError(f'The input to the function is not a timedelta, it is {type(time_split)}')
def print(self):
print('*****Tu instance*****')
print(f'- name: {self.name}')
print(f'- first connected: {self.first_connection}')
        print(f'-------connection status------------')
print(f'connection: {self.connection_state}')
print(f'-------disconnection info----------')
        print(f'- disconnections: {self.disc_counter}')
print(f'- disconnection event-start: {self.disconnection_start}')
print(f'- disconnection event-end: {self.disconnection_end}')
print(f'- disconnection event time: {self.disconnection_last_event_time}')
print(f'----disconnection total time-------')
print(f'- total time disconnected: {self.disconnection_total_time}')
print(f'-----total availability at the time of print----')
print(f'- availability: {self.calculate_availability(self.disconnection_total_time, self.first_connection, datetime.now())}')
print(f'--------operation parameters-------')
print(f'- local sector: {self.local_sector}')
print(f'- rssi: {self.rssi}')
        print(f'- snr: {self.snr}')
print(f'- rx_mcs: {self.rx_mcs}')
print(f'- tx_mcs: {self.tx_mcs}')
print(f'- rx_speed_num: {self.rx_speed_num}')
print(f'- tx_speed_num: {self.tx_speed_num}')
print(f'- rx_mcs_dr: {self.rx_mcs_dr}')
print(f'- tx_mcs_dr: {self.tx_mcs_dr}')
print(f'- power_index: {self.tx_power_index}')
print(f'------------events dataframe-------------')
print(f'{self.disconnections}')
if __name__ == '__main__':
import time
# options to display the whole dataframe for checks
pd.set_option('display.max_rows', None)
pd.set_option('display.max_columns', None)
pd.set_option('display.width', None)
pd.set_option('display.max_colwidth', -1)
print(f'Creating object: Test Tu No. 1')
test_tu = TuDataModel('Test Tu No. 1')
# test_tu.print()
# Testing connecting for the first time
    print(f'Connecting the unit to the network for the first time')
start_time = datetime.now()
print(start_time)
test_tu.connected(start_time)
# test_tu.print()
# Have connection 10 seconds
print(f'emulating time for 10 seconds')
time.sleep(10)
# after 10 seconds disconnect
print('dropping for 5 seconds...')
test_tu.disconnected(datetime.now())
# test_tu.print()
time.sleep(5)
print('reconnecting')
test_tu.connected(datetime.now())
print(f'emulating time for 3 seconds')
time.sleep(3)
    print('dropping for 2 seconds...')
test_tu.disconnected(datetime.now())
time.sleep(2)
    print('reconnecting, then emulating time for 120 seconds')
test_tu.connected(datetime.now())
time.sleep(120)
print('printing')
print(datetime.now())
test_tu.print()
| 0.750827 | 0.346458 |
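The availability bookkeeping in tu_data_model.py above reduces to a single ratio of total downtime over the observation window. The sketch below mirrors TuDataModel.calculate_availability under the same datetime/timedelta assumptions; availability_pct is a hypothetical helper used for illustration, not part of the module.
from datetime import datetime, timedelta
def availability_pct(total_downtime, first_connection, now):
    """Availability in percent; 0 if the unit was never connected (first_connection == -1)."""
    if first_connection == -1:
        return 0
    return (1 - total_downtime / (now - first_connection)) * 100
# 15 s of downtime over a 140 s window gives roughly 89.3 % availability
start = datetime(2023, 1, 1, 12, 0, 0)
print(availability_pct(timedelta(seconds=15), start, start + timedelta(seconds=140)))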
import cv2
import time
import argparse
import os
import torch
import posenet
parser = argparse.ArgumentParser()
parser.add_argument('--model', type=int, default=101)
parser.add_argument('--scale_factor', type=float, default=1.0)
parser.add_argument('--notxt', action='store_true')
parser.add_argument('--image_dir', type=str, default='./images')
parser.add_argument('--output_dir', type=str, default='./output')
args = parser.parse_args()
def main():
model = posenet.load_model(args.model)
model = model.cuda()
output_stride = model.output_stride
if args.output_dir:
if not os.path.exists(args.output_dir):
os.makedirs(args.output_dir)
filenames = [
f.path for f in os.scandir(args.image_dir) if f.is_file() and f.path.endswith(('.png', '.jpg'))]
start = time.time()
for f in filenames:
input_image, draw_image, output_scale = posenet.read_imgfile(
f, scale_factor=args.scale_factor, output_stride=output_stride)
with torch.no_grad():
input_image = torch.Tensor(input_image).cuda()
heatmaps_result, offsets_result, displacement_fwd_result, displacement_bwd_result = model(input_image)
pose_scores, keypoint_scores, keypoint_coords = posenet.decode_multiple_poses(
heatmaps_result.squeeze(0),
offsets_result.squeeze(0),
displacement_fwd_result.squeeze(0),
displacement_bwd_result.squeeze(0),
output_stride=output_stride,
max_pose_detections=10,
min_pose_score=0.25)
keypoint_coords *= output_scale
if args.output_dir:
draw_image = posenet.draw_skel_and_kp(
draw_image, pose_scores, keypoint_scores, keypoint_coords,
min_pose_score=0.25, min_part_score=0.25)
cv2.imwrite(os.path.join(args.output_dir, os.path.relpath(f, args.image_dir)), draw_image)
if not args.notxt:
print()
print("Results for image: %s" % f)
for pi in range(len(pose_scores)):
if pose_scores[pi] == 0.:
break
print('Pose #%d, score = %f' % (pi, pose_scores[pi]))
for ki, (s, c) in enumerate(zip(keypoint_scores[pi, :], keypoint_coords[pi, :, :])):
print('Keypoint %s, score = %f, coord = %s' % (posenet.PART_NAMES[ki], s, c))
print('Average FPS:', len(filenames) / (time.time() - start))
if __name__ == "__main__":
main()
|
code/posenet-py-torch/image_demo.py
|
import cv2
import time
import argparse
import os
import torch
import posenet
parser = argparse.ArgumentParser()
parser.add_argument('--model', type=int, default=101)
parser.add_argument('--scale_factor', type=float, default=1.0)
parser.add_argument('--notxt', action='store_true')
parser.add_argument('--image_dir', type=str, default='./images')
parser.add_argument('--output_dir', type=str, default='./output')
args = parser.parse_args()
def main():
model = posenet.load_model(args.model)
model = model.cuda()
output_stride = model.output_stride
if args.output_dir:
if not os.path.exists(args.output_dir):
os.makedirs(args.output_dir)
filenames = [
f.path for f in os.scandir(args.image_dir) if f.is_file() and f.path.endswith(('.png', '.jpg'))]
start = time.time()
for f in filenames:
input_image, draw_image, output_scale = posenet.read_imgfile(
f, scale_factor=args.scale_factor, output_stride=output_stride)
with torch.no_grad():
input_image = torch.Tensor(input_image).cuda()
heatmaps_result, offsets_result, displacement_fwd_result, displacement_bwd_result = model(input_image)
pose_scores, keypoint_scores, keypoint_coords = posenet.decode_multiple_poses(
heatmaps_result.squeeze(0),
offsets_result.squeeze(0),
displacement_fwd_result.squeeze(0),
displacement_bwd_result.squeeze(0),
output_stride=output_stride,
max_pose_detections=10,
min_pose_score=0.25)
keypoint_coords *= output_scale
if args.output_dir:
draw_image = posenet.draw_skel_and_kp(
draw_image, pose_scores, keypoint_scores, keypoint_coords,
min_pose_score=0.25, min_part_score=0.25)
cv2.imwrite(os.path.join(args.output_dir, os.path.relpath(f, args.image_dir)), draw_image)
if not args.notxt:
print()
print("Results for image: %s" % f)
for pi in range(len(pose_scores)):
if pose_scores[pi] == 0.:
break
print('Pose #%d, score = %f' % (pi, pose_scores[pi]))
for ki, (s, c) in enumerate(zip(keypoint_scores[pi, :], keypoint_coords[pi, :, :])):
print('Keypoint %s, score = %f, coord = %s' % (posenet.PART_NAMES[ki], s, c))
print('Average FPS:', len(filenames) / (time.time() - start))
if __name__ == "__main__":
main()
| 0.460289 | 0.116991 |
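The image_demo.py script above assumes a CUDA device (model.cuda() and torch.Tensor(...).cuda()), so it fails on CPU-only machines. A minimal device-agnostic setup could look like the sketch below; this is a suggested adaptation, not part of the original repository.
import torch
import posenet
# pick the GPU when available, otherwise fall back to the CPU
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model = posenet.load_model(101).to(device)
# inside the image loop, move each input tensor to the same device:
# input_image = torch.Tensor(input_image).to(device)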
import numpy as np
from deep_filters.core.filters import filters
from keras.preprocessing.image import load_img, img_to_array
def load_array_image(paths, mode, kernel=(128, 128), img_filter='zoom', channels=3, model=None, zoom_learn=2, resize=False, **kwargs):
"""
    Load the images in ``paths`` and split them into kernel-sized crops; when channels == 1 the
    three colour channels are emitted as three separate batch entries.
    :param paths: iterable of image file paths
    :return: tuple of numpy arrays (filtered input crops, original target crops)
"""
l = []
t = []
for f in paths:
print(f)
img = load_img(f)
if resize:
img = img.resize(kernel)
image_name = f.split('/')[-1]
process_image(img, kernel, l, t, mode, img_filter=img_filter, channels=channels, model=model, image_name=image_name, **kwargs)
return np.array(l), np.array(t)
def process_image(img, kernel, l, t, mode, omit_coef=0.5, img_filter='zoom', channels=3, model=None, **kwargs):
if filters.get(img_filter):
img_to_process = filters[img_filter](img, **kwargs)
else:
img_to_process = load_img(img_filter)
img_to_process = img_to_process.resize(img.size)
crop_whole_image(channels, img, img_to_process, kernel, l, mode, model, omit_coef, t)
def crop_whole_image(channels, img, img_to_process, kernel, l, mode, model, omit_coef, t):
for y in range(0, img.height, int(kernel[1])):
for x in range(0, img.width, int(kernel[0])):
crop_image(channels, img, kernel, mode, model, t, x, y, 'OUTPUT')
crop_image(channels, img_to_process, kernel, mode, model, l, x, y)
def crop_one_image(channels, img, kernel, mode, model, t, learn_mode):
for y in range(0, img.height, int(kernel[1])):
for x in range(0, img.width, int(kernel[0])):
crop_image(channels, img, kernel, mode, model, t, x, y, learn_mode)
def crop_image(channels, img, kernel, mode, model, t, x, y, learn_mode='INPUT'):
if learn_mode == 'INPUT' and model:
posx = (model.layers[0].input_shape[2] - kernel[0])
posy = (model.layers[0].input_shape[3] - kernel[1])
else:
posx = 0
posy = 0
img_c = img.crop((x, y, x + kernel[0] + posx, y + kernel[1] + posy))
img_converted = img_c
if mode == 'YCbCr':
img_converted = img_c.convert('YCbCr')
ar = img_to_array(img_converted)
input_array = ar[0].reshape(1, img_c.width, img_c.height)
t.append(input_array)
else:
if channels == 1:
ar = img_to_array(img_converted)
for c in range(0, 3):
ar_new = ar[c].reshape(1, img_converted.width, img_converted.height)
t.append(ar_new)
else:
ar = img_to_array(img_converted)
t.append(ar)
return img_c
|
deep_filters/core/images.py
|
import numpy as np
from deep_filters.core.filters import filters
from keras.preprocessing.image import load_img, img_to_array
def load_array_image(paths, mode, kernel=(128, 128), img_filter='zoom', channels=3, model=None, zoom_learn=2, resize=False, **kwargs):
"""
    Load the images in ``paths`` and split them into kernel-sized crops; when channels == 1 the
    three colour channels are emitted as three separate batch entries.
    :param paths: iterable of image file paths
    :return: tuple of numpy arrays (filtered input crops, original target crops)
"""
l = []
t = []
for f in paths:
print(f)
img = load_img(f)
if resize:
img = img.resize(kernel)
image_name = f.split('/')[-1]
process_image(img, kernel, l, t, mode, img_filter=img_filter, channels=channels, model=model, image_name=image_name, **kwargs)
return np.array(l), np.array(t)
def process_image(img, kernel, l, t, mode, omit_coef=0.5, img_filter='zoom', channels=3, model=None, **kwargs):
if filters.get(img_filter):
img_to_process = filters[img_filter](img, **kwargs)
else:
img_to_process = load_img(img_filter)
img_to_process = img_to_process.resize(img.size)
crop_whole_image(channels, img, img_to_process, kernel, l, mode, model, omit_coef, t)
def crop_whole_image(channels, img, img_to_process, kernel, l, mode, model, omit_coef, t):
for y in range(0, img.height, int(kernel[1])):
for x in range(0, img.width, int(kernel[0])):
crop_image(channels, img, kernel, mode, model, t, x, y, 'OUTPUT')
crop_image(channels, img_to_process, kernel, mode, model, l, x, y)
def crop_one_image(channels, img, kernel, mode, model, t, learn_mode):
for y in range(0, img.height, int(kernel[1])):
for x in range(0, img.width, int(kernel[0])):
crop_image(channels, img, kernel, mode, model, t, x, y, learn_mode)
def crop_image(channels, img, kernel, mode, model, t, x, y, learn_mode='INPUT'):
if learn_mode == 'INPUT' and model:
posx = (model.layers[0].input_shape[2] - kernel[0])
posy = (model.layers[0].input_shape[3] - kernel[1])
else:
posx = 0
posy = 0
img_c = img.crop((x, y, x + kernel[0] + posx, y + kernel[1] + posy))
img_converted = img_c
if mode == 'YCbCr':
img_converted = img_c.convert('YCbCr')
ar = img_to_array(img_converted)
input_array = ar[0].reshape(1, img_c.width, img_c.height)
t.append(input_array)
else:
if channels == 1:
ar = img_to_array(img_converted)
for c in range(0, 3):
ar_new = ar[c].reshape(1, img_converted.width, img_converted.height)
t.append(ar_new)
else:
ar = img_to_array(img_converted)
t.append(ar)
return img_c
| 0.627267 | 0.46721 |
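crop_whole_image in images.py above walks each image in kernel-sized strides. The standalone sketch below reproduces that tiling arithmetic with Pillow only; tile_boxes is a hypothetical helper used for illustration, not part of deep_filters.
from PIL import Image
def tile_boxes(width, height, kernel=(128, 128)):
    # crop boxes (left, upper, right, lower) covering the image in kernel-sized strides
    return [(x, y, x + kernel[0], y + kernel[1])
            for y in range(0, height, kernel[1])
            for x in range(0, width, kernel[0])]
img = Image.new('RGB', (300, 200))
print(len(tile_boxes(img.width, img.height)))  # 3 columns x 2 rows = 6 crops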
from flask import Flask, request, jsonify
from flask_cors import CORS, cross_origin
import socket
import psycopg2
import json
from google.oauth2 import id_token
from google.auth.transport import requests as requests_google
import requests
print('starting server')
#initialize app
app = Flask(__name__)
CORS(app)
#functions for database access
def get_connection():
return psycopg2.connect(host = "localhost", dbname="todo_app", user="postgres", password="<PASSWORD>")
def add_task_db(curs, task, user_id):
curs.execute("INSERT INTO tasks (task_name, due_time, notifications, task_id, last_updated, user_id, time_made) VALUES(%s, %s, %s, %s, %s, %s, %s)",
(task.get('task_name'), task.get('due_time'), task.get('notifications'), task.get('task_id'), task.get('last_updated'), user_id, task.get('time_made')))
def delete_task_db(curs, data, user_id):
curs.execute("DELETE FROM tasks WHERE task_id = %s AND user_id = %s", (data, user_id))
#function for authorizing token with each request
def authorize_token(token):
try:
decoded_token = id_token.verify_oauth2_token(token, requests_google.Request(), "195081855240-jjsqpn2t0oucb8ets7li98p8vodja8jd.apps.googleusercontent.com")
print(decoded_token)
if(decoded_token.get('aud') == "195081855240-jjsqpn2t0oucb8ets7li98p8vodja8jd.apps.googleusercontent.com" and decoded_token.get('iss') == "accounts.google.com"):
print('epicsauce')
return decoded_token
else:
return False
    except Exception:
return False
#functions for requests
@app.route('/sign-up-in', methods=['POST'])
def sign_up_in():
print('sign up in ')
conn = get_connection()
curs = conn.cursor()
data = (request.get_data(as_text = False)).decode('utf-8')
payload = {'code': data, 'client_id':'195081855240-jjsqpn2t0oucb8ets7li98p8vodja8jd.apps.googleusercontent.com', 'client_secret': '<KEY>', 'grant_type': 'authorization_code', 'redirect_uri': 'postmessage', 'access_type':'offline'}
try:
resp = json.loads((requests.post('https://oauth2.googleapis.com/token', params=payload)).text)
id_token = authorize_token(resp.get('id_token'))
user_id = id_token.get('sub')
curs.execute("SELECT user_id FROM users")
le_data = (curs.fetchall())
le_data = [user_id[0] for user_id in le_data]
settings = '{"clock_mode":false}'
if(user_id not in le_data):
print('creating user account')
curs.execute("INSERT INTO users (name, user_id, email, settings) VALUES(%s, %s, %s, %s)", (id_token.get('given_name'), id_token.get('sub'), id_token.get('email'), settings))
else:
print('user already has account, logging in')
print('token authorized, user logged in, returning refresh token, id_token and access token')
conn.commit()
curs.close()
conn.close()
return jsonify(resp)
    except Exception:
        return jsonify({"result": "failure", "error": "401", "message": "was not able to sign the user in"}), 401
@app.route('/post-settings', methods=['POST'])
def post_settings():
print('posting settings')
data = request.get_data()
data = str((data.decode("utf-8")).strip("''"))
decoded_token = authorize_token(request.headers['Authorization'])
if(decoded_token != False):
conn = get_connection()
curs = conn.cursor()
curs.execute("UPDATE users SET settings = %s WHERE user_id = %s", (data, decoded_token.get('sub')))
conn.commit()
curs.close()
conn.close()
return str(data)
else:
print('error')
return jsonify({"result" : "failure", "error" : "401", "message" : "was not able to authorize user"}), 401
@app.route('/get-settings', methods=['GET'])
def get_settings():
print('get settings')
token = authorize_token(request.headers['Authorization'])
if(token != False):
conn = get_connection()
curs = conn.cursor()
curs.execute("SELECT settings FROM users WHERE user_id = %s", [token.get('sub')])
settings = curs.fetchone()[0]
curs.close()
conn.close()
return jsonify(settings)
else:
print('error')
return jsonify({"result" : "failure", "error" : "401", "message" : "was not able to authorize user"}), 401
@app.route('/recieve-all-tasks', methods=['POST'])
def recieve_all_tasks():
    print('receiving all tasks')
data = request.get_json()
token = authorize_token(request.headers['Authorization'])
if(token != False):
conn = get_connection()
curs = conn.cursor()
for task in data:
print(task)
add_task_db(curs, task, token.get('sub'))
conn.commit()
curs.close()
conn.close()
return str(data)
else:
print('error')
return jsonify({"result" : "failure", "error" : "401", "message" : "was not able to authorize user"}), 401
@app.route('/get-tasks', methods=['GET'])
def get_tasks():
print('getting tasks')
token = authorize_token(request.headers['Authorization'])
if(token != False):
conn = get_connection()
curs = conn.cursor()
curs.execute("SELECT row_to_json(tasks) FROM tasks WHERE user_id = %s", [token.get('sub')])
rows = (curs.fetchall())
rows = [row[0] for row in rows]
curs.close()
conn.close()
return jsonify(rows)
else:
print('error')
return jsonify({"result" : "failure", "error" : "401", "message" : "was not able to authorize user"}), 401
@app.route('/delete-task', methods=['DELETE'])
def delete_task():
print('deleting task')
token = authorize_token(request.headers['Authorization'])
if(token != False):
data = str(request.get_json())
conn = get_connection()
curs = conn.cursor()
delete_task_db(curs, data, token.get('sub'))
conn.commit()
curs.close()
conn.close()
return str(data)
else:
print('error')
return jsonify({"result" : "failure", "error" : "401", "message" : "was not able to authorize user"}), 401
@app.route('/post-task', methods=['POST'])
def post_task():
print('posting task')
token = authorize_token(request.headers['Authorization'])
if(token != False):
data = request.get_json()
conn = get_connection()
curs = conn.cursor()
add_task_db(curs, data, token.get('sub'))
conn.commit()
curs.close()
conn.close()
return str(data)
else:
print('error')
return jsonify({"result" : "failure", "error" : "401", "message" : "was not able to authorize user"}), 401
@app.route('/edit-task', methods=['POST'])
def edit_task():
print('editing task')
token = authorize_token(request.headers['Authorization'])
if(token != False):
data = request.get_json()
conn = get_connection()
curs = conn.cursor()
user_id = token.get('sub')
delete_task_db(curs, data.get('task_id'), user_id)
add_task_db(curs, data, user_id)
conn.commit()
curs.close()
conn.close()
return str(data)
else:
print('error')
return jsonify({"result" : "failure", "error" : "401", "message" : "was not able to authorize user"}), 401
@app.route('/recieve-ticket', methods=['POST'])
def recieve_ticket():
    print('receiving ticket')
decoded_token = authorize_token(request.headers['Authorization'])
if(decoded_token != False):
data = request.get_json()
conn = get_connection()
curs = conn.cursor()
user_id = decoded_token.get('sub')
for action in data:
            action_mode = action.get('mode')
            task = action.get('data')
if action_mode == 'add':
add_task_db(curs, task, user_id)
elif action_mode == 'edit':
curs.execute('SELECT last_updated FROM tasks WHERE task_id = %s AND user_id = %s', (task.get('task_id'), user_id))
last_updated = (curs.fetchone())[0]
if(task.get('last_updated') > int(last_updated)):
delete_task_db(curs, task.get('task_id'), user_id)
add_task_db(curs, task, user_id)
elif action_mode == 'delete':
delete_task_db(curs, task, user_id)
conn.commit()
curs.close()
conn.close()
return str(data)
else:
print('error')
return jsonify({"result" : "failure", "error" : "401", "message" : "was not able to authorize user"}), 401
if __name__ == '__main__':
app.run(host='0.0.0.0', port='5000', debug='False')
|
app.py
|
from flask import Flask, request, jsonify
from flask_cors import CORS, cross_origin
import socket
import psycopg2
import json
from google.oauth2 import id_token
from google.auth.transport import requests as requests_google
import requests
print('starting server')
#initialize app
app = Flask(__name__)
CORS(app)
#functions for database access
def get_connection():
return psycopg2.connect(host = "localhost", dbname="todo_app", user="postgres", password="<PASSWORD>")
def add_task_db(curs, task, user_id):
curs.execute("INSERT INTO tasks (task_name, due_time, notifications, task_id, last_updated, user_id, time_made) VALUES(%s, %s, %s, %s, %s, %s, %s)",
(task.get('task_name'), task.get('due_time'), task.get('notifications'), task.get('task_id'), task.get('last_updated'), user_id, task.get('time_made')))
def delete_task_db(curs, data, user_id):
curs.execute("DELETE FROM tasks WHERE task_id = %s AND user_id = %s", (data, user_id))
#function for authorizing token with each request
def authorize_token(token):
try:
decoded_token = id_token.verify_oauth2_token(token, requests_google.Request(), "195081855240-jjsqpn2t0oucb8ets7li98p8vodja8jd.apps.googleusercontent.com")
print(decoded_token)
if(decoded_token.get('aud') == "195081855240-jjsqpn2t0oucb8ets7li98p8vodja8jd.apps.googleusercontent.com" and decoded_token.get('iss') == "accounts.google.com"):
print('epicsauce')
return decoded_token
else:
return False
    except Exception:
return False
#functions for requests
@app.route('/sign-up-in', methods=['POST'])
def sign_up_in():
print('sign up in ')
conn = get_connection()
curs = conn.cursor()
data = (request.get_data(as_text = False)).decode('utf-8')
payload = {'code': data, 'client_id':'195081855240-jjsqpn2t0oucb8ets7li98p8vodja8jd.apps.googleusercontent.com', 'client_secret': '<KEY>', 'grant_type': 'authorization_code', 'redirect_uri': 'postmessage', 'access_type':'offline'}
try:
resp = json.loads((requests.post('https://oauth2.googleapis.com/token', params=payload)).text)
id_token = authorize_token(resp.get('id_token'))
user_id = id_token.get('sub')
curs.execute("SELECT user_id FROM users")
le_data = (curs.fetchall())
le_data = [user_id[0] for user_id in le_data]
settings = '{"clock_mode":false}'
if(user_id not in le_data):
print('creating user account')
curs.execute("INSERT INTO users (name, user_id, email, settings) VALUES(%s, %s, %s, %s)", (id_token.get('given_name'), id_token.get('sub'), id_token.get('email'), settings))
else:
print('user already has account, logging in')
print('token authorized, user logged in, returning refresh token, id_token and access token')
conn.commit()
curs.close()
conn.close()
return jsonify(resp)
    except Exception:
        return jsonify({"result": "failure", "error": "401", "message": "was not able to sign the user in"}), 401
@app.route('/post-settings', methods=['POST'])
def post_settings():
print('posting settings')
data = request.get_data()
data = str((data.decode("utf-8")).strip("''"))
decoded_token = authorize_token(request.headers['Authorization'])
if(decoded_token != False):
conn = get_connection()
curs = conn.cursor()
curs.execute("UPDATE users SET settings = %s WHERE user_id = %s", (data, decoded_token.get('sub')))
conn.commit()
curs.close()
conn.close()
return str(data)
else:
print('error')
return jsonify({"result" : "failure", "error" : "401", "message" : "was not able to authorize user"}), 401
@app.route('/get-settings', methods=['GET'])
def get_settings():
print('get settings')
token = authorize_token(request.headers['Authorization'])
if(token != False):
conn = get_connection()
curs = conn.cursor()
curs.execute("SELECT settings FROM users WHERE user_id = %s", [token.get('sub')])
settings = curs.fetchone()[0]
curs.close()
conn.close()
return jsonify(settings)
else:
print('error')
return jsonify({"result" : "failure", "error" : "401", "message" : "was not able to authorize user"}), 401
@app.route('/recieve-all-tasks', methods=['POST'])
def recieve_all_tasks():
    print('receiving all tasks')
data = request.get_json()
token = authorize_token(request.headers['Authorization'])
if(token != False):
conn = get_connection()
curs = conn.cursor()
for task in data:
print(task)
add_task_db(curs, task, token.get('sub'))
conn.commit()
curs.close()
conn.close()
return str(data)
else:
print('error')
return jsonify({"result" : "failure", "error" : "401", "message" : "was not able to authorize user"}), 401
@app.route('/get-tasks', methods=['GET'])
def get_tasks():
print('getting tasks')
token = authorize_token(request.headers['Authorization'])
if(token != False):
conn = get_connection()
curs = conn.cursor()
curs.execute("SELECT row_to_json(tasks) FROM tasks WHERE user_id = %s", [token.get('sub')])
rows = (curs.fetchall())
rows = [row[0] for row in rows]
curs.close()
conn.close()
return jsonify(rows)
else:
print('error')
return jsonify({"result" : "failure", "error" : "401", "message" : "was not able to authorize user"}), 401
@app.route('/delete-task', methods=['DELETE'])
def delete_task():
print('deleting task')
token = authorize_token(request.headers['Authorization'])
if(token != False):
data = str(request.get_json())
conn = get_connection()
curs = conn.cursor()
delete_task_db(curs, data, token.get('sub'))
conn.commit()
curs.close()
conn.close()
return str(data)
else:
print('error')
return jsonify({"result" : "failure", "error" : "401", "message" : "was not able to authorize user"}), 401
@app.route('/post-task', methods=['POST'])
def post_task():
print('posting task')
token = authorize_token(request.headers['Authorization'])
if(token != False):
data = request.get_json()
conn = get_connection()
curs = conn.cursor()
add_task_db(curs, data, token.get('sub'))
conn.commit()
curs.close()
conn.close()
return str(data)
else:
print('error')
return jsonify({"result" : "failure", "error" : "401", "message" : "was not able to authorize user"}), 401
@app.route('/edit-task', methods=['POST'])
def edit_task():
print('editing task')
token = authorize_token(request.headers['Authorization'])
if(token != False):
data = request.get_json()
conn = get_connection()
curs = conn.cursor()
user_id = token.get('sub')
delete_task_db(curs, data.get('task_id'), user_id)
add_task_db(curs, data, user_id)
conn.commit()
curs.close()
conn.close()
return str(data)
else:
print('error')
return jsonify({"result" : "failure", "error" : "401", "message" : "was not able to authorize user"}), 401
@app.route('/recieve-ticket', methods=['POST'])
def recieve_ticket():
    print('receiving ticket')
decoded_token = authorize_token(request.headers['Authorization'])
if(decoded_token != False):
data = request.get_json()
conn = get_connection()
curs = conn.cursor()
user_id = decoded_token.get('sub')
for action in data:
            action_mode = action.get('mode')
            task = action.get('data')
if action_mode == 'add':
add_task_db(curs, task, user_id)
elif action_mode == 'edit':
curs.execute('SELECT last_updated FROM tasks WHERE task_id = %s AND user_id = %s', (task.get('task_id'), user_id))
last_updated = (curs.fetchone())[0]
if(task.get('last_updated') > int(last_updated)):
delete_task_db(curs, task.get('task_id'), user_id)
add_task_db(curs, task, user_id)
elif action_mode == 'delete':
delete_task_db(curs, task, user_id)
conn.commit()
curs.close()
conn.close()
return str(data)
else:
print('error')
return jsonify({"result" : "failure", "error" : "401", "message" : "was not able to authorize user"}), 401
if __name__ == '__main__':
app.run(host='0.0.0.0', port='5000', debug='False')
| 0.195287 | 0.072571 |
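Every authenticated route in app.py above expects a Google ID token in the Authorization header and returns the 401 JSON payload when verification fails. A hypothetical client call against the /get-tasks route is sketched below; the token value is a placeholder and the host/port follow the app.run() defaults.
import requests
resp = requests.get('http://localhost:5000/get-tasks',
                    headers={'Authorization': '<google-id-token>'})
print(resp.status_code, resp.json())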
import locale
import sys
from collections import OrderedDict
from io import StringIO
import numpy as np
import pandas as pd
from .. import dp_logging
from . import data_utils
logger = dp_logging.get_child_logger(__name__)
class BaseData(object):
"""
Abstract class for data loading and saving
"""
data_type = None
info = None
def __init__(self, input_file_path, data, options):
"""
        Base class for loading a dataset. Options can be specified and may be
        more specific to the subclasses.
:param input_file_path: path to the file being loaded or None
:type input_file_path: str
:param data: data being loaded into the class instead of an input file
:type data: multiple types
:param options: options pertaining to the data type
:type options: dict
:return: None
"""
if options is None:
options = {}
# Public properties
self.input_file_path = input_file_path
self.options = options
# 'Private' properties
# _data_formats: dict containing data_formats (key) and function
# calls (values) which take self._data and convert it
# into the desired data_format for output.
# _selected_data_format: user selected format in which to return data
# can only be of types in _data_formats
        # _data: data being stored in memory
# _batch_info: when iterating through batches, information about the
# iteration/permutation are necessary to be held to keep
# constant across function calls.
        # _tmp_file_name: randomly generated file name usable by the system
# _file_encoding: contains the suggested file encoding for reading data
self._data_formats = OrderedDict()
self._selected_data_format = None
self._data = data
self._batch_info = dict(perm=list(), iter=0)
self._tmp_file_name = None
self._file_encoding = options.get('encoding', None)
@property
def data(self):
if self._data is None:
self._load_data()
allowed_data_formats = list(self._data_formats.keys())
if not self._selected_data_format:
return self._data
elif self._selected_data_format in allowed_data_formats:
return self._data_formats[self._selected_data_format](self._data)
else:
raise ValueError(
"The data format must be one of the following: {}".format(
str(allowed_data_formats)
)
)
@property
def data_format(self):
return self._selected_data_format
@property
def is_structured(self):
"""
Determines compatibility with StructuredProfiler
"""
raise NotImplementedError
@data_format.setter
def data_format(self, value):
allowed_data_formats = list(self._data_formats.keys())
if value.lower() not in allowed_data_formats:
raise ValueError(
"The data format must be one of the following: {}".format(
str(allowed_data_formats)
)
)
self._selected_data_format = value.lower()
@property
def file_encoding(self):
if not self._file_encoding:
# get system default, but if set to ascii, just update to utf-8
file_encoding = "utf-8"
try:
file_encoding = locale.getpreferredencoding(False)
            except Exception:
file_encoding = sys.getfilesystemencoding()
finally:
if file_encoding.lower() in ['ansi_x3.4-1968', 'ascii']:
file_encoding = 'utf-8'
self._file_encoding = file_encoding
# set to default, detect if not StringIO
if self.input_file_path \
and not isinstance(self.input_file_path, StringIO):
self._file_encoding = data_utils.detect_file_encoding(
self.input_file_path)
return self._file_encoding
@file_encoding.setter
def file_encoding(self, value):
valid_user_set_encodings = [
"ascii", "utf-8", "utf-16", "utf-32"
]
if not value or value.lower() not in valid_user_set_encodings:
raise ValueError(
"File Encoding must be one of the following: {}"
.format(valid_user_set_encodings)
)
self._file_encoding = value
@staticmethod
def _check_and_return_options(options):
if not options:
options = dict()
elif not isinstance(options, dict):
raise ValueError("Options must be a dictionary.")
return options
def _load_data(self, data=None):
raise NotImplementedError()
def get_batch_generator(self, batch_size):
data_length = len(self.data)
indices = np.random.permutation(data_length)
for i in range(0, data_length, batch_size):
if isinstance(self.data, pd.DataFrame):
yield self.data.iloc[indices[i:i + batch_size], :]
else:
yield list(self.data[k] for k in indices[i:i + batch_size])
@classmethod
def is_match(cls, input_file_path, options):
raise NotImplementedError()
def reload(self, input_file_path, data, options):
"""
Reload the data class with a new dataset. This erases all existing
data/options and replaces it with the input data/options.
:param input_file_path: path to the file being loaded or None
:type input_file_path: str
:param data: data being loaded into the class instead of an input file
:type data: multiple types
:param options: options pertaining to the data type
:type options: dict
:return: None
"""
if input_file_path and not self.is_match(input_file_path, options):
raise ValueError(
"Reloaded dataset does not match the specified data_type"
)
elif input_file_path:
self.input_file_path = input_file_path
self._data = None
self._tmp_file_name = None
self.options = None
self._batch_info = dict(perm=list(), iter=0)
def __len__(self):
"""
Returns the length of the dataset which is loaded.
:return: length of the dataset
"""
return len(self.data)
@property
def length(self):
"""
Returns the length of the dataset which is loaded.
:return: length of the dataset
"""
return len(self)
def __getattribute__(self, name):
"""
Overrides getattr for the class such that functions can be applied
directly to the data class if the function is not part of the data
class.
e.g. if data is BaseData where self.data = [1, 2, 3, 1]
```
data.count(1) # returns 2, bc data.data has the function 'count'
```
"""
try:
returned = object.__getattribute__(self, name)
except AttributeError as attr_error:
class_name = self.__class__.__name__
data_class_name = self.data.__class__.__name__
if (not f"'{class_name}' object has no attribute '{name}'"
== str(attr_error)):
raise
try:
returned = object.__getattribute__(self.data, name)
except AttributeError as attr_error:
if (not f"'{data_class_name}' object has no attribute '{name}'"
== str(attr_error)):
raise
raise AttributeError(f"Neither '{class_name}' nor "
f"'{data_class_name}' objects have "
f"attribute '{name}'")
return returned
|
dataprofiler/data_readers/base_data.py
|
import locale
import sys
from collections import OrderedDict
from io import StringIO
import numpy as np
import pandas as pd
from .. import dp_logging
from . import data_utils
logger = dp_logging.get_child_logger(__name__)
class BaseData(object):
"""
Abstract class for data loading and saving
"""
data_type = None
info = None
def __init__(self, input_file_path, data, options):
"""
        Base class for loading a dataset. Options can be specified and may be
        more specific to the subclasses.
:param input_file_path: path to the file being loaded or None
:type input_file_path: str
:param data: data being loaded into the class instead of an input file
:type data: multiple types
:param options: options pertaining to the data type
:type options: dict
:return: None
"""
if options is None:
options = {}
# Public properties
self.input_file_path = input_file_path
self.options = options
# 'Private' properties
# _data_formats: dict containing data_formats (key) and function
# calls (values) which take self._data and convert it
# into the desired data_format for output.
# _selected_data_format: user selected format in which to return data
# can only be of types in _data_formats
        # _data: data being stored in memory
# _batch_info: when iterating through batches, information about the
# iteration/permutation are necessary to be held to keep
# constant across function calls.
        # _tmp_file_name: randomly generated file name usable by the system
# _file_encoding: contains the suggested file encoding for reading data
self._data_formats = OrderedDict()
self._selected_data_format = None
self._data = data
self._batch_info = dict(perm=list(), iter=0)
self._tmp_file_name = None
self._file_encoding = options.get('encoding', None)
@property
def data(self):
if self._data is None:
self._load_data()
allowed_data_formats = list(self._data_formats.keys())
if not self._selected_data_format:
return self._data
elif self._selected_data_format in allowed_data_formats:
return self._data_formats[self._selected_data_format](self._data)
else:
raise ValueError(
"The data format must be one of the following: {}".format(
str(allowed_data_formats)
)
)
@property
def data_format(self):
return self._selected_data_format
@property
def is_structured(self):
"""
Determines compatibility with StructuredProfiler
"""
raise NotImplementedError
@data_format.setter
def data_format(self, value):
allowed_data_formats = list(self._data_formats.keys())
if value.lower() not in allowed_data_formats:
raise ValueError(
"The data format must be one of the following: {}".format(
str(allowed_data_formats)
)
)
self._selected_data_format = value.lower()
@property
def file_encoding(self):
if not self._file_encoding:
# get system default, but if set to ascii, just update to utf-8
file_encoding = "utf-8"
try:
file_encoding = locale.getpreferredencoding(False)
            except Exception:
file_encoding = sys.getfilesystemencoding()
finally:
if file_encoding.lower() in ['ansi_x3.4-1968', 'ascii']:
file_encoding = 'utf-8'
self._file_encoding = file_encoding
# set to default, detect if not StringIO
if self.input_file_path \
and not isinstance(self.input_file_path, StringIO):
self._file_encoding = data_utils.detect_file_encoding(
self.input_file_path)
return self._file_encoding
@file_encoding.setter
def file_encoding(self, value):
valid_user_set_encodings = [
"ascii", "utf-8", "utf-16", "utf-32"
]
if not value or value.lower() not in valid_user_set_encodings:
raise ValueError(
"File Encoding must be one of the following: {}"
.format(valid_user_set_encodings)
)
self._file_encoding = value
@staticmethod
def _check_and_return_options(options):
if not options:
options = dict()
elif not isinstance(options, dict):
raise ValueError("Options must be a dictionary.")
return options
def _load_data(self, data=None):
raise NotImplementedError()
def get_batch_generator(self, batch_size):
data_length = len(self.data)
indices = np.random.permutation(data_length)
for i in range(0, data_length, batch_size):
if isinstance(self.data, pd.DataFrame):
yield self.data.iloc[indices[i:i + batch_size], :]
else:
yield list(self.data[k] for k in indices[i:i + batch_size])
@classmethod
def is_match(cls, input_file_path, options):
raise NotImplementedError()
def reload(self, input_file_path, data, options):
"""
Reload the data class with a new dataset. This erases all existing
data/options and replaces it with the input data/options.
:param input_file_path: path to the file being loaded or None
:type input_file_path: str
:param data: data being loaded into the class instead of an input file
:type data: multiple types
:param options: options pertaining to the data type
:type options: dict
:return: None
"""
if input_file_path and not self.is_match(input_file_path, options):
raise ValueError(
"Reloaded dataset does not match the specified data_type"
)
elif input_file_path:
self.input_file_path = input_file_path
self._data = None
self._tmp_file_name = None
self.options = None
self._batch_info = dict(perm=list(), iter=0)
def __len__(self):
"""
Returns the length of the dataset which is loaded.
:return: length of the dataset
"""
return len(self.data)
@property
def length(self):
"""
Returns the length of the dataset which is loaded.
:return: length of the dataset
"""
return len(self)
def __getattribute__(self, name):
"""
Overrides getattr for the class such that functions can be applied
directly to the data class if the function is not part of the data
class.
e.g. if data is BaseData where self.data = [1, 2, 3, 1]
```
data.count(1) # returns 2, bc data.data has the function 'count'
```
"""
try:
returned = object.__getattribute__(self, name)
except AttributeError as attr_error:
class_name = self.__class__.__name__
data_class_name = self.data.__class__.__name__
if (not f"'{class_name}' object has no attribute '{name}'"
== str(attr_error)):
raise
try:
returned = object.__getattribute__(self.data, name)
except AttributeError as attr_error:
if (not f"'{data_class_name}' object has no attribute '{name}'"
== str(attr_error)):
raise
raise AttributeError(f"Neither '{class_name}' nor "
f"'{data_class_name}' objects have "
f"attribute '{name}'")
return returned
| 0.483892 | 0.45302 |
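base_data.py above leaves _load_data() and is_match() to subclasses and forwards unknown attribute lookups to the wrapped data object via __getattribute__. The minimal subclass below is an illustrative sketch (ListData is not part of dataprofiler) showing both hook points and the delegation.
class ListData(BaseData):
    data_type = 'list'
    @property
    def is_structured(self):
        return False
    def _load_data(self, data=None):
        self._data = data or []
    @classmethod
    def is_match(cls, input_file_path, options):
        return False
d = ListData(None, [1, 2, 3, 1], None)
print(len(d), d.count(1))  # __len__ comes from BaseData; count() is delegated to the underlying list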
from __future__ import absolute_import
import logging
import os
import time
import click
import cachetools
import functools
import itertools
import re
from pathlib import Path
import pandas as pd
try:
import cPickle as pickle
except ImportError:
import pickle
from datacube.ui import click as dc_ui
from datacube.utils import read_documents
_LOG = logging.getLogger(__name__)
@cachetools.cached(cache={}, key=lambda index, id_: id_)
def get_full_lineage(index, id_):
return index.datasets.get(id_, include_sources=True)
def load_config(index, app_config_file, make_config, make_tasks, *args, **kwargs):
app_config_path = Path(app_config_file)
_, config = next(read_documents(app_config_path))
config['app_config_file'] = app_config_path.name
config['task_timestamp'] = int(time.time())
config = make_config(index, config, **kwargs)
tasks = make_tasks(index, config, **kwargs)
return config, iter(tasks)
def pickle_stream(objs, filename):
idx = 0
with open(filename, 'wb') as stream:
for idx, obj in enumerate(objs, start=1):
pickle.dump(obj, stream, pickle.HIGHEST_PROTOCOL)
return idx
def unpickle_stream(filename):
with open(filename, 'rb') as stream:
while True:
try:
yield pickle.load(stream)
except EOFError:
break
def save_tasks(config, tasks, taskfile):
"""Saves the config
:param config: dict of configuration options common to all tasks
:param tasks:
:param str taskfile: Name of output file
:return: Number of tasks saved to the file
"""
i = pickle_stream(itertools.chain([config], tasks), taskfile)
if i <= 1:
# Only saved the config, no tasks!
os.remove(taskfile)
return 0
else:
_LOG.info('Saved config and %d tasks to %s', i, taskfile)
return i - 1
def load_tasks(taskfile):
stream = unpickle_stream(taskfile)
config = next(stream)
return config, stream
# This is a function, so it's valid to be lowercase.
#: pylint: disable=invalid-name
app_config_option = click.option('--app-config', help='App configuration file',
type=click.Path(exists=True, readable=True, writable=False, dir_okay=False))
#: pylint: disable=invalid-name
load_tasks_option = click.option('--load-tasks', 'input_tasks_file', help='Load tasks from the specified file',
type=click.Path(exists=True, readable=True, writable=False, dir_okay=False))
#: pylint: disable=invalid-name
save_tasks_option = click.option('--save-tasks', 'output_tasks_file', help='Save tasks to the specified file',
type=click.Path(exists=False))
#: pylint: disable=invalid-name
queue_size_option = click.option('--queue-size', help='Number of tasks to queue at the start',
type=click.IntRange(1, 100000), default=3200)
#: pylint: disable=invalid-name
task_app_options = dc_ui.compose(
app_config_option,
load_tasks_option,
save_tasks_option,
dc_ui.config_option,
dc_ui.verbose_option,
dc_ui.log_queries_option,
dc_ui.executor_cli_options,
)
def _cell_list_from_file(filename):
cell_matcher = re.compile(r'(-?\d+)(?:\s*(?:,|_|\s)\s*)(-?\d+)')
with open(filename) as cell_file:
for line in cell_file:
match = cell_matcher.match(line)
if match:
yield tuple(int(i) for i in match.groups())
def cell_list_to_file(filename, cell_list):
with open(filename, 'w') as cell_file:
for cell in cell_list:
cell_file.write('{0},{1}\n'.format(*cell))
def validate_cell_list(ctx, param, value):
try:
if value is None:
return None
return list(_cell_list_from_file(value))
except ValueError:
raise click.BadParameter('cell_index_list must be a file with lines in the form "14,-11"')
def validate_cell_index(ctx, param, value):
try:
if value is None:
return None
return tuple(int(i) for i in value.split(',', 2))
except ValueError:
raise click.BadParameter('cell_index must be specified in the form "14,-11"')
def validate_year(ctx, param, value):
try:
if value is None:
return None
years = [pd.Period(y) for y in value.split('-', 2)]
return years[0].start_time.to_pydatetime(warn=False), years[-1].end_time.to_pydatetime(warn=False)
except ValueError:
raise click.BadParameter('year must be specified as a single year (eg 1996) '
'or as an inclusive range (eg 1996-2001)')
def break_query_into_years(time_query, **kwargs):
if time_query is None:
return [kwargs]
return [dict(time=time_range, **kwargs) for time_range in year_splitter(*time_query)]
def year_splitter(start, end):
"""
    Produces a sequence of time ranges, one for each year in the range.
`year_splitter('1992', '1993')` returns:
::
[('1992-01-01 00:00:00', '1992-12-31 23:59:59.9999999'),
('1993-01-01 00:00:00', '1993-12-31 23:59:59.9999999')]
:param str start: start year
:param str end: end year
:return Generator[tuple(str, str)]: strings representing the ranges
"""
start_ts = pd.Timestamp(start)
end_ts = pd.Timestamp(end)
for p in pd.period_range(start=start_ts, end=end_ts, freq='A'):
yield str(p.start_time), str(p.end_time)
#: pylint: disable=invalid-name
cell_index_option = click.option('--cell-index', 'cell_index',
help='Limit the process to a particular cell (e.g. 14,-11)',
callback=validate_cell_index, default=None)
#: pylint: disable=invalid-name
cell_index_list_option = click.option('--cell-index-list', 'cell_index_list',
help='Limit the process to a file of cells indexes (e.g. 14,-11)',
callback=validate_cell_list, default=None)
#: pylint: disable=invalid-name
year_option = click.option('--year', 'time', help='Limit the process to a particular year',
callback=validate_year)
def task_app(make_config, make_tasks):
"""
Create a `Task App` from a function
Decorates a function
:param make_config: callable(index, config, **query)
:param make_tasks: callable(index, config, **kwargs)
    :return: decorator that wraps the application function with config/task loading and saving
"""
def decorate(app_func):
def with_app_args(index, app_config=None, input_tasks_file=None, output_tasks_file=None, *args, **kwargs):
if (app_config is None) == (input_tasks_file is None):
click.echo('Must specify exactly one of --app-config, --load-tasks')
click.get_current_context().exit(1)
if app_config is not None:
config, tasks = load_config(index, app_config, make_config, make_tasks, *args, **kwargs)
if input_tasks_file:
config, tasks = load_tasks(input_tasks_file)
if output_tasks_file:
num_tasks_saved = save_tasks(config, tasks, output_tasks_file)
return num_tasks_saved != 0
return app_func(index, config, tasks, *args, **kwargs)
return functools.update_wrapper(with_app_args, app_func)
return decorate
def check_existing_files(paths):
"""Check for existing files and optionally delete them.
:param paths: sequence of path strings or path objects
"""
click.echo('Files to be created:')
existing_files = []
total = 0
for path in paths:
total += 1
file_path = Path(path)
file_info = ''
if file_path.exists():
existing_files.append(file_path)
file_info = ' - ALREADY EXISTS'
click.echo('{}{}'.format(path, file_info))
if existing_files:
if click.confirm('There were {} existing files found that are not indexed. Delete those files now?'.format(
len(existing_files))):
for file_path in existing_files:
file_path.unlink()
click.echo('{total} tasks files to be created ({valid} valid files, {invalid} existing paths)'.format(
total=total, valid=total - len(existing_files), invalid=len(existing_files)
))
def add_dataset_to_db(index, datasets):
for dataset in datasets.values:
index.datasets.add(dataset, with_lineage=False)
_LOG.info('Dataset added')
def do_nothing(result):
pass
def _wrap_impl(f, args, kwargs, task):
"""
Helper method, needs to be at the top level
"""
return f(task, *args, **kwargs)
def wrap_task(f, *args, **kwargs):
"""
Turn function `f(task, *args, **kwargs)` into `g(task)` in pickle-able fashion
"""
return functools.partial(_wrap_impl, f, args, kwargs)
def run_tasks(tasks, executor, run_task, process_result=None, queue_size=50):
"""
:param tasks: iterable of tasks. Usually a generator to create them as required.
:param executor: a datacube executor, similar to `distributed.Client` or `concurrent.futures`
:param run_task: the function used to run a task. Expects a single argument of one of the tasks
:param process_result: a function to do something based on the result of a completed task. It
takes a single argument, the return value from `run_task(task)`
:param queue_size: How large the queue of tasks should be. Will depend on how fast tasks are
processed, and how much memory is available to buffer them.
"""
click.echo('Starting processing...')
process_result = process_result or do_nothing
results = []
task_queue = itertools.islice(tasks, queue_size)
for task in task_queue:
_LOG.info('Running task: %s', task.get('tile_index', str(task)))
results.append(executor.submit(run_task, task=task))
click.echo('Task queue filled, waiting for first result...')
successful = failed = 0
while results:
result, results = executor.next_completed(results, None)
        # submit a new task to replace the one we just finished
task = next(tasks, None)
if task:
_LOG.info('Running task: %s', task.get('tile_index', str(task)))
results.append(executor.submit(run_task, task=task))
# Process the result
try:
actual_result = executor.result(result)
process_result(actual_result)
successful += 1
except Exception as err: # pylint: disable=broad-except
_LOG.exception('Task failed: %s', err)
failed += 1
continue
finally:
            # Release the task to free memory so there is no leak in executor/scheduler/worker process
executor.release(result)
click.echo('%d successful, %d failed' % (successful, failed))
|
datacube/ui/task_app.py
| 0.574037 | 0.104981 |
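A hedged sketch of how the pieces above are usually wired together: `task_app_options` contributes the CLI flags, `@task_app(...)` resolves `config` and `tasks` from either --app-config or --load-tasks, and `run_tasks` drives the executor. The command name, the bodies of `make_config`/`make_tasks` and `do_work` are hypothetical placeholders, and the decorator ordering plus the `executor` keyword are assumptions based on the usual datacube task-app pattern rather than anything guaranteed by this file.

import click
from datacube.ui import click as dc_ui
from datacube.ui.task_app import task_app, task_app_options, run_tasks


def make_config(index, config, **query):
    # hypothetical: return the parsed app config unchanged
    return config


def make_tasks(index, config, **kwargs):
    # hypothetical: one task per made-up tile index
    return ({'tile_index': cell, 'config': config} for cell in [(14, -11), (15, -11)])


def do_work(task):
    # hypothetical per-task work; kept at module level so it stays picklable
    return task['tile_index']


@click.command(name='my-task-app')                 # hypothetical command name
@dc_ui.pass_index('my-task-app')
@task_app_options
@task_app(make_config=make_config, make_tasks=make_tasks)
def my_task_app(index, config, tasks, executor=None, **kwargs):
    # index, config and tasks are injected by @task_app; executor is assumed to be
    # supplied by dc_ui.executor_cli_options (composed into task_app_options).
    run_tasks(tasks, executor, do_work)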
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
class Config(object):
    """
    Configuration parameters
    """
    def __init__(self, dataset):
        self.model_name = 'DPCNN'
        self.class_list = [x.strip() for x in open(
            dataset + '/data/class.txt', encoding='utf-8').readlines()]  # list of class names
        self.save_path = dataset + '/model/saved_dict/' + self.model_name + '.ckpt'  # where the trained model is saved
        self.log_path = dataset + '/logs/' + self.model_name  # log directory
        self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')  # device
        self.dropout = 0.5  # dropout rate
        self.require_improvement = 2000  # stop training early if no improvement after this many batches
        self.num_classes = len(self.class_list)  # number of classes
        self.n_vocab = 50000  # vocabulary size (assigned at runtime)
        self.num_epochs = 20  # number of epochs
        self.batch_size = 128  # mini-batch size
        self.pad_size = 32  # every sentence is padded or truncated to this length
        self.learning_rate = 1e-3  # learning rate
        self.embed = 300  # word-embedding dimension
        self.num_filters = 250  # number of convolution filters (channels)
class Model(nn.Module):
"""
深度卷积神经网络用作文本分类
"""
def __init__(self, config):
super(Model, self).__init__()
self.embedding = nn.Embedding(config.n_vocab, config.embed, padding_idx=0)
self.conv_region = nn.Conv2d(1, config.num_filters, (3, config.embed), stride=1)
self.conv = nn.Conv2d(config.num_filters, config.num_filters, (3, 1), stride=1)
self.max_pool = nn.MaxPool2d(kernel_size=(3, 1), stride=2)
self.padding1 = nn.ZeroPad2d((0, 0, 1, 1)) # top bottom
self.padding2 = nn.ZeroPad2d((0, 0, 0, 1)) # bottom
self.relu = nn.ReLU()
self.fc = nn.Linear(config.num_filters, config.num_classes)
def forward(self, x):
x = self.embedding(x)
        x = x.unsqueeze(1)  # [batch_size, 1, seq_len, embed]
x = self.conv_region(x) # [batch_size, 250, seq_len-3+1, 1]
x = self.padding1(x) # [batch_size, 250, seq_len, 1]
x = self.relu(x)
x = self.conv(x) # [batch_size, 250, seq_len-3+1, 1]
x = self.padding1(x) # [batch_size, 250, seq_len, 1]
x = self.relu(x)
x = self.conv(x) # [batch_size, 250, seq_len-3+1, 1]
while x.size()[2] > 2:
x = self._block(x)
x = x.squeeze() # [batch_size, num_filters(250)]
x = self.fc(x)
return x
    def _block(self, x):
        x = self.padding2(x)  # pad along the sequence dimension; keep the batch dimension
px = self.max_pool(x)
x = self.padding1(px)
x = F.relu(x)
x = self.conv(x)
x = self.padding1(x)
x = F.relu(x)
x = self.conv(x)
# Short Cut
x = x + px
return x
|
src/DL/models/DPCNN.py
| 0.714329 | 0.305185 |
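A minimal smoke-test sketch for the DPCNN `Model` above (with the `_block` batch-dimension fix applied). It sidesteps `Config`, which reads class.txt from disk, by using a hypothetical `SimpleNamespace` carrying only the attributes the model reads; the token ids are random and the class count of 10 is made up.

import torch
from types import SimpleNamespace

# Assumes the Model class defined above is in scope (e.g. imported from DPCNN.py).
cfg = SimpleNamespace(n_vocab=50000, embed=300, num_filters=250, num_classes=10)
model = Model(cfg)

tokens = torch.randint(0, cfg.n_vocab, (4, 32))    # batch of 4 sequences of length pad_size=32
logits = model(tokens)
print(logits.shape)                                # expected: torch.Size([4, 10])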
import datetime as dt
from ravenpy.models import GR4JCN
from ravenpy.utilities.testdata import get_local_testdata
"""
Test to perform a hindcast using Caspar data on THREDDS.
Currently only runs GEPS, eventually will run GEPS, GDPS, REPS and RDPS.
To do so will need to add the actual data from Caspar, currently being downloaded
but this is a good proof of concept.
"""
class TestHindcasting:
def test_hindcasting_GEPS(self, tmpdir):
# Prepare a RAVEN model run using historical data, GR4JCN in this case.
# This is a dummy run to get initial states. In a real forecast situation,
# this run would end on the day before the forecast, but process is the same.
ts = get_local_testdata(
"raven-gr4j-cemaneige/Salmon-River-Near-Prince-George_meteo_daily.nc"
)
model = GR4JCN(workdir=tmpdir)
model(
ts,
start_date=dt.datetime(2000, 1, 1),
end_date=dt.datetime(2002, 6, 1),
area=44250.6,
elevation=843.0,
latitude=54.4848,
longitude=-123.3659,
params=(0.529, -3.396, 407.29, 1.072, 16.9, 0.947),
)
# Extract the final states that will be used as the next initial states
rvc = model.outputs["solution"]
ts20 = get_local_testdata("caspar_eccc_hindcasts/geps_watershed.nc")
nm = 20
        # The model state must be reset because the input variables of the historical run
        # differ from those provided in the forecast data. Without a fresh model, the hindcast
        # file would simply be appended to the list of available data from the testdata above,
        # the dates would no longer line up, and the model run would fail.
model = GR4JCN()
model.rvc.parse(rvc.read_text())
# And run the model with the forecast data.
model(
ts=ts20,
nc_index=range(nm),
start_date=dt.datetime(2018, 6, 1),
end_date=dt.datetime(2018, 6, 10),
area=44250.6,
elevation=843.0,
latitude=54.4848,
longitude=-123.3659,
params=(0.529, -3.396, 407.29, 1.072, 16.9, 0.947),
overwrite=True,
pr={
"linear_transform": (1000.0, 0.0),
"time_shift": -0.25,
"deaccumulate": True,
},
tas={"time_shift": -0.25},
)
# The model now has the forecast data generated and it has 10 days of forecasts.
assert len(model.q_sim.values) == 10
# Also see if GEPS has 20 members produced.
assert model.q_sim.values.shape[1] == nm
|
tests/test_hindcasting.py
| 0.581897 | 0.602383 |
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.checkpoint as cp
from fairseq.incremental_decoding_utils import with_incremental_state
@with_incremental_state
class MaskedConvolution(nn.Conv2d):
""" 2d convolution with masked kernel """
def __init__(self,
in_channels,
out_channels,
kernel_size,
padding,
stride=1,
groups=1,
bias=False,
unidirectional=False,
source_dilation=1,
target_dilation=1):
stride = (1, stride) # source dimension stride
self.dilsrc = source_dilation
self.diltrg = target_dilation
super().__init__(in_channels,
out_channels,
kernel_size,
stride=stride,
padding=padding,
dilation=(self.diltrg, self.dilsrc),
bias=bias,
groups=groups)
self.inc = in_channels
self.outc = out_channels
self.kernel_size = kernel_size
self.pad = padding
mask = self.build_mask(unidirectional)
self.register_buffer('mask', mask)
# print('Mask:', self.mask)
def build_mask(self, unidirectional=False):
mask = torch.ones_like(self.weight)
if self.kernel_size > 1:
mask[:, :, self.kernel_size // 2 + 1:, :] = 0
if unidirectional:
mask[:, :, :, self.kernel_size // 2 + 1:] = 0
assert(mask.shape == self.weight.shape), \
"Mask of shape {} must match weights of shape {}" \
.format(mask.shape, self.weight.shape)
return mask
def forward_with_update(self, x):
self.weight.data *= self.mask
x = super().forward(x)
return x
def forward(self, x, incremental_state=None):
self.weight.data *= self.mask
saved_state = None
if incremental_state is not None:
# check saved context and append it to the input
saved_state = self._get_input_buffer(incremental_state)
if 'activations' in saved_state:
xprev = saved_state['activations'] # B, C, hist, Ts
diff = x.size(-1) - xprev.size(-1)
if diff > 0:
pd = xprev.new_zeros((xprev.size(0), xprev.size(1), xprev.size(2), diff))
xprev = torch.cat((xprev, pd), dim=-1)
elif diff < 0:
xprev = xprev[...,:diff]
x = torch.cat((xprev, x), dim=2)
# cache the input
hist = min(x.size(1), (self.kernel_size // 2)*self.diltrg)
self._set_input_buffer(incremental_state,
{'activations': x[:, :, -hist:]})
x = super().forward(x)
if saved_state is not None:
# Return the last token
x = x[:, :, -1:]
return x
def _get_input_buffer(self, incremental_state):
return self.get_incremental_state(
incremental_state,
'conv_state',
) or {}
def _set_input_buffer(self, incremental_state, buffer):
self.set_incremental_state(
incremental_state,
'conv_state',
buffer,
)
def reorder_incremental_state(self, incremental_state, new_order):
"""Reorder buffered internal state (for incremental generation)."""
input_buffer = self._get_input_buffer(incremental_state)
if input_buffer is not None:
for k in input_buffer.keys():
input_buffer[k] = input_buffer[k].index_select(0, new_order)
self._set_input_buffer(incremental_state, input_buffer)
|
examples/pervasive/modules/masked_convolution.py
| 0.916044 | 0.39636 |
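A small self-contained sketch of the masking idea in `build_mask` above: entries below the kernel's centre row (future target positions) are zeroed, and in the unidirectional case entries to the right of the centre column (future source positions) are zeroed as well. Shapes here are purely illustrative.

import torch

def toy_mask(kernel_size, unidirectional=False):
    # weight layout matches nn.Conv2d: [out_channels, in_channels, kH, kW]
    mask = torch.ones(1, 1, kernel_size, kernel_size)
    if kernel_size > 1:
        mask[:, :, kernel_size // 2 + 1:, :] = 0       # hide future target rows
        if unidirectional:
            mask[:, :, :, kernel_size // 2 + 1:] = 0   # hide future source columns
    return mask

print(toy_mask(3)[0, 0])
# tensor([[1., 1., 1.],
#         [1., 1., 1.],
#         [0., 0., 0.]])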
import zmq.green as zmq
import gevent
from gevent.queue import PriorityQueue
import yaml
from .. import messages
from itertools import count
import logging
logger = logging.getLogger('ansible_automata.connectors.zmq')
class ZMQEventChannel(object):
def __init__(self, fsm_registry, connector_registry, configuration):
self.fsm_registry = fsm_registry
self.connector_registry = connector_registry
self.context = zmq.Context.instance()
self.socket = self.context.socket(zmq.ROUTER)
if 'bind_port' in configuration:
self.socket_port = configuration.get('bind_port')
self.socket.bind('tcp://{0}:{1}'.format(configuration.get('bind_address', '127.0.0.1'),
self.socket_port))
else:
self.socket_port = self.socket.bind_to_random_port('tcp://{0}'.format(configuration.get('bind_address', '127.0.0.1')))
logger.info('starting zmq_thread')
self.zmq_thread = gevent.spawn(self.receive_external_messages)
self.inbox_thread = gevent.spawn(self.receive_internal_messages)
self.inbox = PriorityQueue()
self.message_id_seq = count(0)
self.client_id_seq = count(0)
self.clients = dict()
def receive_internal_messages(self):
while True:
gevent.sleep(0.1)
logger.info("Waiting for messages")
priority, order, message = self.inbox.get()
message_type = message.name
logger.info('Received %s', message_type)
if 'client_id' in message.data and message.data['client_id'] in self.clients:
#Unicast
logger.info("Unicasting message to %s aka %r", message.data['client_id'], self.clients[message.data['client_id']])
msg = [self.clients[message.data['client_id']]]
msg.extend(messages.serialize(message))
self.socket.send_multipart(msg)
else:
#Broadcast
logger.info("Broadcasting message to all listening clients")
                for zmq_client_id in list(self.clients.values()):
                    msg = [zmq_client_id]
msg.extend(messages.serialize(message))
self.socket.send_multipart(msg)
def receive_external_messages(self):
while True:
to_fsm_id = None
from_fsm_id = None
zmq_client_id = None
logger.info('waiting on recv_multipart')
message = self.socket.recv_multipart()
logger.info(repr(message))
zmq_client_id = message.pop(0)
client_id = str(next(self.client_id_seq))
self.clients[client_id] = zmq_client_id
try:
msg_type = message.pop(0).decode()
msg_data = yaml.safe_load(message.pop(0).decode())
if b'Listening' in message:
msg_data['data']['client_id'] = client_id
logger.info(repr(msg_type))
logger.info(repr(msg_data))
except Exception as e:
self.socket.send_multipart([zmq_client_id, b'Error'])
logger.error(str(e))
continue
if not isinstance(msg_type, str):
self.socket.send_multipart([zmq_client_id, 'Element 1 should be str was {}'.format(type(msg_type)).encode()])
logger.error([zmq_client_id, 'Element 1 should be str was {}'.format(type(msg_type)).encode()])
continue
if not isinstance(msg_data, dict):
self.socket.send_multipart([zmq_client_id, 'Element 2 should be a dict was {}'.format(type(msg_data)).encode()])
logger.error([zmq_client_id, 'Element 2 should be a dict was {}'.format(type(msg_data)).encode()])
continue
to_fsm_id = msg_data.get('to_fsm_id', None)
from_fsm_id = msg_data.get('from_fsm_id', None)
if not from_fsm_id:
from_fsm_id = 'zmq'
if to_fsm_id in self.fsm_registry:
logger.info('Sending to FSM {} from {}'.format(to_fsm_id, from_fsm_id))
self.fsm_registry[to_fsm_id].inbox.put((1,
next(self.fsm_registry[to_fsm_id].message_id_seq),
messages.Event(from_fsm_id,
to_fsm_id,
msg_data['name'],
msg_data['data'])))
logger.info('Processed')
self.socket.send_multipart([zmq_client_id, b'Processed'])
else:
logger.info('Not processed')
self.socket.send_multipart([zmq_client_id, b'Not processed'])
gevent.sleep(0)
|
ansible_automata/connectors/zmq.py
| 0.207616 | 0.049797 |
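A hedged sketch of a client for the `ZMQEventChannel` above. The ROUTER socket prepends the client identity frame on receipt, so a plain DEALER client sends only two frames: the message type and a YAML payload carrying `to_fsm_id`, `from_fsm_id`, `name` and `data`. The address, port and FSM id below are hypothetical.

import yaml
import zmq

context = zmq.Context.instance()
socket = context.socket(zmq.DEALER)        # DEALER, not REQ: no empty delimiter frame is expected
socket.connect('tcp://127.0.0.1:5555')     # hypothetical bind_address/bind_port

payload = {
    'to_fsm_id': 'my_fsm',                 # hypothetical id that would live in fsm_registry
    'from_fsm_id': 'external_client',
    'name': 'SomeEvent',
    'data': {'key': 'value'},
}
socket.send_multipart([b'Event', yaml.safe_dump(payload).encode()])

reply = socket.recv_multipart()            # expect [b'Processed'] or [b'Not processed']
print(reply)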
import config
import awb
import csv
import json
import requests
import re
emapping = {
"ekialdeko-nafarra": "Q752",
"erronkarikoa": "Q753",
"zaraitzukoa": "Q754",
"erdialdekoa-gipuzkera": "Q755",
"erdigunekoa-g": "Q756",
"beterrikoa": "Q757",
"tolosaldekoa": "Q758",
"sartaldekoa-g": "Q759",
"goierrikoa": "Q760",
"urolaldekoa": "Q761",
"sortaldekoa-g": "Q762",
"bidasokoa": "Q763",
"basaburukoa": "Q764",
"imozkoa": "Q765",
"larraungoa": "Q766",
"leitzaldekoa": "Q767",
"mendebalekoa-bizkaiera": "Q768",
"sartaldekoa-m": "Q769",
"arratiakoa": "Q770",
"laudiokoa": "Q771",
"mungialdekoa": "Q772",
"nerbioi-ibarrekoa": "Q773",
"orozkokoa": "Q774",
"txorierrikoa": "Q775",
"uribe-kostakoa": "Q776",
"sortaldekoa-m": "Q777",
"debabarrenekoa": "Q778",
"debaerdikoa": "Q779",
"debagoienekoa": "Q780",
"durangaldekoa": "Q781",
"lea-artibaikoa": "Q782",
"tartekoa-m": "Q783",
"busturialdekoa": "Q784",
"otxandio-ingurukoa": "Q785",
"nafar-lapurtarra": "Q786",
"erdigunekoa-nl": "Q787",
"baigorrikoa": "Q788",
"uztaritze-ingurukoa": "Q789",
"sartaldekoa-nl": "Q790",
"kostatarra": "Q791",
"sara-ainhoa-ingurukoa": "Q792",
"sortaldekoa-nl": "Q793",
"amikuzekoa": "Q794",
"arberoakoa": "Q795",
"beskoitzekoa": "Q796",
"garazikoa": "Q797",
"nafarra": "Q798",
"baztangoa": "Q799",
"erdigunekoa-n": "Q800",
"arakilgoa": "Q801",
"lantzekoa": "Q802",
"ultzamakoa": "Q803",
"hego-sartaldekoa": "Q804",
"burundakoa": "Q805",
"sakanakoa": "Q806",
"hegoaldeko-nafarra": "Q807",
"ipar-sartaldekoa": "Q808",
"bortzirietakoa": "Q809",
"malerrekakoa": "Q810",
"sortaldekoa-n": "Q811",
"aezkoakoa": "Q812",
"erroibarkoa": "Q813",
"esteribarkoa": "Q814",
"zuberotarra": "Q815",
"basaburua": "Q816",
"pettarrakoa": "Q817"}
with open('D:/Ahotsak/herriak/herriak_htmlak.csv', 'r', encoding="utf-8") as csvfile:
places = csv.DictReader(csvfile)
placelinks = ""
for place in places:
#print(str(lang))
qid = place['qid']
name = place['izena']
html = place['html']
if html != "None":
with open(html, "r", encoding="utf8") as htmlfile:
placehtml = htmlfile.read()
herria_info = placehtml.split('<div class="herria-info">')[1].split('<div class="herriko-pasarteak">')[0]
#print(herria_info)
try:
euskalki_link = re.search(r'href="/euskalkiak/([^"]+)">',herria_info).group(1)
euskalkia = emapping[re.search(r'/([^/]+)$', euskalki_link).group(1)]
                except (AttributeError, KeyError):
                    # no dialect link found, or its slug is missing from emapping
                    euskalkia = None
if euskalkia:
print('Euskalki of '+name+' is: '+euskalkia)
awb.itemclaim(qid,"P18",euskalkia)
|
herriak_ahotsak_parsehtml.py
| 0.052619 | 0.247822 |
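A quick illustration of the two regular expressions used above, run against a made-up fragment of the herria-info HTML; the link target is hypothetical but ends in a slug that really exists in `emapping`.

import re

herria_info = '<a href="/euskalkiak/mendebalekoa-bizkaiera/arratiakoa">Arratiakoa</a>'  # hypothetical snippet

euskalki_link = re.search(r'href="/euskalkiak/([^"]+)">', herria_info).group(1)
slug = re.search(r'/([^/]+)$', euskalki_link).group(1)
print(euskalki_link)   # mendebalekoa-bizkaiera/arratiakoa
print(slug)            # arratiakoa -> emapping['arratiakoa'] == 'Q770'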
class TrieNode:
# Initialize your data structure here.
def __init__(self):
# reference to related trie node
self.children = {}
# flag to determine if this node represents a word ending
self.word_end = False
def add(self, char):
self.children[char] = TrieNode()
class Trie:
def __init__(self):
self.root = TrieNode()
# @param {string} word
# @return {void}
# Inserts a word into the trie.
def insert(self, word):
node = self.root
for char in word:
if not char in node.children:
node.add(char)
node = node.children[char]
node.word_end = True
class Solution:
# @param {character[][]} board
# @param {string[]} words
# @return {string[]}
def findWords(self, board, words):
# base case
if len(words) == 0 or len(board) == 0:
return []
self.board = board
# create a trie for fast prefix lookup
self.trie = Trie()
for word in words:
self.trie.insert(word)
# visited flag for each char in board
self.visited = [[False] * len(board[0]) for i in range(len(board))]
# result set
self.result = []
# DFS search from all possible chars
for i in range(len(board)):
for j in range(len(board[0])):
self.traverse(i, j, "", self.trie.root)
return self.result
def traverse(self, row, colm, current, root):
# already visited before
if self.visited[row][colm]:
return
# word currently searched is not present
current_char = self.board[row][colm]
if not current_char in root.children:
return
# update current word
current += self.board[row][colm]
next_root = root.children[current_char]
# found a valid word
if next_root.word_end:
self.result.append(current)
next_root.word_end = False # prevent duplications
# mark current word as visited
self.visited[row][colm] = True
# move up
if row > 0:
self.traverse(row - 1, colm, current, next_root)
# move down
if row < len(self.board) - 1:
self.traverse(row + 1, colm, current, next_root)
        # move left
if colm > 0:
self.traverse(row, colm - 1, current, next_root)
# move right
if colm < len(self.board[0]) - 1:
self.traverse(row, colm + 1, current, next_root)
# backtrack
self.visited[row][colm] = False
s = Solution()
print(s.findWords([
    ['o','a','a','n'],
    ['e','t','a','e'],
    ['i','h','k','r'],
    ['i','f','l','v']
], ["oath","pea","eat","rain"]))
|
Word_Search 2.py
| 0.511961 | 0.481515 |
import pymysql
# Open the database connection
db = pymysql.connect("localhost", "root", "root", "api_dev", use_unicode=True, charset="utf8")
# Get a cursor for executing statements
cursor = db.cursor()
try:
    # Execute the SQL query
    cursor.execute("SELECT id, tags FROM problem")
    # Fetch all records
    results = cursor.fetchall()
for row in results:
id = row[0]
tags = row[1]
tagList = tags.split(',')
list = []
if id not in [1, 2, 3, 4]:
for i in tagList:
if 'Map' == i:
list.append('映射')
                elif 'Geometry' == i:
list.append('几何')
if 'Binary Indexed Tree' == i:
list.append('树状数组')
elif 'Segment Tree' == i:
list.append('线段树')
elif 'Queue' == i:
list.append('队列')
elif 'Brainteaser' == i:
list.append('谜题')
elif 'Sort' == i:
list.append('排序')
elif 'Tree' == i:
list.append('树')
elif 'Reservoir Sampling' == i:
list.append('蓄水池采样')
elif 'Depth-first Search' == i:
list.append('深度搜索')
elif 'Dynamic Programming' == i:
list.append('动态规划')
elif 'Greedy' == i:
list.append('贪心')
elif 'Heap' == i:
list.append('堆')
elif 'Bit Manipulation' == i:
list.append('位运算')
elif 'Binary Search' == i:
list.append('二分搜索')
elif 'Union Find' == i:
list.append('并查集')
elif 'Divide and Conquer' == i:
list.append('分治')
elif 'Design' == i:
list.append('设计')
elif 'Math' == i:
list.append('数学')
elif 'Graph' == i:
list.append('图')
elif 'Breadth-first Search' == i:
list.append('广度搜索')
elif 'Topological Sort' == i:
list.append('拓扑排序')
elif 'Backtracking' == i:
list.append('回溯')
elif 'String' == i:
list.append('字符串')
elif 'Two Pointers' == i:
list.append('双指针')
elif 'Minimax' == i:
list.append('极小化极大')
elif 'Array' == i:
list.append('数组')
elif 'Stack' == i:
list.append('栈')
elif 'Binary Search Tree' == i:
list.append('二叉树')
elif 'Hash Table' == i:
list.append('哈希表')
elif 'Linked List' == i:
list.append('链表')
elif 'Memoization' == i:
list.append('记忆化')
elif 'Trie' == i:
list.append('前缀树')
else:
print('!!!! => {}'.format(i))
if list:
tags = ' '.join(list)
print("id => {}, tagList = {}, tags = {}".format(id, tagList, tags))
                # SQL UPDATE statement (parameterized to avoid quoting and injection problems)
                sql = "UPDATE problem SET tags = %s WHERE id = %s"
                try:
                    # Execute the SQL statement with bound parameters
                    cursor.execute(sql, (tags, id))
                    # Commit the transaction
                    db.commit()
                except Exception as e:
                    # Roll back on error
                    db.rollback()
                    print(e)
except Exception as e:
    print("Error: unable to fetch data: {}".format(e))
# Close the database connection
db.close()
|
reformat.py
| 0.109319 | 0.186447 |
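The long if/elif chain above is equivalent to a lookup table; the sketch below shows that alternative with only a handful of the original tag/translation pairs filled in (the rest would be carried over the same way), so treat it as a refactoring idea rather than a drop-in replacement.

# Partial lookup table; the remaining pairs from the if/elif chain would be added the same way.
TAG_TRANSLATIONS = {
    'Map': '映射',
    'Geometry': '几何',
    'Binary Indexed Tree': '树状数组',
    'Segment Tree': '线段树',
    'Dynamic Programming': '动态规划',
    'Hash Table': '哈希表',
}

def translate_tags(tag_list):
    translated = []
    for tag in tag_list:
        if tag in TAG_TRANSLATIONS:
            translated.append(TAG_TRANSLATIONS[tag])
        else:
            print('!!!! => {}'.format(tag))
    return translated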
import numpy as np
import cv2
from pyquaternion import Quaternion
from Quaternion import Quat
import timeit
from scipy import stats
# seek_time = 40
source = 'france' #'own', 'france'
topic_dict = {'paris':'paris.mp4', 'diving':'ocean40.webm', 'venise':'venise.webm', 'roller':'roller65.webm',
'timelapse': 'newyork.webm', '0': 'conan1.mp4', '1':'skiing.mp4', '2':'alien.mp4', '3':'conan2.mp4',
'4':'surfing.mp4', '5': 'war.mp4','7':'football.mp4', '8': 'rhinos.mp4', '6': 'cooking.mp4'}
topic_list = [['paris', 'diving', 'venise', 'roller', 'timelapse', 'newyork'],
['0', '1', '2', '3', '4', '5', '6', '7', '8']]
topic_info_dict = {'paris': ['paris.mp4', 244.06047, 3840, 2048], 'timelapse': ['newyork.webm', 91.03333, 3840, 2048],
'3': ['conan2.mp4', 172.5724, 2560, 1440], '1': ['skiing.mp4', 201.13426, 2560, 1440],
'0': ['conan1.mp4', 164.1973, 2560, 1440], 'venise': ['venise.webm', 175.04, 3840, 2160],
'2': ['alien.mp4', 293.2333, 2560, 1440], '5': ['war.mp4', 655.0544, 2160, 1080],
'4': ['surfing.mp4', 205.7055, 2560, 1440], '7': ['football.mp4', 164.8, 2560, 1440],
'6': ['cooking.mp4', 451.12, 2560, 1440], 'diving': ['ocean40.webm', 372.23853, 3840, 2048],
'roller': ['roller65.webm', 69.0, 3840, 2048], '8': ['rhinos.mp4', 292.0584, 2560, 1440]}
# Goal: given a fixed time and a vector dataset, prepare the vec_map
# Step 1: read the movie, get its width and height
# Step 2: from w, h and the fixations with timestamps, generate the salient map
def geoy_to_phi(_geoy, _height):
d = (_height/2 - _geoy) * 1.0 / (_height/2)
s = -1 if d < 0 else 1
return s * np.arcsin(np.abs(d)) / np.pi * 180
def pixel_to_ang(_x, _y, _geo_h, _geo_w):
phi = geoy_to_phi(_x, _geo_h)
theta = -(_y * 1.0 / _geo_w) * 360
if theta < -180: theta = 360 + theta
return theta, phi
def extract_direction(_q):
v = [1, 0, 0]
return _q.rotate(v)
#CALCULATE DEGREE DISTANCE BETWEEN TWO 3D VECTORS
def unit_vector(vector):
return vector / np.linalg.norm(vector)
def degree_distance(v1, v2):
v1_u = unit_vector(v1)
v2_u = unit_vector(v2)
return np.arccos(np.clip(np.dot(v1_u, v2_u), -1.0, 1.0))/np.pi * 180
def gaussian_from_distance(_d, _gaussian_dict):
temp = np.around(_d, 1)
return _gaussian_dict[temp] if temp in _gaussian_dict else 0.0
def create_pixel_vecmap(_geo_h, _geo_w):
vec_map = np.zeros((_geo_h, _geo_w)).tolist()
for i in range(_geo_h):
for j in range(_geo_w):
theta, phi = pixel_to_ang(i, j, _geo_h, _geo_w)
t = Quat([0.0, theta, phi]).q #nolonger use Quat
q = Quaternion([t[3], t[2], -t[1], t[0]])
vec_map[i][j] = extract_direction(q)
return vec_map
def init(_topic, _seek_time, _var, _ratio=1.0/10):
gaussian_dict = {np.around(_d, 1):stats.multivariate_normal.pdf(_d, mean=0, cov=_var) for _d in np.arange(0.0, 180, .1 )}
video_name = topic_dict[_topic]
vcap = cv2.VideoCapture(video_name)#roller65.webm, paris.mp4; ocean40.webm; venise.webm
vcap.set(cv2.cv2.CAP_PROP_POS_MSEC, _seek_time * 1000)
width = vcap.get(cv2.cv2.CAP_PROP_FRAME_WIDTH) # float
height = vcap.get(cv2.cv2.CAP_PROP_FRAME_HEIGHT) # float
width = int(width * _ratio)
height = int(height * _ratio)
res, frame = vcap.read()
frameG = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
frameS = cv2.resize(frameG, (width, height))
vec_map = create_pixel_vecmap(height, width)
return width, height, frameS, vec_map, gaussian_dict
def create_salient(_fixation_list, _vec_map, _width, _height, _gaussian_dict, verbal=False):
idx = 0
heat_map = np.zeros((_height, _width))
for i in range(heat_map.shape[0]):
for j in range(heat_map.shape[1]):
qxy = _vec_map[i][j]
for fixation in _fixation_list:
q0 = fixation[1]
btime = timeit.default_timer()
d = degree_distance(q0, qxy)
dd_time = timeit.default_timer() - btime
heat_map[i, j] += 1.0 * gaussian_from_distance(d, _gaussian_dict)
gau_time = timeit.default_timer() - btime - dd_time
idx += 1
if not verbal: continue
if idx % 10000 == 0:
print(_width * _height, idx, i, j, heat_map[i, j], d, dd_time, gau_time)
if d < 5:
print('<5 degree: ---->', _width * _height, idx, i, j, heat_map[i, j], d, dd_time, gau_time)
return heat_map
def create_salient_list(_fixation_list, _vec_map, _width, _height, _gaussian_dict, verbal=False):
    # Same as create_salient, but returns one heat map per individual fixation;
    # summing them all together yields the heat_map produced by create_salient.
idx = 0
heat_map_list = np.zeros((len(_fixation_list), _height, _width))
for i in range(heat_map_list[0].shape[0]):
for j in range(heat_map_list[0].shape[1]):
qxy = _vec_map[i][j]
for k, fixation in enumerate(_fixation_list):
q0 = fixation[1]
btime = timeit.default_timer()
d = degree_distance(q0, qxy)
dd_time = timeit.default_timer() - btime
heat_map_list[k, i, j] += 1.0 * gaussian_from_distance(d, _gaussian_dict)
gau_time = timeit.default_timer() - btime - dd_time
idx += 1
if not verbal: continue
                if idx % 10000 == 0:
                    print(_width * _height, idx, i, j, heat_map_list[k, i, j], d, dd_time, gau_time)
                if d < 5:
                    print('<5 degree: ---->', _width * _height, idx, i, j, heat_map_list[k, i, j], d, dd_time, gau_time)
return heat_map_list
def get_frame_at_time(_topic, _seek_time, _ratio=.1, isgray=True):
video_name = topic_dict[_topic]
vcap = cv2.VideoCapture(video_name)#roller65.webm, paris.mp4; ocean40.webm; venise.webm
vcap.set(cv2.cv2.CAP_PROP_POS_MSEC, _seek_time * 1000)
width = vcap.get(cv2.cv2.CAP_PROP_FRAME_WIDTH) # float
height = vcap.get(cv2.cv2.CAP_PROP_FRAME_HEIGHT) # float
width = int(width * _ratio)
height = int(height * _ratio)
res, frame = vcap.read()
if isgray == True:
frameG = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
else:
frameG = frame
frameS = cv2.resize(frameG, (width, height))
return frameS
|
utils/get_fixation.py
|
import numpy as np
import cv2
from pyquaternion import Quaternion
from Quaternion import Quat
import timeit
from scipy import stats
# seek_time = 40
source = 'france' #'own', 'france'
topic_dict = {'paris':'paris.mp4', 'diving':'ocean40.webm', 'venise':'venise.webm', 'roller':'roller65.webm',
'timelapse': 'newyork.webm', '0': 'conan1.mp4', '1':'skiing.mp4', '2':'alien.mp4', '3':'conan2.mp4',
'4':'surfing.mp4', '5': 'war.mp4','7':'football.mp4', '8': 'rhinos.mp4', '6': 'cooking.mp4'}
topic_list = [['paris', 'diving', 'venise', 'roller', 'timelapse', 'newyork'],
['0', '1', '2', '3', '4', '5', '6', '7', '8']]
topic_info_dict = {'paris': ['paris.mp4', 244.06047, 3840, 2048], 'timelapse': ['newyork.webm', 91.03333, 3840, 2048],
'3': ['conan2.mp4', 172.5724, 2560, 1440], '1': ['skiing.mp4', 201.13426, 2560, 1440],
'0': ['conan1.mp4', 164.1973, 2560, 1440], 'venise': ['venise.webm', 175.04, 3840, 2160],
'2': ['alien.mp4', 293.2333, 2560, 1440], '5': ['war.mp4', 655.0544, 2160, 1080],
'4': ['surfing.mp4', 205.7055, 2560, 1440], '7': ['football.mp4', 164.8, 2560, 1440],
'6': ['cooking.mp4', 451.12, 2560, 1440], 'diving': ['ocean40.webm', 372.23853, 3840, 2048],
'roller': ['roller65.webm', 69.0, 3840, 2048], '8': ['rhinos.mp4', 292.0584, 2560, 1440]}
# goal: given a fixed time, and vector dataset, prepare the vec_map
# step 1: read the movie, get weight, height
# step 2: from w, h, fixation with timestam, generate salient map
def geoy_to_phi(_geoy, _height):
d = (_height/2 - _geoy) * 1.0 / (_height/2)
s = -1 if d < 0 else 1
return s * np.arcsin(np.abs(d)) / np.pi * 180
def pixel_to_ang(_x, _y, _geo_h, _geo_w):
phi = geoy_to_phi(_x, _geo_h)
theta = -(_y * 1.0 / _geo_w) * 360
if theta < -180: theta = 360 + theta
return theta, phi
def extract_direction(_q):
v = [1, 0, 0]
return _q.rotate(v)
#CALCULATE DEGREE DISTANCE BETWEEN TWO 3D VECTORS
def unit_vector(vector):
return vector / np.linalg.norm(vector)
def degree_distance(v1, v2):
v1_u = unit_vector(v1)
v2_u = unit_vector(v2)
return np.arccos(np.clip(np.dot(v1_u, v2_u), -1.0, 1.0))/np.pi * 180
def gaussian_from_distance(_d, _gaussian_dict):
temp = np.around(_d, 1)
return _gaussian_dict[temp] if temp in _gaussian_dict else 0.0
def create_pixel_vecmap(_geo_h, _geo_w):
vec_map = np.zeros((_geo_h, _geo_w)).tolist()
for i in range(_geo_h):
for j in range(_geo_w):
theta, phi = pixel_to_ang(i, j, _geo_h, _geo_w)
t = Quat([0.0, theta, phi]).q #nolonger use Quat
q = Quaternion([t[3], t[2], -t[1], t[0]])
vec_map[i][j] = extract_direction(q)
return vec_map
def init(_topic, _seek_time, _var, _ratio=1.0/10):
gaussian_dict = {np.around(_d, 1):stats.multivariate_normal.pdf(_d, mean=0, cov=_var) for _d in np.arange(0.0, 180, .1 )}
video_name = topic_dict[_topic]
vcap = cv2.VideoCapture(video_name)#roller65.webm, paris.mp4; ocean40.webm; venise.webm
vcap.set(cv2.cv2.CAP_PROP_POS_MSEC, _seek_time * 1000)
width = vcap.get(cv2.cv2.CAP_PROP_FRAME_WIDTH) # float
height = vcap.get(cv2.cv2.CAP_PROP_FRAME_HEIGHT) # float
width = int(width * _ratio)
height = int(height * _ratio)
res, frame = vcap.read()
frameG = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
frameS = cv2.resize(frameG, (width, height))
vec_map = create_pixel_vecmap(height, width)
return width, height, frameS, vec_map, gaussian_dict
def create_salient(_fixation_list, _vec_map, _width, _height, _gaussian_dict, verbal=False):
idx = 0
heat_map = np.zeros((_height, _width))
for i in range(heat_map.shape[0]):
for j in range(heat_map.shape[1]):
qxy = _vec_map[i][j]
for fixation in _fixation_list:
q0 = fixation[1]
btime = timeit.default_timer()
d = degree_distance(q0, qxy)
dd_time = timeit.default_timer() - btime
heat_map[i, j] += 1.0 * gaussian_from_distance(d, _gaussian_dict)
gau_time = timeit.default_timer() - btime - dd_time
idx += 1
if not verbal: continue
if idx % 10000 == 0:
print(_width * _height, idx, i, j, heat_map[i, j], d, dd_time, gau_time)
if d < 5:
print('<5 degree: ---->', _width * _height, idx, i, j, heat_map[i, j], d, dd_time, gau_time)
return heat_map
def create_salient_list(_fixation_list, _vec_map, _width, _height, _gaussian_dict, verbal=False):
#same as create_salient, but with indivial fixation
#sum all together heat_map will result in heat_map in create_salient
idx = 0
heat_map_list = np.zeros((len(_fixation_list), _height, _width))
for i in range(heat_map_list[0].shape[0]):
for j in range(heat_map_list[0].shape[1]):
qxy = _vec_map[i][j]
for k, fixation in enumerate(_fixation_list):
q0 = fixation[1]
btime = timeit.default_timer()
d = degree_distance(q0, qxy)
dd_time = timeit.default_timer() - btime
heat_map_list[k, i, j] += 1.0 * gaussian_from_distance(d, _gaussian_dict)
gau_time = timeit.default_timer() - btime - dd_time
idx += 1
if not verbal: continue
if idx % 10000 == 0:
print(_width * _height, idx, i, j, heat_map[i, j], d, dd_time, gau_time)
if d < 5:
print('<5 degree: ---->', _width * _height, idx, i, j, heat_map[i, j], d, dd_time, gau_time)
return heat_map_list
def get_frame_at_time(_topic, _seek_time, _ratio=.1, isgray=True):
video_name = topic_dict[_topic]
vcap = cv2.VideoCapture(video_name)#roller65.webm, paris.mp4; ocean40.webm; venise.webm
    vcap.set(cv2.CAP_PROP_POS_MSEC, _seek_time * 1000)
    width = vcap.get(cv2.CAP_PROP_FRAME_WIDTH)   # float
    height = vcap.get(cv2.CAP_PROP_FRAME_HEIGHT) # float
    width = int(width * _ratio)
    height = int(height * _ratio)
    res, frame = vcap.read()
    if isgray:
frameG = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
else:
frameG = frame
frameS = cv2.resize(frameG, (width, height))
return frameS
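# A minimal sanity-check sketch, assuming numpy (np) and scipy.stats (stats) are
# imported at the top of this file, as the functions above already require. It shows
# how degree_distance and gaussian_from_distance combine to weight a fixation's
# contribution by angular distance; the variance and test vectors are arbitrary
# illustrative choices.
def _demo_angular_weighting(var=20.0):
    gaussian_dict = {np.around(d, 1): stats.multivariate_normal.pdf(d, mean=0, cov=var)
                     for d in np.arange(0.0, 180, .1)}
    d = degree_distance([1.0, 0.0, 0.0], [0.0, 1.0, 0.0])  # orthogonal vectors -> ~90 degrees
    w_near = gaussian_from_distance(0.0, gaussian_dict)    # weight at the fixation centre
    w_far = gaussian_from_distance(d, gaussian_dict)       # weight 90 degrees away
    return d, w_near, w_far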
import csv
import datetime
import shutil
import sys
from functools import partial
import click
from dateutil import tz
from psycopg2._range import Range
from functools import singledispatch
from datacube.ui import click as ui
from datacube.ui.click import CLICK_SETTINGS
PASS_INDEX = ui.pass_index('datacube-search')
def printable_values(d):
return {k: printable(v) for k, v in d.items()}
def write_pretty(out_f, field_names, search_results, terminal_size=shutil.get_terminal_size()):
"""
Output in a human-readable text format. Inspired by psql's expanded output.
"""
terminal_width = terminal_size[0]
record_num = 1
field_header_width = max(len(name) for name in field_names)
field_output_format = '{:<' + str(field_header_width) + '} | {}'
for result in search_results:
separator_line = '-[ {} ]'.format(record_num)
separator_line += '-' * (terminal_width - len(separator_line) - 1)
click.echo(separator_line, file=out_f)
for name, value in sorted(result.items()):
click.echo(
field_output_format.format(name, printable(value)),
file=out_f
)
record_num += 1
def write_csv(out_f, field_names, search_results):
"""
Output as a CSV.
"""
writer = csv.DictWriter(out_f, tuple(sorted(field_names)))
writer.writeheader()
writer.writerows(
(
printable_values(d) for d in
search_results
)
)
OUTPUT_FORMATS = {
'csv': write_csv,
'pretty': write_pretty
}
@click.group(help="Search the Data Cube", context_settings=CLICK_SETTINGS)
@ui.global_cli_options
@click.option('-f',
type=click.Choice(list(OUTPUT_FORMATS)),
default='pretty', show_default=True,
help='Output format')
@click.pass_context
def cli(ctx, f):
ctx.obj['write_results'] = partial(OUTPUT_FORMATS[f], sys.stdout)
@cli.command()
@ui.parsed_search_expressions
@PASS_INDEX
@click.pass_context
def datasets(ctx, index, expressions):
"""
Search available Datasets
"""
ctx.obj['write_results'](
sorted(index.datasets.get_field_names()),
index.datasets.search_summaries(**expressions)
)
@cli.command('product-counts')
@click.argument('period', nargs=1)
@ui.parsed_search_expressions
@PASS_INDEX
def product_counts(index, period, expressions):
"""
Count product Datasets available by period
    PERIOD: e.g. 1 month, 6 months, 1 year
"""
for product, series in index.datasets.count_by_product_through_time(period, **expressions):
click.echo(product.name)
for timerange, count in series:
formatted_dt = _assume_utc(timerange[0]).strftime("%Y-%m-%d")
click.echo(' {}: {}'.format(formatted_dt, count))
@singledispatch
def printable(val):
return val
@printable.register(type(None))
def printable_none(val):
return ''
@printable.register(datetime.datetime)
def printable_dt(val):
"""
:type val: datetime.datetime
"""
return _assume_utc(val).isoformat()
def _assume_utc(val):
if val.tzinfo is None:
return val.replace(tzinfo=tz.tzutc())
else:
return val.astimezone(tz.tzutc())
@printable.register(Range)
def printable_r(val):
"""
:type val: psycopg2._range.Range
"""
if val.lower_inf:
return printable(val.upper)
if val.upper_inf:
return printable(val.lower)
return '{} to {}'.format(printable(val.lower), printable(val.upper))
if __name__ == '__main__':
cli()
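# A minimal, hedged sketch of invoking the click group in-process via click's test
# runner. The search expression 'product=ls8_nbar_albers' is only an illustrative
# value; real product names depend on the datacube index, and a reachable,
# configured index is assumed.
def _demo_cli_invocation():
    from click.testing import CliRunner
    runner = CliRunner()
    result = runner.invoke(cli, ['-f', 'csv', 'datasets', 'product=ls8_nbar_albers'])
    return result.output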
datacube/scripts/search_tool.py
from typing import List, Tuple
import torch
import torch.nn as nn
from OpenMatch.modules.embedders import Embedder
from OpenMatch.modules.encoders import Conv1DEncoder
from OpenMatch.modules.matchers import KernelMatcher
class ConvKNRM(nn.Module):
def __init__(
self,
vocab_size: int,
embed_dim: int,
kernel_num: int = 21,
kernel_dim: int = 128,
kernel_sizes: List[int] = [1, 2, 3],
embed_matrix: List[float] = None,
task: str = 'ranking'
) -> None:
super(ConvKNRM, self).__init__()
self._vocab_size = vocab_size
self._embed_dim = embed_dim
self._kernel_num = kernel_num
self._kernel_dim = kernel_dim
self._kernel_sizes = kernel_sizes
self._embed_matrix = embed_matrix
self._task = task
self._embedder = Embedder(self._vocab_size, self._embed_dim, self._embed_matrix)
self._encoder = Conv1DEncoder(self._embed_dim, self._kernel_dim, self._kernel_sizes)
self._matcher = KernelMatcher(self._encoder.get_output_dim(), self._kernel_num)
if self._task == 'ranking':
self._dense = nn.Linear(self._kernel_num * (len(self._kernel_sizes) ** 2), 1)
elif self._task == 'classification':
self._dense = nn.Linear(self._kernel_num * (len(self._kernel_sizes) ** 2), 2)
else:
raise ValueError('Task must be `ranking` or `classification`.')
def forward(self, query_ids: torch.Tensor, query_masks: torch.Tensor, doc_ids: torch.Tensor, doc_masks: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
query_embed = self._embedder(query_ids)
doc_embed = self._embedder(doc_ids)
_, query_encs = self._encoder(query_embed, query_masks)
_, doc_encs = self._encoder(doc_embed, doc_masks)
logits = torch.cat([self._matcher(query_enc, query_masks[:, :query_enc.size()[1]], doc_enc, doc_masks[:, :doc_enc.size()[1]])
for query_enc in query_encs for doc_enc in doc_encs], dim=1)
score = self._dense(logits).squeeze(-1)
return score, logits
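# A minimal, hedged usage sketch showing the tensor shapes forward() expects for a
# ranking model. The vocabulary size, batch size and sequence lengths below are
# arbitrary illustrative values, not values prescribed by OpenMatch itself.
def _demo_forward_pass():
    model = ConvKNRM(vocab_size=10000, embed_dim=100, task='ranking')
    query_ids = torch.randint(0, 10000, (2, 16))
    doc_ids = torch.randint(0, 10000, (2, 128))
    query_masks = torch.ones(2, 16)
    doc_masks = torch.ones(2, 128)
    score, logits = model(query_ids, query_masks, doc_ids, doc_masks)
    # score: (batch,); logits: (batch, kernel_num * len(kernel_sizes) ** 2)
    return score.shape, logits.shape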
OpenMatch/models/conv_knrm.py
# <markdowncell>
# # netCDF File Visualization Case Study
#
# I was asked by a colleague to visualize data contained within this [netCDF file](https://motherlode.ucar.edu/repository/entry/show/RAMADDA/Unidata/Staff/Julien+Chastang/netcdf-explore?entryid=c7239224-d3fe-45d8-b100-43ae043824c3) ([OPeNDAP link](https://motherlode.ucar.edu/repository/opendap/41f2b38a-4e70-4135-8ff8-dbf3d1dcbfc1/entry.das)) with Python. What follows is an exploration of how I achieved that objective. Because this exercise touches upon many technologies related to Unidata, it makes for an interesting case study. We will be meandering through,
#
# - netCDF
# - WMO GRIB metadata
# - Map projections
# - xray data analysis library
# - cartopy visualization library
# <markdowncell>
# # Crack Open the File
#
# To get our bearings let's see what there is inside our netCDF file. We will be using the [xray library](https://github.com/xray/xray) to dig inside our netCDF data. xray is similar to pandas, but for the [Common Data Model](http://www.unidata.ucar.edu/software/thredds/current/netcdf-java/CDM/). We could have just used the [netcdf4-python library](https://github.com/Unidata/netcdf4-python) but xray has output that is more nicely formatted. Let's first import xray and open the dataset.
# <codecell>
import xray
ds = xray.open_dataset('https://motherlode.ucar.edu/repository/opendap/41f2b38a-4e70-4135-8ff8-dbf3d1dcbfc1/entry.das',
decode_times=False)
print(ds)
# <markdowncell>
# # Dimensions, Coordinates, Data Variables
#
# As far as the dimensions and coordinates go, the most relevant and important coordinate variables are `x` and `y`. We can see that the data variables, such as temperature (`t`), mixing ratio (`mr`), and potential temperature (`th`), are mostly on a 1901 x 1801 grid. There is also the mysterious `nav` dimension and its associated data variables, which we will examine later.
#
# Let's set a goal of visualizing **potential temperature** with the [Cartopy](http://scitools.org.uk/cartopy/) plotting package.
#
# The first step is to get more information concerning the variables we are interested in. For example, let's look at _potential temperature_ or `th`.
# <codecell>
print(ds['th'])
# <markdowncell>
# # potential temperature (`th`)
#
# Let's grab the data array for potential temperature (`th`).
# <codecell>
th = ds['th'].values[0][0]
print(th)
# <markdowncell>
# # To Visualize the Data, We have to Decrypt the Projection
#
# In order to visualize data contained within a two-dimensional array on a map that represents a three-dimensional globe, we need to understand the projection of the data.
#
# We can make an educated guess that these are contained in the data variables associated with the `nav` coordinate variable.
#
# Specifically,
#
# - `grid_type`
# - `grid_type_code`
# - `x_dim`
# - `y_dim`
# - `Nx`
# - `Ny`
# - `La1`
# - `Lo1`
# - `LoV`
# - `Latin1`
# - `Latin2`
# - `Dx`
# - `Dy`
#
# **But what are these??**
# <headingcell level=1>
# For Grins, Let's Scrutinize the `grid_type_code`
# <codecell>
print(ds['grid_type_code'])
# <markdowncell>
# # Google to the Rescue
#
# A simple Google search of `GRIB-1 GDS data representation type` takes us to
# [A GUIDE TO THE CODE FORM FM 92-IX Ext. GRIB Edition 1 from 1994](http://www.wmo.int/pages/prog/www/WMOCodes/Guides/GRIB/GRIB1-Contents.html "GRIB") document. Therein one can find an explanation of the variables needed to understand the map projection. Let's review these variables.
# <codecell>
print(ds['grid_type_code'].values[0])
# <markdowncell>
# # What is `grid_type_code` of `5`?
#
# Let's look at [Table 6 ](http://www.wmo.int/pages/prog/www/WMOCodes/Guides/GRIB/GRIB1-Contents.html "GRIB Projection Definitions"). A `grid_type_code` of `5` corresponds to a projection of **Polar Stereographic**.
# <headingcell level=1>
# Next up `grid_type`
# <codecell>
grid_type = ds['grid_type'].values
print('The grid type is ', grid_type[0])
# <markdowncell>
# # Uh oh! Polar Stereographic or Lambert Conformal??
#
# _Note that this newest piece of information relating to a Lambert Conformal projection disagrees with the earlier projection information about a Polar Stereographic projection._ There is a **bug** in the metadata description of the projection.
# <markdowncell>
# # Moving on Anyway, next `Nx` and `Ny`
#
# According to the GRIB documentation, `Nx` and `Ny` represent the number of grid points along the x and y axes. Let's grab those.
# <codecell>
nx, ny = ds['Nx'].values[0], ds['Ny'].values[0]
print(nx, ny)
# <markdowncell>
# # `La1` and `Lo1`
#
# Next let's get `La1` and `Lo1`, which are defined as the "first grid points". These are probably the latitude and longitude for one of the corners of the grid.
# <codecell>
la1, lo1 = ds['La1'].values[0], ds['Lo1'].values[0]
print(la1, lo1)
# <markdowncell>
# # `Latin1` and `Latin2`
#
# Next up are the rather mysteriously named `Latin1` and `Latin2` variables. When I first saw these identifiers, I thought they referred to a Unicode block, but in fact they relate to the secants of the projection cone. I do not know why they are called "Latin" and this name is confusing. **At any rate, we can feel comfortable that we are dealing with Lambert Conformal rather than Polar Stereographic.**
#
# 
#
# Credit: http://www.geo.hunter.cuny.edu/~jochen
# <codecell>
latin1, latin2 = ds['Latin1'].values[0], ds['Latin2'].values[0]
print(latin1, latin2)
# <markdowncell>
# # The Central Meridian for the Lambert Conformal Projection, `LoV`
#
# If we are defining a Lambert Conformal projection, we will require the central meridian that the GRIB documentation refers to as `LoV`.
# <codecell>
lov = ds['LoV'].values[0]
print(lov)
# <markdowncell>
# # `Dx` and `Dy`
#
# Finally, let's look at the grid increments. In particular, we need to find the units.
# <codecell>
print(ds['Dx'])
print(ds['Dy'])
# <markdowncell>
# # Units for `Dx` and `Dy`
#
# The units for the deltas are in meters.
# <codecell>
dx,dy = ds['Dx'].values[0],ds['Dy'].values[0]
print(dx,dy)
# <markdowncell>
# # Let's Review What We Have
#
# We now have all the information we need to understand the Lambert projection:
#
# - The secants of the Lambert Conformal projection (`Latin1`, `Latin2`)
# - The central meridian of the projection (`LoV`)
#
# Moreover, we have additional information that shows how the data grid relates to the projection:
#
# - The number of grid points in x and y (`Nx`, `Ny`)
# - The delta in meters between grid point (`Dx`, `Dy`)
# - The first latitude and longitude of the data (`first latitude`, `first longitude`).
# <markdowncell>
# # We are Ready for Visualization (almost)!
#
# Let's import **cartopy** and **matplotlib**.
# <codecell>
%matplotlib inline
import cartopy.crs as ccrs
import matplotlib.pyplot as plt
import matplotlib as mpl
# <headingcell level=1>
# Define the Lambert Conformal Projection with Cartopy
# <codecell>
proj = ccrs.LambertConformal(central_longitude=lov,standard_parallels=(latin1,latin2))
# <markdowncell>
# # Lambert Conformal Grid Extents
#
# - To plot the data we need the `left`,`right`,`bottom`,`top` extents of the grid **expressed in Lambert Conformal
# coordinates**.
# - __**Key point**: The projection coordinate systems have flat topology and Euclidean distance.__
# <markdowncell>
# # Calculating the Extents
#
# Remember, we have:
#
# - The number of grid points in x and y (`Nx`, `Ny`)
# - The delta in meters between grid point (`Dx`, `Dy`)
# - The first latitude and longitude of the data (`first latitude`, `first longitude`).
#
# We have one of the corners in latitude and longitude, but we need to convert it to LC coordinates and derive the other corner.
# <markdowncell>
# # Plate Carrée Projection
#
# The Plate Carrée projection is a very simple Cartesian (X, Y) projection. It is used a lot in Cartopy because it lets you express coordinates in familiar latitudes and longitudes. **Remember**: the projection coordinate systems have flat topology and Euclidean distance.
# <markdowncell>
# # Plate Carrée
#
# 
#
# Source: [Wikipedia Source](https://en.wikipedia.org/wiki/Equirectangular_projection)
# <headingcell level=1>
# Create the Plate Carrée Cartopy Projection
# <codecell>
pc = ccrs.PlateCarree()
# <markdowncell>
# # Convert Corner from Lat/Lon Plate Carrée to LC
#
# The `transform_point` method translates coordinates from one projection coordinate system to the other.
# <codecell>
left,bottom = proj.transform_point(lo1,la1,pc)
print(left,bottom)
# <markdowncell>
# # Derive Opposite Corner
#
# Derive the opposite corner from the number of points and the delta. **Again**, we can do this because the projection coordinate systems have flat topology and Euclidean distance.
# <codecell>
right,top = left + nx*dx,bottom + ny*dy
print(right,top)
# <markdowncell>
# # Plot It Up!
#
# We now have the extents, we are ready to plot.
# <codecell>
#Define the figure
fig = plt.figure(figsize=(12, 12))
# Define the extents and add the data
ax = plt.axes(projection=proj)
extents = (left, right, bottom, top)
ax.contourf(th, origin='lower', extent=extents, transform=proj)
# Add bells and whistles
ax.coastlines(resolution='50m', color='black', linewidth=2)
ax.add_feature(ccrs.cartopy.feature.NaturalEarthFeature(category='cultural', name='admin_1_states_provinces_lines', scale='50m',facecolor='none'))
ax.add_feature(ccrs.cartopy.feature.BORDERS, linewidth=1, edgecolor='black')
ax.gridlines()
plt.show()
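# <markdowncell>
# # A Sketch for Adding a Colorbar
#
# As a minimal, hedged starting point for the colorbar exercise below: capture the `QuadContourSet` returned by `contourf` and hand it to `plt.colorbar`. This cell simply reuses `th`, `extents` and `proj` from the cells above; the colorbar label is only a guess at the variable's meaning.
# <codecell>
fig2 = plt.figure(figsize=(12, 12))
ax2 = plt.axes(projection=proj)
cf = ax2.contourf(th, origin='lower', extent=extents, transform=proj)
ax2.coastlines(resolution='50m', color='black', linewidth=2)
plt.colorbar(cf, ax=ax2, orientation='horizontal', pad=0.05, label='potential temperature (th)')
plt.show()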
# <markdowncell>
# # Exercises for the Reader
#
# - The extents are actually not perfect and snip the image. Why? Fix.
# - Add a colorbar and jazz up the plot.
# - Trick question: Can you label the axes with latitude and longitudes?
# - Try a different projection which should be fairly easy with Cartopy.
# <codecell>
th.shape
# <codecell>
th[0,0]
# <codecell>
Siphon/casestudy.py
import sqlite3
import logging
from weight_unit import WeightUnit
class Database:
    def __init__(self, path):
        self.db = sqlite3.connect(path)
        # Use an instance attribute so the weight-unit cache is not shared between Database objects.
        self.weight_units = []
cursor = self.db.cursor()
for row in cursor.execute('SELECT id, name, language_id FROM nutrition_weightunit'):
self.weight_units.append(WeightUnit(
row[1],
row[0],
row[2]
))
cursor.close()
def get_last_ingredient_id(self):
cursor = self.db.cursor()
cursor.execute('SELECT id FROM nutrition_ingredient ORDER BY id DESC LIMIT 1')
result = cursor.fetchone()
        if result is None:
result = 0
else:
result = result[0]
cursor.close()
return result
def insert_ingredient(self, ingredient):
logging.info('Inserting ingredient {}'.format(ingredient.name))
query = ('INSERT INTO nutrition_ingredient (id, license_author, status, creation_date, update_date, '
'name, energy, protein, carbohydrates, carbohydrates_sugar, fat, fat_saturated, fibres, '
'sodium, language_id, license_id) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)')
parameters = (ingredient.id, ingredient.license_author, ingredient.status, ingredient.creation_date,
ingredient.update_date, ingredient.name, ingredient.energy, ingredient.protein,
ingredient.carbohydrates, ingredient.carbohydrates_sugar, ingredient.fat, ingredient.fat_saturated,
ingredient.fibres, ingredient.sodium, ingredient.language_id, ingredient.license_id)
cursor = self.db.cursor()
cursor.execute(query, parameters)
self.db.commit()
cursor.close()
def insert_weight_unit(self, weight_unit):
logging.info('Inserting weight unit {}'.format(weight_unit.name))
query = 'INSERT INTO nutrition_weightunit (name, language_id) VALUES (?, ?)'
cursor = self.db.cursor()
cursor.execute(query, (weight_unit.name, weight_unit.language_id))
weight_unit.id = cursor.lastrowid
self.weight_units.append(weight_unit)
self.db.commit()
cursor.close()
return weight_unit.id
def insert_ingredient_weight_unit(self, ingredient_weight_unit):
found = False
for weight_unit in self.weight_units:
if weight_unit.name == ingredient_weight_unit.weight_unit.name:
found = True
ingredient_weight_unit.unit_id = weight_unit.id
break
if not found:
ingredient_weight_unit.unit_id = self.insert_weight_unit(ingredient_weight_unit.weight_unit)
logging.info('Inserting ingredient weight unit {}, {}'.format(ingredient_weight_unit.unit_id, ingredient_weight_unit.ingredient_id))
query = 'INSERT INTO nutrition_ingredientweightunit (gram, amount, ingredient_id, unit_id) VALUES (?, ?, ?, ?)'
cursor = self.db.cursor()
cursor.execute(query, (ingredient_weight_unit.gram, ingredient_weight_unit.amount,
ingredient_weight_unit.ingredient_id, ingredient_weight_unit.unit_id))
self.db.commit()
cursor.close()
def close(self):
self.db.commit()
self.db.close()
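# A minimal, hedged usage sketch. The database path below is illustrative, and a
# SQLite file that already contains the wger-style nutrition_* tables used above
# is assumed to exist.
if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO)
    db = Database('nutrition.sqlite3')
    print('last ingredient id:', db.get_last_ingredient_id())
    db.close()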
database.py
# General-purpose Python library imports
import os
import sys
import unittest
# Third-party libraries
import boto.s3.connection
import boto.s3.key
from flexmock import flexmock
# Walrus storage import, the library that we're testing here
lib = os.path.dirname(__file__) + os.sep + ".."
sys.path.append(lib)
from magik.custom_exceptions import BadConfigurationException
from magik.storage_factory import StorageFactory
class TestWalrusStorage(unittest.TestCase):
def setUp(self):
# Set up a mock for when we interact with Walrus
self.fake_walrus = flexmock(name='fake_walrus')
flexmock(boto.s3.connection)
boto.s3.connection.should_receive('S3Connection').and_return(
self.fake_walrus)
self.walrus = StorageFactory.get_storage({
"name" : "walrus",
"AWS_ACCESS_KEY" : "access",
"AWS_SECRET_KEY" : "secret",
"S3_URL" : "http://1.2.3.4:8773/services/Walrus"
})
def test_walrus_storage_creation_without_necessary_parameters(self):
# Trying to create a WalrusStorage without the AWS_ACCESS_KEY should fail.
self.assertRaises(BadConfigurationException, StorageFactory.get_storage, {
"name" : "walrus"
})
# Similarly, creating a WalrusStorage object without the AWS_SECRET_KEY
# should fail.
self.assertRaises(BadConfigurationException, StorageFactory.get_storage, {
"name" : "walrus",
"AWS_ACCESS_KEY" : "access"
})
# If S3_URL isn't a URL, an Exception should be thrown.
self.assertRaises(BadConfigurationException, StorageFactory.get_storage, {
"name" : "walrus",
"AWS_ACCESS_KEY" : "access",
"AWS_SECRET_KEY" : "secret",
"S3_URL" : "1.2.3.4:8773/services/Walrus"
})
# If S3_URL is a URL, that should be fine.
flexmock(boto.s3.connection)
boto.s3.connection.should_receive('S3Connection')
another_walrus = StorageFactory.get_storage({
"name" : "walrus",
"AWS_ACCESS_KEY" : "access",
"AWS_SECRET_KEY" : "secret",
"S3_URL" : "http://1.2.3.4:8773/services/Walrus"
})
self.assertEquals("access", another_walrus.aws_access_key)
self.assertEquals("secret", another_walrus.aws_secret_key)
self.assertEquals("http://1.2.3.4:8773/services/Walrus",
another_walrus.s3_url)
def test_upload_one_file_and_create_bucket(self):
file_one_info = {
'source' : '/baz/boo/fbar1.tgz',
'destination' : '/mybucket/files/fbar1.tgz'
}
# Presume that the local file does exist.
flexmock(os.path)
os.path.should_call('exists')
os.path.should_receive('exists').with_args('/baz/boo/fbar1.tgz') \
.and_return(True)
# And presume that our bucket does not exist.
self.fake_walrus.should_receive('lookup').with_args('mybucket').and_return(None)
# We thus need to be able to create the bucket.
fake_bucket = flexmock(name='name_bucket')
self.fake_walrus.should_receive('create_bucket').with_args('mybucket') \
.and_return(fake_bucket)
self.fake_walrus.should_receive('lookup').with_args('mybucket') \
.and_return(fake_bucket)
# Also, presume that we can upload the file fine.
fake_key = flexmock(name='fake_key')
flexmock(boto.s3.key)
boto.s3.key.should_receive('Key').with_args(fake_bucket).and_return(
fake_key)
fake_key.should_receive('key').with_args('files/fbar1.tgz')
fake_key.should_receive('set_contents_from_filename') \
.with_args('/baz/boo/fbar1.tgz')
# Finally, make sure we can upload our file successfully.
upload_info = [file_one_info]
actual = self.walrus.upload_files(upload_info)
for upload_result in actual:
self.assertEquals(True, upload_result['success'])
def test_upload_two_files_that_exist(self):
# Set up mocks for the first file.
file_one_info = {
'source' : '/baz/boo/fbar1.tgz',
'destination' : '/mybucket/files/fbar1.tgz'
}
# Presume that the local file does exist.
flexmock(os.path)
os.path.should_call('exists')
os.path.should_receive('exists').with_args('/baz/boo/fbar1.tgz') \
.and_return(True)
# And presume that our bucket exists.
fake_bucket = flexmock(name='name_bucket')
self.fake_walrus.should_receive('lookup').with_args('mybucket').and_return(
fake_bucket)
# Also, presume that we can upload the file fine.
fake_key = flexmock(name='fake_key')
flexmock(boto.s3.key)
boto.s3.key.should_receive('Key').with_args(fake_bucket).and_return(
fake_key)
fake_key.should_receive('key').with_args('files/fbar1.tgz')
fake_key.should_receive('set_contents_from_filename') \
.with_args('/baz/boo/fbar1.tgz')
# Set up mocks for the second file.
file_two_info = {
'source' : '/baz/boo/fbar2.tgz',
'destination' : '/mybucket/files/fbar2.tgz'
}
# Presume that the local file does exist.
os.path.should_receive('exists').with_args('/baz/boo/fbar2.tgz') \
.and_return(True)
# Also, presume that we can upload the file fine.
fake_key.should_receive('key').with_args('files/fbar2.tgz')
fake_key.should_receive('set_contents_from_filename') \
.with_args('/baz/boo/fbar2.tgz')
# Finally, make sure we can upload our files successfully.
upload_info = [file_one_info, file_two_info]
actual = self.walrus.upload_files(upload_info)
for upload_result in actual:
self.assertEquals(True, upload_result['success'])
def test_download_one_file_that_doesnt_exist(self):
# Set up mocks for the first file.
file_one_info = {
'source' : '/mybucket/files/fbar1.tgz',
'destination' : '/baz/boo/fbar1.tgz'
}
# And presume that our bucket does not exist.
fake_bucket = flexmock(name='name_bucket')
self.fake_walrus.should_receive('lookup').with_args('mybucket').and_return(None)
# Finally, make sure we can't download our file.
download_info = [file_one_info]
actual = self.walrus.download_files(download_info)
for download_result in actual:
self.assertEquals(False, download_result['success'])
self.assertEquals('bucket not found', download_result['failure_reason'])
def test_download_two_files_that_exist(self):
# Set up mocks for the first file.
file_one_info = {
'source' : '/mybucket/files/fbar1.tgz',
'destination' : '/baz/boo/fbar1.tgz'
}
# And presume that our bucket exists.
fake_bucket = flexmock(name='name_bucket')
self.fake_walrus.should_receive('lookup').with_args('mybucket').and_return(
fake_bucket)
# Presume that our first file does exist.
fake_key = flexmock(name='fake_key')
flexmock(boto.s3.key)
boto.s3.key.should_receive('Key').with_args(fake_bucket).and_return(
fake_key)
fake_key.should_receive('key').with_args('boo/fbar1.tgz')
fake_key.should_receive('exists').and_return(True)
# And presume that we can write to the local filesystem.
fake_key.should_receive('get_contents_to_filename').with_args(
'/baz/boo/fbar1.tgz')
# Set up mocks for the second file.
file_two_info = {
'source' : '/mybucket/files/fbar2.tgz',
'destination' : '/baz/boo/fbar2.tgz'
}
# Presume that our second file does exist.
fake_key.should_receive('key').with_args('boo/fbar2.tgz')
fake_key.should_receive('exists').and_return(True)
# And presume that we can write to the local filesystem.
fake_key.should_receive('get_contents_to_filename').with_args(
'/baz/boo/fbar2.tgz')
# Finally, make sure we can download our files successfully.
download_info = [file_one_info, file_two_info]
actual = self.walrus.download_files(download_info)
for download_result in actual:
self.assertEquals(True, download_result['success'])
def test_delete_two_files_that_exist(self):
# Set up mocks for the first file.
file_one_info = {
'source' : '/mybucket/files/fbar1.tgz'
}
# And presume that our bucket exists.
fake_bucket = flexmock(name='name_bucket')
self.fake_walrus.should_receive('lookup').with_args('mybucket').and_return(
fake_bucket)
# Presume that our first file does exist.
fake_key = flexmock(name='fake_key')
flexmock(boto.s3.key)
boto.s3.key.should_receive('Key').with_args(fake_bucket).and_return(
fake_key)
fake_key.should_receive('key').with_args('boo/fbar1.tgz')
fake_key.should_receive('exists').and_return(True)
# And presume that we can delete the file.
fake_key.should_receive('delete')
# Set up mocks for the second file.
file_two_info = {
'source' : '/mybucket/files/fbar2.tgz'
}
# Presume that our second file does exist.
fake_key.should_receive('key').with_args('boo/fbar2.tgz')
fake_key.should_receive('exists').and_return(True)
# And presume that we can delete the file.
fake_key.should_receive('delete')
    # Finally, make sure we can delete our files successfully.
delete_info = [file_one_info, file_two_info]
actual = self.walrus.delete_files(delete_info)
for delete_result in actual:
self.assertEquals(True, delete_result['success'])
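# A minimal runner hook, assuming flexmock, boto and the magik package are importable;
# the suite can then be run directly or via `python -m unittest discover`.
if __name__ == '__main__':
    unittest.main()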
tests/test_walrus_storage.py
__author__ = "<NAME>"
__all__ = ["TimeSeriesForest"]
import numpy as np
import pandas as pd
import math
from sklearn.ensemble.forest import ForestClassifier
from sklearn.tree import DecisionTreeClassifier
from numpy import random
from copy import deepcopy
from sklearn.utils.multiclass import class_distribution
from sktime.utils.load_data import load_from_tsfile_to_dataframe as ld
class TimeSeriesForest(ForestClassifier):
""" Time-Series Forest Classifier.
TimeSeriesForest: Implementation of Deng's Time Series Forest, with minor changes
@article
{deng13forest,
author = {H.Deng and G.Runger and E.Tuv and M.Vladimir},
title = {A time series forest for classification and feature extraction},
journal = {Information Sciences},
volume = {239},
year = {2013}
    Overview: input is n series, each of length m
for each tree
sample sqrt(m) intervals
find mean, sd and slope for each interval, concatenate to form new data set
build decision tree on new data set
ensemble the trees with averaged probability estimates
This implementation deviates from the original in minor ways. It samples intervals with replacement and
does not use the splitting criteria tiny refinement described in deng13forest. This is an intentionally
stripped down, non configurable version for use as a hive-cote component. For a configurable tree based
ensemble, see sktime.classifiers.ensemble.TimeSeriesForestClassifier
TO DO: handle missing values, unequal length series and multivariate problems
Parameters
----------
n_trees : int, ensemble size, optional (default = 200)
    random_state : int, seed for the random number generator, optional (default = None, i.e. no fixed seed)
min_interval : int, minimum width of an interval, optional (default to 3)
Attributes
----------
n_classes : int, extracted from the data
num_atts : int, extracted from the data
n_intervals : int, sqrt(num_atts)
classifiers : array of shape = [n_trees] of DecisionTree classifiers
intervals : array of shape = [n_trees][n_intervals][2] stores indexes of all start and end points for all classifiers
    dim_to_use : int, the column of the pandas DataFrame to use (a multivariate problem may be passed, but only this column is used)
"""
def __init__(self,
random_state = None,
min_interval=3,
n_trees = 200
):
super(TimeSeriesForest, self).__init__(
base_estimator=DecisionTreeClassifier(criterion="entropy"),
n_estimators=n_trees)
self.random_state = random_state
random.seed(random_state)
self.n_trees=n_trees
self.min_interval=min_interval
# The following set in method fit
self.n_classes = 0
self.series_length = 0
self.n_intervals = 0
self.classifiers = []
self.intervals=[]
self.classes_ = []
def fit(self, X, y):
"""Build a forest of trees from the training set (X, y) using random intervals and summary features
Parameters
----------
X : array-like or sparse matrix of shape = [n_instances,series_length] or shape = [n_instances,n_columns]
The training input samples. If a Pandas data frame is passed it must have a single column (i.e. univariate
classification. RISE has no bespoke method for multivariate classification as yet.
y : array-like, shape = [n_instances] The class labels.
Returns
-------
self : object
"""
if isinstance(X, pd.DataFrame):
if X.shape[1] > 1:
raise TypeError("TSF cannot handle multivariate problems yet")
elif isinstance(X.iloc[0,0], pd.Series):
X = np.asarray([a.values for a in X.iloc[:,0]])
else:
                raise TypeError("Input should either be a 2d numpy array, or a pandas dataframe with a single column of Series objects (TSF cannot yet handle multivariate problems)")
n_samps, self.series_length = X.shape
self.n_classes = np.unique(y).shape[0]
self.classes_ = class_distribution(np.asarray(y).reshape(-1, 1))[0][0]
self.n_intervals = int(math.sqrt(self.series_length))
if self.n_intervals==0:
self.n_intervals = 1
if self.series_length <self.min_interval:
self.min_interval=self.series_length
self.intervals=np.zeros((self.n_trees, self.n_intervals, 2), dtype=int)
for i in range(0, self.n_trees):
transformed_x = np.empty(shape=(3 * self.n_intervals, n_samps))
            # Find the random intervals for classifier i and concatenate features
for j in range(0, self.n_intervals):
self.intervals[i][j][0]=random.randint(self.series_length - self.min_interval)
length=random.randint(self.series_length - self.intervals[i][j][0] - 1)
if length < self.min_interval:
length = self.min_interval
self.intervals[i][j][1] = self.intervals[i][j][0] + length
# Transforms here, just hard coding it, so not configurable
means = np.mean(X[:, self.intervals[i][j][0]:self.intervals[i][j][1]], axis=1)
std_dev = np.std(X[:, self.intervals[i][j][0]:self.intervals[i][j][1]], axis=1)
slope = self.lsq_fit(X[:, self.intervals[i][j][0]:self.intervals[i][j][1]])
transformed_x[3*j]=means
transformed_x[3*j+1]=std_dev
transformed_x[3*j+2]=slope
tree = deepcopy(self.base_estimator)
transformed_x=transformed_x.T
tree.fit(transformed_x, y)
self.classifiers.append(tree)
return self
def predict(self, X):
"""
Find predictions for all cases in X. Built on top of predict_proba
Parameters
----------
X : The training input samples. array-like or pandas data frame.
If a Pandas data frame is passed, a check is performed that it only has one column.
If not, an exception is thrown, since this classifier does not yet have
multivariate capability.
Returns
-------
output : array of shape = [n_test_instances]
"""
proba=self.predict_proba(X)
return [self.classes_[np.argmax(prob)] for prob in proba]
def predict_proba(self, X):
"""
Find probability estimates for each class for all cases in X.
Parameters
----------
        X : The input samples to classify. array-like of shape = [n_test_instances, series_length]
If a Pandas data frame is passed (sktime format) a check is performed that it only has one column.
If not, an exception is thrown, since this classifier does not yet have
multivariate capability.
Local variables
----------
n_test_instances : int, number of cases to classify
        series_length : int, number of attributes in X, must match the series_length determined in fit
Returns
-------
        output : array of shape = [n_test_instances, n_classes] of probabilities
"""
        if isinstance(X, pd.DataFrame):
            if X.shape[1] > 1:
                raise TypeError("TSF cannot handle multivariate problems yet")
            elif isinstance(X.iloc[0, 0], pd.Series):
                X = np.asarray([a.values for a in X.iloc[:, 0]])
            else:
                raise TypeError("Input should either be a 2d numpy array, or a pandas DataFrame with a single column of Series objects (TSF cannot yet handle multivariate problems).")
n_test_instances, series_length = X.shape
if series_length != self.series_length:
raise TypeError(" ERROR number of attributes in the train does not match that in the test data")
sums = np.zeros((X.shape[0],self.n_classes), dtype=np.float64)
for i in range(0, self.n_trees):
transformed_x = np.empty(shape=(3 * self.n_intervals, n_test_instances), dtype=np.float32)
for j in range(0, self.n_intervals):
means = np.mean(X[:, self.intervals[i][j][0]:self.intervals[i][j][1]], axis=1)
std_dev = np.std(X[:, self.intervals[i][j][0]:self.intervals[i][j][1]], axis=1)
slope = self.lsq_fit(X[:, self.intervals[i][j][0]:self.intervals[i][j][1]])
transformed_x[3*j]=means
transformed_x[3*j+1]=std_dev
transformed_x[3*j+2]=slope
transformed_x=transformed_x.T
sums += self.classifiers[i].predict_proba(transformed_x)
output = sums / (np.ones(self.n_classes) * self.n_estimators)
return output
def lsq_fit(self, Y):
""" Find the slope for each series (row) of Y
Parameters
----------
Y: array of shape = [n_samps, interval_size]
Returns
----------
slope: array of shape = [n_samps]
"""
x = np.arange(Y.shape[1]) + 1
slope = (np.mean(x * Y, axis=1) - np.mean(x) * np.mean(Y, axis=1)) / ((x * x).mean() - x.mean() ** 2)
return slope
if __name__ == "__main__":
dataset = "Gunpoint"
    train_x, train_y = ld(file_path="C:/temp/sktime_temp_data/" + dataset + "/", file_name=dataset + "_TRAIN.ts")
print(train_x.iloc[0:10])
tsf = TimeSeriesForest()
tsf.fit(train_x.iloc[0:10], train_y[0:10])
preds = tsf.predict(train_x.iloc[10:20])
print(preds)
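    # --- Added sketch (not from the original source): the block above depends on a local
    # --- "C:/temp" .ts file, so here is a minimal, self-contained sanity check on synthetic
    # --- data. Everything below is illustrative; only TimeSeriesForest itself comes from this file.
    rng = np.random.RandomState(0)
    X_synth = rng.normal(size=(20, 50))            # 20 univariate series of length 50
    y_synth = np.array(["a"] * 10 + ["b"] * 10)    # two arbitrary class labels
    tsf_synth = TimeSeriesForest(n_trees=10, random_state=0)
    tsf_synth.fit(X_synth, y_synth)
    print(tsf_synth.predict_proba(X_synth[:5]))    # shape (5, 2): per-class probability estimates
    # The slope feature is an ordinary least-squares slope, so it should agree with np.polyfit:
    seg = X_synth[:, :10]
    assert np.allclose(tsf_synth.lsq_fit(seg), np.polyfit(np.arange(1, 11), seg.T, 1)[0])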
|
sktime/classifiers/interval_based/tsf.py
|
| 0.853577 | 0.523299 |
from msrest.pipeline import ClientRawResponse
from .. import models
class CustomVisionPredictionClientOperationsMixin(object):
def classify_image(
self, project_id, published_name, image_data, application=None, custom_headers=None, raw=False, **operation_config):
"""Classify an image and saves the result.
:param project_id: The project id.
:type project_id: str
:param published_name: Specifies the name of the model to evaluate
against.
:type published_name: str
:param image_data: Binary image data. Supported formats are JPEG, GIF,
PNG, and BMP. Supports images up to 4MB.
:type image_data: Generator
:param application: Optional. Specifies the name of application using
the endpoint.
:type application: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: ImagePrediction or ClientRawResponse if raw=true
:rtype:
~azure.cognitiveservices.vision.customvision.prediction.models.ImagePrediction
or ~msrest.pipeline.ClientRawResponse
:raises:
:class:`CustomVisionErrorException<azure.cognitiveservices.vision.customvision.prediction.models.CustomVisionErrorException>`
"""
# Construct URL
url = self.classify_image.metadata['url']
path_format_arguments = {
'Endpoint': self._serialize.url("self.config.endpoint", self.config.endpoint, 'str', skip_quote=True),
'projectId': self._serialize.url("project_id", project_id, 'str'),
'publishedName': self._serialize.url("published_name", published_name, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
if application is not None:
query_parameters['application'] = self._serialize.query("application", application, 'str')
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
header_parameters['Content-Type'] = 'multipart/form-data'
if custom_headers:
header_parameters.update(custom_headers)
# Construct form data
form_data_content = {
'imageData': image_data,
}
# Construct and send request
request = self._client.post(url, query_parameters, header_parameters, form_content=form_data_content)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.CustomVisionErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('ImagePrediction', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
classify_image.metadata = {'url': '/{projectId}/classify/iterations/{publishedName}/image'}
def classify_image_with_no_store(
self, project_id, published_name, image_data, application=None, custom_headers=None, raw=False, **operation_config):
"""Classify an image without saving the result.
:param project_id: The project id.
:type project_id: str
:param published_name: Specifies the name of the model to evaluate
against.
:type published_name: str
:param image_data: Binary image data. Supported formats are JPEG, GIF,
PNG, and BMP. Supports images up to 4MB.
:type image_data: Generator
:param application: Optional. Specifies the name of application using
the endpoint.
:type application: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: ImagePrediction or ClientRawResponse if raw=true
:rtype:
~azure.cognitiveservices.vision.customvision.prediction.models.ImagePrediction
or ~msrest.pipeline.ClientRawResponse
:raises:
:class:`CustomVisionErrorException<azure.cognitiveservices.vision.customvision.prediction.models.CustomVisionErrorException>`
"""
# Construct URL
url = self.classify_image_with_no_store.metadata['url']
path_format_arguments = {
'Endpoint': self._serialize.url("self.config.endpoint", self.config.endpoint, 'str', skip_quote=True),
'projectId': self._serialize.url("project_id", project_id, 'str'),
'publishedName': self._serialize.url("published_name", published_name, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
if application is not None:
query_parameters['application'] = self._serialize.query("application", application, 'str')
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
header_parameters['Content-Type'] = 'multipart/form-data'
if custom_headers:
header_parameters.update(custom_headers)
# Construct form data
form_data_content = {
'imageData': image_data,
}
# Construct and send request
request = self._client.post(url, query_parameters, header_parameters, form_content=form_data_content)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.CustomVisionErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('ImagePrediction', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
classify_image_with_no_store.metadata = {'url': '/{projectId}/classify/iterations/{publishedName}/image/nostore'}
def classify_image_url(
self, project_id, published_name, url, application=None, custom_headers=None, raw=False, **operation_config):
"""Classify an image url and saves the result.
:param project_id: The project id.
:type project_id: str
:param published_name: Specifies the name of the model to evaluate
against.
:type published_name: str
:param url: Url of the image.
:type url: str
:param application: Optional. Specifies the name of application using
the endpoint.
:type application: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: ImagePrediction or ClientRawResponse if raw=true
:rtype:
~azure.cognitiveservices.vision.customvision.prediction.models.ImagePrediction
or ~msrest.pipeline.ClientRawResponse
:raises:
:class:`CustomVisionErrorException<azure.cognitiveservices.vision.customvision.prediction.models.CustomVisionErrorException>`
"""
image_url = models.ImageUrl(url=url)
# Construct URL
url = self.classify_image_url.metadata['url']
path_format_arguments = {
'Endpoint': self._serialize.url("self.config.endpoint", self.config.endpoint, 'str', skip_quote=True),
'projectId': self._serialize.url("project_id", project_id, 'str'),
'publishedName': self._serialize.url("published_name", published_name, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
if application is not None:
query_parameters['application'] = self._serialize.query("application", application, 'str')
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct body
body_content = self._serialize.body(image_url, 'ImageUrl')
# Construct and send request
request = self._client.post(url, query_parameters, header_parameters, body_content)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.CustomVisionErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('ImagePrediction', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
classify_image_url.metadata = {'url': '/{projectId}/classify/iterations/{publishedName}/url'}
def classify_image_url_with_no_store(
self, project_id, published_name, url, application=None, custom_headers=None, raw=False, **operation_config):
"""Classify an image url without saving the result.
:param project_id: The project id.
:type project_id: str
:param published_name: Specifies the name of the model to evaluate
against.
:type published_name: str
:param url: Url of the image.
:type url: str
:param application: Optional. Specifies the name of application using
the endpoint.
:type application: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: ImagePrediction or ClientRawResponse if raw=true
:rtype:
~azure.cognitiveservices.vision.customvision.prediction.models.ImagePrediction
or ~msrest.pipeline.ClientRawResponse
:raises:
:class:`CustomVisionErrorException<azure.cognitiveservices.vision.customvision.prediction.models.CustomVisionErrorException>`
"""
image_url = models.ImageUrl(url=url)
# Construct URL
url = self.classify_image_url_with_no_store.metadata['url']
path_format_arguments = {
'Endpoint': self._serialize.url("self.config.endpoint", self.config.endpoint, 'str', skip_quote=True),
'projectId': self._serialize.url("project_id", project_id, 'str'),
'publishedName': self._serialize.url("published_name", published_name, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
if application is not None:
query_parameters['application'] = self._serialize.query("application", application, 'str')
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct body
body_content = self._serialize.body(image_url, 'ImageUrl')
# Construct and send request
request = self._client.post(url, query_parameters, header_parameters, body_content)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.CustomVisionErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('ImagePrediction', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
classify_image_url_with_no_store.metadata = {'url': '/{projectId}/classify/iterations/{publishedName}/url/nostore'}
def detect_image(
self, project_id, published_name, image_data, application=None, custom_headers=None, raw=False, **operation_config):
"""Detect objects in an image and saves the result.
:param project_id: The project id.
:type project_id: str
:param published_name: Specifies the name of the model to evaluate
against.
:type published_name: str
:param image_data: Binary image data. Supported formats are JPEG, GIF,
PNG, and BMP. Supports images up to 4MB.
:type image_data: Generator
:param application: Optional. Specifies the name of application using
the endpoint.
:type application: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: ImagePrediction or ClientRawResponse if raw=true
:rtype:
~azure.cognitiveservices.vision.customvision.prediction.models.ImagePrediction
or ~msrest.pipeline.ClientRawResponse
:raises:
:class:`CustomVisionErrorException<azure.cognitiveservices.vision.customvision.prediction.models.CustomVisionErrorException>`
"""
# Construct URL
url = self.detect_image.metadata['url']
path_format_arguments = {
'Endpoint': self._serialize.url("self.config.endpoint", self.config.endpoint, 'str', skip_quote=True),
'projectId': self._serialize.url("project_id", project_id, 'str'),
'publishedName': self._serialize.url("published_name", published_name, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
if application is not None:
query_parameters['application'] = self._serialize.query("application", application, 'str')
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
header_parameters['Content-Type'] = 'multipart/form-data'
if custom_headers:
header_parameters.update(custom_headers)
# Construct form data
form_data_content = {
'imageData': image_data,
}
# Construct and send request
request = self._client.post(url, query_parameters, header_parameters, form_content=form_data_content)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.CustomVisionErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('ImagePrediction', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
detect_image.metadata = {'url': '/{projectId}/detect/iterations/{publishedName}/image'}
def detect_image_with_no_store(
self, project_id, published_name, image_data, application=None, custom_headers=None, raw=False, **operation_config):
"""Detect objects in an image without saving the result.
:param project_id: The project id.
:type project_id: str
:param published_name: Specifies the name of the model to evaluate
against.
:type published_name: str
:param image_data: Binary image data. Supported formats are JPEG, GIF,
PNG, and BMP. Supports images up to 4MB.
:type image_data: Generator
:param application: Optional. Specifies the name of application using
the endpoint.
:type application: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: ImagePrediction or ClientRawResponse if raw=true
:rtype:
~azure.cognitiveservices.vision.customvision.prediction.models.ImagePrediction
or ~msrest.pipeline.ClientRawResponse
:raises:
:class:`CustomVisionErrorException<azure.cognitiveservices.vision.customvision.prediction.models.CustomVisionErrorException>`
"""
# Construct URL
url = self.detect_image_with_no_store.metadata['url']
path_format_arguments = {
'Endpoint': self._serialize.url("self.config.endpoint", self.config.endpoint, 'str', skip_quote=True),
'projectId': self._serialize.url("project_id", project_id, 'str'),
'publishedName': self._serialize.url("published_name", published_name, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
if application is not None:
query_parameters['application'] = self._serialize.query("application", application, 'str')
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
header_parameters['Content-Type'] = 'multipart/form-data'
if custom_headers:
header_parameters.update(custom_headers)
# Construct form data
form_data_content = {
'imageData': image_data,
}
# Construct and send request
request = self._client.post(url, query_parameters, header_parameters, form_content=form_data_content)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.CustomVisionErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('ImagePrediction', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
detect_image_with_no_store.metadata = {'url': '/{projectId}/detect/iterations/{publishedName}/image/nostore'}
def detect_image_url(
self, project_id, published_name, url, application=None, custom_headers=None, raw=False, **operation_config):
"""Detect objects in an image url and saves the result.
:param project_id: The project id.
:type project_id: str
:param published_name: Specifies the name of the model to evaluate
against.
:type published_name: str
:param url: Url of the image.
:type url: str
:param application: Optional. Specifies the name of application using
the endpoint.
:type application: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: ImagePrediction or ClientRawResponse if raw=true
:rtype:
~azure.cognitiveservices.vision.customvision.prediction.models.ImagePrediction
or ~msrest.pipeline.ClientRawResponse
:raises:
:class:`CustomVisionErrorException<azure.cognitiveservices.vision.customvision.prediction.models.CustomVisionErrorException>`
"""
image_url = models.ImageUrl(url=url)
# Construct URL
url = self.detect_image_url.metadata['url']
path_format_arguments = {
'Endpoint': self._serialize.url("self.config.endpoint", self.config.endpoint, 'str', skip_quote=True),
'projectId': self._serialize.url("project_id", project_id, 'str'),
'publishedName': self._serialize.url("published_name", published_name, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
if application is not None:
query_parameters['application'] = self._serialize.query("application", application, 'str')
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct body
body_content = self._serialize.body(image_url, 'ImageUrl')
# Construct and send request
request = self._client.post(url, query_parameters, header_parameters, body_content)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.CustomVisionErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('ImagePrediction', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
detect_image_url.metadata = {'url': '/{projectId}/detect/iterations/{publishedName}/url'}
def detect_image_url_with_no_store(
self, project_id, published_name, url, application=None, custom_headers=None, raw=False, **operation_config):
"""Detect objects in an image url without saving the result.
:param project_id: The project id.
:type project_id: str
:param published_name: Specifies the name of the model to evaluate
against.
:type published_name: str
:param url: Url of the image.
:type url: str
:param application: Optional. Specifies the name of application using
the endpoint.
:type application: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: ImagePrediction or ClientRawResponse if raw=true
:rtype:
~azure.cognitiveservices.vision.customvision.prediction.models.ImagePrediction
or ~msrest.pipeline.ClientRawResponse
:raises:
:class:`CustomVisionErrorException<azure.cognitiveservices.vision.customvision.prediction.models.CustomVisionErrorException>`
"""
image_url = models.ImageUrl(url=url)
# Construct URL
url = self.detect_image_url_with_no_store.metadata['url']
path_format_arguments = {
'Endpoint': self._serialize.url("self.config.endpoint", self.config.endpoint, 'str', skip_quote=True),
'projectId': self._serialize.url("project_id", project_id, 'str'),
'publishedName': self._serialize.url("published_name", published_name, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
if application is not None:
query_parameters['application'] = self._serialize.query("application", application, 'str')
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct body
body_content = self._serialize.body(image_url, 'ImageUrl')
# Construct and send request
request = self._client.post(url, query_parameters, header_parameters, body_content)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.CustomVisionErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('ImagePrediction', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
detect_image_url_with_no_store.metadata = {'url': '/{projectId}/detect/iterations/{publishedName}/url/nostore'}
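# --- Added usage sketch (not part of the generated operations mixin). It assumes this package's
# --- CustomVisionPredictionClient and msrest.authentication.ApiKeyCredentials are importable and
# --- that the client exposes the methods defined above; constructor details can differ between
# --- SDK versions, and all placeholder values below are hypothetical.
if __name__ == "__main__":
    from msrest.authentication import ApiKeyCredentials
    from azure.cognitiveservices.vision.customvision.prediction import CustomVisionPredictionClient

    endpoint = "https://<your-resource>.cognitiveservices.azure.com/"          # placeholder
    credentials = ApiKeyCredentials(in_headers={"Prediction-key": "<your-prediction-key>"})
    client = CustomVisionPredictionClient(endpoint, credentials)

    # Classify an image that is reachable by URL against a published iteration.
    url_result = client.classify_image_url(
        project_id="<project-guid>",
        published_name="<published-iteration-name>",
        url="https://example.com/some-image.jpg")
    for prediction in url_result.predictions:
        print(prediction.tag_name, prediction.probability)

    # Detect objects in local binary image data (JPEG/GIF/PNG/BMP, up to 4MB per the docstrings above).
    with open("local-image.jpg", "rb") as image_data:
        detect_result = client.detect_image(
            project_id="<project-guid>",
            published_name="<published-iteration-name>",
            image_data=image_data)
    for prediction in detect_result.predictions:
        print(prediction.tag_name, prediction.probability, prediction.bounding_box)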
|
sdk/cognitiveservices/azure-cognitiveservices-vision-customvision/azure/cognitiveservices/vision/customvision/prediction/operations/_custom_vision_prediction_client_operations.py
|
| 0.861115 | 0.233499 |
"""Test Loan - setcollateraltoken."""
from test_framework.test_framework import DefiTestFramework
from test_framework.authproxy import JSONRPCException
from test_framework.util import assert_equal
from decimal import Decimal
import calendar
import time
class LoanSetCollateralTokenTest (DefiTestFramework):
def set_test_params(self):
self.num_nodes = 1
self.setup_clean_chain = True
self.extra_args = [
['-txnotokens=0', '-amkheight=50', '-bayfrontheight=50', '-fortcanningheight=50', '-eunosheight=50', '-txindex=1']]
def run_test(self):
assert_equal(len(self.nodes[0].listtokens()), 1) # only one token == DFI
print("Generating initial chain...")
self.nodes[0].generate(101)
self.nodes[0].createtoken({
"symbol": "BTC",
"name": "BTC token",
"isDAT": True,
"collateralAddress": self.nodes[0].get_genesis_keys().ownerAuthAddress
})
self.nodes[0].generate(1)
symbolDFI = "DFI"
symbolBTC = "BTC"
idDFI = list(self.nodes[0].gettoken(symbolDFI).keys())[0]
idBTC = list(self.nodes[0].gettoken(symbolBTC).keys())[0]
try:
self.nodes[0].setcollateraltoken({
'token': "DOGE",
'factor': 1,
'fixedIntervalPriceId': "DFI/USD"})
except JSONRPCException as e:
errorString = e.error['message']
assert("Token DOGE does not exist" in errorString)
try:
self.nodes[0].setcollateraltoken({
'token': idDFI,
'factor': 1,
'fixedIntervalPriceId': "DFI/USD"})
except JSONRPCException as e:
errorString = e.error['message']
assert("Price feed DFI/USD does not belong to any oracle" in errorString)
oracle_address1 = self.nodes[0].getnewaddress("", "legacy")
price_feeds1 = [
{"currency": "USD", "token": "DFI"},
{"currency": "USD", "token": "BTC"}]
oracle_id1 = self.nodes[0].appointoracle(oracle_address1, price_feeds1, 10)
self.nodes[0].generate(1)
try:
self.nodes[0].setcollateraltoken({
'token': idDFI,
'factor': 1,
'fixedIntervalPriceId': "DFI/USD"})
except JSONRPCException as e:
errorString = e.error['message']
assert("no live oracles for specified request" in errorString)
oracle1_prices = [
{"currency": "USD", "tokenAmount": "1@DFI"},
{"currency": "USD", "tokenAmount": "1@BTC"}]
timestamp = calendar.timegm(time.gmtime())
self.nodes[0].setoracledata(oracle_id1, timestamp, oracle1_prices)
self.nodes[0].generate(1)
try:
self.nodes[0].setcollateraltoken({
'token': idDFI,
'factor': 2,
'fixedIntervalPriceId': "DFI/USD"})
except JSONRPCException as e:
errorString = e.error['message']
assert("setCollateralToken factor must be lower or equal than 1" in errorString)
try:
self.nodes[0].setcollateraltoken({
'token': idDFI,
'factor': -1,
'fixedIntervalPriceId': "DFI/USD"})
except JSONRPCException as e:
errorString = e.error['message']
assert("Amount out of range" in errorString)
try:
self.nodes[0].setcollateraltoken({
'token': idDFI,
'factor': 1,
'fixedIntervalPriceId': "Blabla"})
except JSONRPCException as e:
errorString = e.error['message']
assert("price feed not in valid format - token/currency" in errorString)
collTokenTx1 = self.nodes[0].setcollateraltoken({
'token': idDFI,
'factor': 0.5,
'fixedIntervalPriceId': "DFI/USD"})
collTokenTx3 = self.nodes[0].setcollateraltoken({
'token': idDFI,
'factor': 1,
'fixedIntervalPriceId': "DFI/USD",
'activateAfterBlock': 135})
self.nodes[0].generate(1)
collTokens = self.nodes[0].listcollateraltokens({'all': True})
assert_equal(len(collTokens), 2)
collToken1 = [token for token in collTokens if token["tokenId"] == collTokenTx1][0]
assert_equal(collToken1["token"], symbolDFI)
assert_equal(collToken1["factor"], Decimal('0.5'))
assert_equal(collToken1["fixedIntervalPriceId"], "DFI/USD")
collTokenTx2 = self.nodes[0].setcollateraltoken({
'token': idBTC,
'factor': 0.9,
'fixedIntervalPriceId': "BTC/USD"})
self.nodes[0].generate(1)
collTokens = self.nodes[0].listcollateraltokens({'all': True})
assert_equal(len(collTokens), 3)
collToken2 = [token for token in collTokens if token["tokenId"] == collTokenTx2][0]
assert_equal(collToken2["token"], symbolBTC)
assert_equal(collToken2["factor"], Decimal('0.9'))
assert_equal(collToken2["fixedIntervalPriceId"], "BTC/USD")
self.nodes[0].generate(1)
collTokens = self.nodes[0].listcollateraltokens({'all': True})
assert_equal(len(collTokens), 3)
collToken3 = [token for token in collTokens if token["tokenId"] == collTokenTx3][0]
assert_equal(collToken3["token"], symbolDFI)
assert_equal(collToken3["factor"], Decimal('1'))
collTokens = self.nodes[0].getcollateraltoken(idDFI)
assert_equal(collTokens["token"], symbolDFI)
assert_equal(collTokens["factor"], Decimal('0.5'))
assert_equal(collTokens["activateAfterBlock"], 105)
collTokens = self.nodes[0].getcollateraltoken(idBTC)
assert_equal(collTokens["token"], symbolBTC)
assert_equal(collTokens["factor"], Decimal('0.9'))
assert_equal(collTokens["activateAfterBlock"], 106)
self.nodes[0].generate(30)
collTokens = self.nodes[0].getcollateraltoken(idDFI)
assert_equal(collTokens["token"], symbolDFI)
assert_equal(collTokens["factor"], Decimal('1'))
assert_equal(collTokens["activateAfterBlock"], 135)
collTokens = self.nodes[0].getcollateraltoken(idBTC)
assert_equal(collTokens["token"], symbolBTC)
assert_equal(collTokens["factor"], Decimal('0.9'))
assert_equal(collTokens["activateAfterBlock"], 106)
self.nodes[0].setcollateraltoken({
'token': idBTC,
'factor': 0,
'fixedIntervalPriceId': "BTC/USD"})
self.nodes[0].generate(1)
collTokens = self.nodes[0].listcollateraltokens()
assert_equal(len(collTokens), 2)
collTokens = self.nodes[0].listcollateraltokens({'all': True})
assert_equal(len(collTokens), 4)
if __name__ == '__main__':
LoanSetCollateralTokenTest().main()
|
test/functional/feature_loan_setcollateraltoken.py
|
"""Test Loan - setcollateraltoken."""
from test_framework.test_framework import DefiTestFramework
from test_framework.authproxy import JSONRPCException
from test_framework.util import assert_equal
from decimal import Decimal
import calendar
import time
class LoanSetCollateralTokenTest (DefiTestFramework):
def set_test_params(self):
self.num_nodes = 1
self.setup_clean_chain = True
self.extra_args = [
['-txnotokens=0', '-amkheight=50', '-bayfrontheight=50', '-fortcanningheight=50', '-eunosheight=50', '-txindex=1']]
def run_test(self):
assert_equal(len(self.nodes[0].listtokens()), 1) # only one token == DFI
print("Generating initial chain...")
self.nodes[0].generate(101)
self.nodes[0].createtoken({
"symbol": "BTC",
"name": "BTC token",
"isDAT": True,
"collateralAddress": self.nodes[0].get_genesis_keys().ownerAuthAddress
})
self.nodes[0].generate(1)
symbolDFI = "DFI"
symbolBTC = "BTC"
idDFI = list(self.nodes[0].gettoken(symbolDFI).keys())[0]
idBTC = list(self.nodes[0].gettoken(symbolBTC).keys())[0]
try:
self.nodes[0].setcollateraltoken({
'token': "DOGE",
'factor': 1,
'fixedIntervalPriceId': "DFI/USD"})
except JSONRPCException as e:
errorString = e.error['message']
assert("Token DOGE does not exist" in errorString)
try:
self.nodes[0].setcollateraltoken({
'token': idDFI,
'factor': 1,
'fixedIntervalPriceId': "DFI/USD"})
except JSONRPCException as e:
errorString = e.error['message']
assert("Price feed DFI/USD does not belong to any oracle" in errorString)
oracle_address1 = self.nodes[0].getnewaddress("", "legacy")
price_feeds1 = [
{"currency": "USD", "token": "DFI"},
{"currency": "USD", "token": "BTC"}]
oracle_id1 = self.nodes[0].appointoracle(oracle_address1, price_feeds1, 10)
self.nodes[0].generate(1)
try:
self.nodes[0].setcollateraltoken({
'token': idDFI,
'factor': 1,
'fixedIntervalPriceId': "DFI/USD"})
except JSONRPCException as e:
errorString = e.error['message']
assert("no live oracles for specified request" in errorString)
oracle1_prices = [
{"currency": "USD", "tokenAmount": "1@DFI"},
{"currency": "USD", "tokenAmount": "1@BTC"}]
timestamp = calendar.timegm(time.gmtime())
self.nodes[0].setoracledata(oracle_id1, timestamp, oracle1_prices)
self.nodes[0].generate(1)
try:
self.nodes[0].setcollateraltoken({
'token': idDFI,
'factor': 2,
'fixedIntervalPriceId': "DFI/USD"})
except JSONRPCException as e:
errorString = e.error['message']
assert("setCollateralToken factor must be lower or equal than 1" in errorString)
try:
self.nodes[0].setcollateraltoken({
'token': idDFI,
'factor': -1,
'fixedIntervalPriceId': "DFI/USD"})
except JSONRPCException as e:
errorString = e.error['message']
assert("Amount out of range" in errorString)
try:
self.nodes[0].setcollateraltoken({
'token': idDFI,
'factor': 1,
'fixedIntervalPriceId': "Blabla"})
except JSONRPCException as e:
errorString = e.error['message']
assert("price feed not in valid format - token/currency" in errorString)
collTokenTx1 = self.nodes[0].setcollateraltoken({
'token': idDFI,
'factor': 0.5,
'fixedIntervalPriceId': "DFI/USD"})
collTokenTx3 = self.nodes[0].setcollateraltoken({
'token': idDFI,
'factor': 1,
'fixedIntervalPriceId': "DFI/USD",
'activateAfterBlock': 135})
self.nodes[0].generate(1)
collTokens = self.nodes[0].listcollateraltokens({'all': True})
assert_equal(len(collTokens), 2)
collToken1 = [token for token in collTokens if token["tokenId"] == collTokenTx1][0]
assert_equal(collToken1["token"], symbolDFI)
assert_equal(collToken1["factor"], Decimal('0.5'))
assert_equal(collToken1["fixedIntervalPriceId"], "DFI/USD")
collTokenTx2 = self.nodes[0].setcollateraltoken({
'token': idBTC,
'factor': 0.9,
'fixedIntervalPriceId': "BTC/USD"})
self.nodes[0].generate(1)
collTokens = self.nodes[0].listcollateraltokens({'all': True})
assert_equal(len(collTokens), 3)
collToken2 = [token for token in collTokens if token["tokenId"] == collTokenTx2][0]
assert_equal(collToken2["token"], symbolBTC)
assert_equal(collToken2["factor"], Decimal('0.9'))
assert_equal(collToken2["fixedIntervalPriceId"], "BTC/USD")
self.nodes[0].generate(1)
collTokens = self.nodes[0].listcollateraltokens({'all': True})
assert_equal(len(collTokens), 3)
collToken3 = [token for token in collTokens if token["tokenId"] == collTokenTx3][0]
assert_equal(collToken3["token"], symbolDFI)
assert_equal(collToken3["factor"], Decimal('1'))
collTokens = self.nodes[0].getcollateraltoken(idDFI)
assert_equal(collTokens["token"], symbolDFI)
assert_equal(collTokens["factor"], Decimal('0.5'))
assert_equal(collTokens["activateAfterBlock"], 105)
collTokens = self.nodes[0].getcollateraltoken(idBTC)
assert_equal(collTokens["token"], symbolBTC)
assert_equal(collTokens["factor"], Decimal('0.9'))
assert_equal(collTokens["activateAfterBlock"], 106)
self.nodes[0].generate(30)
collTokens = self.nodes[0].getcollateraltoken(idDFI)
assert_equal(collTokens["token"], symbolDFI)
assert_equal(collTokens["factor"], Decimal('1'))
assert_equal(collTokens["activateAfterBlock"], 135)
collTokens = self.nodes[0].getcollateraltoken(idBTC)
assert_equal(collTokens["token"], symbolBTC)
assert_equal(collTokens["factor"], Decimal('0.9'))
assert_equal(collTokens["activateAfterBlock"], 106)
self.nodes[0].setcollateraltoken({
'token': idBTC,
'factor': 0,
'fixedIntervalPriceId': "BTC/USD"})
self.nodes[0].generate(1)
collTokens = self.nodes[0].listcollateraltokens()
assert_equal(len(collTokens), 2)
collTokens = self.nodes[0].listcollateraltokens({'all': True})
assert_equal(len(collTokens), 4)
if __name__ == '__main__':
LoanSetCollateralTokenTest().main()
| 0.662141 | 0.405625 |
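The setcollateraltoken test above repeats the same try/except/assert pattern for every expected RPC failure. Below is a hedged sketch of a helper that could factor this out; the helper name and the commented usage are illustrative, not part of the original test, and it relies only on the JSONRPCException import the test already has.
def assert_rpc_error_contains(expected_substring, rpc_call, *args, **kwargs):
    """Run an RPC call and assert it fails with a message containing expected_substring."""
    try:
        rpc_call(*args, **kwargs)
    except JSONRPCException as e:
        assert expected_substring in e.error['message']
    else:
        raise AssertionError("expected RPC call to fail with: %s" % expected_substring)
# Hypothetical usage inside run_test:
# assert_rpc_error_contains("Token DOGE does not exist",
#                           self.nodes[0].setcollateraltoken,
#                           {'token': "DOGE", 'factor': 1, 'fixedIntervalPriceId': "DFI/USD"})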
import numpy as np
from scipy.spatial.distance import pdist, squareform
class GaussianProcess:
"""
The crop yield Gaussian process
"""
def __init__(self, sigma=1, r_loc=0.5, r_year=1.5, sigma_e=0.32, sigma_b=0.01):
self.sigma = sigma
self.r_loc = r_loc
self.r_year = r_year
self.sigma_e = sigma_e
self.sigma_b = sigma_b
@staticmethod
def _normalize(x):
x_mean = np.mean(x, axis=0, keepdims=True)
x_scale = np.ptp(x, axis=0, keepdims=True)
return (x - x_mean) / x_scale
def run(self, feat_train, feat_test, loc_train, loc_test, year_train, year_test,
train_yield, model_weights, model_bias):
        # Make sure the features have an additional value (a column of ones) for the bias term
# We call the features H since the features are used as the basis functions h(x)
H_train = np.concatenate((feat_train, np.ones((feat_train.shape[0], 1))), axis=1)
H_test = np.concatenate((feat_test, np.ones((feat_test.shape[0], 1))), axis=1)
Y_train = np.expand_dims(train_yield, axis=1)
n_train = feat_train.shape[0]
n_test = feat_test.shape[0]
locations = self._normalize(np.concatenate((loc_train, loc_test), axis=0))
years = self._normalize(np.concatenate((year_train, year_test), axis=0))
# to calculate the se_kernel, a dim=2 array must be passed
years = np.expand_dims(years, axis=1)
        # These are the squared exponential kernel functions we'll use for the covariance
se_loc = squareform(pdist(locations, 'euclidean')) ** 2 / (self.r_loc ** 2)
se_year = squareform(pdist(years, 'euclidean')) ** 2 / (self.r_year ** 2)
        # make the Dirac (noise) matrix we'll add onto the kernel function
noise = np.zeros([n_train + n_test, n_train + n_test])
noise[0: n_train, 0: n_train] += (self.sigma_e ** 2) * np.identity(n_train)
kernel = ((self.sigma ** 2) * np.exp(-se_loc) * np.exp(-se_year)) + noise
# since B is diagonal, and B = self.sigma_b * np.identity(feat_train.shape[1]),
        # it's easy to calculate the inverse of B
B_inv = np.identity(H_train.shape[1]) / self.sigma_b
# "We choose b as the weight vector of the last layer of our deep models"
b = np.concatenate((model_weights.transpose(1, 0), np.expand_dims(model_bias, 1)))
K_inv = np.linalg.inv(kernel[0: n_train, 0: n_train])
# The definition of beta comes from equation 2.41 in Rasmussen (2006)
beta = np.linalg.inv(B_inv + H_train.T.dot(K_inv).dot(H_train)).dot(H_train.T.dot(K_inv).dot(Y_train) + B_inv.dot(b))
# We take the mean of g(X*) as our prediction, also from equation 2.41
pred = H_test.dot(beta) + kernel[n_train:, :n_train].dot(K_inv).dot(Y_train - H_train.dot(beta))
return pred
|
cyp/models/gp.py
|
| 0.877437 | 0.649676 |
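A minimal usage sketch for the GaussianProcess class above. It uses small synthetic NumPy arrays purely to illustrate the expected shapes (per-sample features, 2-D locations, years, and the final dense layer's weights and bias); the random numbers carry no meaning, and the commented import is just the module path listed above.
import numpy as np
# from cyp.models.gp import GaussianProcess  # module path as listed above

rng = np.random.RandomState(0)
n_train, n_test, n_feat = 50, 10, 32
feat_train = rng.randn(n_train, n_feat)
feat_test = rng.randn(n_test, n_feat)
loc_train = rng.uniform(-100, 100, (n_train, 2))
loc_test = rng.uniform(-100, 100, (n_test, 2))
year_train = rng.randint(2000, 2015, n_train)
year_test = rng.randint(2000, 2015, n_test)
train_yield = rng.randn(n_train)
model_weights = rng.randn(1, n_feat)  # weights of the model's last dense layer
model_bias = rng.randn(1)             # bias of the model's last dense layer

gp = GaussianProcess(sigma=1, r_loc=0.5, r_year=1.5, sigma_e=0.32, sigma_b=0.01)
pred = gp.run(feat_train, feat_test, loc_train, loc_test, year_train, year_test,
              train_yield, model_weights, model_bias)
print(pred.shape)  # (n_test, 1): GP-adjusted yield predictions for the test points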
import abc
import copy
import inspect
import re
import six
import yaql
from yaql import exceptions as yaql_exc
from highlander import exceptions as exc
from highlander.openstack.common import log as logging
from highlander import yaql_utils
LOG = logging.getLogger(__name__)
class Evaluator(object):
"""Expression evaluator interface.
Having this interface gives the flexibility to change the actual expression
language used in Highlander DSL for conditions, output calculation etc.
"""
@classmethod
@abc.abstractmethod
def validate(cls, expression):
"""Parse and validates the expression.
:param expression: Expression string
:return: True if expression is valid
"""
pass
@classmethod
@abc.abstractmethod
def evaluate(cls, expression, context):
"""Evaluates the expression against the given data context.
:param expression: Expression string
:param context: Data context
:return: Expression result
"""
pass
@classmethod
@abc.abstractmethod
def is_expression(cls, expression):
"""Check expression string and decide whether it is expression or not.
:param expression: Expression string
:return: True if string is expression
"""
pass
class YAQLEvaluator(Evaluator):
@classmethod
def validate(cls, expression):
LOG.debug("Validating YAQL expression [expression='%s']", expression)
try:
yaql.parse(expression)
except (yaql_exc.YaqlException, KeyError, ValueError, TypeError) as e:
raise exc.YaqlEvaluationException(e.message)
@classmethod
def evaluate(cls, expression, data_context):
LOG.debug("Evaluating YAQL expression [expression='%s', context=%s]"
% (expression, data_context))
try:
result = yaql.parse(expression).evaluate(
data=data_context,
context=yaql_utils.create_yaql_context()
)
except (KeyError, yaql_exc.YaqlException) as e:
raise exc.YaqlEvaluationException(
"Can not evaluate YAQL expression: %s, data = %s; error:"
" %s" % (expression, data_context, str(e))
)
LOG.debug("YAQL expression result: %s" % result)
return result if not inspect.isgenerator(result) else list(result)
@classmethod
def is_expression(cls, s):
# TODO(rakhmerov): It should be generalized since it may not be YAQL.
# Treat any string as a YAQL expression.
return isinstance(s, six.string_types)
INLINE_YAQL_REGEXP = '<%.*?%>'
class InlineYAQLEvaluator(YAQLEvaluator):
# This regular expression will look for multiple occurrences of YAQL
# expressions in '<% %>' (i.e. <% any_symbols %>) within a string.
find_expression_pattern = re.compile(INLINE_YAQL_REGEXP)
@classmethod
def validate(cls, expression):
LOG.debug(
"Validating inline YAQL expression [expression='%s']", expression)
if not isinstance(expression, six.string_types):
raise exc.YaqlEvaluationException("Unsupported type '%s'." %
type(expression))
found_expressions = cls.find_inline_expressions(expression)
if found_expressions:
[super(InlineYAQLEvaluator, cls).validate(expr.strip("<%>"))
for expr in found_expressions]
@classmethod
def evaluate(cls, expression, data_context):
LOG.debug(
"Evaluating inline YAQL expression [expression='%s', context=%s]"
% (expression, data_context)
)
result = expression
found_expressions = cls.find_inline_expressions(expression)
if found_expressions:
for expr in found_expressions:
trim_expr = expr.strip("<%>")
evaluated = super(InlineYAQLEvaluator,
cls).evaluate(trim_expr, data_context)
if len(expression) == len(expr):
result = evaluated
else:
result = result.replace(expr, str(evaluated))
LOG.debug("Inline YAQL expression result: %s" % result)
return result
@classmethod
def is_expression(cls, s):
return s
@classmethod
def find_inline_expressions(cls, s):
return cls.find_expression_pattern.findall(s)
# TODO(rakhmerov): Make it configurable.
_EVALUATOR = InlineYAQLEvaluator
def validate(expression):
return _EVALUATOR.validate(expression)
def evaluate(expression, context):
    # Check if the passed value is an expression so we don't need to do this
# every time on a caller side.
if (not isinstance(expression, six.string_types) or
not _EVALUATOR.is_expression(expression)):
return expression
return _EVALUATOR.evaluate(expression, context)
def _evaluate_item(item, context):
if isinstance(item, six.string_types):
try:
return evaluate(item, context)
except AttributeError as e:
LOG.debug("Expression %s is not evaluated, [context=%s]: %s"
% (item, context, e))
return item
else:
return evaluate_recursively(item, context)
def evaluate_recursively(data, context):
data = copy.copy(data)
if not context:
return data
if isinstance(data, dict):
for key in data:
data[key] = _evaluate_item(data[key], context)
elif isinstance(data, list):
for index, item in enumerate(data):
data[index] = _evaluate_item(item, context)
elif isinstance(data, six.string_types):
return _evaluate_item(data, context)
return data
|
highlander/expressions.py
|
| 0.478041 | 0.480357 |
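A short usage sketch for the expressions module above. It assumes the highlander package and a compatible (legacy) yaql release are installed, and that `$.user` / `$.count` are valid YAQL references into the data context; the dictionary values and expected outputs are illustrative only.
from highlander import expressions

context = {'user': 'alice', 'count': 3}

# Strings without <% %> markers pass through unchanged.
print(expressions.evaluate('plain text', context))            # 'plain text'

# Inline expressions are evaluated and substituted into the string.
print(expressions.evaluate('Hello, <% $.user %>!', context))  # 'Hello, alice!'

# Nested structures are walked recursively.
data = {'greeting': 'Hi <% $.user %>', 'items': ['<% $.count %>', 'static']}
print(expressions.evaluate_recursively(data, context))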
import os
from os import path
import numpy as np
from scipy.io import loadmat
import torch
from subprocess import call
from shutil import copy, move
from tqdm import tqdm
from tempfile import TemporaryDirectory
import h5py
import PIL.Image
import PIL.ImageDraw
import PIL.ImageOps
import PIL.ImageFilter
import PIL.ImageChops
from margipose.data.mpi_inf_3dhp.common import Constants, Annotations, MpiInf3dhpSkeletonDesc
from margipose.data.skeleton import absolute_to_root_relative, CanonicalSkeletonDesc
def _progress(iterator, name):
return tqdm(iterator, desc='{:10s}'.format(name), ascii=True, leave=False)
def is_image_ok(image_file):
"""Test whether a camera frame image is usable."""
img = PIL.Image.open(image_file)
grey = np.array(img).mean(axis=-1)
# If over 1/3 of the image is white, the flash probably went off in
# this frame, washing out the image and making it unusable.
if (grey > 250).sum() > (img.height * img.width) / 3.0:
return False
return True
def process_camera_video(in_dir, out_dir, camera_id, frame_indices):
subdirs = [('imageSequence', 'jpg'), ('ChairMasks', 'png'), ('FGmasks', 'jpg')]
for subdir, ext in _progress(subdirs, 'Videos'):
frames_dir = path.join(out_dir, subdir, 'video_%d' % camera_id)
os.makedirs(frames_dir, exist_ok=True)
existing_files = {f for f in os.listdir(frames_dir)}
skip = True
for i in frame_indices:
filename = 'img_%06d.%s' % (i + 1, ext)
if filename not in existing_files:
skip = False
break
if skip:
continue
video_file = path.join(in_dir, subdir, 'video_%d.avi' % camera_id)
with TemporaryDirectory(prefix='tmp_', dir=path.join(out_dir)) as tmp_dir:
retcode = call([
'ffmpeg',
'-nostats', '-loglevel', '16',
'-i', video_file,
'-vf', 'scale=768:768',
'-qscale:v', '3',
path.join(tmp_dir, 'img_%06d.{}'.format(ext))
])
if retcode != 0:
raise Exception(f'failed to extract frames from "{video_file}"')
for i in frame_indices:
filename = 'img_%06d.%s' % (i + 1, ext)
move(
path.join(tmp_dir, filename),
path.join(frames_dir, filename)
)
def interesting_frame_indices(annot, camera_id, n_frames):
"""Use the annotations to find interesting training poses.
A pose is "interesting" if it is sufficiently different from previously seen
poses, and is within the image bounds.
"""
univ_annot3 = torch.from_numpy(annot.univ_annot3[camera_id])
annot2 = torch.from_numpy(annot.annot2[camera_id])
frame_indices = []
prev_joints3d = None
threshold = 200 ** 2 # Require a joint to move at least 200mm since the previous pose
for i in range(n_frames):
joints3d = univ_annot3[i]
if prev_joints3d is not None:
max_move = (joints3d - prev_joints3d).pow(2).sum(-1).max().item()
if max_move < threshold:
continue
# Keep pose if all joint coordinates are within the image bounds
if annot2[i].min().item() >= 0 and annot2[i].max().item() < 2048:
prev_joints3d = joints3d
frame_indices.append(i)
return frame_indices
def _calculate_univ_scale_factor(annot3, univ_annot3, skel_desc):
rel_annot3 = absolute_to_root_relative(torch.as_tensor(annot3), skel_desc.root_joint_id)
rel_univ = absolute_to_root_relative(torch.as_tensor(univ_annot3), skel_desc.root_joint_id)
# NOTE: annot3 and univ_annot3 are not congruent for the revised release of TS6. The
# discrepancies appear for the knee and ankle joints only. It seems like it is the
# universal annotations that are incorrect, since annot3 projects to annot2 correctly.
exclude = {'pelvis', 'left_knee', 'left_ankle', 'right_knee', 'right_ankle'}
include_indices = [i for i, name in enumerate(skel_desc.joint_names) if not name in exclude]
rel_annot3 = rel_annot3[..., include_indices, :]
rel_univ = rel_univ[..., include_indices, :]
non_zero = rel_univ.abs().gt(1e-6)
ratio = (rel_annot3 / rel_univ).masked_select(non_zero)
scale = float(ratio.median())
rel_univ_recons = rel_annot3 / scale
err_count = (rel_univ_recons - rel_univ).abs().gt(1e-6).sum()
assert err_count == 0
return scale
def _add_annotation_metadata(f, annot3, univ_annot3, skel_desc):
ds = f.create_dataset('joints3d', annot3.shape, dtype='f8')
ds[:] = annot3
scale = _calculate_univ_scale_factor(annot3, univ_annot3, skel_desc)
ds = f.create_dataset('scale', (1,), dtype='f8')
ds[:] = scale
def process_sequence(in_dir, out_dir, n_frames, blacklist):
os.makedirs(out_dir, exist_ok=True)
for filename in ['annot.mat', 'camera.calibration']:
src_file = path.join(in_dir, filename)
dest_file = path.join(out_dir, filename)
if not path.exists(dest_file):
copy(src_file, dest_file)
with h5py.File(path.join(out_dir, 'metadata.h5'), 'w') as f:
annot = Annotations(loadmat(path.join(out_dir, 'annot.mat')))
_add_annotation_metadata(
f,
annot.annot3[:, :n_frames],
annot.univ_annot3[:, :n_frames],
MpiInf3dhpSkeletonDesc,
)
for camera_id in _progress(Constants['vnect_cameras'], 'Cameras'):
if camera_id not in blacklist:
process_camera_video(in_dir, out_dir, camera_id, range(n_frames))
indices = interesting_frame_indices(annot, camera_id, n_frames)
images_dir = path.join(out_dir, 'imageSequence', 'video_%d' % camera_id)
indices = [
i for i in indices
if is_image_ok(path.join(images_dir, 'img_%06d.jpg' % (i + 1)))
]
ds = f.create_dataset(
'interesting_frames/camera%d' % camera_id,
(len(indices),),
dtype='i8'
)
ds[:] = np.array(indices)
def preprocess_sequences(src_dir, dest_dir, seqs):
for subj_id, seq_id in _progress(seqs, 'Sequences'):
seq_rel_path = path.join('S%d' % subj_id, 'Seq%d' % seq_id)
process_sequence(
path.join(src_dir, seq_rel_path),
path.join(dest_dir, seq_rel_path),
n_frames=Constants['seq_info'][seq_rel_path]['num_frames'],
blacklist=Constants['blacklist'].get(seq_rel_path, [])
)
def preprocess_training_data(src_dir, dest_dir):
return preprocess_sequences(src_dir, dest_dir, Constants['train_seqs'])
def preprocess_validation_data(src_dir, dest_dir):
return preprocess_sequences(src_dir, dest_dir, Constants['val_seqs'])
def preprocess_test_data(src_dir, dest_dir):
from margipose.data.mpi_inf_3dhp.raw import RawMpiTestDataset, RawMpiTestSeqDataset
for seq_id in _progress(RawMpiTestDataset.SEQ_IDS, 'Sequences'):
dataset = RawMpiTestSeqDataset(src_dir, seq_id, valid_only=True)
out_dir = path.join(dest_dir, seq_id.replace('TS', 'S'), 'Seq1')
image_out_dir = path.join(out_dir, 'imageSequence', 'video_0')
os.makedirs(image_out_dir, exist_ok=True)
image_width = image_height = -1
for example in _progress(dataset, 'Images'):
image = PIL.Image.open(example['image_file'])
image_width, image_height = image.size
image = image.resize((int(image_width * 768 / image_height), 768), PIL.Image.ANTIALIAS)
image.save(path.join(image_out_dir, 'img_%06d.jpg' % (example['frame_index'] + 1)))
copy(dataset.annot_file, path.join(out_dir, 'annot_data.mat'))
with h5py.File(path.join(out_dir, 'metadata.h5'), 'w') as f:
with h5py.File(dataset.annot_file, 'r') as annot:
n_frames = len(annot['annot3'])
annot3 = np.array(annot['annot3']).reshape(1, n_frames, 17, 3)
univ_annot3 = np.array(annot['univ_annot3']).reshape(1, n_frames, 17, 3)
annot2 = np.array(annot['annot2']).reshape(1, n_frames, 17, 2)
# Infer camera intrinsics
x3d = np.stack([annot3[0, :, :, 0], annot3[0, :, :, 2]], axis=-1).reshape(n_frames * 17, 2)
x2d = (annot2[0, :, :, 0] * annot3[0, :, :, 2]).reshape(n_frames * 17, 1)
fx, cx = list(np.linalg.lstsq(x3d, x2d, rcond=None)[0].flatten())
y3d = np.stack([annot3[0, :, :, 1], annot3[0, :, :, 2]], axis=-1).reshape(n_frames * 17, 2)
y2d = (annot2[0, :, :, 1] * annot3[0, :, :, 2]).reshape(n_frames * 17, 1)
fy, cy = list(np.linalg.lstsq(y3d, y2d, rcond=None)[0].flatten())
with open(path.join(out_dir, 'camera.calibration'), 'w') as cam_file:
lines = [
'Fake Camera Calibration File',
'name 0',
' size {:d} {:d}'.format(image_width, image_height),
' intrinsic {:0.3f} 0 {:0.3f} 0 0 {:0.3f} {:0.3f} 0 0 0 1 0 0 0 0 1'
.format(fx, cx, fy, cy),
' extrinsic 1 0 0 0 0 1 0 0 0 0 1 0 0 0 0 1',
]
for line in lines:
cam_file.write(line + '\n')
_add_annotation_metadata(f, annot3, univ_annot3, CanonicalSkeletonDesc)
indices = []
for frame_index, is_valid in enumerate(np.array(annot['valid_frame']).flatten()):
if is_valid == 1:
indices.append(frame_index)
ds = f.create_dataset('interesting_frames/camera0', (len(indices),), dtype='i8')
ds[:] = np.array(indices)
def _isolate_person(img, skel2d):
x1, y1 = list(skel2d.min(axis=0))
x2, y2 = list(skel2d.max(axis=0))
margin = 30
x1 = max(x1 - margin, 0)
y1 = max(y1 - margin, 0)
x2 = min(x2 + margin, 767)
y2 = min(y2 + margin, 767)
draw = PIL.ImageDraw.Draw(img)
draw.rectangle([0, 0, x1, 767], fill=0)
draw.rectangle([0, 0, 767, y1], fill=0)
draw.rectangle([x2, 0, 767, 767], fill=0)
draw.rectangle([0, y2, 767, 767], fill=0)
def preprocess_masks(dir, subj_id, seq_id):
    # Masks are useful for data augmentation via compositing,
    # e.g. `PIL.Image.composite(example_input, shirt_pattern, up_body_mask)`
seq_rel_path = path.join('S%d' % subj_id, 'Seq%d' % seq_id)
seq_dir = path.join(dir, seq_rel_path)
info = Constants['seq_info'][seq_rel_path]
interesting_frames = []
with h5py.File(path.join(seq_dir, 'metadata.h5'), 'r') as f:
for k in f['interesting_frames'].keys():
interesting_frames.append(
(int(k.replace('camera', '')), list(f['interesting_frames'][k]))
)
annot = Annotations(loadmat(path.join(seq_dir, 'annot.mat')))
for camera_id, frame_indices in _progress(interesting_frames, 'Cameras'):
for frame_index in frame_indices:
path_part = 'video_{}/img_{:06d}'.format(camera_id, frame_index + 1)
img = PIL.Image.open(path.join(seq_dir, 'FGmasks/{}.jpg'.format(path_part)))
img = PIL.ImageOps.invert(img)
fg, up_body, low_body = img.split()
skel2d = annot.annot2[camera_id, frame_index] * 768 / 2048
if info['bg_augmentable']:
fg = PIL.ImageOps.invert(fg)
_isolate_person(fg, skel2d)
chair = PIL.Image.open(
path.join(seq_dir, 'ChairMasks/{}.png'.format(path_part)))
chair, _, _ = chair.split()
chair = PIL.ImageOps.invert(chair)
# Pixel-wise max
combined = PIL.ImageChops.lighter(fg, chair)
fg_file = path.join(seq_dir, 'foreground_mask', path_part + '.png')
os.makedirs(path.dirname(fg_file), exist_ok=True)
combined.save(fg_file)
if info['ub_augmentable']:
_isolate_person(up_body, skel2d)
up_body = up_body.filter(PIL.ImageFilter.MinFilter(3))
up_body = up_body.filter(PIL.ImageFilter.MaxFilter(3))
up_body_file = path.join(seq_dir, 'up_body_mask', path_part + '.png')
os.makedirs(path.dirname(up_body_file), exist_ok=True)
up_body.save(up_body_file)
if info['lb_augmentable']:
_isolate_person(low_body, skel2d)
low_body = low_body.filter(PIL.ImageFilter.MinFilter(3))
low_body = low_body.filter(PIL.ImageFilter.MaxFilter(3))
low_body_file = path.join(seq_dir, 'low_body_mask', path_part + '.png')
os.makedirs(path.dirname(low_body_file), exist_ok=True)
low_body.save(low_body_file)
def preprocess_training_masks(dir):
"""Preprocess masks in a preprocessed training data directory."""
for subj_id, seq_id in _progress(Constants['train_seqs'], 'Sequences'):
preprocess_masks(dir, subj_id, seq_id)
def preprocess_validation_masks(dir):
"""Preprocess masks in a preprocessed validation data directory."""
for subj_id, seq_id in _progress(Constants['val_seqs'], 'Sequences'):
preprocess_masks(dir, subj_id, seq_id)
|
src/margipose/data/mpi_inf_3dhp/preprocess.py
|
| 0.521715 | 0.24834 |
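In preprocess_test_data above, the camera intrinsics are inferred by rearranging the pinhole projection u = fx * X / Z + cx into u * Z = fx * X + cx * Z, which is linear in (fx, cx) and solvable with least squares. A self-contained illustration on synthetic, noise-free points follows; the ground-truth values are arbitrary.
import numpy as np

fx_true, cx_true = 1500.0, 1024.0  # arbitrary ground-truth intrinsics
rng = np.random.RandomState(0)
X = rng.uniform(-500, 500, 200)   # 3D x-coordinates (mm)
Z = rng.uniform(2000, 5000, 200)  # depths (mm)
u = fx_true * X / Z + cx_true     # projected pixel x-coordinates

A = np.stack([X, Z], axis=-1)     # same stacking as the preprocessing code
b = (u * Z).reshape(-1, 1)
fx_est, cx_est = np.linalg.lstsq(A, b, rcond=None)[0].flatten()
print(fx_est, cx_est)  # recovers ~1500.0 and ~1024.0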
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('wmdadict', '0029_auto_20170917_2049'),
]
operations = [
migrations.RemoveField(
model_name='bmdwfield',
name='element_type_old',
),
migrations.RemoveField(
model_name='bmdwfield',
name='required_old',
),
migrations.RemoveField(
model_name='bmdwfield',
name='type_old',
),
migrations.RemoveField(
model_name='emdissemantics',
name='required_old',
),
migrations.AlterField(
model_name='bmdwfield',
name='dict_field',
field=models.ForeignKey(default=999, on_delete=django.db.models.deletion.PROTECT, to='wmdadict.DictionaryField', verbose_name='WMDA Dictionary Field'),
preserve_default=False,
),
# migrations.AlterField(
# model_name='bmdwfield',
# name='element_type',
# field=models.ForeignKey(default=999, on_delete=django.db.models.deletion.PROTECT, to='wmdadict.BmdwElementType'),
# preserve_default=False,
# ),
# migrations.AlterField(
# model_name='bmdwfield',
# name='required',
# field=models.ForeignKey(default=999, on_delete=django.db.models.deletion.PROTECT, to='wmdadict.RequiredFieldType'),
# preserve_default=False,
# ),
# migrations.AlterField(
# model_name='bmdwfield',
# name='type',
# field=models.ForeignKey(default=999, on_delete=django.db.models.deletion.PROTECT, to='wmdadict.BmdwFieldType'),
# preserve_default=False,
# ),
migrations.AlterField(
model_name='emdisfield',
name='dict_field',
field=models.ForeignKey(default=999, on_delete=django.db.models.deletion.PROTECT, to='wmdadict.DictionaryField', verbose_name='WMDA Dictionary Field'),
preserve_default=False,
),
migrations.AlterField(
model_name='emdisfield',
name='emdis_type',
field=models.ForeignKey(default=999, on_delete=django.db.models.deletion.PROTECT, to='wmdadict.EmdisFieldType', verbose_name='field type'),
preserve_default=False,
),
migrations.AlterField(
model_name='emdissemantics',
name='emdis_field',
field=models.ForeignKey(default=999, on_delete=django.db.models.deletion.PROTECT, to='wmdadict.EmdisField'),
preserve_default=False,
),
migrations.AlterField(
model_name='emdissemantics',
name='emdis_message',
field=models.ForeignKey(default=999, on_delete=django.db.models.deletion.PROTECT, to='wmdadict.EmdisMessage'),
preserve_default=False,
),
migrations.AlterField(
model_name='emdissemantics',
name='required',
field=models.ForeignKey(default=999, on_delete=django.db.models.deletion.PROTECT, to='wmdadict.RequiredFieldType'),
preserve_default=False,
),
migrations.AlterField(
model_name='formfields',
name='dict_field',
field=models.ForeignKey(default=999, on_delete=django.db.models.deletion.PROTECT, to='wmdadict.DictionaryField', verbose_name='WMDA Dictionary Field'),
preserve_default=False,
),
migrations.AlterField(
model_name='formfields',
name='wmda_form',
field=models.ForeignKey(default=999, on_delete=django.db.models.deletion.PROTECT, to='wmdadict.WmdaForm'),
preserve_default=False,
),
]
|
wmdadict/migrations/0030_auto_20170917_2104.py
|
| 0.580352 | 0.073165 |
from math import acos
class Vector:
"""3D Vector. Could be used as 2D."""
def __init__(self, x: float = 0, y: float = 0, z: float = 0):
self.data = [x, y, z]
def __add__(self, other):
return Vector(*(self.data[i] + other.data[i] for i in range(3)))
def __iadd__(self, other):
for i in range(3):
self.data[i] += other.data[i]
return self
def __mul__(self, coefficient: float):
return Vector(*(self.data[i] * coefficient for i in range(3)))
def __imul__(self, coefficient: float):
for i in range(3):
self.data[i] *= coefficient
return self
def __neg__(self):
return Vector(*(-self.data[i] for i in range(3)))
def __sub__(self, other):
return Vector(*(self.data[i] - other.data[i] for i in range(3)))
def __isub__(self, other):
for i in range(3):
self.data[i] -= other.data[i]
return self
def __truediv__(self, coefficient: float):
return Vector(*(self.data[i] / coefficient for i in range(3)))
def __itruediv__(self, coefficient: float):
for i in range(3):
self.data[i] /= coefficient
return self
def __eq__(self, other):
return self.data == other.data
def __repr__(self):
return f"Vector({self.data[0]}, {self.data[1]}, {self.data[2]})"
def length(self):
"""Vector euclidean length."""
return sum(self.data[i] ** 2 for i in range(3)) ** 0.5
def x(self) -> float:
"""Vector x-projection."""
return self.data[0]
def y(self) -> float:
"""Vector y-projection."""
return self.data[1]
def z(self) -> float:
"""Vector z-projection."""
return self.data[2]
def x_vector(self):
"""Vector x-projection as vector parallel of x-axis."""
return Vector(self.data[0], 0, 0)
def y_vector(self):
"""Vector y-projection as vector parallel of y-axis."""
return Vector(0, self.data[1], 0)
def z_vector(self):
"""Vector z-projection as vector parallel of z-axis."""
return Vector(0, 0, self.data[2])
    def dot(self, other) -> float:
        """Dot product of self and other."""
        return sum(self.data[i] * other.data[i] for i in range(3))
    def angle_cos(self, other=None) -> float:
        """Cosine of angle between self and other."""
        if other is None:
            other = Vector(1, 0)
        return self.dot(other) / (self.length() * other.length())
    def angle(self, other) -> float:
        """Angle between self and other in radians."""
        return acos(self.angle_cos(other))
|
physicslib/vector.py
|
| 0.91507 | 0.696688 |
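A brief usage sketch for the Vector class above; the commented import is just the module path listed, and the values are arbitrary.
# from physicslib.vector import Vector  # module path as listed above

a = Vector(1, 0, 0)
b = Vector(0, 2, 0)
print(a + b)             # Vector(1, 2, 0)
print((a + b).length())  # ~2.236
print(a.dot(b))          # 0 -> the vectors are perpendicular
print(a.angle(b))        # ~1.5708 (pi / 2 radians)
print(a.angle_cos())     # 1.0 -> cosine of the angle with the x-axis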
from nuaal.connections.api import RestBase
from nuaal.definitions import DATA_PATH
from nuaal.utils import check_path
import json
import os
import requests
class Cisco_NX_API(RestBase):
def __init__(self, ip, username, password, verify_ssl=False, DEBUG=False):
super(Cisco_NX_API, self).__init__(url="https://{}".format(ip),
username=username,
password=password,
api_base_path="/ins",
verify_ssl=verify_ssl,
DEBUG=DEBUG,
con_type="NXOS"
)
def _initialize(self):
        if not self.verify_ssl:
            # Disable certificate warnings
            try:
                requests.packages.urllib3.disable_warnings()
            except Exception:
                self.logger.warning(msg="Failed to disable Certificate Warnings")
self.common_headers["auth"] = (self.username, self.password)
def craft_payload(self, commands):
if isinstance(commands, str):
commands = [x.strip() for x in commands.split(";")]
commands = " ;".join(commands)
payload = {
"ins_api": {
"version": "1.0",
"type": "cli_show",
"chunk": "0",
"sid": "1",
"input": commands,
"output_format": "json"
}
}
return json.dumps(payload)
def test(self):
self._initialize()
payload = self.craft_payload("show cdp neighbors")
response = self._response_handler(self._post(path="", data=payload))
print(json.dumps(response, indent=2))
if __name__ == '__main__':
device = Cisco_NX_API(ip="10.17.89.47",
username="admin",
password="<PASSWORD>",
DEBUG=True)
device.test()
|
nuaal/connections/api/nxos/NxOsBase.py
|
| 0.365457 | 0.106087 |
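# A small sketch of what craft_payload() does with its input: a semicolon-
# separated command string is split, stripped and re-joined with " ;" before
# being wrapped in the NX-API "ins_api" envelope shown above.
commands = "show version; show cdp neighbors"
parts = [x.strip() for x in commands.split(";")]
assert " ;".join(parts) == "show version ;show cdp neighbors"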
import argparse
import collections
import gzip
import json
from pprint import pformat
import re
import tqdm
def main():
p = argparse.ArgumentParser()
p.add_argument("-o", "--output-file", help="json file output path")
p.add_argument("-v", "--verbose", action="store_true")
p.add_argument("gangstr_spec", help="path of the GangSTR repeat spec .bed file")
args = p.parse_args()
if not args.output_file:
        args.output_file = re.sub(r"\.bed(\.gz)?$", "", args.gangstr_spec) + ".variant_catalog.json"
process_variant_catalog(args.gangstr_spec, args.output_file, verbose=args.verbose)
def process_variant_catalog(gangstr_spec_path, output_file_path, verbose=False):
print(f"Parsing {gangstr_spec_path}")
json_records = []
existing_locus_ids = set()
counter = collections.defaultdict(int)
with (gzip.open if gangstr_spec_path.endswith("gz") else open)(gangstr_spec_path, "rt") as f:
for row in tqdm.tqdm(f, unit=" records"):
fields = row.strip("\n").split("\t")
chrom = fields[0]
start_0based = int(fields[1]) - 1
end_1based = int(fields[2])
repeat_unit = fields[4]
if len(fields) > 5:
off_target_regions = fields[5]
if len(off_target_regions) > 1:
print(f"WARNING: found GangSTR spec with off-target regions. This script doesn't yet support "
f"transferring off-target regions to the variant catalog")
counter["total input loci"] += 1
trim_bp = (end_1based - start_0based) % len(repeat_unit)
if trim_bp != 0:
counter["trimmed locus"] += 1
if verbose:
print(f"WARNING: {chrom}:{start_0based}-{end_1based} interval has size {end_1based - start_0based} "
f"which is not a multiple of the repeat unit {repeat_unit} (size {len(repeat_unit)}). "
f"Changing it to {chrom}:{start_0based}-{end_1based - trim_bp}")
end_1based -= trim_bp
assert (end_1based - start_0based) % len(repeat_unit) == 0
locus_id = f"{chrom}-{start_0based}-{end_1based}-{repeat_unit}"
if locus_id in existing_locus_ids:
counter["skipped duplicate"] += 1
if verbose:
print(f"WARNING: skipping duplicate locus id: {locus_id}")
continue
existing_locus_ids.add(locus_id)
json_records.append({
"LocusId": locus_id,
"ReferenceRegion": f"{chrom}:{start_0based}-{end_1based}",
"LocusStructure": f"({repeat_unit})*",
"VariantType": "Repeat",
})
# TODO add support for off-target regions
with (gzip.open if output_file_path.endswith("gz") else open)(output_file_path, "wt") as f:
json.dump(json_records, f, indent=4)
print(f"Wrote out {output_file_path}")
print(pformat(dict(counter)))
if __name__ == "__main__":
main()
|
str_analysis/convert_gangstr_spec_to_expansion_hunter_variant_catalog.py
| 0.214034 | 0.197367 |
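# A worked example of the trimming arithmetic above, using a made-up locus:
# an 11 bp interval with a 4 bp repeat unit leaves 3 bp to trim so that the
# interval length becomes an exact multiple of the repeat unit.
start_0based, end_1based, repeat_unit = 100, 111, "CAGG"
trim_bp = (end_1based - start_0based) % len(repeat_unit)   # 11 % 4 == 3
end_1based -= trim_bp                                      # 111 -> 108
assert (end_1based - start_0based) % len(repeat_unit) == 0
assert f"chr1:{start_0based}-{end_1based}" == "chr1:100-108"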
import os
import pickle
import json
import numpy as np
import sklearn.metrics
from itertools import islice, zip_longest
from IPython.display import display, HTML, Markdown
from bing_maps import *
import pandas as pd
import mapswipe
from pathlib import Path
from collections import defaultdict, namedtuple
import bing_maps
TileVotes = namedtuple('TileVotes', ['yes_count', 'maybe_count', 'bad_imagery_count'])
TileVotes.__iadd__ = lambda x,y: TileVotes(x.yes_count + y.yes_count,
x.maybe_count + y.maybe_count,
x.bad_imagery_count + y.bad_imagery_count)
class_names = ['bad_imagery', 'built', 'empty']
class_number_to_name = {k: v for k, v in enumerate(class_names)}
class_name_to_number = {v: k for k, v in class_number_to_name.items()}
def ground_truth_solutions_file_to_map(solutions_path):
retval = {}
with open(solutions_path) as solutions_file:
for line in solutions_file:
tokens = line.strip().split(',')
retval[tokens[0]] = tokens[1]
return retval
def predictions_file_to_map(predictions_path):
with open(predictions_path, 'rb') as f:
(paths, prediction_vectors) = zip(*pickle.load(f))
quadkeys = []
for path in paths:
filename = os.path.basename(path)
quadkeys.append(filename[0:filename.index('.')])
return dict(zip(quadkeys, prediction_vectors))
def grouper(iterable, n, fillvalue=None):
"Collect data into fixed-length chunks or blocks"
# grouper('ABCDEFG', 3, 'x') --> ABC DEF Gxx"
args = [iter(iterable)] * n
return zip_longest(*args, fillvalue=fillvalue)
def tableau(quadkeys, solution = None):
retVal = "<table>"
for row in grouper(quadkeys, 3):
html_row = "<tr>"
for quadkey in row:
html_row += "<td align=\"center\" style=\"text-align: center\">"
if quadkey is not None:
html_row += cell_renderer(quadkey, solution)
html_row += "</td>"
html_row += "</tr>"
retVal += html_row
retVal += "</table>"
display(HTML(retVal))
def cell_renderer(quadkey, solution):
retVal = ""
retVal = "Quadkey: <a href=\"{}\" target=\"_blank\">{}</a><br>".format(quadkey_to_url(quadkey), quadkey)
if solution is not None:
retVal += "Officially: {}<br>".format(solution.ground_truth[quadkey])
retVal += "Predicted class: " + solution.predicted_class(quadkey) + "<br>"
retVal += "<img align=\"center\" src=\"mapswipe_working_dir/{}\"/><br>".format(os.path.relpath(mapswipe.get_tile_path(quadkey),
os.path.join(str(Path.home()),'.mapswipe')))
if solution is not None:
retVal += "PV:" + str(solution.prediction_vectors[quadkey])
return retVal
def get_all_tile_votes_for_projects(project_ids):
retval = defaultdict(lambda: TileVotes(0, 0, 0))
for project_id in project_ids:
with mapswipe.get_project_details_file(project_id) as project_details_file:
tile_json = json.loads(project_details_file.read())
for tile in tile_json:
quadkey = bing_maps.tile_to_quadkey((int(tile['task_x']), int(tile['task_y'])), int(tile['task_z']))
votes = TileVotes(tile['yes_count'], tile['maybe_count'], tile['bad_imagery_count'])
retval[quadkey] += votes
return retval
class Solution:
def __init__(self, ground_truth, prediction_vectors):
self.ground_truth = ground_truth
self.prediction_vectors = prediction_vectors
if self.ground_truth.keys() != self.prediction_vectors.keys():
raise(KeyError('Ground truth tiles != prediction tiles'))
ground_truth_classes = []
prediction_vector_classes = []
for quadkey in ground_truth.keys():
ground_truth_classes.append(class_name_to_number[ground_truth[quadkey]])
prediction_vector_classes.append(np.argmax(prediction_vectors[quadkey]))
self.confusion_matrix = sklearn.metrics.confusion_matrix(ground_truth_classes, prediction_vector_classes)
self.category_accuracies = [self.confusion_matrix[i][i] / sum(self.confusion_matrix[i]) for i in range(len(self.confusion_matrix))]
self.accuracy = np.mean(self.category_accuracies)
self.tile_count = len(ground_truth)
def classified_as(self, predicted_class, solution_class):
if predicted_class in class_name_to_number:
predict_class_index = class_name_to_number[predicted_class]
solution_class_index = class_name_to_number[solution_class]
else:
predict_class_index = predicted_class
solution_class_index = solution_class
retval = {k : v for k,v in self.prediction_vectors.items()
if np.argmax(v) == predict_class_index and class_name_to_number[self.ground_truth[k]] == solution_class_index}
return sorted(retval.items(), key=lambda x:x[1][np.argmax(x[1])], reverse=True)
def predicted_class(self, quadkey):
return class_number_to_name[np.argmax(self.prediction_vectors[quadkey])]
|
mapswipe_analysis.py
| 0.535584 | 0.219317 |
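# A self-contained illustration of the grouper() helper above: the last chunk
# is padded with the fill value, which is how tableau() can always render
# complete 3-column rows of tiles.
from itertools import zip_longest

def grouper(iterable, n, fillvalue=None):
    args = [iter(iterable)] * n
    return zip_longest(*args, fillvalue=fillvalue)

assert list(grouper("ABCDEFG", 3, "x")) == [
    ("A", "B", "C"), ("D", "E", "F"), ("G", "x", "x")
]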
import pandas as pd
import matplotlib.pyplot as plt
# parameters
# ---
# which subset
subset = 'survey_only'
rho = 1700
# where are the samples and where to put results
dir_results = '../../results/neutral_data_fitm/'
# where to find the island area and richness data
fname_area = '../../data/processed/island_area.csv' # island_name,area_sq_m,area_sq_km
fname_rich = '../../data/processed/island_richness.csv' # island_name,richness
# directory where the island subset definitions are kept
dirname_subsets = '../../data/processed/island_subsets/'
# which islands we are analysing
# ---
islands = list( pd.read_csv( dirname_subsets + subset + '.csv', header=0 )['island_name'] )
# get real data's area vs richness
# ---
# create a dataframe: island_name, area, richness
df_area = pd.read_csv(fname_area)
df_rich = pd.read_csv(fname_rich)
assert len(df_area) == len(df_rich), f'Number of islands in {fname_area} =/= {fname_rich}'
df_data = pd.merge(df_area, df_rich, on="island_name")
# subset to islands of interest
df_data_sub = df_data[df_data['island_name'].isin(islands)]
A_tru = df_data_sub['area_sq_km'].values
S_tru = df_data_sub['richness'].values
# get the first sample's area vs richness
# ---
fname = dir_results + 'samples_' + subset + '_rho_' + str(rho) + '.csv'
df = pd.read_csv(fname)
df_1 = df.iloc[0]
# get order of island names and areas
islands = [ s[2:] for s in df.columns if s[0:2] == 'J_' ]
isle2area = dict(zip(df_data_sub['island_name'], df_data_sub['area_sq_km'])) # dictionary to turn island names into island areas
A_sam = [ isle2area[island] for island in islands ]
# get the richness of each island
data_row_as_str = df_1['presence_absence_matrix_cols_isles_concatenated']
data_row = [ 1 if c == 'p' else 0 for c in data_row_as_str ]
S = df_1['S']
H = df_1['H']
S_sam = [ sum(data_row[i:i+S]) for i, island in zip( range(0, S*H, S), islands ) ]
# plot both for comparison
# ---
plt.scatter(A_tru, S_tru, alpha=0.7, label='data')
plt.scatter(A_sam, S_sam, alpha=0.7, label='sample')
plt.xlabel(r'area (km$^2$)')
plt.ylabel(r'number of species')
plt.xscale('log')
plt.legend(loc='best')
plt.tight_layout()
plt.savefig(dir_results + 'sample_' + subset + '_rho_' + str(rho) + '.pdf')
plt.close()
|
scripts/neutral_data_fitm/plot_sample.py
| 0.407098 | 0.529811 |
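# A toy illustration of how the sampled presence/absence string above is
# decoded: the string has S*H characters (S species per island, H islands,
# islands concatenated), and each island's richness is the number of 'p'
# characters in its block of S characters. Values here are invented.
S, H = 3, 2
data_row_as_str = "pap" + "ppp"   # island 1: 2 species present, island 2: 3
data_row = [1 if c == 'p' else 0 for c in data_row_as_str]
S_sam = [sum(data_row[i:i + S]) for i in range(0, S * H, S)]
assert S_sam == [2, 3]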
def main():
"""
    Say we have a procedure that produces an assignment between true
    detections within images and some set of predictions.
"""
import numpy as np
import ubelt as ub
# Create demo detection metrics
from kwcoco.metrics import DetectionMetrics
dmet = DetectionMetrics.demo(
nimgs=1000, nboxes=(0, 10), n_fp=(0, 10), n_fn=(0, 10))
# We might have some sort of mapping between images and the predicted and
# true boxes (note gid means imaGe id).
gid_to_true = dmet.gid_to_true_dets
gid_to_pred = dmet.gid_to_pred_dets
print('gid_to_true = {}'.format(str(gid_to_true)[0:100] + ' ...'))
print('gid_to_pred = {}'.format(str(gid_to_pred)[0:100] + ' ...'))
"""
gid_to_true = {0: <Detections(5) at 0x7fe08c335a10>, 1: <Detections(5) at 0x7fe08c3359d0>, 2: <Detections(8) at 0x ...
gid_to_pred = {0: <Detections(2) at 0x7fe08c335990>, 1: <Detections(6) at 0x7fe08c335dd0>, 2: <Detections(13) at 0 ...
"""
# Each detection might have data like this
print('gid_to_true[0].data = {}'.format(ub.repr2(gid_to_true[0].data, nl=1)))
"""
gid_to_true[0].data = {
'boxes': <Boxes(cxywh,
array([[74.07547 , 61.581673 , 24.438194 , 47.287003 ],
[28.509544 , 26.718906 , 3.487833 , 43.095215 ],
[60.247677 , 65.802795 , 42.938393 , 36.610165 ],
[35.281883 , 80.26636 , 4.0845375, 31.898323 ],
[30.69794 , 83.549904 , 34.32573 , 7.9176483]], dtype=float32))>,
'class_idxs': np.array([1, 1, 1, 1, 1], dtype=np.int64),
'weights': np.array([1, 1, 1, 1, 1], dtype=np.int32),
}
"""
# we can compute an association between each box and get a flat table
table = dmet.confusion_vectors().data
# The table of values might look something like this.
# Again, note the gids correspond to imaGe ids
# txs correspond to indexes of true detections in that image
# pxs correspond to indexes of predicted detections in that image
# A -1 in an index value means the row is unassociated
print(table.pandas()[['gid', 'txs', 'pxs']])
"""
gid txs pxs
0 0 3 0
1 0 4 1
2 0 0 -1
3 0 1 -1
4 0 2 -1
... ... ... ...
9881 999 -1 1
9882 999 -1 3
9883 999 -1 2
9884 999 0 -1
9885 999 1 -1
"""
# Say we need to know some attribute (e.g. the bounding boxes) for all of
# the true associations, but the table is already flattened. (multiple
# duplicate gids per row). How do we access that data?
# We could use a list comprehension and lookup the Detections object for
# that image and then look up the index within the image:
data_attr_v1 = np.array([
[-1] * 4 if tx == -1 else gid_to_true[gid].data['boxes'].data[tx]
for gid, tx in zip(table['gid'], table['txs'])
])
# But that means we are accessing the __getitem__ of gid_to_true a lot
# Is there a better way?
# Yes, we can group the table by image id.
import kwarray
data_attr_v2 = np.full((len(table), 4), fill_value=-1.0)
unique_gids, groupxs = kwarray.group_indices(table['gid'])
    for gid, image_groupxs in zip(unique_gids, groupxs):
        true_det = gid_to_true[gid]
        image_txs = table['txs'][image_groupxs]
        valid_flags = image_txs != -1
        valid_txs = image_txs[valid_flags]
        valid_groupxs = image_groupxs[valid_flags]
        valid_attr = true_det.data['boxes'].data[valid_txs]
        data_attr_v2[valid_groupxs] = valid_attr
# We can see both codeblocks are the same, but which is faster
assert np.all(data_attr_v2 == data_attr_v1)
import timerit
ti = timerit.Timerit(50, bestof=10, verbose=2)
for timer in ti.reset('list-comprehension'):
with timer:
data_attr_v1 = np.array([
[-1] * 4 if tx == -1 else gid_to_true[gid].data['boxes'].data[tx]
for gid, tx in zip(table['gid'], table['txs'])
])
for timer in ti.reset('grouping'):
with timer:
data_attr_v2 = np.full((len(table), 4), fill_value=-1.0)
unique_gids, groupxs = kwarray.group_indices(table['gid'])
            for gid, image_groupxs in zip(unique_gids, groupxs):
                true_det = gid_to_true[gid]
                image_txs = table['txs'][image_groupxs]
                valid_flags = image_txs != -1
                valid_txs = image_txs[valid_flags]
                valid_groupxs = image_groupxs[valid_flags]
                valid_attr = true_det.data['boxes'].data[valid_txs]
                data_attr_v2[valid_groupxs] = valid_attr
    # The grouping method is 3x faster, even though it's longer! It lets you
# vectorize more operations and ultimately perform fewer python ops. So
# give grouping a try in your data if you have flat tables that need to be
# unflattened.
|
when_is_grouping_better.py
| 0.667581 | 0.572364 |
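# A rough numpy-only sketch of the grouping primitive used above, for readers
# without kwarray installed. This approximates what kwarray.group_indices
# returns (an assumption, not taken from its source): sort by id, then split
# the sorted index array wherever the id changes.
import numpy as np

def group_indices(ids):
    """Return (unique_ids, groupxs) for a non-empty 1D array of ids."""
    ids = np.asarray(ids)
    sortx = np.argsort(ids, kind="stable")
    sorted_ids = ids[sortx]
    cuts = np.flatnonzero(np.diff(sorted_ids)) + 1
    unique_ids = sorted_ids[np.r_[0, cuts].astype(int)]
    groupxs = np.split(sortx, cuts)
    return unique_ids, groupxs

gids = np.array([7, 7, 3, 3, 3, 9])
unique_gids, groupxs = group_indices(gids)
assert list(unique_gids) == [3, 7, 9]
assert [list(g) for g in groupxs] == [[2, 3, 4], [0, 1], [5]]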
import secrets
from copy import copy
from datetime import datetime
from enum import Enum
from typing import TYPE_CHECKING, List, Optional, Tuple, Union
import flask
from sqlalchemy import and_, func
from sqlalchemy.dialects.postgresql import ARRAY, INET
from sqlalchemy.ext.declarative import declared_attr
from werkzeug.security import check_password_hash, generate_password_hash
from core import APIException, cache, db
from core.mixins import SinglePKMixin
from core.users.permissions import SitePermissions
from core.users.serializers import (
APIKeySerializer,
InviteSerializer,
UserSerializer,
)
from core.utils import cached_property
if TYPE_CHECKING:
from core.permissions.models import UserClass as UserClass_ # noqa
app = flask.current_app
class User(db.Model, SinglePKMixin):
__tablename__ = 'users'
__serializer__ = UserSerializer
__cache_key__ = 'users_{id}'
__cache_key_permissions__ = 'users_{id}_permissions'
__cache_key_from_username__ = 'users_username_{username}'
id: int = db.Column(db.Integer, primary_key=True)
username: str = db.Column(db.String(32), unique=True, nullable=False)
passhash: str = db.Column(db.String(128), nullable=False)
email: str = db.Column(db.String(255), nullable=False)
enabled: bool = db.Column(db.Boolean, nullable=False, server_default='t')
locked: bool = db.Column(db.Boolean, nullable=False, server_default='f')
user_class_id: int = db.Column(
db.Integer,
db.ForeignKey('user_classes.id'),
nullable=False,
server_default='1',
)
inviter_id: int = db.Column(
db.Integer, db.ForeignKey('users.id'), index=True
)
invites: int = db.Column(db.Integer, nullable=False, server_default='0')
uploaded: int = db.Column(
db.BigInteger, nullable=False, server_default='5368709120'
) # 5 GB
downloaded: int = db.Column(
db.BigInteger, nullable=False, server_default='0'
)
@declared_attr
def __table_args__(cls):
return (
db.Index(
'ix_users_username', func.lower(cls.username), unique=True
),
db.Index('ix_users_email', func.lower(cls.email)),
)
@classmethod
def from_username(cls, username: str) -> 'User':
username = username.lower()
return cls.from_query(
key=cls.__cache_key_from_username__.format(username=username),
filter=func.lower(cls.username) == username,
)
@classmethod
def new(cls, username: str, password: str, email: str) -> 'User':
"""
Alternative constructor which generates a password hash and
lowercases and strips leading and trailing spaces from the email.
"""
if cls.from_username(username) is not None:
raise APIException(f'The username {username} is already in use.')
return super()._new(
username=username,
passhash=generate_password_hash(password),
email=email.lower().strip(),
)
@cached_property
def user_class(self):
return self.user_class_model.name
@cached_property
def secondary_classes(self) -> List[str]:
from core.permissions.models import SecondaryClass
secondary_classes = SecondaryClass.from_user(self.id)
return [sc.name for sc in secondary_classes]
@cached_property
def inviter(self) -> Optional['User']:
return User.from_pk(self.inviter_id) if self.inviter_id else None
@cached_property
def api_keys(self) -> List['APIKey']:
return APIKey.from_user(self.id)
@cached_property
def permissions(self) -> List[str]:
"""
        Compute the effective permissions of a user by combining the
        permissions of their primary and secondary user classes with any
        per-user overrides. Locked users are restricted to the permissions
        defined for them in the config.
"""
from core.permissions.models import SecondaryClass
from core.permissions.models import UserPermission
if self.locked: # Locked accounts have restricted permissions.
return app.config['LOCKED_ACCOUNT_PERMISSIONS']
key = self.__cache_key_permissions__.format(id=self.id)
permissions = cache.get(key)
if not permissions:
permissions = copy(self.user_class_model.permissions)
for class_ in SecondaryClass.from_user(self.id):
permissions += class_.permissions
permissions = set(permissions) # De-dupe
for perm, granted in UserPermission.from_user(self.id).items():
if not granted and perm in permissions:
permissions.remove(perm)
if granted and perm not in permissions:
permissions.add(perm)
cache.set(key, permissions)
return permissions
@cached_property
def basic_permissions(self) -> List[str]:
return [
p for p in self.permissions if p in app.config['BASIC_PERMISSIONS']
]
@cached_property
def user_class_model(self) -> 'UserClass_':
from core.permissions.models import UserClass
return UserClass.from_pk(self.user_class_id)
def belongs_to_user(self) -> bool:
"""Check whether or not the requesting user matches this user."""
return flask.g.user == self
def set_password(self, password: str) -> None:
self.passhash = generate_password_hash(password)
def check_password(self, password: str) -> bool:
return check_password_hash(self.passhash, password)
def has_permission(self, permission: Union[None, str, Enum]) -> bool:
"""Check whether a user has a permission."""
if SitePermissions.GOD_MODE.value in self.permissions:
return True
p = permission.value if isinstance(permission, Enum) else permission
return bool(p and p in self.permissions)
class Invite(db.Model, SinglePKMixin):
__tablename__: str = 'invites'
__serializer__ = InviteSerializer
__cache_key__: str = 'invites_{code}'
__cache_key_of_user__: str = 'invites_user_{user_id}'
__deletion_attr__ = 'expired'
code: str = db.Column(db.String(24), primary_key=True)
inviter_id: int = db.Column(
db.Integer, db.ForeignKey('users.id'), nullable=False, index=True
)
invitee_id: int = db.Column(
db.Integer, db.ForeignKey('users.id'), index=True
)
email: str = db.Column(db.String(255), nullable=False)
time_sent: datetime = db.Column(
db.DateTime(timezone=True), server_default=func.now()
)
from_ip: str = db.Column(INET, nullable=False, server_default='0.0.0.0')
expired: bool = db.Column(
db.Boolean, nullable=False, index=True, server_default='f'
)
@classmethod
def new(cls, inviter_id: int, email: str, ip: int) -> 'Invite':
"""
Generate a random invite code.
:param inviter_id: User ID of the inviter
:param email: E-mail to send the invite to
:param ip: IP address the invite was sent from
"""
while True:
code = secrets.token_urlsafe(24)[:24]
if not cls.from_pk(code, include_dead=True):
break
cache.delete(cls.__cache_key_of_user__.format(user_id=inviter_id))
return super()._new(
inviter_id=inviter_id,
code=code,
email=email.lower().strip(),
from_ip=ip,
)
@classmethod
def from_inviter(
cls, inviter_id: int, include_dead: bool = False, used: bool = False
) -> List['Invite']:
"""
Get all invites sent by a user.
:param inviter_id: The User ID of the inviter.
:param include_dead: Whether or not to include dead invites in the list
:param used: Whether or not to include used invites in the list
:return: A list of invites sent by the inviter
"""
filter = cls.inviter_id == inviter_id
if used:
filter = and_(filter, cls.invitee_id.isnot(None)) # type: ignore
return cls.get_many(
key=cls.__cache_key_of_user__.format(user_id=inviter_id),
filter=filter,
order=cls.time_sent.desc(), # type: ignore
include_dead=include_dead or used,
)
@cached_property
def invitee(self) -> User:
return User.from_pk(self.invitee_id)
@cached_property
def inviter(self) -> User:
return User.from_pk(self.inviter_id)
def belongs_to_user(self) -> bool:
"""Returns whether or not the requesting user matches the inviter."""
return flask.g.user is not None and self.inviter_id == flask.g.user.id
class APIKey(db.Model, SinglePKMixin):
__tablename__: str = 'api_keys'
__serializer__ = APIKeySerializer
__cache_key__: str = 'api_keys_{hash}'
__cache_key_of_user__: str = 'api_keys_user_{user_id}'
__deletion_attr__ = 'revoked'
hash: str = db.Column(db.String(10), primary_key=True)
user_id: int = db.Column(
db.Integer, db.ForeignKey('users.id'), nullable=False, index=True
)
keyhashsalt: str = db.Column(db.String(128))
last_used: datetime = db.Column(
db.DateTime(timezone=True), nullable=False, server_default=func.now()
)
ip: str = db.Column(INET, nullable=False, server_default='0.0.0.0')
user_agent: str = db.Column(db.Text)
revoked: bool = db.Column(
db.Boolean, nullable=False, index=True, server_default='f'
)
permanent: bool = db.Column(
db.Boolean, nullable=False, index=True, server_default='f'
)
    timeout: int = db.Column(
        db.Integer, nullable=False, server_default='3600'
    )
    permissions: List[str] = db.Column(ARRAY(db.String(36)))
@classmethod
def new(
cls,
user_id: int,
ip: str,
user_agent: str,
permanent: bool = False,
timeout: int = 60 * 30,
permissions: List[str] = None,
) -> Tuple[str, 'APIKey']:
"""
Create a new API Key with randomly generated secret keys and the
user details passed in as params. Generated keys are hashed and
salted for storage in the database.
:param user_id: API Key will belong to this user
:param ip: The IP that this session was created with
:param user_agent: User Agent the session was created with
:return: A tuple containing the identifier and the new API Key
"""
while True:
hash = secrets.token_urlsafe(10)[:10]
if not cls.from_pk(hash, include_dead=True):
break
key = secrets.token_urlsafe(16)[:16]
cache.delete(cls.__cache_key_of_user__.format(user_id=user_id))
api_key = super()._new(
user_id=user_id,
hash=hash,
keyhashsalt=generate_password_hash(key),
ip=ip,
user_agent=user_agent,
permanent=permanent,
timeout=timeout,
permissions=permissions or [],
)
return (hash + key, api_key)
@classmethod
def from_user(
cls, user_id: int, include_dead: bool = False
) -> List['APIKey']:
"""
Get all API keys owned by a user.
:param user_id: The User ID of the owner
:param include_dead: Whether or not to include dead API keys in the search
:return: A list of API keys owned by the user
"""
return cls.get_many(
key=cls.__cache_key_of_user__.format(user_id=user_id),
filter=cls.user_id == user_id,
include_dead=include_dead,
)
@classmethod
def hashes_from_user(cls, user_id: int) -> List[Union[int, str]]:
return cls.get_pks_of_many(
key=cls.__cache_key_of_user__.format(user_id=user_id),
filter=cls.user_id == user_id,
)
def check_key(self, key: str) -> bool:
"""
        Validates the authenticity of an API key against its stored hash.
:param key: The key to check against the keyhashsalt
:return: Whether or not the key matches the keyhashsalt
"""
return check_password_hash(self.keyhashsalt, key)
def has_permission(self, permission: Union[str, Enum]) -> bool:
"""
Checks if the API key is assigned a permission. If the API key
is not assigned any permissions, it checks against the user's
permissions instead.
:param permission: Permission to search for
:return: Whether or not the API Key has the permission
"""
p = permission.value if isinstance(permission, Enum) else permission
if self.permissions:
return p in self.permissions
user = User.from_pk(self.user_id)
return user.has_permission(p)
|
core/users/models.py
| 0.803135 | 0.136033 |
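# A hedged sketch of how the token returned by APIKey.new() would be verified:
# the first 10 characters are the public hash (the primary key) and the
# remaining 16 characters are the secret checked against keyhashsalt. The
# lookup flow below is an assumption; it is not part of the file above.
def check_api_key(APIKey, token: str) -> bool:
    hash_part, key_part = token[:10], token[10:]
    api_key = APIKey.from_pk(hash_part)
    return api_key is not None and api_key.check_key(key_part)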
import sys, os, fcntl, termios
##################################################################
# Establish a serial-port connection w. required settings.
##################################################################
def openSerial(portName="/dev/ttyS0"):
    # The open attempt may fail on account of permissions or on
    # account of somebody's already using the port.
    # Pass such exceptions on to our client.
    try:
        # You probably just want to use the builtin open(), here...
        fd = os.open(portName, os.O_RDWR)
        # Set up symbolic constants for the list elements returned by
        # tcgetattr.
        [iflag, oflag, cflag, lflag, ispeed, ospeed, cc] = range(7)
        # Set the port baud rate, etc.
        settings = termios.tcgetattr(fd)
        # Set the baud rate.
        settings[ospeed] = termios.B9600  # Output speed
        settings[ispeed] = termios.B0     # Input speed (B0 => match output)
        # Go for 8N1 with hardware handshaking.
        settings[cflag] = (((settings[cflag] & ~termios.CSIZE) |
                            termios.CS8) & ~termios.PARENB)
        # NOTE: This code relies on an UNDOCUMENTED
        # feature of Solaris 2.4. Answerbook explicitly states
        # that CRTSCTS will not work. After much searching you
        # will discover that termiox ioctl() calls are to
        # be used for this purpose. After reviewing Sunsolve
        # databases, you will find that termiox's TCGETX/TCSETX
        # are not implemented. *snarl*
        settings[cflag] = settings[cflag] | termios.CRTSCTS
        # Don't echo received chars, or do erase or kill input processing.
        settings[lflag] = (settings[lflag] &
                           ~(termios.ECHO | termios.ICANON))
        # Do NO output processing.
        settings[oflag] = 0
        # When reading, always return immediately, regardless of
        # how many characters are available.
        settings[cc][termios.VMIN] = 0
        settings[cc][termios.VTIME] = 0
        # Install the modified line settings.
        termios.tcsetattr(fd, termios.TCSANOW, settings)
        # Set it up for non-blocking I/O.
        fcntl.fcntl(fd, fcntl.F_SETFL, os.O_NONBLOCK)
        # Hand the configured descriptor back to the caller.
        return fd
    except OSError as exc:
        # If any of this fails, mention the port name in the
        # exception.
        raise OSError("Can't open %s: errno=%d (%s)"
                      % (portName, exc.errno, exc.strerror))
# --------------------------------------------------------------------------
openSerial("/dev/ttyS0")
|
snippets/serial/serial.py
| 0.241221 | 0.117167 |
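# A usage sketch for the port configured above: with VMIN/VTIME at 0 and
# O_NONBLOCK set, os.read() returns whatever bytes are available (possibly
# none) or raises BlockingIOError. The polling loop below is illustrative
# only; the descriptor comes from openSerial() now returning fd.
import os
import time

def poll_port(fd, seconds=1.0):
    deadline = time.monotonic() + seconds
    chunks = []
    while time.monotonic() < deadline:
        try:
            data = os.read(fd, 256)
        except BlockingIOError:    # nothing buffered on the non-blocking fd
            data = b""
        if data:
            chunks.append(data)
        time.sleep(0.05)
    return b"".join(chunks)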
from typing import Dict, Any, List, Tuple, Optional
import copy
class ShieldBoosterVariant(object):
def __init__(self):
# no need for private attributes, we are handing out deep copies
self.engineering = ""
self.experimental = ""
self.shield_strength_bonus = 0
self.exp_res_bonus = 0
self.kin_res_bonus = 0
self.therm_res_bonus = 0
self.can_skip = False
self.loadout_template = None # type: Optional[Dict[str, Any]]
def __str__(self):
return f"{self.engineering} - {self.experimental}"
def get_loadout_template_slot(self, slot: int) -> Dict[str, Any]:
"""
Get the loadout dictionary for the provided slot number
        :param slot: int from 1 to 8 (inclusive)
        :return: loadout dictionary for the given slot, or an empty dict if no template is set
"""
if self.loadout_template:
loadout = copy.deepcopy(self.loadout_template)
loadout["Slot"] = f"tinyhardpoint{slot}"
return loadout
return dict()
@staticmethod
def create_from_json(json_booster: Dict[str, Any]) -> "ShieldBoosterVariant":
"""
Create a ShieldBoosterVariant object from json node
:param json_booster: json node (or dictionary) in data file
:return: newly created ShieldBoosterVariant object
"""
booster = ShieldBoosterVariant()
booster.engineering = json_booster["engineering"]
booster.experimental = json_booster["experimental"]
booster.shield_strength_bonus = json_booster["shield_strength_bonus"]
booster.exp_res_bonus = 1 - json_booster["exp_res_bonus"]
booster.kin_res_bonus = 1 - json_booster["kin_res_bonus"]
booster.therm_res_bonus = 1 - json_booster["therm_res_bonus"]
booster.can_skip = json_booster["can_skip"]
booster.loadout_template = json_booster["loadout_template"]
return booster
@staticmethod
    def calculate_booster_bonuses(shield_boosters: List["ShieldBoosterVariant"], booster_loadout: Optional[List[int]] = None) -> Tuple[float, float, float, float]:
"""
Calculate the combined bonus of shield boosters. This function has 2 modes: either supply it with a list of all ShieldBoosterVariant and a list of indexes
for the boosters to use or supply it only with a list of ShieldBoosterVariant.
:param shield_boosters: list of ShieldBoosterVariant.
:param booster_loadout: booster loadout as a list of indexes of the booster in shield_boosters
:return: tuple: exp_modifier, kin_modifier, therm_modifier, hitpoint_bonus
"""
exp_modifier = 1.0
kin_modifier = 1.0
therm_modifier = 1.0
hitpoint_bonus = 1.0
if booster_loadout:
boosters = [shield_boosters[x] for x in booster_loadout]
else:
boosters = shield_boosters
for booster in boosters:
exp_modifier *= booster.exp_res_bonus
kin_modifier *= booster.kin_res_bonus
therm_modifier *= booster.therm_res_bonus
hitpoint_bonus += booster.shield_strength_bonus
# Compensate for diminishing returns
if exp_modifier < 0.7:
exp_modifier = 0.7 - (0.7 - exp_modifier) / 2
if kin_modifier < 0.7:
kin_modifier = 0.7 - (0.7 - kin_modifier) / 2
if therm_modifier < 0.7:
therm_modifier = 0.7 - (0.7 - therm_modifier) / 2
return exp_modifier, kin_modifier, therm_modifier, hitpoint_bonus
|
ShieldBoosterVariant.py
| 0.879581 | 0.205874 |
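A minimal usage sketch for the ShieldBoosterVariant class above. It assumes the class is importable as defined; the engineering names, bonus values, and loadout_template keys below are invented for illustration, not taken from any real data file.

# Illustrative only: all field values below are made up.
example_booster_json = {
    "engineering": "Heavy Duty",
    "experimental": "Super Capacitors",
    "shield_strength_bonus": 0.468,
    "exp_res_bonus": 0.0,
    "kin_res_bonus": 0.0,
    "therm_res_bonus": 0.0,
    "can_skip": False,
    "loadout_template": {"Slot": "", "Item": "example_booster_item", "On": True, "Priority": 0},
}

booster = ShieldBoosterVariant.create_from_json(example_booster_json)
print(booster)                               # "Heavy Duty - Super Capacitors"
print(booster.get_loadout_template_slot(1))  # deep copy with Slot set to "tinyhardpoint1"

# Stack the same booster four times and inspect the combined modifiers.
exp, kin, therm, hp = ShieldBoosterVariant.calculate_booster_bonuses([booster] * 4)
print(exp, kin, therm, hp)                   # resistances stay 1.0 here; hitpoint bonus is 1.0 + 4 * 0.468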
import tensorrt as trt
import numpy as np
from scipy.io.wavfile import write
import time
import torch
import argparse
import sys
sys.path.append('./')
from common.utils import to_gpu, get_mask_from_lengths
from tacotron2.text import text_to_sequence
from inference import MeasureTime, prepare_input_sequence, load_and_setup_model
import dllogger as DLLogger
from dllogger import StdOutBackend, JSONStreamBackend, Verbosity
from trt.trt_utils import load_engine, run_trt_engine
from waveglow.denoiser import Denoiser
def parse_args(parser):
"""
Parse commandline arguments.
"""
    parser.add_argument('-i', '--input', type=str, required=True,
                        help='Full path to the input text (phrases separated by newlines)')
parser.add_argument('-o', '--output', required=True,
help='Output folder to save audio (file per phrase)')
parser.add_argument('--encoder', type=str, required=True,
help='Full path to the Encoder engine')
parser.add_argument('--decoder', type=str, required=True,
help='Full path to the DecoderIter engine')
parser.add_argument('--postnet', type=str, required=True,
help='Full path to the Postnet engine')
parser.add_argument('--waveglow', type=str, required=True,
help='Full path to the WaveGlow engine')
parser.add_argument('--waveglow-ckpt', type=str, default="",
help='Full path to the WaveGlow model checkpoint file')
parser.add_argument('--log-file', type=str, default='nvlog.json',
help='Filename for logging')
parser.add_argument('-d', '--denoising-strength', default=0.01, type=float,
help='Denoising strength for removing model bias')
parser.add_argument('-sr', '--sampling-rate', default=22050, type=int,
help='Sampling rate')
parser.add_argument('--stft-hop-length', type=int, default=256,
help='STFT hop length for estimating audio length from mel size')
parser.add_argument('--fp16', action='store_true',
help='Inference with FP16 precision')
return parser
def init_decoder_inputs(memory, processed_memory, memory_lengths):
device = memory.device
dtype = memory.dtype
bs = memory.size(0)
seq_len = memory.size(1)
attention_rnn_dim = 1024
decoder_rnn_dim = 1024
encoder_embedding_dim = 512
n_mel_channels = 80
attention_hidden = torch.zeros(bs, attention_rnn_dim, device=device, dtype=dtype)
attention_cell = torch.zeros(bs, attention_rnn_dim, device=device, dtype=dtype)
decoder_hidden = torch.zeros(bs, decoder_rnn_dim, device=device, dtype=dtype)
decoder_cell = torch.zeros(bs, decoder_rnn_dim, device=device, dtype=dtype)
attention_weights = torch.zeros(bs, seq_len, device=device, dtype=dtype)
attention_weights_cum = torch.zeros(bs, seq_len, device=device, dtype=dtype)
attention_context = torch.zeros(bs, encoder_embedding_dim, device=device, dtype=dtype)
mask = get_mask_from_lengths(memory_lengths).to(device)
decoder_input = torch.zeros(bs, n_mel_channels, device=device, dtype=dtype)
return (decoder_input, attention_hidden, attention_cell, decoder_hidden,
decoder_cell, attention_weights, attention_weights_cum,
attention_context, memory, processed_memory, mask)
def init_decoder_outputs(memory, memory_lengths):
device = memory.device
dtype = memory.dtype
bs = memory.size(0)
seq_len = memory.size(1)
attention_rnn_dim = 1024
decoder_rnn_dim = 1024
encoder_embedding_dim = 512
n_mel_channels = 80
attention_hidden = torch.zeros(bs, attention_rnn_dim, device=device, dtype=dtype)
attention_cell = torch.zeros(bs, attention_rnn_dim, device=device, dtype=dtype)
decoder_hidden = torch.zeros(bs, decoder_rnn_dim, device=device, dtype=dtype)
decoder_cell = torch.zeros(bs, decoder_rnn_dim, device=device, dtype=dtype)
attention_weights = torch.zeros(bs, seq_len, device=device, dtype=dtype)
attention_weights_cum = torch.zeros(bs, seq_len, device=device, dtype=dtype)
attention_context = torch.zeros(bs, encoder_embedding_dim, device=device, dtype=dtype)
decoder_output = torch.zeros(bs, n_mel_channels, device=device, dtype=dtype)
gate_prediction = torch.zeros(bs, 1, device=device, dtype=dtype)
return (attention_hidden, attention_cell, decoder_hidden,
decoder_cell, attention_weights, attention_weights_cum,
attention_context, decoder_output, gate_prediction)
def init_decoder_tensors(decoder_inputs, decoder_outputs):
decoder_tensors = {
"inputs" : {
'decoder_input': decoder_inputs[0],
'attention_hidden': decoder_inputs[1],
'attention_cell': decoder_inputs[2],
'decoder_hidden': decoder_inputs[3],
'decoder_cell': decoder_inputs[4],
'attention_weights': decoder_inputs[5],
'attention_weights_cum': decoder_inputs[6],
'attention_context': decoder_inputs[7],
'memory': decoder_inputs[8],
'processed_memory': decoder_inputs[9],
'mask': decoder_inputs[10]
},
"outputs" : {
'out_attention_hidden': decoder_outputs[0],
'out_attention_cell': decoder_outputs[1],
'out_decoder_hidden': decoder_outputs[2],
'out_decoder_cell': decoder_outputs[3],
'out_attention_weights': decoder_outputs[4],
'out_attention_weights_cum': decoder_outputs[5],
'out_attention_context': decoder_outputs[6],
'decoder_output': decoder_outputs[7],
'gate_prediction': decoder_outputs[8]
}
}
return decoder_tensors
def swap_inputs_outputs(decoder_inputs, decoder_outputs):
new_decoder_inputs = (decoder_outputs[7], # decoder_output
decoder_outputs[0], # attention_hidden
decoder_outputs[1], # attention_cell
decoder_outputs[2], # decoder_hidden
decoder_outputs[3], # decoder_cell
decoder_outputs[4], # attention_weights
decoder_outputs[5], # attention_weights_cum
decoder_outputs[6], # attention_context
decoder_inputs[8], # memory
decoder_inputs[9], # processed_memory
decoder_inputs[10]) # mask
new_decoder_outputs = (decoder_inputs[1], # attention_hidden
decoder_inputs[2], # attention_cell
decoder_inputs[3], # decoder_hidden
decoder_inputs[4], # decoder_cell
decoder_inputs[5], # attention_weights
decoder_inputs[6], # attention_weights_cum
decoder_inputs[7], # attention_context
decoder_inputs[0], # decoder_input
decoder_outputs[8])# gate_output
return new_decoder_inputs, new_decoder_outputs
def infer_tacotron2_trt(encoder, decoder_iter, postnet,
encoder_context, decoder_context, postnet_context,
sequences, sequence_lengths, measurements, fp16):
memory = torch.zeros((len(sequence_lengths), sequence_lengths[0], 512)).cuda()
if fp16:
memory = memory.half()
device = memory.device
dtype = memory.dtype
processed_memory = torch.zeros((len(sequence_lengths),sequence_lengths[0],128), device=device, dtype=dtype)
lens = torch.zeros_like(sequence_lengths)
encoder_tensors = {
"inputs" :
{'sequences': sequences, 'sequence_lengths': sequence_lengths},
"outputs" :
{'memory': memory, 'lens': lens, 'processed_memory': processed_memory}
}
print("Running Tacotron2 Encoder")
with MeasureTime(measurements, "tacotron2_encoder_time"):
run_trt_engine(encoder_context, encoder, encoder_tensors)
device = memory.device
mel_lengths = torch.zeros([memory.size(0)], dtype=torch.int32, device = device)
not_finished = torch.ones([memory.size(0)], dtype=torch.int32, device = device)
mel_outputs, gate_outputs, alignments = (torch.zeros(1, device = device), torch.zeros(1, device = device), torch.zeros(1, device = device))
gate_threshold = 0.5
max_decoder_steps = 1664
first_iter = True
decoder_inputs = init_decoder_inputs(memory, processed_memory, sequence_lengths)
decoder_outputs = init_decoder_outputs(memory, sequence_lengths)
print("Running Tacotron2 Decoder")
measurements_decoder = {}
while True:
decoder_tensors = init_decoder_tensors(decoder_inputs, decoder_outputs)
with MeasureTime(measurements_decoder, "step"):
run_trt_engine(decoder_context, decoder_iter, decoder_tensors)
if first_iter:
mel_outputs = torch.unsqueeze(decoder_outputs[7], 2)
gate_outputs = torch.unsqueeze(decoder_outputs[8], 2)
alignments = torch.unsqueeze(decoder_outputs[4], 2)
measurements['tacotron2_decoder_time'] = measurements_decoder['step']
first_iter = False
else:
mel_outputs = torch.cat((mel_outputs, torch.unsqueeze(decoder_outputs[7], 2)), 2)
gate_outputs = torch.cat((gate_outputs, torch.unsqueeze(decoder_outputs[8], 2)), 2)
alignments = torch.cat((alignments, torch.unsqueeze(decoder_outputs[4], 2)), 2)
measurements['tacotron2_decoder_time'] += measurements_decoder['step']
dec = torch.le(torch.sigmoid(decoder_outputs[8]), gate_threshold).to(torch.int32).squeeze(1)
not_finished = not_finished*dec
mel_lengths += not_finished
if torch.sum(not_finished) == 0:
print("Stopping after",mel_outputs.size(2),"decoder steps")
break
if mel_outputs.size(2) == max_decoder_steps:
print("Warning! Reached max decoder steps")
break
decoder_inputs, decoder_outputs = swap_inputs_outputs(decoder_inputs, decoder_outputs)
mel_outputs_postnet = torch.zeros_like(mel_outputs, device=device, dtype=dtype)
postnet_tensors = {
"inputs" :
{'mel_outputs': mel_outputs},
"outputs" :
{'mel_outputs_postnet': mel_outputs_postnet}
}
print("Running Tacotron2 Postnet")
with MeasureTime(measurements, "tacotron2_postnet_time"):
run_trt_engine(postnet_context, postnet, postnet_tensors)
print("Tacotron2 Postnet done")
return mel_outputs_postnet, mel_lengths
def infer_waveglow_trt(waveglow, waveglow_context, mel, measurements, fp16):
mel_size = mel.size(2)
batch_size = mel.size(0)
stride = 256
n_group = 8
z_size = mel_size*stride
z_size = z_size//n_group
z = torch.randn(batch_size, n_group, z_size).cuda()
mel = mel.unsqueeze(3)
z = z.unsqueeze(3)
audios = torch.zeros(batch_size, mel_size*stride).cuda()
if fp16:
z = z.half()
mel = mel.half()
audios = audios.half()
waveglow_tensors = {
"inputs" :
{'mel': mel, 'z': z},
"outputs" :
{'audio': audios}
}
print("Running WaveGlow")
with MeasureTime(measurements, "waveglow_time"):
run_trt_engine(waveglow_context, waveglow, waveglow_tensors)
return audios
def main():
parser = argparse.ArgumentParser(
description='TensorRT Tacotron 2 Inference')
parser = parse_args(parser)
args, _ = parser.parse_known_args()
# initialize CUDA state
torch.cuda.init()
TRT_LOGGER = trt.Logger(trt.Logger.WARNING)
encoder = load_engine(args.encoder, TRT_LOGGER)
decoder_iter = load_engine(args.decoder, TRT_LOGGER)
postnet = load_engine(args.postnet, TRT_LOGGER)
waveglow = load_engine(args.waveglow, TRT_LOGGER)
if args.waveglow_ckpt != "":
# setup denoiser using WaveGlow PyTorch checkpoint
waveglow_ckpt = load_and_setup_model('WaveGlow', parser, args.waveglow_ckpt,
True, forward_is_infer=True)
denoiser = Denoiser(waveglow_ckpt).cuda()
# after initialization, we don't need WaveGlow PyTorch checkpoint anymore - deleting
del waveglow_ckpt
torch.cuda.empty_cache()
# create TRT contexts for each engine
encoder_context = encoder.create_execution_context()
decoder_context = decoder_iter.create_execution_context()
postnet_context = postnet.create_execution_context()
waveglow_context = waveglow.create_execution_context()
DLLogger.init(backends=[JSONStreamBackend(Verbosity.DEFAULT,
args.output+'/'+args.log_file),
StdOutBackend(Verbosity.VERBOSE)])
texts = []
    try:
        with open(args.input, 'r') as f:
            texts = f.readlines()
    except IOError:
        print("Could not read file", args.input)
        sys.exit(1)
measurements = {}
sequences, sequence_lengths = prepare_input_sequence(texts)
sequences = sequences.to(torch.int32)
sequence_lengths = sequence_lengths.to(torch.int32)
with MeasureTime(measurements, "latency"):
mel, mel_lengths = infer_tacotron2_trt(encoder, decoder_iter, postnet,
encoder_context, decoder_context, postnet_context,
sequences, sequence_lengths, measurements, args.fp16)
audios = infer_waveglow_trt(waveglow, waveglow_context, mel, measurements, args.fp16)
with encoder_context, decoder_context, postnet_context, waveglow_context:
pass
audios = audios.float()
if args.waveglow_ckpt != "":
with MeasureTime(measurements, "denoiser"):
audios = denoiser(audios, strength=args.denoising_strength).squeeze(1)
for i, audio in enumerate(audios):
audio = audio[:mel_lengths[i]*args.stft_hop_length]
audio = audio/torch.max(torch.abs(audio))
        audio_path = args.output + "/audio_" + str(i) + "_trt.wav"
write(audio_path, args.sampling_rate, audio.cpu().numpy())
DLLogger.log(step=0, data={"tacotron2_encoder_latency": measurements['tacotron2_encoder_time']})
DLLogger.log(step=0, data={"tacotron2_decoder_latency": measurements['tacotron2_decoder_time']})
DLLogger.log(step=0, data={"tacotron2_postnet_latency": measurements['tacotron2_postnet_time']})
DLLogger.log(step=0, data={"waveglow_latency": measurements['waveglow_time']})
DLLogger.log(step=0, data={"latency": measurements['latency']})
if args.waveglow_ckpt != "":
DLLogger.log(step=0, data={"denoiser": measurements['denoiser']})
DLLogger.flush()
prec = "fp16" if args.fp16 else "fp32"
latency = measurements['latency']
throughput = audios.size(1)/latency
log_data = "1,"+str(sequence_lengths[0].item())+","+prec+","+str(latency)+","+str(throughput)+","+str(mel_lengths[0].item())+"\n"
with open("log_bs1_"+prec+".log", 'a') as f:
f.write(log_data)
if __name__ == "__main__":
main()
|
demo/Tacotron2/trt/inference_trt.py
|
| 0.589362 | 0.184327 |
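The decoder loop in infer_tacotron2_trt above keeps two fixed sets of tensors and swaps their roles every step (swap_inputs_outputs) instead of allocating new buffers each iteration. A minimal, framework-free sketch of that ping-pong pattern, with plain Python lists standing in for the pre-allocated TensorRT-bound tensors:

# Ping-pong buffering: outputs of step N become the inputs of step N+1 without reallocation.
def make_buffers(n):
    return [0.0] * n, [0.0] * n   # stand-ins for pre-allocated GPU tensors

def swap(inputs, outputs):
    # Mirrors the idea of swap_inputs_outputs above: the roles flip, the memory does not move.
    return outputs, inputs

inputs, outputs = make_buffers(4)
for step in range(3):
    for i, x in enumerate(inputs):
        outputs[i] = x + 1.0       # stands in for run_trt_engine(...)
    inputs, outputs = swap(inputs, outputs)

print(inputs)  # [3.0, 3.0, 3.0, 3.0] after three "decoder steps"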
import unittest
from io import BytesIO
try:
# Python 2
import cPickle as pickle
except ImportError:
# Python 3
import pickle
from icarus.util import Tree
class TestTree(unittest.TestCase):
@classmethod
def setUpClass(cls):
pass
@classmethod
def tearDownClass(cls):
pass
def setUp(self):
pass
def tearDown(self):
pass
def test_init_from_tree(self):
t = Tree({'a': 1, 'b': 2})
tree = Tree(t)
self.assertEqual(tree.getval(['a']), 1)
self.assertEqual(tree.getval(['b']), 2)
self.assertIsInstance(tree, Tree)
def test_init_from_dict(self):
tree = Tree({'a': 1, 'b': 2})
self.assertEqual(tree.getval(['a']), 1)
self.assertEqual(tree.getval(['b']), 2)
self.assertIsInstance(tree, Tree)
def test_init_from_kwargs(self):
tree = Tree(a=1, b=2)
self.assertEqual(tree.getval(['a']), 1)
self.assertEqual(tree.getval(['b']), 2)
self.assertIsInstance(tree, Tree)
def test_init_from_nested_kwargs(self):
tree = Tree(a=1, b=dict(c=2))
self.assertEqual(tree.getval(['a']), 1)
self.assertEqual(tree.getval(['b', 'c']), 2)
self.assertIsInstance(tree, Tree)
self.assertIsInstance(tree['b'], Tree)
def test_init_from_dict_kwargs(self):
tree = Tree({'c': 3}, a=1, b=2)
self.assertEqual(tree.getval(['a']), 1)
self.assertEqual(tree.getval(['b']), 2)
self.assertEqual(tree.getval(['c']), 3)
self.assertIsInstance(tree, Tree)
def test_init_from_nested_dict(self):
tree = Tree({'a': {'c': {'e': 1}}, 'b': {'d': 2}})
self.assertEqual(tree.getval(['a', 'c', 'e']), 1)
self.assertEqual(tree.getval(['b', 'd']), 2)
self.assertIsInstance(tree, Tree)
self.assertIsInstance(tree['a'], Tree)
self.assertIsInstance(tree['a']['c'], Tree)
self.assertIsInstance(tree.getval(['a', 'c']), Tree)
self.assertIsInstance(tree['b'], Tree)
def test_setitem(self):
tree = Tree()
tree['a'] = {'b': 1, 'c': 2}
self.assertIsInstance(tree, Tree)
self.assertIsInstance(tree['a'], Tree)
self.assertEqual(tree.getval(['a', 'b']), 1)
self.assertEqual(tree.getval(['a', 'c']), 2)
def test_nested_setitem(self):
tree = Tree()
tree['a'] = {'b': {'c': 1}, 'd': 2}
self.assertIsInstance(tree, Tree)
self.assertIsInstance(tree['a'], Tree)
self.assertIsInstance(tree['a']['b'], Tree)
self.assertEqual(tree.getval(['a', 'b', 'c']), 1)
self.assertEqual(tree.getval(['a', 'd']), 2)
def test_update_base(self):
tree = Tree()
tree.update({'b': 1, 'c': 2})
self.assertIsInstance(tree, Tree)
self.assertEqual(tree.getval(['b']), 1)
self.assertEqual(tree.getval(['c']), 2)
    def test_update_new_branch(self):
tree = Tree()
tree['a'].update({'b': 1, 'c': 2})
self.assertIsInstance(tree, Tree)
self.assertIsInstance(tree['a'], Tree)
self.assertEqual(tree.getval(['a', 'b']), 1)
self.assertEqual(tree.getval(['a', 'c']), 2)
def test_nested_update(self):
tree = Tree()
tree['a'].update({'b': {'c': 1}, 'd': 2})
self.assertIsInstance(tree, Tree)
self.assertIsInstance(tree['a'], Tree)
self.assertIsInstance(tree['a']['b'], Tree)
self.assertEqual(tree.getval(['a', 'b', 'c']), 1)
self.assertEqual(tree.getval(['a', 'd']), 2)
def test_getset(self):
tree = Tree()
tree.setval([1, 2, 3, 4], 5)
self.assertEqual(tree.getval([1, 2, 3, 4]), 5)
def test_getval(self):
tree = Tree()
tree[1][2][3] = 4
self.assertEqual(tree.getval([1, 2, 3]), 4)
self.assertEqual(tree.getval([1, 2])[3], 4)
self.assertEqual(tree.getval([1])[2][3], 4)
def test_getval_none(self):
tree = Tree()
self.assertIsNone(tree.getval([1]))
self.assertIsNone(tree.getval([1, 2]))
self.assertIsNone(tree.getval([3, 4, 5]))
def test_getval_empty(self):
tree = Tree()
_ = tree[1][2][3]
self.assertIsNotNone(tree.getval([1]))
self.assertIsNotNone(tree.getval([1, 2]))
self.assertIsNone(tree.getval([1, 2, 3]))
self.assertIsNone(tree.getval([1, 2, 3, 4]))
def test_iter(self):
tree = Tree()
# add first elements
tree['b']['c']['e'] = 4
tree['b']['v']['d'] = 3
l = list(tree)
        self.assertEqual(len(l), 2)
self.assertIn((('b', 'c', 'e'), 4), l)
self.assertIn((('b', 'v', 'd'), 3), l)
# add additional element
tree['a'] = 1
l = list(tree)
        self.assertEqual(len(l), 3)
self.assertIn((('b', 'c', 'e'), 4), l)
self.assertIn((('b', 'v', 'd'), 3), l)
self.assertIn((('a',), 1), l)
# overwrite previous elements
tree['b']['c'] = 5
l = list(tree)
        self.assertEqual(len(l), 3)
self.assertIn((('b', 'c'), 5), l)
self.assertIn((('b', 'v', 'd'), 3), l)
self.assertIn((('a',), 1), l)
def test_paths(self):
tree = Tree()
tree['b']['c']['e'] = 4
tree['b']['v']['d'] = 3
tree['a'] = 1
expected = {('b', 'c', 'e'): 4, ('b', 'v', 'd'): 3, ('a',): 1}
self.assertDictEqual(expected, tree.paths())
def test_pickle_dict(self):
d = {'a': 1, 'b': 2}
tree = Tree(**d)
self.assertEqual(tree['a'], 1)
self.assertEqual(tree['b'], 2)
f = BytesIO()
pickle.dump(tree, f)
f.seek(0)
tree_2 = pickle.load(f)
        self.assertEqual(type(tree), type(tree_2))
        self.assertEqual(tree, tree_2)
self.assertEqual(tree_2['a'], 1)
self.assertEqual(tree_2['b'], 2)
self.assertIsInstance(tree, Tree)
self.assertIsInstance(tree_2, Tree)
def test_pickle_empty(self):
tree = Tree()
f = BytesIO()
pickle.dump(tree, f)
f.seek(0)
tree_2 = pickle.load(f)
        self.assertEqual(type(tree), type(tree_2))
        self.assertEqual(tree, tree_2)
self.assertIsInstance(tree, Tree)
self.assertIsInstance(tree_2, Tree)
def test_pickle(self):
tree = Tree()
tree[1][2][3] = '123'
tree[1][2][4] = '124'
f = BytesIO()
pickle.dump(tree, f)
f.seek(0)
tree_2 = pickle.load(f)
        self.assertEqual(type(tree), type(tree_2))
        self.assertEqual(tree, tree_2)
        self.assertEqual(tree[1][2][3], '123')
        self.assertEqual(tree_2[1][2][3], '123')
self.assertIsInstance(tree, Tree)
self.assertIsInstance(tree_2, Tree)
def test_str(self):
tree = Tree({'a': {'b': 'a'}, 'b': 'c', 'd': {'b': 'c'}})
self.assertEqual(eval(str(tree)), tree)
def test_dict_1(self):
d = {'a': 1, 'b': 2}
tree = Tree(d)
self.assertEqual(d, tree.dict())
def test_dict_2(self):
d = {'a': {'b': 'a'}, 'b': 'c', 'd': {'b': 'c'}}
tree = Tree(d)
self.assertEqual(d, tree.dict())
def test_dict_3(self):
d = {'a': {'b': [1, 2, 'v']}, 'b': 'c', 'd': {'b': 4}}
tree = Tree(d)
self.assertEqual(d, tree.dict())
def test_match(self):
t = {'a': {'b': 1}, 'c': 2, 'd': {'e': 3}}
pos_match_equal = {'a': {'b': 1}, 'c': 2, 'd': {'e': 3}}
pos_match_subset = {'a': {'b': 1}, 'd': {'e': 3}}
neg_match_diff = {'a': {'b': 2}, 'c': 2, 'd': {'e': 3}}
neg_match_superset = {'a': {'b': 1}, 'c': 2, 'd': {'e': 3}, 'f': 3}
tree = Tree(t)
self.assertTrue(tree.match(pos_match_equal))
self.assertTrue(tree.match(pos_match_subset))
self.assertFalse(tree.match(neg_match_diff))
self.assertFalse(tree.match(neg_match_superset))
def test_match_empty_tree(self):
tree = Tree()
self.assertFalse(tree.match({'a': 1}))
|
icarus/test/test_tree.py
|
| 0.699254 | 0.58436 |
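The tests above pin down the behavior expected of icarus.util.Tree. As a rough, simplified analogue (an assumption for illustration, not the real implementation), an auto-vivifying nested mapping with path-based get/set can be sketched on top of defaultdict; the real class additionally supports iteration, paths(), match(), dict() and pickling, as exercised by the tests.

from collections import defaultdict

class MiniTree(defaultdict):
    # Simplified stand-in for icarus.util.Tree: covers only setval/getval.
    def __init__(self):
        super().__init__(MiniTree)

    def setval(self, path, value):
        node = self
        for key in path[:-1]:
            node = node[key]          # auto-vivifies intermediate branches
        node[path[-1]] = value

    def getval(self, path):
        node = self
        for key in path:
            if not isinstance(node, dict) or key not in node:
                return None
            node = node[key]
        # an empty branch counts as "no value", mirroring test_getval_empty
        return None if isinstance(node, MiniTree) and not node else node

t = MiniTree()
t.setval([1, 2, 3, 4], 5)
print(t.getval([1, 2, 3, 4]))  # 5
print(t.getval([9]))           # None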
from django.db import models
class ProductFamily(models.Model):
product_family_id = models.FloatField(primary_key=True, db_column="produkt_familie_id")
description = models.CharField(max_length=50, db_column="bezeichnung")
slug = models.SlugField(default="test")
class Meta:
ordering = ('description',)
db_table = 'produkt_familie'
def __str__(self):
return self.description
def get_absolute_url(self):
return f'products/{self.slug}/'
class ProductDivision(models.Model):
product_division_id = models.FloatField(primary_key=True, db_column="produkt_sparte_id")
product_family = models.ForeignKey(ProductFamily, related_name="divisions", on_delete=models.DO_NOTHING, blank=True,
null=True, db_column="produkt_familie_id")
description = models.CharField(max_length=50, db_column="bezeichnung")
slug = models.SlugField(default="test")
class Meta:
ordering = ('description',)
db_table = 'produkt_sparte'
def __str__(self):
return self.description
def get_absolute_url(self):
return f'/products/{self.product_family.slug}/{self.slug}/'
class ProductCategory(models.Model):
product_category_id = models.FloatField(primary_key=True, db_column="produkt_kategorie_id")
product_division = models.ForeignKey(ProductDivision, related_name="categories", on_delete=models.DO_NOTHING,
blank=True, null=True, db_column="produkt_sparte_id")
description = models.CharField(max_length=50, db_column="bezeichnung")
slug = models.SlugField(default="test")
class Meta:
ordering = ('description',)
db_table = 'produkt_kategorie'
def __str__(self):
return self.description
def get_absolute_url(self):
return f'/products/{self.product_division.product_family.slug}/{self.product_division.slug}/{self.slug}'
class ProductSubcategory(models.Model):
product_subcategory_id = models.FloatField(primary_key=True, db_column="produkt_subkategorie_id")
product_category = models.ForeignKey(ProductCategory, related_name="subcategories", on_delete=models.DO_NOTHING,
blank=True, null=True, db_column="produkt_kategorie_id")
description = models.CharField(max_length=50, db_column="bezeichnung")
slug = models.SlugField(default="test")
class Meta:
ordering = ('description',)
db_table = 'produkt_subkategorie'
def __str__(self):
return self.description
def get_absolute_url(self):
return f'/products/{self.product_category.product_division.product_family.slug}/{self.product_category.product_division.slug}/{self.product_category.slug}/{self.slug}/'
class Product(models.Model):
product_id = models.AutoField(primary_key=True, db_column="produkt_id")
subcategory = models.ForeignKey(ProductSubcategory, related_name='products', on_delete=models.CASCADE,
db_column="produktklasse_id", blank=True, null=True)
name = models.CharField(db_column="proukt_name", max_length=150)
slug = models.SlugField()
description = models.CharField(max_length=500, blank=True, null=True, db_column="produktbeschreibung")
price = models.FloatField(db_column="listenverkaufspreis")
image = models.CharField(db_column="produktbild_link", max_length=1000, blank=True, null=True)
sku = models.FloatField(db_column="sku", blank=True, null=True)
evaluation = models.FloatField(db_column="bewertung", blank=True, null=True)
recycle = models.FloatField(db_column="recyclebar", blank=True, null=True)
lowfat = models.FloatField(db_column="low_fat", blank=True, null=True)
mwst = models.FloatField(db_column="mwst_satz")
discount = models.FloatField(db_column="angebotsrabatt", blank=True, null=True)
origin = models.FloatField(db_column="datenherkunft_id", blank=True, null=True)
# date_added = models.DateTimeField(auto_now_add=True)
class Meta:
# ordering = ('-date_added',)
db_table = 'produkt'
        managed = True
def __str__(self):
return self.name
def get_absolute_url(self):
return f'/product/{self.slug}/'
def get_image(self):
return self.image.split(',')[0]
def get_thumbnail(self):
return self.image.split(',')[0]
def get_price(self):
return round(self.price, 2)
|
product/models.py
|
| 0.618435 | 0.138171 |
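The Product helpers above split a comma-separated produktbild_link column to pick the first image URL and round the list price for display. A standalone sketch of that parsing logic outside Django, using invented sample values:

# Stand-alone illustration of Product.get_image / get_thumbnail / get_price;
# the URLs and price below are made-up sample values, not real catalogue data.
sample_image_field = "https://example.invalid/a.jpg,https://example.invalid/b.jpg"
sample_price = 19.987

def first_image(image_field: str) -> str:
    # Mirrors Product.get_image: the column stores several URLs, comma-separated.
    return image_field.split(',')[0]

print(first_image(sample_image_field))  # https://example.invalid/a.jpg
print(round(sample_price, 2))           # 19.99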
import copy
import json
import pytest
import pathlib
import urllib.request
from unittest.mock import patch, MagicMock, mock_open
from o3de import manifest, download, utils
TEST_O3DE_MANIFEST_JSON_PAYLOAD = '''
{
"o3de_manifest_name": "testuser",
"origin": "C:/Users/testuser/.o3de",
"default_engines_folder": "C:/Users/testuser/.o3de/Engines",
"default_projects_folder": "C:/Users/testuser/O3DE/Projects",
"default_gems_folder": "C:/Users/testuser/O3DE/Gems",
"default_templates_folder": "C:/Users/testuser/O3DE/Templates",
"default_restricted_folder": "C:/Users/testuser/.o3de/Restricted",
"default_third_party_folder": "C:/Users/testuser/.o3de/3rdParty",
"projects": [],
"external_subdirectories": [],
"templates": [],
"restricted": [],
"repos": ["http://o3de.org"],
"engines": [],
"engines_path": {}
}
'''
TEST_O3DE_MANIFEST_EXISTING_GEM_JSON_PAYLOAD = '''
{
"o3de_manifest_name": "testuser",
"origin": "C:/Users/testuser/.o3de",
"default_engines_folder": "C:/Users/testuser/.o3de/Engines",
"default_projects_folder": "C:/Users/testuser/O3DE/Projects",
"default_gems_folder": "C:/Users/testuser/O3DE/Gems",
"default_templates_folder": "C:/Users/testuser/O3DE/Templates",
"default_restricted_folder": "C:/Users/testuser/.o3de/Restricted",
"default_third_party_folder": "C:/Users/testuser/.o3de/3rdParty",
"projects": [],
"external_subdirectories": ["C:/Users/testuser/O3DE/Gems/TestGem"],
"templates": [],
"restricted": [],
"repos": ["http://o3de.org"],
"engines": [],
"engines_path": {}
}
'''
TEST_O3DE_REPO_FILE_NAME = '3fb160cdfde8b32864335e71a9b7a0519591f3080d2a06e7ca10f830e0cb7a54.json'
TEST_O3DE_REPO_WITH_OBJECTS_JSON_PAYLOAD = '''
{
"repo_name": "Test Repo",
"origin": "",
"gems": ["http://o3derepo.org/TestGem", "C:/localrepo/TestLocalGem"],
"projects": ["http://o3derepo.org/TestProject"],
"engines": ["http://o3derepo.org/TestEngine"],
"templates": ["http://o3derepo.org/TestTemplate"]
}
'''
TEST_O3DE_REPO_GEM_FILE_NAME = 'a765db91484f0d963d4ba5c98161074df7cd87caf1340e6bc7cebdce1807c994.json'
TEST_O3DE_REPO_GEM_JSON_PAYLOAD = '''
{
"gem_name": "TestGem",
"license": "Apache-2.0 Or MIT",
"origin": "Test Creator",
"origin_uri": "http://o3derepo.org/TestGem/gem.zip",
"repo_uri": "http://o3derepo.org",
"last_updated": "2022-01-01 11:00:00",
"type": "Tool",
"summary": "A test downloadable gem.",
"canonical_tags": [
"Gem"
],
"user_tags": [],
"icon_path": "preview.png",
"requirements": "",
"documentation_url": "",
"dependencies": []
}
'''
TEST_O3DE_REPO_GEM_WITH_HASH_JSON_PAYLOAD = '''
{
"gem_name": "TestGem",
"license": "Apache-2.0 Or MIT",
"origin": "Test Creator",
"origin_uri": "http://o3derepo.org/TestGem/gem.zip",
"repo_uri": "http://o3derepo.org",
"sha256": "cd89c508cad0e48e51806a9963d17a0f2f7196e26c79f45aa9ea3b435a2ceb6a",
"type": "Tool",
"summary": "A test downloadable gem.",
"canonical_tags": [
"Gem"
],
"user_tags": [],
"icon_path": "preview.png",
"requirements": "",
"documentation_url": "",
"dependencies": []
}
'''
TEST_O3DE_LOCAL_REPO_GEM_FILE_NAME = '8758b5acace49baf89ba5d36c1c214f10f8e47cd198096d1ae6b016b23b0833d.json'
TEST_O3DE_LOCAL_REPO_GEM_JSON_PAYLOAD = '''
{
"gem_name": "TestLocalGem",
"license": "Apache-2.0 Or MIT",
"origin": "Test Creator",
"origin_uri": "C:/localrepo/TestLocalGem/gem.zip",
"repo_uri": "http://o3derepo.org",
"last_updated": "Jan-2022",
"type": "Tool",
"summary": "A test downloadable gem.",
"canonical_tags": [
"Gem"
],
"user_tags": [],
"icon_path": "preview.png",
"requirements": "",
"documentation_url": "",
"dependencies": []
}
'''
TEST_O3DE_REPO_PROJECT_JSON_PAYLOAD = '''
{
"project_name": "TestProject",
"license": "Apache-2.0 Or MIT",
"origin": "Test Creator",
"origin_uri": "http://o3derepo.org/TestProject/project.zip",
"repo_uri": "http://o3derepo.org",
"last_updated": "2022-01-01 11:00:00",
"type": "Tool",
"summary": "A test downloadable gem.",
"canonical_tags": [
"Project"
],
"user_tags": [],
"icon_path": "preview.png",
"requirements": "",
"documentation_url": "",
"dependencies": []
}
'''
TEST_O3DE_REPO_ENGINE_JSON_PAYLOAD = '''
{
"engine_name": "TestEngine",
"license": "Apache-2.0 Or MIT",
"origin": "Test Creator",
"origin_uri": "http://o3derepo.org/TestEngine/engine.zip",
"repo_uri": "http://o3derepo.org",
"last_updated": "2021-12-01",
"type": "Tool",
"summary": "A test downloadable gem.",
"canonical_tags": [
"Engine"
],
"user_tags": [],
"icon_path": "preview.png",
"requirements": "",
"documentation_url": "",
"dependencies": []
}
'''
TEST_O3DE_REPO_TEMPLATE_JSON_PAYLOAD = '''
{
"template_name": "TestTemplate",
"license": "Apache-2.0 Or MIT",
"origin": "Test Creator",
"origin_uri": "http://o3derepo.org/TestTemplate/template.zip",
"repo_uri": "http://o3derepo.org",
"type": "Tool",
"summary": "A test downloadable gem.",
"canonical_tags": [
"Template"
],
"user_tags": [],
"icon_path": "preview.png",
"requirements": "",
"documentation_url": "",
"dependencies": []
}
'''
# This data will be hashed as if it were the zip file
TEST_O3DE_ZIP_FILE_DATA = 'O3DE'
TEST_O3DE_INCORRECT_FILE_DATA = 'O3DE '
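# A minimal sketch (an addition for illustration, not part of the original test) of how a
# downloader can check payload bytes against the "sha256" field carried by the gem JSON
# above; the hashing tests in this file exercise exactly this kind of match/mismatch.
import hashlib

def _sha256_matches(data: bytes, expected_hex: str) -> bool:
    # Compare the hex digest of the downloaded bytes with the expected value, case-insensitively.
    return hashlib.sha256(data).hexdigest().lower() == expected_hex.lower()

# e.g. _sha256_matches(TEST_O3DE_ZIP_FILE_DATA.encode('utf-8'),
#                      json.loads(TEST_O3DE_REPO_GEM_WITH_HASH_JSON_PAYLOAD)['sha256'])
# is expected to hold only if the fixture hash really is the sha256 of the test data.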
@pytest.fixture(scope='class')
def init_register_repo_data(request):
request.cls.o3de_manifest_data = json.loads(TEST_O3DE_MANIFEST_JSON_PAYLOAD)
@pytest.mark.usefixtures('init_register_repo_data')
class TestObjectDownload:
extracted_gem_json = ''
extracted_gem_path = ''
created_files = []
valid_urls = [
'http://o3derepo.org/repo.json',
'http://o3derepo.org/TestGem/gem.json',
'http://o3derepo.org/TestGem/gem.zip',
'http://o3derepo.org/TestProject/project.json',
'http://o3derepo.org/TestTemplate/template.json',
'http://o3derecursiverepo.org/repo.json'
]
@pytest.mark.parametrize("manifest_data, gem_name, expected_result, repo_data, gem_data, zip_data, skip_auto_register, force_overwrite, registration_expected", [
# Remote and local gem tests
pytest.param(TEST_O3DE_MANIFEST_JSON_PAYLOAD, 'TestGem', 0, TEST_O3DE_REPO_WITH_OBJECTS_JSON_PAYLOAD, TEST_O3DE_REPO_GEM_JSON_PAYLOAD, TEST_O3DE_ZIP_FILE_DATA, False, True, True),
pytest.param(TEST_O3DE_MANIFEST_JSON_PAYLOAD, 'TestLocalGem', 0, TEST_O3DE_REPO_WITH_OBJECTS_JSON_PAYLOAD, TEST_O3DE_LOCAL_REPO_GEM_JSON_PAYLOAD, TEST_O3DE_ZIP_FILE_DATA, False, True, True),
pytest.param(TEST_O3DE_MANIFEST_EXISTING_GEM_JSON_PAYLOAD, 'TestGem', 1, TEST_O3DE_REPO_WITH_OBJECTS_JSON_PAYLOAD, TEST_O3DE_REPO_GEM_JSON_PAYLOAD, TEST_O3DE_ZIP_FILE_DATA, False, False, False),
pytest.param(TEST_O3DE_MANIFEST_JSON_PAYLOAD, 'TestGem', 0, TEST_O3DE_REPO_WITH_OBJECTS_JSON_PAYLOAD, TEST_O3DE_REPO_GEM_JSON_PAYLOAD, TEST_O3DE_ZIP_FILE_DATA, True, True, False),
pytest.param(TEST_O3DE_MANIFEST_JSON_PAYLOAD, 'UnavailableGem', 1, TEST_O3DE_REPO_WITH_OBJECTS_JSON_PAYLOAD, TEST_O3DE_REPO_GEM_JSON_PAYLOAD, TEST_O3DE_ZIP_FILE_DATA, False, True, False),
# hashing tests
pytest.param(TEST_O3DE_MANIFEST_JSON_PAYLOAD, 'TestGem', 0, TEST_O3DE_REPO_WITH_OBJECTS_JSON_PAYLOAD, TEST_O3DE_REPO_GEM_WITH_HASH_JSON_PAYLOAD, TEST_O3DE_ZIP_FILE_DATA, False, True, True),
pytest.param(TEST_O3DE_MANIFEST_JSON_PAYLOAD, 'TestGem', 1, TEST_O3DE_REPO_WITH_OBJECTS_JSON_PAYLOAD, TEST_O3DE_REPO_GEM_WITH_HASH_JSON_PAYLOAD, TEST_O3DE_INCORRECT_FILE_DATA, False, True, False),
])
def test_download_gem(self, manifest_data, gem_name, expected_result, repo_data, gem_data, zip_data, skip_auto_register, force_overwrite, registration_expected):
self.o3de_manifest_data = json.loads(manifest_data)
self.created_files.clear()
# add pre existing files
self.created_files.append('C:/Users/testuser/.o3de/cache/3fb160cdfde8b32864335e71a9b7a0519591f3080d2a06e7ca10f830e0cb7a54.json')
self.created_files.append('C:/Users/testuser/.o3de/cache/a765db91484f0d963d4ba5c98161074df7cd87caf1340e6bc7cebdce1807c994.json')
self.created_files.append('C:/Users/testuser/.o3de/cache/8758b5acace49baf89ba5d36c1c214f10f8e47cd198096d1ae6b016b23b0833d.json')
self.created_files.append('C:/Users/testuser/.o3de/cache/Gems/TestGem/gem.zip')
self.created_files.append('C:/localrepo/TestLocalGem/gem.zip')
def load_o3de_manifest(manifest_path: pathlib.Path = None) -> dict:
return copy.deepcopy(self.o3de_manifest_data)
def save_o3de_manifest(manifest_data: dict, manifest_path: pathlib.Path = None) -> bool:
self.o3de_manifest_data = manifest_data
return True
def mocked_requests_get(url):
if isinstance(url, urllib.request.Request):
url_str = url.get_full_url()
else:
url_str = url
if url_str in self.valid_urls:
custom_mock = MagicMock()
custom_mock.getcode.return_value = 200
custom_mock.read.return_value = 0
custom_mock.headers = {'content-length': 0}
custom_mock.__enter__.return_value = custom_mock
else:
raise urllib.error.HTTPError(url_str, 404, "Not found", {}, 0)
return custom_mock
def mocked_extract(path=None, members=None, pwd=None):
self.extracted_gem_path = path
self.extracted_gem_json = gem_data
self.created_files.append(path / 'gem.json')
def mocked_open(path, mode = '', *args, **kwargs):
file_data = zip_data.encode('utf-8')
if pathlib.Path(path).name == TEST_O3DE_REPO_FILE_NAME:
file_data = TEST_O3DE_REPO_WITH_OBJECTS_JSON_PAYLOAD
elif pathlib.Path(path).name == TEST_O3DE_REPO_GEM_FILE_NAME or \
pathlib.Path(path).name == TEST_O3DE_LOCAL_REPO_GEM_FILE_NAME or \
pathlib.Path(path).name == 'gem.json':
file_data = gem_data
elif path == pathlib.Path(self.extracted_gem_path) / 'gem.json':
file_data = self.extracted_gem_json
mockedopen = mock_open(mock=MagicMock(), read_data=file_data)
if 'w' in mode:
self.created_files.append(path)
file_obj = mockedopen(self, *args, **kwargs)
file_obj.extractall = mocked_extract
file_obj.open = mocked_open
return file_obj
def mocked_isfile(path):
matches = [pathlib.Path(x).name for x in self.created_files if pathlib.Path(x).name == pathlib.Path(path).name]
if len(matches) != 0:
return True
else:
return False
def mocked_copy(origin, dest):
if mocked_isfile(origin):
self.created_files.append(dest)
download_callback_called = False
def download_callback(downloaded, total_size):
download_callback_called = True
with patch('o3de.manifest.load_o3de_manifest', side_effect=load_o3de_manifest) as _1,\
patch('o3de.manifest.save_o3de_manifest', side_effect=save_o3de_manifest) as _2, \
patch('o3de.utils.find_ancestor_dir_containing_file', return_value=None) as _3, \
patch('urllib.request.urlopen', side_effect=mocked_requests_get) as _4, \
patch('pathlib.Path.open', mocked_open) as _5, \
patch('pathlib.Path.is_file', mocked_isfile) as _6, \
patch('pathlib.Path.mkdir') as _7, \
patch('pathlib.Path.unlink') as _8, \
patch('zipfile.is_zipfile', return_value=True) as _9, \
patch('zipfile.ZipFile', mocked_open) as _10, \
patch('pathlib.Path.is_dir', return_value=True) as _11, \
patch('shutil.copy', mocked_copy) as _12, \
patch('os.unlink') as _13, \
patch('os.path.getsize', return_value=64) as _14:
result = download.download_gem(gem_name, '', skip_auto_register, force_overwrite, download_callback)
assert result == expected_result
# Check that the path is in external directories - register validates the gem.json so that will be tested here
matches = [pathlib.Path(x) for x in manifest.get_manifest_external_subdirectories() if pathlib.Path(x) == self.extracted_gem_path]
assert (len(matches) != 0) == registration_expected
@pytest.mark.parametrize("update_function, object_name, object_data, existing_time, update_available", [
# Repo gem is newer
pytest.param(download.is_o3de_gem_update_available, 'TestGem', TEST_O3DE_REPO_GEM_JSON_PAYLOAD, "2021-12-01", True),
# Repo engine is not newer
pytest.param(download.is_o3de_engine_update_available, 'TestEngine', TEST_O3DE_REPO_ENGINE_JSON_PAYLOAD, "2021-12-01", False),
# Repo project has a last_updated field, local does not
pytest.param(download.is_o3de_project_update_available, 'TestProject', TEST_O3DE_REPO_PROJECT_JSON_PAYLOAD, "", True),
# Repo template does not have a last_updated field
pytest.param(download.is_o3de_template_update_available, 'TestTemplate', TEST_O3DE_REPO_TEMPLATE_JSON_PAYLOAD, "", False),
# Repo object does not exist
pytest.param(download.is_o3de_gem_update_available, 'NonExistingObject', "", "", False),
# Incorrect repo datetime format
pytest.param(download.is_o3de_gem_update_available, 'TestLocalGem', TEST_O3DE_LOCAL_REPO_GEM_JSON_PAYLOAD, "2021-12-01", False),
])
def test_check_updates(self, update_function, object_name, object_data, existing_time, update_available):
self.o3de_manifest_data = json.loads(TEST_O3DE_MANIFEST_JSON_PAYLOAD)
def load_o3de_manifest(manifest_path: pathlib.Path = None) -> dict:
return copy.deepcopy(self.o3de_manifest_data)
def save_o3de_manifest(manifest_data: dict, manifest_path: pathlib.Path = None) -> bool:
self.o3de_manifest_data = manifest_data
return True
def mocked_open(path, mode, *args, **kwargs):
file_data = bytes(0)
if pathlib.Path(path).name == TEST_O3DE_REPO_FILE_NAME:
file_data = TEST_O3DE_REPO_WITH_OBJECTS_JSON_PAYLOAD
else:
file_data = object_data
mockedopen = mock_open(mock=MagicMock(), read_data=file_data)
return mockedopen(self, *args, **kwargs)
with patch('o3de.manifest.load_o3de_manifest', side_effect=load_o3de_manifest) as _1,\
patch('o3de.manifest.save_o3de_manifest', side_effect=save_o3de_manifest) as _2, \
patch('pathlib.Path.open', mocked_open) as _3, \
patch('pathlib.Path.is_file', return_value=True) as _4:
assert update_function(object_name, existing_time) == update_available
|
scripts/o3de/tests/test_download.py
|
| 0.208904 | 0.268606 |
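The tests above are plain parametrized pytest methods; a minimal sketch for running just this module (assuming pytest is installed and the command is issued from the repository root so the o3de package resolves):
# Hypothetical runner for the download tests shown above.
import pytest

raise SystemExit(pytest.main([
    "-q",
    "scripts/o3de/tests/test_download.py",
    "-k", "test_download_gem or test_check_updates",
]))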
from pirates.piratesbase.PiratesGlobals import *
from direct.interval.IntervalGlobal import *
from direct.distributed.ClockDelta import *
from pirates.piratesbase import PiratesGlobals
from direct.distributed import DistributedObject
from pirates.piratesbase import PLocalizer
from direct.gui.DirectGui import *
from pandac.PandaModules import *
from pirates.effects import ShipSplintersA
from pirates.destructibles import DistributedDestructibleObject
class DistributedBarrel(DistributedDestructibleObject.DistributedDestructibleObject):
__module__ = __name__
notify = directNotify.newCategory('DistributedBarrel')
def __init__(self, cr):
DistributedDestructibleObject.DistributedDestructibleObject.__init__(self, cr)
NodePath.__init__(self)
self.assign(hidden.attachNewNode('barrel'))
self.Hp = 0
self.maxHp = 0
self.parentId = None
self.HpDisplay = None
self.prop = None
self.modelType = 0
return
def load(self):
self.loadModel()
self.displayHp()
def loadModel(self):
self.prop = loader.loadModel('models/props/barrel')
self.coll = self.prop.findAllMatches('**/collision*')
for c in self.coll:
self.curMask = c.node().getIntoCollideMask()
c.setCollideMask(PiratesGlobals.AmmoBitmask | self.curMask)
c.setTag('objType', str(PiratesGlobals.COLL_DESTRUCTIBLE))
c.setTag('propId', str(self.doId))
self.prop.reparentTo(self)
def playDamage(self, pos):
if base.cr.wantSpecialEffects:
shipSplintersAEffect = ShipSplintersA.getEffect()
if shipSplintersAEffect:
shipSplintersAEffect.reparentTo(render)
shipSplintersAEffect.setPos(pos)
shipSplintersAEffect.play()
def playDeath(self):
if self.prop != None:
self.prop.hide()
for c in self.coll:
c.setCollideMask(PiratesGlobals.AmmoBitmask.allOff())
return
def respawn(self):
if self.prop != None:
self.prop.show()
for c in self.coll:
c.setCollideMask(PiratesGlobals.AmmoBitmask | self.curMask)
return
|
pirates/destructibles/DistributedBarrel.py
|
| 0.483648 | 0.085518 |
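The barrel class above mainly toggles collision masks on its prop's collision nodes; a standalone sketch of that pattern with plain Panda3D classes (modern panda3d.core import path; AMMO_MASK is an arbitrary bit standing in for PiratesGlobals.AmmoBitmask):
# Minimal sketch of the enable/disable collide-mask pattern used by
# respawn() and playDeath() above, outside the game client runtime.
from panda3d.core import BitMask32, CollisionNode, CollisionSphere, NodePath

AMMO_MASK = BitMask32.bit(5)                    # placeholder ammo bit

cnode = CollisionNode('collision_barrel')
cnode.addSolid(CollisionSphere(0, 0, 0, 1))
barrel_np = NodePath(cnode)

cur_mask = cnode.getIntoCollideMask()
barrel_np.setCollideMask(AMMO_MASK | cur_mask)  # alive: accept ammo collisions
barrel_np.setCollideMask(BitMask32.allOff())    # destroyed: ignore everything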
import sys
import argparse
from closeio_api import Client as CloseIO_API, APIError
from datetime import datetime, timedelta
import time
from dateutil import tz
import csv
reload(sys)
sys.setdefaultencoding('utf-8')
parser = argparse.ArgumentParser(description='Get Time To Respond Metrics From Org')
parser.add_argument('--api-key', '-k', required=True, help='API Key')
parser.add_argument('--past-days', '-p', required=True, help='How many days in the past should we start the calculation?')
parser.add_argument('--org-count', '-o', action='store_true', help='Use this field if you also want org totals, not just active user totals. Note: Only use this field with short date ranges (i.e. 2 weeks maximum)')
parser.add_argument('--user-counts', '-u', action='store_true', help='Get stats per individual user')
args = parser.parse_args()
api = CloseIO_API(args.api_key)
org_id = api.get('api_key/' + args.api_key)['organization_id']
org_name = api.get('organization/' + org_id)['name']
org_memberships = api.get('organization/' + org_id)['memberships']
assert args.org_count or args.user_counts, \
'ERROR: Please include the org count parameter, the user counts parameter, or both'
assert (args.org_count and int(args.past_days) < 15) or not args.org_count, \
'ERROR: When using the org-count parameter, make sure that the past days parameter is less than 15'
def pretty_time_delta(seconds):
seconds = abs(int(seconds))
days, seconds = divmod(seconds, 86400)
hours, seconds = divmod(seconds, 3600)
minutes, seconds = divmod(seconds, 60)
if days > 0:
return '%dd %dh %dm %ds' % (days, hours, minutes, seconds)
elif hours > 0:
return '%dh %dm %ds' % (hours, minutes, seconds)
elif minutes > 0:
return '%dm %ds' % (minutes, seconds)
else:
return '%ds' % (seconds)
tz_off = (-time.timezone/60/60)
today = datetime.utcnow().date()
start = datetime(today.year, today.month, today.day, tzinfo=tz.tzutc()) - timedelta(days=int(args.past_days)) - timedelta(hours=tz_off)
end = datetime(today.year, today.month, today.day, tzinfo=tz.tzutc()) + timedelta(days=1)
start = start.strftime("%Y-%m-%dT%H:%M:%S")
end = end.strftime("%Y-%m-%dT%H:%M:%S")
user_stats = []
def getTTR(user):
if user != None:
print "Getting all activities in the last %s days for %s..." % (args.past_days, user['user_full_name'])
else:
print "Getting all activities in the last %s days for %s..." % (args.past_days, "All Users")
has_more = True
offset = 0
seconds = 0
seconds_inc = 0
resp = None
activities = []
while has_more:
if user != None:
resp = api.get('activity', params={ '_skip':offset, 'date_created__gte':start, 'date_created__lte':end, '_fields': '_type,id,date_created,lead_id,direction,user_id,duration', 'user_id': user['user_id'] })
else:
resp = api.get('activity', params={ '_skip':offset, 'date_created__gte':start, 'date_created__lte':end, '_fields': '_type,id,date_created,lead_id,direction,user_id,duration' })
for activity in resp['data']:
if activity['_type'] in ['Call', 'Email', 'SMS'] and activity['lead_id'] != None:
activity['date_created'] = activity['date_created'].split('+')[0].split('.')[0]
activities.append(activity)
print offset
offset+=len(resp['data'])
has_more = resp['has_more']
if user == None:
user = {}
user['user_full_name'] = 'All Users'
print "Getting TTR for %s..." % user['user_full_name']
responded_count = 0
responded_count_with_not_responded_to_yet = 0
total_time_to_respond_with_not_responded_to_yet = 0
total_time_to_respond = 0
inbound_activities = [i for i in activities if ((i['direction'] == 'incoming' or i['direction'] == 'inbound') and (i['_type'] in ['SMS', 'Email'] or (i['_type'] == 'Call' and i['duration'] == 0))) ]
now = datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%S")
for i in range(0, len(inbound_activities)):
activities_for_this_lead = [a for a in activities if a['lead_id'] == inbound_activities[i]['lead_id']]
outbound_activities_for_this_lead = [a for a in activities_for_this_lead if datetime.strptime(a['date_created'].split('.')[0], "%Y-%m-%dT%H:%M:%S") > datetime.strptime(inbound_activities[i]['date_created'].split('.')[0], "%Y-%m-%dT%H:%M:%S") and (a['direction'] == 'outbound' or a['direction'] == 'outgoing')]
if len(outbound_activities_for_this_lead) != 0:
activity_after = outbound_activities_for_this_lead[len(outbound_activities_for_this_lead)-1]
diff = (datetime.strptime(activity_after['date_created'].split('.')[0], "%Y-%m-%dT%H:%M:%S") - datetime.strptime(inbound_activities[i]['date_created'].split('.')[0], "%Y-%m-%dT%H:%M:%S")).total_seconds()
total_time_to_respond += diff
total_time_to_respond_with_not_responded_to_yet += diff
responded_count += 1
responded_count_with_not_responded_to_yet += 1
else:
diff = (datetime.strptime(now, "%Y-%m-%dT%H:%M:%S") - datetime.strptime(inbound_activities[i]['date_created'].split('.')[0], "%Y-%m-%dT%H:%M:%S")).total_seconds()
total_time_to_respond_with_not_responded_to_yet += diff
responded_count_with_not_responded_to_yet += 1
if responded_count != 0:
seconds = int(float(total_time_to_respond) / float(responded_count))
if total_time_to_respond_with_not_responded_to_yet != 0:
seconds_inc = int(float(total_time_to_respond_with_not_responded_to_yet) / float(responded_count_with_not_responded_to_yet))
print "Average Time to Respond To Leads (Only Leads Alredy Responded To): %s" % pretty_time_delta(seconds)
print "Average Time to Respond To Leads (Including Leads Not Responded To Yet): %s" % pretty_time_delta(seconds_inc)
user_stat = {
'Total # of SMS' : len([i for i in activities if i['_type'] == 'SMS']),
'Total # of Emails' : len([i for i in activities if i['_type'] == 'Email']),
'Total # of Calls' : len([i for i in activities if i['_type'] == 'Call']),
'Total # of Inbound Communications' : len([i for i in activities if (i['_type'] in ['SMS', 'Call', 'Email'] and i['direction'] in ['inbound', 'incoming'])]),
'Total # of Outbound Communications' : len([i for i in activities if (i['_type'] in ['SMS', 'Call', 'Email'] and i['direction'] in ['outbound', 'outgoing'])]),
'Average Time to Respond To Leads (Only Leads Already Responded To)': seconds,
'Average Time to Respond To Leads (Only Leads Already Responded To) Formatted': pretty_time_delta(seconds),
'Average Time to Respond To Leads (Including Leads Not Responded To Yet)': seconds_inc,
'Average Time to Respond To Leads (Including Leads Not Responded To Yet) Formatted': pretty_time_delta(seconds_inc),
'User Name': user['user_full_name']
}
user_stats.append(user_stat)
if args.user_counts:
for membership in org_memberships:
getTTR(membership)
if args.org_count:
getTTR(None)
f = open('%s Time to Respond Data Per User For The Past %s days.csv' % (org_name, args.past_days), 'wt')
try:
keys = user_stats[0].keys()
ordered_keys = ['User Name', 'Average Time to Respond To Leads (Only Leads Already Responded To) Formatted', 'Average Time to Respond To Leads (Including Leads Not Responded To Yet) Formatted'] + [i for i in keys if i not in ['Average Time to Respond To Leads (Including Leads Not Responded To Yet) Formatted', 'User Name', 'Average Time to Respond To Leads (Only Leads Already Responded To) Formatted']]
writer = csv.DictWriter(f, ordered_keys)
writer.writeheader()
writer.writerows(user_stats)
finally:
f.close()
|
scripts/time_to_respond_report.py
|
| 0.192767 | 0.158142 |
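Two quick checks grounded in the helper above, plus a typical command line (the API key is a placeholder; the script targets Python 2, as the reload/setdefaultencoding calls indicate):
# pretty_time_delta folds a second count into a compact duration string.
assert pretty_time_delta(93784) == '1d 2h 3m 4s'   # 1 day, 2 h, 3 min, 4 s
assert pretty_time_delta(-42) == '42s'             # negative inputs are made absolute

# Example invocation:
#   python time_to_respond_report.py -k YOUR_API_KEY -p 7 -u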
from __future__ import print_function, absolute_import, division
from .pokerth_pb2 import PokerTHMessage
__author__ = '<NAME>'
__copyright__ = '<NAME>'
def makeSizeBytes(n):
"""
Create a 4 bytes string that encodes the number ``n``.
:param n: integer
:return: 4 bytes string
"""
return str(bytearray.fromhex(b'{:08x}'.format(n)))
def readSizeBytes(string):
"""
Reads the 4 byte size-string and returns the size as integer.
:param string: 4 byte size-string
:return: integer
"""
assert len(string) == 4
return int(string.encode('hex'), 16)
def unpack(data):
"""
Unpacks/Deserializes a PokerTH network message.
:param data: data as string
:return: PokerTHMessage object containing the message
"""
envelope = PokerTHMessage()
envelope.ParseFromString(data)
return envelope
def pack(envelope):
"""
Packs/Serializes a PokerTHMessage to a data string.
:param envelope: PokerTHMessage envelope
:return: data as string
"""
data = envelope.SerializeToString()
size_bytes = makeSizeBytes(len(data))
return size_bytes + data
def develop(envelope):
"""
Remove the envelope from a message.
:param envelope: PokerTHMessage object that envelops a message
:return: PokerTH message from the envelope
"""
msg = [v for _, v in envelope.ListFields() if v != envelope.messageType]
assert len(msg) == 1
assert msg[0].IsInitialized()
return msg[0]
def _getEnvelopeAttr(msg_name):
"""
Get attribute name of an envelope for a given message name.
:param msg_name: name of message
:return: attribute name of message
"""
return msg_name[0].lower() + msg_name[1:]
def _getMsgTypeAttr(msg_name):
"""
Get the attribute name for the type of a message.
:param msg_name: name of message
:return: attribute name of message's type
"""
return "Type_{}".format(msg_name)
def envelop(msg):
"""
Put a message into an envelope.
:param msg: PokerTH message object
:return: message wrapped in a PokerTHMessage object
"""
msg_name = msg.__class__.__name__
envelope = PokerTHMessage()
envelope_msg = getattr(envelope, _getEnvelopeAttr(msg_name))
envelope_msg.MergeFrom(msg)
msg_type = getattr(envelope, _getMsgTypeAttr(msg_name))
setattr(envelope, 'messageType', msg_type)
return envelope
|
pokerthproto/transport.py
|
| 0.887449 | 0.463323 |
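A short round-trip sketch for the framing helpers above (Python 2, since readSizeBytes relies on str.encode('hex')); the enveloping half is left as comments because the concrete message classes live in the generated pokerth_pb2 module and their names are not shown here:
# Size-prefix framing round-trip.
payload = 'x' * 260
framed = makeSizeBytes(len(payload)) + payload
assert len(framed) == 4 + 260
assert readSizeBytes(framed[:4]) == 260

# Enveloping sketch -- 'SomeLobbyMessage' is a placeholder name:
# msg = pokerth_pb2.SomeLobbyMessage(...)
# wire = pack(envelop(msg))
# assert develop(unpack(wire[4:])) == msg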
import torch
import torch.nn as nn
from torch.distributions import Normal, Categorical
def init_weights(module: nn.Module, gain=1.414):
for m in module.modules():
if isinstance(m, (nn.Linear, nn.Conv2d)):
torch.nn.init.zeros_(m.bias)
torch.nn.init.orthogonal_(m.weight, gain)
if isinstance(m, (nn.GRU, nn.LSTM)):
torch.nn.init.zeros_(m.bias_ih_l0)
torch.nn.init.zeros_(m.bias_hh_l0)
torch.nn.init.orthogonal_(m.weight_ih_l0)
torch.nn.init.orthogonal_(m.weight_hh_l0)
class Mlp(nn.Module):
def __init__(self, input_dim, hidden_sizes, activation):
super(Mlp, self).__init__()
self.flat = len(input_dim) == 2
last_size = input_dim[0] * input_dim[1] if self.flat else input_dim[0]
mlp = []
for hidden_size in hidden_sizes:
mlp.append(nn.Linear(last_size, hidden_size))
mlp.append(activation())
last_size = hidden_size
self.mlp = nn.Sequential(*mlp)
def forward(self, x):
if self.flat:
x = torch.flatten(x, -2)
x = self.mlp(x)
return x
class Cnn(nn.Module):
def __init__(self, shape, hidden_sizes, activation):
super(Cnn, self).__init__()
h, w, in_channels = shape
cnn = []
mlp_idx = 0
for conv in hidden_sizes:
if isinstance(conv, tuple):
out_channels, kernel_size, pool_size = conv
cnn.append(nn.Conv2d(in_channels, out_channels, (kernel_size, kernel_size)))
cnn.append(nn.MaxPool2d(pool_size))
cnn.append(activation())
h = (h - kernel_size + 1) // pool_size
w = (w - kernel_size + 1) // pool_size
in_channels = out_channels
else:
break
mlp_idx += 1
self.cnn = nn.Sequential(*cnn)
self.mlp = Mlp(
(h * w * in_channels,),
hidden_sizes[mlp_idx:],
activation
)
self.activation = activation()
def forward(self, x):
x = x.transpose(-1, -3)
if len(x.shape) == 5:
l, b = x.shape[:2]
x = x.flatten(0, 1)
x = self.cnn(x)
x = x.flatten(1)
x = x.unflatten(0, (l, b))
else:
x = self.cnn(x)
x = x.flatten(1)
x = self.activation(self.mlp(x))
return x
class Rnn(nn.Module):
def __init__(self, hidden_size, activation, rnn='lstm'):
super(Rnn, self).__init__()
if rnn == 'lstm':
self.rnn = nn.LSTM(hidden_size, hidden_size)
elif rnn == 'gru':
self.rnn = nn.GRU(hidden_size, hidden_size)
else:
raise NotImplementedError
self.activation = activation()
def forward(self, x, states, dones):
if len(x.shape) == 3:
# reshape to (chunk_len, batch_size, ...)
x = x.transpose(0, 1)
chunk_len = x.shape[0]
xs = []
for step in range(chunk_len):
x_ = x[step:step + 1, :, :]
x_, states = self.rnn(x_, states)
done = dones[:, step]
if isinstance(states, tuple):
for states_ in states:
states_[:, done, :] = 0.
else:
states[:, done, :] = 0.
xs.append(x_)
# reshape to (1, batch_size, chunk_len, ...)
x = torch.stack(xs, dim=2)
# reshape to (batch_size, chunk_len, ...)
x = x.squeeze(0)
else:
# reshape to (1, batch_size, ...)
x = x.unsqueeze(0)
x, states = self.rnn(x, states)
# reshape to (batch_size, ...)
x = x.squeeze(0)
x = self.activation(x)
return x, states
class Discrete(nn.Module):
def __init__(self, hidden_size, action_dim):
super(Discrete, self).__init__()
self.logits = nn.Linear(hidden_size, action_dim)
def forward(self, x):
logits = self.logits(x)
return Categorical(logits=logits)
class Continuous(nn.Module):
def __init__(self, hidden_size, action_dim):
super(Continuous, self).__init__()
self.mean = nn.Linear(hidden_size, action_dim)
self.logstd = nn.Parameter(torch.zeros(action_dim))
def forward(self, x):
mean = self.mean(x)
std = self.logstd.exp().expand_as(mean)
return Normal(mean, std)
class Deterministic(nn.Module):
def __init__(self, hidden_size, output_dim):
super(Deterministic, self).__init__()
self.x = nn.Linear(hidden_size, output_dim)
def forward(self, x):
return self.x(x)
|
pbrl/policy/base.py
|
| 0.935693 | 0.502686 |
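An assumed usage sketch composing the blocks above into a small discrete policy head (dimensions are arbitrary examples; only classes defined in this file plus torch are used):
import torch
import torch.nn as nn

obs_dim, n_actions, hidden = 8, 4, 64
encoder = Mlp((obs_dim,), [hidden, hidden], nn.Tanh)
head = Discrete(hidden, n_actions)
init_weights(encoder)
init_weights(head, gain=0.01)        # small gain on the output layer

obs = torch.randn(32, obs_dim)       # batch of flat observations
dist = head(encoder(obs))            # Categorical over n_actions
actions = dist.sample()              # shape (32,)
log_probs = dist.log_prob(actions)   # shape (32,)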
from __future__ import division
import time
import torch
import torch.nn as nn
from torch.autograd import Variable
import numpy as np
import cv2
from util import *
import argparse
import os
import os.path as osp
from darknet import Darknet
import pickle as pkl
import pandas as pd
import random
def arg_parse():
"""
Parse arguments to the detect module
"""
parser = argparse.ArgumentParser(description='YOLO v3 Detection Module')
parser.add_argument("--images", dest = 'images', help =
"Image / Directory containing images to perform detection upon",
default = "imgs", type = str)
parser.add_argument("--det", dest = 'det', help =
"Image / Directory to store detections to",
default = "det", type = str)
parser.add_argument("--bs", dest = "bs", help = "Batch size", default = 1)
parser.add_argument("--confidence", dest = "confidence", help = "Object Confidence to filter predictions", default = 0.5)
parser.add_argument("--nms_thresh", dest = "nms_thresh", help = "NMS Threshhold", default = 0.4)
parser.add_argument("--cfg", dest = 'cfgfile', help =
"Config file",
default = "cfg/yolov3.cfg", type = str)
parser.add_argument("--weights", dest = 'weightsfile', help =
"weightsfile",
default = "yolov3.weights", type = str)
parser.add_argument("--reso", dest = 'reso', help =
"Input resolution of the network. Increase to increase accuracy. Decrease to increase speed",
default = "416", type = str)
return parser.parse_args()
args = arg_parse()
images = args.images
batch_size = int(args.bs)
confidence = float(args.confidence)
nms_thesh = float(args.nms_thresh)
start = 0
CUDA = torch.cuda.is_available()
num_classes = 80
classes = load_classes("data/coco.names")
#Set up the neural network
print("Loading network.....")
model = Darknet(args.cfgfile)
model.load_weights(args.weightsfile)
print("Network successfully loaded")
model.net_info["height"] = args.reso
inp_dim = int(model.net_info["height"])
assert inp_dim % 32 == 0
assert inp_dim > 32
#If there's a GPU available, put the model on GPU
if CUDA:
model.cuda()
#Set the model in evaluation mode
model.eval()
read_dir = time.time()
#Detection phase
try:
imlist = [osp.join(osp.realpath('.'), images, img) for img in os.listdir(images)]
except NotADirectoryError:
imlist = []
imlist.append(osp.join(osp.realpath('.'), images))
except FileNotFoundError:
print ("No file or directory with the name {}".format(images))
exit()
if not os.path.exists(args.det):
os.makedirs(args.det)
load_batch = time.time()
loaded_ims = [cv2.imread(x) for x in imlist]
im_batches = list(map(prep_image, loaded_ims, [inp_dim for x in range(len(imlist))]))
im_dim_list = [(x.shape[1], x.shape[0]) for x in loaded_ims]
im_dim_list = torch.FloatTensor(im_dim_list).repeat(1,2)
leftover = 0
if (len(im_dim_list) % batch_size):
leftover = 1
if batch_size != 1:
num_batches = len(imlist) // batch_size + leftover
im_batches = [torch.cat((im_batches[i*batch_size : min((i + 1)*batch_size,
len(im_batches))])) for i in range(num_batches)]
write = 0
if CUDA:
im_dim_list = im_dim_list.cuda()
start_det_loop = time.time()
for i, batch in enumerate(im_batches):
#load the image
start = time.time()
if CUDA:
batch = batch.cuda()
with torch.no_grad():
prediction = model(Variable(batch), CUDA)
prediction = write_results(prediction, confidence, num_classes, nms_conf = nms_thesh)
end = time.time()
if type(prediction) == int:
for im_num, image in enumerate(imlist[i*batch_size: min((i + 1)*batch_size, len(imlist))]):
im_id = i*batch_size + im_num
print("{0:20s} predicted in {1:6.3f} seconds".format(image.split("/")[-1], (end - start)/batch_size))
print("{0:20s} {1:s}".format("Objects Detected:", ""))
print("----------------------------------------------------------")
continue
prediction[:,0] += i*batch_size #transform the attribute from index in batch to index in imlist
if not write: #If we haven't initialised output
output = prediction
write = 1
else:
output = torch.cat((output,prediction))
for im_num, image in enumerate(imlist[i*batch_size: min((i + 1)*batch_size, len(imlist))]):
im_id = i*batch_size + im_num
objs = [classes[int(x[-1])] for x in output if int(x[0]) == im_id]
print("{0:20s} predicted in {1:6.3f} seconds".format(image.split("/")[-1], (end - start)/batch_size))
print("{0:20s} {1:s}".format("Objects Detected:", " ".join(objs)))
print("----------------------------------------------------------")
if CUDA:
torch.cuda.synchronize()
try:
output
except NameError:
print ("No detections were made")
exit()
im_dim_list = torch.index_select(im_dim_list, 0, output[:,0].long())
scaling_factor = torch.min(416/im_dim_list,1)[0].view(-1,1)
output[:,[1,3]] -= (inp_dim - scaling_factor*im_dim_list[:,0].view(-1,1))/2
output[:,[2,4]] -= (inp_dim - scaling_factor*im_dim_list[:,1].view(-1,1))/2
output[:,1:5] /= scaling_factor
for i in range(output.shape[0]):
output[i, [1,3]] = torch.clamp(output[i, [1,3]], 0.0, im_dim_list[i,0])
output[i, [2,4]] = torch.clamp(output[i, [2,4]], 0.0, im_dim_list[i,1])
output_recast = time.time()
class_load = time.time()
colors = pkl.load(open("pallete", "rb"))
draw = time.time()
def write(x, results):
c1 = tuple(x[1:3].int())
c2 = tuple(x[3:5].int())
img = results[int(x[0])]
cls = int(x[-1])
color = random.choice(colors)
label = "{0}".format(classes[cls])
cv2.rectangle(img, c1, c2,color, 1)
t_size = cv2.getTextSize(label, cv2.FONT_HERSHEY_PLAIN, 1 , 1)[0]
c2 = c1[0] + t_size[0] + 3, c1[1] + t_size[1] + 4
cv2.rectangle(img, c1, c2,color, -1)
cv2.putText(img, label, (c1[0], c1[1] + t_size[1] + 4), cv2.FONT_HERSHEY_PLAIN, 1, [225,255,255], 1);
return img
list(map(lambda x: write(x, loaded_ims), output))
det_names = pd.Series(imlist).apply(lambda x: "{}/det_{}".format(args.det,x.split("/")[-1]))
list(map(cv2.imwrite, det_names, loaded_ims))
end = time.time()
print("SUMMARY")
print("----------------------------------------------------------")
print("{:25s}: {}".format("Task", "Time Taken (in seconds)"))
print()
print("{:25s}: {:2.3f}".format("Reading addresses", load_batch - read_dir))
print("{:25s}: {:2.3f}".format("Loading batch", start_det_loop - load_batch))
print("{:25s}: {:2.3f}".format("Detection (" + str(len(imlist)) + " images)", output_recast - start_det_loop))
print("{:25s}: {:2.3f}".format("Output Processing", class_load - output_recast))
print("{:25s}: {:2.3f}".format("Drawing Boxes", end - draw))
print("{:25s}: {:2.3f}".format("Average time_per_img", (end - load_batch)/len(imlist)))
print("----------------------------------------------------------")
torch.cuda.empty_cache()
|
detect.py
|
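The detector above is driven entirely by its argparse flags; a typical invocation and a worked check of the letterbox rescaling math (a 640x480 source image and the default 416 network input are assumed):
# Example invocation:
#   python detect.py --images imgs --det det --cfg cfg/yolov3.cfg \
#       --weights yolov3.weights --bs 1 --confidence 0.5 --nms_thresh 0.4 --reso 416

# Box rescaling from network coordinates back to the source image,
# mirroring the scaling_factor / offset arithmetic in the script:
scale = min(416 / 640.0, 416 / 480.0)   # 0.65
pad_x = (416 - scale * 640) / 2         # 0.0  (width fills the input exactly)
pad_y = (416 - scale * 480) / 2         # 52.0 (letterbox bars top and bottom)
net_x, net_y = 208.0, 208.0             # detection centre in 416x416 coords
img_x = (net_x - pad_x) / scale         # 320.0
img_y = (net_y - pad_y) / scale         # 240.0 -> centre of the 640x480 image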
from __future__ import division
import time
import torch
import torch.nn as nn
from torch.autograd import Variable
import numpy as np
import cv2
from util import *
import argparse
import os
import os.path as osp
from darknet import Darknet
import pickle as pkl
import pandas as pd
import random
def arg_parse():
"""
Parse arguements to the detect module
"""
parser = argparse.ArgumentParser(description='YOLO v3 Detection Module')
parser.add_argument("--images", dest = 'images', help =
"Image / Directory containing images to perform detection upon",
default = "imgs", type = str)
parser.add_argument("--det", dest = 'det', help =
"Image / Directory to store detections to",
default = "det", type = str)
parser.add_argument("--bs", dest = "bs", help = "Batch size", default = 1)
parser.add_argument("--confidence", dest = "confidence", help = "Object Confidence to filter predictions", default = 0.5)
parser.add_argument("--nms_thresh", dest = "nms_thresh", help = "NMS Threshhold", default = 0.4)
parser.add_argument("--cfg", dest = 'cfgfile', help =
"Config file",
default = "cfg/yolov3.cfg", type = str)
parser.add_argument("--weights", dest = 'weightsfile', help =
"weightsfile",
default = "yolov3.weights", type = str)
parser.add_argument("--reso", dest = 'reso', help =
"Input resolution of the network. Increase to increase accuracy. Decrease to increase speed",
default = "416", type = str)
return parser.parse_args()
args = arg_parse()
images = args.images
batch_size = int(args.bs)
confidence = float(args.confidence)
nms_thesh = float(args.nms_thresh)
start = 0
CUDA = torch.cuda.is_available()
num_classes = 80
classes = load_classes("data/coco.names")
#Set up the neural network
print("Loading network.....")
model = Darknet(args.cfgfile)
model.load_weights(args.weightsfile)
print("Network successfully loaded")
model.net_info["height"] = args.reso
inp_dim = int(model.net_info["height"])
assert inp_dim % 32 == 0
assert inp_dim > 32
#If there's a GPU availible, put the model on GPU
if CUDA:
model.cuda()
#Set the model in evaluation mode
model.eval()
read_dir = time.time()
#Detection phase
try:
imlist = [osp.join(osp.realpath('.'), images, img) for img in os.listdir(images)]
except NotADirectoryError:
imlist = []
imlist.append(osp.join(osp.realpath('.'), images))
except FileNotFoundError:
print ("No file or directory with the name {}".format(images))
exit()
if not os.path.exists(args.det):
os.makedirs(args.det)
load_batch = time.time()
loaded_ims = [cv2.imread(x) for x in imlist]
im_batches = list(map(prep_image, loaded_ims, [inp_dim for x in range(len(imlist))]))
im_dim_list = [(x.shape[1], x.shape[0]) for x in loaded_ims]
im_dim_list = torch.FloatTensor(im_dim_list).repeat(1,2)
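#repeat (w, h) to (w, h, w, h) for each image so it lines up with the four box coordinates used later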
leftover = 0
if (len(im_dim_list) % batch_size):
leftover = 1
if batch_size != 1:
num_batches = len(imlist) // batch_size + leftover
im_batches = [torch.cat((im_batches[i*batch_size : min((i + 1)*batch_size,
len(im_batches))])) for i in range(num_batches)]
write = 0
if CUDA:
im_dim_list = im_dim_list.cuda()
start_det_loop = time.time()
for i, batch in enumerate(im_batches):
#load the image
start = time.time()
if CUDA:
batch = batch.cuda()
with torch.no_grad():
prediction = model(Variable(batch), CUDA)
prediction = write_results(prediction, confidence, num_classes, nms_conf = nms_thesh)
end = time.time()
if type(prediction) == int:
for im_num, image in enumerate(imlist[i*batch_size: min((i + 1)*batch_size, len(imlist))]):
im_id = i*batch_size + im_num
print("{0:20s} predicted in {1:6.3f} seconds".format(image.split("/")[-1], (end - start)/batch_size))
print("{0:20s} {1:s}".format("Objects Detected:", ""))
print("----------------------------------------------------------")
continue
    prediction[:,0] += i*batch_size #transform the attribute from index in batch to index in imlist
    if not write: #If we haven't initialised output
output = prediction
write = 1
else:
output = torch.cat((output,prediction))
for im_num, image in enumerate(imlist[i*batch_size: min((i + 1)*batch_size, len(imlist))]):
im_id = i*batch_size + im_num
objs = [classes[int(x[-1])] for x in output if int(x[0]) == im_id]
print("{0:20s} predicted in {1:6.3f} seconds".format(image.split("/")[-1], (end - start)/batch_size))
print("{0:20s} {1:s}".format("Objects Detected:", " ".join(objs)))
print("----------------------------------------------------------")
if CUDA:
torch.cuda.synchronize()
try:
output
except NameError:
print ("No detections were made")
exit()
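#the detections are in the coordinate frame of the padded, resized network input;
#the block below rescales them to the original image dimensions and clips them to the image borders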
im_dim_list = torch.index_select(im_dim_list, 0, output[:,0].long())
scaling_factor = torch.min(inp_dim/im_dim_list,1)[0].view(-1,1) #use the configured input size rather than a hard-coded 416
output[:,[1,3]] -= (inp_dim - scaling_factor*im_dim_list[:,0].view(-1,1))/2
output[:,[2,4]] -= (inp_dim - scaling_factor*im_dim_list[:,1].view(-1,1))/2
output[:,1:5] /= scaling_factor
for i in range(output.shape[0]):
output[i, [1,3]] = torch.clamp(output[i, [1,3]], 0.0, im_dim_list[i,0])
output[i, [2,4]] = torch.clamp(output[i, [2,4]], 0.0, im_dim_list[i,1])
output_recast = time.time()
class_load = time.time()
colors = pkl.load(open("pallete", "rb"))
draw = time.time()
def write(x, results):
c1 = tuple(x[1:3].int())
c2 = tuple(x[3:5].int())
img = results[int(x[0])]
cls = int(x[-1])
color = random.choice(colors)
label = "{0}".format(classes[cls])
    cv2.rectangle(img, c1, c2, color, 1)
    t_size = cv2.getTextSize(label, cv2.FONT_HERSHEY_PLAIN, 1, 1)[0]
    c2 = c1[0] + t_size[0] + 3, c1[1] + t_size[1] + 4
    cv2.rectangle(img, c1, c2, color, -1)
    cv2.putText(img, label, (c1[0], c1[1] + t_size[1] + 4), cv2.FONT_HERSHEY_PLAIN, 1, [225,255,255], 1)
return img
list(map(lambda x: write(x, loaded_ims), output))
det_names = pd.Series(imlist).apply(lambda x: "{}/det_{}".format(args.det,x.split("/")[-1]))
list(map(cv2.imwrite, det_names, loaded_ims))
end = time.time()
print("SUMMARY")
print("----------------------------------------------------------")
print("{:25s}: {}".format("Task", "Time Taken (in seconds)"))
print()
print("{:25s}: {:2.3f}".format("Reading addresses", load_batch - read_dir))
print("{:25s}: {:2.3f}".format("Loading batch", start_det_loop - load_batch))
print("{:25s}: {:2.3f}".format("Detection (" + str(len(imlist)) + " images)", output_recast - start_det_loop))
print("{:25s}: {:2.3f}".format("Output Processing", class_load - output_recast))
print("{:25s}: {:2.3f}".format("Drawing Boxes", end - draw))
print("{:25s}: {:2.3f}".format("Average time_per_img", (end - load_batch)/len(imlist)))
print("----------------------------------------------------------")
torch.cuda.empty_cache()
| 0.50415 | 0.225758 |
import ctypes
import os
import shutil
import cv2
import glob
import subprocess
import signal
from pydub import AudioSegment
from collections import defaultdict
from tqdm import tqdm
from multiprocessing import Process, Queue, Value, Pipe
from queue import Empty
from logging import getLogger, StreamHandler, Formatter, FileHandler, getLevelName
from config import *
def setup_logger(modname):
log_level = getLevelName(LOG_LEVEL)
logger = getLogger(modname)
logger.setLevel(log_level)
sh = StreamHandler()
sh.setLevel(log_level)
formatter = Formatter(
'%(asctime)s - %(name)s - %(levelname)s - %(message)s')
sh.setFormatter(formatter)
logger.addHandler(sh)
fh = FileHandler("error.log") # fh = file handler
fh.setLevel(log_level)
fh_formatter = Formatter(
'%(asctime)s - %(filename)s - %(name)s - %(lineno)d - %(levelname)s - %(message)s')
fh.setFormatter(fh_formatter)
logger.addHandler(fh)
return logger
logger = setup_logger(__name__)
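# Pipeline overview: transfer (optionally copy inputs locally) -> merger (join the clips of one
# recording into raw video/audio) -> encoder (re-encode with ffmpeg) -> transfer2 (move the result
# to OUT_DIR) -> progress (tqdm counter that also flips the shared end switch).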
def merge_video(movie_files, key_name, send_end):
tmp_video_file = os.path.join(TMP_DIR, f"tmp_v_{key_name}.mp4")
debug_1 = ""
try:
        # the output format is mp4
fourcc = cv2.VideoWriter_fourcc('m', 'p', '4', 'v')
        # get the video properties (fps, frame size) from the first clip
movie = cv2.VideoCapture(movie_files[0])
fps = movie.get(cv2.CAP_PROP_FPS)
height = movie.get(cv2.CAP_PROP_FRAME_HEIGHT)
width = movie.get(cv2.CAP_PROP_FRAME_WIDTH)
        # open the output file
out = cv2.VideoWriter(tmp_video_file, int(fourcc), fps,
(int(width), int(height)))
for i, movies in enumerate(movie_files):
debug_1 = movies
            # load the video file; the argument is the path to the video file
movie = cv2.VideoCapture(movies)
count = movie.get(cv2.CAP_PROP_FRAME_COUNT)
frames = []
            if not movie.isOpened(): # check that the video file was opened successfully
continue
for _ in range(int(count)):
                ret, tmp_f = movie.read() # read() loads one frame of captured image data
if ret:
frames.append(tmp_f)
            # write the frames that were read
if i == 0:
[out.write(f) for f in frames]
else:
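                    # skip the first DUP_FRAME frames of follow-on clips; they are assumed to
                    # overlap with the tail of the previous file (see config)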
[out.write(f) for f in frames[DUP_FRAME:]]
except Exception as e:
logger.error(e)
logger.error(debug_1)
out.release()
send_end.send((tmp_video_file, height))
def merge_audio(movie_files, key_name, send_end):
tmp_audio_file_sub = os.path.join(TMP_DIR, f"tmp_a_{key_name}_sub.wav")
tmp_audio_file = os.path.join(TMP_DIR, f"tmp_a_{key_name}.wav")
audio_merged = AudioSegment.empty()
for i, movies in enumerate(movie_files):
command = f"ffmpeg -y -i {movies} -vn -loglevel quiet {tmp_audio_file_sub}"
subprocess.run(command, shell=True)
audio_tmp = AudioSegment.from_file(tmp_audio_file_sub, format="wav")
if i == 0:
audio_merged += audio_tmp
else:
audio_merged += audio_tmp[DUP_AUDIO:]
    # export the merged audio
audio_merged.export(tmp_audio_file, format="wav")
os.remove(tmp_audio_file_sub)
send_end.send(tmp_audio_file)
def encode_movie(key_name, video_file, height, audio_file):
filename = os.path.join(TMP_DIR, f"{key_name}.mp4")
    # combine the video and the audio
vf = VIDEO_FILTER
cv = f"-c:v {VIDEO_CODEC}"
    # the bitrate is fixed according to the resolution
if height == 1080: # FHD
bv = f"-b:v {VIDEO_BR_1}"
elif height == 720: # HD
bv = f"-b:v {VIDEO_BR_2}"
else: # VGA
bv = f"-b:v {VIDEO_BR_3}"
loglevel = "-loglevel quiet"
command = f"ffmpeg -y -i {video_file} -i {audio_file} {cv} {bv} {vf} -c:a aac {loglevel} {filename}"
subprocess.run(command, shell=True)
os.remove(video_file)
os.remove(audio_file)
def transfer(tran_q, merge_q, end_sw):
    # copy the input files to a local directory first (e.g. when they live on a network share)
while not end_sw.value:
try:
files_list, key_name, _ = tran_q.get(timeout=30)
files_list_t = []
for f in files_list:
if INPUT_FILE_COPY:
copy_to_path = os.path.join(TMP_DIR, f.split("/")[-1])
if not os.path.exists(copy_to_path):
shutil.copy(f, copy_to_path)
files_list_t.append(copy_to_path)
else:
files_list_t.append(f)
            merge_q.put((files_list_t, key_name))
except Empty:
continue
def merger(merge_q, encode_q, end_sw):
while not end_sw.value:
try:
files_list, key_name = merge_q.get(timeout=30)
recv_end_v, send_end_v = Pipe(False)
recv_end_a, send_end_a = Pipe(False)
proc_v = Process(target=merge_video, args=(
files_list, key_name, send_end_v))
proc_a = Process(target=merge_audio, args=(
files_list, key_name, send_end_a))
proc_v.start()
proc_a.start()
proc_v.join()
proc_a.join()
if INPUT_FILE_COPY:
for f in files_list:
os.remove(f)
tmp_video_file, height = recv_end_v.recv()
tmp_audio_file = recv_end_a.recv()
encode_q.put((key_name, tmp_video_file, height, tmp_audio_file))
except Empty:
continue
def encoder(encode_q, tran2_q, end_sw):
while not end_sw.value:
try:
key_name, tmp_video_file, height, tmp_audio_file = encode_q.get(timeout=30)
encode_movie(key_name, tmp_video_file, height, tmp_audio_file)
tran2_q.put(key_name)
except Empty:
continue
def transfer2(tran2_q, tqdm_q, end_sw):
while not end_sw.value:
try:
key_name = tran2_q.get(timeout=30)
copy_from_path = os.path.join(TMP_DIR, f"{key_name}.mp4")
copy_to_path = os.path.join(OUT_DIR, f"{key_name}.mp4")
try:
shutil.copy(copy_from_path, copy_to_path)
os.remove(copy_from_path)
except Exception as e:
logger.error(e)
continue
tqdm_q.put(key_name)
except Empty:
continue
def progress(tqdm_q, size, pcnt, end_sw):
with tqdm(total=size) as t:
while size > pcnt.value:
key_name = tqdm_q.get()
t.set_description(f"{key_name} finished")
t.update(1)
pcnt.value += 1
end_sw.value = True
if __name__ == '__main__':
os.makedirs(TMP_DIR, exist_ok=True)
os.makedirs(OUT_DIR, exist_ok=True)
tran_q = Queue()
merge_q = Queue(maxsize=MERGE_WORKERS*4)
encode_q = Queue(maxsize=ENCODE_WORKERS*4)
tran2_q = Queue()
tqdm_q = Queue()
pcnt = Value(ctypes.c_int)
pcnt.value = 0
end_sw = Value(ctypes.c_bool)
end_sw.value = False
    # group the videos in the directory by front/rear camera and by recording start time
files_dict = defaultdict(list)
for f in glob.glob(os.path.join(IN_DIR, "*.MP4")):
files_dict["_".join(f.split("/")[-1].split("_")[:2])].append(f)
data = []
for i, (key_name, files_list) in enumerate(files_dict.items()):
if not os.path.exists(os.path.join(OUT_DIR, f"{key_name}.mp4")):
data.append((sorted(files_list), key_name, i))
[tran_q.put(q) for q in data]
proc_tran = Process(target=transfer, args=(tran_q, merge_q, end_sw))
proc_tran.start()
proc_merg = [Process(target=merger, args=(merge_q, encode_q, end_sw))
for _ in range(MERGE_WORKERS)]
[p.start() for p in proc_merg]
proc_enc = [Process(target=encoder, args=(encode_q, tran2_q, end_sw))
for _ in range(ENCODE_WORKERS)]
[p.start() for p in proc_enc]
proc_tran2 = Process(target=transfer2, args=(tran2_q, tqdm_q, end_sw))
proc_tran2.start()
proc_tqdm = Process(target=progress, args=(tqdm_q, len(data), pcnt, end_sw))
proc_tqdm.start()
proc_tran.join()
[p.join() for p in proc_merg]
[p.join() for p in proc_enc]
proc_tqdm.join()
proc_tran2.join()
shutil.rmtree(TMP_DIR)
|
mitsuba.py
|
import ctypes
import os
import shutil
import cv2
import glob
import subprocess
import signal
from pydub import AudioSegment
from collections import defaultdict
from tqdm import tqdm
from multiprocessing import Process, Queue, Value, Pipe
from queue import Empty
from logging import getLogger, StreamHandler, Formatter, FileHandler, getLevelName
from config import *
def setup_logger(modname):
log_level = getLevelName(LOG_LEVEL)
logger = getLogger(modname)
logger.setLevel(log_level)
sh = StreamHandler()
sh.setLevel(log_level)
formatter = Formatter(
'%(asctime)s - %(name)s - %(levelname)s - %(message)s')
sh.setFormatter(formatter)
logger.addHandler(sh)
fh = FileHandler("error.log") # fh = file handler
fh.setLevel(log_level)
fh_formatter = Formatter(
'%(asctime)s - %(filename)s - %(name)s - %(lineno)d - %(levelname)s - %(message)s')
fh.setFormatter(fh_formatter)
logger.addHandler(fh)
return logger
logger = setup_logger(__name__)
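# Pipeline overview: transfer (optionally copy inputs locally) -> merger (join the clips of one
# recording into raw video/audio) -> encoder (re-encode with ffmpeg) -> transfer2 (move the result
# to OUT_DIR) -> progress (tqdm counter that also flips the shared end switch).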
def merge_video(movie_files, key_name, send_end):
tmp_video_file = os.path.join(TMP_DIR, f"tmp_v_{key_name}.mp4")
debug_1 = ""
try:
        # the output format is mp4
fourcc = cv2.VideoWriter_fourcc('m', 'p', '4', 'v')
        # get the video properties (fps, frame size) from the first clip
movie = cv2.VideoCapture(movie_files[0])
fps = movie.get(cv2.CAP_PROP_FPS)
height = movie.get(cv2.CAP_PROP_FRAME_HEIGHT)
width = movie.get(cv2.CAP_PROP_FRAME_WIDTH)
        # open the output file
out = cv2.VideoWriter(tmp_video_file, int(fourcc), fps,
(int(width), int(height)))
for i, movies in enumerate(movie_files):
debug_1 = movies
            # load the video file; the argument is the path to the video file
movie = cv2.VideoCapture(movies)
count = movie.get(cv2.CAP_PROP_FRAME_COUNT)
frames = []
            if not movie.isOpened(): # check that the video file was opened successfully
continue
for _ in range(int(count)):
                ret, tmp_f = movie.read() # read() loads one frame of captured image data
if ret:
frames.append(tmp_f)
            # write the frames that were read
if i == 0:
[out.write(f) for f in frames]
else:
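                    # skip the first DUP_FRAME frames of follow-on clips; they are assumed to
                    # overlap with the tail of the previous file (see config)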
[out.write(f) for f in frames[DUP_FRAME:]]
except Exception as e:
logger.error(e)
logger.error(debug_1)
out.release()
send_end.send((tmp_video_file, height))
def merge_audio(movie_files, key_name, send_end):
tmp_audio_file_sub = os.path.join(TMP_DIR, f"tmp_a_{key_name}_sub.wav")
tmp_audio_file = os.path.join(TMP_DIR, f"tmp_a_{key_name}.wav")
audio_merged = AudioSegment.empty()
for i, movies in enumerate(movie_files):
command = f"ffmpeg -y -i {movies} -vn -loglevel quiet {tmp_audio_file_sub}"
subprocess.run(command, shell=True)
audio_tmp = AudioSegment.from_file(tmp_audio_file_sub, format="wav")
if i == 0:
audio_merged += audio_tmp
else:
audio_merged += audio_tmp[DUP_AUDIO:]
    # export the merged audio
audio_merged.export(tmp_audio_file, format="wav")
os.remove(tmp_audio_file_sub)
send_end.send(tmp_audio_file)
def encode_movie(key_name, video_file, height, audio_file):
filename = os.path.join(TMP_DIR, f"{key_name}.mp4")
    # combine the video and the audio
vf = VIDEO_FILTER
cv = f"-c:v {VIDEO_CODEC}"
    # the bitrate is fixed according to the resolution
if height == 1080: # FHD
bv = f"-b:v {VIDEO_BR_1}"
elif height == 720: # HD
bv = f"-b:v {VIDEO_BR_2}"
else: # VGA
bv = f"-b:v {VIDEO_BR_3}"
loglevel = "-loglevel quiet"
command = f"ffmpeg -y -i {video_file} -i {audio_file} {cv} {bv} {vf} -c:a aac {loglevel} {filename}"
subprocess.run(command, shell=True)
os.remove(video_file)
os.remove(audio_file)
def transfer(tran_q, merge_q, end_sw):
    # copy the input files to a local directory first (e.g. when they live on a network share)
while not end_sw.value:
try:
files_list, key_name, _ = tran_q.get(timeout=30)
files_list_t = []
for f in files_list:
if INPUT_FILE_COPY:
copy_to_path = os.path.join(TMP_DIR, f.split("/")[-1])
if not os.path.exists(copy_to_path):
shutil.copy(f, copy_to_path)
files_list_t.append(copy_to_path)
else:
files_list_t.append(f)
            merge_q.put((files_list_t, key_name))
except Empty:
continue
def merger(merge_q, encode_q, end_sw):
while not end_sw.value:
try:
files_list, key_name = merge_q.get(timeout=30)
recv_end_v, send_end_v = Pipe(False)
recv_end_a, send_end_a = Pipe(False)
proc_v = Process(target=merge_video, args=(
files_list, key_name, send_end_v))
proc_a = Process(target=merge_audio, args=(
files_list, key_name, send_end_a))
proc_v.start()
proc_a.start()
proc_v.join()
proc_a.join()
if INPUT_FILE_COPY:
for f in files_list:
os.remove(f)
tmp_video_file, height = recv_end_v.recv()
tmp_audio_file = recv_end_a.recv()
encode_q.put((key_name, tmp_video_file, height, tmp_audio_file))
except Empty:
continue
def encoder(encode_q, tran2_q, end_sw):
while not end_sw.value:
try:
key_name, tmp_video_file, height, tmp_audio_file = encode_q.get(timeout=30)
encode_movie(key_name, tmp_video_file, height, tmp_audio_file)
tran2_q.put(key_name)
except Empty:
continue
def transfer2(tran2_q, tqdm_q, end_sw):
while not end_sw.value:
try:
key_name = tran2_q.get(timeout=30)
copy_from_path = os.path.join(TMP_DIR, f"{key_name}.mp4")
copy_to_path = os.path.join(OUT_DIR, f"{key_name}.mp4")
try:
shutil.copy(copy_from_path, copy_to_path)
os.remove(copy_from_path)
except Exception as e:
logger.error(e)
continue
tqdm_q.put(key_name)
except Empty:
continue
def progress(tqdm_q, size, pcnt, end_sw):
with tqdm(total=size) as t:
while size > pcnt.value:
key_name = tqdm_q.get()
t.set_description(f"{key_name} finished")
t.update(1)
pcnt.value += 1
end_sw.value = True
if __name__ == '__main__':
os.makedirs(TMP_DIR, exist_ok=True)
os.makedirs(OUT_DIR, exist_ok=True)
tran_q = Queue()
merge_q = Queue(maxsize=MERGE_WORKERS*4)
encode_q = Queue(maxsize=ENCODE_WORKERS*4)
tran2_q = Queue()
tqdm_q = Queue()
pcnt = Value(ctypes.c_int)
pcnt.value = 0
end_sw = Value(ctypes.c_bool)
end_sw.value = False
    # group the videos in the directory by front/rear camera and by recording start time
files_dict = defaultdict(list)
for f in glob.glob(os.path.join(IN_DIR, "*.MP4")):
files_dict["_".join(f.split("/")[-1].split("_")[:2])].append(f)
data = []
for i, (key_name, files_list) in enumerate(files_dict.items()):
if not os.path.exists(os.path.join(OUT_DIR, f"{key_name}.mp4")):
data.append((sorted(files_list), key_name, i))
[tran_q.put(q) for q in data]
proc_tran = Process(target=transfer, args=(tran_q, merge_q, end_sw))
proc_tran.start()
proc_merg = [Process(target=merger, args=(merge_q, encode_q, end_sw))
for _ in range(MERGE_WORKERS)]
[p.start() for p in proc_merg]
proc_enc = [Process(target=encoder, args=(encode_q, tran2_q, end_sw))
for _ in range(ENCODE_WORKERS)]
[p.start() for p in proc_enc]
proc_tran2 = Process(target=transfer2, args=(tran2_q, tqdm_q, end_sw))
proc_tran2.start()
proc_tqdm = Process(target=progress, args=(tqdm_q, len(data), pcnt, end_sw))
proc_tqdm.start()
proc_tran.join()
[p.join() for p in proc_merg]
[p.join() for p in proc_enc]
proc_tqdm.join()
proc_tran2.join()
shutil.rmtree(TMP_DIR)
| 0.107455 | 0.095055 |
# @Time : 11/24/18 12:29 PM
# @Author : <NAME>
# @File : multimodal_gan.py
from random import sample
import numpy as np
# installed packages and modules
from keras.layers import (Dense, Conv1D, MaxPool1D, Flatten,
Dropout, Input, Activation, BatchNormalization,
concatenate, GaussianNoise, multiply, RepeatVector,
Lambda)
from keras.models import Model
from keras.optimizers import RMSprop
from keras.regularizers import l1_l2
from numpy import zeros
from numpy.random import standard_normal
from .settings import (TEXTS_SIZE, TEXT_NOISE_SIZE, EMBEDDING_SIZE,
IMAGES_SIZE, IMAGE_NOISE_SIZE, LEAVES_SIZE, LEAF_NOISE_SIZE)
# created packages and modules
from .utils import Word2Embedded, tanh3
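# kronecker_product builds a flattened pairwise (outer) product of two batched feature vectors;
# the discriminator uses it below to fuse every pair of modality representations.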
def kronecker_product(mat1, mat2):
n1 = mat1.get_shape()[1]
n2 = mat2.get_shape()[1]
mat1 = RepeatVector(n2)(mat1)
mat1 = concatenate([mat1[:, :, i] for i in range(n1)], axis=-1)
mat2 = Flatten()(RepeatVector(n1)(mat2))
result = multiply(inputs=[mat1, mat2])
    # (optional) reshape the flattened outer product back to a 2-D (n2, n1) tensor
# result = Reshape((n2, n1))(result)
return result
Kronecker = Lambda(lambda tensors: kronecker_product(tensors[0], tensors[1]))
def generator_for_text(noise_len, embedding_len, conv_filters, conv_window_len):
# start to create CNN
# add input layer
texts_noise = Input(shape=(noise_len, embedding_len), dtype="float32", name="texts_noise")
# add first conv layer and batch-normalization layer
hidden_layers = Conv1D(conv_filters, conv_window_len, padding='valid', strides=1)(texts_noise)
hidden_layers = BatchNormalization()(hidden_layers)
hidden_layers = Activation(activation='relu')(hidden_layers)
# add second conv layer and batch-normalization layer
hidden_layers = Conv1D(conv_filters, conv_window_len, padding='valid', strides=1)(hidden_layers)
hidden_layers = BatchNormalization()(hidden_layers)
hidden_layers = Activation(activation='relu')(hidden_layers)
hidden_layers = Conv1D(conv_filters, conv_window_len, padding='valid', strides=1)(hidden_layers)
hidden_layers = BatchNormalization()(hidden_layers)
hidden_layers = Activation(activation='relu')(hidden_layers)
hidden_layers = Conv1D(conv_filters, conv_window_len, padding='valid', strides=1)(hidden_layers)
hidden_layers = BatchNormalization()(hidden_layers)
hidden_layers = Activation(activation='relu')(hidden_layers)
hidden_layers = Conv1D(conv_filters, conv_window_len, padding='valid', strides=1)(hidden_layers)
hidden_layers = BatchNormalization()(hidden_layers)
texts_out = Activation(tanh3)(hidden_layers)
gen_model = Model(inputs=[texts_noise], outputs=[texts_out])
return gen_model
def generator_for_image_or_leaf(noise_len, out_len, dense_units):
noise = Input(shape=(noise_len,), dtype="float32", name="images_noise")
    # add fully-connected hidden layers
hidden_layers = Dense(dense_units, activation="relu")(noise)
hidden_layers = Dense(dense_units, activation="relu")(hidden_layers)
hidden_layers = Dense(dense_units, activation="relu")(hidden_layers)
hidden_layers = Dense(dense_units, activation="relu")(hidden_layers)
hidden_layers = Dense(dense_units, activation="relu")(hidden_layers)
hidden_layers = Dense(dense_units, activation="relu")(hidden_layers)
gen_out = Dense(out_len, activation="tanh")(hidden_layers)
gen_model = Model(inputs=[noise], outputs=[gen_out])
return gen_model
def discriminator(text_len, embedding_len, conv_filters, conv_window_len, dense_units,
images_size, leaves_size, lr, is_gate=True):
# start to create CNN
# add input layer
texts = Input(shape=(text_len, embedding_len), dtype="float32", name="texts")
texts_with_noise = GaussianNoise(0.01)(texts)
# add first conv layer and max-pool layer
texts_conv1d = Conv1D(conv_filters, conv_window_len, padding='valid',
activation='linear', strides=1)(texts_with_noise)
texts_pool1d = MaxPool1D()(texts_conv1d)
# add flatten layer
texts_flatten = Flatten()(texts_pool1d)
    # add a fully-connected layer and a dropout layer
texts_dense = Dense(10, activation="linear")(texts_flatten)
texts_out = Dropout(0.5)(texts_dense)
images = Input(shape=(images_size,), name='images')
images_with_noise = GaussianNoise(0.01)(images)
images_out = Dense(4, activation='linear')(images_with_noise)
leaves = Input(shape=(leaves_size,), name="leaves")
leaves_with_noise = GaussianNoise(0.01)(leaves)
leaves_out = Dense(5, activation='linear')(leaves_with_noise)
if is_gate:
texts_gate = Dense(10, activation="hard_sigmoid", name='texts_gate')(concatenate([images_out, leaves_out],
axis=-1))
images_gate = Dense(4, activation="hard_sigmoid", name='images_gate')(concatenate([texts_out, leaves_out],
axis=-1))
leaves_gate = Dense(5, activation="hard_sigmoid", name='leaves_gate')(concatenate([texts_out, images_out],
axis=-1))
texts_filtered = multiply([texts_out, texts_gate])
images_filtered = multiply([images_out, images_gate])
leaves_filtered = multiply([leaves_out, leaves_gate])
else:
texts_filtered = texts_out
images_filtered = images_out
leaves_filtered = leaves_out
texts_images_kron = Kronecker([images_filtered, texts_filtered])
texts_leaves_kron = Kronecker([leaves_filtered, texts_filtered])
images_leaves_kron = Kronecker([images_filtered, leaves_filtered])
datas = [texts_out, images_out, leaves_out, texts_images_kron,
texts_leaves_kron, images_leaves_kron]
cat_data = concatenate(datas)
cat_hidden = Dense(dense_units, activation="linear")(cat_data)
cat_hidden = Dropout(0.5)(cat_hidden)
# add output layer with softmax
cat_output = Dense(2, activation='softmax', name='cat_output',
activity_regularizer=l1_l2(l1=0.02, l2=0.02),
kernel_regularizer=l1_l2(l1=0.02, l2=0.02),
bias_regularizer=l1_l2(l1=0.02, l2=0.02))(cat_hidden)
dis_model = Model(inputs=[texts, images, leaves], outputs=[cat_output])
optimizer = RMSprop(lr=lr, clipvalue=1.0, decay=1e-8)
dis_model.compile(loss="binary_crossentropy", optimizer=optimizer, metrics=["accuracy"])
return dis_model
def fix_model(model, is_trainable=False):
model.trainable = is_trainable
for layer in model.layers:
layer.trainable = is_trainable
class Gan(object):
def __init__(self):
# create the generator and discriminator model.
# create the generator model
texts_size = TEXTS_SIZE
text_noise_size = TEXT_NOISE_SIZE
embedding_size = EMBEDDING_SIZE
conv_filters = 300
conv_window_len = 3
image_size = IMAGES_SIZE
image_noise_size = IMAGE_NOISE_SIZE
image_dense_units = 100
leaves_size = LEAVES_SIZE
leaf_noise_size = LEAF_NOISE_SIZE
leaf_dense_units = 500
dis_lr = 1e-4
gen_lr = 1e-3
self.generator_for_text = generator_for_text(noise_len=text_noise_size,
embedding_len=embedding_size,
conv_filters=conv_filters,
conv_window_len=conv_window_len,
)
self.generator_for_image = generator_for_image_or_leaf(noise_len=image_noise_size,
out_len=image_size,
dense_units=image_dense_units,
)
self.generator_for_leaf = generator_for_image_or_leaf(noise_len=leaf_noise_size,
out_len=leaves_size,
dense_units=leaf_dense_units,
)
# create the discriminator model
self.discriminator = discriminator(texts_size, embedding_size,
conv_filters=250,
conv_window_len=3,
dense_units=250,
lr=dis_lr,
images_size=image_size,
leaves_size=leaves_size)
# fix the discriminator
fix_model(self.discriminator, is_trainable=False)
# assemble the generator and discriminator model into a gan model
text_noise_in = Input(shape=(text_noise_size, embedding_size), dtype="float32", name="text_noise_in")
image_noise_in = Input(shape=(image_noise_size,), dtype="float32", name="image_noise_in")
leaf_noise_in = Input(shape=(leaf_noise_size,), dtype="float32", name="leaf_noise_in")
text_hidden_layer = self.generator_for_text(text_noise_in)
image_hidden_layer = self.generator_for_image(image_noise_in)
leaf_hidden_layer = self.generator_for_leaf(leaf_noise_in)
gan_output = self.discriminator([text_hidden_layer, image_hidden_layer, leaf_hidden_layer])
self.gan_model = Model(inputs=[text_noise_in, image_noise_in, leaf_noise_in], outputs=[gan_output])
optimizer = RMSprop(lr=gen_lr, clipvalue=1.0, decay=1e-8)
self.gan_model.compile(optimizer=optimizer, loss="binary_crossentropy", metrics=["accuracy"])
self.text_len = texts_size
self.text_noise_len = text_noise_size
self.embedding_len = embedding_size
self.word2embedded = Word2Embedded(texts_size)
self.image_noise_len = image_noise_size
self.leaf_noise_len = leaf_noise_size
self.losses = {"gen_loss": [], "dis_loss": []}
def train(self, texts, images, leaves, epochs=2000, batch_size=25):
for epoch in range(epochs):
text_seed_noises = standard_normal(size=(batch_size,
self.text_noise_len,
self.embedding_len)).astype(dtype="float32")
image_seed_noises = standard_normal(size=(batch_size,
self.image_noise_len)).astype(dtype="float32")
leaf_seed_noises = standard_normal(size=(batch_size,
self.leaf_noise_len)).astype(dtype="float32")
# counterfeit text, image and leaf
gen_embedding_mat = self.generator_for_text.predict(text_seed_noises)
gen_image_mat = self.generator_for_image.predict(image_seed_noises)
gen_leaf_mat = self.generator_for_leaf.predict(leaf_seed_noises)
# sample from x with batch_size
batch_index = sample(range(texts.shape[0]), batch_size)
true_word_index = texts[batch_index]
true_embedding_mat = self.word2embedded(true_word_index)
true_image_mat = images[batch_index]
true_leaf_mat = leaves[batch_index]
            # concatenate the real and counterfeit samples for each modality
cat_texts = np.concatenate((true_embedding_mat, gen_embedding_mat), axis=0)
cat_images = np.concatenate((true_image_mat, gen_image_mat), axis=0)
cat_leaves = np.concatenate((true_leaf_mat, gen_leaf_mat), axis=0)
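            # one-hot discriminator targets: the first batch_size rows (real samples) get class 1,
            # the generated rows get class 0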
target = zeros(shape=(batch_size * 2, 2), dtype="int32")
target[:batch_size, 1] = 1
target[batch_size:, 0] = 1
# feed the cat data and target into discriminator
fix_model(self.discriminator, is_trainable=True)
dis_loss = self.discriminator.train_on_batch(x={"texts": cat_texts,
"images": cat_images,
"leaves": cat_leaves}, y=target)
self.losses["dis_loss"].append(dis_loss[0])
print(('epoch: {}, training discriminator, '
'loss: {:.2f}, accuracy: {:.2f}').format(epoch + 1, *dis_loss))
# train Generator-Discriminator stack on input noise to non-generated output class
text_seed_noises = standard_normal(size=(batch_size,
self.text_noise_len,
self.embedding_len)).astype(dtype="float32")
image_seed_noises = standard_normal(size=(batch_size,
self.image_noise_len)).astype(dtype="float32")
leaf_seed_noises = standard_normal(size=(batch_size,
self.leaf_noise_len)).astype(dtype="float32")
target = zeros([batch_size, 2], dtype="int32")
target[:, 1] = 1
# train gan with discriminator fixed
fix_model(self.discriminator, is_trainable=False)
gen_loss = self.gan_model.train_on_batch(x={"text_noise_in": text_seed_noises,
"image_noise_in": image_seed_noises,
"leaf_noise_in": leaf_seed_noises}, y=target)
self.losses["gen_loss"].append(gen_loss[0])
print(("epoch: {}, training generator, "
"loss: {:.2f}, accuracy: {:.2f}").format(epoch + 1, *gen_loss))
print('-' * 60)
|
DMMFF/multimodal_gan.py
|
# @Time : 11/24/18 12:29 PM
# @Author : <NAME>
# @File : multimodal_gan.py
from random import sample
import numpy as np
# installed packages and modules
from keras.layers import (Dense, Conv1D, MaxPool1D, Flatten,
Dropout, Input, Activation, BatchNormalization,
concatenate, GaussianNoise, multiply, RepeatVector,
Lambda)
from keras.models import Model
from keras.optimizers import RMSprop
from keras.regularizers import l1_l2
from numpy import zeros
from numpy.random import standard_normal
from .settings import (TEXTS_SIZE, TEXT_NOISE_SIZE, EMBEDDING_SIZE,
IMAGES_SIZE, IMAGE_NOISE_SIZE, LEAVES_SIZE, LEAF_NOISE_SIZE)
# created packages and modules
from .utils import Word2Embedded, tanh3
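# kronecker_product builds a flattened pairwise (outer) product of two batched feature vectors;
# the discriminator uses it below to fuse every pair of modality representations.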
def kronecker_product(mat1, mat2):
n1 = mat1.get_shape()[1]
n2 = mat2.get_shape()[1]
mat1 = RepeatVector(n2)(mat1)
mat1 = concatenate([mat1[:, :, i] for i in range(n1)], axis=-1)
mat2 = Flatten()(RepeatVector(n1)(mat2))
result = multiply(inputs=[mat1, mat2])
    # (optional) reshape the flattened outer product back to a 2-D (n2, n1) tensor
# result = Reshape((n2, n1))(result)
return result
Kronecker = Lambda(lambda tensors: kronecker_product(tensors[0], tensors[1]))
def generator_for_text(noise_len, embedding_len, conv_filters, conv_window_len):
# start to create CNN
# add input layer
texts_noise = Input(shape=(noise_len, embedding_len), dtype="float32", name="texts_noise")
# add first conv layer and batch-normalization layer
hidden_layers = Conv1D(conv_filters, conv_window_len, padding='valid', strides=1)(texts_noise)
hidden_layers = BatchNormalization()(hidden_layers)
hidden_layers = Activation(activation='relu')(hidden_layers)
# add second conv layer and batch-normalization layer
hidden_layers = Conv1D(conv_filters, conv_window_len, padding='valid', strides=1)(hidden_layers)
hidden_layers = BatchNormalization()(hidden_layers)
hidden_layers = Activation(activation='relu')(hidden_layers)
hidden_layers = Conv1D(conv_filters, conv_window_len, padding='valid', strides=1)(hidden_layers)
hidden_layers = BatchNormalization()(hidden_layers)
hidden_layers = Activation(activation='relu')(hidden_layers)
hidden_layers = Conv1D(conv_filters, conv_window_len, padding='valid', strides=1)(hidden_layers)
hidden_layers = BatchNormalization()(hidden_layers)
hidden_layers = Activation(activation='relu')(hidden_layers)
hidden_layers = Conv1D(conv_filters, conv_window_len, padding='valid', strides=1)(hidden_layers)
hidden_layers = BatchNormalization()(hidden_layers)
texts_out = Activation(tanh3)(hidden_layers)
gen_model = Model(inputs=[texts_noise], outputs=[texts_out])
return gen_model
def generator_for_image_or_leaf(noise_len, out_len, dense_units):
noise = Input(shape=(noise_len,), dtype="float32", name="images_noise")
    # add fully-connected hidden layers
hidden_layers = Dense(dense_units, activation="relu")(noise)
hidden_layers = Dense(dense_units, activation="relu")(hidden_layers)
hidden_layers = Dense(dense_units, activation="relu")(hidden_layers)
hidden_layers = Dense(dense_units, activation="relu")(hidden_layers)
hidden_layers = Dense(dense_units, activation="relu")(hidden_layers)
hidden_layers = Dense(dense_units, activation="relu")(hidden_layers)
gen_out = Dense(out_len, activation="tanh")(hidden_layers)
gen_model = Model(inputs=[noise], outputs=[gen_out])
return gen_model
def discriminator(text_len, embedding_len, conv_filters, conv_window_len, dense_units,
images_size, leaves_size, lr, is_gate=True):
# start to create CNN
# add input layer
texts = Input(shape=(text_len, embedding_len), dtype="float32", name="texts")
texts_with_noise = GaussianNoise(0.01)(texts)
# add first conv layer and max-pool layer
texts_conv1d = Conv1D(conv_filters, conv_window_len, padding='valid',
activation='linear', strides=1)(texts_with_noise)
texts_pool1d = MaxPool1D()(texts_conv1d)
# add flatten layer
texts_flatten = Flatten()(texts_pool1d)
    # add a fully-connected layer and a dropout layer
texts_dense = Dense(10, activation="linear")(texts_flatten)
texts_out = Dropout(0.5)(texts_dense)
images = Input(shape=(images_size,), name='images')
images_with_noise = GaussianNoise(0.01)(images)
images_out = Dense(4, activation='linear')(images_with_noise)
leaves = Input(shape=(leaves_size,), name="leaves")
leaves_with_noise = GaussianNoise(0.01)(leaves)
leaves_out = Dense(5, activation='linear')(leaves_with_noise)
if is_gate:
texts_gate = Dense(10, activation="hard_sigmoid", name='texts_gate')(concatenate([images_out, leaves_out],
axis=-1))
images_gate = Dense(4, activation="hard_sigmoid", name='images_gate')(concatenate([texts_out, leaves_out],
axis=-1))
leaves_gate = Dense(5, activation="hard_sigmoid", name='leaves_gate')(concatenate([texts_out, images_out],
axis=-1))
texts_filtered = multiply([texts_out, texts_gate])
images_filtered = multiply([images_out, images_gate])
leaves_filtered = multiply([leaves_out, leaves_gate])
else:
texts_filtered = texts_out
images_filtered = images_out
leaves_filtered = leaves_out
texts_images_kron = Kronecker([images_filtered, texts_filtered])
texts_leaves_kron = Kronecker([leaves_filtered, texts_filtered])
images_leaves_kron = Kronecker([images_filtered, leaves_filtered])
datas = [texts_out, images_out, leaves_out, texts_images_kron,
texts_leaves_kron, images_leaves_kron]
cat_data = concatenate(datas)
cat_hidden = Dense(dense_units, activation="linear")(cat_data)
cat_hidden = Dropout(0.5)(cat_hidden)
# add output layer with softmax
cat_output = Dense(2, activation='softmax', name='cat_output',
activity_regularizer=l1_l2(l1=0.02, l2=0.02),
kernel_regularizer=l1_l2(l1=0.02, l2=0.02),
bias_regularizer=l1_l2(l1=0.02, l2=0.02))(cat_hidden)
dis_model = Model(inputs=[texts, images, leaves], outputs=[cat_output])
optimizer = RMSprop(lr=lr, clipvalue=1.0, decay=1e-8)
dis_model.compile(loss="binary_crossentropy", optimizer=optimizer, metrics=["accuracy"])
return dis_model
def fix_model(model, is_trainable=False):
model.trainable = is_trainable
for layer in model.layers:
layer.trainable = is_trainable
class Gan(object):
def __init__(self):
# create the generator and discriminator model.
# create the generator model
texts_size = TEXTS_SIZE
text_noise_size = TEXT_NOISE_SIZE
embedding_size = EMBEDDING_SIZE
conv_filters = 300
conv_window_len = 3
image_size = IMAGES_SIZE
image_noise_size = IMAGE_NOISE_SIZE
image_dense_units = 100
leaves_size = LEAVES_SIZE
leaf_noise_size = LEAF_NOISE_SIZE
leaf_dense_units = 500
dis_lr = 1e-4
gen_lr = 1e-3
self.generator_for_text = generator_for_text(noise_len=text_noise_size,
embedding_len=embedding_size,
conv_filters=conv_filters,
conv_window_len=conv_window_len,
)
self.generator_for_image = generator_for_image_or_leaf(noise_len=image_noise_size,
out_len=image_size,
dense_units=image_dense_units,
)
self.generator_for_leaf = generator_for_image_or_leaf(noise_len=leaf_noise_size,
out_len=leaves_size,
dense_units=leaf_dense_units,
)
# create the discriminator model
self.discriminator = discriminator(texts_size, embedding_size,
conv_filters=250,
conv_window_len=3,
dense_units=250,
lr=dis_lr,
images_size=image_size,
leaves_size=leaves_size)
# fix the discriminator
fix_model(self.discriminator, is_trainable=False)
# assemble the generator and discriminator model into a gan model
text_noise_in = Input(shape=(text_noise_size, embedding_size), dtype="float32", name="text_noise_in")
image_noise_in = Input(shape=(image_noise_size,), dtype="float32", name="image_noise_in")
leaf_noise_in = Input(shape=(leaf_noise_size,), dtype="float32", name="leaf_noise_in")
text_hidden_layer = self.generator_for_text(text_noise_in)
image_hidden_layer = self.generator_for_image(image_noise_in)
leaf_hidden_layer = self.generator_for_leaf(leaf_noise_in)
gan_output = self.discriminator([text_hidden_layer, image_hidden_layer, leaf_hidden_layer])
self.gan_model = Model(inputs=[text_noise_in, image_noise_in, leaf_noise_in], outputs=[gan_output])
optimizer = RMSprop(lr=gen_lr, clipvalue=1.0, decay=1e-8)
self.gan_model.compile(optimizer=optimizer, loss="binary_crossentropy", metrics=["accuracy"])
self.text_len = texts_size
self.text_noise_len = text_noise_size
self.embedding_len = embedding_size
self.word2embedded = Word2Embedded(texts_size)
self.image_noise_len = image_noise_size
self.leaf_noise_len = leaf_noise_size
self.losses = {"gen_loss": [], "dis_loss": []}
def train(self, texts, images, leaves, epochs=2000, batch_size=25):
for epoch in range(epochs):
text_seed_noises = standard_normal(size=(batch_size,
self.text_noise_len,
self.embedding_len)).astype(dtype="float32")
image_seed_noises = standard_normal(size=(batch_size,
self.image_noise_len)).astype(dtype="float32")
leaf_seed_noises = standard_normal(size=(batch_size,
self.leaf_noise_len)).astype(dtype="float32")
# counterfeit text, image and leaf
gen_embedding_mat = self.generator_for_text.predict(text_seed_noises)
gen_image_mat = self.generator_for_image.predict(image_seed_noises)
gen_leaf_mat = self.generator_for_leaf.predict(leaf_seed_noises)
# sample from x with batch_size
batch_index = sample(range(texts.shape[0]), batch_size)
true_word_index = texts[batch_index]
true_embedding_mat = self.word2embedded(true_word_index)
true_image_mat = images[batch_index]
true_leaf_mat = leaves[batch_index]
            # concatenate the real and counterfeit samples for each modality
cat_texts = np.concatenate((true_embedding_mat, gen_embedding_mat), axis=0)
cat_images = np.concatenate((true_image_mat, gen_image_mat), axis=0)
cat_leaves = np.concatenate((true_leaf_mat, gen_leaf_mat), axis=0)
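            # one-hot discriminator targets: the first batch_size rows (real samples) get class 1,
            # the generated rows get class 0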
target = zeros(shape=(batch_size * 2, 2), dtype="int32")
target[:batch_size, 1] = 1
target[batch_size:, 0] = 1
# feed the cat data and target into discriminator
fix_model(self.discriminator, is_trainable=True)
dis_loss = self.discriminator.train_on_batch(x={"texts": cat_texts,
"images": cat_images,
"leaves": cat_leaves}, y=target)
self.losses["dis_loss"].append(dis_loss[0])
print(('epoch: {}, training discriminator, '
'loss: {:.2f}, accuracy: {:.2f}').format(epoch + 1, *dis_loss))
# train Generator-Discriminator stack on input noise to non-generated output class
text_seed_noises = standard_normal(size=(batch_size,
self.text_noise_len,
self.embedding_len)).astype(dtype="float32")
image_seed_noises = standard_normal(size=(batch_size,
self.image_noise_len)).astype(dtype="float32")
leaf_seed_noises = standard_normal(size=(batch_size,
self.leaf_noise_len)).astype(dtype="float32")
target = zeros([batch_size, 2], dtype="int32")
target[:, 1] = 1
# train gan with discriminator fixed
fix_model(self.discriminator, is_trainable=False)
gen_loss = self.gan_model.train_on_batch(x={"text_noise_in": text_seed_noises,
"image_noise_in": image_seed_noises,
"leaf_noise_in": leaf_seed_noises}, y=target)
self.losses["gen_loss"].append(gen_loss[0])
print(("epoch: {}, training generator, "
"loss: {:.2f}, accuracy: {:.2f}").format(epoch + 1, *gen_loss))
print('-' * 60)
| 0.822332 | 0.532668 |
import lldb
from intelpt_testcase import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil
from lldbsuite.test.decorators import *
class TestTraceLoad(TraceIntelPTTestCaseBase):
mydir = TestBase.compute_mydir(__file__)
NO_DEBUG_INFO_TESTCASE = True
def testLoadTrace(self):
src_dir = self.getSourceDir()
trace_definition_file = os.path.join(src_dir, "intelpt-trace", "trace.json")
self.expect("trace load -v " + trace_definition_file, substrs=["intel-pt"])
target = self.dbg.GetSelectedTarget()
process = target.GetProcess()
self.assertEqual(process.GetProcessID(), 1234)
self.assertEqual(process.GetNumThreads(), 1)
self.assertEqual(process.GetThreadAtIndex(0).GetThreadID(), 3842849)
self.assertEqual(target.GetNumModules(), 1)
module = target.GetModuleAtIndex(0)
path = module.GetFileSpec()
self.assertEqual(path.fullpath, os.path.join(src_dir, "intelpt-trace", "a.out"))
self.assertGreater(module.GetNumSections(), 0)
self.assertEqual(module.GetSectionAtIndex(0).GetFileAddress(), 0x400000)
self.assertEqual("6AA9A4E2-6F28-2F33-377D-59FECE874C71-5B41261A", module.GetUUIDString())
# check that the Process and Thread objects were created correctly
self.expect("thread info", substrs=["tid = 3842849"])
self.expect("thread list", substrs=["Process 1234 stopped", "tid = 3842849"])
self.expect("thread trace dump info", substrs=['''Trace technology: intel-pt
thread #1: tid = 3842849
Total number of instructions: 21
Memory usage:
Raw trace size: 4 KiB
Total approximate memory usage (excluding raw trace): 1.27 KiB
Average memory usage per instruction (excluding raw trace): 61.76 bytes
Timing:
Decoding instructions: ''', '''s
Events:
Number of instructions with events: 1
Number of individual events: 1
paused: 1
Errors:
Number of TSC decoding errors: 0'''])
def testLoadInvalidTraces(self):
src_dir = self.getSourceDir()
        # First we test an invalid type
self.expect("trace load -v " + os.path.join(src_dir, "intelpt-trace", "trace_bad.json"), error=True,
substrs=['''error: expected object at traceSession.processes[0]
Context:
{
"processes": [
/* error: expected object */
123
],
"trace": { ... }
}
Schema:
{
"trace": {
"type": "intel-pt",
"cpuInfo": {
"vendor": "intel" | "unknown",
"family": integer,
"model": integer,
"stepping": integer
}
},'''])
# Now we test a missing field in the global session file
self.expect("trace load -v " + os.path.join(src_dir, "intelpt-trace", "trace_bad2.json"), error=True,
substrs=['error: missing value at traceSession.processes[1].triple', "Context", "Schema"])
# Now we test a missing field in the intel-pt settings
self.expect("trace load -v " + os.path.join(src_dir, "intelpt-trace", "trace_bad4.json"), error=True,
substrs=['''error: missing value at traceSession.trace.cpuInfo.family
Context:
{
"processes": [],
"trace": {
"cpuInfo": /* error: missing value */ {
"model": 79,
"stepping": 1,
"vendor": "intel"
},
"type": "intel-pt"
}
}''', "Schema"])
# Now we test an incorrect load address in the intel-pt settings
self.expect("trace load -v " + os.path.join(src_dir, "intelpt-trace", "trace_bad5.json"), error=True,
substrs=['error: expected numeric string at traceSession.processes[0].modules[0].loadAddress',
'"loadAddress": /* error: expected numeric string */ 400000,', "Schema"])
# The following wrong schema will have a valid target and an invalid one. In the case of failure,
# no targets should be created.
self.assertEqual(self.dbg.GetNumTargets(), 0)
self.expect("trace load -v " + os.path.join(src_dir, "intelpt-trace", "trace_bad3.json"), error=True,
substrs=['error: missing value at traceSession.processes[1].pid'])
self.assertEqual(self.dbg.GetNumTargets(), 0)
|
lldb/test/API/commands/trace/TestTraceLoad.py
|
import lldb
from intelpt_testcase import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil
from lldbsuite.test.decorators import *
class TestTraceLoad(TraceIntelPTTestCaseBase):
mydir = TestBase.compute_mydir(__file__)
NO_DEBUG_INFO_TESTCASE = True
def testLoadTrace(self):
src_dir = self.getSourceDir()
trace_definition_file = os.path.join(src_dir, "intelpt-trace", "trace.json")
self.expect("trace load -v " + trace_definition_file, substrs=["intel-pt"])
target = self.dbg.GetSelectedTarget()
process = target.GetProcess()
self.assertEqual(process.GetProcessID(), 1234)
self.assertEqual(process.GetNumThreads(), 1)
self.assertEqual(process.GetThreadAtIndex(0).GetThreadID(), 3842849)
self.assertEqual(target.GetNumModules(), 1)
module = target.GetModuleAtIndex(0)
path = module.GetFileSpec()
self.assertEqual(path.fullpath, os.path.join(src_dir, "intelpt-trace", "a.out"))
self.assertGreater(module.GetNumSections(), 0)
self.assertEqual(module.GetSectionAtIndex(0).GetFileAddress(), 0x400000)
self.assertEqual("6AA9A4E2-6F28-2F33-377D-59FECE874C71-5B41261A", module.GetUUIDString())
# check that the Process and Thread objects were created correctly
self.expect("thread info", substrs=["tid = 3842849"])
self.expect("thread list", substrs=["Process 1234 stopped", "tid = 3842849"])
self.expect("thread trace dump info", substrs=['''Trace technology: intel-pt
thread #1: tid = 3842849
Total number of instructions: 21
Memory usage:
Raw trace size: 4 KiB
Total approximate memory usage (excluding raw trace): 1.27 KiB
Average memory usage per instruction (excluding raw trace): 61.76 bytes
Timing:
Decoding instructions: ''', '''s
Events:
Number of instructions with events: 1
Number of individual events: 1
paused: 1
Errors:
Number of TSC decoding errors: 0'''])
def testLoadInvalidTraces(self):
src_dir = self.getSourceDir()
        # First we test an invalid type
self.expect("trace load -v " + os.path.join(src_dir, "intelpt-trace", "trace_bad.json"), error=True,
substrs=['''error: expected object at traceSession.processes[0]
Context:
{
"processes": [
/* error: expected object */
123
],
"trace": { ... }
}
Schema:
{
"trace": {
"type": "intel-pt",
"cpuInfo": {
"vendor": "intel" | "unknown",
"family": integer,
"model": integer,
"stepping": integer
}
},'''])
# Now we test a missing field in the global session file
self.expect("trace load -v " + os.path.join(src_dir, "intelpt-trace", "trace_bad2.json"), error=True,
substrs=['error: missing value at traceSession.processes[1].triple', "Context", "Schema"])
# Now we test a missing field in the intel-pt settings
self.expect("trace load -v " + os.path.join(src_dir, "intelpt-trace", "trace_bad4.json"), error=True,
substrs=['''error: missing value at traceSession.trace.cpuInfo.family
Context:
{
"processes": [],
"trace": {
"cpuInfo": /* error: missing value */ {
"model": 79,
"stepping": 1,
"vendor": "intel"
},
"type": "intel-pt"
}
}''', "Schema"])
# Now we test an incorrect load address in the intel-pt settings
self.expect("trace load -v " + os.path.join(src_dir, "intelpt-trace", "trace_bad5.json"), error=True,
substrs=['error: expected numeric string at traceSession.processes[0].modules[0].loadAddress',
'"loadAddress": /* error: expected numeric string */ 400000,', "Schema"])
# The following wrong schema will have a valid target and an invalid one. In the case of failure,
# no targets should be created.
self.assertEqual(self.dbg.GetNumTargets(), 0)
self.expect("trace load -v " + os.path.join(src_dir, "intelpt-trace", "trace_bad3.json"), error=True,
substrs=['error: missing value at traceSession.processes[1].pid'])
self.assertEqual(self.dbg.GetNumTargets(), 0)
| 0.525856 | 0.378143 |
import sys
from PyQt5.QtCore import Qt, QRectF, QPointF
from PyQt5.QtGui import QPixmap, QTransform, QBrush, QColor, QPen
from PyQt5.QtWidgets import QApplication, QMainWindow, QWidget, QVBoxLayout, QGraphicsView, QGraphicsScene, QGraphicsPixmapItem, QSizePolicy, QSpacerItem, QGraphicsObject
class MouseBrushObject(QGraphicsObject):
def __init__(self):
QGraphicsObject.__init__(self)
self._size = 10
self._x = 0
self._y = 0
self._pen = None
self._brush = None
self._color = None
self.setColor(QColor(255, 0, 0, 255))
def paint(self, painter, option, widget):
rect = self.boundingRect()
painter.setPen(self._pen)
painter.setBrush(self._brush)
painter.drawEllipse(rect)
def boundingRect(self):
return QRectF(self._x, self._y, self._size, self._size)
def setColor(self, color):
self._color = color
self._pen = QPen(self._color, 1)
self._brush = QBrush(QColor(self._color.red(), self._color.green(), self._color.blue(), 40))
def setSize(self, size):
self._size = size
def setPosition(self, pos):
self._x = pos.x()-self._size/2
self._y = pos.y()-self._size/2
self.setPos(QPointF(self._x, self._y))
class View(QGraphicsView):
def __init__(self, parent=None):
QGraphicsView.__init__(self, parent=parent)
self.setMouseTracking(True)
self.scene = QGraphicsScene(self)
self.setScene(self.scene)
pixmap = QPixmap(300, 300)
self.scene.addItem(QGraphicsPixmapItem(pixmap))
#self.setTransform(QTransform().scale(1, 1).rotate(0))
self.scene.setBackgroundBrush(QBrush(Qt.lightGray))
self._brushItem = MouseBrushObject()
def mouseMoveEvent(self, event):
pos = event.pos()
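        # note: event.pos() is in view (widget) coordinates; self.mapToScene(pos) would convert it
        # to scene coordinates, which is generally what positioning a QGraphicsItem expects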
#pos = self.mapToScene(pos)
#pos = self.mapFromScene(pos)
#pos = self.mapToGlobal(pos)
#pos = self.mapFromGlobal(self.mapToGlobal(pos))
#pos = self.mapToGlobal(self.mapFromGlobal(pos))
#pos = self.mapToGlobal(self.mapFromScene(pos))
self._brushItem.setPosition(pos)
def enterEvent(self, event):
self.scene.addItem(self._brushItem)
return super(View, self).enterEvent(event)
def leaveEvent(self, event):
self.scene.removeItem(self._brushItem)
return super(View, self).leaveEvent(event)
class Viewer(QWidget):
def __init__(self, parent=None):
QWidget.__init__(self, parent=parent)
layout = QVBoxLayout()
self.view = View(self)
self.setLayout(layout)
layout.addWidget(self.view)
class MainWindow(QMainWindow):
def __init__(self):
QMainWindow.__init__(self)
self.viewer = Viewer(self)
layout = QVBoxLayout()
layout.addWidget(self.viewer)
centralwidget = QWidget(self)
centralwidget.setLayout(layout)
self.setCentralWidget(centralwidget)
if __name__ == '__main__':
app = QApplication(sys.argv)
main = MainWindow()
main.show()
sys.exit(app.exec_())
|
temp.py
|
import sys
from PyQt5.QtCore import Qt, QRectF, QPointF
from PyQt5.QtGui import QPixmap, QTransform, QBrush, QColor, QPen
from PyQt5.QtWidgets import QApplication, QMainWindow, QWidget, QVBoxLayout, QGraphicsView, QGraphicsScene, QGraphicsPixmapItem, QSizePolicy, QSpacerItem, QGraphicsObject
class MouseBrushObject(QGraphicsObject):
def __init__(self):
QGraphicsObject.__init__(self)
self._size = 10
self._x = 0
self._y = 0
self._pen = None
self._brush = None
self._color = None
self.setColor(QColor(255, 0, 0, 255))
def paint(self, painter, option, widget):
rect = self.boundingRect()
painter.setPen(self._pen)
painter.setBrush(self._brush)
painter.drawEllipse(rect)
def boundingRect(self):
return QRectF(self._x, self._y, self._size, self._size)
def setColor(self, color):
self._color = color
self._pen = QPen(self._color, 1)
self._brush = QBrush(QColor(self._color.red(), self._color.green(), self._color.blue(), 40))
def setSize(self, size):
self._size = size
def setPosition(self, pos):
self._x = pos.x()-self._size/2
self._y = pos.y()-self._size/2
self.setPos(QPointF(self._x, self._y))
class View(QGraphicsView):
def __init__(self, parent=None):
QGraphicsView.__init__(self, parent=parent)
self.setMouseTracking(True)
self.scene = QGraphicsScene(self)
self.setScene(self.scene)
pixmap = QPixmap(300, 300)
self.scene.addItem(QGraphicsPixmapItem(pixmap))
#self.setTransform(QTransform().scale(1, 1).rotate(0))
self.scene.setBackgroundBrush(QBrush(Qt.lightGray))
self._brushItem = MouseBrushObject()
def mouseMoveEvent(self, event):
pos = event.pos()
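        # note: event.pos() is in view (widget) coordinates; self.mapToScene(pos) would convert it
        # to scene coordinates, which is generally what positioning a QGraphicsItem expects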
#pos = self.mapToScene(pos)
#pos = self.mapFromScene(pos)
#pos = self.mapToGlobal(pos)
#pos = self.mapFromGlobal(self.mapToGlobal(pos))
#pos = self.mapToGlobal(self.mapFromGlobal(pos))
#pos = self.mapToGlobal(self.mapFromScene(pos))
self._brushItem.setPosition(pos)
def enterEvent(self, event):
self.scene.addItem(self._brushItem)
return super(View, self).enterEvent(event)
def leaveEvent(self, event):
self.scene.removeItem(self._brushItem)
return super(View, self).leaveEvent(event)
class Viewer(QWidget):
def __init__(self, parent=None):
QWidget.__init__(self, parent=parent)
layout = QVBoxLayout()
self.view = View(self)
self.setLayout(layout)
layout.addWidget(self.view)
class MainWindow(QMainWindow):
def __init__(self):
QMainWindow.__init__(self)
self.viewer = Viewer(self)
layout = QVBoxLayout()
layout.addWidget(self.viewer)
centralwidget = QWidget(self)
centralwidget.setLayout(layout)
self.setCentralWidget(centralwidget)
if __name__ == '__main__':
app = QApplication(sys.argv)
main = MainWindow()
main.show()
sys.exit(app.exec_())
| 0.401805 | 0.266184 |
import unittest
import numpy as np
from openvino.tools.mo.ops.ctc_loss import CTCLoss
from openvino.tools.mo.front.common.partial_infer.utils import int64_array
from openvino.tools.mo.graph.graph import Node
from unit_tests.utils.graph import build_graph
nodes_attributes = {'logits': {'kind': 'op'},
'logits_data': {'shape': None, 'value': None, 'kind': 'data'},
'logit_length': {'kind': 'op'},
'logit_length_data': {'shape': None, 'value': None, 'kind': 'data'},
'labels': {'kind': 'op'},
'labels_data': {'shape': None, 'value': None, 'kind': 'data'},
'label_length': {'kind': 'op'},
'label_length_data': {'shape': None, 'value': None, 'kind': 'data'},
'blank_index': {'kind': 'op'},
'blank_index_data': {'shape': None, 'value': None, 'kind': 'data'},
'ctcloss_node': {'op': 'CTCLoss', 'kind': 'op', 'preprocess_collapse_repeated': False,
'ctc_merge_repeated': True, 'unique': False},
'output': {'shape': None, 'value': None, 'kind': 'data'}}
# graph 1
edges1 = [('logits', 'logits_data'),
('logit_length', 'logit_length_data'),
('labels', 'labels_data'),
('label_length', 'label_length_data'),
('blank_index', 'blank_index_data'),
('logits_data', 'ctcloss_node', {'in': 0}),
('logit_length_data', 'ctcloss_node', {'in': 1}),
('labels_data', 'ctcloss_node', {'in': 2}),
('label_length_data', 'ctcloss_node', {'in': 3}),
('blank_index_data', 'ctcloss_node', {'in': 4}),
('ctcloss_node', 'output', {'out': 0})]
# valid test case
inputs1 = {'logits_data': {'shape': int64_array([4, 100, 5])},
'logit_length_data': {'shape': int64_array([4])},
'labels_data': {'shape': int64_array([4, 100])},
'label_length_data': {'shape': int64_array([4])},
'blank_index_data': {'shape': int64_array([])}}
# invalid test case with incorrect rank for the second input tensor
inputs2 = {'logits_data': {'shape': int64_array([4, 100, 5])},
'logit_length_data': {'shape': int64_array([4, 3])},
'labels_data': {'shape': int64_array([4, 100])},
'label_length_data': {'shape': int64_array([4])},
'blank_index_data': {'shape': int64_array([])}}
# invalid test case with incorrect time dimension
inputs3 = {'logits_data': {'shape': int64_array([4, 100, 5])},
'logit_length_data': {'shape': int64_array([4])},
'labels_data': {'shape': int64_array([4, 300])},
'label_length_data': {'shape': int64_array([4])},
'blank_index_data': {'shape': int64_array([])}}
class TestCTCLoss(unittest.TestCase):
def test_infer1(self):
graph = build_graph(nodes_attributes, edges1, inputs1)
ctc_loss_node = Node(graph, 'ctcloss_node')
CTCLoss.infer(ctc_loss_node)
# prepare reference results
ref_output_shape = int64_array([4])
# get the result
res_output_shape = graph.node['output']['shape']
self.assertTrue(np.array_equal(ref_output_shape, res_output_shape),
'shapes do not match expected: {} and given: {}'.format(ref_output_shape, res_output_shape))
def test_infer_invalid1(self):
graph = build_graph(nodes_attributes, edges1, inputs2)
ctc_loss_node = Node(graph, 'ctcloss_node')
self.assertRaises(AssertionError, CTCLoss.infer, ctc_loss_node)
def test_infer_invalid2(self):
graph = build_graph(nodes_attributes, edges1, inputs3)
ctc_loss_node = Node(graph, 'ctcloss_node')
self.assertRaises(AssertionError, CTCLoss.infer, ctc_loss_node)
|
tools/mo/unit_tests/mo/ops/ctc_loss_test.py
|
| 0.699254 | 0.348673 |
import errno
import logging
# Import Salt libs
import salt.modules.cmdmod
import salt.utils.files
import salt.utils.path
import salt.utils.platform
__virtualname__ = "iscsi"
# Get logging started
log = logging.getLogger(__name__)
def __virtual__():
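    # Only load these grains when the minion opts explicitly enable them
    # (``iscsi_grains: True``); otherwise stay dormant.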
if __opts__.get("iscsi_grains", False) is False:
return False
else:
return __virtualname__
def iscsi_iqn():
"""
Return iSCSI IQN
"""
grains = {}
grains["iscsi_iqn"] = False
if salt.utils.platform.is_linux():
grains["iscsi_iqn"] = _linux_iqn()
elif salt.utils.platform.is_windows():
grains["iscsi_iqn"] = _windows_iqn()
elif salt.utils.platform.is_aix():
grains["iscsi_iqn"] = _aix_iqn()
return grains
def _linux_iqn():
"""
Return iSCSI IQN from a Linux host.
"""
ret = []
initiator = "/etc/iscsi/initiatorname.iscsi"
try:
with salt.utils.files.fopen(initiator, "r") as _iscsi:
for line in _iscsi:
line = line.strip()
if line.startswith("InitiatorName="):
ret.append(line.split("=", 1)[1])
except OSError as ex:
if ex.errno != errno.ENOENT:
log.debug("Error while accessing '%s': %s", initiator, ex)
return ret
def _aix_iqn():
"""
Return iSCSI IQN from an AIX host.
"""
ret = []
aix_cmd = "lsattr -E -l iscsi0 | grep initiator_name"
aix_ret = salt.modules.cmdmod.run(aix_cmd)
if aix_ret[0].isalpha():
try:
ret.append(aix_ret.split()[1].rstrip())
except IndexError:
pass
return ret
def _windows_iqn():
"""
Return iSCSI IQN from a Windows host.
"""
ret = []
wmic = salt.utils.path.which("wmic")
if not wmic:
return ret
namespace = r"\\root\WMI"
path = "MSiSCSIInitiator_MethodClass"
get = "iSCSINodeName"
cmd_ret = salt.modules.cmdmod.run_all(
"{} /namespace:{} path {} get {} /format:table"
"".format(wmic, namespace, path, get)
)
for line in cmd_ret["stdout"].splitlines():
if line.startswith("iqn."):
line = line.rstrip()
            ret.append(line)
return ret
|
salt/grains/iscsi.py
|
| 0.360264 | 0.047492 |
import treetensor.torch as ttorch
from .base import choose_mark
# noinspection DuplicatedCode,PyUnresolvedReferences
class TestTorchTensorAutograd:
@choose_mark()
def test_requires_grad(self):
tt1 = ttorch.tensor({
'a': [2, 3, 4.0],
'b': {'x': [[5, 6], [7, 8.0]]}
}, requires_grad=True)
assert tt1.requires_grad.all()
tt1.a.requires_grad_(False)
assert not tt1.requires_grad.all()
assert tt1.requires_grad.any()
tt1.b.x.requires_grad_(False)
assert not tt1.requires_grad.all()
assert not tt1.requires_grad.any()
@choose_mark()
def test_requires_grad_(self):
tt1 = ttorch.tensor({
'a': [2, 3, 4.0],
'b': {'x': [[5, 6], [7, 8.0]]}
})
assert not tt1.requires_grad.any()
tt1.requires_grad_(True)
assert tt1.requires_grad.all()
tt1.a.requires_grad_(False)
assert not tt1.requires_grad.all()
assert tt1.requires_grad.any()
tt1.b.x.requires_grad_(False)
assert not tt1.requires_grad.all()
assert not tt1.requires_grad.any()
@choose_mark()
def test_grad(self):
tt1 = ttorch.tensor({
'a': [2, 3, 4.0],
'b': {'x': [[5, 6], [7, 8.0]]}
}, requires_grad=True)
mq = tt1.mean() ** 2
mq.backward()
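        # d/dx of mean(x) ** 2 is 2 * mean / n; here mean = 35 / 7 = 5 and n = 7,
        # so every element's gradient is 10 / 7 ≈ 1.4286.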
assert ttorch.isclose(tt1.grad, ttorch.tensor({
'a': [1.4286, 1.4286, 1.4286],
'b': {'x': [[1.4286, 1.4286],
[1.4286, 1.4286]]},
}), atol=1e-4).all()
@choose_mark()
def test_detach(self):
tt1 = ttorch.tensor({
'a': [2, 3, 4.0],
'b': {'x': [[5, 6], [7, 8.0]]}
}, requires_grad=True)
assert tt1.requires_grad.all()
tt1r = tt1.detach()
assert tt1.requires_grad.all()
assert tt1r is not tt1
assert not tt1r.requires_grad.any()
@choose_mark()
def test_detach_(self):
tt1 = ttorch.tensor({
'a': [2, 3, 4.0],
'b': {'x': [[5, 6], [7, 8.0]]}
}, requires_grad=True)
assert tt1.requires_grad.all()
tt1r = tt1.detach_()
assert tt1r is tt1
assert not tt1.requires_grad.any()
|
test/torch/tensor/test_autograd.py
|
| 0.479991 | 0.606964 |
import re
import sys
import time
from unittest.case import SkipTest
import mock
import pytest
import six
from ddtrace.constants import ANALYTICS_SAMPLE_RATE_KEY
from ddtrace.constants import ENV_KEY
from ddtrace.constants import ERROR_MSG
from ddtrace.constants import ERROR_STACK
from ddtrace.constants import ERROR_TYPE
from ddtrace.constants import SERVICE_VERSION_KEY
from ddtrace.constants import SPAN_MEASURED_KEY
from ddtrace.constants import VERSION_KEY
from ddtrace.ext import SpanTypes
from ddtrace.span import Span
from tests.utils import TracerTestCase
from tests.utils import assert_is_measured
from tests.utils import assert_is_not_measured
from tests.utils import override_global_config
class SpanTestCase(TracerTestCase):
def test_ids(self):
s = Span(tracer=None, name="span.test")
assert s.trace_id
assert s.span_id
assert not s.parent_id
s2 = Span(tracer=None, name="t", trace_id=1, span_id=2, parent_id=1)
assert s2.trace_id == 1
assert s2.span_id == 2
assert s2.parent_id == 1
def test_tags(self):
s = Span(tracer=None, name="test.span")
s.set_tag("a", "a")
s.set_tag("b", 1)
s.set_tag("c", "1")
d = s.to_dict()
assert d["meta"] == dict(a="a", c="1")
assert d["metrics"] == dict(b=1)
def test_numeric_tags(self):
s = Span(tracer=None, name="test.span")
s.set_tag("negative", -1)
s.set_tag("zero", 0)
s.set_tag("positive", 1)
s.set_tag("large_int", 2 ** 53)
s.set_tag("really_large_int", (2 ** 53) + 1)
s.set_tag("large_negative_int", -(2 ** 53))
s.set_tag("really_large_negative_int", -((2 ** 53) + 1))
s.set_tag("float", 12.3456789)
s.set_tag("negative_float", -12.3456789)
s.set_tag("large_float", 2.0 ** 53)
s.set_tag("really_large_float", (2.0 ** 53) + 1)
d = s.to_dict()
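        # Integers beyond 2**53 cannot be represented exactly as float metrics,
        # so they are expected to land in meta as strings instead.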
assert d["meta"] == dict(
really_large_int=str(((2 ** 53) + 1)),
really_large_negative_int=str(-((2 ** 53) + 1)),
)
assert d["metrics"] == {
"negative": -1,
"zero": 0,
"positive": 1,
"large_int": 2 ** 53,
"large_negative_int": -(2 ** 53),
"float": 12.3456789,
"negative_float": -12.3456789,
"large_float": 2.0 ** 53,
"really_large_float": (2.0 ** 53) + 1,
}
def test_set_tag_bool(self):
s = Span(tracer=None, name="test.span")
s.set_tag("true", True)
s.set_tag("false", False)
d = s.to_dict()
assert d["meta"] == dict(true="True", false="False")
assert "metrics" not in d
def test_set_tag_metric(self):
s = Span(tracer=None, name="test.span")
s.set_tag("test", "value")
assert s.meta == dict(test="value")
assert s.metrics == dict()
s.set_tag("test", 1)
assert s.meta == dict()
assert s.metrics == dict(test=1)
def test_set_valid_metrics(self):
s = Span(tracer=None, name="test.span")
s.set_metric("a", 0)
s.set_metric("b", -12)
s.set_metric("c", 12.134)
s.set_metric("d", 1231543543265475686787869123)
s.set_metric("e", "12.34")
d = s.to_dict()
expected = {
"a": 0,
"b": -12,
"c": 12.134,
"d": 1231543543265475686787869123,
"e": 12.34,
}
assert d["metrics"] == expected
def test_set_invalid_metric(self):
s = Span(tracer=None, name="test.span")
invalid_metrics = [None, {}, [], s, "quarante-douze", float("nan"), float("inf"), 1j]
for i, m in enumerate(invalid_metrics):
k = str(i)
s.set_metric(k, m)
assert s.get_metric(k) is None
def test_set_numpy_metric(self):
try:
import numpy as np
except ImportError:
raise SkipTest("numpy not installed")
s = Span(tracer=None, name="test.span")
s.set_metric("a", np.int64(1))
assert s.get_metric("a") == 1
assert type(s.get_metric("a")) == float
def test_tags_not_string(self):
        # ensure set_tag tolerates values whose repr() raises instead of blowing up
class Foo(object):
def __repr__(self):
1 / 0
s = Span(tracer=None, name="test.span")
s.set_tag("a", Foo())
def test_finish(self):
# ensure span.finish() marks the end time of the span
s = Span(None, "test.span")
sleep = 0.05
time.sleep(sleep)
s.finish()
assert s.duration >= sleep, "%s < %s" % (s.duration, sleep)
def test_finish_no_tracer(self):
# ensure finish works with no tracer without raising exceptions
s = Span(tracer=None, name="test.span")
s.finish()
def test_finish_called_multiple_times(self):
# we should only record a span the first time finish is called on it
s = Span(self.tracer, "bar")
s.finish()
s.finish()
def test_finish_set_span_duration(self):
        # If the duration is set on a span explicitly, the span should be recorded
        # with that duration
s = Span(tracer=None, name="test.span")
s.duration = 1337.0
s.finish()
assert s.duration == 1337.0
def test_setter_casts_duration_ns_as_int(self):
s = Span(tracer=None, name="test.span")
s.duration = 3.2
s.finish()
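        # Durations are stored internally as integer nanoseconds: 3.2 s -> 3_200_000_000 ns.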
assert s.duration == 3.2
assert s.duration_ns == 3200000000
assert isinstance(s.duration_ns, int)
def test_get_span_returns_none_by_default(self):
s = Span(tracer=None, name="test.span")
assert s.duration is None
def test_traceback_with_error(self):
s = Span(None, "test.span")
try:
1 / 0
except ZeroDivisionError:
s.set_traceback()
else:
assert 0, "should have failed"
assert s.error
assert "by zero" in s.get_tag(ERROR_MSG)
assert "ZeroDivisionError" in s.get_tag(ERROR_TYPE)
def test_traceback_without_error(self):
s = Span(None, "test.span")
s.set_traceback()
assert not s.error
assert not s.get_tag(ERROR_MSG)
assert not s.get_tag(ERROR_TYPE)
assert "in test_traceback_without_error" in s.get_tag(ERROR_STACK)
def test_ctx_mgr(self):
s = Span(self.tracer, "bar")
assert not s.duration
assert not s.error
e = Exception("boo")
try:
with s:
time.sleep(0.01)
raise e
except Exception as out:
assert out == e
assert s.duration > 0, s.duration
assert s.error
assert s.get_tag(ERROR_MSG) == "boo"
assert "Exception" in s.get_tag(ERROR_TYPE)
assert s.get_tag(ERROR_STACK)
else:
assert 0, "should have failed"
def test_span_type(self):
s = Span(tracer=None, name="test.span", service="s", resource="r", span_type=SpanTypes.WEB)
s.set_tag("a", "1")
s.set_meta("b", "2")
s.finish()
d = s.to_dict()
assert d
assert d["span_id"] == s.span_id
assert d["trace_id"] == s.trace_id
assert d["parent_id"] == s.parent_id
assert d["meta"] == {"a": "1", "b": "2"}
assert d["type"] == "web"
assert d["error"] == 0
assert type(d["error"]) == int
def test_span_to_dict(self):
s = Span(tracer=None, name="test.span", service="s", resource="r")
s.span_type = "foo"
s.set_tag("a", "1")
s.set_meta("b", "2")
s.finish()
d = s.to_dict()
assert d
assert d["span_id"] == s.span_id
assert d["trace_id"] == s.trace_id
assert d["parent_id"] == s.parent_id
assert d["meta"] == {"a": "1", "b": "2"}
assert d["type"] == "foo"
assert d["error"] == 0
assert type(d["error"]) == int
def test_span_to_dict_sub(self):
parent = Span(tracer=None, name="test.span", service="s", resource="r")
s = Span(tracer=None, name="test.span", service="s", resource="r")
s._parent = parent
s.span_type = "foo"
s.set_tag("a", "1")
s.set_meta("b", "2")
s.finish()
d = s.to_dict()
assert d
assert d["span_id"] == s.span_id
assert d["trace_id"] == s.trace_id
assert d["parent_id"] == s.parent_id
assert d["meta"] == {"a": "1", "b": "2"}
assert d["type"] == "foo"
assert d["error"] == 0
assert type(d["error"]) == int
def test_span_boolean_err(self):
s = Span(tracer=None, name="foo.bar", service="s", resource="r")
s.error = True
s.finish()
d = s.to_dict()
assert d
assert d["error"] == 1
assert type(d["error"]) == int
@mock.patch("ddtrace.span.log")
def test_numeric_tags_none(self, span_log):
s = Span(tracer=None, name="test.span")
s.set_tag(ANALYTICS_SAMPLE_RATE_KEY, None)
d = s.to_dict()
assert d
assert "metrics" not in d
# Ensure we log a debug message
span_log.debug.assert_called_once_with(
"ignoring not number metric %s:%s",
ANALYTICS_SAMPLE_RATE_KEY,
None,
)
def test_numeric_tags_true(self):
s = Span(tracer=None, name="test.span")
s.set_tag(ANALYTICS_SAMPLE_RATE_KEY, True)
d = s.to_dict()
assert d
expected = {ANALYTICS_SAMPLE_RATE_KEY: 1.0}
assert d["metrics"] == expected
def test_numeric_tags_value(self):
s = Span(tracer=None, name="test.span")
s.set_tag(ANALYTICS_SAMPLE_RATE_KEY, 0.5)
d = s.to_dict()
assert d
expected = {ANALYTICS_SAMPLE_RATE_KEY: 0.5}
assert d["metrics"] == expected
def test_numeric_tags_bad_value(self):
s = Span(tracer=None, name="test.span")
s.set_tag(ANALYTICS_SAMPLE_RATE_KEY, "Hello")
d = s.to_dict()
assert d
assert "metrics" not in d
def test_set_tag_none(self):
s = Span(tracer=None, name="root.span", service="s", resource="r")
assert s.meta == dict()
s.set_tag("custom.key", "100")
assert s.meta == {"custom.key": "100"}
s.set_tag("custom.key", None)
assert s.meta == {"custom.key": "None"}
def test_duration_zero(self):
s = Span(tracer=None, name="foo.bar", service="s", resource="r", start=123)
s.finish(finish_time=123)
assert s.duration_ns == 0
assert s.duration == 0
def test_start_int(self):
s = Span(tracer=None, name="foo.bar", service="s", resource="r", start=123)
assert s.start == 123
assert s.start_ns == 123000000000
s = Span(tracer=None, name="foo.bar", service="s", resource="r", start=123.123)
assert s.start == 123.123
assert s.start_ns == 123123000000
s = Span(tracer=None, name="foo.bar", service="s", resource="r", start=123.123)
s.start = 234567890.0
assert s.start == 234567890
assert s.start_ns == 234567890000000000
def test_duration_int(self):
s = Span(tracer=None, name="foo.bar", service="s", resource="r")
s.finish()
assert isinstance(s.duration_ns, int)
assert isinstance(s.duration, float)
s = Span(tracer=None, name="foo.bar", service="s", resource="r", start=123)
s.finish(finish_time=123.2)
assert s.duration_ns == 200000000
assert s.duration == 0.2
s = Span(tracer=None, name="foo.bar", service="s", resource="r", start=123.1)
s.finish(finish_time=123.2)
assert s.duration_ns == 100000000
assert s.duration == 0.1
s = Span(tracer=None, name="foo.bar", service="s", resource="r", start=122)
s.finish(finish_time=123)
assert s.duration_ns == 1000000000
assert s.duration == 1
def test_set_tag_version(self):
s = Span(tracer=None, name="test.span")
s.set_tag(VERSION_KEY, "1.2.3")
assert s.get_tag(VERSION_KEY) == "1.2.3"
assert s.get_tag(SERVICE_VERSION_KEY) is None
s.set_tag(SERVICE_VERSION_KEY, "service.version")
assert s.get_tag(VERSION_KEY) == "service.version"
assert s.get_tag(SERVICE_VERSION_KEY) == "service.version"
def test_set_tag_env(self):
s = Span(tracer=None, name="test.span")
s.set_tag(ENV_KEY, "prod")
assert s.get_tag(ENV_KEY) == "prod"
@pytest.mark.parametrize(
"value,assertion",
[
(None, assert_is_measured),
(1, assert_is_measured),
(1.0, assert_is_measured),
(-1, assert_is_measured),
(True, assert_is_measured),
("true", assert_is_measured),
# DEV: Ends up being measured because we do `bool("false")` which is `True`
("false", assert_is_measured),
(0, assert_is_not_measured),
(0.0, assert_is_not_measured),
(False, assert_is_not_measured),
],
)
def test_set_tag_measured(value, assertion):
s = Span(tracer=None, name="test.span")
s.set_tag(SPAN_MEASURED_KEY, value)
assertion(s)
def test_set_tag_measured_not_set():
# Span is not measured by default
s = Span(tracer=None, name="test.span")
assert_is_not_measured(s)
def test_set_tag_measured_no_value():
s = Span(tracer=None, name="test.span")
s.set_tag(SPAN_MEASURED_KEY)
assert_is_measured(s)
def test_set_tag_measured_change_value():
s = Span(tracer=None, name="test.span")
s.set_tag(SPAN_MEASURED_KEY, True)
assert_is_measured(s)
s.set_tag(SPAN_MEASURED_KEY, False)
assert_is_not_measured(s)
s.set_tag(SPAN_MEASURED_KEY)
assert_is_measured(s)
@mock.patch("ddtrace.span.log")
def test_span_key(span_log):
# Span tag keys must be strings
s = Span(tracer=None, name="test.span")
s.set_tag(123, True)
span_log.warning.assert_called_once_with("Ignoring tag pair %s:%s. Key must be a string.", 123, True)
assert s.get_tag(123) is None
assert s.get_tag("123") is None
span_log.reset_mock()
s.set_tag(None, "val")
span_log.warning.assert_called_once_with("Ignoring tag pair %s:%s. Key must be a string.", None, "val")
assert s.get_tag(123.32) is None
def test_span_finished():
span = Span(None, None)
assert span.finished is False
assert span.duration_ns is None
span.finished = True
assert span.finished is True
assert span.duration_ns is not None
duration = span.duration_ns
span.finished = True
assert span.finished is True
assert span.duration_ns == duration
span.finished = False
assert span.finished is False
span.finished = True
assert span.finished is True
assert span.duration_ns != duration
def test_span_unicode_set_tag():
span = Span(None, None)
span.set_tag("key", u"😌")
span.set_tag("😐", u"😌")
span._set_str_tag("key", u"😌")
span._set_str_tag(u"😐", u"😌")
@pytest.mark.skipif(sys.version_info.major != 2, reason="This test only applies Python 2")
@mock.patch("ddtrace.span.log")
def test_span_binary_unicode_set_tag(span_log):
span = Span(None, None)
span.set_tag("key", "🤔")
span._set_str_tag("key_str", "🤔")
# only span.set_tag() will fail
span_log.warning.assert_called_once_with("error setting tag %s, ignoring it", "key", exc_info=True)
assert "key" not in span.meta
assert span.meta["key_str"] == u"🤔"
@pytest.mark.skipif(sys.version_info.major == 2, reason="This test does not apply to Python 2")
@mock.patch("ddtrace.span.log")
def test_span_bytes_string_set_tag(span_log):
span = Span(None, None)
span.set_tag("key", b"\<KEY>")
span._set_str_tag("key_str", b"\<KEY>")
assert span.meta["key"] == "b'\\<KEY>
assert span.meta["key_str"] == "🤔"
span_log.warning.assert_not_called()
@mock.patch("ddtrace.span.log")
def test_span_encoding_set_str_tag(span_log):
span = Span(None, None)
span._set_str_tag("foo", u"/?foo=bar&baz=정상처리".encode("euc-kr"))
span_log.warning.assert_not_called()
assert span.meta["foo"] == u"/?foo=bar&baz=����ó��"
def test_span_nonstring_set_str_tag_exc():
span = Span(None, None)
with pytest.raises(TypeError):
span._set_str_tag("foo", dict(a=1))
assert "foo" not in span.meta
@mock.patch("ddtrace.span.log")
def test_span_nonstring_set_str_tag_warning(span_log):
with override_global_config(dict(_raise=False)):
span = Span(None, None)
span._set_str_tag("foo", dict(a=1))
span_log.warning.assert_called_once_with(
"Failed to set text tag '%s'",
"foo",
exc_info=True,
)
def test_span_ignored_exceptions():
s = Span(None, None)
s._ignore_exception(ValueError)
with pytest.raises(ValueError):
with s:
raise ValueError()
assert s.error == 0
assert s.get_tag(ERROR_MSG) is None
assert s.get_tag(ERROR_TYPE) is None
assert s.get_tag(ERROR_STACK) is None
s = Span(None, None)
s._ignore_exception(ValueError)
with pytest.raises(ValueError):
with s:
raise ValueError()
with pytest.raises(RuntimeError):
with s:
raise RuntimeError()
assert s.error == 1
assert s.get_tag(ERROR_MSG) is not None
assert "RuntimeError" in s.get_tag(ERROR_TYPE)
assert s.get_tag(ERROR_STACK) is not None
def test_span_ignored_exception_multi():
s = Span(None, None)
s._ignore_exception(ValueError)
s._ignore_exception(RuntimeError)
with pytest.raises(ValueError):
with s:
raise ValueError()
with pytest.raises(RuntimeError):
with s:
raise RuntimeError()
assert s.error == 0
assert s.get_tag(ERROR_MSG) is None
assert s.get_tag(ERROR_TYPE) is None
assert s.get_tag(ERROR_STACK) is None
def test_span_ignored_exception_subclass():
s = Span(None, None)
s._ignore_exception(Exception)
with pytest.raises(ValueError):
with s:
raise ValueError()
with pytest.raises(RuntimeError):
with s:
raise RuntimeError()
assert s.error == 0
assert s.get_tag(ERROR_MSG) is None
assert s.get_tag(ERROR_TYPE) is None
assert s.get_tag(ERROR_STACK) is None
def test_on_finish_single_callback():
m = mock.Mock()
s = Span(None, "test", on_finish=[m])
m.assert_not_called()
s.finish()
m.assert_called_once_with(s)
def test_on_finish_multi_callback():
m1 = mock.Mock()
m2 = mock.Mock()
s = Span(None, "test", on_finish=[m1, m2])
s.finish()
m1.assert_called_once_with(s)
m2.assert_called_once_with(s)
@pytest.mark.parametrize("arg", ["span_id", "trace_id", "parent_id"])
def test_span_preconditions(arg):
Span(None, "test", **{arg: None})
with pytest.raises(TypeError):
Span(None, "test", **{arg: "foo"})
def test_span_pprint():
root = Span(None, "test.span", service="s", resource="r", span_type=SpanTypes.WEB)
root.set_tag("t", "v")
root.set_metric("m", 1.0)
root.finish()
actual = root.pprint()
assert "name='test.span'" in actual
assert "service='s'" in actual
assert "resource='r'" in actual
assert "type='web'" in actual
assert "error=0" in actual
assert ("tags={'t': 'v'}" if six.PY3 else "tags={'t': u'v'}") in actual
assert "metrics={'m': 1.0}" in actual
assert re.search("id=[0-9]+", actual) is not None
assert re.search("trace_id=[0-9]+", actual) is not None
assert "parent_id=None" in actual
assert re.search("duration=[0-9.]+", actual) is not None
assert re.search("start=[0-9.]+", actual) is not None
assert re.search("end=[0-9.]+", actual) is not None
root = Span(None, "test.span", service="s", resource="r", span_type=SpanTypes.WEB)
actual = root.pprint()
assert "duration=None" in actual
assert "end=None" in actual
root = Span(None, "test.span", service="s", resource="r", span_type=SpanTypes.WEB)
root.error = 1
actual = root.pprint()
assert "error=1" in actual
root = Span(None, "test.span", service="s", resource="r", span_type=SpanTypes.WEB)
root.set_tag(u"😌", u"😌")
actual = root.pprint()
assert (u"tags={'😌': '😌'}" if six.PY3 else "tags={u'\\U0001f60c': u'\\U0001f60c'}") in actual
root = Span(None, "test.span", service=object())
actual = root.pprint()
assert "service=<object object at" in actual
def test_manual_context_usage():
span1 = Span(None, "span1")
span2 = Span(None, "span2", context=span1.context)
span2.context.sampling_priority = 2
assert span1.context.sampling_priority == 2
span1.context.sampling_priority = 1
assert span2.context.sampling_priority == 1
assert span1.context.sampling_priority == 1
def test_set_exc_info_with_unicode():
def get_exception_span(exception):
span = Span(None, "span1")
try:
raise exception
except Exception:
type_, value_, traceback_ = sys.exc_info()
span.set_exc_info(type_, value_, traceback_)
return span
exception_span = get_exception_span(Exception(u"DataDog/水"))
assert u"DataDog/水" == exception_span.get_tag(ERROR_MSG)
if six.PY3:
exception_span = get_exception_span(Exception("DataDog/水"))
assert "DataDog/水" == exception_span.get_tag(ERROR_MSG)
|
tests/tracer/test_span.py
|
| 0.428473 | 0.479747 |
import asyncio
import email.header
import email.message
import email.mime.multipart
import email.mime.text
import socket
import ssl
import sys
import traceback
from pathlib import Path
import hypothesis
import pytest
from aiosmtplib import SMTP, SMTPStatus
from aiosmtplib.sync import shutdown_loop
from .smtpd import RecordingHandler, SMTPDController, TestSMTPD
try:
import uvloop
except ImportError:
HAS_UVLOOP = False
else:
HAS_UVLOOP = True
BASE_CERT_PATH = Path("tests/certs/")
IS_PYPY = hasattr(sys, "pypy_version_info")
# pypy can take a while to generate data, so don't fail the test due to health checks.
if IS_PYPY:
base_settings = hypothesis.settings(
suppress_health_check=(hypothesis.HealthCheck.too_slow,)
)
else:
base_settings = hypothesis.settings()
hypothesis.settings.register_profile("dev", parent=base_settings, max_examples=10)
hypothesis.settings.register_profile("ci", parent=base_settings, max_examples=100)
class AsyncPytestWarning(pytest.PytestWarning):
pass
def pytest_addoption(parser):
parser.addoption(
"--event-loop",
action="store",
default="asyncio",
choices=["asyncio", "uvloop"],
help="event loop to run tests on",
)
parser.addoption(
"--bind-addr",
action="store",
default="127.0.0.1",
help="server address to bind on, e.g 127.0.0.1",
)
@pytest.fixture(scope="session")
def event_loop_policy(request):
loop_type = request.config.getoption("--event-loop")
if loop_type == "uvloop":
if not HAS_UVLOOP:
raise RuntimeError("uvloop not installed.")
old_policy = asyncio.get_event_loop_policy()
policy = uvloop.EventLoopPolicy()
asyncio.set_event_loop_policy(policy)
request.addfinalizer(lambda: asyncio.set_event_loop_policy(old_policy))
return asyncio.get_event_loop_policy()
@pytest.fixture(scope="function")
def event_loop(request, event_loop_policy):
verbosity = request.config.getoption("verbose", default=0)
old_loop = event_loop_policy.get_event_loop()
loop = event_loop_policy.new_event_loop()
event_loop_policy.set_event_loop(loop)
def handle_async_exception(loop, context):
message = "{}: {}".format(context["message"], repr(context["exception"]))
if verbosity > 1:
message += "\n"
message += "Future: {}".format(repr(context["future"]))
message += "\nTraceback:\n"
message += "".join(traceback.format_list(context["source_traceback"]))
request.node.warn(AsyncPytestWarning(message))
loop.set_exception_handler(handle_async_exception)
def cleanup():
shutdown_loop(loop)
event_loop_policy.set_event_loop(old_loop)
request.addfinalizer(cleanup)
return loop
@pytest.fixture(scope="session")
def hostname(request):
return "localhost"
@pytest.fixture(scope="session")
def bind_address(request):
"""Server side address for socket binding"""
return request.config.getoption("--bind-addr")
@pytest.fixture(
scope="function",
params=(
str,
bytes,
pytest.param(
lambda path: path,
marks=pytest.mark.xfail(
sys.version_info < (3, 7),
reason="os.PathLike support introduced in 3.7.",
),
),
),
ids=("str", "bytes", "pathlike"),
)
def socket_path(request, tmp_path):
if sys.platform.startswith("darwin"):
# Work around OSError: AF_UNIX path too long
tmp_dir = Path("/tmp") # nosec
else:
tmp_dir = tmp_path
index = 0
socket_path = tmp_dir / "aiosmtplib-test{}".format(index)
while socket_path.exists():
index += 1
socket_path = tmp_dir / "aiosmtplib-test{}".format(index)
return request.param(socket_path)
@pytest.fixture(scope="function")
def compat32_message(request):
message = email.message.Message()
message["To"] = email.header.Header("<EMAIL>")
message["From"] = email.header.Header("<EMAIL>")
message["Subject"] = "A message"
message.set_payload("Hello World")
return message
@pytest.fixture(scope="function")
def mime_message(request):
message = email.mime.multipart.MIMEMultipart()
message["To"] = "<EMAIL>"
message["From"] = "<EMAIL>"
message["Subject"] = "A message"
message.attach(email.mime.text.MIMEText("Hello World"))
return message
@pytest.fixture(scope="function", params=["mime_multipart", "compat32"])
def message(request, compat32_message, mime_message):
if request.param == "compat32":
return compat32_message
else:
return mime_message
@pytest.fixture(scope="session")
def recipient_str(request):
return "<EMAIL>"
@pytest.fixture(scope="session")
def sender_str(request):
return "<EMAIL>"
@pytest.fixture(scope="session")
def message_str(request, recipient_str, sender_str):
return (
"Content-Type: multipart/mixed; "
'boundary="===============6842273139637972052=="\n'
"MIME-Version: 1.0\n"
"To: <EMAIL>\n"
"From: [email protected]\n"
"Subject: A message\n\n"
"--===============6842273139637972052==\n"
'Content-Type: text/plain; charset="us-ascii"\n'
"MIME-Version: 1.0\n"
"Content-Transfer-Encoding: 7bit\n\n"
"Hello World\n"
"--===============6842273139637972052==--\n"
)
@pytest.fixture(scope="function")
def received_messages(request):
return []
@pytest.fixture(scope="function")
def received_commands(request):
return []
@pytest.fixture(scope="function")
def smtpd_responses(request):
return []
@pytest.fixture(scope="function")
def smtpd_handler(request, received_messages, received_commands, smtpd_responses):
return RecordingHandler(received_messages, received_commands, smtpd_responses)
@pytest.fixture(scope="session")
def smtpd_class(request):
return TestSMTPD
@pytest.fixture(scope="session")
def valid_cert_path(request):
return str(BASE_CERT_PATH.joinpath("selfsigned.crt"))
@pytest.fixture(scope="session")
def valid_key_path(request):
return str(BASE_CERT_PATH.joinpath("selfsigned.key"))
@pytest.fixture(scope="session")
def invalid_cert_path(request):
return str(BASE_CERT_PATH.joinpath("invalid.crt"))
@pytest.fixture(scope="session")
def invalid_key_path(request):
return str(BASE_CERT_PATH.joinpath("invalid.key"))
@pytest.fixture(scope="session")
def client_tls_context(request, valid_cert_path, valid_key_path):
tls_context = ssl.create_default_context(ssl.Purpose.SERVER_AUTH)
tls_context.check_hostname = False
tls_context.verify_mode = ssl.CERT_NONE
return tls_context
@pytest.fixture(scope="session")
def server_tls_context(request, valid_cert_path, valid_key_path):
tls_context = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
tls_context.load_cert_chain(valid_cert_path, keyfile=valid_key_path)
return tls_context
@pytest.fixture(scope="function")
def smtpd_server(
request,
event_loop,
bind_address,
hostname,
smtpd_class,
smtpd_handler,
server_tls_context,
):
def factory():
return smtpd_class(
smtpd_handler,
hostname=hostname,
enable_SMTPUTF8=False,
tls_context=server_tls_context,
)
server = event_loop.run_until_complete(
event_loop.create_server(
factory, host=bind_address, port=0, family=socket.AF_INET
)
)
def close_server():
server.close()
event_loop.run_until_complete(server.wait_closed())
request.addfinalizer(close_server)
return server
@pytest.fixture(scope="function")
def smtpd_server_port(request, smtpd_server):
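    # The server was bound to port 0, so ask the socket which ephemeral port the OS picked.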
return smtpd_server.sockets[0].getsockname()[1]
@pytest.fixture(scope="function")
def smtpd_server_smtputf8(
request,
event_loop,
bind_address,
hostname,
smtpd_class,
smtpd_handler,
server_tls_context,
):
def factory():
return smtpd_class(
smtpd_handler,
hostname=hostname,
enable_SMTPUTF8=True,
tls_context=server_tls_context,
)
server = event_loop.run_until_complete(
event_loop.create_server(
factory, host=bind_address, port=0, family=socket.AF_INET
)
)
def close_server():
server.close()
event_loop.run_until_complete(server.wait_closed())
request.addfinalizer(close_server)
return server
@pytest.fixture(scope="function")
def smtpd_server_smtputf8_port(request, smtpd_server_smtputf8):
return smtpd_server_smtputf8.sockets[0].getsockname()[1]
@pytest.fixture(scope="function")
def smtpd_server_socket_path(
    request, socket_path, event_loop, hostname, smtpd_class, smtpd_handler, server_tls_context
):
def factory():
return smtpd_class(
smtpd_handler,
hostname=hostname,
enable_SMTPUTF8=False,
tls_context=server_tls_context,
)
server = event_loop.run_until_complete(
event_loop.create_unix_server(factory, path=socket_path)
)
def close_server():
server.close()
event_loop.run_until_complete(server.wait_closed())
request.addfinalizer(close_server)
return server
@pytest.fixture(scope="session")
def smtpd_response_handler_factory(request):
def smtpd_response(
response_text, second_response_text=None, write_eof=False, close_after=False
):
async def response_handler(smtpd, *args, **kwargs):
if args and args[0]:
smtpd.session.host_name = args[0]
if response_text is not None:
await smtpd.push(response_text)
if write_eof:
smtpd.transport.write_eof()
if second_response_text is not None:
await smtpd.push(second_response_text)
if close_after:
smtpd.transport.close()
return response_handler
return smtpd_response
@pytest.fixture(scope="function")
def smtp_client(request, event_loop, hostname, smtpd_server_port):
client = SMTP(hostname=hostname, port=smtpd_server_port, timeout=1.0)
return client
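# Illustrative use of the fixture in a test (not part of this conftest):
#     async def test_ehlo(smtp_client, smtpd_server):
#         async with smtp_client:
#             response = await smtp_client.ehlo()
#             assert response.code == SMTPStatus.completed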
@pytest.fixture(scope="function")
def smtp_client_smtputf8(request, event_loop, hostname, smtpd_server_smtputf8_port):
client = SMTP(hostname=hostname, port=smtpd_server_smtputf8_port, timeout=1.0)
return client
class EchoServerProtocol(asyncio.Protocol):
def connection_made(self, transport):
self.transport = transport
def data_received(self, data):
self.transport.write(data)
@pytest.fixture(scope="function")
def echo_server(request, bind_address, event_loop):
server = event_loop.run_until_complete(
event_loop.create_server(
EchoServerProtocol, host=bind_address, port=0, family=socket.AF_INET
)
)
def close_server():
server.close()
event_loop.run_until_complete(server.wait_closed())
request.addfinalizer(close_server)
return server
@pytest.fixture(scope="function")
def echo_server_port(request, echo_server):
return echo_server.sockets[0].getsockname()[1]
@pytest.fixture(
params=[
SMTPStatus.mailbox_unavailable,
SMTPStatus.unrecognized_command,
SMTPStatus.bad_command_sequence,
SMTPStatus.syntax_error,
],
ids=[
SMTPStatus.mailbox_unavailable.name,
SMTPStatus.unrecognized_command.name,
SMTPStatus.bad_command_sequence.name,
SMTPStatus.syntax_error.name,
],
)
def error_code(request):
return request.param
@pytest.fixture(scope="function")
def tls_smtpd_server(
request, event_loop, bind_address, smtpd_class, smtpd_handler, server_tls_context
):
def factory():
return smtpd_class(
smtpd_handler,
hostname=bind_address,
enable_SMTPUTF8=False,
tls_context=server_tls_context,
)
server = event_loop.run_until_complete(
event_loop.create_server(
factory,
host=bind_address,
port=0,
ssl=server_tls_context,
family=socket.AF_INET,
)
)
def close_server():
server.close()
event_loop.run_until_complete(server.wait_closed())
request.addfinalizer(close_server)
return server
@pytest.fixture(scope="function")
def tls_smtpd_server_port(request, tls_smtpd_server):
return tls_smtpd_server.sockets[0].getsockname()[1]
@pytest.fixture(scope="function")
def tls_smtp_client(request, event_loop, hostname, tls_smtpd_server_port):
tls_client = SMTP(
hostname=hostname,
port=tls_smtpd_server_port,
use_tls=True,
validate_certs=False,
)
return tls_client
@pytest.fixture(scope="function")
def threaded_smtpd_server(request, bind_address, smtpd_handler):
controller = SMTPDController(smtpd_handler, hostname=bind_address, port=0)
controller.start()
request.addfinalizer(controller.stop)
return controller.server
@pytest.fixture(scope="function")
def threaded_smtpd_server_port(request, threaded_smtpd_server):
return threaded_smtpd_server.sockets[0].getsockname()[1]
@pytest.fixture(scope="function")
def smtp_client_threaded(request, hostname, threaded_smtpd_server_port):
client = SMTP(hostname=hostname, port=threaded_smtpd_server_port, timeout=1.0)
return client
|
tests/conftest.py
|
| 0.315841 | 0.121295 |
"""Client and server classes corresponding to protobuf-defined services."""
import grpc
from . import poi_pb2 as poi_dot_poi__pb2
class PoiServiceStub(object):
"""
Allow users to get poi information
"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.SubscribePoiReport = channel.unary_stream(
"/mavsdk.rpc.poi.PoiService/SubscribePoiReport",
request_serializer=poi_dot_poi__pb2.SubscribePoiReportRequest.SerializeToString,
response_deserializer=poi_dot_poi__pb2.PoiReportResponse.FromString,
)
class PoiServiceServicer(object):
"""
Allow users to get poi information
"""
def SubscribePoiReport(self, request, context):
"""Subscribe to 'poi' updates.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def add_PoiServiceServicer_to_server(servicer, server):
rpc_method_handlers = {
"SubscribePoiReport": grpc.unary_stream_rpc_method_handler(
servicer.SubscribePoiReport,
request_deserializer=poi_dot_poi__pb2.SubscribePoiReportRequest.FromString,
response_serializer=poi_dot_poi__pb2.PoiReportResponse.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
"mavsdk.rpc.poi.PoiService", rpc_method_handlers
)
server.add_generic_rpc_handlers((generic_handler,))
# This class is part of an EXPERIMENTAL API.
class PoiService(object):
"""
Allow users to get poi information
"""
@staticmethod
def SubscribePoiReport(
request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None,
):
return grpc.experimental.unary_stream(
request,
target,
"/mavsdk.rpc.poi.PoiService/SubscribePoiReport",
poi_dot_poi__pb2.SubscribePoiReportRequest.SerializeToString,
poi_dot_poi__pb2.PoiReportResponse.FromString,
options,
channel_credentials,
insecure,
call_credentials,
compression,
wait_for_ready,
timeout,
metadata,
)
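# --- Usage sketch (assumed; the channel address below is a placeholder). A
# caller would normally drive the generated stub like this: open a channel,
# build the request message, and iterate over the server-streamed
# PoiReportResponse messages.
def _subscribe_poi_report_sketch(address="localhost:50051"):
    with grpc.insecure_channel(address) as channel:
        stub = PoiServiceStub(channel)
        request = poi_dot_poi__pb2.SubscribePoiReportRequest()
        for response in stub.SubscribePoiReport(request):
            yield response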
|
mavsdk/poi_pb2_grpc.py
|
"""Client and server classes corresponding to protobuf-defined services."""
import grpc
from . import poi_pb2 as poi_dot_poi__pb2
class PoiServiceStub(object):
"""
Allow users to get poi information
"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.SubscribePoiReport = channel.unary_stream(
"/mavsdk.rpc.poi.PoiService/SubscribePoiReport",
request_serializer=poi_dot_poi__pb2.SubscribePoiReportRequest.SerializeToString,
response_deserializer=poi_dot_poi__pb2.PoiReportResponse.FromString,
)
class PoiServiceServicer(object):
"""
Allow users to get poi information
"""
def SubscribePoiReport(self, request, context):
"""Subscribe to 'poi' updates.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def add_PoiServiceServicer_to_server(servicer, server):
rpc_method_handlers = {
"SubscribePoiReport": grpc.unary_stream_rpc_method_handler(
servicer.SubscribePoiReport,
request_deserializer=poi_dot_poi__pb2.SubscribePoiReportRequest.FromString,
response_serializer=poi_dot_poi__pb2.PoiReportResponse.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
"mavsdk.rpc.poi.PoiService", rpc_method_handlers
)
server.add_generic_rpc_handlers((generic_handler,))
# This class is part of an EXPERIMENTAL API.
class PoiService(object):
"""
Allow users to get poi information
"""
@staticmethod
def SubscribePoiReport(
request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None,
):
return grpc.experimental.unary_stream(
request,
target,
"/mavsdk.rpc.poi.PoiService/SubscribePoiReport",
poi_dot_poi__pb2.SubscribePoiReportRequest.SerializeToString,
poi_dot_poi__pb2.PoiReportResponse.FromString,
options,
channel_credentials,
insecure,
call_credentials,
compression,
wait_for_ready,
timeout,
metadata,
)
| 0.756537 | 0.17172 |
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Utility script that directly loads in data from another place to
the MephistoDB under a specified task run, using MockRequester and
MockWorkers as we don't know where the data actually came from.
!! Currently in development, not necessarily for use !!
"""
from mephisto.abstractions.blueprints.static_react_task.static_react_blueprint import (
StaticReactBlueprint,
BLUEPRINT_TYPE_STATIC_REACT,
)
from mephisto.abstractions.blueprint import AgentState
from mephisto.abstractions.blueprints.abstract.static_task.static_agent_state import (
StaticAgentState,
)
from mephisto.abstractions.providers.mock.mock_requester import MockRequester
from mephisto.abstractions.providers.mock.mock_worker import MockWorker
from mephisto.abstractions.providers.mock.mock_agent import MockAgent
from mephisto.abstractions.databases.local_database import LocalMephistoDB
from mephisto.data_model.assignment import Assignment, InitializationData
from mephisto.data_model.unit import Unit
from mephisto.data_model.agent import Agent
from mephisto.tools.data_browser import DataBrowser as MephistoDataBrowser
from mephisto.data_model.task_run import TaskRun
from typing import List, Dict, Any, cast
import json
def main():
db = LocalMephistoDB()
# Get the requester that the run will be requested from
all_requesters = db.find_requesters(provider_type="mock")
print("You have the following requesters available for use on mock:")
r_names = [r.requester_name for r in all_requesters]
print(sorted(r_names))
use_name = input("Enter the name of the requester to use, or a new requester:\n>> ")
while use_name not in r_names:
confirm = input(
f"{use_name} is not in the requester list. "
f"Would you like to create a new MockRequester with this name? (y)/n > "
)
if confirm.lower().startswith("n"):
use_name = input(f"Okay, enter another name from {r_names} \n >> ")
else:
MockRequester.new(db, use_name)
r_names.append(use_name)
requester = db.find_requesters(provider_type="mock", requester_name=use_name)[0]
# Get the worker that will be acting as the worker on this task
all_workers = db.find_workers(provider_type="mock")
print("You have the following workers available for use on mock:")
w_names = [r.worker_name for r in all_workers]
print(sorted(w_names))
use_name = input("Enter the name of the worker to use, or a new worker:\n>> ")
while use_name not in w_names:
confirm = input(
f"{use_name} is not in the worker list. "
f"Would you like to create a new MockWorker with this name? (y)/n > "
)
if confirm.lower().startswith("n"):
use_name = input(f"Okay, enter another name from {w_names} \n >> ")
else:
MockWorker.new(db, use_name)
w_names.append(use_name)
worker = db.find_workers(provider_type="mock", worker_name=use_name)[0]
# Get or create a task run for this
tasks = db.find_tasks()
task_names = [
t.task_name for t in tasks if t.task_type == BLUEPRINT_TYPE_STATIC_REACT
]
print(f"Use an existing run? ")
print(f"You have the following existing mock runs:")
print(sorted(task_names))
use_name = input("Enter the name of the task_run to use, or make a new one:\n>> ")
while use_name not in task_names:
confirm = input(
f"{use_name} is not in the task name list. "
f"Would you like to create a new TaskRun with this name? (y)/n > "
)
if confirm.lower().startswith("n"):
use_name = input(f"Okay, enter another name from {task_names} \n >> ")
else:
task_id = db.new_task(use_name, BLUEPRINT_TYPE_STATIC_REACT)
task_names.append(use_name)
task_run_id = db.new_task_run(
task_id,
requester.db_id,
json.dumps({}),
"mock",
BLUEPRINT_TYPE_STATIC_REACT,
requester.is_sandbox(),
)
task_run = TaskRun.get(db, task_run_id)
tasks = db.find_tasks(task_name=use_name)
valid_tasks = [t for t in tasks if t.task_type == BLUEPRINT_TYPE_STATIC_REACT]
task_run = db.find_task_runs(task_id=valid_tasks[0].db_id)[0]
print(f"Found task run: {task_run}")
test_annotations: List[Dict[str, Any]] = [
{
"inputs": {"something": True, "something else": False},
"outputs": {"some": "annotations"},
}
]
# Write a new task, and then complete it
for annotation in test_annotations:
assignment_id = db.new_assignment(
task_run.task_id,
task_run.db_id,
task_run.requester_id,
task_run.task_type,
task_run.provider_type,
task_run.sandbox,
)
assignment = Assignment.get(db, assignment_id)
assignment.write_assignment_data(
InitializationData(unit_data=[{}], shared=annotation["inputs"])
)
unit_id = db.new_unit(
task_run.task_id,
task_run.db_id,
task_run.requester_id,
assignment_id,
0, # Unit_index
0, # reward
task_run.provider_type,
task_run.task_type,
task_run.sandbox,
)
unit = Unit.get(db, unit_id)
agent = MockAgent.new(db, worker, unit)
agent_state = cast("StaticAgentState", agent.state)
agent_state.state["inputs"] = annotation["inputs"]
agent_state.state["outputs"] = annotation["outputs"]
agent.state.save_data()
agent.mark_done()
agent.update_status(AgentState.STATUS_COMPLETED)
# Show tasks appear in MephistoDB:
mephisto_data_browser = MephistoDataBrowser(db=db)
units = mephisto_data_browser.get_units_for_task_name(input("Input task name: "))
for unit in units:
print(mephisto_data_browser.get_data_from_unit(unit))
if __name__ == "__main__":
main()
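# --- Usage sketch (assumed; `task_name` is whatever run name was entered in the
# prompts above). After the interactive run, the written units can be
# re-inspected with the same data-browser calls the script ends with:
def inspect_loaded_task(task_name: str) -> None:
    db = LocalMephistoDB()
    browser = MephistoDataBrowser(db=db)
    for unit in browser.get_units_for_task_name(task_name):
        print(browser.get_data_from_unit(unit))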
|
mephisto/scripts/local_db/load_data_to_mephisto_db.py
|
| 0.590543 | 0.214362 |
import ast
import json
import os
import glob
import sys
import requests
import inspect
from clebear.configs import cfg
from requests_toolbelt import MultipartEncoder
def load_module(file_path, module_name=None):
"""
Load a module by name and search path
This function should work with python 2.7 and 3.x
Returns None if Module could not be loaded.
"""
if module_name is None:
module_name = os.path.basename(os.path.splitext(file_path)[0])
if sys.version_info >= (3, 5,):
import importlib.util
spec = importlib.util.spec_from_file_location(module_name, file_path)
if not spec:
return
module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(module)
return module
else:
import imp
mod = imp.load_source(module_name, file_path)
return mod
def auto_module_by_py_dir(py_dir):
func_dir, func_basename = os.path.split(py_dir)
import_basename = func_basename.strip().split("_v")[0]
import_dir = glob.glob(os.path.join(func_dir, "../../../**", f"{import_basename}.py"), recursive=True)
assert len(import_dir) == 1
import_dir = import_dir[0]
module = load_module(import_dir)
return module
def decorator_default(method_name, class_name="Solution"):
def decorator(func):
def decorated(*args, **kwargs):
kws = dict(class_name=class_name, method_name=method_name)
signature = inspect.signature(func)
for key, value in signature.parameters.items():
                if value.default is not inspect.Parameter.empty:  # keep falsy defaults (0, False, "") as well
kws.update({key: value.default})
kws.update(kwargs)
for a, b in zip(args, signature.parameters):
kws.update({b: a})
new_kws = dict()
for key, value in kws.items():
new_kws.update({key: value if isinstance(value, str) else value.__name__})
new_args = list()
for i, a, b in zip(range(max(len(args), len(signature.parameters))), args, signature.parameters):
new_args.append(new_kws[b])
new_kws.pop(b)
if "_v" in os.path.basename(func.__code__.co_filename):
module = auto_module_by_py_dir(func.__code__.co_filename)
return getattr(module, "ctest")(*new_args, **new_kws)
return func(*new_args, **new_kws)
return decorated
return decorator
def login(session, user_agent=None):
if user_agent is None:
user_agent = r'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/44.0.2403.157 Safari/537.36'
url = 'https://leetcode-cn.com'
# cookies = session.get(url, proxies=cfg.LOGINmsg.proxy_dict)
url = "https://leetcode-cn.com/accounts/login"
params_data = dict(
login=cfg.LOGINmsg.email,
password=cfg.LOGINmsg.password,
next="problems"
)
headers = {
"User-Agent": user_agent,
"Connection": 'keep-alive',
'Referer': 'https://leetcode-cn.com/accounts/login/',
"origin": "https://leetcode-cn.com"
}
m = MultipartEncoder(params_data)
headers['Content-Type'] = m.content_type
session.post(url, headers=headers, data=m,
timeout=10, allow_redirects=False,
proxies=cfg.LOGINmsg.proxy_dict)
    is_login = session.cookies.get('LEETCODE_SESSION') is not None
return is_login, session
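# --- Usage sketch (assumed; credentials and proxy settings are read from
# clebear.configs.cfg, exactly as login() expects):
def _login_example():
    session = requests.Session()
    logged_in, session = login(session)
    return logged_in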
|
clebear/core/utils.py
|
| 0.331985 | 0.175503 |
import functools
from . import basic_type
from .schema_dsl_common import *
def _check_object_type(input_object, path):
if not isinstance(input_object, dict):
raise TypeError(get_message(path, 'Should be an object'))
def _validate_outgoing_object(input_object, path):
if input_object is None:
return None
_check_object_type(input_object, path)
def _process_field(field, parent, input_object):
path = parent + [field['name']]
result = functools.reduce(lambda res, f: f(res, path), field['filters'], input_object)
return convert(field['field_type'], result, path)
def _collect_object_result(input_object, path, fields):
result = {}
for field in fields:
field_name = field['name']
if field_name in input_object:
result[field_name] = _process_field(field, path, input_object[field_name])
else:
field_result = _process_field(field, path, None)
if field_result is not None:
result[field_name] = field_result
return result
def _convert_object(schema, input_object, path):
path = path or []
_validate_outgoing_object(input_object, path)
if input_object is None:
return None
return _collect_object_result(input_object, path, schema['fields'])
def _check_array_type(input_object, path):
if not isinstance(input_object, list):
raise TypeError(get_message(path, 'Should be an array'))
def _validate_array(input_object, path):
if input_object is None:
return
_check_array_type(input_object, path)
def _filter_array_element(input_object, path, element_type, filters):
result = functools.reduce(lambda res, f: f(res, path), filters, input_object)
return convert(element_type, result, path)
def _collect_array_result(input_object, path, element_type, filters):
return [_filter_array_element(input_object[i], path + [str(i)], element_type, filters) for i in
range(len(input_object))]
def _convert_array(schema, input_object, path):
path = path or []
_validate_array(input_object, path)
if input_object is None:
return None
return _collect_array_result(input_object, path, schema['element_type'], schema['filters'])
def _schema_wrap(converter):
def f(schema, input_object, path):
return converter(input_object, path)
return f
TYPE_FUNCTION_MAP = {
'Any': _schema_wrap(basic_type.any_type),
'String': _schema_wrap(basic_type.string_type),
'Integer': _schema_wrap(basic_type.integer_type),
'Number': _schema_wrap(basic_type.number_type),
'Boolean': _schema_wrap(basic_type.boolean_type),
'StringMap': _schema_wrap(basic_type.string_map),
'Object': _convert_object,
'Array': _convert_array
}
def convert(schema, input_object, path=None):
return TYPE_FUNCTION_MAP[schema['type']](schema, input_object, path)
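# --- Usage sketch (assumed; the schema literal is illustrative and follows the
# shapes this module expects: an 'Object' schema carries 'fields', each field
# has a 'name', a list of 'filters' (callables of (value, path)) and a nested
# 'field_type' schema; an 'Array' schema carries 'element_type' and 'filters').
_EXAMPLE_SCHEMA = {
    'type': 'Object',
    'fields': [
        {'name': 'id', 'filters': [], 'field_type': {'type': 'Integer'}},
        {'name': 'tags', 'filters': [],
         'field_type': {'type': 'Array', 'filters': [],
                        'element_type': {'type': 'String'}}},
    ],
}
# convert(_EXAMPLE_SCHEMA, {'id': 1, 'tags': ['a', 'b']}) is expected to yield
# {'id': 1, 'tags': ['a', 'b']}, with each value passed through basic_type.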
|
python3/src/json_outgoing.py
|
| 0.512205 | 0.11928 |
from __future__ import absolute_import
import ctypes
import platform
import pkg_resources
if platform.system() == "Windows":
name = "awkward.dll"
elif platform.system() == "Darwin":
name = "libawkward.dylib"
else:
name = "libawkward.so"
libpath = pkg_resources.resource_filename("awkward1", name)
lib = ctypes.cdll.LoadLibrary(libpath)
# bool awkward_ArrayBuilder_length(void* fillablearray,
# int64_t* result);
ArrayBuilder_length = lib.awkward_ArrayBuilder_length
ArrayBuilder_length.name = "ArrayBuilder.length"
ArrayBuilder_length.argtypes = [ctypes.c_voidp, ctypes.POINTER(ctypes.c_int64)]
ArrayBuilder_length.restype = ctypes.c_uint8
# bool awkward_ArrayBuilder_clear(void* fillablearray);
ArrayBuilder_clear = lib.awkward_ArrayBuilder_clear
ArrayBuilder_clear.name = "ArrayBuilder.clear"
ArrayBuilder_clear.argtypes = [ctypes.c_voidp]
ArrayBuilder_clear.restype = ctypes.c_uint8
# bool awkward_ArrayBuilder_null(void* fillablearray);
ArrayBuilder_null = lib.awkward_ArrayBuilder_null
ArrayBuilder_null.name = "ArrayBuilder.null"
ArrayBuilder_null.argtypes = [ctypes.c_voidp]
ArrayBuilder_null.restype = ctypes.c_uint8
# bool awkward_ArrayBuilder_boolean(void* fillablearray,
# bool x);
ArrayBuilder_boolean = lib.awkward_ArrayBuilder_boolean
ArrayBuilder_boolean.name = "ArrayBuilder.boolean"
ArrayBuilder_boolean.argtypes = [ctypes.c_voidp, ctypes.c_uint8]
ArrayBuilder_boolean.restype = ctypes.c_uint8
# bool awkward_ArrayBuilder_integer(void* fillablearray,
# int64_t x);
ArrayBuilder_integer = lib.awkward_ArrayBuilder_integer
ArrayBuilder_integer.name = "ArrayBuilder.integer"
ArrayBuilder_integer.argtypes = [ctypes.c_voidp, ctypes.c_int64]
ArrayBuilder_integer.restype = ctypes.c_uint8
# bool awkward_ArrayBuilder_real(void* fillablearray,
# double x);
ArrayBuilder_real = lib.awkward_ArrayBuilder_real
ArrayBuilder_real.name = "ArrayBuilder.real"
ArrayBuilder_real.argtypes = [ctypes.c_voidp, ctypes.c_double]
ArrayBuilder_real.restype = ctypes.c_uint8
# bool awkward_ArrayBuilder_beginlist(void* fillablearray);
ArrayBuilder_beginlist = lib.awkward_ArrayBuilder_beginlist
ArrayBuilder_beginlist.name = "ArrayBuilder.beginlist"
ArrayBuilder_beginlist.argtypes = [ctypes.c_voidp]
ArrayBuilder_beginlist.restype = ctypes.c_uint8
# bool awkward_ArrayBuilder_endlist(void* fillablearray);
ArrayBuilder_endlist = lib.awkward_ArrayBuilder_endlist
ArrayBuilder_endlist.name = "ArrayBuilder.endlist"
ArrayBuilder_endlist.argtypes = [ctypes.c_voidp]
ArrayBuilder_endlist.restype = ctypes.c_uint8
# uint8_t awkward_ArrayBuilder_begintuple(void* fillablearray,
# int64_t numfields);
ArrayBuilder_begintuple = lib.awkward_ArrayBuilder_begintuple
ArrayBuilder_begintuple.name = "ArrayBuilder.begintuple"
ArrayBuilder_begintuple.argtypes = [ctypes.c_voidp, ctypes.c_int64]
ArrayBuilder_begintuple.restype = ctypes.c_uint8
# uint8_t awkward_ArrayBuilder_index(void* fillablearray,
# int64_t index);
ArrayBuilder_index = lib.awkward_ArrayBuilder_index
ArrayBuilder_index.name = "ArrayBuilder.index"
ArrayBuilder_index.argtypes = [ctypes.c_voidp, ctypes.c_int64]
ArrayBuilder_index.restype = ctypes.c_uint8
# uint8_t awkward_ArrayBuilder_endtuple(void* fillablearray);
ArrayBuilder_endtuple = lib.awkward_ArrayBuilder_endtuple
ArrayBuilder_endtuple.name = "ArrayBuilder.endtuple"
ArrayBuilder_endtuple.argtypes = [ctypes.c_voidp]
ArrayBuilder_endtuple.restype = ctypes.c_uint8
# uint8_t awkward_ArrayBuilder_beginrecord(void* fillablearray);
ArrayBuilder_beginrecord = lib.awkward_ArrayBuilder_beginrecord
ArrayBuilder_beginrecord.name = "ArrayBuilder.beginrecord"
ArrayBuilder_beginrecord.argtypes = [ctypes.c_voidp]
ArrayBuilder_beginrecord.restype = ctypes.c_uint8
# uint8_t awkward_ArrayBuilder_beginrecord_fast(void* fillablearray,
# const char* name);
ArrayBuilder_beginrecord_fast = lib.awkward_ArrayBuilder_beginrecord_fast
ArrayBuilder_beginrecord_fast.name = "ArrayBuilder.beginrecord_fast"
ArrayBuilder_beginrecord_fast.argtypes = [ctypes.c_voidp, ctypes.c_voidp]
ArrayBuilder_beginrecord_fast.restype = ctypes.c_uint8
# uint8_t awkward_ArrayBuilder_beginrecord_check(void* fillablearray,
# const char* name);
ArrayBuilder_beginrecord_check = lib.awkward_ArrayBuilder_beginrecord_check
ArrayBuilder_beginrecord_check.name = "ArrayBuilder.beginrecord_check"
ArrayBuilder_beginrecord_check.argtypes = [ctypes.c_voidp, ctypes.c_voidp]
ArrayBuilder_beginrecord_check.restype = ctypes.c_uint8
# uint8_t awkward_ArrayBuilder_field_fast(void* fillablearray,
# const char* key);
ArrayBuilder_field_fast = lib.awkward_ArrayBuilder_field_fast
ArrayBuilder_field_fast.name = "ArrayBuilder.field_fast"
ArrayBuilder_field_fast.argtypes = [ctypes.c_voidp, ctypes.c_voidp]
ArrayBuilder_field_fast.restype = ctypes.c_uint8
# uint8_t awkward_ArrayBuilder_field_check(void* fillablearray,
# const char* key);
ArrayBuilder_field_check = lib.awkward_ArrayBuilder_field_check
ArrayBuilder_field_check.name = "ArrayBuilder.field_check"
ArrayBuilder_field_check.argtypes = [ctypes.c_voidp, ctypes.c_voidp]
ArrayBuilder_field_check.restype = ctypes.c_uint8
# uint8_t awkward_ArrayBuilder_endrecord(void* fillablearray);
ArrayBuilder_endrecord = lib.awkward_ArrayBuilder_endrecord
ArrayBuilder_endrecord.name = "ArrayBuilder.endrecord"
ArrayBuilder_endrecord.argtypes = [ctypes.c_voidp]
ArrayBuilder_endrecord.restype = ctypes.c_uint8
# uint8_t awkward_ArrayBuilder_append_nowrap(void* fillablearray,
# const void* shared_ptr_ptr,
# int64_t at);
ArrayBuilder_append_nowrap = lib.awkward_ArrayBuilder_append_nowrap
ArrayBuilder_append_nowrap.name = "ArrayBuilder.append_nowrap"
ArrayBuilder_append_nowrap.argtypes = [ctypes.c_voidp, ctypes.c_voidp, ctypes.c_int64]
ArrayBuilder_append_nowrap.restype = ctypes.c_uint8
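# --- Usage sketch (assumed). Every binding above follows the same calling
# convention: pass the raw ArrayBuilder pointer owned by the C++ layer plus any
# scalar arguments, and inspect the uint8 flag that comes back. Obtaining the
# pointer (and interpreting the flag) is the job of the higher-level awkward1
# code; the helper below only illustrates the ctypes plumbing.
def _builder_length_sketch(builder_ptr):
    result = ctypes.c_int64(0)
    flag = ArrayBuilder_length(builder_ptr, ctypes.byref(result))
    return flag, result.value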
|
src/awkward1/_libawkward.py
|
| 0.313735 | 0.057019 |
import sys
import numpy as np
from datetime import datetime
from sklearn.metrics import accuracy_score
from sklearn.neighbors import KNeighborsClassifier
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
def get_clf_model(name='knn', n=1, solver='liblinear', max_iter=200):
"""
:param name: name of the classifier, one of rf:randomForest, lda: Linear Discriminant Analysis,
lr: logistic regression, and knn: k-nearest-neighbour
:param n: number of neighbours for KNN
:param solver: solver of logistic regression. Can be liblinear, lbfgs
:param max_iter: Number of maximum iterations for logistic regression
:return: scikit-learn object of a classifier
"""
if name == 'rf':
return RandomForestClassifier(random_state=0)
elif name == 'lda':
return LinearDiscriminantAnalysis()
elif name == 'lr':
return LogisticRegression(solver=solver, max_iter=max_iter)
elif name == 'knn':
return KNeighborsClassifier(n_neighbors=n)
else:
print("CLF not implemented")
sys.exit(0)
def shallow_clf_accuracy(labeled_train_feat, train_labels, test_image_feat, test_labels, name='knn', n=1,
solver='liblinear'):
"""
:param labeled_train_feat: training examples' embeddings
:param train_labels: labels of training examples
:param test_image_feat: test examples' embeddings
:param test_labels: labels of test examples
:param name: name of classifier, rf, lda, lr, knn
:param n: number of nearest neighbours for KNN
:param solver: solver if name of classifier is lr(logistic regression), one of liblinear, lbfgs
:return: computed accuracy
"""
true_test_labels = np.array(test_labels)
clf = get_clf_model(name.lower(), n, solver)
clf.fit(labeled_train_feat, train_labels)
pred_labels = clf.predict(test_image_feat)
accuracy = accuracy_score(true_test_labels, pred_labels)
return pred_labels, accuracy
def date_diff_in_seconds(dt2, dt1):
"""
Computes difference in two datetime objects
"""
timedelta = dt2 - dt1
return timedelta.days * 24 * 3600 + timedelta.seconds
def dhms_from_seconds(seconds):
minutes, seconds = divmod(seconds, 60)
hours, minutes = divmod(minutes, 60)
days, hours = divmod(hours, 24)
return days, hours, minutes, seconds
def program_duration(dt1, prefix=''):
"""
Returns string for program duration: #days #hours ...
"""
dt2 = datetime.now()
dtwithoutseconds = dt2.replace(second=0, microsecond=0)
seconds = date_diff_in_seconds(dt2, dt1)
abc = dhms_from_seconds(seconds)
if abc[0] > 0:
text = " {} days, {} hours, {} minutes, {} seconds".format(abc[0], abc[1], abc[2], abc[3])
elif abc[1] > 0:
text = " {} hours, {} minutes, {} seconds".format(abc[1], abc[2], abc[3])
elif abc[2] > 0:
text = " {} minutes, {} seconds".format(abc[2], abc[3])
else:
text = " {} seconds".format(abc[2], abc[3])
return prefix + text + ' at ' + str(dtwithoutseconds)
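# --- Usage sketch (assumed; the embeddings below are tiny synthetic arrays,
# only meant to show the calling convention of shallow_clf_accuracy):
def _shallow_clf_example():
    rng = np.random.RandomState(0)
    train_feat, train_labels = rng.rand(20, 4), [0] * 10 + [1] * 10
    test_feat, test_labels = rng.rand(6, 4), [0, 1, 0, 1, 0, 1]
    preds, acc = shallow_clf_accuracy(
        train_feat, train_labels, test_feat, test_labels, name='knn', n=1
    )
    return preds, acc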
|
utils.py
|
| 0.686685 | 0.551936 |
import json
from package.decorator_csrf_setting import my_csrf_decorator
from package.request_method_limit import post_limit, login_limit
from package.response_data import get_res_json
from package.session_manage import set_user_session, clear_session, is_logined
from .models import UserModel
from .forms import RegisterForm, LoginForm, SendRegCodeForm
from .utils import is_can_reg
from mail_pusher import utils as mail_utils
from mail_pusher.models import RegVerifyMailModel
from config.username_banned import BANNED_USERNAME
# Create your views here.
@my_csrf_decorator()
@post_limit
def send_email_regcode(request):
    # Load the request data
data = json.loads(request.body)
    # Validate the form
uf = SendRegCodeForm(data)
    # Is the data valid?
if uf.is_valid() is False:
        # Return the error message
return get_res_json(code=0, msg=uf.get_form_error_msg())
    # Get the username and email
username = uf.cleaned_data['username']
email = uf.cleaned_data['email']
if request.META.get('HTTP_X_FORWARDED_FOR'):
ip = request.META.get('HTTP_X_FORWARDED_FOR')
else:
ip = request.META.get('REMOTE_ADDR')
    # 1. If the account or email is already in use, registration cannot continue
    # 2. Check how often codes are being sent
    # 3. Send the verification-code email
    # Check for an existing account first (an already-registered user obviously should not be sent a code)
search_info = is_can_reg(username=username, email=email)
if search_info is not None:
return get_res_json(code=0, msg=search_info)
    # Check whether an email can be sent
is_can_send = mail_utils.can_send_regcode_email(email=email, ip=ip)
if is_can_send is not None:
return get_res_json(code=0, msg=is_can_send)
    # Create an email record
new_mail_info = RegVerifyMailModel.objects.create(email=email, ip=ip)
    # Send the email
send_result = mail_utils.send_regcode_email(email=email, regcode=new_mail_info.verify_code)
if send_result.code == 200:
return get_res_json(code=send_result.code, msg='邮件发送成功,请访问邮箱查看验证码')
else:
return get_res_json(code=send_result.code, msg='邮件发送失败,请重试或联系管理员。\n失败原因:%s' % send_result.msg)
@my_csrf_decorator()
@post_limit
def register(request):
    # Load the request data
data = json.loads(request.body)
    # Validate the form
uf = RegisterForm(data)
    # Is the data valid?
if uf.is_valid() is False:
        # Return the error message
return get_res_json(code=0, msg=uf.get_form_error_msg())
    # Get the username and password (stripping any spaces)
username = uf.cleaned_data['username'].replace(' ', '')
    password = uf.cleaned_data['password'].replace(' ', '')
email = uf.cleaned_data['email'].replace(' ', '')
regcode = uf.cleaned_data['regcode'].replace(' ', '')
if username in BANNED_USERNAME:
return get_res_json(code=0, msg='非法的用户名,请勿使用敏感词作为用户名')
    # 1. If the account or email is already in use, registration cannot continue
    # 2. Check whether a record exists for this code + email and, if so, whether it has expired
    # 3. Create the registration record and set the (hashed) password
    # First check whether the account or email already exists
search_info = is_can_reg(username=username, email=email)
if search_info is not None:
return get_res_json(code=0, msg=search_info)
    # Look up the verification-code record
check_result = mail_utils.is_regcode_correct(email=email, regcode=regcode)
if check_result is not None:
return get_res_json(code=0, msg=check_result)
user = UserModel.objects.create(username=username, email=email)
    # Set the password
user.set_pw(password)
user.save()
set_user_session(request, user)
return get_res_json(code=200, msg='注册成功', data={
'username': username,
'userid': user.id
})
@my_csrf_decorator()
@post_limit
def login(request):
    # Load the request data
data = json.loads(request.body)
    # Validate the form
uf = LoginForm(data)
    # Is the data valid?
if uf.is_valid() is False:
        # Return the error message
return get_res_json(code=0, msg=uf.get_form_error_msg())
    # Get the username and password
username = uf.cleaned_data['username']
    password = uf.cleaned_data['password']
    # Look up the user first
search_info = UserModel.objects.order_by('-id').filter(username=username)
if len(search_info) == 0:
return get_res_json(code=0, msg='该用户名不存在或密码错误。\n重设密码请联系 QQ:20004604')
user = search_info[0]
    # Verify the password
if user.is_pw_correct(password) is False:
        # Wrong password
return get_res_json(code=0, msg='该用户名不存在或密码错误。\n重设密码请联系 QQ:20004604')
    # Login succeeded; set the session
set_user_session(request, user)
user.set_last_login()
user.save()
return get_res_json(code=200, msg='登录成功', data={
'username': username,
'userid': user.id
})
@my_csrf_decorator()
@post_limit
@login_limit
def logout(request):
try:
clear_session(request)
return get_res_json(code=200)
except BaseException as e:
print(e)
return get_res_json(code=0, msg='未知错误')
@my_csrf_decorator()
@post_limit
def had_logined(request):
if is_logined(request) is True:
return get_res_json(code=200, data={
'username': request.session.get('username'),
'userid': request.session.get('id'),
})
else:
return get_res_json(code=5, msg='')
|
user/views.py
|
| 0.219672 | 0.077622 |
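# A minimal, hypothetical urls.py sketch showing how the user/views.py views
# above could be wired up. The module path "user.views" and the URL patterns
# are assumptions made for illustration; the original project may register
# these routes differently.
from django.urls import path
from user import views
urlpatterns = [
    path('api/user/send_email_regcode/', views.send_email_regcode),
    path('api/user/register/', views.register),
    path('api/user/login/', views.login),
    path('api/user/logout/', views.logout),
    path('api/user/had_logined/', views.had_logined),
]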
import numpy as np
import pytest
from pandas import (
CategoricalIndex,
DatetimeIndex,
Index,
Int64Index,
NaT,
Period,
PeriodIndex,
Timedelta,
UInt64Index,
period_range,
)
import pandas._testing as tm
class TestPeriodIndexAsType:
@pytest.mark.parametrize("dtype", [float, "timedelta64", "timedelta64[ns]"])
def test_astype_raises(self, dtype):
# GH#13149, GH#13209
idx = PeriodIndex(["2016-05-16", "NaT", NaT, np.NaN], freq="D")
msg = "Cannot cast PeriodArray to dtype"
with pytest.raises(TypeError, match=msg):
idx.astype(dtype)
def test_astype_conversion(self):
# GH#13149, GH#13209
idx = PeriodIndex(["2016-05-16", "NaT", NaT, np.NaN], freq="D", name="idx")
result = idx.astype(object)
expected = Index(
[Period("2016-05-16", freq="D")] + [Period(NaT, freq="D")] * 3,
dtype="object",
name="idx",
)
tm.assert_index_equal(result, expected)
with tm.assert_produces_warning(FutureWarning):
result = idx.astype(np.int64)
expected = Int64Index(
[16937] + [-9223372036854775808] * 3, dtype=np.int64, name="idx"
)
tm.assert_index_equal(result, expected)
result = idx.astype(str)
expected = Index([str(x) for x in idx], name="idx")
tm.assert_index_equal(result, expected)
idx = period_range("1990", "2009", freq="A", name="idx")
with tm.assert_produces_warning(FutureWarning):
result = idx.astype("i8")
tm.assert_index_equal(result, Index(idx.asi8, name="idx"))
tm.assert_numpy_array_equal(result.values, idx.asi8)
def test_astype_uint(self):
arr = period_range("2000", periods=2, name="idx")
expected = UInt64Index(np.array([10957, 10958], dtype="uint64"), name="idx")
with tm.assert_produces_warning(FutureWarning):
tm.assert_index_equal(arr.astype("uint64"), expected)
tm.assert_index_equal(arr.astype("uint32"), expected)
def test_astype_object(self):
idx = PeriodIndex([], freq="M")
exp = np.array([], dtype=object)
tm.assert_numpy_array_equal(idx.astype(object).values, exp)
tm.assert_numpy_array_equal(idx._mpl_repr(), exp)
idx = PeriodIndex(["2011-01", NaT], freq="M")
exp = np.array([Period("2011-01", freq="M"), NaT], dtype=object)
tm.assert_numpy_array_equal(idx.astype(object).values, exp)
tm.assert_numpy_array_equal(idx._mpl_repr(), exp)
exp = np.array([Period("2011-01-01", freq="D"), NaT], dtype=object)
idx = PeriodIndex(["2011-01-01", NaT], freq="D")
tm.assert_numpy_array_equal(idx.astype(object).values, exp)
tm.assert_numpy_array_equal(idx._mpl_repr(), exp)
# TODO: de-duplicate this version (from test_ops) with the one above
# (from test_period)
def test_astype_object2(self):
idx = period_range(start="2013-01-01", periods=4, freq="M", name="idx")
expected_list = [
Period("2013-01-31", freq="M"),
Period("2013-02-28", freq="M"),
Period("2013-03-31", freq="M"),
Period("2013-04-30", freq="M"),
]
expected = Index(expected_list, dtype=object, name="idx")
result = idx.astype(object)
assert isinstance(result, Index)
assert result.dtype == object
tm.assert_index_equal(result, expected)
assert result.name == expected.name
assert idx.tolist() == expected_list
idx = PeriodIndex(
["2013-01-01", "2013-01-02", "NaT", "2013-01-04"], freq="D", name="idx"
)
expected_list = [
Period("2013-01-01", freq="D"),
Period("2013-01-02", freq="D"),
Period("NaT", freq="D"),
Period("2013-01-04", freq="D"),
]
expected = Index(expected_list, dtype=object, name="idx")
result = idx.astype(object)
assert isinstance(result, Index)
assert result.dtype == object
tm.assert_index_equal(result, expected)
for i in [0, 1, 3]:
assert result[i] == expected[i]
assert result[2] is NaT
assert result.name == expected.name
result_list = idx.tolist()
for i in [0, 1, 3]:
assert result_list[i] == expected_list[i]
assert result_list[2] is NaT
def test_astype_category(self):
obj = period_range("2000", periods=2, name="idx")
result = obj.astype("category")
expected = CategoricalIndex(
[Period("2000-01-01", freq="D"), Period("2000-01-02", freq="D")], name="idx"
)
tm.assert_index_equal(result, expected)
result = obj._data.astype("category")
expected = expected.values
tm.assert_categorical_equal(result, expected)
def test_astype_array_fallback(self):
obj = period_range("2000", periods=2, name="idx")
result = obj.astype(bool)
expected = Index(np.array([True, True]), name="idx")
tm.assert_index_equal(result, expected)
result = obj._data.astype(bool)
expected = np.array([True, True])
tm.assert_numpy_array_equal(result, expected)
def test_period_astype_to_timestamp(self):
pi = PeriodIndex(["2011-01", "2011-02", "2011-03"], freq="M")
exp = DatetimeIndex(["2011-01-01", "2011-02-01", "2011-03-01"], freq="MS")
with tm.assert_produces_warning(FutureWarning):
# how keyword deprecated GH#37982
res = pi.astype("datetime64[ns]", how="start")
tm.assert_index_equal(res, exp)
assert res.freq == exp.freq
exp = DatetimeIndex(["2011-01-31", "2011-02-28", "2011-03-31"])
exp = exp + Timedelta(1, "D") - Timedelta(1, "ns")
with tm.assert_produces_warning(FutureWarning):
# how keyword deprecated GH#37982
res = pi.astype("datetime64[ns]", how="end")
tm.assert_index_equal(res, exp)
assert res.freq == exp.freq
exp = DatetimeIndex(["2011-01-01", "2011-02-01", "2011-03-01"], tz="US/Eastern")
res = pi.astype("datetime64[ns, US/Eastern]")
tm.assert_index_equal(res, exp)
assert res.freq == exp.freq
exp = DatetimeIndex(["2011-01-31", "2011-02-28", "2011-03-31"], tz="US/Eastern")
exp = exp + Timedelta(1, "D") - Timedelta(1, "ns")
with tm.assert_produces_warning(FutureWarning):
# how keyword deprecated GH#37982
res = pi.astype("datetime64[ns, US/Eastern]", how="end")
tm.assert_index_equal(res, exp)
assert res.freq == exp.freq
|
pandas/tests/indexes/period/methods/test_astype.py
|
| 0.559531 | 0.718416 |
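# A small, self-contained illustration of the conversions exercised by the
# test_astype.py tests above. The integer cast is omitted on purpose because it
# is deprecated (FutureWarning) in the pandas version these tests target, and
# the reprs in the comments are approximate.
import pandas as pd
pi = pd.PeriodIndex(["2016-05-16", pd.NaT], freq="D")
print(pi.astype(object))       # object-dtype Index holding Period / NaT
print(pi.astype(str))          # Index(['2016-05-16', 'NaT'], dtype='object')
print(pi.astype("category"))   # CategoricalIndex with Period categories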
import tensorflow as tf
from detection.models.backbones import resnet
from detection.models.necks import fpn
from detection.models.rpn_heads import rpn_head
from detection.models.bbox_heads import bbox_head
from detection.models.roi_extractors import roi_align
from detection.models.detectors.test_mixins import RPNTestMixin, BBoxTestMixin
from detection.core.bbox import bbox_target
class FasterRCNN(tf.keras.Model, RPNTestMixin, BBoxTestMixin):
def __init__(self, num_classes, **kwags):
super(FasterRCNN, self).__init__(**kwags)
self.NUM_CLASSES = num_classes
# RPN configuration
# Anchor attributes
self.ANCHOR_SCALES = (32, 64, 128, 256, 512)
self.ANCHOR_RATIOS = (0.5, 1, 2)
self.ANCHOR_FEATURE_STRIDES = (4, 8, 16, 32, 64)
# Bounding box refinement mean and standard deviation
self.RPN_TARGET_MEANS = (0., 0., 0., 0.)
self.RPN_TARGET_STDS = (0.1, 0.1, 0.2, 0.2)
# RPN training configuration
self.PRN_BATCH_SIZE = 256
self.RPN_POS_FRAC = 0.5
self.RPN_POS_IOU_THR = 0.7
self.RPN_NEG_IOU_THR = 0.3
# ROIs kept configuration
self.PRN_PROPOSAL_COUNT = 2000
self.PRN_NMS_THRESHOLD = 0.7
# RCNN configuration
# Bounding box refinement mean and standard deviation
self.RCNN_TARGET_MEANS = (0., 0., 0., 0.)
self.RCNN_TARGET_STDS = (0.1, 0.1, 0.2, 0.2)
# ROI Feat Size
self.POOL_SIZE = (7, 7)
# RCNN training configuration
self.RCNN_BATCH_SIZE = 256
self.RCNN_POS_FRAC = 0.25
self.RCNN_POS_IOU_THR = 0.5
self.RCNN_NEG_IOU_THR = 0.5
# Boxes kept configuration
self.RCNN_MIN_CONFIDENCE = 0.05
self.RCNN_NMS_THRESHOLD = 0.5
self.RCNN_MAX_INSTANCES = 100
# Target Generator for the second stage.
self.bbox_target = bbox_target.ProposalTarget(
target_means=self.RCNN_TARGET_MEANS,
            target_stds=self.RCNN_TARGET_STDS,
num_rcnn_deltas=self.RCNN_BATCH_SIZE,
positive_fraction=self.RCNN_POS_FRAC,
pos_iou_thr=self.RCNN_POS_IOU_THR,
neg_iou_thr=self.RCNN_NEG_IOU_THR,
num_classes=self.NUM_CLASSES)
# Modules
self.backbone = resnet.ResNet(
depth=101,
name='res_net')
self.neck = fpn.FPN(
name='fpn')
self.rpn_head = rpn_head.RPNHead(
anchor_scales=self.ANCHOR_SCALES,
anchor_ratios=self.ANCHOR_RATIOS,
anchor_feature_strides=self.ANCHOR_FEATURE_STRIDES,
proposal_count=self.PRN_PROPOSAL_COUNT,
nms_threshold=self.PRN_NMS_THRESHOLD,
target_means=self.RPN_TARGET_MEANS,
target_stds=self.RPN_TARGET_STDS,
num_rpn_deltas=self.PRN_BATCH_SIZE,
positive_fraction=self.RPN_POS_FRAC,
pos_iou_thr=self.RPN_POS_IOU_THR,
neg_iou_thr=self.RPN_NEG_IOU_THR,
name='rpn_head')
self.roi_align = roi_align.PyramidROIAlign(
pool_shape=self.POOL_SIZE,
name='pyramid_roi_align')
self.bbox_head = bbox_head.BBoxHead(
num_classes=self.NUM_CLASSES,
pool_size=self.POOL_SIZE,
target_means=self.RCNN_TARGET_MEANS,
target_stds=self.RCNN_TARGET_STDS,
min_confidence=self.RCNN_MIN_CONFIDENCE,
nms_threshold=self.RCNN_NMS_THRESHOLD,
max_instances=self.RCNN_MAX_INSTANCES,
name='b_box_head')
def __call__(self, inputs, training=True):
if training: # training
imgs, img_metas, gt_boxes, gt_class_ids = inputs
else: # inference
imgs, img_metas = inputs
C2, C3, C4, C5 = self.backbone(imgs,
training=training)
P2, P3, P4, P5, P6 = self.neck([C2, C3, C4, C5],
training=training)
rpn_feature_maps = [P2, P3, P4, P5, P6]
rcnn_feature_maps = [P2, P3, P4, P5]
rpn_class_logits, rpn_probs, rpn_deltas = self.rpn_head(
rpn_feature_maps, training=training)
proposals = self.rpn_head.get_proposals(
rpn_probs, rpn_deltas, img_metas)
if training:
rois, rcnn_labels, rcnn_label_weights, rcnn_delta_targets, rcnn_delta_weights = \
self.bbox_target.build_targets(
proposals, gt_boxes, gt_class_ids, img_metas)
else:
rois = proposals
pooled_regions = self.roi_align(
(rois, rcnn_feature_maps, img_metas), training=training)
rcnn_class_logits, rcnn_probs, rcnn_deltas = \
self.bbox_head(pooled_regions, training=training)
if training:
rpn_class_loss, rpn_bbox_loss = self.rpn_head.loss(
rpn_class_logits, rpn_deltas,
gt_boxes, gt_class_ids, img_metas)
rcnn_class_loss, rcnn_bbox_loss = self.bbox_head.loss(
rcnn_class_logits, rcnn_deltas,
rcnn_labels, rcnn_label_weights, rcnn_delta_targets, rcnn_delta_weights)
return [rpn_class_loss, rpn_bbox_loss,
rcnn_class_loss, rcnn_bbox_loss]
else:
detections_list = self.bbox_head.get_bboxes(
rcnn_probs, rcnn_deltas, rois, img_metas)
return detections_list
|
detection/models/detectors/faster_rcnn.py
|
| 0.697094 | 0.180865 |
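# A hedged sketch of an inference call on the FasterRCNN class above. The
# tensor shapes and the img_metas layout are illustrative assumptions (the real
# values come from the project's data pipeline), and the `detection` package
# must be importable for this to run.
import tensorflow as tf
from detection.models.detectors.faster_rcnn import FasterRCNN
model = FasterRCNN(num_classes=81)
imgs = tf.zeros([1, 1024, 1024, 3], dtype=tf.float32)     # one zero-padded image
img_metas = tf.constant([[1024., 1024., 3.,               # original shape (assumed layout)
                          1024., 1024., 3.,               # resized shape
                          1024., 1024., 3.,               # padded shape
                          1.0, 0.0]])                     # scale factor, flip flag
detections = model((imgs, img_metas), training=False)     # list of per-image detections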
import os
import glob
from netCDF4 import Dataset as open_ncfile
import matplotlib.pyplot as plt
import numpy as np
import datetime
import pickle
# -- Read result
emerge = pickle.load( open( "/home/ysilvy/Density_bining/Yona_analysis/data/percentage_emergence_medians_meanhistNat.pkl", "rb" ) )
# -- Median and range
median_emerge = np.ma.median(emerge,axis=1)
pc25_emerge = np.percentile(emerge,25,axis=1)
pc75_emerge = np.percentile(emerge,75,axis=1)
time = np.arange(1860,2100)
# -- Plot
fig, axes = plt.subplots(1,3,sharex=True,sharey=True,figsize=(16, 5))
axes[0].plot(time,median_emerge[:,1],color='k')
axes[0].fill_between(time,pc25_emerge[:,1],pc75_emerge[:,1],color='0.8') #alpha=0.3
axes[0].set_xlim([1920,2080])
axes[0].set_ylim([0,83])
axes[0].axvline(x=2005,ls='--',color='k',lw=0.5)
axes[0].grid(axis='y')
axes[0].text(2050,5,'Atlantic',fontweight='bold',fontsize=15,va='center',ha='center')
axes[1].plot(time,median_emerge[:,2],color='k')
axes[1].fill_between(time,pc25_emerge[:,2],pc75_emerge[:,2],color='0.8')
axes[1].axvline(x=2005,ls='--',color='k',lw=0.5)
axes[1].text(2050,5,'Pacific',fontweight='bold',fontsize=15,va='center',ha='center')
axes[1].grid(axis='y')
axes[2].plot(time,median_emerge[:,3],color='k')
axes[2].fill_between(time,pc25_emerge[:,3],pc75_emerge[:,3],color='0.8')
axes[2].axvline(x=2005,ls='--',color='k',lw=0.5)
axes[2].text(2050,5,'Indian',fontweight='bold',fontsize=15,va='center',ha='center')
axes[2].grid(axis='y')
axes[0].set_ylabel('% of basin zonal mean',fontweight='bold',fontsize=14)
axes[0].set_xticks(np.arange(1920,2081,20))
axes[1].tick_params(axis='y', labelleft='on')
axes[2].tick_params(axis='y', labelleft='on')
plt.subplots_adjust(wspace=0.1,top=0.85,left=0.04, right=0.92)
plt.figtext(.006,.95,'b',fontweight='bold',fontsize=18)
for i in range(3):
plt.setp(axes[i].get_xticklabels(), fontweight='bold',fontsize=12, rotation=20)
plt.setp(axes[i].get_yticklabels(), fontweight='bold',fontsize=12)
axes[i].xaxis.set_tick_params(which='major',width=2,labelsize=12)
axes[i].yaxis.set_tick_params(which='major',width=2,labelsize=12)
# Date
now = datetime.datetime.now()
date = now.strftime("%Y-%m-%d")
plotName = 'fig3b'
figureDir = 'models/ToE/'
plt.savefig(plotName+'.svg', dpi=300) #,bbox_inches='tight')
#plt.show()
|
fig3b.py
|
| 0.47025 | 0.409929 |
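# A hedged note on the layout that fig3b.py above implies for the pickled
# array: axis 0 is time (1860-2099, 240 steps), axis 1 is the member dimension
# the median and 25-75% range are taken over, and indices 1-3 of the last axis
# are the Atlantic, Pacific and Indian basins. A quick sanity check before
# plotting might look like this (file name shortened for illustration):
import pickle
import numpy as np
with open("percentage_emergence_medians_meanhistNat.pkl", "rb") as f:
    emerge = pickle.load(f)
assert emerge.shape[0] == np.arange(1860, 2100).size   # 240 yearly time steps
assert emerge.shape[-1] >= 4                            # basin indices 1..3 are used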
import os
import sys
import cv2
import numpy as np
#PYCAFFE_DIR = '/home/kevin/Development/caffe3/python'
PYCAFFE_DIR = '/usr/local/opt/caffe-2015-07/python'
def _create_net(specfile, modelfile):
    if PYCAFFE_DIR not in sys.path:
sys.path.insert(0, PYCAFFE_DIR)
import caffe
return caffe.Net(specfile, modelfile, caffe.TEST)
def find_scale_to_fit(im, shape):
"""Finds the scale that makes the image fit in the rect"""
w, h = im.shape[1], im.shape[0]
target_w, target_h = shape[1], shape[0]
scale = 1.0
if target_w is not None:
scale = min(scale, target_w / float(w))
if target_h is not None:
scale = min(scale, target_h / float(h))
return scale
class SalNet(object):
input_layer = 'data1'
output_layer = 'deconv1'
default_model_path = os.path.join(os.path.dirname(__file__), 'model')
def __init__(self,
specfile=None,
modelfile=None,
input_size=None,
max_input_size=(320, 320),
channel_swap=(2,1,0),
mean_value=(100,110,118),
input_scale=0.0078431372549,
saliency_mean=127,
blur_size=5,
stretch_output=True,
interpolation=cv2.INTER_CUBIC):
if not specfile:
specfile = os.path.join(self.default_model_path, 'deploy.prototxt')
if not modelfile:
modelfile = os.path.join(self.default_model_path, 'model.caffemodel')
self.input_size = input_size
self.max_input_size = max_input_size
self.channel_swap = channel_swap
self.mean_value = mean_value
self.input_scale = input_scale
self.saliency_mean = saliency_mean
self.blur_size = blur_size
self.net = _create_net(specfile, modelfile)
self.stretch_output = stretch_output
self.interpolation = interpolation
def scale_input(self, im):
if self.input_size:
# scale to fixed size
h, w = self.input_size
im = cv2.resize(im, (w, h), interpolation=cv2.INTER_AREA)
elif self.max_input_size:
# scale to fit in a rectangle
scale = find_scale_to_fit(im, self.max_input_size)
im = cv2.resize(im, None, fx=scale, fy=scale)
return im
def preprocess_input(self, input_image):
# scale
im = self.scale_input(input_image)
# rgb -> bgr
if self.channel_swap:
im = im[:,:,self.channel_swap]
# to float
im = im.astype(np.float32)
# mean subtraction
im -= np.array(self.mean_value)
# scale to [-1,1]
im *= self.input_scale
# transpose
im = im.transpose((2,0,1))
# add lead dimension
return np.ascontiguousarray(im[np.newaxis,...], dtype=np.float32)
def postprocess_output(self, net_output, map_shape):
# squeeze extra dimensions
p = np.squeeze(np.array(net_output))
# rescale
p *= 128
# add back the mean
p += self.saliency_mean
# clip
p = np.clip(p, 0, 255)
# resize back to original size
if map_shape:
h, w = map_shape
p = cv2.resize(p, (w, h), interpolation=self.interpolation)
# blur
if self.blur_size:
p = cv2.GaussianBlur(p, (self.blur_size, self.blur_size), 0)
# clip again
p = np.clip(p, 0, 255)
# stretch
if self.stretch_output:
if p.max() > 0:
p = (p / p.max()) * 255.0
return p.astype(np.uint8)
def get_saliency(self, image):
# Prepare the image for the network
net_input = self.preprocess_input(image)
# Reshape the input layer to match the network input
self.net.blobs[self.input_layer].reshape(*net_input.shape)
# Copy the prepared image to the network input layer
self.net.blobs[self.input_layer].data[...] = net_input
# Run the network forward
self.net.forward()
# Grab the output layer
net_output = self.net.blobs[self.output_layer].data[0,0]
# Postprocess the output to compute saliency map
return self.postprocess_output(net_output, image.shape[:2])
|
deep/__init__.py
|
| 0.396652 | 0.161849 |
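# A hedged usage sketch for the SalNet wrapper above. It assumes pycaffe is
# importable from PYCAFFE_DIR, that the bundled deploy.prototxt/model.caffemodel
# exist, and that the package is importable as `deep` (matching the
# deep/__init__.py path); the image file name is a placeholder.
import cv2
from deep import SalNet
net = SalNet()                                  # default model files
bgr = cv2.imread("example.jpg")
rgb = cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB)      # preprocess_input swaps channels back to BGR
saliency = net.get_saliency(rgb)                # uint8 map at the original image size
cv2.imwrite("example_saliency.png", saliency)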
import logging
import itertools
from data.logs_model.datatypes import AggregatedLogCount, LogEntriesPage
from data.logs_model.interface import ActionLogsDataInterface
from data.logs_model.shared import SharedModel
logger = logging.getLogger(__name__)
def _merge_aggregated_log_counts(*args):
""" Merge two lists of AggregatedLogCount based on the value of their kind_id and datetime.
"""
matching_keys = {}
aggregated_log_counts_list = itertools.chain.from_iterable(args)
def canonical_key_from_kind_date_tuple(kind_id, dt):
""" Return a comma separated key from an AggregatedLogCount's kind_id and datetime. """
return str(kind_id) + ',' + str(dt)
for kind_id, count, dt in aggregated_log_counts_list:
kind_date_key = canonical_key_from_kind_date_tuple(kind_id, dt)
if kind_date_key in matching_keys:
existing_count = matching_keys[kind_date_key][2]
matching_keys[kind_date_key] = (kind_id, dt, existing_count + count)
else:
matching_keys[kind_date_key] = (kind_id, dt, count)
return [AggregatedLogCount(kind_id, count, dt) for (kind_id, dt, count) in matching_keys.values()]
class CombinedLogsModel(SharedModel, ActionLogsDataInterface):
"""
CombinedLogsModel implements the data model that logs to the first logs model and reads from
both.
"""
def __init__(self, read_write_logs_model, read_only_logs_model):
self.read_write_logs_model = read_write_logs_model
self.read_only_logs_model = read_only_logs_model
def log_action(self, kind_name, namespace_name=None, performer=None, ip=None, metadata=None,
repository=None, repository_name=None, timestamp=None, is_free_namespace=False):
return self.read_write_logs_model.log_action(kind_name, namespace_name, performer, ip, metadata,
repository, repository_name, timestamp,
is_free_namespace)
def count_repository_actions(self, repository, day):
rw_count = self.read_write_logs_model.count_repository_actions(repository, day)
ro_count = self.read_only_logs_model.count_repository_actions(repository, day)
return rw_count + ro_count
def get_aggregated_log_counts(self, start_datetime, end_datetime, performer_name=None,
repository_name=None, namespace_name=None, filter_kinds=None):
rw_model = self.read_write_logs_model
ro_model = self.read_only_logs_model
rw_count = rw_model.get_aggregated_log_counts(start_datetime, end_datetime,
performer_name=performer_name,
repository_name=repository_name,
namespace_name=namespace_name,
filter_kinds=filter_kinds)
ro_count = ro_model.get_aggregated_log_counts(start_datetime, end_datetime,
performer_name=performer_name,
repository_name=repository_name,
namespace_name=namespace_name,
filter_kinds=filter_kinds)
return _merge_aggregated_log_counts(rw_count, ro_count)
def yield_logs_for_export(self, start_datetime, end_datetime, repository_id=None,
namespace_id=None, max_query_time=None):
rw_model = self.read_write_logs_model
ro_model = self.read_only_logs_model
rw_logs = rw_model.yield_logs_for_export(start_datetime, end_datetime, repository_id,
namespace_id, max_query_time)
ro_logs = ro_model.yield_logs_for_export(start_datetime, end_datetime, repository_id,
namespace_id, max_query_time)
for batch in itertools.chain(rw_logs, ro_logs):
yield batch
def lookup_logs(self, start_datetime, end_datetime, performer_name=None, repository_name=None,
namespace_name=None, filter_kinds=None, page_token=None, max_page_count=None):
rw_model = self.read_write_logs_model
ro_model = self.read_only_logs_model
page_token = page_token or {}
new_page_token = {}
if page_token is None or not page_token.get('under_readonly_model', False):
rw_page_token = page_token.get('readwrite_page_token')
rw_logs = rw_model.lookup_logs(start_datetime, end_datetime, performer_name,
repository_name, namespace_name, filter_kinds,
rw_page_token, max_page_count)
logs, next_page_token = rw_logs
new_page_token['under_readonly_model'] = next_page_token is None
new_page_token['readwrite_page_token'] = next_page_token
return LogEntriesPage(logs, new_page_token)
else:
readonly_page_token = page_token.get('readonly_page_token')
ro_logs = ro_model.lookup_logs(start_datetime, end_datetime, performer_name,
repository_name, namespace_name, filter_kinds,
readonly_page_token, max_page_count)
logs, next_page_token = ro_logs
if next_page_token is None:
return LogEntriesPage(logs, None)
new_page_token['under_readonly_model'] = True
new_page_token['readonly_page_token'] = next_page_token
return LogEntriesPage(logs, new_page_token)
def lookup_latest_logs(self, performer_name=None, repository_name=None, namespace_name=None,
filter_kinds=None, size=20):
latest_logs = []
rw_model = self.read_write_logs_model
ro_model = self.read_only_logs_model
rw_logs = rw_model.lookup_latest_logs(performer_name, repository_name, namespace_name,
filter_kinds, size)
latest_logs.extend(rw_logs)
if len(latest_logs) < size:
ro_logs = ro_model.lookup_latest_logs(performer_name, repository_name, namespace_name,
filter_kinds, size - len(latest_logs))
latest_logs.extend(ro_logs)
return latest_logs
def yield_log_rotation_context(self, cutoff_date, min_logs_per_rotation):
ro_model = self.read_only_logs_model
rw_model = self.read_write_logs_model
ro_ctx = ro_model.yield_log_rotation_context(cutoff_date, min_logs_per_rotation)
rw_ctx = rw_model.yield_log_rotation_context(cutoff_date, min_logs_per_rotation)
for ctx in itertools.chain(ro_ctx, rw_ctx):
yield ctx
|
data/logs_model/combined_model.py
|
| 0.593374 | 0.168891 |
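# A hedged sketch of paging through logs with the CombinedLogsModel above. The
# two backend models are left as parameters because building concrete
# ActionLogsDataInterface implementations is outside this snippet, and the
# namespace name is a placeholder.
from datetime import datetime, timedelta
from data.logs_model.combined_model import CombinedLogsModel
def dump_recent_logs(read_write_model, read_only_model, namespace_name):
    combined = CombinedLogsModel(read_write_model, read_only_model)
    end = datetime.utcnow()
    start = end - timedelta(days=7)
    page_token = None
    while True:
        page = combined.lookup_logs(start, end, namespace_name=namespace_name,
                                    page_token=page_token)
        logs, page_token = page   # LogEntriesPage is built as (logs, next_page_token) above
        for entry in logs:
            print(entry)
        if page_token is None:
            break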
# Import libraries
from absl import logging
import tensorflow as tf
from official.vision.beta.ops import spatial_transform_ops
# The fixed NAS-FPN architecture discovered by NAS.
# Each element represents a specification of a building block:
# (block_level, combine_fn, (input_offset0, input_offset1), is_output).
NASFPN_BLOCK_SPECS = [
(4, 'attention', (1, 3), False),
(4, 'sum', (1, 5), False),
(3, 'sum', (0, 6), True),
(4, 'sum', (6, 7), True),
(5, 'attention', (7, 8), True),
(7, 'attention', (6, 9), True),
(6, 'attention', (9, 10), True),
]
class BlockSpec(object):
"""A container class that specifies the block configuration for NAS-FPN."""
def __init__(self, level, combine_fn, input_offsets, is_output):
self.level = level
self.combine_fn = combine_fn
self.input_offsets = input_offsets
self.is_output = is_output
def build_block_specs(block_specs=None):
"""Builds the list of BlockSpec objects for NAS-FPN."""
if not block_specs:
block_specs = NASFPN_BLOCK_SPECS
logging.info('Building NAS-FPN block specs: %s', block_specs)
return [BlockSpec(*b) for b in block_specs]
@tf.keras.utils.register_keras_serializable(package='Vision')
class NASFPN(tf.keras.Model):
"""NAS-FPN."""
def __init__(self,
input_specs,
min_level=3,
max_level=7,
block_specs=build_block_specs(),
num_filters=256,
num_repeats=5,
use_separable_conv=False,
activation='relu',
use_sync_bn=False,
norm_momentum=0.99,
norm_epsilon=0.001,
kernel_initializer='VarianceScaling',
kernel_regularizer=None,
bias_regularizer=None,
**kwargs):
"""FPN initialization function.
Args:
input_specs: `dict` input specifications. A dictionary consists of
{level: TensorShape} from a backbone.
min_level: `int` minimum level in FPN output feature maps.
max_level: `int` maximum level in FPN output feature maps.
block_specs: a list of BlockSpec objects that specifies the NAS-FPN
network topology. By default, the previously discovered architecture is
used.
num_filters: `int` number of filters in FPN layers.
num_repeats: number of repeats for feature pyramid network.
use_separable_conv: `bool`, if True use separable convolution for
convolution in FPN layers.
activation: `str` name of the activation function.
use_sync_bn: if True, use synchronized batch normalization.
      norm_momentum: `float` normalization momentum for the moving average.
norm_epsilon: `float` small float added to variance to avoid dividing by
zero.
kernel_initializer: kernel_initializer for convolutional layers.
kernel_regularizer: tf.keras.regularizers.Regularizer object for Conv2D.
bias_regularizer: tf.keras.regularizers.Regularizer object for Conv2d.
**kwargs: keyword arguments to be passed.
"""
self._config_dict = {
'input_specs': input_specs,
'min_level': min_level,
'max_level': max_level,
'num_filters': num_filters,
'num_repeats': num_repeats,
'use_separable_conv': use_separable_conv,
'activation': activation,
'use_sync_bn': use_sync_bn,
'norm_momentum': norm_momentum,
'norm_epsilon': norm_epsilon,
'kernel_initializer': kernel_initializer,
'kernel_regularizer': kernel_regularizer,
'bias_regularizer': bias_regularizer,
}
self._min_level = min_level
self._max_level = max_level
self._block_specs = block_specs
self._num_repeats = num_repeats
self._conv_op = (tf.keras.layers.SeparableConv2D
if self._config_dict['use_separable_conv']
else tf.keras.layers.Conv2D)
if self._config_dict['use_separable_conv']:
self._conv_kwargs = {
'depthwise_initializer': tf.keras.initializers.VarianceScaling(
scale=2, mode='fan_out', distribution='untruncated_normal'),
'pointwise_initializer': tf.keras.initializers.VarianceScaling(
scale=2, mode='fan_out', distribution='untruncated_normal'),
'bias_initializer': tf.zeros_initializer(),
'depthwise_regularizer': self._config_dict['kernel_regularizer'],
'pointwise_regularizer': self._config_dict['kernel_regularizer'],
'bias_regularizer': self._config_dict['bias_regularizer'],
}
else:
self._conv_kwargs = {
'kernel_initializer': tf.keras.initializers.VarianceScaling(
scale=2, mode='fan_out', distribution='untruncated_normal'),
'bias_initializer': tf.zeros_initializer(),
'kernel_regularizer': self._config_dict['kernel_regularizer'],
'bias_regularizer': self._config_dict['bias_regularizer'],
}
self._norm_op = (tf.keras.layers.experimental.SyncBatchNormalization
if self._config_dict['use_sync_bn']
else tf.keras.layers.BatchNormalization)
if tf.keras.backend.image_data_format() == 'channels_last':
self._bn_axis = -1
else:
self._bn_axis = 1
self._norm_kwargs = {
'axis': self._bn_axis,
'momentum': self._config_dict['norm_momentum'],
'epsilon': self._config_dict['norm_epsilon'],
}
if activation == 'relu':
self._activation = tf.nn.relu
elif activation == 'swish':
self._activation = tf.nn.swish
else:
raise ValueError('Activation {} not implemented.'.format(activation))
# Gets input feature pyramid from backbone.
inputs = self._build_input_pyramid(input_specs, min_level)
# Projects the input features.
feats = []
for level in range(self._min_level, self._max_level + 1):
if str(level) in inputs.keys():
feats.append(self._resample_feature_map(
inputs[str(level)], level, level, self._config_dict['num_filters']))
else:
feats.append(self._resample_feature_map(
feats[-1], level - 1, level, self._config_dict['num_filters']))
    # Repeatedly builds the NAS-FPN modules.
for _ in range(self._num_repeats):
output_feats = self._build_feature_pyramid(feats)
feats = [output_feats[level]
for level in range(self._min_level, self._max_level + 1)]
self._output_specs = {
str(level): output_feats[level].get_shape()
for level in range(min_level, max_level + 1)
}
output_feats = {str(level): output_feats[level]
for level in output_feats.keys()}
super(NASFPN, self).__init__(inputs=inputs, outputs=output_feats, **kwargs)
def _build_input_pyramid(self, input_specs, min_level):
assert isinstance(input_specs, dict)
if min(input_specs.keys()) > str(min_level):
raise ValueError(
'Backbone min level should be less or equal to FPN min level')
inputs = {}
for level, spec in input_specs.items():
inputs[level] = tf.keras.Input(shape=spec[1:])
return inputs
def _resample_feature_map(self,
inputs,
input_level,
target_level,
target_num_filters=256):
x = inputs
_, _, _, input_num_filters = x.get_shape().as_list()
if input_num_filters != target_num_filters:
x = self._conv_op(
filters=target_num_filters,
kernel_size=1,
padding='same',
**self._conv_kwargs)(x)
x = self._norm_op(**self._norm_kwargs)(x)
if input_level < target_level:
stride = int(2 ** (target_level - input_level))
x = tf.keras.layers.MaxPool2D(
pool_size=stride, strides=stride, padding='same')(x)
elif input_level > target_level:
scale = int(2 ** (input_level - target_level))
x = spatial_transform_ops.nearest_upsampling(x, scale=scale)
return x
def _global_attention(self, feat0, feat1):
m = tf.math.reduce_max(feat0, axis=[1, 2], keepdims=True)
m = tf.math.sigmoid(m)
return feat0 + feat1 * m
def _build_feature_pyramid(self, feats):
num_output_connections = [0] * len(feats)
num_output_levels = self._max_level - self._min_level + 1
feat_levels = list(range(self._min_level, self._max_level + 1))
for i, block_spec in enumerate(self._block_specs):
new_level = block_spec.level
# Checks the range of input_offsets.
for input_offset in block_spec.input_offsets:
if input_offset >= len(feats):
raise ValueError(
'input_offset ({}) is larger than num feats({})'.format(
input_offset, len(feats)))
input0 = block_spec.input_offsets[0]
input1 = block_spec.input_offsets[1]
# Update graph with inputs.
node0 = feats[input0]
node0_level = feat_levels[input0]
num_output_connections[input0] += 1
node0 = self._resample_feature_map(node0, node0_level, new_level)
node1 = feats[input1]
node1_level = feat_levels[input1]
num_output_connections[input1] += 1
node1 = self._resample_feature_map(node1, node1_level, new_level)
# Combine node0 and node1 to create new feat.
if block_spec.combine_fn == 'sum':
new_node = node0 + node1
elif block_spec.combine_fn == 'attention':
if node0_level >= node1_level:
new_node = self._global_attention(node0, node1)
else:
new_node = self._global_attention(node1, node0)
else:
raise ValueError('unknown combine_fn `{}`.'
.format(block_spec.combine_fn))
# Add intermediate nodes that do not have any connections to output.
if block_spec.is_output:
for j, (feat, feat_level, num_output) in enumerate(
zip(feats, feat_levels, num_output_connections)):
if num_output == 0 and feat_level == new_level:
num_output_connections[j] += 1
feat_ = self._resample_feature_map(feat, feat_level, new_level)
new_node += feat_
new_node = self._activation(new_node)
new_node = self._conv_op(
filters=self._config_dict['num_filters'],
kernel_size=(3, 3),
padding='same',
**self._conv_kwargs)(new_node)
new_node = self._norm_op(**self._norm_kwargs)(new_node)
feats.append(new_node)
feat_levels.append(new_level)
num_output_connections.append(0)
output_feats = {}
for i in range(len(feats) - num_output_levels, len(feats)):
level = feat_levels[i]
output_feats[level] = feats[i]
logging.info('Output feature pyramid: %s', output_feats)
return output_feats
def get_config(self):
return self._config_dict
@classmethod
def from_config(cls, config, custom_objects=None):
return cls(**config)
@property
def output_specs(self):
"""A dict of {level: TensorShape} pairs for the model output."""
return self._output_specs
|
official/vision/beta/modeling/decoders/nasfpn.py
|
# Import libraries
from absl import logging
import tensorflow as tf
from official.vision.beta.ops import spatial_transform_ops
# The fixed NAS-FPN architecture discovered by NAS.
# Each element represents a specification of a building block:
# (block_level, combine_fn, (input_offset0, input_offset1), is_output).
NASFPN_BLOCK_SPECS = [
(4, 'attention', (1, 3), False),
(4, 'sum', (1, 5), False),
(3, 'sum', (0, 6), True),
(4, 'sum', (6, 7), True),
(5, 'attention', (7, 8), True),
(7, 'attention', (6, 9), True),
(6, 'attention', (9, 10), True),
]
class BlockSpec(object):
"""A container class that specifies the block configuration for NAS-FPN."""
def __init__(self, level, combine_fn, input_offsets, is_output):
self.level = level
self.combine_fn = combine_fn
self.input_offsets = input_offsets
self.is_output = is_output
def build_block_specs(block_specs=None):
"""Builds the list of BlockSpec objects for NAS-FPN."""
if not block_specs:
block_specs = NASFPN_BLOCK_SPECS
logging.info('Building NAS-FPN block specs: %s', block_specs)
return [BlockSpec(*b) for b in block_specs]
@tf.keras.utils.register_keras_serializable(package='Vision')
class NASFPN(tf.keras.Model):
"""NAS-FPN."""
def __init__(self,
input_specs,
min_level=3,
max_level=7,
block_specs=build_block_specs(),
num_filters=256,
num_repeats=5,
use_separable_conv=False,
activation='relu',
use_sync_bn=False,
norm_momentum=0.99,
norm_epsilon=0.001,
kernel_initializer='VarianceScaling',
kernel_regularizer=None,
bias_regularizer=None,
**kwargs):
"""FPN initialization function.
Args:
input_specs: `dict` input specifications. A dictionary of
{level: TensorShape} pairs from the backbone.
min_level: `int` minimum level in FPN output feature maps.
max_level: `int` maximum level in FPN output feature maps.
block_specs: a list of BlockSpec objects that specifies the NAS-FPN
network topology. By default, the previously discovered architecture is
used.
num_filters: `int` number of filters in FPN layers.
num_repeats: number of repeats for feature pyramid network.
use_separable_conv: `bool`, if True use separable convolution for
convolution in FPN layers.
activation: `str` name of the activation function.
use_sync_bn: if True, use synchronized batch normalization.
norm_momentum: `float` normalization momentum for the moving average.
norm_epsilon: `float` small float added to variance to avoid dividing by
zero.
kernel_initializer: kernel_initializer for convolutional layers.
kernel_regularizer: tf.keras.regularizers.Regularizer object for Conv2D.
bias_regularizer: tf.keras.regularizers.Regularizer object for Conv2D.
**kwargs: keyword arguments to be passed.
"""
self._config_dict = {
'input_specs': input_specs,
'min_level': min_level,
'max_level': max_level,
'num_filters': num_filters,
'num_repeats': num_repeats,
'use_separable_conv': use_separable_conv,
'activation': activation,
'use_sync_bn': use_sync_bn,
'norm_momentum': norm_momentum,
'norm_epsilon': norm_epsilon,
'kernel_initializer': kernel_initializer,
'kernel_regularizer': kernel_regularizer,
'bias_regularizer': bias_regularizer,
}
self._min_level = min_level
self._max_level = max_level
self._block_specs = block_specs
self._num_repeats = num_repeats
self._conv_op = (tf.keras.layers.SeparableConv2D
if self._config_dict['use_separable_conv']
else tf.keras.layers.Conv2D)
if self._config_dict['use_separable_conv']:
self._conv_kwargs = {
'depthwise_initializer': tf.keras.initializers.VarianceScaling(
scale=2, mode='fan_out', distribution='untruncated_normal'),
'pointwise_initializer': tf.keras.initializers.VarianceScaling(
scale=2, mode='fan_out', distribution='untruncated_normal'),
'bias_initializer': tf.zeros_initializer(),
'depthwise_regularizer': self._config_dict['kernel_regularizer'],
'pointwise_regularizer': self._config_dict['kernel_regularizer'],
'bias_regularizer': self._config_dict['bias_regularizer'],
}
else:
self._conv_kwargs = {
'kernel_initializer': tf.keras.initializers.VarianceScaling(
scale=2, mode='fan_out', distribution='untruncated_normal'),
'bias_initializer': tf.zeros_initializer(),
'kernel_regularizer': self._config_dict['kernel_regularizer'],
'bias_regularizer': self._config_dict['bias_regularizer'],
}
self._norm_op = (tf.keras.layers.experimental.SyncBatchNormalization
if self._config_dict['use_sync_bn']
else tf.keras.layers.BatchNormalization)
if tf.keras.backend.image_data_format() == 'channels_last':
self._bn_axis = -1
else:
self._bn_axis = 1
self._norm_kwargs = {
'axis': self._bn_axis,
'momentum': self._config_dict['norm_momentum'],
'epsilon': self._config_dict['norm_epsilon'],
}
if activation == 'relu':
self._activation = tf.nn.relu
elif activation == 'swish':
self._activation = tf.nn.swish
else:
raise ValueError('Activation {} not implemented.'.format(activation))
# Gets input feature pyramid from backbone.
inputs = self._build_input_pyramid(input_specs, min_level)
# Projects the input features.
feats = []
for level in range(self._min_level, self._max_level + 1):
if str(level) in inputs.keys():
feats.append(self._resample_feature_map(
inputs[str(level)], level, level, self._config_dict['num_filters']))
else:
feats.append(self._resample_feature_map(
feats[-1], level - 1, level, self._config_dict['num_filters']))
# Repeatedly builds the NAS-FPN modules.
for _ in range(self._num_repeats):
output_feats = self._build_feature_pyramid(feats)
feats = [output_feats[level]
for level in range(self._min_level, self._max_level + 1)]
self._output_specs = {
str(level): output_feats[level].get_shape()
for level in range(min_level, max_level + 1)
}
output_feats = {str(level): output_feats[level]
for level in output_feats.keys()}
super(NASFPN, self).__init__(inputs=inputs, outputs=output_feats, **kwargs)
def _build_input_pyramid(self, input_specs, min_level):
assert isinstance(input_specs, dict)
if min(input_specs.keys()) > str(min_level):
raise ValueError(
'Backbone min level should be less or equal to FPN min level')
inputs = {}
for level, spec in input_specs.items():
inputs[level] = tf.keras.Input(shape=spec[1:])
return inputs
def _resample_feature_map(self,
inputs,
input_level,
target_level,
target_num_filters=256):
x = inputs
_, _, _, input_num_filters = x.get_shape().as_list()
if input_num_filters != target_num_filters:
x = self._conv_op(
filters=target_num_filters,
kernel_size=1,
padding='same',
**self._conv_kwargs)(x)
x = self._norm_op(**self._norm_kwargs)(x)
if input_level < target_level:
stride = int(2 ** (target_level - input_level))
x = tf.keras.layers.MaxPool2D(
pool_size=stride, strides=stride, padding='same')(x)
elif input_level > target_level:
scale = int(2 ** (input_level - target_level))
x = spatial_transform_ops.nearest_upsampling(x, scale=scale)
return x
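# Editorial note (not in the original file): _resample_feature_map above first
# matches the channel count with a 1x1 conv + norm when needed, then matches the
# spatial size: moving to a coarser level max-pools with stride 2**(target - input),
# while moving to a finer level nearest-upsamples by 2**(input - target). For
# example, resampling a level-4 map of shape [B, 32, 32, C] to level 6 max-pools
# with pool size and stride 4, giving [B, 8, 8, C].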
def _global_attention(self, feat0, feat1):
m = tf.math.reduce_max(feat0, axis=[1, 2], keepdims=True)
m = tf.math.sigmoid(m)
return feat0 + feat1 * m
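# Editorial note (not in the original file): _global_attention gates one input by
# a global context signal taken from the other. Both inputs were already resampled
# to the target level, so their shapes match:
#   m   = sigmoid(reduce_max(feat0, axis=[1, 2], keepdims=True))  # [B, 1, 1, C]
#   out = feat0 + feat1 * m                                       # [B, H, W, C]
# In _build_feature_pyramid the input that originated from the higher (coarser)
# pyramid level is passed as feat0, so its pooled response modulates the other.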
def _build_feature_pyramid(self, feats):
num_output_connections = [0] * len(feats)
num_output_levels = self._max_level - self._min_level + 1
feat_levels = list(range(self._min_level, self._max_level + 1))
for i, block_spec in enumerate(self._block_specs):
new_level = block_spec.level
# Checks the range of input_offsets.
for input_offset in block_spec.input_offsets:
if input_offset >= len(feats):
raise ValueError(
'input_offset ({}) is larger than num feats({})'.format(
input_offset, len(feats)))
input0 = block_spec.input_offsets[0]
input1 = block_spec.input_offsets[1]
# Update graph with inputs.
node0 = feats[input0]
node0_level = feat_levels[input0]
num_output_connections[input0] += 1
node0 = self._resample_feature_map(node0, node0_level, new_level)
node1 = feats[input1]
node1_level = feat_levels[input1]
num_output_connections[input1] += 1
node1 = self._resample_feature_map(node1, node1_level, new_level)
# Combine node0 and node1 to create new feat.
if block_spec.combine_fn == 'sum':
new_node = node0 + node1
elif block_spec.combine_fn == 'attention':
if node0_level >= node1_level:
new_node = self._global_attention(node0, node1)
else:
new_node = self._global_attention(node1, node0)
else:
raise ValueError('unknown combine_fn `{}`.'
.format(block_spec.combine_fn))
# If this block is an output node, also merge in any same-level feats that have no output connections yet.
if block_spec.is_output:
for j, (feat, feat_level, num_output) in enumerate(
zip(feats, feat_levels, num_output_connections)):
if num_output == 0 and feat_level == new_level:
num_output_connections[j] += 1
feat_ = self._resample_feature_map(feat, feat_level, new_level)
new_node += feat_
new_node = self._activation(new_node)
new_node = self._conv_op(
filters=self._config_dict['num_filters'],
kernel_size=(3, 3),
padding='same',
**self._conv_kwargs)(new_node)
new_node = self._norm_op(**self._norm_kwargs)(new_node)
feats.append(new_node)
feat_levels.append(new_level)
num_output_connections.append(0)
output_feats = {}
for i in range(len(feats) - num_output_levels, len(feats)):
level = feat_levels[i]
output_feats[level] = feats[i]
logging.info('Output feature pyramid: %s', output_feats)
return output_feats
def get_config(self):
return self._config_dict
@classmethod
def from_config(cls, config, custom_objects=None):
return cls(**config)
@property
def output_specs(self):
"""A dict of {level: TensorShape} pairs for the model output."""
return self._output_specs
| 0.930324 | 0.44746 |
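A minimal usage sketch for the decoder above, assuming the module is importable from the path shown in this row; the backbone specs, shapes, and hyperparameters below are illustrative stand-ins, not values taken from the file.

import tensorflow as tf
from official.vision.beta.modeling.decoders.nasfpn import NASFPN

# Hypothetical backbone output specs for levels 3-5 (NHWC). NASFPN derives
# levels 6 and 7 itself by downsampling the coarsest provided feature map.
input_specs = {
    '3': tf.TensorShape([None, 64, 64, 128]),
    '4': tf.TensorShape([None, 32, 32, 256]),
    '5': tf.TensorShape([None, 16, 16, 512]),
}
decoder = NASFPN(input_specs=input_specs, min_level=3, max_level=7,
                 num_filters=256, num_repeats=3)

# Feed backbone features keyed by level string; the output is a dict of P3..P7.
features = {k: tf.random.normal([2] + v.as_list()[1:])
            for k, v in input_specs.items()}
outputs = decoder(features)
print({level: tuple(feat.shape) for level, feat in outputs.items()})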
import requests
import re
import uuid
from flask import current_app
from app.book.models import Book, SearchableBookMapping, searchable_book_index, searchable_book_doc_type
from app.search import services as es_service
def check_connection():
if not es_service.check_connection():
raise ValueError("Connection to search/storage service is unavailable. Please try again.")
return True
def create_index_with_mapping():
es_service.create_index(searchable_book_index)
es_service.add_mapping_to_index(searchable_book_index, searchable_book_doc_type, SearchableBookMapping)
def read_and_insert_books(book_url):
res = requests.get(book_url)
res.encoding = res.apparent_encoding
book_text = res.text
book_data = parse_book_file(book_text)
insert_book_bulk_data(book_data)
def parse_book_file(book_text) -> Book:
title_regex = re.compile(r'^Title:\s(.+)$', re.MULTILINE)
title = 'Unknown title'
author_regex = re.compile(r'^Author:\s(.+)$', re.MULTILINE)
author = 'Unknown author'
start_of_book_regex = re.compile(r'^\*{3}\s*START(.+)\*{3}', re.MULTILINE)
end_of_book_regex = re.compile(r'^\*{3}\s*END(.+)\*{3}', re.MULTILINE)
split_lines_regex = re.compile(r'\n\s+\n', re.MULTILINE)
match = title_regex.search(book_text)
if match:
title = match.group(1).strip()
match = author_regex.search(book_text)
if match:
author = match.group(1).strip()
current_app.logger.info(f'Reading Book - {title} By {author}')
start_of_book_match = start_of_book_regex.search(book_text)
start_of_book_index = start_of_book_match.start() + len(start_of_book_match.group())
end_of_book_index = end_of_book_regex.search(book_text).start()
book_text = book_text[start_of_book_index:end_of_book_index]
book_paragraphs = split_lines_regex.split(book_text)
book_paragraphs_no_carriage = [paragraph.replace('\r', '') for paragraph in book_paragraphs]
book_paragraphs_no_new_line = [paragraph.replace('\n', ' ') for paragraph in book_paragraphs_no_carriage]
book_paragraphs_no_italic_signal = [paragraph.replace('_', '') for paragraph in book_paragraphs_no_new_line]
cleaned_book_paragraphs = [paragraph for paragraph in book_paragraphs_no_italic_signal if paragraph]
current_app.logger.info(f'Parsed {len(cleaned_book_paragraphs)} book paragraphs')
return Book(title, author, cleaned_book_paragraphs)
def insert_book_bulk_data(book_data):
data = []
for i, paragraph in enumerate(book_data.paragraphs):
data.append({'index': {'_index': searchable_book_index, '_type': searchable_book_doc_type, '_id': str(uuid.uuid4())}})
data.append({
'author': book_data.author,
'title': book_data.title,
'location': i,
'text': paragraph
})
# Do bulk insert after every 500 paragraphs
if i > 0 and i % 500 == 0:
es_service.add_bulk_data(data)
data = []
current_app.logger.info(f'Indexed paragraphs {i - 499} - {i}')
es_service.add_bulk_data(data)
current_app.logger.info(f'Indexed paragraphs {len(book_data.paragraphs) - len(data)//2} - {len(book_data.paragraphs)}')
def search_book_data(query, search_page):
body = {
'from': (search_page.page - 1) * search_page.per_page,
'size': search_page.per_page,
'query': {
'match': {
'text': {
'query': query,
'operator': 'and',
'fuzziness': 'auto'
}
}
},
'highlight': {
'fields': {
'text': {}
}
}
}
return es_service.query_index_page(searchable_book_index, body)
def get_book_paragraphs(book_title, start, end):
query_filter = [
{'term': {'title': book_title}},
{'range': {'location': {'gte': start, 'lte': end}}}
]
body = {
'size': end - start,
'sort': {'location': 'asc'},
'query': {'bool': {'filter': query_filter}}
}
return es_service.query_index_page(searchable_book_index, body)
|
api/app/book/services.py
|
import requests
import re
import uuid
from flask import current_app
from app.book.models import Book, SearchableBookMapping, searchable_book_index, searchable_book_doc_type
from app.search import services as es_service
def check_connection():
if not es_service.check_connection():
raise ValueError("Connection to search/storage service is unavailable. Please try again.")
return True
def create_index_with_mapping():
es_service.create_index(searchable_book_index)
es_service.add_mapping_to_index(searchable_book_index, searchable_book_doc_type, SearchableBookMapping)
def read_and_insert_books(book_url):
res = requests.get(book_url)
res.encoding = res.apparent_encoding
book_text = res.text
book_data = parse_book_file(book_text)
insert_book_bulk_data(book_data)
def parse_book_file(book_text) -> Book:
title_regex = re.compile(r'^Title:\s(.+)$', re.MULTILINE)
title = 'Unknown title'
author_regex = re.compile(r'^Author:\s(.+)$', re.MULTILINE)
author = 'Unknown author'
start_of_book_regex = re.compile(r'^\*{3}\s*START(.+)\*{3}', re.MULTILINE)
end_of_book_regex = re.compile(r'^\*{3}\s*END(.+)\*{3}', re.MULTILINE)
split_lines_regex = re.compile(r'\n\s+\n', re.MULTILINE)
match = title_regex.search(book_text)
if match:
title = match.group(1).strip()
match = author_regex.search(book_text)
if match:
author = match.group(1).strip()
current_app.logger.info(f'Reading Book - {title} By {author}')
start_of_book_match = start_of_book_regex.search(book_text)
start_of_book_index = start_of_book_match.start() + len(start_of_book_match.group())
end_of_book_index = end_of_book_regex.search(book_text).start()
book_text = book_text[start_of_book_index:end_of_book_index]
book_paragraphs = split_lines_regex.split(book_text)
book_paragraphs_no_carriage = [paragraph.replace('\r', '') for paragraph in book_paragraphs]
book_paragraphs_no_new_line = [paragraph.replace('\n', ' ') for paragraph in book_paragraphs_no_carriage]
book_paragraphs_no_italic_signal = [paragraph.replace('_', '') for paragraph in book_paragraphs_no_new_line]
cleaned_book_paragraphs = [paragraph for paragraph in book_paragraphs_no_italic_signal if paragraph]
current_app.logger.info(f'Parsed {len(cleaned_book_paragraphs)} book paragraphs')
return Book(title, author, cleaned_book_paragraphs)
def insert_book_bulk_data(book_data):
data = []
for i, paragraph in enumerate(book_data.paragraphs):
data.append({'index': {'_index': searchable_book_index, '_type': searchable_book_doc_type, '_id': str(uuid.uuid4())}})
data.append({
'author': book_data.author,
'title': book_data.title,
'location': i,
'text': paragraph
})
# Do bulk insert after every 500 paragraphs
if i > 0 and i % 500 == 0:
es_service.add_bulk_data(data)
data = []
current_app.logger.info(f'Indexed paragraphs {i - 499} - {i}')
es_service.add_bulk_data(data)
current_app.logger.info(f'Indexed paragraphs {len(book_data.paragraphs) - len(data)//2} - {len(book_data.paragraphs)}')
def search_book_data(query, search_page):
body = {
'from': (search_page.page - 1) * search_page.per_page,
'size': search_page.per_page,
'query': {
'match': {
'text': {
'query': query,
'operator': 'and',
'fuzziness': 'auto'
}
}
},
'highlight': {
'fields': {
'text': {}
}
}
}
return es_service.query_index_page(searchable_book_index, body)
def get_book_paragraphs(book_title, start, end):
query_filter = [
{'term': {'title': book_title}},
{'range': {'location': {'gte': start, 'lte': end}}}
]
body = {
'size': end - start,
'sort': {'location': 'asc'},
'query': {'bool': {'filter': query_filter}}
}
return es_service.query_index_page(searchable_book_index, body)
| 0.304869 | 0.310054 |
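A minimal end-to-end sketch of the book services above, assuming a configured Flask application factory and an Elasticsearch-backed app.search service; the factory name, the Gutenberg URL, the pagination object, and the book title are illustrative assumptions, not values taken from the file.

from types import SimpleNamespace

from app import create_app                      # assumed application factory
from app.book import services as book_service

app = create_app()
with app.app_context():                         # the services log via current_app
    book_service.check_connection()             # raises if Elasticsearch is unreachable
    book_service.create_index_with_mapping()

    # Download, parse, and bulk-index a plain-text Project Gutenberg book.
    book_service.read_and_insert_books(
        'https://www.gutenberg.org/cache/epub/1342/pg1342.txt')  # illustrative URL

    # search_book_data only reads .page and .per_page from its second argument,
    # so a simple namespace stands in for the app's pagination object here.
    paging = SimpleNamespace(page=1, per_page=10)
    hits = book_service.search_book_data('truth universally acknowledged', paging)

    # Once a hit's title and location are known, pull surrounding paragraphs
    # (the title must match what was parsed and indexed for this book).
    context = book_service.get_book_paragraphs('Pride and Prejudice', 40, 50)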