def add_remove(self):
""" Method to add or remove a LabelBar. Depending on the value of the
drop down menu the LabelBar is added if it is not present otherwise
removed."""
field = self.ids.data_spinner.text
if field == LABEL_SPINNER_TEXT:
return
if field in self.labels and not self.dual_mode:
self.remove_widget(self.labels[field])
del(self.labels[field])
self.screen.status(f'Removing {field}')
else:
# in dual mode replace the second entry with the new one
if self.dual_mode and len(self.labels) == 2:
k, v = list(self.labels.items())[-1]
self.remove_widget(v)
del(self.labels[k])
field_property = rc_handler.field_properties.get(decompose(field)[0])
cfg = tub_screen().ids.config_manager.config
lb = LabelBar(field=field, field_property=field_property, config=cfg)
self.labels[field] = lb
self.add_widget(lb)
lb.update(self.record)
if len(self.labels) == 2:
self.throttle_field = field
self.screen.status(f'Adding {field}')
if self.screen.name == 'tub':
self.screen.ids.data_plot.plot_from_current_bars()
self.ids.data_spinner.text = LABEL_SPINNER_TEXT
self.auto_text = field

def update(self, record):
""" This method is called ever time a record gets updated. """
try:
img_arr = self.get_image(record)
pil_image = PilImage.fromarray(img_arr)
bytes_io = io.BytesIO()
pil_image.save(bytes_io, format='png')
bytes_io.seek(0)
self.core_image = CoreImage(bytes_io, ext='png')
self.texture = self.core_image.texture
except KeyError as e:
Logger.error('Record: Missing key:', e)
except Exception as e:
Logger.error('Record: Bad record:', str(e))

def step(self, fwd=True, continuous=False, *largs):
"""
Update by a single step and cap/floor the index so we stay within the tub.
:param fwd: If we go forward or backward
:param continuous: If we are in continuous mode <<, >>
:param largs: dummy
:return: None
"""
new_index = self.screen.index + (1 if fwd else -1)
if new_index >= tub_screen().ids.tub_loader.len:
new_index = 0
elif new_index < 0:
new_index = tub_screen().ids.tub_loader.len - 1
self.screen.index = new_index
msg = f'Donkey {"run" if continuous else "step"} ' \
f'{"forward" if fwd else "backward"}'
if not continuous:
msg += f' - you can also use {"<right>" if fwd else "<left>"} key'
else:
msg += ' - you can toggle run/stop with <space>'
self.screen.status(msg)
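A side note on the cap/floor logic above: for single steps (forward or backward by one record) the same wrap-around can be written with modulo arithmetic. The snippet below is only an illustrative sketch; `wrap_index` is an invented name and is not part of the UI code.

```python
def wrap_index(index: int, step: int, length: int) -> int:
    # For step = +1 or -1 and 0 <= index < length this reproduces the
    # behaviour above: stepping past the last record wraps to 0 and
    # stepping before the first record wraps to length - 1.
    return (index + step) % length

assert wrap_index(99, 1, 100) == 0
assert wrap_index(0, -1, 100) == 99
```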
def update_speed(self, up=True):
""" Method to update the speed on the controller"""
values = self.ids.control_spinner.values
idx = values.index(self.ids.control_spinner.text)
if up and idx < len(values) - 1:
self.ids.control_spinner.text = values[idx + 1]
elif not up and idx > 0:
self.ids.control_spinner.text = values[idx - 1]

def on_keyboard(self, key, scancode):
""" Method to chack with keystroke has ben sent. """
if key == ' ':
if self.clock and self.clock.is_triggered:
self.stop()
self.set_button_status(disabled=False)
self.screen.status('Donkey stopped')
else:
self.start(continuous=True)
self.set_button_status(disabled=True)
elif scancode == 79:
self.step(fwd=True)
elif scancode == 80:
self.step(fwd=False)
elif scancode == 45:
self.update_speed(up=False)
elif scancode == 46:
self.update_speed(up=True)

def del_lr(self, is_del):
""" Deletes or restores records in chosen range """
tub = tub_screen().ids.tub_loader.tub
if self.lr[1] >= self.lr[0]:
selected = list(range(*self.lr))
else:
last_id = tub.manifest.current_index
selected = list(range(self.lr[0], last_id))
selected += list(range(self.lr[1]))
tub.delete_records(selected) if is_del else tub.restore_records(selected)

def plot_from_current_bars(self, in_app=True):
""" Plotting from current selected bars. The DataFrame for plotting
should contain all bars except for strings fields and all data is
selected if bars are empty. """
tub = tub_screen().ids.tub_loader.tub
field_map = dict(zip(tub.manifest.inputs, tub.manifest.types))
# Use selected fields, or all fields if nothing is selected
all_cols = tub_screen().ids.data_panel.labels.keys() or self.df.columns
cols = [c for c in all_cols if decompose(c)[0] in field_map
and field_map[decompose(c)[0]] not in ('image_array', 'str')]
df = self.df[cols]
if df is None:
return
# Don't plot the milliseconds time stamp as this is a too big number
df = df.drop(labels=['_timestamp_ms'], axis=1, errors='ignore')
if in_app:
tub_screen().ids.graph.df = df
else:
fig = px.line(df, x=df.index, y=df.columns, title=tub.base_path)
fig.update_xaxes(rangeslider=dict(visible=True))
fig.show()

def unravel_vectors(self):
""" Unravels vector and list entries in tub which are created
when the DataFrame is created from a list of records"""
manifest = tub_screen().ids.tub_loader.tub.manifest
for k, v in zip(manifest.inputs, manifest.types):
if v == 'vector' or v == 'list':
dim = len(tub_screen().current_record.underlying[k])
df_keys = [k + f'_{i}' for i in range(dim)]
self.df[df_keys] = pd.DataFrame(self.df[k].tolist(),
index=self.df.index)
self.df.drop(k, axis=1, inplace=True)

def update_dataframe_from_tub(self):
""" Called from TubManager when a tub is reloaded/recreated. Fills
the DataFrame from records, and updates the dropdown menu in the
data panel."""
generator = (t.underlying for t in tub_screen().ids.tub_loader.records)
self.df = pd.DataFrame(generator).dropna()
to_drop = {'cam/image_array'}
self.df.drop(labels=to_drop, axis=1, errors='ignore', inplace=True)
self.df.set_index('_index', inplace=True)
self.unravel_vectors()
tub_screen().ids.data_panel.ids.data_spinner.values = self.df.columns
self.plot_from_current_bars()

def on_current_record(self, obj, record):
""" Kivy method that is called if self.current_record changes."""
self.ids.img.update(record)
i = record.underlying['_index']
self.ids.control_panel.record_display = f"Record {i:06}"

def on_model_type(self, obj, model_type):
""" Kivy method that is called if self.model_type changes. """
if self.model_type and self.model_type != 'Model type':
cfg = tub_screen().ids.config_manager.config
if cfg:
self.pilot = get_model_by_type(self.model_type, cfg)
self.ids.pilot_button.disabled = False
if 'tflite' in self.model_type:
self.filters = ['*.tflite']
elif 'tensorrt' in self.model_type:
self.filters = ['*.trt']
else:
self.filters = ['*.h5', '*.savedmodel']

def on_index(self, obj, index):
""" Kivy method that is called if self.index changes. Here we update
self.current_record and the slider value. """
if tub_screen().ids.tub_loader.records:
self.current_record = tub_screen().ids.tub_loader.records[index]
self.ids.slider.value = index

def on_current_record(self, obj, record):
""" Kivy method that is called when self.current_index changes. Here
we update the images and the control panel entry."""
i = record.underlying['_index']
self.ids.pilot_control.record_display = f"Record {i:06}"
self.ids.img_1.update(record)
self.ids.img_2.update(record)

def map_pilot_field(self, text):
""" Method to return user -> pilot mapped fields except for the
initial value called Add/remove. """
if text == LABEL_SPINNER_TEXT:
return text
return rc_handler.data['user_pilot_map'][text]

def run(self, cam_img):
'''
main runloop of the CV controller
input: cam_image, an RGB numpy array
output: steering, throttle, and recording flag
'''
if cam_img is None:
return 0, 0, False
max_yellow, confidense, mask = self.get_i_color(cam_img)
conf_thresh = 0.001
if self.target_pixel is None:
# Use the first run of get_i_color to set our relationship with the yellow line.
# You could optionally init the target_pixel with the desired value.
self.target_pixel = max_yellow
# this is the target of our steering PID controller
self.pid_st.setpoint = self.target_pixel
elif confidense > conf_thresh:
# invoke the controller with the current yellow line position
# get the new steering value as it chases the ideal
self.steering = self.pid_st(max_yellow)
# slow down linearly when away from ideal, and speed up when close
if abs(max_yellow - self.target_pixel) > 10:
if self.throttle > self.throttle_min:
self.throttle -= self.delta_th
else:
if self.throttle < self.throttle_max:
self.throttle += self.delta_th
# show some diagnostics
if self.debug:
self.debug_display(cam_img, mask, max_yellow, confidense)
return self.steering, self.throttle, self.recording

def debug_display(self, cam_img, mask, max_yellow, confidense):
'''
composite mask on top the original image.
show some values we are using for control
'''
mask_exp = np.stack((mask,)*3, axis=-1)
iSlice = self.scan_y
img = np.copy(cam_img)
img[iSlice : iSlice + self.scan_height, :, :] = mask_exp
img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
display_str = []
display_str.append("STEERING:{:.1f}".format(self.steering))
display_str.append("THROTTLE:{:.2f}".format(self.throttle))
display_str.append("I YELLOW:{:d}".format(max_yellow))
display_str.append("CONF:{:.2f}".format(confidense))
y = 10
x = 10
for s in display_str:
cv2.putText(img, s, color=(0,255,255), org=(x,y), fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=0.4)
y += 10
cv2.namedWindow('image', cv2.WINDOW_NORMAL)
cv2.imshow("image", img)
cv2.resizeWindow('image', 300,300)
cv2.waitKey(1)

def drive(cfg, args):
'''
Construct a working robotic vehicle from many parts.
Each part runs as a job in the Vehicle loop, calling either
its run or run_threaded method depending on the constructor flag `threaded`.
All parts are updated one after another at the framerate given in
cfg.DRIVE_LOOP_HZ assuming each part finishes processing in a timely manner.
Parts may have named outputs and inputs. The framework handles passing named outputs
to parts requesting the same named input.
'''
#Initialize car
V = dk.vehicle.Vehicle()
#Camera
if cfg.DONKEY_GYM:
from donkeycar.parts.dgym import DonkeyGymEnv
cfg.GYM_CONF['racer_name'] = args['--name']
cfg.GYM_CONF['car_name'] = args['--name']
cam = DonkeyGymEnv(cfg.DONKEY_SIM_PATH, host=cfg.SIM_HOST, env_name=cfg.DONKEY_GYM_ENV_NAME, conf=cfg.GYM_CONF, delay=cfg.SIM_ARTIFICIAL_LATENCY)
inputs = ['steering', 'throttle']
else:
from donkeycar.parts.camera import PiCamera
cam = PiCamera(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H, image_d=cfg.IMAGE_DEPTH)
inputs = []
V.add(cam, inputs=inputs, outputs=['cam/image_array'], threaded=True)
#Controller
V.add(LineFollower(bool(args['--debug'])),
inputs=['cam/image_array'],
outputs=['steering', 'throttle', 'recording'])
#Drive train setup
if not cfg.DONKEY_GYM:
from donkeycar.parts.actuator import PCA9685, PWMSteering, PWMThrottle
steering_controller = PCA9685(cfg.STEERING_CHANNEL, cfg.PCA9685_I2C_ADDR, busnum=cfg.PCA9685_I2C_BUSNUM)
steering = PWMSteering(controller=steering_controller,
left_pulse=cfg.STEERING_LEFT_PWM,
right_pulse=cfg.STEERING_RIGHT_PWM)
throttle_controller = PCA9685(cfg.THROTTLE_CHANNEL, cfg.PCA9685_I2C_ADDR, busnum=cfg.PCA9685_I2C_BUSNUM)
throttle = PWMThrottle(controller=throttle_controller,
max_pulse=cfg.THROTTLE_FORWARD_PWM,
zero_pulse=cfg.THROTTLE_STOPPED_PWM,
min_pulse=cfg.THROTTLE_REVERSE_PWM)
V.add(steering, inputs=['steering'])
V.add(throttle, inputs=['throttle'])
#add tub to save data
inputs=['cam/image_array',
'steering', 'throttle']
types=['image_array',
'float', 'float']
th = TubHandler(path=cfg.DATA_PATH)
tub = th.new_tub_writer(inputs=inputs, types=types)
V.add(tub, inputs=inputs, outputs=["tub/num_records"], run_condition="recording")
#run the vehicle
V.start(rate_hz=cfg.DRIVE_LOOP_HZ,
max_loop_count=cfg.MAX_LOOPS)
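The docstring above describes the part protocol: each part exposes a `run()` (or `run_threaded()`) method, and `V.add()` wires its named inputs and outputs into the loop. As a rough sketch of what a minimal custom part could look like (the `Blinker` class and the `led_on` output name are invented for illustration, not part of the template):

```python
class Blinker:
    """Toy part: reads the named 'throttle' input and emits a boolean output."""
    def run(self, throttle):
        # Called once per loop at cfg.DRIVE_LOOP_HZ with the current throttle value.
        return throttle is not None and throttle > 0.5

# Wiring it into the loop would mirror the V.add() calls above, e.g.:
# V.add(Blinker(), inputs=['throttle'], outputs=['led_on'])
```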
def drive(cfg):
'''
Construct a working robotic vehicle from many parts.
Each part runs as a job in the Vehicle loop, calling either
its run or run_threaded method depending on the constructor flag `threaded`.
All parts are updated one after another at the framerate given in
cfg.DRIVE_LOOP_HZ assuming each part finishes processing in a timely manner.
Parts may have named outputs and inputs. The framework handles passing named outputs
to parts requesting the same named input.
'''
#Initialize car
V = dk.vehicle.Vehicle()
ctr = LocalWebController(port=cfg.WEB_CONTROL_PORT)
V.add(ctr,
inputs=['cam/image_array', 'tub/num_records'],
outputs=['angle', 'throttle', 'user/mode', 'recording'],
threaded=True)
#this throttle filter will allow one tap back for esc reverse
th_filter = ThrottleFilter()
V.add(th_filter, inputs=['throttle'], outputs=['throttle'])
drive_train = None
#Drive train setup
if cfg.DONKEY_GYM or cfg.DRIVE_TRAIN_TYPE == "MOCK":
pass
elif cfg.DRIVE_TRAIN_TYPE == "PWM_STEERING_THROTTLE":
#
# drivetrain for RC car with servo and ESC.
# using a PwmPin for steering (servo)
# and as second PwmPin for throttle (ESC)
#
from donkeycar.parts.actuator import PWMSteering, PWMThrottle, PulseController
steering_controller = PulseController(
pwm_pin=pins.pwm_pin_by_id(cfg.PWM_STEERING_PIN),
pwm_scale=cfg.PWM_STEERING_SCALE,
pwm_inverted=cfg.PWM_STEERING_INVERTED)
steering = PWMSteering(controller=steering_controller,
left_pulse=cfg.STEERING_LEFT_PWM,
right_pulse=cfg.STEERING_RIGHT_PWM)
throttle_controller = PulseController(
pwm_pin=pins.pwm_pin_by_id(cfg.PWM_THROTTLE_PIN),
pwm_scale=cfg.PWM_THROTTLE_SCALE,
pwm_inverted=cfg.PWM_THROTTLE_INVERTED)
throttle = PWMThrottle(controller=throttle_controller,
max_pulse=cfg.THROTTLE_FORWARD_PWM,
zero_pulse=cfg.THROTTLE_STOPPED_PWM,
min_pulse=cfg.THROTTLE_REVERSE_PWM)
drive_train = dict()
drive_train['steering'] = steering
drive_train['throttle'] = throttle
V.add(steering, inputs=['angle'], threaded=True)
V.add(throttle, inputs=['throttle'], threaded=True)
elif cfg.DRIVE_TRAIN_TYPE == "I2C_SERVO":
from donkeycar.parts.actuator import PCA9685, PWMSteering, PWMThrottle
steering_controller = PCA9685(cfg.STEERING_CHANNEL, cfg.PCA9685_I2C_ADDR, busnum=cfg.PCA9685_I2C_BUSNUM)
steering = PWMSteering(controller=steering_controller,
left_pulse=cfg.STEERING_LEFT_PWM,
right_pulse=cfg.STEERING_RIGHT_PWM)
throttle_controller = PCA9685(cfg.THROTTLE_CHANNEL, cfg.PCA9685_I2C_ADDR, busnum=cfg.PCA9685_I2C_BUSNUM)
throttle = PWMThrottle(controller=throttle_controller,
max_pulse=cfg.THROTTLE_FORWARD_PWM,
zero_pulse=cfg.THROTTLE_STOPPED_PWM,
min_pulse=cfg.THROTTLE_REVERSE_PWM)
drive_train = dict()
drive_train['steering'] = steering
drive_train['throttle'] = throttle
V.add(steering, inputs=['angle'], threaded=True)
V.add(throttle, inputs=['throttle'], threaded=True)
elif cfg.DRIVE_TRAIN_TYPE == "MM1":
from donkeycar.parts.robohat import RoboHATDriver
drive_train = RoboHATDriver(cfg)
V.add(drive_train, inputs=['angle', 'throttle'])
ctr.drive_train = drive_train
ctr.drive_train_type = cfg.DRIVE_TRAIN_TYPE
class ShowHowTo:
def __init__(self):
print(f"Go to http://{gethostname()}.local:{ctr.port}/calibrate to calibrate ")
def run(self):
pass
V.add(ShowHowTo())
#run the vehicle
V.start(rate_hz=cfg.DRIVE_LOOP_HZ,
max_loop_count=cfg.MAX_LOOPS)

def duty_cycle(pulse_ms:float, frequency_hz:float) -> float:
"""
Calculate the duty cycle, 0 to 1, of a pulse given
the frequency and the pulse length
:param pulse_ms:float the desired pulse length in milliseconds
:param frequency_hz:float the pwm frequency in hertz
:return:float duty cycle in range 0 to 1
"""
ms_per_cycle = 1000 / frequency_hz
duty = pulse_ms / ms_per_cycle
return duty
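A quick worked example of the helper above, assuming `duty_cycle()` is in scope in the same module. The 60 Hz frequency and 1.5 ms pulse are illustrative values, not taken from the source:

```python
# At 60 Hz one PWM cycle lasts 1000 / 60 ≈ 16.67 ms, so a 1.5 ms pulse
# corresponds to a duty cycle of 1.5 / 16.67 ≈ 0.09.
print(duty_cycle(pulse_ms=1.5, frequency_hz=60))  # ~0.09
```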
def pulse_ms(pulse_bits:int) -> float:
"""
Calculate pulse width in milliseconds given a
12bit pulse (as a PCA9685 would use).
Donkeycar throttle and steering PWM values are
based on PCA9685 12bit pulse values, where
0 is zero duty cycle and 4095 is 100% duty cycle.
:param pulse_bits:int 12bit integer in range 0 to 4095
:return:float pulse length in milliseconds
"""
if pulse_bits < 0 or pulse_bits > 4095:
raise ValueError("pulse_bits must be in range 0 to 4095 (12bit integer)")
return pulse_bits / 4095

def run(self, pulse:int) -> None:
"""
Set the length of the pulse using a 12 bit integer (0..4095)
:param pulse:int 12bit integer (0..4095)
"""
self.set_pulse(pulse)

def read_pwm(self):
'''
send read requests via i2c bus to Teensy to get
pwm control values from last RC input
'''
h1 = self.pwm._device.readU8(self.register)
# first byte of header must be 100, otherwise we might be reading
# at the wrong byte offset
while h1 != 100:
logger.debug("skipping to start of header")
h1 = self.pwm._device.readU8(self.register)
h2 = self.pwm._device.readU8(self.register)
# h2 ignored now
val_a = self.pwm._device.readU8(self.register)
val_b = self.pwm._device.readU8(self.register)
self.steering = (val_b << 8) + val_a
val_c = self.pwm._device.readU8(self.register)
val_d = self.pwm._device.readU8(self.register)
self.throttle = (val_d << 8) + val_c
# scale the values from -1 to 1
self.steering = (self.steering - 1500.0) / 500.0 + 0.158
self.throttle = (self.throttle - 1500.0) / 500.0 + 0.136

def run(self, throttle:float) -> None:
"""
Update the speed of the motor
:param throttle:float throttle value in range -1 to 1,
where 1 is full forward and -1 is full backwards.
"""
if throttle > 1 or throttle < -1:
raise ValueError( "Speed must be between 1(forward) and -1(reverse)")
self.speed = throttle
self.throttle = dk.utils.map_range_float(throttle, -1, 1, -self.max_duty, self.max_duty)
if self.throttle > self.zero_throttle:
self.pwm_pin.duty_cycle(self.throttle)
self.pin_backward.output(PinState.LOW)
self.pin_forward.output(PinState.HIGH)
elif self.throttle < -self.zero_throttle:
self.pwm_pin.duty_cycle(-self.throttle)
self.pin_forward.output(PinState.LOW)
self.pin_backward.output(PinState.HIGH)
else:
self.pwm_pin.duty_cycle(0)
self.pin_forward.output(PinState.LOW)
self.pin_backward.output(PinState.LOW)

def run(self, throttle:float) -> None:
"""
Update the speed of the motor
:param throttle:float throttle value in range -1 to 1,
where 1 is full forward and -1 is full backwards.
"""
if throttle is None:
return
if throttle > 1 or throttle < -1:
raise ValueError( "Throttle must be between 1(forward) and -1(reverse)")
self.speed = throttle
self.throttle = dk.utils.map_range_float(throttle, -1, 1, -self.max_duty, self.max_duty)
if self.throttle > self.zero_throttle:
self.pin_backward.duty_cycle(0)
self.pin_forward.duty_cycle(self.throttle)
elif self.throttle < -self.zero_throttle:
self.pin_forward.duty_cycle(0)
self.pin_backward.duty_cycle(-self.throttle)
else:
self.pin_forward.duty_cycle(0)
self.pin_backward.duty_cycle(0)
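Both motor `run()` methods above rely on `dk.utils.map_range_float` to rescale throttle from [-1, 1] into [-max_duty, max_duty]. That helper is not reproduced here; the sketch below is a generic linear interpolation with the same intent (the name `linear_map` is invented, and donkeycar's version may additionally clamp or round):

```python
def linear_map(x: float, in_min: float, in_max: float,
               out_min: float, out_max: float) -> float:
    # Linearly rescale x from [in_min, in_max] to [out_min, out_max].
    return out_min + (x - in_min) * (out_max - out_min) / (in_max - in_min)

# e.g. half throttle with max_duty = 0.9:
print(linear_map(0.5, -1, 1, -0.9, 0.9))  # 0.45
```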
def text_to_instance(self, # type: ignore
tokens: List[Token],
ner_tags: List[str] = None) -> Instance:
"""
We take `pre-tokenized` input here, because we don't have a tokenizer in this class.
"""
# pylint: disable=arguments-differ
sequence = TextField(tokens, self._token_indexers)
instance_fields: Dict[str, Field] = {'tokens': sequence}
instance_fields["metadata"] = MetadataField({"words": [x.text for x in tokens]})
# Add "tag label" to instance
instance_fields['tags'] = SequenceLabelField(ner_tags, sequence)
return Instance(instance_fields)

def token_to_indices(self, token: Token, vocabulary: Vocabulary) -> TokenType:
"""
Takes a string token and converts it into indices. This could return an ID for the token
from the vocabulary, or it could split the token into characters and return a list of
IDs for each character from the vocabulary, or something else.
"""
raise NotImplementedError

def pad_token_sequence(self,
tokens: List[TokenType],
desired_num_tokens: int,
padding_lengths: Dict[str, int]) -> List[TokenType]:
"""
This method pads a list of tokens to ``desired_num_tokens`` and returns a padded copy of the
input tokens. If the input token list is longer than ``desired_num_tokens`` then it will be
truncated.
``padding_lengths`` is used to provide supplemental padding parameters which are needed
in some cases. For example, it contains the widths to pad characters to when doing
character-level padding.
"""
raise NotImplementedError
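The docstring above defines the padding contract but leaves the implementation to subclasses. A minimal standalone sketch of that pad-or-truncate behaviour for single-ID tokens (not the library's actual implementation; `pad_or_truncate` is an invented name):

```python
from typing import List

def pad_or_truncate(token_ids: List[int],
                    desired_num_tokens: int,
                    padding_value: int = 0) -> List[int]:
    # Truncate if the sequence is too long, otherwise right-pad.
    padded = token_ids[:desired_num_tokens]
    padded += [padding_value] * (desired_num_tokens - len(padded))
    return padded

assert pad_or_truncate([5, 7, 9], 5) == [5, 7, 9, 0, 0]
assert pad_or_truncate([5, 7, 9], 2) == [5, 7]
```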
def configDownloadFrom(self, kachery_names: StrOrStrList) -> None:
"""
Configure uris to download entities from particular kacheries.
Parameters
----------
kachery_names : str or iterable
Kachery names to enable
"""
if type(kachery_names) == str:
kachery_names = [str(kachery_names)]
for kname in kachery_names:
if kname not in self._config_download_from:
self._config_download_from.append(kname)

def configVerbose(self, value: bool) -> None:
"""Toggle on or off verbose mode
Parameters
----------
value : bool
Whether to turn on verbose mode
"""
self._verbose = value
self._local_db.configVerbose(value)

def loadText(self, *,
path: Optional[str]=None,
key: Optional[StrOrDict]=None,
subkey: Optional[str]=None,
collection: Optional[str]=None,
download_from: Optional[StrOrStrList]=None,
local_only: bool=False,
remote_only: bool=False
) -> Optional[str]:
"""
Get content of a specified file, downloading the file from a remote server if needed.
For detailed info on what you can pass as path or key, see docs for realizeFile().
Parameters
----------
path : str, optional
The path of a file to read. This could either be a local path, a
sha1:// URI, or a sha1dir:// URI as described in docs for
realizeFile(). Either path or key must be provided, but not both.
key : str, optional
The key used for locating the file as described in docs for realizeFile().
Either path or key must be provided, but not both.
subkey : str, optional
The optional subkey as described in the docs for getValue() and
setValue() (the default is None)
Returns
-------
str or None
Content of downloaded file or None if the file was not found or could
not be opened.
"""
if path and path.startswith('key://'):
path = self.resolveKeyPath(path)
if not path:
return None
fname = self.realizeFile(
key=key, path=path, subkey=subkey, collection=collection, download_from=download_from, local_only=local_only, remote_only=remote_only)
if fname is None:
return None
try:
with open(fname) as f:
return f.read()
except:
print('Unexpected problem reading file in loadText: ' + fname)
return None

def saveText(self, text: str, *,
key: Optional[StrOrDict]=None,
subkey: Optional[str]=None,
collection: Optional[str]=None,
basename='file.txt',
dest_path: Optional[str]=None,
upload_to: Optional[StrOrStrList]=None
) -> Optional[str]:
"""
Save given text to a file, put that file in the local SHA-1 cache and
optionally upload to a remote kachery. If key (but not collection) is
provided, a reference to the file is also stored in the local key/value
database under that key. If both key and collection are provided, then
the reference to the file is stored in the remote pairio database
collection under that key. Returns a sha1:// URI referring to the file.
Parameters
----------
text : str
The text to save
key : Optional[StrOrDict], optional
The key for storing the reference to the file, by default None
subkey : Optional[str], optional
The optional subkey for storing the reference to the file, by
default None
collection : Optional[str], optional
The optional collection for remote pairio storage, by default None
basename : str, optional
The base name for forming the sha1:// URI to be returned, by default
'file.txt'
dest_path : Optional[str], optional
The optional destination path which could be a local file path or a
key:// URI, by default None
upload_to : Optional[StrOrStrList], optional
A list of kacheries to upload the file content to, by default None
Returns
-------
Optional[str]
The sha1:// URI to the file.
"""
if text is None:
self.setValue(key=key, subkey=subkey,
value=None, collection=collection)
return
if dest_path is None:
tmp_fname = _create_temporary_file_for_text(text=text)
else:
with open(dest_path, 'w') as f:
f.write(text)
tmp_fname = dest_path
try:
ret = self.saveFile(tmp_fname, key=key, subkey=subkey, collection=collection,
basename=basename, upload_to=upload_to)
except:
if dest_path is None:
os.unlink(tmp_fname)
raise
if dest_path is None:
os.unlink(tmp_fname)
return ret

def loadObject(self, *,
key: Optional[StrOrDict]=None,
path: Optional[str]=None,
subkey: Optional[str]=None,
collection: Optional[str]=None,
download_from: Optional[StrOrStrList]=None,
local_only: bool=False,
remote_only: bool=False
) -> Optional[dict]:
"""
Return contents of a given JSON file as Python dictionary, downloading the file if necessary.
Parameters
----------
key : str or dict
The key used to look up the value
subkey : str, optional
A subkey string (the default is None, which means that no subkey is
used). To retrieve values for all subkeys, use subkey='-'.
collection : str, optional
The name of the collection to retrieve the value from, which may be
different from the collection specified in configRemoteReadonly()
configRemoteReadWrite() (the default is None, which means that the
configured collection is used)
Returns
-------
dict or None
Dictionary representing JSON object stored in the file
or None if data could not be retrieved.
"""
if path and path.startswith('key://'):
path = self.resolveKeyPath(path)
if not path:
return None
txt = self.loadText(key=key, path=path,
subkey=subkey, collection=collection, download_from=download_from, local_only=local_only, remote_only=remote_only)
try:
if txt is not None:
return json.loads(txt)
except:
print('WARNING: unable to parse json in loadObject.', path, key, subkey)
return None

def saveObject(self, object: dict, *,
key: Optional[StrOrDict]=None,
subkey: Optional[str]=None,
basename: Optional[str]='object.json',
dest_path: Optional[str]=None,
collection: Optional[str]=None,
upload_to: Optional[StrOrStrList]=None,
indent: Optional[int]=None) -> Optional[str]:
"""
Save object into a json file and/or upload it to a remote kachery.
Parameters
----------
object : dict
Object to be saved.
key : str, optional
The key used for locating the file as described in the docs for
realizeFile()
subkey : str, optional
The optional subkey as described in the docs for getValue() and
setValue() (the default is None)
Returns
-------
str or None
A SHA-1 URI for the saved or uploaded file, or None if the file was
unable to be saved.
"""
if object is None:
self.setValue(key=key, subkey=subkey, collection=collection,
value=None)
return
# return self.saveText(text=simplejson.dumps(object, indent=indent, ignore_nan=True), key=key, collection=collection, subkey=subkey, basename=basename, dest_path=dest_path, upload_to=upload_to)
return self.saveText(text=json.dumps(object, indent=indent), key=key, collection=collection, subkey=subkey, basename=basename, dest_path=dest_path, upload_to=upload_to)
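Taken together, `saveObject()` and `loadObject()` give a key-addressed round trip for JSON-serializable dictionaries. A hedged usage sketch follows; the client instance `mt` and the key name are assumptions for illustration, and only the method signatures shown above are relied on:

```python
# Assuming `mt` is an instance of the client class documented above.
uri = mt.saveObject({'samplerate': 30000, 'channels': 4}, key='recording-params')
params = mt.loadObject(key='recording-params')
print(uri, params)
```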
def saveFile(self, path: Optional[str]=None, *,
key: Optional[StrOrDict]=None,
subkey: Optional[str]=None,
collection: Optional[str]=None,
basename: Optional[str]=None,
upload_to: Optional[StrOrStrList]=None
) -> Optional[str]:
"""
Save a file to the local SHA-1 cache and/or upload to a remote kachery
and return a SHA-1 URL referring to the file.
The file is specified using either path or key, as described in the
documentation for realizeFile().
Parameters
----------
path : str, optional
The path of the file. This could either be a local path, a sha1://
URL, or a sha1dir:// URL as described in the docs for realizeFile()
The default is None, in which case key must be specified.
You cannot specify both path and key.
key : str, optional
The key used for locating the file as described in the docs for
realizeFile(). The default is None, in which case path must be
specified. You cannot specify both path and key.
subkey : str, optional
The optional subkey as described in the docs for getValue() and
setValue() (the default is None)
collection : str, optional
The name of the collection to retrieve the value from. The default
is None, which means that the configured collection is used.
basename : str, optional
An optional basename to be used in constructing the returned SHA-1
URL.
upload_to : str, optional
Optional name of kachery server to upload the file to
Returns
-------
str or None
A SHA-1 URL for the saved or uploaded file, or None if the file was
unable to be saved.
"""
if path and path.startswith('key://'):
path = self.resolveKeyPath(path)
if not path:
return None
if path is None:
self.setValue(key=key, subkey=subkey, collection=collection,
value=None)
return None
sha1_path = self._save_file(
path=path, basename=basename, upload_to=upload_to)
if key is not None:
self.setValue(key=key, subkey=subkey, collection=collection,
value=sha1_path)
return sha1_path

def findFile(self, path: str, *,
local_only: bool=False,
remote_only: bool=False,
download_from: Optional[StrOrStrList]=None
) -> Optional[str]:
"""
Find a file without downloading it, returning either the local
location of the file or a http:// or https:// address.
Parameters
----------
path : str
The path or URI of the file to find.
local_only : bool, optional
Whether to only find the file locally, by default False
remote_only : bool, optional
Whether to only find the file remotely, by default False
download_from : Optional[StrOrStrList], optional
The names of the remote kacheries to search, by default None
Returns
-------
Optional[str]
Either the local path of the file or the http URL of the remote
file.
"""
if path and path.startswith('key://'):
path = self.resolveKeyPath(path)
if not path:
return None
return self._realize_file(path=path, resolve_locally=False, local_only=local_only, remote_only=remote_only, download_from=download_from)
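A short sketch of how findFile() differs from realizeFile(): it only locates the file and never downloads it. The client object `mt`, the kachery name and the sha1:// URL below are illustrative assumptions.
uri = 'sha1://0123456789abcdef0123456789abcdef01234567/filt.mda'
hit = mt.findFile(uri, local_only=True)      # look in the local cache only
if hit is None:
    # Not cached locally; ask a remote kachery whether it has the file.
    hit = mt.findFile(uri, remote_only=True, download_from='my_kachery')
print(hit)  # a local path, an http(s):// address, or None if not found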
Python | def isFile(self, path: str) -> bool:
"""
Returns True if the path or URI represents a file rather than a
directory.
Parameters
----------
path : str
The path or URI of the file to check.
Returns
-------
bool
True if the path or URI represents a file rather than a directory
"""
if self.isLocalPath(path=path):
return os.path.isfile(path)
if path.startswith('kbucket://'):
raise Exception('kbucket:// paths are no longer supported')
if path.startswith('sha1://'):
return True
elif path.startswith('sha1dir://'):
if len(path.split('/')) <= 3:
return False
else:
return (self.computeFileSha1(path) is not None)
elif path.startswith('key://'):
return (self.computeFileSha1(path) is not None)
else:
return os.path.isfile(path)
Python | def localCacheDir(self) -> str:
"""Returns the path of the directory used for the local cache.
Returns
-------
str
Path to the cache directory.
"""
return self._local_db.localCacheDir()
Python | def alternateLocalCacheDirs(self) -> List[str]:
"""Returns a list of paths to alternate local cache directories.
Returns
-------
List[str]
The list of alternate local cache paths.
"""
return self._local_db.alternateLocalCacheDirs()
Python | def queueJob(self, job: MountainJob) -> MountainJobResult:
"""Queue a job. This happens automatically by the framework when .execute() is called on a processor
Parameters
----------
job : MountainJob
The job to queue
Returns
-------
MountainJobResult
The job result object associated with the job. Same as "job.result".
"""
job_id = self._last_job_id + 1
self._last_job_id = job_id
self._queued_jobs[job_id] = job
self._all_jobs[job_id] = job
job_object = job.getObject()
job_outputs = job_object['outputs']
result_outputs = dict()
for output_name in job_outputs.keys():
result_outputs[output_name] = dict(
queue_job_id=job_id,
output_name=output_name,
pending=True
)
obj0 = dict(
outputs=result_outputs
)
result0 = MountainJobResult(result_object=obj0, job_queue=self)
setattr(job, 'result', result0)
return result0
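The returned result object initially holds only pending placeholders, one per declared output. A sketch of the shape, assuming `queue` is an instance of this job-queue class and the job declares a single output named `firings_out` (all field values are illustrative):
result = queue.queueJob(job)
# Before the job actually runs, the result is built from an object like:
# {'outputs': {'firings_out': {'queue_job_id': 1,
#                              'output_name': 'firings_out',
#                              'pending': True}}}
# Subsequent iterate()/wait() calls replace these placeholders with the real
# outputs and mark the result as finished.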
Python | def iterate(self) -> None:
"""Called periodically by the framework to take care of business.
Returns
-------
None
"""
if self._halted:
return
if self._job_handler:
self._job_handler.iterate()
self._check_for_finished_jobs()
queued_job_ids = sorted(list(self._queued_jobs.keys()))
job_ids_to_run = []
for id in queued_job_ids:
job = self._queued_jobs[id]
if self._job_is_ready_to_run(job):
job_ids_to_run.append(id)
newly_running_jobs = []
for id in job_ids_to_run:
if self._halted:
return
job = self._queued_jobs[id]
del self._queued_jobs[id]
self._running_jobs[id] = job
job.result._status = 'running'
newly_running_jobs.append(job)
if len(newly_running_jobs) > 0:
print('Checking cache for {} jobs...'.format(len(newly_running_jobs)))
newly_running_job_results_from_cache = _check_cache_for_job_results(newly_running_jobs)
for ii, job in enumerate(newly_running_jobs):
newly_running_job_result = newly_running_job_results_from_cache[ii]
if newly_running_job_result is not None:
jobj = job.getObject()
print('Using result from cache: {}'.format(jobj.get('label', jobj.get('processor_name', '<>'))))
job.result.fromObject(newly_running_job_result.getObject())
job.result._status = 'finished'
else:
self._job_handler.executeJob(job)
if self._job_handler:
self._job_handler.iterate()
self._check_for_finished_jobs()
Python | def wait(self, timeout: float=-1) -> bool:
"""Wait until all queued jobs have completed or until timeout.
Parameters
----------
timeout : float, optional
The maximum number of seconds to wait. The default (-1) means it will never time out.
Returns
-------
bool
Whether all the jobs have finished. Will return False if the timeout has been reached.
"""
timer = time.time()
while not self.isFinished():
self.iterate()
elapsed = time.time() - timer
if (timeout >= 0) and (elapsed > timeout):
return False
if not self.isFinished():
time.sleep(0.2)
return True
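A typical polling pattern built from the methods above (a sketch only; `queue` stands for an instance of this job-queue class):
finished = queue.wait(timeout=600)   # drive iterate() for up to 10 minutes
if not finished:
    queue.halt()                     # give up: stop the handler and mark the queue halted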
Python | def isFinished(self) -> bool:
"""Whether all queued jobs have finished.
Returns
-------
bool
True if all queued jobs have finished.
"""
if self._halted:
return True
return (self._queued_jobs == {}) and (self._running_jobs == {})
Python | def halt(self) -> None:
"""Stop the job queue in its tracks, and send the halt message to the job handler.
Returns
-------
None
"""
if self._job_handler:
self._job_handler.halt()
self._halted = True
Python | def add(self, name: str, type: str, token: str) -> bool:
"""
Add a new token to the store
Parameters
----------
name: str
server name
type: str
upload or download
token: str
token data to be set
"""
if not len(token):
return False
if type not in ['download', 'upload']:
return False
for i in range(0, len(self._entries)):
current = self._entries[i]
if current.startswith('#') or not len(current.strip()):
continue
entry = current.strip().split()
if entry[0] == name and entry[1] == type:
entry[2] = token
self._entries[i] = '\t'.join(entry)
return True
# not found, let's add
entry = [name, type, token]
self._entries.append('\t'.join(entry))
return True
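A small sketch of the store API defined here; `store` stands for an instance of the token-store class, and the server name and token strings are made up:
store.add('my_kachery', 'download', '1a2b3c')   # add or overwrite an entry
store.add_download('my_kachery', '1a2b3c')      # same thing via the wrapper below
store.add_upload('my_kachery', '4d5e6f')
store.add('my_kachery', 'read', 'xyz')          # returns False: unknown type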
Python | def add_download(self, name: str, token: str) -> bool:
"""
Add a download token to the store
Parameters
----------
name: str
server name
token: str
token data
"""
return self.add(name, 'download', token)
Python | def add_upload(self, name: str, token: str) -> bool:
"""
Add an upload token to the store
Parameters
----------
name: str
server name
token: str
token data
"""
return self.add(name, 'upload', token)
Python | def disable(self, name: str, type: str):
"""
Temporarily disable an existing token configuration
Parameters
----------
name: str
server name
type: str
upload or download
"""
if not name or not type:
return False
if type not in ['download', 'upload']:
return False
for i in range(0, len(self._entries)):
current = self._entries[i]
if current.startswith('#') or not len(current.strip()):
continue
entry = current.strip().split()
if entry[0] == name and entry[1] == type:
self._entries[i] = '#' + self._entries[i]
return True
return False
Python | def enable(self, name: str, type: str) -> bool:
"""
Re-enable a previously disabled token configuration
Parameters
----------
name: str
server name
type: str
upload or download
"""
if not name or not type:
return False
if type not in ['download', 'upload']:
return False
for i in range(0, len(self._entries)):
current = self._entries[i]
if not current.startswith('#'):
continue
entry = current[1:].strip().split()
if len(entry) == 3 and entry[0] == name and entry[1] == type:
self._entries[i] = current[1:].strip()
return True
return False
Python | def disable_download(self, name: str) -> bool:
"""
Temporarily disable a download token
Parameters
----------
name: str
server name
"""
return self.disable(name, 'download')
Python | def disable_upload(self, name: str) -> bool:
"""
Temporarily disable an upload token
Parameters
----------
name: str
server name
"""
return self.disable(name, 'upload')
Python | def enable_download(self, name: str) -> bool:
"""
Re-enable a disabled download token
Parameters
----------
name: str
server name
"""
return self.enable(name, 'download')
Python | def enable_upload(self, name: str) -> bool:
"""
Re-enable a disabled upload token
Parameters
----------
name: str
server name
"""
return self.enable(name, 'upload')
Python | def remove(self, name: str, type: Optional[str] = None) -> bool:
"""
Remove token from the store
Parameters
----------
name: str
server name
type: str or None
upload or download, both if None
"""
if not type:
dn = self.remove(name, 'download')
up = self.remove(name, 'upload')
return dn or up
if type not in ['download', 'upload']:
return False
for i in range(0, len(self._entries)):
current = self._entries[i]
if current.startswith('#') or not len(current.strip()):
continue
entry = current.strip().split()
if entry[0] == name and entry[1] == type:
del self._entries[i]
return True
return False
Python | def remove_download(self, name: str) -> bool:
"""
Remove download token from the store
Parameters
----------
name: str
server name
"""
return self.remove(name, 'download')
Python | def remove_upload(self, name: str) -> bool:
"""
Remove upload token from the store
Parameters
----------
name: str
server name
"""
return self.remove(name, 'upload')
Python | def entries(self) -> Iterable:
"""
Iterate over enabled entries
Returns an iterable where each iteration yields
a triplet of (name, type, token)
"""
for i in range(0, len(self._entries)):
current = self._entries[i]
if current.startswith('#') or not len(current.strip()):
continue
entry = current.strip().split()
yield entry
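Internally each entry is a tab-separated "name<TAB>type<TAB>token" line, and disabling an entry simply comments it out with '#', so entries() skips it. A sketch, reusing the hypothetical `store` instance from above:
store.add('my_kachery', 'upload', '4d5e6f')
store.disable_upload('my_kachery')          # line becomes '#my_kachery\tupload\t4d5e6f'
print(list(store.entries()))                # the disabled entry is not yielded
store.enable_upload('my_kachery')
for name, type_, token in store.entries():  # each yielded entry is a 3-item list
    print(name, type_, token)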
Python | def apply(cls, self, *args, **kwargs):
"""
Applies kwargs arguments to the instance passed as the first
argument to the call.
For defined INPUTS, OUTPUTS and PARAMETERS the method extracts
a corresponding value from kwargs and sets it as an instance attribute.
For example, if the processor has a 'foo' parameter declared and
'foo = something' is passed to apply(), self.foo will become
'something'.
"""
for key in kwargs:
if key in [x.name for x in cls.INPUTS]:
setattr(self, key, kwargs[key])
if key in [x.name for x in cls.OUTPUTS]:
setattr(self, key, kwargs[key])
if key in [x.name for x in cls.PARAMETERS]:
setattr(self, key, kwargs[key])
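A self-contained toy illustration of what apply() does. The `_Named` class is a stand-in for the framework's real Input/Output/Parameter descriptor classes (only the .name attribute is needed here), and apply is called as the plain function defined above; in the framework it is presumably a classmethod on the processor base class.
class _Named:
    def __init__(self, name):
        self.name = name

class ToyProcessor:
    INPUTS = [_Named('timeseries')]
    OUTPUTS = [_Named('firings_out')]
    PARAMETERS = [_Named('detect_threshold')]

p = ToyProcessor()
apply(ToyProcessor, p, timeseries='raw.mda', detect_threshold=3.5)
assert p.timeseries == 'raw.mda' and p.detect_threshold == 3.5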
Python | def spec(self):
"""
Generate spec for the processor as a Python dictionary.
A spec is a standard way to describe a MountainLab processor in a way
that is easy to process, yet still understandable by humans.
This method generates a Python dictionary that complies with a spec
definition.
"""
pspec = {}
pspec['name'] = self.NAME
pspec['version'] = self.VERSION
pspec['description'] = self.DESCRIPTION
# if hasattr(self, 'run') and callable(self.run):
components = [sys.argv[0], self.NAME]
if self.USE_ARGUMENTS:
components.append('$(arguments)')
pspec['exe_command'] = self.COMMAND or ' '.join(components)
pspec['inputs'] = [inp.spec for inp in self.INPUTS]
pspec['outputs'] = [out.spec for out in self.OUTPUTS]
pspec['parameters'] = [param.spec for param in self.PARAMETERS]
if hasattr(self, 'test') and callable(self.test):
pspec['has_test'] = True
return pspec
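A sketch of the dictionary shape spec() produces. All values below are invented for illustration; the real ones come from the class attributes (NAME, VERSION, DESCRIPTION, ...), from sys.argv[0], and from each descriptor's .spec.
# {
#     'name': 'example.detect_spikes',
#     'version': '0.1.0',
#     'description': 'Detect spike events in a timeseries',
#     'exe_command': '/path/to/script.py example.detect_spikes $(arguments)',
#     'inputs': [...],      # one entry per INPUTS descriptor
#     'outputs': [...],     # one entry per OUTPUTS descriptor
#     'parameters': [...],  # one entry per PARAMETERS descriptor
#     'has_test': True,     # only present when the class defines a callable test()
# }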
Python | def invoke_parser(self, supparser=None, noexit=False):
"""
Return a commandline parser (argparse) for the processor.
"""
if supparser:
parser = supparser.add_parser(
self.NAME, description=self.DESCRIPTION)
else:
if noexit:
class NoExitArgumentParser(argparse.ArgumentParser):
def exit(self, status=0, message=None):
raise ParserError()
def error(self, message):
raise ParserError()
parser = NoExitArgumentParser(
prog=self.NAME, description=self.DESCRIPTION)
else:
parser = argparse.ArgumentParser(
prog=self.NAME, description=self.DESCRIPTION)
def populate_parser(parser, dataset):
for elem in dataset:
opts = {}
opts['help'] = elem.description
opts['required'] = not elem.optional
if elem.multi:
opts['action'] = 'append'
parser.add_argument('--' + elem.name, **opts)
# populate parser with INPUTS
populate_parser(parser, self.INPUTS)
# populate parser with OUTPUTS
populate_parser(parser, self.OUTPUTS)
# populate parser with PARAMETERS
for param in self.PARAMETERS:
opts = {}
opts['help'] = param.description
opts['required'] = not param.optional
if isinstance(param.datatype, tuple):
opts['type'] = str
# opts['type'] = param.datatype[1]
else:
opts['type'] = param.datatype
if param.multi:
opts['action'] = 'append'
if param.choices:
if isinstance(param.choices, tuple):
# if choices is a tuple, assume it is a tuple of mappings
# and expand them
opts['choices'] = [choice[0] for choice in param.choices]
else:
opts['choices'] = param.choices
parser.add_argument('--' + param.name, **opts)
if self.USE_ARGUMENTS:
parser.add_argument('--_tempdir', required=False,
help=argparse.SUPPRESS)
return parser
Python | def invoke(proc, args=None, *, _instance=None, **kwargs):
"""
Executes the processor passing given arguments.
:param args: a list of parameters in --key=value format.
"""
if args is None:
args = []
for kwargname in kwargs:
args.append('--' + kwargname)
args.append('{}'.format(kwargs[kwargname]))
parser = proc.invoke_parser(noexit=(_instance is not None))
opts = parser.parse_args(args)
kwargs0 = {}
def handle_set(opts, dataset, kwargs0, canMulti=False):
for elem in dataset:
elemname = elem.name
# ml-run-process passes values for not provided inputs, outputs and params as empty strings ('')
if hasattr(opts, elemname) and getattr(opts, elemname) not in [None, '']:
# value for element was given in the invocation
elemvalue = getattr(opts, elemname)
if canMulti and isinstance(elemvalue, list):
elemlist = elemvalue
else:
elemlist = [elemvalue]
for elemelem in elemlist:
for validator in elem.validators:
validator(elemelem)
if hasattr(opts, elem.name):
prepared = elem.prepare(elemvalue) or elemvalue
kwargs0[elem.name] = prepared
elif elem.optional:
# value was not set but is optional so ignore it
kwargs0[elem.name] = None
else:
# value was not set and is mandatory -- error
raise AttributeError(
'Missing value for {} '.format(elemname))
try:
handle_set(opts, proc.INPUTS, kwargs0, True)
handle_set(opts, proc.OUTPUTS, kwargs0, True)
for param in proc.PARAMETERS:
if hasattr(opts, param.name) and getattr(opts, param.name) is not None and getattr(opts, param.name) != '':
value = getattr(opts, param.name)
# validate if needed
for validator in param.validators:
validator(value)
# if param is a tuple of choices, each choice is a tuple itself
# with first element of the input value and second element
# containing the value to be passed to the processor
if param.choices and isinstance(param.choices, tuple):
for choice in param.choices:
if choice[0] == value:
kwargs0[param.name] = choice[1]
break
else:
kwargs0[param.name] = value
elif param.optional:
kwargs0[param.name] = param.default
else:
raise AttributeError(
'Missing value for {} parameter'.format(param.name))
if not _instance:
_instance = proc(**kwargs0)
else:
_instance.apply(_instance, **kwargs0)
return _instance.run()
# todo: cleanup
except Exception as e:
print("Error:", e)
# traceback.print_exc()
raise
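The keyword form is just sugar for the command-line form: each kwarg is appended as a '--name value' pair before parsing. A sketch, with `DetectSpikes` standing for some processor class built on this framework:
invoke(DetectSpikes, timeseries='raw.mda', firings_out='firings.mda',
       detect_threshold=3.5)
# is handled the same way as the explicit argument-list call
invoke(DetectSpikes, ['--timeseries', 'raw.mda',
                      '--firings_out', 'firings.mda',
                      '--detect_threshold', '3.5'])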
Python | def deprecated(reason: str) -> Callable:
"""
Mark a given function as deprecated and issue a given warning when
the function is executed.
"""
def decorator(func):
if not func.__doc__:
func.__doc__ = 'Deprecated'
@functools.wraps(func)
def wrapper(*args, **kwargs):
# TODO replace with warnings package
print(reason)
return func(*args, **kwargs)
return wrapper
return decorator
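A self-contained usage example of the decorator above; the function name and message are made up:
@deprecated('old_mean() is deprecated, use statistics.mean() instead')
def old_mean(values):
    return sum(values) / len(values)

old_mean([1, 2, 3])   # prints the deprecation message, then returns 2.0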
Python | def executeJob(self, job: mlpr.MountainJob) -> MountainJobResult:
"""Queue a job to run in a batch. This is called from the framework (e.g., the job queue)
Parameters
----------
job : mlpr.MountainJob
The job to run. The job can specify the batch type in its compute requirements.
The default batch type is "default".
Returns
-------
MountainJobResult
The job result object. Same as "job.result".
"""
job_timeout = job.getObject().get('timeout', None)
if job_timeout is None:
job_timeout = DEFAULT_JOB_TIMEOUT
compute_requirements = job.getObject().get('compute_requirements', {})
batch_type_name = compute_requirements.get('batch_type', 'default')
if batch_type_name not in self._batch_types:
raise Exception('No batch type: {}'.format(batch_type_name))
batch_type = self._batch_types[batch_type_name]
if batch_type['time_limit_per_batch'] is not None:
if job_timeout > batch_type['time_limit_per_batch']:
raise Exception('Cannot execute job. Job timeout exceeds time limit: {} > {}'.format(job_timeout, batch_type['time_limit_per_batch']))
self._unassigned_jobs.append(job)
return job.result
Python | def iterate(self) -> None:
"""Called by the framework to take care of business.
Returns
-------
None
"""
# Iterate the batches that are not finished
for _, b in self._batches.items():
if not b.isFinished():
b.iterate()
# Return if we have been halted
if self._halted:
return
# Handle the unassigned jobs
unassigned_jobs_after = []
for job in self._unassigned_jobs:
if not self._handle_unassigned_job(job):
# Unable to assign the job, so we'll try next iteration
unassigned_jobs_after.append(job)
self._unassigned_jobs = unassigned_jobs_after
Python | def isFinished(self) -> bool:
"""Whether all queued jobs have finished
Returns
-------
bool
True if all queued jobs have finished
"""
if self._halted:
return True
if len(self._unassigned_jobs) > 0:
# Some job is unassigned
return False
for b in self._batches.values():
if b.isRunning():
if b.hasJob():
# Some batch has a running job
return False
return True
Python | def halt(self) -> None:
"""Stop the job handler in its tracks.
Returns
-------
None
"""
# Halt all of the batches that are not finished
for _, b in self._batches.items():
if not b.isFinished():
b.halt()
self._halted = True
Python | def cleanup(self) -> None:
"""Remove the working directory
Returns
-------
None
"""
_rmdir_with_retries(self._working_dir, num_retries=10)
Python | def canAddJob(self, job: mlpr.MountainJob) -> bool:
"""Return True if we are able to add job, based on timing info, etc.
Parameters
----------
job : mlpr.MountainJob
Job to potentially add
Returns
-------
bool
Whether the job can be added
"""
if self.isFinished():
# We are finished, so we can't add any jobs
return False
# Determine the specified timeout of the job
job_timeout = job.getObject().get('timeout', None)
if job_timeout is None:
# if job doesn't have timeout, we use the default
job_timeout = DEFAULT_JOB_TIMEOUT
# See if adding this job would exceed the time limit
if self._time_limit is not None:
if job_timeout + self.elapsedSinceStarted() > self._time_limit + 5:
# We would exceed the time limit. Can't add the job
return False
# If some worker has a vacancy then we can add the job
for w in self._workers:
if not w.hasJob():
return True
# Otherwise, we have no vacancy for a new job
return False
Python | def hasJob(self) -> bool:
"""Return True if some worker has a job
"""
for w in self._workers:
if w.hasJob():
return True
return False
Python | def hasJob(self) -> bool:
"""Whether this worker has a job
"""
if self._job is not None:
return True
return False
Python | def everHadJob(self) -> bool:
"""Whether this worker ever had a job
"""
if self._job is not None:
return True
if self._job_finish_timestamp is not None:
return True
return False
Python | def elapsedTimeSinceLastJob(self) -> Optional[float]:
"""If the worker ever had a job, returns elapsed number of seconds since that job completed. Otherwise returns None.
"""
if self._job is not None:
return 0
if self._job_finish_timestamp is not None:
elapsed = time.time() - self._job_finish_timestamp
return elapsed
return None
Python | def iterate(self) -> None:
"""Take care of business of the worker
"""
if not self._job:
# If we don't have a job, then we don't need to take care of any business.
return
job_fname = self._base_path + '_job.json'
result_fname = self._base_path + '_result.json'
result_obj: Optional[dict] = None
with FileLock(result_fname + '.lock', exclusive=False):
if os.path.exists(result_fname):
# The result file exists. So the active job must have completed.
with open(result_fname, 'r') as f:
# Here's the result object that we will deal with below
result_obj = json.load(f)
# Let's remove the _job.json.complete file if it exists
if os.path.exists(job_fname + '.complete'):
os.remove(job_fname + '.complete')
# Let's move the _job.json file to the _job.json.complete file
os.rename(job_fname, job_fname + '.complete')
# Similarly for the _result.json.complete file
if os.path.exists(result_fname + '.complete'):
os.remove(result_fname + '.complete')
os.rename(result_fname, result_fname + '.complete')
elif os.path.exists(result_fname + '.error'):
# It looks like there was an error processing the job
# This is not a job error, this is a mountaintools error
# So we are going to read the exception information from the .error
# file, print it, and then raise an exception
# This is serious and should not happen.
with open(result_fname + '.error', 'r') as f:
print(f.read())
raise Exception('Unexpected error processing job in batch.')
if result_obj:
# Here's the result that we read above
self._job.result.fromObject(result_obj)
self._job.result._status = 'finished'
# We no longer have a job, and we should set the finished timestamp
self._job = None
self._job_finish_timestamp = time.time()
Python | def wait(self, timeout) -> bool:
"""Wait until the process or processes have finished
Parameters
----------
timeout : float
Maximum time to wait, in seconds
Returns
-------
bool
True if the processes have finished. False if timeout occurred.
"""
timer = time.time()
while True:
all_finished = True
for x in self._srun_sh_scripts:
if not x.isFinished():
all_finished = False
if all_finished:
break
elapsed = time.time() - timer
if elapsed >= timeout:
return False
time.sleep(0.2)
return True
Python | def _get_xp_args(org_module, to_xp, arg):
"""
Converts org_module.ndarray objects using to_xp.
Args:
(org_module.ndarray, tuple, list, dict): Returned after converting the
object, or its elements if the object is iterable.
(int, float, str, numpy.ScalarType (constant)): Returned as is.
Returns:
The same data structure as the input, with all ndarrays converted.
"""
if isinstance(arg, org_module.ndarray):
return to_xp(arg)
if isinstance(arg, tuple):
return tuple([_get_xp_args(org_module, to_xp, x) for x in arg])
if isinstance(arg, dict):
return {x_name: _get_xp_args(org_module, to_xp, x)
for x_name, x in arg.items()}
if isinstance(arg, list):
return [_get_xp_args(org_module, to_xp, x) for x in arg]
if isinstance(arg, np.ScalarType) or callable(arg):
return arg
raise NotImplementedError
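A self-contained illustration of the recursion above, using NumPy as org_module and a toy converter in place of the real host/device transfer function:
import numpy as np

to_xp = lambda a: a.astype(np.float32)   # stand-in for e.g. cupy.asarray
args = ([np.arange(3), {'w': np.ones(2), 'n': 5}], 'label')
converted = _get_xp_args(np, to_xp, args)
# The structure is preserved: a tuple containing a list containing an ndarray
# and a dict. Only the ndarrays pass through to_xp; the int 5 and the string
# 'label' are returned unchanged.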
Python | def numpy_fallback_equal(name='xp'):
"""
Decorator that checks fallback_mode results are equal to NumPy ones.
Args:
name(str): Argument name whose value is either
``numpy`` or ``cupy`` module.
"""
def decorator(impl):
@functools.wraps(impl)
def test_func(self, *args, **kwargs):
kwargs[name] = fallback_mode.numpy
fallback_result = impl(self, *args, **kwargs)
kwargs[name] = numpy
numpy_result = impl(self, *args, **kwargs)
if isinstance(numpy_result, numpy.ndarray):
# if numpy returns ndarray, cupy must return ndarray
assert isinstance(fallback_result, cupy.ndarray)
testing.assert_array_equal(numpy_result, fallback_result)
elif isinstance(numpy_result, numpy.ScalarType):
# if numpy returns scalar
# cupy must return scalar or 0-dim array
if isinstance(fallback_result, numpy.ScalarType):
assert numpy_result == fallback_result
else:
# cupy 0-dim array
assert numpy_result == int(fallback_result)
else:
raise NotImplementedError
return test_func
return decorator
Python | def empty() -> 'InformationFlowGraph':
'''Represents the graph for a path in which nothing is modified.
For instance, if a block of code could be executed or not depending on
the processor state, then the path where the block is not executed
would be an empty() graph. Then, one could use update() to get a graph
representing the combination of the two possibilities.
'''
return InformationFlowGraph({})
Python | def nonexistent() -> 'InformationFlowGraph':
'''Represents the graph for a nonexistent path.
There is an important distinction between this and an "empty" graph. In
particular, for any graph G, G.update(nonexistent) = G, which is not
the case for a merely "empty" graph; since the update() method is
combining all possible paths, an empty graph means we need to consider
that all nodes might be unmodified, while a nonexistent graph has no
possible paths and therefore no effect.
A nonexistent graph can be thought of as "None", but handling it
directly within the class reduces the need for None checks.
For instance, imagine we want to represent the information flow for
only paths of a program that end in RET. If no paths from the current
point end in RET (because, for instance, all paths end the program with
ECALL), then a nonexistent graph would represent the information flow.
'''
return InformationFlowGraph({}, False)
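A small sketch of the difference between the two constructors, assuming empty() and nonexistent() are static constructors of InformationFlowGraph (as their signatures suggest) and that the one-argument constructor defaults to an existing graph:
overwrite_x = InformationFlowGraph({'x': set()})  # x is overwritten by a constant

g = InformationFlowGraph.empty()                  # a real "nothing modified" path
g.update(overwrite_x)
assert g.flow == {'x': {'x'}}    # x may be overwritten or may keep its old value

h = InformationFlowGraph.nonexistent()            # no path at all
h.update(overwrite_x)
assert h.flow == {'x': set()}    # result is simply overwrite_x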
Python | def sinks(self, source: str) -> Set[str]:
'''Returns all sinks for the given source.'''
out = set()
for sink in self.flow:
if source in self.flow[sink]:
out.add(sink)
if source not in self.flow:
# Implicitly, the source is unmodified and depends on itself
out.add(source)
return out
Python | def remove_source(self, node: str) -> None:
'''Removes the node from the graph anywhere it appears as a source.
If the node is not a source in the graph, does nothing.
'''
for sources in self.flow.values():
sources.discard(node)
Python | def remove_sink(self, node: str) -> None:
'''Removes the node from the graph anywhere it appears as a sink.
If the node is not a sink in the graph, does nothing.
'''
self.flow.pop(node, None)
Python | def update(self, other: 'InformationFlowGraph') -> None:
'''Updates self to include the information flow from other.
Every sink that appears in either or both graphs will, in the updated
self, have sources formed from the union of its sources in both graphs.
Sinks that appear in only one graph will have sources formed of the
union of their sources in the graph in which they appear and themselves
(because a sink not appearing in a graph implicitly means the value is
unchanged).
Important note: updating with an empty graph will not be a no-op. An
empty graph implies everything remains unmodified, so combining an
empty graph with something that *overwrites* a value (i.e. a graph with
a sink whose sources do not include itself) will add the "value doesn't
change" information flow (i.e. the sink will be added to its own
sources).
Does not modify other.
'''
if not other.exists:
# If the other graph is nonexistent, then this is a no-op.
return
if not self.exists:
# Updating a nonexistent graph with another graph should return the
# other graph; since we need to modify self, we change this graph's
# flow to match other's.
self.flow = other.flow.copy()
self.exists = other.exists
return
for sink, sources in other.flow.items():
if sink not in self.flow:
# implicitly, a non-updated value depends only on itself (NOT
# an empty set, which would indicate a value that is
# overwritten with a constant)
self.flow[sink] = {sink}
self.flow[sink].update(sources)
return
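A short example of the union-of-paths behaviour for a sink that is written on both paths (node names are arbitrary):
path1 = InformationFlowGraph({'c': {'a', 'b'}})   # one path: c := f(a, b)
path2 = InformationFlowGraph({'c': {'d'}})        # another path: c := g(d)
path1.update(path2)
assert path1.flow == {'c': {'a', 'b', 'd'}}       # c may depend on a, b or d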
Python | def seq(self, other: 'InformationFlowGraph') -> 'InformationFlowGraph':
'''Performs sequential composition of information flow graphs.
Returns the information flow graph that results from self sequentially
composed with other. For example, if these are the two graphs (b and c
being the sinks of self and e and d being the sinks of other):
self: other:
a,d -> b b -> e
a,b -> c f -> d
b,c -> c
...then the result of this operation will be:
a,d -> e
f -> d
a,b,d -> c
Defensively copies all source sets for the new graph.
'''
if not self.exists or not other.exists:
# If either this or the other graph is nonexistent, then the
# sequence is nonexistent.
return InformationFlowGraph.nonexistent()
flow = {}
for sink, sources in other.flow.items():
new_sources = set()
for source in sources:
if source in self.flow:
new_sources.update(self.flow[source])
else:
# source is not a sink in self's flow; assume it stays
# constant
new_sources.add(source)
flow[sink] = new_sources
for sink, sources in self.flow.items():
if sink not in flow:
# sink is not updated in other's flow
flow[sink] = sources.copy()
return InformationFlowGraph(flow)
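A small worked example of sequential composition (node names are arbitrary):
first = InformationFlowGraph({'t': {'a'}})          # first block: t := f(a)
second = InformationFlowGraph({'out': {'t', 'k'}})  # then: out := g(t, k)
composed = first.seq(second)
# out now depends on a (through t) and on k; t keeps its flow from the first block.
assert composed.flow == {'out': {'a', 'k'}, 't': {'a'}}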
Python | def pretty(self, indent: int = 0) -> str:
'''Return a human-readable representation of the graph.'''
if not self.exists:
return 'Nonexistent information-flow graph (no possible paths).'
prefix = ' ' * indent
flow_strings = {
sink: ','.join(sorted(sources))
for sink, sources in self.flow.items()
}
max_source_chars = max([len(s) for s in flow_strings.values()],
default=0)
lines = []
for sink in sorted(self.flow.keys()):
sources_str = flow_strings[sink]
padding = ' ' * (max_source_chars - len(sources_str))
lines.append('{}{}{} -> {}'.format(prefix, sources_str, padding,
sink))
return '\n'.join(lines)
Python | def required_constants(self, op_vals: Dict[str, int]) -> Set[str]:
'''Returns the names of regs that must be constant for `evaluate()`.
For instance, for an indirect reference of a WDR via a GPR, the GPR's
value must be constant for the node to be evaluated. Subclasses that
require constants override this method.
'''
return set()
Python | def evaluate(self, op_vals: Dict[str, int],
constant_regs: Dict[str, int]) -> str:
'''Determines information flow graph for the instruction.
Evaluates the information-flow node according to the given operand
values. The `constant_regs` dictionary is only used to access values in
`self.required_constants()`; changes to other values will have no
effect.
'''
raise NotImplementedError()
Python | def _parse_iflow_nodes(
node: str, what: str,
operands: List[Operand]) -> List[InsnInformationFlowNode]:
'''Parses information flow node(s) from the instruction description.
Valid information flow nodes are one of the following:
- the name of a *register* operand from the operands list
- "wref-<reg operand name>" for instructions where a WDR is indirectly
accessed via a GPR; in this case <reg operand name> is the GPR and must
be in the operands list
- one of the special strings "dmem", "acc", or "mod"
- a flag (represented as "<flag group>-<flag>", where <flag group> can
be either "fg0", "fg1", or (if the instruction has a flag_group
operand) simply "flags" to select the current flag group, and <flag>
can be l, c, m, z, or "all".
Raises a ValueError if the node does not have a valid format.
'''
node = node.lower()
# Check if node is a register operand
for op in operands:
if node == op.name:
if not isinstance(op.op_type, RegOperandType):
raise RuntimeError(
'Information-flow node {} matches operand name, but '
'operand type is not a register (type {}). Note that '
'immediate operands cannot be information-flow nodes.'.
format(node, type(op.op_type)))
return [InsnRegOperandNode(op)]
# Check if node is an indirect reference to a WDR through a GPR
if node.startswith('wref-'):
gpr = node[len('wref-'):]
for op in operands:
if gpr == op.name:
if not (isinstance(op.op_type, RegOperandType) and
op.op_type.reg_type == 'gpr'):
raise RuntimeError(
'Operand {} in indirect reference {} is not a GPR '
'(type {}). Only GPRs can be indirect references.'.
format(gpr, node, type(op.op_type)))
return [InsnIndirectWDRNode(op)]
raise RuntimeError(
'Could not find GPR operand corresponding to {} when decoding '
'indirect reference {}. Operand names: {}'.format(
gpr, node, ', '.join([op.name for op in operands])))
# Check if node is a special string
if node == 'dmem' or node in SPECIAL_REG_NAMES:
return [InsnConstantNode(node)]
# Try to interpret node as a flag or set of flags
node_split = node.split('-')
if len(node_split) != 2:
raise ValueError('Cannot parse information flow node {} for {}'.format(
node, what))
flag_group_str, flag = node_split
# Case where flag group depends on the flag_group operand
if flag_group_str == 'flags':
if flag == 'all':
return [InsnGroupFlagNode(flag) for flag in FLAG_NAMES]
return [InsnGroupFlagNode(flag)]
elif flag_group_str == 'fg0' or flag_group_str == 'fg1':
if flag == 'all':
return [
InsnConstantNode('{}-{}'.format(flag_group_str, flag))
for flag in FLAG_NAMES
]
return [InsnConstantNode('{}-{}'.format(flag_group_str, flag))]
else:
raise ValueError('Cannot parse information flow node {} for {}'.format(
node, what))
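A small illustrative sketch of the flag-node naming convention handled at the end of _parse_iflow_nodes; FLAG_NAMES_EXAMPLE is an assumed stand-in for the module's FLAG_NAMES constant, so the exact flag order is not authoritative.
FLAG_NAMES_EXAMPLE = ['c', 'm', 'l', 'z']  # assumption; the real FLAG_NAMES is defined elsewhere

def expand_flag_node(node: str) -> list:
    # "<flag group>-all" expands to one node per flag; any other "<group>-<flag>" stays as-is.
    group, flag = node.split('-')
    if flag == 'all':
        return ['{}-{}'.format(group, f) for f in FLAG_NAMES_EXAMPLE]
    return [node]

print(expand_flag_node('fg0-all'))  # ['fg0-c', 'fg0-m', 'fg0-l', 'fg0-z']
print(expand_flag_node('fg1-c'))    # ['fg1-c']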
Python | def required_constants(self, op_vals: Dict[str, int]) -> Set[str]:
'''Returns the names of regs that must be constant for `evaluate()`.'''
out = set()
for node in self.flows_to:
out.update(node.required_constants(op_vals))
for node in self.flows_from:
out.update(node.required_constants(op_vals))
return out
Python | def required_constants(self, op_vals: Dict[str, int]) -> Set[str]:
'''Returns the names of regs that must be constant for `evaluate()`.'''
return {
const
for rule in self.rules
for const in rule.required_constants(op_vals)
}
Python | def _build_iflow_straightline(
program: OTBNProgram, start_pc: int, end_pc: int,
constants: ConstantContext) -> Tuple[Set[str], InformationFlowGraph]:
'''Constructs the information-flow graph for a straightline code section.
Returns two values:
- The set of constants (at the start instruction) that the graph depends on
- The information-flow graph
The instruction at end_pc is included in the calculation. Errors upon
encountering a control-flow instruction. Updates `constants` to hold the
constant values after the section has finished.
'''
iflow = InformationFlowGraph.empty()
constant_deps = set()
for pc in range(start_pc, end_pc + 4, 4):
insn = program.get_insn(pc)
op_vals = program.get_operands(pc)
assert insn.straight_line
used_constants, insn_iflow = _build_iflow_insn(insn, op_vals, pc,
constants)
for const in used_constants:
constant_deps.update(iflow.sources(const))
# Compose iflow with the information flow from this instruction
iflow = iflow.seq(insn_iflow)
# Update constants to their values after the instruction
constants.update_insn(insn, op_vals)
return constant_deps, iflow
Python | def _get_iflow_update_state(
rec_result: IFlowResult, iflow: InformationFlowGraph,
program_end_iflow: InformationFlowGraph, used_constants: Set[str],
control_deps: Dict[str, Set[int]]) -> InformationFlowGraph:
'''Update the internal state of _get_iflow after a recursive call.
The `used_constants` and `control_deps` state elements are updated in
place, but the new `program_end_iflow` is returned. The `iflow` input is
not modified.
'''
rec_used_constants, _, rec_program_end_iflow, _, rec_control_deps = rec_result
# Update the used constants and control-flow dependencies
for const in rec_used_constants:
used_constants.update(iflow.sources(const))
_update_control_deps(control_deps, iflow, rec_control_deps)
# Update information flow results for paths where the program ends
program_end_iflow.update(iflow.seq(rec_program_end_iflow))
return program_end_iflow
Python | def _get_iflow(program: OTBNProgram, graph: ControlGraph, start_pc: int,
start_constants: ConstantContext, loop_end_pc: Optional[int],
cache: IFlowCache) -> IFlowResult:
'''Gets the information-flow graphs for paths starting at start_pc.
Returns None for the return and/or end iflow if there are no paths ending
in RET or the program end, respectively.
Raises a ValueError if an indirect reference cannot be resolved, or if a
loop has a number of iterations that can't be resolved to a compile-time
constant.
Caches results from recursive calls (updating input cache). Does not modify
start_constants.
'''
cached = cache.lookup(start_pc, start_constants)
if cached is not None:
return cached
constants = start_constants.copy()
# The combined information flow for all paths leading to the end of the
# subroutine (i.e. a RET, not counting RETs that happen after jumps within
# the subroutine at start_pc). Initialize as nonexistent() because no paths
# have yet been found.
return_iflow = InformationFlowGraph.nonexistent()
# The combined information flow for all paths leading to the end of the
# program (i.e. an ECALL or the end of IMEM)
program_end_iflow = InformationFlowGraph.nonexistent()
# The control-flow nodes whose values at the start PC influence control
# flow (and the PCs of the control-flow instructions they influence)
control_deps: Dict[str, Set[int]] = {}
section = graph.get_section(start_pc)
edges = graph.get_edges(start_pc)
# If we're crossing the loop end PC, we must do so at the end of the
# section. In this case, we do not pass the end of the loop; we treat the
# end of the loop like a RET instruction.
if loop_end_pc is not None and loop_end_pc in section:
assert loop_end_pc == section.end
loop_end_pc = None
edges = [Ret()]
# Get the information flow, used constants, and known constants at the end
# of the straightline block (not including the last instruction). Note that
# _build_iflow_straightline updates the `constants` dictionary in-place.
used_constants, iflow = _build_iflow_straightline(program, section.start,
section.end - 4,
constants)
# Get the instruction/operands at the very end of the block (i.e. the
# control-flow instruction) for special handling
last_insn = program.get_insn(section.end)
last_op_vals = program.get_operands(section.end)
last_insn_used_constants, last_insn_iflow = _build_iflow_insn(
last_insn, last_op_vals, section.end, constants)
# Update used constants to include last instruction
for const in last_insn_used_constants:
used_constants.update(iflow.sources(const))
# Update control_deps to include last instruction
last_insn_control_deps = {
node: {section.end}
for node in _get_insn_control_deps(last_insn, last_op_vals)
}
_update_control_deps(control_deps, iflow, last_insn_control_deps)
# Update information-flow to include last instruction
iflow = iflow.seq(last_insn_iflow)
if last_insn.mnemonic in ['loopi', 'loop']:
# Special handling for loops; reject non-constant #iterations, and run
# loop body #iterations times
iterations = _get_constant_loop_iterations(last_insn, last_op_vals,
constants)
if iterations is None:
raise ValueError(
'LOOP instruction on a register that is not a '
'known constant at PC {:#x} (known constants: {}). If '
'the register is in fact a constant, you may need to '
'add constant-tracking support for more instructions.'.format(
section.end, constants.values.keys()))
# A loop instruction should result in exactly one edge of type
# LoopStart; check that assumption before we rely on it
assert len(edges) == 1 and isinstance(edges[0], LoopStart)
body_loc = edges[0]
# Update the constants to include the loop instruction
constants.update_insn(last_insn, last_op_vals)
# Recursive calls for each iteration
for _ in range(iterations):
body_result = _get_iflow(program, graph, body_loc.loop_start_pc,
constants, body_loc.loop_end_pc, cache)
# Update program_end_iflow, used constants, and control_deps
program_end_iflow = _get_iflow_update_state(
body_result, iflow, program_end_iflow, used_constants,
control_deps)
# Update constants and get information flow for paths that loop
# back to the start ("return")
_, body_return_iflow, _, constants, _ = body_result
# Compose current iflow with the flow for paths that hit the end of
# the loop
iflow = iflow.seq(body_return_iflow)
# Set the next edges to the instruction after the loop ends
edges = [ControlLoc(body_loc.loop_end_pc + 4)]
elif last_insn.mnemonic == 'jal' and last_op_vals['grd'] == 1:
# Special handling for jumps; recursive call for jump destination, then
# continue at pc+4
# Jumps should produce exactly one non-special edge; check that
# assumption before we rely on it
assert len(edges) == 1 and not edges[0].is_special()
jump_loc = edges[0]
jump_result = _get_iflow(program, graph, jump_loc.pc, constants, None,
cache)
# Update program_end_iflow, used constants, and control_deps
program_end_iflow = _get_iflow_update_state(jump_result, iflow,
program_end_iflow,
used_constants,
control_deps)
# Update constants and get information flow for return paths
_, jump_return_iflow, _, constants, _ = jump_result
# Compose current iflow with the flow for the jump's return paths
iflow = iflow.seq(jump_return_iflow)
# Set the next edges to the instruction after the jump returns
edges = [ControlLoc(section.end + 4)]
else:
# Update the constants to include the last instruction
constants.update_insn(last_insn, last_op_vals)
# We're only returning constants that are the same in all RET branches
common_constants = None
for loc in edges:
if isinstance(loc, Ecall) or isinstance(loc, ImemEnd):
# Ecall or ImemEnd nodes are expected to be the only edge
assert len(edges) == 1
# Clear common constants; there are no return branches here
common_constants = ConstantContext.empty()
program_end_iflow.update(iflow)
elif isinstance(loc, Ret):
if loop_end_pc is not None:
raise RuntimeError(
'RET before end of loop at PC {:#x} (loop end PC: '
'{:#x})'.format(section.end, loop_end_pc))
# Ret nodes are expected to be the only edge
assert len(edges) == 1
# Since this is the only edge, common_constants must be unset
common_constants = constants
return_iflow.update(iflow)
elif isinstance(loc, LoopStart) or isinstance(loc, LoopEnd):
# We shouldn't hit loop instances here; those cases (a loop
# instruction or the end of a loop) are all handled earlier
raise RuntimeError(
'Unexpected loop edge (type {}) at PC {:#x}'.format(
type(loc), section.end))
elif not loc.is_special():
# Just a normal PC; recurse
result = _get_iflow(program, graph, loc.pc, constants, loop_end_pc,
cache)
# Update program_end_iflow, used constants, and control_deps
program_end_iflow = _get_iflow_update_state(
result, iflow, program_end_iflow, used_constants, control_deps)
# Get information flow for return paths and new constants
_, rec_return_iflow, _, rec_constants, _ = result
# Take values on which existing and recursive constants agree
if common_constants is None:
common_constants = rec_constants
else:
common_constants.intersect(rec_constants)
# Update return_iflow with the current iflow composed with return
# paths
return_iflow.update(iflow.seq(rec_return_iflow))
else:
raise RuntimeError(
'Unexpected next control location type at PC {:#x}: {}'.format(
section.end, type(loc)))
# There should be at least one edge, and all edges should set
# common_constants to some non-None value
assert common_constants is not None
# Update used_constants to include any constant dependencies of
# common_constants, since common_constants will be cached
used_constants.update(
return_iflow.sources_for_any(common_constants.values.keys()))
# Strip special register x0 from both sources and sinks of graphs returned.
return_iflow.remove_source('x0')
return_iflow.remove_sink('x0')
program_end_iflow.remove_source('x0')
program_end_iflow.remove_sink('x0')
control_deps.pop('x0', None)
# Update the cache and return
out = (used_constants, return_iflow, program_end_iflow, common_constants,
control_deps)
_get_iflow_cache_update(start_pc, start_constants, out, cache)
return out
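A toy sketch of how the LOOP/LOOPI handling above unrolls a loop with a known iteration count: the body's flow graph is composed with the running graph once per iteration. The dict-based representation and helper are illustrative only (same composition rule as the sketch after seq()).
def compose(first, second):
    # Same composition rule as seq(), on plain dicts.
    flow = {sink: set().union(*(first.get(s, {s}) for s in srcs)) for sink, srcs in second.items()}
    for sink, srcs in first.items():
        flow.setdefault(sink, set(srcs))
    return flow

body = {'a': {'b'}, 'b': {'c'}}  # one iteration: a reads b, b reads c
iflow = {}                       # nothing written before the loop
for _ in range(2):               # two unrolled iterations
    iflow = compose(iflow, body)
print(iflow)  # {'a': {'c'}, 'b': {'c'}} -- after two iterations, a depends transitively on c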
Python | def check_acyclic(graph: ControlGraph) -> None:
'''Checks for (non LOOP/LOOPI) cycles in control-flow graph.
If there are such cycles, we need to raise an error and not proceed; the
control-flow traversal would infinite-loop.
'''
cycles = graph.get_cycles(graph.start)
if cycles:
msg = [
'One or more cycles found in control-flow graph at the following PCs:'
]
for pc, links in cycles.items():
msg.append('{:#x} <-> {}'.format(
pc, ','.join(['{:#x}'.format(link) for link in links])))
msg.append('Analyzing cyclic control flow outside of LOOP/LOOPI '
'instructions is not currently supported.')
raise ValueError('\n'.join(msg))
return
Python | def savefig(fname, *args, **kwargs):
'''Save current plot window to the figures directory.'''
if "PYPROBML" in os.environ:
root = os.environ["PYPROBML"]
figdir = os.path.join(root, 'figures')
else:
figdir = 'figures'
print('cannot find environment variable PYPROBML, writing to {}'.format(figdir))
if not os.path.exists(figdir):
os.mkdir(figdir)
fname_full = os.path.join(figdir, fname)
print('saving image to {}'.format(fname_full))
plt.tight_layout()
plt.savefig(fname_full, *args, **kwargs)
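A short usage sketch, assuming matplotlib is installed and savefig is defined as above; the figure is written to $PYPROBML/figures if that environment variable is set, otherwise to ./figures.
import matplotlib.pyplot as plt

plt.plot([0, 1, 2, 3], [0, 1, 4, 9])  # any plot on the current figure
savefig('quadratic.pdf')              # calls plt.savefig on the resolved path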
Python | def git_ssh(git_command, email="[email protected]", username="probml",
verbose=False):
'''Execute a git command via ssh from colab.
Details in https://github.com/probml/pyprobml/blob/master/book1/intro/colab_intro.ipynb
Authors: Mahmoud Soliman <[email protected]> and Kevin Murphy <[email protected]>
'''
git_command=git_command.replace(r"https://github.com/","[email protected]:")
print('executing command via ssh:', git_command)
# copy keys from drive to local .ssh folder
if verbose:
print('Copying keys from gdrive to local VM')
os.system('rm -rf ~/.ssh')
os.system('mkdir ~/.ssh')
os.system('cp -r /content/drive/MyDrive/ssh/* ~/.ssh/')
os.system('ssh-keyscan -t rsa github.com >> ~/.ssh/known_hosts')
os.system('ssh -T [email protected]') # test
# git commands
if verbose:
print('Executing git commands')
os.system('git config --global user.email {}'.format(email))
os.system('git config --global user.name {}'.format(username))
os.system(git_command)
# cleanup
if verbose:
print('Cleanup local VM')
os.system('rm -r ~/.ssh/')
os.system('git config --global user.email ""')
os.system('git config --global user.name ""')
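A hypothetical Colab usage, assuming Google Drive is mounted and SSH keys live in MyDrive/ssh as the docstring describes; the repository URL is only an example.
git_ssh('git clone https://github.com/probml/pyprobml.git', verbose=True)
# The https:// prefix is rewritten to [email protected]: before the command runs.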
Python | def _search(self, filters=None):
""" Yield pages (lists) of ids matching the given criteria"""
# TODO: Paginate
extra = dict()
if filters:
extra = {k: _get_value(v) for k, v in filters.items()}
params = {
'resource': self.resource_name,
'extra': extra
}
page = 1
while True:
response = self._make_request(**params)
object_response = self._response_to_object(response)
size = int(object_response.result.size)
if not size:
return []
elif size == 1:
yield [object_response.result.entries.entry['@id']]
break
elif size <= 19:
yield [int(x['@id']) for x in object_response.result.entries.entry]
break
else:
yield [int(x['@id']) for x in object_response.result.entries.entry]
page = page + 1
params.get('extra').update({
'paging.page': page
})
Python | def targets_to_features(tfv_ta_df, target_df):
"""Extracts a sample subset with targets and features of a specific time aggregation
based on given targets. target_df and tfv_ta_df both have to share the same index basis.
The index of target_df shall be a subset of tfv_ta_df.
"""
df = tfv_ta_df[tfv_ta_df.index.isin(target_df.index)]
# check compatibility of target_df's index with tfv_ta_df's index
d = len(target_df.index.difference(tfv_ta_df.index))
c = len(df)
b = len(target_df)
p = len(tfv_ta_df)
if d > 0:
raise NoSubsetWarning(f"subset({b}) with {c}/{d} rows that are/are not in superset({p})")
return df
Python | def derive_features(df):
"""Add price-derived features to the provided time-aggregated dataframe df,
with the exception of the derived feature 'delta', which is calculated
together with the targets.
"""
# price deltas in 1/1000
df["height"] = (df["high"] - df["low"]) / df["close"] * 1000
df.loc[df["close"] > df["open"],
"top"] = (df["high"] - df["close"]) / df["close"] * 1000
df.loc[df["close"] <= df["open"],
"top"] = (df["high"] - df["open"]) / df["close"] * 1000
df.loc[df["close"] > df["open"],
"bottom"] = (df["open"] - df["low"]) / df["close"] * 1000
df.loc[df["close"] <= df["open"],
"bottom"] = (df["close"] - df["low"]) / df["close"] * 1000
Python | def calc_aggregation(minute_df, time_aggregations):
"""Time aggregation through rolling aggregation with the consequence that new data is
generated every minute and even long time aggregations reflect all minute bumps in their
features
in:
dataframe of minute data of a currency pair
with the columns: open, high, low, close, volume
out:
dict of dataframes of aggregations with features and targets
"""
tf_aggs = dict() # feature and target aggregations
mdf = minute_df # .copy()
df = pd.DataFrame(minute_df) # .copy()
df["vol"] = (mdf["volume"] - mdf.volume.rolling(VOL_BASE_PERIOD).mean()) \
/ mdf.volume.rolling(VOL_BASE_PERIOD).mean()
df = df.fillna(value={"vol": 0.000001})
maxmin = 0
for time_agg in time_aggregations:
if isinstance(time_agg, int):
if (time_agg * time_aggregations[time_agg]) > maxmin:
maxmin = time_agg * time_aggregations[time_agg]
if maxmin > len(df.index):
raise MissingHistoryData("History data has {} samples but should have >= {}".format(
len(df.index), maxmin))
for time_agg in time_aggregations:
# print(f"{datetime.now()}: time_aggregation {time_agg}")
if time_agg > 1:
df = pd.DataFrame()
df["open"] = mdf.open.shift(time_agg-1)
df["high"] = mdf.high.rolling(time_agg).max()
df["low"] = mdf.low.rolling(time_agg).min()
df["close"] = mdf.close
df["vol"] = mdf.vol.rolling(time_agg).mean()
df["delta"] = (mdf.close - mdf.close.shift(time_agg)) / mdf.close.shift(time_agg)
tf_aggs[time_agg] = df
derive_features(df)
return tf_aggs
Python | def expand_target_feature_vectors(tf_aggs, target_key):
"""Builds a target and feature vector for just the target_key with
1 minute DHTBV and D*V feature sequences and the remaining D sequences of
n time steps (tics) as configured in time_aggregations in T units.
The most important step in expand_target_feature_vectors is
1) the concatenation of feature vectors per sample to provide a history
for the classifier
2) discarding the original currency values that are not used
as features (except 'close')
Result:
a self.vecs dict with the single target_key that is
referring to a DataFrame with feature vectors as rows. The column name indicates
the type of feature, i.e. either 'target', 'close' or 'D|H|T|B|V|DV' in case of
1 minute aggregation or just 'D' for all other aggregations with aggregation+'T_'
as column prefix
"""
df = pd.DataFrame(tf_aggs[target_key], columns=["close"])
skey = smallest_dict_key(tf_aggs)
for ta in tf_aggs:
for tics in range(TIME_AGGS[ta]):
ctitle = str(ta) + "T_" + str(tics) + "_"
offset = tics*ta
# now add feature columns according to aggregation
df[ctitle + "D"] = tf_aggs[ta].delta.shift(offset)
if ta == skey: # full set only for smallest aggregation (minute data)
df[ctitle + "H"] = tf_aggs[ta].height.shift(offset)
df[ctitle + "T"] = tf_aggs[ta].top.shift(offset)
df[ctitle + "B"] = tf_aggs[ta].bottom.shift(offset)
df[ctitle + "V"] = tf_aggs[ta].vol.shift(offset)
df[ctitle + "DV"] = tf_aggs[ta].vol.shift(offset) *\
tf_aggs[ta].delta.shift(offset)
df = df.dropna()
if df.empty:
raise MissingHistoryData("empty dataframe from expand_target_feature_vectors")
return df
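A sketch of the column-naming scheme built above; TIME_AGGS_EXAMPLE assumes the module's TIME_AGGS maps each aggregation (in minutes) to a number of tics, which is inferred from the code and not confirmed by the source.
TIME_AGGS_EXAMPLE = {1: 2, 5: 2}  # assumed shape: {aggregation_in_minutes: number_of_tics}

names = []
for ta, tics in TIME_AGGS_EXAMPLE.items():
    for t in range(tics):
        names.append('{}T_{}_D'.format(ta, t))
print(names)  # ['1T_0_D', '1T_1_D', '5T_0_D', '5T_1_D']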
Python | def add_targets(time_agg, df):
"target = achieved if improvement > 1% without intermediate loss of more than 0.2%"
# print(f"{datetime.now()}: add_targets {time_agg}")
df['target'] = TARGETS[HOLD]
lix = df.columns.get_loc('target')
cix = df.columns.get_loc('close')
win = dict()
loss = dict()
lossix = dict()
winix = dict()
ixfifo = Queue() # will hold all sell ix to smooth out if dip sell does not pay off
closeatsell = closeatbuy = 0
lasttarget = dict()
for slot in range(0, time_agg):
win[slot] = loss[slot] = 0.
winix[slot] = lossix[slot] = slot
lasttarget[slot] = TARGETS[HOLD]
for tix in range(time_agg, len(df), 1): # tix = time index
slot = (tix % time_agg)
last_close = df.iat[tix - time_agg, cix]
this_close = df.iat[tix, cix]
delta = (this_close - last_close) / last_close # * 1000 no longer in per mille
if delta < 0:
if loss[slot] < 0: # loss monitoring is running
loss[slot] += delta
else: # first time bar of decrease period
lossix[slot] = tix
loss[slot] = delta
if win[slot] > 0: # win monitoring is running
win[slot] += delta
if win[slot] < 0: # reset win monitor because it is below start price
win[slot] = 0.
if loss[slot] < SELL_THRESHOLD: # reset win monitor -> dip exceeded threshold
win[slot] = 0.
df.iat[lossix[slot], lix] = lasttarget[slot] = TARGETS[SELL]
lossix[slot] += 1 # allow multiple signals if conditions hold => FIX this changes slot!
# FIX loss[slot] is not corrected with the index change
# here comes the smooth execution for BUY peaks:
if closeatbuy > 0: # smoothing is active
buy_sell = -2 * (FEE + TRADE_SLIP) + this_close - closeatbuy
while not ixfifo.empty():
smooth_ix = ixfifo.get()
if buy_sell < 0:
# if fee loss more than dip loss/gain then smoothing
df.iat[smooth_ix, lix] = TARGETS[HOLD]
closeatbuy = 0
# here comes the smooth preparation for SELL dips:
if closeatsell == 0:
closeatsell = this_close
ixfifo.put(tix) # prep after execution due to queue reuse
elif delta > 0:
if win[slot] > 0: # win monitoring is running
win[slot] += delta
else: # first time bar of increase period
winix[slot] = tix
win[slot] = delta
if loss[slot] < 0: # loss monitoring is running
loss[slot] += delta
if loss[slot] > 0:
loss[slot] = 0. # reset loss monitor -> recovered before sell threshold
if win[slot] > BUY_THRESHOLD: # reset win monitor -> dip exceeded threshold
loss[slot] = 0.
df.iat[winix[slot], lix] = lasttarget[slot] = TARGETS[BUY]
winix[slot] += 1 # allow multiple signals if conditions hold => FIX this changes slot!
# FIX win[slot] is not corrected with the index change
# here comes the smooth execution for SELL dips:
if closeatsell > 0: # smoothing is active
sell_buy = -2 * (FEE + TRADE_SLIP)
holdgain = this_close - closeatsell
while not ixfifo.empty():
smooth_ix = ixfifo.get()
if sell_buy < holdgain:
# if fee loss more than dip loss/gain then smoothing
df.iat[smooth_ix, lix] = TARGETS[HOLD]
closeatsell = 0
# here comes the smooth preparation for BUY peaks:
if closeatbuy == 0:
closeatbuy = this_close
ixfifo.put(tix)