code | docstring | text
---|---|---
async def _expect_command(cls, reader, name):
"""
Expect a command.
:param reader: The reader to use.
:returns: The command data.
"""
size_type = struct.unpack('B', await reader.readexactly(1))[0]
if size_type == 0x04:
size = struct.unpack('!B', await reader.readexactly(1))[0]
elif size_type == 0x06:
size = struct.unpack('!Q', await reader.readexactly(8))[0]
else:
raise ProtocolError(
"Unexpected size type: %0x" % size_type,
fatal=True,
)
name_size = struct.unpack('B', await reader.readexactly(1))[0]
if name_size != len(name):
raise ProtocolError(
"Unexpected command name size: %s (expecting %s)" % (
name_size,
len(name),
),
fatal=True,
)
c_name = await reader.readexactly(name_size)
if c_name != name:
raise ProtocolError(
"Unexpected command name: %s (expecting %s)" % (c_name, name),
fatal=True,
)
return await reader.readexactly(size - name_size - 1) | Expect a command.
:param reader: The reader to use.
:returns: The command data. | Below is the instruction that describes the task:
### Input:
Expect a command.
:param reader: The reader to use.
:returns: The command data.
### Response:
async def _expect_command(cls, reader, name):
"""
Expect a command.
:param reader: The reader to use.
:returns: The command data.
"""
size_type = struct.unpack('B', await reader.readexactly(1))[0]
if size_type == 0x04:
size = struct.unpack('!B', await reader.readexactly(1))[0]
elif size_type == 0x06:
size = struct.unpack('!Q', await reader.readexactly(8))[0]
else:
raise ProtocolError(
"Unexpected size type: %0x" % size_type,
fatal=True,
)
name_size = struct.unpack('B', await reader.readexactly(1))[0]
if name_size != len(name):
raise ProtocolError(
"Unexpected command name size: %s (expecting %s)" % (
name_size,
len(name),
),
fatal=True,
)
c_name = await reader.readexactly(name_size)
if c_name != name:
raise ProtocolError(
"Unexpected command name: %s (expecting %s)" % (c_name, name),
fatal=True,
)
return await reader.readexactly(size - name_size - 1) |
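As an illustration of the framing `_expect_command` parses, here is a minimal sketch of a frame it would accept, assuming the layout implied by the parser (a size-type byte, the size, a one-byte name length, the name, then the payload); the command name and payload are made-up values, not from the source.

```python
import struct

# Hypothetical frame; the size field covers the name-length byte + name + payload.
name = b"PING"
payload = b"\x01\x02"
size = 1 + len(name) + len(payload)
frame = (
    struct.pack('B', 0x04) +       # size type: a 1-byte size follows
    struct.pack('!B', size) +      # size of name-length byte + name + payload
    struct.pack('B', len(name)) +  # command name length
    name +
    payload
)
print(frame)  # b'\x04\x07\x04PING\x01\x02'
```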
def plotloc(data, circleinds=[], crossinds=[], edgeinds=[], url_path=None, fileroot=None,
tools="hover,tap,pan,box_select,wheel_zoom,reset", plot_width=450, plot_height=400):
""" Make a light-weight loc figure """
fields = ['l1', 'm1', 'sizes', 'colors', 'snrs', 'key']
if not circleinds: circleinds = range(len(data['snrs']))
# set ranges
datalen = len(data['dm'])
inds = circleinds + crossinds + edgeinds
l1 = [data['l1'][i] for i in inds]
l1_min = min(l1)
l1_max = max(l1)
m1 = [data['m1'][i] for i in inds]
m1_min = min(m1)
m1_max = max(m1)
source = ColumnDataSource(data = dict({(key, tuple([value[i] for i in circleinds if i not in edgeinds]))
for (key, value) in data.iteritems() if key in fields}))
loc = Figure(plot_width=plot_width, plot_height=plot_height, toolbar_location="left", x_axis_label='l1 (rad)', y_axis_label='m1 (rad)',
x_range=(l1_min, l1_max), y_range=(m1_min,m1_max), tools=tools, output_backend='webgl')
loc.circle('l1', 'm1', size='sizes', line_color=None, fill_color='colors', fill_alpha=0.2, source=source)
if crossinds:
sourceneg = ColumnDataSource(data = dict({(key, tuple([value[i] for i in crossinds]))
for (key, value) in data.iteritems() if key in fields}))
loc.cross('l1', 'm1', size='sizes', line_color='colors', line_alpha=0.3, source=sourceneg)
if edgeinds:
sourceedge = ColumnDataSource(data = dict({(key, tuple([value[i] for i in edgeinds]))
for (key, value) in data.iteritems() if key in fields}))
loc.circle('l1', 'm1', size='sizes', line_color='colors', fill_color='colors', source=sourceedge, line_alpha=0.5, fill_alpha=0.2)
hover = loc.select(dict(type=HoverTool))
hover.tooltips = OrderedDict([('SNR', '@snrs'), ('key', '@key')])
if url_path and fileroot:
url = '{}/cands_{}[email protected]'.format(url_path, fileroot)
taptool = loc.select(type=TapTool)
taptool.callback = OpenURL(url=url)
return loc | Make a light-weight loc figure | Below is the instruction that describes the task:
### Input:
Make a light-weight loc figure
### Response:
def plotloc(data, circleinds=[], crossinds=[], edgeinds=[], url_path=None, fileroot=None,
tools="hover,tap,pan,box_select,wheel_zoom,reset", plot_width=450, plot_height=400):
""" Make a light-weight loc figure """
fields = ['l1', 'm1', 'sizes', 'colors', 'snrs', 'key']
if not circleinds: circleinds = range(len(data['snrs']))
# set ranges
datalen = len(data['dm'])
inds = circleinds + crossinds + edgeinds
l1 = [data['l1'][i] for i in inds]
l1_min = min(l1)
l1_max = max(l1)
m1 = [data['m1'][i] for i in inds]
m1_min = min(m1)
m1_max = max(m1)
source = ColumnDataSource(data = dict({(key, tuple([value[i] for i in circleinds if i not in edgeinds]))
for (key, value) in data.iteritems() if key in fields}))
loc = Figure(plot_width=plot_width, plot_height=plot_height, toolbar_location="left", x_axis_label='l1 (rad)', y_axis_label='m1 (rad)',
x_range=(l1_min, l1_max), y_range=(m1_min,m1_max), tools=tools, output_backend='webgl')
loc.circle('l1', 'm1', size='sizes', line_color=None, fill_color='colors', fill_alpha=0.2, source=source)
if crossinds:
sourceneg = ColumnDataSource(data = dict({(key, tuple([value[i] for i in crossinds]))
for (key, value) in data.iteritems() if key in fields}))
loc.cross('l1', 'm1', size='sizes', line_color='colors', line_alpha=0.3, source=sourceneg)
if edgeinds:
sourceedge = ColumnDataSource(data = dict({(key, tuple([value[i] for i in edgeinds]))
for (key, value) in data.iteritems() if key in fields}))
loc.circle('l1', 'm1', size='sizes', line_color='colors', fill_color='colors', source=sourceedge, line_alpha=0.5, fill_alpha=0.2)
hover = loc.select(dict(type=HoverTool))
hover.tooltips = OrderedDict([('SNR', '@snrs'), ('key', '@key')])
if url_path and fileroot:
url = '{}/cands_{}[email protected]'.format(url_path, fileroot)
taptool = loc.select(type=TapTool)
taptool.callback = OpenURL(url=url)
return loc |
def close(self):
"""
Closes the device.
"""
try:
self._running = False
self._read_thread.stop()
self._device.close()
except Exception:
pass
self.on_close() | Closes the device. | Below is the instruction that describes the task:
### Input:
Closes the device.
### Response:
def close(self):
"""
Closes the device.
"""
try:
self._running = False
self._read_thread.stop()
self._device.close()
except Exception:
pass
self.on_close() |
def check(self,
uid=None,
usage_limits_count=None,
cryptographic_usage_mask=None,
lease_time=None):
"""
Check the constraints for a managed object.
Args:
uid (string): The unique ID of the managed object to check.
Optional, defaults to None.
usage_limits_count (int): The number of items that can be secured
with the specified managed object. Optional, defaults to None.
cryptographic_usage_mask (list): A list of CryptographicUsageMask
enumerations specifying the operations possible with the
specified managed object. Optional, defaults to None.
lease_time (int): The number of seconds that can be leased for the
specified managed object. Optional, defaults to None.
"""
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("The unique identifier must be a string.")
if usage_limits_count is not None:
if not isinstance(usage_limits_count, six.integer_types):
raise TypeError("The usage limits count must be an integer.")
if cryptographic_usage_mask is not None:
if not isinstance(cryptographic_usage_mask, list) or \
not all(isinstance(
x,
enums.CryptographicUsageMask
) for x in cryptographic_usage_mask):
raise TypeError(
"The cryptographic usage mask must be a list of "
"CryptographicUsageMask enumerations."
)
if lease_time is not None:
if not isinstance(lease_time, six.integer_types):
raise TypeError("The lease time must be an integer.")
result = self.proxy.check(
uid,
usage_limits_count,
cryptographic_usage_mask,
lease_time
)
status = result.get('result_status')
if status == enums.ResultStatus.SUCCESS:
return result.get('unique_identifier')
else:
raise exceptions.KmipOperationFailure(
status,
result.get('result_reason'),
result.get('result_message')
) | Check the constraints for a managed object.
Args:
uid (string): The unique ID of the managed object to check.
Optional, defaults to None.
usage_limits_count (int): The number of items that can be secured
with the specified managed object. Optional, defaults to None.
cryptographic_usage_mask (list): A list of CryptographicUsageMask
enumerations specifying the operations possible with the
specified managed object. Optional, defaults to None.
lease_time (int): The number of seconds that can be leased for the
specified managed object. Optional, defaults to None. | Below is the instruction that describes the task:
### Input:
Check the constraints for a managed object.
Args:
uid (string): The unique ID of the managed object to check.
Optional, defaults to None.
usage_limits_count (int): The number of items that can be secured
with the specified managed object. Optional, defaults to None.
cryptographic_usage_mask (list): A list of CryptographicUsageMask
enumerations specifying the operations possible with the
specified managed object. Optional, defaults to None.
lease_time (int): The number of seconds that can be leased for the
specified managed object. Optional, defaults to None.
### Response:
def check(self,
uid=None,
usage_limits_count=None,
cryptographic_usage_mask=None,
lease_time=None):
"""
Check the constraints for a managed object.
Args:
uid (string): The unique ID of the managed object to check.
Optional, defaults to None.
usage_limits_count (int): The number of items that can be secured
with the specified managed object. Optional, defaults to None.
cryptographic_usage_mask (list): A list of CryptographicUsageMask
enumerations specifying the operations possible with the
specified managed object. Optional, defaults to None.
lease_time (int): The number of seconds that can be leased for the
specified managed object. Optional, defaults to None.
"""
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("The unique identifier must be a string.")
if usage_limits_count is not None:
if not isinstance(usage_limits_count, six.integer_types):
raise TypeError("The usage limits count must be an integer.")
if cryptographic_usage_mask is not None:
if not isinstance(cryptographic_usage_mask, list) or \
not all(isinstance(
x,
enums.CryptographicUsageMask
) for x in cryptographic_usage_mask):
raise TypeError(
"The cryptographic usage mask must be a list of "
"CryptographicUsageMask enumerations."
)
if lease_time is not None:
if not isinstance(lease_time, six.integer_types):
raise TypeError("The lease time must be an integer.")
result = self.proxy.check(
uid,
usage_limits_count,
cryptographic_usage_mask,
lease_time
)
status = result.get('result_status')
if status == enums.ResultStatus.SUCCESS:
return result.get('unique_identifier')
else:
raise exceptions.KmipOperationFailure(
status,
result.get('result_reason'),
result.get('result_message')
) |
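A hedged usage sketch of the `check` call above, assuming a connected PyKMIP-style client object that exposes this method; the UID, limits, and lease time are illustrative values, not from the source.

```python
from kmip.core import enums

def check_key(client, uid='1'):
    # Illustrative call only; the client object and UID are assumptions.
    return client.check(
        uid=uid,
        usage_limits_count=50,
        cryptographic_usage_mask=[
            enums.CryptographicUsageMask.ENCRYPT,
            enums.CryptographicUsageMask.DECRYPT,
        ],
        lease_time=3600,
    )
```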
def rad_latitude(self):
"""
Lazy conversion degrees latitude to radians.
"""
if self._rad_latitude is None:
self._rad_latitude = math.radians(self.latitude)
return self._rad_latitude | Lazy conversion degrees latitude to radians. | Below is the instruction that describes the task:
### Input:
Lazy conversion degrees latitude to radians.
### Response:
def rad_latitude(self):
"""
Lazy conversion degrees latitude to radians.
"""
if self._rad_latitude is None:
self._rad_latitude = math.radians(self.latitude)
return self._rad_latitude |
def size(self):
"""Returns the size of the cache in bytes."""
total_size = 0
for dir_path, dir_names, filenames in os.walk(self.dir):
for f in filenames:
fp = os.path.join(dir_path, f)
total_size += os.path.getsize(fp)
return total_size | Returns the size of the cache in bytes. | Below is the instruction that describes the task:
### Input:
Returns the size of the cache in bytes.
### Response:
def size(self):
"""Returns the size of the cache in bytes."""
total_size = 0
for dir_path, dir_names, filenames in os.walk(self.dir):
for f in filenames:
fp = os.path.join(dir_path, f)
total_size += os.path.getsize(fp)
return total_size |
def autostrip(cls):
"""
strip text fields before validation
example:
@autostrip
class PersonForm(forms.Form):
name = forms.CharField(min_length=2, max_length=10)
email = forms.EmailField()
Author: nail.xx
"""
warnings.warn(
"django-annoying autostrip is deprecated and will be removed in a "
"future version. Django now has native support for stripping form "
"fields. "
"https://docs.djangoproject.com/en/stable/ref/forms/fields/#django.forms.CharField.strip",
DeprecationWarning,
stacklevel=2,
)
fields = [(key, value) for key, value in cls.base_fields.items() if isinstance(value, forms.CharField)]
for field_name, field_object in fields:
def get_clean_func(original_clean):
return lambda value: original_clean(value and value.strip())
clean_func = get_clean_func(getattr(field_object, 'clean'))
setattr(field_object, 'clean', clean_func)
return cls | strip text fields before validation
example:
@autostrip
class PersonForm(forms.Form):
name = forms.CharField(min_length=2, max_length=10)
email = forms.EmailField()
Author: nail.xx | Below is the instruction that describes the task:
### Input:
strip text fields before validation
example:
@autostrip
class PersonForm(forms.Form):
name = forms.CharField(min_length=2, max_length=10)
email = forms.EmailField()
Author: nail.xx
### Response:
def autostrip(cls):
"""
strip text fields before validation
example:
@autostrip
class PersonForm(forms.Form):
name = forms.CharField(min_length=2, max_length=10)
email = forms.EmailField()
Author: nail.xx
"""
warnings.warn(
"django-annoying autostrip is deprecated and will be removed in a "
"future version. Django now has native support for stripping form "
"fields. "
"https://docs.djangoproject.com/en/stable/ref/forms/fields/#django.forms.CharField.strip",
DeprecationWarning,
stacklevel=2,
)
fields = [(key, value) for key, value in cls.base_fields.items() if isinstance(value, forms.CharField)]
for field_name, field_object in fields:
def get_clean_func(original_clean):
return lambda value: original_clean(value and value.strip())
clean_func = get_clean_func(getattr(field_object, 'clean'))
setattr(field_object, 'clean', clean_func)
return cls |
def run_tutorial(plot=False, multiplex=True, return_streams=False, cores=4,
verbose=False):
"""
Run the tutorial.
:return: detections
"""
client = Client("GEONET", debug=verbose)
cat = client.get_events(
minlatitude=-40.98, maxlatitude=-40.85, minlongitude=175.4,
maxlongitude=175.5, starttime=UTCDateTime(2016, 5, 1),
endtime=UTCDateTime(2016, 5, 20))
print("Downloaded a catalog of %i events" % len(cat))
# This gives us a catalog of events - it takes a while to download all
# the information, so give it a bit!
# We will generate a five station, multi-channel detector.
cat = filter_picks(catalog=cat, top_n_picks=5)
stachans = list(set(
[(pick.waveform_id.station_code, pick.waveform_id.channel_code)
for event in cat for pick in event.picks]))
# In this tutorial we will only work on one cluster, defined spatially.
# You can work on multiple clusters, or try the whole set.
clusters = space_cluster(catalog=cat, d_thresh=2, show=False)
# We will work on the largest cluster
cluster = sorted(clusters, key=lambda c: len(c))[-1]
# This cluster contains 32 events, we will now download and trim the
# waveforms. Note that each channel must start at the same time and be the
# same length for multiplexing. If not multiplexing EQcorrscan will
# maintain the individual differences in time between channels and delay
# the detection statistics by that amount before stacking and detection.
client = Client('GEONET')
design_set = []
st = Stream()
for event in cluster:
print("Downloading for event {0}".format(event.resource_id.id))
bulk_info = []
t1 = event.origins[0].time
t2 = t1 + 25.1 # Have to download extra data, otherwise GeoNet will
# trim wherever suits.
t1 -= 0.1
for station, channel in stachans:
bulk_info.append(('NZ', station, '*', channel[0:2] + '?', t1, t2))
st += client.get_waveforms_bulk(bulk=bulk_info)
print("Downloaded %i channels" % len(st))
for event in cluster:
t1 = event.origins[0].time
t2 = t1 + 25
design_set.append(st.copy().trim(t1, t2))
# Construction of the detector will process the traces, then align them,
# before multiplexing.
print("Making detector")
detector = subspace.Detector()
detector.construct(
streams=design_set, lowcut=2.0, highcut=9.0, filt_order=4,
sampling_rate=20, multiplex=multiplex, name='Wairarapa1', align=True,
reject=0.2, shift_len=6, plot=plot).partition(9)
print("Constructed Detector")
if plot:
detector.plot()
# We also want the continuous stream to detect in.
t1 = UTCDateTime(2016, 5, 11, 19)
t2 = UTCDateTime(2016, 5, 11, 20)
# We are going to look in a single hour just to minimize cost, but you can
# run for much longer.
bulk_info = [('NZ', stachan[0], '*',
stachan[1][0] + '?' + stachan[1][-1],
t1, t2) for stachan in detector.stachans]
print("Downloading continuous data")
st = client.get_waveforms_bulk(bulk_info)
st.merge().detrend('simple').trim(starttime=t1, endtime=t2)
# We set a very low threshold because the detector is not that great, we
# haven't aligned it particularly well - however, at this threshold we make
# two real detections.
print("Computing detections")
detections, det_streams = detector.detect(
st=st, threshold=0.3, trig_int=2, extract_detections=True,
cores=cores)
if return_streams:
return detections, det_streams
else:
return detections | Run the tutorial.
:return: detections | Below is the instruction that describes the task:
### Input:
Run the tutorial.
:return: detections
### Response:
def run_tutorial(plot=False, multiplex=True, return_streams=False, cores=4,
verbose=False):
"""
Run the tutorial.
:return: detections
"""
client = Client("GEONET", debug=verbose)
cat = client.get_events(
minlatitude=-40.98, maxlatitude=-40.85, minlongitude=175.4,
maxlongitude=175.5, starttime=UTCDateTime(2016, 5, 1),
endtime=UTCDateTime(2016, 5, 20))
print("Downloaded a catalog of %i events" % len(cat))
# This gives us a catalog of events - it takes a while to download all
# the information, so give it a bit!
# We will generate a five station, multi-channel detector.
cat = filter_picks(catalog=cat, top_n_picks=5)
stachans = list(set(
[(pick.waveform_id.station_code, pick.waveform_id.channel_code)
for event in cat for pick in event.picks]))
# In this tutorial we will only work on one cluster, defined spatially.
# You can work on multiple clusters, or try the whole set.
clusters = space_cluster(catalog=cat, d_thresh=2, show=False)
# We will work on the largest cluster
cluster = sorted(clusters, key=lambda c: len(c))[-1]
# This cluster contains 32 events, we will now download and trim the
# waveforms. Note that each channel must start at the same time and be the
# same length for multiplexing. If not multiplexing EQcorrscan will
# maintain the individual differences in time between channels and delay
# the detection statistics by that amount before stacking and detection.
client = Client('GEONET')
design_set = []
st = Stream()
for event in cluster:
print("Downloading for event {0}".format(event.resource_id.id))
bulk_info = []
t1 = event.origins[0].time
t2 = t1 + 25.1 # Have to download extra data, otherwise GeoNet will
# trim wherever suits.
t1 -= 0.1
for station, channel in stachans:
bulk_info.append(('NZ', station, '*', channel[0:2] + '?', t1, t2))
st += client.get_waveforms_bulk(bulk=bulk_info)
print("Downloaded %i channels" % len(st))
for event in cluster:
t1 = event.origins[0].time
t2 = t1 + 25
design_set.append(st.copy().trim(t1, t2))
# Construction of the detector will process the traces, then align them,
# before multiplexing.
print("Making detector")
detector = subspace.Detector()
detector.construct(
streams=design_set, lowcut=2.0, highcut=9.0, filt_order=4,
sampling_rate=20, multiplex=multiplex, name='Wairarapa1', align=True,
reject=0.2, shift_len=6, plot=plot).partition(9)
print("Constructed Detector")
if plot:
detector.plot()
# We also want the continuous stream to detect in.
t1 = UTCDateTime(2016, 5, 11, 19)
t2 = UTCDateTime(2016, 5, 11, 20)
# We are going to look in a single hour just to minimize cost, but you can
# run for much longer.
bulk_info = [('NZ', stachan[0], '*',
stachan[1][0] + '?' + stachan[1][-1],
t1, t2) for stachan in detector.stachans]
print("Downloading continuous data")
st = client.get_waveforms_bulk(bulk_info)
st.merge().detrend('simple').trim(starttime=t1, endtime=t2)
# We set a very low threshold because the detector is not that great, we
# haven't aligned it particularly well - however, at this threshold we make
# two real detections.
print("Computing detections")
detections, det_streams = detector.detect(
st=st, threshold=0.3, trig_int=2, extract_detections=True,
cores=cores)
if return_streams:
return detections, det_streams
else:
return detections |
def _get_bokeh_chart(self, x_field, y_field, chart_type,
label, opts, style, options={}, **kwargs):
"""
Get a Bokeh chart object
"""
if isinstance(x_field, list):
kdims = x_field
else:
kdims = [x_field]
if isinstance(y_field, list):
vdims = y_field
else:
vdims = [y_field]
args = kwargs
args["data"] = self.df
args["kdims"] = kdims
args["vdims"] = vdims
if label is not None:
args["label"] = label
else:
if self.label is not None:
args["label"] = self.label
chart = None
try:
if chart_type == "line":
chart = hv.Curve(**args)
if chart_type == "hline":
chart = self._hline_bokeh_(y_field)
elif chart_type == "point":
chart = hv.Scatter(**args)
elif chart_type == "area":
chart = hv.Area(**args)
elif chart_type == "bar":
chart = hv.Bars(**args)
elif chart_type == "hist":
chart = hv.Histogram(**args)
elif chart_type == "errorBar":
chart = hv.ErrorBars(**args)
elif chart_type == "heatmap":
chart = hv.HeatMap(**args)
elif chart_type == "lreg":
chart = self._lreg_bokeh(**args)
elif chart_type == "sline":
window_size, y_label = options["window_size"], options["y_label"]
chart = self._sline_bokeh(window_size, y_label)
if chart is None:
self.err("Chart type " + chart_type +
" unknown", self._get_bokeh_chart)
return
endchart = chart(plot=opts, style=style)
return endchart
except DataError as e:
msg = "Column not found in " + x_field + " and " + y_field
self.err(e, self._get_bokeh_chart, msg)
except Exception as e:
self.err(e) | Get a Bokeh chart object | Below is the instruction that describes the task:
### Input:
Get a Bokeh chart object
### Response:
def _get_bokeh_chart(self, x_field, y_field, chart_type,
label, opts, style, options={}, **kwargs):
"""
Get a Bokeh chart object
"""
if isinstance(x_field, list):
kdims = x_field
else:
kdims = [x_field]
if isinstance(y_field, list):
vdims = y_field
else:
vdims = [y_field]
args = kwargs
args["data"] = self.df
args["kdims"] = kdims
args["vdims"] = vdims
if label is not None:
args["label"] = label
else:
if self.label is not None:
args["label"] = self.label
chart = None
try:
if chart_type == "line":
chart = hv.Curve(**args)
if chart_type == "hline":
chart = self._hline_bokeh_(y_field)
elif chart_type == "point":
chart = hv.Scatter(**args)
elif chart_type == "area":
chart = hv.Area(**args)
elif chart_type == "bar":
chart = hv.Bars(**args)
elif chart_type == "hist":
chart = hv.Histogram(**args)
elif chart_type == "errorBar":
chart = hv.ErrorBars(**args)
elif chart_type == "heatmap":
chart = hv.HeatMap(**args)
elif chart_type == "lreg":
chart = self._lreg_bokeh(**args)
elif chart_type == "sline":
window_size, y_label = options["window_size"], options["y_label"]
chart = self._sline_bokeh(window_size, y_label)
if chart is None:
self.err("Chart type " + chart_type +
" unknown", self._get_bokeh_chart)
return
endchart = chart(plot=opts, style=style)
return endchart
except DataError as e:
msg = "Column not found in " + x_field + " and " + y_field
self.err(e, self._get_bokeh_chart, msg)
except Exception as e:
self.err(e) |
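The dispatch in `_get_bokeh_chart` builds HoloViews elements from a dataframe plus key/value dimensions. A minimal standalone illustration of the same constructor calls follows; the sample dataframe and label are assumptions, not from the source.

```python
import pandas as pd
import holoviews as hv
hv.extension('bokeh')

df = pd.DataFrame({'x': [0, 1, 2], 'y': [3, 1, 2]})
args = {'data': df, 'kdims': ['x'], 'vdims': ['y'], 'label': 'demo'}
curve = hv.Curve(**args)      # what chart_type == "line" produces
scatter = hv.Scatter(**args)  # what chart_type == "point" produces
```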
def remove(self, item):
"""Remove an item from the list
:param item: The item to remove from the list.
:raises ValueError: If the item is not present in the list.
"""
if item not in self:
raise ValueError('objectlist.remove(item) failed, item not in list')
item_path = self._view_path_for(item)
giter = self._iter_for(item)
del self[giter]
self.emit('item-removed', item, item_path) | Remove an item from the list
:param item: The item to remove from the list.
:raises ValueError: If the item is not present in the list. | Below is the instruction that describes the task:
### Input:
Remove an item from the list
:param item: The item to remove from the list.
:raises ValueError: If the item is not present in the list.
### Response:
def remove(self, item):
"""Remove an item from the list
:param item: The item to remove from the list.
:raises ValueError: If the item is not present in the list.
"""
if item not in self:
raise ValueError('objectlist.remove(item) failed, item not in list')
item_path = self._view_path_for(item)
giter = self._iter_for(item)
del self[giter]
self.emit('item-removed', item, item_path) |
def start_client(self, host, port=5001, protocol='TCP', timeout=5,
parallel=None, bandwidth=None):
"""iperf -D -c host -t 60
"""
cmd = ['iperf', '-c', host, '-p', str(port), '-t', str(timeout)]
if protocol == 'UDP':
cmd.append('-u')
if parallel:
cmd.extend(['-P', str(parallel)])
if bandwidth:
cmd.extend(['-b', '%sM' % bandwidth])
stdcode, stdout, stderr = utils.execute_wait(cmd)
if (not stdcode) or (not stderr):
out_dict = stdout.split('\n')
if not out_dict[-1]:
out_dict.pop()
out_data = out_dict[-1].split()
data = dict()
data['Bandwidth'] = out_data[-2] + ' ' + out_data[-1]
data['Transfer'] = out_data[-4] + ' ' + out_data[-3]
data['Interval'] = out_data[-6]
return data
raise Exception('Start iperf failed, please check on the node.') | iperf -D -c host -t 60 | Below is the instruction that describes the task:
### Input:
iperf -D -c host -t 60
### Response:
def start_client(self, host, port=5001, protocol='TCP', timeout=5,
parallel=None, bandwidth=None):
"""iperf -D -c host -t 60
"""
cmd = ['iperf', '-c', host, '-p', str(port), '-t', str(timeout)]
if protocol == 'UDP':
cmd.append('-u')
if parallel:
cmd.extend(['-P', str(parallel)])
if bandwidth:
cmd.extend(['-b', '%sM' % bandwidth])
stdcode, stdout, stderr = utils.execute_wait(cmd)
if (not stdcode) or (not stderr):
out_dict = stdout.split('\n')
if not out_dict[-1]:
out_dict.pop()
out_data = out_dict[-1].split()
data = dict()
data['Bandwidth'] = out_data[-2] + ' ' + out_data[-1]
data['Transfer'] = out_data[-4] + ' ' + out_data[-3]
data['Interval'] = out_data[-6]
return data
raise Exception('Start iperf failed, please check on the node.') |
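The summary parsing in `start_client` slices the last whitespace-separated line of iperf's client output. A standalone sketch with a made-up output line follows (real iperf output can differ by version); the indices mirror the slicing in the code above.

```python
line = "[  3]  0.0-5.0 sec  57.4 MBytes  96.2 Mbits/sec"  # illustrative only
out_data = line.split()
data = {
    'Bandwidth': out_data[-2] + ' ' + out_data[-1],  # '96.2 Mbits/sec'
    'Transfer': out_data[-4] + ' ' + out_data[-3],   # '57.4 MBytes'
    'Interval': out_data[-6],                        # '0.0-5.0'
}
print(data)
```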
def _sdk_tools(self):
"""
Microsoft Windows SDK Tools paths generator
"""
if self.vc_ver < 15.0:
bin_dir = 'Bin' if self.vc_ver <= 11.0 else r'Bin\x86'
yield os.path.join(self.si.WindowsSdkDir, bin_dir)
if not self.pi.current_is_x86():
arch_subdir = self.pi.current_dir(x64=True)
path = 'Bin%s' % arch_subdir
yield os.path.join(self.si.WindowsSdkDir, path)
if self.vc_ver == 10.0 or self.vc_ver == 11.0:
if self.pi.target_is_x86():
arch_subdir = ''
else:
arch_subdir = self.pi.current_dir(hidex86=True, x64=True)
path = r'Bin\NETFX 4.0 Tools%s' % arch_subdir
yield os.path.join(self.si.WindowsSdkDir, path)
elif self.vc_ver >= 15.0:
path = os.path.join(self.si.WindowsSdkDir, 'Bin')
arch_subdir = self.pi.current_dir(x64=True)
sdkver = self.si.WindowsSdkLastVersion
yield os.path.join(path, '%s%s' % (sdkver, arch_subdir))
if self.si.WindowsSDKExecutablePath:
yield self.si.WindowsSDKExecutablePath | Microsoft Windows SDK Tools paths generator | Below is the instruction that describes the task:
### Input:
Microsoft Windows SDK Tools paths generator
### Response:
def _sdk_tools(self):
"""
Microsoft Windows SDK Tools paths generator
"""
if self.vc_ver < 15.0:
bin_dir = 'Bin' if self.vc_ver <= 11.0 else r'Bin\x86'
yield os.path.join(self.si.WindowsSdkDir, bin_dir)
if not self.pi.current_is_x86():
arch_subdir = self.pi.current_dir(x64=True)
path = 'Bin%s' % arch_subdir
yield os.path.join(self.si.WindowsSdkDir, path)
if self.vc_ver == 10.0 or self.vc_ver == 11.0:
if self.pi.target_is_x86():
arch_subdir = ''
else:
arch_subdir = self.pi.current_dir(hidex86=True, x64=True)
path = r'Bin\NETFX 4.0 Tools%s' % arch_subdir
yield os.path.join(self.si.WindowsSdkDir, path)
elif self.vc_ver >= 15.0:
path = os.path.join(self.si.WindowsSdkDir, 'Bin')
arch_subdir = self.pi.current_dir(x64=True)
sdkver = self.si.WindowsSdkLastVersion
yield os.path.join(path, '%s%s' % (sdkver, arch_subdir))
if self.si.WindowsSDKExecutablePath:
yield self.si.WindowsSDKExecutablePath |
def AddTimeZoneOption(self, argument_group):
"""Adds the time zone option to the argument group.
Args:
argument_group (argparse._ArgumentGroup): argparse argument group.
"""
# Note the default here is None so we can determine if the time zone
# option was set.
argument_group.add_argument(
'-z', '--zone', '--timezone', dest='timezone', action='store',
type=str, default=None, help=(
'explicitly define the timezone. Typically the timezone is '
'determined automatically where possible otherwise it will '
'default to UTC. Use "-z list" to see a list of available '
'timezones.')) | Adds the time zone option to the argument group.
Args:
argument_group (argparse._ArgumentGroup): argparse argument group. | Below is the instruction that describes the task:
### Input:
Adds the time zone option to the argument group.
Args:
argument_group (argparse._ArgumentGroup): argparse argument group.
### Response:
def AddTimeZoneOption(self, argument_group):
"""Adds the time zone option to the argument group.
Args:
argument_group (argparse._ArgumentGroup): argparse argument group.
"""
# Note the default here is None so we can determine if the time zone
# option was set.
argument_group.add_argument(
'-z', '--zone', '--timezone', dest='timezone', action='store',
type=str, default=None, help=(
'explicitly define the timezone. Typically the timezone is '
'determined automatically where possible otherwise it will '
'default to UTC. Use "-z list" to see a list of available '
'timezones.')) |
def _load_report(infile):
'''Loads report file into a dictionary. Key=reference name.
Value = list of report lines for that reference'''
report_dict = {}
f = pyfastaq.utils.open_file_read(infile)
first_line = True
for line in f:
line = line.rstrip()
if first_line:
expected_first_line = '#' + '\t'.join(report.columns)
if line != expected_first_line:
pyfastaq.utils.close(f)
raise Error('Error reading report file. Expected first line of file is\n' + expected_first_line + '\nbut got:\n' + line)
first_line = False
else:
line_dict = ReportFilter._report_line_to_dict(line)
if line_dict is None:
pyfastaq.utils.close(f)
raise Error('Error reading report file at this line:\n' + line)
ref_name = line_dict['ref_name']
ctg_name = line_dict['ctg']
if ref_name not in report_dict:
report_dict[ref_name] = {}
if ctg_name not in report_dict[ref_name]:
report_dict[ref_name][ctg_name] = []
report_dict[ref_name][ctg_name].append(line_dict)
pyfastaq.utils.close(f)
return report_dict | Loads report file into a dictionary. Key=reference name.
Value = list of report lines for that reference | Below is the instruction that describes the task:
### Input:
Loads report file into a dictionary. Key=reference name.
Value = list of report lines for that reference
### Response:
def _load_report(infile):
'''Loads report file into a dictionary. Key=reference name.
Value = list of report lines for that reference'''
report_dict = {}
f = pyfastaq.utils.open_file_read(infile)
first_line = True
for line in f:
line = line.rstrip()
if first_line:
expected_first_line = '#' + '\t'.join(report.columns)
if line != expected_first_line:
pyfastaq.utils.close(f)
raise Error('Error reading report file. Expected first line of file is\n' + expected_first_line + '\nbut got:\n' + line)
first_line = False
else:
line_dict = ReportFilter._report_line_to_dict(line)
if line_dict is None:
pyfastaq.utils.close(f)
raise Error('Error reading report file at this line:\n' + line)
ref_name = line_dict['ref_name']
ctg_name = line_dict['ctg']
if ref_name not in report_dict:
report_dict[ref_name] = {}
if ctg_name not in report_dict[ref_name]:
report_dict[ref_name][ctg_name] = []
report_dict[ref_name][ctg_name].append(line_dict)
pyfastaq.utils.close(f)
return report_dict |
def set_position(self, key, latlon, layer=None, rotation=0):
'''move an object on the map'''
self.object_queue.put(SlipPosition(key, latlon, layer, rotation)) | move an object on the map | Below is the instruction that describes the task:
### Input:
move an object on the map
### Response:
def set_position(self, key, latlon, layer=None, rotation=0):
'''move an object on the map'''
self.object_queue.put(SlipPosition(key, latlon, layer, rotation)) |
def from_path(self, request, path, lang):
"""
Resolve a request to an alias. returns a
:class:`PageAlias <pages.models.PageAlias>` if the url matches
no page at all. The aliasing system supports plain
aliases (``/foo/bar``) as well as aliases containing GET parameters
(like ``index.php?page=foo``).
:param request: the request object
:param path: the complete path to the page
:param lang: not used
"""
from pages.models import PageAlias
url = normalize_url(path)
# §1: try with complete query string
query = request.META.get('QUERY_STRING')
if query:
url = url + '?' + query
try:
alias = PageAlias.objects.get(url=url)
return alias
except PageAlias.DoesNotExist:
pass
# §2: try with path only
url = normalize_url(path)
try:
alias = PageAlias.objects.get(url=url)
return alias
except PageAlias.DoesNotExist:
pass
# §3: no alias found, we give up
return None | Resolve a request to an alias. returns a
:class:`PageAlias <pages.models.PageAlias>` if the url matches
no page at all. The aliasing system supports plain
aliases (``/foo/bar``) as well as aliases containing GET parameters
(like ``index.php?page=foo``).
:param request: the request object
:param path: the complete path to the page
:param lang: not used | Below is the instruction that describes the task:
### Input:
Resolve a request to an alias. returns a
:class:`PageAlias <pages.models.PageAlias>` if the url matches
no page at all. The aliasing system supports plain
aliases (``/foo/bar``) as well as aliases containing GET parameters
(like ``index.php?page=foo``).
:param request: the request object
:param path: the complete path to the page
:param lang: not used
### Response:
def from_path(self, request, path, lang):
"""
Resolve a request to an alias. returns a
:class:`PageAlias <pages.models.PageAlias>` if the url matches
no page at all. The aliasing system supports plain
aliases (``/foo/bar``) as well as aliases containing GET parameters
(like ``index.php?page=foo``).
:param request: the request object
:param path: the complete path to the page
:param lang: not used
"""
from pages.models import PageAlias
url = normalize_url(path)
# §1: try with complete query string
query = request.META.get('QUERY_STRING')
if query:
url = url + '?' + query
try:
alias = PageAlias.objects.get(url=url)
return alias
except PageAlias.DoesNotExist:
pass
# §2: try with path only
url = normalize_url(path)
try:
alias = PageAlias.objects.get(url=url)
return alias
except PageAlias.DoesNotExist:
pass
# §3: no alias found, we give up
return None |
def get_event_buffer(self, dag_ids=None):
"""
Returns and flush the event buffer. In case dag_ids is specified
it will only return and flush events for the given dag_ids. Otherwise
it returns and flushes all
:param dag_ids: to dag_ids to return events for, if None returns all
:return: a dict of events
"""
cleared_events = dict()
if dag_ids is None:
cleared_events = self.event_buffer
self.event_buffer = dict()
else:
for key in list(self.event_buffer.keys()):
dag_id, _, _, _ = key
if dag_id in dag_ids:
cleared_events[key] = self.event_buffer.pop(key)
return cleared_events | Returns and flush the event buffer. In case dag_ids is specified
it will only return and flush events for the given dag_ids. Otherwise
it returns and flushes all
:param dag_ids: to dag_ids to return events for, if None returns all
:return: a dict of events | Below is the instruction that describes the task:
### Input:
Returns and flush the event buffer. In case dag_ids is specified
it will only return and flush events for the given dag_ids. Otherwise
it returns and flushes all
:param dag_ids: to dag_ids to return events for, if None returns all
:return: a dict of events
### Response:
def get_event_buffer(self, dag_ids=None):
"""
Returns and flush the event buffer. In case dag_ids is specified
it will only return and flush events for the given dag_ids. Otherwise
it returns and flushes all
:param dag_ids: to dag_ids to return events for, if None returns all
:return: a dict of events
"""
cleared_events = dict()
if dag_ids is None:
cleared_events = self.event_buffer
self.event_buffer = dict()
else:
for key in list(self.event_buffer.keys()):
dag_id, _, _, _ = key
if dag_id in dag_ids:
cleared_events[key] = self.event_buffer.pop(key)
return cleared_events |
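A small standalone illustration of the filtering behaviour of `get_event_buffer`, using made-up buffer keys of the (dag_id, task_id, execution_date, try_number) form the code unpacks.

```python
event_buffer = {
    ('dag_a', 'task_1', '2021-01-01', 1): 'success',
    ('dag_b', 'task_2', '2021-01-01', 1): 'failed',
}
dag_ids = ['dag_a']
cleared_events = {}
for key in list(event_buffer.keys()):
    dag_id, _, _, _ = key
    if dag_id in dag_ids:
        cleared_events[key] = event_buffer.pop(key)
print(cleared_events)  # only the dag_a event is returned
print(event_buffer)    # the dag_b event remains buffered
```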
def _debug_check(self):
"""
Iterates over list checking segments with same sort do not overlap
:raise: Exception: if segments overlap space with same sort
"""
# old_start = 0
old_end = 0
old_sort = ""
for segment in self._list:
if segment.start <= old_end and segment.sort == old_sort:
raise AngrCFGError("Error in SegmentList: blocks are not merged")
# old_start = start
old_end = segment.end
old_sort = segment.sort | Iterates over list checking segments with same sort do not overlap
:raise: Exception: if segments overlap space with same sort | Below is the instruction that describes the task:
### Input:
Iterates over list checking segments with same sort do not overlap
:raise: Exception: if segments overlap space with same sort
### Response:
def _debug_check(self):
"""
Iterates over list checking segments with same sort do not overlap
:raise: Exception: if segments overlap space with same sort
"""
# old_start = 0
old_end = 0
old_sort = ""
for segment in self._list:
if segment.start <= old_end and segment.sort == old_sort:
raise AngrCFGError("Error in SegmentList: blocks are not merged")
# old_start = start
old_end = segment.end
old_sort = segment.sort |
def exportData(self, datfile):
"""
Create a .dat file with the data that has been loaded.
Args:
datfile: Path to the file (Relative to the current working
directory or absolute).
"""
def ampl_set(name, values):
def format_entry(e):
return repr(e).replace(' ', '')
return 'set {0} := {1};'.format(
name, ','.join(format_entry(e) for e in values)
)
def ampl_param(name, values):
def format_entry(k, v):
k = repr(k).strip('()').replace(' ', '')
if v == inf:
v = "Infinity"
elif v == -inf:
v = "-Infinity"
else:
v = repr(v).strip('()').replace(' ', '')
return '[{0}]{1}'.format(k, v)
return 'param {0} := {1};'.format(
name, ''.join(format_entry(k, v) for k, v in values.items())
)
with open(datfile, 'w') as f:
for name, entity in self.getSets():
values = entity.getValues().toList()
print(ampl_set(name, values), file=f)
for name, entity in self.getParameters():
if entity.isScalar():
print(
'param {} := {};'.format(name, entity.value()),
file=f
)
else:
values = entity.getValues().toDict()
print(ampl_param(name, values), file=f) | Create a .dat file with the data that has been loaded.
Args:
datfile: Path to the file (Relative to the current working
directory or absolute). | Below is the instruction that describes the task:
### Input:
Create a .dat file with the data that has been loaded.
Args:
datfile: Path to the file (Relative to the current working
directory or absolute).
### Response:
def exportData(self, datfile):
"""
Create a .dat file with the data that has been loaded.
Args:
datfile: Path to the file (Relative to the current working
directory or absolute).
"""
def ampl_set(name, values):
def format_entry(e):
return repr(e).replace(' ', '')
return 'set {0} := {1};'.format(
name, ','.join(format_entry(e) for e in values)
)
def ampl_param(name, values):
def format_entry(k, v):
k = repr(k).strip('()').replace(' ', '')
if v == inf:
v = "Infinity"
elif v == -inf:
v = "-Infinity"
else:
v = repr(v).strip('()').replace(' ', '')
return '[{0}]{1}'.format(k, v)
return 'param {0} := {1};'.format(
name, ''.join(format_entry(k, v) for k, v in values.items())
)
with open(datfile, 'w') as f:
for name, entity in self.getSets():
values = entity.getValues().toList()
print(ampl_set(name, values), file=f)
for name, entity in self.getParameters():
if entity.isScalar():
print(
'param {} := {};'.format(name, entity.value()),
file=f
)
else:
values = entity.getValues().toDict()
print(ampl_param(name, values), file=f) |
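For clarity, the two helpers inside `exportData` emit AMPL data statements like the following; this standalone re-implementation with toy values only shows the output format and is not part of the original source.

```python
def ampl_set(name, values):
    return 'set {0} := {1};'.format(
        name, ','.join(repr(e).replace(' ', '') for e in values))

def ampl_param(name, values):
    return 'param {0} := {1};'.format(
        name, ''.join('[{0}]{1}'.format(
            repr(k).strip('()').replace(' ', ''),
            repr(v).strip('()').replace(' ', ''))
            for k, v in values.items()))

print(ampl_set('NODES', [1, 2, 3]))       # set NODES := 1,2,3;
print(ampl_param('cost', {(1, 2): 4.5}))  # param cost := [1,2]4.5;
```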
def add(self, key, value):
"""add header value"""
if key not in self.headers:
self.headers[key] = []
self.headers[key].append(value)
if self.sent_time:
self.modified_since_sent = True | add header value | Below is the instruction that describes the task:
### Input:
add header value
### Response:
def add(self, key, value):
"""add header value"""
if key not in self.headers:
self.headers[key] = []
self.headers[key].append(value)
if self.sent_time:
self.modified_since_sent = True |
def _items_to_resources(self, body):
""" Takes a List body and return a dictionary with the following structure:
{
'api_version': str,
'kind': str,
'items': [{
'resource': Resource,
'name': str,
'namespace': str,
}]
}
"""
if body is None:
raise ValueError("You must provide a body when calling methods on a ResourceList")
api_version = body['apiVersion']
kind = body['kind']
items = body.get('items')
if not items:
raise ValueError('The `items` field in the body must be populated when calling methods on a ResourceList')
if self.kind != kind:
raise ValueError('Methods on a {} must be called with a body containing the same kind. Received {} instead'.format(self.kind, kind))
return {
'api_version': api_version,
'kind': kind,
'items': [self._item_to_resource(item) for item in items]
} | Takes a List body and return a dictionary with the following structure:
{
'api_version': str,
'kind': str,
'items': [{
'resource': Resource,
'name': str,
'namespace': str,
}]
} | Below is the instruction that describes the task:
### Input:
Takes a List body and return a dictionary with the following structure:
{
'api_version': str,
'kind': str,
'items': [{
'resource': Resource,
'name': str,
'namespace': str,
}]
}
### Response:
def _items_to_resources(self, body):
""" Takes a List body and return a dictionary with the following structure:
{
'api_version': str,
'kind': str,
'items': [{
'resource': Resource,
'name': str,
'namespace': str,
}]
}
"""
if body is None:
raise ValueError("You must provide a body when calling methods on a ResourceList")
api_version = body['apiVersion']
kind = body['kind']
items = body.get('items')
if not items:
raise ValueError('The `items` field in the body must be populated when calling methods on a ResourceList')
if self.kind != kind:
raise ValueError('Methods on a {} must be called with a body containing the same kind. Received {} instead'.format(self.kind, kind))
return {
'api_version': api_version,
'kind': kind,
'items': [self._item_to_resource(item) for item in items]
} |
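An example of the kind of List body `_items_to_resources` expects; the resource names and namespaces below are illustrative, not from the source.

```python
body = {
    'apiVersion': 'v1',
    'kind': 'PodList',
    'items': [
        {'metadata': {'name': 'web-1', 'namespace': 'default'}},
        {'metadata': {'name': 'web-2', 'namespace': 'default'}},
    ],
}
# The method would resolve each item to a (resource, name, namespace) entry,
# provided the list's kind matches the ResourceList it is called on.
```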
def print_information(handler, label):
"""
Prints latest tag's information
"""
click.echo('=> Latest stable: {tag}'.format(
tag=click.style(str(handler.latest_stable or 'N/A'), fg='yellow' if
handler.latest_stable else 'magenta')
))
if label is not None:
latest_revision = handler.latest_revision(label)
click.echo('=> Latest relative revision ({label}): {tag}'.format(
label=click.style(label, fg='blue'),
tag=click.style(str(latest_revision or 'N/A'),
fg='yellow' if latest_revision else 'magenta')
)) | Prints latest tag's information | Below is the instruction that describes the task:
### Input:
Prints latest tag's information
### Response:
def print_information(handler, label):
"""
Prints latest tag's information
"""
click.echo('=> Latest stable: {tag}'.format(
tag=click.style(str(handler.latest_stable or 'N/A'), fg='yellow' if
handler.latest_stable else 'magenta')
))
if label is not None:
latest_revision = handler.latest_revision(label)
click.echo('=> Latest relative revision ({label}): {tag}'.format(
label=click.style(label, fg='blue'),
tag=click.style(str(latest_revision or 'N/A'),
fg='yellow' if latest_revision else 'magenta')
)) |
def get_label_set(self, type_str=None):
"""Get a set of label_str for the tree rooted at this node.
Args:
type_str:
SUBJECT_NODE_TAG, TYPE_NODE_TAG or None. If set, only include
information from nodes of that type.
Returns:
set: The labels of the nodes leading up to this node from the root.
"""
return {v.label_str for v in self.node_gen if type_str in (None, v.type_str)} | Get a set of label_str for the tree rooted at this node.
Args:
type_str:
SUBJECT_NODE_TAG, TYPE_NODE_TAG or None. If set, only include
information from nodes of that type.
Returns:
set: The labels of the nodes leading up to this node from the root. | Below is the instruction that describes the task:
### Input:
Get a set of label_str for the tree rooted at this node.
Args:
type_str:
SUBJECT_NODE_TAG, TYPE_NODE_TAG or None. If set, only include
information from nodes of that type.
Returns:
set: The labels of the nodes leading up to this node from the root.
### Response:
def get_label_set(self, type_str=None):
"""Get a set of label_str for the tree rooted at this node.
Args:
type_str:
SUBJECT_NODE_TAG, TYPE_NODE_TAG or None. If set, only include
information from nodes of that type.
Returns:
set: The labels of the nodes leading up to this node from the root.
"""
return {v.label_str for v in self.node_gen if type_str in (None, v.type_str)} |
def t_WSIGNORE_comment(self, token):
r'[#][^\n]*\n+'
token.lexer.lineno += token.value.count('\n')
newline_token = _create_token('NEWLINE', '\n',
token.lineno, token.lexpos + len(token.value) - 1)
newline_token.lexer = token.lexer
self._check_for_indent(newline_token) | r'[#][^\n]*\n+ | Below is the instruction that describes the task:
### Input:
r'[#][^\n]*\n+
### Response:
def t_WSIGNORE_comment(self, token):
r'[#][^\n]*\n+'
token.lexer.lineno += token.value.count('\n')
newline_token = _create_token('NEWLINE', '\n',
token.lineno, token.lexpos + len(token.value) - 1)
newline_token.lexer = token.lexer
self._check_for_indent(newline_token) |
def _wrap_sessions(sessions, request):
"""
Returns a list of session keys for the given lists of sessions and/or the session key of the
current logged in user, if the list contains the magic item SELF.
"""
result = set()
for s in sessions:
if s is SELF and request:
result.add(request.session.session_key)
else:
result.add(s)
return result | Returns a list of session keys for the given lists of sessions and/or the session key of the
current logged in user, if the list contains the magic item SELF. | Below is the instruction that describes the task:
### Input:
Returns a list of session keys for the given lists of sessions and/or the session key of the
current logged in user, if the list contains the magic item SELF.
### Response:
def _wrap_sessions(sessions, request):
"""
Returns a list of session keys for the given lists of sessions and/or the session key of the
current logged in user, if the list contains the magic item SELF.
"""
result = set()
for s in sessions:
if s is SELF and request:
result.add(request.session.session_key)
else:
result.add(s)
return result |
def _find_immediately(self, locator, search_object=None):
'''
Attempts to immediately find elements on the page without waiting
@type locator: webdriverwrapper.support.locator.Locator
@param locator: Locator object describing
@type search_object: webdriverwrapper.WebElementWrapper
@param search_object: Optional WebElement to start search with. If null, search will be on self.driver
@return: Single WebElementWrapper if find_all is False,
list of WebElementWrappers if find_all is True
'''
search_object = self.driver if search_object is None else search_object
elements = self.locator_handler.find_by_locator(search_object, locator, True)
return [WebElementWrapper.WebElementWrapper(self, locator, element) for element in elements] | Attempts to immediately find elements on the page without waiting
@type locator: webdriverwrapper.support.locator.Locator
@param locator: Locator object describing
@type search_object: webdriverwrapper.WebElementWrapper
@param search_object: Optional WebElement to start search with. If null, search will be on self.driver
@return: Single WebElementWrapper if find_all is False,
list of WebElementWrappers if find_all is True | Below is the instruction that describes the task:
### Input:
Attempts to immediately find elements on the page without waiting
@type locator: webdriverwrapper.support.locator.Locator
@param locator: Locator object describing
@type search_object: webdriverwrapper.WebElementWrapper
@param search_object: Optional WebElement to start search with. If null, search will be on self.driver
@return: Single WebElementWrapper if find_all is False,
list of WebElementWrappers if find_all is True
### Response:
def _find_immediately(self, locator, search_object=None):
'''
Attempts to immediately find elements on the page without waiting
@type locator: webdriverwrapper.support.locator.Locator
@param locator: Locator object describing
@type search_object: webdriverwrapper.WebElementWrapper
@param search_object: Optional WebElement to start search with. If null, search will be on self.driver
@return: Single WebElementWrapper if find_all is False,
list of WebElementWrappers if find_all is True
'''
search_object = self.driver if search_object is None else search_object
elements = self.locator_handler.find_by_locator(search_object, locator, True)
return [WebElementWrapper.WebElementWrapper(self, locator, element) for element in elements] |
def is_within(self, query, subject):
"""Accessory function to check if a range is fully within another range"""
if self.pt_within(query[0], subject) and self.pt_within(query[1], subject):
return True
return False | Accessory function to check if a range is fully within another range | Below is the instruction that describes the task:
### Input:
Accessory function to check if a range is fully within another range
### Response:
def is_within(self, query, subject):
"""Accessory function to check if a range is fully within another range"""
if self.pt_within(query[0], subject) and self.pt_within(query[1], subject):
return True
return False |
def __EncodedAttribute_decode_rgb32(self, da, extract_as=ExtractAs.Numpy):
"""Decode a color image (JPEG_RGB or RGB24) and returns a 32 bits RGB image.
:param da: :class:`DeviceAttribute` that contains the image
:type da: :class:`DeviceAttribute`
:param extract_as: defaults to ExtractAs.Numpy
:type extract_as: ExtractAs
:return: the decoded data
- In case String is chosen as extract method, a tuple is returned:
width<int>, height<int>, buffer<str>
- In case Numpy is chosen as extract method, a :class:`numpy.ndarray` is
returned with ndim=2, shape=(height, width) and dtype=numpy.uint32.
- In case Tuple or List are chosen, a tuple<tuple<int>> or list<list<int>>
is returned.
.. warning::
The PyTango calls that return a :class:`DeviceAttribute`
(like :meth:`DeviceProxy.read_attribute` or :meth:`DeviceProxy.command_inout`)
automatically extract the contents by default. This method requires
that the given :class:`DeviceAttribute` is obtained from a
call which **DOESN'T** extract the contents. Example::
dev = tango.DeviceProxy("a/b/c")
da = dev.read_attribute("my_attr", extract_as=tango.ExtractAs.Nothing)
enc = tango.EncodedAttribute()
data = enc.decode_rgb32(da)
"""
if hasattr(da, 'value'):
raise TypeError("DeviceAttribute argument must have been obtained from "
"a call which doesn't extract the contents")
if extract_as not in _allowed_extract:
raise TypeError("extract_as must be one of Numpy, String, Tuple, List")
return self._decode_rgb32(da, extract_as) | Decode a color image (JPEG_RGB or RGB24) and returns a 32 bits RGB image.
:param da: :class:`DeviceAttribute` that contains the image
:type da: :class:`DeviceAttribute`
:param extract_as: defaults to ExtractAs.Numpy
:type extract_as: ExtractAs
:return: the decoded data
- In case String is chosen as extract method, a tuple is returned:
width<int>, height<int>, buffer<str>
- In case Numpy is chosen as extract method, a :class:`numpy.ndarray` is
returned with ndim=2, shape=(height, width) and dtype=numpy.uint32.
- In case Tuple or List are chosen, a tuple<tuple<int>> or list<list<int>>
is returned.
.. warning::
The PyTango calls that return a :class:`DeviceAttribute`
(like :meth:`DeviceProxy.read_attribute` or :meth:`DeviceProxy.command_inout`)
automatically extract the contents by default. This method requires
that the given :class:`DeviceAttribute` is obtained from a
call which **DOESN'T** extract the contents. Example::
dev = tango.DeviceProxy("a/b/c")
da = dev.read_attribute("my_attr", extract_as=tango.ExtractAs.Nothing)
enc = tango.EncodedAttribute()
data = enc.decode_rgb32(da) | Below is the instruction that describes the task:
### Input:
Decode a color image (JPEG_RGB or RGB24) and returns a 32 bits RGB image.
:param da: :class:`DeviceAttribute` that contains the image
:type da: :class:`DeviceAttribute`
:param extract_as: defaults to ExtractAs.Numpy
:type extract_as: ExtractAs
:return: the decoded data
- In case String is chosen as extract method, a tuple is returned:
width<int>, height<int>, buffer<str>
- In case Numpy is chosen as extract method, a :class:`numpy.ndarray` is
returned with ndim=2, shape=(height, width) and dtype=numpy.uint32.
- In case Tuple or List are chosen, a tuple<tuple<int>> or list<list<int>>
is returned.
.. warning::
The PyTango calls that return a :class:`DeviceAttribute`
(like :meth:`DeviceProxy.read_attribute` or :meth:`DeviceProxy.command_inout`)
automatically extract the contents by default. This method requires
that the given :class:`DeviceAttribute` is obtained from a
call which **DOESN'T** extract the contents. Example::
dev = tango.DeviceProxy("a/b/c")
da = dev.read_attribute("my_attr", extract_as=tango.ExtractAs.Nothing)
enc = tango.EncodedAttribute()
data = enc.decode_rgb32(da)
### Response:
def __EncodedAttribute_decode_rgb32(self, da, extract_as=ExtractAs.Numpy):
"""Decode a color image (JPEG_RGB or RGB24) and returns a 32 bits RGB image.
:param da: :class:`DeviceAttribute` that contains the image
:type da: :class:`DeviceAttribute`
:param extract_as: defaults to ExtractAs.Numpy
:type extract_as: ExtractAs
:return: the decoded data
- In case String is chosen as extract method, a tuple is returned:
width<int>, height<int>, buffer<str>
- In case Numpy is chosen as extract method, a :class:`numpy.ndarray` is
returned with ndim=2, shape=(height, width) and dtype=numpy.uint32.
- In case Tuple or List are chosen, a tuple<tuple<int>> or list<list<int>>
is returned.
.. warning::
The PyTango calls that return a :class:`DeviceAttribute`
(like :meth:`DeviceProxy.read_attribute` or :meth:`DeviceProxy.command_inout`)
automatically extract the contents by default. This method requires
that the given :class:`DeviceAttribute` is obtained from a
call which **DOESN'T** extract the contents. Example::
dev = tango.DeviceProxy("a/b/c")
da = dev.read_attribute("my_attr", extract_as=tango.ExtractAs.Nothing)
enc = tango.EncodedAttribute()
data = enc.decode_rgb32(da)
"""
if hasattr(da, 'value'):
raise TypeError("DeviceAttribute argument must have been obtained from "
"a call which doesn't extract the contents")
if extract_as not in _allowed_extract:
raise TypeError("extract_as must be one of Numpy, String, Tuple, List")
return self._decode_rgb32(da, extract_as) |
def _samples_dicts_to_array(samples_dicts, labels):
"""Convert an iterable of samples where each sample is a dict to a numpy 2d array. Also
determines the labels if they are None.
"""
itersamples = iter(samples_dicts)
first_sample = next(itersamples)
if labels is None:
labels = list(first_sample)
num_variables = len(labels)
def _iter_samples():
yield np.fromiter((first_sample[v] for v in labels),
count=num_variables, dtype=np.int8)
try:
for sample in itersamples:
yield np.fromiter((sample[v] for v in labels),
count=num_variables, dtype=np.int8)
except KeyError:
msg = ("Each dict in 'samples' must have the same keys.")
raise ValueError(msg)
return np.stack(list(_iter_samples())), labels | Convert an iterable of samples where each sample is a dict to a numpy 2d array. Also
determines the labels if they are None. | Below is the instruction that describes the task:
### Input:
Convert an iterable of samples where each sample is a dict to a numpy 2d array. Also
determines the labels if they are None.
### Response:
def _samples_dicts_to_array(samples_dicts, labels):
"""Convert an iterable of samples where each sample is a dict to a numpy 2d array. Also
determines the labels if they are None.
"""
itersamples = iter(samples_dicts)
first_sample = next(itersamples)
if labels is None:
labels = list(first_sample)
num_variables = len(labels)
def _iter_samples():
yield np.fromiter((first_sample[v] for v in labels),
count=num_variables, dtype=np.int8)
try:
for sample in itersamples:
yield np.fromiter((sample[v] for v in labels),
count=num_variables, dtype=np.int8)
except KeyError:
msg = ("Each dict in 'samples' must have the same keys.")
raise ValueError(msg)
return np.stack(list(_iter_samples())), labels |
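A minimal usage sketch for _samples_dicts_to_array above; the sample values are made up for illustration and are not part of the original record:
import numpy as np
samples = [{'a': 0, 'b': 1}, {'a': 1, 'b': 1}]
arr, labels = _samples_dicts_to_array(samples, None)
# labels is read off the first sample: ['a', 'b']
# arr is a 2x2 numpy.int8 array: [[0, 1], [1, 1]]
# a later sample missing one of those keys would raise
# ValueError("Each dict in 'samples' must have the same keys.")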
def dict_intersection(dict1, dict2, combine=False, combine_op=op.add):
r"""
Args:
dict1 (dict):
dict2 (dict):
combine (bool): Combines keys only if the values are equal if False else
values are combined using combine_op (default = False)
combine_op (func): (default = op.add)
Returns:
dict: mergedict_
CommandLine:
python -m utool.util_dict --exec-dict_intersection
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_dict import * # NOQA
>>> import utool as ut
>>> dict1 = {'a': 1, 'b': 2, 'c': 3, 'd': 4}
>>> dict2 = {'b': 2, 'c': 3, 'd': 5, 'e': 21, 'f': 42}
>>> combine = False
>>> mergedict_ = dict_intersection(dict1, dict2, combine)
>>> result = ('mergedict_ = %s' % (ut.repr4(mergedict_, nl=False),))
>>> print(result)
mergedict_ = {'b': 2, 'c': 3}
"""
isect_keys = set(dict1.keys()).intersection(set(dict2.keys()))
if combine:
# TODO: depricate this
dict_isect = {k: combine_op(dict1[k], dict2[k]) for k in isect_keys}
else:
# maintain order if possible
if isinstance(dict1, OrderedDict):
isect_keys_ = [k for k in dict1.keys() if k in isect_keys]
_dict_cls = OrderedDict
else:
isect_keys_ = isect_keys
_dict_cls = dict
dict_isect = _dict_cls(
(k, dict1[k]) for k in isect_keys_ if dict1[k] == dict2[k]
)
return dict_isect | r"""
Args:
dict1 (dict):
dict2 (dict):
combine (bool): Combines keys only if the values are equal if False else
values are combined using combine_op (default = False)
combine_op (func): (default = op.add)
Returns:
dict: mergedict_
CommandLine:
python -m utool.util_dict --exec-dict_intersection
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_dict import * # NOQA
>>> import utool as ut
>>> dict1 = {'a': 1, 'b': 2, 'c': 3, 'd': 4}
>>> dict2 = {'b': 2, 'c': 3, 'd': 5, 'e': 21, 'f': 42}
>>> combine = False
>>> mergedict_ = dict_intersection(dict1, dict2, combine)
>>> result = ('mergedict_ = %s' % (ut.repr4(mergedict_, nl=False),))
>>> print(result)
mergedict_ = {'b': 2, 'c': 3} | Below is the the instruction that describes the task:
### Input:
r"""
Args:
dict1 (dict):
dict2 (dict):
combine (bool): Combines keys only if the values are equal if False else
values are combined using combine_op (default = False)
combine_op (func): (default = op.add)
Returns:
dict: mergedict_
CommandLine:
python -m utool.util_dict --exec-dict_intersection
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_dict import * # NOQA
>>> import utool as ut
>>> dict1 = {'a': 1, 'b': 2, 'c': 3, 'd': 4}
>>> dict2 = {'b': 2, 'c': 3, 'd': 5, 'e': 21, 'f': 42}
>>> combine = False
>>> mergedict_ = dict_intersection(dict1, dict2, combine)
>>> result = ('mergedict_ = %s' % (ut.repr4(mergedict_, nl=False),))
>>> print(result)
mergedict_ = {'b': 2, 'c': 3}
### Response:
def dict_intersection(dict1, dict2, combine=False, combine_op=op.add):
r"""
Args:
dict1 (dict):
dict2 (dict):
combine (bool): Combines keys only if the values are equal if False else
values are combined using combine_op (default = False)
combine_op (func): (default = op.add)
Returns:
dict: mergedict_
CommandLine:
python -m utool.util_dict --exec-dict_intersection
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_dict import * # NOQA
>>> import utool as ut
>>> dict1 = {'a': 1, 'b': 2, 'c': 3, 'd': 4}
>>> dict2 = {'b': 2, 'c': 3, 'd': 5, 'e': 21, 'f': 42}
>>> combine = False
>>> mergedict_ = dict_intersection(dict1, dict2, combine)
>>> result = ('mergedict_ = %s' % (ut.repr4(mergedict_, nl=False),))
>>> print(result)
mergedict_ = {'b': 2, 'c': 3}
"""
isect_keys = set(dict1.keys()).intersection(set(dict2.keys()))
if combine:
# TODO: depricate this
dict_isect = {k: combine_op(dict1[k], dict2[k]) for k in isect_keys}
else:
# maintain order if possible
if isinstance(dict1, OrderedDict):
isect_keys_ = [k for k in dict1.keys() if k in isect_keys]
_dict_cls = OrderedDict
else:
isect_keys_ = isect_keys
_dict_cls = dict
dict_isect = _dict_cls(
(k, dict1[k]) for k in isect_keys_ if dict1[k] == dict2[k]
)
return dict_isect |
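To complement the doctest in the record above, a short sketch of the combine=True branch (illustrative only, values made up):
dict_intersection({'a': 1, 'b': 2}, {'b': 3, 'c': 4}, combine=True)
# -> {'b': 5}   shared keys kept, values merged with the default combine_op (op.add)
dict_intersection({'a': 1, 'b': 2}, {'b': 3, 'c': 4}, combine=True, combine_op=max)
# -> {'b': 3}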
def list(
self, root: str, patterns: List[str], exclude: Optional[List[str]] = None
) -> List[str]:
"""
Return the list of files that match any of the patterns within root.
If exclude is provided, files that match an exclude pattern are omitted.
Note: The `find` command does not understand globs properly.
e.g. 'a/*.py' will match 'a/b/c.py'
For this reason, avoid calling this method with glob patterns.
"""
command = ["find", "."]
command += self._match_any(patterns)
if exclude:
command += ["-and", "!"]
command += self._match_any(exclude)
return (
subprocess.run(command, stdout=subprocess.PIPE, cwd=root)
.stdout.decode("utf-8")
.split()
) | Return the list of files that match any of the patterns within root.
If exclude is provided, files that match an exclude pattern are omitted.
Note: The `find` command does not understand globs properly.
e.g. 'a/*.py' will match 'a/b/c.py'
For this reason, avoid calling this method with glob patterns. | Below is the the instruction that describes the task:
### Input:
Return the list of files that match any of the patterns within root.
If exclude is provided, files that match an exclude pattern are omitted.
Note: The `find` command does not understand globs properly.
e.g. 'a/*.py' will match 'a/b/c.py'
For this reason, avoid calling this method with glob patterns.
### Response:
def list(
self, root: str, patterns: List[str], exclude: Optional[List[str]] = None
) -> List[str]:
"""
Return the list of files that match any of the patterns within root.
If exclude is provided, files that match an exclude pattern are omitted.
Note: The `find` command does not understand globs properly.
e.g. 'a/*.py' will match 'a/b/c.py'
For this reason, avoid calling this method with glob patterns.
"""
command = ["find", "."]
command += self._match_any(patterns)
if exclude:
command += ["-and", "!"]
command += self._match_any(exclude)
return (
subprocess.run(command, stdout=subprocess.PIPE, cwd=root)
.stdout.decode("utf-8")
.split()
) |
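A hedged usage sketch for the find-based list method above. The class owning it and the exact arguments produced by _match_any are not shown in the record, so both are assumptions here:
filesystem = Filesystem()   # hypothetical owner of the list() method above
paths = filesystem.list("/path/to/repo", ["BUCK", "TARGETS"], exclude=["TARGETS.v2"])
# conceptually runs `find . <match any pattern> -and ! <match any exclude>` inside
# /path/to/repo and returns the matching relative paths; per the docstring, plain
# file names (not glob patterns) are the safe thing to pass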
def set_tuning(self, tuning):
"""Set the tuning attribute on both the Track and its instrument (when
available).
Tuning should be a StringTuning or derivative object.
"""
if self.instrument:
self.instrument.tuning = tuning
self.tuning = tuning
return self | Set the tuning attribute on both the Track and its instrument (when
available).
Tuning should be a StringTuning or derivative object. | Below is the the instruction that describes the task:
### Input:
Set the tuning attribute on both the Track and its instrument (when
available).
Tuning should be a StringTuning or derivative object.
### Response:
def set_tuning(self, tuning):
"""Set the tuning attribute on both the Track and its instrument (when
available).
Tuning should be a StringTuning or derivative object.
"""
if self.instrument:
self.instrument.tuning = tuning
self.tuning = tuning
return self |
def domagicmag(file, Recs):
"""
converts a magic record back into the SIO mag format
"""
for rec in Recs:
type = ".0"
meths = []
tmp = rec["magic_method_codes"].split(':')
for meth in tmp:
meths.append(meth.strip())
if 'LT-T-I' in meths:
type = ".1"
if 'LT-PTRM-I' in meths:
type = ".2"
if 'LT-PTRM-MD' in meths:
type = ".3"
treatment = float(rec["treatment_temp"]) - 273
tr = '%i' % (treatment) + type
inten = '%8.7e ' % (float(rec["measurement_magn_moment"]) * 1e3)
outstring = rec["er_specimen_name"] + " " + tr + " " + rec["measurement_csd"] + \
" " + inten + " " + rec["measurement_dec"] + \
" " + rec["measurement_inc"] + "\n"
file.write(outstring) | converts a magic record back into the SIO mag format | Below is the the instruction that describes the task:
### Input:
converts a magic record back into the SIO mag format
### Response:
def domagicmag(file, Recs):
"""
converts a magic record back into the SIO mag format
"""
for rec in Recs:
type = ".0"
meths = []
tmp = rec["magic_method_codes"].split(':')
for meth in tmp:
meths.append(meth.strip())
if 'LT-T-I' in meths:
type = ".1"
if 'LT-PTRM-I' in meths:
type = ".2"
if 'LT-PTRM-MD' in meths:
type = ".3"
treatment = float(rec["treatment_temp"]) - 273
tr = '%i' % (treatment) + type
inten = '%8.7e ' % (float(rec["measurement_magn_moment"]) * 1e3)
outstring = rec["er_specimen_name"] + " " + tr + " " + rec["measurement_csd"] + \
" " + inten + " " + rec["measurement_dec"] + \
" " + rec["measurement_inc"] + "\n"
file.write(outstring) |
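For orientation, the line format written by domagicmag above, shown with hypothetical values (treatment_temp is in kelvin, so 573.0 K becomes 300; the '.1' suffix comes from 'LT-T-I' in the method codes):
# er_specimen_name  treatment  csd   moment*1e3       dec    inc
# sp01              300.1      2.5   1.2345678e-02    45.0   12.3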
def toDom(self, node):
"""node -- node representing message"""
wsdl = self.getWSDL()
ep = ElementProxy(None, node)
epc = ep.createAppendElement(DOM.GetWSDLUri(wsdl.version), 'part')
epc.setAttributeNS(None, 'name', self.name)
if self.element is not None:
ns,name = self.element
prefix = epc.getPrefix(ns)
epc.setAttributeNS(None, 'element', '%s:%s'%(prefix,name))
elif self.type is not None:
ns,name = self.type
prefix = epc.getPrefix(ns)
epc.setAttributeNS(None, 'type', '%s:%s'%(prefix,name)) | node -- node representing message | Below is the the instruction that describes the task:
### Input:
node -- node representing message
### Response:
def toDom(self, node):
"""node -- node representing message"""
wsdl = self.getWSDL()
ep = ElementProxy(None, node)
epc = ep.createAppendElement(DOM.GetWSDLUri(wsdl.version), 'part')
epc.setAttributeNS(None, 'name', self.name)
if self.element is not None:
ns,name = self.element
prefix = epc.getPrefix(ns)
epc.setAttributeNS(None, 'element', '%s:%s'%(prefix,name))
elif self.type is not None:
ns,name = self.type
prefix = epc.getPrefix(ns)
epc.setAttributeNS(None, 'type', '%s:%s'%(prefix,name)) |
def get_executions(self, **kwargs):
"""
Retrieve the executions related to the current service.
.. versionadded:: 1.13
:param kwargs: (optional) additional search keyword arguments to limit the search even further.
:type kwargs: dict
:return: list of ServiceExecutions associated to the current service.
"""
return self._client.service_executions(service=self.id, scope=self.scope_id, **kwargs) | Retrieve the executions related to the current service.
.. versionadded:: 1.13
:param kwargs: (optional) additional search keyword arguments to limit the search even further.
:type kwargs: dict
:return: list of ServiceExecutions associated to the current service. | Below is the the instruction that describes the task:
### Input:
Retrieve the executions related to the current service.
.. versionadded:: 1.13
:param kwargs: (optional) additional search keyword arguments to limit the search even further.
:type kwargs: dict
:return: list of ServiceExecutions associated to the current service.
### Response:
def get_executions(self, **kwargs):
"""
Retrieve the executions related to the current service.
.. versionadded:: 1.13
:param kwargs: (optional) additional search keyword arguments to limit the search even further.
:type kwargs: dict
:return: list of ServiceExecutions associated to the current service.
"""
return self._client.service_executions(service=self.id, scope=self.scope_id, **kwargs) |
def render_to_response(template, object, params=None, mimetype='text/html'):
"""
``object`` will be converted to xml using :func:`easymode.tree.xml`. The resulting xml
will be transformed using ``template``.
The result will be a :class:`~django.http.HttpResponse` object,
containing the transformed xml as the body.
:param template: an xslt template name.
:param object: an object that has an ``__xml__`` method. (See :func:`easymode.tree.xml.decorators.toxml`).
:param params: A dictionary containing xslt parameters. Use :func:`~easymode.xslt.prepare_string_param`\
on strings you want to pass in.
:param mimetype: The mimetype of the :class:`~django.http.HttpResponse`
:rtype: :class:`django.http.HttpResponse`
"""
xsl_path = find_template_path(template)
xml = xmltree.xml(object)
result = transform(xml, str(xsl_path), params)
return HttpResponse(result, mimetype=mimetype) | ``object`` will be converted to xml using :func:`easymode.tree.xml`. The resulting xml
will be transformed using ``template``.
The result will be a :class:`~django.http.HttpResponse` object,
containing the transformed xml as the body.
:param template: an xslt template name.
:param object: an object that has an ``__xml__`` method. (See :func:`easymode.tree.xml.decorators.toxml`).
:param params: A dictionary containing xslt parameters. Use :func:`~easymode.xslt.prepare_string_param`\
on strings you want to pass in.
:param mimetype: The mimetype of the :class:`~django.http.HttpResponse`
:rtype: :class:`django.http.HttpResponse` | Below is the the instruction that describes the task:
### Input:
``object`` will be converted to xml using :func:`easymode.tree.xml`. The resulting xml
will be transformed using ``template``.
The result will be a :class:`~django.http.HttpResponse` object,
containing the transformed xml as the body.
:param template: an xslt template name.
:param object: an object that has an ``__xml__`` method. (See :func:`easymode.tree.xml.decorators.toxml`).
:param params: A dictionary containing xslt parameters. Use :func:`~easymode.xslt.prepare_string_param`\
on strings you want to pass in.
:param mimetype: The mimetype of the :class:`~django.http.HttpResponse`
:rtype: :class:`django.http.HttpResponse`
### Response:
def render_to_response(template, object, params=None, mimetype='text/html'):
"""
``object`` will be converted to xml using :func:`easymode.tree.xml`. The resulting xml
will be transformed using ``template``.
The result will be a :class:`~django.http.HttpResponse` object,
containing the transformed xml as the body.
:param template: an xslt template name.
:param object: an object that has an ``__xml__`` method. (See :func:`easymode.tree.xml.decorators.toxml`).
:param params: A dictionary containing xslt parameters. Use :func:`~easymode.xslt.prepare_string_param`\
on strings you want to pass in.
:param mimetype: The mimetype of the :class:`~django.http.HttpResponse`
:rtype: :class:`django.http.HttpResponse`
"""
xsl_path = find_template_path(template)
xml = xmltree.xml(object)
result = transform(xml, str(xsl_path), params)
return HttpResponse(result, mimetype=mimetype) |
def truncate_table(self, table_name, database=None):
"""
Delete all rows from, but do not drop, an existing table
Parameters
----------
table_name : string
database : string, default None (optional)
"""
statement = ddl.TruncateTable(table_name, database=database)
self._execute(statement, False) | Delete all rows from, but do not drop, an existing table
Parameters
----------
table_name : string
database : string, default None (optional) | Below is the the instruction that describes the task:
### Input:
Delete all rows from, but do not drop, an existing table
Parameters
----------
table_name : string
database : string, default None (optional)
### Response:
def truncate_table(self, table_name, database=None):
"""
Delete all rows from, but do not drop, an existing table
Parameters
----------
table_name : string
database : string, default None (optional)
"""
statement = ddl.TruncateTable(table_name, database=database)
self._execute(statement, False) |
def drop(self):
"""Release the message from lease management.
This informs the policy to no longer hold on to the lease for this
message. Pub/Sub will re-deliver the message if it is not acknowledged
before the existing lease expires.
.. warning::
For most use cases, the only reason to drop a message from
lease management is on :meth:`ack` or :meth:`nack`; these methods
both call this one. You probably do not want to call this method
directly.
"""
self._request_queue.put(
requests.DropRequest(ack_id=self._ack_id, byte_size=self.size)
) | Release the message from lease management.
This informs the policy to no longer hold on to the lease for this
message. Pub/Sub will re-deliver the message if it is not acknowledged
before the existing lease expires.
.. warning::
For most use cases, the only reason to drop a message from
lease management is on :meth:`ack` or :meth:`nack`; these methods
both call this one. You probably do not want to call this method
directly. | Below is the the instruction that describes the task:
### Input:
Release the message from lease management.
This informs the policy to no longer hold on to the lease for this
message. Pub/Sub will re-deliver the message if it is not acknowledged
before the existing lease expires.
.. warning::
For most use cases, the only reason to drop a message from
lease management is on :meth:`ack` or :meth:`nack`; these methods
both call this one. You probably do not want to call this method
directly.
### Response:
def drop(self):
"""Release the message from lease management.
This informs the policy to no longer hold on to the lease for this
message. Pub/Sub will re-deliver the message if it is not acknowledged
before the existing lease expires.
.. warning::
For most use cases, the only reason to drop a message from
lease management is on :meth:`ack` or :meth:`nack`; these methods
both call this one. You probably do not want to call this method
directly.
"""
self._request_queue.put(
requests.DropRequest(ack_id=self._ack_id, byte_size=self.size)
) |
def get_connection_id_by_endpoint(self, endpoint):
"""Returns the connection id associated with a publically
reachable endpoint or raises KeyError if the endpoint is not
found.
Args:
endpoint (str): A zmq-style uri which identifies a publically
reachable endpoint.
"""
with self._connections_lock:
for connection_id in self._connections:
connection_info = self._connections[connection_id]
if connection_info.uri == endpoint:
return connection_id
raise KeyError() | Returns the connection id associated with a publically
reachable endpoint or raises KeyError if the endpoint is not
found.
Args:
endpoint (str): A zmq-style uri which identifies a publically
reachable endpoint. | Below is the the instruction that describes the task:
### Input:
Returns the connection id associated with a publically
reachable endpoint or raises KeyError if the endpoint is not
found.
Args:
endpoint (str): A zmq-style uri which identifies a publically
reachable endpoint.
### Response:
def get_connection_id_by_endpoint(self, endpoint):
"""Returns the connection id associated with a publically
reachable endpoint or raises KeyError if the endpoint is not
found.
Args:
endpoint (str): A zmq-style uri which identifies a publically
reachable endpoint.
"""
with self._connections_lock:
for connection_id in self._connections:
connection_info = self._connections[connection_id]
if connection_info.uri == endpoint:
return connection_id
raise KeyError() |
def mixer(yaw, throttle, max_power=100):
"""
Mix a pair of joystick axes, returning a pair of wheel speeds. This is where the mapping from
joystick positions to wheel powers is defined, so any changes to how the robot drives should
be made here, everything else is really just plumbing.
:param yaw:
Yaw axis value, ranges from -1.0 to 1.0
:param throttle:
Throttle axis value, ranges from -1.0 to 1.0
:param max_power:
Maximum speed that should be returned from the mixer, defaults to 100
:return:
A pair of power_left, power_right integer values to send to the motor driver
"""
left = throttle + yaw
right = throttle - yaw
scale = float(max_power) / max(1, abs(left), abs(right))
return int(left * scale), int(right * scale) | Mix a pair of joystick axes, returning a pair of wheel speeds. This is where the mapping from
joystick positions to wheel powers is defined, so any changes to how the robot drives should
be made here, everything else is really just plumbing.
:param yaw:
Yaw axis value, ranges from -1.0 to 1.0
:param throttle:
Throttle axis value, ranges from -1.0 to 1.0
:param max_power:
Maximum speed that should be returned from the mixer, defaults to 100
:return:
A pair of power_left, power_right integer values to send to the motor driver | Below is the the instruction that describes the task:
### Input:
Mix a pair of joystick axes, returning a pair of wheel speeds. This is where the mapping from
joystick positions to wheel powers is defined, so any changes to how the robot drives should
be made here, everything else is really just plumbing.
:param yaw:
Yaw axis value, ranges from -1.0 to 1.0
:param throttle:
Throttle axis value, ranges from -1.0 to 1.0
:param max_power:
Maximum speed that should be returned from the mixer, defaults to 100
:return:
A pair of power_left, power_right integer values to send to the motor driver
### Response:
def mixer(yaw, throttle, max_power=100):
"""
Mix a pair of joystick axes, returning a pair of wheel speeds. This is where the mapping from
joystick positions to wheel powers is defined, so any changes to how the robot drives should
be made here, everything else is really just plumbing.
:param yaw:
Yaw axis value, ranges from -1.0 to 1.0
:param throttle:
Throttle axis value, ranges from -1.0 to 1.0
:param max_power:
Maximum speed that should be returned from the mixer, defaults to 100
:return:
A pair of power_left, power_right integer values to send to the motor driver
"""
left = throttle + yaw
right = throttle - yaw
scale = float(max_power) / max(1, abs(left), abs(right))
return int(left * scale), int(right * scale) |
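A few worked values for mixer above, following the arithmetic in the body (left = throttle + yaw, right = throttle - yaw, then scaled to max_power):
mixer(yaw=0.0, throttle=1.0)                 # -> (100, 100)  full speed ahead
mixer(yaw=1.0, throttle=0.0)                 # -> (100, -100) spin on the spot
mixer(yaw=0.5, throttle=0.5)                 # -> (100, 0)    hard turn
mixer(yaw=0.0, throttle=-1.0, max_power=50)  # -> (-50, -50)  reverse at half power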
def from_json(cls, data):
"""Create a data type from a dictionary.
Args:
data: Data as a dictionary.
{
"name": data type name of the data type as a string
"data_type": the class name of the data type as a string
"base_unit": the base unit of the data type
}
"""
assert 'name' in data, 'Required keyword "name" is missing!'
assert 'data_type' in data, 'Required keyword "data_type" is missing!'
if cls._type_enumeration is None:
cls._type_enumeration = _DataTypeEnumeration(import_modules=False)
if data['data_type'] == 'GenericType':
assert 'base_unit' in data, \
'Keyword "base_unit" is missing and is required for GenericType.'
return cls._type_enumeration._GENERICTYPE(data['name'], data['base_unit'])
elif data['data_type'] in cls._type_enumeration._TYPES:
clss = cls._type_enumeration._TYPES[data['data_type']]
if data['data_type'] == data['name'].title().replace(' ', ''):
return clss()
else:
instance = clss()
instance._name = data['name']
return instance
else:
raise ValueError(
'Data Type {} could not be recognized'.format(data['data_type'])) | Create a data type from a dictionary.
Args:
data: Data as a dictionary.
{
"name": data type name of the data type as a string
"data_type": the class name of the data type as a string
"base_unit": the base unit of the data type
} | Below is the the instruction that describes the task:
### Input:
Create a data type from a dictionary.
Args:
data: Data as a dictionary.
{
"name": data type name of the data type as a string
"data_type": the class name of the data type as a string
"base_unit": the base unit of the data type
}
### Response:
def from_json(cls, data):
"""Create a data type from a dictionary.
Args:
data: Data as a dictionary.
{
"name": data type name of the data type as a string
"data_type": the class name of the data type as a string
"base_unit": the base unit of the data type
}
"""
assert 'name' in data, 'Required keyword "name" is missing!'
assert 'data_type' in data, 'Required keyword "data_type" is missing!'
if cls._type_enumeration is None:
cls._type_enumeration = _DataTypeEnumeration(import_modules=False)
if data['data_type'] == 'GenericType':
assert 'base_unit' in data, \
'Keyword "base_unit" is missing and is required for GenericType.'
return cls._type_enumeration._GENERICTYPE(data['name'], data['base_unit'])
elif data['data_type'] in cls._type_enumeration._TYPES:
clss = cls._type_enumeration._TYPES[data['data_type']]
if data['data_type'] == data['name'].title().replace(' ', ''):
return clss()
else:
instance = clss()
instance._name = data['name']
return instance
else:
raise ValueError(
'Data Type {} could not be recognized'.format(data['data_type'])) |
def _get_definitions(source):
# type: (str) -> Tuple[Dict[str, str], int]
"""Extract a dictionary of arguments and definitions.
Args:
source: The source for a section of a usage string that contains
definitions.
Returns:
A two-tuple containing a dictionary of all arguments and definitions as
well as the length of the longest argument.
"""
max_len = 0
descs = collections.OrderedDict() # type: Dict[str, str]
lines = (s.strip() for s in source.splitlines())
non_empty_lines = (s for s in lines if s)
for line in non_empty_lines:
if line:
arg, desc = re.split(r'\s\s+', line.strip())
arg_len = len(arg)
if arg_len > max_len:
max_len = arg_len
descs[arg] = desc
return descs, max_len | Extract a dictionary of arguments and definitions.
Args:
source: The source for a section of a usage string that contains
definitions.
Returns:
A two-tuple containing a dictionary of all arguments and definitions as
well as the length of the longest argument. | Below is the the instruction that describes the task:
### Input:
Extract a dictionary of arguments and definitions.
Args:
source: The source for a section of a usage string that contains
definitions.
Returns:
A two-tuple containing a dictionary of all arguments and definitions as
well as the length of the longest argument.
### Response:
def _get_definitions(source):
# type: (str) -> Tuple[Dict[str, str], int]
"""Extract a dictionary of arguments and definitions.
Args:
source: The source for a section of a usage string that contains
definitions.
Returns:
A two-tuple containing a dictionary of all arguments and definitions as
well as the length of the longest argument.
"""
max_len = 0
descs = collections.OrderedDict() # type: Dict[str, str]
lines = (s.strip() for s in source.splitlines())
non_empty_lines = (s for s in lines if s)
for line in non_empty_lines:
if line:
arg, desc = re.split(r'\s\s+', line.strip())
arg_len = len(arg)
if arg_len > max_len:
max_len = arg_len
descs[arg] = desc
return descs, max_len |
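A small, made-up illustration of the parsing done by _get_definitions above (arguments and descriptions are separated by two or more spaces):
usage = """
-h, --help     Display this help message.
-v, --verbose  Print extra output.
"""
descs, max_len = _get_definitions(usage)
# descs == OrderedDict([('-h, --help', 'Display this help message.'),
#                       ('-v, --verbose', 'Print extra output.')])
# max_len == 13   (length of '-v, --verbose')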
def iter(self, match="*", count=1000):
""" Iterates the set of keys in :prop:key_prefix in :prop:_client
@match: #str pattern to match after the :prop:key_prefix
@count: the user specified the amount of work that should be done
at every call in order to retrieve elements from the collection
-> yields redis keys within this instance
"""
replace_this = self.key_prefix+":"
for key in self._client.scan_iter(
match="{}:{}".format(self.key_prefix, match), count=count):
yield self._decode(key).replace(replace_this, "", 1) | Iterates the set of keys in :prop:key_prefix in :prop:_client
@match: #str pattern to match after the :prop:key_prefix
@count: the user specified the amount of work that should be done
at every call in order to retrieve elements from the collection
-> yields redis keys within this instance | Below is the the instruction that describes the task:
### Input:
Iterates the set of keys in :prop:key_prefix in :prop:_client
@match: #str pattern to match after the :prop:key_prefix
@count: the user specified the amount of work that should be done
at every call in order to retrieve elements from the collection
-> yields redis keys within this instance
### Response:
def iter(self, match="*", count=1000):
""" Iterates the set of keys in :prop:key_prefix in :prop:_client
@match: #str pattern to match after the :prop:key_prefix
@count: the user specified the amount of work that should be done
at every call in order to retrieve elements from the collection
-> yields redis keys within this instance
"""
replace_this = self.key_prefix+":"
for key in self._client.scan_iter(
match="{}:{}".format(self.key_prefix, match), count=count):
yield self._decode(key).replace(replace_this, "", 1) |
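A sketch of how the key iterator above might be used; the wrapper class and the stored keys are assumptions, not part of the record:
store = RedisHash('sessions', client)        # hypothetical wrapper exposing iter()
for key in store.iter(match='user:*'):
    print(key)                               # e.g. 'user:42' -- the 'sessions:' prefix is stripped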
def visit_continue(self, node, parent):
"""visit a Continue node by returning a fresh instance of it"""
return nodes.Continue(
getattr(node, "lineno", None), getattr(node, "col_offset", None), parent
) | visit a Continue node by returning a fresh instance of it | Below is the the instruction that describes the task:
### Input:
visit a Continue node by returning a fresh instance of it
### Response:
def visit_continue(self, node, parent):
"""visit a Continue node by returning a fresh instance of it"""
return nodes.Continue(
getattr(node, "lineno", None), getattr(node, "col_offset", None), parent
) |
def k_partitions(collection, k):
"""Generate all ``k``-partitions of a collection.
Example:
>>> list(k_partitions(range(3), 2))
[[[0, 1], [2]], [[0], [1, 2]], [[0, 2], [1]]]
"""
collection = list(collection)
n = len(collection)
# Special cases
if n == 0 or k < 1:
return []
if k == 1:
return [[collection]]
a = [0] * (n + 1)
for j in range(1, k + 1):
a[n - k + j] = j - 1
return _f(k, n, 0, n, a, k, collection) | Generate all ``k``-partitions of a collection.
Example:
>>> list(k_partitions(range(3), 2))
[[[0, 1], [2]], [[0], [1, 2]], [[0, 2], [1]]] | Below is the the instruction that describes the task:
### Input:
Generate all ``k``-partitions of a collection.
Example:
>>> list(k_partitions(range(3), 2))
[[[0, 1], [2]], [[0], [1, 2]], [[0, 2], [1]]]
### Response:
def k_partitions(collection, k):
"""Generate all ``k``-partitions of a collection.
Example:
>>> list(k_partitions(range(3), 2))
[[[0, 1], [2]], [[0], [1, 2]], [[0, 2], [1]]]
"""
collection = list(collection)
n = len(collection)
# Special cases
if n == 0 or k < 1:
return []
if k == 1:
return [[collection]]
a = [0] * (n + 1)
for j in range(1, k + 1):
a[n - k + j] = j - 1
return _f(k, n, 0, n, a, k, collection) |
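Two more data points for k_partitions above, assuming the helper _f referenced in the body behaves as the doctest suggests (the counts match Stirling numbers of the second kind):
list(k_partitions([1, 2], 1))          # -> [[[1, 2]]]   single-block special case
len(list(k_partitions(range(4), 2)))   # -> 7, i.e. S(4, 2)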
def from_set(self, fileset, check_if_dicoms=True):
"""Overwrites self.items with the given set of files.
Will filter the fileset and keep only Dicom files.
Parameters
----------
fileset: iterable of str
Paths to files
check_if_dicoms: bool
Whether to check if the items in fileset are dicom file paths
"""
if check_if_dicoms:
self.items = []
for f in fileset:
if is_dicom_file(f):
self.items.append(f)
else:
self.items = fileset | Overwrites self.items with the given set of files.
Will filter the fileset and keep only Dicom files.
Parameters
----------
fileset: iterable of str
Paths to files
check_if_dicoms: bool
Whether to check if the items in fileset are dicom file paths | Below is the the instruction that describes the task:
### Input:
Overwrites self.items with the given set of files.
Will filter the fileset and keep only Dicom files.
Parameters
----------
fileset: iterable of str
Paths to files
check_if_dicoms: bool
Whether to check if the items in fileset are dicom file paths
### Response:
def from_set(self, fileset, check_if_dicoms=True):
"""Overwrites self.items with the given set of files.
Will filter the fileset and keep only Dicom files.
Parameters
----------
fileset: iterable of str
Paths to files
check_if_dicoms: bool
Whether to check if the items in fileset are dicom file paths
"""
if check_if_dicoms:
self.items = []
for f in fileset:
if is_dicom_file(f):
self.items.append(f)
else:
self.items = fileset |
def add_extension(self, klass, extension):
"""Register an extension for a class.
:param klass: Class to register an extension for
:param extension: Extension (arbitrary type)
"""
klass = self._get_class_path(klass)
# TODO: Take order into account.
self._extensions.setdefault(klass, []).append(extension) | Register an extension for a class.
:param klass: Class to register an extension for
:param extension: Extension (arbitrary type) | Below is the the instruction that describes the task:
### Input:
Register an extension for a class.
:param klass: Class to register an extension for
:param extension: Extension (arbitrary type)
### Response:
def add_extension(self, klass, extension):
"""Register an extension for a class.
:param klass: Class to register an extension for
:param extension: Extension (arbitrary type)
"""
klass = self._get_class_path(klass)
# TODO: Take order into account.
self._extensions.setdefault(klass, []).append(extension) |
def dimensions(self, *dimensions):
""" Add a list of Dimension ingredients to the query. These can either be
Dimension objects or strings representing dimensions on the shelf.
The Dimension expression will be added to the query's select statement
and to the group_by.
:param dimensions: Dimensions to add to the recipe. Dimensions can
either be keys on the ``shelf`` or
Dimension objects
:type dimensions: list
"""
for d in dimensions:
self._cauldron.use(self._shelf.find(d, Dimension))
self.dirty = True
return self | Add a list of Dimension ingredients to the query. These can either be
Dimension objects or strings representing dimensions on the shelf.
The Dimension expression will be added to the query's select statement
and to the group_by.
:param dimensions: Dimensions to add to the recipe. Dimensions can
either be keys on the ``shelf`` or
Dimension objects
:type dimensions: list | Below is the the instruction that describes the task:
### Input:
Add a list of Dimension ingredients to the query. These can either be
Dimension objects or strings representing dimensions on the shelf.
The Dimension expression will be added to the query's select statement
and to the group_by.
:param dimensions: Dimensions to add to the recipe. Dimensions can
either be keys on the ``shelf`` or
Dimension objects
:type dimensions: list
### Response:
def dimensions(self, *dimensions):
""" Add a list of Dimension ingredients to the query. These can either be
Dimension objects or strings representing dimensions on the shelf.
The Dimension expression will be added to the query's select statement
and to the group_by.
:param dimensions: Dimensions to add to the recipe. Dimensions can
either be keys on the ``shelf`` or
Dimension objects
:type dimensions: list
"""
for d in dimensions:
self._cauldron.use(self._shelf.find(d, Dimension))
self.dirty = True
return self |
def get_page_args():
"""
Get page arguments, returns a dictionary
{ <VIEW_NAME>: PAGE_NUMBER }
Arguments are passed: page_<VIEW_NAME>=<PAGE_NUMBER>
"""
pages = {}
for arg in request.args:
re_match = re.findall("page_(.*)", arg)
if re_match:
pages[re_match[0]] = int(request.args.get(arg))
return pages | Get page arguments, returns a dictionary
{ <VIEW_NAME>: PAGE_NUMBER }
Arguments are passed: page_<VIEW_NAME>=<PAGE_NUMBER> | Below is the the instruction that describes the task:
### Input:
Get page arguments, returns a dictionary
{ <VIEW_NAME>: PAGE_NUMBER }
Arguments are passed: page_<VIEW_NAME>=<PAGE_NUMBER>
### Response:
def get_page_args():
"""
Get page arguments, returns a dictionary
{ <VIEW_NAME>: PAGE_NUMBER }
Arguments are passed: page_<VIEW_NAME>=<PAGE_NUMBER>
"""
pages = {}
for arg in request.args:
re_match = re.findall("page_(.*)", arg)
if re_match:
pages[re_match[0]] = int(request.args.get(arg))
return pages |
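A hypothetical Flask request illustrates the mapping performed by get_page_args above (view names and values are made up):
# GET /users/list?page_UserView=3&page_GroupView=0&order=asc
# called inside that request context:
get_page_args()   # -> {'UserView': 3, 'GroupView': 0}
# 'order' is ignored because it does not match the page_<VIEW_NAME> pattern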
def _resolve_indirect_inner(maybe_idict):
"""Resolve the contents an indirect dictionary (containing promises) to produce
a dictionary actual values, including merging multiple sources into a
single input.
"""
if isinstance(maybe_idict, IndirectDict):
result = {}
for key, value in list(maybe_idict.items()):
if isinstance(value, (MergeInputs, DefaultWithSource)):
result[key] = value.resolve()
else:
result[key] = value[1].get(value[0])
return result
return maybe_idict | Resolve the contents of an indirect dictionary (containing promises) to produce
a dictionary of actual values, including merging multiple sources into a
single input. | Below is the the instruction that describes the task:
### Input:
Resolve the contents of an indirect dictionary (containing promises) to produce
a dictionary of actual values, including merging multiple sources into a
single input.
### Response:
def _resolve_indirect_inner(maybe_idict):
"""Resolve the contents an indirect dictionary (containing promises) to produce
a dictionary actual values, including merging multiple sources into a
single input.
"""
if isinstance(maybe_idict, IndirectDict):
result = {}
for key, value in list(maybe_idict.items()):
if isinstance(value, (MergeInputs, DefaultWithSource)):
result[key] = value.resolve()
else:
result[key] = value[1].get(value[0])
return result
return maybe_idict |
def handle_inform(self, connection, msg):
"""Dispatch an inform message to the appropriate method.
Parameters
----------
connection : ClientConnection object
The client connection the message was from.
msg : Message object
The inform message to process.
"""
if msg.name in self._inform_handlers:
try:
self._inform_handlers[msg.name](self, connection, msg)
except Exception:
e_type, e_value, trace = sys.exc_info()
reason = "\n".join(traceback.format_exception(
e_type, e_value, trace, self._tb_limit))
self._logger.error("Inform %s FAIL: %s" % (msg.name, reason))
else:
self._logger.warn("%s INVALID: Unknown inform." % (msg.name,)) | Dispatch an inform message to the appropriate method.
Parameters
----------
connection : ClientConnection object
The client connection the message was from.
msg : Message object
The inform message to process. | Below is the the instruction that describes the task:
### Input:
Dispatch an inform message to the appropriate method.
Parameters
----------
connection : ClientConnection object
The client connection the message was from.
msg : Message object
The inform message to process.
### Response:
def handle_inform(self, connection, msg):
"""Dispatch an inform message to the appropriate method.
Parameters
----------
connection : ClientConnection object
The client connection the message was from.
msg : Message object
The inform message to process.
"""
if msg.name in self._inform_handlers:
try:
self._inform_handlers[msg.name](self, connection, msg)
except Exception:
e_type, e_value, trace = sys.exc_info()
reason = "\n".join(traceback.format_exception(
e_type, e_value, trace, self._tb_limit))
self._logger.error("Inform %s FAIL: %s" % (msg.name, reason))
else:
self._logger.warn("%s INVALID: Unknown inform." % (msg.name,)) |
def sample(self, n_to_sample, **kwargs):
"""Sample a sequence of items from the pool
Parameters
----------
n_to_sample : int
number of items to sample
"""
n_to_sample = verify_positive(int(n_to_sample))
n_remaining = self._max_iter - self.t_
if n_remaining == 0:
if (not self.replace) and (self._n_items == self._max_iter):
raise Exception("All items have already been sampled")
else:
raise Exception("No more space available to continue sampling. "
"Consider re-initialising with a larger value "
"of max_iter.")
if n_to_sample > n_remaining:
warnings.warn("Space only remains for {} more iteration(s). "
"Setting n_to_sample = {}.".format(n_remaining, \
n_remaining))
n_to_sample = n_remaining
for _ in range(n_to_sample):
self._iterate(**kwargs) | Sample a sequence of items from the pool
Parameters
----------
n_to_sample : int
number of items to sample | Below is the the instruction that describes the task:
### Input:
Sample a sequence of items from the pool
Parameters
----------
n_to_sample : int
number of items to sample
### Response:
def sample(self, n_to_sample, **kwargs):
"""Sample a sequence of items from the pool
Parameters
----------
n_to_sample : int
number of items to sample
"""
n_to_sample = verify_positive(int(n_to_sample))
n_remaining = self._max_iter - self.t_
if n_remaining == 0:
if (not self.replace) and (self._n_items == self._max_iter):
raise Exception("All items have already been sampled")
else:
raise Exception("No more space available to continue sampling. "
"Consider re-initialising with a larger value "
"of max_iter.")
if n_to_sample > n_remaining:
warnings.warn("Space only remains for {} more iteration(s). "
"Setting n_to_sample = {}.".format(n_remaining, \
n_remaining))
n_to_sample = n_remaining
for _ in range(n_to_sample):
self._iterate(**kwargs) |
def init(url=None, ip=None, port=None, name=None, https=None, insecure=None, username=None, password=None,
cookies=None, proxy=None, start_h2o=True, nthreads=-1, ice_root=None, log_dir=None, log_level=None,
enable_assertions=True, max_mem_size=None, min_mem_size=None, strict_version_check=None, ignore_config=False,
extra_classpath=None, jvm_custom_args=None, bind_to_localhost=True, **kwargs):
"""
Attempt to connect to a local server, or if not successful start a new server and connect to it.
:param url: Full URL of the server to connect to (can be used instead of `ip` + `port` + `https`).
:param ip: The ip address (or host name) of the server where H2O is running.
:param port: Port number that H2O service is listening to.
:param name: Cluster name. If None while connecting to an existing cluster it will not check the cluster name.
If set then will connect only if the target cluster name matches. If no instance is found and decides to start a local
one then this will be used as the cluster name or a random one will be generated if set to None.
:param https: Set to True to connect via https:// instead of http://.
:param insecure: When using https, setting this to True will disable SSL certificates verification.
:param username: Username and
:param password: Password for basic authentication.
:param cookies: Cookie (or list of) to add to each request.
:param proxy: Proxy server address.
:param start_h2o: If False, do not attempt to start an h2o server when connection to an existing one failed.
:param nthreads: "Number of threads" option when launching a new h2o server.
:param ice_root: Directory for temporary files for the new h2o server.
:param log_dir: Directory for H2O logs to be stored if a new instance is started. Ignored if connecting to an existing node.
:param log_level: The logger level for H2O if a new instance is started. One of TRACE,DEBUG,INFO,WARN,ERRR,FATA. Default is INFO. Ignored if connecting to an existing node.
:param enable_assertions: Enable assertions in Java for the new h2o server.
:param max_mem_size: Maximum memory to use for the new h2o server. Integer input will be evaluated as gigabytes. Other units can be specified by passing in a string (e.g. "160M" for 160 megabytes).
:param min_mem_size: Minimum memory to use for the new h2o server. Integer input will be evaluated as gigabytes. Other units can be specified by passing in a string (e.g. "160M" for 160 megabytes).
:param strict_version_check: If True, an error will be raised if the client and server versions don't match.
:param ignore_config: Indicates whether a processing of a .h2oconfig file should be conducted or not. Default value is False.
:param extra_classpath: List of paths to libraries that should be included on the Java classpath when starting H2O from Python.
:param kwargs: (all other deprecated attributes)
:param jvm_custom_args: Custom, user-defined arguments for the JVM H2O is instantiated in. Ignored if there is an instance of H2O already running and the client connects to it.
"""
global h2oconn
assert_is_type(url, str, None)
assert_is_type(ip, str, None)
assert_is_type(port, int, str, None)
assert_is_type(name, str, None)
assert_is_type(https, bool, None)
assert_is_type(insecure, bool, None)
assert_is_type(username, str, None)
assert_is_type(password, str, None)
assert_is_type(cookies, str, [str], None)
assert_is_type(proxy, {str: str}, None)
assert_is_type(start_h2o, bool, None)
assert_is_type(nthreads, int)
assert_is_type(ice_root, str, None)
assert_is_type(log_dir, str, None)
assert_is_type(log_level, str, None)
assert_satisfies(log_level, log_level in [None, "TRACE", "DEBUG", "INFO", "WARN", "ERRR", "FATA"])
assert_is_type(enable_assertions, bool)
assert_is_type(max_mem_size, int, str, None)
assert_is_type(min_mem_size, int, str, None)
assert_is_type(strict_version_check, bool, None)
assert_is_type(extra_classpath, [str], None)
assert_is_type(jvm_custom_args, [str], None)
assert_is_type(bind_to_localhost, bool)
assert_is_type(kwargs, {"proxies": {str: str}, "max_mem_size_GB": int, "min_mem_size_GB": int,
"force_connect": bool, "as_port": bool})
def get_mem_size(mmint, mmgb):
if not mmint: # treat 0 and "" as if they were None
if mmgb is None: return None
return mmgb << 30
if is_type(mmint, int):
# If the user gives some small number just assume it's in Gigabytes...
if mmint < 1000: return mmint << 30
return mmint
if is_type(mmint, str):
last = mmint[-1].upper()
num = mmint[:-1]
if not (num.isdigit() and last in "MGT"):
raise H2OValueError("Wrong format for a *_memory_size argument: %s (should be a number followed by "
"a suffix 'M', 'G' or 'T')" % mmint)
if last == "T": return int(num) << 40
if last == "G": return int(num) << 30
if last == "M": return int(num) << 20
scheme = "https" if https else "http"
proxy = proxy[scheme] if proxy is not None and scheme in proxy else \
kwargs["proxies"][scheme] if "proxies" in kwargs and scheme in kwargs["proxies"] else None
mmax = get_mem_size(max_mem_size, kwargs.get("max_mem_size_GB"))
mmin = get_mem_size(min_mem_size, kwargs.get("min_mem_size_GB"))
auth = (username, password) if username and password else None
check_version = True
verify_ssl_certificates = True
# Apply the config file if ignore_config=False
if not ignore_config:
config = H2OConfigReader.get_config()
if url is None and ip is None and port is None and https is None and "init.url" in config:
url = config["init.url"]
if proxy is None and "init.proxy" in config:
proxy = config["init.proxy"]
if cookies is None and "init.cookies" in config:
cookies = config["init.cookies"].split(";")
if auth is None and "init.username" in config and "init.password" in config:
auth = (config["init.username"], config["init.password"])
if strict_version_check is None:
if "init.check_version" in config:
check_version = config["init.check_version"].lower() != "false"
elif os.environ.get("H2O_DISABLE_STRICT_VERSION_CHECK"):
check_version = False
else:
check_version = strict_version_check
if insecure is None:
if "init.verify_ssl_certificates" in config:
verify_ssl_certificates = config["init.verify_ssl_certificates"].lower() != "false"
else:
verify_ssl_certificates = not insecure
if not start_h2o:
print("Warning: if you don't want to start local H2O server, then use of `h2o.connect()` is preferred.")
try:
h2oconn = H2OConnection.open(url=url, ip=ip, port=port, name=name, https=https,
verify_ssl_certificates=verify_ssl_certificates,
auth=auth, proxy=proxy,cookies=cookies, verbose=True,
_msgs=("Checking whether there is an H2O instance running at {url} ",
"connected.", "not found."))
except H2OConnectionError:
# Backward compatibility: in init() port parameter really meant "baseport" when starting a local server...
if port and not str(port).endswith("+") and not kwargs.get("as_port", False):
port = str(port) + "+"
if not start_h2o: raise
if ip and not (ip == "localhost" or ip == "127.0.0.1"):
raise H2OConnectionError('Can only start H2O launcher if IP address is localhost.')
hs = H2OLocalServer.start(nthreads=nthreads, enable_assertions=enable_assertions, max_mem_size=mmax,
min_mem_size=mmin, ice_root=ice_root, log_dir=log_dir, log_level=log_level,
port=port, name=name,
extra_classpath=extra_classpath, jvm_custom_args=jvm_custom_args,
bind_to_localhost=bind_to_localhost)
h2oconn = H2OConnection.open(server=hs, https=https, verify_ssl_certificates=not insecure,
auth=auth, proxy=proxy,cookies=cookies, verbose=True)
if check_version:
version_check()
h2oconn.cluster.timezone = "UTC"
h2oconn.cluster.show_status() | Attempt to connect to a local server, or if not successful start a new server and connect to it.
:param url: Full URL of the server to connect to (can be used instead of `ip` + `port` + `https`).
:param ip: The ip address (or host name) of the server where H2O is running.
:param port: Port number that H2O service is listening to.
:param name: Cluster name. If None while connecting to an existing cluster it will not check the cluster name.
If set then will connect only if the target cluster name matches. If no instance is found and decides to start a local
one then this will be used as the cluster name or a random one will be generated if set to None.
:param https: Set to True to connect via https:// instead of http://.
:param insecure: When using https, setting this to True will disable SSL certificates verification.
:param username: Username and
:param password: Password for basic authentication.
:param cookies: Cookie (or list of) to add to each request.
:param proxy: Proxy server address.
:param start_h2o: If False, do not attempt to start an h2o server when connection to an existing one failed.
:param nthreads: "Number of threads" option when launching a new h2o server.
:param ice_root: Directory for temporary files for the new h2o server.
:param log_dir: Directory for H2O logs to be stored if a new instance is started. Ignored if connecting to an existing node.
:param log_level: The logger level for H2O if a new instance is started. One of TRACE,DEBUG,INFO,WARN,ERRR,FATA. Default is INFO. Ignored if connecting to an existing node.
:param enable_assertions: Enable assertions in Java for the new h2o server.
:param max_mem_size: Maximum memory to use for the new h2o server. Integer input will be evaluated as gigabytes. Other units can be specified by passing in a string (e.g. "160M" for 160 megabytes).
:param min_mem_size: Minimum memory to use for the new h2o server. Integer input will be evaluated as gigabytes. Other units can be specified by passing in a string (e.g. "160M" for 160 megabytes).
:param strict_version_check: If True, an error will be raised if the client and server versions don't match.
:param ignore_config: Indicates whether a processing of a .h2oconfig file should be conducted or not. Default value is False.
:param extra_classpath: List of paths to libraries that should be included on the Java classpath when starting H2O from Python.
:param kwargs: (all other deprecated attributes)
:param jvm_custom_args: Custom, user-defined arguments for the JVM H2O is instantiated in. Ignored if there is an instance of H2O already running and the client connects to it. | Below is the the instruction that describes the task:
### Input:
Attempt to connect to a local server, or if not successful start a new server and connect to it.
:param url: Full URL of the server to connect to (can be used instead of `ip` + `port` + `https`).
:param ip: The ip address (or host name) of the server where H2O is running.
:param port: Port number that H2O service is listening to.
:param name: Cluster name. If None while connecting to an existing cluster it will not check the cluster name.
If set then will connect only if the target cluster name matches. If no instance is found and decides to start a local
one then this will be used as the cluster name or a random one will be generated if set to None.
:param https: Set to True to connect via https:// instead of http://.
:param insecure: When using https, setting this to True will disable SSL certificates verification.
:param username: Username and
:param password: Password for basic authentication.
:param cookies: Cookie (or list of) to add to each request.
:param proxy: Proxy server address.
:param start_h2o: If False, do not attempt to start an h2o server when connection to an existing one failed.
:param nthreads: "Number of threads" option when launching a new h2o server.
:param ice_root: Directory for temporary files for the new h2o server.
:param log_dir: Directory for H2O logs to be stored if a new instance is started. Ignored if connecting to an existing node.
:param log_level: The logger level for H2O if a new instance is started. One of TRACE,DEBUG,INFO,WARN,ERRR,FATA. Default is INFO. Ignored if connecting to an existing node.
:param enable_assertions: Enable assertions in Java for the new h2o server.
:param max_mem_size: Maximum memory to use for the new h2o server. Integer input will be evaluated as gigabytes. Other units can be specified by passing in a string (e.g. "160M" for 160 megabytes).
:param min_mem_size: Minimum memory to use for the new h2o server. Integer input will be evaluated as gigabytes. Other units can be specified by passing in a string (e.g. "160M" for 160 megabytes).
:param strict_version_check: If True, an error will be raised if the client and server versions don't match.
:param ignore_config: Indicates whether a processing of a .h2oconfig file should be conducted or not. Default value is False.
:param extra_classpath: List of paths to libraries that should be included on the Java classpath when starting H2O from Python.
:param kwargs: (all other deprecated attributes)
:param jvm_custom_args: Custom, user-defined arguments for the JVM H2O is instantiated in. Ignored if there is an instance of H2O already running and the client connects to it.
### Response:
def init(url=None, ip=None, port=None, name=None, https=None, insecure=None, username=None, password=None,
cookies=None, proxy=None, start_h2o=True, nthreads=-1, ice_root=None, log_dir=None, log_level=None,
enable_assertions=True, max_mem_size=None, min_mem_size=None, strict_version_check=None, ignore_config=False,
extra_classpath=None, jvm_custom_args=None, bind_to_localhost=True, **kwargs):
"""
Attempt to connect to a local server, or if not successful start a new server and connect to it.
:param url: Full URL of the server to connect to (can be used instead of `ip` + `port` + `https`).
:param ip: The ip address (or host name) of the server where H2O is running.
:param port: Port number that H2O service is listening to.
:param name: Cluster name. If None while connecting to an existing cluster it will not check the cluster name.
If set then will connect only if the target cluster name matches. If no instance is found and decides to start a local
one then this will be used as the cluster name or a random one will be generated if set to None.
:param https: Set to True to connect via https:// instead of http://.
:param insecure: When using https, setting this to True will disable SSL certificates verification.
:param username: Username and
:param password: Password for basic authentication.
:param cookies: Cookie (or list of) to add to each request.
:param proxy: Proxy server address.
:param start_h2o: If False, do not attempt to start an h2o server when connection to an existing one failed.
:param nthreads: "Number of threads" option when launching a new h2o server.
:param ice_root: Directory for temporary files for the new h2o server.
:param log_dir: Directory for H2O logs to be stored if a new instance is started. Ignored if connecting to an existing node.
:param log_level: The logger level for H2O if a new instance is started. One of TRACE,DEBUG,INFO,WARN,ERRR,FATA. Default is INFO. Ignored if connecting to an existing node.
:param enable_assertions: Enable assertions in Java for the new h2o server.
:param max_mem_size: Maximum memory to use for the new h2o server. Integer input will be evaluated as gigabytes. Other units can be specified by passing in a string (e.g. "160M" for 160 megabytes).
:param min_mem_size: Minimum memory to use for the new h2o server. Integer input will be evaluated as gigabytes. Other units can be specified by passing in a string (e.g. "160M" for 160 megabytes).
:param strict_version_check: If True, an error will be raised if the client and server versions don't match.
:param ignore_config: Indicates whether a processing of a .h2oconfig file should be conducted or not. Default value is False.
:param extra_classpath: List of paths to libraries that should be included on the Java classpath when starting H2O from Python.
:param kwargs: (all other deprecated attributes)
:param jvm_custom_args: Custom, user-defined arguments for the JVM H2O is instantiated in. Ignored if there is an instance of H2O already running and the client connects to it.
"""
global h2oconn
assert_is_type(url, str, None)
assert_is_type(ip, str, None)
assert_is_type(port, int, str, None)
assert_is_type(name, str, None)
assert_is_type(https, bool, None)
assert_is_type(insecure, bool, None)
assert_is_type(username, str, None)
assert_is_type(password, str, None)
assert_is_type(cookies, str, [str], None)
assert_is_type(proxy, {str: str}, None)
assert_is_type(start_h2o, bool, None)
assert_is_type(nthreads, int)
assert_is_type(ice_root, str, None)
assert_is_type(log_dir, str, None)
assert_is_type(log_level, str, None)
assert_satisfies(log_level, log_level in [None, "TRACE", "DEBUG", "INFO", "WARN", "ERRR", "FATA"])
assert_is_type(enable_assertions, bool)
assert_is_type(max_mem_size, int, str, None)
assert_is_type(min_mem_size, int, str, None)
assert_is_type(strict_version_check, bool, None)
assert_is_type(extra_classpath, [str], None)
assert_is_type(jvm_custom_args, [str], None)
assert_is_type(bind_to_localhost, bool)
assert_is_type(kwargs, {"proxies": {str: str}, "max_mem_size_GB": int, "min_mem_size_GB": int,
"force_connect": bool, "as_port": bool})
def get_mem_size(mmint, mmgb):
if not mmint: # treat 0 and "" as if they were None
if mmgb is None: return None
return mmgb << 30
if is_type(mmint, int):
# If the user gives some small number just assume it's in Gigabytes...
if mmint < 1000: return mmint << 30
return mmint
if is_type(mmint, str):
last = mmint[-1].upper()
num = mmint[:-1]
if not (num.isdigit() and last in "MGT"):
raise H2OValueError("Wrong format for a *_memory_size argument: %s (should be a number followed by "
"a suffix 'M', 'G' or 'T')" % mmint)
if last == "T": return int(num) << 40
if last == "G": return int(num) << 30
if last == "M": return int(num) << 20
scheme = "https" if https else "http"
proxy = proxy[scheme] if proxy is not None and scheme in proxy else \
kwargs["proxies"][scheme] if "proxies" in kwargs and scheme in kwargs["proxies"] else None
mmax = get_mem_size(max_mem_size, kwargs.get("max_mem_size_GB"))
mmin = get_mem_size(min_mem_size, kwargs.get("min_mem_size_GB"))
auth = (username, password) if username and password else None
check_version = True
verify_ssl_certificates = True
# Apply the config file if ignore_config=False
if not ignore_config:
config = H2OConfigReader.get_config()
if url is None and ip is None and port is None and https is None and "init.url" in config:
url = config["init.url"]
if proxy is None and "init.proxy" in config:
proxy = config["init.proxy"]
if cookies is None and "init.cookies" in config:
cookies = config["init.cookies"].split(";")
if auth is None and "init.username" in config and "init.password" in config:
auth = (config["init.username"], config["init.password"])
if strict_version_check is None:
if "init.check_version" in config:
check_version = config["init.check_version"].lower() != "false"
elif os.environ.get("H2O_DISABLE_STRICT_VERSION_CHECK"):
check_version = False
else:
check_version = strict_version_check
if insecure is None:
if "init.verify_ssl_certificates" in config:
verify_ssl_certificates = config["init.verify_ssl_certificates"].lower() != "false"
else:
verify_ssl_certificates = not insecure
if not start_h2o:
print("Warning: if you don't want to start local H2O server, then use of `h2o.connect()` is preferred.")
try:
h2oconn = H2OConnection.open(url=url, ip=ip, port=port, name=name, https=https,
verify_ssl_certificates=verify_ssl_certificates,
auth=auth, proxy=proxy,cookies=cookies, verbose=True,
_msgs=("Checking whether there is an H2O instance running at {url} ",
"connected.", "not found."))
except H2OConnectionError:
# Backward compatibility: in init() port parameter really meant "baseport" when starting a local server...
if port and not str(port).endswith("+") and not kwargs.get("as_port", False):
port = str(port) + "+"
if not start_h2o: raise
if ip and not (ip == "localhost" or ip == "127.0.0.1"):
raise H2OConnectionError('Can only start H2O launcher if IP address is localhost.')
hs = H2OLocalServer.start(nthreads=nthreads, enable_assertions=enable_assertions, max_mem_size=mmax,
min_mem_size=mmin, ice_root=ice_root, log_dir=log_dir, log_level=log_level,
port=port, name=name,
extra_classpath=extra_classpath, jvm_custom_args=jvm_custom_args,
bind_to_localhost=bind_to_localhost)
h2oconn = H2OConnection.open(server=hs, https=https, verify_ssl_certificates=not insecure,
auth=auth, proxy=proxy,cookies=cookies, verbose=True)
if check_version:
version_check()
h2oconn.cluster.timezone = "UTC"
h2oconn.cluster.show_status() |
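A minimal usage sketch of the memory-size handling above (illustrative values; both calls ask for a 2 GB heap when a new local server is started):

    >>> import h2o
    >>> h2o.init(max_mem_size=2)          # a bare small integer is read as gigabytes
    >>> h2o.init(max_mem_size="2048M")    # a string takes an explicit 'M', 'G' or 'T' suffix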
def run(self, raw_args=None):
"""
Parses the given arguments (if these are None, then argparse's parser
defaults to parsing sys.argv), inits a Core instance, calls its lint
method with the respective arguments, and then exits.
"""
args = self.parser.parse_args(raw_args)
core = Core()
try:
report = core.lint(**vars(args))
except Exception as err:
self.parser.error(str(err))
print(report)
self.parser.exit() | Parses the given arguments (if these are None, then argparse's parser
defaults to parsing sys.argv), inits a Core instance, calls its lint
method with the respective arguments, and then exits. | Below is the the instruction that describes the task:
### Input:
Parses the given arguments (if these are None, then argparse's parser
defaults to parsing sys.argv), inits a Core instance, calls its lint
method with the respective arguments, and then exits.
### Response:
def run(self, raw_args=None):
"""
Parses the given arguments (if these are None, then argparse's parser
defaults to parsing sys.argv), inits a Core instance, calls its lint
method with the respective arguments, and then exits.
"""
args = self.parser.parse_args(raw_args)
core = Core()
try:
report = core.lint(**vars(args))
except Exception as err:
self.parser.error(str(err))
print(report)
self.parser.exit() |
def from_args(cls, **kwargs):
"""
Generates one or more VSGSuite instances from command line arguments.
:param kwargs: List of additional keyworded arguments to be passed into the VSGSuite defined in the :meth:`~VSGSuite.make_parser` method.
"""
# Create a VSGSuite for each filename on the command line.
if kwargs.get('suite_commands', None) == 'generate':
filenames = kwargs.pop('configuration_filenames', [])
return [cls.from_file(f) for f in filenames]
        # Create a VSGSuite from the target directory and override commands
if kwargs.get('suite_commands', None) == 'auto':
type = kwargs.get('suite_type', None)
return [cls.from_directory('', type, **kwargs)]
# Create nothing.
return [] | Generates one or more VSGSuite instances from command line arguments.
:param kwargs: List of additional keyworded arguments to be passed into the VSGSuite defined in the :meth:`~VSGSuite.make_parser` method. | Below is the the instruction that describes the task:
### Input:
Generates one or more VSGSuite instances from command line arguments.
:param kwargs: List of additional keyworded arguments to be passed into the VSGSuite defined in the :meth:`~VSGSuite.make_parser` method.
### Response:
def from_args(cls, **kwargs):
"""
Generates one or more VSGSuite instances from command line arguments.
:param kwargs: List of additional keyworded arguments to be passed into the VSGSuite defined in the :meth:`~VSGSuite.make_parser` method.
"""
# Create a VSGSuite for each filename on the command line.
if kwargs.get('suite_commands', None) == 'generate':
filenames = kwargs.pop('configuration_filenames', [])
return [cls.from_file(f) for f in filenames]
        # Create a VSGSuite from the target directory and override commands
if kwargs.get('suite_commands', None) == 'auto':
type = kwargs.get('suite_type', None)
return [cls.from_directory('', type, **kwargs)]
# Create nothing.
return [] |
def _read(self, entry):
"""Read entry content.
Args:
entry: zip file entry as zipfile.ZipInfo.
Returns:
Entry content as string.
"""
start_time = time.time()
content = self._zip.read(entry.filename)
ctx = context.get()
if ctx:
operation.counters.Increment(COUNTER_IO_READ_BYTES, len(content))(ctx)
operation.counters.Increment(
COUNTER_IO_READ_MSEC, int((time.time() - start_time) * 1000))(ctx)
return content | Read entry content.
Args:
entry: zip file entry as zipfile.ZipInfo.
Returns:
Entry content as string. | Below is the the instruction that describes the task:
### Input:
Read entry content.
Args:
entry: zip file entry as zipfile.ZipInfo.
Returns:
Entry content as string.
### Response:
def _read(self, entry):
"""Read entry content.
Args:
entry: zip file entry as zipfile.ZipInfo.
Returns:
Entry content as string.
"""
start_time = time.time()
content = self._zip.read(entry.filename)
ctx = context.get()
if ctx:
operation.counters.Increment(COUNTER_IO_READ_BYTES, len(content))(ctx)
operation.counters.Increment(
COUNTER_IO_READ_MSEC, int((time.time() - start_time) * 1000))(ctx)
return content |
def getHeaders(self):
'''
        Parse dictionary-form Headers out of the string
'''
items = self.data
headers = {}
for item in items:
if len(item) > 0 and self._judeNOtIn(item, ['curl', 'GET', 'Cookie', 'cookie']):
sp = item.split(':')
headers[sp[0]] = sp[1]
        return headers | Parse dictionary-form Headers out of the string | Below is the the instruction that describes the task:
### Input:
Parse dictionary-form Headers out of the string
### Response:
def getHeaders(self):
'''
        Parse dictionary-form Headers out of the string
'''
items = self.data
headers = {}
for item in items:
if len(item) > 0 and self._judeNOtIn(item, ['curl', 'GET', 'Cookie', 'cookie']):
sp = item.split(':')
headers[sp[0]] = sp[1]
return headers |
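A short illustration of the split logic above, assuming parser is an instance of the surrounding class and its data attribute already holds the raw request lines; note that only sp[1] is kept, so a header value containing a second ':' would be truncated:

    >>> parser.data = ['curl http://example.com', 'Accept: text/html', 'X-Token: abc123']
    >>> parser.getHeaders()
    {'Accept': ' text/html', 'X-Token': ' abc123'}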
def in6_iseui64(x):
"""
Return True if provided address has an interface identifier part
created in modified EUI-64 format (meaning it matches *::*:*ff:fe*:*).
Otherwise, False is returned. Address must be passed in printable
format.
"""
eui64 = inet_pton(socket.AF_INET6, '::ff:fe00:0')
x = in6_and(inet_pton(socket.AF_INET6, x), eui64)
return x == eui64 | Return True if provided address has an interface identifier part
created in modified EUI-64 format (meaning it matches *::*:*ff:fe*:*).
Otherwise, False is returned. Address must be passed in printable
format. | Below is the the instruction that describes the task:
### Input:
Return True if provided address has an interface identifier part
created in modified EUI-64 format (meaning it matches *::*:*ff:fe*:*).
Otherwise, False is returned. Address must be passed in printable
format.
### Response:
def in6_iseui64(x):
"""
Return True if provided address has an interface identifier part
created in modified EUI-64 format (meaning it matches *::*:*ff:fe*:*).
Otherwise, False is returned. Address must be passed in printable
format.
"""
eui64 = inet_pton(socket.AF_INET6, '::ff:fe00:0')
x = in6_and(inet_pton(socket.AF_INET6, x), eui64)
return x == eui64 |
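Two quick checks against the ::ff:fe00:0 mask used above:

    >>> in6_iseui64('fe80::0211:22ff:fe33:4455')
    True
    >>> in6_iseui64('2001:db8::1')
    False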
def _get_current_output(self):
"""
Get child modules output.
"""
output = []
for item in self.items:
out = self.py3.get_output(item)
if out and "separator" not in out[-1]:
out[-1]["separator"] = True
output += out
return output | Get child modules output. | Below is the the instruction that describes the task:
### Input:
Get child modules output.
### Response:
def _get_current_output(self):
"""
Get child modules output.
"""
output = []
for item in self.items:
out = self.py3.get_output(item)
if out and "separator" not in out[-1]:
out[-1]["separator"] = True
output += out
return output |
def parse_output(a1_text, a2_text, sentence_segmentations):
"""Parses the output of the TEES reader and returns a networkx graph
with the event information.
Parameters
----------
a1_text : str
Contents of the TEES a1 output, specifying the entities
    a2_text : str
        Contents of the TEES a2 output, specifying the event graph
    sentence_segmentations : str
        Contents of the TEES sentence segmentation output XML
Returns
-------
events : networkx.DiGraph
networkx graph with the entities, events, and relationship between
extracted by TEES
"""
# Parse the sentence segmentation document
tees_sentences = TEESSentences(sentence_segmentations)
# Parse the a1 (entities) file
entities = parse_a1(a1_text)
# Parse the a2 (events) file
events = parse_a2(a2_text, entities, tees_sentences)
return events | Parses the output of the TEES reader and returns a networkx graph
with the event information.
Parameters
----------
a1_text : str
Contents of the TEES a1 output, specifying the entities
    a2_text : str
        Contents of the TEES a2 output, specifying the event graph
    sentence_segmentations : str
        Contents of the TEES sentence segmentation output XML
Returns
-------
events : networkx.DiGraph
networkx graph with the entities, events, and relationship between
extracted by TEES | Below is the the instruction that describes the task:
### Input:
Parses the output of the TEES reader and returns a networkx graph
with the event information.
Parameters
----------
a1_text : str
Contents of the TEES a1 output, specifying the entities
    a2_text : str
        Contents of the TEES a2 output, specifying the event graph
    sentence_segmentations : str
        Contents of the TEES sentence segmentation output XML
Returns
-------
events : networkx.DiGraph
networkx graph with the entities, events, and relationship between
extracted by TEES
### Response:
def parse_output(a1_text, a2_text, sentence_segmentations):
"""Parses the output of the TEES reader and returns a networkx graph
with the event information.
Parameters
----------
a1_text : str
Contents of the TEES a1 output, specifying the entities
    a2_text : str
        Contents of the TEES a2 output, specifying the event graph
    sentence_segmentations : str
        Contents of the TEES sentence segmentation output XML
Returns
-------
events : networkx.DiGraph
networkx graph with the entities, events, and relationship between
extracted by TEES
"""
# Parse the sentence segmentation document
tees_sentences = TEESSentences(sentence_segmentations)
# Parse the a1 (entities) file
entities = parse_a1(a1_text)
# Parse the a2 (events) file
events = parse_a2(a2_text, entities, tees_sentences)
return events |
def one_phase_dP_acceleration(m, D, rho_o, rho_i):
r'''This function handles calculation of one-phase fluid pressure drop
due to acceleration for flow inside channels. This is a discrete
calculation, providing the total differential in pressure for a given
length and should be called as part of a segment solver routine.
.. math::
- \left(\frac{d P}{dz}\right)_{acc} = G^2 \frac{d}{dz} \left[\frac{
1}{\rho_o} - \frac{1}{\rho_i} \right]
Parameters
----------
m : float
Mass flow rate of fluid, [kg/s]
D : float
Diameter of pipe, [m]
rho_o : float
Fluid density out, [kg/m^3]
rho_i : float
        Fluid density in, [kg/m^3]
Returns
-------
dP : float
Acceleration component of pressure drop for one-phase flow, [Pa]
Notes
-----
Examples
--------
>>> one_phase_dP_acceleration(m=1, D=0.1, rho_o=827.1, rho_i=830)
0.06848289670840459
'''
G = 4.0*m/(pi*D*D)
return G*G*(1.0/rho_o - 1.0/rho_i) | r'''This function handles calculation of one-phase fluid pressure drop
due to acceleration for flow inside channels. This is a discrete
calculation, providing the total differential in pressure for a given
length and should be called as part of a segment solver routine.
.. math::
- \left(\frac{d P}{dz}\right)_{acc} = G^2 \frac{d}{dz} \left[\frac{
1}{\rho_o} - \frac{1}{\rho_i} \right]
Parameters
----------
m : float
Mass flow rate of fluid, [kg/s]
D : float
Diameter of pipe, [m]
rho_o : float
Fluid density out, [kg/m^3]
rho_i : float
        Fluid density in, [kg/m^3]
Returns
-------
dP : float
Acceleration component of pressure drop for one-phase flow, [Pa]
Notes
-----
Examples
--------
>>> one_phase_dP_acceleration(m=1, D=0.1, rho_o=827.1, rho_i=830)
0.06848289670840459 | Below is the the instruction that describes the task:
### Input:
r'''This function handles calculation of one-phase fluid pressure drop
due to acceleration for flow inside channels. This is a discrete
calculation, providing the total differential in pressure for a given
length and should be called as part of a segment solver routine.
.. math::
- \left(\frac{d P}{dz}\right)_{acc} = G^2 \frac{d}{dz} \left[\frac{
1}{\rho_o} - \frac{1}{\rho_i} \right]
Parameters
----------
m : float
Mass flow rate of fluid, [kg/s]
D : float
Diameter of pipe, [m]
rho_o : float
Fluid density out, [kg/m^3]
rho_i : float
        Fluid density in, [kg/m^3]
Returns
-------
dP : float
Acceleration component of pressure drop for one-phase flow, [Pa]
Notes
-----
Examples
--------
>>> one_phase_dP_acceleration(m=1, D=0.1, rho_o=827.1, rho_i=830)
0.06848289670840459
### Response:
def one_phase_dP_acceleration(m, D, rho_o, rho_i):
r'''This function handles calculation of one-phase fluid pressure drop
due to acceleration for flow inside channels. This is a discrete
calculation, providing the total differential in pressure for a given
length and should be called as part of a segment solver routine.
.. math::
- \left(\frac{d P}{dz}\right)_{acc} = G^2 \frac{d}{dz} \left[\frac{
1}{\rho_o} - \frac{1}{\rho_i} \right]
Parameters
----------
m : float
Mass flow rate of fluid, [kg/s]
D : float
Diameter of pipe, [m]
rho_o : float
Fluid density out, [kg/m^3]
rho_i : float
        Fluid density in, [kg/m^3]
Returns
-------
dP : float
Acceleration component of pressure drop for one-phase flow, [Pa]
Notes
-----
Examples
--------
>>> one_phase_dP_acceleration(m=1, D=0.1, rho_o=827.1, rho_i=830)
0.06848289670840459
'''
G = 4.0*m/(pi*D*D)
return G*G*(1.0/rho_o - 1.0/rho_i) |
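The doctest value can be verified by hand: G = 4*1/(pi*0.1^2) ≈ 127.32 kg/m^2/s, so dP = G^2*(1/827.1 - 1/830) ≈ 16211*4.22e-6 ≈ 0.0685 Pa.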
def get_splits(self, id_num, unit='mi'):
"""Return the splits of the activity with the given id.
:param unit: The unit to use for splits. May be one of
'mi' or 'km'.
"""
url = self._build_url('my', 'activities', id_num, 'splits', unit)
return self._json(url) | Return the splits of the activity with the given id.
:param unit: The unit to use for splits. May be one of
'mi' or 'km'. | Below is the the instruction that describes the task:
### Input:
Return the splits of the activity with the given id.
:param unit: The unit to use for splits. May be one of
'mi' or 'km'.
### Response:
def get_splits(self, id_num, unit='mi'):
"""Return the splits of the activity with the given id.
:param unit: The unit to use for splits. May be one of
'mi' or 'km'.
"""
url = self._build_url('my', 'activities', id_num, 'splits', unit)
return self._json(url) |
def eq(self, o):
"""
Equal
        :param o: The other operand
:return: TrueResult(), FalseResult(), or MaybeResult()
"""
if (self.is_integer
and o.is_integer
):
# Two integers
if self.lower_bound == o.lower_bound:
# They are equal
return TrueResult()
else:
# They are not equal
return FalseResult()
else:
if self.name == o.name:
return TrueResult() # They are the same guy
si_intersection = self.intersection(o)
if si_intersection.is_empty:
return FalseResult()
else:
return MaybeResult() | Equal
        :param o: The other operand
:return: TrueResult(), FalseResult(), or MaybeResult() | Below is the the instruction that describes the task:
### Input:
Equal
        :param o: The other operand
:return: TrueResult(), FalseResult(), or MaybeResult()
### Response:
def eq(self, o):
"""
Equal
        :param o: The other operand
:return: TrueResult(), FalseResult(), or MaybeResult()
"""
if (self.is_integer
and o.is_integer
):
# Two integers
if self.lower_bound == o.lower_bound:
# They are equal
return TrueResult()
else:
# They are not equal
return FalseResult()
else:
if self.name == o.name:
return TrueResult() # They are the same guy
si_intersection = self.intersection(o)
if si_intersection.is_empty:
return FalseResult()
else:
return MaybeResult() |
def clear_learning_objectives(self):
"""Clears the learning objectives.
raise: NoAccess - ``Metadata.isRequired()`` or
``Metadata.isReadOnly()`` is ``true``
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.learning.ActivityForm.clear_assets_template
if (self.get_learning_objectives_metadata().is_read_only() or
self.get_learning_objectives_metadata().is_required()):
raise errors.NoAccess()
self._my_map['learningObjectiveIds'] = self._learning_objectives_default | Clears the learning objectives.
raise: NoAccess - ``Metadata.isRequired()`` or
``Metadata.isReadOnly()`` is ``true``
*compliance: mandatory -- This method must be implemented.* | Below is the the instruction that describes the task:
### Input:
Clears the learning objectives.
raise: NoAccess - ``Metadata.isRequired()`` or
``Metadata.isReadOnly()`` is ``true``
*compliance: mandatory -- This method must be implemented.*
### Response:
def clear_learning_objectives(self):
"""Clears the learning objectives.
raise: NoAccess - ``Metadata.isRequired()`` or
``Metadata.isReadOnly()`` is ``true``
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.learning.ActivityForm.clear_assets_template
if (self.get_learning_objectives_metadata().is_read_only() or
self.get_learning_objectives_metadata().is_required()):
raise errors.NoAccess()
self._my_map['learningObjectiveIds'] = self._learning_objectives_default |
def EncodeMessages(self,
message_list,
result,
destination=None,
timestamp=None,
api_version=3):
"""Accepts a list of messages and encodes for transmission.
This function signs and then encrypts the payload.
Args:
message_list: A MessageList rdfvalue containing a list of GrrMessages.
result: A ClientCommunication rdfvalue which will be filled in.
destination: The CN of the remote system this should go to.
timestamp: A timestamp to use for the signed messages. If None - use the
current time.
api_version: The api version which this should be encoded in.
Returns:
A nonce (based on time) which is inserted to the encrypted payload. The
client can verify that the server is able to decrypt the message and
return the nonce.
Raises:
RuntimeError: If we do not support this api version.
"""
if api_version not in [3]:
raise RuntimeError(
"Unsupported api version: %s, expected 3." % api_version)
# TODO(amoser): This is actually not great, we have two
# communicator classes already, one for the client, one for the
# server. This should be different methods, not a single one that
# gets passed a destination (server side) or not (client side).
if destination is None:
destination = self.server_name
# For the client it makes sense to cache the server cipher since
# it's the only cipher it ever uses.
cipher = self._GetServerCipher()
else:
remote_public_key = self._GetRemotePublicKey(destination)
cipher = Cipher(self.common_name, self.private_key, remote_public_key)
# Make a nonce for this transaction
if timestamp is None:
self.timestamp = timestamp = int(time.time() * 1000000)
packed_message_list = rdf_flows.PackedMessageList(timestamp=timestamp)
self.EncodeMessageList(message_list, packed_message_list)
result.encrypted_cipher_metadata = cipher.encrypted_cipher_metadata
# Include the encrypted cipher.
result.encrypted_cipher = cipher.encrypted_cipher
serialized_message_list = packed_message_list.SerializeToString()
# Encrypt the message symmetrically.
# New scheme cipher is signed plus hmac over message list.
result.packet_iv, result.encrypted = cipher.Encrypt(serialized_message_list)
# This is to support older endpoints.
result.hmac = cipher.HMAC(result.encrypted)
# Newer endpoints only look at this HMAC. It is recalculated for each packet
# in the session. Note that encrypted_cipher and encrypted_cipher_metadata
# do not change between all packets in this session.
result.full_hmac = cipher.HMAC(result.encrypted, result.encrypted_cipher,
result.encrypted_cipher_metadata,
result.packet_iv.SerializeToString(),
struct.pack("<I", api_version))
result.api_version = api_version
if isinstance(result, rdfvalue.RDFValue):
# Store the number of messages contained.
result.num_messages = len(message_list)
return timestamp | Accepts a list of messages and encodes for transmission.
This function signs and then encrypts the payload.
Args:
message_list: A MessageList rdfvalue containing a list of GrrMessages.
result: A ClientCommunication rdfvalue which will be filled in.
destination: The CN of the remote system this should go to.
timestamp: A timestamp to use for the signed messages. If None - use the
current time.
api_version: The api version which this should be encoded in.
Returns:
A nonce (based on time) which is inserted to the encrypted payload. The
client can verify that the server is able to decrypt the message and
return the nonce.
Raises:
RuntimeError: If we do not support this api version. | Below is the the instruction that describes the task:
### Input:
Accepts a list of messages and encodes for transmission.
This function signs and then encrypts the payload.
Args:
message_list: A MessageList rdfvalue containing a list of GrrMessages.
result: A ClientCommunication rdfvalue which will be filled in.
destination: The CN of the remote system this should go to.
timestamp: A timestamp to use for the signed messages. If None - use the
current time.
api_version: The api version which this should be encoded in.
Returns:
A nonce (based on time) which is inserted to the encrypted payload. The
client can verify that the server is able to decrypt the message and
return the nonce.
Raises:
RuntimeError: If we do not support this api version.
### Response:
def EncodeMessages(self,
message_list,
result,
destination=None,
timestamp=None,
api_version=3):
"""Accepts a list of messages and encodes for transmission.
This function signs and then encrypts the payload.
Args:
message_list: A MessageList rdfvalue containing a list of GrrMessages.
result: A ClientCommunication rdfvalue which will be filled in.
destination: The CN of the remote system this should go to.
timestamp: A timestamp to use for the signed messages. If None - use the
current time.
api_version: The api version which this should be encoded in.
Returns:
A nonce (based on time) which is inserted to the encrypted payload. The
client can verify that the server is able to decrypt the message and
return the nonce.
Raises:
RuntimeError: If we do not support this api version.
"""
if api_version not in [3]:
raise RuntimeError(
"Unsupported api version: %s, expected 3." % api_version)
# TODO(amoser): This is actually not great, we have two
# communicator classes already, one for the client, one for the
# server. This should be different methods, not a single one that
# gets passed a destination (server side) or not (client side).
if destination is None:
destination = self.server_name
# For the client it makes sense to cache the server cipher since
# it's the only cipher it ever uses.
cipher = self._GetServerCipher()
else:
remote_public_key = self._GetRemotePublicKey(destination)
cipher = Cipher(self.common_name, self.private_key, remote_public_key)
# Make a nonce for this transaction
if timestamp is None:
self.timestamp = timestamp = int(time.time() * 1000000)
packed_message_list = rdf_flows.PackedMessageList(timestamp=timestamp)
self.EncodeMessageList(message_list, packed_message_list)
result.encrypted_cipher_metadata = cipher.encrypted_cipher_metadata
# Include the encrypted cipher.
result.encrypted_cipher = cipher.encrypted_cipher
serialized_message_list = packed_message_list.SerializeToString()
# Encrypt the message symmetrically.
# New scheme cipher is signed plus hmac over message list.
result.packet_iv, result.encrypted = cipher.Encrypt(serialized_message_list)
# This is to support older endpoints.
result.hmac = cipher.HMAC(result.encrypted)
# Newer endpoints only look at this HMAC. It is recalculated for each packet
# in the session. Note that encrypted_cipher and encrypted_cipher_metadata
# do not change between all packets in this session.
result.full_hmac = cipher.HMAC(result.encrypted, result.encrypted_cipher,
result.encrypted_cipher_metadata,
result.packet_iv.SerializeToString(),
struct.pack("<I", api_version))
result.api_version = api_version
if isinstance(result, rdfvalue.RDFValue):
# Store the number of messages contained.
result.num_messages = len(message_list)
return timestamp |
def get_TGS(self, spn_user, override_etype = None):
"""
Requests a TGS ticket for the specified user.
        Returns the TGS ticket, and the decrypted encTGSRepPart.
spn_user: KerberosTarget: the service user you want to get TGS for.
        override_etype: None or list of etype values (int) Used mostly for kerberoasting, will override the AP_REQ supported etype values (which are derived from the TGT) to be able to receive whatever TGS ticket
"""
#construct tgs_req
logger.debug('Constructing TGS request for user %s' % spn_user.get_formatted_pname())
now = datetime.datetime.utcnow()
kdc_req_body = {}
kdc_req_body['kdc-options'] = KDCOptions(set(['forwardable','renewable','renewable_ok', 'canonicalize']))
kdc_req_body['realm'] = spn_user.domain.upper()
kdc_req_body['sname'] = PrincipalName({'name-type': NAME_TYPE.SRV_INST.value, 'name-string': spn_user.get_principalname()})
kdc_req_body['till'] = now + datetime.timedelta(days=1)
kdc_req_body['nonce'] = secrets.randbits(31)
if override_etype:
kdc_req_body['etype'] = override_etype
else:
kdc_req_body['etype'] = [self.kerberos_cipher_type]
authenticator_data = {}
authenticator_data['authenticator-vno'] = krb5_pvno
authenticator_data['crealm'] = Realm(self.kerberos_TGT['crealm'])
authenticator_data['cname'] = self.kerberos_TGT['cname']
authenticator_data['cusec'] = now.microsecond
authenticator_data['ctime'] = now
authenticator_data_enc = self.kerberos_cipher.encrypt(self.kerberos_session_key, 7, Authenticator(authenticator_data).dump(), None)
ap_req = {}
ap_req['pvno'] = krb5_pvno
ap_req['msg-type'] = MESSAGE_TYPE.KRB_AP_REQ.value
ap_req['ap-options'] = APOptions(set())
ap_req['ticket'] = Ticket(self.kerberos_TGT['ticket'])
ap_req['authenticator'] = EncryptedData({'etype': self.kerberos_cipher_type, 'cipher': authenticator_data_enc})
pa_data_1 = {}
pa_data_1['padata-type'] = PaDataType.TGS_REQ.value
pa_data_1['padata-value'] = AP_REQ(ap_req).dump()
kdc_req = {}
kdc_req['pvno'] = krb5_pvno
kdc_req['msg-type'] = MESSAGE_TYPE.KRB_TGS_REQ.value
kdc_req['padata'] = [pa_data_1]
kdc_req['req-body'] = KDC_REQ_BODY(kdc_req_body)
req = TGS_REQ(kdc_req)
logger.debug('Constructing TGS request to server')
rep = self.ksoc.sendrecv(req.dump())
logger.debug('Got TGS reply, decrypting...')
tgs = rep.native
encTGSRepPart = EncTGSRepPart.load(self.kerberos_cipher.decrypt(self.kerberos_session_key, 8, tgs['enc-part']['cipher'])).native
key = Key(encTGSRepPart['key']['keytype'], encTGSRepPart['key']['keyvalue'])
self.ccache.add_tgs(tgs, encTGSRepPart)
logger.debug('Got valid TGS reply')
self.kerberos_TGS = tgs
return tgs, encTGSRepPart, key | Requests a TGS ticket for the specified user.
        Returns the TGS ticket, and the decrypted encTGSRepPart.
spn_user: KerberosTarget: the service user you want to get TGS for.
        override_etype: None or list of etype values (int) Used mostly for kerberoasting, will override the AP_REQ supported etype values (which are derived from the TGT) to be able to receive whatever TGS ticket | Below is the the instruction that describes the task:
### Input:
Requests a TGS ticket for the specified user.
        Returns the TGS ticket, and the decrypted encTGSRepPart.
spn_user: KerberosTarget: the service user you want to get TGS for.
        override_etype: None or list of etype values (int) Used mostly for kerberoasting, will override the AP_REQ supported etype values (which are derived from the TGT) to be able to receive whatever TGS ticket
### Response:
def get_TGS(self, spn_user, override_etype = None):
"""
Requests a TGS ticket for the specified user.
        Returns the TGS ticket, and the decrypted encTGSRepPart.
spn_user: KerberosTarget: the service user you want to get TGS for.
        override_etype: None or list of etype values (int) Used mostly for kerberoasting, will override the AP_REQ supported etype values (which are derived from the TGT) to be able to receive whatever TGS ticket
"""
#construct tgs_req
logger.debug('Constructing TGS request for user %s' % spn_user.get_formatted_pname())
now = datetime.datetime.utcnow()
kdc_req_body = {}
kdc_req_body['kdc-options'] = KDCOptions(set(['forwardable','renewable','renewable_ok', 'canonicalize']))
kdc_req_body['realm'] = spn_user.domain.upper()
kdc_req_body['sname'] = PrincipalName({'name-type': NAME_TYPE.SRV_INST.value, 'name-string': spn_user.get_principalname()})
kdc_req_body['till'] = now + datetime.timedelta(days=1)
kdc_req_body['nonce'] = secrets.randbits(31)
if override_etype:
kdc_req_body['etype'] = override_etype
else:
kdc_req_body['etype'] = [self.kerberos_cipher_type]
authenticator_data = {}
authenticator_data['authenticator-vno'] = krb5_pvno
authenticator_data['crealm'] = Realm(self.kerberos_TGT['crealm'])
authenticator_data['cname'] = self.kerberos_TGT['cname']
authenticator_data['cusec'] = now.microsecond
authenticator_data['ctime'] = now
authenticator_data_enc = self.kerberos_cipher.encrypt(self.kerberos_session_key, 7, Authenticator(authenticator_data).dump(), None)
ap_req = {}
ap_req['pvno'] = krb5_pvno
ap_req['msg-type'] = MESSAGE_TYPE.KRB_AP_REQ.value
ap_req['ap-options'] = APOptions(set())
ap_req['ticket'] = Ticket(self.kerberos_TGT['ticket'])
ap_req['authenticator'] = EncryptedData({'etype': self.kerberos_cipher_type, 'cipher': authenticator_data_enc})
pa_data_1 = {}
pa_data_1['padata-type'] = PaDataType.TGS_REQ.value
pa_data_1['padata-value'] = AP_REQ(ap_req).dump()
kdc_req = {}
kdc_req['pvno'] = krb5_pvno
kdc_req['msg-type'] = MESSAGE_TYPE.KRB_TGS_REQ.value
kdc_req['padata'] = [pa_data_1]
kdc_req['req-body'] = KDC_REQ_BODY(kdc_req_body)
req = TGS_REQ(kdc_req)
logger.debug('Constructing TGS request to server')
rep = self.ksoc.sendrecv(req.dump())
logger.debug('Got TGS reply, decrypting...')
tgs = rep.native
encTGSRepPart = EncTGSRepPart.load(self.kerberos_cipher.decrypt(self.kerberos_session_key, 8, tgs['enc-part']['cipher'])).native
key = Key(encTGSRepPart['key']['keytype'], encTGSRepPart['key']['keyvalue'])
self.ccache.add_tgs(tgs, encTGSRepPart)
logger.debug('Got valid TGS reply')
self.kerberos_TGS = tgs
return tgs, encTGSRepPart, key |
def _sql_params(sql):
"""
Identify `sql` as either SQL string or 2-tuple of SQL and params.
Same format as supported by Django's RunSQL operation for sql/reverse_sql.
"""
params = None
if isinstance(sql, (list, tuple)):
elements = len(sql)
if elements == 2:
sql, params = sql
else:
raise ValueError("Expected a 2-tuple but got %d" % elements)
return sql, params | Identify `sql` as either SQL string or 2-tuple of SQL and params.
Same format as supported by Django's RunSQL operation for sql/reverse_sql. | Below is the the instruction that describes the task:
### Input:
Identify `sql` as either SQL string or 2-tuple of SQL and params.
Same format as supported by Django's RunSQL operation for sql/reverse_sql.
### Response:
def _sql_params(sql):
"""
Identify `sql` as either SQL string or 2-tuple of SQL and params.
Same format as supported by Django's RunSQL operation for sql/reverse_sql.
"""
params = None
if isinstance(sql, (list, tuple)):
elements = len(sql)
if elements == 2:
sql, params = sql
else:
raise ValueError("Expected a 2-tuple but got %d" % elements)
return sql, params |
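Both accepted shapes, as handled above:

    >>> _sql_params('SELECT 1')
    ('SELECT 1', None)
    >>> _sql_params(('SELECT %s', [42]))
    ('SELECT %s', [42])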
def any(self, func):
"""
:param func:
:type func: (K, T) -> bool
:rtype: bool
Usage:
>>> TDict(k1=1, k2=2, k3=3).any(lambda k, v: v > 2)
True
>>> TDict(k1=1, k2=2, k3=3).any(lambda k, v: v > 3)
False
"""
return any([func(k, v) for k, v in self.items()]) | :param func:
:type func: (K, T) -> bool
:rtype: bool
Usage:
>>> TDict(k1=1, k2=2, k3=3).any(lambda k, v: v > 2)
True
>>> TDict(k1=1, k2=2, k3=3).any(lambda k, v: v > 3)
False | Below is the the instruction that describes the task:
### Input:
:param func:
:type func: (K, T) -> bool
:rtype: bool
Usage:
>>> TDict(k1=1, k2=2, k3=3).any(lambda k, v: v > 2)
True
>>> TDict(k1=1, k2=2, k3=3).any(lambda k, v: v > 3)
False
### Response:
def any(self, func):
"""
:param func:
:type func: (K, T) -> bool
:rtype: bool
Usage:
>>> TDict(k1=1, k2=2, k3=3).any(lambda k, v: v > 2)
True
>>> TDict(k1=1, k2=2, k3=3).any(lambda k, v: v > 3)
False
"""
return any([func(k, v) for k, v in self.items()]) |
def shutdown(self, channel=Channel.CHANNEL_ALL, shutdown_hardware=True):
"""
Shuts down all CAN interfaces and/or the hardware interface.
:param int channel:
CAN channel, to be used (:data:`Channel.CHANNEL_CH0`, :data:`Channel.CHANNEL_CH1` or
:data:`Channel.CHANNEL_ALL`)
:param bool shutdown_hardware: If true then the hardware interface will be closed too.
"""
# shutdown each channel if it's initialized
for _channel, is_initialized in self._ch_is_initialized.items():
if is_initialized and (_channel == channel or channel == Channel.CHANNEL_ALL or shutdown_hardware):
UcanDeinitCanEx(self._handle, _channel)
self._ch_is_initialized[_channel] = False
# shutdown hardware
if self._hw_is_initialized and shutdown_hardware:
UcanDeinitHardware(self._handle)
self._hw_is_initialized = False
self._handle = Handle(INVALID_HANDLE) | Shuts down all CAN interfaces and/or the hardware interface.
:param int channel:
CAN channel, to be used (:data:`Channel.CHANNEL_CH0`, :data:`Channel.CHANNEL_CH1` or
:data:`Channel.CHANNEL_ALL`)
:param bool shutdown_hardware: If true then the hardware interface will be closed too. | Below is the the instruction that describes the task:
### Input:
Shuts down all CAN interfaces and/or the hardware interface.
:param int channel:
CAN channel, to be used (:data:`Channel.CHANNEL_CH0`, :data:`Channel.CHANNEL_CH1` or
:data:`Channel.CHANNEL_ALL`)
:param bool shutdown_hardware: If true then the hardware interface will be closed too.
### Response:
def shutdown(self, channel=Channel.CHANNEL_ALL, shutdown_hardware=True):
"""
Shuts down all CAN interfaces and/or the hardware interface.
:param int channel:
CAN channel, to be used (:data:`Channel.CHANNEL_CH0`, :data:`Channel.CHANNEL_CH1` or
:data:`Channel.CHANNEL_ALL`)
:param bool shutdown_hardware: If true then the hardware interface will be closed too.
"""
# shutdown each channel if it's initialized
for _channel, is_initialized in self._ch_is_initialized.items():
if is_initialized and (_channel == channel or channel == Channel.CHANNEL_ALL or shutdown_hardware):
UcanDeinitCanEx(self._handle, _channel)
self._ch_is_initialized[_channel] = False
# shutdown hardware
if self._hw_is_initialized and shutdown_hardware:
UcanDeinitHardware(self._handle)
self._hw_is_initialized = False
self._handle = Handle(INVALID_HANDLE) |
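A usage sketch, assuming interface is an already-initialised instance of the surrounding class: shut down a single CAN channel while keeping the hardware handle open:

    >>> interface.shutdown(channel=Channel.CHANNEL_CH0, shutdown_hardware=False)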
def link(self):
"""Ensure that an appropriate link to the cellpy-files exists for
each cell.
The experiment will then contain a CellpyData object for each cell
(in the cell_data_frames attribute) with only the step-table stored.
Remark that running update persists the summary frames instead (or
everything in case you specify all_in_memory=True).
This might be considered "a strange and unexpected behaviour". Sorry
        for that (but the authors of this package are also a bit strange...).
"""
logging.info("[estblishing links]")
logging.debug("checking and establishing link to data")
cell_data_frames = dict()
counter = 0
errors = []
try:
for indx, row in self.journal.pages.iterrows():
counter += 1
l_txt = "starting to process file # %i (index=%s)" % (counter, indx)
logging.debug(l_txt)
logging.info(f"linking cellpy-file: {row.cellpy_file_names}")
if not os.path.isfile(row.cellpy_file_names):
logging.error("File does not exist")
raise IOError
cell_data_frames[indx] = cellreader.CellpyData(initialize=True)
step_table = helper.look_up_and_get(
row.cellpy_file_names,
"step_table"
)
cell_data_frames[indx].dataset.step_table = step_table
self.cell_data_frames = cell_data_frames
except IOError as e:
logging.warning(e)
e_txt = "links not established - try update"
logging.warning(e_txt)
errors.append(e_txt)
self.errors["link"] = errors | Ensure that an appropriate link to the cellpy-files exists for
each cell.
The experiment will then contain a CellpyData object for each cell
(in the cell_data_frames attribute) with only the step-table stored.
Remark that running update persists the summary frames instead (or
everything in case you specify all_in_memory=True).
This might be considered "a strange and unexpected behaviour". Sorry
        for that (but the authors of this package are also a bit strange...). | Below is the the instruction that describes the task:
### Input:
Ensure that an appropriate link to the cellpy-files exists for
each cell.
The experiment will then contain a CellpyData object for each cell
(in the cell_data_frames attribute) with only the step-table stored.
Remark that running update persists the summary frames instead (or
everything in case you specify all_in_memory=True).
This might be considered "a strange and unexpected behaviour". Sorry
        for that (but the authors of this package are also a bit strange...).
### Response:
def link(self):
"""Ensure that an appropriate link to the cellpy-files exists for
each cell.
The experiment will then contain a CellpyData object for each cell
(in the cell_data_frames attribute) with only the step-table stored.
Remark that running update persists the summary frames instead (or
everything in case you specify all_in_memory=True).
This might be considered "a strange and unexpected behaviour". Sorry
        for that (but the authors of this package are also a bit strange...).
"""
logging.info("[estblishing links]")
logging.debug("checking and establishing link to data")
cell_data_frames = dict()
counter = 0
errors = []
try:
for indx, row in self.journal.pages.iterrows():
counter += 1
l_txt = "starting to process file # %i (index=%s)" % (counter, indx)
logging.debug(l_txt)
logging.info(f"linking cellpy-file: {row.cellpy_file_names}")
if not os.path.isfile(row.cellpy_file_names):
logging.error("File does not exist")
raise IOError
cell_data_frames[indx] = cellreader.CellpyData(initialize=True)
step_table = helper.look_up_and_get(
row.cellpy_file_names,
"step_table"
)
cell_data_frames[indx].dataset.step_table = step_table
self.cell_data_frames = cell_data_frames
except IOError as e:
logging.warning(e)
e_txt = "links not established - try update"
logging.warning(e_txt)
errors.append(e_txt)
self.errors["link"] = errors |
def sites_at_edges( self ):
"""
Finds the six sites with the maximum and minimum coordinates along x, y, and z.
Args:
None
Returns:
(List(List)): In the order [ +x, -x, +y, -y, +z, -z ]
"""
min_x = min( [ s.r[0] for s in self.sites ] )
max_x = max( [ s.r[0] for s in self.sites ] )
min_y = min( [ s.r[1] for s in self.sites ] )
max_y = max( [ s.r[1] for s in self.sites ] )
min_z = min( [ s.r[2] for s in self.sites ] )
max_z = max( [ s.r[2] for s in self.sites ] )
x_max = [ s for s in self.sites if s.r[0] == min_x ]
x_min = [ s for s in self.sites if s.r[0] == max_x ]
y_max = [ s for s in self.sites if s.r[1] == min_y ]
y_min = [ s for s in self.sites if s.r[1] == max_y ]
z_max = [ s for s in self.sites if s.r[2] == min_z ]
z_min = [ s for s in self.sites if s.r[2] == max_z ]
return ( x_max, x_min, y_max, y_min, z_max, z_min ) | Finds the six sites with the maximum and minimum coordinates along x, y, and z.
Args:
None
Returns:
(List(List)): In the order [ +x, -x, +y, -y, +z, -z ] | Below is the the instruction that describes the task:
### Input:
Finds the six sites with the maximum and minimum coordinates along x, y, and z.
Args:
None
Returns:
(List(List)): In the order [ +x, -x, +y, -y, +z, -z ]
### Response:
def sites_at_edges( self ):
"""
Finds the six sites with the maximum and minimum coordinates along x, y, and z.
Args:
None
Returns:
(List(List)): In the order [ +x, -x, +y, -y, +z, -z ]
"""
min_x = min( [ s.r[0] for s in self.sites ] )
max_x = max( [ s.r[0] for s in self.sites ] )
min_y = min( [ s.r[1] for s in self.sites ] )
max_y = max( [ s.r[1] for s in self.sites ] )
min_z = min( [ s.r[2] for s in self.sites ] )
max_z = max( [ s.r[2] for s in self.sites ] )
x_max = [ s for s in self.sites if s.r[0] == min_x ]
x_min = [ s for s in self.sites if s.r[0] == max_x ]
y_max = [ s for s in self.sites if s.r[1] == min_y ]
y_min = [ s for s in self.sites if s.r[1] == max_y ]
z_max = [ s for s in self.sites if s.r[2] == min_z ]
z_min = [ s for s in self.sites if s.r[2] == max_z ]
return ( x_max, x_min, y_max, y_min, z_max, z_min ) |
def send_ip_route_add(
self, prefix, nexthops=None,
safi=packet_safi.UNICAST, flags=zebra.ZEBRA_FLAG_INTERNAL,
distance=None, metric=None, mtu=None, tag=None):
"""
Sends ZEBRA_IPV4/v6_ROUTE_ADD message to Zebra daemon.
:param prefix: IPv4/v6 Prefix to advertise.
:param nexthops: List of nexthop addresses.
:param safi: SAFI to advertise.
:param flags: Message flags to advertise. See "ZEBRA_FLAG_*".
:param distance: (Optional) Distance to advertise.
:param metric: (Optional) Metric to advertise.
:param mtu: (Optional) MTU size to advertise.
:param tag: (Optional) TAG information to advertise.
:return: Zebra message instance to be sent. None if failed.
"""
try:
return self._send_ip_route_impl(
prefix=prefix, nexthops=nexthops, safi=safi, flags=flags,
distance=distance, metric=metric, mtu=mtu, tag=tag,
is_withdraw=False)
except ValueError as e:
self.logger.exception(
'Cannot send IP route add message: %s', e)
return None | Sends ZEBRA_IPV4/v6_ROUTE_ADD message to Zebra daemon.
:param prefix: IPv4/v6 Prefix to advertise.
:param nexthops: List of nexthop addresses.
:param safi: SAFI to advertise.
:param flags: Message flags to advertise. See "ZEBRA_FLAG_*".
:param distance: (Optional) Distance to advertise.
:param metric: (Optional) Metric to advertise.
:param mtu: (Optional) MTU size to advertise.
:param tag: (Optional) TAG information to advertise.
:return: Zebra message instance to be sent. None if failed. | Below is the the instruction that describes the task:
### Input:
Sends ZEBRA_IPV4/v6_ROUTE_ADD message to Zebra daemon.
:param prefix: IPv4/v6 Prefix to advertise.
:param nexthops: List of nexthop addresses.
:param safi: SAFI to advertise.
:param flags: Message flags to advertise. See "ZEBRA_FLAG_*".
:param distance: (Optional) Distance to advertise.
:param metric: (Optional) Metric to advertise.
:param mtu: (Optional) MTU size to advertise.
:param tag: (Optional) TAG information to advertise.
:return: Zebra message instance to be sent. None if failed.
### Response:
def send_ip_route_add(
self, prefix, nexthops=None,
safi=packet_safi.UNICAST, flags=zebra.ZEBRA_FLAG_INTERNAL,
distance=None, metric=None, mtu=None, tag=None):
"""
Sends ZEBRA_IPV4/v6_ROUTE_ADD message to Zebra daemon.
:param prefix: IPv4/v6 Prefix to advertise.
:param nexthops: List of nexthop addresses.
:param safi: SAFI to advertise.
:param flags: Message flags to advertise. See "ZEBRA_FLAG_*".
:param distance: (Optional) Distance to advertise.
:param metric: (Optional) Metric to advertise.
:param mtu: (Optional) MTU size to advertise.
:param tag: (Optional) TAG information to advertise.
:return: Zebra message instance to be sent. None if failed.
"""
try:
return self._send_ip_route_impl(
prefix=prefix, nexthops=nexthops, safi=safi, flags=flags,
distance=distance, metric=metric, mtu=mtu, tag=tag,
is_withdraw=False)
except ValueError as e:
self.logger.exception(
'Cannot send IP route add message: %s', e)
return None |
def list_formats(self, node, path=(), formats=None):
"""
Lists the object formats in sorted order.
:param node: Root node to start listing the formats from.
:type node: AbstractCompositeNode
:param path: Walked paths.
:type path: tuple
:param formats: Formats.
:type formats: list
:return: Formats.
:rtype: list
"""
if formats == None:
formats = []
for child in node.children:
self.list_formats(child, path + (child.name,), formats)
path and formats.append(".".join(path))
return sorted(formats) | Lists the object formats in sorted order.
:param node: Root node to start listing the formats from.
:type node: AbstractCompositeNode
:param path: Walked paths.
:type path: tuple
:param formats: Formats.
:type formats: list
:return: Formats.
:rtype: list | Below is the the instruction that describes the task:
### Input:
Lists the object formats in sorted order.
:param node: Root node to start listing the formats from.
:type node: AbstractCompositeNode
:param path: Walked paths.
:type path: tuple
:param formats: Formats.
:type formats: list
:return: Formats.
:rtype: list
### Response:
def list_formats(self, node, path=(), formats=None):
"""
Lists the object formats in sorted order.
:param node: Root node to start listing the formats from.
:type node: AbstractCompositeNode
:param path: Walked paths.
:type path: tuple
:param formats: Formats.
:type formats: list
:return: Formats.
:rtype: list
"""
if formats == None:
formats = []
for child in node.children:
self.list_formats(child, path + (child.name,), formats)
path and formats.append(".".join(path))
return sorted(formats) |
def linkify_h_by_h(self):
"""Link hosts with their parents
:return: None
"""
for host in self:
# The new member list
new_parents = []
for parent in getattr(host, 'parents', []):
parent = parent.strip()
o_parent = self.find_by_name(parent)
if o_parent is not None:
new_parents.append(o_parent.uuid)
else:
err = "the parent '%s' for the host '%s' is unknown!" % (parent,
host.get_name())
self.add_error(err)
# We find the id, we replace the names
host.parents = new_parents | Link hosts with their parents
:return: None | Below is the the instruction that describes the task:
### Input:
Link hosts with their parents
:return: None
### Response:
def linkify_h_by_h(self):
"""Link hosts with their parents
:return: None
"""
for host in self:
# The new member list
new_parents = []
for parent in getattr(host, 'parents', []):
parent = parent.strip()
o_parent = self.find_by_name(parent)
if o_parent is not None:
new_parents.append(o_parent.uuid)
else:
err = "the parent '%s' for the host '%s' is unknown!" % (parent,
host.get_name())
self.add_error(err)
# We find the id, we replace the names
host.parents = new_parents |
def sortlevel(self, level=0, ascending=True, sort_remaining=True):
"""
Sort MultiIndex at the requested level. The result will respect the
original ordering of the associated factor at that level.
Parameters
----------
level : list-like, int or str, default 0
If a string is given, must be a name of the level
If list-like must be names or ints of levels.
ascending : boolean, default True
False to sort in descending order
Can also be a list to specify a directed ordering
sort_remaining : sort by the remaining levels after level
Returns
-------
sorted_index : pd.MultiIndex
Resulting index.
indexer : np.ndarray
Indices of output values in original index.
"""
from pandas.core.sorting import indexer_from_factorized
if isinstance(level, (str, int)):
level = [level]
level = [self._get_level_number(lev) for lev in level]
sortorder = None
# we have a directed ordering via ascending
if isinstance(ascending, list):
if not len(level) == len(ascending):
raise ValueError("level must have same length as ascending")
from pandas.core.sorting import lexsort_indexer
indexer = lexsort_indexer([self.codes[lev] for lev in level],
orders=ascending)
# level ordering
else:
codes = list(self.codes)
shape = list(self.levshape)
# partition codes and shape
primary = tuple(codes[lev] for lev in level)
primshp = tuple(shape[lev] for lev in level)
# Reverse sorted to retain the order of
# smaller indices that needs to be removed
for lev in sorted(level, reverse=True):
codes.pop(lev)
shape.pop(lev)
if sort_remaining:
primary += primary + tuple(codes)
primshp += primshp + tuple(shape)
else:
sortorder = level[0]
indexer = indexer_from_factorized(primary, primshp,
compress=False)
if not ascending:
indexer = indexer[::-1]
indexer = ensure_platform_int(indexer)
new_codes = [level_codes.take(indexer) for level_codes in self.codes]
new_index = MultiIndex(codes=new_codes, levels=self.levels,
names=self.names, sortorder=sortorder,
verify_integrity=False)
return new_index, indexer | Sort MultiIndex at the requested level. The result will respect the
original ordering of the associated factor at that level.
Parameters
----------
level : list-like, int or str, default 0
If a string is given, must be a name of the level
If list-like must be names or ints of levels.
ascending : boolean, default True
False to sort in descending order
Can also be a list to specify a directed ordering
sort_remaining : sort by the remaining levels after level
Returns
-------
sorted_index : pd.MultiIndex
Resulting index.
indexer : np.ndarray
Indices of output values in original index. | Below is the the instruction that describes the task:
### Input:
Sort MultiIndex at the requested level. The result will respect the
original ordering of the associated factor at that level.
Parameters
----------
level : list-like, int or str, default 0
If a string is given, must be a name of the level
If list-like must be names or ints of levels.
ascending : boolean, default True
False to sort in descending order
Can also be a list to specify a directed ordering
sort_remaining : sort by the remaining levels after level
Returns
-------
sorted_index : pd.MultiIndex
Resulting index.
indexer : np.ndarray
Indices of output values in original index.
### Response:
def sortlevel(self, level=0, ascending=True, sort_remaining=True):
"""
Sort MultiIndex at the requested level. The result will respect the
original ordering of the associated factor at that level.
Parameters
----------
level : list-like, int or str, default 0
If a string is given, must be a name of the level
If list-like must be names or ints of levels.
ascending : boolean, default True
False to sort in descending order
Can also be a list to specify a directed ordering
sort_remaining : sort by the remaining levels after level
Returns
-------
sorted_index : pd.MultiIndex
Resulting index.
indexer : np.ndarray
Indices of output values in original index.
"""
from pandas.core.sorting import indexer_from_factorized
if isinstance(level, (str, int)):
level = [level]
level = [self._get_level_number(lev) for lev in level]
sortorder = None
# we have a directed ordering via ascending
if isinstance(ascending, list):
if not len(level) == len(ascending):
raise ValueError("level must have same length as ascending")
from pandas.core.sorting import lexsort_indexer
indexer = lexsort_indexer([self.codes[lev] for lev in level],
orders=ascending)
# level ordering
else:
codes = list(self.codes)
shape = list(self.levshape)
# partition codes and shape
primary = tuple(codes[lev] for lev in level)
primshp = tuple(shape[lev] for lev in level)
# Reverse sorted to retain the order of
# smaller indices that needs to be removed
for lev in sorted(level, reverse=True):
codes.pop(lev)
shape.pop(lev)
if sort_remaining:
primary += primary + tuple(codes)
primshp += primshp + tuple(shape)
else:
sortorder = level[0]
indexer = indexer_from_factorized(primary, primshp,
compress=False)
if not ascending:
indexer = indexer[::-1]
indexer = ensure_platform_int(indexer)
new_codes = [level_codes.take(indexer) for level_codes in self.codes]
new_index = MultiIndex(codes=new_codes, levels=self.levels,
names=self.names, sortorder=sortorder,
verify_integrity=False)
return new_index, indexer |
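A small usage sketch (the level can be given by name or position):

    >>> import pandas as pd
    >>> mi = pd.MultiIndex.from_arrays([[0, 0, 1, 1], [2, 1, 2, 1]], names=['a', 'b'])
    >>> new_index, indexer = mi.sortlevel(level='b')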
def zlines(f = None, sep = "\0", osep = None, size = 8192): # {{{1
"""File iterator that uses alternative line terminators."""
if f is None: f = sys.stdin
if osep is None: osep = sep
buf = ""
while True:
chars = f.read(size)
if not chars: break
buf += chars; lines = buf.split(sep); buf = lines.pop()
for line in lines: yield line + osep
if buf: yield buf | File iterator that uses alternative line terminators. | Below is the the instruction that describes the task:
### Input:
File iterator that uses alternative line terminators.
### Response:
def zlines(f = None, sep = "\0", osep = None, size = 8192): # {{{1
"""File iterator that uses alternative line terminators."""
if f is None: f = sys.stdin
if osep is None: osep = sep
buf = ""
while True:
chars = f.read(size)
if not chars: break
buf += chars; lines = buf.split(sep); buf = lines.pop()
for line in lines: yield line + osep
if buf: yield buf |
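A usage sketch with NUL-separated input such as the output of find -print0 (files.bin is a hypothetical file of NUL-terminated paths); osep='\n' converts each record terminator to a newline:

    >>> with open('files.bin') as f:
    ...     for line in zlines(f, sep='\0', osep='\n'):
    ...         print(line, end='')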
def _format_data(self, name=None):
"""
Return the formatted data as a unicode string.
"""
# do we want to justify (only do so for non-objects)
is_justify = not (self.inferred_type in ('string', 'unicode') or
(self.inferred_type == 'categorical' and
is_object_dtype(self.categories)))
return format_object_summary(self, self._formatter_func,
is_justify=is_justify, name=name) | Return the formatted data as a unicode string. | Below is the the instruction that describes the task:
### Input:
Return the formatted data as a unicode string.
### Response:
def _format_data(self, name=None):
"""
Return the formatted data as a unicode string.
"""
# do we want to justify (only do so for non-objects)
is_justify = not (self.inferred_type in ('string', 'unicode') or
(self.inferred_type == 'categorical' and
is_object_dtype(self.categories)))
return format_object_summary(self, self._formatter_func,
is_justify=is_justify, name=name) |
def desc(self, table):
'''Returns table description
>>> yql.desc('geo.countries')
>>>
'''
query = "desc {0}".format(table)
response = self.raw_query(query)
return response | Returns table description
>>> yql.desc('geo.countries')
>>> | Below is the the instruction that describes the task:
### Input:
Returns table description
>>> yql.desc('geo.countries')
>>>
### Response:
def desc(self, table):
'''Returns table description
>>> yql.desc('geo.countries')
>>>
'''
query = "desc {0}".format(table)
response = self.raw_query(query)
return response |
def add_model_name_to_payload(cls, payload):
"""
Checks whether the model name in question is in the payload. If not, the entire payload
is set as a value of a key by the name of the model. This method is useful when some
server-side Rails API calls expect the parameters to include the parameterized model name.
For example, server-side endpoints that handle the updating of a biosample record or the
    creation of a new biosample record will expect the payload to be of the form::
{ "biosample": {
"name": "new biosample",
"donor": 3,
...
}
}
Args:
payload: `dict`. The data to send in an HTTP request.
Returns:
`dict`.
"""
if not cls.MODEL_NAME in payload:
payload = {cls.MODEL_NAME: payload}
return payload | Checks whether the model name in question is in the payload. If not, the entire payload
is set as a value of a key by the name of the model. This method is useful when some
server-side Rails API calls expect the parameters to include the parameterized model name.
For example, server-side endpoints that handle the updating of a biosample record or the
    creation of a new biosample record will expect the payload to be of the form::
{ "biosample": {
"name": "new biosample",
"donor": 3,
...
}
}
Args:
payload: `dict`. The data to send in an HTTP request.
Returns:
`dict`. | Below is the the instruction that describes the task:
### Input:
Checks whether the model name in question is in the payload. If not, the entire payload
is set as a value of a key by the name of the model. This method is useful when some
server-side Rails API calls expect the parameters to include the parameterized model name.
For example, server-side endpoints that handle the updating of a biosample record or the
creation of a new biosample record will expect the payload to be of the form::
{ "biosample": {
"name": "new biosample",
"donor": 3,
...
}
}
Args:
payload: `dict`. The data to send in an HTTP request.
Returns:
`dict`.
### Response:
def add_model_name_to_payload(cls, payload):
"""
Checks whether the model name in question is in the payload. If not, the entire payload
is set as a value of a key by the name of the model. This method is useful when some
server-side Rails API calls expect the parameters to include the parameterized model name.
For example, server-side endpoints that handle the updating of a biosample record or the
creation of a new biosample record will expect the payload to be of the form::
{ "biosample": {
"name": "new biosample",
"donor": 3,
...
}
}
Args:
payload: `dict`. The data to send in an HTTP request.
Returns:
`dict`.
"""
if not cls.MODEL_NAME in payload:
payload = {cls.MODEL_NAME: payload}
return payload |
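A self-contained sketch of the wrapping behaviour described above; the `Biosample` class and the "biosample" model name are illustrative assumptions, not part of the original library.
class Biosample:
    MODEL_NAME = "biosample"   # hypothetical parameterized Rails model name

    @classmethod
    def add_model_name_to_payload(cls, payload):
        # same logic as the method above, repeated here so the sketch runs standalone
        if cls.MODEL_NAME not in payload:
            payload = {cls.MODEL_NAME: payload}
        return payload

print(Biosample.add_model_name_to_payload({"name": "new biosample", "donor": 3}))
# -> {'biosample': {'name': 'new biosample', 'donor': 3}}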
def _parse_config(self, requires_cfg=True):
"""Parse the configuration file, if one is configured, and add it to
the `Bison` state.
Args:
requires_cfg (bool): Specify whether or not parsing should fail
if a config file is not found. (default: True)
"""
if len(self.config_paths) > 0:
try:
self._find_config()
except BisonError:
if not requires_cfg:
return
raise
try:
with open(self.config_file, 'r') as f:
parsed = self._fmt_to_parser[self.config_format](f)
except Exception as e:
raise BisonError(
'Failed to parse config file: {}'.format(self.config_file)
) from e
# the configuration changes, so we invalidate the cached config
self._full_config = None
self._config = parsed | Parse the configuration file, if one is configured, and add it to
the `Bison` state.
Args:
requires_cfg (bool): Specify whether or not parsing should fail
if a config file is not found. (default: True) | Below is the the instruction that describes the task:
### Input:
Parse the configuration file, if one is configured, and add it to
the `Bison` state.
Args:
requires_cfg (bool): Specify whether or not parsing should fail
if a config file is not found. (default: True)
### Response:
def _parse_config(self, requires_cfg=True):
"""Parse the configuration file, if one is configured, and add it to
the `Bison` state.
Args:
requires_cfg (bool): Specify whether or not parsing should fail
if a config file is not found. (default: True)
"""
if len(self.config_paths) > 0:
try:
self._find_config()
except BisonError:
if not requires_cfg:
return
raise
try:
with open(self.config_file, 'r') as f:
parsed = self._fmt_to_parser[self.config_format](f)
except Exception as e:
raise BisonError(
'Failed to parse config file: {}'.format(self.config_file)
) from e
# the configuration changes, so we invalidate the cached config
self._full_config = None
self._config = parsed |
def cbox(i, gray=False, spectrum="alternate", reverse=False, **kwargs):
"""
Access a modular list of colors for plotting.
Defines colours using rgb.
:param i: (int), index to access color
:param gray: (bool), if true then color is returned as a grayscale value
:param spectrum: (str), choice of spectrum to use
:param reverse: (bool), reverses the color order
:param kwargs:
:return:
"""
CD = {}
CD['dark blue'] = (0.0, 0.0, 0.55) # 0
CD['greenish blue'] = (0.12, .8, .8) # 10
CD['dark green'] = (0.15, 0.35, 0.0) # 1
CD['yellow'] = (1.0, 1.0, 0.0) # 6
CD['orangish yellow'] = (1.0, 0.9, 0.1) # 6
CD['dark red'] = (0.73, 0.0, 0.0) # 2
CD['dark purple'] = (0.8, 0.0, 0.8) # 3
CD['light green'] = (0.49, 0.64, 0.0) # 4
CD['orange'] = (1.0, 0.5, 0.0) # 5
CD['light blue'] = (0.5, 0.85, 1.0) # 6
CD['pink'] = (1.0, 0.8, 0.8) # 7
CD['brown'] = (0.5, 0.3, 0.0) # 8
CD['red'] = (0.9, 0.0, 0.0) # 9
CD['bluey purple'] = (0.8, 0.85, 1.0) # 12
CD['dark gray'] = (0.25, 0.25, 0.25) #
CD['mid gray'] = (0.5, 0.5, 0.5) #
CD['light gray'] = (0.75, 0.75, 0.75) #
CD['dark grey'] = (0.25, 0.25, 0.25) #
CD['mid grey'] = (0.5, 0.5, 0.5) #
CD['light grey'] = (0.75, 0.75, 0.75) #
CD['black5'] = (0.05, 0.05, 0.05) #
CD['black'] = (0.0, 0.0, 0.0) #
CD['white'] = (1.0, 1.0, 1.0) #
if isinstance(i, int):
i = i
elif isinstance(i, float):
i = int(i)
elif isinstance(i, str):
dat = CD[i]
return dat
if spectrum == "alternate":
order = ['dark blue', 'orange', 'light blue', 'dark purple', 'dark green',
'bluey purple', 'dark red', 'light green', 'pink', 'brown',
'red', 'yellow', 'greenish blue', 'dark gray',
'mid gray', 'light gray']
elif spectrum == "lighten":
order = ['dark blue', 'dark green', 'dark red', 'brown',
'light green', 'orange', 'light blue', 'pink', 'dark purple',
'red', 'greenish blue', 'bluey purple', 'yellow',
'dark gray', 'mid gray', 'light gray']
elif spectrum == 'dots':
order = ['dark blue', 'yellow', 'light blue', 'dark purple', 'dark green', 'orange',
'bluey purple', 'dark red', 'light green', 'pink', 'brown',
'red', 'greenish blue', 'dark gray',
'mid gray', 'light gray']
elif spectrum == "traffic":
order = ['dark green', 'orange', 'red']
else: # warnings
order = ['light green', 'orangish yellow', 'orange', 'red', 'black', 'dark gray']
index = i % len(order)
if reverse:
index = len(order) - index - 1
rgb = CD[order[index]]
gray_value = 0.299 * rgb[0] + 0.587 * rgb[1] + 0.114 * rgb[2] # calculate the gray scale value
if gray:
return gray_value, gray_value, gray_value
return rgb | Access a modular list of colors for plotting.
Defines colours using rgb.
:param i: (int), index to access color
:param gray: (bool), if true then color is returned as a grayscale value
:param spectrum: (str), choice of spectrum to use
:param reverse: (bool), reverses the color order
:param kwargs:
:return: | Below is the the instruction that describes the task:
### Input:
Access a modular list of colors for plotting.
Defines colours using rgb.
:param i: (int), index to access color
:param gray: (bool), if true then color is returned as a grayscale value
:param spectrum: (str), choice of spectrum to use
:param reverse: (bool), reverses the color order
:param kwargs:
:return:
### Response:
def cbox(i, gray=False, spectrum="alternate", reverse=False, **kwargs):
"""
Access a modular list of colors for plotting.
Defines colours using rgb.
:param i: (int), index to access color
:param gray: (bool), if true then color is returned as a grayscale value
:param spectrum: (str), choice of spectrum to use
:param reverse: (bool), reverses the color order
:param kwargs:
:return:
"""
CD = {}
CD['dark blue'] = (0.0, 0.0, 0.55) # 0
CD['greenish blue'] = (0.12, .8, .8) # 10
CD['dark green'] = (0.15, 0.35, 0.0) # 1
CD['yellow'] = (1.0, 1.0, 0.0) # 6
CD['orangish yellow'] = (1.0, 0.9, 0.1) # 6
CD['dark red'] = (0.73, 0.0, 0.0) # 2
CD['dark purple'] = (0.8, 0.0, 0.8) # 3
CD['light green'] = (0.49, 0.64, 0.0) # 4
CD['orange'] = (1.0, 0.5, 0.0) # 5
CD['light blue'] = (0.5, 0.85, 1.0) # 6
CD['pink'] = (1.0, 0.8, 0.8) # 7
CD['brown'] = (0.5, 0.3, 0.0) # 8
CD['red'] = (0.9, 0.0, 0.0) # 9
CD['bluey purple'] = (0.8, 0.85, 1.0) # 12
CD['dark gray'] = (0.25, 0.25, 0.25) #
CD['mid gray'] = (0.5, 0.5, 0.5) #
CD['light gray'] = (0.75, 0.75, 0.75) #
CD['dark grey'] = (0.25, 0.25, 0.25) #
CD['mid grey'] = (0.5, 0.5, 0.5) #
CD['light grey'] = (0.75, 0.75, 0.75) #
CD['black5'] = (0.05, 0.05, 0.05) #
CD['black'] = (0.0, 0.0, 0.0) #
CD['white'] = (1.0, 1.0, 1.0) #
if isinstance(i, int):
i = i
elif isinstance(i, float):
i = int(i)
elif isinstance(i, str):
dat = CD[i]
return dat
if spectrum == "alternate":
order = ['dark blue', 'orange', 'light blue', 'dark purple', 'dark green',
'bluey purple', 'dark red', 'light green', 'pink', 'brown',
'red', 'yellow', 'greenish blue', 'dark gray',
'mid gray', 'light gray']
elif spectrum == "lighten":
order = ['dark blue', 'dark green', 'dark red', 'brown',
'light green', 'orange', 'light blue', 'pink', 'dark purple',
'red', 'greenish blue', 'bluey purple', 'yellow',
'dark gray', 'mid gray', 'light gray']
elif spectrum == 'dots':
order = ['dark blue', 'yellow', 'light blue', 'dark purple', 'dark green', 'orange',
'bluey purple', 'dark red', 'light green', 'pink', 'brown',
'red', 'greenish blue', 'dark gray',
'mid gray', 'light gray']
elif spectrum == "traffic":
order = ['dark green', 'orange', 'red']
else: # warnings
order = ['light green', 'orangish yellow', 'orange', 'red', 'black', 'dark gray']
index = i % len(order)
if reverse:
index = len(order) - index - 1
rgb = CD[order[index]]
gray_value = 0.299 * rgb[0] + 0.587 * rgb[1] + 0.114 * rgb[2] # calculate the gray scale value
if gray:
return gray_value, gray_value, gray_value
return rgb |
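A short plotting sketch (assuming matplotlib and numpy are available and cbox is importable) that cycles through the modular colour list above:
import numpy as np
import matplotlib.pyplot as plt

x = np.linspace(0, 2 * np.pi, 200)
for i in range(4):
    # cbox returns an (r, g, b) tuple, which matplotlib accepts directly as a color
    plt.plot(x, np.sin(x + i), color=cbox(i, spectrum="alternate"), label="line %d" % i)
plt.legend()
plt.show()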
def get_github_cache(self, kind, key_):
""" Get cache data for items of _type using key_ as the cache dict key """
cache = {}
res_size = 100 # best size?
from_ = 0
index_github = "github/" + kind
url = self.elastic.url + "/" + index_github
url += "/_search" + "?" + "size=%i" % res_size
r = self.requests.get(url)
type_items = r.json()
if 'hits' not in type_items:
logger.info("No github %s data in ES" % (kind))
else:
while len(type_items['hits']['hits']) > 0:
for hit in type_items['hits']['hits']:
item = hit['_source']
cache[item[key_]] = item
from_ += res_size
r = self.requests.get(url + "&from=%i" % from_)
type_items = r.json()
if 'hits' not in type_items:
break
return cache | Get cache data for items of _type using key_ as the cache dict key | Below is the the instruction that describes the task:
### Input:
Get cache data for items of _type using key_ as the cache dict key
### Response:
def get_github_cache(self, kind, key_):
""" Get cache data for items of _type using key_ as the cache dict key """
cache = {}
res_size = 100 # best size?
from_ = 0
index_github = "github/" + kind
url = self.elastic.url + "/" + index_github
url += "/_search" + "?" + "size=%i" % res_size
r = self.requests.get(url)
type_items = r.json()
if 'hits' not in type_items:
logger.info("No github %s data in ES" % (kind))
else:
while len(type_items['hits']['hits']) > 0:
for hit in type_items['hits']['hits']:
item = hit['_source']
cache[item[key_]] = item
from_ += res_size
r = self.requests.get(url + "&from=%i" % from_)
type_items = r.json()
if 'hits' not in type_items:
break
return cache |
def match_config(regex):
'''
Display the current values of all configuration variables whose
names match the given regular expression.
.. versionadded:: 2016.11.0
.. code-block:: bash
salt '*' trafficserver.match_config regex
'''
if _TRAFFICCTL:
cmd = _traffic_ctl('config', 'match', regex)
else:
cmd = _traffic_line('-m', regex)
return _subprocess(cmd) | Display the current values of all configuration variables whose
names match the given regular expression.
.. versionadded:: 2016.11.0
.. code-block:: bash
salt '*' trafficserver.match_config regex | Below is the the instruction that describes the task:
### Input:
Display the current values of all configuration variables whose
names match the given regular expression.
.. versionadded:: 2016.11.0
.. code-block:: bash
salt '*' trafficserver.match_config regex
### Response:
def match_config(regex):
'''
Display the current values of all configuration variables whose
names match the given regular expression.
.. versionadded:: 2016.11.0
.. code-block:: bash
salt '*' trafficserver.match_config regex
'''
if _TRAFFICCTL:
cmd = _traffic_ctl('config', 'match', regex)
else:
cmd = _traffic_line('-m', regex)
return _subprocess(cmd) |
def main():
"""
NAME
k15_magic.py
DESCRIPTION
converts .k15 format data to magic_measurements format.
assumes Jelinek Kappabridge measurement scheme
SYNTAX
k15_magic.py [-h] [command line options]
OPTIONS
-h prints help message and quits
-DM DATA_MODEL: specify data model 2 or 3 (default 3)
-f KFILE: specify .k15 format input file
-F MFILE: specify measurement output file
-Fsa SFILE, specify sample file for output
-Fa AFILE, specify specimen file for output [rmag_anisotropy for data model 2 only]
#-ins INST: specify instrument that measurements were made on # not implemented
-spc NUM: specify number of digits for specimen ID, default is 0
-ncn NCOM: specify naming convention (default is #1)
Sample naming convention:
[1] XXXXY: where XXXX is an arbitrary length site designation and Y
is the single character sample designation. e.g., TG001a is the
first sample from site TG001. [default]
[2] XXXX-YY: YY sample from site XXXX (XXX, YY of arbitrary length)
[3] XXXX.YY: YY sample from site XXXX (XXX, YY of arbitrary length)
[4-Z] XXXXYYY: YYY is sample designation with Z characters from site XXX
[5] site name = sample name
[6] site name entered in site_name column in the orient.txt format input file -- NOT CURRENTLY SUPPORTED
[7-Z] [XXX]YYY: XXX is site designation with Z characters from samples XXXYYY
NB: all others you will have to either customize your
self or e-mail [email protected] for help.
DEFAULTS
MFILE: measurements.txt
SFILE: samples.txt
AFILE: specimens.txt
INPUT
name [az,pl,strike,dip], followed by
3 rows of 5 measurements for each specimen
"""
args = sys.argv
if '-h' in args:
print(do_help())
sys.exit()
# def k15_magic(k15file, specnum=0, sample_naming_con='1', er_location_name="unknown", measfile='magic_measurements.txt', sampfile="er_samples.txt", aniso_outfile='rmag_anisotropy.txt', result_file="rmag_results.txt", input_dir_path='.', output_dir_path='.'):
dataframe = extractor.command_line_dataframe([['f', True, ''], ['F', False, 'measurements.txt'], ['Fsa', False, 'samples.txt'], ['Fa', False, 'specimens.txt'], [
'Fr', False, 'rmag_results.txt'], ['spc', False, 0], ['ncn', False, '1'], ['loc', False, 'unknown'], ['WD', False, '.'], ['ID', False, '.'], ['DM', False, 3]])
checked_args = extractor.extract_and_check_args(args, dataframe)
k15file, measfile, sampfile, aniso_outfile, result_file, specnum, sample_naming_con, location_name, output_dir_path, input_dir_path, data_model_num = extractor.get_vars(
['f', 'F', 'Fsa', 'Fa', 'Fr', 'spc', 'ncn', 'loc', 'WD', 'ID', 'DM'], checked_args)
program_ran, error_message = convert.k15(k15file, specnum=specnum, sample_naming_con=sample_naming_con, location=location_name, meas_file=measfile,
samp_file=sampfile, aniso_outfile=aniso_outfile, result_file=result_file, input_dir_path=input_dir_path, dir_path=output_dir_path, data_model_num=data_model_num) | NAME
k15_magic.py
DESCRIPTION
converts .k15 format data to magic_measurements format.
assumes Jelinek Kappabridge measurement scheme
SYNTAX
k15_magic.py [-h] [command line options]
OPTIONS
-h prints help message and quits
-DM DATA_MODEL: specify data model 2 or 3 (default 3)
-f KFILE: specify .k15 format input file
-F MFILE: specify measurement output file
-Fsa SFILE, specify sample file for output
-Fa AFILE, specify specimen file for output [rmag_anisotropy for data model 2 only]
#-ins INST: specify instrument that measurements were made on # not implemented
-spc NUM: specify number of digits for specimen ID, default is 0
-ncn NCOM: specify naming convention (default is #1)
Sample naming convention:
[1] XXXXY: where XXXX is an arbitrary length site designation and Y
is the single character sample designation. e.g., TG001a is the
first sample from site TG001. [default]
[2] XXXX-YY: YY sample from site XXXX (XXX, YY of arbitrary length)
[3] XXXX.YY: YY sample from site XXXX (XXX, YY of arbitrary length)
[4-Z] XXXXYYY: YYY is sample designation with Z characters from site XXX
[5] site name = sample name
[6] site name entered in site_name column in the orient.txt format input file -- NOT CURRENTLY SUPPORTED
[7-Z] [XXX]YYY: XXX is site designation with Z characters from samples XXXYYY
NB: all others you will have to either customize your
self or e-mail [email protected] for help.
DEFAULTS
MFILE: measurements.txt
SFILE: samples.txt
AFILE: specimens.txt
INPUT
name [az,pl,strike,dip], followed by
3 rows of 5 measurements for each specimen | Below is the the instruction that describes the task:
### Input:
NAME
k15_magic.py
DESCRIPTION
converts .k15 format data to magic_measurements format.
assumes Jelinek Kappabridge measurement scheme
SYNTAX
k15_magic.py [-h] [command line options]
OPTIONS
-h prints help message and quits
-DM DATA_MODEL: specify data model 2 or 3 (default 3)
-f KFILE: specify .k15 format input file
-F MFILE: specify measurement output file
-Fsa SFILE, specify sample file for output
-Fa AFILE, specify specimen file for output [rmag_anisotropy for data model 2 only]
#-ins INST: specify instrument that measurements were made on # not implemented
-spc NUM: specify number of digits for specimen ID, default is 0
-ncn NCOM: specify naming convention (default is #1)
Sample naming convention:
[1] XXXXY: where XXXX is an arbitrary length site designation and Y
is the single character sample designation. e.g., TG001a is the
first sample from site TG001. [default]
[2] XXXX-YY: YY sample from site XXXX (XXX, YY of arbitrary length)
[3] XXXX.YY: YY sample from site XXXX (XXX, YY of arbitrary length)
[4-Z] XXXXYYY: YYY is sample designation with Z characters from site XXX
[5] site name = sample name
[6] site name entered in site_name column in the orient.txt format input file -- NOT CURRENTLY SUPPORTED
[7-Z] [XXX]YYY: XXX is site designation with Z characters from samples XXXYYY
NB: all others you will have to either customize your
self or e-mail [email protected] for help.
DEFAULTS
MFILE: measurements.txt
SFILE: samples.txt
AFILE: specimens.txt
INPUT
name [az,pl,strike,dip], followed by
3 rows of 5 measurements for each specimen
### Response:
def main():
"""
NAME
k15_magic.py
DESCRIPTION
converts .k15 format data to magic_measurements format.
assumes Jelinek Kappabridge measurement scheme
SYNTAX
k15_magic.py [-h] [command line options]
OPTIONS
-h prints help message and quits
-DM DATA_MODEL: specify data model 2 or 3 (default 3)
-f KFILE: specify .k15 format input file
-F MFILE: specify measurement output file
-Fsa SFILE, specify sample file for output
-Fa AFILE, specify specimen file for output [rmag_anisotropy for data model 2 only]
#-ins INST: specify instrument that measurements were made on # not implemented
-spc NUM: specify number of digits for specimen ID, default is 0
-ncn NCOM: specify naming convention (default is #1)
Sample naming convention:
[1] XXXXY: where XXXX is an arbitrary length site designation and Y
is the single character sample designation. e.g., TG001a is the
first sample from site TG001. [default]
[2] XXXX-YY: YY sample from site XXXX (XXX, YY of arbitrary length)
[3] XXXX.YY: YY sample from site XXXX (XXX, YY of arbitrary length)
[4-Z] XXXXYYY: YYY is sample designation with Z characters from site XXX
[5] site name = sample name
[6] site name entered in site_name column in the orient.txt format input file -- NOT CURRENTLY SUPPORTED
[7-Z] [XXX]YYY: XXX is site designation with Z characters from samples XXXYYY
NB: all others you will have to either customize your
self or e-mail [email protected] for help.
DEFAULTS
MFILE: measurements.txt
SFILE: samples.txt
AFILE: specimens.txt
INPUT
name [az,pl,strike,dip], followed by
3 rows of 5 measurements for each specimen
"""
args = sys.argv
if '-h' in args:
print(do_help())
sys.exit()
# def k15_magic(k15file, specnum=0, sample_naming_con='1', er_location_name="unknown", measfile='magic_measurements.txt', sampfile="er_samples.txt", aniso_outfile='rmag_anisotropy.txt', result_file="rmag_results.txt", input_dir_path='.', output_dir_path='.'):
dataframe = extractor.command_line_dataframe([['f', True, ''], ['F', False, 'measurements.txt'], ['Fsa', False, 'samples.txt'], ['Fa', False, 'specimens.txt'], [
'Fr', False, 'rmag_results.txt'], ['spc', False, 0], ['ncn', False, '1'], ['loc', False, 'unknown'], ['WD', False, '.'], ['ID', False, '.'], ['DM', False, 3]])
checked_args = extractor.extract_and_check_args(args, dataframe)
k15file, measfile, sampfile, aniso_outfile, result_file, specnum, sample_naming_con, location_name, output_dir_path, input_dir_path, data_model_num = extractor.get_vars(
['f', 'F', 'Fsa', 'Fa', 'Fr', 'spc', 'ncn', 'loc', 'WD', 'ID', 'DM'], checked_args)
program_ran, error_message = convert.k15(k15file, specnum=specnum, sample_naming_con=sample_naming_con, location=location_name, meas_file=measfile,
samp_file=sampfile, aniso_outfile=aniso_outfile, result_file=result_file, input_dir_path=input_dir_path, dir_path=output_dir_path, data_model_num=data_model_num) |
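A hypothetical command-line invocation built only from the options documented in the help text above; the file names are placeholders.
python k15_magic.py -f site1.k15 -F measurements.txt -Fsa samples.txt -Fa specimens.txt -ncn 1 -spc 0 -DM 3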
def send(r, stream=False):
"""Just sends the request using its send method and returns its response. """
r.send(stream=stream)
return r.response | Just sends the request using its send method and returns its response. | Below is the the instruction that describes the task:
### Input:
Just sends the request using its send method and returns its response.
### Response:
def send(r, stream=False):
"""Just sends the request using its send method and returns its response. """
r.send(stream=stream)
return r.response |
def read(fname, fail_silently=False):
"""
Read the content of the given file. The path is evaluated from the
directory containing this file.
"""
try:
filepath = os.path.join(os.path.dirname(__file__), fname)
with io.open(filepath, 'rt', encoding='utf8') as f:
return f.read()
except:
if not fail_silently:
raise
return '' | Read the content of the given file. The path is evaluated from the
directory containing this file. | Below is the the instruction that describes the task:
### Input:
Read the content of the given file. The path is evaluated from the
directory containing this file.
### Response:
def read(fname, fail_silently=False):
"""
Read the content of the given file. The path is evaluated from the
directory containing this file.
"""
try:
filepath = os.path.join(os.path.dirname(__file__), fname)
with io.open(filepath, 'rt', encoding='utf8') as f:
return f.read()
except:
if not fail_silently:
raise
return '' |
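Typical use of the helper above in a packaging script (hedged sketch; README.rst is a placeholder file name and the function is assumed to be in scope):
# Returns '' rather than raising if the file is missing, because of fail_silently=True.
long_description = read('README.rst', fail_silently=True)
print(len(long_description))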
def get_text_style(text):
"""Return the text style dict for a text instance"""
style = {}
style['alpha'] = text.get_alpha()
if style['alpha'] is None:
style['alpha'] = 1
style['fontsize'] = text.get_size()
style['color'] = color_to_hex(text.get_color())
style['halign'] = text.get_horizontalalignment() # left, center, right
style['valign'] = text.get_verticalalignment() # baseline, center, top
style['malign'] = text._multialignment # text alignment when '\n' in text
style['rotation'] = text.get_rotation()
style['zorder'] = text.get_zorder()
return style | Return the text style dict for a text instance | Below is the the instruction that describes the task:
### Input:
Return the text style dict for a text instance
### Response:
def get_text_style(text):
"""Return the text style dict for a text instance"""
style = {}
style['alpha'] = text.get_alpha()
if style['alpha'] is None:
style['alpha'] = 1
style['fontsize'] = text.get_size()
style['color'] = color_to_hex(text.get_color())
style['halign'] = text.get_horizontalalignment() # left, center, right
style['valign'] = text.get_verticalalignment() # baseline, center, top
style['malign'] = text._multialignment # text alignment when '\n' in text
style['rotation'] = text.get_rotation()
style['zorder'] = text.get_zorder()
return style |
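A quick inspection sketch, assuming matplotlib is installed and the color_to_hex helper used above is importable from the same module:
import matplotlib.pyplot as plt

fig, ax = plt.subplots()
label = ax.text(0.5, 0.5, "hello", fontsize=12, color="red", rotation=30)
style = get_text_style(label)
print(style["fontsize"], style["rotation"], style["halign"])   # e.g. 12.0 30.0 left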
def register(self, plugin):
"""
Register a plugin. New plugins are added to the end of the plugins list.
:param samtranslator.plugins.BasePlugin plugin: Instance/subclass of BasePlugin class that implements hooks
:raises ValueError: If plugin is not an instance of samtranslator.plugins.BasePlugin or if it is already
registered
:return: None
"""
if not plugin or not isinstance(plugin, BasePlugin):
raise ValueError("Plugin must be implemented as a subclass of BasePlugin class")
if self.is_registered(plugin.name):
raise ValueError("Plugin with name {} is already registered".format(plugin.name))
self._plugins.append(plugin) | Register a plugin. New plugins are added to the end of the plugins list.
:param samtranslator.plugins.BasePlugin plugin: Instance/subclass of BasePlugin class that implements hooks
:raises ValueError: If plugin is not an instance of samtranslator.plugins.BasePlugin or if it is already
registered
:return: None | Below is the the instruction that describes the task:
### Input:
Register a plugin. New plugins are added to the end of the plugins list.
:param samtranslator.plugins.BasePlugin plugin: Instance/subclass of BasePlugin class that implements hooks
:raises ValueError: If plugin is not an instance of samtranslator.plugins.BasePlugin or if it is already
registered
:return: None
### Response:
def register(self, plugin):
"""
Register a plugin. New plugins are added to the end of the plugins list.
:param samtranslator.plugins.BasePlugin plugin: Instance/subclass of BasePlugin class that implements hooks
:raises ValueError: If plugin is not an instance of samtranslator.plugins.BasePlugin or if it is already
registered
:return: None
"""
if not plugin or not isinstance(plugin, BasePlugin):
raise ValueError("Plugin must be implemented as a subclass of BasePlugin class")
if self.is_registered(plugin.name):
raise ValueError("Plugin with name {} is already registered".format(plugin.name))
self._plugins.append(plugin) |
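A minimal registration sketch. It assumes `BasePlugin` is the samtranslator.plugins.BasePlugin class referenced above, that its constructor accepts the plugin name, and that `registry` is an instance of the class owning this register() method; all three are assumptions for illustration.
class LoggingPlugin(BasePlugin):
    """Toy plugin that only carries a name; hooks are inherited as no-ops."""
    def __init__(self):
        super(LoggingPlugin, self).__init__(LoggingPlugin.__name__)

registry.register(LoggingPlugin())       # appended to the end of the plugin list
# registry.register(LoggingPlugin())     # would raise ValueError: already registered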
def scramble_itemdata(self, oid, value):
"""If metadata provided, use it to scramble the value based on data type"""
if self.metadata is not None:
path = ".//{0}[@{1}='{2}']".format(E_ODM.ITEM_DEF.value, A_ODM.OID.value, oid)
elem = self.metadata.find(path)
# for elem in self.metadata.iter(E_ODM.ITEM_DEF.value):
datatype = elem.get(A_ODM.DATATYPE.value)
codelist = None
for el in elem.iter(E_ODM.CODELIST_REF.value):
codelist = el.get(A_ODM.CODELIST_OID.value)
length = 1 if not A_ODM.LENGTH in elem else int(elem.get(A_ODM.LENGTH.value))
if A_ODM.SIGNIFICANT_DIGITS.value in elem.keys():
sd = elem.get(A_ODM.SIGNIFICANT_DIGITS.value)
else:
sd = 0
if A_ODM.DATETIME_FORMAT.value in elem.keys():
dt_format = elem.get(A_ODM.DATETIME_FORMAT.value)
for fmt in [('yyyy', '%Y'), ('MMM', '%b'), ('dd', '%d'), ('HH', '%H'), ('nn', '%M'), ('ss', '%S'),
('-', '')]:
dt_format = dt_format.replace(fmt[0], fmt[1])
if codelist is not None:
return self.scramble_codelist(codelist)
elif datatype == 'integer':
return self.scramble_int(length)
elif datatype == 'float':
return self.scramble_float(length, sd)
elif datatype in ['string', 'text']:
return self.scramble_string(length)
elif datatype in ['date', 'datetime']:
return self.scramble_date(value, dt_format)
elif datatype in ['time']:
return self.scramble_time(dt_format)
else:
return self.scramble_value(value)
else:
return self.scramble_value(value) | If metadata provided, use it to scramble the value based on data type | Below is the the instruction that describes the task:
### Input:
If metadata provided, use it to scramble the value based on data type
### Response:
def scramble_itemdata(self, oid, value):
"""If metadata provided, use it to scramble the value based on data type"""
if self.metadata is not None:
path = ".//{0}[@{1}='{2}']".format(E_ODM.ITEM_DEF.value, A_ODM.OID.value, oid)
elem = self.metadata.find(path)
# for elem in self.metadata.iter(E_ODM.ITEM_DEF.value):
datatype = elem.get(A_ODM.DATATYPE.value)
codelist = None
for el in elem.iter(E_ODM.CODELIST_REF.value):
codelist = el.get(A_ODM.CODELIST_OID.value)
length = 1 if not A_ODM.LENGTH in elem else int(elem.get(A_ODM.LENGTH.value))
if A_ODM.SIGNIFICANT_DIGITS.value in elem.keys():
sd = elem.get(A_ODM.SIGNIFICANT_DIGITS.value)
else:
sd = 0
if A_ODM.DATETIME_FORMAT.value in elem.keys():
dt_format = elem.get(A_ODM.DATETIME_FORMAT.value)
for fmt in [('yyyy', '%Y'), ('MMM', '%b'), ('dd', '%d'), ('HH', '%H'), ('nn', '%M'), ('ss', '%S'),
('-', '')]:
dt_format = dt_format.replace(fmt[0], fmt[1])
if codelist is not None:
return self.scramble_codelist(codelist)
elif datatype == 'integer':
return self.scramble_int(length)
elif datatype == 'float':
return self.scramble_float(length, sd)
elif datatype in ['string', 'text']:
return self.scramble_string(length)
elif datatype in ['date', 'datetime']:
return self.scramble_date(value, dt_format)
elif datatype in ['time']:
return self.scramble_time(dt_format)
else:
return self.scramble_value(value)
else:
return self.scramble_value(value) |
def _indexes(self):
"""Instantiate the indexes only when asked
Returns
-------
list
An empty list if the field is not indexable, else a list of all indexes
tied to the field.
If no indexes were passed when creating the field, the default indexes
from the field/model/database will be used.
If still no index classes, it will raise
Raises
------
ImplementationError
If no index classes available for this field
"""
if not self.indexable:
return []
if not self.index_classes:
self.index_classes = self.get_default_indexes()[::1]
if not self.index_classes:
raise ImplementationError('%s field is indexable but has no indexes attached' %
self.__class__.__name__)
return [index_class(field=self) for index_class in self.index_classes] | Instantiate the indexes only when asked
Returns
-------
list
An empty list if the field is not indexable, else a list of all indexes
tied to the field.
If no indexes were passed when creating the field, the default indexes
from the field/model/database will be used.
If still no index classes, it will raise
Raises
------
ImplementationError
If no index classes available for this field | Below is the the instruction that describes the task:
### Input:
Instantiate the indexes only when asked
Returns
-------
list
An empty list if the field is not indexable, else a list of all indexes
tied to the field.
If no indexes were passed when creating the field, the default indexes
from the field/model/database will be used.
If still no index classes, it will raise
Raises
------
ImplementationError
If no index classes available for this field
### Response:
def _indexes(self):
"""Instantiate the indexes only when asked
Returns
-------
list
An empty list if the field is not indexable, else a list of all indexes
tied to the field.
If no indexes were passed when creating the field, the default indexes
from the field/model/database will be used.
If still no index classes, it will raise
Raises
------
ImplementationError
If no index classes available for this field
"""
if not self.indexable:
return []
if not self.index_classes:
self.index_classes = self.get_default_indexes()[::1]
if not self.index_classes:
raise ImplementationError('%s field is indexable but has no indexes attached' %
self.__class__.__name__)
return [index_class(field=self) for index_class in self.index_classes] |
def _ParseCommentRecord(self, structure):
"""Parse a comment and store appropriate attributes.
Args:
structure (pyparsing.ParseResults): parsed log line.
"""
comment = structure[1]
if comment.startswith('Version'):
_, _, self._version = comment.partition(':')
elif comment.startswith('Software'):
_, _, self._software = comment.partition(':')
elif comment.startswith('Time'):
_, _, time_format = comment.partition(':')
if 'local' in time_format.lower():
self._use_local_timezone = True | Parse a comment and store appropriate attributes.
Args:
structure (pyparsing.ParseResults): parsed log line. | Below is the the instruction that describes the task:
### Input:
Parse a comment and store appropriate attributes.
Args:
structure (pyparsing.ParseResults): parsed log line.
### Response:
def _ParseCommentRecord(self, structure):
"""Parse a comment and store appropriate attributes.
Args:
structure (pyparsing.ParseResults): parsed log line.
"""
comment = structure[1]
if comment.startswith('Version'):
_, _, self._version = comment.partition(':')
elif comment.startswith('Software'):
_, _, self._software = comment.partition(':')
elif comment.startswith('Time'):
_, _, time_format = comment.partition(':')
if 'local' in time_format.lower():
self._use_local_timezone = True |
def get_comments(self):
"""
:calls: `GET /repos/:owner/:repo/comments <http://developer.github.com/v3/repos/comments>`_
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.CommitComment.CommitComment`
"""
return github.PaginatedList.PaginatedList(
github.CommitComment.CommitComment,
self._requester,
self.url + "/comments",
None
) | :calls: `GET /repos/:owner/:repo/comments <http://developer.github.com/v3/repos/comments>`_
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.CommitComment.CommitComment` | Below is the the instruction that describes the task:
### Input:
:calls: `GET /repos/:owner/:repo/comments <http://developer.github.com/v3/repos/comments>`_
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.CommitComment.CommitComment`
### Response:
def get_comments(self):
"""
:calls: `GET /repos/:owner/:repo/comments <http://developer.github.com/v3/repos/comments>`_
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.CommitComment.CommitComment`
"""
return github.PaginatedList.PaginatedList(
github.CommitComment.CommitComment,
self._requester,
self.url + "/comments",
None
) |
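A hedged PyGithub sketch (the token and repository name are placeholders) that lists commit comments via the paginated call above:
from github import Github

gh = Github("YOUR_TOKEN")                       # placeholder token
repo = gh.get_repo("octocat/Hello-World")       # placeholder repository
for comment in repo.get_comments():             # lazily paginated
    print(comment.user.login, comment.body[:60])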
def is_first_root(self):
"""Return ``True`` if this page is the first root pages."""
if self.parent:
return False
if self._is_first_root is not None:
return self._is_first_root
first_root_id = cache.get('PAGE_FIRST_ROOT_ID')
if first_root_id is not None:
self._is_first_root = first_root_id == self.id
return self._is_first_root
try:
first_root_id = Page.objects.root().values('id')[0]['id']
except IndexError:
first_root_id = None
if first_root_id is not None:
cache.set('PAGE_FIRST_ROOT_ID', first_root_id)
self._is_first_root = self.id == first_root_id
return self._is_first_root | Return ``True`` if this page is the first root page.
### Input:
Return ``True`` if this page is the first root page.
### Response:
def is_first_root(self):
"""Return ``True`` if this page is the first root pages."""
if self.parent:
return False
if self._is_first_root is not None:
return self._is_first_root
first_root_id = cache.get('PAGE_FIRST_ROOT_ID')
if first_root_id is not None:
self._is_first_root = first_root_id == self.id
return self._is_first_root
try:
first_root_id = Page.objects.root().values('id')[0]['id']
except IndexError:
first_root_id = None
if first_root_id is not None:
cache.set('PAGE_FIRST_ROOT_ID', first_root_id)
self._is_first_root = self.id == first_root_id
return self._is_first_root |
def model_to_dict(model, exclude=None):
"""
Extract a SQLAlchemy model instance to a dictionary
:param model: the model to be extracted
:param exclude: Any keys to be excluded
:return: New dictionary consisting of property-values
"""
exclude = exclude or []
exclude.append('_sa_instance_state')
return {k: v for k, v in model.__dict__.items() if k not in exclude} | Extract a SQLAlchemy model instance to a dictionary
:param model: the model to be extracted
:param exclude: Any keys to be excluded
:return: New dictionary consisting of property-values | Below is the the instruction that describes the task:
### Input:
Extract a SQLAlchemy model instance to a dictionary
:param model: the model to be extracted
:param exclude: Any keys to be excluded
:return: New dictionary consisting of property-values
### Response:
def model_to_dict(model, exclude=None):
"""
Extract a SQLAlchemy model instance to a dictionary
:param model: the model to be extracted
:param exclude: Any keys to be excluded
:return: New dictionary consisting of property-values
"""
exclude = exclude or []
exclude.append('_sa_instance_state')
return {k: v for k, v in model.__dict__.items() if k not in exclude} |
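A self-contained sketch with a throw-away SQLAlchemy model (assumes SQLAlchemy 1.4+ for the declarative_base import path; the model and values are illustrative):
from sqlalchemy import Column, Integer, String
from sqlalchemy.orm import declarative_base

Base = declarative_base()

class User(Base):
    __tablename__ = "users"
    id = Column(Integer, primary_key=True)
    name = Column(String)

user = User(id=1, name="ada")
print(model_to_dict(user))                    # {'id': 1, 'name': 'ada'}
print(model_to_dict(user, exclude=["id"]))    # {'name': 'ada'}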
def save(self, models_dir, model_names_to_write=None, write_metadata=True):
"""
Serialize the predictor to a directory on disk. If the directory does
not exist it will be created.
The serialization format consists of a file called "manifest.csv" with
the configurations of each Class1NeuralNetwork, along with per-network
files giving the model weights. If there are pan-allele predictors in
the ensemble, the allele sequences are also stored in the
directory. There is also a small file "index.txt" with basic metadata:
when the models were trained, by whom, on what host.
Parameters
----------
models_dir : string
Path to directory
model_names_to_write : list of string, optional
Only write the weights for the specified models. Useful for
incremental updates during training.
write_metadata : boolean, optional
Whether to write optional metadata
"""
num_models = len(self.class1_pan_allele_models) + sum(
len(v) for v in self.allele_to_allele_specific_models.values())
assert len(self.manifest_df) == num_models, (
"Manifest seems out of sync with models: %d vs %d entries" % (
len(self.manifest_df), num_models))
if model_names_to_write is None:
# Write all models
model_names_to_write = self.manifest_df.model_name.values
if not exists(models_dir):
mkdir(models_dir)
sub_manifest_df = self.manifest_df.ix[
self.manifest_df.model_name.isin(model_names_to_write)
]
for (_, row) in sub_manifest_df.iterrows():
weights_path = self.weights_path(models_dir, row.model_name)
Class1AffinityPredictor.save_weights(
row.model.get_weights(), weights_path)
logging.info("Wrote: %s" % weights_path)
write_manifest_df = self.manifest_df[[
c for c in self.manifest_df.columns if c != "model"
]]
manifest_path = join(models_dir, "manifest.csv")
write_manifest_df.to_csv(manifest_path, index=False)
logging.info("Wrote: %s" % manifest_path)
if write_metadata:
# Write "info.txt"
info_path = join(models_dir, "info.txt")
rows = [
("trained on", time.asctime()),
("package ", "mhcflurry %s" % __version__),
("hostname ", gethostname()),
("user ", getuser()),
]
pandas.DataFrame(rows).to_csv(
info_path, sep="\t", header=False, index=False)
if self.metadata_dataframes:
for (name, df) in self.metadata_dataframes.items():
metadata_df_path = join(models_dir, "%s.csv.bz2" % name)
df.to_csv(metadata_df_path, index=False, compression="bz2")
if self.allele_to_fixed_length_sequence is not None:
allele_to_sequence_df = pandas.DataFrame(
list(self.allele_to_fixed_length_sequence.items()),
columns=['allele', 'sequence']
)
allele_to_sequence_df.to_csv(
join(models_dir, "allele_sequences.csv"), index=False)
logging.info("Wrote: %s" % join(models_dir, "allele_sequences.csv"))
if self.allele_to_percent_rank_transform:
percent_ranks_df = None
for (allele, transform) in self.allele_to_percent_rank_transform.items():
series = transform.to_series()
if percent_ranks_df is None:
percent_ranks_df = pandas.DataFrame(index=series.index)
assert_equal(series.index.values, percent_ranks_df.index.values)
percent_ranks_df[allele] = series
percent_ranks_path = join(models_dir, "percent_ranks.csv")
percent_ranks_df.to_csv(
percent_ranks_path,
index=True,
index_label="bin")
logging.info("Wrote: %s" % percent_ranks_path) | Serialize the predictor to a directory on disk. If the directory does
not exist it will be created.
The serialization format consists of a file called "manifest.csv" with
the configurations of each Class1NeuralNetwork, along with per-network
files giving the model weights. If there are pan-allele predictors in
the ensemble, the allele sequences are also stored in the
directory. There is also a small file "index.txt" with basic metadata:
when the models were trained, by whom, on what host.
Parameters
----------
models_dir : string
Path to directory
model_names_to_write : list of string, optional
Only write the weights for the specified models. Useful for
incremental updates during training.
write_metadata : boolean, optional
Whether to write optional metadata | Below is the the instruction that describes the task:
### Input:
Serialize the predictor to a directory on disk. If the directory does
not exist it will be created.
The serialization format consists of a file called "manifest.csv" with
the configurations of each Class1NeuralNetwork, along with per-network
files giving the model weights. If there are pan-allele predictors in
the ensemble, the allele sequences are also stored in the
directory. There is also a small file "index.txt" with basic metadata:
when the models were trained, by whom, on what host.
Parameters
----------
models_dir : string
Path to directory
model_names_to_write : list of string, optional
Only write the weights for the specified models. Useful for
incremental updates during training.
write_metadata : boolean, optional
Whether to write optional metadata
### Response:
def save(self, models_dir, model_names_to_write=None, write_metadata=True):
"""
Serialize the predictor to a directory on disk. If the directory does
not exist it will be created.
The serialization format consists of a file called "manifest.csv" with
the configurations of each Class1NeuralNetwork, along with per-network
files giving the model weights. If there are pan-allele predictors in
the ensemble, the allele sequences are also stored in the
directory. There is also a small file "index.txt" with basic metadata:
when the models were trained, by whom, on what host.
Parameters
----------
models_dir : string
Path to directory
model_names_to_write : list of string, optional
Only write the weights for the specified models. Useful for
incremental updates during training.
write_metadata : boolean, optional
Whether to write optional metadata
"""
num_models = len(self.class1_pan_allele_models) + sum(
len(v) for v in self.allele_to_allele_specific_models.values())
assert len(self.manifest_df) == num_models, (
"Manifest seems out of sync with models: %d vs %d entries" % (
len(self.manifest_df), num_models))
if model_names_to_write is None:
# Write all models
model_names_to_write = self.manifest_df.model_name.values
if not exists(models_dir):
mkdir(models_dir)
sub_manifest_df = self.manifest_df.ix[
self.manifest_df.model_name.isin(model_names_to_write)
]
for (_, row) in sub_manifest_df.iterrows():
weights_path = self.weights_path(models_dir, row.model_name)
Class1AffinityPredictor.save_weights(
row.model.get_weights(), weights_path)
logging.info("Wrote: %s" % weights_path)
write_manifest_df = self.manifest_df[[
c for c in self.manifest_df.columns if c != "model"
]]
manifest_path = join(models_dir, "manifest.csv")
write_manifest_df.to_csv(manifest_path, index=False)
logging.info("Wrote: %s" % manifest_path)
if write_metadata:
# Write "info.txt"
info_path = join(models_dir, "info.txt")
rows = [
("trained on", time.asctime()),
("package ", "mhcflurry %s" % __version__),
("hostname ", gethostname()),
("user ", getuser()),
]
pandas.DataFrame(rows).to_csv(
info_path, sep="\t", header=False, index=False)
if self.metadata_dataframes:
for (name, df) in self.metadata_dataframes.items():
metadata_df_path = join(models_dir, "%s.csv.bz2" % name)
df.to_csv(metadata_df_path, index=False, compression="bz2")
if self.allele_to_fixed_length_sequence is not None:
allele_to_sequence_df = pandas.DataFrame(
list(self.allele_to_fixed_length_sequence.items()),
columns=['allele', 'sequence']
)
allele_to_sequence_df.to_csv(
join(models_dir, "allele_sequences.csv"), index=False)
logging.info("Wrote: %s" % join(models_dir, "allele_sequences.csv"))
if self.allele_to_percent_rank_transform:
percent_ranks_df = None
for (allele, transform) in self.allele_to_percent_rank_transform.items():
series = transform.to_series()
if percent_ranks_df is None:
percent_ranks_df = pandas.DataFrame(index=series.index)
assert_equal(series.index.values, percent_ranks_df.index.values)
percent_ranks_df[allele] = series
percent_ranks_path = join(models_dir, "percent_ranks.csv")
percent_ranks_df.to_csv(
percent_ranks_path,
index=True,
index_label="bin")
logging.info("Wrote: %s" % percent_ranks_path) |
def str_get(x, i):
"""Extract a character from each sample at the specified position from a string column.
Note that if the specified position is out of bounds of the string sample, this method returns '', while pandas returns nan.
:param int i: The index location, at which to extract the character.
:returns: an expression containing the extracted characters.
Example:
>>> import vaex
>>> text = ['Something', 'very pretty', 'is coming', 'our', 'way.']
>>> df = vaex.from_arrays(text=text)
>>> df
# text
0 Something
1 very pretty
2 is coming
3 our
4 way.
>>> df.text.str.get(5)
Expression = str_get(text, 5)
Length: 5 dtype: str (expression)
---------------------------------
0 h
1 p
2 m
3
4
"""
x = _to_string_sequence(x)
if i == -1:
sl = x.slice_string_end(-1)
else:
sl = x.slice_string(i, i+1)
return column.ColumnStringArrow(sl.bytes, sl.indices, sl.length, sl.offset, string_sequence=sl) | Extract a character from each sample at the specified position from a string column.
Note that if the specified position is out of bounds of the string sample, this method returns '', while pandas returns nan.
:param int i: The index location, at which to extract the character.
:returns: an expression containing the extracted characters.
Example:
>>> import vaex
>>> text = ['Something', 'very pretty', 'is coming', 'our', 'way.']
>>> df = vaex.from_arrays(text=text)
>>> df
# text
0 Something
1 very pretty
2 is coming
3 our
4 way.
>>> df.text.str.get(5)
Expression = str_get(text, 5)
Length: 5 dtype: str (expression)
---------------------------------
0 h
1 p
2 m
3
4 | Below is the the instruction that describes the task:
### Input:
Extract a character from each sample at the specified position from a string column.
Note that if the specified position is out of bounds of the string sample, this method returns '', while pandas returns nan.
:param int i: The index location, at which to extract the character.
:returns: an expression containing the extracted characters.
Example:
>>> import vaex
>>> text = ['Something', 'very pretty', 'is coming', 'our', 'way.']
>>> df = vaex.from_arrays(text=text)
>>> df
# text
0 Something
1 very pretty
2 is coming
3 our
4 way.
>>> df.text.str.get(5)
Expression = str_get(text, 5)
Length: 5 dtype: str (expression)
---------------------------------
0 h
1 p
2 m
3
4
### Response:
def str_get(x, i):
"""Extract a character from each sample at the specified position from a string column.
Note that if the specified position is out of bounds of the string sample, this method returns '', while pandas returns nan.
:param int i: The index location, at which to extract the character.
:returns: an expression containing the extracted characters.
Example:
>>> import vaex
>>> text = ['Something', 'very pretty', 'is coming', 'our', 'way.']
>>> df = vaex.from_arrays(text=text)
>>> df
# text
0 Something
1 very pretty
2 is coming
3 our
4 way.
>>> df.text.str.get(5)
Expression = str_get(text, 5)
Length: 5 dtype: str (expression)
---------------------------------
0 h
1 p
2 m
3
4
"""
x = _to_string_sequence(x)
if i == -1:
sl = x.slice_string_end(-1)
else:
sl = x.slice_string(i, i+1)
return column.ColumnStringArrow(sl.bytes, sl.indices, sl.length, sl.offset, string_sequence=sl) |
def MoveWindow(self, x: int, y: int, width: int, height: int, repaint: bool = True) -> bool:
"""
Call native MoveWindow if control has a valid native handle.
x: int.
y: int.
width: int.
height: int.
repaint: bool.
Return bool, True if it succeeds, otherwise False.
"""
handle = self.NativeWindowHandle
if handle:
return MoveWindow(handle, x, y, width, height, int(repaint))
return False | Call native MoveWindow if control has a valid native handle.
x: int.
y: int.
width: int.
height: int.
repaint: bool.
Return bool, True if it succeeds, otherwise False.
### Input:
Call native MoveWindow if control has a valid native handle.
x: int.
y: int.
width: int.
height: int.
repaint: bool.
Return bool, True if it succeeds, otherwise False.
### Response:
def MoveWindow(self, x: int, y: int, width: int, height: int, repaint: bool = True) -> bool:
"""
Call native MoveWindow if control has a valid native handle.
x: int.
y: int.
width: int.
height: int.
repaint: bool.
Return bool, True if it succeeds, otherwise False.
"""
handle = self.NativeWindowHandle
if handle:
return MoveWindow(handle, x, y, width, height, int(repaint))
return False |
def plots(data, **kwargs):
"""
simple wrapper plot with labels and skip x
:param data:
:param kwargs:
:return:
"""
labels = kwargs.pop('labels', '')
loc = kwargs.pop('loc', 1)
# if len(yonly_or_xy) == 1:
# x = range(len(yonly_or_xy))
# y = yonly_or_xy
# else:
# x = yonly_or_xy[0]
# y = yonly_or_xy[1:]
lines = plt.plot(np.asarray(data).T, **kwargs)
if labels:
plt.legend(lines, labels, loc=loc)
return lines | simple wrapper plot with labels and skip x
:param data:
:param kwargs:
:return: | Below is the the instruction that describes the task:
### Input:
simple wrapper plot with labels and skip x
:param data:
:param kwargs:
:return:
### Response:
def plots(data, **kwargs):
"""
simple wrapper plot with labels and skip x
:param data:
:param kwargs:
:return:
"""
labels = kwargs.pop('labels', '')
loc = kwargs.pop('loc', 1)
# if len(yonly_or_xy) == 1:
# x = range(len(yonly_or_xy))
# y = yonly_or_xy
# else:
# x = yonly_or_xy[0]
# y = yonly_or_xy[1:]
lines = plt.plot(np.asarray(data).T, **kwargs)
if labels:
plt.legend(lines, labels, loc=loc)
return lines |
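A quick demonstration of the wrapper (assumes the module-level plt/np imports used above; the curves are arbitrary):
import numpy as np
import matplotlib.pyplot as plt

x = np.linspace(0, 6, 100)
plots([np.sin(x), np.cos(x)], labels=['sin', 'cos'], loc=2)   # two rows -> two labelled lines
plt.show()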
def encodeIntoArray(self, inputData, output):
"""
See `nupic.encoders.base.Encoder` for more information.
:param: inputData (tuple) Contains speed (float), longitude (float),
latitude (float), altitude (float)
:param: output (numpy.array) Stores encoded SDR in this numpy array
"""
altitude = None
if len(inputData) == 4:
(speed, longitude, latitude, altitude) = inputData
else:
(speed, longitude, latitude) = inputData
coordinate = self.coordinateForPosition(longitude, latitude, altitude)
radius = self.radiusForSpeed(speed)
super(GeospatialCoordinateEncoder, self).encodeIntoArray(
(coordinate, radius), output) | See `nupic.encoders.base.Encoder` for more information.
:param: inputData (tuple) Contains speed (float), longitude (float),
latitude (float), altitude (float)
:param: output (numpy.array) Stores encoded SDR in this numpy array | Below is the the instruction that describes the task:
### Input:
See `nupic.encoders.base.Encoder` for more information.
:param: inputData (tuple) Contains speed (float), longitude (float),
latitude (float), altitude (float)
:param: output (numpy.array) Stores encoded SDR in this numpy array
### Response:
def encodeIntoArray(self, inputData, output):
"""
See `nupic.encoders.base.Encoder` for more information.
:param: inputData (tuple) Contains speed (float), longitude (float),
latitude (float), altitude (float)
:param: output (numpy.array) Stores encoded SDR in this numpy array
"""
altitude = None
if len(inputData) == 4:
(speed, longitude, latitude, altitude) = inputData
else:
(speed, longitude, latitude) = inputData
coordinate = self.coordinateForPosition(longitude, latitude, altitude)
radius = self.radiusForSpeed(speed)
super(GeospatialCoordinateEncoder, self).encodeIntoArray(
(coordinate, radius), output) |
def setPoint(self, i, p):
"""
Set specific `i-th` point coordinates in mesh.
Actor transformation is reset to its original mesh position/orientation.
:param int i: index of vertex point.
:param list p: new coordinates of mesh point.
.. warning:: if used in a loop this can slow down the execution by a lot.
.. seealso:: ``actor.Points()``
"""
poly = self.polydata(False)
poly.GetPoints().SetPoint(i, p)
poly.GetPoints().Modified()
# reset actor to identity matrix position/rotation:
self.PokeMatrix(vtk.vtkMatrix4x4())
return self | Set specific `i-th` point coordinates in mesh.
Actor transformation is reset to its original mesh position/orientation.
:param int i: index of vertex point.
:param list p: new coordinates of mesh point.
.. warning:: if used in a loop this can slow down the execution by a lot.
.. seealso:: ``actor.Points()`` | Below is the the instruction that describes the task:
### Input:
Set specific `i-th` point coordinates in mesh.
Actor transformation is reset to its original mesh position/orientation.
:param int i: index of vertex point.
:param list p: new coordinates of mesh point.
.. warning:: if used in a loop this can slow down the execution by a lot.
.. seealso:: ``actor.Points()``
### Response:
def setPoint(self, i, p):
"""
Set specific `i-th` point coordinates in mesh.
Actor transformation is reset to its original mesh position/orientation.
:param int i: index of vertex point.
:param list p: new coordinates of mesh point.
.. warning:: if used in a loop this can slow down the execution by a lot.
.. seealso:: ``actor.Points()``
"""
poly = self.polydata(False)
poly.GetPoints().SetPoint(i, p)
poly.GetPoints().Modified()
# reset actor to identity matrix position/rotation:
self.PokeMatrix(vtk.vtkMatrix4x4())
return self |