| column | dtype | min | max |
|---|---|---|---|
| Unnamed: 0 | int64 | 0 | 10k |
| repository_name | string (lengths) | 7 | 54 |
| func_path_in_repository | string (lengths) | 5 | 223 |
| func_name | string (lengths) | 1 | 134 |
| whole_func_string | string (lengths) | 100 | 30.3k |
| language | string (1 class: "python") | | |
| func_code_string | string (lengths) | 100 | 30.3k |
| func_code_tokens | string (lengths) | 138 | 33.2k |
| func_documentation_string | string (lengths) | 1 | 15k |
| func_documentation_tokens | string (lengths) | 5 | 5.14k |
| split_name | string (1 class: "train") | | |
| func_code_url | string (lengths) | 91 | 315 |
7,300 | nmohoric/nypl-digital-collections | nyplcollections/nyplcollections.py | NYPLsearch.uuid | def uuid(self, type, val):
"""Return the item-uuid for a identifier"""
picker = lambda x: x.get('uuid', x)
return self._get((type, val), picker) | python | Return the item-uuid for a identifier | train | https://github.com/nmohoric/nypl-digital-collections/blob/f66cd0a11e7ea2b6c3c327d2693211e2c4609231/nyplcollections/nyplcollections.py#L20-L23 |
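The `picker` above leans on `dict.get(key, default)`: if the API response has a `uuid` field it is returned, otherwise the response itself is passed through. A minimal standalone sketch of that fallback pattern (plain dicts, no NYPL client involved):

```python
# Fallback lookup: return the 'uuid' field when present, otherwise the object itself.
picker = lambda x: x.get('uuid', x)

print(picker({'uuid': 'abc-123', 'title': 'Map of Manhattan'}))  # -> 'abc-123'
print(picker({'error': 'not found'}))                            # -> the whole dict, unchanged
```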
7,301 | iotile/coretools | iotilebuild/iotile/build/config/site_scons/trub_script.py | _build_reflash_script_action | def _build_reflash_script_action(target, source, env):
"""Create a TRUB script containing tile and controller reflashes and/or sensorgraph
If the app_info is provided, then the final source file will be a sensorgraph.
All subsequent files in source must be in intel hex format. This is guaranteed
by the ensure_image_is_hex call in build_update_script.
"""
out_path = str(target[0])
source = [str(x) for x in source]
records = []
if env['USE_SAFEUPDATE']:
sgf_off = SendRPCRecord(8,0x2005,bytearray([0])) # Disable Sensorgraph
records.append(sgf_off)
safemode_enable = SendRPCRecord(8,0x1006,bytearray([1])) # Enable Safemode
records.append(safemode_enable)
# Update application firmwares
if env['SLOTS'] is not None:
for (controller, slot_id), image_path in zip(env['SLOTS'], source):
hex_data = IntelHex(image_path)
hex_data.padding = 0xFF
offset = hex_data.minaddr()
bin_data = bytearray(hex_data.tobinarray(offset, hex_data.maxaddr()))
if controller:
record = ReflashControllerRecord(bin_data, offset)
else:
record = ReflashTileRecord(slot_id, bin_data, offset)
records.append(record)
# Update sensorgraph
if env['UPDATE_SENSORGRAPH']:
sensor_graph_file = source[-1]
sensor_graph = compile_sgf(sensor_graph_file)
output = format_script(sensor_graph)
records += UpdateScript.FromBinary(output).records
# Update App and OS Tag
os_info = env['OS_INFO']
app_info = env['APP_INFO']
if os_info is not None:
os_tag, os_version = os_info
records.append(SetDeviceTagRecord(os_tag=os_tag, os_version=os_version))
if app_info is not None:
app_tag, app_version = app_info
records.append(SetDeviceTagRecord(app_tag=app_tag, app_version=app_version))
if env['USE_SAFEUPDATE']:
safemode_disable = SendRPCRecord(8,0x1006,bytearray([0])) # Disable safemode
records.append(safemode_disable)
sgf_on = SendRPCRecord(8,0x2005,bytearray([1])) # Enable Sensorgraph
records.append(sgf_on)
script = UpdateScript(records)
with open(out_path, "wb") as outfile:
outfile.write(script.encode()) | python | Create a TRUB script containing tile and controller reflashes and/or sensorgraph. If the app_info is provided, then the final source file will be a sensorgraph. All subsequent files in source must be in intel hex format. This is guaranteed by the ensure_image_is_hex call in build_update_script. | train | https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/iotilebuild/iotile/build/config/site_scons/trub_script.py#L64-L124 |
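The reflash records above come from Intel HEX images: gaps are padded with 0xFF and a contiguous byte range is pulled out starting at the lowest programmed address. A minimal sketch of that extraction step with the `intelhex` package (`firmware.hex` is a placeholder file name, not part of the original build):

```python
from intelhex import IntelHex

hex_data = IntelHex('firmware.hex')  # placeholder path to an Intel HEX image
hex_data.padding = 0xFF              # fill unprogrammed gaps with 0xFF
offset = hex_data.minaddr()          # lowest programmed address
bin_data = bytearray(hex_data.tobinarray(offset, hex_data.maxaddr()))
print(hex(offset), len(bin_data))    # where the image starts and how many bytes it spans
```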
7,302 | lrq3000/pyFileFixity | pyFileFixity/lib/profilers/visual/pympler/classtracker_stats.py | HtmlStats.create_pie_chart | def create_pie_chart(self, snapshot, filename=''):
"""
Create a pie chart that depicts the distribution of the allocated memory
for a given `snapshot`. The chart is saved to `filename`.
"""
try:
from pylab import figure, title, pie, axes, savefig
from pylab import sum as pylab_sum
except ImportError:
return self.nopylab_msg % ("pie_chart")
# Don't bother illustrating a pie without pieces.
if not snapshot.tracked_total:
return ''
classlist = []
sizelist = []
for k, v in list(snapshot.classes.items()):
if v['pct'] > 3.0:
classlist.append(k)
sizelist.append(v['sum'])
sizelist.insert(0, snapshot.asizeof_total - pylab_sum(sizelist))
classlist.insert(0, 'Other')
#sizelist = [x*0.01 for x in sizelist]
title("Snapshot (%s) Memory Distribution" % (snapshot.desc))
figure(figsize=(8,8))
axes([0.1, 0.1, 0.8, 0.8])
pie(sizelist, labels=classlist)
savefig(filename, dpi=50)
return self.chart_tag % (self.relative_path(filename)) | python | Create a pie chart that depicts the distribution of the allocated memory for a given `snapshot`. The chart is saved to `filename`. | train | https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/pympler/classtracker_stats.py#L680-L711 |
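The function uses the legacy `pylab` interface; the same kind of chart can be sketched with plain `matplotlib.pyplot` (class names and byte counts below are made-up sample data, not a real pympler snapshot):

```python
import matplotlib
matplotlib.use('Agg')                    # render off-screen, no display needed
import matplotlib.pyplot as plt

labels = ['Other', 'dict', 'list', 'MyClass']    # sample data
sizes = [120000, 480000, 260000, 310000]         # bytes per class, invented

fig = plt.figure(figsize=(8, 8))
fig.suptitle('Snapshot Memory Distribution')
plt.pie(sizes, labels=labels)
plt.savefig('memory_pie.png', dpi=50)
```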
7,303 | resonai/ybt | yabt/builders/cpp.py | compile_cc | def compile_cc(build_context, compiler_config, buildenv, sources,
workspace_dir, buildenv_workspace, cmd_env):
"""Compile list of C++ source files in a buildenv image
and return list of generated object file.
"""
objects = []
for src in sources:
obj_rel_path = '{}.o'.format(splitext(src)[0])
obj_file = join(buildenv_workspace, obj_rel_path)
include_paths = [buildenv_workspace] + compiler_config.include_path
compile_cmd = (
[compiler_config.compiler, '-o', obj_file, '-c'] +
compiler_config.compile_flags +
['-I{}'.format(path) for path in include_paths] +
[join(buildenv_workspace, src)])
# TODO: capture and transform error messages from compiler so file
# paths match host paths for smooth(er) editor / IDE integration
build_context.run_in_buildenv(buildenv, compile_cmd, cmd_env)
objects.append(
join(relpath(workspace_dir, build_context.conf.project_root),
obj_rel_path))
return objects | python | Compile list of C++ source files in a buildenv image and return list of generated object file. | train | https://github.com/resonai/ybt/blob/5b40df0922ef3383eb85f2b04a26a2db4b81b3fd/yabt/builders/cpp.py#L244-L265 |
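Each source file turns into one compiler invocation built as a plain list of strings. A standalone sketch of how such a command could be assembled (compiler, flags and paths are illustrative, not ybt's actual configuration):

```python
from os.path import join, splitext

workspace = '/tmp/ws'                               # illustrative workspace root
include_paths = [workspace, '/opt/libs/include']    # illustrative include path
src = 'foo/bar.cc'

obj_file = join(workspace, '{}.o'.format(splitext(src)[0]))
compile_cmd = (
    ['g++', '-o', obj_file, '-c'] +
    ['-O2', '-Wall'] +                              # stand-in compile flags
    ['-I{}'.format(path) for path in include_paths] +
    [join(workspace, src)])
print(' '.join(compile_cmd))                        # inspect the command instead of running it
```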
7,304 | DiamondLightSource/python-workflows | workflows/services/common_service.py | CommonService.__start_command_queue_listener | def __start_command_queue_listener(self):
"""Start the function __command_queue_listener in a separate thread. This
function continuously listens to the pipe connected to the frontend.
"""
thread_function = self.__command_queue_listener
class QueueListenerThread(threading.Thread):
def run(qltself):
thread_function()
assert not hasattr(self, "__queue_listener_thread")
self.log.debug("Starting queue listener thread")
self.__queue_listener_thread = QueueListenerThread()
self.__queue_listener_thread.daemon = True
self.__queue_listener_thread.name = "Command Queue Listener"
self.__queue_listener_thread.start() | python | Start the function __command_queue_listener in a separate thread. This function continuously listens to the pipe connected to the frontend. | train | https://github.com/DiamondLightSource/python-workflows/blob/7ef47b457655b96f4d2ef7ee9863cf1b6d20e023/workflows/services/common_service.py#L263-L278 |
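The one-off `Thread` subclass above is equivalent to handing the listener function straight to a daemon thread; a minimal standalone sketch:

```python
import threading
import time

def command_queue_listener():
    # Stand-in for the real loop that listens to the frontend pipe.
    for _ in range(3):
        time.sleep(0.1)

listener = threading.Thread(
    target=command_queue_listener,
    name="Command Queue Listener",
    daemon=True,              # do not keep the interpreter alive on shutdown
)
listener.start()
listener.join()               # joined here only so the example exits cleanly
```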
7,305 | tdegeus/GooseMPL | GooseMPL/__init__.py | subplots | def subplots(scale_x=None, scale_y=None, scale=None, **kwargs):
r'''
Run ``matplotlib.pyplot.subplots`` with ``figsize`` set to the correct multiple of the default.
:additional options:
**scale, scale_x, scale_y** (``<float>``)
Scale the figure-size (along one of the dimensions).
'''
if 'figsize' in kwargs: return plt.subplots(**kwargs)
width, height = mpl.rcParams['figure.figsize']
if scale is not None:
width *= scale
height *= scale
if scale_x is not None:
width *= scale_x
if scale_y is not None:
height *= scale_y
nrows = kwargs.pop('nrows', 1)
ncols = kwargs.pop('ncols', 1)
width = ncols * width
height = nrows * height
return plt.subplots(nrows=nrows, ncols=ncols, figsize=(width,height), **kwargs) | python | Run ``matplotlib.pyplot.subplots`` with ``figsize`` set to the correct multiple of the default. Additional options: **scale, scale_x, scale_y** (``<float>``) scale the figure-size (along one of the dimensions). | train | https://github.com/tdegeus/GooseMPL/blob/16e1e06cbcf7131ac98c03ca7251ce83734ef905/GooseMPL/__init__.py#L367-L397 |
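A short sketch of the same figsize-scaling idea with plain matplotlib (not importing GooseMPL itself):

```python
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt

nrows, ncols, scale = 2, 3, 1.5
width, height = matplotlib.rcParams['figure.figsize']   # the default single-axes size
fig, axes = plt.subplots(
    nrows=nrows, ncols=ncols,
    figsize=(ncols * width * scale, nrows * height * scale))
print(fig.get_size_inches())   # the default size multiplied by the grid and the scale
```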
7,306 | rueckstiess/mtools | mtools/util/logfile.py | LogFile.fast_forward | def fast_forward(self, start_dt):
"""
Fast-forward file to given start_dt datetime obj using binary search.
Only fast for files. Streams need to be forwarded manually, and it will
miss the first line that would otherwise match (as it consumes the log
line).
"""
if self.from_stdin:
# skip lines until start_dt is reached
return
else:
# fast bisection path
max_mark = self.filesize
step_size = max_mark
# check if start_dt is already smaller than first datetime
self.filehandle.seek(0)
le = self.next()
if le.datetime and le.datetime >= start_dt:
self.filehandle.seek(0)
return
le = None
self.filehandle.seek(0)
# search for lower bound
while abs(step_size) > 100:
step_size = ceil(step_size / 2.)
self.filehandle.seek(step_size, 1)
le = self._find_curr_line()
if not le:
break
if le.datetime >= start_dt:
step_size = -abs(step_size)
else:
step_size = abs(step_size)
if not le:
return
# now walk backwards until we found a truly smaller line
while self.filehandle.tell() >= 2 and (le.datetime is None or
le.datetime >= start_dt):
self.filehandle.seek(-2, 1)
le = self._find_curr_line(prev=True) | python | Fast-forward file to given start_dt datetime obj using binary search. Only fast for files. Streams need to be forwarded manually, and it will miss the first line that would otherwise match (as it consumes the log line). | train | https://github.com/rueckstiess/mtools/blob/a6a22910c3569c0c8a3908660ca218a4557e4249/mtools/util/logfile.py#L517-L566 |
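The seek-by-timestamp idea is easier to see on an in-memory list of datetimes; a toy sketch with `bisect` (the timestamps are invented, and a real log file needs the offset-based search above because lines have variable length):

```python
from bisect import bisect_left
from datetime import datetime, timedelta

# Pretend these are the datetimes parsed from consecutive log lines.
timestamps = [datetime(2024, 1, 1) + timedelta(minutes=i) for i in range(10000)]

start_dt = datetime(2024, 1, 1, 2, 30)
first_idx = bisect_left(timestamps, start_dt)   # first entry at or after start_dt
print(first_idx, timestamps[first_idx])
```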
7,307 | Kautenja/nes-py | nes_py/app/play_random.py | play_random | def play_random(env, steps):
"""
Play the environment making uniformly random decisions.
Args:
env (gym.Env): the initialized gym environment to play
steps (int): the number of random steps to take
Returns:
None
"""
try:
done = True
progress = tqdm(range(steps))
for _ in progress:
if done:
_ = env.reset()
action = env.action_space.sample()
_, reward, done, info = env.step(action)
progress.set_postfix(reward=reward, info=info)
env.render()
except KeyboardInterrupt:
pass
# close the environment
env.close() | python | Play the environment making uniformly random decisions. Args: env (gym.Env): the initialized gym environment to play; steps (int): the number of random steps to take. Returns: None | train | https://github.com/Kautenja/nes-py/blob/a113885198d418f38fcf24b8f79ac508975788c2/nes_py/app/play_random.py#L5-L30 |
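A rough sketch of the same random-play loop against a standard environment, using the classic Gym step API that this code targets (newer Gymnasium releases return five values from `step()` and a tuple from `reset()`):

```python
import gym   # classic Gym API (pre-0.26); adjust unpacking for Gymnasium

env = gym.make('CartPole-v1')        # any registered environment works here
done = True
for _ in range(200):
    if done:
        _ = env.reset()
    action = env.action_space.sample()          # uniformly random decision
    _, reward, done, info = env.step(action)
env.close()
```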
7,308 | tumblr/pytumblr | pytumblr/request.py | TumblrRequest.post_multipart | def post_multipart(self, url, params, files):
"""
Generates and issues a multipart request for data files
:param url: a string, the url you are requesting
:param params: a dict, a key-value of all the parameters
:param files: a dict, matching the form '{name: file descriptor}'
:returns: a dict parsed from the JSON response
"""
resp = requests.post(
url,
data=params,
params=params,
files=files,
headers=self.headers,
allow_redirects=False,
auth=self.oauth
)
return self.json_parse(resp) | python | Generates and issues a multipart request for data files. :param url: a string, the url you are requesting. :param params: a dict, a key-value of all the parameters. :param files: a dict, matching the form '{name: file descriptor}'. :returns: a dict parsed from the JSON response | train | https://github.com/tumblr/pytumblr/blob/4a5cd7c4b8ae78d12811d9fd52620afa1692a415/pytumblr/request.py#L100-L119 |
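A standalone sketch of the same multipart POST with `requests` (the endpoint and field names are placeholders, and the real pytumblr call additionally signs the request with OAuth1 headers):

```python
import io
import requests

url = 'https://httpbin.org/post'                 # placeholder endpoint
params = {'caption': 'hello'}
files = {'data': ('photo.jpg', io.BytesIO(b'not really a jpeg'))}

resp = requests.post(url, data=params, files=files, allow_redirects=False)
print(resp.json()['form'])                       # the non-file form fields echoed back
```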
7,309 | gabstopper/smc-python | smc/core/general.py | SNMP.update_configuration | def update_configuration(self, **kwargs):
"""
Update the SNMP configuration using any kwargs supported in the
`enable` constructor. Return whether a change was made. You must call
update on the engine to commit any changes.
:param dict kwargs: keyword arguments supported by enable constructor
:rtype: bool
"""
updated = False
if 'snmp_agent' in kwargs:
kwargs.update(snmp_agent_ref=kwargs.pop('snmp_agent'))
snmp_interface = kwargs.pop('snmp_interface', None)
for name, value in kwargs.items():
_value = element_resolver(value)
if getattr(self.engine, name, None) != _value:
self.engine.data[name] = _value
updated = True
if snmp_interface is not None:
_snmp_interface = getattr(self.engine, 'snmp_interface', [])
if not len(snmp_interface) and len(_snmp_interface):
self.engine.data.update(snmp_interface=[])
updated = True
elif len(snmp_interface):
if set(self._nicids) ^ set(map(str, snmp_interface)):
self.engine.data.update(
snmp_interface=self._iface_dict(snmp_interface))
updated = True
return updated | python | Update the SNMP configuration using any kwargs supported in the `enable` constructor. Return whether a change was made. You must call update on the engine to commit any changes. :param dict kwargs: keyword arguments supported by enable constructor. :rtype: bool | train | https://github.com/gabstopper/smc-python/blob/e027b8a5dcfaf884eada32d113d41c1e56b32457/smc/core/general.py#L85-L114 |
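The heart of the method is a "write only what changed" loop over keyword arguments; a minimal sketch of that pattern on a plain dict (no SMC engine objects involved):

```python
def update_configuration(config, **kwargs):
    """Apply kwargs to config, returning True only if something actually changed."""
    updated = False
    for name, value in kwargs.items():
        if config.get(name) != value:
            config[name] = value
            updated = True
    return updated

config = {'snmp_location': 'lab', 'snmp_agent_ref': 'agent-1'}   # invented settings
print(update_configuration(config, snmp_location='lab'))         # False: nothing changed
print(update_configuration(config, snmp_location='datacenter'))  # True: one field updated
```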
7,310 | aws/aws-encryption-sdk-python | src/aws_encryption_sdk/key_providers/kms.py | _region_from_key_id | def _region_from_key_id(key_id, default_region=None):
"""Determine the target region from a key ID, falling back to a default region if provided.
:param str key_id: AWS KMS key ID
:param str default_region: Region to use if no region found in key_id
:returns: region name
:rtype: str
:raises UnknownRegionError: if no region found in key_id and no default_region provided
"""
try:
region_name = key_id.split(":", 4)[3]
except IndexError:
if default_region is None:
raise UnknownRegionError(
"No default region found and no region determinable from key id: {}".format(key_id)
)
region_name = default_region
return region_name | python | Determine the target region from a key ID, falling back to a default region if provided. :param str key_id: AWS KMS key ID. :param str default_region: Region to use if no region found in key_id. :returns: region name. :rtype: str. :raises UnknownRegionError: if no region found in key_id and no default_region provided | train | https://github.com/aws/aws-encryption-sdk-python/blob/d182155d5fb1ef176d9e7d0647679737d5146495/src/aws_encryption_sdk/key_providers/kms.py#L35-L52 |
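The region really is just the fourth colon-separated field of a full KMS key ARN; a quick sketch with a made-up ARN and a bare key id:

```python
key_arn = 'arn:aws:kms:us-west-2:111122223333:key/deadbeef-0000-1111-2222-333344445555'
print(key_arn.split(':', 4)[3])   # -> 'us-west-2'

bare_key_id = 'deadbeef-0000-1111-2222-333344445555'
try:
    bare_key_id.split(':', 4)[3]
except IndexError:
    print('no region in the key id; fall back to the default region')
```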
7,311 | PaulHancock/Aegean | AegeanTools/cluster.py | regroup_vectorized | def regroup_vectorized(srccat, eps, far=None, dist=norm_dist):
"""
Regroup the islands of a catalog according to their normalised distance.
Assumes srccat is recarray-like for efficiency.
Return a list of island groups.
Parameters
----------
srccat : np.rec.arry or pd.DataFrame
Should have the following fields[units]:
ra[deg],dec[deg], a[arcsec],b[arcsec],pa[deg], peak_flux[any]
eps : float
maximum normalised distance within which sources are considered to be
grouped
far : float
(degrees) sources that are further than this distance apart will not
be grouped, and will not be tested.
Default = 0.5.
dist : func
a function that calculates the distance between a source and each
element of an array of sources.
Default = :func:`AegeanTools.cluster.norm_dist`
Returns
-------
islands : list of lists
Each island contians integer indices for members from srccat
(in descending dec order).
"""
if far is None:
far = 0.5 # 10*max(a.a/3600 for a in srccat)
# most negative declination first
# XXX: kind='mergesort' ensures stable sorting for determinism.
# Do we need this?
order = np.argsort(srccat.dec, kind='mergesort')[::-1]
# TODO: is it better to store groups as arrays even if appends are more
# costly?
groups = [[order[0]]]
for idx in order[1:]:
rec = srccat[idx]
# TODO: Find out if groups are big enough for this to give us a speed
# gain. If not, get distance to all entries in groups above
# decmin simultaneously.
decmin = rec.dec - far
for group in reversed(groups):
# when an island's largest (last) declination is smaller than
# decmin, we don't need to look at any more islands
if srccat.dec[group[-1]] < decmin:
# new group
groups.append([idx])
rafar = far / np.cos(np.radians(rec.dec))
group_recs = np.take(srccat, group, mode='clip')
group_recs = group_recs[abs(rec.ra - group_recs.ra) <= rafar]
if len(group_recs) and dist(rec, group_recs).min() < eps:
group.append(idx)
break
else:
# new group
groups.append([idx])
# TODO?: a more numpy-like interface would return only an array providing
# the mapping:
# group_idx = np.empty(len(srccat), dtype=int)
# for i, group in enumerate(groups):
# group_idx[group] = i
# return group_idx
return groups | python | Regroup the islands of a catalog according to their normalised distance. Assumes srccat is recarray-like for efficiency. Return a list of island groups. Parameters: srccat (np.rec.array or pd.DataFrame) should have the fields [units] ra[deg], dec[deg], a[arcsec], b[arcsec], pa[deg], peak_flux[any]; eps (float) maximum normalised distance within which sources are considered to be grouped; far (float, degrees) sources that are further than this distance apart will not be grouped, and will not be tested (default 0.5); dist (func) a function that calculates the distance between a source and each element of an array of sources (default AegeanTools.cluster.norm_dist). Returns: islands (list of lists), each island contains integer indices for members from srccat (in descending dec order). | train | https://github.com/PaulHancock/Aegean/blob/185d2b4a51b48441a1df747efc9a5271c79399fd/AegeanTools/cluster.py#L138-L206 |
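A toy version of the greedy grouping idea, using scalar positions and plain absolute separation instead of sky coordinates and `norm_dist` (all numbers invented):

```python
import numpy as np

positions = np.array([0.0, 0.1, 0.15, 5.0, 5.2, 9.9])   # made-up source positions
eps = 0.5                                               # grouping threshold

order = [int(i) for i in np.argsort(positions)[::-1]]   # mimic the descending-dec ordering
groups = [[order[0]]]
for idx in order[1:]:
    for group in groups:
        if np.abs(positions[group] - positions[idx]).min() < eps:
            group.append(idx)
            break
    else:
        groups.append([idx])
print(groups)   # three islands: [[5], [4, 3], [2, 1, 0]]
```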
7,312 | uw-it-aca/uw-restclients-nws | uw_nws/__init__.py | NWS.get_message_type_by_id | def get_message_type_by_id(self, message_type_id):
"""
Get a message type by message type ID
:param message_type_id: is the message type that
the client wants to retrieve
"""
self._validate_uuid(message_type_id)
url = "/notification/v1/message-type/{}".format(message_type_id)
response = NWS_DAO().getURL(url, self._write_headers())
if response.status != 200:
raise DataFailureException(url, response.status, response.data)
data = json.loads(response.data)
return self._message_type_from_json(data.get("MessageType")) | python | Get a message type by message type ID. :param message_type_id: is the message type that the client wants to retrieve | train | https://github.com/uw-it-aca/uw-restclients-nws/blob/ec6fd14342ffc883d14bcb53b2fe9bc288696027/uw_nws/__init__.py#L445-L459 |
7,313 | tensorflow/tensor2tensor | tensor2tensor/models/research/moe_experiments.py | xmoe_2d | def xmoe_2d():
"""Two-dimensional hierarchical mixture of 16 experts."""
hparams = xmoe_top_2()
hparams.decoder_layers = ["att", "hmoe"] * 4
hparams.mesh_shape = "b0:2;b1:4"
hparams.outer_batch_size = 4
hparams.layout = "outer_batch:b0;inner_batch:b1,expert_x:b1,expert_y:b0"
hparams.moe_num_experts = [4, 4]
return hparams | python | Two-dimensional hierarchical mixture of 16 experts. | train | https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/research/moe_experiments.py#L185-L193 |
7,314 | boriel/zxbasic | arch/zx48k/optimizer.py | BasicBlock.requires | def requires(self, i=0, end_=None):
""" Returns a list of registers and variables this block requires.
By default checks from the beginning (i = 0).
:param i: initial position of the block to examine
:param end_: final position to examine
:returns: registers safe to write
"""
if i < 0:
i = 0
end_ = len(self) if end_ is None or end_ > len(self) else end_
regs = {'a', 'b', 'c', 'd', 'e', 'f', 'h', 'l', 'i', 'ixh', 'ixl', 'iyh', 'iyl', 'sp'}
result = []
for ii in range(i, end_):
for r in self.mem[ii].requires:
r = r.lower()
if r in regs:
result.append(r)
regs.remove(r)
for r in self.mem[ii].destroys:
r = r.lower()
if r in regs:
regs.remove(r)
if not regs:
break
return result | python | Returns a list of registers and variables this block requires. By default checks from the beginning (i = 0). :param i: initial position of the block to examine. :param end_: final position to examine. :returns: registers safe to write | train | https://github.com/boriel/zxbasic/blob/23b28db10e41117805bdb3c0f78543590853b132/arch/zx48k/optimizer.py#L1580-L1608 |
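The scan is a forward liveness pass: a register counts as required only if something reads it before the block overwrites it. A toy version over (requires, destroys) pairs (the instruction list is invented, not real Z80 analysis output):

```python
instructions = [            # invented (requires, destroys) pairs per instruction
    ({'a'}, {'f'}),
    ({'b'}, {'a'}),
    ({'a'}, set()),         # reads 'a' again, but 'a' was already overwritten above
]

regs = {'a', 'b', 'c', 'd', 'e', 'f', 'h', 'l'}
result = []
for requires, destroys in instructions:
    for r in requires & regs:
        result.append(r)
        regs.discard(r)
    regs -= destroys
    if not regs:
        break
print(sorted(result))   # ['a', 'b']: only first reads of still-live registers count
```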
7,315 | google/grr | grr/core/grr_response_core/lib/rdfvalues/structs.py | ProtoRDFValue._GetPrimitiveEncoder | def _GetPrimitiveEncoder(self):
"""Finds the primitive encoder according to the type's data_store_type."""
# Decide what should the primitive type be for packing the target rdfvalue
# into the protobuf and create a delegate descriptor to control that.
primitive_cls = self._PROTO_DATA_STORE_LOOKUP[self.type.data_store_type]
self.primitive_desc = primitive_cls(**self._kwargs)
# Our wiretype is the same as the delegate's.
self.wire_type = self.primitive_desc.wire_type
self.proto_type_name = self.primitive_desc.proto_type_name
# Recalculate our tags.
self.CalculateTags() | python | Finds the primitive encoder according to the type's data_store_type. | train | https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/core/grr_response_core/lib/rdfvalues/structs.py#L1527-L1539 |
7,316 | jonathanslenders/textfsm | jtextfsm.py | TextFSMValue.Parse | def Parse(self, value):
"""Parse a 'Value' declaration.
Args:
value: String line from a template file, must begin with 'Value '.
Raises:
TextFSMTemplateError: Value declaration contains an error.
"""
value_line = value.split(' ')
if len(value_line) < 3:
raise TextFSMTemplateError('Expect at least 3 tokens on line.')
if not value_line[2].startswith('('):
# Options are present
options = value_line[1]
for option in options.split(','):
self._AddOption(option)
# Call option OnCreateOptions callbacks
[option.OnCreateOptions() for option in self.options]
self.name = value_line[2]
self.regex = ' '.join(value_line[3:])
else:
# There were no valid options, so there are no options.
# Treat this argument as the name.
self.name = value_line[1]
self.regex = ' '.join(value_line[2:])
if len(self.name) > self.max_name_len:
raise TextFSMTemplateError(
"Invalid Value name '%s' or name too long." % self.name)
if (not re.match(r'^\(.*\)$', self.regex) or
self.regex.count('(') != self.regex.count(')')):
raise TextFSMTemplateError(
"Value '%s' must be contained within a '()' pair." % self.regex)
self.template = re.sub(r'^\(', '(?P<%s>' % self.name, self.regex) | python | def Parse(self, value):
"""Parse a 'Value' declaration.
Args:
value: String line from a template file, must begin with 'Value '.
Raises:
TextFSMTemplateError: Value declaration contains an error.
"""
value_line = value.split(' ')
if len(value_line) < 3:
raise TextFSMTemplateError('Expect at least 3 tokens on line.')
if not value_line[2].startswith('('):
# Options are present
options = value_line[1]
for option in options.split(','):
self._AddOption(option)
# Call option OnCreateOptions callbacks
[option.OnCreateOptions() for option in self.options]
self.name = value_line[2]
self.regex = ' '.join(value_line[3:])
else:
# There were no valid options, so there are no options.
# Treat this argument as the name.
self.name = value_line[1]
self.regex = ' '.join(value_line[2:])
if len(self.name) > self.max_name_len:
raise TextFSMTemplateError(
"Invalid Value name '%s' or name too long." % self.name)
if (not re.match(r'^\(.*\)$', self.regex) or
self.regex.count('(') != self.regex.count(')')):
raise TextFSMTemplateError(
"Value '%s' must be contained within a '()' pair." % self.regex)
self.template = re.sub(r'^\(', '(?P<%s>' % self.name, self.regex) | ['def', 'Parse', '(', 'self', ',', 'value', ')', ':', 'value_line', '=', 'value', '.', 'split', '(', "' '", ')', 'if', 'len', '(', 'value_line', ')', '<', '3', ':', 'raise', 'TextFSMTemplateError', '(', "'Expect at least 3 tokens on line.'", ')', 'if', 'not', 'value_line', '[', '2', ']', '.', 'startswith', '(', "'('", ')', ':', '# Options are present', 'options', '=', 'value_line', '[', '1', ']', 'for', 'option', 'in', 'options', '.', 'split', '(', "','", ')', ':', 'self', '.', '_AddOption', '(', 'option', ')', '# Call option OnCreateOptions callbacks', '[', 'option', '.', 'OnCreateOptions', '(', ')', 'for', 'option', 'in', 'self', '.', 'options', ']', 'self', '.', 'name', '=', 'value_line', '[', '2', ']', 'self', '.', 'regex', '=', "' '", '.', 'join', '(', 'value_line', '[', '3', ':', ']', ')', 'else', ':', '# There were no valid options, so there are no options.', '# Treat this argument as the name.', 'self', '.', 'name', '=', 'value_line', '[', '1', ']', 'self', '.', 'regex', '=', "' '", '.', 'join', '(', 'value_line', '[', '2', ':', ']', ')', 'if', 'len', '(', 'self', '.', 'name', ')', '>', 'self', '.', 'max_name_len', ':', 'raise', 'TextFSMTemplateError', '(', '"Invalid Value name \'%s\' or name too long."', '%', 'self', '.', 'name', ')', 'if', '(', 'not', 're', '.', 'match', '(', "r'^\\(.*\\)$'", ',', 'self', '.', 'regex', ')', 'or', 'self', '.', 'regex', '.', 'count', '(', "'('", ')', '!=', 'self', '.', 'regex', '.', 'count', '(', "')'", ')', ')', ':', 'raise', 'TextFSMTemplateError', '(', '"Value \'%s\' must be contained within a \'()\' pair."', '%', 'self', '.', 'regex', ')', 'self', '.', 'template', '=', 're', '.', 'sub', '(', "r'^\\('", ',', "'(?P<%s>'", '%', 'self', '.', 'name', ',', 'self', '.', 'regex', ')'] | Parse a 'Value' declaration.
Args:
value: String line from a template file, must begin with 'Value '.
Raises:
TextFSMTemplateError: Value declaration contains an error. | ['Parse', 'a', 'Value', 'declaration', '.'] | train | https://github.com/jonathanslenders/textfsm/blob/cca5084512d14bc367205aceb34c938ac1c65daf/jtextfsm.py#L251-L291 |
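The Parse method above rewrites a template 'Value' line into a named-group regex by replacing the leading '(' with '(?P<name>'. A minimal standalone sketch of that rewrite, using only the standard library; the example template line is hypothetical.
import re

# Hypothetical TextFSM 'Value' line: "Value <options> <name> <regex>"
value_line = "Value Filldown,Required interface (\\S+)".split(' ')
name = value_line[2]              # "interface"
regex = ' '.join(value_line[3:])  # "(\S+)"

# Same rewrite as self.template above: turn the group into a named group.
template = re.sub(r'^\(', '(?P<%s>' % name, regex)
print(template)                                            # (?P<interface>\S+)
print(re.match(template, "GigabitEthernet0/1").group("interface"))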
7,317 | pkgw/pwkit | pwkit/sherpa.py | make_multi_qq_plots | def make_multi_qq_plots(arrays, key_text):
"""Make a quantile-quantile plot comparing multiple sets of events and models.
*arrays*
X.
*key_text*
Text describing the quantile-quantile comparison quantity; will be
shown on the plot legend.
Returns:
An :class:`omega.RectPlot` instance.
*TODO*: nothing about this is Sherpa-specific. Same goes for some of the
plotting routines in :mod:`pkwit.environments.casa.data`; might be
reasonable to add a submodule for generic X-ray-y plotting routines.
*TODO*: Some gross code duplication here.
"""
import omega as om
p = om.RectPlot()
p.addXY([0, 1.], [0, 1.], '1:1')
for index, array in enumerate(arrays):
kev, obs, mdl = array
c_obs = np.cumsum(obs)
c_mdl = np.cumsum(mdl)
mx = 0.5 * (c_obs[-1] + c_mdl[-1])
c_obs /= mx
c_mdl /= mx
p.addXY(c_mdl, c_obs, '%s #%d' % (key_text, index))
# HACK: this range of numbers is chosen to give reasonable sampling for my
# sources, which are typically quite soft.
#
# Note: this reuses the variables from the last loop iteration.
locs = np.array([0, 0.05, 0.08, 0.11, 0.17, 0.3, 0.4, 0.7, 1]) * (kev.size - 2)
c0 = 1.05
c1 = 1.1
for loc in locs:
i0 = int(np.floor(loc))
frac = loc - i0
kevval = (1 - frac) * kev[i0] + frac * kev[i0+1]
mdlval = (1 - frac) * c_mdl[i0] + frac * c_mdl[i0+1]
obsval = (1 - frac) * c_obs[i0] + frac * c_obs[i0+1]
p.addXY([mdlval, mdlval], [c0, c1], '%.2f keV' % kevval, dsn=2)
p.addXY([c0, c1], [obsval, obsval], None, dsn=2)
p.setLabels('Cumulative rescaled model', 'Cumulative rescaled data')
p.defaultKeyOverlay.vAlign = 0.3
return p | python | def make_multi_qq_plots(arrays, key_text):
"""Make a quantile-quantile plot comparing multiple sets of events and models.
*arrays*
X.
*key_text*
Text describing the quantile-quantile comparison quantity; will be
shown on the plot legend.
Returns:
An :class:`omega.RectPlot` instance.
*TODO*: nothing about this is Sherpa-specific. Same goes for some of the
plotting routines in :mod:`pwkit.environments.casa.data`; might be
reasonable to add a submodule for generic X-ray-y plotting routines.
*TODO*: Some gross code duplication here.
"""
import omega as om
p = om.RectPlot()
p.addXY([0, 1.], [0, 1.], '1:1')
for index, array in enumerate(arrays):
kev, obs, mdl = array
c_obs = np.cumsum(obs)
c_mdl = np.cumsum(mdl)
mx = 0.5 * (c_obs[-1] + c_mdl[-1])
c_obs /= mx
c_mdl /= mx
p.addXY(c_mdl, c_obs, '%s #%d' % (key_text, index))
# HACK: this range of numbers is chosen to give reasonable sampling for my
# sources, which are typically quite soft.
#
# Note: this reuses the variables from the last loop iteration.
locs = np.array([0, 0.05, 0.08, 0.11, 0.17, 0.3, 0.4, 0.7, 1]) * (kev.size - 2)
c0 = 1.05
c1 = 1.1
for loc in locs:
i0 = int(np.floor(loc))
frac = loc - i0
kevval = (1 - frac) * kev[i0] + frac * kev[i0+1]
mdlval = (1 - frac) * c_mdl[i0] + frac * c_mdl[i0+1]
obsval = (1 - frac) * c_obs[i0] + frac * c_obs[i0+1]
p.addXY([mdlval, mdlval], [c0, c1], '%.2f keV' % kevval, dsn=2)
p.addXY([c0, c1], [obsval, obsval], None, dsn=2)
p.setLabels('Cumulative rescaled model', 'Cumulative rescaled data')
p.defaultKeyOverlay.vAlign = 0.3
return p | ['def', 'make_multi_qq_plots', '(', 'arrays', ',', 'key_text', ')', ':', 'import', 'omega', 'as', 'om', 'p', '=', 'om', '.', 'RectPlot', '(', ')', 'p', '.', 'addXY', '(', '[', '0', ',', '1.', ']', ',', '[', '0', ',', '1.', ']', ',', "'1:1'", ')', 'for', 'index', ',', 'array', 'in', 'enumerate', '(', 'arrays', ')', ':', 'kev', ',', 'obs', ',', 'mdl', '=', 'array', 'c_obs', '=', 'np', '.', 'cumsum', '(', 'obs', ')', 'c_mdl', '=', 'np', '.', 'cumsum', '(', 'mdl', ')', 'mx', '=', '0.5', '*', '(', 'c_obs', '[', '-', '1', ']', '+', 'c_mdl', '[', '-', '1', ']', ')', 'c_obs', '/=', 'mx', 'c_mdl', '/=', 'mx', 'p', '.', 'addXY', '(', 'c_mdl', ',', 'c_obs', ',', "'%s #%d'", '%', '(', 'key_text', ',', 'index', ')', ')', '# HACK: this range of numbers is chosen to give reasonable sampling for my', '# sources, which are typically quite soft.', '#', '# Note: this reuses the variables from the last loop iteration.', 'locs', '=', 'np', '.', 'array', '(', '[', '0', ',', '0.05', ',', '0.08', ',', '0.11', ',', '0.17', ',', '0.3', ',', '0.4', ',', '0.7', ',', '1', ']', ')', '*', '(', 'kev', '.', 'size', '-', '2', ')', 'c0', '=', '1.05', 'c1', '=', '1.1', 'for', 'loc', 'in', 'locs', ':', 'i0', '=', 'int', '(', 'np', '.', 'floor', '(', 'loc', ')', ')', 'frac', '=', 'loc', '-', 'i0', 'kevval', '=', '(', '1', '-', 'frac', ')', '*', 'kev', '[', 'i0', ']', '+', 'frac', '*', 'kev', '[', 'i0', '+', '1', ']', 'mdlval', '=', '(', '1', '-', 'frac', ')', '*', 'c_mdl', '[', 'i0', ']', '+', 'frac', '*', 'c_mdl', '[', 'i0', '+', '1', ']', 'obsval', '=', '(', '1', '-', 'frac', ')', '*', 'c_obs', '[', 'i0', ']', '+', 'frac', '*', 'c_obs', '[', 'i0', '+', '1', ']', 'p', '.', 'addXY', '(', '[', 'mdlval', ',', 'mdlval', ']', ',', '[', 'c0', ',', 'c1', ']', ',', "'%.2f keV'", '%', 'kevval', ',', 'dsn', '=', '2', ')', 'p', '.', 'addXY', '(', '[', 'c0', ',', 'c1', ']', ',', '[', 'obsval', ',', 'obsval', ']', ',', 'None', ',', 'dsn', '=', '2', ')', 'p', '.', 'setLabels', '(', "'Cumulative rescaled model'", ',', "'Cumulative rescaled data'", ')', 'p', '.', 'defaultKeyOverlay', '.', 'vAlign', '=', '0.3', 'return', 'p'] | Make a quantile-quantile plot comparing multiple sets of events and models.
*arrays*
X.
*key_text*
Text describing the quantile-quantile comparison quantity; will be
shown on the plot legend.
Returns:
An :class:`omega.RectPlot` instance.
*TODO*: nothing about this is Sherpa-specific. Same goes for some of the
plotting routines in :mod:`pwkit.environments.casa.data`; might be
reasonable to add a submodule for generic X-ray-y plotting routines.
*TODO*: Some gross code duplication here. | ['Make', 'a', 'quantile', '-', 'quantile', 'plot', 'comparing', 'multiple', 'sets', 'of', 'events', 'and', 'models', '.'] | train | https://github.com/pkgw/pwkit/blob/d40957a1c3d2ea34e7ceac2267ee9635135f2793/pwkit/sherpa.py#L478-L531 |
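The plotting itself needs the omega package, but the quantile-quantile bookkeeping above is plain numpy: both curves are cumulative sums rescaled by the mean of their totals. A self-contained sketch of just that step, with made-up observed and model rates.
import numpy as np

rng = np.random.default_rng(0)
kev = np.linspace(0.5, 7.0, 64)          # made-up energy grid
mdl = np.exp(-kev)                        # made-up model rate per bin
obs = rng.poisson(100 * mdl) / 100.0      # made-up observed rate per bin

c_obs = np.cumsum(obs)
c_mdl = np.cumsum(mdl)
mx = 0.5 * (c_obs[-1] + c_mdl[-1])        # shared normalization, as above
c_obs /= mx
c_mdl /= mx

# Points near the 1:1 line mean the model tracks the data at that energy.
for m, o in zip(c_mdl[::16], c_obs[::16]):
    print("%.3f  %.3f" % (m, o))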
7,318 | erdewit/ib_insync | ib_insync/ib.py | IB.reqTickByTickData | def reqTickByTickData(
self, contract: Contract, tickType: str,
numberOfTicks: int = 0, ignoreSize: bool = False) -> Ticker:
"""
Subscribe to tick-by-tick data and return the Ticker that
holds the ticks in ticker.tickByTicks.
https://interactivebrokers.github.io/tws-api/tick_data.html
Args:
contract: Contract of interest.
tickType: One of 'Last', 'AllLast', 'BidAsk' or 'MidPoint'.
numberOfTicks: Number of ticks or 0 for unlimited.
ignoreSize: Ignore bid/ask ticks that only update the size.
"""
reqId = self.client.getReqId()
ticker = self.wrapper.startTicker(reqId, contract, tickType)
self.client.reqTickByTickData(
reqId, contract, tickType, numberOfTicks, ignoreSize)
return ticker | python | def reqTickByTickData(
self, contract: Contract, tickType: str,
numberOfTicks: int = 0, ignoreSize: bool = False) -> Ticker:
"""
Subscribe to tick-by-tick data and return the Ticker that
holds the ticks in ticker.tickByTicks.
https://interactivebrokers.github.io/tws-api/tick_data.html
Args:
contract: Contract of interest.
tickType: One of 'Last', 'AllLast', 'BidAsk' or 'MidPoint'.
numberOfTicks: Number of ticks or 0 for unlimited.
ignoreSize: Ignore bid/ask ticks that only update the size.
"""
reqId = self.client.getReqId()
ticker = self.wrapper.startTicker(reqId, contract, tickType)
self.client.reqTickByTickData(
reqId, contract, tickType, numberOfTicks, ignoreSize)
return ticker | ['def', 'reqTickByTickData', '(', 'self', ',', 'contract', ':', 'Contract', ',', 'tickType', ':', 'str', ',', 'numberOfTicks', ':', 'int', '=', '0', ',', 'ignoreSize', ':', 'bool', '=', 'False', ')', '->', 'Ticker', ':', 'reqId', '=', 'self', '.', 'client', '.', 'getReqId', '(', ')', 'ticker', '=', 'self', '.', 'wrapper', '.', 'startTicker', '(', 'reqId', ',', 'contract', ',', 'tickType', ')', 'self', '.', 'client', '.', 'reqTickByTickData', '(', 'reqId', ',', 'contract', ',', 'tickType', ',', 'numberOfTicks', ',', 'ignoreSize', ')', 'return', 'ticker'] | Subscribe to tick-by-tick data and return the Ticker that
holds the ticks in ticker.tickByTicks.
https://interactivebrokers.github.io/tws-api/tick_data.html
Args:
contract: Contract of interest.
tickType: One of 'Last', 'AllLast', 'BidAsk' or 'MidPoint'.
numberOfTicks: Number of ticks or 0 for unlimited.
ignoreSize: Ignore bid/ask ticks that only update the size. | ['Subscribe', 'to', 'tick', '-', 'by', '-', 'tick', 'data', 'and', 'return', 'the', 'Ticker', 'that', 'holds', 'the', 'ticks', 'in', 'ticker', '.', 'tickByTicks', '.'] | train | https://github.com/erdewit/ib_insync/blob/d0646a482590f5cb7bfddbd1f0870f8c4bc1df80/ib_insync/ib.py#L1172-L1191 |
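A hedged usage sketch for the subscription above; the gateway address, the Forex contract and the cancel call are assumptions about the surrounding ib_insync workflow, not part of this record.
from ib_insync import IB, Forex

ib = IB()
ib.connect('127.0.0.1', 7497, clientId=1)     # assumes a running TWS/gateway

ticker = ib.reqTickByTickData(Forex('EURUSD'), 'BidAsk')
ib.sleep(5)                                   # let a few ticks arrive
print(ticker.tickByTicks[-5:])                # ticks accumulate on the Ticker

ib.cancelTickByTickData(ticker.contract, 'BidAsk')   # assumed counterpart call
ib.disconnect()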
7,319 | nyaruka/smartmin | smartmin/management/commands/collect_sql.py | Command.normalize_operations | def normalize_operations(self, operations):
"""
Removes redundant SQL operations - e.g. a CREATE X followed by a DROP X
"""
normalized = OrderedDict()
for operation in operations:
op_key = (operation.sql_type, operation.obj_name)
# do we already have an operation for this object?
if op_key in normalized:
if self.verbosity >= 2:
self.stdout.write(" < %s" % normalized[op_key])
del normalized[op_key]
# don't add DROP operations for objects not previously created
if operation.is_create:
normalized[op_key] = operation
elif self.verbosity >= 2:
self.stdout.write(" < %s" % operation)
return normalized.values() | python | def normalize_operations(self, operations):
"""
Removes redundant SQL operations - e.g. a CREATE X followed by a DROP X
"""
normalized = OrderedDict()
for operation in operations:
op_key = (operation.sql_type, operation.obj_name)
# do we already have an operation for this object?
if op_key in normalized:
if self.verbosity >= 2:
self.stdout.write(" < %s" % normalized[op_key])
del normalized[op_key]
# don't add DROP operations for objects not previously created
if operation.is_create:
normalized[op_key] = operation
elif self.verbosity >= 2:
self.stdout.write(" < %s" % operation)
return normalized.values() | ['def', 'normalize_operations', '(', 'self', ',', 'operations', ')', ':', 'normalized', '=', 'OrderedDict', '(', ')', 'for', 'operation', 'in', 'operations', ':', 'op_key', '=', '(', 'operation', '.', 'sql_type', ',', 'operation', '.', 'obj_name', ')', '# do we already have an operation for this object?', 'if', 'op_key', 'in', 'normalized', ':', 'if', 'self', '.', 'verbosity', '>=', '2', ':', 'self', '.', 'stdout', '.', 'write', '(', '" < %s"', '%', 'normalized', '[', 'op_key', ']', ')', 'del', 'normalized', '[', 'op_key', ']', "# don't add DROP operations for objects not previously created", 'if', 'operation', '.', 'is_create', ':', 'normalized', '[', 'op_key', ']', '=', 'operation', 'elif', 'self', '.', 'verbosity', '>=', '2', ':', 'self', '.', 'stdout', '.', 'write', '(', '" < %s"', '%', 'operation', ')', 'return', 'normalized', '.', 'values', '(', ')'] | Removes redundant SQL operations - e.g. a CREATE X followed by a DROP X | ['Removes', 'redundant', 'SQL', 'operations', '-', 'e', '.', 'g', '.', 'a', 'CREATE', 'X', 'followed', 'by', 'a', 'DROP', 'X'] | train | https://github.com/nyaruka/smartmin/blob/488a676a4960555e4d216a7b95d6e01a4ad4efd8/smartmin/management/commands/collect_sql.py#L156-L178 |
7,320 | thebigmunch/google-music | src/google_music/clients/mobileclient.py | MobileClient.playlist_song_delete | def playlist_song_delete(self, playlist_song):
"""Delete song from playlist.
Parameters:
playlist_song (str): A playlist song dict.
Returns:
dict: Playlist dict including songs.
"""
self.playlist_songs_delete([playlist_song])
return self.playlist(playlist_song['playlistId'], include_songs=True) | python | def playlist_song_delete(self, playlist_song):
"""Delete song from playlist.
Parameters:
playlist_song (str): A playlist song dict.
Returns:
dict: Playlist dict including songs.
"""
self.playlist_songs_delete([playlist_song])
return self.playlist(playlist_song['playlistId'], include_songs=True) | ['def', 'playlist_song_delete', '(', 'self', ',', 'playlist_song', ')', ':', 'self', '.', 'playlist_songs_delete', '(', '[', 'playlist_song', ']', ')', 'return', 'self', '.', 'playlist', '(', 'playlist_song', '[', "'playlistId'", ']', ',', 'include_songs', '=', 'True', ')'] | Delete song from playlist.
Parameters:
playlist_song (str): A playlist song dict.
Returns:
dict: Playlist dict including songs. | ['Delete', 'song', 'from', 'playlist', '.'] | train | https://github.com/thebigmunch/google-music/blob/d8a94dab462a1f063fbc1152187a73dc2f0e2a85/src/google_music/clients/mobileclient.py#L552-L564 |
7,321 | jpscaletti/pyceo | pyceo/manager.py | Manager.command | def command(self, group=None, help="", name=None):
"""Decorator for adding a command to this manager."""
def decorator(func):
return self.add_command(func, group=group, help=help, name=name)
return decorator | python | def command(self, group=None, help="", name=None):
"""Decorator for adding a command to this manager."""
def decorator(func):
return self.add_command(func, group=group, help=help, name=name)
return decorator | ['def', 'command', '(', 'self', ',', 'group', '=', 'None', ',', 'help', '=', '""', ',', 'name', '=', 'None', ')', ':', 'def', 'decorator', '(', 'func', ')', ':', 'return', 'self', '.', 'add_command', '(', 'func', ',', 'group', '=', 'group', ',', 'help', '=', 'help', ',', 'name', '=', 'name', ')', 'return', 'decorator'] | Decorator for adding a command to this manager. | ['Decorator', 'for', 'adding', 'a', 'command', 'to', 'this', 'manager', '.'] | train | https://github.com/jpscaletti/pyceo/blob/7f37eaf8e557d25f8e54634176139e0aad84b8df/pyceo/manager.py#L57-L61 |
7,322 | timothyb0912/pylogit | pylogit/bootstrap_sampler.py | create_deepcopied_groupby_dict | def create_deepcopied_groupby_dict(orig_df, obs_id_col):
"""
Will create a dictionary where each key corresponds to a unique value in
`orig_df[obs_id_col]` and each value corresponds to all of the rows of
`orig_df` where `orig_df[obs_id_col] == key`.
Parameters
----------
orig_df : pandas DataFrame.
Should be long-format dataframe containing the data used to estimate
the desired choice model.
obs_id_col : str.
Should be a column name within `orig_df`. Should denote the original
observation id column.
Returns
-------
groupby_dict : dict.
Each key will be a unique value in `orig_df[obs_id_col]` and each value
will be the rows of `orig_df` where `orig_df[obs_id_col] == key`.
"""
# Get the observation id values
obs_id_vals = orig_df[obs_id_col].values
# Get the unique observation ids
unique_obs_ids = np.unique(obs_id_vals)
# Initialize the dictionary to be returned.
groupby_dict = {}
# Populate the dictionary with dataframes for each individual.
for obs_id in unique_obs_ids:
# Filter out only the rows corresponding to the current observation id.
desired_rows = obs_id_vals == obs_id
# Add the desired dataframe to the dictionary.
groupby_dict[obs_id] = orig_df.loc[desired_rows].copy(deep=True)
# Return the desired object.
return groupby_dict | python | def create_deepcopied_groupby_dict(orig_df, obs_id_col):
"""
Will create a dictionary where each key corresponds to a unique value in
`orig_df[obs_id_col]` and each value corresponds to all of the rows of
`orig_df` where `orig_df[obs_id_col] == key`.
Parameters
----------
orig_df : pandas DataFrame.
Should be long-format dataframe containing the data used to estimate
the desired choice model.
obs_id_col : str.
Should be a column name within `orig_df`. Should denote the original
observation id column.
Returns
-------
groupby_dict : dict.
Each key will be a unique value in `orig_df[obs_id_col]` and each value
will be the rows of `orig_df` where `orig_df[obs_id_col] == key`.
"""
# Get the observation id values
obs_id_vals = orig_df[obs_id_col].values
# Get the unique observation ids
unique_obs_ids = np.unique(obs_id_vals)
# Initialize the dictionary to be returned.
groupby_dict = {}
# Populate the dictionary with dataframes for each individual.
for obs_id in unique_obs_ids:
# Filter out only the rows corresponding to the current observation id.
desired_rows = obs_id_vals == obs_id
# Add the desired dataframe to the dictionary.
groupby_dict[obs_id] = orig_df.loc[desired_rows].copy(deep=True)
# Return the desired object.
return groupby_dict | ['def', 'create_deepcopied_groupby_dict', '(', 'orig_df', ',', 'obs_id_col', ')', ':', '# Get the observation id values', 'obs_id_vals', '=', 'orig_df', '[', 'obs_id_col', ']', '.', 'values', '# Get the unique observation ids', 'unique_obs_ids', '=', 'np', '.', 'unique', '(', 'obs_id_vals', ')', '# Initialize the dictionary to be returned.', 'groupby_dict', '=', '{', '}', '# Populate the dictionary with dataframes for each individual.', 'for', 'obs_id', 'in', 'unique_obs_ids', ':', '# Filter out only the rows corresponding to the current observation id.', 'desired_rows', '=', 'obs_id_vals', '==', 'obs_id', '# Add the desired dataframe to the dictionary.', 'groupby_dict', '[', 'obs_id', ']', '=', 'orig_df', '.', 'loc', '[', 'desired_rows', ']', '.', 'copy', '(', 'deep', '=', 'True', ')', '# Return the desired object.', 'return', 'groupby_dict'] | Will create a dictionary where each key corresponds to a unique value in
`orig_df[obs_id_col]` and each value corresponds to all of the rows of
`orig_df` where `orig_df[obs_id_col] == key`.
Parameters
----------
orig_df : pandas DataFrame.
Should be long-format dataframe containing the data used to estimate
the desired choice model.
obs_id_col : str.
Should be a column name within `orig_df`. Should denote the original
observation id column.
Returns
-------
groupby_dict : dict.
Each key will be a unique value in `orig_df[obs_id_col]` and each value
will be the rows of `orig_df` where `orig_df[obs_id_col] == key`. | ['Will', 'create', 'a', 'dictionary', 'where', 'each', 'key', 'corresponds', 'to', 'a', 'unique', 'value', 'in', 'orig_df', '[', 'obs_id_col', ']', 'and', 'each', 'value', 'corresponds', 'to', 'all', 'of', 'the', 'rows', 'of', 'orig_df', 'where', 'orig_df', '[', 'obs_id_col', ']', '==', 'key', '.'] | train | https://github.com/timothyb0912/pylogit/blob/f83b0fd6debaa7358d87c3828428f6d4ead71357/pylogit/bootstrap_sampler.py#L207-L242 |
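The helper above is plain pandas bookkeeping; a self-contained sketch of the same per-observation split with a made-up long-format dataframe.
import numpy as np
import pandas as pd

long_df = pd.DataFrame({'obs_id': [1, 1, 2, 2, 2],
                        'alt_id': [0, 1, 0, 1, 2],
                        'choice': [1, 0, 0, 1, 0]})

groupby_dict = {}
for obs_id in np.unique(long_df['obs_id'].values):
    desired_rows = long_df['obs_id'].values == obs_id
    groupby_dict[obs_id] = long_df.loc[desired_rows].copy(deep=True)

print(groupby_dict[2])   # only the obs_id == 2 rows, as an independent copy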
7,323 | dnanexus/dx-toolkit | src/python/dxpy/bindings/dxanalysis.py | DXAnalysis.add_tags | def add_tags(self, tags, **kwargs):
"""
:param tags: Tags to add to the analysis
:type tags: list of strings
Adds each of the specified tags to the analysis. Takes no
action for tags that are already listed for the analysis.
"""
dxpy.api.analysis_add_tags(self._dxid, {"tags": tags}, **kwargs) | python | def add_tags(self, tags, **kwargs):
"""
:param tags: Tags to add to the analysis
:type tags: list of strings
Adds each of the specified tags to the analysis. Takes no
action for tags that are already listed for the analysis.
"""
dxpy.api.analysis_add_tags(self._dxid, {"tags": tags}, **kwargs) | ['def', 'add_tags', '(', 'self', ',', 'tags', ',', '*', '*', 'kwargs', ')', ':', 'dxpy', '.', 'api', '.', 'analysis_add_tags', '(', 'self', '.', '_dxid', ',', '{', '"tags"', ':', 'tags', '}', ',', '*', '*', 'kwargs', ')'] | :param tags: Tags to add to the analysis
:type tags: list of strings
Adds each of the specified tags to the analysis. Takes no
action for tags that are already listed for the analysis. | [':', 'param', 'tags', ':', 'Tags', 'to', 'add', 'to', 'the', 'analysis', ':', 'type', 'tags', ':', 'list', 'of', 'strings'] | train | https://github.com/dnanexus/dx-toolkit/blob/74befb53ad90fcf902d8983ae6d74580f402d619/src/python/dxpy/bindings/dxanalysis.py#L70-L80 |
7,324 | facebook/pyre-check | sapp/sapp/analysis_output.py | AnalysisOutput.file_handles | def file_handles(self) -> Iterable[IO[str]]:
"""Generates all file handles represented by the analysis.
Callee owns file handle and closes it when the next is yielded or the
generator ends.
"""
if self.file_handle:
yield self.file_handle
self.file_handle.close()
self.file_handle = None
else:
for name in self.file_names():
with open(name, "r") as f:
yield f | python | def file_handles(self) -> Iterable[IO[str]]:
"""Generates all file handles represented by the analysis.
Callee owns file handle and closes it when the next is yielded or the
generator ends.
"""
if self.file_handle:
yield self.file_handle
self.file_handle.close()
self.file_handle = None
else:
for name in self.file_names():
with open(name, "r") as f:
yield f | ['def', 'file_handles', '(', 'self', ')', '->', 'Iterable', '[', 'IO', '[', 'str', ']', ']', ':', 'if', 'self', '.', 'file_handle', ':', 'yield', 'self', '.', 'file_handle', 'self', '.', 'file_handle', '.', 'close', '(', ')', 'self', '.', 'file_handle', '=', 'None', 'else', ':', 'for', 'name', 'in', 'self', '.', 'file_names', '(', ')', ':', 'with', 'open', '(', 'name', ',', '"r"', ')', 'as', 'f', ':', 'yield', 'f'] | Generates all file handles represented by the analysis.
Callee owns file handle and closes it when the next is yielded or the
generator ends. | ['Generates', 'all', 'file', 'handles', 'represented', 'by', 'the', 'analysis', '.', 'Callee', 'owns', 'file', 'handle', 'and', 'closes', 'it', 'when', 'the', 'next', 'is', 'yielded', 'or', 'the', 'generator', 'ends', '.'] | train | https://github.com/facebook/pyre-check/blob/4a9604d943d28ef20238505a51acfb1f666328d7/sapp/sapp/analysis_output.py#L107-L119 |
7,325 | airspeed-velocity/asv | asv/benchmark.py | Benchmark.insert_param | def insert_param(self, param):
"""
Insert a parameter at the front of the parameter list.
"""
self._current_params = tuple([param] + list(self._current_params)) | python | def insert_param(self, param):
"""
Insert a parameter at the front of the parameter list.
"""
self._current_params = tuple([param] + list(self._current_params)) | ['def', 'insert_param', '(', 'self', ',', 'param', ')', ':', 'self', '.', '_current_params', '=', 'tuple', '(', '[', 'param', ']', '+', 'list', '(', 'self', '.', '_current_params', ')', ')'] | Insert a parameter at the front of the parameter list. | ['Insert', 'a', 'parameter', 'at', 'the', 'front', 'of', 'the', 'parameter', 'list', '.'] | train | https://github.com/airspeed-velocity/asv/blob/d23bb8b74e8adacbfa3cf5724bda55fb39d56ba6/asv/benchmark.py#L462-L466 |
7,326 | mailgun/talon | talon/utils.py | to_utf8 | def to_utf8(str_or_unicode):
"""
Safely returns a UTF-8 version of a given string
>>> utils.to_utf8(u'hi')
'hi'
"""
if not isinstance(str_or_unicode, six.text_type):
return str_or_unicode.encode("utf-8", "ignore")
return str(str_or_unicode) | python | def to_utf8(str_or_unicode):
"""
Safely returns a UTF-8 version of a given string
>>> utils.to_utf8(u'hi')
'hi'
"""
if not isinstance(str_or_unicode, six.text_type):
return str_or_unicode.encode("utf-8", "ignore")
return str(str_or_unicode) | ['def', 'to_utf8', '(', 'str_or_unicode', ')', ':', 'if', 'not', 'isinstance', '(', 'str_or_unicode', ',', 'six', '.', 'text_type', ')', ':', 'return', 'str_or_unicode', '.', 'encode', '(', '"utf-8"', ',', '"ignore"', ')', 'return', 'str', '(', 'str_or_unicode', ')'] | Safely returns a UTF-8 version of a given string
>>> utils.to_utf8(u'hi')
'hi' | ['Safely', 'returns', 'a', 'UTF', '-', '8', 'version', 'of', 'a', 'given', 'string', '>>>', 'utils', '.', 'to_utf8', '(', 'u', 'hi', ')', 'hi'] | train | https://github.com/mailgun/talon/blob/cdd84563dd329c4f887591807870d10015e0c7a7/talon/utils.py#L89-L97 |
7,327 | Yelp/detect-secrets | detect_secrets/core/secrets_collection.py | SecretsCollection._extract_secrets_from_patch | def _extract_secrets_from_patch(self, f, plugin, filename):
"""Extract secrets from a given patch file object.
Note that we only want to capture incoming secrets (so added lines).
:type f: unidiff.patch.PatchedFile
:type plugin: detect_secrets.plugins.base.BasePlugin
:type filename: str
"""
output = {}
for chunk in f:
# target_lines refers to incoming (new) changes
for line in chunk.target_lines():
if line.is_added:
output.update(
plugin.analyze_string(
line.value,
line.target_line_no,
filename,
),
)
return output | python | def _extract_secrets_from_patch(self, f, plugin, filename):
"""Extract secrets from a given patch file object.
Note that we only want to capture incoming secrets (so added lines).
:type f: unidiff.patch.PatchedFile
:type plugin: detect_secrets.plugins.base.BasePlugin
:type filename: str
"""
output = {}
for chunk in f:
# target_lines refers to incoming (new) changes
for line in chunk.target_lines():
if line.is_added:
output.update(
plugin.analyze_string(
line.value,
line.target_line_no,
filename,
),
)
return output | ['def', '_extract_secrets_from_patch', '(', 'self', ',', 'f', ',', 'plugin', ',', 'filename', ')', ':', 'output', '=', '{', '}', 'for', 'chunk', 'in', 'f', ':', '# target_lines refers to incoming (new) changes', 'for', 'line', 'in', 'chunk', '.', 'target_lines', '(', ')', ':', 'if', 'line', '.', 'is_added', ':', 'output', '.', 'update', '(', 'plugin', '.', 'analyze_string', '(', 'line', '.', 'value', ',', 'line', '.', 'target_line_no', ',', 'filename', ',', ')', ',', ')', 'return', 'output'] | Extract secrets from a given patch file object.
Note that we only want to capture incoming secrets (so added lines).
:type f: unidiff.patch.PatchedFile
:type plugin: detect_secrets.plugins.base.BasePlugin
:type filename: str | ['Extract', 'secrets', 'from', 'a', 'given', 'patch', 'file', 'object', '.'] | train | https://github.com/Yelp/detect-secrets/blob/473923ea71f1ac2b5ea1eacc49b98f97967e3d05/detect_secrets/core/secrets_collection.py#L312-L334 |
7,328 | gear11/pypelogs | pypein/flickr.py | Flickr.search_groups | def search_groups(self, args):
"""
Executes a search
flickr:(credsfile),search,(arg1)=(val1),(arg2)=(val2)...
"""
kwargs = {'text': args[0]}
return self._paged_api_call(self.flickr.groups_search, kwargs, 'group') | python | def search_groups(self, args):
"""
Executes a search
flickr:(credsfile),search,(arg1)=(val1),(arg2)=(val2)...
"""
kwargs = {'text': args[0]}
return self._paged_api_call(self.flickr.groups_search, kwargs, 'group') | ['def', 'search_groups', '(', 'self', ',', 'args', ')', ':', 'kwargs', '=', '{', "'text'", ':', 'args', '[', '0', ']', '}', 'return', 'self', '.', '_paged_api_call', '(', 'self', '.', 'flickr', '.', 'groups_search', ',', 'kwargs', ',', "'group'", ')'] | Executes a search
flickr:(credsfile),search,(arg1)=(val1),(arg2)=(val2)... | ['Executes', 'a', 'search'] | train | https://github.com/gear11/pypelogs/blob/da5dc0fee5373a4be294798b5e32cd0a803d8bbe/pypein/flickr.py#L90-L97 |
7,329 | michael-lazar/rtv | rtv/packages/praw/__init__.py | PrivateMessagesMixin.get_messages | def get_messages(self, *args, **kwargs):
"""Return a get_content generator for inbox (messages only).
The additional parameters are passed directly into
:meth:`.get_content`. Note: the `url` parameter cannot be altered.
"""
return self.get_content(self.config['messages'], *args, **kwargs) | python | def get_messages(self, *args, **kwargs):
"""Return a get_content generator for inbox (messages only).
The additional parameters are passed directly into
:meth:`.get_content`. Note: the `url` parameter cannot be altered.
"""
return self.get_content(self.config['messages'], *args, **kwargs) | ['def', 'get_messages', '(', 'self', ',', '*', 'args', ',', '*', '*', 'kwargs', ')', ':', 'return', 'self', '.', 'get_content', '(', 'self', '.', 'config', '[', "'messages'", ']', ',', '*', 'args', ',', '*', '*', 'kwargs', ')'] | Return a get_content generator for inbox (messages only).
The additional parameters are passed directly into
:meth:`.get_content`. Note: the `url` parameter cannot be altered. | ['Return', 'a', 'get_content', 'generator', 'for', 'inbox', '(', 'messages', 'only', ')', '.'] | train | https://github.com/michael-lazar/rtv/blob/ccef2af042566ad384977028cf0bde01bc524dda/rtv/packages/praw/__init__.py#L2511-L2518 |
7,330 | oleg-golovanov/unilog | unilog/unilog.py | as_unicode | def as_unicode(obj, encoding=convert.LOCALE, pretty=False):
"""
Representing any object to <unicode> string (python2.7) or <str> string (python3.0).
:param obj: any object
:type encoding: str
:param encoding: codec for encoding unicode strings
(locale.getpreferredencoding() by default)
:type pretty: bool
:param pretty: pretty print
:rtype: unicode
:return: any object as unicode string
"""
return convert.convert(obj, encoding, 0 if pretty else None) | python | def as_unicode(obj, encoding=convert.LOCALE, pretty=False):
"""
Representing any object to <unicode> string (python2.7) or <str> string (python3.0).
:param obj: any object
:type encoding: str
:param encoding: codec for encoding unicode strings
(locale.getpreferredencoding() by default)
:type pretty: bool
:param pretty: pretty print
:rtype: unicode
:return: any object as unicode string
"""
return convert.convert(obj, encoding, 0 if pretty else None) | ['def', 'as_unicode', '(', 'obj', ',', 'encoding', '=', 'convert', '.', 'LOCALE', ',', 'pretty', '=', 'False', ')', ':', 'return', 'convert', '.', 'convert', '(', 'obj', ',', 'encoding', ',', '0', 'if', 'pretty', 'else', 'None', ')'] | Representing any object to <unicode> string (python2.7) or <str> string (python3.0).
:param obj: any object
:type encoding: str
:param encoding: codec for encoding unicode strings
(locale.getpreferredencoding() by default)
:type pretty: bool
:param pretty: pretty print
:rtype: unicode
:return: any object as unicode string | ['Representing', 'any', 'object', 'to', '<unicode', '>', 'string', '(', 'python2', '.', '7', ')', 'or', '<str', '>', 'string', '(', 'python3', '.', '0', ')', '.'] | train | https://github.com/oleg-golovanov/unilog/blob/4d59cd910032383a71796c4df7446fd5875938c3/unilog/unilog.py#L7-L22 |
7,331 | walter426/Python_GoogleMapsApi | GoogleMapsApi/geocode.py | Geocoding.reverse | def reverse(self, point, language=None, sensor=False):
'''Reverse geocode a point.
Pls refer to the Google Maps Web API for the details of the parameters
'''
params = {
'latlng': point,
'sensor': str(sensor).lower()
}
if language:
params['language'] = language
if not self.premier:
url = self.get_url(params)
else:
url = self.get_signed_url(params)
return self.GetService_url(url) | python | def reverse(self, point, language=None, sensor=False):
'''Reverse geocode a point.
Pls refer to the Google Maps Web API for the details of the parameters
'''
params = {
'latlng': point,
'sensor': str(sensor).lower()
}
if language:
params['language'] = language
if not self.premier:
url = self.get_url(params)
else:
url = self.get_signed_url(params)
return self.GetService_url(url) | ['def', 'reverse', '(', 'self', ',', 'point', ',', 'language', '=', 'None', ',', 'sensor', '=', 'False', ')', ':', 'params', '=', '{', "'latlng'", ':', 'point', ',', "'sensor'", ':', 'str', '(', 'sensor', ')', '.', 'lower', '(', ')', '}', 'if', 'language', ':', 'params', '[', "'language'", ']', '=', 'language', 'if', 'not', 'self', '.', 'premier', ':', 'url', '=', 'self', '.', 'get_url', '(', 'params', ')', 'else', ':', 'url', '=', 'self', '.', 'get_signed_url', '(', 'params', ')', 'return', 'self', '.', 'GetService_url', '(', 'url', ')'] | Reverse geocode a point.
Pls refer to the Google Maps Web API for the details of the parameters | ['Reverse', 'geocode', 'a', 'point', '.', 'Pls', 'refer', 'to', 'the', 'Google', 'Maps', 'Web', 'API', 'for', 'the', 'details', 'of', 'the', 'parameters'] | train | https://github.com/walter426/Python_GoogleMapsApi/blob/4832b293a0027446941a5f00ecc66256f92ddbce/GoogleMapsApi/geocode.py#L66-L83 |
7,332 | evandempsey/fp-growth | pyfpgrowth/pyfpgrowth.py | FPNode.add_child | def add_child(self, value):
"""
Add a node as a child node.
"""
child = FPNode(value, 1, self)
self.children.append(child)
return child | python | def add_child(self, value):
"""
Add a node as a child node.
"""
child = FPNode(value, 1, self)
self.children.append(child)
return child | ['def', 'add_child', '(', 'self', ',', 'value', ')', ':', 'child', '=', 'FPNode', '(', 'value', ',', '1', ',', 'self', ')', 'self', '.', 'children', '.', 'append', '(', 'child', ')', 'return', 'child'] | Add a node as a child node. | ['Add', 'a', 'node', 'as', 'a', 'child', 'node', '.'] | train | https://github.com/evandempsey/fp-growth/blob/6bf4503024e86c5bbea8a05560594f2f7f061c15/pyfpgrowth/pyfpgrowth.py#L39-L45 |
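A standalone sketch of the parent/child wiring that add_child performs; MiniNode is a simplified stand-in for pyfpgrowth's FPNode, not the library class itself.
class MiniNode:
    def __init__(self, value, count, parent):
        self.value, self.count, self.parent = value, count, parent
        self.children = []

    def add_child(self, value):
        # A new child starts with count 1 and keeps a back-reference.
        child = MiniNode(value, 1, self)
        self.children.append(child)
        return child

root = MiniNode(None, 0, None)
a = root.add_child('a')
b = a.add_child('b')
print([c.value for c in root.children], b.parent.value)   # ['a'] a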
7,333 | fhcrc/seqmagick | seqmagick/transform.py | drop_columns | def drop_columns(records, slices):
"""
Drop all columns present in ``slices`` from records
"""
for record in records:
# Generate a set of indices to remove
drop = set(i for slice in slices
for i in range(*slice.indices(len(record))))
keep = [i not in drop for i in range(len(record))]
record.seq = Seq(''.join(itertools.compress(record.seq, keep)), record.seq.alphabet)
yield record | python | def drop_columns(records, slices):
"""
Drop all columns present in ``slices`` from records
"""
for record in records:
# Generate a set of indices to remove
drop = set(i for slice in slices
for i in range(*slice.indices(len(record))))
keep = [i not in drop for i in range(len(record))]
record.seq = Seq(''.join(itertools.compress(record.seq, keep)), record.seq.alphabet)
yield record | ['def', 'drop_columns', '(', 'records', ',', 'slices', ')', ':', 'for', 'record', 'in', 'records', ':', '# Generate a set of indices to remove', 'drop', '=', 'set', '(', 'i', 'for', 'slice', 'in', 'slices', 'for', 'i', 'in', 'range', '(', '*', 'slice', '.', 'indices', '(', 'len', '(', 'record', ')', ')', ')', ')', 'keep', '=', '[', 'i', 'not', 'in', 'drop', 'for', 'i', 'in', 'range', '(', 'len', '(', 'record', ')', ')', ']', 'record', '.', 'seq', '=', 'Seq', '(', "''", '.', 'join', '(', 'itertools', '.', 'compress', '(', 'record', '.', 'seq', ',', 'keep', ')', ')', ',', 'record', '.', 'seq', '.', 'alphabet', ')', 'yield', 'record'] | Drop all columns present in ``slices`` from records | ['Drop', 'all', 'columns', 'present', 'in', 'slices', 'from', 'records'] | train | https://github.com/fhcrc/seqmagick/blob/1642bb87ba5c171fbd307f9da0f8a0ee1d69d5ed/seqmagick/transform.py#L184-L194 |
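The slice bookkeeping above (expand every slice with slice.indices, collect a drop set, compress the kept positions) works on any sequence; a standalone sketch with a plain string in place of a SeqRecord.
import itertools

seq = "ACGTACGTAC"
slices = [slice(1, 3), slice(8, None)]   # 0-based columns to drop

drop = set(i for s in slices for i in range(*s.indices(len(seq))))
keep = [i not in drop for i in range(len(seq))]
print(''.join(itertools.compress(seq, keep)))   # ATACGT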
7,334 | pycontribs/pyrax | pyrax/object_storage.py | StorageClient.get_container_metadata | def get_container_metadata(self, container, prefix=None):
"""
Returns a dictionary containing the metadata for the container.
"""
return self._manager.get_metadata(container, prefix=prefix) | python | def get_container_metadata(self, container, prefix=None):
"""
Returns a dictionary containing the metadata for the container.
"""
return self._manager.get_metadata(container, prefix=prefix) | ['def', 'get_container_metadata', '(', 'self', ',', 'container', ',', 'prefix', '=', 'None', ')', ':', 'return', 'self', '.', '_manager', '.', 'get_metadata', '(', 'container', ',', 'prefix', '=', 'prefix', ')'] | Returns a dictionary containing the metadata for the container. | ['Returns', 'a', 'dictionary', 'containing', 'the', 'metadata', 'for', 'the', 'container', '.'] | train | https://github.com/pycontribs/pyrax/blob/9ddfd5064b3a292d7337906f3b2d5dce95b50b99/pyrax/object_storage.py#L2530-L2534 |
7,335 | jmurty/xml4h | xml4h/nodes.py | Node.xml | def xml(self, indent=4, **kwargs):
"""
:return: this node as XML text.
Delegates to :meth:`write`
"""
writer = StringIO()
self.write(writer, indent=indent, **kwargs)
return writer.getvalue() | python | def xml(self, indent=4, **kwargs):
"""
:return: this node as XML text.
Delegates to :meth:`write`
"""
writer = StringIO()
self.write(writer, indent=indent, **kwargs)
return writer.getvalue() | ['def', 'xml', '(', 'self', ',', 'indent', '=', '4', ',', '*', '*', 'kwargs', ')', ':', 'writer', '=', 'StringIO', '(', ')', 'self', '.', 'write', '(', 'writer', ',', 'indent', '=', 'indent', ',', '*', '*', 'kwargs', ')', 'return', 'writer', '.', 'getvalue', '(', ')'] | :return: this node as XML text.
Delegates to :meth:`write` | [':', 'return', ':', 'this', 'node', 'as', 'XML', 'text', '.'] | train | https://github.com/jmurty/xml4h/blob/adbb45e27a01a869a505aee7bc16bad2f517b511/xml4h/nodes.py#L503-L511 |
7,336 | django-extensions/django-extensions | django_extensions/management/commands/graph_models.py | Command.render_output_pygraphviz | def render_output_pygraphviz(self, dotdata, **kwargs):
"""Render model data as image using pygraphviz"""
if not HAS_PYGRAPHVIZ:
raise CommandError("You need to install pygraphviz python module")
version = pygraphviz.__version__.rstrip("-svn")
try:
if tuple(int(v) for v in version.split('.')) < (0, 36):
# HACK around old/broken AGraph before version 0.36 (ubuntu ships with this old version)
tmpfile = tempfile.NamedTemporaryFile()
tmpfile.write(dotdata)
tmpfile.seek(0)
dotdata = tmpfile.name
except ValueError:
pass
graph = pygraphviz.AGraph(dotdata)
graph.layout(prog=kwargs['layout'])
graph.draw(kwargs['outputfile']) | python | def render_output_pygraphviz(self, dotdata, **kwargs):
"""Render model data as image using pygraphviz"""
if not HAS_PYGRAPHVIZ:
raise CommandError("You need to install pygraphviz python module")
version = pygraphviz.__version__.rstrip("-svn")
try:
if tuple(int(v) for v in version.split('.')) < (0, 36):
# HACK around old/broken AGraph before version 0.36 (ubuntu ships with this old version)
tmpfile = tempfile.NamedTemporaryFile()
tmpfile.write(dotdata)
tmpfile.seek(0)
dotdata = tmpfile.name
except ValueError:
pass
graph = pygraphviz.AGraph(dotdata)
graph.layout(prog=kwargs['layout'])
graph.draw(kwargs['outputfile']) | ['def', 'render_output_pygraphviz', '(', 'self', ',', 'dotdata', ',', '*', '*', 'kwargs', ')', ':', 'if', 'not', 'HAS_PYGRAPHVIZ', ':', 'raise', 'CommandError', '(', '"You need to install pygraphviz python module"', ')', 'version', '=', 'pygraphviz', '.', '__version__', '.', 'rstrip', '(', '"-svn"', ')', 'try', ':', 'if', 'tuple', '(', 'int', '(', 'v', ')', 'for', 'v', 'in', 'version', '.', 'split', '(', "'.'", ')', ')', '<', '(', '0', ',', '36', ')', ':', '# HACK around old/broken AGraph before version 0.36 (ubuntu ships with this old version)', 'tmpfile', '=', 'tempfile', '.', 'NamedTemporaryFile', '(', ')', 'tmpfile', '.', 'write', '(', 'dotdata', ')', 'tmpfile', '.', 'seek', '(', '0', ')', 'dotdata', '=', 'tmpfile', '.', 'name', 'except', 'ValueError', ':', 'pass', 'graph', '=', 'pygraphviz', '.', 'AGraph', '(', 'dotdata', ')', 'graph', '.', 'layout', '(', 'prog', '=', 'kwargs', '[', "'layout'", ']', ')', 'graph', '.', 'draw', '(', 'kwargs', '[', "'outputfile'", ']', ')'] | Render model data as image using pygraphviz | ['Render', 'model', 'data', 'as', 'image', 'using', 'pygraphviz'] | train | https://github.com/django-extensions/django-extensions/blob/7e0bef97ea6cb7f9eea5e2528e3a985a83a7b9b8/django_extensions/management/commands/graph_models.py#L254-L272 |
7,337 | fastai/fastai | old/fastai/dataset.py | ImageClassifierData.from_csv | def from_csv(cls, path, folder, csv_fname, bs=64, tfms=(None,None),
val_idxs=None, suffix='', test_name=None, continuous=False, skip_header=True, num_workers=8, cat_separator=' '):
""" Read in images and their labels given as a CSV file.
This method should be used when training image labels are given in a CSV file as opposed to
sub-directories with label names.
Arguments:
path: a root path of the data (used for storing trained models, precomputed values, etc)
folder: a name of the folder in which training images are contained.
csv_fname: a name of the CSV file which contains target labels.
bs: batch size
tfms: transformations (for data augmentations). e.g. output of `tfms_from_model`
val_idxs: index of images to be used for validation. e.g. output of `get_cv_idxs`.
If None, default arguments to get_cv_idxs are used.
suffix: suffix to add to image names in CSV file (sometimes CSV only contains the file name without file
extension e.g. '.jpg' - in which case, you can set suffix as '.jpg')
test_name: a name of the folder which contains test images.
continuous: if True, the data set is used to train regression models. If False, it is used
to train classification models.
skip_header: skip the first row of the CSV file.
num_workers: number of workers
cat_separator: Labels category separator
Returns:
ImageClassifierData
"""
assert not (tfms[0] is None or tfms[1] is None), "please provide transformations for your train and validation sets"
assert not (os.path.isabs(folder)), "folder needs to be a relative path"
fnames,y,classes = csv_source(folder, csv_fname, skip_header, suffix, continuous=continuous, cat_separator=cat_separator)
return cls.from_names_and_array(path, fnames, y, classes, val_idxs, test_name,
num_workers=num_workers, suffix=suffix, tfms=tfms, bs=bs, continuous=continuous) | python | def from_csv(cls, path, folder, csv_fname, bs=64, tfms=(None,None),
val_idxs=None, suffix='', test_name=None, continuous=False, skip_header=True, num_workers=8, cat_separator=' '):
""" Read in images and their labels given as a CSV file.
This method should be used when training image labels are given in a CSV file as opposed to
sub-directories with label names.
Arguments:
path: a root path of the data (used for storing trained models, precomputed values, etc)
folder: a name of the folder in which training images are contained.
csv_fname: a name of the CSV file which contains target labels.
bs: batch size
tfms: transformations (for data augmentations). e.g. output of `tfms_from_model`
val_idxs: index of images to be used for validation. e.g. output of `get_cv_idxs`.
If None, default arguments to get_cv_idxs are used.
suffix: suffix to add to image names in CSV file (sometimes CSV only contains the file name without file
extension e.g. '.jpg' - in which case, you can set suffix as '.jpg')
test_name: a name of the folder which contains test images.
continuous: if True, the data set is used to train regression models. If False, it is used
to train classification models.
skip_header: skip the first row of the CSV file.
num_workers: number of workers
cat_separator: Labels category separator
Returns:
ImageClassifierData
"""
assert not (tfms[0] is None or tfms[1] is None), "please provide transformations for your train and validation sets"
assert not (os.path.isabs(folder)), "folder needs to be a relative path"
fnames,y,classes = csv_source(folder, csv_fname, skip_header, suffix, continuous=continuous, cat_separator=cat_separator)
return cls.from_names_and_array(path, fnames, y, classes, val_idxs, test_name,
num_workers=num_workers, suffix=suffix, tfms=tfms, bs=bs, continuous=continuous) | ['def', 'from_csv', '(', 'cls', ',', 'path', ',', 'folder', ',', 'csv_fname', ',', 'bs', '=', '64', ',', 'tfms', '=', '(', 'None', ',', 'None', ')', ',', 'val_idxs', '=', 'None', ',', 'suffix', '=', "''", ',', 'test_name', '=', 'None', ',', 'continuous', '=', 'False', ',', 'skip_header', '=', 'True', ',', 'num_workers', '=', '8', ',', 'cat_separator', '=', "' '", ')', ':', 'assert', 'not', '(', 'tfms', '[', '0', ']', 'is', 'None', 'or', 'tfms', '[', '1', ']', 'is', 'None', ')', ',', '"please provide transformations for your train and validation sets"', 'assert', 'not', '(', 'os', '.', 'path', '.', 'isabs', '(', 'folder', ')', ')', ',', '"folder needs to be a relative path"', 'fnames', ',', 'y', ',', 'classes', '=', 'csv_source', '(', 'folder', ',', 'csv_fname', ',', 'skip_header', ',', 'suffix', ',', 'continuous', '=', 'continuous', ',', 'cat_separator', '=', 'cat_separator', ')', 'return', 'cls', '.', 'from_names_and_array', '(', 'path', ',', 'fnames', ',', 'y', ',', 'classes', ',', 'val_idxs', ',', 'test_name', ',', 'num_workers', '=', 'num_workers', ',', 'suffix', '=', 'suffix', ',', 'tfms', '=', 'tfms', ',', 'bs', '=', 'bs', ',', 'continuous', '=', 'continuous', ')'] | Read in images and their labels given as a CSV file.
This method should be used when training image labels are given in a CSV file as opposed to
sub-directories with label names.
Arguments:
path: a root path of the data (used for storing trained models, precomputed values, etc)
folder: a name of the folder in which training images are contained.
csv_fname: a name of the CSV file which contains target labels.
bs: batch size
tfms: transformations (for data augmentations). e.g. output of `tfms_from_model`
val_idxs: index of images to be used for validation. e.g. output of `get_cv_idxs`.
If None, default arguments to get_cv_idxs are used.
suffix: suffix to add to image names in CSV file (sometimes CSV only contains the file name without file
extension e.g. '.jpg' - in which case, you can set suffix as '.jpg')
test_name: a name of the folder which contains test images.
continuous: if True, the data set is used to train regression models. If False, it is used
to train classification models.
skip_header: skip the first row of the CSV file.
num_workers: number of workers
cat_separator: Labels category separator
Returns:
ImageClassifierData | ['Read', 'in', 'images', 'and', 'their', 'labels', 'given', 'as', 'a', 'CSV', 'file', '.'] | train | https://github.com/fastai/fastai/blob/9fb84a5cdefe5a766cdb792b8f5d8971737b7e67/old/fastai/dataset.py#L522-L553 |
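A hedged sketch of calling the constructor above in the old (0.7-era) fastai style; the dataset layout, the star import and the resnet34 choice are assumptions, while get_cv_idxs and tfms_from_model are the helpers named in the docstring.
from fastai.conv_learner import *        # assumed old fastai 0.7 import style

PATH = 'data/planet/'                     # made-up dataset layout
label_csv = PATH + 'labels.csv'

n = len(list(open(label_csv))) - 1        # rows minus header (skip_header=True)
val_idxs = get_cv_idxs(n)
tfms = tfms_from_model(resnet34, 224)

data = ImageClassifierData.from_csv(
    PATH, 'train', label_csv, bs=64, tfms=tfms,
    val_idxs=val_idxs, suffix='.jpg', test_name='test')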
7,338 | keras-rl/keras-rl | rl/policy.py | SoftmaxPolicy.select_action | def select_action(self, nb_actions, probs):
"""Return the selected action
# Arguments
probs (np.ndarray) : Probability for each action
# Returns
action
"""
action = np.random.choice(range(nb_actions), p=probs)
return action | python | def select_action(self, nb_actions, probs):
"""Return the selected action
# Arguments
probs (np.ndarray) : Probability for each action
# Returns
action
"""
action = np.random.choice(range(nb_actions), p=probs)
return action | ['def', 'select_action', '(', 'self', ',', 'nb_actions', ',', 'probs', ')', ':', 'action', '=', 'np', '.', 'random', '.', 'choice', '(', 'range', '(', 'nb_actions', ')', ',', 'p', '=', 'probs', ')', 'return', 'action'] | Return the selected action
# Arguments
probs (np.ndarray) : Probability for each action
# Returns
action | ['Return', 'the', 'selected', 'action'] | train | https://github.com/keras-rl/keras-rl/blob/e6efb0d8297ec38d704a3110b5d6ed74d09a05e3/rl/policy.py#L128-L139 |
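The selection above is a single categorical draw; a self-contained numpy sketch with made-up action scores.
import numpy as np

scores = np.array([2.0, 1.0, 0.1])
probs = np.exp(scores) / np.sum(np.exp(scores))   # softmax over raw scores
nb_actions = len(probs)

action = np.random.choice(range(nb_actions), p=probs)
print(action, probs.round(3))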
7,339 | codeinthehole/purl | purl/url.py | URL.path_segments | def path_segments(self, value=None):
"""
Return the path segments
:param list value: the new path segments to use
"""
if value is not None:
encoded_values = map(unicode_quote_path_segment, value)
new_path = '/' + '/'.join(encoded_values)
return URL._mutate(self, path=new_path)
parts = self._tuple.path.split('/')
segments = parts[1:]
if self._tuple.path.endswith('/'):
segments.pop()
segments = map(unicode_unquote, segments)
return tuple(segments) | python | def path_segments(self, value=None):
"""
Return the path segments
:param list value: the new path segments to use
"""
if value is not None:
encoded_values = map(unicode_quote_path_segment, value)
new_path = '/' + '/'.join(encoded_values)
return URL._mutate(self, path=new_path)
parts = self._tuple.path.split('/')
segments = parts[1:]
if self._tuple.path.endswith('/'):
segments.pop()
segments = map(unicode_unquote, segments)
return tuple(segments) | ['def', 'path_segments', '(', 'self', ',', 'value', '=', 'None', ')', ':', 'if', 'value', 'is', 'not', 'None', ':', 'encoded_values', '=', 'map', '(', 'unicode_quote_path_segment', ',', 'value', ')', 'new_path', '=', "'/'", '+', "'/'", '.', 'join', '(', 'encoded_values', ')', 'return', 'URL', '.', '_mutate', '(', 'self', ',', 'path', '=', 'new_path', ')', 'parts', '=', 'self', '.', '_tuple', '.', 'path', '.', 'split', '(', "'/'", ')', 'segments', '=', 'parts', '[', '1', ':', ']', 'if', 'self', '.', '_tuple', '.', 'path', '.', 'endswith', '(', "'/'", ')', ':', 'segments', '.', 'pop', '(', ')', 'segments', '=', 'map', '(', 'unicode_unquote', ',', 'segments', ')', 'return', 'tuple', '(', 'segments', ')'] | Return the path segments
:param list value: the new path segments to use | ['Return', 'the', 'path', 'segments'] | train | https://github.com/codeinthehole/purl/blob/e70ed132f1fdc17d00c78199cedb1e3adcb2bc55/purl/url.py#L385-L400 |
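A usage sketch for the accessor/mutator pair above; it assumes purl's top-level URL import and relies on the immutable return-a-new-URL style shown by _mutate.
from purl import URL

u = URL('https://example.com/blog/2020/post-1')
print(u.path_segments())              # ('blog', '2020', 'post-1')

v = u.path_segments(('docs', 'api'))  # returns a new URL, original unchanged
print(v.as_string())                  # https://example.com/docs/api
print(u.as_string())                  # https://example.com/blog/2020/post-1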
7,340 | scidam/cachepy | utils.py | Helpers.encode_safely | def encode_safely(self, data):
"""Encode the data.
"""
encoder = self.base_encoder
result = settings.null
try:
result = encoder(pickle.dumps(data))
except:
warnings.warn("Data could not be serialized.", RuntimeWarning)
return result | python | def encode_safely(self, data):
"""Encode the data.
"""
encoder = self.base_encoder
result = settings.null
try:
result = encoder(pickle.dumps(data))
except:
warnings.warn("Data could not be serialized.", RuntimeWarning)
return result | ['def', 'encode_safely', '(', 'self', ',', 'data', ')', ':', 'encoder', '=', 'self', '.', 'base_encoder', 'result', '=', 'settings', '.', 'null', 'try', ':', 'result', '=', 'encoder', '(', 'pickle', '.', 'dumps', '(', 'data', ')', ')', 'except', ':', 'warnings', '.', 'warn', '(', '"Data could not be serialized."', ',', 'RuntimeWarning', ')', 'return', 'result'] | Encode the data. | ['Encode', 'the', 'data', '.'] | train | https://github.com/scidam/cachepy/blob/680eeb7ff04ec9bb634b71cceb0841abaf2d530e/utils.py#L49-L60 |
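A standalone sketch of the pickle-then-encode pattern above, with base64 standing in for the helper's base_encoder and None for settings.null; both substitutions are assumptions about names defined elsewhere in the module.
import base64
import pickle
import warnings

def encode_safely(data, encoder=base64.b64encode, null=None):
    result = null
    try:
        result = encoder(pickle.dumps(data))
    except Exception:
        warnings.warn("Data could not be serialized.", RuntimeWarning)
    return result

print(encode_safely({'answer': 42})[:16])   # base64 bytes of the pickle
print(encode_safely(lambda x: x))           # unpicklable -> warns, returns None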
7,341 | jonathf/chaospy | chaospy/bertran/operators.py | olindex | def olindex(order, dim):
"""
Create an lexiographical sorted basis for a given order.
Examples
--------
>>> chaospy.bertran.olindex(3, 2)
array([[0, 3],
[1, 2],
[2, 1],
[3, 0]])
"""
idxm = [0]*dim
out = []
def _olindex(idx):
"""Recursive backend for olindex."""
if numpy.sum(idxm) == order:
out.append(idxm[:])
return
if idx == dim:
return
idxm_sum = numpy.sum(idxm)
idx_saved = idxm[idx]
for idxi in range(order - numpy.sum(idxm) + 1):
idxm[idx] = idxi
if idxm_sum < order:
_olindex(idx+1)
else:
break
idxm[idx] = idx_saved
_olindex(0)
return numpy.array(out) | python | def olindex(order, dim):
"""
Create an lexiographical sorted basis for a given order.
Examples
--------
>>> chaospy.bertran.olindex(3, 2)
array([[0, 3],
[1, 2],
[2, 1],
[3, 0]])
"""
idxm = [0]*dim
out = []
def _olindex(idx):
"""Recursive backend for olindex."""
if numpy.sum(idxm) == order:
out.append(idxm[:])
return
if idx == dim:
return
idxm_sum = numpy.sum(idxm)
idx_saved = idxm[idx]
for idxi in range(order - numpy.sum(idxm) + 1):
idxm[idx] = idxi
if idxm_sum < order:
_olindex(idx+1)
else:
break
idxm[idx] = idx_saved
_olindex(0)
return numpy.array(out) | ['def', 'olindex', '(', 'order', ',', 'dim', ')', ':', 'idxm', '=', '[', '0', ']', '*', 'dim', 'out', '=', '[', ']', 'def', '_olindex', '(', 'idx', ')', ':', '"""Recursive backend for olindex."""', 'if', 'numpy', '.', 'sum', '(', 'idxm', ')', '==', 'order', ':', 'out', '.', 'append', '(', 'idxm', '[', ':', ']', ')', 'return', 'if', 'idx', '==', 'dim', ':', 'return', 'idxm_sum', '=', 'numpy', '.', 'sum', '(', 'idxm', ')', 'idx_saved', '=', 'idxm', '[', 'idx', ']', 'for', 'idxi', 'in', 'range', '(', 'order', '-', 'numpy', '.', 'sum', '(', 'idxm', ')', '+', '1', ')', ':', 'idxm', '[', 'idx', ']', '=', 'idxi', 'if', 'idxm_sum', '<', 'order', ':', '_olindex', '(', 'idx', '+', '1', ')', 'else', ':', 'break', 'idxm', '[', 'idx', ']', '=', 'idx_saved', '_olindex', '(', '0', ')', 'return', 'numpy', '.', 'array', '(', 'out', ')'] | Create an lexiographical sorted basis for a given order.
Examples
--------
>>> chaospy.bertran.olindex(3, 2)
array([[0, 3],
[1, 2],
[2, 1],
[3, 0]]) | ['Create', 'an', 'lexiographical', 'sorted', 'basis', 'for', 'a', 'given', 'order', '.'] | train | https://github.com/jonathf/chaospy/blob/25ecfa7bf5608dc10c0b31d142ded0e3755f5d74/chaospy/bertran/operators.py#L291-L330 |
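The recursion above enumerates every exponent vector with a fixed total order; a standalone itertools sketch reproduces the doctest's order=3, dim=2 output.
import itertools
import numpy as np

def olindex_sketch(order, dim):
    # All dim-length exponent tuples whose entries sum to `order`.
    out = [idx for idx in itertools.product(range(order + 1), repeat=dim)
           if sum(idx) == order]
    return np.array(out)

print(olindex_sketch(3, 2))   # [[0 3] [1 2] [2 1] [3 0]]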
7,342 | Karaage-Cluster/karaage | karaage/common/decorators.py | login_required | def login_required(function=None):
"""
Decorator for views that checks that the user is logged in, redirecting
to the log-in page if necessary.
"""
def check_perms(user):
# if user not logged in, show login form
if not user.is_authenticated:
return False
# if this is the admin site only admin access
if settings.ADMIN_REQUIRED and not user.is_admin:
raise PermissionDenied
return True
actual_decorator = user_passes_test(check_perms, login_url=_login_url)
if function:
return actual_decorator(function)
return actual_decorator | python | def login_required(function=None):
"""
Decorator for views that checks that the user is logged in, redirecting
to the log-in page if necessary.
"""
def check_perms(user):
# if user not logged in, show login form
if not user.is_authenticated:
return False
# if this is the admin site only admin access
if settings.ADMIN_REQUIRED and not user.is_admin:
raise PermissionDenied
return True
actual_decorator = user_passes_test(check_perms, login_url=_login_url)
if function:
return actual_decorator(function)
return actual_decorator | ['def', 'login_required', '(', 'function', '=', 'None', ')', ':', 'def', 'check_perms', '(', 'user', ')', ':', '# if user not logged in, show login form', 'if', 'not', 'user', '.', 'is_authenticated', ':', 'return', 'False', '# if this is the admin site only admin access', 'if', 'settings', '.', 'ADMIN_REQUIRED', 'and', 'not', 'user', '.', 'is_admin', ':', 'raise', 'PermissionDenied', 'return', 'True', 'actual_decorator', '=', 'user_passes_test', '(', 'check_perms', ',', 'login_url', '=', '_login_url', ')', 'if', 'function', ':', 'return', 'actual_decorator', '(', 'function', ')', 'return', 'actual_decorator'] | Decorator for views that checks that the user is logged in, redirecting
to the log-in page if necessary. | ['Decorator', 'for', 'views', 'that', 'checks', 'that', 'the', 'user', 'is', 'logged', 'in', 'redirecting', 'to', 'the', 'log', '-', 'in', 'page', 'if', 'necessary', '.'] | train | https://github.com/Karaage-Cluster/karaage/blob/2f4c8b4e2d728b3fcbb151160c49000f1c04f5c9/karaage/common/decorators.py#L53-L70 |
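A hedged usage sketch of the decorator above on an ordinary Django view; the import path mirrors this record's module and the view itself is made up.
from django.http import HttpResponse

from karaage.common.decorators import login_required   # path from this record

@login_required
def dashboard(request):
    # Anonymous users are sent to the login page; on the admin site,
    # non-admin users raise PermissionDenied instead.
    return HttpResponse("Hello, %s" % request.user.username)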
7,343 | spacetelescope/acstools | acstools/utils_calib.py | get_corner | def get_corner(hdr, rsize=1):
"""Obtain bin and corner information for a subarray.
``LTV1``, ``LTV2``, ``LTM1_1``, and ``LTM2_2`` keywords
are extracted from the given extension header and converted
to bin and corner values (0-indexed).
``LTV1`` for the CCD uses the beginning of the illuminated
portion as the origin, not the beginning of the overscan region.
Thus, the computed X-corner has the same origin as ``LTV1``,
which is what we want, but it differs from the ``CENTERA1``
header keyword, which has the beginning of the overscan region
as origin.
.. note:: Translated from ``calacs/lib/getcorner.c``.
Parameters
----------
hdr : obj
Extension header.
rsize : int, optional
Size of reference pixel in units of high-res pixels.
Returns
-------
bin : tuple of int
Pixel size in X and Y.
corner : tuple of int
Corner of subarray in X and Y.
"""
ltm, ltv = get_lt(hdr)
return from_lt(rsize, ltm, ltv) | python | def get_corner(hdr, rsize=1):
"""Obtain bin and corner information for a subarray.
``LTV1``, ``LTV2``, ``LTM1_1``, and ``LTM2_2`` keywords
are extracted from the given extension header and converted
to bin and corner values (0-indexed).
``LTV1`` for the CCD uses the beginning of the illuminated
portion as the origin, not the beginning of the overscan region.
Thus, the computed X-corner has the same origin as ``LTV1``,
which is what we want, but it differs from the ``CENTERA1``
header keyword, which has the beginning of the overscan region
as origin.
.. note:: Translated from ``calacs/lib/getcorner.c``.
Parameters
----------
hdr : obj
Extension header.
rsize : int, optional
Size of reference pixel in units of high-res pixels.
Returns
-------
bin : tuple of int
Pixel size in X and Y.
corner : tuple of int
Corner of subarray in X and Y.
"""
ltm, ltv = get_lt(hdr)
return from_lt(rsize, ltm, ltv) | ['def', 'get_corner', '(', 'hdr', ',', 'rsize', '=', '1', ')', ':', 'ltm', ',', 'ltv', '=', 'get_lt', '(', 'hdr', ')', 'return', 'from_lt', '(', 'rsize', ',', 'ltm', ',', 'ltv', ')'] | Obtain bin and corner information for a subarray.
``LTV1``, ``LTV2``, ``LTM1_1``, and ``LTM2_2`` keywords
are extracted from the given extension header and converted
to bin and corner values (0-indexed).
``LTV1`` for the CCD uses the beginning of the illuminated
portion as the origin, not the beginning of the overscan region.
Thus, the computed X-corner has the same origin as ``LTV1``,
which is what we want, but it differs from the ``CENTERA1``
header keyword, which has the beginning of the overscan region
as origin.
.. note:: Translated from ``calacs/lib/getcorner.c``.
Parameters
----------
hdr : obj
Extension header.
rsize : int, optional
Size of reference pixel in units of high-res pixels.
Returns
-------
bin : tuple of int
Pixel size in X and Y.
corner : tuple of int
Corner of subarray in X and Y. | ['Obtain', 'bin', 'and', 'corner', 'information', 'for', 'a', 'subarray', '.'] | train | https://github.com/spacetelescope/acstools/blob/bbf8dd080cefcbf88529ec87c420f9e1b8002554/acstools/utils_calib.py#L354-L388 |
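A hedged sketch of feeding a FITS-style extension header to the helper above; the LTV/LTM numbers are invented, and only the four keywords named in the docstring are assumed to be required.
from astropy.io import fits
from acstools.utils_calib import get_corner   # module path from this record

hdr = fits.Header()
hdr['LTM1_1'] = 1.0    # no binning in X (made-up values)
hdr['LTM2_2'] = 1.0    # no binning in Y
hdr['LTV1'] = -100.0   # subarray offset relative to the full frame
hdr['LTV2'] = -200.0

bin_size, corner = get_corner(hdr)
print(bin_size, corner)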
7,344 | portfors-lab/sparkle | sparkle/gui/stim/tuning_curve.py | TuningCurveEditor.setStimDuration | def setStimDuration(self):
"""Sets the duration of the StimulusModel from values pulled from
this widget"""
duration = self.ui.durSpnbx.value()
self.tone.setDuration(duration) | python | def setStimDuration(self):
"""Sets the duration of the StimulusModel from values pulled from
this widget"""
duration = self.ui.durSpnbx.value()
self.tone.setDuration(duration) | ['def', 'setStimDuration', '(', 'self', ')', ':', 'duration', '=', 'self', '.', 'ui', '.', 'durSpnbx', '.', 'value', '(', ')', 'self', '.', 'tone', '.', 'setDuration', '(', 'duration', ')'] | Sets the duration of the StimulusModel from values pulled from
this widget | ['Sets', 'the', 'duration', 'of', 'the', 'StimulusModel', 'from', 'values', 'pulled', 'from', 'this', 'widget'] | train | https://github.com/portfors-lab/sparkle/blob/5fad1cf2bec58ec6b15d91da20f6236a74826110/sparkle/gui/stim/tuning_curve.py#L83-L87 |
7,345 | luckydonald/pytgbot | examples/cli.py | CLI.print_peer | def print_peer(self, peer, show_id=True, id_prefix="", reply=True):
"""
:param id_prefix: Prefix of the #id thing. Set a string, or true to have it generated.
:type id_prefix: str|bool
"""
if isinstance(id_prefix, bool):
if id_prefix: # True
if isinstance(peer, User):
id_prefix = "user"
elif isinstance(peer, Chat):
id_prefix = peer.type
else:
id_prefix = "unknown"
# end if
else: # False
id_prefix = ""
# end if
# end if
peer_string = self.peer_to_string(peer)
if show_id and "id" in peer:
peer_string += " ({color_lightblue}{id_prefix}#{id}{color_off})".format(id_prefix=id_prefix, id=peer.id, **self.color.formatter)
return peer_string | python | def print_peer(self, peer, show_id=True, id_prefix="", reply=True):
"""
:param id_prefix: Prefix of the #id thing. Set a string, or true to have it generated.
:type id_prefix: str|bool
"""
if isinstance(id_prefix, bool):
if id_prefix: # True
if isinstance(peer, User):
id_prefix = "user"
elif isinstance(peer, Chat):
id_prefix = peer.type
else:
id_prefix = "unknown"
# end if
else: # False
id_prefix = ""
# end if
# end if
peer_string = self.peer_to_string(peer)
if show_id and "id" in peer:
peer_string += " ({color_lightblue}{id_prefix}#{id}{color_off})".format(id_prefix=id_prefix, id=peer.id, **self.color.formatter)
return peer_string | ['def', 'print_peer', '(', 'self', ',', 'peer', ',', 'show_id', '=', 'True', ',', 'id_prefix', '=', '""', ',', 'reply', '=', 'True', ')', ':', 'if', 'isinstance', '(', 'id_prefix', ',', 'bool', ')', ':', 'if', 'id_prefix', ':', '# True', 'if', 'isinstance', '(', 'peer', ',', 'User', ')', ':', 'id_prefix', '=', '"user"', 'elif', 'isinstance', '(', 'peer', ',', 'Chat', ')', ':', 'id_prefix', '=', 'peer', '.', 'type', 'else', ':', 'id_prefix', '=', '"unknown"', '# end if', 'else', ':', '# False', 'id_prefix', '=', '""', '# end if', '# end if', 'peer_string', '=', 'self', '.', 'peer_to_string', '(', 'peer', ')', 'if', 'show_id', 'and', '"id"', 'in', 'peer', ':', 'peer_string', '+=', '" ({color_lightblue}{id_prefix}#{id}{color_off})"', '.', 'format', '(', 'id_prefix', '=', 'id_prefix', ',', 'id', '=', 'peer', '.', 'id', ',', '*', '*', 'self', '.', 'color', '.', 'formatter', ')', 'return', 'peer_string'] | :param id_prefix: Prefix of the #id thing. Set a string, or true to have it generated.
:type id_prefix: str|bool | [':', 'param', 'id_prefix', ':', 'Prefix', 'of', 'the', '#id', 'thing', '.', 'Set', 'a', 'string', 'or', 'true', 'to', 'have', 'it', 'generated', '.', ':', 'type', 'id_prefix', ':', 'str|bool'] | train | https://github.com/luckydonald/pytgbot/blob/67f4b5a1510d4583d40b5477e876b1ef0eb8971b/examples/cli.py#L580-L601 |
7,346 | saltstack/salt | salt/modules/zonecfg.py | update_resource | def update_resource(zone, resource_type, resource_selector, **kwargs):
'''
Add a resource
zone : string
name of zone
resource_type : string
type of resource
resource_selector : string
unique resource identifier
kwargs : string|int|...
resource properties
.. note::
Set resource_selector to None for resource that do not require one.
CLI Example:
.. code-block:: bash
salt '*' zonecfg.update_resource tallgeese rctl name name=zone.max-locked-memory value='(priv=privileged,limit=33554432,action=deny)'
'''
return _resource('update', zone, resource_type, resource_selector, **kwargs) | python | def update_resource(zone, resource_type, resource_selector, **kwargs):
'''
Add a resource
zone : string
name of zone
resource_type : string
type of resource
resource_selector : string
unique resource identifier
kwargs : string|int|...
resource properties
.. note::
Set resource_selector to None for resource that do not require one.
CLI Example:
.. code-block:: bash
salt '*' zonecfg.update_resource tallgeese rctl name name=zone.max-locked-memory value='(priv=privileged,limit=33554432,action=deny)'
'''
return _resource('update', zone, resource_type, resource_selector, **kwargs) | ['def', 'update_resource', '(', 'zone', ',', 'resource_type', ',', 'resource_selector', ',', '*', '*', 'kwargs', ')', ':', 'return', '_resource', '(', "'update'", ',', 'zone', ',', 'resource_type', ',', 'resource_selector', ',', '*', '*', 'kwargs', ')'] | Add a resource
zone : string
name of zone
resource_type : string
type of resource
resource_selector : string
unique resource identifier
kwargs : string|int|...
resource properties
.. note::
Set resource_selector to None for resource that do not require one.
CLI Example:
.. code-block:: bash
salt '*' zonecfg.update_resource tallgeese rctl name name=zone.max-locked-memory value='(priv=privileged,limit=33554432,action=deny)' | ['Add', 'a', 'resource'] | train | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/zonecfg.py#L593-L615 |
7,347 | merll/docker-map | dockermap/map/action/script.py | ScriptActionGenerator.get_state_actions | def get_state_actions(self, state, **kwargs):
"""
For dependent items, inherits the behavior from :class:`dockermap.map.action.resume.ResumeActionGenerator`.
For other the main container, checks if containers exist, and depending on the ``remove_existing_before``
option either fails or removes them. Otherwise runs the script.
:param state: Configuration state.
:type state: dockermap.map.state.ConfigState
:param kwargs: Additional keyword arguments.
:return: Actions on the client, map, and configurations.
:rtype: list[dockermap.map.action.ItemAction]
"""
if state.config_flags & ConfigFlags.DEPENDENT or state.config_id.config_type != ItemType.CONTAINER:
return super(ScriptActionGenerator, self).get_state_actions(state, **kwargs)
if state.base_state == State.ABSENT:
actions = []
else:
log.debug("Found existing script containers: %s", state.config_id)
if not self.remove_existing_before:
config_id = state.config_id
c_name = self._policy.cname(config_id.map_name, config_id.config_name, config_id.instance_name)
if state.client_name == self._policy.default_client_name:
error_msg = "Container {0} existed prior to running the script.".format(c_name)
else:
error_msg = ("Container {0} existed on client {1} prior to running the "
"script.").format(c_name, state.client_name)
raise ScriptActionException(error_msg)
if state.base_state == State.RUNNING or state.state_flags & StateFlags.RESTARTING:
log.debug("Preparing shutdown of existing container: %s", state.config_id)
actions = [ItemAction(state, DerivedAction.SHUTDOWN_CONTAINER)]
else:
log.debug("Preparing removal existing container: %s", state.config_id)
actions = [ItemAction(state, Action.REMOVE)]
actions.append(ItemAction(state, ContainerUtilAction.SCRIPT, extra_data=kwargs))
return actions | python | def get_state_actions(self, state, **kwargs):
"""
For dependent items, inherits the behavior from :class:`dockermap.map.action.resume.ResumeActionGenerator`.
For other the main container, checks if containers exist, and depending on the ``remove_existing_before``
option either fails or removes them. Otherwise runs the script.
:param state: Configuration state.
:type state: dockermap.map.state.ConfigState
:param kwargs: Additional keyword arguments.
:return: Actions on the client, map, and configurations.
:rtype: list[dockermap.map.action.ItemAction]
"""
if state.config_flags & ConfigFlags.DEPENDENT or state.config_id.config_type != ItemType.CONTAINER:
return super(ScriptActionGenerator, self).get_state_actions(state, **kwargs)
if state.base_state == State.ABSENT:
actions = []
else:
log.debug("Found existing script containers: %s", state.config_id)
if not self.remove_existing_before:
config_id = state.config_id
c_name = self._policy.cname(config_id.map_name, config_id.config_name, config_id.instance_name)
if state.client_name == self._policy.default_client_name:
error_msg = "Container {0} existed prior to running the script.".format(c_name)
else:
error_msg = ("Container {0} existed on client {1} prior to running the "
"script.").format(c_name, state.client_name)
raise ScriptActionException(error_msg)
if state.base_state == State.RUNNING or state.state_flags & StateFlags.RESTARTING:
log.debug("Preparing shutdown of existing container: %s", state.config_id)
actions = [ItemAction(state, DerivedAction.SHUTDOWN_CONTAINER)]
else:
log.debug("Preparing removal existing container: %s", state.config_id)
actions = [ItemAction(state, Action.REMOVE)]
actions.append(ItemAction(state, ContainerUtilAction.SCRIPT, extra_data=kwargs))
return actions | ['def', 'get_state_actions', '(', 'self', ',', 'state', ',', '*', '*', 'kwargs', ')', ':', 'if', 'state', '.', 'config_flags', '&', 'ConfigFlags', '.', 'DEPENDENT', 'or', 'state', '.', 'config_id', '.', 'config_type', '!=', 'ItemType', '.', 'CONTAINER', ':', 'return', 'super', '(', 'ScriptActionGenerator', ',', 'self', ')', '.', 'get_state_actions', '(', 'state', ',', '*', '*', 'kwargs', ')', 'if', 'state', '.', 'base_state', '==', 'State', '.', 'ABSENT', ':', 'actions', '=', '[', ']', 'else', ':', 'log', '.', 'debug', '(', '"Found existing script containers: %s"', ',', 'state', '.', 'config_id', ')', 'if', 'not', 'self', '.', 'remove_existing_before', ':', 'config_id', '=', 'state', '.', 'config_id', 'c_name', '=', 'self', '.', '_policy', '.', 'cname', '(', 'config_id', '.', 'map_name', ',', 'config_id', '.', 'config_name', ',', 'config_id', '.', 'instance_name', ')', 'if', 'state', '.', 'client_name', '==', 'self', '.', '_policy', '.', 'default_client_name', ':', 'error_msg', '=', '"Container {0} existed prior to running the script."', '.', 'format', '(', 'c_name', ')', 'else', ':', 'error_msg', '=', '(', '"Container {0} existed on client {1} prior to running the "', '"script."', ')', '.', 'format', '(', 'c_name', ',', 'state', '.', 'client_name', ')', 'raise', 'ScriptActionException', '(', 'error_msg', ')', 'if', 'state', '.', 'base_state', '==', 'State', '.', 'RUNNING', 'or', 'state', '.', 'state_flags', '&', 'StateFlags', '.', 'RESTARTING', ':', 'log', '.', 'debug', '(', '"Preparing shutdown of existing container: %s"', ',', 'state', '.', 'config_id', ')', 'actions', '=', '[', 'ItemAction', '(', 'state', ',', 'DerivedAction', '.', 'SHUTDOWN_CONTAINER', ')', ']', 'else', ':', 'log', '.', 'debug', '(', '"Preparing removal existing container: %s"', ',', 'state', '.', 'config_id', ')', 'actions', '=', '[', 'ItemAction', '(', 'state', ',', 'Action', '.', 'REMOVE', ')', ']', 'actions', '.', 'append', '(', 'ItemAction', '(', 'state', ',', 'ContainerUtilAction', '.', 'SCRIPT', ',', 'extra_data', '=', 'kwargs', ')', ')', 'return', 'actions'] | For dependent items, inherits the behavior from :class:`dockermap.map.action.resume.ResumeActionGenerator`.
For other the main container, checks if containers exist, and depending on the ``remove_existing_before``
option either fails or removes them. Otherwise runs the script.
:param state: Configuration state.
:type state: dockermap.map.state.ConfigState
:param kwargs: Additional keyword arguments.
:return: Actions on the client, map, and configurations.
:rtype: list[dockermap.map.action.ItemAction] | ['For', 'dependent', 'items', 'inherits', 'the', 'behavior', 'from', ':', 'class', ':', 'dockermap', '.', 'map', '.', 'action', '.', 'resume', '.', 'ResumeActionGenerator', '.', 'For', 'other', 'the', 'main', 'container', 'checks', 'if', 'containers', 'exist', 'and', 'depending', 'on', 'the', 'remove_existing_before', 'option', 'either', 'fails', 'or', 'removes', 'them', '.', 'Otherwise', 'runs', 'the', 'script', '.'] | train | https://github.com/merll/docker-map/blob/e14fe86a6ff5c33d121eb2f9157e9359cb80dd02/dockermap/map/action/script.py#L21-L58 |
7,348 | cloudsmith-io/cloudsmith-cli | cloudsmith_cli/cli/commands/copy.py | copy | def copy(
ctx,
opts,
owner_repo_package,
destination,
skip_errors,
wait_interval,
no_wait_for_sync,
sync_attempts,
):
"""
Copy a package to another repository.
This requires appropriate permissions for both the source
repository/package and the destination repository.
- OWNER/REPO/PACKAGE: Specify the OWNER namespace (i.e. user or org), the
REPO name where the package is stored, and the PACKAGE name (slug) of the
package itself. All separated by a slash.
Example: 'your-org/awesome-repo/better-pkg'.
- DEST: Specify the DEST (destination) repository to copy the package to.
This *must* be in the same namespace as the source repository.
Example: 'other-repo'
Full CLI example:
$ cloudsmith cp your-org/awesome-repo/better-pkg other-repo
"""
owner, source, slug = owner_repo_package
click.echo(
"Copying %(slug)s package from %(source)s to %(dest)s ... "
% {
"slug": click.style(slug, bold=True),
"source": click.style(source, bold=True),
"dest": click.style(destination, bold=True),
},
nl=False,
)
context_msg = "Failed to copy package!"
with handle_api_exceptions(
ctx, opts=opts, context_msg=context_msg, reraise_on_error=skip_errors
):
with maybe_spinner(opts):
_, new_slug = copy_package(
owner=owner, repo=source, identifier=slug, destination=destination
)
click.secho("OK", fg="green")
if no_wait_for_sync:
return
wait_for_package_sync(
ctx=ctx,
opts=opts,
owner=owner,
repo=destination,
slug=new_slug,
wait_interval=wait_interval,
skip_errors=skip_errors,
attempts=sync_attempts,
) | python | def copy(
ctx,
opts,
owner_repo_package,
destination,
skip_errors,
wait_interval,
no_wait_for_sync,
sync_attempts,
):
"""
Copy a package to another repository.
This requires appropriate permissions for both the source
repository/package and the destination repository.
- OWNER/REPO/PACKAGE: Specify the OWNER namespace (i.e. user or org), the
REPO name where the package is stored, and the PACKAGE name (slug) of the
package itself. All separated by a slash.
Example: 'your-org/awesome-repo/better-pkg'.
- DEST: Specify the DEST (destination) repository to copy the package to.
This *must* be in the same namespace as the source repository.
Example: 'other-repo'
Full CLI example:
$ cloudsmith cp your-org/awesome-repo/better-pkg other-repo
"""
owner, source, slug = owner_repo_package
click.echo(
"Copying %(slug)s package from %(source)s to %(dest)s ... "
% {
"slug": click.style(slug, bold=True),
"source": click.style(source, bold=True),
"dest": click.style(destination, bold=True),
},
nl=False,
)
context_msg = "Failed to copy package!"
with handle_api_exceptions(
ctx, opts=opts, context_msg=context_msg, reraise_on_error=skip_errors
):
with maybe_spinner(opts):
_, new_slug = copy_package(
owner=owner, repo=source, identifier=slug, destination=destination
)
click.secho("OK", fg="green")
if no_wait_for_sync:
return
wait_for_package_sync(
ctx=ctx,
opts=opts,
owner=owner,
repo=destination,
slug=new_slug,
wait_interval=wait_interval,
skip_errors=skip_errors,
attempts=sync_attempts,
) | ['def', 'copy', '(', 'ctx', ',', 'opts', ',', 'owner_repo_package', ',', 'destination', ',', 'skip_errors', ',', 'wait_interval', ',', 'no_wait_for_sync', ',', 'sync_attempts', ',', ')', ':', 'owner', ',', 'source', ',', 'slug', '=', 'owner_repo_package', 'click', '.', 'echo', '(', '"Copying %(slug)s package from %(source)s to %(dest)s ... "', '%', '{', '"slug"', ':', 'click', '.', 'style', '(', 'slug', ',', 'bold', '=', 'True', ')', ',', '"source"', ':', 'click', '.', 'style', '(', 'source', ',', 'bold', '=', 'True', ')', ',', '"dest"', ':', 'click', '.', 'style', '(', 'destination', ',', 'bold', '=', 'True', ')', ',', '}', ',', 'nl', '=', 'False', ',', ')', 'context_msg', '=', '"Failed to copy package!"', 'with', 'handle_api_exceptions', '(', 'ctx', ',', 'opts', '=', 'opts', ',', 'context_msg', '=', 'context_msg', ',', 'reraise_on_error', '=', 'skip_errors', ')', ':', 'with', 'maybe_spinner', '(', 'opts', ')', ':', '_', ',', 'new_slug', '=', 'copy_package', '(', 'owner', '=', 'owner', ',', 'repo', '=', 'source', ',', 'identifier', '=', 'slug', ',', 'destination', '=', 'destination', ')', 'click', '.', 'secho', '(', '"OK"', ',', 'fg', '=', '"green"', ')', 'if', 'no_wait_for_sync', ':', 'return', 'wait_for_package_sync', '(', 'ctx', '=', 'ctx', ',', 'opts', '=', 'opts', ',', 'owner', '=', 'owner', ',', 'repo', '=', 'destination', ',', 'slug', '=', 'new_slug', ',', 'wait_interval', '=', 'wait_interval', ',', 'skip_errors', '=', 'skip_errors', ',', 'attempts', '=', 'sync_attempts', ',', ')'] | Copy a package to another repository.
This requires appropriate permissions for both the source
repository/package and the destination repository.
- OWNER/REPO/PACKAGE: Specify the OWNER namespace (i.e. user or org), the
REPO name where the package is stored, and the PACKAGE name (slug) of the
package itself. All separated by a slash.
Example: 'your-org/awesome-repo/better-pkg'.
- DEST: Specify the DEST (destination) repository to copy the package to.
This *must* be in the same namespace as the source repository.
Example: 'other-repo'
Full CLI example:
$ cloudsmith cp your-org/awesome-repo/better-pkg other-repo | ['Copy', 'a', 'package', 'to', 'another', 'repository', '.'] | train | https://github.com/cloudsmith-io/cloudsmith-cli/blob/5bc245ca5d0bfa85380be48e7c206b4c86cc6c8e/cloudsmith_cli/cli/commands/copy.py#L28-L94 |
7,349 | lemieuxl/pyGenClean | pyGenClean/RelatedSamples/find_related_samples.py | checkArgs | def checkArgs(args):
"""Checks the arguments and options.
:param args: an object containing the options of the program.
:type args: argparse.Namespace
:returns: ``True`` if everything was OK.
If there is a problem with an option, an exception is raised using the
:py:class:`ProgramError` class, a message is printed to the
:class:`sys.stderr` and the program exists with code 1.
"""
# Check if we have the tped and the tfam files
for fileName in [args.bfile + i for i in [".bed", ".bim", ".fam"]]:
if not os.path.isfile(fileName):
msg = "%(fileName)s: no such file" % locals()
raise ProgramError(msg)
# Check the indep-pairwise option
# The two first must be int, the last one float
try:
for i in xrange(2):
tmp = int(args.indep_pairwise[i])
tmp = float(args.indep_pairwise[2])
except ValueError:
msg = "indep-pairwise: need INT INT FLOAT"
raise ProgramError(msg)
# Check the maf value
tmpMAF = None
try:
tmpMAF = float(args.maf)
except ValueError:
msg = "maf: must be a float, not %s" % args.maf
raise ProgramError(msg)
if (tmpMAF > 0.5) or (tmpMAF < 0.0):
msg = "maf: must be between 0.0 and 0.5, not %s" % args.maf
raise ProgramError(msg)
# Check the number of line per file
if args.line_per_file_for_sge < 1:
msg = "line-per-file-for-sge: must be above 0, not " \
"%d" % args.line_per_file_for_sge
raise ProgramError(msg)
# Check the minimum number of SNPs
if args.min_nb_snp < 1:
msg = "min-nb-snp: must be above 1"
raise ProgramError(msg)
return True | python | def checkArgs(args):
"""Checks the arguments and options.
:param args: an object containing the options of the program.
:type args: argparse.Namespace
:returns: ``True`` if everything was OK.
If there is a problem with an option, an exception is raised using the
:py:class:`ProgramError` class, a message is printed to the
:class:`sys.stderr` and the program exists with code 1.
"""
# Check if we have the tped and the tfam files
for fileName in [args.bfile + i for i in [".bed", ".bim", ".fam"]]:
if not os.path.isfile(fileName):
msg = "%(fileName)s: no such file" % locals()
raise ProgramError(msg)
# Check the indep-pairwise option
# The two first must be int, the last one float
try:
for i in xrange(2):
tmp = int(args.indep_pairwise[i])
tmp = float(args.indep_pairwise[2])
except ValueError:
msg = "indep-pairwise: need INT INT FLOAT"
raise ProgramError(msg)
# Check the maf value
tmpMAF = None
try:
tmpMAF = float(args.maf)
except ValueError:
msg = "maf: must be a float, not %s" % args.maf
raise ProgramError(msg)
if (tmpMAF > 0.5) or (tmpMAF < 0.0):
msg = "maf: must be between 0.0 and 0.5, not %s" % args.maf
raise ProgramError(msg)
# Check the number of line per file
if args.line_per_file_for_sge < 1:
msg = "line-per-file-for-sge: must be above 0, not " \
"%d" % args.line_per_file_for_sge
raise ProgramError(msg)
# Check the minimum number of SNPs
if args.min_nb_snp < 1:
msg = "min-nb-snp: must be above 1"
raise ProgramError(msg)
return True | ['def', 'checkArgs', '(', 'args', ')', ':', '# Check if we have the tped and the tfam files', 'for', 'fileName', 'in', '[', 'args', '.', 'bfile', '+', 'i', 'for', 'i', 'in', '[', '".bed"', ',', '".bim"', ',', '".fam"', ']', ']', ':', 'if', 'not', 'os', '.', 'path', '.', 'isfile', '(', 'fileName', ')', ':', 'msg', '=', '"%(fileName)s: no such file"', '%', 'locals', '(', ')', 'raise', 'ProgramError', '(', 'msg', ')', '# Check the indep-pairwise option', '# The two first must be int, the last one float', 'try', ':', 'for', 'i', 'in', 'xrange', '(', '2', ')', ':', 'tmp', '=', 'int', '(', 'args', '.', 'indep_pairwise', '[', 'i', ']', ')', 'tmp', '=', 'float', '(', 'args', '.', 'indep_pairwise', '[', '2', ']', ')', 'except', 'ValueError', ':', 'msg', '=', '"indep-pairwise: need INT INT FLOAT"', 'raise', 'ProgramError', '(', 'msg', ')', '# Check the maf value', 'tmpMAF', '=', 'None', 'try', ':', 'tmpMAF', '=', 'float', '(', 'args', '.', 'maf', ')', 'except', 'ValueError', ':', 'msg', '=', '"maf: must be a float, not %s"', '%', 'args', '.', 'maf', 'raise', 'ProgramError', '(', 'msg', ')', 'if', '(', 'tmpMAF', '>', '0.5', ')', 'or', '(', 'tmpMAF', '<', '0.0', ')', ':', 'msg', '=', '"maf: must be between 0.0 and 0.5, not %s"', '%', 'args', '.', 'maf', 'raise', 'ProgramError', '(', 'msg', ')', '# Check the number of line per file', 'if', 'args', '.', 'line_per_file_for_sge', '<', '1', ':', 'msg', '=', '"line-per-file-for-sge: must be above 0, not "', '"%d"', '%', 'args', '.', 'line_per_file_for_sge', 'raise', 'ProgramError', '(', 'msg', ')', '# Check the minimum number of SNPs', 'if', 'args', '.', 'min_nb_snp', '<', '1', ':', 'msg', '=', '"min-nb-snp: must be above 1"', 'raise', 'ProgramError', '(', 'msg', ')', 'return', 'True'] | Checks the arguments and options.
:param args: an object containing the options of the program.
:type args: argparse.Namespace
:returns: ``True`` if everything was OK.
If there is a problem with an option, an exception is raised using the
:py:class:`ProgramError` class, a message is printed to the
:class:`sys.stderr` and the program exists with code 1. | ['Checks', 'the', 'arguments', 'and', 'options', '.'] | train | https://github.com/lemieuxl/pyGenClean/blob/6173a48ccc0cf3a3bd711b1f2a1fa16248b8bf55/pyGenClean/RelatedSamples/find_related_samples.py#L783-L835 |
7,350 | EpistasisLab/tpot | tpot/builtins/one_hot_encoder.py | OneHotEncoder._transform | def _transform(self, X):
"""Asssume X contains only categorical features.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Dense array or sparse matrix.
"""
X = self._matrix_adjust(X)
X = check_array(X, accept_sparse='csc', force_all_finite=False,
dtype=int)
if X.min() < 0:
raise ValueError("X needs to contain only non-negative integers.")
n_samples, n_features = X.shape
indices = self.feature_indices_
if n_features != indices.shape[0] - 1:
raise ValueError("X has different shape than during fitting."
" Expected %d, got %d."
% (indices.shape[0] - 1, n_features))
# Replace all indicators which were below `minimum_fraction` in the
# training set by 'other'
if self.minimum_fraction is not None:
for column in range(X.shape[1]):
if sparse.issparse(X):
indptr_start = X.indptr[column]
indptr_end = X.indptr[column + 1]
unique = np.unique(X.data[indptr_start:indptr_end])
else:
unique = np.unique(X[:, column])
for unique_value in unique:
if unique_value not in self.do_not_replace_by_other_[column]:
if sparse.issparse(X):
indptr_start = X.indptr[column]
indptr_end = X.indptr[column + 1]
X.data[indptr_start:indptr_end][
X.data[indptr_start:indptr_end] ==
unique_value] = SPARSE_ENCODINGS['OTHER']
else:
X[:, column][X[:, column] == unique_value] = SPARSE_ENCODINGS['OTHER']
if sparse.issparse(X):
n_values_check = X.max(axis=0).toarray().flatten() + 1
else:
n_values_check = np.max(X, axis=0) + 1
# Replace all indicators which are out of bounds by 'other' (index 0)
if (n_values_check > self.n_values_).any():
# raise ValueError("Feature out of bounds. Try setting n_values.")
for i, n_value_check in enumerate(n_values_check):
if (n_value_check - 1) >= self.n_values_[i]:
if sparse.issparse(X):
indptr_start = X.indptr[i]
indptr_end = X.indptr[i+1]
X.data[indptr_start:indptr_end][X.data[indptr_start:indptr_end] >= self.n_values_[i]] = 0
else:
X[:, i][X[:, i] >= self.n_values_[i]] = 0
if sparse.issparse(X):
row_indices = X.indices
column_indices = []
for i in range(len(X.indptr) - 1):
nbr = X.indptr[i + 1] - X.indptr[i]
column_indices_ = [indices[i]] * nbr
column_indices_ += X.data[X.indptr[i]:X.indptr[i + 1]]
column_indices.extend(column_indices_)
data = np.ones(X.data.size)
else:
column_indices = (X + indices[:-1]).ravel()
row_indices = np.repeat(np.arange(n_samples, dtype=np.int32),
n_features)
data = np.ones(n_samples * n_features)
out = sparse.coo_matrix((data, (row_indices, column_indices)),
shape=(n_samples, indices[-1]),
dtype=self.dtype).tocsc()
out = out[:, self.active_features_]
return out.tocsr() if self.sparse else out.toarray() | python | def _transform(self, X):
"""Asssume X contains only categorical features.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Dense array or sparse matrix.
"""
X = self._matrix_adjust(X)
X = check_array(X, accept_sparse='csc', force_all_finite=False,
dtype=int)
if X.min() < 0:
raise ValueError("X needs to contain only non-negative integers.")
n_samples, n_features = X.shape
indices = self.feature_indices_
if n_features != indices.shape[0] - 1:
raise ValueError("X has different shape than during fitting."
" Expected %d, got %d."
% (indices.shape[0] - 1, n_features))
# Replace all indicators which were below `minimum_fraction` in the
# training set by 'other'
if self.minimum_fraction is not None:
for column in range(X.shape[1]):
if sparse.issparse(X):
indptr_start = X.indptr[column]
indptr_end = X.indptr[column + 1]
unique = np.unique(X.data[indptr_start:indptr_end])
else:
unique = np.unique(X[:, column])
for unique_value in unique:
if unique_value not in self.do_not_replace_by_other_[column]:
if sparse.issparse(X):
indptr_start = X.indptr[column]
indptr_end = X.indptr[column + 1]
X.data[indptr_start:indptr_end][
X.data[indptr_start:indptr_end] ==
unique_value] = SPARSE_ENCODINGS['OTHER']
else:
X[:, column][X[:, column] == unique_value] = SPARSE_ENCODINGS['OTHER']
if sparse.issparse(X):
n_values_check = X.max(axis=0).toarray().flatten() + 1
else:
n_values_check = np.max(X, axis=0) + 1
# Replace all indicators which are out of bounds by 'other' (index 0)
if (n_values_check > self.n_values_).any():
# raise ValueError("Feature out of bounds. Try setting n_values.")
for i, n_value_check in enumerate(n_values_check):
if (n_value_check - 1) >= self.n_values_[i]:
if sparse.issparse(X):
indptr_start = X.indptr[i]
indptr_end = X.indptr[i+1]
X.data[indptr_start:indptr_end][X.data[indptr_start:indptr_end] >= self.n_values_[i]] = 0
else:
X[:, i][X[:, i] >= self.n_values_[i]] = 0
if sparse.issparse(X):
row_indices = X.indices
column_indices = []
for i in range(len(X.indptr) - 1):
nbr = X.indptr[i + 1] - X.indptr[i]
column_indices_ = [indices[i]] * nbr
column_indices_ += X.data[X.indptr[i]:X.indptr[i + 1]]
column_indices.extend(column_indices_)
data = np.ones(X.data.size)
else:
column_indices = (X + indices[:-1]).ravel()
row_indices = np.repeat(np.arange(n_samples, dtype=np.int32),
n_features)
data = np.ones(n_samples * n_features)
out = sparse.coo_matrix((data, (row_indices, column_indices)),
shape=(n_samples, indices[-1]),
dtype=self.dtype).tocsc()
out = out[:, self.active_features_]
return out.tocsr() if self.sparse else out.toarray() | ['def', '_transform', '(', 'self', ',', 'X', ')', ':', 'X', '=', 'self', '.', '_matrix_adjust', '(', 'X', ')', 'X', '=', 'check_array', '(', 'X', ',', 'accept_sparse', '=', "'csc'", ',', 'force_all_finite', '=', 'False', ',', 'dtype', '=', 'int', ')', 'if', 'X', '.', 'min', '(', ')', '<', '0', ':', 'raise', 'ValueError', '(', '"X needs to contain only non-negative integers."', ')', 'n_samples', ',', 'n_features', '=', 'X', '.', 'shape', 'indices', '=', 'self', '.', 'feature_indices_', 'if', 'n_features', '!=', 'indices', '.', 'shape', '[', '0', ']', '-', '1', ':', 'raise', 'ValueError', '(', '"X has different shape than during fitting."', '" Expected %d, got %d."', '%', '(', 'indices', '.', 'shape', '[', '0', ']', '-', '1', ',', 'n_features', ')', ')', '# Replace all indicators which were below `minimum_fraction` in the', "# training set by 'other'", 'if', 'self', '.', 'minimum_fraction', 'is', 'not', 'None', ':', 'for', 'column', 'in', 'range', '(', 'X', '.', 'shape', '[', '1', ']', ')', ':', 'if', 'sparse', '.', 'issparse', '(', 'X', ')', ':', 'indptr_start', '=', 'X', '.', 'indptr', '[', 'column', ']', 'indptr_end', '=', 'X', '.', 'indptr', '[', 'column', '+', '1', ']', 'unique', '=', 'np', '.', 'unique', '(', 'X', '.', 'data', '[', 'indptr_start', ':', 'indptr_end', ']', ')', 'else', ':', 'unique', '=', 'np', '.', 'unique', '(', 'X', '[', ':', ',', 'column', ']', ')', 'for', 'unique_value', 'in', 'unique', ':', 'if', 'unique_value', 'not', 'in', 'self', '.', 'do_not_replace_by_other_', '[', 'column', ']', ':', 'if', 'sparse', '.', 'issparse', '(', 'X', ')', ':', 'indptr_start', '=', 'X', '.', 'indptr', '[', 'column', ']', 'indptr_end', '=', 'X', '.', 'indptr', '[', 'column', '+', '1', ']', 'X', '.', 'data', '[', 'indptr_start', ':', 'indptr_end', ']', '[', 'X', '.', 'data', '[', 'indptr_start', ':', 'indptr_end', ']', '==', 'unique_value', ']', '=', 'SPARSE_ENCODINGS', '[', "'OTHER'", ']', 'else', ':', 'X', '[', ':', ',', 'column', ']', '[', 'X', '[', ':', ',', 'column', ']', '==', 'unique_value', ']', '=', 'SPARSE_ENCODINGS', '[', "'OTHER'", ']', 'if', 'sparse', '.', 'issparse', '(', 'X', ')', ':', 'n_values_check', '=', 'X', '.', 'max', '(', 'axis', '=', '0', ')', '.', 'toarray', '(', ')', '.', 'flatten', '(', ')', '+', '1', 'else', ':', 'n_values_check', '=', 'np', '.', 'max', '(', 'X', ',', 'axis', '=', '0', ')', '+', '1', "# Replace all indicators which are out of bounds by 'other' (index 0)", 'if', '(', 'n_values_check', '>', 'self', '.', 'n_values_', ')', '.', 'any', '(', ')', ':', '# raise ValueError("Feature out of bounds. 
Try setting n_values.")', 'for', 'i', ',', 'n_value_check', 'in', 'enumerate', '(', 'n_values_check', ')', ':', 'if', '(', 'n_value_check', '-', '1', ')', '>=', 'self', '.', 'n_values_', '[', 'i', ']', ':', 'if', 'sparse', '.', 'issparse', '(', 'X', ')', ':', 'indptr_start', '=', 'X', '.', 'indptr', '[', 'i', ']', 'indptr_end', '=', 'X', '.', 'indptr', '[', 'i', '+', '1', ']', 'X', '.', 'data', '[', 'indptr_start', ':', 'indptr_end', ']', '[', 'X', '.', 'data', '[', 'indptr_start', ':', 'indptr_end', ']', '>=', 'self', '.', 'n_values_', '[', 'i', ']', ']', '=', '0', 'else', ':', 'X', '[', ':', ',', 'i', ']', '[', 'X', '[', ':', ',', 'i', ']', '>=', 'self', '.', 'n_values_', '[', 'i', ']', ']', '=', '0', 'if', 'sparse', '.', 'issparse', '(', 'X', ')', ':', 'row_indices', '=', 'X', '.', 'indices', 'column_indices', '=', '[', ']', 'for', 'i', 'in', 'range', '(', 'len', '(', 'X', '.', 'indptr', ')', '-', '1', ')', ':', 'nbr', '=', 'X', '.', 'indptr', '[', 'i', '+', '1', ']', '-', 'X', '.', 'indptr', '[', 'i', ']', 'column_indices_', '=', '[', 'indices', '[', 'i', ']', ']', '*', 'nbr', 'column_indices_', '+=', 'X', '.', 'data', '[', 'X', '.', 'indptr', '[', 'i', ']', ':', 'X', '.', 'indptr', '[', 'i', '+', '1', ']', ']', 'column_indices', '.', 'extend', '(', 'column_indices_', ')', 'data', '=', 'np', '.', 'ones', '(', 'X', '.', 'data', '.', 'size', ')', 'else', ':', 'column_indices', '=', '(', 'X', '+', 'indices', '[', ':', '-', '1', ']', ')', '.', 'ravel', '(', ')', 'row_indices', '=', 'np', '.', 'repeat', '(', 'np', '.', 'arange', '(', 'n_samples', ',', 'dtype', '=', 'np', '.', 'int32', ')', ',', 'n_features', ')', 'data', '=', 'np', '.', 'ones', '(', 'n_samples', '*', 'n_features', ')', 'out', '=', 'sparse', '.', 'coo_matrix', '(', '(', 'data', ',', '(', 'row_indices', ',', 'column_indices', ')', ')', ',', 'shape', '=', '(', 'n_samples', ',', 'indices', '[', '-', '1', ']', ')', ',', 'dtype', '=', 'self', '.', 'dtype', ')', '.', 'tocsc', '(', ')', 'out', '=', 'out', '[', ':', ',', 'self', '.', 'active_features_', ']', 'return', 'out', '.', 'tocsr', '(', ')', 'if', 'self', '.', 'sparse', 'else', 'out', '.', 'toarray', '(', ')'] | Asssume X contains only categorical features.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Dense array or sparse matrix. | ['Asssume', 'X', 'contains', 'only', 'categorical', 'features', '.'] | train | https://github.com/EpistasisLab/tpot/blob/b626271e6b5896a73fb9d7d29bebc7aa9100772e/tpot/builtins/one_hot_encoder.py#L399-L479 |
7,351 | kytos/python-openflow | pyof/foundation/network_types.py | Ethernet.unpack | def unpack(self, buff, offset=0):
"""Unpack a binary message into this object's attributes.
Unpack the binary value *buff* and update this object attributes based
on the results.
Ethernet headers may have VLAN tags. If no VLAN tag is found, a
'wildcard VLAN tag' is inserted to assure correct unpacking.
Args:
buff (bytes): Binary data package to be unpacked.
offset (int): Where to begin unpacking.
Raises:
UnpackException: If there is a struct unpacking error.
"""
begin = offset
vlan_length = self._get_vlan_length(buff)
for attribute_name, class_attribute in self.get_class_attributes():
attribute = deepcopy(class_attribute)
if attribute_name == 'vlans':
attribute.unpack(buff[begin:begin+vlan_length])
else:
attribute.unpack(buff, begin)
setattr(self, attribute_name, attribute)
begin += attribute.get_size() | python | def unpack(self, buff, offset=0):
"""Unpack a binary message into this object's attributes.
Unpack the binary value *buff* and update this object attributes based
on the results.
Ethernet headers may have VLAN tags. If no VLAN tag is found, a
'wildcard VLAN tag' is inserted to assure correct unpacking.
Args:
buff (bytes): Binary data package to be unpacked.
offset (int): Where to begin unpacking.
Raises:
UnpackException: If there is a struct unpacking error.
"""
begin = offset
vlan_length = self._get_vlan_length(buff)
for attribute_name, class_attribute in self.get_class_attributes():
attribute = deepcopy(class_attribute)
if attribute_name == 'vlans':
attribute.unpack(buff[begin:begin+vlan_length])
else:
attribute.unpack(buff, begin)
setattr(self, attribute_name, attribute)
begin += attribute.get_size() | ['def', 'unpack', '(', 'self', ',', 'buff', ',', 'offset', '=', '0', ')', ':', 'begin', '=', 'offset', 'vlan_length', '=', 'self', '.', '_get_vlan_length', '(', 'buff', ')', 'for', 'attribute_name', ',', 'class_attribute', 'in', 'self', '.', 'get_class_attributes', '(', ')', ':', 'attribute', '=', 'deepcopy', '(', 'class_attribute', ')', 'if', 'attribute_name', '==', "'vlans'", ':', 'attribute', '.', 'unpack', '(', 'buff', '[', 'begin', ':', 'begin', '+', 'vlan_length', ']', ')', 'else', ':', 'attribute', '.', 'unpack', '(', 'buff', ',', 'begin', ')', 'setattr', '(', 'self', ',', 'attribute_name', ',', 'attribute', ')', 'begin', '+=', 'attribute', '.', 'get_size', '(', ')'] | Unpack a binary message into this object's attributes.
Unpack the binary value *buff* and update this object attributes based
on the results.
Ethernet headers may have VLAN tags. If no VLAN tag is found, a
'wildcard VLAN tag' is inserted to assure correct unpacking.
Args:
buff (bytes): Binary data package to be unpacked.
offset (int): Where to begin unpacking.
Raises:
UnpackException: If there is a struct unpacking error. | ['Unpack', 'a', 'binary', 'message', 'into', 'this', 'object', 's', 'attributes', '.'] | train | https://github.com/kytos/python-openflow/blob/4f2d0d08ab28e102ed88fe57a4ee17729f1e1bb7/pyof/foundation/network_types.py#L306-L334 |
7,352 | DBuildService/dockerfile-parse | dockerfile_parse/parser.py | DockerfileParser._instruction_getter | def _instruction_getter(self, name, env_replace):
"""
Get LABEL or ENV instructions with environment replacement
:param name: e.g. 'LABEL' or 'ENV'
:param env_replace: bool, whether to perform ENV substitution
:return: Labels instance or Envs instance
"""
if name != 'LABEL' and name != 'ENV':
raise ValueError("Unsupported instruction '%s'", name)
instructions = {}
envs = {}
for instruction_desc in self.structure:
this_instruction = instruction_desc['instruction']
if this_instruction == 'FROM':
instructions.clear()
envs = self.parent_env.copy()
elif this_instruction in (name, 'ENV'):
logger.debug("%s value: %r", name.lower(), instruction_desc['value'])
key_val_list = extract_labels_or_envs(env_replace=env_replace,
envs=envs,
instruction_value=instruction_desc['value'])
for key, value in key_val_list:
if this_instruction == name:
instructions[key] = value
logger.debug("new %s %r=%r", name.lower(), key, value)
if this_instruction == 'ENV':
envs[key] = value
logger.debug("instructions: %r", instructions)
return Labels(instructions, self) if name == 'LABEL' else Envs(instructions, self) | python | def _instruction_getter(self, name, env_replace):
"""
Get LABEL or ENV instructions with environment replacement
:param name: e.g. 'LABEL' or 'ENV'
:param env_replace: bool, whether to perform ENV substitution
:return: Labels instance or Envs instance
"""
if name != 'LABEL' and name != 'ENV':
raise ValueError("Unsupported instruction '%s'", name)
instructions = {}
envs = {}
for instruction_desc in self.structure:
this_instruction = instruction_desc['instruction']
if this_instruction == 'FROM':
instructions.clear()
envs = self.parent_env.copy()
elif this_instruction in (name, 'ENV'):
logger.debug("%s value: %r", name.lower(), instruction_desc['value'])
key_val_list = extract_labels_or_envs(env_replace=env_replace,
envs=envs,
instruction_value=instruction_desc['value'])
for key, value in key_val_list:
if this_instruction == name:
instructions[key] = value
logger.debug("new %s %r=%r", name.lower(), key, value)
if this_instruction == 'ENV':
envs[key] = value
logger.debug("instructions: %r", instructions)
return Labels(instructions, self) if name == 'LABEL' else Envs(instructions, self) | ['def', '_instruction_getter', '(', 'self', ',', 'name', ',', 'env_replace', ')', ':', 'if', 'name', '!=', "'LABEL'", 'and', 'name', '!=', "'ENV'", ':', 'raise', 'ValueError', '(', '"Unsupported instruction \'%s\'"', ',', 'name', ')', 'instructions', '=', '{', '}', 'envs', '=', '{', '}', 'for', 'instruction_desc', 'in', 'self', '.', 'structure', ':', 'this_instruction', '=', 'instruction_desc', '[', "'instruction'", ']', 'if', 'this_instruction', '==', "'FROM'", ':', 'instructions', '.', 'clear', '(', ')', 'envs', '=', 'self', '.', 'parent_env', '.', 'copy', '(', ')', 'elif', 'this_instruction', 'in', '(', 'name', ',', "'ENV'", ')', ':', 'logger', '.', 'debug', '(', '"%s value: %r"', ',', 'name', '.', 'lower', '(', ')', ',', 'instruction_desc', '[', "'value'", ']', ')', 'key_val_list', '=', 'extract_labels_or_envs', '(', 'env_replace', '=', 'env_replace', ',', 'envs', '=', 'envs', ',', 'instruction_value', '=', 'instruction_desc', '[', "'value'", ']', ')', 'for', 'key', ',', 'value', 'in', 'key_val_list', ':', 'if', 'this_instruction', '==', 'name', ':', 'instructions', '[', 'key', ']', '=', 'value', 'logger', '.', 'debug', '(', '"new %s %r=%r"', ',', 'name', '.', 'lower', '(', ')', ',', 'key', ',', 'value', ')', 'if', 'this_instruction', '==', "'ENV'", ':', 'envs', '[', 'key', ']', '=', 'value', 'logger', '.', 'debug', '(', '"instructions: %r"', ',', 'instructions', ')', 'return', 'Labels', '(', 'instructions', ',', 'self', ')', 'if', 'name', '==', "'LABEL'", 'else', 'Envs', '(', 'instructions', ',', 'self', ')'] | Get LABEL or ENV instructions with environment replacement
:param name: e.g. 'LABEL' or 'ENV'
:param env_replace: bool, whether to perform ENV substitution
:return: Labels instance or Envs instance | ['Get', 'LABEL', 'or', 'ENV', 'instructions', 'with', 'environment', 'replacement'] | train | https://github.com/DBuildService/dockerfile-parse/blob/3d7b514d8b8eded1b33529cf0f6a0770a573aee0/dockerfile_parse/parser.py#L391-L422 |
7,353 | tensorflow/tensorboard | tensorboard/plugins/histogram/summary.py | op | def op(name,
data,
bucket_count=None,
display_name=None,
description=None,
collections=None):
"""Create a legacy histogram summary op.
Arguments:
name: A unique name for the generated summary node.
data: A `Tensor` of any shape. Must be castable to `float64`.
bucket_count: Optional positive `int`. The output will have this
many buckets, except in two edge cases. If there is no data, then
there are no buckets. If there is data but all points have the
same value, then there is one bucket whose left and right
endpoints are the same.
display_name: Optional name for this summary in TensorBoard, as a
constant `str`. Defaults to `name`.
description: Optional long-form description for this summary, as a
constant `str`. Markdown is supported. Defaults to empty.
collections: Optional list of graph collections keys. The new
summary op is added to these collections. Defaults to
`[Graph Keys.SUMMARIES]`.
Returns:
A TensorFlow summary op.
"""
# TODO(nickfelt): remove on-demand imports once dep situation is fixed.
import tensorflow.compat.v1 as tf
if display_name is None:
display_name = name
summary_metadata = metadata.create_summary_metadata(
display_name=display_name, description=description)
with tf.name_scope(name):
tensor = _buckets(data, bucket_count=bucket_count)
return tf.summary.tensor_summary(name='histogram_summary',
tensor=tensor,
collections=collections,
summary_metadata=summary_metadata) | python | def op(name,
data,
bucket_count=None,
display_name=None,
description=None,
collections=None):
"""Create a legacy histogram summary op.
Arguments:
name: A unique name for the generated summary node.
data: A `Tensor` of any shape. Must be castable to `float64`.
bucket_count: Optional positive `int`. The output will have this
many buckets, except in two edge cases. If there is no data, then
there are no buckets. If there is data but all points have the
same value, then there is one bucket whose left and right
endpoints are the same.
display_name: Optional name for this summary in TensorBoard, as a
constant `str`. Defaults to `name`.
description: Optional long-form description for this summary, as a
constant `str`. Markdown is supported. Defaults to empty.
collections: Optional list of graph collections keys. The new
summary op is added to these collections. Defaults to
`[Graph Keys.SUMMARIES]`.
Returns:
A TensorFlow summary op.
"""
# TODO(nickfelt): remove on-demand imports once dep situation is fixed.
import tensorflow.compat.v1 as tf
if display_name is None:
display_name = name
summary_metadata = metadata.create_summary_metadata(
display_name=display_name, description=description)
with tf.name_scope(name):
tensor = _buckets(data, bucket_count=bucket_count)
return tf.summary.tensor_summary(name='histogram_summary',
tensor=tensor,
collections=collections,
summary_metadata=summary_metadata) | ['def', 'op', '(', 'name', ',', 'data', ',', 'bucket_count', '=', 'None', ',', 'display_name', '=', 'None', ',', 'description', '=', 'None', ',', 'collections', '=', 'None', ')', ':', '# TODO(nickfelt): remove on-demand imports once dep situation is fixed.', 'import', 'tensorflow', '.', 'compat', '.', 'v1', 'as', 'tf', 'if', 'display_name', 'is', 'None', ':', 'display_name', '=', 'name', 'summary_metadata', '=', 'metadata', '.', 'create_summary_metadata', '(', 'display_name', '=', 'display_name', ',', 'description', '=', 'description', ')', 'with', 'tf', '.', 'name_scope', '(', 'name', ')', ':', 'tensor', '=', '_buckets', '(', 'data', ',', 'bucket_count', '=', 'bucket_count', ')', 'return', 'tf', '.', 'summary', '.', 'tensor_summary', '(', 'name', '=', "'histogram_summary'", ',', 'tensor', '=', 'tensor', ',', 'collections', '=', 'collections', ',', 'summary_metadata', '=', 'summary_metadata', ')'] | Create a legacy histogram summary op.
Arguments:
name: A unique name for the generated summary node.
data: A `Tensor` of any shape. Must be castable to `float64`.
bucket_count: Optional positive `int`. The output will have this
many buckets, except in two edge cases. If there is no data, then
there are no buckets. If there is data but all points have the
same value, then there is one bucket whose left and right
endpoints are the same.
display_name: Optional name for this summary in TensorBoard, as a
constant `str`. Defaults to `name`.
description: Optional long-form description for this summary, as a
constant `str`. Markdown is supported. Defaults to empty.
collections: Optional list of graph collections keys. The new
summary op is added to these collections. Defaults to
`[Graph Keys.SUMMARIES]`.
Returns:
A TensorFlow summary op. | ['Create', 'a', 'legacy', 'histogram', 'summary', 'op', '.'] | train | https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/plugins/histogram/summary.py#L105-L144 |
7,354 | Microsoft/nni | src/sdk/pynni/nni/networkmorphism_tuner/graph_transformer.py | to_deeper_graph | def to_deeper_graph(graph):
''' deeper graph
'''
weighted_layer_ids = graph.deep_layer_ids()
if len(weighted_layer_ids) >= Constant.MAX_LAYERS:
return None
deeper_layer_ids = sample(weighted_layer_ids, 1)
for layer_id in deeper_layer_ids:
layer = graph.layer_list[layer_id]
new_layer = create_new_layer(layer, graph.n_dim)
graph.to_deeper_model(layer_id, new_layer)
return graph | python | def to_deeper_graph(graph):
''' deeper graph
'''
weighted_layer_ids = graph.deep_layer_ids()
if len(weighted_layer_ids) >= Constant.MAX_LAYERS:
return None
deeper_layer_ids = sample(weighted_layer_ids, 1)
for layer_id in deeper_layer_ids:
layer = graph.layer_list[layer_id]
new_layer = create_new_layer(layer, graph.n_dim)
graph.to_deeper_model(layer_id, new_layer)
return graph | ['def', 'to_deeper_graph', '(', 'graph', ')', ':', 'weighted_layer_ids', '=', 'graph', '.', 'deep_layer_ids', '(', ')', 'if', 'len', '(', 'weighted_layer_ids', ')', '>=', 'Constant', '.', 'MAX_LAYERS', ':', 'return', 'None', 'deeper_layer_ids', '=', 'sample', '(', 'weighted_layer_ids', ',', '1', ')', 'for', 'layer_id', 'in', 'deeper_layer_ids', ':', 'layer', '=', 'graph', '.', 'layer_list', '[', 'layer_id', ']', 'new_layer', '=', 'create_new_layer', '(', 'layer', ',', 'graph', '.', 'n_dim', ')', 'graph', '.', 'to_deeper_model', '(', 'layer_id', ',', 'new_layer', ')', 'return', 'graph'] | deeper graph | ['deeper', 'graph'] | train | https://github.com/Microsoft/nni/blob/c7cc8db32da8d2ec77a382a55089f4e17247ce41/src/sdk/pynni/nni/networkmorphism_tuner/graph_transformer.py#L127-L141 |
7,355 | IdentityPython/pysaml2 | src/saml2/attribute_converter.py | AttributeConverter.to_ | def to_(self, attrvals):
""" Create a list of Attribute instances.
:param attrvals: A dictionary of attributes and values
:return: A list of Attribute instances
"""
attributes = []
for key, value in attrvals.items():
name = self._to.get(key.lower())
if name:
if name == "urn:oid:1.3.6.1.4.1.5923.1.1.1.10":
# special case for eduPersonTargetedID
attr_value = []
for v in value:
extension_element = ExtensionElement("NameID", NAMESPACE,
attributes={'Format': NAMEID_FORMAT_PERSISTENT}, text=v)
attrval = saml.AttributeValue(extension_elements=[extension_element])
attr_value.append(attrval)
else:
attr_value = do_ava(value)
attributes.append(factory(saml.Attribute,
name=name,
name_format=self.name_format,
friendly_name=key,
attribute_value=attr_value))
else:
attributes.append(factory(saml.Attribute,
name=key,
attribute_value=do_ava(value)))
return attributes | python | def to_(self, attrvals):
""" Create a list of Attribute instances.
:param attrvals: A dictionary of attributes and values
:return: A list of Attribute instances
"""
attributes = []
for key, value in attrvals.items():
name = self._to.get(key.lower())
if name:
if name == "urn:oid:1.3.6.1.4.1.5923.1.1.1.10":
# special case for eduPersonTargetedID
attr_value = []
for v in value:
extension_element = ExtensionElement("NameID", NAMESPACE,
attributes={'Format': NAMEID_FORMAT_PERSISTENT}, text=v)
attrval = saml.AttributeValue(extension_elements=[extension_element])
attr_value.append(attrval)
else:
attr_value = do_ava(value)
attributes.append(factory(saml.Attribute,
name=name,
name_format=self.name_format,
friendly_name=key,
attribute_value=attr_value))
else:
attributes.append(factory(saml.Attribute,
name=key,
attribute_value=do_ava(value)))
return attributes | ['def', 'to_', '(', 'self', ',', 'attrvals', ')', ':', 'attributes', '=', '[', ']', 'for', 'key', ',', 'value', 'in', 'attrvals', '.', 'items', '(', ')', ':', 'name', '=', 'self', '.', '_to', '.', 'get', '(', 'key', '.', 'lower', '(', ')', ')', 'if', 'name', ':', 'if', 'name', '==', '"urn:oid:1.3.6.1.4.1.5923.1.1.1.10"', ':', '# special case for eduPersonTargetedID', 'attr_value', '=', '[', ']', 'for', 'v', 'in', 'value', ':', 'extension_element', '=', 'ExtensionElement', '(', '"NameID"', ',', 'NAMESPACE', ',', 'attributes', '=', '{', "'Format'", ':', 'NAMEID_FORMAT_PERSISTENT', '}', ',', 'text', '=', 'v', ')', 'attrval', '=', 'saml', '.', 'AttributeValue', '(', 'extension_elements', '=', '[', 'extension_element', ']', ')', 'attr_value', '.', 'append', '(', 'attrval', ')', 'else', ':', 'attr_value', '=', 'do_ava', '(', 'value', ')', 'attributes', '.', 'append', '(', 'factory', '(', 'saml', '.', 'Attribute', ',', 'name', '=', 'name', ',', 'name_format', '=', 'self', '.', 'name_format', ',', 'friendly_name', '=', 'key', ',', 'attribute_value', '=', 'attr_value', ')', ')', 'else', ':', 'attributes', '.', 'append', '(', 'factory', '(', 'saml', '.', 'Attribute', ',', 'name', '=', 'key', ',', 'attribute_value', '=', 'do_ava', '(', 'value', ')', ')', ')', 'return', 'attributes'] | Create a list of Attribute instances.
:param attrvals: A dictionary of attributes and values
:return: A list of Attribute instances | ['Create', 'a', 'list', 'of', 'Attribute', 'instances', '.'] | train | https://github.com/IdentityPython/pysaml2/blob/d3aa78eeb7d37c12688f783cb4db1c7263a14ad6/src/saml2/attribute_converter.py#L423-L453 |
7,356 | minhhoit/yacms | yacms/blog/management/base.py | BaseImporterCommand.add_page | def add_page(self, title=None, content=None, old_url=None,
tags=None, old_id=None, old_parent_id=None):
"""
Adds a page to the list of pages to be imported - used by the
Wordpress importer.
"""
if not title:
text = decode_entities(strip_tags(content)).replace("\n", " ")
title = text.split(". ")[0]
if tags is None:
tags = []
self.pages.append({
"title": title,
"content": content,
"tags": tags,
"old_url": old_url,
"old_id": old_id,
"old_parent_id": old_parent_id,
}) | python | def add_page(self, title=None, content=None, old_url=None,
tags=None, old_id=None, old_parent_id=None):
"""
Adds a page to the list of pages to be imported - used by the
Wordpress importer.
"""
if not title:
text = decode_entities(strip_tags(content)).replace("\n", " ")
title = text.split(". ")[0]
if tags is None:
tags = []
self.pages.append({
"title": title,
"content": content,
"tags": tags,
"old_url": old_url,
"old_id": old_id,
"old_parent_id": old_parent_id,
}) | ['def', 'add_page', '(', 'self', ',', 'title', '=', 'None', ',', 'content', '=', 'None', ',', 'old_url', '=', 'None', ',', 'tags', '=', 'None', ',', 'old_id', '=', 'None', ',', 'old_parent_id', '=', 'None', ')', ':', 'if', 'not', 'title', ':', 'text', '=', 'decode_entities', '(', 'strip_tags', '(', 'content', ')', ')', '.', 'replace', '(', '"\\n"', ',', '" "', ')', 'title', '=', 'text', '.', 'split', '(', '". "', ')', '[', '0', ']', 'if', 'tags', 'is', 'None', ':', 'tags', '=', '[', ']', 'self', '.', 'pages', '.', 'append', '(', '{', '"title"', ':', 'title', ',', '"content"', ':', 'content', ',', '"tags"', ':', 'tags', ',', '"old_url"', ':', 'old_url', ',', '"old_id"', ':', 'old_id', ',', '"old_parent_id"', ':', 'old_parent_id', ',', '}', ')'] | Adds a page to the list of pages to be imported - used by the
Wordpress importer. | ['Adds', 'a', 'page', 'to', 'the', 'list', 'of', 'pages', 'to', 'be', 'imported', '-', 'used', 'by', 'the', 'Wordpress', 'importer', '.'] | train | https://github.com/minhhoit/yacms/blob/2921b706b7107c6e8c5f2bbf790ff11f85a2167f/yacms/blog/management/base.py#L86-L104 |
7,357 | skorokithakis/shortuuid | shortuuid/main.py | ShortUUID.set_alphabet | def set_alphabet(self, alphabet):
"""Set the alphabet to be used for new UUIDs."""
# Turn the alphabet into a set and sort it to prevent duplicates
# and ensure reproducibility.
new_alphabet = list(sorted(set(alphabet)))
if len(new_alphabet) > 1:
self._alphabet = new_alphabet
self._alpha_len = len(self._alphabet)
else:
raise ValueError("Alphabet with more than " "one unique symbols required.") | python | def set_alphabet(self, alphabet):
"""Set the alphabet to be used for new UUIDs."""
# Turn the alphabet into a set and sort it to prevent duplicates
# and ensure reproducibility.
new_alphabet = list(sorted(set(alphabet)))
if len(new_alphabet) > 1:
self._alphabet = new_alphabet
self._alpha_len = len(self._alphabet)
else:
raise ValueError("Alphabet with more than " "one unique symbols required.") | ['def', 'set_alphabet', '(', 'self', ',', 'alphabet', ')', ':', '# Turn the alphabet into a set and sort it to prevent duplicates', '# and ensure reproducibility.', 'new_alphabet', '=', 'list', '(', 'sorted', '(', 'set', '(', 'alphabet', ')', ')', ')', 'if', 'len', '(', 'new_alphabet', ')', '>', '1', ':', 'self', '.', '_alphabet', '=', 'new_alphabet', 'self', '.', '_alpha_len', '=', 'len', '(', 'self', '.', '_alphabet', ')', 'else', ':', 'raise', 'ValueError', '(', '"Alphabet with more than "', '"one unique symbols required."', ')'] | Set the alphabet to be used for new UUIDs. | ['Set', 'the', 'alphabet', 'to', 'be', 'used', 'for', 'new', 'UUIDs', '.'] | train | https://github.com/skorokithakis/shortuuid/blob/4da632a986c3a43f75c7df64f27a90bbf7ff8039/shortuuid/main.py#L111-L121 |
7,358 | martinpitt/python-dbusmock | dbusmock/templates/ofono.py | add_simmanager_api | def add_simmanager_api(self, mock):
'''Add org.ofono.SimManager API to a mock'''
iface = 'org.ofono.SimManager'
mock.AddProperties(iface, {
'BarredDialing': _parameters.get('BarredDialing', False),
'CardIdentifier': _parameters.get('CardIdentifier', new_iccid(self)),
'FixedDialing': _parameters.get('FixedDialing', False),
'LockedPins': _parameters.get('LockedPins', dbus.Array([], signature='s')),
'MobileCountryCode': _parameters.get('MobileCountryCode', '310'),
'MobileNetworkCode': _parameters.get('MobileNetworkCode', '150'),
'PreferredLanguages': _parameters.get('PreferredLanguages', ['en']),
'Present': _parameters.get('Present', dbus.Boolean(True)),
'Retries': _parameters.get('Retries', dbus.Dictionary([["pin", dbus.Byte(3)], ["puk", dbus.Byte(10)]])),
'PinRequired': _parameters.get('PinRequired', "none"),
'SubscriberNumbers': _parameters.get('SubscriberNumbers', ['123456789', '234567890']),
'SubscriberIdentity': _parameters.get('SubscriberIdentity', new_imsi(self)),
})
mock.AddMethods(iface, [
('GetProperties', '', 'a{sv}', 'ret = self.GetAll("%s")' % iface),
('SetProperty', 'sv', '', 'self.Set("%(i)s", args[0], args[1]); '
'self.EmitSignal("%(i)s", "PropertyChanged", "sv", [args[0], args[1]])' % {'i': iface}),
('ChangePin', 'sss', '', ''),
('EnterPin', 'ss', '',
'correctPin = "1234"\n'
'newRetries = self.Get("%(i)s", "Retries")\n'
'if args[0] == "pin" and args[1] != correctPin:\n'
' newRetries["pin"] = dbus.Byte(newRetries["pin"] - 1)\n'
'elif args[0] == "pin":\n'
' newRetries["pin"] = dbus.Byte(3)\n'
'self.Set("%(i)s", "Retries", newRetries)\n'
'self.EmitSignal("%(i)s", "PropertyChanged", "sv", ["Retries", newRetries])\n'
'if args[0] == "pin" and args[1] != correctPin:\n'
' class Failed(dbus.exceptions.DBusException):\n'
' _dbus_error_name = "org.ofono.Error.Failed"\n'
' raise Failed("Operation failed")' % {'i': iface}),
('ResetPin', 'sss', '',
'correctPuk = "12345678"\n'
'newRetries = self.Get("%(i)s", "Retries")\n'
'if args[0] == "puk" and args[1] != correctPuk:\n'
' newRetries["puk"] = dbus.Byte(newRetries["puk"] - 1)\n'
'elif args[0] == "puk":\n'
' newRetries["pin"] = dbus.Byte(3)\n'
' newRetries["puk"] = dbus.Byte(10)\n'
'self.Set("%(i)s", "Retries", newRetries)\n'
'self.EmitSignal("%(i)s", "PropertyChanged", "sv", ["Retries", newRetries])\n'
'if args[0] == "puk" and args[1] != correctPuk:\n'
' class Failed(dbus.exceptions.DBusException):\n'
' _dbus_error_name = "org.ofono.Error.Failed"\n'
' raise Failed("Operation failed")' % {'i': iface}),
('LockPin', 'ss', '', ''),
('UnlockPin', 'ss', '', ''),
]) | python | def add_simmanager_api(self, mock):
'''Add org.ofono.SimManager API to a mock'''
iface = 'org.ofono.SimManager'
mock.AddProperties(iface, {
'BarredDialing': _parameters.get('BarredDialing', False),
'CardIdentifier': _parameters.get('CardIdentifier', new_iccid(self)),
'FixedDialing': _parameters.get('FixedDialing', False),
'LockedPins': _parameters.get('LockedPins', dbus.Array([], signature='s')),
'MobileCountryCode': _parameters.get('MobileCountryCode', '310'),
'MobileNetworkCode': _parameters.get('MobileNetworkCode', '150'),
'PreferredLanguages': _parameters.get('PreferredLanguages', ['en']),
'Present': _parameters.get('Present', dbus.Boolean(True)),
'Retries': _parameters.get('Retries', dbus.Dictionary([["pin", dbus.Byte(3)], ["puk", dbus.Byte(10)]])),
'PinRequired': _parameters.get('PinRequired', "none"),
'SubscriberNumbers': _parameters.get('SubscriberNumbers', ['123456789', '234567890']),
'SubscriberIdentity': _parameters.get('SubscriberIdentity', new_imsi(self)),
})
mock.AddMethods(iface, [
('GetProperties', '', 'a{sv}', 'ret = self.GetAll("%s")' % iface),
('SetProperty', 'sv', '', 'self.Set("%(i)s", args[0], args[1]); '
'self.EmitSignal("%(i)s", "PropertyChanged", "sv", [args[0], args[1]])' % {'i': iface}),
('ChangePin', 'sss', '', ''),
('EnterPin', 'ss', '',
'correctPin = "1234"\n'
'newRetries = self.Get("%(i)s", "Retries")\n'
'if args[0] == "pin" and args[1] != correctPin:\n'
' newRetries["pin"] = dbus.Byte(newRetries["pin"] - 1)\n'
'elif args[0] == "pin":\n'
' newRetries["pin"] = dbus.Byte(3)\n'
'self.Set("%(i)s", "Retries", newRetries)\n'
'self.EmitSignal("%(i)s", "PropertyChanged", "sv", ["Retries", newRetries])\n'
'if args[0] == "pin" and args[1] != correctPin:\n'
' class Failed(dbus.exceptions.DBusException):\n'
' _dbus_error_name = "org.ofono.Error.Failed"\n'
' raise Failed("Operation failed")' % {'i': iface}),
('ResetPin', 'sss', '',
'correctPuk = "12345678"\n'
'newRetries = self.Get("%(i)s", "Retries")\n'
'if args[0] == "puk" and args[1] != correctPuk:\n'
' newRetries["puk"] = dbus.Byte(newRetries["puk"] - 1)\n'
'elif args[0] == "puk":\n'
' newRetries["pin"] = dbus.Byte(3)\n'
' newRetries["puk"] = dbus.Byte(10)\n'
'self.Set("%(i)s", "Retries", newRetries)\n'
'self.EmitSignal("%(i)s", "PropertyChanged", "sv", ["Retries", newRetries])\n'
'if args[0] == "puk" and args[1] != correctPuk:\n'
' class Failed(dbus.exceptions.DBusException):\n'
' _dbus_error_name = "org.ofono.Error.Failed"\n'
' raise Failed("Operation failed")' % {'i': iface}),
('LockPin', 'ss', '', ''),
('UnlockPin', 'ss', '', ''),
]) | ['def', 'add_simmanager_api', '(', 'self', ',', 'mock', ')', ':', 'iface', '=', "'org.ofono.SimManager'", 'mock', '.', 'AddProperties', '(', 'iface', ',', '{', "'BarredDialing'", ':', '_parameters', '.', 'get', '(', "'BarredDialing'", ',', 'False', ')', ',', "'CardIdentifier'", ':', '_parameters', '.', 'get', '(', "'CardIdentifier'", ',', 'new_iccid', '(', 'self', ')', ')', ',', "'FixedDialing'", ':', '_parameters', '.', 'get', '(', "'FixedDialing'", ',', 'False', ')', ',', "'LockedPins'", ':', '_parameters', '.', 'get', '(', "'LockedPins'", ',', 'dbus', '.', 'Array', '(', '[', ']', ',', 'signature', '=', "'s'", ')', ')', ',', "'MobileCountryCode'", ':', '_parameters', '.', 'get', '(', "'MobileCountryCode'", ',', "'310'", ')', ',', "'MobileNetworkCode'", ':', '_parameters', '.', 'get', '(', "'MobileNetworkCode'", ',', "'150'", ')', ',', "'PreferredLanguages'", ':', '_parameters', '.', 'get', '(', "'PreferredLanguages'", ',', '[', "'en'", ']', ')', ',', "'Present'", ':', '_parameters', '.', 'get', '(', "'Present'", ',', 'dbus', '.', 'Boolean', '(', 'True', ')', ')', ',', "'Retries'", ':', '_parameters', '.', 'get', '(', "'Retries'", ',', 'dbus', '.', 'Dictionary', '(', '[', '[', '"pin"', ',', 'dbus', '.', 'Byte', '(', '3', ')', ']', ',', '[', '"puk"', ',', 'dbus', '.', 'Byte', '(', '10', ')', ']', ']', ')', ')', ',', "'PinRequired'", ':', '_parameters', '.', 'get', '(', "'PinRequired'", ',', '"none"', ')', ',', "'SubscriberNumbers'", ':', '_parameters', '.', 'get', '(', "'SubscriberNumbers'", ',', '[', "'123456789'", ',', "'234567890'", ']', ')', ',', "'SubscriberIdentity'", ':', '_parameters', '.', 'get', '(', "'SubscriberIdentity'", ',', 'new_imsi', '(', 'self', ')', ')', ',', '}', ')', 'mock', '.', 'AddMethods', '(', 'iface', ',', '[', '(', "'GetProperties'", ',', "''", ',', "'a{sv}'", ',', '\'ret = self.GetAll("%s")\'', '%', 'iface', ')', ',', '(', "'SetProperty'", ',', "'sv'", ',', "''", ',', '\'self.Set("%(i)s", args[0], args[1]); \'', '\'self.EmitSignal("%(i)s", "PropertyChanged", "sv", [args[0], args[1]])\'', '%', '{', "'i'", ':', 'iface', '}', ')', ',', '(', "'ChangePin'", ',', "'sss'", ',', "''", ',', "''", ')', ',', '(', "'EnterPin'", ',', "'ss'", ',', "''", ',', '\'correctPin = "1234"\\n\'', '\'newRetries = self.Get("%(i)s", "Retries")\\n\'', '\'if args[0] == "pin" and args[1] != correctPin:\\n\'', '\' newRetries["pin"] = dbus.Byte(newRetries["pin"] - 1)\\n\'', '\'elif args[0] == "pin":\\n\'', '\' newRetries["pin"] = dbus.Byte(3)\\n\'', '\'self.Set("%(i)s", "Retries", newRetries)\\n\'', '\'self.EmitSignal("%(i)s", "PropertyChanged", "sv", ["Retries", newRetries])\\n\'', '\'if args[0] == "pin" and args[1] != correctPin:\\n\'', "' class Failed(dbus.exceptions.DBusException):\\n'", '\' _dbus_error_name = "org.ofono.Error.Failed"\\n\'', '\' raise Failed("Operation failed")\'', '%', '{', "'i'", ':', 'iface', '}', ')', ',', '(', "'ResetPin'", ',', "'sss'", ',', "''", ',', '\'correctPuk = "12345678"\\n\'', '\'newRetries = self.Get("%(i)s", "Retries")\\n\'', '\'if args[0] == "puk" and args[1] != correctPuk:\\n\'', '\' newRetries["puk"] = dbus.Byte(newRetries["puk"] - 1)\\n\'', '\'elif args[0] == "puk":\\n\'', '\' newRetries["pin"] = dbus.Byte(3)\\n\'', '\' newRetries["puk"] = dbus.Byte(10)\\n\'', '\'self.Set("%(i)s", "Retries", newRetries)\\n\'', '\'self.EmitSignal("%(i)s", "PropertyChanged", "sv", ["Retries", newRetries])\\n\'', '\'if args[0] == "puk" and args[1] != correctPuk:\\n\'', "' class Failed(dbus.exceptions.DBusException):\\n'", '\' _dbus_error_name = 
"org.ofono.Error.Failed"\\n\'', '\' raise Failed("Operation failed")\'', '%', '{', "'i'", ':', 'iface', '}', ')', ',', '(', "'LockPin'", ',', "'ss'", ',', "''", ',', "''", ')', ',', '(', "'UnlockPin'", ',', "'ss'", ',', "''", ',', "''", ')', ',', ']', ')'] | Add org.ofono.SimManager API to a mock | ['Add', 'org', '.', 'ofono', '.', 'SimManager', 'API', 'to', 'a', 'mock'] | train | https://github.com/martinpitt/python-dbusmock/blob/26f65f78bc0ed347233f699a8d6ee0e6880e7eb0/dbusmock/templates/ofono.py#L329-L388 |
7,359 | fabioz/PyDev.Debugger | pydevd_attach_to_process/winappdbg/interactive.py | ConsoleDebugger.do_search | def do_search(self, arg):
"""
[~process] s [address-address] <search string>
[~process] search [address-address] <search string>
"""
token_list = self.split_tokens(arg, 1, 3)
pid, tid = self.get_process_and_thread_ids_from_prefix()
process = self.get_process(pid)
if len(token_list) == 1:
pattern = token_list[0]
minAddr = None
maxAddr = None
else:
pattern = token_list[-1]
addr, size = self.input_address_range(token_list[:-1], pid, tid)
minAddr = addr
maxAddr = addr + size
iter = process.search_bytes(pattern)
if process.get_bits() == 32:
addr_width = 8
else:
addr_width = 16
# TODO: need a prettier output here!
for addr in iter:
print(HexDump.address(addr, addr_width)) | python | def do_search(self, arg):
"""
[~process] s [address-address] <search string>
[~process] search [address-address] <search string>
"""
token_list = self.split_tokens(arg, 1, 3)
pid, tid = self.get_process_and_thread_ids_from_prefix()
process = self.get_process(pid)
if len(token_list) == 1:
pattern = token_list[0]
minAddr = None
maxAddr = None
else:
pattern = token_list[-1]
addr, size = self.input_address_range(token_list[:-1], pid, tid)
minAddr = addr
maxAddr = addr + size
iter = process.search_bytes(pattern)
if process.get_bits() == 32:
addr_width = 8
else:
addr_width = 16
# TODO: need a prettier output here!
for addr in iter:
print(HexDump.address(addr, addr_width)) | ['def', 'do_search', '(', 'self', ',', 'arg', ')', ':', 'token_list', '=', 'self', '.', 'split_tokens', '(', 'arg', ',', '1', ',', '3', ')', 'pid', ',', 'tid', '=', 'self', '.', 'get_process_and_thread_ids_from_prefix', '(', ')', 'process', '=', 'self', '.', 'get_process', '(', 'pid', ')', 'if', 'len', '(', 'token_list', ')', '==', '1', ':', 'pattern', '=', 'token_list', '[', '0', ']', 'minAddr', '=', 'None', 'maxAddr', '=', 'None', 'else', ':', 'pattern', '=', 'token_list', '[', '-', '1', ']', 'addr', ',', 'size', '=', 'self', '.', 'input_address_range', '(', 'token_list', '[', ':', '-', '1', ']', ',', 'pid', ',', 'tid', ')', 'minAddr', '=', 'addr', 'maxAddr', '=', 'addr', '+', 'size', 'iter', '=', 'process', '.', 'search_bytes', '(', 'pattern', ')', 'if', 'process', '.', 'get_bits', '(', ')', '==', '32', ':', 'addr_width', '=', '8', 'else', ':', 'addr_width', '=', '16', '# TODO: need a prettier output here!', 'for', 'addr', 'in', 'iter', ':', 'print', '(', 'HexDump', '.', 'address', '(', 'addr', ',', 'addr_width', ')', ')'] | [~process] s [address-address] <search string>
[~process] search [address-address] <search string> | ['[', '~process', ']', 's', '[', 'address', '-', 'address', ']', '<search', 'string', '>', '[', '~process', ']', 'search', '[', 'address', '-', 'address', ']', '<search', 'string', '>'] | train | https://github.com/fabioz/PyDev.Debugger/blob/ed9c4307662a5593b8a7f1f3389ecd0e79b8c503/pydevd_attach_to_process/winappdbg/interactive.py#L1854-L1878 |
7,360 | getpelican/pelican-plugins | filetime_from_git/content_adapter.py | GitContentAdapter.get_newest_commit_date | def get_newest_commit_date(self):
'''
Get datetime of newest commit involving this file
:returns: Datetime of newest commit
'''
newest_commit = self.get_newest_commit()
return self.git.get_commit_date(newest_commit, self.tz_name) | python | def get_newest_commit_date(self):
'''
Get datetime of newest commit involving this file
:returns: Datetime of newest commit
'''
newest_commit = self.get_newest_commit()
return self.git.get_commit_date(newest_commit, self.tz_name) | ['def', 'get_newest_commit_date', '(', 'self', ')', ':', 'newest_commit', '=', 'self', '.', 'get_newest_commit', '(', ')', 'return', 'self', '.', 'git', '.', 'get_commit_date', '(', 'newest_commit', ',', 'self', '.', 'tz_name', ')'] | Get datetime of newest commit involving this file
:returns: Datetime of newest commit | ['Get', 'datetime', 'of', 'newest', 'commit', 'involving', 'this', 'file'] | train | https://github.com/getpelican/pelican-plugins/blob/cfc7a3f224f1743063b034561f89a6a712d13587/filetime_from_git/content_adapter.py#L92-L99 |
7,361 | openstack/hacking | hacking/checks/python23.py | hacking_python3x_print_function | def hacking_python3x_print_function(logical_line, noqa):
r"""Check that all print occurrences look like print functions.
Check that all occurrences of print look like functions, not
print operator. As of Python 3.x, the print operator has
been removed.
Okay: print(msg)
Okay: print (msg)
Okay: print msg # noqa
Okay: print()
H233: print msg
H233: print >>sys.stderr, "hello"
H233: print msg,
H233: print
"""
if noqa:
return
for match in RE_PRINT.finditer(logical_line):
yield match.start(0), (
"H233: Python 3.x incompatible use of print operator") | python | def hacking_python3x_print_function(logical_line, noqa):
r"""Check that all print occurrences look like print functions.
Check that all occurrences of print look like functions, not
print operator. As of Python 3.x, the print operator has
been removed.
Okay: print(msg)
Okay: print (msg)
Okay: print msg # noqa
Okay: print()
H233: print msg
H233: print >>sys.stderr, "hello"
H233: print msg,
H233: print
"""
if noqa:
return
for match in RE_PRINT.finditer(logical_line):
yield match.start(0), (
"H233: Python 3.x incompatible use of print operator") | ['def', 'hacking_python3x_print_function', '(', 'logical_line', ',', 'noqa', ')', ':', 'if', 'noqa', ':', 'return', 'for', 'match', 'in', 'RE_PRINT', '.', 'finditer', '(', 'logical_line', ')', ':', 'yield', 'match', '.', 'start', '(', '0', ')', ',', '(', '"H233: Python 3.x incompatible use of print operator"', ')'] | r"""Check that all print occurrences look like print functions.
Check that all occurrences of print look like functions, not
print operator. As of Python 3.x, the print operator has
been removed.
Okay: print(msg)
Okay: print (msg)
Okay: print msg # noqa
Okay: print()
H233: print msg
H233: print >>sys.stderr, "hello"
H233: print msg,
H233: print | ['r', 'Check', 'that', 'all', 'print', 'occurrences', 'look', 'like', 'print', 'functions', '.'] | train | https://github.com/openstack/hacking/blob/10e58f907181cac91d3b2af422c2458b04a1ec79/hacking/checks/python23.py#L81-L102 |
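The check above can be exercised directly, outside flake8, assuming the hacking package import path shown in this row; the logical lines are arbitrary and the reported offset follows match.start(0).
from hacking.checks.python23 import hacking_python3x_print_function

assert list(hacking_python3x_print_function("print('ok')", False)) == []
offenses = list(hacking_python3x_print_function("print 'hello'", False))
print(offenses)   # e.g. [(0, 'H233: Python 3.x incompatible use of print operator')]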
7,362 | lemieuxl/pyGenClean | pyGenClean/SampleMissingness/sample_missingness.py | runPlink | def runPlink(options):
"""Run Plink with the ``mind`` option.
:param options: the options.
:type options: argparse.Namespace
"""
# The plink command
plinkCommand = [
"plink",
"--noweb",
"--bfile" if options.is_bfile else "--tfile",
options.ifile,
"--mind",
str(options.mind),
"--make-bed",
"--out",
options.out,
]
output = None
try:
output = subprocess.check_output(plinkCommand,
stderr=subprocess.STDOUT, shell=False)
except subprocess.CalledProcessError:
msg = "plink: couldn't run plink"
raise ProgramError(msg) | python | def runPlink(options):
"""Run Plink with the ``mind`` option.
:param options: the options.
:type options: argparse.Namespace
"""
# The plink command
plinkCommand = [
"plink",
"--noweb",
"--bfile" if options.is_bfile else "--tfile",
options.ifile,
"--mind",
str(options.mind),
"--make-bed",
"--out",
options.out,
]
output = None
try:
output = subprocess.check_output(plinkCommand,
stderr=subprocess.STDOUT, shell=False)
except subprocess.CalledProcessError:
msg = "plink: couldn't run plink"
raise ProgramError(msg) | ['def', 'runPlink', '(', 'options', ')', ':', '# The plink command', 'plinkCommand', '=', '[', '"plink"', ',', '"--noweb"', ',', '"--bfile"', 'if', 'options', '.', 'is_bfile', 'else', '"--tfile"', ',', 'options', '.', 'ifile', ',', '"--mind"', ',', 'str', '(', 'options', '.', 'mind', ')', ',', '"--make-bed"', ',', '"--out"', ',', 'options', '.', 'out', ',', ']', 'output', '=', 'None', 'try', ':', 'output', '=', 'subprocess', '.', 'check_output', '(', 'plinkCommand', ',', 'stderr', '=', 'subprocess', '.', 'STDOUT', ',', 'shell', '=', 'False', ')', 'except', 'subprocess', '.', 'CalledProcessError', ':', 'msg', '=', '"plink: couldn\'t run plink"', 'raise', 'ProgramError', '(', 'msg', ')'] | Run Plink with the ``mind`` option.
:param options: the options.
:type options: argparse.Namespace | ['Run', 'Plink', 'with', 'the', 'mind', 'option', '.'] | train | https://github.com/lemieuxl/pyGenClean/blob/6173a48ccc0cf3a3bd711b1f2a1fa16248b8bf55/pyGenClean/SampleMissingness/sample_missingness.py#L56-L83 |
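An illustrative call to runPlink above, assuming plink is on PATH; the argparse.Namespace fields mirror exactly the options the function reads, and the file prefixes are hypothetical.
import argparse
# from pyGenClean.SampleMissingness.sample_missingness import runPlink

opts = argparse.Namespace(
    is_bfile=True,       # input is a binary PLINK file set (cohort.bed/.bim/.fam)
    ifile="cohort",
    mind=0.1,            # drop samples with more than 10% missing genotypes
    out="cohort.clean",
)
# runPlink(opts)  # runs: plink --noweb --bfile cohort --mind 0.1 --make-bed --out cohort.clean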
7,363 | mitsei/dlkit | dlkit/json_/repository/sessions.py | RepositoryHierarchySession.get_child_repository_ids | def get_child_repository_ids(self, repository_id):
"""Gets the ``Ids`` of the children of the given repository.
arg: repository_id (osid.id.Id): the ``Id`` to query
return: (osid.id.IdList) - the children of the repository
raise: NotFound - ``repository_id`` not found
raise: NullArgument - ``repository_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.BinHierarchySession.get_child_bin_ids
if self._catalog_session is not None:
return self._catalog_session.get_child_catalog_ids(catalog_id=repository_id)
return self._hierarchy_session.get_children(id_=repository_id) | python | def get_child_repository_ids(self, repository_id):
"""Gets the ``Ids`` of the children of the given repository.
arg: repository_id (osid.id.Id): the ``Id`` to query
return: (osid.id.IdList) - the children of the repository
raise: NotFound - ``repository_id`` not found
raise: NullArgument - ``repository_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.BinHierarchySession.get_child_bin_ids
if self._catalog_session is not None:
return self._catalog_session.get_child_catalog_ids(catalog_id=repository_id)
return self._hierarchy_session.get_children(id_=repository_id) | ['def', 'get_child_repository_ids', '(', 'self', ',', 'repository_id', ')', ':', '# Implemented from template for', '# osid.resource.BinHierarchySession.get_child_bin_ids', 'if', 'self', '.', '_catalog_session', 'is', 'not', 'None', ':', 'return', 'self', '.', '_catalog_session', '.', 'get_child_catalog_ids', '(', 'catalog_id', '=', 'repository_id', ')', 'return', 'self', '.', '_hierarchy_session', '.', 'get_children', '(', 'id_', '=', 'repository_id', ')'] | Gets the ``Ids`` of the children of the given repository.
arg: repository_id (osid.id.Id): the ``Id`` to query
return: (osid.id.IdList) - the children of the repository
raise: NotFound - ``repository_id`` not found
raise: NullArgument - ``repository_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.* | ['Gets', 'the', 'Ids', 'of', 'the', 'children', 'of', 'the', 'given', 'repository', '.'] | train | https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/repository/sessions.py#L5623-L5639 |
7,364 | michael-lazar/rtv | rtv/packages/praw/__init__.py | Config.ua_string | def ua_string(praw_info):
"""Return the user-agent string.
The user-agent string contains PRAW version and platform version info.
"""
if os.environ.get('SERVER_SOFTWARE') is not None:
# Google App Engine information
# https://developers.google.com/appengine/docs/python/
info = os.environ.get('SERVER_SOFTWARE')
else:
# Standard platform information
info = platform.platform(True).encode('ascii', 'ignore')
return '{0} PRAW/{1} Python/{2} {3}'.format(
praw_info, __version__, sys.version.split()[0], info) | python | def ua_string(praw_info):
"""Return the user-agent string.
The user-agent string contains PRAW version and platform version info.
"""
if os.environ.get('SERVER_SOFTWARE') is not None:
# Google App Engine information
# https://developers.google.com/appengine/docs/python/
info = os.environ.get('SERVER_SOFTWARE')
else:
# Standard platform information
info = platform.platform(True).encode('ascii', 'ignore')
return '{0} PRAW/{1} Python/{2} {3}'.format(
praw_info, __version__, sys.version.split()[0], info) | ['def', 'ua_string', '(', 'praw_info', ')', ':', 'if', 'os', '.', 'environ', '.', 'get', '(', "'SERVER_SOFTWARE'", ')', 'is', 'not', 'None', ':', '# Google App Engine information', '# https://developers.google.com/appengine/docs/python/', 'info', '=', 'os', '.', 'environ', '.', 'get', '(', "'SERVER_SOFTWARE'", ')', 'else', ':', '# Standard platform information', 'info', '=', 'platform', '.', 'platform', '(', 'True', ')', '.', 'encode', '(', "'ascii'", ',', "'ignore'", ')', 'return', "'{0} PRAW/{1} Python/{2} {3}'", '.', 'format', '(', 'praw_info', ',', '__version__', ',', 'sys', '.', 'version', '.', 'split', '(', ')', '[', '0', ']', ',', 'info', ')'] | Return the user-agent string.
The user-agent string contains PRAW version and platform version info. | ['Return', 'the', 'user', '-', 'agent', 'string', '.'] | train | https://github.com/michael-lazar/rtv/blob/ccef2af042566ad384977028cf0bde01bc524dda/rtv/packages/praw/__init__.py#L197-L212 |
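The formatting done by ua_string above can be reproduced standalone; note that on Python 3 the .encode() call returns bytes, so the platform segment renders with a b'' prefix. The PRAW version below is illustrative, not taken from this row.
import platform
import sys

info = platform.platform(True).encode("ascii", "ignore")   # bytes on Python 3
ua = "{0} PRAW/{1} Python/{2} {3}".format(
    "my-script/0.1", "3.6.2", sys.version.split()[0], info)
print(ua)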
7,365 | klen/peewee_migrate | peewee_migrate/migrator.py | SchemaMigrator.from_database | def from_database(cls, database):
"""Initialize migrator by db."""
if isinstance(database, PostgresqlDatabase):
return PostgresqlMigrator(database)
if isinstance(database, SqliteDatabase):
return SqliteMigrator(database)
if isinstance(database, MySQLDatabase):
return MySQLMigrator(database)
return super(SchemaMigrator, cls).from_database(database) | python | def from_database(cls, database):
"""Initialize migrator by db."""
if isinstance(database, PostgresqlDatabase):
return PostgresqlMigrator(database)
if isinstance(database, SqliteDatabase):
return SqliteMigrator(database)
if isinstance(database, MySQLDatabase):
return MySQLMigrator(database)
return super(SchemaMigrator, cls).from_database(database) | ['def', 'from_database', '(', 'cls', ',', 'database', ')', ':', 'if', 'isinstance', '(', 'database', ',', 'PostgresqlDatabase', ')', ':', 'return', 'PostgresqlMigrator', '(', 'database', ')', 'if', 'isinstance', '(', 'database', ',', 'SqliteDatabase', ')', ':', 'return', 'SqliteMigrator', '(', 'database', ')', 'if', 'isinstance', '(', 'database', ',', 'MySQLDatabase', ')', ':', 'return', 'MySQLMigrator', '(', 'database', ')', 'return', 'super', '(', 'SchemaMigrator', ',', 'cls', ')', '.', 'from_database', '(', 'database', ')'] | Initialize migrator by db. | ['Initialize', 'migrator', 'by', 'db', '.'] | train | https://github.com/klen/peewee_migrate/blob/b77895ab1c9be3121bc127e0c2dfb047eed8b24c/peewee_migrate/migrator.py#L20-L28 |
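A minimal sketch of the dispatch above, assuming peewee is installed; an in-memory SQLite database selects SqliteMigrator.
from peewee import SqliteDatabase
from peewee_migrate.migrator import SchemaMigrator

migrator = SchemaMigrator.from_database(SqliteDatabase(":memory:"))
print(type(migrator).__name__)   # SqliteMigrator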
7,366 | fhcrc/taxtastic | taxtastic/utils.py | getlines | def getlines(fname):
"""
Returns iterator of whitespace-stripped lines in file, omitting
blank lines, lines beginning with '#', and line contents following
the first '#' character.
"""
with open(fname, 'rU') as f:
for line in f:
if line.strip() and not line.startswith('#'):
yield line.split('#', 1)[0].strip() | python | def getlines(fname):
"""
Returns iterator of whitespace-stripped lines in file, omitting
blank lines, lines beginning with '#', and line contents following
the first '#' character.
"""
with open(fname, 'rU') as f:
for line in f:
if line.strip() and not line.startswith('#'):
yield line.split('#', 1)[0].strip() | ['def', 'getlines', '(', 'fname', ')', ':', 'with', 'open', '(', 'fname', ',', "'rU'", ')', 'as', 'f', ':', 'for', 'line', 'in', 'f', ':', 'if', 'line', '.', 'strip', '(', ')', 'and', 'not', 'line', '.', 'startswith', '(', "'#'", ')', ':', 'yield', 'line', '.', 'split', '(', "'#'", ',', '1', ')', '[', '0', ']', '.', 'strip', '(', ')'] | Returns iterator of whitespace-stripped lines in file, omitting
blank lines, lines beginning with '#', and line contents following
the first '#' character. | ['Returns', 'iterator', 'of', 'whitespace', '-', 'stripped', 'lines', 'in', 'file', 'omitting', 'blank', 'lines', 'lines', 'beginning', 'with', '#', 'and', 'line', 'contents', 'following', 'the', 'first', '#', 'character', '.'] | train | https://github.com/fhcrc/taxtastic/blob/4e874b7f2cc146178828bfba386314f8c342722b/taxtastic/utils.py#L51-L61 |
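Example use of getlines above on a small config-style file, assuming taxtastic is installed; note that the 'rU' open mode it uses was removed in Python 3.11.
with open("names.txt", "w") as fh:
    fh.write("# a comment line\n"
             "alpha  # trailing comment is stripped\n"
             "\n"
             "beta\n")

from taxtastic.utils import getlines
print(list(getlines("names.txt")))   # ['alpha', 'beta']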
7,367 | NoneGG/aredis | aredis/commands/hyperlog.py | ClusterHyperLogCommandMixin.pfmerge | async def pfmerge(self, dest, *sources):
"""
Merge N different HyperLogLogs into a single one.
Cluster impl:
Very special implementation is required to make pfmerge() work
But it works :]
It works by first fetching all HLL objects that should be merged and
move them to one hashslot so that pfmerge operation can be performed without
any 'CROSSSLOT' error.
After the PFMERGE operation is done then it will be moved to the correct location
within the cluster and cleanup is done.
This operation is no longer atomic because of all the operations that has to be done.
"""
all_k = []
# Fetch all HLL objects via GET and store them client side as strings
all_hll_objects = list()
for hll_key in sources:
all_hll_objects.append(await self.get(hll_key))
# Randomize a keyslot hash that should be used inside {} when doing SET
random_hash_slot = self._random_id()
# Special handling of dest variable if it allready exists, then it shold be included in the HLL merge
# dest can exists anywhere in the cluster.
dest_data = await self.get(dest)
if dest_data:
all_hll_objects.append(dest_data)
# SET all stored HLL objects with SET {RandomHash}RandomKey hll_obj
for hll_object in all_hll_objects:
k = self._random_good_hashslot_key(random_hash_slot)
all_k.append(k)
await self.set(k, hll_object)
# Do regular PFMERGE operation and store value in random key in {RandomHash}
tmp_dest = self._random_good_hashslot_key(random_hash_slot)
await self.execute_command("PFMERGE", tmp_dest, *all_k)
# Do GET and SET so that result will be stored in the destination object any where in the cluster
parsed_dest = await self.get(tmp_dest)
await self.set(dest, parsed_dest)
# Cleanup tmp variables
await self.delete(tmp_dest)
for k in all_k:
await self.delete(k)
return True | python | async def pfmerge(self, dest, *sources):
"""
Merge N different HyperLogLogs into a single one.
Cluster impl:
Very special implementation is required to make pfmerge() work
But it works :]
It works by first fetching all HLL objects that should be merged and
move them to one hashslot so that pfmerge operation can be performed without
any 'CROSSSLOT' error.
After the PFMERGE operation is done then it will be moved to the correct location
within the cluster and cleanup is done.
This operation is no longer atomic because of all the operations that has to be done.
"""
all_k = []
# Fetch all HLL objects via GET and store them client side as strings
all_hll_objects = list()
for hll_key in sources:
all_hll_objects.append(await self.get(hll_key))
# Randomize a keyslot hash that should be used inside {} when doing SET
random_hash_slot = self._random_id()
# Special handling of dest variable if it allready exists, then it shold be included in the HLL merge
# dest can exists anywhere in the cluster.
dest_data = await self.get(dest)
if dest_data:
all_hll_objects.append(dest_data)
# SET all stored HLL objects with SET {RandomHash}RandomKey hll_obj
for hll_object in all_hll_objects:
k = self._random_good_hashslot_key(random_hash_slot)
all_k.append(k)
await self.set(k, hll_object)
# Do regular PFMERGE operation and store value in random key in {RandomHash}
tmp_dest = self._random_good_hashslot_key(random_hash_slot)
await self.execute_command("PFMERGE", tmp_dest, *all_k)
# Do GET and SET so that result will be stored in the destination object any where in the cluster
parsed_dest = await self.get(tmp_dest)
await self.set(dest, parsed_dest)
# Cleanup tmp variables
await self.delete(tmp_dest)
for k in all_k:
await self.delete(k)
return True | ['async', 'def', 'pfmerge', '(', 'self', ',', 'dest', ',', '*', 'sources', ')', ':', 'all_k', '=', '[', ']', '# Fetch all HLL objects via GET and store them client side as strings', 'all_hll_objects', '=', 'list', '(', ')', 'for', 'hll_key', 'in', 'sources', ':', 'all_hll_objects', '.', 'append', '(', 'await', 'self', '.', 'get', '(', 'hll_key', ')', ')', '# Randomize a keyslot hash that should be used inside {} when doing SET', 'random_hash_slot', '=', 'self', '.', '_random_id', '(', ')', '# Special handling of dest variable if it allready exists, then it shold be included in the HLL merge', '# dest can exists anywhere in the cluster.', 'dest_data', '=', 'await', 'self', '.', 'get', '(', 'dest', ')', 'if', 'dest_data', ':', 'all_hll_objects', '.', 'append', '(', 'dest_data', ')', '# SET all stored HLL objects with SET {RandomHash}RandomKey hll_obj', 'for', 'hll_object', 'in', 'all_hll_objects', ':', 'k', '=', 'self', '.', '_random_good_hashslot_key', '(', 'random_hash_slot', ')', 'all_k', '.', 'append', '(', 'k', ')', 'await', 'self', '.', 'set', '(', 'k', ',', 'hll_object', ')', '# Do regular PFMERGE operation and store value in random key in {RandomHash}', 'tmp_dest', '=', 'self', '.', '_random_good_hashslot_key', '(', 'random_hash_slot', ')', 'await', 'self', '.', 'execute_command', '(', '"PFMERGE"', ',', 'tmp_dest', ',', '*', 'all_k', ')', '# Do GET and SET so that result will be stored in the destination object any where in the cluster', 'parsed_dest', '=', 'await', 'self', '.', 'get', '(', 'tmp_dest', ')', 'await', 'self', '.', 'set', '(', 'dest', ',', 'parsed_dest', ')', '# Cleanup tmp variables', 'await', 'self', '.', 'delete', '(', 'tmp_dest', ')', 'for', 'k', 'in', 'all_k', ':', 'await', 'self', '.', 'delete', '(', 'k', ')', 'return', 'True'] | Merge N different HyperLogLogs into a single one.
Cluster impl:
Very special implementation is required to make pfmerge() work
But it works :]
It works by first fetching all HLL objects that should be merged and
move them to one hashslot so that pfmerge operation can be performed without
any 'CROSSSLOT' error.
After the PFMERGE operation is done then it will be moved to the correct location
within the cluster and cleanup is done.
This operation is no longer atomic because of all the operations that has to be done. | ['Merge', 'N', 'different', 'HyperLogLogs', 'into', 'a', 'single', 'one', '.'] | train | https://github.com/NoneGG/aredis/blob/204caad740ac13e5760d46444a2ba7632982a046/aredis/commands/hyperlog.py#L35-L87 |
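A hedged usage sketch for the cluster pfmerge above; the StrictRedisCluster constructor arguments and node address are assumptions, not taken from this row.
import asyncio
from aredis import StrictRedisCluster

async def count_unique(client):
    await client.pfadd("visits:web", "u1", "u2")
    await client.pfadd("visits:app", "u2", "u3")
    await client.pfmerge("visits:all", "visits:web", "visits:app")
    return await client.pfcount("visits:all")   # roughly 3 unique ids

# client = StrictRedisCluster(startup_nodes=[{"host": "127.0.0.1", "port": 7000}])
# asyncio.get_event_loop().run_until_complete(count_unique(client))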
7,368 | saltstack/salt | salt/modules/inspectlib/fsdb.py | CsvDB.list_tables | def list_tables(self):
'''
Load existing tables and their descriptions.
:return:
'''
if not self._tables:
for table_name in os.listdir(self.db_path):
self._tables[table_name] = self._load_table(table_name)
return self._tables.keys() | python | def list_tables(self):
'''
Load existing tables and their descriptions.
:return:
'''
if not self._tables:
for table_name in os.listdir(self.db_path):
self._tables[table_name] = self._load_table(table_name)
return self._tables.keys() | ['def', 'list_tables', '(', 'self', ')', ':', 'if', 'not', 'self', '.', '_tables', ':', 'for', 'table_name', 'in', 'os', '.', 'listdir', '(', 'self', '.', 'db_path', ')', ':', 'self', '.', '_tables', '[', 'table_name', ']', '=', 'self', '.', '_load_table', '(', 'table_name', ')', 'return', 'self', '.', '_tables', '.', 'keys', '(', ')'] | Load existing tables and their descriptions.
:return: | ['Load', 'existing', 'tables', 'and', 'their', 'descriptions', '.'] | train | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/inspectlib/fsdb.py#L126-L136 |
7,369 | KelSolaar/Foundations | foundations/nodes.py | AbstractNode.get_attributes | def get_attributes(self):
"""
Returns the Node attributes.
Usage::
>>> node_a = AbstractNode("MyNodeA", attributeA=Attribute(value="A"), attributeB=Attribute(value="B"))
>>> node_a.get_attributes()
[<Attribute object at 0x7fa471d3b5e0>, <Attribute object at 0x101e6c4a0>]
:return: Attributes.
:rtype: list
"""
return [attribute for attribute in self.itervalues() if issubclass(attribute.__class__, Attribute)] | python | def get_attributes(self):
"""
Returns the Node attributes.
Usage::
>>> node_a = AbstractNode("MyNodeA", attributeA=Attribute(value="A"), attributeB=Attribute(value="B"))
>>> node_a.get_attributes()
[<Attribute object at 0x7fa471d3b5e0>, <Attribute object at 0x101e6c4a0>]
:return: Attributes.
:rtype: list
"""
return [attribute for attribute in self.itervalues() if issubclass(attribute.__class__, Attribute)] | ['def', 'get_attributes', '(', 'self', ')', ':', 'return', '[', 'attribute', 'for', 'attribute', 'in', 'self', '.', 'itervalues', '(', ')', 'if', 'issubclass', '(', 'attribute', '.', '__class__', ',', 'Attribute', ')', ']'] | Returns the Node attributes.
Usage::
>>> node_a = AbstractNode("MyNodeA", attributeA=Attribute(value="A"), attributeB=Attribute(value="B"))
>>> node_a.get_attributes()
[<Attribute object at 0x7fa471d3b5e0>, <Attribute object at 0x101e6c4a0>]
:return: Attributes.
:rtype: list | ['Returns', 'the', 'Node', 'attributes', '.'] | train | https://github.com/KelSolaar/Foundations/blob/5c141330faf09dad70a12bc321f4c564917d0a91/foundations/nodes.py#L452-L466 |
7,370 | geophysics-ubonn/reda | lib/reda/utils/pseudo_positions.py | get_xy_simple_dipole_dipole | def get_xy_simple_dipole_dipole(dataframe, spacing=1, indices=None):
"""For each configuration indicated by the numerical index array, compute
(x,z) pseudo locations based on the paper from XX.
All positions are computed for indices=None.
"""
if indices is None:
indices = slice(None)
abmn = dataframe.ix[indices, ['a', 'b', 'm', 'n']].values
posx = np.mean(abmn[:, 0:4], axis=1)
posz = np.abs(
np.min(abmn[:, 0:2], axis=1) - np.max(abmn[:, 2:4], axis=1)
) * -0.192
# scale the positions with the electrode spacing
posx *= spacing
posz *= spacing
print(abmn.shape, posx.shape)
print('posxz', np.vstack((abmn.T, posx, posz)).T)
return posx, posz | python | def get_xy_simple_dipole_dipole(dataframe, spacing=1, indices=None):
"""For each configuration indicated by the numerical index array, compute
(x,z) pseudo locations based on the paper from XX.
All positions are computed for indices=None.
"""
if indices is None:
indices = slice(None)
abmn = dataframe.ix[indices, ['a', 'b', 'm', 'n']].values
posx = np.mean(abmn[:, 0:4], axis=1)
posz = np.abs(
np.min(abmn[:, 0:2], axis=1) - np.max(abmn[:, 2:4], axis=1)
) * -0.192
# scale the positions with the electrode spacing
posx *= spacing
posz *= spacing
print(abmn.shape, posx.shape)
print('posxz', np.vstack((abmn.T, posx, posz)).T)
return posx, posz | ['def', 'get_xy_simple_dipole_dipole', '(', 'dataframe', ',', 'spacing', '=', '1', ',', 'indices', '=', 'None', ')', ':', 'if', 'indices', 'is', 'None', ':', 'indices', '=', 'slice', '(', 'None', ')', 'abmn', '=', 'dataframe', '.', 'ix', '[', 'indices', ',', '[', "'a'", ',', "'b'", ',', "'m'", ',', "'n'", ']', ']', '.', 'values', 'posx', '=', 'np', '.', 'mean', '(', 'abmn', '[', ':', ',', '0', ':', '4', ']', ',', 'axis', '=', '1', ')', 'posz', '=', 'np', '.', 'abs', '(', 'np', '.', 'min', '(', 'abmn', '[', ':', ',', '0', ':', '2', ']', ',', 'axis', '=', '1', ')', '-', 'np', '.', 'max', '(', 'abmn', '[', ':', ',', '2', ':', '4', ']', ',', 'axis', '=', '1', ')', ')', '*', '-', '0.192', '# scale the positions with the electrode spacing', 'posx', '*=', 'spacing', 'posz', '*=', 'spacing', 'print', '(', 'abmn', '.', 'shape', ',', 'posx', '.', 'shape', ')', 'print', '(', "'posxz'", ',', 'np', '.', 'vstack', '(', '(', 'abmn', '.', 'T', ',', 'posx', ',', 'posz', ')', ')', '.', 'T', ')', 'return', 'posx', ',', 'posz'] | For each configuration indicated by the numerical index array, compute
(x,z) pseudo locations based on the paper from XX.
All positions are computed for indices=None. | ['For', 'each', 'configuration', 'indicated', 'by', 'the', 'numerical', 'index', 'array', 'compute', '(', 'x', 'z', ')', 'pseudo', 'locations', 'based', 'on', 'the', 'paper', 'from', 'XX', '.'] | train | https://github.com/geophysics-ubonn/reda/blob/46a939729e40c7c4723315c03679c40761152e9e/lib/reda/utils/pseudo_positions.py#L7-L26 |
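A small illustration of the pseudo-position helper above: it only needs a DataFrame with integer electrode columns a, b, m, n. Because it relies on the deprecated DataFrame.ix accessor, it requires an older pandas release.
import pandas as pd
from reda.utils.pseudo_positions import get_xy_simple_dipole_dipole

configs = pd.DataFrame({
    "a": [1, 2], "b": [2, 3],
    "m": [4, 5], "n": [5, 6],
})
posx, posz = get_xy_simple_dipole_dipole(configs, spacing=2.5)
# posx is the scaled mean of a, b, m, n per row; posz is -0.192 times the
# scaled separation between current and potential electrode pairs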
7,371 | ioos/compliance-checker | compliance_checker/suite.py | CheckSuite.load_all_available_checkers | def load_all_available_checkers(cls):
"""
Helper method to retrieve all sub checker classes derived from various
base classes.
"""
for x in working_set.iter_entry_points('compliance_checker.suites'):
try:
xl = x.resolve()
cls.checkers[':'.join((xl._cc_spec, xl._cc_spec_version))] = xl
# TODO: remove this once all checkers move over to the new
# _cc_spec, _cc_spec_version
except AttributeError:
# if there are versioned classes, it will get overwritten by the
# latest version later. If there are not, it will be assigned
# the checker as the main class
# TODO: nix name attribute in plugins. Keeping in for now
# to provide backwards compatibility
cls.checkers[getattr(xl, 'name', None) or xl._cc_spec] = xl
except Exception as e:
print("Could not load", x, ":", e, file=sys.stderr)
# find the latest version of versioned checkers and set that as the
# default checker for compliance checker if no version is specified
ver_checkers = sorted([c.split(':', 1) for c
in cls.checkers if ':' in c])
for spec, versions in itertools.groupby(ver_checkers, itemgetter(0)):
version_nums = [v[-1] for v in versions]
try:
latest_version = str(max(StrictVersion(v) for v
in version_nums))
# if the version can't be parsed as a StrictVersion, parse
# according to character collation
except ValueError:
latest_version = max(version_nums)
cls.checkers[spec] = cls.checkers[spec + ':latest'] = \
cls.checkers[':'.join((spec, latest_version))] | python | def load_all_available_checkers(cls):
"""
Helper method to retrieve all sub checker classes derived from various
base classes.
"""
for x in working_set.iter_entry_points('compliance_checker.suites'):
try:
xl = x.resolve()
cls.checkers[':'.join((xl._cc_spec, xl._cc_spec_version))] = xl
# TODO: remove this once all checkers move over to the new
# _cc_spec, _cc_spec_version
except AttributeError:
# if there are versioned classes, it will get overwritten by the
# latest version later. If there are not, it will be assigned
# the checker as the main class
# TODO: nix name attribute in plugins. Keeping in for now
# to provide backwards compatibility
cls.checkers[getattr(xl, 'name', None) or xl._cc_spec] = xl
except Exception as e:
print("Could not load", x, ":", e, file=sys.stderr)
# find the latest version of versioned checkers and set that as the
# default checker for compliance checker if no version is specified
ver_checkers = sorted([c.split(':', 1) for c
in cls.checkers if ':' in c])
for spec, versions in itertools.groupby(ver_checkers, itemgetter(0)):
version_nums = [v[-1] for v in versions]
try:
latest_version = str(max(StrictVersion(v) for v
in version_nums))
# if the version can't be parsed as a StrictVersion, parse
# according to character collation
except ValueError:
latest_version = max(version_nums)
cls.checkers[spec] = cls.checkers[spec + ':latest'] = \
cls.checkers[':'.join((spec, latest_version))] | ['def', 'load_all_available_checkers', '(', 'cls', ')', ':', 'for', 'x', 'in', 'working_set', '.', 'iter_entry_points', '(', "'compliance_checker.suites'", ')', ':', 'try', ':', 'xl', '=', 'x', '.', 'resolve', '(', ')', 'cls', '.', 'checkers', '[', "':'", '.', 'join', '(', '(', 'xl', '.', '_cc_spec', ',', 'xl', '.', '_cc_spec_version', ')', ')', ']', '=', 'xl', '# TODO: remove this once all checkers move over to the new', '# _cc_spec, _cc_spec_version', 'except', 'AttributeError', ':', '# if there are versioned classes, it will get overwritten by the', '# latest version later. If there are not, it will be assigned', '# the checker as the main class', '# TODO: nix name attribute in plugins. Keeping in for now', '# to provide backwards compatibility', 'cls', '.', 'checkers', '[', 'getattr', '(', 'xl', ',', "'name'", ',', 'None', ')', 'or', 'xl', '.', '_cc_spec', ']', '=', 'xl', 'except', 'Exception', 'as', 'e', ':', 'print', '(', '"Could not load"', ',', 'x', ',', '":"', ',', 'e', ',', 'file', '=', 'sys', '.', 'stderr', ')', '# find the latest version of versioned checkers and set that as the', '# default checker for compliance checker if no version is specified', 'ver_checkers', '=', 'sorted', '(', '[', 'c', '.', 'split', '(', "':'", ',', '1', ')', 'for', 'c', 'in', 'cls', '.', 'checkers', 'if', "':'", 'in', 'c', ']', ')', 'for', 'spec', ',', 'versions', 'in', 'itertools', '.', 'groupby', '(', 'ver_checkers', ',', 'itemgetter', '(', '0', ')', ')', ':', 'version_nums', '=', '[', 'v', '[', '-', '1', ']', 'for', 'v', 'in', 'versions', ']', 'try', ':', 'latest_version', '=', 'str', '(', 'max', '(', 'StrictVersion', '(', 'v', ')', 'for', 'v', 'in', 'version_nums', ')', ')', "# if the version can't be parsed as a StrictVersion, parse", '# according to character collation', 'except', 'ValueError', ':', 'latest_version', '=', 'max', '(', 'version_nums', ')', 'cls', '.', 'checkers', '[', 'spec', ']', '=', 'cls', '.', 'checkers', '[', 'spec', '+', "':latest'", ']', '=', 'cls', '.', 'checkers', '[', "':'", '.', 'join', '(', '(', 'spec', ',', 'latest_version', ')', ')', ']'] | Helper method to retrieve all sub checker classes derived from various
base classes. | ['Helper', 'method', 'to', 'retrieve', 'all', 'sub', 'checker', 'classes', 'derived', 'from', 'various', 'base', 'classes', '.'] | train | https://github.com/ioos/compliance-checker/blob/ee89c27b0daade58812489a2da3aa3b6859eafd9/compliance_checker/suite.py#L81-L116 |
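The version-selection step at the end of the loader above can be illustrated in isolation; StrictVersion is assumed to come from distutils.version, since this row does not show the module's imports.
from distutils.version import StrictVersion

keys = ["cf:1.6", "cf:1.7", "acdd:1.3"]
cf_versions = [k.split(":", 1)[1] for k in keys if k.startswith("cf:")]
latest = str(max(StrictVersion(v) for v in cf_versions))
print(latest)   # 1.7 -> checkers['cf'] and checkers['cf:latest'] both point at 'cf:1.7'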
7,372 | ibis-project/ibis | ibis/expr/api.py | geo_point_n | def geo_point_n(arg, n):
"""Return the Nth point in a single linestring in the geometry.
Negative values are counted backwards from the end of the LineString,
so that -1 is the last point. Returns NULL if there is no linestring in
the geometry
Parameters
----------
arg : geometry
n : integer
Returns
-------
PointN : geometry scalar
"""
op = ops.GeoPointN(arg, n)
return op.to_expr() | python | def geo_point_n(arg, n):
"""Return the Nth point in a single linestring in the geometry.
Negative values are counted backwards from the end of the LineString,
so that -1 is the last point. Returns NULL if there is no linestring in
the geometry
Parameters
----------
arg : geometry
n : integer
Returns
-------
PointN : geometry scalar
"""
op = ops.GeoPointN(arg, n)
return op.to_expr() | ['def', 'geo_point_n', '(', 'arg', ',', 'n', ')', ':', 'op', '=', 'ops', '.', 'GeoPointN', '(', 'arg', ',', 'n', ')', 'return', 'op', '.', 'to_expr', '(', ')'] | Return the Nth point in a single linestring in the geometry.
Negative values are counted backwards from the end of the LineString,
so that -1 is the last point. Returns NULL if there is no linestring in
the geometry
Parameters
----------
arg : geometry
n : integer
Returns
-------
PointN : geometry scalar | ['Return', 'the', 'Nth', 'point', 'in', 'a', 'single', 'linestring', 'in', 'the', 'geometry', '.', 'Negative', 'values', 'are', 'counted', 'backwards', 'from', 'the', 'end', 'of', 'the', 'LineString', 'so', 'that', '-', '1', 'is', 'the', 'last', 'point', '.', 'Returns', 'NULL', 'if', 'there', 'is', 'no', 'linestring', 'in', 'the', 'geometry'] | train | https://github.com/ibis-project/ibis/blob/1e39a5fd9ef088b45c155e8a5f541767ee8ef2e7/ibis/expr/api.py#L1802-L1818 |
7,373 | thespacedoctor/qubits | qubits/datagenerator.py | extract_spectra_from_file | def extract_spectra_from_file(
log,
pathToSpectrum,
convertLumToFlux=False):
"""
*Given a spectrum file this function shall convert the two columns (wavelength and luminosity) to a wavelegnth (wavelengthArray) and flux (fluxArray) array*
**Key Arguments:**
- ``log`` -- logger
- ``pathToSpectrum`` -- absolute path the the spectrum file
**Return:**
- None
"""
################ > IMPORTS ################
## STANDARD LIB ##
import os
## THIRD PARTY ##
import numpy as np
## LOCAL APPLICATION ##
import dryxPython.astrotools as at
################ > VARIABLE SETTINGS ######
################ >ACTION(S) ################
# USE numPy TO EXTRACT THE DATA FROM FILE
pwd = os.getcwd()
log.debug('pwd %s' % (pwd,))
log.debug('pathToSpectrum %s' % (pathToSpectrum,))
data = np.genfromtxt(pathToSpectrum, skip_header=0, usecols=(0, 1))
wavelengthArray = data[:, 0]
# minWl = wavelengthArray.min()
# maxWl = wavelengthArray.max()
luminosityArray = data[:, 1]
# CONVERT TO FLUX: F = L / 4*pi*(r**2)
if convertLumToFlux:
fluxArray = at.luminosity_to_flux(luminosityArray, 1e-5)
else:
fluxArray = luminosityArray
# DEBUG BLOCK
log.debug('pathToSpectrum: %s' % (pathToSpectrum,))
# for i in range(len(fluxArray)):
# print """%s\t%s\t%s""" % (wavelengthArray[i], luminosityArray[i], fluxArray[i] )
# print "\n\n\n"
return wavelengthArray, fluxArray | python | def extract_spectra_from_file(
log,
pathToSpectrum,
convertLumToFlux=False):
"""
*Given a spectrum file this function shall convert the two columns (wavelength and luminosity) to a wavelegnth (wavelengthArray) and flux (fluxArray) array*
**Key Arguments:**
- ``log`` -- logger
- ``pathToSpectrum`` -- absolute path the the spectrum file
**Return:**
- None
"""
################ > IMPORTS ################
## STANDARD LIB ##
import os
## THIRD PARTY ##
import numpy as np
## LOCAL APPLICATION ##
import dryxPython.astrotools as at
################ > VARIABLE SETTINGS ######
################ >ACTION(S) ################
# USE numPy TO EXTRACT THE DATA FROM FILE
pwd = os.getcwd()
log.debug('pwd %s' % (pwd,))
log.debug('pathToSpectrum %s' % (pathToSpectrum,))
data = np.genfromtxt(pathToSpectrum, skip_header=0, usecols=(0, 1))
wavelengthArray = data[:, 0]
# minWl = wavelengthArray.min()
# maxWl = wavelengthArray.max()
luminosityArray = data[:, 1]
# CONVERT TO FLUX: F = L / 4*pi*(r**2)
if convertLumToFlux:
fluxArray = at.luminosity_to_flux(luminosityArray, 1e-5)
else:
fluxArray = luminosityArray
# DEBUG BLOCK
log.debug('pathToSpectrum: %s' % (pathToSpectrum,))
# for i in range(len(fluxArray)):
# print """%s\t%s\t%s""" % (wavelengthArray[i], luminosityArray[i], fluxArray[i] )
# print "\n\n\n"
return wavelengthArray, fluxArray | ['def', 'extract_spectra_from_file', '(', 'log', ',', 'pathToSpectrum', ',', 'convertLumToFlux', '=', 'False', ')', ':', '################ > IMPORTS ################', '## STANDARD LIB ##', 'import', 'os', '## THIRD PARTY ##', 'import', 'numpy', 'as', 'np', '## LOCAL APPLICATION ##', 'import', 'dryxPython', '.', 'astrotools', 'as', 'at', '################ > VARIABLE SETTINGS ######', '################ >ACTION(S) ################', '# USE numPy TO EXTRACT THE DATA FROM FILE', 'pwd', '=', 'os', '.', 'getcwd', '(', ')', 'log', '.', 'debug', '(', "'pwd %s'", '%', '(', 'pwd', ',', ')', ')', 'log', '.', 'debug', '(', "'pathToSpectrum %s'", '%', '(', 'pathToSpectrum', ',', ')', ')', 'data', '=', 'np', '.', 'genfromtxt', '(', 'pathToSpectrum', ',', 'skip_header', '=', '0', ',', 'usecols', '=', '(', '0', ',', '1', ')', ')', 'wavelengthArray', '=', 'data', '[', ':', ',', '0', ']', '# minWl = wavelengthArray.min()', '# maxWl = wavelengthArray.max()', 'luminosityArray', '=', 'data', '[', ':', ',', '1', ']', '# CONVERT TO FLUX: F = L / 4*pi*(r**2)', 'if', 'convertLumToFlux', ':', 'fluxArray', '=', 'at', '.', 'luminosity_to_flux', '(', 'luminosityArray', ',', '1e-5', ')', 'else', ':', 'fluxArray', '=', 'luminosityArray', '# DEBUG BLOCK', 'log', '.', 'debug', '(', "'pathToSpectrum: %s'", '%', '(', 'pathToSpectrum', ',', ')', ')', '# for i in range(len(fluxArray)):', '# print """%s\\t%s\\t%s""" % (wavelengthArray[i], luminosityArray[i], fluxArray[i] )', '# print "\\n\\n\\n"', 'return', 'wavelengthArray', ',', 'fluxArray'] | *Given a spectrum file this function shall convert the two columns (wavelength and luminosity) to a wavelegnth (wavelengthArray) and flux (fluxArray) array*
**Key Arguments:**
- ``log`` -- logger
- ``pathToSpectrum`` -- absolute path the the spectrum file
**Return:**
- None | ['*', 'Given', 'a', 'spectrum', 'file', 'this', 'function', 'shall', 'convert', 'the', 'two', 'columns', '(', 'wavelength', 'and', 'luminosity', ')', 'to', 'a', 'wavelegnth', '(', 'wavelengthArray', ')', 'and', 'flux', '(', 'fluxArray', ')', 'array', '*'] | train | https://github.com/thespacedoctor/qubits/blob/3c02ace7226389841c6bb838d045c11bed61a3c2/qubits/datagenerator.py#L195-L239 |
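The loader above only needs a two-column ASCII file plus a logger; a minimal setup is sketched below, with the actual call left commented because qubits targets Python 2 and pulls in dryxPython.
import logging
import numpy as np

# write a tiny two-column spectrum file (wavelength, luminosity)
np.savetxt("spectrum.dat", [[4000.0, 1.2e41], [4010.0, 1.3e41]])

logging.basicConfig(level=logging.DEBUG)
log = logging.getLogger("qubits")

# from qubits.datagenerator import extract_spectra_from_file
# wl, flux = extract_spectra_from_file(log, "spectrum.dat")
# with convertLumToFlux=True the second column is converted from luminosity to flux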
7,374 | streeter/pelican-gist | pelican_gist/plugin.py | replace_gist_tags | def replace_gist_tags(generator):
"""Replace gist tags in the article content."""
from jinja2 import Template
template = Template(gist_template)
should_cache = generator.context.get('GIST_CACHE_ENABLED')
cache_location = generator.context.get('GIST_CACHE_LOCATION')
pygments_style = generator.context.get('GIST_PYGMENTS_STYLE')
body = None
for article in generator.articles:
for match in gist_regex.findall(article._content):
gist_id = match[1]
filename = None
filetype = None
if match[3]:
filename = match[3]
if match[5]:
filetype = match[5]
logger.info('[gist]: Found gist id {} with filename {} and filetype {}'.format(
gist_id,
filename,
filetype,
))
if should_cache:
body = get_cache(cache_location, gist_id, filename)
# Fetch the gist
if not body:
logger.info('[gist]: Gist did not exist in cache, fetching...')
body = fetch_gist(gist_id, filename)
if should_cache:
logger.info('[gist]: Saving gist to cache...')
set_cache(cache_location, gist_id, body, filename)
else:
logger.info('[gist]: Found gist in cache.')
# Create a context to render with
context = generator.context.copy()
context.update({
'script_url': script_url(gist_id, filename),
'code': render_code(body, filetype, pygments_style)
})
# Render the template
replacement = template.render(context)
article._content = article._content.replace(match[0], replacement) | python | def replace_gist_tags(generator):
"""Replace gist tags in the article content."""
from jinja2 import Template
template = Template(gist_template)
should_cache = generator.context.get('GIST_CACHE_ENABLED')
cache_location = generator.context.get('GIST_CACHE_LOCATION')
pygments_style = generator.context.get('GIST_PYGMENTS_STYLE')
body = None
for article in generator.articles:
for match in gist_regex.findall(article._content):
gist_id = match[1]
filename = None
filetype = None
if match[3]:
filename = match[3]
if match[5]:
filetype = match[5]
logger.info('[gist]: Found gist id {} with filename {} and filetype {}'.format(
gist_id,
filename,
filetype,
))
if should_cache:
body = get_cache(cache_location, gist_id, filename)
# Fetch the gist
if not body:
logger.info('[gist]: Gist did not exist in cache, fetching...')
body = fetch_gist(gist_id, filename)
if should_cache:
logger.info('[gist]: Saving gist to cache...')
set_cache(cache_location, gist_id, body, filename)
else:
logger.info('[gist]: Found gist in cache.')
# Create a context to render with
context = generator.context.copy()
context.update({
'script_url': script_url(gist_id, filename),
'code': render_code(body, filetype, pygments_style)
})
# Render the template
replacement = template.render(context)
article._content = article._content.replace(match[0], replacement) | ['def', 'replace_gist_tags', '(', 'generator', ')', ':', 'from', 'jinja2', 'import', 'Template', 'template', '=', 'Template', '(', 'gist_template', ')', 'should_cache', '=', 'generator', '.', 'context', '.', 'get', '(', "'GIST_CACHE_ENABLED'", ')', 'cache_location', '=', 'generator', '.', 'context', '.', 'get', '(', "'GIST_CACHE_LOCATION'", ')', 'pygments_style', '=', 'generator', '.', 'context', '.', 'get', '(', "'GIST_PYGMENTS_STYLE'", ')', 'body', '=', 'None', 'for', 'article', 'in', 'generator', '.', 'articles', ':', 'for', 'match', 'in', 'gist_regex', '.', 'findall', '(', 'article', '.', '_content', ')', ':', 'gist_id', '=', 'match', '[', '1', ']', 'filename', '=', 'None', 'filetype', '=', 'None', 'if', 'match', '[', '3', ']', ':', 'filename', '=', 'match', '[', '3', ']', 'if', 'match', '[', '5', ']', ':', 'filetype', '=', 'match', '[', '5', ']', 'logger', '.', 'info', '(', "'[gist]: Found gist id {} with filename {} and filetype {}'", '.', 'format', '(', 'gist_id', ',', 'filename', ',', 'filetype', ',', ')', ')', 'if', 'should_cache', ':', 'body', '=', 'get_cache', '(', 'cache_location', ',', 'gist_id', ',', 'filename', ')', '# Fetch the gist', 'if', 'not', 'body', ':', 'logger', '.', 'info', '(', "'[gist]: Gist did not exist in cache, fetching...'", ')', 'body', '=', 'fetch_gist', '(', 'gist_id', ',', 'filename', ')', 'if', 'should_cache', ':', 'logger', '.', 'info', '(', "'[gist]: Saving gist to cache...'", ')', 'set_cache', '(', 'cache_location', ',', 'gist_id', ',', 'body', ',', 'filename', ')', 'else', ':', 'logger', '.', 'info', '(', "'[gist]: Found gist in cache.'", ')', '# Create a context to render with', 'context', '=', 'generator', '.', 'context', '.', 'copy', '(', ')', 'context', '.', 'update', '(', '{', "'script_url'", ':', 'script_url', '(', 'gist_id', ',', 'filename', ')', ',', "'code'", ':', 'render_code', '(', 'body', ',', 'filetype', ',', 'pygments_style', ')', '}', ')', '# Render the template', 'replacement', '=', 'template', '.', 'render', '(', 'context', ')', 'article', '.', '_content', '=', 'article', '.', '_content', '.', 'replace', '(', 'match', '[', '0', ']', ',', 'replacement', ')'] | Replace gist tags in the article content. | ['Replace', 'gist', 'tags', 'in', 'the', 'article', 'content', '.'] | train | https://github.com/streeter/pelican-gist/blob/395e619534b404fb2b94456dc400dc2a8a2f934a/pelican_gist/plugin.py#L103-L153 |
7,375 | diffeo/py-nilsimsa | nilsimsa/deprecated/_deprecated_nilsimsa.py | compare_hexdigests | def compare_hexdigests( digest1, digest2 ):
"""Compute difference in bits between digest1 and digest2
returns -127 to 128; 128 is the same, -127 is different"""
# convert to 32-tuple of unsighed two-byte INTs
digest1 = tuple([int(digest1[i:i+2],16) for i in range(0,63,2)])
digest2 = tuple([int(digest2[i:i+2],16) for i in range(0,63,2)])
bits = 0
for i in range(32):
bits += POPC[255 & digest1[i] ^ digest2[i]]
return 128 - bits | python | def compare_hexdigests( digest1, digest2 ):
"""Compute difference in bits between digest1 and digest2
returns -127 to 128; 128 is the same, -127 is different"""
# convert to 32-tuple of unsighed two-byte INTs
digest1 = tuple([int(digest1[i:i+2],16) for i in range(0,63,2)])
digest2 = tuple([int(digest2[i:i+2],16) for i in range(0,63,2)])
bits = 0
for i in range(32):
bits += POPC[255 & digest1[i] ^ digest2[i]]
return 128 - bits | ['def', 'compare_hexdigests', '(', 'digest1', ',', 'digest2', ')', ':', '# convert to 32-tuple of unsighed two-byte INTs', 'digest1', '=', 'tuple', '(', '[', 'int', '(', 'digest1', '[', 'i', ':', 'i', '+', '2', ']', ',', '16', ')', 'for', 'i', 'in', 'range', '(', '0', ',', '63', ',', '2', ')', ']', ')', 'digest2', '=', 'tuple', '(', '[', 'int', '(', 'digest2', '[', 'i', ':', 'i', '+', '2', ']', ',', '16', ')', 'for', 'i', 'in', 'range', '(', '0', ',', '63', ',', '2', ')', ']', ')', 'bits', '=', '0', 'for', 'i', 'in', 'range', '(', '32', ')', ':', 'bits', '+=', 'POPC', '[', '255', '&', 'digest1', '[', 'i', ']', '^', 'digest2', '[', 'i', ']', ']', 'return', '128', '-', 'bits'] | Compute difference in bits between digest1 and digest2
returns -127 to 128; 128 is the same, -127 is different | ['Compute', 'difference', 'in', 'bits', 'between', 'digest1', 'and', 'digest2', 'returns', '-', '127', 'to', '128', ';', '128', 'is', 'the', 'same', '-', '127', 'is', 'different'] | train | https://github.com/diffeo/py-nilsimsa/blob/c652f4bbfd836f7aebf292dcea676cc925ec315a/nilsimsa/deprecated/_deprecated_nilsimsa.py#L196-L205 |
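A quick self-check of compare_hexdigests above, assuming the package layout shown in this row; identical digests score the maximum of 128.
from nilsimsa.deprecated._deprecated_nilsimsa import compare_hexdigests

digest = "0" * 64                                  # any 64-character hex digest
assert compare_hexdigests(digest, digest) == 128   # no differing bits
# digests that differ in many bits score toward the bottom of the range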
7,376 | mozilla/elasticutils | elasticutils/contrib/django/tasks.py | index_objects | def index_objects(mapping_type, ids, chunk_size=100, es=None, index=None):
"""Index documents of a specified mapping type.
This allows for asynchronous indexing.
If a mapping_type extends Indexable, you can add a ``post_save``
hook for the model that it's based on like this::
@receiver(dbsignals.post_save, sender=MyModel)
def update_in_index(sender, instance, **kw):
from elasticutils.contrib.django import tasks
tasks.index_objects.delay(MyMappingType, [instance.id])
:arg mapping_type: the mapping type for these ids
:arg ids: the list of ids of things to index
:arg chunk_size: the size of the chunk for bulk indexing
.. Note::
The default chunk_size is 100. The number of documents you
can bulk index at once depends on the size of the
documents.
:arg es: The `Elasticsearch` to use. If you don't specify an
`Elasticsearch`, it'll use `mapping_type.get_es()`.
:arg index: The name of the index to use. If you don't specify one
it'll use `mapping_type.get_index()`.
"""
if settings.ES_DISABLED:
return
log.debug('Indexing objects {0}-{1}. [{2}]'.format(
ids[0], ids[-1], len(ids)))
# Get the model this mapping type is based on.
model = mapping_type.get_model()
# Retrieve all the objects that we're going to index and do it in
# bulk.
for id_list in chunked(ids, chunk_size):
documents = []
for obj in model.objects.filter(id__in=id_list):
try:
documents.append(mapping_type.extract_document(obj.id, obj))
except Exception as exc:
log.exception('Unable to extract document {0}: {1}'.format(
obj, repr(exc)))
if documents:
mapping_type.bulk_index(documents, id_field='id', es=es, index=index) | python | def index_objects(mapping_type, ids, chunk_size=100, es=None, index=None):
"""Index documents of a specified mapping type.
This allows for asynchronous indexing.
If a mapping_type extends Indexable, you can add a ``post_save``
hook for the model that it's based on like this::
@receiver(dbsignals.post_save, sender=MyModel)
def update_in_index(sender, instance, **kw):
from elasticutils.contrib.django import tasks
tasks.index_objects.delay(MyMappingType, [instance.id])
:arg mapping_type: the mapping type for these ids
:arg ids: the list of ids of things to index
:arg chunk_size: the size of the chunk for bulk indexing
.. Note::
The default chunk_size is 100. The number of documents you
can bulk index at once depends on the size of the
documents.
:arg es: The `Elasticsearch` to use. If you don't specify an
`Elasticsearch`, it'll use `mapping_type.get_es()`.
:arg index: The name of the index to use. If you don't specify one
it'll use `mapping_type.get_index()`.
"""
if settings.ES_DISABLED:
return
log.debug('Indexing objects {0}-{1}. [{2}]'.format(
ids[0], ids[-1], len(ids)))
# Get the model this mapping type is based on.
model = mapping_type.get_model()
# Retrieve all the objects that we're going to index and do it in
# bulk.
for id_list in chunked(ids, chunk_size):
documents = []
for obj in model.objects.filter(id__in=id_list):
try:
documents.append(mapping_type.extract_document(obj.id, obj))
except Exception as exc:
log.exception('Unable to extract document {0}: {1}'.format(
obj, repr(exc)))
if documents:
mapping_type.bulk_index(documents, id_field='id', es=es, index=index) | ['def', 'index_objects', '(', 'mapping_type', ',', 'ids', ',', 'chunk_size', '=', '100', ',', 'es', '=', 'None', ',', 'index', '=', 'None', ')', ':', 'if', 'settings', '.', 'ES_DISABLED', ':', 'return', 'log', '.', 'debug', '(', "'Indexing objects {0}-{1}. [{2}]'", '.', 'format', '(', 'ids', '[', '0', ']', ',', 'ids', '[', '-', '1', ']', ',', 'len', '(', 'ids', ')', ')', ')', '# Get the model this mapping type is based on.', 'model', '=', 'mapping_type', '.', 'get_model', '(', ')', "# Retrieve all the objects that we're going to index and do it in", '# bulk.', 'for', 'id_list', 'in', 'chunked', '(', 'ids', ',', 'chunk_size', ')', ':', 'documents', '=', '[', ']', 'for', 'obj', 'in', 'model', '.', 'objects', '.', 'filter', '(', 'id__in', '=', 'id_list', ')', ':', 'try', ':', 'documents', '.', 'append', '(', 'mapping_type', '.', 'extract_document', '(', 'obj', '.', 'id', ',', 'obj', ')', ')', 'except', 'Exception', 'as', 'exc', ':', 'log', '.', 'exception', '(', "'Unable to extract document {0}: {1}'", '.', 'format', '(', 'obj', ',', 'repr', '(', 'exc', ')', ')', ')', 'if', 'documents', ':', 'mapping_type', '.', 'bulk_index', '(', 'documents', ',', 'id_field', '=', "'id'", ',', 'es', '=', 'es', ',', 'index', '=', 'index', ')'] | Index documents of a specified mapping type.
This allows for asynchronous indexing.
If a mapping_type extends Indexable, you can add a ``post_save``
hook for the model that it's based on like this::
@receiver(dbsignals.post_save, sender=MyModel)
def update_in_index(sender, instance, **kw):
from elasticutils.contrib.django import tasks
tasks.index_objects.delay(MyMappingType, [instance.id])
:arg mapping_type: the mapping type for these ids
:arg ids: the list of ids of things to index
:arg chunk_size: the size of the chunk for bulk indexing
.. Note::
The default chunk_size is 100. The number of documents you
can bulk index at once depends on the size of the
documents.
:arg es: The `Elasticsearch` to use. If you don't specify an
`Elasticsearch`, it'll use `mapping_type.get_es()`.
:arg index: The name of the index to use. If you don't specify one
it'll use `mapping_type.get_index()`. | ['Index', 'documents', 'of', 'a', 'specified', 'mapping', 'type', '.'] | train | https://github.com/mozilla/elasticutils/blob/b880cc5d51fb1079b0581255ec664c1ec934656e/elasticutils/contrib/django/tasks.py#L13-L65 |
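The batching step above relies on a `chunked` helper. As a minimal, self-contained sketch of that step (this `chunked` is a stand-in written for illustration, not elasticutils' own implementation):

def chunked(items, chunk_size):
    # Hypothetical stand-in: yield successive chunk_size-sized slices of items.
    for start in range(0, len(items), chunk_size):
        yield items[start:start + chunk_size]

ids = list(range(1, 251))
# With chunk_size=100 this yields batches of 100, 100 and 50 ids, mirroring
# how index_objects groups ids before each bulk_index call.
for batch in chunked(ids, 100):
    print(len(batch))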
7,377 | toomore/grs | grs/best_buy_or_sell.py | BestFourPoint.bias_ratio | def bias_ratio(self, positive_or_negative=False):
""" 判斷乖離
:param bool positive_or_negative: 正乖離 為 True,負乖離 為 False
"""
return self.data.check_moving_average_bias_ratio(
self.data.moving_average_bias_ratio(3, 6)[0],
positive_or_negative=positive_or_negative)[0] | python | def bias_ratio(self, positive_or_negative=False):
""" 判斷乖離
:param bool positive_or_negative: 正乖離 為 True,負乖離 為 False
"""
return self.data.check_moving_average_bias_ratio(
self.data.moving_average_bias_ratio(3, 6)[0],
positive_or_negative=positive_or_negative)[0] | ['def', 'bias_ratio', '(', 'self', ',', 'positive_or_negative', '=', 'False', ')', ':', 'return', 'self', '.', 'data', '.', 'check_moving_average_bias_ratio', '(', 'self', '.', 'data', '.', 'moving_average_bias_ratio', '(', '3', ',', '6', ')', '[', '0', ']', ',', 'positive_or_negative', '=', 'positive_or_negative', ')', '[', '0', ']'] | Determine the bias ratio (deviation from the moving average).
:param bool positive_or_negative: True for positive bias, False for negative bias | ['Determine', 'the', 'bias', 'ratio'] | train | https://github.com/toomore/grs/blob/a1285cb57878284a886952968be9e31fbfa595dd/grs/best_buy_or_sell.py#L32-L39 |
7,378 | YosaiProject/yosai | yosai/core/authc/authc.py | DefaultAuthenticator.validate_locked | def validate_locked(self, authc_token, failed_attempts):
"""
:param failed_attempts: the failed attempts for this type of credential
"""
if self.locking_limit and len(failed_attempts) > self.locking_limit:
msg = ('Authentication attempts breached threshold. Account'
' is now locked for: ' + str(authc_token.identifier))
self.locking_realm.lock_account(authc_token.identifier)
self.notify_event(authc_token.identifier, 'AUTHENTICATION.ACCOUNT_LOCKED')
raise LockedAccountException(msg) | python | def validate_locked(self, authc_token, failed_attempts):
"""
:param failed_attempts: the failed attempts for this type of credential
"""
if self.locking_limit and len(failed_attempts) > self.locking_limit:
msg = ('Authentication attempts breached threshold. Account'
' is now locked for: ' + str(authc_token.identifier))
self.locking_realm.lock_account(authc_token.identifier)
self.notify_event(authc_token.identifier, 'AUTHENTICATION.ACCOUNT_LOCKED')
raise LockedAccountException(msg) | ['def', 'validate_locked', '(', 'self', ',', 'authc_token', ',', 'failed_attempts', ')', ':', 'if', 'self', '.', 'locking_limit', 'and', 'len', '(', 'failed_attempts', ')', '>', 'self', '.', 'locking_limit', ':', 'msg', '=', '(', "'Authentication attempts breached threshold. Account'", "' is now locked for: '", '+', 'str', '(', 'authc_token', '.', 'identifier', ')', ')', 'self', '.', 'locking_realm', '.', 'lock_account', '(', 'authc_token', '.', 'identifier', ')', 'self', '.', 'notify_event', '(', 'authc_token', '.', 'identifier', ',', "'AUTHENTICATION.ACCOUNT_LOCKED'", ')', 'raise', 'LockedAccountException', '(', 'msg', ')'] | :param failed_attempts: the failed attempts for this type of credential | [':', 'param', 'failed_attempts', ':', 'the', 'failed', 'attempts', 'for', 'this', 'type', 'of', 'credential'] | train | https://github.com/YosaiProject/yosai/blob/7f96aa6b837ceae9bf3d7387cd7e35f5ab032575/yosai/core/authc/authc.py#L314-L323 |
7,379 | vanheeringen-lab/gimmemotifs | gimmemotifs/tools.py | MotifSampler.parse_out | def parse_out(self, fo):
"""
Convert MotifSampler output to motifs
Parameters
----------
fo : file-like
File object containing MotifSampler output.
Returns
-------
motifs : list
List of Motif instances.
"""
motifs = []
nucs = {"A":0,"C":1,"G":2,"T":3}
pseudo = 0.0 # Should be 1/sqrt(# of seqs)
aligns = {}
for line in fo.readlines():
if line.startswith("#"):
pass
elif len(line) > 1:
vals = line.strip().split("\t")
m_id, site = [x.strip().split(" ")[1].replace('"',"") for x in vals[8].split(";") if x]
#if vals[6] == "+":
if site.upper().find("N") == -1:
aligns.setdefault(m_id, []).append(site)
#else:
# print site, rc(site)
# aligns.setdefault(id, []).append(rc(site))
for m_id, align in aligns.items():
#print id, len(align)
width = len(align[0])
pfm = [[0 for x in range(4)] for x in range(width)]
for row in align:
for i in range(len(row)):
pfm[i][nucs[row[i]]] += 1
total = float(len(align))
pwm = [[(x + pseudo/4)/total+(pseudo) for x in row] for row in pfm]
m = Motif()
m.align = align[:]
m.pwm = pwm[:]
m.pfm = pfm[:]
m.id = m_id
motifs.append(m)
return motifs | python | def parse_out(self, fo):
"""
Convert MotifSampler output to motifs
Parameters
----------
fo : file-like
File object containing MotifSampler output.
Returns
-------
motifs : list
List of Motif instances.
"""
motifs = []
nucs = {"A":0,"C":1,"G":2,"T":3}
pseudo = 0.0 # Should be 1/sqrt(# of seqs)
aligns = {}
for line in fo.readlines():
if line.startswith("#"):
pass
elif len(line) > 1:
vals = line.strip().split("\t")
m_id, site = [x.strip().split(" ")[1].replace('"',"") for x in vals[8].split(";") if x]
#if vals[6] == "+":
if site.upper().find("N") == -1:
aligns.setdefault(m_id, []).append(site)
#else:
# print site, rc(site)
# aligns.setdefault(id, []).append(rc(site))
for m_id, align in aligns.items():
#print id, len(align)
width = len(align[0])
pfm = [[0 for x in range(4)] for x in range(width)]
for row in align:
for i in range(len(row)):
pfm[i][nucs[row[i]]] += 1
total = float(len(align))
pwm = [[(x + pseudo/4)/total+(pseudo) for x in row] for row in pfm]
m = Motif()
m.align = align[:]
m.pwm = pwm[:]
m.pfm = pfm[:]
m.id = m_id
motifs.append(m)
return motifs | ['def', 'parse_out', '(', 'self', ',', 'fo', ')', ':', 'motifs', '=', '[', ']', 'nucs', '=', '{', '"A"', ':', '0', ',', '"C"', ':', '1', ',', '"G"', ':', '2', ',', '"T"', ':', '3', '}', 'pseudo', '=', '0.0', '# Should be 1/sqrt(# of seqs)', 'aligns', '=', '{', '}', 'for', 'line', 'in', 'fo', '.', 'readlines', '(', ')', ':', 'if', 'line', '.', 'startswith', '(', '"#"', ')', ':', 'pass', 'elif', 'len', '(', 'line', ')', '>', '1', ':', 'vals', '=', 'line', '.', 'strip', '(', ')', '.', 'split', '(', '"\\t"', ')', 'm_id', ',', 'site', '=', '[', 'x', '.', 'strip', '(', ')', '.', 'split', '(', '" "', ')', '[', '1', ']', '.', 'replace', '(', '\'"\'', ',', '""', ')', 'for', 'x', 'in', 'vals', '[', '8', ']', '.', 'split', '(', '";"', ')', 'if', 'x', ']', '#if vals[6] == "+":', 'if', 'site', '.', 'upper', '(', ')', '.', 'find', '(', '"N"', ')', '==', '-', '1', ':', 'aligns', '.', 'setdefault', '(', 'm_id', ',', '[', ']', ')', '.', 'append', '(', 'site', ')', '#else:', '# print site, rc(site)', '# aligns.setdefault(id, []).append(rc(site))', 'for', 'm_id', ',', 'align', 'in', 'aligns', '.', 'items', '(', ')', ':', '#print id, len(align)', 'width', '=', 'len', '(', 'align', '[', '0', ']', ')', 'pfm', '=', '[', '[', '0', 'for', 'x', 'in', 'range', '(', '4', ')', ']', 'for', 'x', 'in', 'range', '(', 'width', ')', ']', 'for', 'row', 'in', 'align', ':', 'for', 'i', 'in', 'range', '(', 'len', '(', 'row', ')', ')', ':', 'pfm', '[', 'i', ']', '[', 'nucs', '[', 'row', '[', 'i', ']', ']', ']', '+=', '1', 'total', '=', 'float', '(', 'len', '(', 'align', ')', ')', 'pwm', '=', '[', '[', '(', 'x', '+', 'pseudo', '/', '4', ')', '/', 'total', '+', '(', 'pseudo', ')', 'for', 'x', 'in', 'row', ']', 'for', 'row', 'in', 'pfm', ']', 'm', '=', 'Motif', '(', ')', 'm', '.', 'align', '=', 'align', '[', ':', ']', 'm', '.', 'pwm', '=', 'pwm', '[', ':', ']', 'm', '.', 'pfm', '=', 'pfm', '[', ':', ']', 'm', '.', 'id', '=', 'm_id', 'motifs', '.', 'append', '(', 'm', ')', 'return', 'motifs'] | Convert MotifSampler output to motifs
Parameters
----------
fo : file-like
File object containing MotifSampler output.
Returns
-------
motifs : list
List of Motif instances. | ['Convert', 'MotifSampler', 'output', 'to', 'motifs', 'Parameters', '----------', 'fo', ':', 'file', '-', 'like', 'File', 'object', 'containing', 'MotifSampler', 'output', '.'] | train | https://github.com/vanheeringen-lab/gimmemotifs/blob/1dc0572179e5d0c8f96958060133c1f8d92c6675/gimmemotifs/tools.py#L1301-L1348 |
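The counting loop above is the core of the conversion: aligned sites become a position frequency matrix (PFM) and then a position weight matrix (PWM). A stand-alone sketch of that step with an invented alignment and the pseudocount left out:

nucs = {"A": 0, "C": 1, "G": 2, "T": 3}
align = ["ACGT", "ACGA", "ACGT"]  # hypothetical aligned binding sites

width = len(align[0])
pfm = [[0, 0, 0, 0] for _ in range(width)]
for site in align:
    for pos, base in enumerate(site):
        pfm[pos][nucs[base]] += 1               # count each base per position

total = float(len(align))
pwm = [[count / total for count in row] for row in pfm]  # per-position frequencies
print(pfm)  # [[3, 0, 0, 0], [0, 3, 0, 0], [0, 0, 3, 0], [1, 0, 0, 2]]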
7,380 | sorgerlab/indra | indra/tools/assemble_corpus.py | run_preassembly_duplicate | def run_preassembly_duplicate(preassembler, beliefengine, **kwargs):
"""Run deduplication stage of preassembly on a list of statements.
Parameters
----------
preassembler : indra.preassembler.Preassembler
A Preassembler instance
beliefengine : indra.belief.BeliefEngine
A BeliefEngine instance.
save : Optional[str]
The name of a pickle file to save the results (stmts_out) into.
Returns
-------
stmts_out : list[indra.statements.Statement]
A list of unique statements.
"""
logger.info('Combining duplicates on %d statements...' %
len(preassembler.stmts))
dump_pkl = kwargs.get('save')
stmts_out = preassembler.combine_duplicates()
beliefengine.set_prior_probs(stmts_out)
logger.info('%d unique statements' % len(stmts_out))
if dump_pkl:
dump_statements(stmts_out, dump_pkl)
return stmts_out | python | def run_preassembly_duplicate(preassembler, beliefengine, **kwargs):
"""Run deduplication stage of preassembly on a list of statements.
Parameters
----------
preassembler : indra.preassembler.Preassembler
A Preassembler instance
beliefengine : indra.belief.BeliefEngine
A BeliefEngine instance.
save : Optional[str]
The name of a pickle file to save the results (stmts_out) into.
Returns
-------
stmts_out : list[indra.statements.Statement]
A list of unique statements.
"""
logger.info('Combining duplicates on %d statements...' %
len(preassembler.stmts))
dump_pkl = kwargs.get('save')
stmts_out = preassembler.combine_duplicates()
beliefengine.set_prior_probs(stmts_out)
logger.info('%d unique statements' % len(stmts_out))
if dump_pkl:
dump_statements(stmts_out, dump_pkl)
return stmts_out | ['def', 'run_preassembly_duplicate', '(', 'preassembler', ',', 'beliefengine', ',', '*', '*', 'kwargs', ')', ':', 'logger', '.', 'info', '(', "'Combining duplicates on %d statements...'", '%', 'len', '(', 'preassembler', '.', 'stmts', ')', ')', 'dump_pkl', '=', 'kwargs', '.', 'get', '(', "'save'", ')', 'stmts_out', '=', 'preassembler', '.', 'combine_duplicates', '(', ')', 'beliefengine', '.', 'set_prior_probs', '(', 'stmts_out', ')', 'logger', '.', 'info', '(', "'%d unique statements'", '%', 'len', '(', 'stmts_out', ')', ')', 'if', 'dump_pkl', ':', 'dump_statements', '(', 'stmts_out', ',', 'dump_pkl', ')', 'return', 'stmts_out'] | Run deduplication stage of preassembly on a list of statements.
Parameters
----------
preassembler : indra.preassembler.Preassembler
A Preassembler instance
beliefengine : indra.belief.BeliefEngine
A BeliefEngine instance.
save : Optional[str]
The name of a pickle file to save the results (stmts_out) into.
Returns
-------
stmts_out : list[indra.statements.Statement]
A list of unique statements. | ['Run', 'deduplication', 'stage', 'of', 'preassembly', 'on', 'a', 'list', 'of', 'statements', '.'] | train | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/tools/assemble_corpus.py#L401-L426 |
7,381 | fabioz/PyDev.Debugger | pydevd_attach_to_process/winappdbg/win32/version.py | _get_arch | def _get_arch():
"""
Determines the current processor architecture.
@rtype: str
@return:
On error, returns:
- L{ARCH_UNKNOWN} (C{"unknown"}) meaning the architecture could not be detected or is not known to WinAppDbg.
On success, returns one of the following values:
- L{ARCH_I386} (C{"i386"}) for Intel 32-bit x86 processor or compatible.
- L{ARCH_AMD64} (C{"amd64"}) for Intel 64-bit x86_64 processor or compatible.
May also return one of the following values if you get both Python and
WinAppDbg to work in such machines... let me know if you do! :)
- L{ARCH_MIPS} (C{"mips"}) for MIPS compatible processors.
- L{ARCH_ALPHA} (C{"alpha"}) for Alpha processors.
- L{ARCH_PPC} (C{"ppc"}) for PowerPC compatible processors.
- L{ARCH_SHX} (C{"shx"}) for Hitachi SH processors.
- L{ARCH_ARM} (C{"arm"}) for ARM compatible processors.
- L{ARCH_IA64} (C{"ia64"}) for Intel Itanium processor or compatible.
- L{ARCH_ALPHA64} (C{"alpha64"}) for Alpha64 processors.
- L{ARCH_MSIL} (C{"msil"}) for the .NET virtual machine.
- L{ARCH_SPARC} (C{"sparc"}) for Sun Sparc processors.
Probably IronPython returns C{ARCH_MSIL} but I haven't tried it. Python
on Windows CE and Windows Mobile should return C{ARCH_ARM}. Python on
Solaris using Wine would return C{ARCH_SPARC}. Python in an Itanium
machine should return C{ARCH_IA64} both on Wine and proper Windows.
All other values should only be returned on Linux using Wine.
"""
try:
si = GetNativeSystemInfo()
except Exception:
si = GetSystemInfo()
try:
return _arch_map[si.id.w.wProcessorArchitecture]
except KeyError:
return ARCH_UNKNOWN | python | def _get_arch():
"""
Determines the current processor architecture.
@rtype: str
@return:
On error, returns:
- L{ARCH_UNKNOWN} (C{"unknown"}) meaning the architecture could not be detected or is not known to WinAppDbg.
On success, returns one of the following values:
- L{ARCH_I386} (C{"i386"}) for Intel 32-bit x86 processor or compatible.
- L{ARCH_AMD64} (C{"amd64"}) for Intel 64-bit x86_64 processor or compatible.
May also return one of the following values if you get both Python and
WinAppDbg to work in such machines... let me know if you do! :)
- L{ARCH_MIPS} (C{"mips"}) for MIPS compatible processors.
- L{ARCH_ALPHA} (C{"alpha"}) for Alpha processors.
- L{ARCH_PPC} (C{"ppc"}) for PowerPC compatible processors.
- L{ARCH_SHX} (C{"shx"}) for Hitachi SH processors.
- L{ARCH_ARM} (C{"arm"}) for ARM compatible processors.
- L{ARCH_IA64} (C{"ia64"}) for Intel Itanium processor or compatible.
- L{ARCH_ALPHA64} (C{"alpha64"}) for Alpha64 processors.
- L{ARCH_MSIL} (C{"msil"}) for the .NET virtual machine.
- L{ARCH_SPARC} (C{"sparc"}) for Sun Sparc processors.
Probably IronPython returns C{ARCH_MSIL} but I haven't tried it. Python
on Windows CE and Windows Mobile should return C{ARCH_ARM}. Python on
Solaris using Wine would return C{ARCH_SPARC}. Python in an Itanium
machine should return C{ARCH_IA64} both on Wine and proper Windows.
All other values should only be returned on Linux using Wine.
"""
try:
si = GetNativeSystemInfo()
except Exception:
si = GetSystemInfo()
try:
return _arch_map[si.id.w.wProcessorArchitecture]
except KeyError:
return ARCH_UNKNOWN | ['def', '_get_arch', '(', ')', ':', 'try', ':', 'si', '=', 'GetNativeSystemInfo', '(', ')', 'except', 'Exception', ':', 'si', '=', 'GetSystemInfo', '(', ')', 'try', ':', 'return', '_arch_map', '[', 'si', '.', 'id', '.', 'w', '.', 'wProcessorArchitecture', ']', 'except', 'KeyError', ':', 'return', 'ARCH_UNKNOWN'] | Determines the current processor architecture.
@rtype: str
@return:
On error, returns:
- L{ARCH_UNKNOWN} (C{"unknown"}) meaning the architecture could not be detected or is not known to WinAppDbg.
On success, returns one of the following values:
- L{ARCH_I386} (C{"i386"}) for Intel 32-bit x86 processor or compatible.
- L{ARCH_AMD64} (C{"amd64"}) for Intel 64-bit x86_64 processor or compatible.
May also return one of the following values if you get both Python and
WinAppDbg to work in such machines... let me know if you do! :)
- L{ARCH_MIPS} (C{"mips"}) for MIPS compatible processors.
- L{ARCH_ALPHA} (C{"alpha"}) for Alpha processors.
- L{ARCH_PPC} (C{"ppc"}) for PowerPC compatible processors.
- L{ARCH_SHX} (C{"shx"}) for Hitachi SH processors.
- L{ARCH_ARM} (C{"arm"}) for ARM compatible processors.
- L{ARCH_IA64} (C{"ia64"}) for Intel Itanium processor or compatible.
- L{ARCH_ALPHA64} (C{"alpha64"}) for Alpha64 processors.
- L{ARCH_MSIL} (C{"msil"}) for the .NET virtual machine.
- L{ARCH_SPARC} (C{"sparc"}) for Sun Sparc processors.
Probably IronPython returns C{ARCH_MSIL} but I haven't tried it. Python
on Windows CE and Windows Mobile should return C{ARCH_ARM}. Python on
Solaris using Wine would return C{ARCH_SPARC}. Python in an Itanium
machine should return C{ARCH_IA64} both on Wine and proper Windows.
All other values should only be returned on Linux using Wine. | ['Determines', 'the', 'current', 'processor', 'architecture', '.'] | train | https://github.com/fabioz/PyDev.Debugger/blob/ed9c4307662a5593b8a7f1f3389ecd0e79b8c503/pydevd_attach_to_process/winappdbg/win32/version.py#L675-L716 |
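The function above boils down to a dictionary lookup on a processor-architecture code with an "unknown" fallback. A rough, cross-platform illustration of the same pattern using only the standard library (the mapping below is invented for the example and is not WinAppDbg's `_arch_map`):

import platform

_MACHINE_TO_ARCH = {
    "x86": "i386", "i386": "i386", "i686": "i386",
    "AMD64": "amd64", "x86_64": "amd64",
    "arm64": "arm", "aarch64": "arm",
}

def get_arch():
    # Fall back to "unknown" when the machine string is not in the map.
    try:
        return _MACHINE_TO_ARCH[platform.machine()]
    except KeyError:
        return "unknown"

print(get_arch())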
7,382 | yyuu/botornado | boto/mturk/connection.py | MTurkConnection.get_qualification_score | def get_qualification_score(self, qualification_type_id, worker_id):
"""TODO: Document."""
params = {'QualificationTypeId' : qualification_type_id,
'SubjectId' : worker_id}
return self._process_request('GetQualificationScore', params,
[('Qualification', Qualification),]) | python | def get_qualification_score(self, qualification_type_id, worker_id):
"""TODO: Document."""
params = {'QualificationTypeId' : qualification_type_id,
'SubjectId' : worker_id}
return self._process_request('GetQualificationScore', params,
[('Qualification', Qualification),]) | ['def', 'get_qualification_score', '(', 'self', ',', 'qualification_type_id', ',', 'worker_id', ')', ':', 'params', '=', '{', "'QualificationTypeId'", ':', 'qualification_type_id', ',', "'SubjectId'", ':', 'worker_id', '}', 'return', 'self', '.', '_process_request', '(', "'GetQualificationScore'", ',', 'params', ',', '[', '(', "'Qualification'", ',', 'Qualification', ')', ',', ']', ')'] | Return the Qualification score the given Worker has for the specified Qualification type. | ['Return', 'the', 'Qualification', 'score', 'the', 'given', 'Worker', 'has', 'for', 'the', 'specified', 'Qualification', 'type', '.'] | train | https://github.com/yyuu/botornado/blob/fffb056f5ff2324d1d5c1304014cfb1d899f602e/boto/mturk/connection.py#L712-L717 |
7,383 | peterdemin/pip-compile-multi | pipcompilemulti/environment.py | Environment.fix_lockfile | def fix_lockfile(self):
"""Run each line of outfile through fix_pin"""
with open(self.outfile, 'rt') as fp:
lines = [
self.fix_pin(line)
for line in self.concatenated(fp)
]
with open(self.outfile, 'wt') as fp:
fp.writelines([
line + '\n'
for line in lines
if line is not None
]) | python | def fix_lockfile(self):
"""Run each line of outfile through fix_pin"""
with open(self.outfile, 'rt') as fp:
lines = [
self.fix_pin(line)
for line in self.concatenated(fp)
]
with open(self.outfile, 'wt') as fp:
fp.writelines([
line + '\n'
for line in lines
if line is not None
]) | ['def', 'fix_lockfile', '(', 'self', ')', ':', 'with', 'open', '(', 'self', '.', 'outfile', ',', "'rt'", ')', 'as', 'fp', ':', 'lines', '=', '[', 'self', '.', 'fix_pin', '(', 'line', ')', 'for', 'line', 'in', 'self', '.', 'concatenated', '(', 'fp', ')', ']', 'with', 'open', '(', 'self', '.', 'outfile', ',', "'wt'", ')', 'as', 'fp', ':', 'fp', '.', 'writelines', '(', '[', 'line', '+', "'\\n'", 'for', 'line', 'in', 'lines', 'if', 'line', 'is', 'not', 'None', ']', ')'] | Run each line of outfile through fix_pin | ['Run', 'each', 'line', 'of', 'outfile', 'through', 'fix_pin'] | train | https://github.com/peterdemin/pip-compile-multi/blob/7bd1968c424dd7ce3236885b4b3e4e28523e6915/pipcompilemulti/environment.py#L103-L115 |
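The method above is a plain read-transform-write loop: read every line, run it through a fixer that may return None, then write back only the surviving lines. A generic sketch of that pattern; the file name and the fixer rule are placeholders, not pip-compile-multi's real `fix_pin` logic:

def fix_pin(line):
    # Hypothetical rule: drop editable pins, keep every other line unchanged.
    return None if line.startswith("-e ") else line.rstrip("\n")

def fix_lockfile(path):
    with open(path, "rt") as fp:
        lines = [fix_pin(line) for line in fp]
    with open(path, "wt") as fp:
        fp.writelines(line + "\n" for line in lines if line is not None)

# fix_lockfile("requirements.txt")  # example call; the path is an assumption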
7,384 | spacetelescope/drizzlepac | drizzlepac/createMedian.py | _writeImage | def _writeImage(dataArray=None, inputHeader=None):
""" Writes out the result of the combination step.
The header of the first 'outsingle' file in the
association parlist is used as the header of the
new image.
Parameters
----------
dataArray : arr
Array of data to be written to a fits.PrimaryHDU object
inputHeader : obj
fits.header.Header object to use as basis for the PrimaryHDU header
"""
prihdu = fits.PrimaryHDU(data=dataArray, header=inputHeader)
pf = fits.HDUList()
pf.append(prihdu)
return pf | python | def _writeImage(dataArray=None, inputHeader=None):
""" Writes out the result of the combination step.
The header of the first 'outsingle' file in the
association parlist is used as the header of the
new image.
Parameters
----------
dataArray : arr
Array of data to be written to a fits.PrimaryHDU object
inputHeader : obj
fits.header.Header object to use as basis for the PrimaryHDU header
"""
prihdu = fits.PrimaryHDU(data=dataArray, header=inputHeader)
pf = fits.HDUList()
pf.append(prihdu)
return pf | ['def', '_writeImage', '(', 'dataArray', '=', 'None', ',', 'inputHeader', '=', 'None', ')', ':', 'prihdu', '=', 'fits', '.', 'PrimaryHDU', '(', 'data', '=', 'dataArray', ',', 'header', '=', 'inputHeader', ')', 'pf', '=', 'fits', '.', 'HDUList', '(', ')', 'pf', '.', 'append', '(', 'prihdu', ')', 'return', 'pf'] | Writes out the result of the combination step.
The header of the first 'outsingle' file in the
association parlist is used as the header of the
new image.
Parameters
----------
dataArray : arr
Array of data to be written to a fits.PrimaryHDU object
inputHeader : obj
fits.header.Header object to use as basis for the PrimaryHDU header | ['Writes', 'out', 'the', 'result', 'of', 'the', 'combination', 'step', '.', 'The', 'header', 'of', 'the', 'first', 'outsingle', 'file', 'in', 'the', 'association', 'parlist', 'is', 'used', 'as', 'the', 'header', 'of', 'the', 'new', 'image', '.'] | train | https://github.com/spacetelescope/drizzlepac/blob/15bec3c929a6a869d9e71b9398ced43ede0620f1/drizzlepac/createMedian.py#L454-L472 |
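A hedged usage sketch of the same astropy.io.fits calls, writing a small placeholder array to disk; the array contents and output file name are made up for the example:

import numpy as np
from astropy.io import fits

data = np.zeros((64, 64), dtype=np.float32)        # placeholder image data
prihdu = fits.PrimaryHDU(data=data)                # same constructor as above
hdulist = fits.HDUList([prihdu])
hdulist.writeto("combined.fits", overwrite=True)   # file name is an assumption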
7,385 | BYU-PCCL/holodeck | holodeck/util.py | convert_unicode | def convert_unicode(value):
"""Resolves python 2 issue with json loading in unicode instead of string
Args:
value (str): Unicode value to be converted
Returns:
(str): converted string
"""
if isinstance(value, dict):
return {convert_unicode(key): convert_unicode(value)
for key, value in value.iteritems()}
elif isinstance(value, list):
return [convert_unicode(item) for item in value]
elif isinstance(value, unicode):
return value.encode('utf-8')
else:
return value | python | def convert_unicode(value):
"""Resolves python 2 issue with json loading in unicode instead of string
Args:
value (str): Unicode value to be converted
Returns:
(str): converted string
"""
if isinstance(value, dict):
return {convert_unicode(key): convert_unicode(value)
for key, value in value.iteritems()}
elif isinstance(value, list):
return [convert_unicode(item) for item in value]
elif isinstance(value, unicode):
return value.encode('utf-8')
else:
return value | ['def', 'convert_unicode', '(', 'value', ')', ':', 'if', 'isinstance', '(', 'value', ',', 'dict', ')', ':', 'return', '{', 'convert_unicode', '(', 'key', ')', ':', 'convert_unicode', '(', 'value', ')', 'for', 'key', ',', 'value', 'in', 'value', '.', 'iteritems', '(', ')', '}', 'elif', 'isinstance', '(', 'value', ',', 'list', ')', ':', 'return', '[', 'convert_unicode', '(', 'item', ')', 'for', 'item', 'in', 'value', ']', 'elif', 'isinstance', '(', 'value', ',', 'unicode', ')', ':', 'return', 'value', '.', 'encode', '(', "'utf-8'", ')', 'else', ':', 'return', 'value'] | Resolves python 2 issue with json loading in unicode instead of string
Args:
value (str): Unicode value to be converted
Returns:
(str): converted string | ['Resolves', 'python', '2', 'issue', 'with', 'json', 'loading', 'in', 'unicode', 'instead', 'of', 'string'] | train | https://github.com/BYU-PCCL/holodeck/blob/01acd4013f5acbd9f61fbc9caaafe19975e8b121/holodeck/util.py#L27-L45 |
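The function above is Python 2 only (`unicode`, `iteritems`). A rough Python 3 rendering of the same recursive walk, shown only to illustrate the traversal; under Python 3 `json.loads` already returns `str`, so this conversion is usually unnecessary:

def convert_strings(value):
    # Recursively walk dicts and lists, decoding any bytes to str (illustrative only).
    if isinstance(value, dict):
        return {convert_strings(k): convert_strings(v) for k, v in value.items()}
    if isinstance(value, list):
        return [convert_strings(item) for item in value]
    if isinstance(value, bytes):
        return value.decode("utf-8")
    return value

print(convert_strings({b"key": [b"a", "b", 1]}))  # {'key': ['a', 'b', 1]}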
7,386 | saltstack/salt | salt/modules/bluez_bluetooth.py | block | def block(bdaddr):
'''
Block a specific bluetooth device by BD Address
CLI Example:
.. code-block:: bash
salt '*' bluetooth.block DE:AD:BE:EF:CA:FE
'''
if not salt.utils.validate.net.mac(bdaddr):
raise CommandExecutionError(
'Invalid BD address passed to bluetooth.block'
)
cmd = 'hciconfig {0} block'.format(bdaddr)
__salt__['cmd.run'](cmd).splitlines() | python | def block(bdaddr):
'''
Block a specific bluetooth device by BD Address
CLI Example:
.. code-block:: bash
salt '*' bluetooth.block DE:AD:BE:EF:CA:FE
'''
if not salt.utils.validate.net.mac(bdaddr):
raise CommandExecutionError(
'Invalid BD address passed to bluetooth.block'
)
cmd = 'hciconfig {0} block'.format(bdaddr)
__salt__['cmd.run'](cmd).splitlines() | ['def', 'block', '(', 'bdaddr', ')', ':', 'if', 'not', 'salt', '.', 'utils', '.', 'validate', '.', 'net', '.', 'mac', '(', 'bdaddr', ')', ':', 'raise', 'CommandExecutionError', '(', "'Invalid BD address passed to bluetooth.block'", ')', 'cmd', '=', "'hciconfig {0} block'", '.', 'format', '(', 'bdaddr', ')', '__salt__', '[', "'cmd.run'", ']', '(', 'cmd', ')', '.', 'splitlines', '(', ')'] | Block a specific bluetooth device by BD Address
CLI Example:
.. code-block:: bash
salt '*' bluetooth.block DE:AD:BE:EF:CA:FE | ['Block', 'a', 'specific', 'bluetooth', 'device', 'by', 'BD', 'Address'] | train | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/bluez_bluetooth.py#L192-L208 |
7,387 | hammerlab/cohorts | cohorts/plot.py | hide_ticks | def hide_ticks(plot, min_tick_value=None, max_tick_value=None):
"""Hide tick values that are outside of [min_tick_value, max_tick_value]"""
for tick, tick_value in zip(plot.get_yticklabels(), plot.get_yticks()):
tick_label = as_numeric(tick_value)
if tick_label:
if (min_tick_value is not None and tick_label < min_tick_value or
max_tick_value is not None and tick_label > max_tick_value):
tick.set_visible(False) | python | def hide_ticks(plot, min_tick_value=None, max_tick_value=None):
"""Hide tick values that are outside of [min_tick_value, max_tick_value]"""
for tick, tick_value in zip(plot.get_yticklabels(), plot.get_yticks()):
tick_label = as_numeric(tick_value)
if tick_label:
if (min_tick_value is not None and tick_label < min_tick_value or
max_tick_value is not None and tick_label > max_tick_value):
tick.set_visible(False) | ['def', 'hide_ticks', '(', 'plot', ',', 'min_tick_value', '=', 'None', ',', 'max_tick_value', '=', 'None', ')', ':', 'for', 'tick', ',', 'tick_value', 'in', 'zip', '(', 'plot', '.', 'get_yticklabels', '(', ')', ',', 'plot', '.', 'get_yticks', '(', ')', ')', ':', 'tick_label', '=', 'as_numeric', '(', 'tick_value', ')', 'if', 'tick_label', ':', 'if', '(', 'min_tick_value', 'is', 'not', 'None', 'and', 'tick_label', '<', 'min_tick_value', 'or', 'max_tick_value', 'is', 'not', 'None', 'and', 'tick_label', '>', 'max_tick_value', ')', ':', 'tick', '.', 'set_visible', '(', 'False', ')'] | Hide tick values that are outside of [min_tick_value, max_tick_value] | ['Hide', 'tick', 'values', 'that', 'are', 'outside', 'of', '[', 'min_tick_value', 'max_tick_value', ']'] | train | https://github.com/hammerlab/cohorts/blob/278b05e609e6c4d4a77c57d49446460be53ea33e/cohorts/plot.py#L37-L44 |
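A minimal matplotlib sketch of the same idea; `hide_ticks` below is a local re-implementation that skips the cohorts-specific `as_numeric` helper, and the plotted data is arbitrary:

import matplotlib
matplotlib.use("Agg")  # headless backend so the example runs anywhere
import matplotlib.pyplot as plt

def hide_ticks(ax, min_tick_value=None, max_tick_value=None):
    # Hide y-tick labels whose values fall outside [min_tick_value, max_tick_value].
    for tick, tick_value in zip(ax.get_yticklabels(), ax.get_yticks()):
        if ((min_tick_value is not None and tick_value < min_tick_value) or
                (max_tick_value is not None and tick_value > max_tick_value)):
            tick.set_visible(False)

fig, ax = plt.subplots()
ax.plot([0, 1, 2, 3], [0, 10, 20, 30])
hide_ticks(ax, min_tick_value=0, max_tick_value=25)
fig.savefig("example.png")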
7,388 | chaoss/grimoirelab-elk | grimoire_elk/enriched/ceres_base.py | ESConnector._build_search_query | def _build_search_query(self, from_date):
"""Build an ElasticSearch search query to retrieve items for read methods.
:param from_date: date to start retrieving items from.
:return: JSON query in dict format
"""
sort = [{self._sort_on_field: {"order": "asc"}}]
filters = []
if self._repo:
filters.append({"term": {"origin": self._repo}})
if from_date:
filters.append({"range": {self._sort_on_field: {"gte": from_date}}})
if filters:
query = {"bool": {"filter": filters}}
else:
query = {"match_all": {}}
search_query = {
"query": query,
"sort": sort
}
return search_query | python | def _build_search_query(self, from_date):
"""Build an ElasticSearch search query to retrieve items for read methods.
:param from_date: date to start retrieving items from.
:return: JSON query in dict format
"""
sort = [{self._sort_on_field: {"order": "asc"}}]
filters = []
if self._repo:
filters.append({"term": {"origin": self._repo}})
if from_date:
filters.append({"range": {self._sort_on_field: {"gte": from_date}}})
if filters:
query = {"bool": {"filter": filters}}
else:
query = {"match_all": {}}
search_query = {
"query": query,
"sort": sort
}
return search_query | ['def', '_build_search_query', '(', 'self', ',', 'from_date', ')', ':', 'sort', '=', '[', '{', 'self', '.', '_sort_on_field', ':', '{', '"order"', ':', '"asc"', '}', '}', ']', 'filters', '=', '[', ']', 'if', 'self', '.', '_repo', ':', 'filters', '.', 'append', '(', '{', '"term"', ':', '{', '"origin"', ':', 'self', '.', '_repo', '}', '}', ')', 'if', 'from_date', ':', 'filters', '.', 'append', '(', '{', '"range"', ':', '{', 'self', '.', '_sort_on_field', ':', '{', '"gte"', ':', 'from_date', '}', '}', '}', ')', 'if', 'filters', ':', 'query', '=', '{', '"bool"', ':', '{', '"filter"', ':', 'filters', '}', '}', 'else', ':', 'query', '=', '{', '"match_all"', ':', '{', '}', '}', 'search_query', '=', '{', '"query"', ':', 'query', ',', '"sort"', ':', 'sort', '}', 'return', 'search_query'] | Build an ElasticSearch search query to retrieve items for read methods.
:param from_date: date to start retrieving items from.
:return: JSON query in dict format | ['Build', 'an', 'ElasticSearch', 'search', 'query', 'to', 'retrieve', 'items', 'for', 'read', 'methods', '.'] | train | https://github.com/chaoss/grimoirelab-elk/blob/64e08b324b36d9f6909bf705145d6451c8d34e65/grimoire_elk/enriched/ceres_base.py#L304-L330 |
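For a concrete picture of the query shape the method above produces, here is a stand-alone version of the same logic with example inputs; the sort field, repository URL and date below are invented:

import json

def build_search_query(sort_on_field, repo=None, from_date=None):
    # Same bool-filter structure as the method above, without the class state.
    filters = []
    if repo:
        filters.append({"term": {"origin": repo}})
    if from_date:
        filters.append({"range": {sort_on_field: {"gte": from_date}}})
    query = {"bool": {"filter": filters}} if filters else {"match_all": {}}
    return {"query": query, "sort": [{sort_on_field: {"order": "asc"}}]}

print(json.dumps(build_search_query("metadata__timestamp",
                                    repo="https://github.com/example/repo",
                                    from_date="2019-01-01"), indent=2))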
7,389 | learningequality/ricecooker | ricecooker/utils/pdf.py | PDFParser.get_toc | def get_toc(self, subchapters=False):
"""
Returns table-of-contents information extracted from the PDF doc.
When `subchapters=False`, the output is a list of this form
.. code-block:: python
[
{'title': 'First chapter', 'page_start': 0, 'page_end': 10},
{'title': 'Second chapter', 'page_start': 10, 'page_end': 20},
...
]
Use the `split_chapters` method to process this list.
When `subchapters=True`, the output is chapter-subchapter tree structure,
that can be processed using the `split_subchapters` method.
"""
self.check_path()
chapters = []
index = 0
for dest in self.pdf.getOutlines():
# Process chapters
if isinstance(dest, CustomDestination) and not isinstance(dest['/Page'], NullObject):
page_num = self.pdf.getDestinationPageNumber(dest)
chapter_pagerange = {
"title": dest['/Title'].replace('\xa0', ' '),
"page_start": page_num if index != 0 else 0,
"page_end": self.pdf.numPages,
}
if subchapters:
chapter_pagerange["children"] = []
chapters.append(chapter_pagerange)
if index > 0:
# Go back to previous chapter and set page_end
chapters[index - 1]["page_end"] = page_num
if subchapters:
previous_chapter = chapters[index - 1]
if previous_chapter["children"]:
# Go back to previous subchapter and set page_end
previous_chapter["children"][-1]["page_end"] = page_num
index += 1
# Attach subchapters (lists) as children to last chapter
elif subchapters and isinstance(dest, list):
parent = chapters[index - 1]
subindex = 0
for subdest in dest:
if isinstance(subdest, CustomDestination) and not isinstance(subdest['/Page'], NullObject):
subpage_num = self.pdf.getDestinationPageNumber(subdest)
parent['children'].append({
"title": subdest['/Title'].replace('\xa0', ' '),
"page_start": subpage_num,
"page_end": self.pdf.numPages
})
if subindex > 0:
parent['children'][subindex - 1]["page_end"] = subpage_num
subindex +=1
return chapters | python | def get_toc(self, subchapters=False):
"""
Returns table-of-contents information extracted from the PDF doc.
When `subchapters=False`, the output is a list of this form
.. code-block:: python
[
{'title': 'First chapter', 'page_start': 0, 'page_end': 10},
{'title': 'Second chapter', 'page_start': 10, 'page_end': 20},
...
]
Use the `split_chapters` method to process this list.
When `subchapters=True`, the output is chapter-subchapter tree structure,
that can be processed using the `split_subchapters` method.
"""
self.check_path()
chapters = []
index = 0
for dest in self.pdf.getOutlines():
# Process chapters
if isinstance(dest, CustomDestination) and not isinstance(dest['/Page'], NullObject):
page_num = self.pdf.getDestinationPageNumber(dest)
chapter_pagerange = {
"title": dest['/Title'].replace('\xa0', ' '),
"page_start": page_num if index != 0 else 0,
"page_end": self.pdf.numPages,
}
if subchapters:
chapter_pagerange["children"] = []
chapters.append(chapter_pagerange)
if index > 0:
# Go back to previous chapter and set page_end
chapters[index - 1]["page_end"] = page_num
if subchapters:
previous_chapter = chapters[index - 1]
if previous_chapter["children"]:
# Go back to previous subchapter and set page_end
previous_chapter["children"][-1]["page_end"] = page_num
index += 1
# Attach subchapters (lists) as children to last chapter
elif subchapters and isinstance(dest, list):
parent = chapters[index - 1]
subindex = 0
for subdest in dest:
if isinstance(subdest, CustomDestination) and not isinstance(subdest['/Page'], NullObject):
subpage_num = self.pdf.getDestinationPageNumber(subdest)
parent['children'].append({
"title": subdest['/Title'].replace('\xa0', ' '),
"page_start": subpage_num,
"page_end": self.pdf.numPages
})
if subindex > 0:
parent['children'][subindex - 1]["page_end"] = subpage_num
subindex +=1
return chapters | ['def', 'get_toc', '(', 'self', ',', 'subchapters', '=', 'False', ')', ':', 'self', '.', 'check_path', '(', ')', 'chapters', '=', '[', ']', 'index', '=', '0', 'for', 'dest', 'in', 'self', '.', 'pdf', '.', 'getOutlines', '(', ')', ':', '# Process chapters', 'if', 'isinstance', '(', 'dest', ',', 'CustomDestination', ')', 'and', 'not', 'isinstance', '(', 'dest', '[', "'/Page'", ']', ',', 'NullObject', ')', ':', 'page_num', '=', 'self', '.', 'pdf', '.', 'getDestinationPageNumber', '(', 'dest', ')', 'chapter_pagerange', '=', '{', '"title"', ':', 'dest', '[', "'/Title'", ']', '.', 'replace', '(', "'\\xa0'", ',', "' '", ')', ',', '"page_start"', ':', 'page_num', 'if', 'index', '!=', '0', 'else', '0', ',', '"page_end"', ':', 'self', '.', 'pdf', '.', 'numPages', ',', '}', 'if', 'subchapters', ':', 'chapter_pagerange', '[', '"children"', ']', '=', '[', ']', 'chapters', '.', 'append', '(', 'chapter_pagerange', ')', 'if', 'index', '>', '0', ':', '# Go back to previous chapter and set page_end', 'chapters', '[', 'index', '-', '1', ']', '[', '"page_end"', ']', '=', 'page_num', 'if', 'subchapters', ':', 'previous_chapter', '=', 'chapters', '[', 'index', '-', '1', ']', 'if', 'previous_chapter', '[', '"children"', ']', ':', '# Go back to previous subchapter and set page_end', 'previous_chapter', '[', '"children"', ']', '[', '-', '1', ']', '[', '"page_end"', ']', '=', 'page_num', 'index', '+=', '1', '# Attach subchapters (lists) as children to last chapter', 'elif', 'subchapters', 'and', 'isinstance', '(', 'dest', ',', 'list', ')', ':', 'parent', '=', 'chapters', '[', 'index', '-', '1', ']', 'subindex', '=', '0', 'for', 'subdest', 'in', 'dest', ':', 'if', 'isinstance', '(', 'subdest', ',', 'CustomDestination', ')', 'and', 'not', 'isinstance', '(', 'subdest', '[', "'/Page'", ']', ',', 'NullObject', ')', ':', 'subpage_num', '=', 'self', '.', 'pdf', '.', 'getDestinationPageNumber', '(', 'subdest', ')', 'parent', '[', "'children'", ']', '.', 'append', '(', '{', '"title"', ':', 'subdest', '[', "'/Title'", ']', '.', 'replace', '(', "'\\xa0'", ',', "' '", ')', ',', '"page_start"', ':', 'subpage_num', ',', '"page_end"', ':', 'self', '.', 'pdf', '.', 'numPages', '}', ')', 'if', 'subindex', '>', '0', ':', 'parent', '[', "'children'", ']', '[', 'subindex', '-', '1', ']', '[', '"page_end"', ']', '=', 'subpage_num', 'subindex', '+=', '1', 'return', 'chapters'] | Returns table-of-contents information extracted from the PDF doc.
When `subchapters=False`, the output is a list of this form
.. code-block:: python
[
{'title': 'First chapter', 'page_start': 0, 'page_end': 10},
{'title': 'Second chapter', 'page_start': 10, 'page_end': 20},
...
]
Use the `split_chapters` method to process this list.
When `subchapters=True`, the output is chapter-subchapter tree structure,
that can be processed using the `split_subchapters` method. | ['Returns', 'table', '-', 'of', '-', 'contents', 'information', 'extracted', 'from', 'the', 'PDF', 'doc', '.', 'When', 'subchapters', '=', 'False', 'the', 'output', 'is', 'a', 'list', 'of', 'this', 'form'] | train | https://github.com/learningequality/ricecooker/blob/2f0385282500cb77ef2894646c6f9ce11bd7a853/ricecooker/utils/pdf.py#L75-L136 |
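A short sketch of consuming the list returned when `subchapters=False`; the titles and page numbers below are invented, only the dict shape comes from the docstring above:

toc = [
    {"title": "First chapter", "page_start": 0, "page_end": 10},
    {"title": "Second chapter", "page_start": 10, "page_end": 20},
]
for chapter in toc:
    pages = chapter["page_end"] - chapter["page_start"]
    print("{title}: pages {page_start}-{page_end} ({n} pages)".format(n=pages, **chapter))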
7,390 | gem/oq-engine | openquake/risklib/scientific.py | VulnerabilityFunction._cov_for | def _cov_for(self, imls):
"""
Clip `imls` to the range associated with the support of the
vulnerability function and returns the corresponding
covariance values by linear interpolation. For instance
if the range is [0.005, 0.0269] and the imls are
[0.0049, 0.006, 0.027], the clipped imls are
[0.005, 0.006, 0.0269].
"""
return self._covs_i1d(
numpy.piecewise(
imls,
[imls > self.imls[-1], imls < self.imls[0]],
[self.imls[-1], self.imls[0], lambda x: x])) | python | def _cov_for(self, imls):
"""
Clip `imls` to the range associated with the support of the
vulnerability function and returns the corresponding
covariance values by linear interpolation. For instance
if the range is [0.005, 0.0269] and the imls are
[0.0049, 0.006, 0.027], the clipped imls are
[0.005, 0.006, 0.0269].
"""
return self._covs_i1d(
numpy.piecewise(
imls,
[imls > self.imls[-1], imls < self.imls[0]],
[self.imls[-1], self.imls[0], lambda x: x])) | ['def', '_cov_for', '(', 'self', ',', 'imls', ')', ':', 'return', 'self', '.', '_covs_i1d', '(', 'numpy', '.', 'piecewise', '(', 'imls', ',', '[', 'imls', '>', 'self', '.', 'imls', '[', '-', '1', ']', ',', 'imls', '<', 'self', '.', 'imls', '[', '0', ']', ']', ',', '[', 'self', '.', 'imls', '[', '-', '1', ']', ',', 'self', '.', 'imls', '[', '0', ']', ',', 'lambda', 'x', ':', 'x', ']', ')', ')'] | Clip `imls` to the range associated with the support of the
vulnerability function and returns the corresponding
covariance values by linear interpolation. For instance
if the range is [0.005, 0.0269] and the imls are
[0.0049, 0.006, 0.027], the clipped imls are
[0.005, 0.006, 0.0269]. | ['Clip', 'imls', 'to', 'the', 'range', 'associated', 'with', 'the', 'support', 'of', 'the', 'vulnerability', 'function', 'and', 'returns', 'the', 'corresponding', 'covariance', 'values', 'by', 'linear', 'interpolation', '.', 'For', 'instance', 'if', 'the', 'range', 'is', '[', '0', '.', '005', '0', '.', '0269', ']', 'and', 'the', 'imls', 'are', '[', '0', '.', '0049', '0', '.', '006', '0', '.', '027', ']', 'the', 'clipped', 'imls', 'are', '[', '0', '.', '005', '0', '.', '006', '0', '.', '0269', ']', '.'] | train | https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/risklib/scientific.py#L242-L255 |
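The clipping example in the docstring can be reproduced directly with the same `numpy.piecewise` call used above:

import numpy

imls = numpy.array([0.0049, 0.006, 0.027])
lo, hi = 0.005, 0.0269        # support of the vulnerability function in the example

clipped = numpy.piecewise(
    imls,
    [imls > hi, imls < lo],
    [hi, lo, lambda x: x])    # out-of-range values snap to the nearest bound
print(clipped)                # [0.005  0.006  0.0269]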
7,391 | 5monkeys/djedi-cms | djedi/admin/api.py | NodeApi.delete | def delete(self, request, uri):
"""
Delete versioned uri and return empty text response on success.
"""
uri = self.decode_uri(uri)
uris = cio.delete(uri)
if uri not in uris:
raise Http404
return self.render_to_response() | python | def delete(self, request, uri):
"""
Delete versioned uri and return empty text response on success.
"""
uri = self.decode_uri(uri)
uris = cio.delete(uri)
if uri not in uris:
raise Http404
return self.render_to_response() | ['def', 'delete', '(', 'self', ',', 'request', ',', 'uri', ')', ':', 'uri', '=', 'self', '.', 'decode_uri', '(', 'uri', ')', 'uris', '=', 'cio', '.', 'delete', '(', 'uri', ')', 'if', 'uri', 'not', 'in', 'uris', ':', 'raise', 'Http404', 'return', 'self', '.', 'render_to_response', '(', ')'] | Delete versioned uri and return empty text response on success. | ['Delete', 'versioned', 'uri', 'and', 'return', 'empty', 'text', 'response', 'on', 'success', '.'] | train | https://github.com/5monkeys/djedi-cms/blob/3c077edfda310717b9cdb4f2ee14e78723c94894/djedi/admin/api.py#L110-L120 |
7,392 | googleapis/google-cloud-python | bigquery/google/cloud/bigquery/_helpers.py | _rows_from_json | def _rows_from_json(values, schema):
"""Convert JSON row data to rows with appropriate types."""
from google.cloud.bigquery import Row
field_to_index = _field_to_index_mapping(schema)
return [Row(_row_tuple_from_json(r, schema), field_to_index) for r in values] | python | def _rows_from_json(values, schema):
"""Convert JSON row data to rows with appropriate types."""
from google.cloud.bigquery import Row
field_to_index = _field_to_index_mapping(schema)
return [Row(_row_tuple_from_json(r, schema), field_to_index) for r in values] | ['def', '_rows_from_json', '(', 'values', ',', 'schema', ')', ':', 'from', 'google', '.', 'cloud', '.', 'bigquery', 'import', 'Row', 'field_to_index', '=', '_field_to_index_mapping', '(', 'schema', ')', 'return', '[', 'Row', '(', '_row_tuple_from_json', '(', 'r', ',', 'schema', ')', ',', 'field_to_index', ')', 'for', 'r', 'in', 'values', ']'] | Convert JSON row data to rows with appropriate types. | ['Convert', 'JSON', 'row', 'data', 'to', 'rows', 'with', 'appropriate', 'types', '.'] | train | https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/bigquery/google/cloud/bigquery/_helpers.py#L226-L231 |
7,393 | ARMmbed/mbed-cloud-sdk-python | src/mbed_cloud/_backends/iam/apis/aggregator_account_admin_api.py | AggregatorAccountAdminApi.get_account_certificate | def get_account_certificate(self, account_id, cert_id, **kwargs): # noqa: E501
"""Get trusted certificate by ID. # noqa: E501
An endpoint for retrieving a trusted certificate by ID. **Example usage:** `curl https://api.us-east-1.mbedcloud.com/v3/accounts/{accountID}/trusted-certificates/{cert-id} -H 'Authorization: Bearer API_KEY'` # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass asynchronous=True
>>> thread = api.get_account_certificate(account_id, cert_id, asynchronous=True)
>>> result = thread.get()
:param asynchronous bool
:param str account_id: Account ID. (required)
:param str cert_id: The ID of the trusted certificate to be retrieved. (required)
:return: TrustedCertificateInternalResp
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('asynchronous'):
return self.get_account_certificate_with_http_info(account_id, cert_id, **kwargs) # noqa: E501
else:
(data) = self.get_account_certificate_with_http_info(account_id, cert_id, **kwargs) # noqa: E501
return data | python | def get_account_certificate(self, account_id, cert_id, **kwargs): # noqa: E501
"""Get trusted certificate by ID. # noqa: E501
An endpoint for retrieving a trusted certificate by ID. **Example usage:** `curl https://api.us-east-1.mbedcloud.com/v3/accounts/{accountID}/trusted-certificates/{cert-id} -H 'Authorization: Bearer API_KEY'` # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass asynchronous=True
>>> thread = api.get_account_certificate(account_id, cert_id, asynchronous=True)
>>> result = thread.get()
:param asynchronous bool
:param str account_id: Account ID. (required)
:param str cert_id: The ID of the trusted certificate to be retrieved. (required)
:return: TrustedCertificateInternalResp
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('asynchronous'):
return self.get_account_certificate_with_http_info(account_id, cert_id, **kwargs) # noqa: E501
else:
(data) = self.get_account_certificate_with_http_info(account_id, cert_id, **kwargs) # noqa: E501
return data | ['def', 'get_account_certificate', '(', 'self', ',', 'account_id', ',', 'cert_id', ',', '*', '*', 'kwargs', ')', ':', '# noqa: E501', 'kwargs', '[', "'_return_http_data_only'", ']', '=', 'True', 'if', 'kwargs', '.', 'get', '(', "'asynchronous'", ')', ':', 'return', 'self', '.', 'get_account_certificate_with_http_info', '(', 'account_id', ',', 'cert_id', ',', '*', '*', 'kwargs', ')', '# noqa: E501', 'else', ':', '(', 'data', ')', '=', 'self', '.', 'get_account_certificate_with_http_info', '(', 'account_id', ',', 'cert_id', ',', '*', '*', 'kwargs', ')', '# noqa: E501', 'return', 'data'] | Get trusted certificate by ID. # noqa: E501
An endpoint for retrieving a trusted certificate by ID. **Example usage:** `curl https://api.us-east-1.mbedcloud.com/v3/accounts/{accountID}/trusted-certificates/{cert-id} -H 'Authorization: Bearer API_KEY'` # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass asynchronous=True
>>> thread = api.get_account_certificate(account_id, cert_id, asynchronous=True)
>>> result = thread.get()
:param asynchronous bool
:param str account_id: Account ID. (required)
:param str cert_id: The ID of the trusted certificate to be retrieved. (required)
:return: TrustedCertificateInternalResp
If the method is called asynchronously,
returns the request thread. | ['Get', 'trusted', 'certificate', 'by', 'ID', '.', '#', 'noqa', ':', 'E501'] | train | https://github.com/ARMmbed/mbed-cloud-sdk-python/blob/c0af86fb2cdd4dc7ed26f236139241067d293509/src/mbed_cloud/_backends/iam/apis/aggregator_account_admin_api.py#L1744-L1765 |
7,394 | couchbase/couchbase-python-client | couchbase/bucket.py | Bucket.queue_push | def queue_push(self, key, value, create=False, **kwargs):
"""
Add an item to the end of a queue.
:param key: The document ID of the queue
:param value: The item to add to the queue
:param create: Whether the queue should be created if it does not exist
:param kwargs: Arguments to pass to :meth:`mutate_in`
:return: :class:`OperationResult`
:raise: :cb_exc:`NotFoundError` if the queue does not exist and
`create` was not specified.
example::
# Ensure it's removed first
cb.remove('a_queue')
cb.queue_push('a_queue', 'job9999', create=True)
cb.queue_pop('a_queue').value # => job9999
"""
return self.list_prepend(key, value, **kwargs) | python | def queue_push(self, key, value, create=False, **kwargs):
"""
Add an item to the end of a queue.
:param key: The document ID of the queue
:param value: The item to add to the queue
:param create: Whether the queue should be created if it does not exist
:param kwargs: Arguments to pass to :meth:`mutate_in`
:return: :class:`OperationResult`
:raise: :cb_exc:`NotFoundError` if the queue does not exist and
`create` was not specified.
example::
# Ensure it's removed first
cb.remove('a_queue')
cb.queue_push('a_queue', 'job9999', create=True)
cb.queue_pop('a_queue').value # => job9999
"""
return self.list_prepend(key, value, **kwargs) | ['def', 'queue_push', '(', 'self', ',', 'key', ',', 'value', ',', 'create', '=', 'False', ',', '*', '*', 'kwargs', ')', ':', 'return', 'self', '.', 'list_prepend', '(', 'key', ',', 'value', ',', '*', '*', 'kwargs', ')'] | Add an item to the end of a queue.
:param key: The document ID of the queue
:param value: The item to add to the queue
:param create: Whether the queue should be created if it does not exist
:param kwargs: Arguments to pass to :meth:`mutate_in`
:return: :class:`OperationResult`
:raise: :cb_exc:`NotFoundError` if the queue does not exist and
`create` was not specified.
example::
# Ensure it's removed first
cb.remove('a_queue')
cb.queue_push('a_queue', 'job9999', create=True)
cb.queue_pop('a_queue').value # => job9999 | ['Add', 'an', 'item', 'to', 'the', 'end', 'of', 'a', 'queue', '.'] | train | https://github.com/couchbase/couchbase-python-client/blob/a7bada167785bf79a29c39f820d932a433a6a535/couchbase/bucket.py#L2383-L2403 |
7,395 | sony/nnabla | python/src/nnabla/utils/converter/onnx/importer.py | generate_transpose | def generate_transpose(node_name, in_name, out_name, axes, base_name, func_counter):
"""Generate a Transpose operator to transpose the specified buffer.
"""
trans = nnabla_pb2.Function()
trans.type = "Transpose"
set_function_name(trans, node_name, base_name, func_counter)
trans.input.extend([in_name])
trans.output.extend([out_name])
tp = trans.transpose_param
tp.axes.extend(axes)
return trans | python | def generate_transpose(node_name, in_name, out_name, axes, base_name, func_counter):
"""Generate a Transpose operator to transpose the specified buffer.
"""
trans = nnabla_pb2.Function()
trans.type = "Transpose"
set_function_name(trans, node_name, base_name, func_counter)
trans.input.extend([in_name])
trans.output.extend([out_name])
tp = trans.transpose_param
tp.axes.extend(axes)
return trans | ['def', 'generate_transpose', '(', 'node_name', ',', 'in_name', ',', 'out_name', ',', 'axes', ',', 'base_name', ',', 'func_counter', ')', ':', 'trans', '=', 'nnabla_pb2', '.', 'Function', '(', ')', 'trans', '.', 'type', '=', '"Transpose"', 'set_function_name', '(', 'trans', ',', 'node_name', ',', 'base_name', ',', 'func_counter', ')', 'trans', '.', 'input', '.', 'extend', '(', '[', 'in_name', ']', ')', 'trans', '.', 'output', '.', 'extend', '(', '[', 'out_name', ']', ')', 'tp', '=', 'trans', '.', 'transpose_param', 'tp', '.', 'axes', '.', 'extend', '(', 'axes', ')', 'return', 'trans'] | Generate a Transpose operator to transpose the specified buffer. | ['Generate', 'a', 'Transpose', 'operator', 'to', 'transpose', 'the', 'specified', 'buffer', '.'] | train | https://github.com/sony/nnabla/blob/aaf3d33b7cbb38f2a03aa754178ba8f7c8481320/python/src/nnabla/utils/converter/onnx/importer.py#L162-L172 |
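The `axes` argument above has the usual transpose semantics: output axis i takes its size from input axis axes[i]. A small numpy-only illustration of that meaning (no NNabla protobuf objects involved):

import numpy as np

x = np.zeros((2, 3, 4))              # toy buffer shape
axes = (0, 2, 1)                     # swap the last two axes
print(np.transpose(x, axes).shape)   # (2, 4, 3)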
7,396 | obriencj/python-javatools | javatools/distdiff.py | DistReport.collect_impl | def collect_impl(self):
"""
overrides DistJarChange and DistClassChange from the underlying
DistChange with DistJarReport and DistClassReport instances
"""
for c in DistChange.collect_impl(self):
if isinstance(c, DistJarChange):
if c.is_change():
ln = DistJarReport.report_name
nr = self.reporter.subreporter(c.entry, ln)
c = DistJarReport(c.ldata, c.rdata, c.entry, nr)
elif isinstance(c, DistClassChange):
if c.is_change():
ln = DistClassReport.report_name
nr = self.reporter.subreporter(c.entry, ln)
c = DistClassReport(c.ldata, c.rdata, c.entry, nr)
yield c | python | def collect_impl(self):
"""
overrides DistJarChange and DistClassChange from the underlying
DistChange with DistJarReport and DistClassReport instances
"""
for c in DistChange.collect_impl(self):
if isinstance(c, DistJarChange):
if c.is_change():
ln = DistJarReport.report_name
nr = self.reporter.subreporter(c.entry, ln)
c = DistJarReport(c.ldata, c.rdata, c.entry, nr)
elif isinstance(c, DistClassChange):
if c.is_change():
ln = DistClassReport.report_name
nr = self.reporter.subreporter(c.entry, ln)
c = DistClassReport(c.ldata, c.rdata, c.entry, nr)
yield c | ['def', 'collect_impl', '(', 'self', ')', ':', 'for', 'c', 'in', 'DistChange', '.', 'collect_impl', '(', 'self', ')', ':', 'if', 'isinstance', '(', 'c', ',', 'DistJarChange', ')', ':', 'if', 'c', '.', 'is_change', '(', ')', ':', 'ln', '=', 'DistJarReport', '.', 'report_name', 'nr', '=', 'self', '.', 'reporter', '.', 'subreporter', '(', 'c', '.', 'entry', ',', 'ln', ')', 'c', '=', 'DistJarReport', '(', 'c', '.', 'ldata', ',', 'c', '.', 'rdata', ',', 'c', '.', 'entry', ',', 'nr', ')', 'elif', 'isinstance', '(', 'c', ',', 'DistClassChange', ')', ':', 'if', 'c', '.', 'is_change', '(', ')', ':', 'ln', '=', 'DistClassReport', '.', 'report_name', 'nr', '=', 'self', '.', 'reporter', '.', 'subreporter', '(', 'c', '.', 'entry', ',', 'ln', ')', 'c', '=', 'DistClassReport', '(', 'c', '.', 'ldata', ',', 'c', '.', 'rdata', ',', 'c', '.', 'entry', ',', 'nr', ')', 'yield', 'c'] | overrides DistJarChange and DistClassChange from the underlying
DistChange with DistJarReport and DistClassReport instances | ['overrides', 'DistJarChange', 'and', 'DistClassChange', 'from', 'the', 'underlying', 'DistChange', 'with', 'DistJarReport', 'and', 'DistClassReport', 'instances'] | train | https://github.com/obriencj/python-javatools/blob/9e2332b452ddc508bed0615937dddcb2cf051557/javatools/distdiff.py#L383-L400 |
7,397 | bcbio/bcbio-nextgen | bcbio/variation/varscan.py | _get_jvm_opts | def _get_jvm_opts(config, tmp_dir):
"""Retrieve common options for running VarScan.
Handles jvm_opts, setting user and country to English to avoid issues
with different locales producing non-compliant VCF.
"""
resources = config_utils.get_resources("varscan", config)
jvm_opts = resources.get("jvm_opts", ["-Xmx750m", "-Xmx2g"])
jvm_opts = config_utils.adjust_opts(jvm_opts,
{"algorithm": {"memory_adjust":
{"magnitude": 1.1, "direction": "decrease"}}})
jvm_opts += ["-Duser.language=en", "-Duser.country=US"]
jvm_opts += broad.get_default_jvm_opts(tmp_dir)
return " ".join(jvm_opts) | python | def _get_jvm_opts(config, tmp_dir):
"""Retrieve common options for running VarScan.
Handles jvm_opts, setting user and country to English to avoid issues
with different locales producing non-compliant VCF.
"""
resources = config_utils.get_resources("varscan", config)
jvm_opts = resources.get("jvm_opts", ["-Xmx750m", "-Xmx2g"])
jvm_opts = config_utils.adjust_opts(jvm_opts,
{"algorithm": {"memory_adjust":
{"magnitude": 1.1, "direction": "decrease"}}})
jvm_opts += ["-Duser.language=en", "-Duser.country=US"]
jvm_opts += broad.get_default_jvm_opts(tmp_dir)
return " ".join(jvm_opts) | ['def', '_get_jvm_opts', '(', 'config', ',', 'tmp_dir', ')', ':', 'resources', '=', 'config_utils', '.', 'get_resources', '(', '"varscan"', ',', 'config', ')', 'jvm_opts', '=', 'resources', '.', 'get', '(', '"jvm_opts"', ',', '[', '"-Xmx750m"', ',', '"-Xmx2g"', ']', ')', 'jvm_opts', '=', 'config_utils', '.', 'adjust_opts', '(', 'jvm_opts', ',', '{', '"algorithm"', ':', '{', '"memory_adjust"', ':', '{', '"magnitude"', ':', '1.1', ',', '"direction"', ':', '"decrease"', '}', '}', '}', ')', 'jvm_opts', '+=', '[', '"-Duser.language=en"', ',', '"-Duser.country=US"', ']', 'jvm_opts', '+=', 'broad', '.', 'get_default_jvm_opts', '(', 'tmp_dir', ')', 'return', '" "', '.', 'join', '(', 'jvm_opts', ')'] | Retrieve common options for running VarScan.
Handles jvm_opts, setting user and country to English to avoid issues
with different locales producing non-compliant VCF. | ['Retrieve', 'common', 'options', 'for', 'running', 'VarScan', '.', 'Handles', 'jvm_opts', 'setting', 'user', 'and', 'country', 'to', 'English', 'to', 'avoid', 'issues', 'with', 'different', 'locales', 'producing', 'non', '-', 'compliant', 'VCF', '.'] | train | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/varscan.py#L36-L48 |
7,398 | jdoda/sdl2hl | sdl2hl/renderer.py | Renderer.draw_points | def draw_points(self, *points):
"""Draw multiple points on the current rendering target.
Args:
*points (Point): The points to draw.
Raises:
SDLError: If an error is encountered.
"""
point_array = ffi.new('SDL_Point[]', len(points))
for i, p in enumerate(points):
point_array[i] = p._ptr[0]
check_int_err(lib.SDL_RenderDrawPoints(self._ptr, point_array, len(points))) | python | def draw_points(self, *points):
"""Draw multiple points on the current rendering target.
Args:
*points (Point): The points to draw.
Raises:
SDLError: If an error is encountered.
"""
point_array = ffi.new('SDL_Point[]', len(points))
for i, p in enumerate(points):
point_array[i] = p._ptr[0]
check_int_err(lib.SDL_RenderDrawPoints(self._ptr, point_array, len(points))) | ['def', 'draw_points', '(', 'self', ',', '*', 'points', ')', ':', 'point_array', '=', 'ffi', '.', 'new', '(', "'SDL_Point[]'", ',', 'len', '(', 'points', ')', ')', 'for', 'i', ',', 'p', 'in', 'enumerate', '(', 'points', ')', ':', 'point_array', '[', 'i', ']', '=', 'p', '.', '_ptr', '[', '0', ']', 'check_int_err', '(', 'lib', '.', 'SDL_RenderDrawPoints', '(', 'self', '.', '_ptr', ',', 'point_array', ',', 'len', '(', 'points', ')', ')', ')'] | Draw multiple points on the current rendering target.
Args:
*points (Point): The points to draw.
Raises:
SDLError: If an error is encountered. | ['Draw', 'multiple', 'points', 'on', 'the', 'current', 'rendering', 'target', '.'] | train | https://github.com/jdoda/sdl2hl/blob/3b477e1e01cea5d8e15e9e5ef3a302ea460f5946/sdl2hl/renderer.py#L204-L216 |
7,399 | GoogleCloudPlatform/appengine-mapreduce | python/src/mapreduce/key_ranges.py | KeyRangesFactory.from_json | def from_json(cls, json):
"""Deserialize from json.
Args:
json: a dict of json compatible fields.
Returns:
a KeyRanges object.
Raises:
ValueError: if the json is invalid.
"""
if json["name"] in _KEYRANGES_CLASSES:
return _KEYRANGES_CLASSES[json["name"]].from_json(json)
raise ValueError("Invalid json %s", json) | python | def from_json(cls, json):
"""Deserialize from json.
Args:
json: a dict of json compatible fields.
Returns:
a KeyRanges object.
Raises:
ValueError: if the json is invalid.
"""
if json["name"] in _KEYRANGES_CLASSES:
return _KEYRANGES_CLASSES[json["name"]].from_json(json)
raise ValueError("Invalid json %s", json) | ['def', 'from_json', '(', 'cls', ',', 'json', ')', ':', 'if', 'json', '[', '"name"', ']', 'in', '_KEYRANGES_CLASSES', ':', 'return', '_KEYRANGES_CLASSES', '[', 'json', '[', '"name"', ']', ']', '.', 'from_json', '(', 'json', ')', 'raise', 'ValueError', '(', '"Invalid json %s"', ',', 'json', ')'] | Deserialize from json.
Args:
json: a dict of json compatible fields.
Returns:
a KeyRanges object.
Raises:
ValueError: if the json is invalid. | ['Deserialize', 'from', 'json', '.'] | train | https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/src/mapreduce/key_ranges.py#L58-L72 |
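The method above is a registry-dispatch deserializer: the "name" field in the JSON selects a concrete class from a module-level mapping. A generic, self-contained sketch of the pattern with invented class names:

class KeyRangesA(object):
    @classmethod
    def from_json(cls, json):
        return cls()

class KeyRangesB(KeyRangesA):
    pass

_KEYRANGES_CLASSES = {"KeyRangesA": KeyRangesA, "KeyRangesB": KeyRangesB}

def from_json(json):
    # Dispatch on json["name"]; unknown names raise ValueError.
    if json["name"] in _KEYRANGES_CLASSES:
        return _KEYRANGES_CLASSES[json["name"]].from_json(json)
    raise ValueError("Invalid json %r" % (json,))

print(type(from_json({"name": "KeyRangesB"})).__name__)  # KeyRangesB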