code (string, lengths 75 to 104k) | docstring (string, lengths 1 to 46.9k) | text (string, lengths 164 to 112k) |
---|---|---|
def build_subresource_uri(self, resource_id_or_uri=None, subresource_id_or_uri=None, subresource_path=''):
"""Helps to build a URI with resource path and its sub resource path.
Args:
resource_id_or_uri: ID/URI of the main resource.
subresource_id_or_uri: ID/URI of the sub resource.
subresource_path: Sub resource path to be added with the URI.
Returns:
Returns URI
"""
if subresource_id_or_uri and "/" in subresource_id_or_uri:
return subresource_id_or_uri
else:
if not resource_id_or_uri:
raise exceptions.HPOneViewValueError(RESOURCE_ID_OR_URI_REQUIRED)
resource_uri = self.build_uri(resource_id_or_uri)
uri = "{}/{}/{}".format(resource_uri, subresource_path, str(subresource_id_or_uri or ''))
uri = uri.replace("//", "/")
if uri.endswith("/"):
uri = uri[:-1]
return uri | Helps to build a URI with resource path and its sub resource path.
Args:
resource_id_or_uri: ID/URI of the main resource.
subresource_id_or_uri: ID/URI of the sub resource.
subresource_path: Sub resource path to be added with the URI.
Returns:
Returns URI | Below is the instruction that describes the task:
### Input:
Helps to build a URI with resource path and its sub resource path.
Args:
resource_id_or_uri: ID/URI of the main resource.
subresource_id_or_uri: ID/URI of the sub resource.
subresource_path: Sub resource path to be added with the URI.
Returns:
Returns URI
### Response:
def build_subresource_uri(self, resource_id_or_uri=None, subresource_id_or_uri=None, subresource_path=''):
"""Helps to build a URI with resource path and its sub resource path.
Args:
resource_id_or_uri: ID/URI of the main resource.
subresource_id_or_uri: ID/URI of the sub resource.
subresource_path: Sub resource path to be added with the URI.
Returns:
Returns URI
"""
if subresource_id_or_uri and "/" in subresource_id_or_uri:
return subresource_id_or_uri
else:
if not resource_id_or_uri:
raise exceptions.HPOneViewValueError(RESOURCE_ID_OR_URI_REQUIRED)
resource_uri = self.build_uri(resource_id_or_uri)
uri = "{}/{}/{}".format(resource_uri, subresource_path, str(subresource_id_or_uri or ''))
uri = uri.replace("//", "/")
if uri.endswith("/"):
uri = uri[:-1]
return uri |
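The method above leans on self.build_uri and a library constant; the following self-contained sketch reproduces the same resolution rules so the behaviour can be checked in isolation. The '/rest/enclosures' base path and the example IDs are invented for illustration and are not part of the library.
def build_subresource_uri_sketch(resource_id_or_uri=None, subresource_id_or_uri=None,
                                 subresource_path='', base='/rest/enclosures'):
    # Full sub-resource URIs (containing '/') are returned unchanged.
    if subresource_id_or_uri and "/" in subresource_id_or_uri:
        return subresource_id_or_uri
    if not resource_id_or_uri:
        raise ValueError("resource id or uri required")
    # Assumed stand-in for self.build_uri: prefix bare IDs with the base path.
    resource_uri = (resource_id_or_uri if "/" in resource_id_or_uri
                    else "{}/{}".format(base, resource_id_or_uri))
    uri = "{}/{}/{}".format(resource_uri, subresource_path, subresource_id_or_uri or '')
    return uri.replace("//", "/").rstrip("/")
print(build_subresource_uri_sketch("abc123", "def456", "environmentalConfiguration"))
# -> /rest/enclosures/abc123/environmentalConfiguration/def456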
def _pick_colours(self, palette_name, selected=False):
"""
Pick the rendering colour for a widget based on the current state.
:param palette_name: The stem name for the widget - e.g. "button".
:param selected: Whether this item is selected or not.
:returns: A colour tuple (fg, attr, bg) to be used.
"""
return self._frame.palette[self._pick_palette_key(palette_name, selected)] | Pick the rendering colour for a widget based on the current state.
:param palette_name: The stem name for the widget - e.g. "button".
:param selected: Whether this item is selected or not.
:returns: A colour tuple (fg, attr, bg) to be used. | Below is the instruction that describes the task:
### Input:
Pick the rendering colour for a widget based on the current state.
:param palette_name: The stem name for the widget - e.g. "button".
:param selected: Whether this item is selected or not.
:returns: A colour tuple (fg, attr, bg) to be used.
### Response:
def _pick_colours(self, palette_name, selected=False):
"""
Pick the rendering colour for a widget based on the current state.
:param palette_name: The stem name for the widget - e.g. "button".
:param selected: Whether this item is selected or not.
:returns: A colour tuple (fg, attr, bg) to be used.
"""
return self._frame.palette[self._pick_palette_key(palette_name, selected)] |
def html(text, extensions=0, render_flags=0):
"""
Convert markdown text to HTML.
``extensions`` can be a list or tuple of extensions (e.g.
``('fenced-code', 'footnotes', 'strikethrough')``) or an integer
(e.g. ``EXT_FENCED_CODE | EXT_FOOTNOTES | EXT_STRIKETHROUGH``).
``render_flags`` can be a list or tuple of flags (e.g.
``('skip-html', 'hard-wrap')``) or an integer
(e.g. ``HTML_SKIP_HTML | HTML_HARD_WRAP``).
"""
extensions = args_to_int(extension_map, extensions)
render_flags = args_to_int(html_flag_map, render_flags)
ib = lib.hoedown_buffer_new(IUNIT)
ob = lib.hoedown_buffer_new(OUNIT)
renderer = lib.hoedown_html_renderer_new(render_flags, 0)
document = lib.hoedown_document_new(renderer, extensions, 16);
lib.hoedown_buffer_puts(ib, text.encode('utf-8'))
lib.hoedown_document_render(document, ob, ib.data, ib.size);
lib.hoedown_buffer_free(ib);
lib.hoedown_document_free(document);
lib.hoedown_html_renderer_free(renderer);
try:
return to_string(ob)
finally:
lib.hoedown_buffer_free(ob); | Convert markdown text to HTML.
``extensions`` can be a list or tuple of extensions (e.g.
``('fenced-code', 'footnotes', 'strikethrough')``) or an integer
(e.g. ``EXT_FENCED_CODE | EXT_FOOTNOTES | EXT_STRIKETHROUGH``).
``render_flags`` can be a list or tuple of flags (e.g.
``('skip-html', 'hard-wrap')``) or an integer
(e.g. ``HTML_SKIP_HTML | HTML_HARD_WRAP``). | Below is the instruction that describes the task:
### Input:
Convert markdown text to HTML.
``extensions`` can be a list or tuple of extensions (e.g.
``('fenced-code', 'footnotes', 'strikethrough')``) or an integer
(e.g. ``EXT_FENCED_CODE | EXT_FOOTNOTES | EXT_STRIKETHROUGH``).
``render_flags`` can be a list or tuple of flags (e.g.
``('skip-html', 'hard-wrap')``) or an integer
(e.g. ``HTML_SKIP_HTML | HTML_HARD_WRAP``).
### Response:
def html(text, extensions=0, render_flags=0):
"""
Convert markdown text to HTML.
``extensions`` can be a list or tuple of extensions (e.g.
``('fenced-code', 'footnotes', 'strikethrough')``) or an integer
(e.g. ``EXT_FENCED_CODE | EXT_FOOTNOTES | EXT_STRIKETHROUGH``).
``render_flags`` can be a list or tuple of flags (e.g.
``('skip-html', 'hard-wrap')``) or an integer
(e.g. ``HTML_SKIP_HTML | HTML_HARD_WRAP``).
"""
extensions = args_to_int(extension_map, extensions)
render_flags = args_to_int(html_flag_map, render_flags)
ib = lib.hoedown_buffer_new(IUNIT)
ob = lib.hoedown_buffer_new(OUNIT)
renderer = lib.hoedown_html_renderer_new(render_flags, 0)
document = lib.hoedown_document_new(renderer, extensions, 16);
lib.hoedown_buffer_puts(ib, text.encode('utf-8'))
lib.hoedown_document_render(document, ob, ib.data, ib.size);
lib.hoedown_buffer_free(ib);
lib.hoedown_document_free(document);
lib.hoedown_html_renderer_free(renderer);
try:
return to_string(ob)
finally:
lib.hoedown_buffer_free(ob); |
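This function is a thin wrapper over a CFFI binding to the hoedown C renderer. A usage sketch, assuming html() above is in scope; the extension and flag names follow the docstring, and the rendered output shown is only indicative.
rendered = html(
    "~~old~~ text\n",
    extensions=('strikethrough',),      # names map to the EXT_* constants
    render_flags=('skip-html',),        # names map to the HTML_* constants
)
print(rendered)   # e.g. '<p><del>old</del> text</p>\n'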
def add_occurrences(events, count):
"""
Adds an occurrence key to the event object w/ a list of occurrences
and adds a popover (for use with twitter bootstrap).
The occurrence is added so that each event can be aware of what
day(s) it occurs in the month.
"""
for day in count:
for item in count[day]:
for event in events:
if event.pk == item[1]:
try:
event.occurrence.append(day)
except AttributeError:
event.occurrence = []
event.occurrence.append(day) | Adds an occurrence key to the event object w/ a list of occurrences
and adds a popover (for use with twitter bootstrap).
The occurrence is added so that each event can be aware of what
day(s) it occurs in the month. | Below is the instruction that describes the task:
### Input:
Adds an occurrence key to the event object w/ a list of occurrences
and adds a popover (for use with twitter bootstrap).
The occurrence is added so that each event can be aware of what
day(s) it occurs in the month.
### Response:
def add_occurrences(events, count):
"""
Adds an occurrence key to the event object w/ a list of occurrences
and adds a popover (for use with twitter bootstrap).
The occurrence is added so that each event can be aware of what
day(s) it occurs in the month.
"""
for day in count:
for item in count[day]:
for event in events:
if event.pk == item[1]:
try:
event.occurrence.append(day)
except AttributeError:
event.occurrence = []
event.occurrence.append(day) |
def make_ifar_plot(workflow, trigger_file, out_dir, tags=None,
hierarchical_level=None):
""" Creates a node in the workflow for plotting cumulative histogram
of IFAR values.
"""
if hierarchical_level is not None and tags:
tags = [("HIERARCHICAL_LEVEL_{:02d}".format(
hierarchical_level))] + tags
elif hierarchical_level is not None and not tags:
tags = ["HIERARCHICAL_LEVEL_{:02d}".format(hierarchical_level)]
elif hierarchical_level is None and not tags:
tags = []
makedir(out_dir)
node = PlotExecutable(workflow.cp, 'page_ifar', ifos=workflow.ifos,
out_dir=out_dir, tags=tags).create_node()
node.add_input_opt('--trigger-file', trigger_file)
if hierarchical_level is not None:
node.add_opt('--use-hierarchical-level', hierarchical_level)
node.new_output_file_opt(workflow.analysis_time, '.png', '--output-file')
workflow += node
return node.output_files[0] | Creates a node in the workflow for plotting cumulative histogram
of IFAR values. | Below is the instruction that describes the task:
### Input:
Creates a node in the workflow for plotting cumulative histogram
of IFAR values.
### Response:
def make_ifar_plot(workflow, trigger_file, out_dir, tags=None,
hierarchical_level=None):
""" Creates a node in the workflow for plotting cumulative histogram
of IFAR values.
"""
if hierarchical_level is not None and tags:
tags = [("HIERARCHICAL_LEVEL_{:02d}".format(
hierarchical_level))] + tags
elif hierarchical_level is not None and not tags:
tags = ["HIERARCHICAL_LEVEL_{:02d}".format(hierarchical_level)]
elif hierarchical_level is None and not tags:
tags = []
makedir(out_dir)
node = PlotExecutable(workflow.cp, 'page_ifar', ifos=workflow.ifos,
out_dir=out_dir, tags=tags).create_node()
node.add_input_opt('--trigger-file', trigger_file)
if hierarchical_level is not None:
node.add_opt('--use-hierarchical-level', hierarchical_level)
node.new_output_file_opt(workflow.analysis_time, '.png', '--output-file')
workflow += node
return node.output_files[0] |
def deref(o, timeout_s=None, timeout_val=None):
"""Dereference a Deref object and return its contents.
If o is an object implementing IBlockingDeref and timeout_s and
timeout_val are supplied, deref will wait at most timeout_s seconds,
returning timeout_val if timeout_s seconds elapse and o has not
returned."""
if isinstance(o, IDeref):
return o.deref()
elif isinstance(o, IBlockingDeref):
return o.deref(timeout_s, timeout_val)
raise TypeError(f"Object of type {type(o)} cannot be dereferenced") | Dereference a Deref object and return its contents.
If o is an object implementing IBlockingDeref and timeout_s and
timeout_val are supplied, deref will wait at most timeout_s seconds,
returning timeout_val if timeout_s seconds elapse and o has not
returned. | Below is the instruction that describes the task:
### Input:
Dereference a Deref object and return its contents.
If o is an object implementing IBlockingDeref and timeout_s and
timeout_val are supplied, deref will wait at most timeout_s seconds,
returning timeout_val if timeout_s seconds elapse and o has not
returned.
### Response:
def deref(o, timeout_s=None, timeout_val=None):
"""Dereference a Deref object and return its contents.
If o is an object implementing IBlockingDeref and timeout_s and
timeout_val are supplied, deref will wait at most timeout_s seconds,
returning timeout_val if timeout_s seconds elapse and o has not
returned."""
if isinstance(o, IDeref):
return o.deref()
elif isinstance(o, IBlockingDeref):
return o.deref(timeout_s, timeout_val)
raise TypeError(f"Object of type {type(o)} cannot be dereferenced") |
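IDeref and IBlockingDeref come from the surrounding codebase. The stand-ins below are written only to illustrate the dispatch and are not the library's real interfaces; the sketch is self-contained and runnable.
import abc

class IDeref(abc.ABC):                      # stand-in for the real interface
    @abc.abstractmethod
    def deref(self): ...

class Box(IDeref):                          # simplest possible container
    def __init__(self, value):
        self._value = value
    def deref(self):
        return self._value

def deref(o, timeout_s=None, timeout_val=None):   # same dispatch as the row above
    if isinstance(o, IDeref):
        return o.deref()
    raise TypeError(f"Object of type {type(o)} cannot be dereferenced")

assert deref(Box(42)) == 42                 # dispatches to the IDeref branch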
def container_setting(name, container, settings=None):
'''
Set the value of the setting for an IIS container.
:param str name: The name of the IIS container.
:param str container: The type of IIS container. The container types are:
AppPools, Sites, SslBindings
:param str settings: A dictionary of the setting names and their values.
Example of usage for the ``AppPools`` container:
.. code-block:: yaml
site0-apppool-setting:
win_iis.container_setting:
- name: site0
- container: AppPools
- settings:
managedPipelineMode: Integrated
processModel.maxProcesses: 1
processModel.userName: TestUser
processModel.password: TestPassword
processModel.identityType: SpecificUser
Example of usage for the ``Sites`` container:
.. code-block:: yaml
site0-site-setting:
win_iis.container_setting:
- name: site0
- container: Sites
- settings:
logFile.logFormat: W3C
logFile.period: Daily
limits.maxUrlSegments: 32
'''
identityType_map2string = {0: 'LocalSystem', 1: 'LocalService', 2: 'NetworkService', 3: 'SpecificUser', 4: 'ApplicationPoolIdentity'}
ret = {'name': name,
'changes': {},
'comment': str(),
'result': None}
if not settings:
ret['comment'] = 'No settings to change provided.'
ret['result'] = True
return ret
ret_settings = {
'changes': {},
'failures': {},
}
current_settings = __salt__['win_iis.get_container_setting'](name=name,
container=container,
settings=settings.keys())
for setting in settings:
# map identity type from numeric to string for comparing
if setting == 'processModel.identityType' and settings[setting] in identityType_map2string.keys():
settings[setting] = identityType_map2string[settings[setting]]
if str(settings[setting]) != str(current_settings[setting]):
ret_settings['changes'][setting] = {'old': current_settings[setting],
'new': settings[setting]}
if not ret_settings['changes']:
ret['comment'] = 'Settings already contain the provided values.'
ret['result'] = True
return ret
elif __opts__['test']:
ret['comment'] = 'Settings will be changed.'
ret['changes'] = ret_settings
return ret
__salt__['win_iis.set_container_setting'](name=name, container=container, settings=settings)
new_settings = __salt__['win_iis.get_container_setting'](name=name,
container=container,
settings=settings.keys())
for setting in settings:
if str(settings[setting]) != str(new_settings[setting]):
ret_settings['failures'][setting] = {'old': current_settings[setting],
'new': new_settings[setting]}
ret_settings['changes'].pop(setting, None)
if ret_settings['failures']:
ret['comment'] = 'Some settings failed to change.'
ret['changes'] = ret_settings
ret['result'] = False
else:
ret['comment'] = 'Set settings to contain the provided values.'
ret['changes'] = ret_settings['changes']
ret['result'] = True
return ret | Set the value of the setting for an IIS container.
:param str name: The name of the IIS container.
:param str container: The type of IIS container. The container types are:
AppPools, Sites, SslBindings
:param str settings: A dictionary of the setting names and their values.
Example of usage for the ``AppPools`` container:
.. code-block:: yaml
site0-apppool-setting:
win_iis.container_setting:
- name: site0
- container: AppPools
- settings:
managedPipelineMode: Integrated
processModel.maxProcesses: 1
processModel.userName: TestUser
processModel.password: TestPassword
processModel.identityType: SpecificUser
Example of usage for the ``Sites`` container:
.. code-block:: yaml
site0-site-setting:
win_iis.container_setting:
- name: site0
- container: Sites
- settings:
logFile.logFormat: W3C
logFile.period: Daily
limits.maxUrlSegments: 32 | Below is the instruction that describes the task:
### Input:
Set the value of the setting for an IIS container.
:param str name: The name of the IIS container.
:param str container: The type of IIS container. The container types are:
AppPools, Sites, SslBindings
:param str settings: A dictionary of the setting names and their values.
Example of usage for the ``AppPools`` container:
.. code-block:: yaml
site0-apppool-setting:
win_iis.container_setting:
- name: site0
- container: AppPools
- settings:
managedPipelineMode: Integrated
processModel.maxProcesses: 1
processModel.userName: TestUser
processModel.password: TestPassword
processModel.identityType: SpecificUser
Example of usage for the ``Sites`` container:
.. code-block:: yaml
site0-site-setting:
win_iis.container_setting:
- name: site0
- container: Sites
- settings:
logFile.logFormat: W3C
logFile.period: Daily
limits.maxUrlSegments: 32
### Response:
def container_setting(name, container, settings=None):
'''
Set the value of the setting for an IIS container.
:param str name: The name of the IIS container.
:param str container: The type of IIS container. The container types are:
AppPools, Sites, SslBindings
:param str settings: A dictionary of the setting names and their values.
Example of usage for the ``AppPools`` container:
.. code-block:: yaml
site0-apppool-setting:
win_iis.container_setting:
- name: site0
- container: AppPools
- settings:
managedPipelineMode: Integrated
processModel.maxProcesses: 1
processModel.userName: TestUser
processModel.password: TestPassword
processModel.identityType: SpecificUser
Example of usage for the ``Sites`` container:
.. code-block:: yaml
site0-site-setting:
win_iis.container_setting:
- name: site0
- container: Sites
- settings:
logFile.logFormat: W3C
logFile.period: Daily
limits.maxUrlSegments: 32
'''
identityType_map2string = {0: 'LocalSystem', 1: 'LocalService', 2: 'NetworkService', 3: 'SpecificUser', 4: 'ApplicationPoolIdentity'}
ret = {'name': name,
'changes': {},
'comment': str(),
'result': None}
if not settings:
ret['comment'] = 'No settings to change provided.'
ret['result'] = True
return ret
ret_settings = {
'changes': {},
'failures': {},
}
current_settings = __salt__['win_iis.get_container_setting'](name=name,
container=container,
settings=settings.keys())
for setting in settings:
# map identity type from numeric to string for comparing
if setting == 'processModel.identityType' and settings[setting] in identityType_map2string.keys():
settings[setting] = identityType_map2string[settings[setting]]
if str(settings[setting]) != str(current_settings[setting]):
ret_settings['changes'][setting] = {'old': current_settings[setting],
'new': settings[setting]}
if not ret_settings['changes']:
ret['comment'] = 'Settings already contain the provided values.'
ret['result'] = True
return ret
elif __opts__['test']:
ret['comment'] = 'Settings will be changed.'
ret['changes'] = ret_settings
return ret
__salt__['win_iis.set_container_setting'](name=name, container=container, settings=settings)
new_settings = __salt__['win_iis.get_container_setting'](name=name,
container=container,
settings=settings.keys())
for setting in settings:
if str(settings[setting]) != str(new_settings[setting]):
ret_settings['failures'][setting] = {'old': current_settings[setting],
'new': new_settings[setting]}
ret_settings['changes'].pop(setting, None)
if ret_settings['failures']:
ret['comment'] = 'Some settings failed to change.'
ret['changes'] = ret_settings
ret['result'] = False
else:
ret['comment'] = 'Set settings to contain the provided values.'
ret['changes'] = ret_settings['changes']
ret['result'] = True
return ret |
def generate_encodeable_characters(characters: Iterable[str],
encodings: Iterable[str]) -> Iterable[str]:
"""Generates the subset of 'characters' that can be encoded by 'encodings'.
Args:
characters: The characters to check for encodeability e.g. 'abcd'.
encodings: The encodings to check against e.g. ['cp1252', 'iso-8859-5'].
Returns:
The subset of 'characters' that can be encoded using one of the provided
encodings.
"""
for c in characters:
for encoding in encodings:
try:
c.encode(encoding)
yield c
except UnicodeEncodeError:
pass | Generates the subset of 'characters' that can be encoded by 'encodings'.
Args:
characters: The characters to check for encodeability e.g. 'abcd'.
encodings: The encodings to check against e.g. ['cp1252', 'iso-8859-5'].
Returns:
The subset of 'characters' that can be encoded using one of the provided
encodings. | Below is the instruction that describes the task:
### Input:
Generates the subset of 'characters' that can be encoded by 'encodings'.
Args:
characters: The characters to check for encodeability e.g. 'abcd'.
encodings: The encodings to check against e.g. ['cp1252', 'iso-8859-5'].
Returns:
The subset of 'characters' that can be encoded using one of the provided
encodings.
### Response:
def generate_encodeable_characters(characters: Iterable[str],
encodings: Iterable[str]) -> Iterable[str]:
"""Generates the subset of 'characters' that can be encoded by 'encodings'.
Args:
characters: The characters to check for encodeability e.g. 'abcd'.
encodings: The encodings to check against e.g. ['cp1252', 'iso-8859-5'].
Returns:
The subset of 'characters' that can be encoded using one of the provided
encodings.
"""
for c in characters:
for encoding in encodings:
try:
c.encode(encoding)
yield c
except UnicodeEncodeError:
pass |
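A quick check of the generator above, run with the function in scope; note that a character is yielded once per encoding that accepts it, so repeats are possible.
chars = 'aб€'                       # ASCII letter, Cyrillic letter, euro sign
encs = ['cp1252', 'iso-8859-5']
print(list(generate_encodeable_characters(chars, encs)))
# ['a', 'a', 'б', '€'] -- 'a' is accepted by both encodings, so it appears twice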
def load(name, path=None, ext="dat", silent=False):
"""
Loads an object from file with given name and extension.
Optionally the path can be specified as well.
"""
filename = __get_filename(path, name, ext)
if not os.path.exists(filename):
if not silent:
raise ValueException("Specified input filename doesn't exist.")
return None
with open(filename, "rb") as f:
return pickle.load(f) | Loads an object from file with given name and extension.
Optionally the path can be specified as well. | Below is the instruction that describes the task:
### Input:
Loads an object from file with given name and extension.
Optionally the path can be specified as well.
### Response:
def load(name, path=None, ext="dat", silent=False):
"""
Loads an object from file with given name and extension.
Optionally the path can be specified as well.
"""
filename = __get_filename(path, name, ext)
if not os.path.exists(filename):
if not silent:
raise ValueException("Specified input filename doesn't exist.")
return None
with open(filename, "rb") as f:
return pickle.load(f) |
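Only the load half appears in this row; the path helper and a matching save are implied by the call to __get_filename. A hedged sketch of what they might look like follows, with names guessed from the code above rather than taken from the library.
import os
import pickle

def __get_filename(path, name, ext):             # guessed helper, not the library's
    filename = "{}.{}".format(name, ext)
    return os.path.join(path, filename) if path else filename

def save(obj, name, path=None, ext="dat"):        # guessed counterpart to load()
    with open(__get_filename(path, name, ext), "wb") as f:
        pickle.dump(obj, f)

save({"answer": 42}, "example")                   # writes ./example.dat
# load("example") would then return {"answer": 42}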
def unconsume(self, seq):
'''Subtracts all k-mers in sequence.'''
for kmer in iter_kmers(seq, self.k, canonical=self.canonical):
self._decr(kmer) | Subtracts all k-mers in sequence. | Below is the instruction that describes the task:
### Input:
Subtracts all k-mers in sequence.
### Response:
def unconsume(self, seq):
'''Subtracts all k-mers in sequence.'''
for kmer in iter_kmers(seq, self.k, canonical=self.canonical):
self._decr(kmer) |
def StartCli(args, adb_commands, extra=None, **device_kwargs):
"""Starts a common CLI interface for this usb path and protocol."""
try:
dev = adb_commands()
dev.ConnectDevice(port_path=args.port_path, serial=args.serial, default_timeout_ms=args.timeout_ms,
**device_kwargs)
except usb_exceptions.DeviceNotFoundError as e:
print('No device found: {}'.format(e), file=sys.stderr)
return 1
except usb_exceptions.CommonUsbError as e:
print('Could not connect to device: {}'.format(e), file=sys.stderr)
return 1
try:
return _RunMethod(dev, args, extra or {})
except Exception as e: # pylint: disable=broad-except
sys.stdout.write(str(e))
return 1
finally:
dev.Close() | Starts a common CLI interface for this usb path and protocol. | Below is the instruction that describes the task:
### Input:
Starts a common CLI interface for this usb path and protocol.
### Response:
def StartCli(args, adb_commands, extra=None, **device_kwargs):
"""Starts a common CLI interface for this usb path and protocol."""
try:
dev = adb_commands()
dev.ConnectDevice(port_path=args.port_path, serial=args.serial, default_timeout_ms=args.timeout_ms,
**device_kwargs)
except usb_exceptions.DeviceNotFoundError as e:
print('No device found: {}'.format(e), file=sys.stderr)
return 1
except usb_exceptions.CommonUsbError as e:
print('Could not connect to device: {}'.format(e), file=sys.stderr)
return 1
try:
return _RunMethod(dev, args, extra or {})
except Exception as e: # pylint: disable=broad-except
sys.stdout.write(str(e))
return 1
finally:
dev.Close() |
def setColor(self, color):
'''Sets Card's color and escape code.'''
if color == 'blue':
self.color = 'blue'
self.colorCode = self.colors['blue']
self.colorCodeDark = self.colors['dblue']
elif color == 'red':
self.color = 'red'
self.colorCode = self.colors['red']
self.colorCodeDark = self.colors['dred']
elif color == 'yellow':
self.color = 'yellow'
self.colorCode = self.colors['yellow']
self.colorCodeDark = self.colors['dyellow']
elif color == 'green':
self.color = 'green'
self.colorCode = self.colors['green']
self.colorCodeDark = self.colors['dgreen']
elif color == 'wild': # No color modification
self.wild = True
self.color = 'wild'
self.colorCodeDark = self.colors['dwild']
self.colorCode = self.colors['wild'] | Sets Card's color and escape code. | Below is the instruction that describes the task:
### Input:
Sets Card's color and escape code.
### Response:
def setColor(self, color):
'''Sets Card's color and escape code.'''
if color == 'blue':
self.color = 'blue'
self.colorCode = self.colors['blue']
self.colorCodeDark = self.colors['dblue']
elif color == 'red':
self.color = 'red'
self.colorCode = self.colors['red']
self.colorCodeDark = self.colors['dred']
elif color == 'yellow':
self.color = 'yellow'
self.colorCode = self.colors['yellow']
self.colorCodeDark = self.colors['dyellow']
elif color == 'green':
self.color = 'green'
self.colorCode = self.colors['green']
self.colorCodeDark = self.colors['dgreen']
elif color == 'wild': # No color modification
self.wild = True
self.color = 'wild'
self.colorCodeDark = self.colors['dwild']
self.colorCode = self.colors['wild'] |
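The elif chain above repeats the same three assignments per colour. Assuming the same self.colors keys ('blue'/'dblue' through 'wild'/'dwild'), the mapping can be written once, as in this sketch of a table-driven variant of the method:
def setColor(self, color):
    '''Sets Card's color and escape code (table-driven sketch, same keys assumed).'''
    if color == 'wild':
        self.wild = True                         # wild cards keep no base colour
    self.color = color
    self.colorCode = self.colors[color]          # e.g. colors['blue']
    self.colorCodeDark = self.colors['d' + color]  # e.g. colors['dblue']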
def create_base_storage(self, logical_size, variant):
"""Starts creating a hard disk storage unit (fixed/dynamic, according
to the variant flags) in the background. The previous storage unit
created for this object, if any, must first be deleted using
:py:func:`delete_storage` , otherwise the operation will fail.
Before the operation starts, the medium is placed in
:py:attr:`MediumState.creating` state. If the create operation
fails, the medium will be placed back in :py:attr:`MediumState.not_created`
state.
After the returned progress object reports that the operation has
successfully completed, the medium state will be set to :py:attr:`MediumState.created` , the medium will be remembered by this
VirtualBox installation and may be attached to virtual machines.
in logical_size of type int
Maximum logical size of the medium in bytes.
in variant of type :class:`MediumVariant`
Exact image variant which should be created (as a combination of
:py:class:`MediumVariant` flags).
return progress of type :class:`IProgress`
Progress object to track the operation completion.
raises :class:`VBoxErrorNotSupported`
The variant of storage creation operation is not supported. See
"""
if not isinstance(logical_size, baseinteger):
raise TypeError("logical_size can only be an instance of type baseinteger")
if not isinstance(variant, list):
raise TypeError("variant can only be an instance of type list")
for a in variant[:10]:
if not isinstance(a, MediumVariant):
raise TypeError(
"array can only contain objects of type MediumVariant")
progress = self._call("createBaseStorage",
in_p=[logical_size, variant])
progress = IProgress(progress)
return progress | Starts creating a hard disk storage unit (fixed/dynamic, according
to the variant flags) in the background. The previous storage unit
created for this object, if any, must first be deleted using
:py:func:`delete_storage` , otherwise the operation will fail.
Before the operation starts, the medium is placed in
:py:attr:`MediumState.creating` state. If the create operation
fails, the medium will be placed back in :py:attr:`MediumState.not_created`
state.
After the returned progress object reports that the operation has
successfully completed, the medium state will be set to :py:attr:`MediumState.created` , the medium will be remembered by this
VirtualBox installation and may be attached to virtual machines.
in logical_size of type int
Maximum logical size of the medium in bytes.
in variant of type :class:`MediumVariant`
Exact image variant which should be created (as a combination of
:py:class:`MediumVariant` flags).
return progress of type :class:`IProgress`
Progress object to track the operation completion.
raises :class:`VBoxErrorNotSupported`
The variant of storage creation operation is not supported. See | Below is the instruction that describes the task:
### Input:
Starts creating a hard disk storage unit (fixed/dynamic, according
to the variant flags) in the background. The previous storage unit
created for this object, if any, must first be deleted using
:py:func:`delete_storage` , otherwise the operation will fail.
Before the operation starts, the medium is placed in
:py:attr:`MediumState.creating` state. If the create operation
fails, the medium will be placed back in :py:attr:`MediumState.not_created`
state.
After the returned progress object reports that the operation has
successfully completed, the medium state will be set to :py:attr:`MediumState.created` , the medium will be remembered by this
VirtualBox installation and may be attached to virtual machines.
in logical_size of type int
Maximum logical size of the medium in bytes.
in variant of type :class:`MediumVariant`
Exact image variant which should be created (as a combination of
:py:class:`MediumVariant` flags).
return progress of type :class:`IProgress`
Progress object to track the operation completion.
raises :class:`VBoxErrorNotSupported`
The variant of storage creation operation is not supported. See
### Response:
def create_base_storage(self, logical_size, variant):
"""Starts creating a hard disk storage unit (fixed/dynamic, according
to the variant flags) in the background. The previous storage unit
created for this object, if any, must first be deleted using
:py:func:`delete_storage` , otherwise the operation will fail.
Before the operation starts, the medium is placed in
:py:attr:`MediumState.creating` state. If the create operation
fails, the medium will be placed back in :py:attr:`MediumState.not_created`
state.
After the returned progress object reports that the operation has
successfully completed, the medium state will be set to :py:attr:`MediumState.created` , the medium will be remembered by this
VirtualBox installation and may be attached to virtual machines.
in logical_size of type int
Maximum logical size of the medium in bytes.
in variant of type :class:`MediumVariant`
Exact image variant which should be created (as a combination of
:py:class:`MediumVariant` flags).
return progress of type :class:`IProgress`
Progress object to track the operation completion.
raises :class:`VBoxErrorNotSupported`
The variant of storage creation operation is not supported. See
"""
if not isinstance(logical_size, baseinteger):
raise TypeError("logical_size can only be an instance of type baseinteger")
if not isinstance(variant, list):
raise TypeError("variant can only be an instance of type list")
for a in variant[:10]:
if not isinstance(a, MediumVariant):
raise TypeError(
"array can only contain objects of type MediumVariant")
progress = self._call("createBaseStorage",
in_p=[logical_size, variant])
progress = IProgress(progress)
return progress |
def has_scope(context=None):
'''
Scopes were introduced in systemd 205, this function returns a boolean
which is true when the minion is systemd-booted and running systemd>=205.
'''
if not booted(context):
return False
_sd_version = version(context)
if _sd_version is None:
return False
return _sd_version >= 205 | Scopes were introduced in systemd 205, this function returns a boolean
which is true when the minion is systemd-booted and running systemd>=205. | Below is the instruction that describes the task:
### Input:
Scopes were introduced in systemd 205, this function returns a boolean
which is true when the minion is systemd-booted and running systemd>=205.
### Response:
def has_scope(context=None):
'''
Scopes were introduced in systemd 205, this function returns a boolean
which is true when the minion is systemd-booted and running systemd>=205.
'''
if not booted(context):
return False
_sd_version = version(context)
if _sd_version is None:
return False
return _sd_version >= 205 |
def docker_start(develop=True):
"""
Start docker container
"""
curr_dir = os.path.dirname(os.path.realpath(__file__))
local('docker run --rm --name pynb -d -ti -p 127.0.0.1:8889:8888 -v {}:/code -t pynb'.format(curr_dir))
if develop:
# Install package in develop mode: the code in /code is mapped to the installed package.
docker_exec('python3 setup.py develop')
print('Jupyter available at http://127.0.0.1:8889') | Start docker container | Below is the instruction that describes the task:
### Input:
Start docker container
### Response:
def docker_start(develop=True):
"""
Start docker container
"""
curr_dir = os.path.dirname(os.path.realpath(__file__))
local('docker run --rm --name pynb -d -ti -p 127.0.0.1:8889:8888 -v {}:/code -t pynb'.format(curr_dir))
if develop:
# Install package in develop mode: the code in /code is mapped to the installed package.
docker_exec('python3 setup.py develop')
print('Jupyter available at http://127.0.0.1:8889') |
def choices(klass):
"""
Decorator to set `CHOICES` and other attributes
"""
_choices = []
for attr in user_attributes(klass.Meta):
val = getattr(klass.Meta, attr)
setattr(klass, attr, val[0])
_choices.append((val[0], val[1]))
setattr(klass, 'CHOICES', tuple(_choices))
return klass | Decorator to set `CHOICES` and other attributes | Below is the instruction that describes the task:
### Input:
Decorator to set `CHOICES` and other attributes
### Response:
def choices(klass):
"""
Decorator to set `CHOICES` and other attributes
"""
_choices = []
for attr in user_attributes(klass.Meta):
val = getattr(klass.Meta, attr)
setattr(klass, attr, val[0])
_choices.append((val[0], val[1]))
setattr(klass, 'CHOICES', tuple(_choices))
return klass |
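An illustrative use of the decorator above; the Status class and its values are invented, and user_attributes is assumed to return the non-dunder attributes of Meta.
@choices
class Status(object):
    class Meta:
        DRAFT = (0, 'Draft')
        PUBLISHED = (1, 'Published')

print(Status.DRAFT)      # 0
print(Status.CHOICES)    # ((0, 'Draft'), (1, 'Published'))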
def oridam_generate_patterns(word_in,cm,ed=1,level=0,pos=0,candidates=None):
""" ed = 1 by default, pos - internal variable for algorithm """
alternates = cm.get(word_in[pos],[])
if not candidates:
candidates = []
assert ed <= len(word_in), 'edit distance has to be comparable to word size [ins/del not explored]'
if (pos >len(word_in)) or ed == 0:
return candidates
pfx = ''
sfx = ''
curr_candidates = []
for p in range(0,pos):
pfx = pfx + word_in[p]
for p in range(pos+1,len(word_in)):
sfx = sfx + word_in[p]
for alt in alternates:
word_alt = pfx + alt + sfx
if not (word_alt in candidates):
candidates.append( word_alt )
curr_candidates.append( word_alt )
for n_pos in range(pos,len(word_in)):
# already what we have ' candidates ' of this round are edit-distance 1
for word in curr_candidates:
oridam_generate_patterns(word,cm,ed-1,level+1,n_pos,candidates)
if level == 0:
#candidates.append(word_in)
for n_pos in range(pos,len(word_in)):
oridam_generate_patterns(word_in,cm,ed, level+1,n_pos,candidates)
return candidates | ed = 1 by default, pos - internal variable for algorithm | Below is the instruction that describes the task:
### Input:
ed = 1 by default, pos - internal variable for algorithm
### Response:
def oridam_generate_patterns(word_in,cm,ed=1,level=0,pos=0,candidates=None):
""" ed = 1 by default, pos - internal variable for algorithm """
alternates = cm.get(word_in[pos],[])
if not candidates:
candidates = []
assert ed <= len(word_in), 'edit distance has to be comparable to word size [ins/del not explored]'
if (pos >len(word_in)) or ed == 0:
return candidates
pfx = ''
sfx = ''
curr_candidates = []
for p in range(0,pos):
pfx = pfx + word_in[p]
for p in range(pos+1,len(word_in)):
sfx = sfx + word_in[p]
for alt in alternates:
word_alt = pfx + alt + sfx
if not (word_alt in candidates):
candidates.append( word_alt )
curr_candidates.append( word_alt )
for n_pos in range(pos,len(word_in)):
# already what we have ' candidates ' of this round are edit-distance 1
for word in curr_candidates:
oridam_generate_patterns(word,cm,ed-1,level+1,n_pos,candidates)
if level == 0:
#candidates.append(word_in)
for n_pos in range(pos,len(word_in)):
oridam_generate_patterns(word_in,cm,ed, level+1,n_pos,candidates)
return candidates |
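Here cm acts as a confusion map from a character to its likely substitutes. With the invented two-entry map below, the edit-distance-1 candidates for 'l0l' come out as shown; the original word itself is not included in the result.
cm = {'l': ['1', 'i'], '0': ['o']}      # invented OCR-style confusion map
print(oridam_generate_patterns('l0l', cm, ed=1))
# ['10l', 'i0l', 'lol', 'l01', 'l0i']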
def receive_loop_with_callback(self, queue_name, callback):
"""
Process incoming messages with callback until close is called.
:param queue_name: str: name of the queue to poll
:param callback: func(ch, method, properties, body) called with data when data arrives
:return:
"""
self.connect()
channel = self.create_channel(queue_name)
channel.basic_qos(prefetch_count=1)
channel.basic_consume(callback, queue=queue_name)
channel.start_consuming() | Process incoming messages with callback until close is called.
:param queue_name: str: name of the queue to poll
:param callback: func(ch, method, properties, body) called with data when data arrives
:return: | Below is the instruction that describes the task:
### Input:
Process incoming messages with callback until close is called.
:param queue_name: str: name of the queue to poll
:param callback: func(ch, method, properties, body) called with data when data arrives
:return:
### Response:
def receive_loop_with_callback(self, queue_name, callback):
"""
Process incoming messages with callback until close is called.
:param queue_name: str: name of the queue to poll
:param callback: func(ch, method, properties, body) called with data when data arrives
:return:
"""
self.connect()
channel = self.create_channel(queue_name)
channel.basic_qos(prefetch_count=1)
channel.basic_consume(callback, queue=queue_name)
channel.start_consuming() |
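A callback with the (ch, method, properties, body) signature the consumer above expects; the body assumes a pika-style channel, so basic_ack is available on ch.
def handle_message(ch, method, properties, body):
    print("received:", body.decode("utf-8"))
    ch.basic_ack(delivery_tag=method.delivery_tag)   # acknowledge once processed

# client.receive_loop_with_callback("work_queue", handle_message)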
def _get_all_resource_attributes(network_id, template_id=None):
"""
Get all the attributes for the nodes, links and groups of a network.
Return these attributes as a dictionary, keyed on type (NODE, LINK, GROUP)
then by ID of the node or link.
"""
base_qry = db.DBSession.query(
ResourceAttr.id.label('id'),
ResourceAttr.ref_key.label('ref_key'),
ResourceAttr.cr_date.label('cr_date'),
ResourceAttr.attr_is_var.label('attr_is_var'),
ResourceAttr.node_id.label('node_id'),
ResourceAttr.link_id.label('link_id'),
ResourceAttr.group_id.label('group_id'),
ResourceAttr.network_id.label('network_id'),
ResourceAttr.attr_id.label('attr_id'),
Attr.name.label('name'),
Attr.dimension_id.label('dimension_id'),
).filter(Attr.id==ResourceAttr.attr_id)
all_node_attribute_qry = base_qry.join(Node).filter(Node.network_id==network_id)
all_link_attribute_qry = base_qry.join(Link).filter(Link.network_id==network_id)
all_group_attribute_qry = base_qry.join(ResourceGroup).filter(ResourceGroup.network_id==network_id)
network_attribute_qry = base_qry.filter(ResourceAttr.network_id==network_id)
#Filter the group attributes by template
if template_id is not None:
all_node_attribute_qry = all_node_attribute_qry.join(ResourceType).join(TemplateType).join(TypeAttr).filter(TemplateType.template_id==template_id).filter(ResourceAttr.attr_id==TypeAttr.attr_id)
all_link_attribute_qry = all_link_attribute_qry.join(ResourceType).join(TemplateType).join(TypeAttr).filter(TemplateType.template_id==template_id).filter(ResourceAttr.attr_id==TypeAttr.attr_id)
all_group_attribute_qry = all_group_attribute_qry.join(ResourceType).join(TemplateType).join(TypeAttr).filter(TemplateType.template_id==template_id).filter(ResourceAttr.attr_id==TypeAttr.attr_id)
network_attribute_qry = network_attribute_qry.join(ResourceType, ResourceAttr.network_id==ResourceType.network_id).join(TemplateType).join(TypeAttr).filter(TemplateType.template_id==template_id).filter(ResourceAttr.attr_id==TypeAttr.attr_id)
x = time.time()
logging.info("Getting all attributes using execute")
attribute_qry = all_node_attribute_qry.union(all_link_attribute_qry, all_group_attribute_qry, network_attribute_qry)
all_attributes = db.DBSession.execute(attribute_qry.statement).fetchall()
log.info("%s attrs retrieved in %s", len(all_attributes), time.time()-x)
logging.info("Attributes retrieved. Processing results...")
x = time.time()
node_attr_dict = dict()
link_attr_dict = dict()
group_attr_dict = dict()
network_attr_dict = dict()
for attr in all_attributes:
if attr.ref_key == 'NODE':
nodeattr = node_attr_dict.get(attr.node_id, [])
nodeattr.append(attr)
node_attr_dict[attr.node_id] = nodeattr
elif attr.ref_key == 'LINK':
linkattr = link_attr_dict.get(attr.link_id, [])
linkattr.append(attr)
link_attr_dict[attr.link_id] = linkattr
elif attr.ref_key == 'GROUP':
groupattr = group_attr_dict.get(attr.group_id, [])
groupattr.append(attr)
group_attr_dict[attr.group_id] = groupattr
elif attr.ref_key == 'NETWORK':
networkattr = network_attr_dict.get(attr.network_id, [])
networkattr.append(attr)
network_attr_dict[attr.network_id] = networkattr
all_attributes = {
'NODE' : node_attr_dict,
'LINK' : link_attr_dict,
'GROUP': group_attr_dict,
'NETWORK': network_attr_dict,
}
logging.info("Attributes processed in %s", time.time()-x)
return all_attributes | Get all the attributes for the nodes, links and groups of a network.
Return these attributes as a dictionary, keyed on type (NODE, LINK, GROUP)
then by ID of the node or link. | Below is the instruction that describes the task:
### Input:
Get all the attributes for the nodes, links and groups of a network.
Return these attributes as a dictionary, keyed on type (NODE, LINK, GROUP)
then by ID of the node or link.
### Response:
def _get_all_resource_attributes(network_id, template_id=None):
"""
Get all the attributes for the nodes, links and groups of a network.
Return these attributes as a dictionary, keyed on type (NODE, LINK, GROUP)
then by ID of the node or link.
"""
base_qry = db.DBSession.query(
ResourceAttr.id.label('id'),
ResourceAttr.ref_key.label('ref_key'),
ResourceAttr.cr_date.label('cr_date'),
ResourceAttr.attr_is_var.label('attr_is_var'),
ResourceAttr.node_id.label('node_id'),
ResourceAttr.link_id.label('link_id'),
ResourceAttr.group_id.label('group_id'),
ResourceAttr.network_id.label('network_id'),
ResourceAttr.attr_id.label('attr_id'),
Attr.name.label('name'),
Attr.dimension_id.label('dimension_id'),
).filter(Attr.id==ResourceAttr.attr_id)
all_node_attribute_qry = base_qry.join(Node).filter(Node.network_id==network_id)
all_link_attribute_qry = base_qry.join(Link).filter(Link.network_id==network_id)
all_group_attribute_qry = base_qry.join(ResourceGroup).filter(ResourceGroup.network_id==network_id)
network_attribute_qry = base_qry.filter(ResourceAttr.network_id==network_id)
#Filter the group attributes by template
if template_id is not None:
all_node_attribute_qry = all_node_attribute_qry.join(ResourceType).join(TemplateType).join(TypeAttr).filter(TemplateType.template_id==template_id).filter(ResourceAttr.attr_id==TypeAttr.attr_id)
all_link_attribute_qry = all_link_attribute_qry.join(ResourceType).join(TemplateType).join(TypeAttr).filter(TemplateType.template_id==template_id).filter(ResourceAttr.attr_id==TypeAttr.attr_id)
all_group_attribute_qry = all_group_attribute_qry.join(ResourceType).join(TemplateType).join(TypeAttr).filter(TemplateType.template_id==template_id).filter(ResourceAttr.attr_id==TypeAttr.attr_id)
network_attribute_qry = network_attribute_qry.join(ResourceType, ResourceAttr.network_id==ResourceType.network_id).join(TemplateType).join(TypeAttr).filter(TemplateType.template_id==template_id).filter(ResourceAttr.attr_id==TypeAttr.attr_id)
x = time.time()
logging.info("Getting all attributes using execute")
attribute_qry = all_node_attribute_qry.union(all_link_attribute_qry, all_group_attribute_qry, network_attribute_qry)
all_attributes = db.DBSession.execute(attribute_qry.statement).fetchall()
log.info("%s attrs retrieved in %s", len(all_attributes), time.time()-x)
logging.info("Attributes retrieved. Processing results...")
x = time.time()
node_attr_dict = dict()
link_attr_dict = dict()
group_attr_dict = dict()
network_attr_dict = dict()
for attr in all_attributes:
if attr.ref_key == 'NODE':
nodeattr = node_attr_dict.get(attr.node_id, [])
nodeattr.append(attr)
node_attr_dict[attr.node_id] = nodeattr
elif attr.ref_key == 'LINK':
linkattr = link_attr_dict.get(attr.link_id, [])
linkattr.append(attr)
link_attr_dict[attr.link_id] = linkattr
elif attr.ref_key == 'GROUP':
groupattr = group_attr_dict.get(attr.group_id, [])
groupattr.append(attr)
group_attr_dict[attr.group_id] = groupattr
elif attr.ref_key == 'NETWORK':
networkattr = network_attr_dict.get(attr.network_id, [])
networkattr.append(attr)
network_attr_dict[attr.network_id] = networkattr
all_attributes = {
'NODE' : node_attr_dict,
'LINK' : link_attr_dict,
'GROUP': group_attr_dict,
'NETWORK': network_attr_dict,
}
logging.info("Attributes processed in %s", time.time()-x)
return all_attributes |
def rerun(client, revision, roots, siblings, inputs, paths):
"""Recreate files generated by a sequence of ``run`` commands."""
graph = Graph(client)
outputs = graph.build(paths=paths, revision=revision)
# Check or extend siblings of outputs.
outputs = siblings(graph, outputs)
output_paths = {node.path for node in outputs}
# Normalize and check all starting paths.
roots = {graph.normalize_path(root) for root in roots}
assert not roots & output_paths, '--from collides with output paths'
# Generate workflow and check inputs.
# NOTE The workflow creation is done before opening a new file.
workflow = inputs(
client,
graph.ascwl(
input_paths=roots,
output_paths=output_paths,
outputs=outputs,
)
)
# Make sure all inputs are pulled from a storage.
client.pull_paths_from_storage(
*(path for _, path in workflow.iter_input_files(client.workflow_path))
)
# Store the generated workflow used for updating paths.
import yaml
output_file = client.workflow_path / '{0}.cwl'.format(uuid.uuid4().hex)
with output_file.open('w') as f:
f.write(
yaml.dump(
ascwl(
workflow,
filter=lambda _, x: x is not None,
basedir=client.workflow_path,
),
default_flow_style=False
)
)
# Execute the workflow and relocate all output files.
from ._cwl import execute
# FIXME get new output paths for edited tools
# output_paths = {path for _, path in workflow.iter_output_files()}
execute(
client,
output_file,
output_paths=output_paths,
) | Recreate files generated by a sequence of ``run`` commands. | Below is the instruction that describes the task:
### Input:
Recreate files generated by a sequence of ``run`` commands.
### Response:
def rerun(client, revision, roots, siblings, inputs, paths):
"""Recreate files generated by a sequence of ``run`` commands."""
graph = Graph(client)
outputs = graph.build(paths=paths, revision=revision)
# Check or extend siblings of outputs.
outputs = siblings(graph, outputs)
output_paths = {node.path for node in outputs}
# Normalize and check all starting paths.
roots = {graph.normalize_path(root) for root in roots}
assert not roots & output_paths, '--from collides with output paths'
# Generate workflow and check inputs.
# NOTE The workflow creation is done before opening a new file.
workflow = inputs(
client,
graph.ascwl(
input_paths=roots,
output_paths=output_paths,
outputs=outputs,
)
)
# Make sure all inputs are pulled from a storage.
client.pull_paths_from_storage(
*(path for _, path in workflow.iter_input_files(client.workflow_path))
)
# Store the generated workflow used for updating paths.
import yaml
output_file = client.workflow_path / '{0}.cwl'.format(uuid.uuid4().hex)
with output_file.open('w') as f:
f.write(
yaml.dump(
ascwl(
workflow,
filter=lambda _, x: x is not None,
basedir=client.workflow_path,
),
default_flow_style=False
)
)
# Execute the workflow and relocate all output files.
from ._cwl import execute
# FIXME get new output paths for edited tools
# output_paths = {path for _, path in workflow.iter_output_files()}
execute(
client,
output_file,
output_paths=output_paths,
) |
def get_track_by_mbid(self, mbid):
"""Looks up a track by its MusicBrainz ID"""
params = {"mbid": mbid}
doc = _Request(self, "track.getInfo", params).execute(True)
return Track(_extract(doc, "name", 1), _extract(doc, "name"), self) | Looks up a track by its MusicBrainz ID | Below is the instruction that describes the task:
### Input:
Looks up a track by its MusicBrainz ID
### Response:
def get_track_by_mbid(self, mbid):
"""Looks up a track by its MusicBrainz ID"""
params = {"mbid": mbid}
doc = _Request(self, "track.getInfo", params).execute(True)
return Track(_extract(doc, "name", 1), _extract(doc, "name"), self) |
def ds2json(ds, u_var, v_var, lat_dim='latitude', lon_dim='longitude', units=None):
"""
Assumes that the velocity components are given on a regular grid
(fixed spacing in latitude and longitude).
Parameters
----------
u_var : str
Name of the U-component (zonal) variable.
v_var : str
Name of the V-component (meridional) variable.
lat_dim : str, optional
Name of the latitude dimension/coordinate
(default: 'latitude').
lon_dim : str, optional
Name of the longitude dimension/coordinate
(default: 'longitude').
units : str, optional
Velocity units (default: try getting units from the
'units' attributes of `u_var` and `v_var`).
"""
import numpy as np
ds = ds.copy()
for var_name in (u_var, v_var):
var_dims = ds[var_name].dims
if set(var_dims) != set([lat_dim, lon_dim]):
raise ValueError(
"Invalid dimensions for variable '{}' in Dataset: "
"should include only {}, found {}."
.format(var_name, (lat_dim, lon_dim), var_dims)
)
# If dataset contains nans replace with 0
ds[var_name] = ds[var_name].fillna(0)
if units is None:
u_var_units = ds[u_var].attrs.get('units')
v_var_units = ds[v_var].attrs.get('units')
if u_var_units != v_var_units:
raise ValueError(
"Different units found for U-component '{}' and "
"V-component '{}' variables: '{}' and '{}'"
.format(u_var, v_var, u_var_units, v_var_units))
units = u_var_units
if units is None:
units = ''
# Data should be in gaussian grid format (latitudes descending)
if np.any(np.diff(ds[lat_dim].values) >= 0):
ds = ds.sel(**{lat_dim: slice(None, None, -1)})
# infer grid specifications (assume a rectangular grid)
lat = ds[lat_dim].values
lon = ds[lon_dim].values
lon_left = float(lon.min())
lon_right = float(lon.max())
lat_lower = float(lat.min())
lat_upper = float(lat.max())
dx = float((lon_right - lon_left) / (lon.size - 1))
dy = float((lat_upper - lat_lower) / (lat.size - 1))
nx = lon.size
ny = lat.size
u_v_spec = ([2, 3],
["Eastward current", "Northward current"],
[u_var, v_var])
velocity_data = []
for p_number, p_name, var_name in zip(*u_v_spec):
velocity_data.append({
"header": {
"parameterUnit": units,
"parameterNumber": p_number,
"dx": dx, "dy": dy,
"parameterNumberName": p_name,
"la1": lat_upper,
"la2": lat_lower,
"parameterCategory": 2,
"lo2": lon_right,
"nx": nx,
"ny": ny,
"refTime": "2017-02-01 23:00:00",
"lo1": lon_left
},
"data": ds[var_name].values.flatten().tolist()
})
return velocity_data | Assumes that the velocity components are given on a regular grid
(fixed spacing in latitude and longitude).
Parameters
----------
u_var : str
Name of the U-component (zonal) variable.
v_var : str
Name of the V-component (meridional) variable.
lat_dim : str, optional
Name of the latitude dimension/coordinate
(default: 'latitude').
lon_dim : str, optional
Name of the longitude dimension/coordinate
(default: 'longitude').
units : str, optional
Velocity units (default: try getting units from the
'units' attributes of `u_var` and `v_var`). | Below is the instruction that describes the task:
### Input:
Assumes that the velocity components are given on a regular grid
(fixed spacing in latitude and longitude).
Parameters
----------
u_var : str
Name of the U-component (zonal) variable.
v_var : str
Name of the V-component (meridional) variable.
lat_dim : str, optional
Name of the latitude dimension/coordinate
(default: 'latitude').
lon_dim : str, optional
Name of the longitude dimension/coordinate
(default: 'longitude').
units : str, optional
Velocity units (default: try getting units from the
'units' attributes of `u_var` and `v_var`).
### Response:
def ds2json(ds, u_var, v_var, lat_dim='latitude', lon_dim='longitude', units=None):
"""
Assumes that the velocity components are given on a regular grid
(fixed spacing in latitude and longitude).
Parameters
----------
u_var : str
Name of the U-component (zonal) variable.
v_var : str
Name of the V-component (meridional) variable.
lat_dim : str, optional
Name of the latitude dimension/coordinate
(default: 'latitude').
lon_dim : str, optional
Name of the longitude dimension/coordinate
(default: 'longitude').
units : str, optional
Velocity units (default: try getting units from the
'units' attributes of `u_var` and `v_var`).
"""
import numpy as np
ds = ds.copy()
for var_name in (u_var, v_var):
var_dims = ds[var_name].dims
if set(var_dims) != set([lat_dim, lon_dim]):
raise ValueError(
"Invalid dimensions for variable '{}' in Dataset: "
"should include only {}, found {}."
.format(var_name, (lat_dim, lon_dim), var_dims)
)
# If dataset contains nans replace with 0
ds[var_name] = ds[var_name].fillna(0)
if units is None:
u_var_units = ds[u_var].attrs.get('units')
v_var_units = ds[v_var].attrs.get('units')
if u_var_units != v_var_units:
raise ValueError(
"Different units found for U-component '{}' and "
"V-component '{}' variables: '{}' and '{}'"
.format(u_var, v_var, u_var_units, v_var_units))
units = u_var_units
if units is None:
units = ''
# Data should be in gaussian grid format (latitudes descending)
if np.any(np.diff(ds[lat_dim].values) >= 0):
ds = ds.sel(**{lat_dim: slice(None, None, -1)})
# infer grid specifications (assume a rectangular grid)
lat = ds[lat_dim].values
lon = ds[lon_dim].values
lon_left = float(lon.min())
lon_right = float(lon.max())
lat_lower = float(lat.min())
lat_upper = float(lat.max())
dx = float((lon_right - lon_left) / (lon.size - 1))
dy = float((lat_upper - lat_lower) / (lat.size - 1))
nx = lon.size
ny = lat.size
u_v_spec = ([2, 3],
["Eastward current", "Northward current"],
[u_var, v_var])
velocity_data = []
for p_number, p_name, var_name in zip(*u_v_spec):
velocity_data.append({
"header": {
"parameterUnit": units,
"parameterNumber": p_number,
"dx": dx, "dy": dy,
"parameterNumberName": p_name,
"la1": lat_upper,
"la2": lat_lower,
"parameterCategory": 2,
"lo2": lon_right,
"nx": nx,
"ny": ny,
"refTime": "2017-02-01 23:00:00",
"lo1": lon_left
},
"data": ds[var_name].values.flatten().tolist()
})
return velocity_data |
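A synthetic run of ds2json; the 2x2 grid and the variable names are invented for the demonstration, and units is passed explicitly so no attribute lookup is needed.
import numpy as np
import xarray as xr

demo = xr.Dataset(
    {"u": (("latitude", "longitude"), np.ones((2, 2))),
     "v": (("latitude", "longitude"), np.zeros((2, 2)))},
    coords={"latitude": [10.0, 20.0], "longitude": [30.0, 40.0]},
)
msgs = ds2json(demo, "u", "v", units="m s-1")
print(msgs[0]["header"]["nx"], msgs[0]["header"]["ny"])   # 2 2
print(msgs[1]["data"])                                    # [0.0, 0.0, 0.0, 0.0]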
def is_executable(path):
'''is the given path executable?'''
return (stat.S_IXUSR & os.stat(path)[stat.ST_MODE]
or stat.S_IXGRP & os.stat(path)[stat.ST_MODE]
or stat.S_IXOTH & os.stat(path)[stat.ST_MODE]) | is the given path executable? | Below is the instruction that describes the task:
### Input:
is the given path executable?
### Response:
def is_executable(path):
'''is the given path executable?'''
return (stat.S_IXUSR & os.stat(path)[stat.ST_MODE]
or stat.S_IXGRP & os.stat(path)[stat.ST_MODE]
or stat.S_IXOTH & os.stat(path)[stat.ST_MODE]) |
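The same test can be written with a single stat call, and os.access(path, os.X_OK) is the usual alternative when the effective permissions of the calling process are what matter. A sketch:
import os
import stat

def is_executable_single_stat(path):
    mode = os.stat(path).st_mode
    return bool(mode & (stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH))

print(is_executable_single_stat("/bin/sh"))   # True on most Unix systems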
def run(self):
""" Fonctionnement du thread """
if self.debug:
print("Starting " + self.name)
# Launch the thread's program
if isinstance(self.function, str):
globals()[self.function](*self.args, **self.kwargs)
else:
self.function(*self.args, **self.kwargs)
if self.debug:
print("Exiting " + self.name) | Fonctionnement du thread | Below is the the instruction that describes the task:
### Input:
Thread operation
### Response:
def run(self):
""" Fonctionnement du thread """
if self.debug:
print("Starting " + self.name)
# Launch the thread's program
if isinstance(self.function, str):
globals()[self.function](*self.args, **self.kwargs)
else:
self.function(*self.args, **self.kwargs)
if self.debug:
print("Exiting " + self.name) |
def smooth(x, rho, penalty, axis=0, newshape=None):
"""
Applies a smoothing operator along one dimension
currently only accepts a matrix as input
Parameters
----------
penalty : float
axis : int, optional
Axis along which to apply the smoothing (Default: 0)
newshape : tuple, optional
Desired shape of the parameters to apply the nuclear norm to. The given
parameters are reshaped to an array with this shape, or not reshaped if
the value of newshape is None. (Default: None)
"""
orig_shape = x.shape
if newshape is not None:
x = x.reshape(newshape)
# Apply Laplacian smoothing (l2 norm on the parameters multiplied by
# the laplacian)
n = x.shape[axis]
lap_op = spdiags([(2 + rho / penalty) * np.ones(n),
-1 * np.ones(n), -1 * np.ones(n)],
[0, -1, 1], n, n, format='csc')
A = penalty * lap_op
b = rho * np.rollaxis(x, axis, 0)
return np.rollaxis(spsolve(A, b), axis, 0).reshape(orig_shape) | Applies a smoothing operator along one dimension
currently only accepts a matrix as input
Parameters
----------
penalty : float
axis : int, optional
Axis along which to apply the smoothing (Default: 0)
newshape : tuple, optional
Desired shape of the parameters to apply the nuclear norm to. The given
parameters are reshaped to an array with this shape, or not reshaped if
the value of newshape is None. (Default: None) | Below is the instruction that describes the task:
### Input:
Applies a smoothing operator along one dimension
currently only accepts a matrix as input
Parameters
----------
penalty : float
axis : int, optional
Axis along which to apply the smoothing (Default: 0)
newshape : tuple, optional
Desired shape of the parameters to apply the nuclear norm to. The given
parameters are reshaped to an array with this shape, or not reshaped if
the value of newshape is None. (Default: None)
### Response:
def smooth(x, rho, penalty, axis=0, newshape=None):
"""
Applies a smoothing operator along one dimension
currently only accepts a matrix as input
Parameters
----------
penalty : float
axis : int, optional
Axis along which to apply the smoothing (Default: 0)
newshape : tuple, optional
Desired shape of the parameters to apply the nuclear norm to. The given
parameters are reshaped to an array with this shape, or not reshaped if
the value of newshape is None. (Default: None)
"""
orig_shape = x.shape
if newshape is not None:
x = x.reshape(newshape)
# Apply Laplacian smoothing (l2 norm on the parameters multiplied by
# the laplacian)
n = x.shape[axis]
lap_op = spdiags([(2 + rho / penalty) * np.ones(n),
-1 * np.ones(n), -1 * np.ones(n)],
[0, -1, 1], n, n, format='csc')
A = penalty * lap_op
b = rho * np.rollaxis(x, axis, 0)
return np.rollaxis(spsolve(A, b), axis, 0).reshape(orig_shape) |
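A self-contained sketch of the Laplacian-smoothing solve used above, applied to a noisy 1-D signal; the rho and penalty values are arbitrary choices for illustration.

import numpy as np
from scipy.sparse import spdiags
from scipy.sparse.linalg import spsolve

n = 100
rho, penalty = 1.0, 5.0
noisy = np.sin(np.linspace(0, 4 * np.pi, n)) + 0.3 * np.random.randn(n)
# tridiagonal (2 + rho/penalty, -1, -1) operator, as in the smoother above
lap_op = spdiags([(2 + rho / penalty) * np.ones(n),
                  -np.ones(n), -np.ones(n)], [0, -1, 1], n, n, format='csc')
smoothed = spsolve(penalty * lap_op, rho * noisy)
print(smoothed.shape)  # (100,)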
def _output(self, file_like_object, path=None):
"""Display or save file like object."""
if not path:
self._output_to_display(file_like_object)
else:
self._output_to_file(file_like_object, path) | Display or save file like object. | Below is the the instruction that describes the task:
### Input:
Display or save file like object.
### Response:
def _output(self, file_like_object, path=None):
"""Display or save file like object."""
if not path:
self._output_to_display(file_like_object)
else:
self._output_to_file(file_like_object, path) |
def device(dev, stats=False, config=False, internals=False, superblock=False):
'''
Check the state of a single bcache device
CLI example:
.. code-block:: bash
salt '*' bcache.device bcache0
salt '*' bcache.device /dev/sdc stats=True
:param stats: include statistics
:param settings: include all settings
:param internals: include all internals
:param superblock: include superblock info
'''
result = {}
    if not _sysfs_attr(_bcpath(dev), None, 'error', '{0} is not a bcache of any kind'.format(dev)):
return False
elif _bcsys(dev, 'set'):
# ---------------- It's the cache itself ----------------
result['uuid'] = uuid()
base_attr = ['block_size', 'bucket_size', 'cache_available_percent', 'cache_replacement_policy', 'congested']
# ---------------- Parse through both the blockdev & the FS ----------------
result.update(_sysfs_parse(_bcpath(dev), base_attr, stats, config, internals))
result.update(_sysfs_parse(_fspath(), base_attr, stats, config, internals))
result.update(result.pop('base'))
else:
# ---------------- It's a backing device ----------------
back_uuid = uuid(dev)
if back_uuid is not None:
result['cache'] = back_uuid
try:
result['dev'] = os.path.basename(_bcsys(dev, 'dev'))
except Exception:
pass
result['bdev'] = _bdev(dev)
base_attr = ['cache_mode', 'running', 'state', 'writeback_running']
base_path = _bcpath(dev)
result.update(_sysfs_parse(base_path, base_attr, stats, config, internals))
result.update(result.pop('base'))
# ---------------- Modifications ----------------
state = [result['state']]
if result.pop('running'):
state.append('running')
else:
state.append('stopped')
if 'writeback_running' in result:
if result.pop('writeback_running'):
state.append('writeback_running')
else:
state.append('writeback_stopped')
result['state'] = state
# ---------------- Statistics ----------------
if 'stats' in result:
replre = r'(stats|cache)_'
statres = result['stats']
for attr in result['stats']:
if '/' not in attr:
key = re.sub(replre, '', attr)
statres[key] = statres.pop(attr)
else:
stat, key = attr.split('/', 1)
stat = re.sub(replre, '', stat)
key = re.sub(replre, '', key)
if stat not in statres:
statres[stat] = {}
statres[stat][key] = statres.pop(attr)
result['stats'] = statres
# ---------------- Internals ----------------
if internals:
interres = result.pop('inter_ro', {})
interres.update(result.pop('inter_rw', {}))
if interres:
for key in interres:
if key.startswith('internal'):
nkey = re.sub(r'internal[s/]*', '', key)
interres[nkey] = interres.pop(key)
key = nkey
if key.startswith(('btree', 'writeback')):
mkey, skey = re.split(r'_', key, maxsplit=1)
if mkey not in interres:
interres[mkey] = {}
interres[mkey][skey] = interres.pop(key)
result['internals'] = interres
# ---------------- Config ----------------
if config:
configres = result['config']
for key in configres:
if key.startswith('writeback'):
mkey, skey = re.split(r'_', key, maxsplit=1)
if mkey not in configres:
configres[mkey] = {}
configres[mkey][skey] = configres.pop(key)
result['config'] = configres
# ---------------- Superblock ----------------
if superblock:
result['superblock'] = super_(dev)
return result | Check the state of a single bcache device
CLI example:
.. code-block:: bash
salt '*' bcache.device bcache0
salt '*' bcache.device /dev/sdc stats=True
:param stats: include statistics
:param settings: include all settings
:param internals: include all internals
:param superblock: include superblock info | Below is the the instruction that describes the task:
### Input:
Check the state of a single bcache device
CLI example:
.. code-block:: bash
salt '*' bcache.device bcache0
salt '*' bcache.device /dev/sdc stats=True
:param stats: include statistics
:param settings: include all settings
:param internals: include all internals
:param superblock: include superblock info
### Response:
def device(dev, stats=False, config=False, internals=False, superblock=False):
'''
Check the state of a single bcache device
CLI example:
.. code-block:: bash
salt '*' bcache.device bcache0
salt '*' bcache.device /dev/sdc stats=True
:param stats: include statistics
:param settings: include all settings
:param internals: include all internals
:param superblock: include superblock info
'''
result = {}
    if not _sysfs_attr(_bcpath(dev), None, 'error', '{0} is not a bcache of any kind'.format(dev)):
return False
elif _bcsys(dev, 'set'):
# ---------------- It's the cache itself ----------------
result['uuid'] = uuid()
base_attr = ['block_size', 'bucket_size', 'cache_available_percent', 'cache_replacement_policy', 'congested']
# ---------------- Parse through both the blockdev & the FS ----------------
result.update(_sysfs_parse(_bcpath(dev), base_attr, stats, config, internals))
result.update(_sysfs_parse(_fspath(), base_attr, stats, config, internals))
result.update(result.pop('base'))
else:
# ---------------- It's a backing device ----------------
back_uuid = uuid(dev)
if back_uuid is not None:
result['cache'] = back_uuid
try:
result['dev'] = os.path.basename(_bcsys(dev, 'dev'))
except Exception:
pass
result['bdev'] = _bdev(dev)
base_attr = ['cache_mode', 'running', 'state', 'writeback_running']
base_path = _bcpath(dev)
result.update(_sysfs_parse(base_path, base_attr, stats, config, internals))
result.update(result.pop('base'))
# ---------------- Modifications ----------------
state = [result['state']]
if result.pop('running'):
state.append('running')
else:
state.append('stopped')
if 'writeback_running' in result:
if result.pop('writeback_running'):
state.append('writeback_running')
else:
state.append('writeback_stopped')
result['state'] = state
# ---------------- Statistics ----------------
if 'stats' in result:
replre = r'(stats|cache)_'
statres = result['stats']
for attr in result['stats']:
if '/' not in attr:
key = re.sub(replre, '', attr)
statres[key] = statres.pop(attr)
else:
stat, key = attr.split('/', 1)
stat = re.sub(replre, '', stat)
key = re.sub(replre, '', key)
if stat not in statres:
statres[stat] = {}
statres[stat][key] = statres.pop(attr)
result['stats'] = statres
# ---------------- Internals ----------------
if internals:
interres = result.pop('inter_ro', {})
interres.update(result.pop('inter_rw', {}))
if interres:
for key in interres:
if key.startswith('internal'):
nkey = re.sub(r'internal[s/]*', '', key)
interres[nkey] = interres.pop(key)
key = nkey
if key.startswith(('btree', 'writeback')):
mkey, skey = re.split(r'_', key, maxsplit=1)
if mkey not in interres:
interres[mkey] = {}
interres[mkey][skey] = interres.pop(key)
result['internals'] = interres
# ---------------- Config ----------------
if config:
configres = result['config']
for key in configres:
if key.startswith('writeback'):
mkey, skey = re.split(r'_', key, maxsplit=1)
if mkey not in configres:
configres[mkey] = {}
configres[mkey][skey] = configres.pop(key)
result['config'] = configres
# ---------------- Superblock ----------------
if superblock:
result['superblock'] = super_(dev)
return result |
def _is_not_pickle_safe_gl_model_class(obj_class):
"""
Check if a Turi create model is pickle safe.
The function does it by checking that _CustomModel is the base class.
Parameters
----------
obj_class : Class to be checked.
Returns
----------
    True if the GLC class is a model and is not pickle safe.
"""
if issubclass(obj_class, _toolkits._model.CustomModel):
return not obj_class._is_gl_pickle_safe()
return False | Check if a Turi create model is pickle safe.
The function does it by checking that _CustomModel is the base class.
Parameters
----------
obj_class : Class to be checked.
Returns
----------
True if the GLC class is a model and is not pickle safe. | Below is the the instruction that describes the task:
### Input:
Check if a Turi create model is pickle safe.
The function does it by checking that _CustomModel is the base class.
Parameters
----------
obj_class : Class to be checked.
Returns
----------
True if the GLC class is a model and is not pickle safe.
### Response:
def _is_not_pickle_safe_gl_model_class(obj_class):
"""
Check if a Turi create model is pickle safe.
The function does it by checking that _CustomModel is the base class.
Parameters
----------
obj_class : Class to be checked.
Returns
----------
    True if the GLC class is a model and is not pickle safe.
"""
if issubclass(obj_class, _toolkits._model.CustomModel):
return not obj_class._is_gl_pickle_safe()
return False |
def build_clnsig(clnsig_info):
"""docstring for build_clnsig"""
clnsig_obj = dict(
value = clnsig_info['value'],
accession = clnsig_info.get('accession'),
revstat = clnsig_info.get('revstat')
)
    return clnsig_obj | Build a clnsig (clinical significance) dict from the given clnsig_info. | Below is the the instruction that describes the task:
### Input:
Build a clnsig (clinical significance) dict from the given clnsig_info.
### Response:
def build_clnsig(clnsig_info):
"""docstring for build_clnsig"""
clnsig_obj = dict(
value = clnsig_info['value'],
accession = clnsig_info.get('accession'),
revstat = clnsig_info.get('revstat')
)
return clnsig_obj |
def get_method_info(self, obj):
"""Returns the info for a Method
"""
info = self.get_base_info(obj)
info.update({})
return info | Returns the info for a Method | Below is the the instruction that describes the task:
### Input:
Returns the info for a Method
### Response:
def get_method_info(self, obj):
"""Returns the info for a Method
"""
info = self.get_base_info(obj)
info.update({})
return info |
def lines(n_traces=5,n=100,columns=None,dateIndex=True,mode=None):
"""
Returns a DataFrame with the required format for
a scatter (lines) plot
Parameters:
-----------
n_traces : int
Number of traces
n : int
Number of points for each trace
columns : [str]
List of column names
dateIndex : bool
If True it will return a datetime index
        if False it will return an enumerated index
mode : string
Format for each item
'abc' for alphabet columns
'stocks' for random stock names
"""
index=pd.date_range('1/1/15',periods=n) if dateIndex else list(range(n))
df=pd.DataFrame(np.random.randn(n,n_traces),index=index,
columns=getName(n_traces,columns=columns,mode=mode))
return df.cumsum() | Returns a DataFrame with the required format for
a scatter (lines) plot
Parameters:
-----------
n_traces : int
Number of traces
n : int
Number of points for each trace
columns : [str]
List of column names
dateIndex : bool
If True it will return a datetime index
if False it will return an enumerated index
mode : string
Format for each item
'abc' for alphabet columns
'stocks' for random stock names | Below is the the instruction that describes the task:
### Input:
Returns a DataFrame with the required format for
a scatter (lines) plot
Parameters:
-----------
n_traces : int
Number of traces
n : int
Number of points for each trace
columns : [str]
List of column names
dateIndex : bool
If True it will return a datetime index
if False it will return an enumerated index
mode : string
Format for each item
'abc' for alphabet columns
'stocks' for random stock names
### Response:
def lines(n_traces=5,n=100,columns=None,dateIndex=True,mode=None):
"""
Returns a DataFrame with the required format for
a scatter (lines) plot
Parameters:
-----------
n_traces : int
Number of traces
n : int
Number of points for each trace
columns : [str]
List of column names
dateIndex : bool
If True it will return a datetime index
        if False it will return an enumerated index
mode : string
Format for each item
'abc' for alphabet columns
'stocks' for random stock names
"""
index=pd.date_range('1/1/15',periods=n) if dateIndex else list(range(n))
df=pd.DataFrame(np.random.randn(n,n_traces),index=index,
columns=getName(n_traces,columns=columns,mode=mode))
return df.cumsum() |
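An equivalent, dependency-light sketch of the random-walk frame the helper above builds; the sizes and column names are arbitrary examples.

import numpy as np
import pandas as pd

n, n_traces = 100, 5
index = pd.date_range('1/1/15', periods=n)
df = pd.DataFrame(np.random.randn(n, n_traces),
                  index=index, columns=list('ABCDE')).cumsum()
print(df.head())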
def server(description=None, **kwargs):
'''Create the :class:`.WSGIServer` running :func:`hello`.'''
description = description or 'Pulsar Hello World Application'
return wsgi.WSGIServer(hello, description=description, **kwargs) | Create the :class:`.WSGIServer` running :func:`hello`. | Below is the the instruction that describes the task:
### Input:
Create the :class:`.WSGIServer` running :func:`hello`.
### Response:
def server(description=None, **kwargs):
'''Create the :class:`.WSGIServer` running :func:`hello`.'''
description = description or 'Pulsar Hello World Application'
return wsgi.WSGIServer(hello, description=description, **kwargs) |
def execute_async(self, output_options=None, sampling=None, context=None, query_params=None):
""" Initiate the query and return a QueryJob.
Args:
output_options: a QueryOutput object describing how to execute the query
sampling: sampling function to use. No sampling is done if None. See bigquery.Sampling
context: an optional Context object providing project_id and credentials. If a specific
project id or credentials are unspecified, the default ones configured at the global
level are used.
query_params: a dictionary containing query parameter types and values, passed to BigQuery.
Returns:
A Job object that can wait on creating a table or exporting to a file
If the output is a table, the Job object additionally has run statistics
and query results
Raises:
Exception if query could not be executed.
"""
# Default behavior is to execute to a table
if output_options is None:
output_options = QueryOutput.table()
# First, execute the query into a table, using a temporary one if no name is specified
batch = output_options.priority == 'low'
append = output_options.table_mode == 'append'
overwrite = output_options.table_mode == 'overwrite'
table_name = output_options.table_name
context = context or google.datalab.Context.default()
api = _api.Api(context)
if table_name is not None:
table_name = _utils.parse_table_name(table_name, api.project_id)
sql = self._expanded_sql(sampling)
try:
query_result = api.jobs_insert_query(sql, table_name=table_name,
append=append, overwrite=overwrite, batch=batch,
use_cache=output_options.use_cache,
allow_large_results=output_options.allow_large_results,
table_definitions=self.data_sources,
query_params=query_params)
except Exception as e:
raise e
if 'jobReference' not in query_result:
raise Exception('Unexpected response from server')
job_id = query_result['jobReference']['jobId']
if not table_name:
try:
destination = query_result['configuration']['query']['destinationTable']
table_name = (destination['projectId'], destination['datasetId'], destination['tableId'])
except KeyError:
# The query was in error
raise Exception(_utils.format_query_errors(query_result['status']['errors']))
execute_job = _query_job.QueryJob(job_id, table_name, sql, context=context)
# If all we need is to execute the query to a table, we're done
if output_options.type == 'table':
return execute_job
# Otherwise, build an async Job that waits on the query execution then carries out
# the specific export operation
else:
export_args = export_kwargs = None
if output_options.type == 'file':
if output_options.file_path.startswith('gs://'):
export_func = execute_job.result().extract
export_args = [output_options.file_path]
export_kwargs = {
'format': output_options.file_format,
'csv_delimiter': output_options.csv_delimiter,
'csv_header': output_options.csv_header,
'compress': output_options.compress_file
}
else:
export_func = execute_job.result().to_file
export_args = [output_options.file_path]
export_kwargs = {
'format': output_options.file_format,
'csv_delimiter': output_options.csv_delimiter,
'csv_header': output_options.csv_header
}
elif output_options.type == 'dataframe':
export_func = execute_job.result().to_dataframe
export_args = []
export_kwargs = {
'start_row': output_options.dataframe_start_row,
'max_rows': output_options.dataframe_max_rows
}
# Perform the export operation with the specified parameters
export_func = google.datalab.utils.async_function(export_func)
return export_func(*export_args, **export_kwargs) | Initiate the query and return a QueryJob.
Args:
output_options: a QueryOutput object describing how to execute the query
sampling: sampling function to use. No sampling is done if None. See bigquery.Sampling
context: an optional Context object providing project_id and credentials. If a specific
project id or credentials are unspecified, the default ones configured at the global
level are used.
query_params: a dictionary containing query parameter types and values, passed to BigQuery.
Returns:
A Job object that can wait on creating a table or exporting to a file
If the output is a table, the Job object additionally has run statistics
and query results
Raises:
Exception if query could not be executed. | Below is the the instruction that describes the task:
### Input:
Initiate the query and return a QueryJob.
Args:
output_options: a QueryOutput object describing how to execute the query
sampling: sampling function to use. No sampling is done if None. See bigquery.Sampling
context: an optional Context object providing project_id and credentials. If a specific
project id or credentials are unspecified, the default ones configured at the global
level are used.
query_params: a dictionary containing query parameter types and values, passed to BigQuery.
Returns:
A Job object that can wait on creating a table or exporting to a file
If the output is a table, the Job object additionally has run statistics
and query results
Raises:
Exception if query could not be executed.
### Response:
def execute_async(self, output_options=None, sampling=None, context=None, query_params=None):
""" Initiate the query and return a QueryJob.
Args:
output_options: a QueryOutput object describing how to execute the query
sampling: sampling function to use. No sampling is done if None. See bigquery.Sampling
context: an optional Context object providing project_id and credentials. If a specific
project id or credentials are unspecified, the default ones configured at the global
level are used.
query_params: a dictionary containing query parameter types and values, passed to BigQuery.
Returns:
A Job object that can wait on creating a table or exporting to a file
If the output is a table, the Job object additionally has run statistics
and query results
Raises:
Exception if query could not be executed.
"""
# Default behavior is to execute to a table
if output_options is None:
output_options = QueryOutput.table()
# First, execute the query into a table, using a temporary one if no name is specified
batch = output_options.priority == 'low'
append = output_options.table_mode == 'append'
overwrite = output_options.table_mode == 'overwrite'
table_name = output_options.table_name
context = context or google.datalab.Context.default()
api = _api.Api(context)
if table_name is not None:
table_name = _utils.parse_table_name(table_name, api.project_id)
sql = self._expanded_sql(sampling)
try:
query_result = api.jobs_insert_query(sql, table_name=table_name,
append=append, overwrite=overwrite, batch=batch,
use_cache=output_options.use_cache,
allow_large_results=output_options.allow_large_results,
table_definitions=self.data_sources,
query_params=query_params)
except Exception as e:
raise e
if 'jobReference' not in query_result:
raise Exception('Unexpected response from server')
job_id = query_result['jobReference']['jobId']
if not table_name:
try:
destination = query_result['configuration']['query']['destinationTable']
table_name = (destination['projectId'], destination['datasetId'], destination['tableId'])
except KeyError:
# The query was in error
raise Exception(_utils.format_query_errors(query_result['status']['errors']))
execute_job = _query_job.QueryJob(job_id, table_name, sql, context=context)
# If all we need is to execute the query to a table, we're done
if output_options.type == 'table':
return execute_job
# Otherwise, build an async Job that waits on the query execution then carries out
# the specific export operation
else:
export_args = export_kwargs = None
if output_options.type == 'file':
if output_options.file_path.startswith('gs://'):
export_func = execute_job.result().extract
export_args = [output_options.file_path]
export_kwargs = {
'format': output_options.file_format,
'csv_delimiter': output_options.csv_delimiter,
'csv_header': output_options.csv_header,
'compress': output_options.compress_file
}
else:
export_func = execute_job.result().to_file
export_args = [output_options.file_path]
export_kwargs = {
'format': output_options.file_format,
'csv_delimiter': output_options.csv_delimiter,
'csv_header': output_options.csv_header
}
elif output_options.type == 'dataframe':
export_func = execute_job.result().to_dataframe
export_args = []
export_kwargs = {
'start_row': output_options.dataframe_start_row,
'max_rows': output_options.dataframe_max_rows
}
# Perform the export operation with the specified parameters
export_func = google.datalab.utils.async_function(export_func)
return export_func(*export_args, **export_kwargs) |
def som_get_capture_objects(som_pointer):
"""!
@brief Returns list of indexes of captured objects by each neuron.
@param[in] som_pointer (c_pointer): pointer to object of self-organized map.
"""
ccore = ccore_library.get()
ccore.som_get_capture_objects.restype = POINTER(pyclustering_package)
package = ccore.som_get_capture_objects(som_pointer)
result = package_extractor(package).extract()
return result | !
@brief Returns list of indexes of captured objects by each neuron.
@param[in] som_pointer (c_pointer): pointer to object of self-organized map. | Below is the the instruction that describes the task:
### Input:
!
@brief Returns list of indexes of captured objects by each neuron.
@param[in] som_pointer (c_pointer): pointer to object of self-organized map.
### Response:
def som_get_capture_objects(som_pointer):
"""!
@brief Returns list of indexes of captured objects by each neuron.
@param[in] som_pointer (c_pointer): pointer to object of self-organized map.
"""
ccore = ccore_library.get()
ccore.som_get_capture_objects.restype = POINTER(pyclustering_package)
package = ccore.som_get_capture_objects(som_pointer)
result = package_extractor(package).extract()
return result |
def get_current_stats(self, names=None):
"""Return one or more of the current stats as a tuple.
This function does no computation. It only returns what has already
been calculated. If a stat hasn't been calculated, it will be returned
as ``numpy.nan``.
Parameters
----------
names : list of str, optional
Specify the names of the stats to retrieve. If ``None`` (the
default), will return ``default_stats``.
Returns
-------
tuple :
The current values of the requested stats, as a tuple. The order
of the stats is the same as the names.
"""
if names is None:
names = self.default_stats
return self._current_stats.getstats(names) | Return one or more of the current stats as a tuple.
This function does no computation. It only returns what has already
been calculated. If a stat hasn't been calculated, it will be returned
as ``numpy.nan``.
Parameters
----------
names : list of str, optional
Specify the names of the stats to retrieve. If ``None`` (the
default), will return ``default_stats``.
Returns
-------
tuple :
The current values of the requested stats, as a tuple. The order
of the stats is the same as the names. | Below is the the instruction that describes the task:
### Input:
Return one or more of the current stats as a tuple.
This function does no computation. It only returns what has already
been calculated. If a stat hasn't been calculated, it will be returned
as ``numpy.nan``.
Parameters
----------
names : list of str, optional
Specify the names of the stats to retrieve. If ``None`` (the
default), will return ``default_stats``.
Returns
-------
tuple :
The current values of the requested stats, as a tuple. The order
of the stats is the same as the names.
### Response:
def get_current_stats(self, names=None):
"""Return one or more of the current stats as a tuple.
This function does no computation. It only returns what has already
been calculated. If a stat hasn't been calculated, it will be returned
as ``numpy.nan``.
Parameters
----------
names : list of str, optional
Specify the names of the stats to retrieve. If ``None`` (the
default), will return ``default_stats``.
Returns
-------
tuple :
The current values of the requested stats, as a tuple. The order
of the stats is the same as the names.
"""
if names is None:
names = self.default_stats
return self._current_stats.getstats(names) |
def detrended_price_oscillator(data, period):
"""
Detrended Price Oscillator.
Formula:
DPO = DATA[i] - Avg(DATA[period/2 + 1])
"""
catch_errors.check_for_period_error(data, period)
period = int(period)
dop = [data[idx] - np.mean(data[idx+1-(int(period/2)+1):idx+1]) for idx in range(period-1, len(data))]
dop = fill_for_noncomputable_vals(data, dop)
return dop | Detrended Price Oscillator.
Formula:
DPO = DATA[i] - Avg(DATA[period/2 + 1]) | Below is the the instruction that describes the task:
### Input:
Detrended Price Oscillator.
Formula:
DPO = DATA[i] - Avg(DATA[period/2 + 1])
### Response:
def detrended_price_oscillator(data, period):
"""
Detrended Price Oscillator.
Formula:
DPO = DATA[i] - Avg(DATA[period/2 + 1])
"""
catch_errors.check_for_period_error(data, period)
period = int(period)
dop = [data[idx] - np.mean(data[idx+1-(int(period/2)+1):idx+1]) for idx in range(period-1, len(data))]
dop = fill_for_noncomputable_vals(data, dop)
return dop |
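A standalone sketch of the same DPO formula on synthetic prices; the 20-period window is an arbitrary example.

import numpy as np

prices = 100 + np.cumsum(np.random.randn(250))
period = 20
half = int(period / 2) + 1
# DPO = price[i] - mean of the trailing (period/2 + 1) window, as above
dpo = [prices[i] - np.mean(prices[i + 1 - half:i + 1])
       for i in range(period - 1, len(prices))]
print(len(dpo))  # len(prices) - period + 1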
def ReadCronJobs(self, cronjob_ids=None):
"""Reads a cronjob from the database."""
if cronjob_ids is None:
res = [job.Copy() for job in itervalues(self.cronjobs)]
else:
res = []
for job_id in cronjob_ids:
try:
res.append(self.cronjobs[job_id].Copy())
except KeyError:
raise db.UnknownCronJobError("Cron job with id %s not found." %
job_id)
for job in res:
lease = self.cronjob_leases.get(job.cron_job_id)
if lease:
job.leased_until, job.leased_by = lease
return res | Reads a cronjob from the database. | Below is the the instruction that describes the task:
### Input:
Reads a cronjob from the database.
### Response:
def ReadCronJobs(self, cronjob_ids=None):
"""Reads a cronjob from the database."""
if cronjob_ids is None:
res = [job.Copy() for job in itervalues(self.cronjobs)]
else:
res = []
for job_id in cronjob_ids:
try:
res.append(self.cronjobs[job_id].Copy())
except KeyError:
raise db.UnknownCronJobError("Cron job with id %s not found." %
job_id)
for job in res:
lease = self.cronjob_leases.get(job.cron_job_id)
if lease:
job.leased_until, job.leased_by = lease
return res |
def evaluate(self, data):
"""Evaluate the code needed to compute a given Data object."""
expression_engine = data.process.requirements.get('expression-engine', None)
if expression_engine is not None:
expression_engine = self.get_expression_engine(expression_engine)
# Parse steps.
steps = data.process.run.get('program', None)
if steps is None:
return
if not isinstance(steps, list):
raise ExecutionError('Workflow program must be a list of steps.')
# Expression engine evaluation context.
context = {
'input': data.input,
'steps': collections.OrderedDict(),
}
for index, step in enumerate(steps):
try:
step_id = step['id']
step_slug = step['run']
except KeyError as error:
raise ExecutionError('Incorrect definition of step "{}", missing property "{}".'.format(
step.get('id', index), error
))
# Fetch target process.
process = Process.objects.filter(slug=step_slug).order_by('-version').first()
if not process:
raise ExecutionError('Incorrect definition of step "{}", invalid process "{}".'.format(
step_id, step_slug
))
# Process all input variables.
step_input = step.get('input', {})
if not isinstance(step_input, dict):
raise ExecutionError('Incorrect definition of step "{}", input must be a dictionary.'.format(
step_id
))
data_input = self._evaluate_expressions(expression_engine, step_id, step_input, context)
# Create the data object.
data_object = Data.objects.create(
process=process,
contributor=data.contributor,
tags=data.tags,
input=data_input,
)
DataDependency.objects.create(
parent=data,
child=data_object,
kind=DataDependency.KIND_SUBPROCESS,
)
# Copy permissions.
copy_permissions(data, data_object)
# Copy collections.
for collection in data.collection_set.all():
collection.data.add(data_object)
context['steps'][step_id] = data_object.pk
# Immediately set our status to done and output all data object identifiers.
data.output = {
'steps': list(context['steps'].values()),
}
data.status = Data.STATUS_DONE | Evaluate the code needed to compute a given Data object. | Below is the the instruction that describes the task:
### Input:
Evaluate the code needed to compute a given Data object.
### Response:
def evaluate(self, data):
"""Evaluate the code needed to compute a given Data object."""
expression_engine = data.process.requirements.get('expression-engine', None)
if expression_engine is not None:
expression_engine = self.get_expression_engine(expression_engine)
# Parse steps.
steps = data.process.run.get('program', None)
if steps is None:
return
if not isinstance(steps, list):
raise ExecutionError('Workflow program must be a list of steps.')
# Expression engine evaluation context.
context = {
'input': data.input,
'steps': collections.OrderedDict(),
}
for index, step in enumerate(steps):
try:
step_id = step['id']
step_slug = step['run']
except KeyError as error:
raise ExecutionError('Incorrect definition of step "{}", missing property "{}".'.format(
step.get('id', index), error
))
# Fetch target process.
process = Process.objects.filter(slug=step_slug).order_by('-version').first()
if not process:
raise ExecutionError('Incorrect definition of step "{}", invalid process "{}".'.format(
step_id, step_slug
))
# Process all input variables.
step_input = step.get('input', {})
if not isinstance(step_input, dict):
raise ExecutionError('Incorrect definition of step "{}", input must be a dictionary.'.format(
step_id
))
data_input = self._evaluate_expressions(expression_engine, step_id, step_input, context)
# Create the data object.
data_object = Data.objects.create(
process=process,
contributor=data.contributor,
tags=data.tags,
input=data_input,
)
DataDependency.objects.create(
parent=data,
child=data_object,
kind=DataDependency.KIND_SUBPROCESS,
)
# Copy permissions.
copy_permissions(data, data_object)
# Copy collections.
for collection in data.collection_set.all():
collection.data.add(data_object)
context['steps'][step_id] = data_object.pk
# Immediately set our status to done and output all data object identifiers.
data.output = {
'steps': list(context['steps'].values()),
}
data.status = Data.STATUS_DONE |
def document(self, document):
"""
Associate a :class:`~elasticsearch_dsl.Document` subclass with an index.
This means that, when this index is created, it will contain the
mappings for the ``Document``. If the ``Document`` class doesn't have a
default index yet (by defining ``class Index``), this instance will be
used. Can be used as a decorator::
i = Index('blog')
@i.document
class Post(Document):
title = Text()
# create the index, including Post mappings
i.create()
# .search() will now return a Search object that will return
# properly deserialized Post instances
s = i.search()
"""
self._doc_types.append(document)
# If the document index does not have any name, that means the user
# did not set any index already to the document.
# So set this index as document index
if document._index._name is None:
document._index = self
return document | Associate a :class:`~elasticsearch_dsl.Document` subclass with an index.
This means that, when this index is created, it will contain the
mappings for the ``Document``. If the ``Document`` class doesn't have a
default index yet (by defining ``class Index``), this instance will be
used. Can be used as a decorator::
i = Index('blog')
@i.document
class Post(Document):
title = Text()
# create the index, including Post mappings
i.create()
# .search() will now return a Search object that will return
# properly deserialized Post instances
s = i.search() | Below is the the instruction that describes the task:
### Input:
Associate a :class:`~elasticsearch_dsl.Document` subclass with an index.
This means that, when this index is created, it will contain the
mappings for the ``Document``. If the ``Document`` class doesn't have a
default index yet (by defining ``class Index``), this instance will be
used. Can be used as a decorator::
i = Index('blog')
@i.document
class Post(Document):
title = Text()
# create the index, including Post mappings
i.create()
# .search() will now return a Search object that will return
# properly deserialized Post instances
s = i.search()
### Response:
def document(self, document):
"""
Associate a :class:`~elasticsearch_dsl.Document` subclass with an index.
This means that, when this index is created, it will contain the
mappings for the ``Document``. If the ``Document`` class doesn't have a
default index yet (by defining ``class Index``), this instance will be
used. Can be used as a decorator::
i = Index('blog')
@i.document
class Post(Document):
title = Text()
# create the index, including Post mappings
i.create()
# .search() will now return a Search object that will return
# properly deserialized Post instances
s = i.search()
"""
self._doc_types.append(document)
# If the document index does not have any name, that means the user
# did not set any index already to the document.
# So set this index as document index
if document._index._name is None:
document._index = self
return document |
def get_cso_dataframe(self):
"""
get a dataframe of composite observation sensitivity, as returned by PEST in the
seo file.
Note that this formulation deviates slightly from the PEST documentation in that the
values are divided by (npar-1) rather than by (npar).
The equation is cso_j = ((Q^1/2*J*J^T*Q^1/2)^1/2)_jj/(NPAR-1)
Returns:
cso : pandas.DataFrame
"""
assert self.jco is not None
assert self.pst is not None
weights = self.pst.observation_data.loc[self.jco.to_dataframe().index,"weight"].copy().values
cso = np.diag(np.sqrt((self.qhalfx.x.dot(self.qhalfx.x.T))))/(float(self.pst.npar-1))
cso_df = pd.DataFrame.from_dict({'obnme':self.jco.to_dataframe().index,'cso':cso})
cso_df.index=cso_df['obnme']
cso_df.drop('obnme', axis=1, inplace=True)
return cso_df | get a dataframe of composite observation sensitivity, as returned by PEST in the
seo file.
Note that this formulation deviates slightly from the PEST documentation in that the
values are divided by (npar-1) rather than by (npar).
The equation is cso_j = ((Q^1/2*J*J^T*Q^1/2)^1/2)_jj/(NPAR-1)
Returns:
cso : pandas.DataFrame | Below is the the instruction that describes the task:
### Input:
get a dataframe of composite observation sensitivity, as returned by PEST in the
seo file.
Note that this formulation deviates slightly from the PEST documentation in that the
values are divided by (npar-1) rather than by (npar).
The equation is cso_j = ((Q^1/2*J*J^T*Q^1/2)^1/2)_jj/(NPAR-1)
Returns:
cso : pandas.DataFrame
### Response:
def get_cso_dataframe(self):
"""
get a dataframe of composite observation sensitivity, as returned by PEST in the
seo file.
Note that this formulation deviates slightly from the PEST documentation in that the
values are divided by (npar-1) rather than by (npar).
The equation is cso_j = ((Q^1/2*J*J^T*Q^1/2)^1/2)_jj/(NPAR-1)
Returns:
cso : pandas.DataFrame
"""
assert self.jco is not None
assert self.pst is not None
weights = self.pst.observation_data.loc[self.jco.to_dataframe().index,"weight"].copy().values
cso = np.diag(np.sqrt((self.qhalfx.x.dot(self.qhalfx.x.T))))/(float(self.pst.npar-1))
cso_df = pd.DataFrame.from_dict({'obnme':self.jco.to_dataframe().index,'cso':cso})
cso_df.index=cso_df['obnme']
cso_df.drop('obnme', axis=1, inplace=True)
return cso_df |
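A numpy-only sketch of the CSO calculation above on a toy weighted Jacobian; the sizes and weights are invented for illustration, whereas the real class pulls them from the Jacobian and control file.

import numpy as np

nobs, npar = 50, 10
jac = np.random.randn(nobs, npar)          # stand-in for the Jacobian
weights = np.ones(nobs)                    # stand-in observation weights
qhalfx = np.sqrt(np.diag(weights)).dot(jac)
# diag of (Q^1/2 J J^T Q^1/2)^1/2, divided by (NPAR - 1)
cso = np.diag(np.sqrt(qhalfx.dot(qhalfx.T))) / float(npar - 1)
print(cso.shape)  # one value per observation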
def watch(value, spectator_type=Spectator):
"""Register a :class:`Specatator` to a :class:`Watchable` and return it.
In order to register callbacks to an eventful object, you need to create
    a Spectator that will watch it for you. A :class:`Spectator` is a relatively simple
object that has methods for adding, deleting, and triggering callbacks. To
create a spectator we call ``spectator = watch(x)``, where x is a Watchable
instance.
Parameters
----------
value : Watchable
A :class:`Watchable` instance.
spectator_type : Spectator
The type of spectator that will be returned.
Returns
-------
spectator: spectator_type
        The :class:`Spectator` (specified by ``spectator_type``) that
was registered to the given instance.
"""
if isinstance(value, Watchable):
wtype = type(value)
else:
raise TypeError("Expected a Watchable, not %r." % value)
spectator = getattr(value, "_instance_spectator", None)
if not isinstance(spectator, Spectator):
spectator = spectator_type(wtype)
value._instance_spectator = spectator
    return spectator | Register a :class:`Spectator` to a :class:`Watchable` and return it.
In order to register callbacks to an eventful object, you need to create
a Spectator that will watch it for you. A :class:`Spectator` is a relatively simple
object that has methods for adding, deleting, and triggering callbacks. To
create a spectator we call ``spectator = watch(x)``, where x is a Watchable
instance.
Parameters
----------
value : Watchable
A :class:`Watchable` instance.
spectator_type : Spectator
The type of spectator that will be returned.
Returns
-------
spectator: spectator_type
The :class:`Spectator` (specified by ``spectator_type``) that
was registered to the given instance. | Below is the the instruction that describes the task:
### Input:
Register a :class:`Spectator` to a :class:`Watchable` and return it.
In order to register callbacks to an eventful object, you need to create
a Spectator that will watch it for you. A :class:`Spectator` is a relatively simple
object that has methods for adding, deleting, and triggering callbacks. To
create a spectator we call ``spectator = watch(x)``, where x is a Watchable
instance.
Parameters
----------
value : Watchable
A :class:`Watchable` instance.
spectator_type : Spectator
The type of spectator that will be returned.
Returns
-------
spectator: spectator_type
The :class:`Spectator` (specified by ``spectator_type``) that
was registered to the given instance.
### Response:
def watch(value, spectator_type=Spectator):
"""Register a :class:`Specatator` to a :class:`Watchable` and return it.
In order to register callbacks to an eventful object, you need to create
    a Spectator that will watch it for you. A :class:`Spectator` is a relatively simple
object that has methods for adding, deleting, and triggering callbacks. To
create a spectator we call ``spectator = watch(x)``, where x is a Watchable
instance.
Parameters
----------
value : Watchable
A :class:`Watchable` instance.
spectator_type : Spectator
The type of spectator that will be returned.
Returns
-------
spectator: spectator_type
        The :class:`Spectator` (specified by ``spectator_type``) that
was registered to the given instance.
"""
if isinstance(value, Watchable):
wtype = type(value)
else:
raise TypeError("Expected a Watchable, not %r." % value)
spectator = getattr(value, "_instance_spectator", None)
if not isinstance(spectator, Spectator):
spectator = spectator_type(wtype)
value._instance_spectator = spectator
return spectator |
def create(host, port):
"""
Prepare server to execute
:return: Modules to execute, cmd line function
:rtype: list[WrapperServer], callable | None
"""
wrapper = WrapperEchoServer({
'server': None
})
d = {
'listen_port': port,
'changer': wrapper
}
if host:
d['listen_bind_ip'] = host
ses = EchoServer(d)
wrapper.server = ses
return [wrapper], cmd_line | Prepare server to execute
:return: Modules to execute, cmd line function
:rtype: list[WrapperServer], callable | None | Below is the the instruction that describes the task:
### Input:
Prepare server to execute
:return: Modules to execute, cmd line function
:rtype: list[WrapperServer], callable | None
### Response:
def create(host, port):
"""
Prepare server to execute
:return: Modules to execute, cmd line function
:rtype: list[WrapperServer], callable | None
"""
wrapper = WrapperEchoServer({
'server': None
})
d = {
'listen_port': port,
'changer': wrapper
}
if host:
d['listen_bind_ip'] = host
ses = EchoServer(d)
wrapper.server = ses
return [wrapper], cmd_line |
def dallinger():
"""Dallinger command-line utility."""
from logging.config import fileConfig
fileConfig(
os.path.join(os.path.dirname(__file__), "logging.ini"),
disable_existing_loggers=False,
) | Dallinger command-line utility. | Below is the the instruction that describes the task:
### Input:
Dallinger command-line utility.
### Response:
def dallinger():
"""Dallinger command-line utility."""
from logging.config import fileConfig
fileConfig(
os.path.join(os.path.dirname(__file__), "logging.ini"),
disable_existing_loggers=False,
) |
def disable(name, lbn, target, profile='default', tgt_type='glob'):
'''
.. versionchanged:: 2017.7.0
The ``expr_form`` argument has been renamed to ``tgt_type``, earlier
releases must use ``expr_form``.
Disable the named worker from the lbn load balancers at the targeted
minions. The worker will get traffic only for current sessions and won't
get new ones.
Example:
.. code-block:: yaml
disable-before-deploy:
modjk_worker.disable:
- name: {{ grains['id'] }}
- lbn: application
- target: 'roles:balancer'
- tgt_type: grain
'''
return _talk2modjk(name, lbn, target, 'worker_disable', profile, tgt_type) | .. versionchanged:: 2017.7.0
The ``expr_form`` argument has been renamed to ``tgt_type``, earlier
releases must use ``expr_form``.
Disable the named worker from the lbn load balancers at the targeted
minions. The worker will get traffic only for current sessions and won't
get new ones.
Example:
.. code-block:: yaml
disable-before-deploy:
modjk_worker.disable:
- name: {{ grains['id'] }}
- lbn: application
- target: 'roles:balancer'
- tgt_type: grain | Below is the the instruction that describes the task:
### Input:
.. versionchanged:: 2017.7.0
The ``expr_form`` argument has been renamed to ``tgt_type``, earlier
releases must use ``expr_form``.
Disable the named worker from the lbn load balancers at the targeted
minions. The worker will get traffic only for current sessions and won't
get new ones.
Example:
.. code-block:: yaml
disable-before-deploy:
modjk_worker.disable:
- name: {{ grains['id'] }}
- lbn: application
- target: 'roles:balancer'
- tgt_type: grain
### Response:
def disable(name, lbn, target, profile='default', tgt_type='glob'):
'''
.. versionchanged:: 2017.7.0
The ``expr_form`` argument has been renamed to ``tgt_type``, earlier
releases must use ``expr_form``.
Disable the named worker from the lbn load balancers at the targeted
minions. The worker will get traffic only for current sessions and won't
get new ones.
Example:
.. code-block:: yaml
disable-before-deploy:
modjk_worker.disable:
- name: {{ grains['id'] }}
- lbn: application
- target: 'roles:balancer'
- tgt_type: grain
'''
return _talk2modjk(name, lbn, target, 'worker_disable', profile, tgt_type) |
def add_group(self, group_attribs=None, parent=None):
"""Add an empty group element to the SVG."""
if parent is None:
parent = self.tree.getroot()
elif not self.contains_group(parent):
warnings.warn('The requested group {0} does not belong to '
'this Document'.format(parent))
if group_attribs is None:
group_attribs = {}
else:
group_attribs = group_attribs.copy()
return SubElement(parent, '{{{0}}}g'.format(
SVG_NAMESPACE['svg']), group_attribs) | Add an empty group element to the SVG. | Below is the the instruction that describes the task:
### Input:
Add an empty group element to the SVG.
### Response:
def add_group(self, group_attribs=None, parent=None):
"""Add an empty group element to the SVG."""
if parent is None:
parent = self.tree.getroot()
elif not self.contains_group(parent):
warnings.warn('The requested group {0} does not belong to '
'this Document'.format(parent))
if group_attribs is None:
group_attribs = {}
else:
group_attribs = group_attribs.copy()
return SubElement(parent, '{{{0}}}g'.format(
SVG_NAMESPACE['svg']), group_attribs) |
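A minimal ElementTree sketch of adding a namespaced <g> element, mirroring the SubElement call above; the namespace URI is the standard SVG one and 'layer1' is a made-up id.

from xml.etree.ElementTree import Element, SubElement, tostring

SVG_NS = 'http://www.w3.org/2000/svg'
root = Element('{{{0}}}svg'.format(SVG_NS))
group = SubElement(root, '{{{0}}}g'.format(SVG_NS), {'id': 'layer1'})
print(tostring(root).decode())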
def list_observatories(self):
"""
        Get the IDs of all observatories which have stored observations on this server.
:return: a sequence of strings containing observatories IDs
"""
response = requests.get(self.base_url + '/obstories').text
        return safe_load(response) | Get the IDs of all observatories which have stored observations on this server.
:return: a sequence of strings containing observatories IDs | Below is the the instruction that describes the task:
### Input:
Get the IDs of all observatories which have stored observations on this server.
:return: a sequence of strings containing observatories IDs
### Response:
def list_observatories(self):
"""
    Get the IDs of all observatories which have stored observations on this server.
:return: a sequence of strings containing observatories IDs
"""
response = requests.get(self.base_url + '/obstories').text
return safe_load(response) |
def do_with_ruby(ruby, cmdline, runas=None):
'''
Execute a ruby command with rbenv's shims using a specific ruby version
CLI Example:
.. code-block:: bash
salt '*' rbenv.do_with_ruby 2.0.0-p0 'gem list bundler'
salt '*' rbenv.do_with_ruby 2.0.0-p0 'gem list bundler' runas=deploy
'''
if not cmdline:
# This is a positional argument so this should never happen, but this
# will handle cases where someone explicitly passes a false value for
# cmdline.
raise SaltInvocationError('Command must be specified')
try:
cmdline = salt.utils.args.shlex_split(cmdline)
except AttributeError:
cmdline = salt.utils.args.shlex_split(six.text_type(cmdline))
env = {}
if ruby:
env['RBENV_VERSION'] = ruby
cmd = cmdline
else:
cmd = cmdline
return do(cmd, runas=runas, env=env) | Execute a ruby command with rbenv's shims using a specific ruby version
CLI Example:
.. code-block:: bash
salt '*' rbenv.do_with_ruby 2.0.0-p0 'gem list bundler'
salt '*' rbenv.do_with_ruby 2.0.0-p0 'gem list bundler' runas=deploy | Below is the the instruction that describes the task:
### Input:
Execute a ruby command with rbenv's shims using a specific ruby version
CLI Example:
.. code-block:: bash
salt '*' rbenv.do_with_ruby 2.0.0-p0 'gem list bundler'
salt '*' rbenv.do_with_ruby 2.0.0-p0 'gem list bundler' runas=deploy
### Response:
def do_with_ruby(ruby, cmdline, runas=None):
'''
Execute a ruby command with rbenv's shims using a specific ruby version
CLI Example:
.. code-block:: bash
salt '*' rbenv.do_with_ruby 2.0.0-p0 'gem list bundler'
salt '*' rbenv.do_with_ruby 2.0.0-p0 'gem list bundler' runas=deploy
'''
if not cmdline:
# This is a positional argument so this should never happen, but this
# will handle cases where someone explicitly passes a false value for
# cmdline.
raise SaltInvocationError('Command must be specified')
try:
cmdline = salt.utils.args.shlex_split(cmdline)
except AttributeError:
cmdline = salt.utils.args.shlex_split(six.text_type(cmdline))
env = {}
if ruby:
env['RBENV_VERSION'] = ruby
cmd = cmdline
else:
cmd = cmdline
return do(cmd, runas=runas, env=env) |
def make_tarball(base_name, base_dir, compress='gzip',
verbose=False, dry_run=False):
"""Create a tar file from all the files under 'base_dir'.
This file may be compressed.
:param compress: Compression algorithms. Supported algorithms are:
'gzip': (the default)
'compress'
'bzip2'
None
For 'gzip' and 'bzip2' the internal tarfile module will be used.
For 'compress' the .tar will be created using tarfile, and then
we will spawn 'compress' afterwards.
The output tar file will be named 'base_name' + ".tar",
possibly plus the appropriate compression extension (".gz",
".bz2" or ".Z"). Return the output filename.
"""
# XXX GNU tar 1.13 has a nifty option to add a prefix directory.
# It's pretty new, though, so we certainly can't require it --
# but it would be nice to take advantage of it to skip the
# "create a tree of hardlinks" step! (Would also be nice to
# detect GNU tar to use its 'z' option and save a step.)
compress_ext = { 'gzip': ".gz",
'bzip2': '.bz2',
'compress': ".Z" }
# flags for compression program, each element of list will be an argument
tarfile_compress_flag = {'gzip':'gz', 'bzip2':'bz2'}
compress_flags = {'compress': ["-f"]}
if compress is not None and compress not in compress_ext.keys():
raise ValueError("bad value for 'compress': must be None, 'gzip',"
"'bzip2' or 'compress'")
archive_name = base_name + ".tar"
if compress and compress in tarfile_compress_flag:
archive_name += compress_ext[compress]
mode = 'w:' + tarfile_compress_flag.get(compress, '')
mkpath(os.path.dirname(archive_name), dry_run=dry_run)
log.info('Creating tar file %s with mode %s' % (archive_name, mode))
if not dry_run:
tar = tarfile.open(archive_name, mode=mode)
# This recursively adds everything underneath base_dir
tar.add(base_dir)
tar.close()
if compress and compress not in tarfile_compress_flag:
spawn([compress] + compress_flags[compress] + [archive_name],
dry_run=dry_run)
return archive_name + compress_ext[compress]
else:
return archive_name | Create a tar file from all the files under 'base_dir'.
This file may be compressed.
:param compress: Compression algorithms. Supported algorithms are:
'gzip': (the default)
'compress'
'bzip2'
None
For 'gzip' and 'bzip2' the internal tarfile module will be used.
For 'compress' the .tar will be created using tarfile, and then
we will spawn 'compress' afterwards.
The output tar file will be named 'base_name' + ".tar",
possibly plus the appropriate compression extension (".gz",
".bz2" or ".Z"). Return the output filename. | Below is the the instruction that describes the task:
### Input:
Create a tar file from all the files under 'base_dir'.
This file may be compressed.
:param compress: Compression algorithms. Supported algorithms are:
'gzip': (the default)
'compress'
'bzip2'
None
For 'gzip' and 'bzip2' the internal tarfile module will be used.
For 'compress' the .tar will be created using tarfile, and then
we will spawn 'compress' afterwards.
The output tar file will be named 'base_name' + ".tar",
possibly plus the appropriate compression extension (".gz",
".bz2" or ".Z"). Return the output filename.
### Response:
def make_tarball(base_name, base_dir, compress='gzip',
verbose=False, dry_run=False):
"""Create a tar file from all the files under 'base_dir'.
This file may be compressed.
:param compress: Compression algorithms. Supported algorithms are:
'gzip': (the default)
'compress'
'bzip2'
None
For 'gzip' and 'bzip2' the internal tarfile module will be used.
For 'compress' the .tar will be created using tarfile, and then
we will spawn 'compress' afterwards.
The output tar file will be named 'base_name' + ".tar",
possibly plus the appropriate compression extension (".gz",
".bz2" or ".Z"). Return the output filename.
"""
# XXX GNU tar 1.13 has a nifty option to add a prefix directory.
# It's pretty new, though, so we certainly can't require it --
# but it would be nice to take advantage of it to skip the
# "create a tree of hardlinks" step! (Would also be nice to
# detect GNU tar to use its 'z' option and save a step.)
compress_ext = { 'gzip': ".gz",
'bzip2': '.bz2',
'compress': ".Z" }
# flags for compression program, each element of list will be an argument
tarfile_compress_flag = {'gzip':'gz', 'bzip2':'bz2'}
compress_flags = {'compress': ["-f"]}
if compress is not None and compress not in compress_ext.keys():
raise ValueError("bad value for 'compress': must be None, 'gzip',"
"'bzip2' or 'compress'")
archive_name = base_name + ".tar"
if compress and compress in tarfile_compress_flag:
archive_name += compress_ext[compress]
mode = 'w:' + tarfile_compress_flag.get(compress, '')
mkpath(os.path.dirname(archive_name), dry_run=dry_run)
log.info('Creating tar file %s with mode %s' % (archive_name, mode))
if not dry_run:
tar = tarfile.open(archive_name, mode=mode)
# This recursively adds everything underneath base_dir
tar.add(base_dir)
tar.close()
if compress and compress not in tarfile_compress_flag:
spawn([compress] + compress_flags[compress] + [archive_name],
dry_run=dry_run)
return archive_name + compress_ext[compress]
else:
return archive_name |
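For comparison, a short standard-library-only sketch that produces the gzip case above directly; the names are placeholders and the directory must already exist.

import tarfile

base_name, base_dir = 'backup', 'my_project'   # placeholder names
archive_name = base_name + '.tar.gz'
with tarfile.open(archive_name, mode='w:gz') as tar:
    tar.add(base_dir)   # recursively adds everything under base_dir
print(archive_name)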
def read(self, size = -1):
"""
Returns data bytes of size size from the current segment. If size is -1 it returns all the remaining data bytes from memory segment
"""
if size < -1:
            raise Exception("You shouldn't be doing this")
if size == -1:
t = self.current_segment.remaining_len(self.current_position)
if not t:
return None
old_new_pos = self.current_position
self.current_position = self.current_segment.end_address
return self.current_segment.data[old_new_pos - self.current_segment.start_address:]
t = self.current_position + size
if not self.current_segment.inrange(t):
raise Exception('Would read over segment boundaries!')
old_new_pos = self.current_position
self.current_position = t
return self.current_segment.data[old_new_pos - self.current_segment.start_address :t - self.current_segment.start_address] | Returns data bytes of size size from the current segment. If size is -1 it returns all the remaining data bytes from memory segment | Below is the the instruction that describes the task:
### Input:
Returns data bytes of size size from the current segment. If size is -1 it returns all the remaining data bytes from memory segment
### Response:
def read(self, size = -1):
"""
Returns data bytes of size size from the current segment. If size is -1 it returns all the remaining data bytes from memory segment
"""
if size < -1:
            raise Exception("You shouldn't be doing this")
if size == -1:
t = self.current_segment.remaining_len(self.current_position)
if not t:
return None
old_new_pos = self.current_position
self.current_position = self.current_segment.end_address
return self.current_segment.data[old_new_pos - self.current_segment.start_address:]
t = self.current_position + size
if not self.current_segment.inrange(t):
raise Exception('Would read over segment boundaries!')
old_new_pos = self.current_position
self.current_position = t
return self.current_segment.data[old_new_pos - self.current_segment.start_address :t - self.current_segment.start_address] |
def _count_vocab(self, analyzed_docs):
"""Create sparse feature matrix, and vocabulary where fixed_vocab=False
"""
vocabulary = self.vocabulary_
j_indices = _make_int_array()
indptr = _make_int_array()
indptr.append(0)
for doc in analyzed_docs:
for feature in doc:
try:
j_indices.append(vocabulary[feature])
except KeyError:
# Ignore out-of-vocabulary items for fixed_vocab=True
continue
indptr.append(len(j_indices))
j_indices = frombuffer_empty(j_indices, dtype=np.intc)
indptr = np.frombuffer(indptr, dtype=np.intc)
values = np.ones(len(j_indices))
X = sp.csr_matrix((values, j_indices, indptr),
shape=(len(indptr) - 1, len(vocabulary)),
dtype=self.dtype)
X.sum_duplicates()
if self.binary:
X.data.fill(1)
return X | Create sparse feature matrix, and vocabulary where fixed_vocab=False | Below is the the instruction that describes the task:
### Input:
Create sparse feature matrix, and vocabulary where fixed_vocab=False
### Response:
def _count_vocab(self, analyzed_docs):
"""Create sparse feature matrix, and vocabulary where fixed_vocab=False
"""
vocabulary = self.vocabulary_
j_indices = _make_int_array()
indptr = _make_int_array()
indptr.append(0)
for doc in analyzed_docs:
for feature in doc:
try:
j_indices.append(vocabulary[feature])
except KeyError:
# Ignore out-of-vocabulary items for fixed_vocab=True
continue
indptr.append(len(j_indices))
j_indices = frombuffer_empty(j_indices, dtype=np.intc)
indptr = np.frombuffer(indptr, dtype=np.intc)
values = np.ones(len(j_indices))
X = sp.csr_matrix((values, j_indices, indptr),
shape=(len(indptr) - 1, len(vocabulary)),
dtype=self.dtype)
X.sum_duplicates()
if self.binary:
X.data.fill(1)
return X |
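A self-contained sketch of the same CSR construction with a tiny fixed vocabulary; the documents and vocabulary are invented, and 'bird' shows the out-of-vocabulary skip.

import numpy as np
import scipy.sparse as sp

vocabulary = {'cat': 0, 'dog': 1, 'fish': 2}
analyzed_docs = [['cat', 'dog', 'cat'], ['fish', 'bird'], ['dog']]
j_indices, indptr = [], [0]
for doc in analyzed_docs:
    for feature in doc:
        if feature in vocabulary:          # ignore out-of-vocabulary items
            j_indices.append(vocabulary[feature])
    indptr.append(len(j_indices))
X = sp.csr_matrix((np.ones(len(j_indices)), j_indices, indptr),
                  shape=(len(indptr) - 1, len(vocabulary)))
X.sum_duplicates()
print(X.toarray())   # [[2. 1. 0.] [0. 0. 1.] [0. 1. 0.]]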
def _first_step_to_match(match_step):
"""Transform the very first MATCH step into a MATCH query string."""
parts = []
if match_step.root_block is not None:
if not isinstance(match_step.root_block, QueryRoot):
raise AssertionError(u'Expected None or QueryRoot root block, received: '
u'{} {}'.format(match_step.root_block, match_step))
match_step.root_block.validate()
start_class = get_only_element_from_collection(match_step.root_block.start_class)
parts.append(u'class: %s' % (start_class,))
# MATCH steps with a QueryRoot root block shouldn't have a 'coerce_type_block'.
if match_step.coerce_type_block is not None:
raise AssertionError(u'Invalid MATCH step: {}'.format(match_step))
if match_step.where_block:
match_step.where_block.validate()
parts.append(u'where: (%s)' % (match_step.where_block.predicate.to_match(),))
if match_step.as_block is None:
raise AssertionError(u'Found a MATCH step without a corresponding Location. '
u'This should never happen: {}'.format(match_step))
else:
match_step.as_block.validate()
parts.append(u'as: %s' % (_get_vertex_location_name(match_step.as_block.location),))
return u'{{ %s }}' % (u', '.join(parts),) | Transform the very first MATCH step into a MATCH query string. | Below is the instruction that describes the task:
### Input:
Transform the very first MATCH step into a MATCH query string.
### Response:
def _first_step_to_match(match_step):
"""Transform the very first MATCH step into a MATCH query string."""
parts = []
if match_step.root_block is not None:
if not isinstance(match_step.root_block, QueryRoot):
raise AssertionError(u'Expected None or QueryRoot root block, received: '
u'{} {}'.format(match_step.root_block, match_step))
match_step.root_block.validate()
start_class = get_only_element_from_collection(match_step.root_block.start_class)
parts.append(u'class: %s' % (start_class,))
# MATCH steps with a QueryRoot root block shouldn't have a 'coerce_type_block'.
if match_step.coerce_type_block is not None:
raise AssertionError(u'Invalid MATCH step: {}'.format(match_step))
if match_step.where_block:
match_step.where_block.validate()
parts.append(u'where: (%s)' % (match_step.where_block.predicate.to_match(),))
if match_step.as_block is None:
raise AssertionError(u'Found a MATCH step without a corresponding Location. '
u'This should never happen: {}'.format(match_step))
else:
match_step.as_block.validate()
parts.append(u'as: %s' % (_get_vertex_location_name(match_step.as_block.location),))
return u'{{ %s }}' % (u', '.join(parts),) |
def _create_translation_file(feature_folder,
dataset_name,
translation,
formula_id2index):
"""
Write a look-up file that contains the direct (record-wise) lookup
information.
Parameters
----------
feature_folder :
Path to the feature files.
dataset_name :
'traindata', 'validdata' or 'testdata'.
translation : list of triples
(raw data id, formula in latex, formula id)
"""
translationfilename = "%s/translation-%s.csv" % (feature_folder,
dataset_name)
with open(translationfilename, "w") as f:
f.write("index,raw_data_id,latex,formula_id\n")
for el in translation:
f.write("%i,%i,%s,%i\n" % (formula_id2index[el[2]],
el[0], el[1], el[2])) | Write a look-up file that contains the direct (record-wise) lookup
information.
Parameters
----------
feature_folder :
Path to the feature files.
dataset_name :
'traindata', 'validdata' or 'testdata'.
translation : list of triples
(raw data id, formula in latex, formula id) | Below is the instruction that describes the task:
### Input:
Write a look-up file that contains the direct (record-wise) lookup
information.
Parameters
----------
feature_folder :
Path to the feature files.
dataset_name :
'traindata', 'validdata' or 'testdata'.
translation : list of triples
(raw data id, formula in latex, formula id)
### Response:
def _create_translation_file(feature_folder,
dataset_name,
translation,
formula_id2index):
"""
Write a look-up file that contains the direct (record-wise) lookup
information.
Parameters
----------
feature_folder :
Path to the feature files.
dataset_name :
'traindata', 'validdata' or 'testdata'.
translation : list of triples
(raw data id, formula in latex, formula id)
"""
translationfilename = "%s/translation-%s.csv" % (feature_folder,
dataset_name)
with open(translationfilename, "w") as f:
f.write("index,raw_data_id,latex,formula_id\n")
for el in translation:
f.write("%i,%i,%s,%i\n" % (formula_id2index[el[2]],
el[0], el[1], el[2])) |
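A hedged example call; the folder, ids and formulas below are made up for illustration and the target folder must already exist:
formula_id2index = {31: 0, 42: 1}
translation = [(7001, "a^2 + b^2", 31), (7002, "\\frac{x}{y}", 42)]
_create_translation_file("/tmp/features", "traindata", translation, formula_id2index)
# writes /tmp/features/translation-traindata.csv with an index,raw_data_id,latex,formula_id header row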
def tabulate(d, transpose=False, thousands=True, key_fun=None, sep=',', align=True):
"""
d is a dictionary, keyed by tuple(A, B).
Goal is to put A in rows, B in columns, report data in table form.
>>> d = {(1,'a'):3, (1,'b'):4, (2,'a'):5, (2,'b'):0}
>>> print tabulate(d)
===========
o a b
-----------
1 3 4
2 5 0
-----------
>>> print tabulate(d, transpose=True)
===========
o 1 2
-----------
a 3 5
b 4 0
-----------
"""
pairs = d.keys()
rows, cols = zip(*pairs)
if transpose:
rows, cols = cols, rows
rows = sorted(set(rows))
cols = sorted(set(cols))
header = ["o"] + list(cols)
table = []
for r in rows:
combo = [(r, c) for c in cols]
if transpose:
combo = [(c, r) for (r, c) in combo]
data = [d.get(x, "n/a") for x in combo]
data = ["{0:.1f}".format(x) if isinstance(x, float) else x
for x in data]
if key_fun:
data = [key_fun(x) for x in data]
table.append([str(r)] + data)
if not align:
formatted = load_csv(header, table, sep=sep)
return "\n".join(formatted)
return loadtable(header, table, thousands=thousands) | d is a dictionary, keyed by tuple(A, B).
Goal is to put A in rows, B in columns, report data in table form.
>>> d = {(1,'a'):3, (1,'b'):4, (2,'a'):5, (2,'b'):0}
>>> print tabulate(d)
===========
o a b
-----------
1 3 4
2 5 0
-----------
>>> print tabulate(d, transpose=True)
===========
o 1 2
-----------
a 3 5
b 4 0
----------- | Below is the instruction that describes the task:
### Input:
d is a dictionary, keyed by tuple(A, B).
Goal is to put A in rows, B in columns, report data in table form.
>>> d = {(1,'a'):3, (1,'b'):4, (2,'a'):5, (2,'b'):0}
>>> print tabulate(d)
===========
o a b
-----------
1 3 4
2 5 0
-----------
>>> print tabulate(d, transpose=True)
===========
o 1 2
-----------
a 3 5
b 4 0
-----------
### Response:
def tabulate(d, transpose=False, thousands=True, key_fun=None, sep=',', align=True):
"""
d is a dictionary, keyed by tuple(A, B).
Goal is to put A in rows, B in columns, report data in table form.
>>> d = {(1,'a'):3, (1,'b'):4, (2,'a'):5, (2,'b'):0}
>>> print tabulate(d)
===========
o a b
-----------
1 3 4
2 5 0
-----------
>>> print tabulate(d, transpose=True)
===========
o 1 2
-----------
a 3 5
b 4 0
-----------
"""
pairs = d.keys()
rows, cols = zip(*pairs)
if transpose:
rows, cols = cols, rows
rows = sorted(set(rows))
cols = sorted(set(cols))
header = ["o"] + list(cols)
table = []
for r in rows:
combo = [(r, c) for c in cols]
if transpose:
combo = [(c, r) for (r, c) in combo]
data = [d.get(x, "n/a") for x in combo]
data = ["{0:.1f}".format(x) if isinstance(x, float) else x
for x in data]
if key_fun:
data = [key_fun(x) for x in data]
table.append([str(r)] + data)
if not align:
formatted = load_csv(header, table, sep=sep)
return "\n".join(formatted)
return loadtable(header, table, thousands=thousands) |
def CLJP(S, color=False):
"""Compute a C/F splitting using the parallel CLJP algorithm.
Parameters
----------
S : csr_matrix
Strength of connection matrix indicating the strength between nodes i
and j (S_ij)
color : bool
use the CLJP coloring approach
Returns
-------
splitting : array
Array of length of S of ones (coarse) and zeros (fine)
Examples
--------
>>> from pyamg.gallery import poisson
>>> from pyamg.classical.split import CLJP
>>> S = poisson((7,), format='csr') # 1D mesh with 7 vertices
>>> splitting = CLJP(S)
See Also
--------
MIS, PMIS, CLJPc
References
----------
.. [8] David M. Alber and Luke N. Olson
"Parallel coarse-grid selection"
Numerical Linear Algebra with Applications 2007; 14:611-643.
"""
if not isspmatrix_csr(S):
raise TypeError('expected csr_matrix')
S = remove_diagonal(S)
colorid = 0
if color:
colorid = 1
T = S.T.tocsr() # transpose S for efficient column access
splitting = np.empty(S.shape[0], dtype='intc')
amg_core.cljp_naive_splitting(S.shape[0],
S.indptr, S.indices,
T.indptr, T.indices,
splitting,
colorid)
return splitting | Compute a C/F splitting using the parallel CLJP algorithm.
Parameters
----------
S : csr_matrix
Strength of connection matrix indicating the strength between nodes i
and j (S_ij)
color : bool
use the CLJP coloring approach
Returns
-------
splitting : array
Array of length of S of ones (coarse) and zeros (fine)
Examples
--------
>>> from pyamg.gallery import poisson
>>> from pyamg.classical.split import CLJP
>>> S = poisson((7,), format='csr') # 1D mesh with 7 vertices
>>> splitting = CLJP(S)
See Also
--------
MIS, PMIS, CLJPc
References
----------
.. [8] David M. Alber and Luke N. Olson
"Parallel coarse-grid selection"
Numerical Linear Algebra with Applications 2007; 14:611-643. | Below is the instruction that describes the task:
### Input:
Compute a C/F splitting using the parallel CLJP algorithm.
Parameters
----------
S : csr_matrix
Strength of connection matrix indicating the strength between nodes i
and j (S_ij)
color : bool
use the CLJP coloring approach
Returns
-------
splitting : array
Array of length of S of ones (coarse) and zeros (fine)
Examples
--------
>>> from pyamg.gallery import poisson
>>> from pyamg.classical.split import CLJP
>>> S = poisson((7,), format='csr') # 1D mesh with 7 vertices
>>> splitting = CLJP(S)
See Also
--------
MIS, PMIS, CLJPc
References
----------
.. [8] David M. Alber and Luke N. Olson
"Parallel coarse-grid selection"
Numerical Linear Algebra with Applications 2007; 14:611-643.
### Response:
def CLJP(S, color=False):
"""Compute a C/F splitting using the parallel CLJP algorithm.
Parameters
----------
S : csr_matrix
Strength of connection matrix indicating the strength between nodes i
and j (S_ij)
color : bool
use the CLJP coloring approach
Returns
-------
splitting : array
Array of length of S of ones (coarse) and zeros (fine)
Examples
--------
>>> from pyamg.gallery import poisson
>>> from pyamg.classical.split import CLJP
>>> S = poisson((7,), format='csr') # 1D mesh with 7 vertices
>>> splitting = CLJP(S)
See Also
--------
MIS, PMIS, CLJPc
References
----------
.. [8] David M. Alber and Luke N. Olson
"Parallel coarse-grid selection"
Numerical Linear Algebra with Applications 2007; 14:611-643.
"""
if not isspmatrix_csr(S):
raise TypeError('expected csr_matrix')
S = remove_diagonal(S)
colorid = 0
if color:
colorid = 1
T = S.T.tocsr() # transpose S for efficient column access
splitting = np.empty(S.shape[0], dtype='intc')
amg_core.cljp_naive_splitting(S.shape[0],
S.indptr, S.indices,
T.indptr, T.indices,
splitting,
colorid)
return splitting |
def get_git_home(path='.'):
"""Get Git path from the current context."""
ctx = click.get_current_context(silent=True)
if ctx and GIT_KEY in ctx.meta:
return ctx.meta[GIT_KEY]
from git import Repo
return Repo(path, search_parent_directories=True).working_dir | Get Git path from the current context. | Below is the instruction that describes the task:
### Input:
Get Git path from the current context.
### Response:
def get_git_home(path='.'):
"""Get Git path from the current context."""
ctx = click.get_current_context(silent=True)
if ctx and GIT_KEY in ctx.meta:
return ctx.meta[GIT_KEY]
from git import Repo
return Repo(path, search_parent_directories=True).working_dir |
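A small usage sketch; outside of a click command the function simply falls back to discovering the enclosing Git working tree via GitPython:
import os
repo_root = get_git_home(path=os.getcwd())
print(repo_root)   # e.g. the top-level directory containing .git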
def crc8(data):
"""
Perform the 1-Wire CRC check on the provided data.
:param bytearray data: 8 byte array representing 64 bit ROM code
"""
crc = 0
for byte in data:
crc ^= byte
for _ in range(8):
if crc & 0x01:
crc = (crc >> 1) ^ 0x8C
else:
crc >>= 1
crc &= 0xFF
return crc | Perform the 1-Wire CRC check on the provided data.
:param bytearray data: 8 byte array representing 64 bit ROM code | Below is the instruction that describes the task:
### Input:
Perform the 1-Wire CRC check on the provided data.
:param bytearray data: 8 byte array representing 64 bit ROM code
### Response:
def crc8(data):
"""
Perform the 1-Wire CRC check on the provided data.
:param bytearray data: 8 byte array representing 64 bit ROM code
"""
crc = 0
for byte in data:
crc ^= byte
for _ in range(8):
if crc & 0x01:
crc = (crc >> 1) ^ 0x8C
else:
crc >>= 1
crc &= 0xFF
return crc |
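A hedged check of a 64-bit 1-Wire ROM code, whose last byte is the CRC of the first seven; the ROM value below is made up, so the comparison is only illustrative:
rom = bytearray([0x28, 0xAA, 0x3C, 0x61, 0x55, 0x14, 0x01, 0x00])
print(crc8(rom[0:7]) == rom[7])   # True only for a genuine ROM code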
def _set_fc_port(self, v, load=False):
"""
Setter method for fc_port, mapped from YANG variable /interface/fc_port (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_fc_port is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_fc_port() directly.
YANG Description: The list of fibrechannel interfaces in the managed
device. Each row represents a fibrechannel interface.
The list provides a way to discover all the fibrechannel interfaces
in a managed device.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGListType("name",fc_port.fc_port, yang_name="fc-port", rest_name="FibreChannel", parent=self, is_container='list', user_ordered=True, path_helper=self._path_helper, yang_keys='name', extensions={u'tailf-common': {u'info': u'The list of fibrechannel interfaces.', u'cli-no-key-completion': None, u'alt-name': u'FibreChannel', u'sort-priority': u'RUNNCFG_LEVEL_INTERFACE_TYPE_PORT_CHANNEL', u'cli-suppress-no': None, u'cli-suppress-show-path': None, u'display-when': u'/vcsmode/vcs-mode = "true"', u'cli-custom-range-actionpoint': u'FcRangeCliActionpoint', u'cli-custom-range-enumerator': u'FcRangeCliActionpoint', u'cli-suppress-key-abbreviation': None, u'cli-no-match-completion': None, u'cli-incomplete-no': None, u'callpoint': u'interface_fcport', u'cli-mode-name': u'conf-if-fi-$(name)'}}), is_container='list', yang_name="fc-port", rest_name="FibreChannel", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'The list of fibrechannel interfaces.', u'cli-no-key-completion': None, u'alt-name': u'FibreChannel', u'sort-priority': u'RUNNCFG_LEVEL_INTERFACE_TYPE_PORT_CHANNEL', u'cli-suppress-no': None, u'cli-suppress-show-path': None, u'display-when': u'/vcsmode/vcs-mode = "true"', u'cli-custom-range-actionpoint': u'FcRangeCliActionpoint', u'cli-custom-range-enumerator': u'FcRangeCliActionpoint', u'cli-suppress-key-abbreviation': None, u'cli-no-match-completion': None, u'cli-incomplete-no': None, u'callpoint': u'interface_fcport', u'cli-mode-name': u'conf-if-fi-$(name)'}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='list', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """fc_port must be of a type compatible with list""",
'defined-type': "list",
'generated-type': """YANGDynClass(base=YANGListType("name",fc_port.fc_port, yang_name="fc-port", rest_name="FibreChannel", parent=self, is_container='list', user_ordered=True, path_helper=self._path_helper, yang_keys='name', extensions={u'tailf-common': {u'info': u'The list of fibrechannel interfaces.', u'cli-no-key-completion': None, u'alt-name': u'FibreChannel', u'sort-priority': u'RUNNCFG_LEVEL_INTERFACE_TYPE_PORT_CHANNEL', u'cli-suppress-no': None, u'cli-suppress-show-path': None, u'display-when': u'/vcsmode/vcs-mode = "true"', u'cli-custom-range-actionpoint': u'FcRangeCliActionpoint', u'cli-custom-range-enumerator': u'FcRangeCliActionpoint', u'cli-suppress-key-abbreviation': None, u'cli-no-match-completion': None, u'cli-incomplete-no': None, u'callpoint': u'interface_fcport', u'cli-mode-name': u'conf-if-fi-$(name)'}}), is_container='list', yang_name="fc-port", rest_name="FibreChannel", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'The list of fibrechannel interfaces.', u'cli-no-key-completion': None, u'alt-name': u'FibreChannel', u'sort-priority': u'RUNNCFG_LEVEL_INTERFACE_TYPE_PORT_CHANNEL', u'cli-suppress-no': None, u'cli-suppress-show-path': None, u'display-when': u'/vcsmode/vcs-mode = "true"', u'cli-custom-range-actionpoint': u'FcRangeCliActionpoint', u'cli-custom-range-enumerator': u'FcRangeCliActionpoint', u'cli-suppress-key-abbreviation': None, u'cli-no-match-completion': None, u'cli-incomplete-no': None, u'callpoint': u'interface_fcport', u'cli-mode-name': u'conf-if-fi-$(name)'}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='list', is_config=True)""",
})
self.__fc_port = t
if hasattr(self, '_set'):
self._set() | Setter method for fc_port, mapped from YANG variable /interface/fc_port (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_fc_port is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_fc_port() directly.
YANG Description: The list of fibrechannel interfaces in the managed
device. Each row represents a fibrechannel interface.
The list provides a way to discover all the fibrechannel interfaces
in a managed device. | Below is the instruction that describes the task:
### Input:
Setter method for fc_port, mapped from YANG variable /interface/fc_port (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_fc_port is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_fc_port() directly.
YANG Description: The list of fibrechannel interfaces in the managed
device. Each row represents a fibrechannel interface.
The list provides a way to discover all the fibrechannel interfaces
in a managed device.
### Response:
def _set_fc_port(self, v, load=False):
"""
Setter method for fc_port, mapped from YANG variable /interface/fc_port (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_fc_port is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_fc_port() directly.
YANG Description: The list of fibrechannel interfaces in the managed
device. Each row represents a fibrechannel interface.
The list provides a way to discover all the fibrechannel interfaces
in a managed device.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGListType("name",fc_port.fc_port, yang_name="fc-port", rest_name="FibreChannel", parent=self, is_container='list', user_ordered=True, path_helper=self._path_helper, yang_keys='name', extensions={u'tailf-common': {u'info': u'The list of fibrechannel interfaces.', u'cli-no-key-completion': None, u'alt-name': u'FibreChannel', u'sort-priority': u'RUNNCFG_LEVEL_INTERFACE_TYPE_PORT_CHANNEL', u'cli-suppress-no': None, u'cli-suppress-show-path': None, u'display-when': u'/vcsmode/vcs-mode = "true"', u'cli-custom-range-actionpoint': u'FcRangeCliActionpoint', u'cli-custom-range-enumerator': u'FcRangeCliActionpoint', u'cli-suppress-key-abbreviation': None, u'cli-no-match-completion': None, u'cli-incomplete-no': None, u'callpoint': u'interface_fcport', u'cli-mode-name': u'conf-if-fi-$(name)'}}), is_container='list', yang_name="fc-port", rest_name="FibreChannel", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'The list of fibrechannel interfaces.', u'cli-no-key-completion': None, u'alt-name': u'FibreChannel', u'sort-priority': u'RUNNCFG_LEVEL_INTERFACE_TYPE_PORT_CHANNEL', u'cli-suppress-no': None, u'cli-suppress-show-path': None, u'display-when': u'/vcsmode/vcs-mode = "true"', u'cli-custom-range-actionpoint': u'FcRangeCliActionpoint', u'cli-custom-range-enumerator': u'FcRangeCliActionpoint', u'cli-suppress-key-abbreviation': None, u'cli-no-match-completion': None, u'cli-incomplete-no': None, u'callpoint': u'interface_fcport', u'cli-mode-name': u'conf-if-fi-$(name)'}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='list', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """fc_port must be of a type compatible with list""",
'defined-type': "list",
'generated-type': """YANGDynClass(base=YANGListType("name",fc_port.fc_port, yang_name="fc-port", rest_name="FibreChannel", parent=self, is_container='list', user_ordered=True, path_helper=self._path_helper, yang_keys='name', extensions={u'tailf-common': {u'info': u'The list of fibrechannel interfaces.', u'cli-no-key-completion': None, u'alt-name': u'FibreChannel', u'sort-priority': u'RUNNCFG_LEVEL_INTERFACE_TYPE_PORT_CHANNEL', u'cli-suppress-no': None, u'cli-suppress-show-path': None, u'display-when': u'/vcsmode/vcs-mode = "true"', u'cli-custom-range-actionpoint': u'FcRangeCliActionpoint', u'cli-custom-range-enumerator': u'FcRangeCliActionpoint', u'cli-suppress-key-abbreviation': None, u'cli-no-match-completion': None, u'cli-incomplete-no': None, u'callpoint': u'interface_fcport', u'cli-mode-name': u'conf-if-fi-$(name)'}}), is_container='list', yang_name="fc-port", rest_name="FibreChannel", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'The list of fibrechannel interfaces.', u'cli-no-key-completion': None, u'alt-name': u'FibreChannel', u'sort-priority': u'RUNNCFG_LEVEL_INTERFACE_TYPE_PORT_CHANNEL', u'cli-suppress-no': None, u'cli-suppress-show-path': None, u'display-when': u'/vcsmode/vcs-mode = "true"', u'cli-custom-range-actionpoint': u'FcRangeCliActionpoint', u'cli-custom-range-enumerator': u'FcRangeCliActionpoint', u'cli-suppress-key-abbreviation': None, u'cli-no-match-completion': None, u'cli-incomplete-no': None, u'callpoint': u'interface_fcport', u'cli-mode-name': u'conf-if-fi-$(name)'}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='list', is_config=True)""",
})
self.__fc_port = t
if hasattr(self, '_set'):
self._set() |
def stat(self, follow_symlinks=True):
"""Return a stat_result object for this entry.
Args:
follow_symlinks: If False and the entry is a symlink, return the
result for the symlink, otherwise for the object it points to.
"""
if follow_symlinks:
if self._statresult_symlink is None:
file_object = self._filesystem.resolve(self.path)
if self._filesystem.is_windows_fs:
file_object.st_nlink = 0
self._statresult_symlink = file_object.stat_result.copy()
return self._statresult_symlink
if self._statresult is None:
file_object = self._filesystem.lresolve(self.path)
self._inode = file_object.st_ino
if self._filesystem.is_windows_fs:
file_object.st_nlink = 0
self._statresult = file_object.stat_result.copy()
return self._statresult | Return a stat_result object for this entry.
Args:
follow_symlinks: If False and the entry is a symlink, return the
result for the symlink, otherwise for the object it points to. | Below is the instruction that describes the task:
### Input:
Return a stat_result object for this entry.
Args:
follow_symlinks: If False and the entry is a symlink, return the
result for the symlink, otherwise for the object it points to.
### Response:
def stat(self, follow_symlinks=True):
"""Return a stat_result object for this entry.
Args:
follow_symlinks: If False and the entry is a symlink, return the
result for the symlink, otherwise for the object it points to.
"""
if follow_symlinks:
if self._statresult_symlink is None:
file_object = self._filesystem.resolve(self.path)
if self._filesystem.is_windows_fs:
file_object.st_nlink = 0
self._statresult_symlink = file_object.stat_result.copy()
return self._statresult_symlink
if self._statresult is None:
file_object = self._filesystem.lresolve(self.path)
self._inode = file_object.st_ino
if self._filesystem.is_windows_fs:
file_object.st_nlink = 0
self._statresult = file_object.stat_result.copy()
return self._statresult |
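This entry type mirrors the standard os.scandir() entries, so it is consumed the same way; the directory path below is an assumption:
import os
with os.scandir("/some/dir") as entries:
    for entry in entries:
        info = entry.stat(follow_symlinks=False)   # lstat-like result for symlinks
        print(entry.name, info.st_size)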
def close_filenos(preserve):
""" Close unprotected file descriptors
Close all open file descriptors that are not in preserve.
If ulimit -nofile is "unlimited", all file descriptors below 4096 are closed,
otherwise all below the limit reported by resource.getrlimit().
:param preserve: set with protected files
:type preserve: set
:return: None
"""
maxfd = resource.getrlimit(resource.RLIMIT_NOFILE)[1]
if maxfd == resource.RLIM_INFINITY:
maxfd = 4096
for fileno in range(maxfd):
if fileno not in preserve:
try:
os.close(fileno)
except OSError as err:
if not err.errno == errno.EBADF:
raise DaemonError(
'Failed to close file descriptor {0}: {1}'
.format(fileno, err)) | Close unprotected file descriptors
Close all open file descriptors that are not in preserve.
If ulimit -nofile is "unlimited", all file descriptors below 4096 are closed,
otherwise all below the limit reported by resource.getrlimit().
:param preserve: set with protected files
:type preserve: set
:return: None | Below is the instruction that describes the task:
### Input:
Close unprotected file descriptors
Close all open file descriptors that are not in preserve.
If ulimit -nofile is "unlimited", all file descriptors below 4096 are closed,
otherwise all below the limit reported by resource.getrlimit().
:param preserve: set with protected files
:type preserve: set
:return: None
### Response:
def close_filenos(preserve):
""" Close unprotected file descriptors
Close all open file descriptors that are not in preserve.
If ulimit -nofile is "unlimited", all file descriptors below 4096 are closed,
otherwise all below the limit reported by resource.getrlimit().
:param preserve: set with protected files
:type preserve: set
:return: None
"""
maxfd = resource.getrlimit(resource.RLIMIT_NOFILE)[1]
if maxfd == resource.RLIM_INFINITY:
maxfd = 4096
for fileno in range(maxfd):
if fileno not in preserve:
try:
os.close(fileno)
except OSError as err:
if not err.errno == errno.EBADF:
raise DaemonError(
'Failed to close file descriptor {0}: {1}'
.format(fileno, err)) |
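A typical daemonization step, sketched: keep the standard streams (plus any sockets still needed) and close everything else:
keep = {0, 1, 2}   # stdin, stdout, stderr
close_filenos(keep)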
def update_ssh_public_key(
self,
name,
ssh_public_key,
update_mask=None,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Updates an SSH public key and returns the profile information. This method
supports patch semantics.
Example:
>>> from google.cloud import oslogin_v1
>>>
>>> client = oslogin_v1.OsLoginServiceClient()
>>>
>>> name = client.fingerprint_path('[USER]', '[FINGERPRINT]')
>>>
>>> # TODO: Initialize `ssh_public_key`:
>>> ssh_public_key = {}
>>>
>>> response = client.update_ssh_public_key(name, ssh_public_key)
Args:
name (str): The fingerprint of the public key to update. Public keys are identified
by their SHA-256 fingerprint. The fingerprint of the public key is in
format ``users/{user}/sshPublicKeys/{fingerprint}``.
ssh_public_key (Union[dict, ~google.cloud.oslogin_v1.types.SshPublicKey]): The SSH public key and expiration time.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.oslogin_v1.types.SshPublicKey`
update_mask (Union[dict, ~google.cloud.oslogin_v1.types.FieldMask]): Mask to control which fields get updated. Updates all if not present.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.oslogin_v1.types.FieldMask`
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.oslogin_v1.types.SshPublicKey` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "update_ssh_public_key" not in self._inner_api_calls:
self._inner_api_calls[
"update_ssh_public_key"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.update_ssh_public_key,
default_retry=self._method_configs["UpdateSshPublicKey"].retry,
default_timeout=self._method_configs["UpdateSshPublicKey"].timeout,
client_info=self._client_info,
)
request = oslogin_pb2.UpdateSshPublicKeyRequest(
name=name, ssh_public_key=ssh_public_key, update_mask=update_mask
)
return self._inner_api_calls["update_ssh_public_key"](
request, retry=retry, timeout=timeout, metadata=metadata
) | Updates an SSH public key and returns the profile information. This method
supports patch semantics.
Example:
>>> from google.cloud import oslogin_v1
>>>
>>> client = oslogin_v1.OsLoginServiceClient()
>>>
>>> name = client.fingerprint_path('[USER]', '[FINGERPRINT]')
>>>
>>> # TODO: Initialize `ssh_public_key`:
>>> ssh_public_key = {}
>>>
>>> response = client.update_ssh_public_key(name, ssh_public_key)
Args:
name (str): The fingerprint of the public key to update. Public keys are identified
by their SHA-256 fingerprint. The fingerprint of the public key is in
format ``users/{user}/sshPublicKeys/{fingerprint}``.
ssh_public_key (Union[dict, ~google.cloud.oslogin_v1.types.SshPublicKey]): The SSH public key and expiration time.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.oslogin_v1.types.SshPublicKey`
update_mask (Union[dict, ~google.cloud.oslogin_v1.types.FieldMask]): Mask to control which fields get updated. Updates all if not present.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.oslogin_v1.types.FieldMask`
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.oslogin_v1.types.SshPublicKey` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid. | Below is the instruction that describes the task:
### Input:
Updates an SSH public key and returns the profile information. This method
supports patch semantics.
Example:
>>> from google.cloud import oslogin_v1
>>>
>>> client = oslogin_v1.OsLoginServiceClient()
>>>
>>> name = client.fingerprint_path('[USER]', '[FINGERPRINT]')
>>>
>>> # TODO: Initialize `ssh_public_key`:
>>> ssh_public_key = {}
>>>
>>> response = client.update_ssh_public_key(name, ssh_public_key)
Args:
name (str): The fingerprint of the public key to update. Public keys are identified
by their SHA-256 fingerprint. The fingerprint of the public key is in
format ``users/{user}/sshPublicKeys/{fingerprint}``.
ssh_public_key (Union[dict, ~google.cloud.oslogin_v1.types.SshPublicKey]): The SSH public key and expiration time.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.oslogin_v1.types.SshPublicKey`
update_mask (Union[dict, ~google.cloud.oslogin_v1.types.FieldMask]): Mask to control which fields get updated. Updates all if not present.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.oslogin_v1.types.FieldMask`
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.oslogin_v1.types.SshPublicKey` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
### Response:
def update_ssh_public_key(
self,
name,
ssh_public_key,
update_mask=None,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Updates an SSH public key and returns the profile information. This method
supports patch semantics.
Example:
>>> from google.cloud import oslogin_v1
>>>
>>> client = oslogin_v1.OsLoginServiceClient()
>>>
>>> name = client.fingerprint_path('[USER]', '[FINGERPRINT]')
>>>
>>> # TODO: Initialize `ssh_public_key`:
>>> ssh_public_key = {}
>>>
>>> response = client.update_ssh_public_key(name, ssh_public_key)
Args:
name (str): The fingerprint of the public key to update. Public keys are identified
by their SHA-256 fingerprint. The fingerprint of the public key is in
format ``users/{user}/sshPublicKeys/{fingerprint}``.
ssh_public_key (Union[dict, ~google.cloud.oslogin_v1.types.SshPublicKey]): The SSH public key and expiration time.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.oslogin_v1.types.SshPublicKey`
update_mask (Union[dict, ~google.cloud.oslogin_v1.types.FieldMask]): Mask to control which fields get updated. Updates all if not present.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.oslogin_v1.types.FieldMask`
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.oslogin_v1.types.SshPublicKey` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "update_ssh_public_key" not in self._inner_api_calls:
self._inner_api_calls[
"update_ssh_public_key"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.update_ssh_public_key,
default_retry=self._method_configs["UpdateSshPublicKey"].retry,
default_timeout=self._method_configs["UpdateSshPublicKey"].timeout,
client_info=self._client_info,
)
request = oslogin_pb2.UpdateSshPublicKeyRequest(
name=name, ssh_public_key=ssh_public_key, update_mask=update_mask
)
return self._inner_api_calls["update_ssh_public_key"](
request, retry=retry, timeout=timeout, metadata=metadata
) |
def bear(a1, b1, a2, b2):
"""Find bearing/position angle between two points on a unit sphere.
Parameters
----------
a1, b1 : float
Longitude-like and latitude-like angles defining the first
point. Both are in radians.
a2, b2 : float
Longitude-like and latitude-like angles defining the second
point. Both are in radians.
Notes
-----
Position angle of the second point with respect to the first
is returned in radians. Position angle is calculated clockwise
and counter-clockwise from the direction towards the North
pole. It is between [0 and π] if the second point is in the
eastern hemisphere w.r.t the first, and between (0, -π) if
the second point is in the western hemisphere w.r.t the first.
.. warning::
If the first point is at the pole then bearing is undefined and
0 is returned.
Results agree with those from SLALIB routine sla_dbear. See
test_bear_against_slalib_dbear() in test_angles.py.
Examples
--------
>>> from angles import bear, r2d, d2r
>>> bear(0, 0, 0, -d2r(90.0))
3.141592653589793
>>> bear(0, -d2r(90.0), 0, 0)
0.0
>>> bear(0, -d2r(45.0), 0, 0)
0.0
>>> bear(0, -d2r(89.678), 0, 0)
0.0
>>> r2d(bear(d2r(45.0), d2r(45.0), d2r(46.0), d2r(45.0)))
89.64644212193384
>>> r2d(bear(d2r(45.0), d2r(45.0), d2r(44.0), d2r(45.0)))
-89.64644212193421
"""
# Find perpendicular to the plane containing the base and
# z-axis. Then find the perpendicular to the plane containing
# the base and the target. The angle between these two is the
# position angle or bearing of the target w.r.t the base. Check
# sign of the z component of the latter vector to determine
# quadrant: 1st and 2nd quadrants are +ve while 3rd and 4th are
# negative.
#
# Tolerance to decide if first is on the pole and also to decide if
# the calculated bearing is zero.
tol = 1e-15
v1 = CartesianVector.from_spherical(1.0, a1, b1)
v2 = CartesianVector.from_spherical(1.0, a2, b2)
# Z-axis
v0 = CartesianVector.from_spherical(r=1.0, alpha=0.0, delta=d2r(90.0))
if abs(v1.cross(v0).mod) < tol:
# The first point is on the pole. Bearing is undefined.
warnings.warn(
"First point is on the pole. Bearing undefined.")
return 0.0
# Vector perpendicular to great circle containing two points.
v12 = v1.cross(v2)
# Vector perpendicular to great circle containing base and
# Z-axis.
v10 = v1.cross(v0)
# Find angle between these two vectors.
dot = v12.dot(v10)
cross = v12.cross(v10).mod
x = math.atan2(cross, dot)
# If z is negative then we are in the 3rd or 4th quadrant.
if v12.z < 0:
x = -x
if abs(x) < tol:
return 0.0
else:
return x | Find bearing/position angle between two points on a unit sphere.
Parameters
----------
a1, b1 : float
Longitude-like and latitude-like angles defining the first
point. Both are in radians.
a2, b2 : float
Longitude-like and latitude-like angles defining the second
point. Both are in radians.
Notes
-----
Position angle of the second point with respect to the first
is returned in radians. Position angle is calculated clockwise
and counter-clockwise from the direction towards the North
pole. It is between [0 and π] if the second point is in the
eastern hemisphere w.r.t the first, and between (0, -π) if
the second point is in the western hemisphere w.r.t the first.
.. warning::
If the first point is at the pole then bearing is undefined and
0 is returned.
Results agree with those from SLALIB routine sla_dbear. See
test_bear_against_slalib_dbear() in test_angles.py.
Examples
--------
>>> from angles import bear, r2d, d2r
>>> bear(0, 0, 0, -d2r(90.0))
3.141592653589793
>>> bear(0, -d2r(90.0), 0, 0)
0.0
>>> bear(0, -d2r(45.0), 0, 0)
0.0
>>> bear(0, -d2r(89.678), 0, 0)
0.0
>>> r2d(bear(d2r(45.0), d2r(45.0), d2r(46.0), d2r(45.0)))
89.64644212193384
>>> r2d(bear(d2r(45.0), d2r(45.0), d2r(44.0), d2r(45.0)))
-89.64644212193421 | Below is the instruction that describes the task:
### Input:
Find bearing/position angle between two points on a unit sphere.
Parameters
----------
a1, b1 : float
Longitude-like and latitude-like angles defining the first
point. Both are in radians.
a2, b2 : float
Longitude-like and latitude-like angles defining the second
point. Both are in radians.
Notes
-----
Position angle of the second point with respect to the first
is returned in radians. Position angle is calculated clockwise
and counter-clockwise from the direction towards the North
pole. It is between [0 and π] if the second point is in the
eastern hemisphere w.r.t the first, and between (0, -π) if
the second point is in the western hemisphere w.r.t the first.
.. warning::
If the first point is at the pole then bearing is undefined and
0 is returned.
Results agree with those from SLALIB routine sla_dbear. See
test_bear_against_slalib_dbear() in test_angles.py.
Examples
--------
>>> from angles import bear, r2d, d2r
>>> bear(0, 0, 0, -d2r(90.0))
3.141592653589793
>>> bear(0, -d2r(90.0), 0, 0)
0.0
>>> bear(0, -d2r(45.0), 0, 0)
0.0
>>> bear(0, -d2r(89.678), 0, 0)
0.0
>>> r2d(bear(d2r(45.0), d2r(45.0), d2r(46.0), d2r(45.0)))
89.64644212193384
>>> r2d(bear(d2r(45.0), d2r(45.0), d2r(44.0), d2r(45.0)))
-89.64644212193421
### Response:
def bear(a1, b1, a2, b2):
"""Find bearing/position angle between two points on a unit sphere.
Parameters
----------
a1, b1 : float
Longitude-like and latitude-like angles defining the first
point. Both are in radians.
a2, b2 : float
Longitude-like and latitude-like angles defining the second
point. Both are in radians.
Notes
-----
Position angle of the second point with respect to the first
is returned in radians. Position angle is calculated clockwise
and counter-clockwise from the direction towards the North
pole. It is between [0 and π] if the second point is in the
eastern hemisphere w.r.t the first, and between (0, -π) if
the second point is in the western hemisphere w.r.t the first.
.. warning::
If the first point is at the pole then bearing is undefined and
0 is returned.
Results agree with those from SLALIB routine sla_dbear. See
test_bear_against_slalib_dbear() in test_angles.py.
Examples
--------
>>> from angles import bear, r2d, d2r
>>> bear(0, 0, 0, -d2r(90.0))
3.141592653589793
>>> bear(0, -d2r(90.0), 0, 0)
0.0
>>> bear(0, -d2r(45.0), 0, 0)
0.0
>>> bear(0, -d2r(89.678), 0, 0)
0.0
>>> r2d(bear(d2r(45.0), d2r(45.0), d2r(46.0), d2r(45.0)))
89.64644212193384
>>> r2d(bear(d2r(45.0), d2r(45.0), d2r(44.0), d2r(45.0)))
-89.64644212193421
"""
# Find perpendicular to the plane containing the base and
# z-axis. Then find the perpendicular to the plane containing
# the base and the target. The angle between these two is the
# position angle or bearing of the target w.r.t the base. Check
# sign of the z component of the latter vector to determine
# quadrant: 1st and 2nd quadrants are +ve while 3rd and 4th are
# negative.
#
# Tolerance to decide if first is on the pole and also to decide if
# the calculated bearing is zero.
tol = 1e-15
v1 = CartesianVector.from_spherical(1.0, a1, b1)
v2 = CartesianVector.from_spherical(1.0, a2, b2)
# Z-axis
v0 = CartesianVector.from_spherical(r=1.0, alpha=0.0, delta=d2r(90.0))
if abs(v1.cross(v0).mod) < tol:
# The first point is on the pole. Bearing is undefined.
warnings.warn(
"First point is on the pole. Bearing undefined.")
return 0.0
# Vector perpendicular to great circle containing two points.
v12 = v1.cross(v2)
# Vector perpendicular to great circle containing base and
# Z-axis.
v10 = v1.cross(v0)
# Find angle between these two vectors.
dot = v12.dot(v10)
cross = v12.cross(v10).mod
x = math.atan2(cross, dot)
# If z is negative then we are in the 3rd or 4th quadrant.
if v12.z < 0:
x = -x
if abs(x) < tol:
return 0.0
else:
return x |
def get_template_loader(self, subdir='templates'):
'''App-specific function to get the current app's template loader'''
if self.request is None:
raise ValueError("this method can only be called after the view middleware is run. Check that `django_mako_plus.middleware` is in MIDDLEWARE.")
dmp = apps.get_app_config('django_mako_plus')
return dmp.engine.get_template_loader(self.app, subdir) | App-specific function to get the current app's template loader | Below is the instruction that describes the task:
### Input:
App-specific function to get the current app's template loader
### Response:
def get_template_loader(self, subdir='templates'):
'''App-specific function to get the current app's template loader'''
if self.request is None:
raise ValueError("this method can only be called after the view middleware is run. Check that `django_mako_plus.middleware` is in MIDDLEWARE.")
dmp = apps.get_app_config('django_mako_plus')
return dmp.engine.get_template_loader(self.app, subdir) |
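A hedged sketch of using the loader from code that holds an instance (self) of the class above after the DMP view middleware has run; the template name and context are made up, and the render() signature assumed here follows the Django template backend convention:
loader = self.get_template_loader('templates')
template = loader.get_template('index.html')
html = template.render(context={'greeting': 'hello'}, request=self.request)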
def getLoggingLocation(self):
"""Return the path for the calcpkg.log file - at the moment, only use a Linux path since I don't know where Windows thinks logs should go."""
if sys.platform == "win32":
modulePath = os.path.realpath(__file__)
modulePath = modulePath[:modulePath.rfind("/")]
return modulePath
else:
return "/tmp"
return "" | Return the path for the calcpkg.log file - at the moment, only use a Linux path since I don't know where Windows thinks logs should go. | Below is the instruction that describes the task:
### Input:
Return the path for the calcpkg.log file - at the moment, only use a Linux path since I don't know where Windows thinks logs should go.
### Response:
def getLoggingLocation(self):
"""Return the path for the calcpkg.log file - at the moment, only use a Linux path since I don't know where Windows thinks logs should go."""
if sys.platform == "win32":
modulePath = os.path.realpath(__file__)
modulePath = modulePath[:modulePath.rfind("/")]
return modulePath
else:
return "/tmp"
return "" |
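A small sketch wiring the chosen directory into the logging module; checker stands in for an instance of the class and is an assumption:
import logging, os
log_file = os.path.join(checker.getLoggingLocation(), "calcpkg.log")
logging.basicConfig(filename=log_file, level=logging.INFO)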
def c_if(self, classical, val):
"""Add classical control register to all instructions."""
for gate in self.instructions:
gate.c_if(classical, val)
return self | Add classical control register to all instructions. | Below is the instruction that describes the task:
### Input:
Add classical control register to all instructions.
### Response:
def c_if(self, classical, val):
"""Add classical control register to all instructions."""
for gate in self.instructions:
gate.c_if(classical, val)
return self |
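In Qiskit-style code a gate call on a circuit returns an instruction set, so the classical condition is applied to every instruction in it; a hedged sketch (register sizes arbitrary, exact behaviour depends on the Qiskit version):
from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit
qr = QuantumRegister(1)
cr = ClassicalRegister(1)
circ = QuantumCircuit(qr, cr)
circ.x(qr[0]).c_if(cr, 1)   # apply X only when the classical register holds 1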
def terminate(self):
"""Signal runner to stop and join thread."""
self.toggle_scan(False)
self.keep_going = False
self.join() | Signal runner to stop and join thread. | Below is the instruction that describes the task:
### Input:
Signal runner to stop and join thread.
### Response:
def terminate(self):
"""Signal runner to stop and join thread."""
self.toggle_scan(False)
self.keep_going = False
self.join() |
def create_hosting_device_resources(self, context, complementary_id,
tenant_id, mgmt_context, max_hosted):
"""Create resources for a hosting device in a plugin specific way."""
mgmt_port = None
if mgmt_context and mgmt_context.get('mgmt_nw_id') and tenant_id:
# Create port for mgmt interface
p_spec = {'port': {
'tenant_id': tenant_id,
'admin_state_up': True,
'name': 'mgmt',
'network_id': mgmt_context['mgmt_nw_id'],
'mac_address': bc.constants.ATTR_NOT_SPECIFIED,
'fixed_ips': self._mgmt_subnet_spec(context, mgmt_context),
'device_id': "",
# Use device_owner attribute to ensure we can query for these
# ports even before Nova has set device_id attribute.
'device_owner': complementary_id}}
try:
mgmt_port = self._core_plugin.create_port(context, p_spec)
except n_exc.NeutronException as e:
LOG.error('Error %s when creating management port. '
'Cleaning up.', e)
self.delete_hosting_device_resources(
context, tenant_id, mgmt_port)
mgmt_port = None
# We are setting the 'ports' to an empty list as it is expected by
# the callee: device_handling_db._create_svc_vm_hosting_devices()
return {'mgmt_port': mgmt_port, 'ports': []} | Create resources for a hosting device in a plugin specific way. | Below is the instruction that describes the task:
### Input:
Create resources for a hosting device in a plugin specific way.
### Response:
def create_hosting_device_resources(self, context, complementary_id,
tenant_id, mgmt_context, max_hosted):
"""Create resources for a hosting device in a plugin specific way."""
mgmt_port = None
if mgmt_context and mgmt_context.get('mgmt_nw_id') and tenant_id:
# Create port for mgmt interface
p_spec = {'port': {
'tenant_id': tenant_id,
'admin_state_up': True,
'name': 'mgmt',
'network_id': mgmt_context['mgmt_nw_id'],
'mac_address': bc.constants.ATTR_NOT_SPECIFIED,
'fixed_ips': self._mgmt_subnet_spec(context, mgmt_context),
'device_id': "",
# Use device_owner attribute to ensure we can query for these
# ports even before Nova has set device_id attribute.
'device_owner': complementary_id}}
try:
mgmt_port = self._core_plugin.create_port(context, p_spec)
except n_exc.NeutronException as e:
LOG.error('Error %s when creating management port. '
'Cleaning up.', e)
self.delete_hosting_device_resources(
context, tenant_id, mgmt_port)
mgmt_port = None
# We are setting the 'ports' to an empty list as it is expected by
# the callee: device_handling_db._create_svc_vm_hosting_devices()
return {'mgmt_port': mgmt_port, 'ports': []} |
def updateFile(self, logical_file_name=[], is_file_valid=1, lost=0, dataset=''):
"""
API to update file status
:param logical_file_name: logical_file_name to update (optional), but must have either an lfn or
a dataset
:type logical_file_name: str
:param is_file_valid: valid=1, invalid=0 (Required)
:type is_file_valid: bool
:param lost: default lost=0 (optional)
:type lost: bool
:param dataset: default dataset='' (optional), but must have either an lfn or a dataset
:type dataset: basestring
"""
if lost in [1, True, 'True', 'true', '1', 'y', 'yes']:
lost = 1
if is_file_valid in [1, True, 'True', 'true', '1', 'y', 'yes']:
dbsExceptionHandler("dbsException-invalid-input2", dbsExceptionCode["dbsException-invalid-input2"], self.logger.exception,\
"Lost file must set to invalid" )
else: lost = 0
for f in logical_file_name, dataset:
if '*' in f or '%' in f:
dbsExceptionHandler("dbsException-invalid-input2", dbsExceptionCode["dbsException-invalid-input2"], self.logger.exception, "No \
wildcard allow in LFN or dataset for updatefile API." )
try:
self.dbsFile.updateStatus(logical_file_name, is_file_valid, lost, dataset)
except HTTPError as he:
raise he
except Exception as ex:
sError = "DBSWriterModel/updateFile. %s\n. Exception trace: \n %s" \
% (ex, traceback.format_exc())
dbsExceptionHandler('dbsException-server-error', dbsExceptionCode['dbsException-server-error'], self.logger.exception, sError) | API to update file status
:param logical_file_name: logical_file_name to update (optional), but must have either an lfn or
a dataset
:type logical_file_name: str
:param is_file_valid: valid=1, invalid=0 (Required)
:type is_file_valid: bool
:param lost: default lost=0 (optional)
:type lost: bool
:param dataset: default dataset='' (optional), but must have either an lfn or a dataset
:type dataset: basestring | Below is the instruction that describes the task:
### Input:
API to update file status
:param logical_file_name: logical_file_name to update (optional), but must have either an lfn or
a dataset
:type logical_file_name: str
:param is_file_valid: valid=1, invalid=0 (Required)
:type is_file_valid: bool
:param lost: default lost=0 (optional)
:type lost: bool
:param dataset: default dataset='' (optional), but must have either an lfn or a dataset
:type dataset: basestring
### Response:
def updateFile(self, logical_file_name=[], is_file_valid=1, lost=0, dataset=''):
"""
API to update file status
:param logical_file_name: logical_file_name to update (optional), but must have either an lfn or
a dataset
:type logical_file_name: str
:param is_file_valid: valid=1, invalid=0 (Required)
:type is_file_valid: bool
:param lost: default lost=0 (optional)
:type lost: bool
:param dataset: default dataset='' (optional), but must have either an lfn or a dataset
:type dataset: basestring
"""
if lost in [1, True, 'True', 'true', '1', 'y', 'yes']:
lost = 1
if is_file_valid in [1, True, 'True', 'true', '1', 'y', 'yes']:
dbsExceptionHandler("dbsException-invalid-input2", dbsExceptionCode["dbsException-invalid-input2"], self.logger.exception,\
"Lost file must set to invalid" )
else: lost = 0
for f in logical_file_name, dataset:
if '*' in f or '%' in f:
dbsExceptionHandler("dbsException-invalid-input2", dbsExceptionCode["dbsException-invalid-input2"], self.logger.exception, "No \
wildcard allow in LFN or dataset for updatefile API." )
try:
self.dbsFile.updateStatus(logical_file_name, is_file_valid, lost, dataset)
except HTTPError as he:
raise he
except Exception as ex:
sError = "DBSWriterModel/updateFile. %s\n. Exception trace: \n %s" \
% (ex, traceback.format_exc())
dbsExceptionHandler('dbsException-server-error', dbsExceptionCode['dbsException-server-error'], self.logger.exception, sError) |
def commit(self, changeset_id: uuid.UUID) -> None:
"""
Commits a given changeset. This merges the given changeset and all
subsequent changesets into the previous changeset giving precedence
to later changesets in case of any conflicting keys.
If this is the base changeset then all changes will be written to
the underlying database and the Journal starts a new recording.
Typically, callers won't have access to the base changeset, because
it is dropped during .reset() which is called in JournalDB().
"""
self._validate_changeset(changeset_id)
journal_data = self.journal.commit_changeset(changeset_id)
if self.journal.is_empty():
# Ensure the journal automatically restarts recording after
# it has been persisted to the underlying db
self.reset()
for key, value in journal_data.items():
try:
if value is DELETED_ENTRY:
del self.wrapped_db[key]
elif value is ERASE_CREATED_ENTRY:
pass
else:
self.wrapped_db[key] = cast(bytes, value)
except Exception:
self._reapply_changeset_to_journal(changeset_id, journal_data)
raise | Commits a given changeset. This merges the given changeset and all
subsequent changesets into the previous changeset giving precedence
to later changesets in case of any conflicting keys.
If this is the base changeset then all changes will be written to
the underlying database and the Journal starts a new recording.
Typically, callers won't have access to the base changeset, because
it is dropped during .reset() which is called in JournalDB(). | Below is the instruction that describes the task:
### Input:
Commits a given changeset. This merges the given changeset and all
subsequent changesets into the previous changeset giving precedence
to later changesets in case of any conflicting keys.
If this is the base changeset then all changes will be written to
the underlying database and the Journal starts a new recording.
Typically, callers won't have access to the base changeset, because
it is dropped during .reset() which is called in JournalDB().
### Response:
def commit(self, changeset_id: uuid.UUID) -> None:
"""
Commits a given changeset. This merges the given changeset and all
subsequent changesets into the previous changeset giving precedence
to later changesets in case of any conflicting keys.
If this is the base changeset then all changes will be written to
the underlying database and the Journal starts a new recording.
Typically, callers won't have access to the base changeset, because
it is dropped during .reset() which is called in JournalDB().
"""
self._validate_changeset(changeset_id)
journal_data = self.journal.commit_changeset(changeset_id)
if self.journal.is_empty():
# Ensure the journal automatically restarts recording after
# it has been persisted to the underlying db
self.reset()
for key, value in journal_data.items():
try:
if value is DELETED_ENTRY:
del self.wrapped_db[key]
elif value is ERASE_CREATED_ENTRY:
pass
else:
self.wrapped_db[key] = cast(bytes, value)
except Exception:
self._reapply_changeset_to_journal(changeset_id, journal_data)
raise |
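A hedged sketch of the surrounding workflow; journal_db.record() returning a changeset id is an assumption about the companion API, which is not shown above:
changeset_id = journal_db.record()   # open a nested changeset
journal_db[b'my-key'] = b'my-value'
journal_db.commit(changeset_id)      # fold the changes into the parent changeset
# only committing the base changeset writes through to wrapped_db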
def load_data(self, mode="train", format="csv"):
"""
Load data from flat data files containing total track information and information about each timestep.
The two sets are combined using merge operations on the Track IDs. Additional member information is gathered
from the appropriate member file.
Args:
mode: "train" or "forecast"
format: file format being used. Default is "csv"
"""
if mode in self.data.keys():
run_dates = pd.DatetimeIndex(start=self.start_dates[mode],
end=self.end_dates[mode],freq="1D")
run_date_str = [d.strftime("%Y%m%d-%H%M") for d in run_dates.date]
print(run_date_str)
all_total_track_files = sorted(glob(getattr(self, mode + "_data_path") +
"*total_" + self.ensemble_name + "*." + format))
all_step_track_files = sorted(glob(getattr(self, mode + "_data_path") +
"*step_" + self.ensemble_name + "*." + format))
total_track_files = []
for track_file in all_total_track_files:
file_date = track_file.split("_")[-1][:-4]
if file_date in run_date_str:
total_track_files.append(track_file)
step_track_files = []
for step_file in all_step_track_files:
file_date = step_file.split("_")[-1][:-4]
if file_date in run_date_str:
step_track_files.append(step_file)
self.data[mode]["total"] = pd.concat(map(pd.read_csv, total_track_files),
ignore_index=True)
self.data[mode]["total"] = self.data[mode]["total"].fillna(value=0)
self.data[mode]["total"] = self.data[mode]["total"].replace([np.inf, -np.inf], 0)
self.data[mode]["step"] = pd.concat(map(pd.read_csv, step_track_files),
ignore_index=True)
self.data[mode]["step"] = self.data[mode]["step"].fillna(value=0)
self.data[mode]["step"] = self.data[mode]["step"].replace([np.inf, -np.inf], 0)
if mode == "forecast":
self.data[mode]["step"] = self.data[mode]["step"].drop_duplicates("Step_ID")
self.data[mode]["member"] = pd.read_csv(self.member_files[mode])
self.data[mode]["combo"] = pd.merge(self.data[mode]["step"],
self.data[mode]["total"],
on=["Track_ID", "Ensemble_Name", "Ensemble_Member", "Run_Date"])
self.data[mode]["combo"] = pd.merge(self.data[mode]["combo"],
self.data[mode]["member"],
on="Ensemble_Member")
self.data[mode]["total_group"] = pd.merge(self.data[mode]["total"],
self.data[mode]["member"],
on="Ensemble_Member") | Load data from flat data files containing total track information and information about each timestep.
The two sets are combined using merge operations on the Track IDs. Additional member information is gathered
from the appropriate member file.
Args:
mode: "train" or "forecast"
format: file format being used. Default is "csv" | Below is the the instruction that describes the task:
### Input:
Load data from flat data files containing total track information and information about each timestep.
The two sets are combined using merge operations on the Track IDs. Additional member information is gathered
from the appropriate member file.
Args:
mode: "train" or "forecast"
format: file format being used. Default is "csv"
### Response:
def load_data(self, mode="train", format="csv"):
"""
Load data from flat data files containing total track information and information about each timestep.
The two sets are combined using merge operations on the Track IDs. Additional member information is gathered
from the appropriate member file.
Args:
mode: "train" or "forecast"
format: file format being used. Default is "csv"
"""
if mode in self.data.keys():
run_dates = pd.DatetimeIndex(start=self.start_dates[mode],
end=self.end_dates[mode],freq="1D")
run_date_str = [d.strftime("%Y%m%d-%H%M") for d in run_dates.date]
print(run_date_str)
all_total_track_files = sorted(glob(getattr(self, mode + "_data_path") +
"*total_" + self.ensemble_name + "*." + format))
all_step_track_files = sorted(glob(getattr(self, mode + "_data_path") +
"*step_" + self.ensemble_name + "*." + format))
total_track_files = []
for track_file in all_total_track_files:
file_date = track_file.split("_")[-1][:-4]
if file_date in run_date_str:
total_track_files.append(track_file)
step_track_files = []
for step_file in all_step_track_files:
file_date = step_file.split("_")[-1][:-4]
if file_date in run_date_str:
step_track_files.append(step_file)
self.data[mode]["total"] = pd.concat(map(pd.read_csv, total_track_files),
ignore_index=True)
self.data[mode]["total"] = self.data[mode]["total"].fillna(value=0)
self.data[mode]["total"] = self.data[mode]["total"].replace([np.inf, -np.inf], 0)
self.data[mode]["step"] = pd.concat(map(pd.read_csv, step_track_files),
ignore_index=True)
self.data[mode]["step"] = self.data[mode]["step"].fillna(value=0)
self.data[mode]["step"] = self.data[mode]["step"].replace([np.inf, -np.inf], 0)
if mode == "forecast":
self.data[mode]["step"] = self.data[mode]["step"].drop_duplicates("Step_ID")
self.data[mode]["member"] = pd.read_csv(self.member_files[mode])
self.data[mode]["combo"] = pd.merge(self.data[mode]["step"],
self.data[mode]["total"],
on=["Track_ID", "Ensemble_Name", "Ensemble_Member", "Run_Date"])
self.data[mode]["combo"] = pd.merge(self.data[mode]["combo"],
self.data[mode]["member"],
on="Ensemble_Member")
self.data[mode]["total_group"] = pd.merge(self.data[mode]["total"],
self.data[mode]["member"],
on="Ensemble_Member") |
def post_event(self, event):
"""
Posts a single event to the Keen IO API. The write key must be set first.
:param event: an Event to upload
"""
url = "{0}/{1}/projects/{2}/events/{3}".format(self.base_url, self.api_version,
self.project_id,
event.event_collection)
headers = utilities.headers(self.write_key)
payload = event.to_json()
response = self.fulfill(HTTPMethods.POST, url, data=payload, headers=headers, timeout=self.post_timeout)
self._error_handling(response) | Posts a single event to the Keen IO API. The write key must be set first.
:param event: an Event to upload | Below is the the instruction that describes the task:
### Input:
Posts a single event to the Keen IO API. The write key must be set first.
:param event: an Event to upload
### Response:
def post_event(self, event):
"""
Posts a single event to the Keen IO API. The write key must be set first.
:param event: an Event to upload
"""
url = "{0}/{1}/projects/{2}/events/{3}".format(self.base_url, self.api_version,
self.project_id,
event.event_collection)
headers = utilities.headers(self.write_key)
payload = event.to_json()
response = self.fulfill(HTTPMethods.POST, url, data=payload, headers=headers, timeout=self.post_timeout)
self._error_handling(response) |
def num_to_var_int(x):
"""
(bitcoin-specific): convert an integer into a variable-length integer
"""
x = int(x)
if x < 253:
return from_int_to_byte(x)
elif x < 65536:
return from_int_to_byte(253) + encode(x, 256, 2)[::-1]
elif x < 4294967296:
return from_int_to_byte(254) + encode(x, 256, 4)[::-1]
else:
return from_int_to_byte(255) + encode(x, 256, 8)[::-1] | (bitcoin-specific): convert an integer into a variable-length integer | Below is the the instruction that describes the task:
### Input:
(bitcoin-specific): convert an integer into a variable-length integer
### Response:
def num_to_var_int(x):
"""
(bitcoin-specific): convert an integer into a variable-length integer
"""
x = int(x)
if x < 253:
return from_int_to_byte(x)
elif x < 65536:
return from_int_to_byte(253) + encode(x, 256, 2)[::-1]
elif x < 4294967296:
return from_int_to_byte(254) + encode(x, 256, 4)[::-1]
else:
return from_int_to_byte(255) + encode(x, 256, 8)[::-1] |
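A quick worked illustration of the byte layout produced above. The helper below is a standalone sketch that re-expresses the same branches with int.to_bytes (an assumption made for readability; the original relies on the library's from_int_to_byte and encode helpers), so the 0xfd/0xfe/0xff markers and little-endian payloads are visible.
def var_int_sketch(x):
    # same thresholds as num_to_var_int above, payloads in little-endian order
    if x < 253:
        return x.to_bytes(1, "little")
    elif x < 65536:
        return b"\xfd" + x.to_bytes(2, "little")
    elif x < 4294967296:
        return b"\xfe" + x.to_bytes(4, "little")
    else:
        return b"\xff" + x.to_bytes(8, "little")
assert var_int_sketch(100) == b"\x64"                      # one raw byte
assert var_int_sketch(515) == b"\xfd\x03\x02"              # 0xfd marker + 2 bytes
assert var_int_sketch(2 ** 20) == b"\xfe\x00\x00\x10\x00"  # 0xfe marker + 4 bytes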
def Scatter(y, win=13, remove_outliers=False):
'''
Return the scatter in ppm based on the median running standard deviation
for a window size of :py:obj:`win` = 13 cadences (for K2, this
is ~6.5 hours, as in VJ14).
:param ndarray y: The array whose CDPP is to be computed
:param int win: The window size in cadences. Default `13`
:param bool remove_outliers: Clip outliers at 5 sigma before computing \
the CDPP? Default `False`
'''
if remove_outliers:
# Remove 5-sigma outliers from data
# smoothed on a 1 day timescale
if len(y) >= 50:
ys = y - Smooth(y, 50)
else:
ys = y
M = np.nanmedian(ys)
MAD = 1.4826 * np.nanmedian(np.abs(ys - M))
out = []
for i, _ in enumerate(y):
if (ys[i] > M + 5 * MAD) or (ys[i] < M - 5 * MAD):
out.append(i)
out = np.array(out, dtype=int)
y = np.delete(y, out)
if len(y):
return 1.e6 * np.nanmedian([np.std(yi) / np.sqrt(win)
for yi in Chunks(y, win, all=True)])
else:
return np.nan | Return the scatter in ppm based on the median running standard deviation
for a window size of :py:obj:`win` = 13 cadences (for K2, this
is ~6.5 hours, as in VJ14).
:param ndarray y: The array whose CDPP is to be computed
:param int win: The window size in cadences. Default `13`
:param bool remove_outliers: Clip outliers at 5 sigma before computing \
the CDPP? Default `False` | Below is the the instruction that describes the task:
### Input:
Return the scatter in ppm based on the median running standard deviation
for a window size of :py:obj:`win` = 13 cadences (for K2, this
is ~6.5 hours, as in VJ14).
:param ndarray y: The array whose CDPP is to be computed
:param int win: The window size in cadences. Default `13`
:param bool remove_outliers: Clip outliers at 5 sigma before computing \
the CDPP? Default `False`
### Response:
def Scatter(y, win=13, remove_outliers=False):
'''
Return the scatter in ppm based on the median running standard deviation
for a window size of :py:obj:`win` = 13 cadences (for K2, this
is ~6.5 hours, as in VJ14).
:param ndarray y: The array whose CDPP is to be computed
:param int win: The window size in cadences. Default `13`
:param bool remove_outliers: Clip outliers at 5 sigma before computing \
the CDPP? Default `False`
'''
if remove_outliers:
# Remove 5-sigma outliers from data
# smoothed on a 1 day timescale
if len(y) >= 50:
ys = y - Smooth(y, 50)
else:
ys = y
M = np.nanmedian(ys)
MAD = 1.4826 * np.nanmedian(np.abs(ys - M))
out = []
for i, _ in enumerate(y):
if (ys[i] > M + 5 * MAD) or (ys[i] < M - 5 * MAD):
out.append(i)
out = np.array(out, dtype=int)
y = np.delete(y, out)
if len(y):
return 1.e6 * np.nanmedian([np.std(yi) / np.sqrt(win)
for yi in Chunks(y, win, all=True)])
else:
return np.nan |
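A hedged usage sketch, assuming Scatter and its Chunks helper are importable from this module and numpy is available: for a flat light curve with 1000 ppm white noise, the 13-cadence scatter should land near 1000 / sqrt(13), i.e. roughly 277 ppm.
import numpy as np
np.random.seed(0)
flux = 1.0 + 1e-3 * np.random.randn(2000)  # normalized flux, 1000 ppm white noise
print(Scatter(flux))                       # default win=13 -> approximately 277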
def _check_samples_line(klass, arr):
"""Peform additional check on samples line"""
if len(arr) <= len(REQUIRE_NO_SAMPLE_HEADER):
if tuple(arr) != REQUIRE_NO_SAMPLE_HEADER:
raise exceptions.IncorrectVCFFormat(
"Sample header line indicates no sample but does not "
"equal required prefix {}".format("\t".join(REQUIRE_NO_SAMPLE_HEADER))
)
elif tuple(arr[: len(REQUIRE_SAMPLE_HEADER)]) != REQUIRE_SAMPLE_HEADER:
raise exceptions.IncorrectVCFFormat(
'Sample header line (starting with "#CHROM") does not '
"start with required prefix {}".format("\t".join(REQUIRE_SAMPLE_HEADER))
) | Perform additional check on samples line | Below is the the instruction that describes the task:
### Input:
Perform additional check on samples line
### Response:
def _check_samples_line(klass, arr):
"""Peform additional check on samples line"""
if len(arr) <= len(REQUIRE_NO_SAMPLE_HEADER):
if tuple(arr) != REQUIRE_NO_SAMPLE_HEADER:
raise exceptions.IncorrectVCFFormat(
"Sample header line indicates no sample but does not "
"equal required prefix {}".format("\t".join(REQUIRE_NO_SAMPLE_HEADER))
)
elif tuple(arr[: len(REQUIRE_SAMPLE_HEADER)]) != REQUIRE_SAMPLE_HEADER:
raise exceptions.IncorrectVCFFormat(
'Sample header line (starting with "#CHROM") does not '
"start with required prefix {}".format("\t".join(REQUIRE_SAMPLE_HEADER))
) |
def ListClientsForKeywords(self, keywords, start_time=None):
"""Lists the clients associated with keywords."""
res = {kw: [] for kw in keywords}
for kw in keywords:
for client_id, timestamp in iteritems(self.keywords.get(kw, {})):
if start_time is not None and timestamp < start_time:
continue
res[kw].append(client_id)
return res | Lists the clients associated with keywords. | Below is the the instruction that describes the task:
### Input:
Lists the clients associated with keywords.
### Response:
def ListClientsForKeywords(self, keywords, start_time=None):
"""Lists the clients associated with keywords."""
res = {kw: [] for kw in keywords}
for kw in keywords:
for client_id, timestamp in iteritems(self.keywords.get(kw, {})):
if start_time is not None and timestamp < start_time:
continue
res[kw].append(client_id)
return res |
def getFrontmostApp(cls):
"""Get the current frontmost application.
Raise a ValueError exception if no GUI applications are found.
"""
# Refresh the runningApplications list
apps = cls._getRunningApps()
for app in apps:
pid = app.processIdentifier()
ref = cls.getAppRefByPid(pid)
try:
if ref.AXFrontmost:
return ref
except (_a11y.ErrorUnsupported,
_a11y.ErrorCannotComplete,
_a11y.ErrorAPIDisabled,
_a11y.ErrorNotImplemented):
# Some applications do not have an explicit GUI
# and so will not have an AXFrontmost attribute
# Trying to read attributes from Google Chrome Helper returns
# ErrorAPIDisabled for some reason - opened radar bug 12837995
pass
raise ValueError('No GUI application found.') | Get the current frontmost application.
Raise a ValueError exception if no GUI applications are found. | Below is the the instruction that describes the task:
### Input:
Get the current frontmost application.
Raise a ValueError exception if no GUI applications are found.
### Response:
def getFrontmostApp(cls):
"""Get the current frontmost application.
Raise a ValueError exception if no GUI applications are found.
"""
# Refresh the runningApplications list
apps = cls._getRunningApps()
for app in apps:
pid = app.processIdentifier()
ref = cls.getAppRefByPid(pid)
try:
if ref.AXFrontmost:
return ref
except (_a11y.ErrorUnsupported,
_a11y.ErrorCannotComplete,
_a11y.ErrorAPIDisabled,
_a11y.ErrorNotImplemented):
# Some applications do not have an explicit GUI
# and so will not have an AXFrontmost attribute
# Trying to read attributes from Google Chrome Helper returns
# ErrorAPIDisabled for some reason - opened radar bug 12837995
pass
raise ValueError('No GUI application found.') |
def validate(self, columns=None):
"""
Validates the current record object to make sure it is ok to commit to the database. If
the optional override dictionary is passed in, then it will use the given values vs. the one
stored with this record object which can be useful to check to see if the record will be valid before
it is committed.
:param overrides | <dict>
:return <bool>
"""
schema = self.schema()
if not columns:
ignore_flags = orb.Column.Flags.Virtual | orb.Column.Flags.ReadOnly
columns = schema.columns(flags=~ignore_flags).values()
use_indexes = True
else:
use_indexes = False
# validate the column values
values = self.values(key='column', columns=columns)
for col, value in values.items():
if not col.validate(value):
return False
# validate the index values
if use_indexes:
for index in self.schema().indexes().values():
if not index.validate(self, values):
return False
return True | Validates the current record object to make sure it is ok to commit to the database. If
the optional override dictionary is passed in, then it will use the given values vs. the one
stored with this record object which can be useful to check to see if the record will be valid before
it is committed.
:param overrides | <dict>
:return <bool> | Below is the the instruction that describes the task:
### Input:
Validates the current record object to make sure it is ok to commit to the database. If
the optional override dictionary is passed in, then it will use the given values vs. the one
stored with this record object which can be useful to check to see if the record will be valid before
it is committed.
:param overrides | <dict>
:return <bool>
### Response:
def validate(self, columns=None):
"""
Validates the current record object to make sure it is ok to commit to the database. If
the optional override dictionary is passed in, then it will use the given values vs. the one
stored with this record object which can be useful to check to see if the record will be valid before
it is committed.
:param overrides | <dict>
:return <bool>
"""
schema = self.schema()
if not columns:
ignore_flags = orb.Column.Flags.Virtual | orb.Column.Flags.ReadOnly
columns = schema.columns(flags=~ignore_flags).values()
use_indexes = True
else:
use_indexes = False
# validate the column values
values = self.values(key='column', columns=columns)
for col, value in values.items():
if not col.validate(value):
return False
# validate the index values
if use_indexes:
for index in self.schema().indexes().values():
if not index.validate(self, values):
return False
return True |
def _parse_cpe_name(cpe):
'''
Parse CPE_NAME data from the os-release
Info: https://csrc.nist.gov/projects/security-content-automation-protocol/scap-specifications/cpe
:param cpe:
:return:
'''
part = {
'o': 'operating system',
'h': 'hardware',
'a': 'application',
}
ret = {}
cpe = (cpe or '').split(':')
if len(cpe) > 4 and cpe[0] == 'cpe':
if cpe[1].startswith('/'): # WFN to URI
ret['vendor'], ret['product'], ret['version'] = cpe[2:5]
ret['phase'] = cpe[5] if len(cpe) > 5 else None
ret['part'] = part.get(cpe[1][1:])
elif len(cpe) == 13 and cpe[1] == '2.3': # WFN to a string
ret['vendor'], ret['product'], ret['version'], ret['phase'] = [x if x != '*' else None for x in cpe[3:7]]
ret['part'] = part.get(cpe[2])
return ret | Parse CPE_NAME data from the os-release
Info: https://csrc.nist.gov/projects/security-content-automation-protocol/scap-specifications/cpe
:param cpe:
:return: | Below is the the instruction that describes the task:
### Input:
Parse CPE_NAME data from the os-release
Info: https://csrc.nist.gov/projects/security-content-automation-protocol/scap-specifications/cpe
:param cpe:
:return:
### Response:
def _parse_cpe_name(cpe):
'''
Parse CPE_NAME data from the os-release
Info: https://csrc.nist.gov/projects/security-content-automation-protocol/scap-specifications/cpe
:param cpe:
:return:
'''
part = {
'o': 'operating system',
'h': 'hardware',
'a': 'application',
}
ret = {}
cpe = (cpe or '').split(':')
if len(cpe) > 4 and cpe[0] == 'cpe':
if cpe[1].startswith('/'): # WFN to URI
ret['vendor'], ret['product'], ret['version'] = cpe[2:5]
ret['phase'] = cpe[5] if len(cpe) > 5 else None
ret['part'] = part.get(cpe[1][1:])
elif len(cpe) == 13 and cpe[1] == '2.3': # WFN to a string
ret['vendor'], ret['product'], ret['version'], ret['phase'] = [x if x != '*' else None for x in cpe[3:7]]
ret['part'] = part.get(cpe[2])
return ret |
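A worked illustration of the two CPE flavours handled above; the CPE strings are hypothetical examples, not values taken from the source.
print(_parse_cpe_name('cpe:/o:opensuse:leap:15.0'))
# URI binding -> vendor 'opensuse', product 'leap', version '15.0',
# phase None, part 'operating system'
print(_parse_cpe_name('cpe:2.3:o:suse:sles:12:sp3:*:*:*:*:*:*'))
# CPE 2.3 string (13 fields) -> vendor 'suse', product 'sles', version '12',
# phase 'sp3', part 'operating system'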
def _filter_defs_at_call_sites(self, defs):
"""
If we are not tracing into the functions that are called in a real execution, we should properly filter the defs
to account for the behavior of the skipped function at this call site.
This function is a WIP. See TODOs inside.
:param defs:
:return:
"""
# TODO: make definition killing architecture independent and calling convention independent
# TODO: use information from a calling convention analysis
filtered_defs = LiveDefinitions()
for variable, locs in defs.items():
if isinstance(variable, SimRegisterVariable):
if self.project.arch.name == 'X86':
if variable.reg in (self.project.arch.registers['eax'][0],
self.project.arch.registers['ecx'][0],
self.project.arch.registers['edx'][0]):
continue
filtered_defs.add_defs(variable, locs)
return filtered_defs | If we are not tracing into the functions that are called in a real execution, we should properly filter the defs
to account for the behavior of the skipped function at this call site.
This function is a WIP. See TODOs inside.
:param defs:
:return: | Below is the the instruction that describes the task:
### Input:
If we are not tracing into the functions that are called in a real execution, we should properly filter the defs
to account for the behavior of the skipped function at this call site.
This function is a WIP. See TODOs inside.
:param defs:
:return:
### Response:
def _filter_defs_at_call_sites(self, defs):
"""
If we are not tracing into the functions that are called in a real execution, we should properly filter the defs
to account for the behavior of the skipped function at this call site.
This function is a WIP. See TODOs inside.
:param defs:
:return:
"""
# TODO: make definition killing architecture independent and calling convention independent
# TODO: use information from a calling convention analysis
filtered_defs = LiveDefinitions()
for variable, locs in defs.items():
if isinstance(variable, SimRegisterVariable):
if self.project.arch.name == 'X86':
if variable.reg in (self.project.arch.registers['eax'][0],
self.project.arch.registers['ecx'][0],
self.project.arch.registers['edx'][0]):
continue
filtered_defs.add_defs(variable, locs)
return filtered_defs |
def build_attrs(self, *args, **kwargs):
"Helper function for building an attribute dictionary."
self.attrs = self.widget.build_attrs(*args, **kwargs)
return self.attrs | Helper function for building an attribute dictionary. | Below is the the instruction that describes the task:
### Input:
Helper function for building an attribute dictionary.
### Response:
def build_attrs(self, *args, **kwargs):
"Helper function for building an attribute dictionary."
self.attrs = self.widget.build_attrs(*args, **kwargs)
return self.attrs |
def amari_alpha(logu, alpha=1., self_normalized=False, name=None):
"""The Amari-alpha Csiszar-function in log-space.
A Csiszar-function is a member of,
```none
F = { f:R_+ to R : f convex }.
```
When `self_normalized = True`, the Amari-alpha Csiszar-function is:
```none
f(u) = { -log(u) + (u - 1), alpha = 0
{ u log(u) - (u - 1), alpha = 1
{ [(u**alpha - 1) - alpha (u - 1)] / (alpha (alpha - 1)), otherwise
```
When `self_normalized = False` the `(u - 1)` terms are omitted.
Warning: when `alpha != 0` and/or `self_normalized = True` this function makes
non-log-space calculations and may therefore be numerically unstable for
`|logu| >> 0`.
For more information, see:
A. Cichocki and S. Amari. "Families of Alpha- Beta- and Gamma-Divergences:
Flexible and Robust Measures of Similarities." Entropy, vol. 12, no. 6, pp.
1532-1568, 2010.
Args:
logu: `float`-like `Tensor` representing `log(u)` from above.
alpha: `float`-like Python scalar. (See Mathematical Details for meaning.)
self_normalized: Python `bool` indicating whether `f'(u=1)=0`. When
`f'(u=1)=0` the implied Csiszar f-Divergence remains non-negative even
when `p, q` are unnormalized measures.
name: Python `str` name prefixed to Ops created by this function.
Returns:
amari_alpha_of_u: `float`-like `Tensor` of the Csiszar-function evaluated
at `u = exp(logu)`.
Raises:
TypeError: if `alpha` is `None` or a `Tensor`.
TypeError: if `self_normalized` is `None` or a `Tensor`.
"""
with tf.compat.v1.name_scope(name, "amari_alpha", [logu]):
if alpha is None or tf.is_tensor(alpha):
raise TypeError("`alpha` cannot be `None` or `Tensor` type.")
if (self_normalized is None or tf.is_tensor(self_normalized)):
raise TypeError("`self_normalized` cannot be `None` or `Tensor` type.")
logu = tf.convert_to_tensor(value=logu, name="logu")
if alpha == 0.:
f = -logu
elif alpha == 1.:
f = tf.exp(logu) * logu
else:
f = tf.math.expm1(alpha * logu) / (alpha * (alpha - 1.))
if not self_normalized:
return f
if alpha == 0.:
return f + tf.math.expm1(logu)
elif alpha == 1.:
return f - tf.math.expm1(logu)
else:
return f - tf.math.expm1(logu) / (alpha - 1.) | The Amari-alpha Csiszar-function in log-space.
A Csiszar-function is a member of,
```none
F = { f:R_+ to R : f convex }.
```
When `self_normalized = True`, the Amari-alpha Csiszar-function is:
```none
f(u) = { -log(u) + (u - 1), alpha = 0
{ u log(u) - (u - 1), alpha = 1
{ [(u**alpha - 1) - alpha (u - 1)] / (alpha (alpha - 1)), otherwise
```
When `self_normalized = False` the `(u - 1)` terms are omitted.
Warning: when `alpha != 0` and/or `self_normalized = True` this function makes
non-log-space calculations and may therefore be numerically unstable for
`|logu| >> 0`.
For more information, see:
A. Cichocki and S. Amari. "Families of Alpha- Beta- and Gamma-Divergences:
Flexible and Robust Measures of Similarities." Entropy, vol. 12, no. 6, pp.
1532-1568, 2010.
Args:
logu: `float`-like `Tensor` representing `log(u)` from above.
alpha: `float`-like Python scalar. (See Mathematical Details for meaning.)
self_normalized: Python `bool` indicating whether `f'(u=1)=0`. When
`f'(u=1)=0` the implied Csiszar f-Divergence remains non-negative even
when `p, q` are unnormalized measures.
name: Python `str` name prefixed to Ops created by this function.
Returns:
amari_alpha_of_u: `float`-like `Tensor` of the Csiszar-function evaluated
at `u = exp(logu)`.
Raises:
TypeError: if `alpha` is `None` or a `Tensor`.
TypeError: if `self_normalized` is `None` or a `Tensor`. | Below is the the instruction that describes the task:
### Input:
The Amari-alpha Csiszar-function in log-space.
A Csiszar-function is a member of,
```none
F = { f:R_+ to R : f convex }.
```
When `self_normalized = True`, the Amari-alpha Csiszar-function is:
```none
f(u) = { -log(u) + (u - 1), alpha = 0
{ u log(u) - (u - 1), alpha = 1
{ [(u**alpha - 1) - alpha (u - 1)] / (alpha (alpha - 1)), otherwise
```
When `self_normalized = False` the `(u - 1)` terms are omitted.
Warning: when `alpha != 0` and/or `self_normalized = True` this function makes
non-log-space calculations and may therefore be numerically unstable for
`|logu| >> 0`.
For more information, see:
A. Cichocki and S. Amari. "Families of Alpha- Beta- and Gamma-Divergences:
Flexible and Robust Measures of Similarities." Entropy, vol. 12, no. 6, pp.
1532-1568, 2010.
Args:
logu: `float`-like `Tensor` representing `log(u)` from above.
alpha: `float`-like Python scalar. (See Mathematical Details for meaning.)
self_normalized: Python `bool` indicating whether `f'(u=1)=0`. When
`f'(u=1)=0` the implied Csiszar f-Divergence remains non-negative even
when `p, q` are unnormalized measures.
name: Python `str` name prefixed to Ops created by this function.
Returns:
amari_alpha_of_u: `float`-like `Tensor` of the Csiszar-function evaluated
at `u = exp(logu)`.
Raises:
TypeError: if `alpha` is `None` or a `Tensor`.
TypeError: if `self_normalized` is `None` or a `Tensor`.
### Response:
def amari_alpha(logu, alpha=1., self_normalized=False, name=None):
"""The Amari-alpha Csiszar-function in log-space.
A Csiszar-function is a member of,
```none
F = { f:R_+ to R : f convex }.
```
When `self_normalized = True`, the Amari-alpha Csiszar-function is:
```none
f(u) = { -log(u) + (u - 1), alpha = 0
{ u log(u) - (u - 1), alpha = 1
{ [(u**alpha - 1) - alpha (u - 1)] / (alpha (alpha - 1)), otherwise
```
When `self_normalized = False` the `(u - 1)` terms are omitted.
Warning: when `alpha != 0` and/or `self_normalized = True` this function makes
non-log-space calculations and may therefore be numerically unstable for
`|logu| >> 0`.
For more information, see:
A. Cichocki and S. Amari. "Families of Alpha- Beta- and Gamma-Divergences:
Flexible and Robust Measures of Similarities." Entropy, vol. 12, no. 6, pp.
1532-1568, 2010.
Args:
logu: `float`-like `Tensor` representing `log(u)` from above.
alpha: `float`-like Python scalar. (See Mathematical Details for meaning.)
self_normalized: Python `bool` indicating whether `f'(u=1)=0`. When
`f'(u=1)=0` the implied Csiszar f-Divergence remains non-negative even
when `p, q` are unnormalized measures.
name: Python `str` name prefixed to Ops created by this function.
Returns:
amari_alpha_of_u: `float`-like `Tensor` of the Csiszar-function evaluated
at `u = exp(logu)`.
Raises:
TypeError: if `alpha` is `None` or a `Tensor`.
TypeError: if `self_normalized` is `None` or a `Tensor`.
"""
with tf.compat.v1.name_scope(name, "amari_alpha", [logu]):
if alpha is None or tf.is_tensor(alpha):
raise TypeError("`alpha` cannot be `None` or `Tensor` type.")
if (self_normalized is None or tf.is_tensor(self_normalized)):
raise TypeError("`self_normalized` cannot be `None` or `Tensor` type.")
logu = tf.convert_to_tensor(value=logu, name="logu")
if alpha == 0.:
f = -logu
elif alpha == 1.:
f = tf.exp(logu) * logu
else:
f = tf.math.expm1(alpha * logu) / (alpha * (alpha - 1.))
if not self_normalized:
return f
if alpha == 0.:
return f + tf.math.expm1(logu)
elif alpha == 1.:
return f - tf.math.expm1(logu)
else:
return f - tf.math.expm1(logu) / (alpha - 1.) |
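A minimal numeric check, assuming TensorFlow 2.x eager execution: with alpha=1 and self_normalized=False the function reduces to the KL integrand u * log(u).
import numpy as np
import tensorflow as tf
u = np.array([0.5, 1.0, 2.0])
f = amari_alpha(tf.constant(np.log(u)), alpha=1.)
print(f.numpy())   # approximately [-0.3466  0.  1.3863] == u * log(u)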
def get_job(self, job_resource_name: str) -> Dict:
"""Returns metadata about a previously created job.
See get_job_result if you want the results of the job and not just
metadata about the job.
Params:
job_resource_name: A string of the form
`projects/project_id/programs/program_id/jobs/job_id`.
Returns:
A dictionary containing the metadata.
"""
return self.service.projects().programs().jobs().get(
name=job_resource_name).execute() | Returns metadata about a previously created job.
See get_job_result if you want the results of the job and not just
metadata about the job.
Params:
job_resource_name: A string of the form
`projects/project_id/programs/program_id/jobs/job_id`.
Returns:
A dictionary containing the metadata. | Below is the the instruction that describes the task:
### Input:
Returns metadata about a previously created job.
See get_job_result if you want the results of the job and not just
metadata about the job.
Params:
job_resource_name: A string of the form
`projects/project_id/programs/program_id/jobs/job_id`.
Returns:
A dictionary containing the metadata.
### Response:
def get_job(self, job_resource_name: str) -> Dict:
"""Returns metadata about a previously created job.
See get_job_result if you want the results of the job and not just
metadata about the job.
Params:
job_resource_name: A string of the form
`projects/project_id/programs/program_id/jobs/job_id`.
Returns:
A dictionary containing the metadata.
"""
return self.service.projects().programs().jobs().get(
name=job_resource_name).execute() |
def register_plugin(self):
"""Register plugin in Spyder's main window"""
self.breakpoints.edit_goto.connect(self.main.editor.load)
#self.redirect_stdio.connect(self.main.redirect_internalshell_stdio)
self.breakpoints.clear_all_breakpoints.connect(
self.main.editor.clear_all_breakpoints)
self.breakpoints.clear_breakpoint.connect(
self.main.editor.clear_breakpoint)
self.main.editor.breakpoints_saved.connect(self.breakpoints.set_data)
self.breakpoints.set_or_edit_conditional_breakpoint.connect(
self.main.editor.set_or_edit_conditional_breakpoint)
self.main.add_dockwidget(self)
list_action = create_action(self, _("List breakpoints"),
triggered=self.show)
list_action.setEnabled(True)
pos = self.main.debug_menu_actions.index('list_breakpoints')
self.main.debug_menu_actions.insert(pos, list_action)
self.main.editor.pythonfile_dependent_actions += [list_action] | Register plugin in Spyder's main window | Below is the the instruction that describes the task:
### Input:
Register plugin in Spyder's main window
### Response:
def register_plugin(self):
"""Register plugin in Spyder's main window"""
self.breakpoints.edit_goto.connect(self.main.editor.load)
#self.redirect_stdio.connect(self.main.redirect_internalshell_stdio)
self.breakpoints.clear_all_breakpoints.connect(
self.main.editor.clear_all_breakpoints)
self.breakpoints.clear_breakpoint.connect(
self.main.editor.clear_breakpoint)
self.main.editor.breakpoints_saved.connect(self.breakpoints.set_data)
self.breakpoints.set_or_edit_conditional_breakpoint.connect(
self.main.editor.set_or_edit_conditional_breakpoint)
self.main.add_dockwidget(self)
list_action = create_action(self, _("List breakpoints"),
triggered=self.show)
list_action.setEnabled(True)
pos = self.main.debug_menu_actions.index('list_breakpoints')
self.main.debug_menu_actions.insert(pos, list_action)
self.main.editor.pythonfile_dependent_actions += [list_action] |
def _squeeze(x, axis):
"""A version of squeeze that works with dynamic axis."""
x = tf.convert_to_tensor(value=x, name='x')
if axis is None:
return tf.squeeze(x, axis=None)
axis = tf.convert_to_tensor(value=axis, name='axis', dtype=tf.int32)
axis += tf.zeros([1], dtype=axis.dtype) # Make axis at least 1d.
keep_axis, _ = tf.compat.v1.setdiff1d(tf.range(0, tf.rank(x)), axis)
return tf.reshape(x, tf.gather(tf.shape(input=x), keep_axis)) | A version of squeeze that works with dynamic axis. | Below is the the instruction that describes the task:
### Input:
A version of squeeze that works with dynamic axis.
### Response:
def _squeeze(x, axis):
"""A version of squeeze that works with dynamic axis."""
x = tf.convert_to_tensor(value=x, name='x')
if axis is None:
return tf.squeeze(x, axis=None)
axis = tf.convert_to_tensor(value=axis, name='axis', dtype=tf.int32)
axis += tf.zeros([1], dtype=axis.dtype) # Make axis at least 1d.
keep_axis, _ = tf.compat.v1.setdiff1d(tf.range(0, tf.rank(x)), axis)
return tf.reshape(x, tf.gather(tf.shape(input=x), keep_axis)) |
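A small usage sketch, assuming TensorFlow 2.x eager execution, showing the dynamic-axis case the docstring refers to.
import tensorflow as tf
x = tf.zeros([3, 1, 4])
axis = tf.constant(1)             # axis value known only at run time
print(_squeeze(x, axis).shape)    # (3, 4)
print(_squeeze(x, None).shape)    # (3, 4) -- falls back to plain tf.squeeze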
def imports(modules, forgive=False):
"""
Should be used as a decorator to *attach* import statements to function
definitions. These imports are added to the global (i.e. module-level of
the decorated function) namespace.
Two forms of import statements are supported (in the following examples
``foo``, ``bar``, ``oof, and ``rab`` are modules not classes or functions)::
import foo, bar # -> @imports(['foo', 'bar'])
import foo.oof as oof
import bar.rab as rab # -> @imports(['foo.oof', 'bar.rab'])
It provides support for alternatives::
try:
import foo
except ImportError:
import bar
which is expressed as::
@imports(['foo,bar'])
or alternatively::
try:
import foo.oof as oof
except ImportError:
import bar.rab as oof
becomes::
@imports(['foo.oof,bar.rab'])
This import is available in the body of the function as ``oof`` All needed
imports should be attached for every function (even if two function are in
the same module and have the same ``globals``)
Arguments:
- modules (``list``) A list of modules in the following forms
``['foo', 'bar', ..., 'baz']`` or
``['foo.oof', 'bar.rab', ..., 'baz.zab']``
- forgive (``bool``) [default: ``False``] If ``True`` will not raise
`ImportError``
"""
def wrap(f):
if modules:
# attach import to function
setattr(f, 'imports', modules)
for alternatives in modules:
# alternatives are comma separated
alternatives = alternatives.split(',')
# we import the part of the import X.Y.Z -> Z
mod_name = alternatives[0].split('.')[-1]
for mod in alternatives:
mod = mod.strip().split('.')
try:
if len(mod) == 1:
module = __import__(mod[0])
else:
module = getattr(__import__('.'.join(mod[:-1]), \
fromlist=[mod[-1]]), mod[-1])
f.func_globals[mod_name] = module
break # import only one
except ImportError:
pass
else:
if forgive: # no break -> no import
warnings.warn('Failed to import %s' % alternatives)
else:
raise ImportError('Failed to import %s' % alternatives)
return f
return wrap | Should be used as a decorator to *attach* import statements to function
definitions. These imports are added to the global (i.e. module-level of
the decorated function) namespace.
Two forms of import statements are supported (in the following examples
``foo``, ``bar``, ``oof, and ``rab`` are modules not classes or functions)::
import foo, bar # -> @imports(['foo', 'bar'])
import foo.oof as oof
import bar.rab as rab # -> @imports(['foo.oof', 'bar.rab'])
It provides support for alternatives::
try:
import foo
except ImportError:
import bar
which is expressed as::
@imports(['foo,bar'])
or alternatively::
try:
import foo.oof as oof
except ImportError:
import bar.rab as oof
becomes::
@imports(['foo.oof,bar.rab'])
This import is available in the body of the function as ``oof`` All needed
imports should be attached for every function (even if two function are in
the same module and have the same ``globals``)
Arguments:
- modules (``list``) A list of modules in the following forms
``['foo', 'bar', ..., 'baz']`` or
``['foo.oof', 'bar.rab', ..., 'baz.zab']``
- forgive (``bool``) [default: ``False``] If ``True`` will not raise
`ImportError`` | Below is the the instruction that describes the task:
### Input:
Should be used as a decorator to *attach* import statements to function
definitions. These imports are added to the global (i.e. module-level of
the decorated function) namespace.
Two forms of import statements are supported (in the following examples
``foo``, ``bar``, ``oof, and ``rab`` are modules not classes or functions)::
import foo, bar # -> @imports(['foo', 'bar'])
import foo.oof as oof
import bar.rab as rab # -> @imports(['foo.oof', 'bar.rab'])
It provides support for alternatives::
try:
import foo
except ImportError:
import bar
which is expressed as::
@imports(['foo,bar'])
or alternatively::
try:
import foo.oof as oof
except ImportError:
import bar.rab as oof
becomes::
@imports(['foo.oof,bar.rab'])
This import is available in the body of the function as ``oof`` All needed
imports should be attached for every function (even if two function are in
the same module and have the same ``globals``)
Arguments:
- modules (``list``) A list of modules in the following forms
``['foo', 'bar', ..., 'baz']`` or
``['foo.oof', 'bar.rab', ..., 'baz.zab']``
- forgive (``bool``) [default: ``False``] If ``True`` will not raise
`ImportError``
### Response:
def imports(modules, forgive=False):
"""
Should be used as a decorator to *attach* import statements to function
definitions. These imports are added to the global (i.e. module-level of
the decorated function) namespace.
Two forms of import statements are supported (in the following examples
``foo``, ``bar``, ``oof, and ``rab`` are modules not classes or functions)::
import foo, bar # -> @imports(['foo', 'bar'])
import foo.oof as oof
import bar.rab as rab # -> @imports(['foo.oof', 'bar.rab'])
It provides support for alternatives::
try:
import foo
except ImportError:
import bar
which is expressed as::
@imports(['foo,bar'])
or alternatively::
try:
import foo.oof as oof
except ImportError:
import bar.rab as oof
becomes::
@imports(['foo.oof,bar.rab'])
This import is available in the body of the function as ``oof`` All needed
imports should be attached for every function (even if two function are in
the same module and have the same ``globals``)
Arguments:
- modules (``list``) A list of modules in the following forms
``['foo', 'bar', ..., 'baz']`` or
``['foo.oof', 'bar.rab', ..., 'baz.zab']``
- forgive (``bool``) [default: ``False``] If ``True`` will not raise
`ImportError``
"""
def wrap(f):
if modules:
# attach import to function
setattr(f, 'imports', modules)
for alternatives in modules:
# alternatives are comma separated
alternatives = alternatives.split(',')
# we import the part of the import X.Y.Z -> Z
mod_name = alternatives[0].split('.')[-1]
for mod in alternatives:
mod = mod.strip().split('.')
try:
if len(mod) == 1:
module = __import__(mod[0])
else:
module = getattr(__import__('.'.join(mod[:-1]), \
fromlist=[mod[-1]]), mod[-1])
f.func_globals[mod_name] = module
break # import only one
except ImportError:
pass
else:
if forgive: # no break -> no import
warnings.warn('Failed to import %s' % alternatives)
else:
raise ImportError('Failed to import %s' % alternatives)
return f
return wrap |
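A hypothetical usage sketch of the decorator above. Note that the decorator body writes to the Python 2 attribute f.func_globals; the same idea under Python 3 would target f.__globals__.
@imports(['json', 'urlparse,urllib.parse'])
def hostname_as_json(url):
    # 'json' is imported directly; the comma entry binds whichever of
    # urlparse / urllib.parse imports first to the name 'urlparse'.
    return json.dumps(urlparse.urlparse(url).netloc)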
def generate_set_partitions(set_):
"""Generate all of the partitions of a set.
This is a helper function that utilizes the restricted growth strings from
:py:func:`generate_set_partition_strings`. The partitions are returned in
lexicographic order.
Parameters
----------
set_ : :py:class:`Array` or other Array-like, (`m`,)
The set to find the partitions of.
Returns
-------
partitions : list of lists of :py:class:`Array`
The number of elements in the outer list is equal to the number of
partitions, which is the len(`m`)^th Bell number. Each of the inner lists
corresponds to a single possible partition. The length of an inner list
is therefore equal to the number of blocks. Each of the arrays in an
inner list is hence a block.
"""
set_ = scipy.asarray(set_)
strings = generate_set_partition_strings(len(set_))
partitions = []
for string in strings:
blocks = []
for block_num in scipy.unique(string):
blocks.append(set_[string == block_num])
partitions.append(blocks)
return partitions | Generate all of the partitions of a set.
This is a helper function that utilizes the restricted growth strings from
:py:func:`generate_set_partition_strings`. The partitions are returned in
lexicographic order.
Parameters
----------
set_ : :py:class:`Array` or other Array-like, (`m`,)
The set to find the partitions of.
Returns
-------
partitions : list of lists of :py:class:`Array`
The number of elements in the outer list is equal to the number of
partitions, which is the len(`m`)^th Bell number. Each of the inner lists
corresponds to a single possible partition. The length of an inner list
is therefore equal to the number of blocks. Each of the arrays in an
inner list is hence a block. | Below is the the instruction that describes the task:
### Input:
Generate all of the partitions of a set.
This is a helper function that utilizes the restricted growth strings from
:py:func:`generate_set_partition_strings`. The partitions are returned in
lexicographic order.
Parameters
----------
set_ : :py:class:`Array` or other Array-like, (`m`,)
The set to find the partitions of.
Returns
-------
partitions : list of lists of :py:class:`Array`
The number of elements in the outer list is equal to the number of
partitions, which is the len(`m`)^th Bell number. Each of the inner lists
corresponds to a single possible partition. The length of an inner list
is therefore equal to the number of blocks. Each of the arrays in an
inner list is hence a block.
### Response:
def generate_set_partitions(set_):
"""Generate all of the partitions of a set.
This is a helper function that utilizes the restricted growth strings from
:py:func:`generate_set_partition_strings`. The partitions are returned in
lexicographic order.
Parameters
----------
set_ : :py:class:`Array` or other Array-like, (`m`,)
The set to find the partitions of.
Returns
-------
partitions : list of lists of :py:class:`Array`
The number of elements in the outer list is equal to the number of
partitions, which is the len(`m`)^th Bell number. Each of the inner lists
corresponds to a single possible partition. The length of an inner list
is therefore equal to the number of blocks. Each of the arrays in an
inner list is hence a block.
"""
set_ = scipy.asarray(set_)
strings = generate_set_partition_strings(len(set_))
partitions = []
for string in strings:
blocks = []
for block_num in scipy.unique(string):
blocks.append(set_[string == block_num])
partitions.append(blocks)
return partitions |
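An illustrative call, assuming scipy is importable as in the module above: a 3-element set yields Bell(3) = 5 partitions, listed here in the order of the underlying restricted growth strings.
for blocks in generate_set_partitions([1, 2, 3]):
    print([list(b) for b in blocks])
# [[1, 2, 3]]
# [[1, 2], [3]]
# [[1, 3], [2]]
# [[1], [2, 3]]
# [[1], [2], [3]]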
def pair(seeders, delegator_factory, *args, **kwargs):
"""
The basic pair producer.
:return:
a (seeder, delegator_factory(\*args, \*\*kwargs)) tuple.
:param seeders:
If it is a seeder function or a list of one seeder function, it is returned
as the final seeder. If it is a list of more than one seeder function, they
are chained together before returned as the final seeder.
"""
return (chain(*seeders) if len(seeders) > 1 else seeders[0],
delegator_factory(*args, **kwargs)) | The basic pair producer.
:return:
a (seeder, delegator_factory(\*args, \*\*kwargs)) tuple.
:param seeders:
If it is a seeder function or a list of one seeder function, it is returned
as the final seeder. If it is a list of more than one seeder function, they
are chained together before returned as the final seeder. | Below is the the instruction that describes the task:
### Input:
The basic pair producer.
:return:
a (seeder, delegator_factory(\*args, \*\*kwargs)) tuple.
:param seeders:
If it is a seeder function or a list of one seeder function, it is returned
as the final seeder. If it is a list of more than one seeder function, they
are chained together before returned as the final seeder.
### Response:
def pair(seeders, delegator_factory, *args, **kwargs):
"""
The basic pair producer.
:return:
a (seeder, delegator_factory(\*args, \*\*kwargs)) tuple.
:param seeders:
If it is a seeder function or a list of one seeder function, it is returned
as the final seeder. If it is a list of more than one seeder function, they
are chained together before returned as the final seeder.
"""
return (chain(*seeders) if len(seeders) > 1 else seeders[0],
delegator_factory(*args, **kwargs)) |
def iter(context, resource, **kwargs):
"""List all resources"""
data = utils.sanitize_kwargs(**kwargs)
id = data.pop('id', None)
subresource = data.pop('subresource', None)
data['limit'] = data.get('limit', 20)
if subresource:
uri = '%s/%s/%s/%s' % (context.dci_cs_api, resource, id, subresource)
resource = subresource
else:
uri = '%s/%s' % (context.dci_cs_api, resource)
data['offset'] = 0
while True:
j = context.session.get(uri, timeout=HTTP_TIMEOUT, params=data).json()
if len(j[resource]):
for i in j[resource]:
yield i
else:
break
data['offset'] += data['limit'] | List all resources | Below is the the instruction that describes the task:
### Input:
List all resources
### Response:
def iter(context, resource, **kwargs):
"""List all resources"""
data = utils.sanitize_kwargs(**kwargs)
id = data.pop('id', None)
subresource = data.pop('subresource', None)
data['limit'] = data.get('limit', 20)
if subresource:
uri = '%s/%s/%s/%s' % (context.dci_cs_api, resource, id, subresource)
resource = subresource
else:
uri = '%s/%s' % (context.dci_cs_api, resource)
data['offset'] = 0
while True:
j = context.session.get(uri, timeout=HTTP_TIMEOUT, params=data).json()
if len(j[resource]):
for i in j[resource]:
yield i
else:
break
data['offset'] += data['limit'] |
def init_connection_file(self):
"""find the connection file, and load the info if found.
The current working directory and the current profile's security
directory will be searched for the file if it is not given by
absolute path.
When attempting to connect to an existing kernel and the `--existing`
argument does not match an existing file, it will be interpreted as a
fileglob, and the matching file in the current profile's security dir
with the latest access time will be used.
After this method is called, self.connection_file contains the *full path*
to the connection file, never just its name.
"""
if self.existing:
try:
cf = find_connection_file(self.existing)
except Exception:
self.log.critical("Could not find existing kernel connection file %s", self.existing)
self.exit(1)
self.log.info("Connecting to existing kernel: %s" % cf)
self.connection_file = cf
else:
# not existing, check if we are going to write the file
# and ensure that self.connection_file is a full path, not just the shortname
try:
cf = find_connection_file(self.connection_file)
except Exception:
# file might not exist
if self.connection_file == os.path.basename(self.connection_file):
# just shortname, put it in security dir
cf = os.path.join(self.profile_dir.security_dir, self.connection_file)
else:
cf = self.connection_file
self.connection_file = cf
# should load_connection_file only be used for existing?
# as it is now, this allows reusing ports if an existing
# file is requested
try:
self.load_connection_file()
except Exception:
self.log.error("Failed to load connection file: %r", self.connection_file, exc_info=True)
self.exit(1) | find the connection file, and load the info if found.
The current working directory and the current profile's security
directory will be searched for the file if it is not given by
absolute path.
When attempting to connect to an existing kernel and the `--existing`
argument does not match an existing file, it will be interpreted as a
fileglob, and the matching file in the current profile's security dir
with the latest access time will be used.
After this method is called, self.connection_file contains the *full path*
to the connection file, never just its name. | Below is the the instruction that describes the task:
### Input:
find the connection file, and load the info if found.
The current working directory and the current profile's security
directory will be searched for the file if it is not given by
absolute path.
When attempting to connect to an existing kernel and the `--existing`
argument does not match an existing file, it will be interpreted as a
fileglob, and the matching file in the current profile's security dir
with the latest access time will be used.
After this method is called, self.connection_file contains the *full path*
to the connection file, never just its name.
### Response:
def init_connection_file(self):
"""find the connection file, and load the info if found.
The current working directory and the current profile's security
directory will be searched for the file if it is not given by
absolute path.
When attempting to connect to an existing kernel and the `--existing`
argument does not match an existing file, it will be interpreted as a
fileglob, and the matching file in the current profile's security dir
with the latest access time will be used.
After this method is called, self.connection_file contains the *full path*
to the connection file, never just its name.
"""
if self.existing:
try:
cf = find_connection_file(self.existing)
except Exception:
self.log.critical("Could not find existing kernel connection file %s", self.existing)
self.exit(1)
self.log.info("Connecting to existing kernel: %s" % cf)
self.connection_file = cf
else:
# not existing, check if we are going to write the file
# and ensure that self.connection_file is a full path, not just the shortname
try:
cf = find_connection_file(self.connection_file)
except Exception:
# file might not exist
if self.connection_file == os.path.basename(self.connection_file):
# just shortname, put it in security dir
cf = os.path.join(self.profile_dir.security_dir, self.connection_file)
else:
cf = self.connection_file
self.connection_file = cf
# should load_connection_file only be used for existing?
# as it is now, this allows reusing ports if an existing
# file is requested
try:
self.load_connection_file()
except Exception:
self.log.error("Failed to load connection file: %r", self.connection_file, exc_info=True)
self.exit(1) |
def _validate_forbidden(self, forbidden_values, field, value):
""" {'type': 'list'} """
if isinstance(value, _str_type):
if value in forbidden_values:
self._error(field, errors.FORBIDDEN_VALUE, value)
elif isinstance(value, Sequence):
forbidden = set(value) & set(forbidden_values)
if forbidden:
self._error(field, errors.FORBIDDEN_VALUES, list(forbidden))
elif isinstance(value, int):
if value in forbidden_values:
self._error(field, errors.FORBIDDEN_VALUE, value) | {'type': 'list'} | Below is the the instruction that describes the task:
### Input:
{'type': 'list'}
### Response:
def _validate_forbidden(self, forbidden_values, field, value):
""" {'type': 'list'} """
if isinstance(value, _str_type):
if value in forbidden_values:
self._error(field, errors.FORBIDDEN_VALUE, value)
elif isinstance(value, Sequence):
forbidden = set(value) & set(forbidden_values)
if forbidden:
self._error(field, errors.FORBIDDEN_VALUES, list(forbidden))
elif isinstance(value, int):
if value in forbidden_values:
self._error(field, errors.FORBIDDEN_VALUE, value) |
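A hedged usage sketch with a Cerberus-style Validator that defines the rule above (the class name here is an assumption): a schema can blacklist specific values.
schema = {'role': {'forbidden': ['root', 'admin']}}
v = Validator(schema)                  # assumed to expose _validate_forbidden
print(v.validate({'role': 'root'}))    # False -> FORBIDDEN_VALUE error recorded
print(v.validate({'role': 'guest'}))   # True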
def _calc_footprint(self):
"""Return rectangle in world coordinates, as GeoVector."""
corners = [self.corner(corner) for corner in self.corner_types()]
coords = []
for corner in corners:
shape = corner.get_shape(corner.crs)
coords.append([shape.x, shape.y])
shp = Polygon(coords)
# TODO use GeoVector.from_bounds
self._footprint = GeoVector(shp, self.crs)
return self._footprint | Return rectangle in world coordinates, as GeoVector. | Below is the the instruction that describes the task:
### Input:
Return rectangle in world coordinates, as GeoVector.
### Response:
def _calc_footprint(self):
"""Return rectangle in world coordinates, as GeoVector."""
corners = [self.corner(corner) for corner in self.corner_types()]
coords = []
for corner in corners:
shape = corner.get_shape(corner.crs)
coords.append([shape.x, shape.y])
shp = Polygon(coords)
# TODO use GeoVector.from_bounds
self._footprint = GeoVector(shp, self.crs)
return self._footprint |
def t0_ref_supconj(b, orbit, solve_for=None, **kwargs):
"""
Create a constraint for t0_ref in an orbit - allowing translating between
t0_ref and t0_supconj.
:parameter b: the :class:`phoebe.frontend.bundle.Bundle`
:parameter str orbit: the label of the orbit in which this
constraint should be built
:parameter str solve_for: if 't0_ref' should not be the derived/constrained
parameter, provide which other parameter should be derived
(ie 't0_supconj', 'per0', 'period')
:returns: lhs (Parameter), rhs (ConstraintParameter), args (list of arguments
that were passed to this function)
"""
orbit_ps = _get_system_ps(b, orbit)
metawargs = orbit_ps.meta
metawargs.pop('qualifier')
# by default both t0s exist in an orbit, so we don't have to worry about creating either
t0_ref = b.get_parameter(qualifier='t0_ref', **metawargs)
t0_supconj = b.get_parameter(qualifier='t0_supconj', **metawargs)
period = b.get_parameter(qualifier='period', **metawargs)
ecc = b.get_parameter(qualifier='ecc', **metawargs)
per0 = b.get_parameter(qualifier='per0', **metawargs)
if solve_for in [None, t0_ref]:
lhs = t0_ref
rhs = t0_supconj_to_ref(t0_supconj, period, ecc, per0)
elif solve_for == t0_supconj:
lhs = t0_supconj
rhs = t0_ref_to_supconj(t0_ref, period, ecc, per0)
else:
raise NotImplementedError
return lhs, rhs, {'orbit': orbit} | Create a constraint for t0_ref in an orbit - allowing translating between
t0_ref and t0_supconj.
:parameter b: the :class:`phoebe.frontend.bundle.Bundle`
:parameter str orbit: the label of the orbit in which this
constraint should be built
:parameter str solve_for: if 't0_ref' should not be the derived/constrained
parameter, provide which other parameter should be derived
(ie 't0_supconj', 'per0', 'period')
:returns: lhs (Parameter), rhs (ConstraintParameter), args (list of arguments
that were passed to this function) | Below is the the instruction that describes the task:
### Input:
Create a constraint for t0_ref in an orbit - allowing translating between
t0_ref and t0_supconj.
:parameter b: the :class:`phoebe.frontend.bundle.Bundle`
:parameter str orbit: the label of the orbit in which this
constraint should be built
:parameter str solve_for: if 't0_ref' should not be the derived/constrained
parameter, provide which other parameter should be derived
(ie 't0_supconj', 'per0', 'period')
:returns: lhs (Parameter), rhs (ConstraintParameter), args (list of arguments
that were passed to this function)
### Response:
def t0_ref_supconj(b, orbit, solve_for=None, **kwargs):
"""
Create a constraint for t0_ref in an orbit - allowing translating between
t0_ref and t0_supconj.
:parameter b: the :class:`phoebe.frontend.bundle.Bundle`
:parameter str orbit: the label of the orbit in which this
constraint should be built
:parameter str solve_for: if 't0_ref' should not be the derived/constrained
parameter, provide which other parameter should be derived
(ie 't0_supconj', 'per0', 'period')
:returns: lhs (Parameter), rhs (ConstraintParameter), args (list of arguments
that were passed to this function)
"""
orbit_ps = _get_system_ps(b, orbit)
metawargs = orbit_ps.meta
metawargs.pop('qualifier')
# by default both t0s exist in an orbit, so we don't have to worry about creating either
t0_ref = b.get_parameter(qualifier='t0_ref', **metawargs)
t0_supconj = b.get_parameter(qualifier='t0_supconj', **metawargs)
period = b.get_parameter(qualifier='period', **metawargs)
ecc = b.get_parameter(qualifier='ecc', **metawargs)
per0 = b.get_parameter(qualifier='per0', **metawargs)
if solve_for in [None, t0_ref]:
lhs = t0_ref
rhs = t0_supconj_to_ref(t0_supconj, period, ecc, per0)
elif solve_for == t0_supconj:
lhs = t0_supconj
rhs = t0_ref_to_supconj(t0_ref, period, ecc, per0)
else:
raise NotImplementedError
return lhs, rhs, {'orbit': orbit} |
def pushback(self) -> None:
"""Push one character back onto the stream, allowing it to be
read again."""
if abs(self._idx - 1) > self._pushback_depth:
raise IndexError("Exceeded pushback depth")
self._idx -= 1 | Push one character back onto the stream, allowing it to be
read again. | Below is the the instruction that describes the task:
### Input:
Push one character back onto the stream, allowing it to be
read again.
### Response:
def pushback(self) -> None:
"""Push one character back onto the stream, allowing it to be
read again."""
if abs(self._idx - 1) > self._pushback_depth:
raise IndexError("Exceeded pushback depth")
self._idx -= 1 |
def _str_to_int(self, string):
"""Check for the hex
"""
string = string.lower()
if string.endswith("l"):
string = string[:-1]
if string.lower().startswith("0x"):
# should always match
match = re.match(r'0[xX]([a-fA-F0-9]+)', string)
return int(match.group(1), 0x10)
else:
return int(string) | Check for the hex | Below is the the instruction that describes the task:
### Input:
Check for the hex
### Response:
def _str_to_int(self, string):
"""Check for the hex
"""
string = string.lower()
if string.endswith("l"):
string = string[:-1]
if string.lower().startswith("0x"):
# should always match
match = re.match(r'0[xX]([a-fA-F0-9]+)', string)
return int(match.group(1), 0x10)
else:
return int(string) |
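Quick behaviour checks for the helper above; self plays no role, so None is passed purely for illustration, and the module-level re import it relies on is assumed.
assert _str_to_int(None, "42") == 42
assert _str_to_int(None, "0x1AL") == 26   # trailing long-suffix 'L' is stripped
assert _str_to_int(None, "0XFF") == 255   # hex prefix matching is case-insensitive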
def containerIsRunning(container_name):
"""
Checks whether the container is running or not.
:param container_name: Name of the container being checked.
:returns: True if status is 'running', False if status is anything else,
and None if the container does not exist.
"""
client = docker.from_env(version='auto')
try:
this_container = client.containers.get(container_name)
if this_container.status == 'running':
return True
else:
# this_container.status == 'exited', 'restarting', or 'paused'
return False
except NotFound:
return None
except requests.exceptions.HTTPError as e:
logger.debug("Server error attempting to call container: ",
container_name)
raise create_api_error_from_http_exception(e) | Checks whether the container is running or not.
:param container_name: Name of the container being checked.
:returns: True if status is 'running', False if status is anything else,
and None if the container does not exist. | Below is the the instruction that describes the task:
### Input:
Checks whether the container is running or not.
:param container_name: Name of the container being checked.
:returns: True if status is 'running', False if status is anything else,
and None if the container does not exist.
### Response:
def containerIsRunning(container_name):
"""
Checks whether the container is running or not.
:param container_name: Name of the container being checked.
:returns: True if status is 'running', False if status is anything else,
and None if the container does not exist.
"""
client = docker.from_env(version='auto')
try:
this_container = client.containers.get(container_name)
if this_container.status == 'running':
return True
else:
# this_container.status == 'exited', 'restarting', or 'paused'
return False
except NotFound:
return None
except requests.exceptions.HTTPError as e:
logger.debug("Server error attempting to call container: ",
container_name)
raise create_api_error_from_http_exception(e) |
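A hedged usage sketch; the container name is hypothetical. The three-way return value (True / False / None) distinguishes running, stopped, and missing containers.
status = containerIsRunning('toil_worker_1')
if status is None:
    print('no such container')
elif status:
    print('still running')
else:
    print('exited, paused, or restarting')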
def get_jids():
'''
Return a list of all job ids
'''
serv = _get_serv(ret=None)
jids = _get_list(serv, 'jids')
loads = serv.get_multi(jids) # {jid: load, jid: load, ...}
ret = {}
for jid, load in six.iteritems(loads):
ret[jid] = salt.utils.jid.format_jid_instance(jid, salt.utils.json.loads(load))
return ret | Return a list of all job ids | Below is the the instruction that describes the task:
### Input:
Return a list of all job ids
### Response:
def get_jids():
'''
Return a list of all job ids
'''
serv = _get_serv(ret=None)
jids = _get_list(serv, 'jids')
loads = serv.get_multi(jids) # {jid: load, jid: load, ...}
ret = {}
for jid, load in six.iteritems(loads):
ret[jid] = salt.utils.jid.format_jid_instance(jid, salt.utils.json.loads(load))
return ret |
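A minimal sketch of consuming the result, assuming this memcache returner is configured as the Salt master's job cache; get_jids() takes no arguments.

for jid, load in get_jids().items():
    # load is whatever salt.utils.jid.format_jid_instance() produced
    print(jid, load)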
def source_absent(name):
'''
Ensure an image source is absent on the computenode
name : string
source url
'''
ret = {'name': name,
'changes': {},
'result': None,
'comment': ''}
if name not in __salt__['imgadm.sources']():
# source is absent
ret['result'] = True
ret['comment'] = 'image source {0} is absent'.format(name)
else:
# remove source
if __opts__['test']:
res = {}
ret['result'] = True
else:
res = __salt__['imgadm.source_delete'](name)
ret['result'] = (name not in res)
if ret['result']:
ret['comment'] = 'image source {0} deleted'.format(name)
ret['changes'][name] = 'deleted'
else:
ret['comment'] = 'image source {0} not deleted'.format(name)
if 'Error' in res:
ret['comment'] = '{0}: {1}'.format(ret['comment'], res['Error'])
return ret | Ensure an image source is absent on the computenode
name : string
source url | Below is the the instruction that describes the task:
### Input:
Ensure an image source is absent on the computenode
name : string
source url
### Response:
def source_absent(name):
'''
Ensure an image source is absent on the computenode
name : string
source url
'''
ret = {'name': name,
'changes': {},
'result': None,
'comment': ''}
if name not in __salt__['imgadm.sources']():
# source is absent
ret['result'] = True
ret['comment'] = 'image source {0} is absent'.format(name)
else:
# remove source
if __opts__['test']:
res = {}
ret['result'] = True
else:
res = __salt__['imgadm.source_delete'](name)
ret['result'] = (name not in res)
if ret['result']:
ret['comment'] = 'image source {0} deleted'.format(name)
ret['changes'][name] = 'deleted'
else:
ret['comment'] = 'image source {0} not deleted'.format(name)
if 'Error' in res:
ret['comment'] = '{0}: {1}'.format(ret['comment'], res['Error'])
return ret |
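A hedged sketch of a direct call; in practice this runs from an SLS declaration inside Salt's state system, which injects __salt__ and __opts__. The source URL is illustrative only.

ret = source_absent('https://images.example.org/datasets')
print(ret['result'], ret['comment'])
# e.g. True 'image source https://images.example.org/datasets is absent'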
def lazy_result(f):
"""Decorate function to return LazyProxy."""
@wraps(f)
def decorated(ctx, param, value):
return LocalProxy(lambda: f(ctx, param, value))
return decorated | Decorate function to return LazyProxy. | Below is the the instruction that describes the task:
### Input:
Decorate function to return LazyProxy.
### Response:
def lazy_result(f):
"""Decorate function to return LazyProxy."""
@wraps(f)
def decorated(ctx, param, value):
return LocalProxy(lambda: f(ctx, param, value))
return decorated |
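A sketch of the intended pattern: decorating a Click option callback so the value is resolved lazily; expand_app_name and the CLI below are invented for illustration and assume click is installed alongside werkzeug's LocalProxy used above.

import click

@lazy_result
def expand_app_name(ctx, param, value):
    # Imagine an expensive lookup here; it runs only when the proxy is used.
    return (value or 'default-app').upper()

@click.command()
@click.option('--app', default=None, callback=expand_app_name)
def cli(app):
    click.echo(str(app))  # dereferencing the LocalProxy happens here

# cli(['--app', 'demo'])  # would print: DEMO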
def filter(args):
"""
%prog filter test.blast
Produce a new blast file and filter based on:
- score: >= cutoff
- pctid: >= cutoff
- hitlen: >= cutoff
- evalue: <= cutoff
- ids: valid ids
Use --inverse to obtain the complementary records for the criteria above.
- noself: remove self-self hits
"""
p = OptionParser(filter.__doc__)
p.add_option("--score", dest="score", default=0, type="int",
help="Score cutoff")
p.set_align(pctid=95, hitlen=100, evalue=.01)
p.add_option("--noself", default=False, action="store_true",
help="Remove self-self hits")
p.add_option("--ids", help="Path to file with ids to retain")
p.add_option("--inverse", default=False, action="store_true",
help="Similar to grep -v, inverse")
p.set_outfile(outfile=None)
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
if opts.ids:
ids = set()
for row in must_open(opts.ids):
if row[0] == "#":
continue
row = row.replace(",", "\t")
ids.update(row.split())
else:
ids = None
blastfile, = args
inverse = opts.inverse
outfile = opts.outfile
fp = must_open(blastfile)
score, pctid, hitlen, evalue, noself = \
opts.score, opts.pctid, opts.hitlen, opts.evalue, opts.noself
newblastfile = blastfile + ".P{0}L{1}".format(int(pctid), hitlen) if \
outfile is None else outfile
if inverse:
newblastfile += ".inverse"
fw = must_open(newblastfile, "w")
for row in fp:
if row[0] == '#':
continue
c = BlastLine(row)
if ids:
if c.query in ids and c.subject in ids:
noids = False
else:
noids = True
else:
noids = None
remove = c.score < score or \
c.pctid < pctid or \
c.hitlen < hitlen or \
c.evalue > evalue or \
noids
if inverse:
remove = not remove
remove = remove or (noself and c.query == c.subject)
if not remove:
print(row.rstrip(), file=fw)
fw.close()
return newblastfile | %prog filter test.blast
Produce a new blast file and filter based on:
- score: >= cutoff
- pctid: >= cutoff
- hitlen: >= cutoff
- evalue: <= cutoff
- ids: valid ids
Use --inverse to obtain the complementary records for the criteria above.
- noself: remove self-self hits | Below is the the instruction that describes the task:
### Input:
%prog filter test.blast
Produce a new blast file and filter based on:
- score: >= cutoff
- pctid: >= cutoff
- hitlen: >= cutoff
- evalue: <= cutoff
- ids: valid ids
Use --inverse to obtain the complementary records for the criteria above.
- noself: remove self-self hits
### Response:
def filter(args):
"""
%prog filter test.blast
Produce a new blast file and filter based on:
- score: >= cutoff
- pctid: >= cutoff
- hitlen: >= cutoff
- evalue: <= cutoff
- ids: valid ids
Use --inverse to obtain the complementary records for the criteria above.
- noself: remove self-self hits
"""
p = OptionParser(filter.__doc__)
p.add_option("--score", dest="score", default=0, type="int",
help="Score cutoff")
p.set_align(pctid=95, hitlen=100, evalue=.01)
p.add_option("--noself", default=False, action="store_true",
help="Remove self-self hits")
p.add_option("--ids", help="Path to file with ids to retain")
p.add_option("--inverse", default=False, action="store_true",
help="Similar to grep -v, inverse")
p.set_outfile(outfile=None)
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
if opts.ids:
ids = set()
for row in must_open(opts.ids):
if row[0] == "#":
continue
row = row.replace(",", "\t")
ids.update(row.split())
else:
ids = None
blastfile, = args
inverse = opts.inverse
outfile = opts.outfile
fp = must_open(blastfile)
score, pctid, hitlen, evalue, noself = \
opts.score, opts.pctid, opts.hitlen, opts.evalue, opts.noself
newblastfile = blastfile + ".P{0}L{1}".format(int(pctid), hitlen) if \
outfile is None else outfile
if inverse:
newblastfile += ".inverse"
fw = must_open(newblastfile, "w")
for row in fp:
if row[0] == '#':
continue
c = BlastLine(row)
if ids:
if c.query in ids and c.subject in ids:
noids = False
else:
noids = True
else:
noids = None
remove = c.score < score or \
c.pctid < pctid or \
c.hitlen < hitlen or \
c.evalue > evalue or \
noids
if inverse:
remove = not remove
remove = remove or (noself and c.query == c.subject)
if not remove:
print(row.rstrip(), file=fw)
fw.close()
return newblastfile |
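A hedged programmatic invocation, equivalent to running the filter subcommand from the shell; the file name and thresholds are illustrative, and the option names are assumed to come from set_align above.

newfile = filter(["test.blast", "--pctid", "90", "--hitlen", "50", "--noself"])
print(newfile)  # e.g. test.blast.P90L50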
def setup(config, minconn=5, maxconn=10, adapter='mysql', key='default', slave=False):
"""Setup database
:param config dict: is the db adapter config
    :param key string: the key to identify the database
    :param adapter string: the database adapter; currently only mysql is supported
:param minconn int: the min connection for connection pool
:param maxconn int: the max connection for connection pool
:param slave boolean: If True the database can be read only.
"""
global __db
if '.' in key:
raise TypeError('The DB Key: "%s" Can\'t Contain dot' % (key))
if slave == False and key in __db:
raise DBError('The Key: "%s" was set' % (key))
database = DB(config, minconn, maxconn, key, adapter)
master_key = key
slave_key = key + '.slave'
if not slave:
__db[master_key] = database
if slave_key not in __db:
__db[slave_key] = [database]
else:
if key in __db:
databases = __db[slave_key]
if len(databases) == 1 and __db[master_key] == databases[0]:
__db[slave_key] = [database]
else:
__db[slave_key].append(database)
else:
__db[slave_key] = [database] | Setup database
:param config dict: is the db adapter config
    :param key string: the key to identify the database
    :param adapter string: the database adapter; currently only mysql is supported
:param minconn int: the min connection for connection pool
:param maxconn int: the max connection for connection pool
:param slave boolean: If True the database can be read only. | Below is the the instruction that describes the task:
### Input:
Setup database
:param config dict: is the db adapter config
    :param key string: the key to identify the database
    :param adapter string: the database adapter; currently only mysql is supported
:param minconn int: the min connection for connection pool
:param maxconn int: the max connection for connection pool
:param slave boolean: If True the database can be read only.
### Response:
def setup(config, minconn=5, maxconn=10, adapter='mysql', key='default', slave=False):
"""Setup database
:param config dict: is the db adapter config
    :param key string: the key to identify the database
    :param adapter string: the database adapter; currently only mysql is supported
:param minconn int: the min connection for connection pool
:param maxconn int: the max connection for connection pool
:param slave boolean: If True the database can be read only.
"""
global __db
if '.' in key:
raise TypeError('The DB Key: "%s" Can\'t Contain dot' % (key))
if slave == False and key in __db:
raise DBError('The Key: "%s" was set' % (key))
database = DB(config, minconn, maxconn, key, adapter)
master_key = key
slave_key = key + '.slave'
if not slave:
__db[master_key] = database
if slave_key not in __db:
__db[slave_key] = [database]
else:
if key in __db:
databases = __db[slave_key]
if len(databases) == 1 and __db[master_key] == databases[0]:
__db[slave_key] = [database]
else:
__db[slave_key].append(database)
else:
__db[slave_key] = [database] |
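A minimal sketch of registering one writable master plus a read-only replica under the same key; the connection settings are placeholders for a real MySQL config.

master_cfg = {'host': '127.0.0.1', 'user': 'app', 'passwd': 'secret', 'db': 'blog'}
replica_cfg = {'host': '10.0.0.2', 'user': 'app', 'passwd': 'secret', 'db': 'blog'}

setup(master_cfg, key='default')               # registered as 'default'
setup(replica_cfg, key='default', slave=True)  # appended under 'default.slave'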
def matched_filter(template, data, psd=None, low_frequency_cutoff=None,
high_frequency_cutoff=None, sigmasq=None):
""" Return the complex snr.
Return the complex snr, along with its associated normalization of the
template, matched filtered against the data.
Parameters
----------
template : TimeSeries or FrequencySeries
The template waveform
data : TimeSeries or FrequencySeries
The strain data to be filtered.
psd : FrequencySeries
The noise weighting of the filter.
low_frequency_cutoff : {None, float}, optional
The frequency to begin the filter calculation. If None, begin at the
first frequency after DC.
high_frequency_cutoff : {None, float}, optional
The frequency to stop the filter calculation. If None, continue to the
        Nyquist frequency.
sigmasq : {None, float}, optional
The template normalization. If none, this value is calculated
internally.
Returns
-------
snr : TimeSeries
A time series containing the complex snr.
"""
snr, _, norm = matched_filter_core(template, data, psd=psd,
low_frequency_cutoff=low_frequency_cutoff,
high_frequency_cutoff=high_frequency_cutoff, h_norm=sigmasq)
return snr * norm | Return the complex snr.
Return the complex snr, along with its associated normalization of the
template, matched filtered against the data.
Parameters
----------
template : TimeSeries or FrequencySeries
The template waveform
data : TimeSeries or FrequencySeries
The strain data to be filtered.
psd : FrequencySeries
The noise weighting of the filter.
low_frequency_cutoff : {None, float}, optional
The frequency to begin the filter calculation. If None, begin at the
first frequency after DC.
high_frequency_cutoff : {None, float}, optional
The frequency to stop the filter calculation. If None, continue to the
        Nyquist frequency.
sigmasq : {None, float}, optional
The template normalization. If none, this value is calculated
internally.
Returns
-------
snr : TimeSeries
A time series containing the complex snr. | Below is the the instruction that describes the task:
### Input:
Return the complex snr.
Return the complex snr, along with its associated normalization of the
template, matched filtered against the data.
Parameters
----------
template : TimeSeries or FrequencySeries
The template waveform
data : TimeSeries or FrequencySeries
The strain data to be filtered.
psd : FrequencySeries
The noise weighting of the filter.
low_frequency_cutoff : {None, float}, optional
The frequency to begin the filter calculation. If None, begin at the
first frequency after DC.
high_frequency_cutoff : {None, float}, optional
The frequency to stop the filter calculation. If None, continue to the
        Nyquist frequency.
sigmasq : {None, float}, optional
The template normalization. If none, this value is calculated
internally.
Returns
-------
snr : TimeSeries
A time series containing the complex snr.
### Response:
def matched_filter(template, data, psd=None, low_frequency_cutoff=None,
high_frequency_cutoff=None, sigmasq=None):
""" Return the complex snr.
Return the complex snr, along with its associated normalization of the
template, matched filtered against the data.
Parameters
----------
template : TimeSeries or FrequencySeries
The template waveform
data : TimeSeries or FrequencySeries
The strain data to be filtered.
psd : FrequencySeries
The noise weighting of the filter.
low_frequency_cutoff : {None, float}, optional
The frequency to begin the filter calculation. If None, begin at the
first frequency after DC.
high_frequency_cutoff : {None, float}, optional
The frequency to stop the filter calculation. If None, continue to the
        Nyquist frequency.
sigmasq : {None, float}, optional
The template normalization. If none, this value is calculated
internally.
Returns
-------
snr : TimeSeries
A time series containing the complex snr.
"""
snr, _, norm = matched_filter_core(template, data, psd=psd,
low_frequency_cutoff=low_frequency_cutoff,
high_frequency_cutoff=high_frequency_cutoff, h_norm=sigmasq)
return snr * norm |
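A minimal usage sketch, assuming template, data, and psd have already been prepared elsewhere (for example with PyCBC's waveform and PSD utilities) and follow matching length and frequency conventions.

snr = matched_filter(template, data, psd=psd, low_frequency_cutoff=20.0)
peak = abs(snr).numpy().argmax()           # index of the loudest sample
print('peak |SNR|', abs(snr[peak]), 'at sample', peak)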