code | docstring | text
---|---|---|
def filter_rows(self, filters, rows):
'''returns rows as filtered by filters'''
ret = []
for row in rows:
if not self.row_is_filtered(row, filters):
ret.append(row)
return ret | returns rows as filtered by filters | Below is the instruction that describes the task:
### Input:
returns rows as filtered by filters
### Response:
def filter_rows(self, filters, rows):
'''returns rows as filtered by filters'''
ret = []
for row in rows:
if not self.row_is_filtered(row, filters):
ret.append(row)
return ret |
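As a quick illustration of how filter_rows is meant to be used, here is a minimal, self-contained sketch; the Table class, the row_is_filtered predicate, and the (column_index, required_value) filter format are assumptions made for this example and are not part of the original source.

```python
# Hypothetical sketch: filters are (column_index, required_value) pairs and
# row_is_filtered() rejects a row when any filter does not match.
class Table(object):
    def row_is_filtered(self, row, filters):
        return any(row[col] != wanted for col, wanted in filters)

    def filter_rows(self, filters, rows):
        '''returns rows as filtered by filters'''
        return [row for row in rows if not self.row_is_filtered(row, filters)]


rows = [("a", 1), ("b", 2), ("a", 3)]
print(Table().filter_rows([(0, "a")], rows))  # [('a', 1), ('a', 3)]
```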
def gateway_snapshot(self, indices=None):
"""
Gateway snapshot one or more indices
(See :ref:`es-guide-reference-api-admin-indices-gateway-snapshot`)
:keyword indices: a list of indices or None for default configured.
"""
path = self.conn._make_path(indices, (), '_gateway', 'snapshot')
return self.conn._send_request('POST', path) | Gateway snapshot one or more indices
(See :ref:`es-guide-reference-api-admin-indices-gateway-snapshot`)
:keyword indices: a list of indices or None for default configured. | Below is the instruction that describes the task:
### Input:
Gateway snapshot one or more indices
(See :ref:`es-guide-reference-api-admin-indices-gateway-snapshot`)
:keyword indices: a list of indices or None for default configured.
### Response:
def gateway_snapshot(self, indices=None):
"""
Gateway snapshot one or more indices
(See :ref:`es-guide-reference-api-admin-indices-gateway-snapshot`)
:keyword indices: a list of indices or None for default configured.
"""
path = self.conn._make_path(indices, (), '_gateway', 'snapshot')
return self.conn._send_request('POST', path) |
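To make the request shape visible without a live Elasticsearch cluster, here is a hedged, self-contained sketch with a stub connection; the _make_path behaviour (joining indices and defaulting to _all) is an assumed approximation for illustration, not the client library's actual implementation.

```python
# Stub connection that records the HTTP call instead of sending it.
class StubConn(object):
    def _make_path(self, indices, types, *components):
        parts = [",".join(indices) if indices else "_all"] + list(components)
        return "/" + "/".join(parts)

    def _send_request(self, method, path):
        return (method, path)


class IndicesAdmin(object):
    def __init__(self, conn):
        self.conn = conn

    def gateway_snapshot(self, indices=None):
        path = self.conn._make_path(indices, (), '_gateway', 'snapshot')
        return self.conn._send_request('POST', path)


print(IndicesAdmin(StubConn()).gateway_snapshot(['logs-2019']))
# ('POST', '/logs-2019/_gateway/snapshot')
```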
def setDatastreamVersionable(self, pid, dsID, versionable):
'''Update datastream versionable setting.
:param pid: object pid
:param dsID: datastream id
:param versionable: boolean
:returns: boolean success
'''
# /objects/{pid}/datastreams/{dsID} ? [versionable]
http_args = {'versionable': versionable}
url = 'objects/%(pid)s/datastreams/%(dsid)s' % {'pid': pid, 'dsid': dsID}
response = self.put(url, params=http_args)
# returns response code 200 on success
return response.status_code == requests.codes.ok | Update datastream versionable setting.
:param pid: object pid
:param dsID: datastream id
:param versionable: boolean
:returns: boolean success | Below is the instruction that describes the task:
### Input:
Update datastream versionable setting.
:param pid: object pid
:param dsID: datastream id
:param versionable: boolean
:returns: boolean success
### Response:
def setDatastreamVersionable(self, pid, dsID, versionable):
'''Update datastream versionable setting.
:param pid: object pid
:param dsID: datastream id
:param versionable: boolean
:returns: boolean success
'''
# /objects/{pid}/datastreams/{dsID} ? [versionable]
http_args = {'versionable': versionable}
url = 'objects/%(pid)s/datastreams/%(dsid)s' % {'pid': pid, 'dsid': dsID}
response = self.put(url, params=http_args)
# returns response code 200 on success
return response.status_code == requests.codes.ok |
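A hedged usage sketch with a stub standing in for a real Fedora repository client; the pid ('demo:123') and datastream ID ('TEXT') are invented, and only requests.codes.ok from the real requests library is used.

```python
import requests  # only requests.codes.ok is used here


class StubResponse(object):
    status_code = requests.codes.ok


class StubFedoraClient(object):
    def put(self, url, params=None):
        # Print what would be sent instead of calling a real repository.
        print('PUT', url, params)
        return StubResponse()

    def setDatastreamVersionable(self, pid, dsID, versionable):
        http_args = {'versionable': versionable}
        url = 'objects/%(pid)s/datastreams/%(dsid)s' % {'pid': pid, 'dsid': dsID}
        response = self.put(url, params=http_args)
        return response.status_code == requests.codes.ok


# 'demo:123' and 'TEXT' are hypothetical identifiers.
print(StubFedoraClient().setDatastreamVersionable('demo:123', 'TEXT', False))
```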
def get_application_choices():
"""
Get the select options for the application selector
:return:
"""
result = []
keys = set()
for ct in ContentType.objects.order_by('app_label', 'model'):
try:
if issubclass(ct.model_class(), TranslatableModel) and ct.app_label not in keys:
result.append(('{}'.format(ct.app_label), '{}'.format(ct.app_label.capitalize())))
keys.add(ct.app_label)
except TypeError:
continue
return result | Get the select options for the application selector
:return: | Below is the instruction that describes the task:
### Input:
Get the select options for the application selector
:return:
### Response:
def get_application_choices():
"""
Get the select options for the application selector
:return:
"""
result = []
keys = set()
for ct in ContentType.objects.order_by('app_label', 'model'):
try:
if issubclass(ct.model_class(), TranslatableModel) and ct.app_label not in keys:
result.append(('{}'.format(ct.app_label), '{}'.format(ct.app_label.capitalize())))
keys.add(ct.app_label)
except TypeError:
continue
return result |
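The interesting part of get_application_choices is the de-duplication and capitalisation of app labels; the sketch below reproduces just that logic with a plain list standing in for the ContentType queryset, so it runs without a configured Django project. The labels are illustrative.

```python
# Plain-Python stand-in for the ContentType queryset (labels are illustrative).
app_labels = ['blog', 'blog', 'shop', 'accounts']

result = []
keys = set()
for label in app_labels:
    if label not in keys:
        result.append(('{}'.format(label), '{}'.format(label.capitalize())))
        keys.add(label)

print(result)  # [('blog', 'Blog'), ('shop', 'Shop'), ('accounts', 'Accounts')]
```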
def handle_Sample(self, instance):
"""If this sample has a single AR, go there.
If the sample has 0 or >1 ARs, go to the sample's view URL.
"""
ars = instance.getAnalysisRequests()
if len(ars) == 1:
return self.handle_AnalysisRequest(ars[0])
else:
return instance.absolute_url() | If this sample has a single AR, go there.
If the sample has 0 or >1 ARs, go to the sample's view URL. | Below is the instruction that describes the task:
### Input:
If this sample has a single AR, go there.
If the sample has 0 or >1 ARs, go to the sample's view URL.
### Response:
def handle_Sample(self, instance):
"""If this sample has a single AR, go there.
If the sample has 0 or >1 ARs, go to the sample's view URL.
"""
ars = instance.getAnalysisRequests()
if len(ars) == 1:
return self.handle_AnalysisRequest(ars[0])
else:
return instance.absolute_url() |
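A minimal dispatch sketch with stand-in objects; FakeSample, the URL, and the AR identifiers are all invented for illustration, and the real handle_AnalysisRequest call is replaced by a string.

```python
# Toy objects illustrating the single-AR vs. 0-or-many-AR dispatch rule.
class FakeSample(object):
    def __init__(self, ars):
        self._ars = ars

    def getAnalysisRequests(self):
        return self._ars

    def absolute_url(self):
        return 'http://lims.example/sample-1'


def handle_Sample(instance):
    ars = instance.getAnalysisRequests()
    if len(ars) == 1:
        return 'redirect to AR %s' % ars[0]  # stands in for handle_AnalysisRequest
    return instance.absolute_url()


print(handle_Sample(FakeSample(['AR-001'])))            # redirect to AR AR-001
print(handle_Sample(FakeSample([])))                    # http://lims.example/sample-1
print(handle_Sample(FakeSample(['AR-001', 'AR-002'])))  # http://lims.example/sample-1
```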
def excel_synthese(fct, df, excel_file):
"""
Writes to an Excel file a summary of the regulatory calculations,
providing the values computed according to the regulations defined in
each calculation function, plus a table of the number of exceedances.
The results are saved.
Parameters:
fct: function returning the computed elements
df: DataFrame of input values to pass to the function
excel_file: path of the Excel file where the values are written
Returns:
Nothing
"""
def sheet_name(name):
# format the sheet name (strip accents, quotes, colons, spaces, ...)
name = unicodedata.normalize('NFKD', name).encode('ascii', 'ignore').decode('ascii')
name = name.replace("'", "").replace(":", "").replace(" ", "_")
name = "%i-%s" % (i, name)
name = name[:31]
return name
res_count = dict()
polluant, res = fct(df)
print("\nTraitement du polluant: %s" % polluant)
writer = pd.ExcelWriter(excel_file)
# Measured values for each criterion
for i, (k, v) in enumerate(res.items()):
comp = compresse(v)
comp.index.name = k
comp = comp.apply(pd.np.round)
comp.to_excel(writer, sheet_name=sheet_name(k))
res_count[k] = v.count()
# Number of exceedances of the criteria
name = "Nombre_de_depassements"
res_count = pd.DataFrame(res_count).T
res_count.index.name = name
res_count.to_excel(writer, sheet_name=name)
writer.save() | Writes to an Excel file a summary of the regulatory calculations,
providing the values computed according to the regulations defined in
each calculation function, plus a table of the number of exceedances.
The results are saved.
Parameters:
fct: function returning the computed elements
df: DataFrame of input values to pass to the function
excel_file: path of the Excel file where the values are written
Returns:
Nothing | Below is the instruction that describes the task:
### Input:
Writes to an Excel file a summary of the regulatory calculations,
providing the values computed according to the regulations defined in
each calculation function, plus a table of the number of exceedances.
The results are saved.
Parameters:
fct: function returning the computed elements
df: DataFrame of input values to pass to the function
excel_file: path of the Excel file where the values are written
Returns:
Nothing
### Response:
def excel_synthese(fct, df, excel_file):
"""
Writes to an Excel file a summary of the regulatory calculations,
providing the values computed according to the regulations defined in
each calculation function, plus a table of the number of exceedances.
The results are saved.
Parameters:
fct: function returning the computed elements
df: DataFrame of input values to pass to the function
excel_file: path of the Excel file where the values are written
Returns:
Nothing
"""
def sheet_name(name):
# format the sheet name (strip accents, quotes, colons, spaces, ...)
name = unicodedata.normalize('NFKD', name).encode('ascii', 'ignore').decode('ascii')
name = name.replace("'", "").replace(":", "").replace(" ", "_")
name = "%i-%s" % (i, name)
name = name[:31]
return name
res_count = dict()
polluant, res = fct(df)
print("\nTraitement du polluant: %s" % polluant)
writer = pd.ExcelWriter(excel_file)
# Measured values for each criterion
for i, (k, v) in enumerate(res.items()):
comp = compresse(v)
comp.index.name = k
comp = comp.apply(pd.np.round)
comp.to_excel(writer, sheet_name=sheet_name(k))
res_count[k] = v.count()
# Number of exceedances of the criteria
name = "Nombre_de_depassements"
res_count = pd.DataFrame(res_count).T
res_count.index.name = name
res_count.to_excel(writer, sheet_name=name)
writer.save() |
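excel_synthese expects fct to return a (pollutant_name, {criterion_label: Series}) pair; the sketch below shows a fake calculation function matching that assumed contract. The threshold, column name, and file paths are illustrative, and the final call is left commented out because it also depends on the compresse() helper not shown in this excerpt.

```python
import pandas as pd


def fake_no2_rules(df):
    # Illustrative criterion: hourly NO2 values above 200 µg/m3.
    exceedances = df['NO2'][df['NO2'] > 200]
    return 'NO2', {'Hourly values above 200': exceedances}


# df = pd.read_csv('no2_measurements.csv', index_col=0, parse_dates=True)
# excel_synthese(fake_no2_rules, df, 'synthese_NO2.xlsx')
```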
def stats(self, topic=None, channel=None, text=False):
"""Return internal instrumented statistics.
:param topic: (optional) filter to topic
:param channel: (optional) filter to channel
:param text: return the stats as a string (default: ``False``)
"""
if text:
fields = {'format': 'text'}
else:
fields = {'format': 'json'}
if topic:
nsq.assert_valid_topic_name(topic)
fields['topic'] = topic
if channel:
nsq.assert_valid_channel_name(channel)
fields['channel'] = channel
return self._request('GET', '/stats', fields=fields) | Return internal instrumented statistics.
:param topic: (optional) filter to topic
:param channel: (optional) filter to channel
:param text: return the stats as a string (default: ``False``) | Below is the instruction that describes the task:
### Input:
Return internal instrumented statistics.
:param topic: (optional) filter to topic
:param channel: (optional) filter to channel
:param text: return the stats as a string (default: ``False``)
### Response:
def stats(self, topic=None, channel=None, text=False):
"""Return internal instrumented statistics.
:param topic: (optional) filter to topic
:param channel: (optional) filter to channel
:param text: return the stats as a string (default: ``False``)
"""
if text:
fields = {'format': 'text'}
else:
fields = {'format': 'json'}
if topic:
nsq.assert_valid_topic_name(topic)
fields['topic'] = topic
if channel:
nsq.assert_valid_channel_name(channel)
fields['channel'] = channel
return self._request('GET', '/stats', fields=fields) |
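A self-contained sketch of the query parameters this method builds, using a stub client and skipping the nsq.assert_valid_* validation calls; no real nsqd daemon is contacted.

```python
# Stub client that returns the request it would make instead of sending it.
class StubNsqdClient(object):
    def _request(self, method, url, fields=None):
        return (method, url, fields)

    def stats(self, topic=None, channel=None, text=False):
        fields = {'format': 'text' if text else 'json'}
        if topic:
            fields['topic'] = topic
        if channel:
            fields['channel'] = channel
        return self._request('GET', '/stats', fields=fields)


print(StubNsqdClient().stats(topic='events', channel='archive'))
# ('GET', '/stats', {'format': 'json', 'topic': 'events', 'channel': 'archive'})
```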
def create_autosummary_file(modules, opts):
# type: (List[unicode], Any) -> None
"""Create the module's index."""
lines = [
'API Reference',
'=============',
'',
'.. autosummary::',
' :template: api_module.rst',
' :toctree: {}'.format(opts.destdir),
'',
]
modules.sort()
for module in modules:
lines.append(' {}'.format(module))
lines.append('')
fname = path.join(opts.srcdir, '{}.rst'.format(opts.docname))
logger.info('[apigen] creating API docs file: {}'.format(fname))
with FileAvoidWrite(fname) as f:
f.write('\n'.join(lines)) | Create the module's index. | Below is the instruction that describes the task:
### Input:
Create the module's index.
### Response:
def create_autosummary_file(modules, opts):
# type: (List[unicode], Any) -> None
"""Create the module's index."""
lines = [
'API Reference',
'=============',
'',
'.. autosummary::',
' :template: api_module.rst',
' :toctree: {}'.format(opts.destdir),
'',
]
modules.sort()
for module in modules:
lines.append(' {}'.format(module))
lines.append('')
fname = path.join(opts.srcdir, '{}.rst'.format(opts.docname))
logger.info('[apigen] creating API docs file: {}'.format(fname))
with FileAvoidWrite(fname) as f:
f.write('\n'.join(lines)) |
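For reference, this is roughly the reStructuredText the helper writes for two modules with destdir='api'; the module names and destination directory are made up, and the snippet simply rebuilds a comparable lines list and prints it.

```python
# Rebuild and preview the generated RST for hypothetical inputs.
modules = ['mypkg.utils', 'mypkg.core']
destdir = 'api'

lines = [
    'API Reference',
    '=============',
    '',
    '.. autosummary::',
    '   :template: api_module.rst',
    '   :toctree: {}'.format(destdir),
    '',
]
modules.sort()
for module in modules:
    lines.append('   {}'.format(module))
lines.append('')

print('\n'.join(lines))
```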
def mounted(name,
device,
fstype,
mkmnt=False,
opts='defaults',
dump=0,
pass_num=0,
config='/etc/fstab',
persist=True,
mount=True,
user=None,
match_on='auto',
device_name_regex=None,
extra_mount_invisible_options=None,
extra_mount_invisible_keys=None,
extra_mount_ignore_fs_keys=None,
extra_mount_translate_options=None,
hidden_opts=None,
**kwargs):
'''
Verify that a device is mounted
name
The path to the location where the device is to be mounted
device
The device name, typically the device node, such as ``/dev/sdb1``
or ``UUID=066e0200-2867-4ebe-b9e6-f30026ca2314`` or ``LABEL=DATA``
fstype
The filesystem type, this will be ``xfs``, ``ext2/3/4`` in the case of classic
filesystems, ``fuse`` in the case of fuse mounts, and ``nfs`` in the case of nfs mounts
mkmnt
If the mount point is not present then the state will fail, set ``mkmnt: True``
to create the mount point if it is otherwise not present
opts
A list object of options or a comma delimited list
dump
The dump value to be passed into the fstab, Default is ``0``
pass_num
The pass value to be passed into the fstab, Default is ``0``
config
Set an alternative location for the fstab, Default is ``/etc/fstab``
persist
Set if the mount should be saved in the fstab, Default is ``True``
mount
Set if the mount should be mounted immediately, Default is ``True``
user
The account used to execute the mount; this defaults to the user salt is
running as on the minion
match_on
A name or list of fstab properties on which this state should be applied.
Default is ``auto``, a special value indicating to guess based on fstype.
In general, ``auto`` matches on name for recognized special devices and
device otherwise.
device_name_regex
A list of device exact names or regular expressions which should
not force a remount. For example, glusterfs may be mounted with a
comma-separated list of servers in fstab, but the /proc/self/mountinfo
will show only the first available server.
.. code-block:: jinja
{% set glusterfs_ip_list = ['10.0.0.1', '10.0.0.2', '10.0.0.3'] %}
mount glusterfs volume:
mount.mounted:
- name: /mnt/glusterfs_mount_point
- device: {{ glusterfs_ip_list|join(',') }}:/volume_name
- fstype: glusterfs
- opts: _netdev,rw,defaults,direct-io-mode=disable
- mkmnt: True
- persist: True
- dump: 0
- pass_num: 0
- device_name_regex:
- ({{ glusterfs_ip_list|join('|') }}):/volume_name
.. versionadded:: 2016.11.0
extra_mount_invisible_options
A list of extra options that are not visible through the
``/proc/self/mountinfo`` interface.
If an option is not visible through this interface it will always remount
the device. This option extends the builtin ``mount_invisible_options``
list.
extra_mount_invisible_keys
A list of extra key options that are not visible through the
``/proc/self/mountinfo`` interface.
If a key option is not visible through this interface it will always
remount the device. This option extends the builtin
``mount_invisible_keys`` list.
A good example for a key option is the password option::
password=badsecret
extra_mount_ignore_fs_keys
A dict of filesystem options which should not force a remount. This will update
the internal dictionary. The dict should look like this::
{
'ramfs': ['size']
}
extra_mount_translate_options
A dict of mount options that gets translated when mounted. To prevent a remount
add additional options to the default dictionary. This will update the internal
dictionary. The dictionary should look like this::
{
'tcp': 'proto=tcp',
'udp': 'proto=udp'
}
hidden_opts
A list of mount options that will be ignored when considering a remount
as part of the state application
.. versionadded:: 2015.8.2
'''
ret = {'name': name,
'changes': {},
'result': True,
'comment': ''}
update_mount_cache = False
if not name:
ret['result'] = False
ret['comment'] = 'Must provide name to mount.mounted'
return ret
if not device:
ret['result'] = False
ret['comment'] = 'Must provide device to mount.mounted'
return ret
if not fstype:
ret['result'] = False
ret['comment'] = 'Must provide fstype to mount.mounted'
return ret
if device_name_regex is None:
device_name_regex = []
# Defaults is not a valid option on Mac OS
if __grains__['os'] in ['MacOS', 'Darwin'] and opts == 'defaults':
opts = 'noowners'
# Defaults is not a valid option on AIX
if __grains__['os'] in ['AIX']:
if opts == 'defaults':
opts = ''
# Defaults is not a valid option on Solaris
if 'Solaris' in __grains__['os'] and opts == 'defaults':
opts = '-'
# Make sure that opts is correct, it can be a list or a comma delimited
# string
if isinstance(opts, string_types):
opts = opts.split(',')
if isinstance(hidden_opts, string_types):
hidden_opts = hidden_opts.split(',')
# remove possible trailing slash
if not name == '/':
name = name.rstrip('/')
device_list = []
# Get the active data
active = __salt__['mount.active'](extended=True)
real_name = os.path.realpath(name)
if device.startswith('/'):
if 'bind' in opts and real_name in active:
_device = device
if active[real_name]['device'].startswith('/'):
# Find the device that the bind really points at.
while True:
if _device in active:
_real_device = active[_device]['device']
opts = list(set(opts + active[_device]['opts'] + active[_device]['superopts']))
active[real_name]['opts'].append('bind')
break
_device = os.path.dirname(_device)
real_device = _real_device
else:
# Remote file systems act differently.
if _device in active:
opts = list(set(opts + active[_device]['opts'] + active[_device]['superopts']))
active[real_name]['opts'].append('bind')
real_device = active[real_name]['device']
else:
real_device = os.path.realpath(device)
elif device.upper().startswith('UUID='):
real_device = device.split('=')[1].strip('"').lower()
elif device.upper().startswith('LABEL='):
_label = device.split('=')[1]
cmd = 'blkid -t LABEL={0}'.format(_label)
res = __salt__['cmd.run_all']('{0}'.format(cmd))
if res['retcode'] > 0:
ret['comment'] = 'Unable to find device with label {0}.'.format(_label)
ret['result'] = False
return ret
else:
# output is a list of entries like this:
# /dev/sda: LABEL="<label>" UUID="<uuid>" UUID_SUB="<uuid>" TYPE="btrfs"
# exact list of properties varies between filesystems, but we're
# only interested in the device in the first column
for line in res['stdout']:
dev_with_label = line.split(':')[0]
device_list.append(dev_with_label)
real_device = device_list[0]
else:
real_device = device
# LVS devices have 2 names under /dev:
# /dev/mapper/vg--name-lv--name and /dev/vg-name/lv-name
# No matter what name is used for mounting,
# mount always displays the device as /dev/mapper/vg--name-lv--name
# Note the double-dash escaping.
# So, let's call that the canonical device name
# We should normalize names of the /dev/vg-name/lv-name type to the canonical name
lvs_match = re.match(r'^/dev/(?P<vg_name>[^/]+)/(?P<lv_name>[^/]+$)', device)
if lvs_match:
double_dash_escaped = dict((k, re.sub(r'-', '--', v)) for k, v in six.iteritems(lvs_match.groupdict()))
mapper_device = '/dev/mapper/{vg_name}-{lv_name}'.format(**double_dash_escaped)
if os.path.exists(mapper_device):
real_device = mapper_device
# When included in a Salt state file, FUSE devices are prefaced by the
# filesystem type and a hash, e.g. sshfs. In the mount list only the
# hostname is included. So if we detect that the device is a FUSE device
# then we remove the prefaced string so that the device in state matches
# the device in the mount list.
fuse_match = re.match(r'^\w+\#(?P<device_name>.+)', device)
if fuse_match:
if 'device_name' in fuse_match.groupdict():
real_device = fuse_match.group('device_name')
if real_name in active:
if 'superopts' not in active[real_name]:
active[real_name]['superopts'] = []
if mount:
device_list.append(active[real_name]['device'])
device_list.append(os.path.realpath(device_list[0]))
alt_device = active[real_name]['alt_device'] if 'alt_device' in active[real_name] else None
uuid_device = active[real_name]['device_uuid'] if 'device_uuid' in active[real_name] else None
label_device = active[real_name]['device_label'] if 'device_label' in active[real_name] else None
if alt_device and alt_device not in device_list:
device_list.append(alt_device)
if uuid_device and uuid_device not in device_list:
device_list.append(uuid_device)
if label_device and label_device not in device_list:
device_list.append(label_device)
if opts:
mount_invisible_options = [
'_netdev',
'actimeo',
'bg',
'comment',
'defaults',
'delay_connect',
'direct-io-mode',
'intr',
'loop',
'nointr',
'nobootwait',
'nofail',
'password',
'reconnect',
'retry',
'soft',
'auto',
'users',
'bind',
'nonempty',
'transform_symlinks',
'port',
'backup-volfile-servers',
]
if extra_mount_invisible_options:
mount_invisible_options.extend(extra_mount_invisible_options)
if hidden_opts:
mount_invisible_options = list(set(mount_invisible_options) | set(hidden_opts))
# options which are provided as key=value (e.g. password=Zohp5ohb)
mount_invisible_keys = [
'actimeo',
'comment',
'credentials',
'direct-io-mode',
'password',
'port',
'retry',
'secretfile',
]
if extra_mount_invisible_keys:
mount_invisible_keys.extend(extra_mount_invisible_keys)
# Some filesystems have options which should not force a remount.
mount_ignore_fs_keys = {
'ramfs': ['size']
}
if extra_mount_ignore_fs_keys:
mount_ignore_fs_keys.update(extra_mount_ignore_fs_keys)
# Some options are translated once mounted
mount_translate_options = {
'tcp': 'proto=tcp',
'udp': 'proto=udp',
}
if extra_mount_translate_options:
mount_translate_options.update(extra_mount_translate_options)
for opt in opts:
if opt in mount_translate_options:
opt = mount_translate_options[opt]
keyval_option = opt.split('=')[0]
if keyval_option in mount_invisible_keys:
opt = keyval_option
size_match = re.match(r'size=(?P<size_value>[0-9]+)(?P<size_unit>k|m|g)', opt)
if size_match:
converted_size = _size_convert(size_match)
opt = "size={0}k".format(converted_size)
# make cifs option user synonym for option username which is reported by /proc/mounts
if fstype in ['cifs'] and opt.split('=')[0] == 'user':
opt = "username={0}".format(opt.split('=')[1])
if opt.split('=')[0] in mount_ignore_fs_keys.get(fstype, []):
opt = opt.split('=')[0]
# convert uid/gid to numeric value from user/group name
name_id_opts = {'uid': 'user.info',
'gid': 'group.info'}
if opt.split('=')[0] in name_id_opts and len(opt.split('=')) > 1:
_givenid = opt.split('=')[1]
_param = opt.split('=')[0]
_id = _givenid
if not re.match('[0-9]+$', _givenid):
_info = __salt__[name_id_opts[_param]](_givenid)
if _info and _param in _info:
_id = _info[_param]
opt = _param + '=' + six.text_type(_id)
_active_superopts = active[real_name].get('superopts', [])
for _active_opt in _active_superopts:
size_match = re.match(r'size=(?P<size_value>[0-9]+)(?P<size_unit>k|m|g)', _active_opt)
if size_match:
converted_size = _size_convert(size_match)
opt = "size={0}k".format(converted_size)
_active_superopts.remove(_active_opt)
_active_opt = "size={0}k".format(converted_size)
_active_superopts.append(_active_opt)
if opt not in active[real_name]['opts'] \
and opt not in _active_superopts \
and opt not in mount_invisible_options \
and opt not in mount_ignore_fs_keys.get(fstype, []) \
and opt not in mount_invisible_keys:
if __opts__['test']:
ret['result'] = None
ret['comment'] = "Remount would be forced because options ({0}) changed".format(opt)
return ret
else:
# Some file systems require umounting and mounting if options change
# add others to list that require similar functionality
if fstype in ['nfs', 'cvfs'] or fstype.startswith('fuse'):
ret['changes']['umount'] = "Forced unmount and mount because " \
+ "options ({0}) changed".format(opt)
unmount_result = __salt__['mount.umount'](real_name)
if unmount_result is True:
mount_result = __salt__['mount.mount'](real_name, device, mkmnt=mkmnt, fstype=fstype, opts=opts)
ret['result'] = mount_result
else:
ret['result'] = False
ret['comment'] = 'Unable to unmount {0}: {1}.'.format(real_name, unmount_result)
return ret
else:
ret['changes']['umount'] = "Forced remount because " \
+ "options ({0}) changed".format(opt)
remount_result = __salt__['mount.remount'](real_name, device, mkmnt=mkmnt, fstype=fstype, opts=opts)
ret['result'] = remount_result
# Cleanup after the remount, so we
# don't write remount into fstab
if 'remount' in opts:
opts.remove('remount')
# Update the cache
update_mount_cache = True
mount_cache = __salt__['mount.read_mount_cache'](real_name)
if 'opts' in mount_cache:
_missing = [opt for opt in mount_cache['opts']
if opt not in opts]
if _missing:
if __opts__['test']:
ret['result'] = None
ret['comment'] = ('Remount would be forced because'
' options ({0})'
' changed'.format(','.join(_missing)))
return ret
else:
# Some file systems require umounting and mounting if options change
# add others to list that require similar functionality
if fstype in ['nfs', 'cvfs'] or fstype.startswith('fuse'):
ret['changes']['umount'] = "Forced unmount and mount because " \
+ "options ({0}) changed".format(opt)
unmount_result = __salt__['mount.umount'](real_name)
if unmount_result is True:
mount_result = __salt__['mount.mount'](real_name, device, mkmnt=mkmnt, fstype=fstype, opts=opts)
ret['result'] = mount_result
else:
ret['result'] = False
ret['comment'] = 'Unable to unmount {0}: {1}.'.format(real_name, unmount_result)
return ret
else:
ret['changes']['umount'] = "Forced remount because " \
+ "options ({0}) changed".format(opt)
remount_result = __salt__['mount.remount'](real_name, device, mkmnt=mkmnt, fstype=fstype, opts=opts)
ret['result'] = remount_result
# Cleanup after the remount, so we
# don't write remount into fstab
if 'remount' in opts:
opts.remove('remount')
update_mount_cache = True
else:
update_mount_cache = True
if real_device not in device_list:
# name matches but device doesn't - need to umount
_device_mismatch_is_ignored = None
for regex in list(device_name_regex):
for _device in device_list:
if re.match(regex, _device):
_device_mismatch_is_ignored = _device
break
if _device_mismatch_is_ignored:
ret['result'] = True
ret['comment'] = "An umount will not be forced " \
+ "because device matched device_name_regex: " \
+ _device_mismatch_is_ignored
elif __opts__['test']:
ret['result'] = None
ret['comment'] = "An umount would have been forced " \
+ "because devices do not match. Watched: " \
+ device
else:
ret['changes']['umount'] = "Forced unmount because devices " \
+ "don't match. Wanted: " + device
if real_device != device:
ret['changes']['umount'] += " (" + real_device + ")"
ret['changes']['umount'] += ", current: " + ', '.join(device_list)
out = __salt__['mount.umount'](real_name, user=user)
active = __salt__['mount.active'](extended=True)
if real_name in active:
ret['comment'] = "Unable to unmount"
ret['result'] = None
return ret
update_mount_cache = True
else:
ret['comment'] = 'Target was already mounted'
# using a duplicate check so I can catch the results of a umount
if real_name not in active:
if mount:
# The mount is not present! Mount it
if __opts__['test']:
ret['result'] = None
if os.path.exists(name):
ret['comment'] = '{0} would be mounted'.format(name)
elif mkmnt:
ret['comment'] = '{0} would be created and mounted'.format(name)
else:
ret['comment'] = '{0} does not exist and would not be created'.format(name)
return ret
if not os.path.exists(name) and not mkmnt:
ret['result'] = False
ret['comment'] = 'Mount directory is not present'
return ret
out = __salt__['mount.mount'](name, device, mkmnt, fstype, opts, user=user)
active = __salt__['mount.active'](extended=True)
update_mount_cache = True
if isinstance(out, string_types):
# Failed to (re)mount, the state has failed!
ret['comment'] = out
ret['result'] = False
return ret
elif real_name in active:
# (Re)mount worked!
ret['comment'] = 'Target was successfully mounted'
ret['changes']['mount'] = True
elif not os.path.exists(name):
if __opts__['test']:
ret['result'] = None
if mkmnt:
ret['comment'] = '{0} would be created, but not mounted'.format(name)
else:
ret['comment'] = '{0} does not exist and would neither be created nor mounted'.format(name)
elif mkmnt:
__salt__['file.mkdir'](name, user=user)
ret['comment'] = '{0} was created, not mounted'.format(name)
else:
ret['comment'] = '{0} not present and not mounted'.format(name)
else:
if __opts__['test']:
ret['result'] = None
ret['comment'] = '{0} would not be mounted'.format(name)
else:
ret['comment'] = '{0} not mounted'.format(name)
if persist:
if '/etc/fstab' == config:
# Override default for Mac OS
if __grains__['os'] in ['MacOS', 'Darwin']:
config = "/etc/auto_salt"
# Override default for AIX
elif 'AIX' in __grains__['os']:
config = "/etc/filesystems"
if __opts__['test']:
if __grains__['os'] in ['MacOS', 'Darwin']:
out = __salt__['mount.set_automaster'](name,
device,
fstype,
opts,
config,
test=True)
elif __grains__['os'] in ['AIX']:
out = __salt__['mount.set_filesystems'](name,
device,
fstype,
opts,
mount,
config,
test=True,
match_on=match_on)
elif 'Solaris' in __grains__['os']:
out = __salt__['mount.set_vfstab'](name,
device,
fstype,
opts,
config=config,
test=True,
match_on=match_on)
else:
out = __salt__['mount.set_fstab'](name,
device,
fstype,
opts,
dump,
pass_num,
config,
test=True,
match_on=match_on)
if out != 'present':
ret['result'] = None
if out == 'new':
if mount:
comment = ('{0} is mounted, but needs to be '
'written to the fstab in order to be '
'made persistent.').format(name)
else:
comment = ('{0} needs to be '
'written to the fstab in order to be '
'made persistent.').format(name)
elif out == 'change':
if mount:
comment = ('{0} is mounted, but its fstab entry '
'must be updated.').format(name)
else:
comment = ('The {0} fstab entry '
'must be updated.').format(name)
else:
ret['result'] = False
comment = ('Unable to detect fstab status for '
'mount point {0} due to unexpected '
'output \'{1}\' from call to '
'mount.set_fstab. This is most likely '
'a bug.').format(name, out)
if 'comment' in ret:
ret['comment'] = '{0}. {1}'.format(ret['comment'], comment)
else:
ret['comment'] = comment
return ret
else:
if __grains__['os'] in ['MacOS', 'Darwin']:
out = __salt__['mount.set_automaster'](name,
device,
fstype,
opts,
config)
elif __grains__['os'] in ['AIX']:
out = __salt__['mount.set_filesystems'](name,
device,
fstype,
opts,
mount,
config,
match_on=match_on)
elif 'Solaris' in __grains__['os']:
out = __salt__['mount.set_vfstab'](name,
device,
fstype,
opts,
config=config,
match_on=match_on)
else:
out = __salt__['mount.set_fstab'](name,
device,
fstype,
opts,
dump,
pass_num,
config,
match_on=match_on)
if update_mount_cache:
cache_result = __salt__['mount.write_mount_cache'](real_name,
device,
mkmnt=mkmnt,
fstype=fstype,
mount_opts=opts)
if out == 'present':
ret['comment'] += '. Entry already exists in the fstab.'
return ret
if out == 'new':
ret['changes']['persist'] = 'new'
ret['comment'] += '. Added new entry to the fstab.'
return ret
if out == 'change':
ret['changes']['persist'] = 'update'
ret['comment'] += '. Updated the entry in the fstab.'
return ret
if out == 'bad config':
ret['result'] = False
ret['comment'] += '. However, the fstab was not found.'
return ret
return ret | Verify that a device is mounted
name
The path to the location where the device is to be mounted
device
The device name, typically the device node, such as ``/dev/sdb1``
or ``UUID=066e0200-2867-4ebe-b9e6-f30026ca2314`` or ``LABEL=DATA``
fstype
The filesystem type, this will be ``xfs``, ``ext2/3/4`` in the case of classic
filesystems, ``fuse`` in the case of fuse mounts, and ``nfs`` in the case of nfs mounts
mkmnt
If the mount point is not present then the state will fail, set ``mkmnt: True``
to create the mount point if it is otherwise not present
opts
A list object of options or a comma delimited list
dump
The dump value to be passed into the fstab, Default is ``0``
pass_num
The pass value to be passed into the fstab, Default is ``0``
config
Set an alternative location for the fstab, Default is ``/etc/fstab``
persist
Set if the mount should be saved in the fstab, Default is ``True``
mount
Set if the mount should be mounted immediately, Default is ``True``
user
The account used to execute the mount; this defaults to the user salt is
running as on the minion
match_on
A name or list of fstab properties on which this state should be applied.
Default is ``auto``, a special value indicating to guess based on fstype.
In general, ``auto`` matches on name for recognized special devices and
device otherwise.
device_name_regex
A list of device exact names or regular expressions which should
not force a remount. For example, glusterfs may be mounted with a
comma-separated list of servers in fstab, but the /proc/self/mountinfo
will show only the first available server.
.. code-block:: jinja
{% set glusterfs_ip_list = ['10.0.0.1', '10.0.0.2', '10.0.0.3'] %}
mount glusterfs volume:
mount.mounted:
- name: /mnt/glusterfs_mount_point
- device: {{ glusterfs_ip_list|join(',') }}:/volume_name
- fstype: glusterfs
- opts: _netdev,rw,defaults,direct-io-mode=disable
- mkmnt: True
- persist: True
- dump: 0
- pass_num: 0
- device_name_regex:
- ({{ glusterfs_ip_list|join('|') }}):/volume_name
.. versionadded:: 2016.11.0
extra_mount_invisible_options
A list of extra options that are not visible through the
``/proc/self/mountinfo`` interface.
If an option is not visible through this interface it will always remount
the device. This option extends the builtin ``mount_invisible_options``
list.
extra_mount_invisible_keys
A list of extra key options that are not visible through the
``/proc/self/mountinfo`` interface.
If a key option is not visible through this interface it will always
remount the device. This option extends the builtin
``mount_invisible_keys`` list.
A good example for a key option is the password option::
password=badsecret
extra_mount_ignore_fs_keys
A dict of filesystem options which should not force a remount. This will update
the internal dictionary. The dict should look like this::
{
'ramfs': ['size']
}
extra_mount_translate_options
A dict of mount options that gets translated when mounted. To prevent a remount
add additional options to the default dictionary. This will update the internal
dictionary. The dictionary should look like this::
{
'tcp': 'proto=tcp',
'udp': 'proto=udp'
}
hidden_opts
A list of mount options that will be ignored when considering a remount
as part of the state application
.. versionadded:: 2015.8.2 | Below is the instruction that describes the task:
### Input:
Verify that a device is mounted
name
The path to the location where the device is to be mounted
device
The device name, typically the device node, such as ``/dev/sdb1``
or ``UUID=066e0200-2867-4ebe-b9e6-f30026ca2314`` or ``LABEL=DATA``
fstype
The filesystem type, this will be ``xfs``, ``ext2/3/4`` in the case of classic
filesystems, ``fuse`` in the case of fuse mounts, and ``nfs`` in the case of nfs mounts
mkmnt
If the mount point is not present then the state will fail, set ``mkmnt: True``
to create the mount point if it is otherwise not present
opts
A list object of options or a comma delimited list
dump
The dump value to be passed into the fstab, Default is ``0``
pass_num
The pass value to be passed into the fstab, Default is ``0``
config
Set an alternative location for the fstab, Default is ``/etc/fstab``
persist
Set if the mount should be saved in the fstab, Default is ``True``
mount
Set if the mount should be mounted immediately, Default is ``True``
user
The account used to execute the mount; this defaults to the user salt is
running as on the minion
match_on
A name or list of fstab properties on which this state should be applied.
Default is ``auto``, a special value indicating to guess based on fstype.
In general, ``auto`` matches on name for recognized special devices and
device otherwise.
device_name_regex
A list of device exact names or regular expressions which should
not force a remount. For example, glusterfs may be mounted with a
comma-separated list of servers in fstab, but the /proc/self/mountinfo
will show only the first available server.
.. code-block:: jinja
{% set glusterfs_ip_list = ['10.0.0.1', '10.0.0.2', '10.0.0.3'] %}
mount glusterfs volume:
mount.mounted:
- name: /mnt/glusterfs_mount_point
- device: {{ glusterfs_ip_list|join(',') }}:/volume_name
- fstype: glusterfs
- opts: _netdev,rw,defaults,direct-io-mode=disable
- mkmnt: True
- persist: True
- dump: 0
- pass_num: 0
- device_name_regex:
- ({{ glusterfs_ip_list|join('|') }}):/volume_name
.. versionadded:: 2016.11.0
extra_mount_invisible_options
A list of extra options that are not visible through the
``/proc/self/mountinfo`` interface.
If an option is not visible through this interface it will always remount
the device. This option extends the builtin ``mount_invisible_options``
list.
extra_mount_invisible_keys
A list of extra key options that are not visible through the
``/proc/self/mountinfo`` interface.
If a key option is not visible through this interface it will always
remount the device. This option extends the builtin
``mount_invisible_keys`` list.
A good example for a key option is the password option::
password=badsecret
extra_mount_ignore_fs_keys
A dict of filesystem options which should not force a remount. This will update
the internal dictionary. The dict should look like this::
{
'ramfs': ['size']
}
extra_mount_translate_options
A dict of mount options that gets translated when mounted. To prevent a remount
add additional options to the default dictionary. This will update the internal
dictionary. The dictionary should look like this::
{
'tcp': 'proto=tcp',
'udp': 'proto=udp'
}
hidden_opts
A list of mount options that will be ignored when considering a remount
as part of the state application
.. versionadded:: 2015.8.2
### Response:
def mounted(name,
device,
fstype,
mkmnt=False,
opts='defaults',
dump=0,
pass_num=0,
config='/etc/fstab',
persist=True,
mount=True,
user=None,
match_on='auto',
device_name_regex=None,
extra_mount_invisible_options=None,
extra_mount_invisible_keys=None,
extra_mount_ignore_fs_keys=None,
extra_mount_translate_options=None,
hidden_opts=None,
**kwargs):
'''
Verify that a device is mounted
name
The path to the location where the device is to be mounted
device
The device name, typically the device node, such as ``/dev/sdb1``
or ``UUID=066e0200-2867-4ebe-b9e6-f30026ca2314`` or ``LABEL=DATA``
fstype
The filesystem type, this will be ``xfs``, ``ext2/3/4`` in the case of classic
filesystems, ``fuse`` in the case of fuse mounts, and ``nfs`` in the case of nfs mounts
mkmnt
If the mount point is not present then the state will fail, set ``mkmnt: True``
to create the mount point if it is otherwise not present
opts
A list object of options or a comma delimited list
dump
The dump value to be passed into the fstab, Default is ``0``
pass_num
The pass value to be passed into the fstab, Default is ``0``
config
Set an alternative location for the fstab, Default is ``/etc/fstab``
persist
Set if the mount should be saved in the fstab, Default is ``True``
mount
Set if the mount should be mounted immediately, Default is ``True``
user
The account used to execute the mount; this defaults to the user salt is
running as on the minion
match_on
A name or list of fstab properties on which this state should be applied.
Default is ``auto``, a special value indicating to guess based on fstype.
In general, ``auto`` matches on name for recognized special devices and
device otherwise.
device_name_regex
A list of device exact names or regular expressions which should
not force a remount. For example, glusterfs may be mounted with a
comma-separated list of servers in fstab, but the /proc/self/mountinfo
will show only the first available server.
.. code-block:: jinja
{% set glusterfs_ip_list = ['10.0.0.1', '10.0.0.2', '10.0.0.3'] %}
mount glusterfs volume:
mount.mounted:
- name: /mnt/glusterfs_mount_point
- device: {{ glusterfs_ip_list|join(',') }}:/volume_name
- fstype: glusterfs
- opts: _netdev,rw,defaults,direct-io-mode=disable
- mkmnt: True
- persist: True
- dump: 0
- pass_num: 0
- device_name_regex:
- ({{ glusterfs_ip_list|join('|') }}):/volume_name
.. versionadded:: 2016.11.0
extra_mount_invisible_options
A list of extra options that are not visible through the
``/proc/self/mountinfo`` interface.
If an option is not visible through this interface it will always remount
the device. This option extends the builtin ``mount_invisible_options``
list.
extra_mount_invisible_keys
A list of extra key options that are not visible through the
``/proc/self/mountinfo`` interface.
If a key option is not visible through this interface it will always
remount the device. This option extends the builtin
``mount_invisible_keys`` list.
A good example for a key option is the password option::
password=badsecret
extra_mount_ignore_fs_keys
A dict of filesystem options which should not force a remount. This will update
the internal dictionary. The dict should look like this::
{
'ramfs': ['size']
}
extra_mount_translate_options
A dict of mount options that gets translated when mounted. To prevent a remount
add additional options to the default dictionary. This will update the internal
dictionary. The dictionary should look like this::
{
'tcp': 'proto=tcp',
'udp': 'proto=udp'
}
hidden_opts
A list of mount options that will be ignored when considering a remount
as part of the state application
.. versionadded:: 2015.8.2
'''
ret = {'name': name,
'changes': {},
'result': True,
'comment': ''}
update_mount_cache = False
if not name:
ret['result'] = False
ret['comment'] = 'Must provide name to mount.mounted'
return ret
if not device:
ret['result'] = False
ret['comment'] = 'Must provide device to mount.mounted'
return ret
if not fstype:
ret['result'] = False
ret['comment'] = 'Must provide fstype to mount.mounted'
return ret
if device_name_regex is None:
device_name_regex = []
# Defaults is not a valid option on Mac OS
if __grains__['os'] in ['MacOS', 'Darwin'] and opts == 'defaults':
opts = 'noowners'
# Defaults is not a valid option on AIX
if __grains__['os'] in ['AIX']:
if opts == 'defaults':
opts = ''
# Defaults is not a valid option on Solaris
if 'Solaris' in __grains__['os'] and opts == 'defaults':
opts = '-'
# Make sure that opts is correct, it can be a list or a comma delimited
# string
if isinstance(opts, string_types):
opts = opts.split(',')
if isinstance(hidden_opts, string_types):
hidden_opts = hidden_opts.split(',')
# remove possible trailing slash
if not name == '/':
name = name.rstrip('/')
device_list = []
# Get the active data
active = __salt__['mount.active'](extended=True)
real_name = os.path.realpath(name)
if device.startswith('/'):
if 'bind' in opts and real_name in active:
_device = device
if active[real_name]['device'].startswith('/'):
# Find the device that the bind really points at.
while True:
if _device in active:
_real_device = active[_device]['device']
opts = list(set(opts + active[_device]['opts'] + active[_device]['superopts']))
active[real_name]['opts'].append('bind')
break
_device = os.path.dirname(_device)
real_device = _real_device
else:
# Remote file systems act differently.
if _device in active:
opts = list(set(opts + active[_device]['opts'] + active[_device]['superopts']))
active[real_name]['opts'].append('bind')
real_device = active[real_name]['device']
else:
real_device = os.path.realpath(device)
elif device.upper().startswith('UUID='):
real_device = device.split('=')[1].strip('"').lower()
elif device.upper().startswith('LABEL='):
_label = device.split('=')[1]
cmd = 'blkid -t LABEL={0}'.format(_label)
res = __salt__['cmd.run_all']('{0}'.format(cmd))
if res['retcode'] > 0:
ret['comment'] = 'Unable to find device with label {0}.'.format(_label)
ret['result'] = False
return ret
else:
# output is a list of entries like this:
# /dev/sda: LABEL="<label>" UUID="<uuid>" UUID_SUB="<uuid>" TYPE="btrfs"
# exact list of properties varies between filesystems, but we're
# only interested in the device in the first column
for line in res['stdout']:
dev_with_label = line.split(':')[0]
device_list.append(dev_with_label)
real_device = device_list[0]
else:
real_device = device
# LVS devices have 2 names under /dev:
# /dev/mapper/vg--name-lv--name and /dev/vg-name/lv-name
# No matter what name is used for mounting,
# mount always displays the device as /dev/mapper/vg--name-lv--name
# Note the double-dash escaping.
# So, let's call that the canonical device name
# We should normalize names of the /dev/vg-name/lv-name type to the canonical name
lvs_match = re.match(r'^/dev/(?P<vg_name>[^/]+)/(?P<lv_name>[^/]+$)', device)
if lvs_match:
double_dash_escaped = dict((k, re.sub(r'-', '--', v)) for k, v in six.iteritems(lvs_match.groupdict()))
mapper_device = '/dev/mapper/{vg_name}-{lv_name}'.format(**double_dash_escaped)
if os.path.exists(mapper_device):
real_device = mapper_device
# When included in a Salt state file, FUSE devices are prefaced by the
# filesystem type and a hash, e.g. sshfs. In the mount list only the
# hostname is included. So if we detect that the device is a FUSE device
# then we remove the prefaced string so that the device in state matches
# the device in the mount list.
fuse_match = re.match(r'^\w+\#(?P<device_name>.+)', device)
if fuse_match:
if 'device_name' in fuse_match.groupdict():
real_device = fuse_match.group('device_name')
if real_name in active:
if 'superopts' not in active[real_name]:
active[real_name]['superopts'] = []
if mount:
device_list.append(active[real_name]['device'])
device_list.append(os.path.realpath(device_list[0]))
alt_device = active[real_name]['alt_device'] if 'alt_device' in active[real_name] else None
uuid_device = active[real_name]['device_uuid'] if 'device_uuid' in active[real_name] else None
label_device = active[real_name]['device_label'] if 'device_label' in active[real_name] else None
if alt_device and alt_device not in device_list:
device_list.append(alt_device)
if uuid_device and uuid_device not in device_list:
device_list.append(uuid_device)
if label_device and label_device not in device_list:
device_list.append(label_device)
if opts:
mount_invisible_options = [
'_netdev',
'actimeo',
'bg',
'comment',
'defaults',
'delay_connect',
'direct-io-mode',
'intr',
'loop',
'nointr',
'nobootwait',
'nofail',
'password',
'reconnect',
'retry',
'soft',
'auto',
'users',
'bind',
'nonempty',
'transform_symlinks',
'port',
'backup-volfile-servers',
]
if extra_mount_invisible_options:
mount_invisible_options.extend(extra_mount_invisible_options)
if hidden_opts:
mount_invisible_options = list(set(mount_invisible_options) | set(hidden_opts))
# options which are provided as key=value (e.g. password=Zohp5ohb)
mount_invisible_keys = [
'actimeo',
'comment',
'credentials',
'direct-io-mode',
'password',
'port',
'retry',
'secretfile',
]
if extra_mount_invisible_keys:
mount_invisible_keys.extend(extra_mount_invisible_keys)
# Some filesystems have options which should not force a remount.
mount_ignore_fs_keys = {
'ramfs': ['size']
}
if extra_mount_ignore_fs_keys:
mount_ignore_fs_keys.update(extra_mount_ignore_fs_keys)
# Some options are translated once mounted
mount_translate_options = {
'tcp': 'proto=tcp',
'udp': 'proto=udp',
}
if extra_mount_translate_options:
mount_translate_options.update(extra_mount_translate_options)
for opt in opts:
if opt in mount_translate_options:
opt = mount_translate_options[opt]
keyval_option = opt.split('=')[0]
if keyval_option in mount_invisible_keys:
opt = keyval_option
size_match = re.match(r'size=(?P<size_value>[0-9]+)(?P<size_unit>k|m|g)', opt)
if size_match:
converted_size = _size_convert(size_match)
opt = "size={0}k".format(converted_size)
# make cifs option user synonym for option username which is reported by /proc/mounts
if fstype in ['cifs'] and opt.split('=')[0] == 'user':
opt = "username={0}".format(opt.split('=')[1])
if opt.split('=')[0] in mount_ignore_fs_keys.get(fstype, []):
opt = opt.split('=')[0]
# convert uid/gid to numeric value from user/group name
name_id_opts = {'uid': 'user.info',
'gid': 'group.info'}
if opt.split('=')[0] in name_id_opts and len(opt.split('=')) > 1:
_givenid = opt.split('=')[1]
_param = opt.split('=')[0]
_id = _givenid
if not re.match('[0-9]+$', _givenid):
_info = __salt__[name_id_opts[_param]](_givenid)
if _info and _param in _info:
_id = _info[_param]
opt = _param + '=' + six.text_type(_id)
_active_superopts = active[real_name].get('superopts', [])
for _active_opt in _active_superopts:
size_match = re.match(r'size=(?P<size_value>[0-9]+)(?P<size_unit>k|m|g)', _active_opt)
if size_match:
converted_size = _size_convert(size_match)
opt = "size={0}k".format(converted_size)
_active_superopts.remove(_active_opt)
_active_opt = "size={0}k".format(converted_size)
_active_superopts.append(_active_opt)
if opt not in active[real_name]['opts'] \
and opt not in _active_superopts \
and opt not in mount_invisible_options \
and opt not in mount_ignore_fs_keys.get(fstype, []) \
and opt not in mount_invisible_keys:
if __opts__['test']:
ret['result'] = None
ret['comment'] = "Remount would be forced because options ({0}) changed".format(opt)
return ret
else:
# Some file systems require umounting and mounting if options change
# add others to list that require similar functionality
if fstype in ['nfs', 'cvfs'] or fstype.startswith('fuse'):
ret['changes']['umount'] = "Forced unmount and mount because " \
+ "options ({0}) changed".format(opt)
unmount_result = __salt__['mount.umount'](real_name)
if unmount_result is True:
mount_result = __salt__['mount.mount'](real_name, device, mkmnt=mkmnt, fstype=fstype, opts=opts)
ret['result'] = mount_result
else:
ret['result'] = False
ret['comment'] = 'Unable to unmount {0}: {1}.'.format(real_name, unmount_result)
return ret
else:
ret['changes']['umount'] = "Forced remount because " \
+ "options ({0}) changed".format(opt)
remount_result = __salt__['mount.remount'](real_name, device, mkmnt=mkmnt, fstype=fstype, opts=opts)
ret['result'] = remount_result
# Cleanup after the remount, so we
# don't write remount into fstab
if 'remount' in opts:
opts.remove('remount')
# Update the cache
update_mount_cache = True
mount_cache = __salt__['mount.read_mount_cache'](real_name)
if 'opts' in mount_cache:
_missing = [opt for opt in mount_cache['opts']
if opt not in opts]
if _missing:
if __opts__['test']:
ret['result'] = None
ret['comment'] = ('Remount would be forced because'
' options ({0})'
' changed'.format(','.join(_missing)))
return ret
else:
# Some file systems require umounting and mounting if options change
# add others to list that require similar functionality
if fstype in ['nfs', 'cvfs'] or fstype.startswith('fuse'):
ret['changes']['umount'] = "Forced unmount and mount because " \
+ "options ({0}) changed".format(opt)
unmount_result = __salt__['mount.umount'](real_name)
if unmount_result is True:
mount_result = __salt__['mount.mount'](real_name, device, mkmnt=mkmnt, fstype=fstype, opts=opts)
ret['result'] = mount_result
else:
ret['result'] = False
ret['comment'] = 'Unable to unmount {0}: {1}.'.format(real_name, unmount_result)
return ret
else:
ret['changes']['umount'] = "Forced remount because " \
+ "options ({0}) changed".format(opt)
remount_result = __salt__['mount.remount'](real_name, device, mkmnt=mkmnt, fstype=fstype, opts=opts)
ret['result'] = remount_result
# Cleanup after the remount, so we
# don't write remount into fstab
if 'remount' in opts:
opts.remove('remount')
update_mount_cache = True
else:
update_mount_cache = True
if real_device not in device_list:
# name matches but device doesn't - need to umount
_device_mismatch_is_ignored = None
for regex in list(device_name_regex):
for _device in device_list:
if re.match(regex, _device):
_device_mismatch_is_ignored = _device
break
if _device_mismatch_is_ignored:
ret['result'] = True
ret['comment'] = "An umount will not be forced " \
+ "because device matched device_name_regex: " \
+ _device_mismatch_is_ignored
elif __opts__['test']:
ret['result'] = None
ret['comment'] = "An umount would have been forced " \
+ "because devices do not match. Watched: " \
+ device
else:
ret['changes']['umount'] = "Forced unmount because devices " \
+ "don't match. Wanted: " + device
if real_device != device:
ret['changes']['umount'] += " (" + real_device + ")"
ret['changes']['umount'] += ", current: " + ', '.join(device_list)
out = __salt__['mount.umount'](real_name, user=user)
active = __salt__['mount.active'](extended=True)
if real_name in active:
ret['comment'] = "Unable to unmount"
ret['result'] = None
return ret
update_mount_cache = True
else:
ret['comment'] = 'Target was already mounted'
# using a duplicate check so I can catch the results of a umount
if real_name not in active:
if mount:
# The mount is not present! Mount it
if __opts__['test']:
ret['result'] = None
if os.path.exists(name):
ret['comment'] = '{0} would be mounted'.format(name)
elif mkmnt:
ret['comment'] = '{0} would be created and mounted'.format(name)
else:
ret['comment'] = '{0} does not exist and would not be created'.format(name)
return ret
if not os.path.exists(name) and not mkmnt:
ret['result'] = False
ret['comment'] = 'Mount directory is not present'
return ret
out = __salt__['mount.mount'](name, device, mkmnt, fstype, opts, user=user)
active = __salt__['mount.active'](extended=True)
update_mount_cache = True
if isinstance(out, string_types):
# Failed to (re)mount, the state has failed!
ret['comment'] = out
ret['result'] = False
return ret
elif real_name in active:
# (Re)mount worked!
ret['comment'] = 'Target was successfully mounted'
ret['changes']['mount'] = True
elif not os.path.exists(name):
if __opts__['test']:
ret['result'] = None
if mkmnt:
ret['comment'] = '{0} would be created, but not mounted'.format(name)
else:
ret['comment'] = '{0} does not exist and would neither be created nor mounted'.format(name)
elif mkmnt:
__salt__['file.mkdir'](name, user=user)
ret['comment'] = '{0} was created, not mounted'.format(name)
else:
ret['comment'] = '{0} not present and not mounted'.format(name)
else:
if __opts__['test']:
ret['result'] = None
ret['comment'] = '{0} would not be mounted'.format(name)
else:
ret['comment'] = '{0} not mounted'.format(name)
if persist:
if '/etc/fstab' == config:
# Override default for Mac OS
if __grains__['os'] in ['MacOS', 'Darwin']:
config = "/etc/auto_salt"
# Override default for AIX
elif 'AIX' in __grains__['os']:
config = "/etc/filesystems"
if __opts__['test']:
if __grains__['os'] in ['MacOS', 'Darwin']:
out = __salt__['mount.set_automaster'](name,
device,
fstype,
opts,
config,
test=True)
elif __grains__['os'] in ['AIX']:
out = __salt__['mount.set_filesystems'](name,
device,
fstype,
opts,
mount,
config,
test=True,
match_on=match_on)
elif 'Solaris' in __grains__['os']:
out = __salt__['mount.set_vfstab'](name,
device,
fstype,
opts,
config=config,
test=True,
match_on=match_on)
else:
out = __salt__['mount.set_fstab'](name,
device,
fstype,
opts,
dump,
pass_num,
config,
test=True,
match_on=match_on)
if out != 'present':
ret['result'] = None
if out == 'new':
if mount:
comment = ('{0} is mounted, but needs to be '
'written to the fstab in order to be '
'made persistent.').format(name)
else:
comment = ('{0} needs to be '
'written to the fstab in order to be '
'made persistent.').format(name)
elif out == 'change':
if mount:
comment = ('{0} is mounted, but its fstab entry '
'must be updated.').format(name)
else:
comment = ('The {0} fstab entry '
'must be updated.').format(name)
else:
ret['result'] = False
comment = ('Unable to detect fstab status for '
'mount point {0} due to unexpected '
'output \'{1}\' from call to '
'mount.set_fstab. This is most likely '
'a bug.').format(name, out)
if 'comment' in ret:
ret['comment'] = '{0}. {1}'.format(ret['comment'], comment)
else:
ret['comment'] = comment
return ret
else:
if __grains__['os'] in ['MacOS', 'Darwin']:
out = __salt__['mount.set_automaster'](name,
device,
fstype,
opts,
config)
elif __grains__['os'] in ['AIX']:
out = __salt__['mount.set_filesystems'](name,
device,
fstype,
opts,
mount,
config,
match_on=match_on)
elif 'Solaris' in __grains__['os']:
out = __salt__['mount.set_vfstab'](name,
device,
fstype,
opts,
config=config,
match_on=match_on)
else:
out = __salt__['mount.set_fstab'](name,
device,
fstype,
opts,
dump,
pass_num,
config,
match_on=match_on)
if update_mount_cache:
cache_result = __salt__['mount.write_mount_cache'](real_name,
device,
mkmnt=mkmnt,
fstype=fstype,
mount_opts=opts)
if out == 'present':
ret['comment'] += '. Entry already exists in the fstab.'
return ret
if out == 'new':
ret['changes']['persist'] = 'new'
ret['comment'] += '. Added new entry to the fstab.'
return ret
if out == 'change':
ret['changes']['persist'] = 'update'
ret['comment'] += '. Updated the entry in the fstab.'
return ret
if out == 'bad config':
ret['result'] = False
ret['comment'] += '. However, the fstab was not found.'
return ret
return ret |
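The state above repeatedly calls a module-level helper, _size_convert, that is not included in this excerpt. The sketch below shows one plausible implementation, assuming the helper normalises a size=<n>(k|m|g) option to kilobytes so size options can be compared in a single unit; it is an assumption for illustration, not the actual Salt helper.

```python
import re


def _size_convert(size_match):
    # Assumed behaviour: convert the matched size to kilobytes.
    size_value = int(size_match.group('size_value'))
    unit = size_match.group('size_unit')
    factor = {'k': 1, 'm': 1024, 'g': 1024 * 1024}[unit]
    return size_value * factor


match = re.match(r'size=(?P<size_value>[0-9]+)(?P<size_unit>k|m|g)', 'size=2g')
print("size={0}k".format(_size_convert(match)))  # size=2097152k
```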
async def write_close_frame(self, data: bytes = b"") -> None:
"""
Write a close frame if and only if the connection state is OPEN.
This dedicated coroutine must be used for writing close frames to
ensure that at most one close frame is sent on a given connection.
"""
# Test and set the connection state before sending the close frame to
# avoid sending two frames in case of concurrent calls.
if self.state is State.OPEN:
# 7.1.3. The WebSocket Closing Handshake is Started
self.state = State.CLOSING
logger.debug("%s - state = CLOSING", self.side)
# 7.1.2. Start the WebSocket Closing Handshake
await self.write_frame(True, OP_CLOSE, data, _expected_state=State.CLOSING) | Write a close frame if and only if the connection state is OPEN.
This dedicated coroutine must be used for writing close frames to
ensure that at most one close frame is sent on a given connection. | Below is the instruction that describes the task:
### Input:
Write a close frame if and only if the connection state is OPEN.
This dedicated coroutine must be used for writing close frames to
ensure that at most one close frame is sent on a given connection.
### Response:
async def write_close_frame(self, data: bytes = b"") -> None:
"""
Write a close frame if and only if the connection state is OPEN.
This dedicated coroutine must be used for writing close frames to
ensure that at most one close frame is sent on a given connection.
"""
# Test and set the connection state before sending the close frame to
# avoid sending two frames in case of concurrent calls.
if self.state is State.OPEN:
# 7.1.3. The WebSocket Closing Handshake is Started
self.state = State.CLOSING
logger.debug("%s - state = CLOSING", self.side)
# 7.1.2. Start the WebSocket Closing Handshake
await self.write_frame(True, OP_CLOSE, data, _expected_state=State.CLOSING) |
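A standalone sketch of the test-and-set idea described in the docstring above (check the state, transition it, then act); the State enum below is a simplified stand-in for the websockets protocol state, not the library's own.
import enum
class State(enum.Enum):
    OPEN = 1
    CLOSING = 2
    CLOSED = 3
state = State.OPEN
def try_start_close():
    # test the state and transition it before doing any work, so at most
    # one caller ever gets to send the close frame
    global state
    if state is State.OPEN:
        state = State.CLOSING
        return True
    return False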
def tokenize_punctuation_command(text):
"""Process command that augments or modifies punctuation.
This is important to the tokenization of a string, as opening or closing
punctuation is not supposed to match.
:param Buffer text: iterator over text, with current position
"""
if text.peek() == '\\':
for point in PUNCTUATION_COMMANDS:
if text.peek((1, len(point) + 1)) == point:
return text.forward(len(point) + 1) | Process command that augments or modifies punctuation.
This is important to the tokenization of a string, as opening or closing
punctuation is not supposed to match.
:param Buffer text: iterator over text, with current position | Below is the the instruction that describes the task:
### Input:
Process command that augments or modifies punctuation.
This is important to the tokenization of a string, as opening or closing
punctuation is not supposed to match.
:param Buffer text: iterator over text, with current position
### Response:
def tokenize_punctuation_command(text):
"""Process command that augments or modifies punctuation.
This is important to the tokenization of a string, as opening or closing
punctuation is not supposed to match.
:param Buffer text: iterator over text, with current position
"""
if text.peek() == '\\':
for point in PUNCTUATION_COMMANDS:
if text.peek((1, len(point) + 1)) == point:
return text.forward(len(point) + 1) |
def OnTextColorDialog(self, event):
"""Event handler for launching text color dialog"""
dlg = wx.ColourDialog(self.main_window)
# Ensure the full colour dialog is displayed,
# not the abbreviated version.
dlg.GetColourData().SetChooseFull(True)
if dlg.ShowModal() == wx.ID_OK:
# Fetch color data
data = dlg.GetColourData()
color = data.GetColour().GetRGB()
post_command_event(self.main_window, self.main_window.TextColorMsg,
color=color)
dlg.Destroy() | Event handler for launching text color dialog | Below is the the instruction that describes the task:
### Input:
Event handler for launching text color dialog
### Response:
def OnTextColorDialog(self, event):
"""Event handler for launching text color dialog"""
dlg = wx.ColourDialog(self.main_window)
# Ensure the full colour dialog is displayed,
# not the abbreviated version.
dlg.GetColourData().SetChooseFull(True)
if dlg.ShowModal() == wx.ID_OK:
# Fetch color data
data = dlg.GetColourData()
color = data.GetColour().GetRGB()
post_command_event(self.main_window, self.main_window.TextColorMsg,
color=color)
dlg.Destroy() |
def addDataModels(self, mods):
'''
Adds a model definition (same format as input to Model.addDataModels and output of Model.getModelDef).
'''
# Load all the universal properties
for _, mdef in mods:
for univname, _, _ in mdef.get('univs', ()):
self.addUnivName(univname)
# Load all the forms
for _, mdef in mods:
for formname, formopts, propdefs in mdef.get('forms', ()):
self.formnames.add(formname)
self.propnames.add(formname)
for univname in self.univnames:
full = f'{formname}{univname}'
self.propnames.add(full)
for propname, _, _ in propdefs:
full = f'{formname}:{propname}'
self.propnames.add(full) | Adds a model definition (same format as input to Model.addDataModels and output of Model.getModelDef). | Below is the the instruction that describes the task:
### Input:
Adds a model definition (same format as input to Model.addDataModels and output of Model.getModelDef).
### Response:
def addDataModels(self, mods):
'''
Adds a model definition (same format as input to Model.addDataModels and output of Model.getModelDef).
'''
# Load all the universal properties
for _, mdef in mods:
for univname, _, _ in mdef.get('univs', ()):
self.addUnivName(univname)
# Load all the forms
for _, mdef in mods:
for formname, formopts, propdefs in mdef.get('forms', ()):
self.formnames.add(formname)
self.propnames.add(formname)
for univname in self.univnames:
full = f'{formname}{univname}'
self.propnames.add(full)
for propname, _, _ in propdefs:
full = f'{formname}:{propname}'
self.propnames.add(full) |
def countinputs(inputlist):
"""
Determine the number of inputfiles provided by the user and the
number of those files that are association tables
Parameters
----------
inputlist : string
the user input
Returns
-------
numInputs: int
number of inputs provided by the user
numASNfiles: int
number of association files provided as input
"""
# Initialize return values
numInputs = 0
numASNfiles = 0
    # Use irafglob to count the number of inputfiles
    files = irafglob(inputlist, atfile=None)
    # Use the built-in "len" function to count the number of entries in the list
numInputs = len(files)
# Loop over the list and see if any of the entries are association files
for file in files:
if (checkASN(file) == True):
numASNfiles += 1
return numInputs,numASNfiles | Determine the number of inputfiles provided by the user and the
number of those files that are association tables
Parameters
----------
inputlist : string
the user input
Returns
-------
numInputs: int
number of inputs provided by the user
numASNfiles: int
number of association files provided as input | Below is the the instruction that describes the task:
### Input:
Determine the number of inputfiles provided by the user and the
number of those files that are association tables
Parameters
----------
inputlist : string
the user input
Returns
-------
numInputs: int
number of inputs provided by the user
numASNfiles: int
number of association files provided as input
### Response:
def countinputs(inputlist):
"""
Determine the number of inputfiles provided by the user and the
number of those files that are association tables
Parameters
----------
inputlist : string
the user input
Returns
-------
numInputs: int
number of inputs provided by the user
numASNfiles: int
number of association files provided as input
"""
# Initialize return values
numInputs = 0
numASNfiles = 0
    # Use irafglob to count the number of inputfiles
    files = irafglob(inputlist, atfile=None)
    # Use the built-in "len" function to count the number of entries in the list
numInputs = len(files)
# Loop over the list and see if any of the entries are association files
for file in files:
if (checkASN(file) == True):
numASNfiles += 1
return numInputs,numASNfiles |
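A hedged usage sketch of countinputs; the wildcard pattern is illustrative and assumes matching FITS/association files exist in the working directory.
numInputs, numASNfiles = countinputs('*_flt.fits')
print('%d inputs, %d of them association tables' % (numInputs, numASNfiles))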
def version(self):
"""
Get Kubernetes API version
"""
response = self.get(version="", base="/version")
response.raise_for_status()
data = response.json()
return (data["major"], data["minor"]) | Get Kubernetes API version | Below is the the instruction that describes the task:
### Input:
Get Kubernetes API version
### Response:
def version(self):
"""
Get Kubernetes API version
"""
response = self.get(version="", base="/version")
response.raise_for_status()
data = response.json()
return (data["major"], data["minor"]) |
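For comparison, a self-contained sketch of the same call using the requests library directly; the base URL argument and the choice to skip TLS verification are illustrative assumptions, not part of the original client.
import requests
def k8s_version(base_url):
    # GET <base_url>/version and return the (major, minor) strings from the JSON body
    response = requests.get(base_url + "/version", verify=False)
    response.raise_for_status()
    data = response.json()
    return (data["major"], data["minor"])
# e.g. k8s_version("https://127.0.0.1:6443") might return ('1', '27')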
def map_providers(self, query='list_nodes', cached=False):
'''
Return a mapping of what named VMs are running on what VM providers
based on what providers are defined in the configuration and VMs
'''
if cached is True and query in self.__cached_provider_queries:
return self.__cached_provider_queries[query]
pmap = {}
for alias, drivers in six.iteritems(self.opts['providers']):
for driver, details in six.iteritems(drivers):
fun = '{0}.{1}'.format(driver, query)
if fun not in self.clouds:
log.error('Public cloud provider %s is not available', driver)
continue
if alias not in pmap:
pmap[alias] = {}
try:
with salt.utils.context.func_globals_inject(
self.clouds[fun],
__active_provider_name__=':'.join([alias, driver])
):
pmap[alias][driver] = self.clouds[fun]()
except Exception as err:
log.debug(
'Failed to execute \'%s()\' while querying for '
'running nodes: %s', fun, err,
exc_info_on_loglevel=logging.DEBUG
)
# Failed to communicate with the provider, don't list any
# nodes
pmap[alias][driver] = []
self.__cached_provider_queries[query] = pmap
return pmap | Return a mapping of what named VMs are running on what VM providers
based on what providers are defined in the configuration and VMs | Below is the the instruction that describes the task:
### Input:
Return a mapping of what named VMs are running on what VM providers
based on what providers are defined in the configuration and VMs
### Response:
def map_providers(self, query='list_nodes', cached=False):
'''
Return a mapping of what named VMs are running on what VM providers
based on what providers are defined in the configuration and VMs
'''
if cached is True and query in self.__cached_provider_queries:
return self.__cached_provider_queries[query]
pmap = {}
for alias, drivers in six.iteritems(self.opts['providers']):
for driver, details in six.iteritems(drivers):
fun = '{0}.{1}'.format(driver, query)
if fun not in self.clouds:
log.error('Public cloud provider %s is not available', driver)
continue
if alias not in pmap:
pmap[alias] = {}
try:
with salt.utils.context.func_globals_inject(
self.clouds[fun],
__active_provider_name__=':'.join([alias, driver])
):
pmap[alias][driver] = self.clouds[fun]()
except Exception as err:
log.debug(
'Failed to execute \'%s()\' while querying for '
'running nodes: %s', fun, err,
exc_info_on_loglevel=logging.DEBUG
)
# Failed to communicate with the provider, don't list any
# nodes
pmap[alias][driver] = []
self.__cached_provider_queries[query] = pmap
return pmap |
def get_randomized_guid_sample(self, item_count):
""" Fetch a subset of randomzied GUIDs from the whitelist """
dataset = self.get_whitelist()
random.shuffle(dataset)
        return dataset[:item_count] | Fetch a subset of randomized GUIDs from the whitelist | Below is the the instruction that describes the task:
### Input:
Fetch a subset of randomized GUIDs from the whitelist
### Response:
def get_randomized_guid_sample(self, item_count):
""" Fetch a subset of randomzied GUIDs from the whitelist """
dataset = self.get_whitelist()
random.shuffle(dataset)
return dataset[:item_count] |
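A standalone sketch of the same shuffle-then-slice sampling; random.sample at the end achieves the same effect without mutating the source list.
import random
dataset = ['guid-%d' % i for i in range(100)]
random.shuffle(dataset)                   # in-place shuffle, as in the method above
subset = dataset[:10]                     # keep the first N shuffled items
subset_alt = random.sample(dataset, 10)   # non-mutating alternative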
def classical(group, src_filter, gsims, param, monitor=Monitor()):
"""
Compute the hazard curves for a set of sources belonging to the same
tectonic region type for all the GSIMs associated to that TRT.
The arguments are the same as in :func:`calc_hazard_curves`, except
for ``gsims``, which is a list of GSIM instances.
:returns:
a dictionary {grp_id: pmap} with attributes .grp_ids, .calc_times,
.eff_ruptures
"""
if not hasattr(src_filter, 'sitecol'): # a sitecol was passed
src_filter = SourceFilter(src_filter, {})
# Get the parameters assigned to the group
src_mutex = getattr(group, 'src_interdep', None) == 'mutex'
rup_mutex = getattr(group, 'rup_interdep', None) == 'mutex'
cluster = getattr(group, 'cluster', None)
# Compute the number of ruptures
grp_ids = set()
for src in group:
if not src.num_ruptures:
# src.num_ruptures is set when parsing the XML, but not when
# the source is instantiated manually, so it is set here
src.num_ruptures = src.count_ruptures()
# This sets the proper TOM in case of a cluster
if cluster:
src.temporal_occurrence_model = FatedTOM(time_span=1)
# Updating IDs
grp_ids.update(src.src_group_ids)
# Now preparing context
maxdist = src_filter.integration_distance
imtls = param['imtls']
trunclevel = param.get('truncation_level')
cmaker = ContextMaker(
src.tectonic_region_type, gsims, maxdist, param, monitor)
# Prepare the accumulator for the probability maps
pmap = AccumDict({grp_id: ProbabilityMap(len(imtls.array), len(gsims))
for grp_id in grp_ids})
rupdata = {grp_id: [] for grp_id in grp_ids}
# AccumDict of arrays with 3 elements weight, nsites, calc_time
calc_times = AccumDict(accum=numpy.zeros(3, numpy.float32))
eff_ruptures = AccumDict(accum=0) # grp_id -> num_ruptures
# Computing hazard
for src, s_sites in src_filter(group): # filter now
t0 = time.time()
try:
poemap = cmaker.poe_map(src, s_sites, imtls, trunclevel,
rup_indep=not rup_mutex)
except Exception as err:
etype, err, tb = sys.exc_info()
msg = '%s (source id=%s)' % (str(err), src.source_id)
raise etype(msg).with_traceback(tb)
if src_mutex: # mutex sources, there is a single group
for sid in poemap:
pcurve = pmap[src.src_group_id].setdefault(sid, 0)
pcurve += poemap[sid] * src.mutex_weight
elif poemap:
for gid in src.src_group_ids:
pmap[gid] |= poemap
if len(cmaker.rupdata):
for gid in src.src_group_ids:
rupdata[gid].append(cmaker.rupdata)
calc_times[src.id] += numpy.array(
[src.weight, len(s_sites), time.time() - t0])
# storing the number of contributing ruptures too
eff_ruptures += {gid: getattr(poemap, 'eff_ruptures', 0)
for gid in src.src_group_ids}
# Updating the probability map in the case of mutually exclusive
# sources
group_probability = getattr(group, 'grp_probability', None)
if src_mutex and group_probability:
pmap[src.src_group_id] *= group_probability
# Processing cluster
if cluster:
tom = getattr(group, 'temporal_occurrence_model')
pmap = _cluster(param, tom, imtls, gsims, grp_ids, pmap)
# Return results
for gid, data in rupdata.items():
if len(data):
rupdata[gid] = numpy.concatenate(data)
return dict(pmap=pmap, calc_times=calc_times, eff_ruptures=eff_ruptures,
rup_data=rupdata) | Compute the hazard curves for a set of sources belonging to the same
tectonic region type for all the GSIMs associated to that TRT.
The arguments are the same as in :func:`calc_hazard_curves`, except
for ``gsims``, which is a list of GSIM instances.
:returns:
a dictionary {grp_id: pmap} with attributes .grp_ids, .calc_times,
.eff_ruptures | Below is the the instruction that describes the task:
### Input:
Compute the hazard curves for a set of sources belonging to the same
tectonic region type for all the GSIMs associated to that TRT.
The arguments are the same as in :func:`calc_hazard_curves`, except
for ``gsims``, which is a list of GSIM instances.
:returns:
a dictionary {grp_id: pmap} with attributes .grp_ids, .calc_times,
.eff_ruptures
### Response:
def classical(group, src_filter, gsims, param, monitor=Monitor()):
"""
Compute the hazard curves for a set of sources belonging to the same
tectonic region type for all the GSIMs associated to that TRT.
The arguments are the same as in :func:`calc_hazard_curves`, except
for ``gsims``, which is a list of GSIM instances.
:returns:
a dictionary {grp_id: pmap} with attributes .grp_ids, .calc_times,
.eff_ruptures
"""
if not hasattr(src_filter, 'sitecol'): # a sitecol was passed
src_filter = SourceFilter(src_filter, {})
# Get the parameters assigned to the group
src_mutex = getattr(group, 'src_interdep', None) == 'mutex'
rup_mutex = getattr(group, 'rup_interdep', None) == 'mutex'
cluster = getattr(group, 'cluster', None)
# Compute the number of ruptures
grp_ids = set()
for src in group:
if not src.num_ruptures:
# src.num_ruptures is set when parsing the XML, but not when
# the source is instantiated manually, so it is set here
src.num_ruptures = src.count_ruptures()
# This sets the proper TOM in case of a cluster
if cluster:
src.temporal_occurrence_model = FatedTOM(time_span=1)
# Updating IDs
grp_ids.update(src.src_group_ids)
# Now preparing context
maxdist = src_filter.integration_distance
imtls = param['imtls']
trunclevel = param.get('truncation_level')
cmaker = ContextMaker(
src.tectonic_region_type, gsims, maxdist, param, monitor)
# Prepare the accumulator for the probability maps
pmap = AccumDict({grp_id: ProbabilityMap(len(imtls.array), len(gsims))
for grp_id in grp_ids})
rupdata = {grp_id: [] for grp_id in grp_ids}
# AccumDict of arrays with 3 elements weight, nsites, calc_time
calc_times = AccumDict(accum=numpy.zeros(3, numpy.float32))
eff_ruptures = AccumDict(accum=0) # grp_id -> num_ruptures
# Computing hazard
for src, s_sites in src_filter(group): # filter now
t0 = time.time()
try:
poemap = cmaker.poe_map(src, s_sites, imtls, trunclevel,
rup_indep=not rup_mutex)
except Exception as err:
etype, err, tb = sys.exc_info()
msg = '%s (source id=%s)' % (str(err), src.source_id)
raise etype(msg).with_traceback(tb)
if src_mutex: # mutex sources, there is a single group
for sid in poemap:
pcurve = pmap[src.src_group_id].setdefault(sid, 0)
pcurve += poemap[sid] * src.mutex_weight
elif poemap:
for gid in src.src_group_ids:
pmap[gid] |= poemap
if len(cmaker.rupdata):
for gid in src.src_group_ids:
rupdata[gid].append(cmaker.rupdata)
calc_times[src.id] += numpy.array(
[src.weight, len(s_sites), time.time() - t0])
# storing the number of contributing ruptures too
eff_ruptures += {gid: getattr(poemap, 'eff_ruptures', 0)
for gid in src.src_group_ids}
# Updating the probability map in the case of mutually exclusive
# sources
group_probability = getattr(group, 'grp_probability', None)
if src_mutex and group_probability:
pmap[src.src_group_id] *= group_probability
# Processing cluster
if cluster:
tom = getattr(group, 'temporal_occurrence_model')
pmap = _cluster(param, tom, imtls, gsims, grp_ids, pmap)
# Return results
for gid, data in rupdata.items():
if len(data):
rupdata[gid] = numpy.concatenate(data)
return dict(pmap=pmap, calc_times=calc_times, eff_ruptures=eff_ruptures,
rup_data=rupdata) |
def getEventTypeNameFromEnum(self, eType):
"""returns the name of an EVREvent enum value"""
fn = self.function_table.getEventTypeNameFromEnum
result = fn(eType)
return result | returns the name of an EVREvent enum value | Below is the the instruction that describes the task:
### Input:
returns the name of an EVREvent enum value
### Response:
def getEventTypeNameFromEnum(self, eType):
"""returns the name of an EVREvent enum value"""
fn = self.function_table.getEventTypeNameFromEnum
result = fn(eType)
return result |
def isa_from_graph(graph: nx.Graph, oneq_type='Xhalves', twoq_type='CZ') -> ISA:
"""
Generate an ISA object from a NetworkX graph.
:param graph: The graph
:param oneq_type: The type of 1-qubit gate. Currently 'Xhalves'
:param twoq_type: The type of 2-qubit gate. One of 'CZ' or 'CPHASE'.
"""
all_qubits = list(range(max(graph.nodes) + 1))
qubits = [Qubit(i, type=oneq_type, dead=i not in graph.nodes) for i in all_qubits]
edges = [Edge(sorted((a, b)), type=twoq_type, dead=False) for a, b in graph.edges]
return ISA(qubits, edges) | Generate an ISA object from a NetworkX graph.
:param graph: The graph
:param oneq_type: The type of 1-qubit gate. Currently 'Xhalves'
:param twoq_type: The type of 2-qubit gate. One of 'CZ' or 'CPHASE'. | Below is the the instruction that describes the task:
### Input:
Generate an ISA object from a NetworkX graph.
:param graph: The graph
:param oneq_type: The type of 1-qubit gate. Currently 'Xhalves'
:param twoq_type: The type of 2-qubit gate. One of 'CZ' or 'CPHASE'.
### Response:
def isa_from_graph(graph: nx.Graph, oneq_type='Xhalves', twoq_type='CZ') -> ISA:
"""
Generate an ISA object from a NetworkX graph.
:param graph: The graph
:param oneq_type: The type of 1-qubit gate. Currently 'Xhalves'
:param twoq_type: The type of 2-qubit gate. One of 'CZ' or 'CPHASE'.
"""
all_qubits = list(range(max(graph.nodes) + 1))
qubits = [Qubit(i, type=oneq_type, dead=i not in graph.nodes) for i in all_qubits]
edges = [Edge(sorted((a, b)), type=twoq_type, dead=False) for a, b in graph.edges]
return ISA(qubits, edges) |
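A minimal usage sketch, assuming networkx is installed and that isa_from_graph and its ISA/Qubit/Edge types are importable from the module above; the 3-qubit line topology is arbitrary.
import networkx as nx
graph = nx.Graph([(0, 1), (1, 2)])        # 3-qubit line: 0-1-2
isa = isa_from_graph(graph, oneq_type='Xhalves', twoq_type='CZ')
# isa.qubits describes qubits 0..2, none dead; isa.edges holds CZ edges (0, 1) and (1, 2)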
def make_roi(cls, sources=None):
"""Build and return a `fermipy.roi_model.ROIModel` object from
a dict with information about the sources
"""
if sources is None:
sources = {}
src_fact = cls()
src_fact.add_sources(sources)
ret_model = roi_model.ROIModel(
{}, skydir=SkyCoord(0.0, 0.0, unit='deg'))
for source in src_fact.sources.values():
ret_model.load_source(source,
build_index=False, merge_sources=False)
return ret_model | Build and return a `fermipy.roi_model.ROIModel` object from
a dict with information about the sources | Below is the the instruction that describes the task:
### Input:
Build and return a `fermipy.roi_model.ROIModel` object from
a dict with information about the sources
### Response:
def make_roi(cls, sources=None):
"""Build and return a `fermipy.roi_model.ROIModel` object from
a dict with information about the sources
"""
if sources is None:
sources = {}
src_fact = cls()
src_fact.add_sources(sources)
ret_model = roi_model.ROIModel(
{}, skydir=SkyCoord(0.0, 0.0, unit='deg'))
for source in src_fact.sources.values():
ret_model.load_source(source,
build_index=False, merge_sources=False)
return ret_model |
def robust_isinstance(inst, typ) -> bool:
"""
Similar to isinstance, but if 'typ' is a parametrized generic Type, it is first transformed into its base generic
class so that the instance check works. It is also robust to Union and Any.
:param inst:
:param typ:
:return:
"""
if typ is Any:
return True
if is_typevar(typ):
if hasattr(typ, '__constraints__') and typ.__constraints__ is not None:
typs = get_args(typ, evaluate=True)
return any(robust_isinstance(inst, t) for t in typs)
elif hasattr(typ, '__bound__') and typ.__bound__ is not None:
return robust_isinstance(inst, typ.__bound__)
else:
# a raw TypeVar means 'anything'
return True
else:
if is_union_type(typ):
typs = get_args(typ, evaluate=True)
return any(robust_isinstance(inst, t) for t in typs)
else:
return isinstance(inst, get_base_generic_type(typ)) | Similar to isinstance, but if 'typ' is a parametrized generic Type, it is first transformed into its base generic
class so that the instance check works. It is also robust to Union and Any.
:param inst:
:param typ:
:return: | Below is the the instruction that describes the task:
### Input:
Similar to isinstance, but if 'typ' is a parametrized generic Type, it is first transformed into its base generic
class so that the instance check works. It is also robust to Union and Any.
:param inst:
:param typ:
:return:
### Response:
def robust_isinstance(inst, typ) -> bool:
"""
Similar to isinstance, but if 'typ' is a parametrized generic Type, it is first transformed into its base generic
class so that the instance check works. It is also robust to Union and Any.
:param inst:
:param typ:
:return:
"""
if typ is Any:
return True
if is_typevar(typ):
if hasattr(typ, '__constraints__') and typ.__constraints__ is not None:
typs = get_args(typ, evaluate=True)
return any(robust_isinstance(inst, t) for t in typs)
elif hasattr(typ, '__bound__') and typ.__bound__ is not None:
return robust_isinstance(inst, typ.__bound__)
else:
# a raw TypeVar means 'anything'
return True
else:
if is_union_type(typ):
typs = get_args(typ, evaluate=True)
return any(robust_isinstance(inst, t) for t in typs)
else:
return isinstance(inst, get_base_generic_type(typ)) |
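A few illustrative calls, assuming robust_isinstance and its typing helpers are importable from the module above.
from typing import Any, List, Optional, Union
assert robust_isinstance([1, 2, 3], List[int])    # parametrized generic: falls back to isinstance(x, list)
assert robust_isinstance('x', Union[int, str])    # matches one member of the Union
assert robust_isinstance(None, Optional[int])     # Optional[int] is Union[int, None]
assert robust_isinstance(3.5, Any)                # Any always matches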
def _get_id(self, file_path):
''' a helper method for retrieving id of file or folder '''
title = '%s._get_id' % self.__class__.__name__
# construct request kwargs
list_kwargs = {
'spaces': self.drive_space,
'fields': 'files(id, parents)'
}
# determine path segments
path_segments = file_path.split(os.sep)
# walk down parents to file name
parent_id = ''
empty_string = ''
while path_segments:
walk_query = "name = '%s'" % path_segments.pop(0)
if parent_id:
walk_query += "and '%s' in parents" % parent_id
list_kwargs['q'] = walk_query
try:
response = self.drive.list(**list_kwargs).execute()
except:
raise DriveConnectionError(title)
file_list = response.get('files', [])
if file_list:
if path_segments:
parent_id = file_list[0].get('id')
else:
file_id = file_list[0].get('id')
return file_id, parent_id
else:
return empty_string, empty_string | a helper method for retrieving id of file or folder | Below is the the instruction that describes the task:
### Input:
a helper method for retrieving id of file or folder
### Response:
def _get_id(self, file_path):
''' a helper method for retrieving id of file or folder '''
title = '%s._get_id' % self.__class__.__name__
# construct request kwargs
list_kwargs = {
'spaces': self.drive_space,
'fields': 'files(id, parents)'
}
# determine path segments
path_segments = file_path.split(os.sep)
# walk down parents to file name
parent_id = ''
empty_string = ''
while path_segments:
walk_query = "name = '%s'" % path_segments.pop(0)
if parent_id:
walk_query += "and '%s' in parents" % parent_id
list_kwargs['q'] = walk_query
try:
response = self.drive.list(**list_kwargs).execute()
except:
raise DriveConnectionError(title)
file_list = response.get('files', [])
if file_list:
if path_segments:
parent_id = file_list[0].get('id')
else:
file_id = file_list[0].get('id')
return file_id, parent_id
else:
return empty_string, empty_string |
def insert_entry(self, entry):
"""!
@brief Insert new clustering feature to the leaf node.
@param[in] entry (cfentry): Clustering feature.
"""
self.feature += entry;
self.entries.append(entry); | !
@brief Insert new clustering feature to the leaf node.
@param[in] entry (cfentry): Clustering feature. | Below is the the instruction that describes the task:
### Input:
!
@brief Insert new clustering feature to the leaf node.
@param[in] entry (cfentry): Clustering feature.
### Response:
def insert_entry(self, entry):
"""!
@brief Insert new clustering feature to the leaf node.
@param[in] entry (cfentry): Clustering feature.
"""
self.feature += entry;
self.entries.append(entry); |
def set_mime_type(self, mime_type):
"""
Update the highlighter lexer based on a mime type.
:param mime_type: mime type of the new lexer to setup.
"""
try:
self.set_lexer_from_mime_type(mime_type)
except ClassNotFound:
_logger().exception('failed to get lexer from mimetype')
self._lexer = TextLexer()
return False
except ImportError:
# import error while loading some pygments plugins, the editor
# should not crash
_logger().warning('failed to get lexer from mimetype (%s)' %
mime_type)
self._lexer = TextLexer()
return False
else:
return True | Update the highlighter lexer based on a mime type.
:param mime_type: mime type of the new lexer to setup. | Below is the the instruction that describes the task:
### Input:
Update the highlighter lexer based on a mime type.
:param mime_type: mime type of the new lexer to setup.
### Response:
def set_mime_type(self, mime_type):
"""
Update the highlighter lexer based on a mime type.
:param mime_type: mime type of the new lexer to setup.
"""
try:
self.set_lexer_from_mime_type(mime_type)
except ClassNotFound:
_logger().exception('failed to get lexer from mimetype')
self._lexer = TextLexer()
return False
except ImportError:
# import error while loading some pygments plugins, the editor
# should not crash
_logger().warning('failed to get lexer from mimetype (%s)' %
mime_type)
self._lexer = TextLexer()
return False
else:
return True |
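A standalone sketch of the kind of pygments lookup that set_lexer_from_mime_type presumably performs, assuming pygments is installed; 'text/x-python' is just an illustrative mime type.
from pygments.lexers import get_lexer_for_mimetype
from pygments.lexers.special import TextLexer
from pygments.util import ClassNotFound
try:
    lexer = get_lexer_for_mimetype('text/x-python')
except (ClassNotFound, ImportError):
    lexer = TextLexer()            # fall back to plain text, mirroring the method above
print(lexer.name)                  # 'Python'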
def track_file_ident_desc(self, file_ident):
# type: (UDFFileIdentifierDescriptor) -> None
'''
A method to start tracking a UDF File Identifier descriptor in this
UDF File Entry. Both 'tracking' and 'addition' add the identifier to
the list of file identifiers, but tracking doees not expand or
otherwise modify the UDF File Entry.
Parameters:
file_ident - The UDF File Identifier Descriptor to start tracking.
Returns:
Nothing.
'''
if not self._initialized:
raise pycdlibexception.PyCdlibInternalError('UDF File Entry not initialized')
self.fi_descs.append(file_ident) | A method to start tracking a UDF File Identifier descriptor in this
UDF File Entry. Both 'tracking' and 'addition' add the identifier to
the list of file identifiers, but tracking does not expand or
otherwise modify the UDF File Entry.
Parameters:
file_ident - The UDF File Identifier Descriptor to start tracking.
Returns:
Nothing. | Below is the the instruction that describes the task:
### Input:
A method to start tracking a UDF File Identifier descriptor in this
UDF File Entry. Both 'tracking' and 'addition' add the identifier to
the list of file identifiers, but tracking does not expand or
otherwise modify the UDF File Entry.
Parameters:
file_ident - The UDF File Identifier Descriptor to start tracking.
Returns:
Nothing.
### Response:
def track_file_ident_desc(self, file_ident):
# type: (UDFFileIdentifierDescriptor) -> None
'''
A method to start tracking a UDF File Identifier descriptor in this
UDF File Entry. Both 'tracking' and 'addition' add the identifier to
        the list of file identifiers, but tracking does not expand or
otherwise modify the UDF File Entry.
Parameters:
file_ident - The UDF File Identifier Descriptor to start tracking.
Returns:
Nothing.
'''
if not self._initialized:
raise pycdlibexception.PyCdlibInternalError('UDF File Entry not initialized')
self.fi_descs.append(file_ident) |
def get_run_on_node_mask():
"""
Returns the mask of nodes that the current thread is allowed to run on.
@return: node mask
@rtype: C{set}
"""
bitmask = libnuma.numa_get_run_node_mask()
nodemask = nodemask_t()
libnuma.copy_bitmask_to_nodemask(bitmask, byref(nodemask))
libnuma.numa_bitmask_free(bitmask)
return numa_nodemask_to_set(nodemask) | Returns the mask of nodes that the current thread is allowed to run on.
@return: node mask
@rtype: C{set} | Below is the the instruction that describes the task:
### Input:
Returns the mask of nodes that the current thread is allowed to run on.
@return: node mask
@rtype: C{set}
### Response:
def get_run_on_node_mask():
"""
Returns the mask of nodes that the current thread is allowed to run on.
@return: node mask
@rtype: C{set}
"""
bitmask = libnuma.numa_get_run_node_mask()
nodemask = nodemask_t()
libnuma.copy_bitmask_to_nodemask(bitmask, byref(nodemask))
libnuma.numa_bitmask_free(bitmask)
return numa_nodemask_to_set(nodemask) |
def main():
"""
Reads stdin jboss output, writes json on output
:return:
"""
buff = ''
for line in fileinput.input():
buff += line
parser = jbossparser.JbossParser()
result = parser.parse(buff)
print(json.dumps(result)) | Reads stdin jboss output, writes json on output
:return: | Below is the the instruction that describes the task:
### Input:
Reads stdin jboss output, writes json on output
:return:
### Response:
def main():
"""
Reads stdin jboss output, writes json on output
:return:
"""
buff = ''
for line in fileinput.input():
buff += line
parser = jbossparser.JbossParser()
result = parser.parse(buff)
print(json.dumps(result)) |
def aws_syncr_spec(self):
"""Spec for aws_syncr options"""
formatted_string = formatted(string_spec(), MergedOptionStringFormatter, expected_type=string_types)
return create_spec(AwsSyncr
, extra = defaulted(formatted_string, "")
, stage = defaulted(formatted_string, "")
, debug = defaulted(boolean(), False)
, dry_run = defaulted(boolean(), False)
, location = defaulted(formatted_string, "ap-southeast-2")
, artifact = formatted_string
, environment = formatted_string
, config_folder = directory_spec()
) | Spec for aws_syncr options | Below is the the instruction that describes the task:
### Input:
Spec for aws_syncr options
### Response:
def aws_syncr_spec(self):
"""Spec for aws_syncr options"""
formatted_string = formatted(string_spec(), MergedOptionStringFormatter, expected_type=string_types)
return create_spec(AwsSyncr
, extra = defaulted(formatted_string, "")
, stage = defaulted(formatted_string, "")
, debug = defaulted(boolean(), False)
, dry_run = defaulted(boolean(), False)
, location = defaulted(formatted_string, "ap-southeast-2")
, artifact = formatted_string
, environment = formatted_string
, config_folder = directory_spec()
) |
def gaps(args):
"""
%prog gaps A_vs_B.blast
    Find distribution of gap sizes between adjacent HSPs.
"""
p = OptionParser(gaps.__doc__)
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
blastfile, = args
blast = BlastSlow(blastfile)
logging.debug("A total of {} records imported".format(len(blast)))
query_gaps = list(collect_gaps(blast))
subject_gaps = list(collect_gaps(blast, use_subject=True))
logging.debug("Query gaps: {} Subject gaps: {}"\
.format(len(query_gaps), len(subject_gaps)))
from jcvi.graphics.base import savefig
import seaborn as sns
sns.distplot(query_gaps)
savefig("query_gaps.pdf") | %prog gaps A_vs_B.blast
    Find distribution of gap sizes between adjacent HSPs. | Below is the the instruction that describes the task:
### Input:
%prog gaps A_vs_B.blast
    Find distribution of gap sizes between adjacent HSPs.
### Response:
def gaps(args):
"""
%prog gaps A_vs_B.blast
    Find distribution of gap sizes between adjacent HSPs.
"""
p = OptionParser(gaps.__doc__)
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
blastfile, = args
blast = BlastSlow(blastfile)
logging.debug("A total of {} records imported".format(len(blast)))
query_gaps = list(collect_gaps(blast))
subject_gaps = list(collect_gaps(blast, use_subject=True))
logging.debug("Query gaps: {} Subject gaps: {}"\
.format(len(query_gaps), len(subject_gaps)))
from jcvi.graphics.base import savefig
import seaborn as sns
sns.distplot(query_gaps)
savefig("query_gaps.pdf") |
def set_volume(self, percent, update_group=True):
"""Set client volume percent."""
if percent not in range(0, 101):
raise ValueError('Volume percent out of range')
new_volume = self._client['config']['volume']
new_volume['percent'] = percent
self._client['config']['volume']['percent'] = percent
yield from self._server.client_volume(self.identifier, new_volume)
if update_group:
self._server.group(self.group.identifier).callback()
_LOGGER.info('set volume to %s on %s', percent, self.friendly_name) | Set client volume percent. | Below is the the instruction that describes the task:
### Input:
Set client volume percent.
### Response:
def set_volume(self, percent, update_group=True):
"""Set client volume percent."""
if percent not in range(0, 101):
raise ValueError('Volume percent out of range')
new_volume = self._client['config']['volume']
new_volume['percent'] = percent
self._client['config']['volume']['percent'] = percent
yield from self._server.client_volume(self.identifier, new_volume)
if update_group:
self._server.group(self.group.identifier).callback()
_LOGGER.info('set volume to %s on %s', percent, self.friendly_name) |
def _add_variant_gene_relationship(self, patient_var_map, gene_coordinate_map):
"""
Right now it is unclear the best approach on how to connect
variants to genes. In most cases has_affected_locus/GENO:0000418
is accurate; however, there are cases where a variant is in the intron
on one gene and is purported to causally affect another gene down or
upstream. In these cases we must first disambiguate which gene
is the affected locus, and which gene(s) are predicated to be
    causally influenced by (RO:0002566)
UPDATE 8-30: In the latest dataset we no longer have 1-many mappings
between variants and genes, but leaving this here in case we see
these in the future
The logic followed here is:
if mutation type contains downstream/upstream and more than one
gene of interest, investigate coordinates of all genes to
see if we can disambiguate which genes are which
:return: None
"""
# genotype = Genotype(self.graph)
dipper_util = DipperUtil()
model = Model(self.graph)
# Note this could be compressed in someway to remove one level of for looping
for patient in patient_var_map:
for variant_id, variant in patient_var_map[patient].items():
variant_bnode = self.make_id("{0}".format(variant_id), "_")
genes_of_interest = variant['genes_of_interest']
if len(genes_of_interest) == 1:
# Assume variant is variant allele of gene
gene = genes_of_interest[0]
gene_id = dipper_util.get_ncbi_id_from_symbol(gene)
self._add_gene_to_graph(
gene, variant_bnode, gene_id,
self.globaltt['has_affected_feature'])
elif re.search(r'upstream|downstream', variant['type'], flags=re.I):
# Attempt to disambiguate
ref_gene = []
up_down_gene = []
unmatched_genes = []
for gene in variant['genes_of_interest']:
if gene_id and gene_id != '' and gene_id in gene_coordinate_map:
if gene_coordinate_map[gene_id]['start'] \
<= variant['position']\
<= gene_coordinate_map[gene_id]['end']:
gene_info = {
'symbol': gene,
'strand': gene_coordinate_map[gene_id]['strand']
}
ref_gene.append(gene_info)
else:
up_down_gene.append(gene)
else:
unmatched_genes.append(gene)
if len(ref_gene) == 1:
self._add_gene_to_graph(
ref_gene[0]['symbol'], variant_bnode, gene_id,
self.globaltt['has_affected_feature'])
# update label with gene
gene_list = [ref_gene[0]['symbol']] # build label expects list
variant_label = self._build_variant_label(
variant['build'], variant['chromosome'],
variant['position'], variant['reference_allele'],
variant['variant_allele'], gene_list)
model.addLabel(variant_bnode, variant_label)
# In some cases there are multiple instances
# of same gene from dupe rows in the source
# Credit http://stackoverflow.com/a/3844832
elif len(ref_gene) > 0 and ref_gene[1:] == ref_gene[:-1]:
self._add_gene_to_graph(
ref_gene[0]['symbol'], variant_bnode, gene_id,
self.globaltt['has_affected_feature'])
# build label function expects list
gene_list = [ref_gene[0]['symbol']]
variant_label = self._build_variant_label(
variant['build'], variant['chromosome'],
variant['position'], variant['reference_allele'],
variant['variant_allele'], gene_list)
model.addLabel(variant_bnode, variant_label)
# Check if reference genes are on different strands
elif len(ref_gene) == 2:
strands = [st['strand'] for st in ref_gene]
if "minus" in strands and "plus" in strands:
for r_gene in ref_gene:
self._add_gene_to_graph(
r_gene['symbol'], variant_bnode, gene_id,
self.globaltt['has_affected_feature'])
else:
LOG.warning(
"unable to map intron variant to gene coordinates: %s",
variant)
for r_gene in ref_gene:
self._add_gene_to_graph(
r_gene['symbol'], variant_bnode, gene_id,
self.globaltt['causally_influences'])
elif re.search(r'intron', variant['type'], flags=re.I):
LOG.warning(
"unable to map intron variant to gene coordinates_2: %s",
variant)
for neighbor in up_down_gene:
self._add_gene_to_graph(
neighbor, variant_bnode, gene_id,
self.globaltt['causally_influences'])
# Unmatched genes are likely because we cannot map to an NCBIGene
# or we do not have coordinate information
for unmatched_gene in unmatched_genes:
self._add_gene_to_graph(
unmatched_gene, variant_bnode, gene_id,
self.globaltt['causally_influences'])
return | Right now it is unclear the best approach on how to connect
variants to genes. In most cases has_affected_locus/GENO:0000418
is accurate; however, there are cases where a variant is in the intron
on one gene and is purported to causally affect another gene down or
upstream. In these cases we must first disambiguate which gene
is the affected locus, and which gene(s) are predicated to be
causally influenced by (RO:0002566)
UPDATE 8-30: In the latest dataset we no longer have 1-many mappings
between variants and genes, but leaving this here in case we see
these in the future
The logic followed here is:
if mutation type contains downstream/upstream and more than one
gene of interest, investigate coordinates of all genes to
see if we can disambiguate which genes are which
:return: None | Below is the the instruction that describes the task:
### Input:
Right now it is unclear the best approach on how to connect
variants to genes. In most cases has_affected_locus/GENO:0000418
is accurate; however, there are cases where a variant is in the intron
on one gene and is purported to causally affect another gene down or
upstream. In these cases we must first disambiguate which gene
is the affected locus, and which gene(s) are predicated to be
causally influenced by (RO:0002566)
UPDATE 8-30: In the latest dataset we no longer have 1-many mappings
between variants and genes, but leaving this here in case we see
these in the future
The logic followed here is:
if mutation type contains downstream/upstream and more than one
gene of interest, investigate coordinates of all genes to
see if we can disambiguate which genes are which
:return: None
### Response:
def _add_variant_gene_relationship(self, patient_var_map, gene_coordinate_map):
"""
Right now it is unclear the best approach on how to connect
variants to genes. In most cases has_affected_locus/GENO:0000418
is accurate; however, there are cases where a variant is in the intron
on one gene and is purported to causally affect another gene down or
upstream. In these cases we must first disambiguate which gene
is the affected locus, and which gene(s) are predicated to be
    causally influenced by (RO:0002566)
UPDATE 8-30: In the latest dataset we no longer have 1-many mappings
between variants and genes, but leaving this here in case we see
these in the future
The logic followed here is:
if mutation type contains downstream/upstream and more than one
gene of interest, investigate coordinates of all genes to
see if we can disambiguate which genes are which
:return: None
"""
# genotype = Genotype(self.graph)
dipper_util = DipperUtil()
model = Model(self.graph)
# Note this could be compressed in someway to remove one level of for looping
for patient in patient_var_map:
for variant_id, variant in patient_var_map[patient].items():
variant_bnode = self.make_id("{0}".format(variant_id), "_")
genes_of_interest = variant['genes_of_interest']
if len(genes_of_interest) == 1:
# Assume variant is variant allele of gene
gene = genes_of_interest[0]
gene_id = dipper_util.get_ncbi_id_from_symbol(gene)
self._add_gene_to_graph(
gene, variant_bnode, gene_id,
self.globaltt['has_affected_feature'])
elif re.search(r'upstream|downstream', variant['type'], flags=re.I):
# Attempt to disambiguate
ref_gene = []
up_down_gene = []
unmatched_genes = []
for gene in variant['genes_of_interest']:
if gene_id and gene_id != '' and gene_id in gene_coordinate_map:
if gene_coordinate_map[gene_id]['start'] \
<= variant['position']\
<= gene_coordinate_map[gene_id]['end']:
gene_info = {
'symbol': gene,
'strand': gene_coordinate_map[gene_id]['strand']
}
ref_gene.append(gene_info)
else:
up_down_gene.append(gene)
else:
unmatched_genes.append(gene)
if len(ref_gene) == 1:
self._add_gene_to_graph(
ref_gene[0]['symbol'], variant_bnode, gene_id,
self.globaltt['has_affected_feature'])
# update label with gene
gene_list = [ref_gene[0]['symbol']] # build label expects list
variant_label = self._build_variant_label(
variant['build'], variant['chromosome'],
variant['position'], variant['reference_allele'],
variant['variant_allele'], gene_list)
model.addLabel(variant_bnode, variant_label)
# In some cases there are multiple instances
# of same gene from dupe rows in the source
# Credit http://stackoverflow.com/a/3844832
elif len(ref_gene) > 0 and ref_gene[1:] == ref_gene[:-1]:
self._add_gene_to_graph(
ref_gene[0]['symbol'], variant_bnode, gene_id,
self.globaltt['has_affected_feature'])
# build label function expects list
gene_list = [ref_gene[0]['symbol']]
variant_label = self._build_variant_label(
variant['build'], variant['chromosome'],
variant['position'], variant['reference_allele'],
variant['variant_allele'], gene_list)
model.addLabel(variant_bnode, variant_label)
# Check if reference genes are on different strands
elif len(ref_gene) == 2:
strands = [st['strand'] for st in ref_gene]
if "minus" in strands and "plus" in strands:
for r_gene in ref_gene:
self._add_gene_to_graph(
r_gene['symbol'], variant_bnode, gene_id,
self.globaltt['has_affected_feature'])
else:
LOG.warning(
"unable to map intron variant to gene coordinates: %s",
variant)
for r_gene in ref_gene:
self._add_gene_to_graph(
r_gene['symbol'], variant_bnode, gene_id,
self.globaltt['causally_influences'])
elif re.search(r'intron', variant['type'], flags=re.I):
LOG.warning(
"unable to map intron variant to gene coordinates_2: %s",
variant)
for neighbor in up_down_gene:
self._add_gene_to_graph(
neighbor, variant_bnode, gene_id,
self.globaltt['causally_influences'])
# Unmatched genes are likely because we cannot map to an NCBIGene
# or we do not have coordinate information
for unmatched_gene in unmatched_genes:
self._add_gene_to_graph(
unmatched_gene, variant_bnode, gene_id,
self.globaltt['causally_influences'])
return |
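The disambiguation described above ultimately rests on a coordinate containment test; a standalone sketch with made-up numbers:
gene_coordinate_map = {'NCBIGene:1234': {'start': 1000, 'end': 5000, 'strand': 'plus'}}
variant = {'position': 2500}
gene_id = 'NCBIGene:1234'
inside = (gene_coordinate_map[gene_id]['start']
          <= variant['position']
          <= gene_coordinate_map[gene_id]['end'])    # True -> treat as the reference gene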
def xreload(mod):
"""Reload a module in place, updating classes, methods and functions.
mod: a module object
Returns a boolean indicating whether a change was done.
"""
r = Reload(mod)
r.apply()
found_change = r.found_change
r = None
pydevd_dont_trace.clear_trace_filter_cache()
return found_change | Reload a module in place, updating classes, methods and functions.
mod: a module object
Returns a boolean indicating whether a change was done. | Below is the the instruction that describes the task:
### Input:
Reload a module in place, updating classes, methods and functions.
mod: a module object
Returns a boolean indicating whether a change was done.
### Response:
def xreload(mod):
"""Reload a module in place, updating classes, methods and functions.
mod: a module object
Returns a boolean indicating whether a change was done.
"""
r = Reload(mod)
r.apply()
found_change = r.found_change
r = None
pydevd_dont_trace.clear_trace_filter_cache()
return found_change |
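A hedged usage sketch; mymodule is a hypothetical placeholder for any already-imported module whose source has changed on disk.
import mymodule                     # hypothetical module being edited
changed = xreload(mymodule)         # patch its classes/functions in place
if changed:
    print('mymodule was updated without restarting the process')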
def get_peers_in_established(self):
"""Returns list of peers in established state."""
est_peers = []
for peer in self._peers.values():
if peer.in_established:
est_peers.append(peer)
return est_peers | Returns list of peers in established state. | Below is the the instruction that describes the task:
### Input:
Returns list of peers in established state.
### Response:
def get_peers_in_established(self):
"""Returns list of peers in established state."""
est_peers = []
for peer in self._peers.values():
if peer.in_established:
est_peers.append(peer)
return est_peers |
def print_extended_help():
"""
Prints an extended help message.
"""
# initiate TextWrapper class, which will handle all of the string formatting
w = textwrap.TextWrapper()
w.expand_tabs = False
w.width=110
w.initial_indent = ' '
w.subsequent_indent = ' '
print('')
print(textwrap.fill("<split> Complete parameter list:", initial_indent=''))
print('')
cmd = "--input : (required) csv file to split into training and test sets"
print(w.fill(cmd))
cmd = "\t\tColumns should be as follows:"
print(w.fill(cmd))
print('')
cmd="\t\t id, status, receptor_1, receptor_2, ..., receptor_N"
print(w.fill(cmd))
cmd="\t\t CH44, 1, -9.7, -9.3, ..., -10.2"
print(w.fill(cmd))
cmd="\t\t ZN44, 0, -6.6, -6.1, ..., -6.8"
print(w.fill(cmd))
print('')
cmd="\t\tid is a unique molecular identifier"
print(w.fill(cmd))
cmd="\t\tstatus takes a value of '1' if the molecule is active and '0' otherwise."
print(w.fill(cmd))
cmd="\t\treceptor_1 through receptor_N are docking scores."
print(w.fill(cmd))
print('')
tfrac = "--training_fraction : (optional) The fraction of input active molecules\
allocated to the training set, e.g. 0.40. Defaults to allocate half to the training\
set."
print(w.fill(tfrac))
print('')
d2a = "--decoy_to_active : (optional) The decoy to active ratio to establish in the \
training and validation sets. Defaults to maintain the input file ratio."
print(w.fill(d2a))
print('') | Prints an extended help message. | Below is the the instruction that describes the task:
### Input:
Prints an extended help message.
### Response:
def print_extended_help():
"""
Prints an extended help message.
"""
# initiate TextWrapper class, which will handle all of the string formatting
w = textwrap.TextWrapper()
w.expand_tabs = False
w.width=110
w.initial_indent = ' '
w.subsequent_indent = ' '
print('')
print(textwrap.fill("<split> Complete parameter list:", initial_indent=''))
print('')
cmd = "--input : (required) csv file to split into training and test sets"
print(w.fill(cmd))
cmd = "\t\tColumns should be as follows:"
print(w.fill(cmd))
print('')
cmd="\t\t id, status, receptor_1, receptor_2, ..., receptor_N"
print(w.fill(cmd))
cmd="\t\t CH44, 1, -9.7, -9.3, ..., -10.2"
print(w.fill(cmd))
cmd="\t\t ZN44, 0, -6.6, -6.1, ..., -6.8"
print(w.fill(cmd))
print('')
cmd="\t\tid is a unique molecular identifier"
print(w.fill(cmd))
cmd="\t\tstatus takes a value of '1' if the molecule is active and '0' otherwise."
print(w.fill(cmd))
cmd="\t\treceptor_1 through receptor_N are docking scores."
print(w.fill(cmd))
print('')
tfrac = "--training_fraction : (optional) The fraction of input active molecules\
allocated to the training set, e.g. 0.40. Defaults to allocate half to the training\
set."
print(w.fill(tfrac))
print('')
d2a = "--decoy_to_active : (optional) The decoy to active ratio to establish in the \
training and validation sets. Defaults to maintain the input file ratio."
print(w.fill(d2a))
print('') |
def affected_files(self):
"""
        Gets a fast accessible list of file changes for the given changeset
"""
added, modified, deleted = self._changes_cache
        return list(added.union(modified).union(deleted)) | Gets a fast accessible list of file changes for the given changeset | Below is the the instruction that describes the task:
### Input:
Gets a fast accessible list of file changes for the given changeset
### Response:
def affected_files(self):
"""
        Gets a fast accessible list of file changes for the given changeset
"""
added, modified, deleted = self._changes_cache
return list(added.union(modified).union(deleted)) |
def getSuccessors(jobGraph, alreadySeenSuccessors, jobStore):
"""
Gets successors of the given job by walking the job graph recursively.
Any successor in alreadySeenSuccessors is ignored and not traversed.
Returns the set of found successors. This set is added to alreadySeenSuccessors.
"""
successors = set()
def successorRecursion(jobGraph):
# For lists of successors
for successorList in jobGraph.stack:
# For each successor in list of successors
for successorJobNode in successorList:
# If successor not already visited
if successorJobNode.jobStoreID not in alreadySeenSuccessors:
# Add to set of successors
successors.add(successorJobNode.jobStoreID)
alreadySeenSuccessors.add(successorJobNode.jobStoreID)
# Recurse if job exists
# (job may not exist if already completed)
if jobStore.exists(successorJobNode.jobStoreID):
successorRecursion(jobStore.load(successorJobNode.jobStoreID))
successorRecursion(jobGraph) # Recurse from jobGraph
return successors | Gets successors of the given job by walking the job graph recursively.
Any successor in alreadySeenSuccessors is ignored and not traversed.
Returns the set of found successors. This set is added to alreadySeenSuccessors. | Below is the the instruction that describes the task:
### Input:
Gets successors of the given job by walking the job graph recursively.
Any successor in alreadySeenSuccessors is ignored and not traversed.
Returns the set of found successors. This set is added to alreadySeenSuccessors.
### Response:
def getSuccessors(jobGraph, alreadySeenSuccessors, jobStore):
"""
Gets successors of the given job by walking the job graph recursively.
Any successor in alreadySeenSuccessors is ignored and not traversed.
Returns the set of found successors. This set is added to alreadySeenSuccessors.
"""
successors = set()
def successorRecursion(jobGraph):
# For lists of successors
for successorList in jobGraph.stack:
# For each successor in list of successors
for successorJobNode in successorList:
# If successor not already visited
if successorJobNode.jobStoreID not in alreadySeenSuccessors:
# Add to set of successors
successors.add(successorJobNode.jobStoreID)
alreadySeenSuccessors.add(successorJobNode.jobStoreID)
# Recurse if job exists
# (job may not exist if already completed)
if jobStore.exists(successorJobNode.jobStoreID):
successorRecursion(jobStore.load(successorJobNode.jobStoreID))
successorRecursion(jobGraph) # Recurse from jobGraph
return successors |
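The same traversal pattern on a plain adjacency dict, to make the recursion concrete without the Toil job-store types:
def get_successors(graph, start, seen):
    found = set()
    for child in graph.get(start, ()):
        if child not in seen:          # skip successors we have already visited
            found.add(child)
            seen.add(child)
            found |= get_successors(graph, child, seen)
    return found
example = {'a': ['b', 'c'], 'b': ['d'], 'c': ['d']}
assert get_successors(example, 'a', set()) == {'b', 'c', 'd'}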
def to_netcdf4(self, fname=None, base_instrument=None, epoch_name='Epoch',
zlib=False, complevel=4, shuffle=True):
"""Stores loaded data into a netCDF4 file.
Parameters
----------
fname : string
full path to save instrument object to
base_instrument : pysat.Instrument
used as a comparison, only attributes that are present with
self and not on base_instrument are written to netCDF
epoch_name : str
Label in file for datetime index of Instrument object
zlib : boolean
Flag for engaging zlib compression (True - compression on)
complevel : int
an integer between 1 and 9 describing the level of compression
desired (default 4). Ignored if zlib=False
shuffle : boolean
the HDF5 shuffle filter will be applied before compressing the data (default True).
This significantly improves compression. Default is True. Ignored if zlib=False.
Note
----
Stores 1-D data along dimension 'epoch' - the date time index.
Stores higher order data (e.g. dataframes within series) separately
- The name of the main variable column is used to prepend subvariable
names within netCDF, var_subvar_sub
- A netCDF4 dimension is created for each main variable column
with higher order data; first dimension Epoch
- The index organizing the data stored as a dimension variable
- from_netcdf4 uses the variable dimensions to reconstruct data structure
All attributes attached to instrument meta are written to netCDF attrs.
"""
import netCDF4
import pysat
file_format = 'NETCDF4'
# base_instrument used to define the standard attributes attached
# to the instrument object. Any additional attributes added
# to the main input Instrument will be written to the netCDF4
base_instrument = Instrument() if base_instrument is None else base_instrument
# begin processing metadata for writing to the file
# look to see if user supplied a list of export keys
# corresponding to internally tracked metadata within pysat
export_meta = self.generic_meta_translator(self.meta)
if self._meta_translation_table is None:
# didn't find a translation table, using the strings
# attached to the supplied pysat.Instrument object
export_name_labels = [self.name_label]
export_units_labels = [self.units_label]
export_desc_labels = [self.desc_label]
export_notes_labels = [self.notes_label]
else:
# user supplied labels in translation table
export_name_labels = self._meta_translation_table['name_label']
export_units_labels = self._meta_translation_table['units_label']
export_desc_labels = self._meta_translation_table['desc_label']
export_notes_labels = self._meta_translation_table['notes_label']
print('Using Metadata Translation Table: ', self._meta_translation_table)
# Apply instrument specific post-processing to the export_meta
if hasattr(self._export_meta_post_processing, '__call__'):
export_meta = self._export_meta_post_processing(export_meta)
# general process for writing data is this
# first, take care of the EPOCH information
        # second, iterate over the variable columns in Instrument.data
# check the type of data
# if 1D column, do simple write (type is not an object)
# if it is an object, then check if writing strings, if not strings, then
# if column is a Series of Frames, write as 2D variables
# metadata must be filtered before writing to netCDF4, string variables
# can't have a fill value
with netCDF4.Dataset(fname, mode='w', format=file_format) as out_data:
# number of items, yeah
num = len(self.data.index)
# write out the datetime index
out_data.createDimension(epoch_name, num)
cdfkey = out_data.createVariable(epoch_name, 'i8',
dimensions=(epoch_name),
zlib=zlib,
complevel=complevel,
shuffle=shuffle)
# grab existing metadata for Epoch or create suitable info
if epoch_name in self.meta:
new_dict = export_meta[self.meta.var_case_name(epoch_name)]
else:
# create empty shell
new_dict = {}
# update required and basic information if not present
for export_name_label in export_name_labels:
if export_name_label not in new_dict:
new_dict[export_name_label] = epoch_name
for export_units_label in export_units_labels:
if export_units_label not in new_dict:
new_dict[export_units_label] = 'Milliseconds since 1970-1-1 00:00:00'
for export_desc_label in export_desc_labels:
if export_desc_label not in new_dict:
new_dict[export_desc_label] = 'Milliseconds since 1970-1-1 00:00:00'
for export_notes_label in export_notes_labels:
if export_notes_label not in new_dict:
new_dict[export_notes_label] = ''
new_dict['calendar'] = 'standard'
new_dict['Format'] = 'i8'
new_dict['Var_Type'] = 'data'
if self.data.index.is_monotonic_increasing:
new_dict['MonoTon'] = 'increase'
elif self.data.index.is_monotonic_decreasing:
new_dict['MonoTon'] = 'decrease'
new_dict['Time_Base'] = 'Milliseconds since 1970-1-1 00:00:00'
new_dict['Time_Scale'] = 'UTC'
new_dict = self._filter_netcdf4_metadata(new_dict, np.int64)
# attach metadata
cdfkey.setncatts(new_dict)
# attach data
cdfkey[:] = (self.data.index.values.astype(np.int64) *
1.E-6).astype(np.int64)
# iterate over all of the columns in the Instrument dataframe
# check what kind of data we are dealing with, then store
for key in self.data.columns:
# print (key)
            # get information on the type of data we are dealing with
            # data is the data in the proper type (multiformat support)
# coltype is the direct type, np.int64
# and datetime_flag lets you know if the data is full of time
# information
data, coltype, datetime_flag = self._get_data_info(self[key],
file_format)
# operate on data based upon type
if self[key].dtype != np.dtype('O'):
# not an object, normal basic 1D data
# print(key, coltype, file_format)
cdfkey = out_data.createVariable(key,
coltype,
dimensions=(epoch_name),
zlib=zlib,
complevel=complevel,
shuffle=shuffle) #, chunksizes=1)
# attach any meta data, after filtering for standards
try:
# attach dimension metadata
new_dict = export_meta[key]
new_dict['Depend_0'] = epoch_name
new_dict['Display_Type'] = 'Time Series'
new_dict['Format'] = self._get_var_type_code(coltype)
new_dict['Var_Type'] = 'data'
new_dict = self._filter_netcdf4_metadata(new_dict,
coltype)
cdfkey.setncatts(new_dict)
except KeyError:
print(', '.join(('Unable to find MetaData for', key)))
# assign data
if datetime_flag:
# datetime is in nanoseconds, storing milliseconds
cdfkey[:] = (data.values.astype(coltype)
* 1.E-6).astype(coltype)
else:
# not datetime data, just store as is
cdfkey[:] = data.values.astype(coltype)
# back to main check on type of data to write
else:
# it is a Series of objects, need to figure out
# what the actual objects are, then act as needed
# use info in coltype to get real datatype of object
# isinstance isn't working here because of something with coltype
if (coltype == type(' ')) or (coltype == type(u' ')):
# dealing with a string
cdfkey = out_data.createVariable(key, coltype, \
dimensions=(epoch_name), zlib=zlib, \
complevel=complevel, shuffle=shuffle)
# attach any meta data
try:
# attach dimension metadata
new_dict = export_meta[key]
new_dict['Depend_0'] = epoch_name
new_dict['Display_Type'] = 'Time Series'
new_dict['Format'] = self._get_var_type_code(coltype)
new_dict['Var_Type'] = 'data'
# no FillValue or FillVal allowed for strings
new_dict = self._filter_netcdf4_metadata(new_dict, \
coltype, remove=True)
# really attach metadata now
cdfkey.setncatts(new_dict)
except KeyError:
print(', '.join(('Unable to find MetaData for',
key)))
# time to actually write the data now
cdfkey[:] = data.values
# still dealing with an object, not just a series
# of strings
# maps to if check on coltypes being stringbased
else:
# presuming a series with a dataframe or series in each location
# start by collecting some basic info on dimensions
# sizes, names, then create corresponding netCDF4 dimensions
# total dimensions stored for object are epoch plus ones
# created below
dims = np.shape(self[key].iloc[0])
obj_dim_names = []
if len(dims) == 1:
# generally working with higher dimensional data
# pad dimensions so that the rest of the code works
# for either a Series or a Frame
dims = (dims[0], 0)
for i, dim in enumerate(dims[:-1]):
# don't need to go over last dimension value,
# it covers number of columns (if a frame)
obj_dim_names.append(key)
out_data.createDimension(obj_dim_names[-1], dim)
# create simple tuple with information needed to create
# the right dimensions for variables that will
# be written to file
var_dim = tuple([epoch_name] + obj_dim_names)
# We need to do different things if a series or dataframe
# stored
try:
# start by assuming it is a dataframe
# get list of subvariables
iterable = self[key].iloc[0].columns
# store our newfound knowledge, we are dealing with
# a series of DataFrames
is_frame = True
except AttributeError:
# turns out data is Series of Series
# which doesn't have columns
iterable = [self[key].iloc[0].name]
is_frame = False
# find location within main variable
# that actually has subvariable data (not just empty frame/series)
# so we can determine what the real underlying data types are
good_data_loc = 0
for jjj in np.arange(len(self.data)):
                            if len(self.data[key].iloc[jjj]) > 0:
                                good_data_loc = jjj
break
# found a place with data, if there is one
# now iterate over the subvariables, get data info
# create netCDF4 variables and store the data
# stored name is variable_subvariable
for col in iterable:
if is_frame:
# we are working with a dataframe
# so multiple subvariables stored under a single
# main variable heading
data, coltype, _ = self._get_data_info(self[key].iloc[good_data_loc][col], file_format)
cdfkey = out_data.createVariable(key + '_' + col,
coltype,
dimensions=var_dim,
zlib=zlib,
complevel=complevel,
shuffle=shuffle)
# attach any meta data
try:
new_dict = export_meta[key+'_'+col]
new_dict['Depend_0'] = epoch_name
new_dict['Depend_1'] = obj_dim_names[-1]
new_dict['Display_Type'] = 'Spectrogram'
new_dict['Format'] = self._get_var_type_code(coltype)
new_dict['Var_Type'] = 'data'
# print('Frame Writing ', key, col, export_meta[key].children[col])
new_dict = self._filter_netcdf4_metadata(new_dict, coltype)
# print ('mid2 ', new_dict)
cdfkey.setncatts(new_dict)
except KeyError:
print(', '.join(('Unable to find MetaData for', key, col)) )
# attach data
                                # it may be slow to repeatedly call the store
                                # and astype methods below, so collect the data
                                # into a numpy array, then write the full
                                # array in one go
# print(coltype, dims)
temp_cdf_data = np.zeros((num, dims[0])).astype(coltype)
for i in range(num):
temp_cdf_data[i, :] = self[key].iloc[i][col].values
# write data
cdfkey[:, :] = temp_cdf_data.astype(coltype)
else:
# we are dealing with a Series
# get information about information within series
data, coltype, _ = self._get_data_info(self[key].iloc[good_data_loc], file_format)
cdfkey = out_data.createVariable(key + '_data',
coltype,
dimensions=var_dim,
zlib=zlib,
complevel=complevel,
shuffle=shuffle) #, chunksizes=1)
# attach any meta data
try:
new_dict = export_meta[key]
new_dict['Depend_0'] = epoch_name
new_dict['Depend_1'] = obj_dim_names[-1]
new_dict['Display_Type'] = 'Spectrogram'
new_dict['Format'] = self._get_var_type_code(coltype)
new_dict['Var_Type'] = 'data'
new_dict = self._filter_netcdf4_metadata(new_dict, coltype)
# really attach metadata now
# print ('mid3 ', new_dict)
cdfkey.setncatts(new_dict)
except KeyError:
print(', '.join(('Unable to find MetaData for', key)))
# attach data
temp_cdf_data = np.zeros((num, dims[0])).astype(coltype)
for i in range(num):
temp_cdf_data[i, :] = self[i, key].values
# write data
cdfkey[:, :] = temp_cdf_data.astype(coltype)
# we are done storing the actual data for the given higher
# order variable, now we need to store the index for all
# of that fancy data
# get index information
data, coltype, datetime_flag = self._get_data_info(self[key].iloc[good_data_loc].index, file_format)
                        # create dimension variable to store the index in netCDF4
cdfkey = out_data.createVariable(key,
coltype, dimensions=var_dim,
zlib=zlib,
complevel=complevel,
shuffle=shuffle)
# work with metadata
new_dict = export_meta[key]
new_dict['Depend_0'] = epoch_name
new_dict['Depend_1'] = obj_dim_names[-1]
new_dict['Display_Type'] = 'Time Series'
new_dict['Format'] = self._get_var_type_code(coltype)
new_dict['Var_Type'] = 'data'
if datetime_flag:
#print('datetime flag')
for export_name_label in export_name_labels:
new_dict[export_name_label] = epoch_name
for export_units_label in export_units_labels:
new_dict[export_units_label] = 'Milliseconds since 1970-1-1 00:00:00'
new_dict = self._filter_netcdf4_metadata(new_dict, coltype)
# set metadata dict
cdfkey.setncatts(new_dict)
# set data
temp_cdf_data = np.zeros((num,
dims[0])).astype(coltype)
for i in range(num):
temp_cdf_data[i, :] = self[i, key].index.values
cdfkey[:, :] = (temp_cdf_data.astype(coltype) *
1.E-6).astype(coltype)
else:
                            if self[key].iloc[good_data_loc].index.name is not None:
for export_name_label in export_name_labels:
                                    new_dict[export_name_label] = self[key].iloc[good_data_loc].index.name
else:
for export_name_label in export_name_labels:
new_dict[export_name_label] = key
new_dict = self._filter_netcdf4_metadata(new_dict, coltype)
# assign metadata dict
cdfkey.setncatts(new_dict)
# set data
temp_cdf_data = np.zeros((num, dims[0])).astype(coltype)
for i in range(num):
temp_cdf_data[i, :] = self[key].iloc[i].index.to_native_types()
cdfkey[:, :] = temp_cdf_data.astype(coltype)
# store any non standard attributes
# compare this Instrument's attributes to base object
base_attrb = dir(base_instrument)
this_attrb = dir(self)
# filter out any 'private' attributes
# those that start with a _
adict = {}
for key in this_attrb:
if key not in base_attrb:
if key[0] != '_':
adict[key] = self.__getattribute__(key)
# store any non-standard attributes attached to meta
base_attrb = dir(base_instrument.meta)
this_attrb = dir(self.meta)
for key in this_attrb:
if key not in base_attrb:
if key[0] != '_':
adict[key] = self.meta.__getattribute__(key)
adict['pysat_version'] = pysat.__version__
if 'Conventions' not in adict:
adict['Conventions'] = 'SPDF ISTP/IACG Modified for NetCDF'
if 'Text_Supplement' not in adict:
adict['Text_Supplement'] = ''
adict['Date_Start'] = pysat.datetime.strftime(self.data.index[0], '%a, %d %b %Y, %Y-%m-%dT%H:%M:%S.%f UTC')
adict['Date_End'] = pysat.datetime.strftime(self.data.index[-1], '%a, %d %b %Y, %Y-%m-%dT%H:%M:%S.%f UTC')
adict['File'] = os.path.split(fname)
adict['Generation_Date'] = pysat.datetime.utcnow().strftime('%Y%m%d')
adict['Logical_File_ID'] = os.path.split(fname)[-1].split('.')[:-1]
# check for binary types
for key in adict.keys():
if isinstance(adict[key], bool):
adict[key] = int(adict[key])
# print('adict', adict)
out_data.setncatts(adict)
return | Stores loaded data into a netCDF4 file.
Parameters
----------
fname : string
full path to save instrument object to
base_instrument : pysat.Instrument
used as a comparison, only attributes that are present with
self and not on base_instrument are written to netCDF
epoch_name : str
Label in file for datetime index of Instrument object
zlib : boolean
Flag for engaging zlib compression (True - compression on)
complevel : int
an integer between 1 and 9 describing the level of compression
desired (default 4). Ignored if zlib=False
shuffle : boolean
the HDF5 shuffle filter will be applied before compressing the data (default True).
This significantly improves compression. Default is True. Ignored if zlib=False.
Note
----
Stores 1-D data along dimension 'epoch' - the date time index.
Stores higher order data (e.g. dataframes within series) separately
- The name of the main variable column is used to prepend subvariable
names within netCDF, var_subvar_sub
- A netCDF4 dimension is created for each main variable column
with higher order data; first dimension Epoch
- The index organizing the data stored as a dimension variable
- from_netcdf4 uses the variable dimensions to reconstruct data structure
All attributes attached to instrument meta are written to netCDF attrs. | Below is the instruction that describes the task:
### Input:
Stores loaded data into a netCDF4 file.
Parameters
----------
fname : string
full path to save instrument object to
base_instrument : pysat.Instrument
used as a comparison, only attributes that are present with
self and not on base_instrument are written to netCDF
epoch_name : str
Label in file for datetime index of Instrument object
zlib : boolean
Flag for engaging zlib compression (True - compression on)
complevel : int
an integer between 1 and 9 describing the level of compression
desired (default 4). Ignored if zlib=False
shuffle : boolean
the HDF5 shuffle filter will be applied before compressing the data (default True).
This significantly improves compression. Default is True. Ignored if zlib=False.
Note
----
Stores 1-D data along dimension 'epoch' - the date time index.
Stores higher order data (e.g. dataframes within series) separately
- The name of the main variable column is used to prepend subvariable
names within netCDF, var_subvar_sub
- A netCDF4 dimension is created for each main variable column
with higher order data; first dimension Epoch
- The index organizing the data stored as a dimension variable
- from_netcdf4 uses the variable dimensions to reconstruct data structure
All attributes attached to instrument meta are written to netCDF attrs.
### Response:
def to_netcdf4(self, fname=None, base_instrument=None, epoch_name='Epoch',
zlib=False, complevel=4, shuffle=True):
"""Stores loaded data into a netCDF4 file.
Parameters
----------
fname : string
full path to save instrument object to
base_instrument : pysat.Instrument
used as a comparison, only attributes that are present with
self and not on base_instrument are written to netCDF
epoch_name : str
Label in file for datetime index of Instrument object
zlib : boolean
Flag for engaging zlib compression (True - compression on)
complevel : int
an integer between 1 and 9 describing the level of compression
desired (default 4). Ignored if zlib=False
shuffle : boolean
the HDF5 shuffle filter will be applied before compressing the data (default True).
This significantly improves compression. Default is True. Ignored if zlib=False.
Note
----
Stores 1-D data along dimension 'epoch' - the date time index.
Stores higher order data (e.g. dataframes within series) separately
- The name of the main variable column is used to prepend subvariable
names within netCDF, var_subvar_sub
- A netCDF4 dimension is created for each main variable column
with higher order data; first dimension Epoch
- The index organizing the data stored as a dimension variable
- from_netcdf4 uses the variable dimensions to reconstruct data structure
All attributes attached to instrument meta are written to netCDF attrs.
"""
import netCDF4
import pysat
file_format = 'NETCDF4'
# base_instrument used to define the standard attributes attached
# to the instrument object. Any additional attributes added
# to the main input Instrument will be written to the netCDF4
base_instrument = Instrument() if base_instrument is None else base_instrument
# begin processing metadata for writing to the file
# look to see if user supplied a list of export keys
# corresponding to internally tracked metadata within pysat
export_meta = self.generic_meta_translator(self.meta)
if self._meta_translation_table is None:
# didn't find a translation table, using the strings
# attached to the supplied pysat.Instrument object
export_name_labels = [self.name_label]
export_units_labels = [self.units_label]
export_desc_labels = [self.desc_label]
export_notes_labels = [self.notes_label]
else:
# user supplied labels in translation table
export_name_labels = self._meta_translation_table['name_label']
export_units_labels = self._meta_translation_table['units_label']
export_desc_labels = self._meta_translation_table['desc_label']
export_notes_labels = self._meta_translation_table['notes_label']
print('Using Metadata Translation Table: ', self._meta_translation_table)
# Apply instrument specific post-processing to the export_meta
if hasattr(self._export_meta_post_processing, '__call__'):
export_meta = self._export_meta_post_processing(export_meta)
# general process for writing data is this
# first, take care of the EPOCH information
        # second, iterate over the variable columns in Instrument.data
# check the type of data
# if 1D column, do simple write (type is not an object)
# if it is an object, then check if writing strings, if not strings, then
# if column is a Series of Frames, write as 2D variables
# metadata must be filtered before writing to netCDF4, string variables
# can't have a fill value
with netCDF4.Dataset(fname, mode='w', format=file_format) as out_data:
            # number of items along the time index
num = len(self.data.index)
# write out the datetime index
out_data.createDimension(epoch_name, num)
cdfkey = out_data.createVariable(epoch_name, 'i8',
dimensions=(epoch_name),
zlib=zlib,
complevel=complevel,
shuffle=shuffle)
# grab existing metadata for Epoch or create suitable info
if epoch_name in self.meta:
new_dict = export_meta[self.meta.var_case_name(epoch_name)]
else:
# create empty shell
new_dict = {}
# update required and basic information if not present
for export_name_label in export_name_labels:
if export_name_label not in new_dict:
new_dict[export_name_label] = epoch_name
for export_units_label in export_units_labels:
if export_units_label not in new_dict:
new_dict[export_units_label] = 'Milliseconds since 1970-1-1 00:00:00'
for export_desc_label in export_desc_labels:
if export_desc_label not in new_dict:
new_dict[export_desc_label] = 'Milliseconds since 1970-1-1 00:00:00'
for export_notes_label in export_notes_labels:
if export_notes_label not in new_dict:
new_dict[export_notes_label] = ''
new_dict['calendar'] = 'standard'
new_dict['Format'] = 'i8'
new_dict['Var_Type'] = 'data'
if self.data.index.is_monotonic_increasing:
new_dict['MonoTon'] = 'increase'
elif self.data.index.is_monotonic_decreasing:
new_dict['MonoTon'] = 'decrease'
new_dict['Time_Base'] = 'Milliseconds since 1970-1-1 00:00:00'
new_dict['Time_Scale'] = 'UTC'
new_dict = self._filter_netcdf4_metadata(new_dict, np.int64)
# attach metadata
cdfkey.setncatts(new_dict)
# attach data
cdfkey[:] = (self.data.index.values.astype(np.int64) *
1.E-6).astype(np.int64)
# iterate over all of the columns in the Instrument dataframe
# check what kind of data we are dealing with, then store
for key in self.data.columns:
# print (key)
# get information on type data we are dealing with
                # data is data in proper type (multiformat support)
# coltype is the direct type, np.int64
# and datetime_flag lets you know if the data is full of time
# information
data, coltype, datetime_flag = self._get_data_info(self[key],
file_format)
# operate on data based upon type
if self[key].dtype != np.dtype('O'):
# not an object, normal basic 1D data
# print(key, coltype, file_format)
cdfkey = out_data.createVariable(key,
coltype,
dimensions=(epoch_name),
zlib=zlib,
complevel=complevel,
shuffle=shuffle) #, chunksizes=1)
# attach any meta data, after filtering for standards
try:
# attach dimension metadata
new_dict = export_meta[key]
new_dict['Depend_0'] = epoch_name
new_dict['Display_Type'] = 'Time Series'
new_dict['Format'] = self._get_var_type_code(coltype)
new_dict['Var_Type'] = 'data'
new_dict = self._filter_netcdf4_metadata(new_dict,
coltype)
cdfkey.setncatts(new_dict)
except KeyError:
print(', '.join(('Unable to find MetaData for', key)))
# assign data
if datetime_flag:
# datetime is in nanoseconds, storing milliseconds
cdfkey[:] = (data.values.astype(coltype)
* 1.E-6).astype(coltype)
else:
# not datetime data, just store as is
cdfkey[:] = data.values.astype(coltype)
# back to main check on type of data to write
else:
# it is a Series of objects, need to figure out
# what the actual objects are, then act as needed
# use info in coltype to get real datatype of object
# isinstance isn't working here because of something with coltype
if (coltype == type(' ')) or (coltype == type(u' ')):
# dealing with a string
cdfkey = out_data.createVariable(key, coltype, \
dimensions=(epoch_name), zlib=zlib, \
complevel=complevel, shuffle=shuffle)
# attach any meta data
try:
# attach dimension metadata
new_dict = export_meta[key]
new_dict['Depend_0'] = epoch_name
new_dict['Display_Type'] = 'Time Series'
new_dict['Format'] = self._get_var_type_code(coltype)
new_dict['Var_Type'] = 'data'
# no FillValue or FillVal allowed for strings
new_dict = self._filter_netcdf4_metadata(new_dict, \
coltype, remove=True)
# really attach metadata now
cdfkey.setncatts(new_dict)
except KeyError:
print(', '.join(('Unable to find MetaData for',
key)))
# time to actually write the data now
cdfkey[:] = data.values
# still dealing with an object, not just a series
# of strings
# maps to if check on coltypes being stringbased
else:
# presuming a series with a dataframe or series in each location
# start by collecting some basic info on dimensions
# sizes, names, then create corresponding netCDF4 dimensions
# total dimensions stored for object are epoch plus ones
# created below
dims = np.shape(self[key].iloc[0])
obj_dim_names = []
if len(dims) == 1:
# generally working with higher dimensional data
# pad dimensions so that the rest of the code works
# for either a Series or a Frame
dims = (dims[0], 0)
for i, dim in enumerate(dims[:-1]):
# don't need to go over last dimension value,
# it covers number of columns (if a frame)
obj_dim_names.append(key)
out_data.createDimension(obj_dim_names[-1], dim)
# create simple tuple with information needed to create
# the right dimensions for variables that will
# be written to file
var_dim = tuple([epoch_name] + obj_dim_names)
# We need to do different things if a series or dataframe
# stored
try:
# start by assuming it is a dataframe
# get list of subvariables
iterable = self[key].iloc[0].columns
# store our newfound knowledge, we are dealing with
# a series of DataFrames
is_frame = True
except AttributeError:
# turns out data is Series of Series
# which doesn't have columns
iterable = [self[key].iloc[0].name]
is_frame = False
# find location within main variable
# that actually has subvariable data (not just empty frame/series)
# so we can determine what the real underlying data types are
good_data_loc = 0
for jjj in np.arange(len(self.data)):
                            if len(self.data[key].iloc[jjj]) > 0:
                                good_data_loc = jjj
break
# found a place with data, if there is one
# now iterate over the subvariables, get data info
# create netCDF4 variables and store the data
# stored name is variable_subvariable
for col in iterable:
if is_frame:
# we are working with a dataframe
# so multiple subvariables stored under a single
# main variable heading
data, coltype, _ = self._get_data_info(self[key].iloc[good_data_loc][col], file_format)
cdfkey = out_data.createVariable(key + '_' + col,
coltype,
dimensions=var_dim,
zlib=zlib,
complevel=complevel,
shuffle=shuffle)
# attach any meta data
try:
new_dict = export_meta[key+'_'+col]
new_dict['Depend_0'] = epoch_name
new_dict['Depend_1'] = obj_dim_names[-1]
new_dict['Display_Type'] = 'Spectrogram'
new_dict['Format'] = self._get_var_type_code(coltype)
new_dict['Var_Type'] = 'data'
# print('Frame Writing ', key, col, export_meta[key].children[col])
new_dict = self._filter_netcdf4_metadata(new_dict, coltype)
# print ('mid2 ', new_dict)
cdfkey.setncatts(new_dict)
except KeyError:
print(', '.join(('Unable to find MetaData for', key, col)) )
# attach data
                                # it may be slow to repeatedly call the store
                                # and astype methods below, so collect the data
                                # into a numpy array, then write the full
                                # array in one go
# print(coltype, dims)
temp_cdf_data = np.zeros((num, dims[0])).astype(coltype)
for i in range(num):
temp_cdf_data[i, :] = self[key].iloc[i][col].values
# write data
cdfkey[:, :] = temp_cdf_data.astype(coltype)
else:
# we are dealing with a Series
# get information about information within series
data, coltype, _ = self._get_data_info(self[key].iloc[good_data_loc], file_format)
cdfkey = out_data.createVariable(key + '_data',
coltype,
dimensions=var_dim,
zlib=zlib,
complevel=complevel,
shuffle=shuffle) #, chunksizes=1)
# attach any meta data
try:
new_dict = export_meta[key]
new_dict['Depend_0'] = epoch_name
new_dict['Depend_1'] = obj_dim_names[-1]
new_dict['Display_Type'] = 'Spectrogram'
new_dict['Format'] = self._get_var_type_code(coltype)
new_dict['Var_Type'] = 'data'
new_dict = self._filter_netcdf4_metadata(new_dict, coltype)
# really attach metadata now
# print ('mid3 ', new_dict)
cdfkey.setncatts(new_dict)
except KeyError:
print(', '.join(('Unable to find MetaData for', key)))
# attach data
temp_cdf_data = np.zeros((num, dims[0])).astype(coltype)
for i in range(num):
temp_cdf_data[i, :] = self[i, key].values
# write data
cdfkey[:, :] = temp_cdf_data.astype(coltype)
# we are done storing the actual data for the given higher
# order variable, now we need to store the index for all
# of that fancy data
# get index information
data, coltype, datetime_flag = self._get_data_info(self[key].iloc[good_data_loc].index, file_format)
                        # create dimension variable to store the index in netCDF4
cdfkey = out_data.createVariable(key,
coltype, dimensions=var_dim,
zlib=zlib,
complevel=complevel,
shuffle=shuffle)
# work with metadata
new_dict = export_meta[key]
new_dict['Depend_0'] = epoch_name
new_dict['Depend_1'] = obj_dim_names[-1]
new_dict['Display_Type'] = 'Time Series'
new_dict['Format'] = self._get_var_type_code(coltype)
new_dict['Var_Type'] = 'data'
if datetime_flag:
#print('datetime flag')
for export_name_label in export_name_labels:
new_dict[export_name_label] = epoch_name
for export_units_label in export_units_labels:
new_dict[export_units_label] = 'Milliseconds since 1970-1-1 00:00:00'
new_dict = self._filter_netcdf4_metadata(new_dict, coltype)
# set metadata dict
cdfkey.setncatts(new_dict)
# set data
temp_cdf_data = np.zeros((num,
dims[0])).astype(coltype)
for i in range(num):
temp_cdf_data[i, :] = self[i, key].index.values
cdfkey[:, :] = (temp_cdf_data.astype(coltype) *
1.E-6).astype(coltype)
else:
                            if self[key].iloc[good_data_loc].index.name is not None:
for export_name_label in export_name_labels:
                                    new_dict[export_name_label] = self[key].iloc[good_data_loc].index.name
else:
for export_name_label in export_name_labels:
new_dict[export_name_label] = key
new_dict = self._filter_netcdf4_metadata(new_dict, coltype)
# assign metadata dict
cdfkey.setncatts(new_dict)
# set data
temp_cdf_data = np.zeros((num, dims[0])).astype(coltype)
for i in range(num):
temp_cdf_data[i, :] = self[key].iloc[i].index.to_native_types()
cdfkey[:, :] = temp_cdf_data.astype(coltype)
# store any non standard attributes
# compare this Instrument's attributes to base object
base_attrb = dir(base_instrument)
this_attrb = dir(self)
# filter out any 'private' attributes
# those that start with a _
adict = {}
for key in this_attrb:
if key not in base_attrb:
if key[0] != '_':
adict[key] = self.__getattribute__(key)
# store any non-standard attributes attached to meta
base_attrb = dir(base_instrument.meta)
this_attrb = dir(self.meta)
for key in this_attrb:
if key not in base_attrb:
if key[0] != '_':
adict[key] = self.meta.__getattribute__(key)
adict['pysat_version'] = pysat.__version__
if 'Conventions' not in adict:
adict['Conventions'] = 'SPDF ISTP/IACG Modified for NetCDF'
if 'Text_Supplement' not in adict:
adict['Text_Supplement'] = ''
adict['Date_Start'] = pysat.datetime.strftime(self.data.index[0], '%a, %d %b %Y, %Y-%m-%dT%H:%M:%S.%f UTC')
adict['Date_End'] = pysat.datetime.strftime(self.data.index[-1], '%a, %d %b %Y, %Y-%m-%dT%H:%M:%S.%f UTC')
adict['File'] = os.path.split(fname)
adict['Generation_Date'] = pysat.datetime.utcnow().strftime('%Y%m%d')
adict['Logical_File_ID'] = os.path.split(fname)[-1].split('.')[:-1]
# check for binary types
for key in adict.keys():
if isinstance(adict[key], bool):
adict[key] = int(adict[key])
# print('adict', adict)
out_data.setncatts(adict)
return |
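A minimal usage sketch for the to_netcdf4 method above, assuming pysat's bundled 'pysat'/'testing' instrument is available and that 2009 day-of-year 1 is a valid test date for it; the output file name is a placeholder.
import pysat

inst = pysat.Instrument(platform='pysat', name='testing')
inst.load(2009, 1)                          # one day of simulated data
inst.to_netcdf4('pysat_testing_20090101.nc',
                zlib=True, complevel=4, shuffle=True)
# end of usage sketch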
def _compute_ndim(row_loc, col_loc):
"""Compute the ndim of result from locators
"""
row_scaler = is_scalar(row_loc)
col_scaler = is_scalar(col_loc)
if row_scaler and col_scaler:
ndim = 0
elif row_scaler ^ col_scaler:
ndim = 1
else:
ndim = 2
    return ndim | Compute the ndim of result from locators | Below is the instruction that describes the task:
### Input:
Compute the ndim of result from locators
### Response:
def _compute_ndim(row_loc, col_loc):
"""Compute the ndim of result from locators
"""
row_scaler = is_scalar(row_loc)
col_scaler = is_scalar(col_loc)
if row_scaler and col_scaler:
ndim = 0
elif row_scaler ^ col_scaler:
ndim = 1
else:
ndim = 2
return ndim |
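A self-contained restatement of the locator rule for quick experimentation; is_scalar is taken from pandas here, which is an assumption about where the original module imports it from.
from pandas.api.types import is_scalar

def compute_ndim(row_loc, col_loc):
    # 0 -> single element, 1 -> Series-like, 2 -> DataFrame-like
    if is_scalar(row_loc) and is_scalar(col_loc):
        return 0
    if is_scalar(row_loc) ^ is_scalar(col_loc):
        return 1
    return 2

print(compute_ndim('a', 'b'))      # 0
print(compute_ndim(['a'], 'b'))    # 1
print(compute_ndim(['a'], ['b']))  # 2
# end of usage sketch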
def _check_delete_fw(self, tenant_id, drvr_name):
"""Deletes the Firewall, if all conditioms are met.
This function after modifying the DB with delete operation status,
calls the routine to remove the fabric cfg from DB and unconfigure
the device.
"""
fw_dict = self.fwid_attr[tenant_id].get_fw_dict()
ret = False
try:
with self.fwid_attr[tenant_id].mutex_lock:
self.update_fw_db_final_result(fw_dict.get('fw_id'), (
fw_constants.RESULT_FW_DELETE_INIT))
ret = self._delete_fw_fab_dev(tenant_id, drvr_name, fw_dict)
except Exception as exc:
LOG.error("Exception raised in delete fw %s", str(exc))
        return ret | Deletes the Firewall, if all conditions are met.
This function after modifying the DB with delete operation status,
calls the routine to remove the fabric cfg from DB and unconfigure
the device. | Below is the instruction that describes the task:
### Input:
Deletes the Firewall, if all conditions are met.
This function after modifying the DB with delete operation status,
calls the routine to remove the fabric cfg from DB and unconfigure
the device.
### Response:
def _check_delete_fw(self, tenant_id, drvr_name):
"""Deletes the Firewall, if all conditioms are met.
This function after modifying the DB with delete operation status,
calls the routine to remove the fabric cfg from DB and unconfigure
the device.
"""
fw_dict = self.fwid_attr[tenant_id].get_fw_dict()
ret = False
try:
with self.fwid_attr[tenant_id].mutex_lock:
self.update_fw_db_final_result(fw_dict.get('fw_id'), (
fw_constants.RESULT_FW_DELETE_INIT))
ret = self._delete_fw_fab_dev(tenant_id, drvr_name, fw_dict)
except Exception as exc:
LOG.error("Exception raised in delete fw %s", str(exc))
return ret |
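The driver and DB calls above cannot run standalone, so this is only a generic sketch of the same pattern (record the delete intent under the per-tenant lock, then attempt the unconfigure step); the class, statuses and callable are placeholders, not the real plugin API.
import threading

class FirewallRecord(object):
    def __init__(self):
        self.mutex_lock = threading.Lock()
        self.status = 'CREATED'

def delete_firewall(record, unconfigure):
    ret = False
    try:
        with record.mutex_lock:
            record.status = 'DELETE_INIT'   # persist intent before acting
            ret = unconfigure()             # then remove fabric/device config
    except Exception as exc:
        print("Exception raised in delete fw %s" % str(exc))
    return ret

print(delete_firewall(FirewallRecord(), lambda: True))   # True
# end of usage sketch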
def get_occupied_slots(instance):
"""
Return a list of slots for which values have been set.
(While a slot might be defined, if a value for that slot hasn't
been set, then it's an AttributeError to request the slot's
value.)
"""
return [slot for slot in get_all_slots(type(instance))
if hasattr(instance,slot)] | Return a list of slots for which values have been set.
(While a slot might be defined, if a value for that slot hasn't
been set, then it's an AttributeError to request the slot's
value.) | Below is the instruction that describes the task:
### Input:
Return a list of slots for which values have been set.
(While a slot might be defined, if a value for that slot hasn't
been set, then it's an AttributeError to request the slot's
value.)
### Response:
def get_occupied_slots(instance):
"""
Return a list of slots for which values have been set.
(While a slot might be defined, if a value for that slot hasn't
been set, then it's an AttributeError to request the slot's
value.)
"""
return [slot for slot in get_all_slots(type(instance))
if hasattr(instance,slot)] |
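A self-contained illustration; get_all_slots is re-implemented inline as an assumption (walking the MRO for __slots__), since the original module provides its own helper.
def get_all_slots(cls):
    slots = []
    for klass in cls.__mro__:
        slots.extend(getattr(klass, '__slots__', ()))
    return slots

def occupied_slots(instance):
    return [slot for slot in get_all_slots(type(instance))
            if hasattr(instance, slot)]

class Point(object):
    __slots__ = ('x', 'y')

p = Point()
p.x = 1
print(occupied_slots(p))   # ['x'] -- 'y' is declared but never set
# end of usage sketch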
def build_debian(config, os_versions, os_type='ubuntu'):
"""build_debian
Builds for a specific debian operating system with os version
specified. By default, it will use os_type='ubuntu'
"""
def build_pkg(config, os_type, os_version):
result = _build_package(config, os_type, os_version)
if not result.succeeded:
print(result.cli)
raise DebianError(result, os_type, os_version, frame=gfi(cf()))
error = 0
if isinstance(os_versions, str):
os_version = os_versions
try:
build_pkg(config, os_type, os_version)
        except DebianError as exc:
            exc.print_msg()
            error = exc
else:
for os_version in os_versions:
try:
build_pkg(config, os_type, os_version)
            except DebianError as exc:
                exc.print_msg()
                error = exc
return error | build_debian
Builds for a specific debian operating system with os version
specified. By default, it will use os_type='ubuntu' | Below is the instruction that describes the task:
### Input:
build_debian
Builds for a specific debian operating system with os version
specified. By default, it will use os_type='ubuntu'
### Response:
def build_debian(config, os_versions, os_type='ubuntu'):
"""build_debian
Builds for a specific debian operating system with os version
specified. By default, it will use os_type='ubuntu'
"""
def build_pkg(config, os_type, os_version):
result = _build_package(config, os_type, os_version)
if not result.succeeded:
print(result.cli)
raise DebianError(result, os_type, os_version, frame=gfi(cf()))
error = 0
if isinstance(os_versions, str):
os_version = os_versions
try:
build_pkg(config, os_type, os_version)
        except DebianError as exc:
            exc.print_msg()
            error = exc
else:
for os_version in os_versions:
try:
build_pkg(config, os_type, os_version)
            except DebianError as exc:
                exc.print_msg()
                error = exc
return error |
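A stubbed sketch of the str-or-list handling only; the real build path goes through _build_package and DebianError, which are not reproduced here, and the config dict is a placeholder.
def build_all(config, os_versions, os_type='ubuntu'):
    # normalize a single version string into a list before looping
    versions = [os_versions] if isinstance(os_versions, str) else list(os_versions)
    for os_version in versions:
        print('building', os_type, os_version, 'with', config)

build_all({'pkg': 'demo'}, '18.04')
build_all({'pkg': 'demo'}, ['16.04', '18.04'])
# end of usage sketch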
def remove_container(self, container, v=False, link=False, force=False):
"""
Remove a container. Similar to the ``docker rm`` command.
Args:
container (str): The container to remove
v (bool): Remove the volumes associated with the container
link (bool): Remove the specified link and not the underlying
container
force (bool): Force the removal of a running container (uses
``SIGKILL``)
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
params = {'v': v, 'link': link, 'force': force}
res = self._delete(
self._url("/containers/{0}", container), params=params
)
self._raise_for_status(res) | Remove a container. Similar to the ``docker rm`` command.
Args:
container (str): The container to remove
v (bool): Remove the volumes associated with the container
link (bool): Remove the specified link and not the underlying
container
force (bool): Force the removal of a running container (uses
``SIGKILL``)
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error. | Below is the instruction that describes the task:
### Input:
Remove a container. Similar to the ``docker rm`` command.
Args:
container (str): The container to remove
v (bool): Remove the volumes associated with the container
link (bool): Remove the specified link and not the underlying
container
force (bool): Force the removal of a running container (uses
``SIGKILL``)
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
### Response:
def remove_container(self, container, v=False, link=False, force=False):
"""
Remove a container. Similar to the ``docker rm`` command.
Args:
container (str): The container to remove
v (bool): Remove the volumes associated with the container
link (bool): Remove the specified link and not the underlying
container
force (bool): Force the removal of a running container (uses
``SIGKILL``)
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
params = {'v': v, 'link': link, 'force': force}
res = self._delete(
self._url("/containers/{0}", container), params=params
)
self._raise_for_status(res) |
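A typical call through docker-py's low-level APIClient, assuming a local Docker daemon and an existing container named 'web' (both placeholders):
import docker

client = docker.APIClient(base_url='unix://var/run/docker.sock')
client.remove_container('web', v=True, force=True)   # roughly `docker rm -f -v web`
# end of usage sketch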
def add_if_unique(self, name):
"""
Returns ``True`` on success.
Returns ``False`` if the name already exists in the namespace.
"""
with self.lock:
if name not in self.names:
self.names.append(name)
return True
return False | Returns ``True`` on success.
Returns ``False`` if the name already exists in the namespace. | Below is the instruction that describes the task:
### Input:
Returns ``True`` on success.
Returns ``False`` if the name already exists in the namespace.
### Response:
def add_if_unique(self, name):
"""
Returns ``True`` on success.
Returns ``False`` if the name already exists in the namespace.
"""
with self.lock:
if name not in self.names:
self.names.append(name)
return True
return False |
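A minimal standalone holder showing the check-then-append under a lock; the class name and attributes are assumptions for illustration.
import threading

class Namespace(object):
    def __init__(self):
        self.lock = threading.Lock()
        self.names = []

    def add_if_unique(self, name):
        with self.lock:
            if name not in self.names:
                self.names.append(name)
                return True
            return False

ns = Namespace()
print(ns.add_if_unique('conv1'))   # True
print(ns.add_if_unique('conv1'))   # False -- name already taken
# end of usage sketch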
def add_prefix(arg, opts, shell_opts):
""" Add prefix to NIPAP
"""
# sanity checks
if 'from-pool' not in opts and 'from-prefix' not in opts and 'prefix' not in opts:
print("ERROR: 'prefix', 'from-pool' or 'from-prefix' must be specified.", file=sys.stderr)
sys.exit(1)
if len([opt for opt in opts if opt in ['from-pool', 'from-prefix', 'prefix']]) > 1:
print("ERROR: Use either assignment 'from-pool', 'from-prefix' or manual mode (using 'prefix')", file=sys.stderr)
sys.exit(1)
if 'from-pool' in opts:
return add_prefix_from_pool(arg, opts)
args = {}
p = _prefix_from_opts(opts)
p.vrf = get_vrf(opts.get('vrf_rt'), abort=True)
for avp in opts.get('extra-attribute', []):
try:
key, value = avp.split('=', 1)
except ValueError:
print("ERROR: Incorrect extra-attribute: %s. Accepted form: 'key=value'\n" % avp, file=sys.stderr)
return
p.avps[key] = value
if 'from-prefix' in opts:
args['from-prefix'] = [ opts['from-prefix'], ]
if 'prefix_length' in opts:
args['prefix_length'] = int(opts['prefix_length'])
if 'family' in opts:
if opts['family'] == 'ipv4':
family = 4
elif opts['family'] == 'ipv6':
family = 6
elif opts['family'] == 'dual-stack':
print("ERROR: dual-stack mode only valid for from-pool assignments", file=sys.stderr)
sys.exit(1)
args['family'] = family
# try to automatically figure out type for new prefix when not
# allocating from a pool
# get a list of prefixes that contain this prefix
vrf_id = 0
if p.vrf:
vrf_id = p.vrf.id
if 'from-prefix' in args:
parent_prefix = args['from-prefix'][0]
parent_op = 'equals'
else:
# If no prefix length is specified it is assumed to be a host and we do
# a search for prefixes that contains the specified prefix. The last
# entry will be the parent of the new prefix and we can look at it to
# determine type.
# If prefix length is specified (i.e. CIDR format) we check if prefix
# length equals max length in which case we assume a host prefix,
# otherwise we search for the network using an equal match and by
# zeroing out bits in the host part.
if len(opts.get('prefix').split("/")) == 2:
ip = IPy.IP(opts.get('prefix').split("/")[0])
plen = int(opts.get('prefix').split("/")[1])
if ip.version() == 4 and plen == 32 or ip.version() == 6 and plen == 128:
parent_prefix = str(ip)
parent_op = 'contains'
else:
parent_prefix = str(IPy.IP(opts.get('prefix'), make_net=True))
parent_op = 'equals'
else:
parent_prefix = opts.get('prefix')
parent_op = 'contains'
auto_type_query = {
'val1': {
'val1' : 'prefix',
'operator' : parent_op,
'val2' : parent_prefix
},
'operator': 'and',
'val2': {
'val1' : 'vrf_id',
'operator' : 'equals',
'val2' : vrf_id
}
}
res = Prefix.search(auto_type_query, { })
# no results, ie the requested prefix is a top level prefix
if len(res['result']) == 0:
if p.type is None:
print("ERROR: Type of prefix must be specified ('assignment' or 'reservation').", file=sys.stderr)
sys.exit(1)
else:
# last prefix in list will be the parent of the new prefix
parent = res['result'][-1]
# if the parent is an assignment, we can assume the new prefix to be
# a host and act accordingly
if parent.type == 'assignment':
# automatically set type
if p.type is None:
print("WARNING: Parent prefix is of type 'assignment'. Automatically setting type 'host' for new prefix.", file=sys.stderr)
elif p.type == 'host':
pass
else:
print("WARNING: Parent prefix is of type 'assignment'. Automatically overriding specified type '%s' with type 'host' for new prefix." % p.type, file=sys.stderr)
p.type = 'host'
# if it's a manually specified prefix
if 'prefix' in opts:
# fiddle prefix length to all bits set
if parent.family == 4:
p.prefix = p.prefix.split('/')[0] + '/32'
else:
p.prefix = p.prefix.split('/')[0] + '/128'
# for from-prefix, we set prefix_length to host length
elif 'from-prefix' in opts:
if parent.family == 4:
args['prefix_length'] = 32
else:
args['prefix_length'] = 128
try:
p.save(args)
except NipapError as exc:
print("Could not add prefix to NIPAP: %s" % str(exc), file=sys.stderr)
sys.exit(1)
if p.type == 'host':
print("Host %s added to %s: %s" % (p.display_prefix,
vrf_format(p.vrf), p.node or p.description))
else:
print("Network %s added to %s: %s" % (p.display_prefix,
vrf_format(p.vrf), p.description))
if opts.get('add-hosts') is not None:
if p.type != 'assignment':
print("ERROR: Not possible to add hosts to non-assignment", file=sys.stderr)
sys.exit(1)
for host in opts.get('add-hosts').split(','):
h_opts = {
'from-prefix': p.prefix,
'vrf_rt': p.vrf.rt,
'type': 'host',
'node': host
}
            add_prefix({}, h_opts, {}) | Add prefix to NIPAP | Below is the instruction that describes the task:
### Input:
Add prefix to NIPAP
### Response:
def add_prefix(arg, opts, shell_opts):
""" Add prefix to NIPAP
"""
# sanity checks
if 'from-pool' not in opts and 'from-prefix' not in opts and 'prefix' not in opts:
print("ERROR: 'prefix', 'from-pool' or 'from-prefix' must be specified.", file=sys.stderr)
sys.exit(1)
if len([opt for opt in opts if opt in ['from-pool', 'from-prefix', 'prefix']]) > 1:
print("ERROR: Use either assignment 'from-pool', 'from-prefix' or manual mode (using 'prefix')", file=sys.stderr)
sys.exit(1)
if 'from-pool' in opts:
return add_prefix_from_pool(arg, opts)
args = {}
p = _prefix_from_opts(opts)
p.vrf = get_vrf(opts.get('vrf_rt'), abort=True)
for avp in opts.get('extra-attribute', []):
try:
key, value = avp.split('=', 1)
except ValueError:
print("ERROR: Incorrect extra-attribute: %s. Accepted form: 'key=value'\n" % avp, file=sys.stderr)
return
p.avps[key] = value
if 'from-prefix' in opts:
args['from-prefix'] = [ opts['from-prefix'], ]
if 'prefix_length' in opts:
args['prefix_length'] = int(opts['prefix_length'])
if 'family' in opts:
if opts['family'] == 'ipv4':
family = 4
elif opts['family'] == 'ipv6':
family = 6
elif opts['family'] == 'dual-stack':
print("ERROR: dual-stack mode only valid for from-pool assignments", file=sys.stderr)
sys.exit(1)
args['family'] = family
# try to automatically figure out type for new prefix when not
# allocating from a pool
# get a list of prefixes that contain this prefix
vrf_id = 0
if p.vrf:
vrf_id = p.vrf.id
if 'from-prefix' in args:
parent_prefix = args['from-prefix'][0]
parent_op = 'equals'
else:
# If no prefix length is specified it is assumed to be a host and we do
# a search for prefixes that contains the specified prefix. The last
# entry will be the parent of the new prefix and we can look at it to
# determine type.
# If prefix length is specified (i.e. CIDR format) we check if prefix
# length equals max length in which case we assume a host prefix,
# otherwise we search for the network using an equal match and by
# zeroing out bits in the host part.
if len(opts.get('prefix').split("/")) == 2:
ip = IPy.IP(opts.get('prefix').split("/")[0])
plen = int(opts.get('prefix').split("/")[1])
if ip.version() == 4 and plen == 32 or ip.version() == 6 and plen == 128:
parent_prefix = str(ip)
parent_op = 'contains'
else:
parent_prefix = str(IPy.IP(opts.get('prefix'), make_net=True))
parent_op = 'equals'
else:
parent_prefix = opts.get('prefix')
parent_op = 'contains'
auto_type_query = {
'val1': {
'val1' : 'prefix',
'operator' : parent_op,
'val2' : parent_prefix
},
'operator': 'and',
'val2': {
'val1' : 'vrf_id',
'operator' : 'equals',
'val2' : vrf_id
}
}
res = Prefix.search(auto_type_query, { })
# no results, ie the requested prefix is a top level prefix
if len(res['result']) == 0:
if p.type is None:
print("ERROR: Type of prefix must be specified ('assignment' or 'reservation').", file=sys.stderr)
sys.exit(1)
else:
# last prefix in list will be the parent of the new prefix
parent = res['result'][-1]
# if the parent is an assignment, we can assume the new prefix to be
# a host and act accordingly
if parent.type == 'assignment':
# automatically set type
if p.type is None:
print("WARNING: Parent prefix is of type 'assignment'. Automatically setting type 'host' for new prefix.", file=sys.stderr)
elif p.type == 'host':
pass
else:
print("WARNING: Parent prefix is of type 'assignment'. Automatically overriding specified type '%s' with type 'host' for new prefix." % p.type, file=sys.stderr)
p.type = 'host'
# if it's a manually specified prefix
if 'prefix' in opts:
# fiddle prefix length to all bits set
if parent.family == 4:
p.prefix = p.prefix.split('/')[0] + '/32'
else:
p.prefix = p.prefix.split('/')[0] + '/128'
# for from-prefix, we set prefix_length to host length
elif 'from-prefix' in opts:
if parent.family == 4:
args['prefix_length'] = 32
else:
args['prefix_length'] = 128
try:
p.save(args)
except NipapError as exc:
print("Could not add prefix to NIPAP: %s" % str(exc), file=sys.stderr)
sys.exit(1)
if p.type == 'host':
print("Host %s added to %s: %s" % (p.display_prefix,
vrf_format(p.vrf), p.node or p.description))
else:
print("Network %s added to %s: %s" % (p.display_prefix,
vrf_format(p.vrf), p.description))
if opts.get('add-hosts') is not None:
if p.type != 'assignment':
print("ERROR: Not possible to add hosts to non-assignment", file=sys.stderr)
sys.exit(1)
for host in opts.get('add-hosts').split(','):
h_opts = {
'from-prefix': p.prefix,
'vrf_rt': p.vrf.rt,
'type': 'host',
'node': host
}
add_prefix({}, h_opts, {}) |
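The host-versus-network decision above is the easiest piece to exercise in isolation; this sketch reuses the same IPy calls (requires the IPy package) but is not the NIPAP CLI itself.
import IPy

def parent_query_target(prefix):
    # mirror the CIDR check used above: a /32 (or /128) is treated as a host
    if len(prefix.split('/')) == 2:
        ip = IPy.IP(prefix.split('/')[0])
        plen = int(prefix.split('/')[1])
        if (ip.version() == 4 and plen == 32) or (ip.version() == 6 and plen == 128):
            return str(ip), 'contains'                           # host address
        return str(IPy.IP(prefix, make_net=True)), 'equals'      # network
    return prefix, 'contains'                                    # bare address

print(parent_query_target('192.0.2.1/32'))   # ('192.0.2.1', 'contains')
print(parent_query_target('192.0.2.0/24'))   # ('192.0.2.0/24', 'equals')
# end of usage sketch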
def opt(self, x_init, f_fp=None, f=None, fp=None):
"""
Run the optimizer
"""
rcstrings = ['Converged', 'Maximum number of f evaluations reached', 'Error']
        assert f_fp is not None, "BFGS requires f_fp"
opt_dict = {}
if self.xtol is not None:
print("WARNING: l-bfgs-b doesn't have an xtol arg, so I'm going to ignore it")
if self.ftol is not None:
print("WARNING: l-bfgs-b doesn't have an ftol arg, so I'm going to ignore it")
if self.gtol is not None:
opt_dict['pgtol'] = self.gtol
if self.bfgs_factor is not None:
opt_dict['factr'] = self.bfgs_factor
opt_result = optimize.fmin_l_bfgs_b(f_fp, x_init, maxfun=self.max_iters, maxiter=self.max_iters, **opt_dict)
self.x_opt = opt_result[0]
self.f_opt = f_fp(self.x_opt)[0]
self.funct_eval = opt_result[2]['funcalls']
self.status = rcstrings[opt_result[2]['warnflag']]
#a more helpful error message is available in opt_result in the Error case
if opt_result[2]['warnflag']==2: # pragma: no coverage, this is not needed to be covered
            self.status = 'Error' + str(opt_result[2]['task']) | Run the optimizer | Below is the instruction that describes the task:
### Input:
Run the optimizer
### Response:
def opt(self, x_init, f_fp=None, f=None, fp=None):
"""
Run the optimizer
"""
rcstrings = ['Converged', 'Maximum number of f evaluations reached', 'Error']
        assert f_fp is not None, "BFGS requires f_fp"
opt_dict = {}
if self.xtol is not None:
print("WARNING: l-bfgs-b doesn't have an xtol arg, so I'm going to ignore it")
if self.ftol is not None:
print("WARNING: l-bfgs-b doesn't have an ftol arg, so I'm going to ignore it")
if self.gtol is not None:
opt_dict['pgtol'] = self.gtol
if self.bfgs_factor is not None:
opt_dict['factr'] = self.bfgs_factor
opt_result = optimize.fmin_l_bfgs_b(f_fp, x_init, maxfun=self.max_iters, maxiter=self.max_iters, **opt_dict)
self.x_opt = opt_result[0]
self.f_opt = f_fp(self.x_opt)[0]
self.funct_eval = opt_result[2]['funcalls']
self.status = rcstrings[opt_result[2]['warnflag']]
#a more helpful error message is available in opt_result in the Error case
if opt_result[2]['warnflag']==2: # pragma: no coverage, this is not needed to be covered
self.status = 'Error' + str(opt_result[2]['task']) |
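A direct SciPy call with the same knobs the wrapper above forwards (maxfun, maxiter, pgtol); f_fp returns (objective, gradient) exactly as the wrapper expects.
import numpy as np
from scipy import optimize

def f_fp(x):
    f = np.sum((x - 3.0) ** 2)       # simple quadratic bowl centred on 3
    grad = 2.0 * (x - 3.0)
    return f, grad

x_opt, f_opt, info = optimize.fmin_l_bfgs_b(f_fp, np.zeros(2), maxfun=1000,
                                            maxiter=1000, pgtol=1e-6)
print(x_opt, info['funcalls'], info['warnflag'])   # ~[3. 3.], few calls, 0
# end of usage sketch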
def fit(self, X, y, sample_weight=None, eval_set=None, eval_metric=None,
early_stopping_rounds=None, verbose=True, xgb_model=None,
sample_weight_eval_set=None, callbacks=None):
# pylint: disable=missing-docstring,invalid-name,attribute-defined-outside-init
"""
Fit the gradient boosting model
Parameters
----------
X : array_like
Feature matrix
y : array_like
Labels
sample_weight : array_like
instance weights
eval_set : list, optional
A list of (X, y) tuple pairs to use as a validation set for
early-stopping
sample_weight_eval_set : list, optional
A list of the form [L_1, L_2, ..., L_n], where each L_i is a list of
instance weights on the i-th validation set.
eval_metric : str, callable, optional
If a str, should be a built-in evaluation metric to use. See
doc/parameter.rst. If callable, a custom evaluation metric. The call
signature is func(y_predicted, y_true) where y_true will be a
DMatrix object such that you may need to call the get_label
method. It must return a str, value pair where the str is a name
for the evaluation and value is the value of the evaluation
function. This objective is always minimized.
early_stopping_rounds : int
Activates early stopping. Validation error needs to decrease at
least every <early_stopping_rounds> round(s) to continue training.
Requires at least one item in evals. If there's more than one,
will use the last. Returns the model from the last iteration
(not the best one). If early stopping occurs, the model will
have three additional fields: bst.best_score, bst.best_iteration
and bst.best_ntree_limit.
(Use bst.best_ntree_limit to get the correct value if num_parallel_tree
and/or num_class appears in the parameters)
verbose : bool
If `verbose` and an evaluation set is used, writes the evaluation
metric measured on the validation set to stderr.
xgb_model : str
file name of stored xgb model or 'Booster' instance Xgb model to be
loaded before training (allows training continuation).
callbacks : list of callback functions
List of callback functions that are applied at end of each iteration.
It is possible to use predefined callbacks by using :ref:`callback_api`.
Example:
.. code-block:: python
[xgb.callback.reset_learning_rate(custom_rates)]
"""
if sample_weight is not None:
trainDmatrix = DMatrix(X, label=y, weight=sample_weight,
missing=self.missing, nthread=self.n_jobs)
else:
trainDmatrix = DMatrix(X, label=y, missing=self.missing, nthread=self.n_jobs)
evals_result = {}
if eval_set is not None:
if sample_weight_eval_set is None:
sample_weight_eval_set = [None] * len(eval_set)
evals = list(
DMatrix(eval_set[i][0], label=eval_set[i][1], missing=self.missing,
weight=sample_weight_eval_set[i], nthread=self.n_jobs)
for i in range(len(eval_set)))
evals = list(zip(evals, ["validation_{}".format(i) for i in
range(len(evals))]))
else:
evals = ()
params = self.get_xgb_params()
if callable(self.objective):
obj = _objective_decorator(self.objective)
params["objective"] = "reg:linear"
else:
obj = None
feval = eval_metric if callable(eval_metric) else None
if eval_metric is not None:
if callable(eval_metric):
eval_metric = None
else:
params.update({'eval_metric': eval_metric})
self._Booster = train(params, trainDmatrix,
self.get_num_boosting_rounds(), evals=evals,
early_stopping_rounds=early_stopping_rounds,
evals_result=evals_result, obj=obj, feval=feval,
verbose_eval=verbose, xgb_model=xgb_model,
callbacks=callbacks)
if evals_result:
for val in evals_result.items():
evals_result_key = list(val[1].keys())[0]
evals_result[val[0]][evals_result_key] = val[1][evals_result_key]
self.evals_result_ = evals_result
if early_stopping_rounds is not None:
self.best_score = self._Booster.best_score
self.best_iteration = self._Booster.best_iteration
self.best_ntree_limit = self._Booster.best_ntree_limit
return self | Fit the gradient boosting model
Parameters
----------
X : array_like
Feature matrix
y : array_like
Labels
sample_weight : array_like
instance weights
eval_set : list, optional
A list of (X, y) tuple pairs to use as a validation set for
early-stopping
sample_weight_eval_set : list, optional
A list of the form [L_1, L_2, ..., L_n], where each L_i is a list of
instance weights on the i-th validation set.
eval_metric : str, callable, optional
If a str, should be a built-in evaluation metric to use. See
doc/parameter.rst. If callable, a custom evaluation metric. The call
signature is func(y_predicted, y_true) where y_true will be a
DMatrix object such that you may need to call the get_label
method. It must return a str, value pair where the str is a name
for the evaluation and value is the value of the evaluation
function. This objective is always minimized.
early_stopping_rounds : int
Activates early stopping. Validation error needs to decrease at
least every <early_stopping_rounds> round(s) to continue training.
Requires at least one item in evals. If there's more than one,
will use the last. Returns the model from the last iteration
(not the best one). If early stopping occurs, the model will
have three additional fields: bst.best_score, bst.best_iteration
and bst.best_ntree_limit.
(Use bst.best_ntree_limit to get the correct value if num_parallel_tree
and/or num_class appears in the parameters)
verbose : bool
If `verbose` and an evaluation set is used, writes the evaluation
metric measured on the validation set to stderr.
xgb_model : str
file name of stored xgb model or 'Booster' instance Xgb model to be
loaded before training (allows training continuation).
callbacks : list of callback functions
List of callback functions that are applied at end of each iteration.
It is possible to use predefined callbacks by using :ref:`callback_api`.
Example:
.. code-block:: python
[xgb.callback.reset_learning_rate(custom_rates)] | Below is the instruction that describes the task:
### Input:
Fit the gradient boosting model
Parameters
----------
X : array_like
Feature matrix
y : array_like
Labels
sample_weight : array_like
instance weights
eval_set : list, optional
A list of (X, y) tuple pairs to use as a validation set for
early-stopping
sample_weight_eval_set : list, optional
A list of the form [L_1, L_2, ..., L_n], where each L_i is a list of
instance weights on the i-th validation set.
eval_metric : str, callable, optional
If a str, should be a built-in evaluation metric to use. See
doc/parameter.rst. If callable, a custom evaluation metric. The call
signature is func(y_predicted, y_true) where y_true will be a
DMatrix object such that you may need to call the get_label
method. It must return a str, value pair where the str is a name
for the evaluation and value is the value of the evaluation
function. This objective is always minimized.
early_stopping_rounds : int
Activates early stopping. Validation error needs to decrease at
least every <early_stopping_rounds> round(s) to continue training.
Requires at least one item in evals. If there's more than one,
will use the last. Returns the model from the last iteration
(not the best one). If early stopping occurs, the model will
have three additional fields: bst.best_score, bst.best_iteration
and bst.best_ntree_limit.
(Use bst.best_ntree_limit to get the correct value if num_parallel_tree
and/or num_class appears in the parameters)
verbose : bool
If `verbose` and an evaluation set is used, writes the evaluation
metric measured on the validation set to stderr.
xgb_model : str
file name of stored xgb model or 'Booster' instance Xgb model to be
loaded before training (allows training continuation).
callbacks : list of callback functions
List of callback functions that are applied at end of each iteration.
It is possible to use predefined callbacks by using :ref:`callback_api`.
Example:
.. code-block:: python
[xgb.callback.reset_learning_rate(custom_rates)]
### Response:
def fit(self, X, y, sample_weight=None, eval_set=None, eval_metric=None,
early_stopping_rounds=None, verbose=True, xgb_model=None,
sample_weight_eval_set=None, callbacks=None):
# pylint: disable=missing-docstring,invalid-name,attribute-defined-outside-init
"""
Fit the gradient boosting model
Parameters
----------
X : array_like
Feature matrix
y : array_like
Labels
sample_weight : array_like
instance weights
eval_set : list, optional
A list of (X, y) tuple pairs to use as a validation set for
early-stopping
sample_weight_eval_set : list, optional
A list of the form [L_1, L_2, ..., L_n], where each L_i is a list of
instance weights on the i-th validation set.
eval_metric : str, callable, optional
If a str, should be a built-in evaluation metric to use. See
doc/parameter.rst. If callable, a custom evaluation metric. The call
signature is func(y_predicted, y_true) where y_true will be a
DMatrix object such that you may need to call the get_label
method. It must return a str, value pair where the str is a name
for the evaluation and value is the value of the evaluation
function. This objective is always minimized.
early_stopping_rounds : int
Activates early stopping. Validation error needs to decrease at
least every <early_stopping_rounds> round(s) to continue training.
Requires at least one item in evals. If there's more than one,
will use the last. Returns the model from the last iteration
(not the best one). If early stopping occurs, the model will
have three additional fields: bst.best_score, bst.best_iteration
and bst.best_ntree_limit.
(Use bst.best_ntree_limit to get the correct value if num_parallel_tree
and/or num_class appears in the parameters)
verbose : bool
If `verbose` and an evaluation set is used, writes the evaluation
metric measured on the validation set to stderr.
xgb_model : str
file name of stored xgb model or 'Booster' instance Xgb model to be
loaded before training (allows training continuation).
callbacks : list of callback functions
List of callback functions that are applied at end of each iteration.
It is possible to use predefined callbacks by using :ref:`callback_api`.
Example:
.. code-block:: python
[xgb.callback.reset_learning_rate(custom_rates)]
"""
if sample_weight is not None:
trainDmatrix = DMatrix(X, label=y, weight=sample_weight,
missing=self.missing, nthread=self.n_jobs)
else:
trainDmatrix = DMatrix(X, label=y, missing=self.missing, nthread=self.n_jobs)
evals_result = {}
if eval_set is not None:
if sample_weight_eval_set is None:
sample_weight_eval_set = [None] * len(eval_set)
evals = list(
DMatrix(eval_set[i][0], label=eval_set[i][1], missing=self.missing,
weight=sample_weight_eval_set[i], nthread=self.n_jobs)
for i in range(len(eval_set)))
evals = list(zip(evals, ["validation_{}".format(i) for i in
range(len(evals))]))
else:
evals = ()
params = self.get_xgb_params()
if callable(self.objective):
obj = _objective_decorator(self.objective)
params["objective"] = "reg:linear"
else:
obj = None
feval = eval_metric if callable(eval_metric) else None
if eval_metric is not None:
if callable(eval_metric):
eval_metric = None
else:
params.update({'eval_metric': eval_metric})
self._Booster = train(params, trainDmatrix,
self.get_num_boosting_rounds(), evals=evals,
early_stopping_rounds=early_stopping_rounds,
evals_result=evals_result, obj=obj, feval=feval,
verbose_eval=verbose, xgb_model=xgb_model,
callbacks=callbacks)
if evals_result:
for val in evals_result.items():
evals_result_key = list(val[1].keys())[0]
evals_result[val[0]][evals_result_key] = val[1][evals_result_key]
self.evals_result_ = evals_result
if early_stopping_rounds is not None:
self.best_score = self._Booster.best_score
self.best_iteration = self._Booster.best_iteration
self.best_ntree_limit = self._Booster.best_ntree_limit
return self |
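A minimal usage sketch of the fit() API above, assuming xgboost is installed and still accepts eval_set, eval_metric and early_stopping_rounds directly in fit() as the signature shown does (newer releases move some of these to the constructor). The array shapes and parameter values are illustrative only.
import numpy as np
import xgboost as xgb

rng = np.random.RandomState(0)
X = rng.rand(500, 10)
y = 2.0 * X[:, 0] + 0.1 * rng.rand(500)

# Hold out the last 100 rows as a validation set for early stopping.
X_train, y_train = X[:400], y[:400]
X_val, y_val = X[400:], y[400:]

model = xgb.XGBRegressor(n_estimators=200, max_depth=3)
model.fit(
    X_train, y_train,
    eval_set=[(X_val, y_val)],        # becomes "validation_0" in evals_result_
    eval_metric="rmse",
    early_stopping_rounds=10,         # stop after 10 rounds without improvement
    verbose=False,
)
print(model.best_iteration)
print(model.evals_result_["validation_0"]["rmse"][-1])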
def _is_idempotent(self, output):
"""
Parses the output of the provisioning for changed and returns a bool.
:param output: A string containing the output of the ansible run.
:return: bool
"""
# Remove blank lines to make regex matches easier
output = re.sub(r'\n\s*\n*', '\n', output)
# Look for any non-zero changed lines
changed = re.search(r'(changed=[1-9][0-9]*)', output)
if changed:
# Not idempotent
return False
return True | Parses the output of the provisioning for changed and returns a bool.
:param output: A string containing the output of the ansible run.
:return: bool | Below is the instruction that describes the task:
### Input:
Parses the output of the provisioning for changed and returns a bool.
:param output: A string containing the output of the ansible run.
:return: bool
### Response:
def _is_idempotent(self, output):
"""
Parses the output of the provisioning for changed and returns a bool.
:param output: A string containing the output of the ansible run.
:return: bool
"""
# Remove blank lines to make regex matches easier
output = re.sub(r'\n\s*\n*', '\n', output)
# Look for any non-zero changed lines
changed = re.search(r'(changed=[1-9][0-9]*)', output)
if changed:
# Not idempotent
return False
return True |
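The regex check above needs only the standard library, so it can be exercised directly; the recap strings below are made-up stand-ins for real Ansible output.
import re

def is_idempotent(output):
    """Return True when an Ansible-style recap reports no changed tasks."""
    # Collapse blank lines, then look for any non-zero changed=N counter.
    output = re.sub(r'\n\s*\n*', '\n', output)
    return re.search(r'(changed=[1-9][0-9]*)', output) is None

idempotent_run = "host1 : ok=5    changed=0    unreachable=0    failed=0"
changed_run = "host1 : ok=5    changed=2    unreachable=0    failed=0"
assert is_idempotent(idempotent_run) is True
assert is_idempotent(changed_run) is False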
def getCatalog(instance, field='UID'):
"""
Returns the catalog that stores objects of instance passed in type.
If an object is indexed by more than one catalog, the first match
will be returned.
:param instance: A single object
:type instance: ATContentType
:returns: The first catalog that stores the type of object passed in
"""
uid = instance.UID()
if 'workflow_skiplist' in instance.REQUEST and \
[x for x in instance.REQUEST['workflow_skiplist']
if x.find(uid) > -1]:
return None
else:
# grab the first catalog we are indexed in.
# we're only indexed in one.
at = getToolByName(instance, 'archetype_tool')
plone = instance.portal_url.getPortalObject()
catalog_name = instance.portal_type in at.catalog_map \
and at.catalog_map[instance.portal_type][0] or 'portal_catalog'
catalog = getToolByName(plone, catalog_name)
return catalog | Returns the catalog that stores objects of instance passed in type.
If an object is indexed by more than one catalog, the first match
will be returned.
:param instance: A single object
:type instance: ATContentType
:returns: The first catalog that stores the type of object passed in | Below is the instruction that describes the task:
### Input:
Returns the catalog that stores objects of instance passed in type.
If an object is indexed by more than one catalog, the first match
will be returned.
:param instance: A single object
:type instance: ATContentType
:returns: The first catalog that stores the type of object passed in
### Response:
def getCatalog(instance, field='UID'):
"""
Returns the catalog that stores objects of instance passed in type.
If an object is indexed by more than one catalog, the first match
will be returned.
:param instance: A single object
:type instance: ATContentType
:returns: The first catalog that stores the type of object passed in
"""
uid = instance.UID()
if 'workflow_skiplist' in instance.REQUEST and \
[x for x in instance.REQUEST['workflow_skiplist']
if x.find(uid) > -1]:
return None
else:
# grab the first catalog we are indexed in.
# we're only indexed in one.
at = getToolByName(instance, 'archetype_tool')
plone = instance.portal_url.getPortalObject()
catalog_name = instance.portal_type in at.catalog_map \
and at.catalog_map[instance.portal_type][0] or 'portal_catalog'
catalog = getToolByName(plone, catalog_name)
return catalog |
def get_tasks(self, thread_name):
"""
Args:
thread_name (str): name of the thread to get the tasks for
Returns:
OrderedDict of str, Task: list of task names and log records for
each for the given thread
"""
if thread_name not in self.tasks_by_thread:
with self._tasks_lock:
self.tasks_by_thread[thread_name] = OrderedDict()
return self.tasks_by_thread[thread_name] | Args:
thread_name (str): name of the thread to get the tasks for
Returns:
OrderedDict of str, Task: list of task names and log records for
each for the given thread | Below is the instruction that describes the task:
### Input:
Args:
thread_name (str): name of the thread to get the tasks for
Returns:
OrderedDict of str, Task: list of task names and log records for
each for the given thread
### Response:
def get_tasks(self, thread_name):
"""
Args:
thread_name (str): name of the thread to get the tasks for
Returns:
OrderedDict of str, Task: list of task names and log records for
each for the given thread
"""
if thread_name not in self.tasks_by_thread:
with self._tasks_lock:
self.tasks_by_thread[thread_name] = OrderedDict()
return self.tasks_by_thread[thread_name] |
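The lock-guarded, lazily created per-thread OrderedDict in get_tasks() can be reproduced in isolation; the TaskRegistry class below is a hypothetical stand-in rather than the original log collector, and it adds an explicit re-check inside the lock to make the race handling visible.
import threading
from collections import OrderedDict

class TaskRegistry:
    """Hypothetical stand-in showing the lazy per-thread OrderedDict pattern."""

    def __init__(self):
        self.tasks_by_thread = {}
        self._tasks_lock = threading.Lock()

    def get_tasks(self, thread_name):
        # Take the lock only on the first access for a given thread name.
        if thread_name not in self.tasks_by_thread:
            with self._tasks_lock:
                # Re-check under the lock so two racing threads do not both
                # overwrite the entry with a fresh OrderedDict.
                if thread_name not in self.tasks_by_thread:
                    self.tasks_by_thread[thread_name] = OrderedDict()
        return self.tasks_by_thread[thread_name]

registry = TaskRegistry()
registry.get_tasks("worker-1")["task-a"] = ["log line 1"]
print(list(registry.get_tasks("worker-1")))   # ['task-a']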
def isplaybook(obj):
'''
Inspects the object and returns if it is a playbook
Args:
obj (object): The object to be inspected by this function
Returns:
boolean: True if the object is a list and False if it is not
'''
return isinstance(obj, Iterable) and (not isinstance(obj, string_types) and not isinstance(obj, Mapping)) | Inspects the object and returns if it is a playbook
Args:
obj (object): The object to be inspected by this function
Returns:
boolean: True if the object is a list and False if it is not | Below is the instruction that describes the task:
### Input:
Inspects the object and returns if it is a playbook
Args:
obj (object): The object to be inspected by this function
Returns:
boolean: True if the object is a list and False if it is not
### Response:
def isplaybook(obj):
'''
Inspects the object and returns if it is a playbook
Args:
obj (object): The object to be inspected by this function
Returns:
boolean: True if the object is a list and False if it is not
'''
return isinstance(obj, Iterable) and (not isinstance(obj, string_types) and not isinstance(obj, Mapping)) |
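The playbook test reduces to "iterable, but neither a string nor a mapping"; a dependency-free sketch using the standard-library equivalents of the Ansible imports (Iterable, string_types, Mapping) behaves the same way on Python 3.
from collections.abc import Iterable, Mapping

def isplaybook(obj):
    """True for list-like containers, False for strings, mappings and scalars."""
    return isinstance(obj, Iterable) and not isinstance(obj, (str, Mapping))

print(isplaybook([{"hosts": "all", "tasks": []}]))  # True:  a list of plays
print(isplaybook({"hosts": "all"}))                 # False: a single mapping
print(isplaybook("site.yml"))                       # False: a plain string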
def prepend_zeros_to_lists(ls):
"""
Takes a list of lists and appends 0s to the beginning of each sub_list
until they are all the same length. Used for sign-extending binary numbers.
"""
longest = max([len(l) for l in ls])
for i in range(len(ls)):
while len(ls[i]) < longest:
ls[i].insert(0, "0") | Takes a list of lists and appends 0s to the beginning of each sub_list
until they are all the same length. Used for sign-extending binary numbers. | Below is the instruction that describes the task:
### Input:
Takes a list of lists and appends 0s to the beginning of each sub_list
until they are all the same length. Used for sign-extending binary numbers.
### Response:
def prepend_zeros_to_lists(ls):
"""
Takes a list of lists and appends 0s to the beginning of each sub_list
until they are all the same length. Used for sign-extending binary numbers.
"""
longest = max([len(l) for l in ls])
for i in range(len(ls)):
while len(ls[i]) < longest:
ls[i].insert(0, "0") |
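Because the helper mutates its argument in place, a short round-trip shows the effect; the sample bit-lists are arbitrary.
def prepend_zeros_to_lists(ls):
    """Left-pad every sub-list with "0" until all sub-lists share the longest length."""
    longest = max(len(l) for l in ls)
    for sub in ls:
        while len(sub) < longest:
            sub.insert(0, "0")

bits = [["1", "0", "1"], ["1"], ["1", "1"]]
prepend_zeros_to_lists(bits)
print(bits)  # [['1', '0', '1'], ['0', '0', '1'], ['0', '1', '1']]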
def on_delete(self, forced):
"""Session expiration callback
`forced`
If session item explicitly deleted, forced will be set to True. If
item expired, will be set to False.
"""
# Do not remove connection if it was not forced and there's running connection
if not forced and self.handler is not None and not self.is_closed:
self.promote()
else:
self.close() | Session expiration callback
`forced`
If session item explicitly deleted, forced will be set to True. If
item expired, will be set to False. | Below is the instruction that describes the task:
### Input:
Session expiration callback
`forced`
If session item explicitly deleted, forced will be set to True. If
item expired, will be set to False.
### Response:
def on_delete(self, forced):
"""Session expiration callback
`forced`
If session item explicitly deleted, forced will be set to True. If
item expired, will be set to False.
"""
# Do not remove connection if it was not forced and there's running connection
if not forced and self.handler is not None and not self.is_closed:
self.promote()
else:
self.close() |
def search_information(adjacency, transform=None, has_memory=False):
"""
Calculates search information of `adjacency`
Computes the amount of information (measured in bits) that a random walker
needs to follow the shortest path between a given pair of nodes.
Parameters
----------
adjacency : (N x N) array_like
Weighted/unweighted, direct/undirected connection weight/length array
transform : str, optional
If `adjacency` is a connection weight array, specify a transform to map
input connection weights to connection lengths. Options include ['log',
'inv'], where 'log' is `-np.log(adjacency)` and 'inv' is `1/adjacency`.
Default: None
has_memory : bool, optional
This flag defines whether or not the random walker "remembers" its
previous step, which has the effect of reducing the amount of
information needed to find the next state. Default: False
Returns
-------
SI : (N x N) ndarray
Pair-wise search information array. Note that `SI[i,j]` may be
different from `SI[j,i]``; hence, `SI` is not a symmetric matrix even
when `adjacency` is symmetric.
References
----------
.. [1] Goni, J., van den Heuvel, M. P., Avena-Koenigsberger, A., de
Mendizabal, N. V., Betzel, R. F., Griffa, A., Hagmann, P.,
Corominas-Murtra, B., Thiran, J-P., & Sporns, O. (2014). Resting-brain
functional connectivity predicted by analytic measures of network
communication. Proceedings of the National Academy of Sciences, 111(2),
833-838.
.. [2] Rosvall, M., Trusina, A., Minnhagen, P., & Sneppen, K. (2005).
Networks and cities: An information perspective. Physical Review
Letters, 94(2), 028701.
"""
N = len(adjacency)
if np.allclose(adjacency, adjacency.T):
flag_triu = True
else:
flag_triu = False
T = np.linalg.solve(np.diag(np.sum(adjacency, axis=1)), adjacency)
_, hops, Pmat = distance_wei_floyd(adjacency, transform)
SI = np.zeros((N, N))
SI[np.eye(N) > 0] = np.nan
for i in range(N):
for j in range(N):
if (j > i and flag_triu) or (not flag_triu and i != j):
path = retrieve_shortest_path(i, j, hops, Pmat)
lp = len(path) - 1
if flag_triu:
if np.any(path):
pr_step_ff = np.zeros(lp)
pr_step_bk = np.zeros(lp)
if has_memory:
pr_step_ff[0] = T[path[0], path[1]]
pr_step_bk[lp-1] = T[path[lp], path[lp-1]]
for z in range(1, lp):
pr_step_ff[z] = T[path[z], path[z+1]] / (1 - T[path[z-1], path[z]])
pr_step_bk[lp-z-1] = T[path[lp-z], path[lp-z-1]] / (1 - T[path[lp-z+1], path[lp-z]])
else:
for z in range(lp):
pr_step_ff[z] = T[path[z], path[z+1]]
pr_step_bk[z] = T[path[z+1], path[z]]
prob_sp_ff = np.prod(pr_step_ff)
prob_sp_bk = np.prod(pr_step_bk)
SI[i, j] = -np.log2(prob_sp_ff)
SI[j, i] = -np.log2(prob_sp_bk)
else:
if np.any(path):
pr_step_ff = np.zeros(lp)
if has_memory:
pr_step_ff[0] = T[path[0], path[1]]
for z in range(1, lp):
pr_step_ff[z] = T[path[z], path[z+1]] / (1 - T[path[z-1], path[z]])
else:
for z in range(lp):
pr_step_ff[z] = T[path[z], path[z+1]]
prob_sp_ff = np.prod(pr_step_ff)
SI[i, j] = -np.log2(prob_sp_ff)
else:
SI[i, j] = np.inf
return SI | Calculates search information of `adjacency`
Computes the amount of information (measured in bits) that a random walker
needs to follow the shortest path between a given pair of nodes.
Parameters
----------
adjacency : (N x N) array_like
Weighted/unweighted, direct/undirected connection weight/length array
transform : str, optional
If `adjacency` is a connection weight array, specify a transform to map
input connection weights to connection lengths. Options include ['log',
'inv'], where 'log' is `-np.log(adjacency)` and 'inv' is `1/adjacency`.
Default: None
has_memory : bool, optional
This flag defines whether or not the random walker "remembers" its
previous step, which has the effect of reducing the amount of
information needed to find the next state. Default: False
Returns
-------
SI : (N x N) ndarray
Pair-wise search information array. Note that `SI[i,j]` may be
different from `SI[j,i]``; hence, `SI` is not a symmetric matrix even
when `adjacency` is symmetric.
References
----------
.. [1] Goni, J., van den Heuvel, M. P., Avena-Koenigsberger, A., de
Mendizabal, N. V., Betzel, R. F., Griffa, A., Hagmann, P.,
Corominas-Murtra, B., Thiran, J-P., & Sporns, O. (2014). Resting-brain
functional connectivity predicted by analytic measures of network
communication. Proceedings of the National Academy of Sciences, 111(2),
833-838.
.. [2] Rosvall, M., Trusina, A., Minnhagen, P., & Sneppen, K. (2005).
Networks and cities: An information perspective. Physical Review
Letters, 94(2), 028701. | Below is the instruction that describes the task:
### Input:
Calculates search information of `adjacency`
Computes the amount of information (measured in bits) that a random walker
needs to follow the shortest path between a given pair of nodes.
Parameters
----------
adjacency : (N x N) array_like
Weighted/unweighted, direct/undirected connection weight/length array
transform : str, optional
If `adjacency` is a connection weight array, specify a transform to map
input connection weights to connection lengths. Options include ['log',
'inv'], where 'log' is `-np.log(adjacency)` and 'inv' is `1/adjacency`.
Default: None
has_memory : bool, optional
This flag defines whether or not the random walker "remembers" its
previous step, which has the effect of reducing the amount of
information needed to find the next state. Default: False
Returns
-------
SI : (N x N) ndarray
Pair-wise search information array. Note that `SI[i,j]` may be
different from `SI[j,i]``; hence, `SI` is not a symmetric matrix even
when `adjacency` is symmetric.
References
----------
.. [1] Goni, J., van den Heuvel, M. P., Avena-Koenigsberger, A., de
Mendizabal, N. V., Betzel, R. F., Griffa, A., Hagmann, P.,
Corominas-Murtra, B., Thiran, J-P., & Sporns, O. (2014). Resting-brain
functional connectivity predicted by analytic measures of network
communication. Proceedings of the National Academy of Sciences, 111(2),
833-838.
.. [2] Rosvall, M., Trusina, A., Minnhagen, P., & Sneppen, K. (2005).
Networks and cities: An information perspective. Physical Review
Letters, 94(2), 028701.
### Response:
def search_information(adjacency, transform=None, has_memory=False):
"""
Calculates search information of `adjacency`
Computes the amount of information (measured in bits) that a random walker
needs to follow the shortest path between a given pair of nodes.
Parameters
----------
adjacency : (N x N) array_like
Weighted/unweighted, direct/undirected connection weight/length array
transform : str, optional
If `adjacency` is a connection weight array, specify a transform to map
input connection weights to connection lengths. Options include ['log',
'inv'], where 'log' is `-np.log(adjacency)` and 'inv' is `1/adjacency`.
Default: None
has_memory : bool, optional
This flag defines whether or not the random walker "remembers" its
previous step, which has the effect of reducing the amount of
information needed to find the next state. Default: False
Returns
-------
SI : (N x N) ndarray
Pair-wise search information array. Note that `SI[i,j]` may be
different from `SI[j,i]``; hence, `SI` is not a symmetric matrix even
when `adjacency` is symmetric.
References
----------
.. [1] Goni, J., van den Heuvel, M. P., Avena-Koenigsberger, A., de
Mendizabal, N. V., Betzel, R. F., Griffa, A., Hagmann, P.,
Corominas-Murtra, B., Thiran, J-P., & Sporns, O. (2014). Resting-brain
functional connectivity predicted by analytic measures of network
communication. Proceedings of the National Academy of Sciences, 111(2),
833-838.
.. [2] Rosvall, M., Trusina, A., Minnhagen, P., & Sneppen, K. (2005).
Networks and cities: An information perspective. Physical Review
Letters, 94(2), 028701.
"""
N = len(adjacency)
if np.allclose(adjacency, adjacency.T):
flag_triu = True
else:
flag_triu = False
T = np.linalg.solve(np.diag(np.sum(adjacency, axis=1)), adjacency)
_, hops, Pmat = distance_wei_floyd(adjacency, transform)
SI = np.zeros((N, N))
SI[np.eye(N) > 0] = np.nan
for i in range(N):
for j in range(N):
if (j > i and flag_triu) or (not flag_triu and i != j):
path = retrieve_shortest_path(i, j, hops, Pmat)
lp = len(path) - 1
if flag_triu:
if np.any(path):
pr_step_ff = np.zeros(lp)
pr_step_bk = np.zeros(lp)
if has_memory:
pr_step_ff[0] = T[path[0], path[1]]
pr_step_bk[lp-1] = T[path[lp], path[lp-1]]
for z in range(1, lp):
pr_step_ff[z] = T[path[z], path[z+1]] / (1 - T[path[z-1], path[z]])
pr_step_bk[lp-z-1] = T[path[lp-z], path[lp-z-1]] / (1 - T[path[lp-z+1], path[lp-z]])
else:
for z in range(lp):
pr_step_ff[z] = T[path[z], path[z+1]]
pr_step_bk[z] = T[path[z+1], path[z]]
prob_sp_ff = np.prod(pr_step_ff)
prob_sp_bk = np.prod(pr_step_bk)
SI[i, j] = -np.log2(prob_sp_ff)
SI[j, i] = -np.log2(prob_sp_bk)
else:
if np.any(path):
pr_step_ff = np.zeros(lp)
if has_memory:
pr_step_ff[0] = T[path[0], path[1]]
for z in range(1, lp):
pr_step_ff[z] = T[path[z], path[z+1]] / (1 - T[path[z-1], path[z]])
else:
for z in range(lp):
pr_step_ff[z] = T[path[z], path[z+1]]
prob_sp_ff = np.prod(pr_step_ff)
SI[i, j] = -np.log2(prob_sp_ff)
else:
SI[i, j] = np.inf
return SI |
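The quantity being accumulated is -log2 of the product of step probabilities along the shortest path. The numpy-only sketch below illustrates that computation on a hand-picked 3-node graph with a hard-coded path [0, 2]; it does not use the distance_wei_floyd/retrieve_shortest_path helpers from the function above.
import numpy as np

# Toy undirected binary graph: triangle with edges 0-1, 1-2, 0-2.
A = np.array([[0., 1., 1.],
              [1., 0., 1.],
              [1., 1., 0.]])

# Row-stochastic transition matrix of an unbiased random walker.
T = A / A.sum(axis=1, keepdims=True)

# Assume the shortest path from node 0 to node 2 is the direct hop.
path = [0, 2]
pr_step = [T[path[z], path[z + 1]] for z in range(len(path) - 1)]

# Memoryless search information in bits for that path.
SI_02 = -np.log2(np.prod(pr_step))
print(SI_02)  # 1.0: the walker must pick the right one of two equally likely moves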
def ParseOptions(cls, options, output_module):
"""Parses and validates options.
Args:
options (argparse.Namespace): parser options.
output_module (OutputModule): output module to configure.
Raises:
BadConfigObject: when the output module object is of the wrong type.
BadConfigOption: when a configuration parameter fails validation.
"""
elastic_output_modules = (
elastic.ElasticsearchOutputModule, elastic.ElasticsearchOutputModule)
if not isinstance(output_module, elastic_output_modules):
raise errors.BadConfigObject(
'Output module is not an instance of ElasticsearchOutputModule')
index_name = cls._ParseStringOption(
options, 'index_name', default_value=cls._DEFAULT_INDEX_NAME)
document_type = cls._ParseStringOption(
options, 'document_type', default_value=cls._DEFAULT_DOCUMENT_TYPE)
flush_interval = cls._ParseNumericOption(
options, 'flush_interval', default_value=cls._DEFAULT_FLUSH_INTERVAL)
raw_fields = getattr(
options, 'raw_fields', cls._DEFAULT_RAW_FIELDS)
elastic_user = cls._ParseStringOption(
options, 'elastic_user', default_value=cls._DEFAULT_ELASTIC_USER)
use_ssl = getattr(options, 'use_ssl', False)
ca_certificates_path = cls._ParseStringOption(
options, 'ca_certificates_file_path',
default_value=cls._DEFAULT_CA_CERTS)
elastic_url_prefix = cls._ParseStringOption(
options, 'elastic_url_prefix', default_value=cls._DEFAULT_URL_PREFIX)
if elastic_user is not None:
elastic_password = getpass.getpass(
'Enter your Elasticsearch password: ')
else:
elastic_password = None
ElasticSearchServerArgumentsHelper.ParseOptions(options, output_module)
output_module.SetIndexName(index_name)
output_module.SetDocumentType(document_type)
output_module.SetFlushInterval(flush_interval)
output_module.SetRawFields(raw_fields)
output_module.SetUsername(elastic_user)
output_module.SetPassword(elastic_password)
output_module.SetUseSSL(use_ssl)
output_module.SetCACertificatesPath(ca_certificates_path)
output_module.SetURLPrefix(elastic_url_prefix) | Parses and validates options.
Args:
options (argparse.Namespace): parser options.
output_module (OutputModule): output module to configure.
Raises:
BadConfigObject: when the output module object is of the wrong type.
BadConfigOption: when a configuration parameter fails validation. | Below is the instruction that describes the task:
### Input:
Parses and validates options.
Args:
options (argparse.Namespace): parser options.
output_module (OutputModule): output module to configure.
Raises:
BadConfigObject: when the output module object is of the wrong type.
BadConfigOption: when a configuration parameter fails validation.
### Response:
def ParseOptions(cls, options, output_module):
"""Parses and validates options.
Args:
options (argparse.Namespace): parser options.
output_module (OutputModule): output module to configure.
Raises:
BadConfigObject: when the output module object is of the wrong type.
BadConfigOption: when a configuration parameter fails validation.
"""
elastic_output_modules = (
elastic.ElasticsearchOutputModule, elastic.ElasticsearchOutputModule)
if not isinstance(output_module, elastic_output_modules):
raise errors.BadConfigObject(
'Output module is not an instance of ElasticsearchOutputModule')
index_name = cls._ParseStringOption(
options, 'index_name', default_value=cls._DEFAULT_INDEX_NAME)
document_type = cls._ParseStringOption(
options, 'document_type', default_value=cls._DEFAULT_DOCUMENT_TYPE)
flush_interval = cls._ParseNumericOption(
options, 'flush_interval', default_value=cls._DEFAULT_FLUSH_INTERVAL)
raw_fields = getattr(
options, 'raw_fields', cls._DEFAULT_RAW_FIELDS)
elastic_user = cls._ParseStringOption(
options, 'elastic_user', default_value=cls._DEFAULT_ELASTIC_USER)
use_ssl = getattr(options, 'use_ssl', False)
ca_certificates_path = cls._ParseStringOption(
options, 'ca_certificates_file_path',
default_value=cls._DEFAULT_CA_CERTS)
elastic_url_prefix = cls._ParseStringOption(
options, 'elastic_url_prefix', default_value=cls._DEFAULT_URL_PREFIX)
if elastic_user is not None:
elastic_password = getpass.getpass(
'Enter your Elasticsearch password: ')
else:
elastic_password = None
ElasticSearchServerArgumentsHelper.ParseOptions(options, output_module)
output_module.SetIndexName(index_name)
output_module.SetDocumentType(document_type)
output_module.SetFlushInterval(flush_interval)
output_module.SetRawFields(raw_fields)
output_module.SetUsername(elastic_user)
output_module.SetPassword(elastic_password)
output_module.SetUseSSL(use_ssl)
output_module.SetCACertificatesPath(ca_certificates_path)
output_module.SetURLPrefix(elastic_url_prefix) |
def connect(self):
"""
Method automatically called by the run() method of the AgentProxyThread
"""
if ('SSH_AUTH_SOCK' in os.environ) and (sys.platform != 'win32'):
conn = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
try:
retry_on_signal(lambda: conn.connect(os.environ['SSH_AUTH_SOCK']))
except:
# probably a dangling env var: the ssh agent is gone
return
elif sys.platform == 'win32':
import win_pageant
if win_pageant.can_talk_to_agent():
conn = win_pageant.PageantConnection()
else:
return
else:
# no agent support
return
self._conn = conn | Method automatically called by the run() method of the AgentProxyThread | Below is the instruction that describes the task:
### Input:
Method automatically called by the run() method of the AgentProxyThread
### Response:
def connect(self):
"""
Method automatically called by the run() method of the AgentProxyThread
"""
if ('SSH_AUTH_SOCK' in os.environ) and (sys.platform != 'win32'):
conn = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
try:
retry_on_signal(lambda: conn.connect(os.environ['SSH_AUTH_SOCK']))
except:
# probably a dangling env var: the ssh agent is gone
return
elif sys.platform == 'win32':
import win_pageant
if win_pageant.can_talk_to_agent():
conn = win_pageant.PageantConnection()
else:
return
else:
# no agent support
return
self._conn = conn |
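Stripped of the paramiko plumbing, the Unix-socket branch is an environment-variable check plus a plain socket connect. The standard-library sketch below is safe to run anywhere: it returns None when no agent socket is advertised or when the advertised path is stale.
import os
import socket
import sys

def connect_to_ssh_agent():
    """Return a connected AF_UNIX socket to the local ssh-agent, or None."""
    sock_path = os.environ.get('SSH_AUTH_SOCK')
    if not sock_path or sys.platform == 'win32':
        return None   # no agent advertised (Windows would need Pageant instead)
    conn = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
    try:
        conn.connect(sock_path)
    except OSError:
        conn.close()
        return None   # dangling env var: the ssh agent is gone
    return conn

print('agent socket:', connect_to_ssh_agent())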
def dropna(self, drop_nan=True, drop_masked=True, column_names=None):
"""Create a shallow copy of a DataFrame, with filtering set using select_non_missing.
:param drop_nan: drop rows when there is a NaN in any of the columns (will only affect float values)
:param drop_masked: drop rows when there is a masked value in any of the columns
:param column_names: The columns to consider, default: all (real, non-virtual) columns
:rtype: DataFrame
"""
copy = self.copy()
copy.select_non_missing(drop_nan=drop_nan, drop_masked=drop_masked, column_names=column_names,
name=FILTER_SELECTION_NAME, mode='and')
return copy | Create a shallow copy of a DataFrame, with filtering set using select_non_missing.
:param drop_nan: drop rows when there is a NaN in any of the columns (will only affect float values)
:param drop_masked: drop rows when there is a masked value in any of the columns
:param column_names: The columns to consider, default: all (real, non-virtual) columns
:rtype: DataFrame | Below is the instruction that describes the task:
### Input:
Create a shallow copy of a DataFrame, with filtering set using select_non_missing.
:param drop_nan: drop rows when there is a NaN in any of the columns (will only affect float values)
:param drop_masked: drop rows when there is a masked value in any of the columns
:param column_names: The columns to consider, default: all (real, non-virtual) columns
:rtype: DataFrame
### Response:
def dropna(self, drop_nan=True, drop_masked=True, column_names=None):
"""Create a shallow copy of a DataFrame, with filtering set using select_non_missing.
:param drop_nan: drop rows when there is a NaN in any of the columns (will only affect float values)
:param drop_masked: drop rows when there is a masked value in any of the columns
:param column_names: The columns to consider, default: all (real, non-virtual) columns
:rtype: DataFrame
"""
copy = self.copy()
copy.select_non_missing(drop_nan=drop_nan, drop_masked=drop_masked, column_names=column_names,
name=FILTER_SELECTION_NAME, mode='and')
return copy |
def password(self, value):
"""gets/sets the current password"""
if isinstance(value, str):
self._password = value
self._handler = None | gets/sets the current password | Below is the instruction that describes the task:
### Input:
gets/sets the current password
### Response:
def password(self, value):
"""gets/sets the current password"""
if isinstance(value, str):
self._password = value
self._handler = None |
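The setter above pairs credential assignment with cache invalidation. The Connection class below is a hypothetical, self-contained illustration of that pattern; only the password/_handler naming is borrowed from the original.
class Connection:
    """Hypothetical example: changing credentials drops the cached handler."""

    def __init__(self, password=""):
        self._password = password
        self._handler = None

    @property
    def password(self):
        """gets/sets the current password"""
        return self._password

    @password.setter
    def password(self, value):
        if isinstance(value, str):
            self._password = value
            self._handler = None   # force the next access to rebuild the handler

    @property
    def handler(self):
        if self._handler is None:
            self._handler = object()   # stand-in for an expensive (re)connect
        return self._handler

conn = Connection("old-secret")
first = conn.handler
conn.password = "new-secret"
print(first is conn.handler)   # False: the cached handler was invalidated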
def import_args_from_dict(value, args, config):
"""Replaces some arguments by those specified by a key-value dictionary.
This function will be recursively called on a dictionary looking for any
value containing a "$" variable. If found, the value will be replaced
by the attribute in "args" of the same name.
It is used to load arguments from the CLI and any extra configuration
parameters passed in recipes.
Args:
value: The value of a {key: value} dictionary. This is passed recursively
and may change in nature: string, list, or dict. The top-level variable
should be the dictionary that is supposed to be recursively traversed.
args: A {key: value} dictionary used to do replacements.
config: A dftimewolf.Config class containing configuration information
Returns:
The first caller of the function will receive a dictionary in which strings
starting with "@" are replaced by the parameters in args.
"""
if isinstance(value, six.string_types):
for match in TOKEN_REGEX.finditer(str(value)):
token = match.group(1)
if token in args:
actual_param = args[token]
if isinstance(actual_param, six.string_types):
value = value.replace("@"+token, args[token])
else:
value = actual_param
elif isinstance(value, list):
return [import_args_from_dict(item, args, config) for item in value]
elif isinstance(value, dict):
return {
key: import_args_from_dict(val, args, config)
for key, val in value.items()
}
elif isinstance(value, tuple):
return tuple(import_args_from_dict(val, args, config) for val in value)
return value | Replaces some arguments by those specified by a key-value dictionary.
This function will be recursively called on a dictionary looking for any
value containing a "$" variable. If found, the value will be replaced
by the attribute in "args" of the same name.
It is used to load arguments from the CLI and any extra configuration
parameters passed in recipes.
Args:
value: The value of a {key: value} dictionary. This is passed recursively
and may change in nature: string, list, or dict. The top-level variable
should be the dictionary that is supposed to be recursively traversed.
args: A {key: value} dictionary used to do replacements.
config: A dftimewolf.Config class containing configuration information
Returns:
The first caller of the function will receive a dictionary in which strings
starting with "@" are replaced by the parameters in args. | Below is the instruction that describes the task:
### Input:
Replaces some arguments by those specified by a key-value dictionary.
This function will be recursively called on a dictionary looking for any
value containing a "$" variable. If found, the value will be replaced
by the attribute in "args" of the same name.
It is used to load arguments from the CLI and any extra configuration
parameters passed in recipes.
Args:
value: The value of a {key: value} dictionary. This is passed recursively
and may change in nature: string, list, or dict. The top-level variable
should be the dictionary that is supposed to be recursively traversed.
args: A {key: value} dictionary used to do replacements.
config: A dftimewolf.Config class containing configuration information
Returns:
The first caller of the function will receive a dictionary in which strings
starting with "@" are replaced by the parameters in args.
### Response:
def import_args_from_dict(value, args, config):
"""Replaces some arguments by those specified by a key-value dictionary.
This function will be recursively called on a dictionary looking for any
value containing a "$" variable. If found, the value will be replaced
by the attribute in "args" of the same name.
It is used to load arguments from the CLI and any extra configuration
parameters passed in recipes.
Args:
value: The value of a {key: value} dictionary. This is passed recursively
and may change in nature: string, list, or dict. The top-level variable
should be the dictionary that is supposed to be recursively traversed.
args: A {key: value} dictionary used to do replacements.
config: A dftimewolf.Config class containing configuration information
Returns:
The first caller of the function will receive a dictionary in which strings
starting with "@" are replaced by the parameters in args.
"""
if isinstance(value, six.string_types):
for match in TOKEN_REGEX.finditer(str(value)):
token = match.group(1)
if token in args:
actual_param = args[token]
if isinstance(actual_param, six.string_types):
value = value.replace("@"+token, args[token])
else:
value = actual_param
elif isinstance(value, list):
return [import_args_from_dict(item, args, config) for item in value]
elif isinstance(value, dict):
return {
key: import_args_from_dict(val, args, config)
for key, val in value.items()
}
elif isinstance(value, tuple):
return tuple(import_args_from_dict(val, args, config) for val in value)
return value |
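A trimmed-down, dependency-free version of the same recursive substitution shows the control flow. The "@name" token syntax is an assumption here, because the real TOKEN_REGEX (and the unused config argument) are defined elsewhere in dftimewolf.
import re

TOKEN_REGEX = re.compile(r'@(\w+)')   # assumed token syntax: "@name"

def import_args_from_dict(value, args):
    """Recursively replace "@token" markers with values from `args`."""
    if isinstance(value, str):
        for match in TOKEN_REGEX.finditer(value):
            token = match.group(1)
            if token in args:
                actual = args[token]
                if isinstance(actual, str):
                    value = value.replace('@' + token, actual)
                else:
                    value = actual   # non-string values replace the whole field
    elif isinstance(value, list):
        return [import_args_from_dict(item, args) for item in value]
    elif isinstance(value, dict):
        return {k: import_args_from_dict(v, args) for k, v in value.items()}
    elif isinstance(value, tuple):
        return tuple(import_args_from_dict(v, args) for v in value)
    return value

recipe = {'paths': ['@output_dir/report.txt'], 'verbose': '@verbose'}
print(import_args_from_dict(recipe, {'output_dir': '/tmp/case', 'verbose': True}))
# {'paths': ['/tmp/case/report.txt'], 'verbose': True}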
def Join(self, Id):
"""Joins with another call to form a conference.
:Parameters:
Id : int
Call Id of the other call to join to the conference.
:return: Conference object.
:rtype: `Conference`
"""
#self._Alter('JOIN_CONFERENCE', Id)
reply = self._Owner._DoCommand('SET CALL %s JOIN_CONFERENCE %s' % (self.Id, Id),
'CALL %s CONF_ID' % self.Id)
return Conference(self._Owner, reply.split()[-1]) | Joins with another call to form a conference.
:Parameters:
Id : int
Call Id of the other call to join to the conference.
:return: Conference object.
:rtype: `Conference` | Below is the instruction that describes the task:
### Input:
Joins with another call to form a conference.
:Parameters:
Id : int
Call Id of the other call to join to the conference.
:return: Conference object.
:rtype: `Conference`
### Response:
def Join(self, Id):
"""Joins with another call to form a conference.
:Parameters:
Id : int
Call Id of the other call to join to the conference.
:return: Conference object.
:rtype: `Conference`
"""
#self._Alter('JOIN_CONFERENCE', Id)
reply = self._Owner._DoCommand('SET CALL %s JOIN_CONFERENCE %s' % (self.Id, Id),
'CALL %s CONF_ID' % self.Id)
return Conference(self._Owner, reply.split()[-1]) |
def apply_update(self, value, index):
"""
Record an opendnp3 data value (Analog, Binary, etc.) in the outstation's database.
The data value gets sent to the Master as a side-effect.
:param value: An instance of Analog, Binary, or another opendnp3 data value.
:param index: (integer) Index of the data definition in the opendnp3 database.
"""
_log.debug('Recording {} measurement, index={}, value={}'.format(type(value).__name__, index, value.value))
builder = asiodnp3.UpdateBuilder()
builder.Update(value, index)
update = builder.Build()
OutstationApplication.get_outstation().Apply(update) | Record an opendnp3 data value (Analog, Binary, etc.) in the outstation's database.
The data value gets sent to the Master as a side-effect.
:param value: An instance of Analog, Binary, or another opendnp3 data value.
:param index: (integer) Index of the data definition in the opendnp3 database. | Below is the instruction that describes the task:
### Input:
Record an opendnp3 data value (Analog, Binary, etc.) in the outstation's database.
The data value gets sent to the Master as a side-effect.
:param value: An instance of Analog, Binary, or another opendnp3 data value.
:param index: (integer) Index of the data definition in the opendnp3 database.
### Response:
def apply_update(self, value, index):
"""
Record an opendnp3 data value (Analog, Binary, etc.) in the outstation's database.
The data value gets sent to the Master as a side-effect.
:param value: An instance of Analog, Binary, or another opendnp3 data value.
:param index: (integer) Index of the data definition in the opendnp3 database.
"""
_log.debug('Recording {} measurement, index={}, value={}'.format(type(value).__name__, index, value.value))
builder = asiodnp3.UpdateBuilder()
builder.Update(value, index)
update = builder.Build()
OutstationApplication.get_outstation().Apply(update) |
def update_index(model_items, model_name, action='index', bulk_size=100, num_docs=-1, start_date=None, end_date=None, refresh=True):
'''
Updates the index for the provided model_items.
:param model_items: a list of model_items (django Model instances, or proxy instances) which are to be indexed/updated or deleted.
If action is 'index', the model_items must be serializable objects. If action is 'delete', the model_items must be primary keys
corresponding to obects in the index.
:param model_name: doctype, which must also be the model name.
:param action: the action that you'd like to perform on this group of data. Must be in ('index', 'delete') and defaults to 'index.'
:param bulk_size: bulk size for indexing. Defaults to 100.
:param num_docs: maximum number of model_items from the provided list to be indexed.
:param start_date: start date for indexing. Must be as YYYY-MM-DD.
:param end_date: end date for indexing. Must be as YYYY-MM-DD.
:param refresh: a boolean that determines whether to refresh the index, making all operations performed since the last refresh
immediately available for search, instead of needing to wait for the scheduled Elasticsearch execution. Defaults to True.
:note: If model_items contain multiple models, then num_docs is applied to *each* model. For example, if bulk_size is set to 5,
and item contains models Article and Article2, then 5 model_items of Article *and* 5 model_items of Article2 will be indexed.
'''
src = Bungiesearch()
if action == 'delete' and not hasattr(model_items, '__iter__'):
raise ValueError("If action is 'delete', model_items must be an iterable of primary keys.")
logger.info('Getting index for model {}.'.format(model_name))
for index_name in src.get_index(model_name):
index_instance = src.get_model_index(model_name)
model = index_instance.get_model()
if num_docs == -1:
if isinstance(model_items, (list, tuple)):
num_docs = len(model_items)
else:
model_items = filter_model_items(index_instance, model_items, model_name, start_date, end_date)
num_docs = model_items.count()
if not model_items.ordered:
model_items = model_items.order_by('pk')
else:
logger.warning('Limiting the number of model_items to {} to {}.'.format(action, num_docs))
logger.info('{} {} documents on index {}'.format(action, num_docs, index_name))
prev_step = 0
max_docs = num_docs + bulk_size if num_docs > bulk_size else bulk_size + 1
for next_step in range(bulk_size, max_docs, bulk_size):
logger.info('{}: documents {} to {} of {} total on index {}.'.format(action.capitalize(), prev_step, next_step, num_docs, index_name))
data = create_indexed_document(index_instance, model_items[prev_step:next_step], action)
bulk_index(src.get_es_instance(), data, index=index_name, doc_type=model.__name__, raise_on_error=True)
prev_step = next_step
if refresh:
src.get_es_instance().indices.refresh(index=index_name) | Updates the index for the provided model_items.
:param model_items: a list of model_items (django Model instances, or proxy instances) which are to be indexed/updated or deleted.
If action is 'index', the model_items must be serializable objects. If action is 'delete', the model_items must be primary keys
corresponding to objects in the index.
:param model_name: doctype, which must also be the model name.
:param action: the action that you'd like to perform on this group of data. Must be in ('index', 'delete') and defaults to 'index.'
:param bulk_size: bulk size for indexing. Defaults to 100.
:param num_docs: maximum number of model_items from the provided list to be indexed.
:param start_date: start date for indexing. Must be as YYYY-MM-DD.
:param end_date: end date for indexing. Must be as YYYY-MM-DD.
:param refresh: a boolean that determines whether to refresh the index, making all operations performed since the last refresh
immediately available for search, instead of needing to wait for the scheduled Elasticsearch execution. Defaults to True.
:note: If model_items contain multiple models, then num_docs is applied to *each* model. For example, if bulk_size is set to 5,
and item contains models Article and Article2, then 5 model_items of Article *and* 5 model_items of Article2 will be indexed. | Below is the instruction that describes the task:
### Input:
Updates the index for the provided model_items.
:param model_items: a list of model_items (django Model instances, or proxy instances) which are to be indexed/updated or deleted.
If action is 'index', the model_items must be serializable objects. If action is 'delete', the model_items must be primary keys
corresponding to obects in the index.
:param model_name: doctype, which must also be the model name.
:param action: the action that you'd like to perform on this group of data. Must be in ('index', 'delete') and defaults to 'index.'
:param bulk_size: bulk size for indexing. Defaults to 100.
:param num_docs: maximum number of model_items from the provided list to be indexed.
:param start_date: start date for indexing. Must be as YYYY-MM-DD.
:param end_date: end date for indexing. Must be as YYYY-MM-DD.
:param refresh: a boolean that determines whether to refresh the index, making all operations performed since the last refresh
immediately available for search, instead of needing to wait for the scheduled Elasticsearch execution. Defaults to True.
:note: If model_items contain multiple models, then num_docs is applied to *each* model. For example, if bulk_size is set to 5,
and item contains models Article and Article2, then 5 model_items of Article *and* 5 model_items of Article2 will be indexed.
### Response:
def update_index(model_items, model_name, action='index', bulk_size=100, num_docs=-1, start_date=None, end_date=None, refresh=True):
'''
Updates the index for the provided model_items.
:param model_items: a list of model_items (django Model instances, or proxy instances) which are to be indexed/updated or deleted.
If action is 'index', the model_items must be serializable objects. If action is 'delete', the model_items must be primary keys
corresponding to objects in the index.
:param model_name: doctype, which must also be the model name.
:param action: the action that you'd like to perform on this group of data. Must be in ('index', 'delete') and defaults to 'index.'
:param bulk_size: bulk size for indexing. Defaults to 100.
:param num_docs: maximum number of model_items from the provided list to be indexed.
:param start_date: start date for indexing. Must be as YYYY-MM-DD.
:param end_date: end date for indexing. Must be as YYYY-MM-DD.
:param refresh: a boolean that determines whether to refresh the index, making all operations performed since the last refresh
immediately available for search, instead of needing to wait for the scheduled Elasticsearch execution. Defaults to True.
:note: If model_items contain multiple models, then num_docs is applied to *each* model. For example, if bulk_size is set to 5,
and item contains models Article and Article2, then 5 model_items of Article *and* 5 model_items of Article2 will be indexed.
'''
src = Bungiesearch()
if action == 'delete' and not hasattr(model_items, '__iter__'):
raise ValueError("If action is 'delete', model_items must be an iterable of primary keys.")
logger.info('Getting index for model {}.'.format(model_name))
for index_name in src.get_index(model_name):
index_instance = src.get_model_index(model_name)
model = index_instance.get_model()
if num_docs == -1:
if isinstance(model_items, (list, tuple)):
num_docs = len(model_items)
else:
model_items = filter_model_items(index_instance, model_items, model_name, start_date, end_date)
num_docs = model_items.count()
if not model_items.ordered:
model_items = model_items.order_by('pk')
else:
logger.warning('Limiting the number of model_items to {} to {}.'.format(action, num_docs))
logger.info('{} {} documents on index {}'.format(action, num_docs, index_name))
prev_step = 0
max_docs = num_docs + bulk_size if num_docs > bulk_size else bulk_size + 1
for next_step in range(bulk_size, max_docs, bulk_size):
logger.info('{}: documents {} to {} of {} total on index {}.'.format(action.capitalize(), prev_step, next_step, num_docs, index_name))
data = create_indexed_document(index_instance, model_items[prev_step:next_step], action)
bulk_index(src.get_es_instance(), data, index=index_name, doc_type=model.__name__, raise_on_error=True)
prev_step = next_step
if refresh:
src.get_es_instance().indices.refresh(index=index_name) |
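The windowing that drives the bulk loop (prev_step/next_step slices of bulk_size items) is the easiest part to get wrong, so the Elasticsearch-free sketch below reproduces just that arithmetic.
def iter_bulk_slices(num_docs, bulk_size):
    """Yield (start, stop) windows matching the update_index() loop above."""
    prev_step = 0
    max_docs = num_docs + bulk_size if num_docs > bulk_size else bulk_size + 1
    for next_step in range(bulk_size, max_docs, bulk_size):
        yield prev_step, next_step
        prev_step = next_step

print(list(iter_bulk_slices(7, 3)))   # [(0, 3), (3, 6), (6, 9)]: three windows cover 7 docs
print(list(iter_bulk_slices(2, 3)))   # [(0, 3)]: fewer docs than one bulk still yields one window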
def read_union(fo, writer_schema, reader_schema=None):
"""A union is encoded by first writing a long value indicating the
zero-based position within the union of the schema of its value.
The value is then encoded per the indicated schema within the union.
"""
# schema resolution
index = read_long(fo)
if reader_schema:
# Handle case where the reader schema is just a single type (not union)
if not isinstance(reader_schema, list):
if match_types(writer_schema[index], reader_schema):
return read_data(fo, writer_schema[index], reader_schema)
else:
for schema in reader_schema:
if match_types(writer_schema[index], schema):
return read_data(fo, writer_schema[index], schema)
msg = 'schema mismatch: %s not found in %s' % \
(writer_schema, reader_schema)
raise SchemaResolutionError(msg)
else:
return read_data(fo, writer_schema[index]) | A union is encoded by first writing a long value indicating the
zero-based position within the union of the schema of its value.
The value is then encoded per the indicated schema within the union. | Below is the instruction that describes the task:
### Input:
A union is encoded by first writing a long value indicating the
zero-based position within the union of the schema of its value.
The value is then encoded per the indicated schema within the union.
### Response:
def read_union(fo, writer_schema, reader_schema=None):
"""A union is encoded by first writing a long value indicating the
zero-based position within the union of the schema of its value.
The value is then encoded per the indicated schema within the union.
"""
# schema resolution
index = read_long(fo)
if reader_schema:
# Handle case where the reader schema is just a single type (not union)
if not isinstance(reader_schema, list):
if match_types(writer_schema[index], reader_schema):
return read_data(fo, writer_schema[index], reader_schema)
else:
for schema in reader_schema:
if match_types(writer_schema[index], schema):
return read_data(fo, writer_schema[index], schema)
msg = 'schema mismatch: %s not found in %s' % \
(writer_schema, reader_schema)
raise SchemaResolutionError(msg)
else:
return read_data(fo, writer_schema[index]) |
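Decoding a union therefore takes two reads: a zigzag variable-length long for the branch index, then the value per the selected branch. The sketch below is a simplified standalone reader, not fastavro's internals; it hard-codes support for a ['null', 'string'] writer union only.
import io

def read_long(fo):
    """Decode one Avro long (variable-length, zigzag-encoded integer)."""
    b = fo.read(1)[0]
    n = b & 0x7F
    shift = 7
    while b & 0x80:
        b = fo.read(1)[0]
        n |= (b & 0x7F) << shift
        shift += 7
    return (n >> 1) ^ -(n & 1)

def read_union_simple(fo, writer_schema):
    """Read the branch index, then decode the value for that branch."""
    branch = writer_schema[read_long(fo)]
    if branch == 'null':
        return None
    if branch == 'string':
        size = read_long(fo)               # strings are length-prefixed UTF-8
        return fo.read(size).decode('utf-8')
    raise NotImplementedError(branch)

# b'\x02' is zigzag(1) -> branch 1 ('string'); b'\x06' is zigzag(3) -> 3 bytes follow.
payload = io.BytesIO(b'\x02\x06abc')
print(read_union_simple(payload, ['null', 'string']))   # 'abc'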
def facility(self, column=None, value=None, **kwargs):
"""
Check information related to Radiation facilities.
>>> RADInfo().facility('state_code', 'CA')
"""
return self._resolve_call('RAD_FACILITY', column, value, **kwargs) | Check information related to Radiation facilities.
>>> RADInfo().facility('state_code', 'CA') | Below is the instruction that describes the task:
### Input:
Check information related to Radiation facilities.
>>> RADInfo().facility('state_code', 'CA')
### Response:
def facility(self, column=None, value=None, **kwargs):
"""
Check information related to Radiation facilities.
>>> RADInfo().facility('state_code', 'CA')
"""
return self._resolve_call('RAD_FACILITY', column, value, **kwargs) |
def get_instance(self, payload):
"""
Build an instance of RecordingInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.api.v2010.account.call.recording.RecordingInstance
:rtype: twilio.rest.api.v2010.account.call.recording.RecordingInstance
"""
return RecordingInstance(
self._version,
payload,
account_sid=self._solution['account_sid'],
call_sid=self._solution['call_sid'],
) | Build an instance of RecordingInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.api.v2010.account.call.recording.RecordingInstance
:rtype: twilio.rest.api.v2010.account.call.recording.RecordingInstance | Below is the instruction that describes the task:
### Input:
Build an instance of RecordingInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.api.v2010.account.call.recording.RecordingInstance
:rtype: twilio.rest.api.v2010.account.call.recording.RecordingInstance
### Response:
def get_instance(self, payload):
"""
Build an instance of RecordingInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.api.v2010.account.call.recording.RecordingInstance
:rtype: twilio.rest.api.v2010.account.call.recording.RecordingInstance
"""
return RecordingInstance(
self._version,
payload,
account_sid=self._solution['account_sid'],
call_sid=self._solution['call_sid'],
) |
def _array_handler(self, _cursor_type):
"""
Handles all array types.
Resolves its element type and makes an Array typedesc.
"""
# The element type has been previously declared
# we need to get the canonical typedef, in some cases
_type = _cursor_type.get_canonical()
size = _type.get_array_size()
if size == -1 and _type.kind == TypeKind.INCOMPLETEARRAY:
size = 0
# FIXME: Incomplete Array handling at end of record.
# https://gcc.gnu.org/onlinedocs/gcc/Zero-Length.html
# FIXME VARIABLEARRAY DEPENDENTSIZEDARRAY
_array_type = _type.get_array_element_type() # .get_canonical()
if self.is_fundamental_type(_array_type):
_subtype = self.parse_cursor_type(_array_type)
elif self.is_pointer_type(_array_type):
# code.interact(local=locals())
# pointers to POD have no declaration ??
# FIXME test_struct_with_pointer x_n_t g[1]
_subtype = self.parse_cursor_type(_array_type)
elif self.is_array_type(_array_type):
_subtype = self.parse_cursor_type(_array_type)
else:
_subtype_decl = _array_type.get_declaration()
_subtype = self.parse_cursor(_subtype_decl)
# if _subtype_decl.kind == CursorKind.NO_DECL_FOUND:
# pass
#_subtype_name = self.get_unique_name(_subtype_decl)
#_subtype = self.get_registered(_subtype_name)
obj = typedesc.ArrayType(_subtype, size)
obj.location = _subtype.location
return obj | Handles all array types.
Resolves its element type and makes an Array typedesc. | Below is the instruction that describes the task:
### Input:
Handles all array types.
Resolves its element type and makes an Array typedesc.
### Response:
def _array_handler(self, _cursor_type):
"""
Handles all array types.
Resolves it's element type and makes a Array typedesc.
"""
# The element type has been previously declared
# we need to get the canonical typedef, in some cases
_type = _cursor_type.get_canonical()
size = _type.get_array_size()
if size == -1 and _type.kind == TypeKind.INCOMPLETEARRAY:
size = 0
# FIXME: Incomplete Array handling at end of record.
# https://gcc.gnu.org/onlinedocs/gcc/Zero-Length.html
# FIXME VARIABLEARRAY DEPENDENTSIZEDARRAY
_array_type = _type.get_array_element_type() # .get_canonical()
if self.is_fundamental_type(_array_type):
_subtype = self.parse_cursor_type(_array_type)
elif self.is_pointer_type(_array_type):
# code.interact(local=locals())
# pointers to POD have no declaration ??
# FIXME test_struct_with_pointer x_n_t g[1]
_subtype = self.parse_cursor_type(_array_type)
elif self.is_array_type(_array_type):
_subtype = self.parse_cursor_type(_array_type)
else:
_subtype_decl = _array_type.get_declaration()
_subtype = self.parse_cursor(_subtype_decl)
# if _subtype_decl.kind == CursorKind.NO_DECL_FOUND:
# pass
#_subtype_name = self.get_unique_name(_subtype_decl)
#_subtype = self.get_registered(_subtype_name)
obj = typedesc.ArrayType(_subtype, size)
obj.location = _subtype.location
return obj |
def get_automation(self, automation_id, refresh=False):
"""Get a single automation."""
if self._automations is None:
self.get_automations()
refresh = False
automation = self._automations.get(str(automation_id))
if automation and refresh:
automation.refresh()
return automation | Get a single automation. | Below is the instruction that describes the task:
### Input:
Get a single automation.
### Response:
def get_automation(self, automation_id, refresh=False):
"""Get a single automation."""
if self._automations is None:
self.get_automations()
refresh = False
automation = self._automations.get(str(automation_id))
if automation and refresh:
automation.refresh()
return automation |
def inicializar_y_capturar_excepciones_simple(func):
    "Decorator to initialize and capture errors (basic standalone version)"
@functools.wraps(func)
def capturar_errores_wrapper(self, *args, **kwargs):
self.inicializar()
try:
return func(self, *args, **kwargs)
except:
ex = exception_info()
self.Excepcion = ex['name']
self.Traceback = ex['msg']
if self.LanzarExcepciones:
raise
else:
return False
return capturar_errores_wrapper | Decorator to initialize and capture errors (basic standalone version) | Below is the instruction that describes the task:
### Input:
Decorator to initialize and capture errors (basic standalone version)
### Response:
def inicializar_y_capturar_excepciones_simple(func):
    "Decorator to initialize and capture errors (basic standalone version)"
@functools.wraps(func)
def capturar_errores_wrapper(self, *args, **kwargs):
self.inicializar()
try:
return func(self, *args, **kwargs)
except:
ex = exception_info()
self.Excepcion = ex['name']
self.Traceback = ex['msg']
if self.LanzarExcepciones:
raise
else:
return False
return capturar_errores_wrapper |
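The decorator pattern above (catch everything, stash the exception details on self, optionally re-raise) can be demonstrated without the original codebase. The Client class is hypothetical, exception_info() is replaced by the standard traceback module, and the Spanish attribute names are kept so the sketch mirrors the original interface.
import functools
import traceback

def capture_exceptions(func):
    """Store the exception name/traceback on self and return False unless re-raising."""
    @functools.wraps(func)
    def wrapper(self, *args, **kwargs):
        self.Excepcion = ""
        self.Traceback = ""
        try:
            return func(self, *args, **kwargs)
        except Exception as exc:
            self.Excepcion = type(exc).__name__
            self.Traceback = traceback.format_exc()
            if self.LanzarExcepciones:
                raise
            return False
    return wrapper

class Client:
    LanzarExcepciones = False   # mirror the original flag name

    @capture_exceptions
    def call(self):
        raise ValueError("boom")

c = Client()
print(c.call())       # False
print(c.Excepcion)    # ValueError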
def dmag_magic(in_file="measurements.txt", dir_path=".", input_dir_path="",
spec_file="specimens.txt", samp_file="samples.txt",
site_file="sites.txt", loc_file="locations.txt",
plot_by="loc", LT="AF", norm=True, XLP="",
save_plots=True, fmt="svg"):
"""
plots intensity decay curves for demagnetization experiments
Parameters
----------
in_file : str, default "measurements.txt"
dir_path : str
output directory, default "."
input_dir_path : str
input file directory (if different from dir_path), default ""
spec_file : str
input specimen file name, default "specimens.txt"
samp_file: str
input sample file name, default "samples.txt"
site_file : str
input site file name, default "sites.txt"
loc_file : str
input location file name, default "locations.txt"
plot_by : str
[spc, sam, sit, loc] (specimen, sample, site, location), default "loc"
LT : str
lab treatment [T, AF, M], default AF
norm : bool
normalize by NRM magnetization, default True
XLP : str
exclude specific lab protocols, (for example, method codes like LP-PI)
default ""
save_plots : bool
plot and save non-interactively, default True
fmt : str
["png", "svg", "pdf", "jpg"], default "svg"
Returns
---------
type - Tuple : (True or False indicating if conversion was successful, file name(s) written)
"""
dir_path = os.path.realpath(dir_path)
if not input_dir_path:
input_dir_path = dir_path
input_dir_path = os.path.realpath(input_dir_path)
# format plot_key
name_dict = {'loc': 'location', 'sit': 'site',
'sam': 'sample', 'spc': 'specimen'}
if plot_by not in name_dict.values():
try:
plot_key = name_dict[plot_by]
except KeyError:
print('Unrecognized plot_by {}, falling back to plot by location'.format(plot_by))
plot_key = "loc"
else:
plot_key = plot_by
# figure out what kind of experiment
LT = "LT-" + LT + "-Z"
print('LT', LT)
if LT == "LT-T-Z":
units, dmag_key = 'K', 'treat_temp'
elif LT == "LT-AF-Z":
units, dmag_key = 'T', 'treat_ac_field'
elif LT == 'LT-M-Z':
units, dmag_key = 'J', 'treat_mw_energy'
else:
units = 'U'
# init
FIG = {} # plot dictionary
FIG['demag'] = 1 # demag is figure 1
# create contribution and add required headers
fnames = {"specimens": spec_file, "samples": samp_file,
'sites': site_file, 'locations': loc_file}
if not os.path.exists(pmag.resolve_file_name(in_file, input_dir_path)):
print('-E- Could not find {}'.format(in_file))
return False, []
contribution = cb.Contribution(input_dir_path, single_file=in_file,
custom_filenames=fnames)
file_type = list(contribution.tables.keys())[0]
print(len(contribution.tables['measurements'].df), ' records read from ', in_file)
# add plot_key into measurements table
if plot_key not in contribution.tables['measurements'].df.columns:
#contribution.propagate_name_down(plot_key, 'measurements')
contribution.propagate_location_to_measurements()
data_container = contribution.tables[file_type]
# pare down to only records with useful data
# grab records that have the requested code
data_slice = data_container.get_records_for_code(LT)
# and don't have the offending code
data = data_container.get_records_for_code(XLP, incl=False, use_slice=True,
sli=data_slice, strict_match=False)
# make sure quality is in the dataframe
if 'quality' not in data.columns:
data['quality'] = 'g'
# get intensity key and make sure intensity data is not blank
intlist = ['magn_moment', 'magn_volume', 'magn_mass']
IntMeths = [col_name for col_name in data.columns if col_name in intlist]
# get rid of any entirely blank intensity columns
for col_name in IntMeths:
if not data[col_name].any():
data.drop(col_name, axis=1, inplace=True)
IntMeths = [col_name for col_name in data.columns if col_name in intlist]
if len(IntMeths) == 0:
print('-E- No intensity headers found')
return False, []
int_key = IntMeths[0] # plot first intensity method found - normalized to initial value anyway - doesn't matter which used
data = data[data[int_key].notnull()]
# make list of individual plots
# by default, will be by location_name
plotlist = data[plot_key].unique()
plotlist.sort()
pmagplotlib.plot_init(FIG['demag'], 5, 5)
last_plot = False
# iterate through and plot the data
for plot in plotlist:
if plot == plotlist[-1]:
last_plot = True
plot_data = data[data[plot_key] == plot].copy()
if not save_plots:
print(plot, 'plotting by: ', plot_key)
if len(plot_data) > 2:
title = plot
spcs = []
spcs = plot_data['specimen'].unique()
for spc in spcs:
INTblock = []
spec_data = plot_data[plot_data['specimen'] == spc]
for ind, rec in spec_data.iterrows():
INTblock.append([float(rec[dmag_key]), 0, 0, float(rec[int_key]), 1, rec['quality']])
if len(INTblock) > 2:
pmagplotlib.plot_mag(FIG['demag'], INTblock,
title, 0, units, norm)
if save_plots:
files = {}
for key in list(FIG.keys()):
if pmagplotlib.isServer:
files[key] = title + '_' + LT + '.' + fmt
incl_dir = False
else: # if not server, include directory in output path
files[key] = os.path.join(dir_path, title + '_' + LT + '.' + fmt)
incl_dir = True
pmagplotlib.save_plots(FIG, files, incl_directory=incl_dir)
else:
pmagplotlib.draw_figs(FIG)
prompt = " S[a]ve to save plot, [q]uit, Return to continue: "
ans = input(prompt)
if ans == 'q':
return True, []
if ans == "a":
files = {}
for key in list(FIG.keys()):
if pmagplotlib.isServer:
files[key] = title + '_' + LT + '.' + fmt
incl_dir = False
else: # if not server, include directory in output path
files[key] = os.path.join(dir_path, title + '_' + LT + '.' + fmt)
incl_dir = True
pmagplotlib.save_plots(FIG, files, incl_directory=incl_dir)
pmagplotlib.clearFIG(FIG['demag'])
if last_plot:
return True, [] | plots intensity decay curves for demagnetization experiments
Parameters
----------
in_file : str, default "measurements.txt"
dir_path : str
output directory, default "."
input_dir_path : str
input file directory (if different from dir_path), default ""
spec_file : str
input specimen file name, default "specimens.txt"
samp_file: str
input sample file name, default "samples.txt"
site_file : str
input site file name, default "sites.txt"
loc_file : str
input location file name, default "locations.txt"
plot_by : str
[spc, sam, sit, loc] (specimen, sample, site, location), default "loc"
LT : str
lab treatment [T, AF, M], default AF
norm : bool
normalize by NRM magnetization, default True
XLP : str
exclude specific lab protocols, (for example, method codes like LP-PI)
default ""
save_plots : bool
plot and save non-interactively, default True
fmt : str
["png", "svg", "pdf", "jpg"], default "svg"
Returns
---------
    type -  Tuple : (True or False indicating if conversion was successful, file name(s) written) | Below is the instruction that describes the task:
### Input:
plots intensity decay curves for demagnetization experiments
Parameters
----------
in_file : str, default "measurements.txt"
dir_path : str
output directory, default "."
input_dir_path : str
input file directory (if different from dir_path), default ""
spec_file : str
input specimen file name, default "specimens.txt"
samp_file: str
input sample file name, default "samples.txt"
site_file : str
input site file name, default "sites.txt"
loc_file : str
input location file name, default "locations.txt"
plot_by : str
[spc, sam, sit, loc] (specimen, sample, site, location), default "loc"
LT : str
lab treatment [T, AF, M], default AF
norm : bool
normalize by NRM magnetization, default True
XLP : str
exclude specific lab protocols, (for example, method codes like LP-PI)
default ""
save_plots : bool
plot and save non-interactively, default True
fmt : str
["png", "svg", "pdf", "jpg"], default "svg"
Returns
---------
    type -  Tuple : (True or False indicating if conversion was successful, file name(s) written)
### Response:
def dmag_magic(in_file="measurements.txt", dir_path=".", input_dir_path="",
spec_file="specimens.txt", samp_file="samples.txt",
site_file="sites.txt", loc_file="locations.txt",
plot_by="loc", LT="AF", norm=True, XLP="",
save_plots=True, fmt="svg"):
"""
plots intensity decay curves for demagnetization experiments
Parameters
----------
in_file : str, default "measurements.txt"
dir_path : str
output directory, default "."
input_dir_path : str
input file directory (if different from dir_path), default ""
spec_file : str
input specimen file name, default "specimens.txt"
samp_file: str
input sample file name, default "samples.txt"
site_file : str
input site file name, default "sites.txt"
loc_file : str
input location file name, default "locations.txt"
plot_by : str
[spc, sam, sit, loc] (specimen, sample, site, location), default "loc"
LT : str
lab treatment [T, AF, M], default AF
norm : bool
normalize by NRM magnetization, default True
XLP : str
exclude specific lab protocols, (for example, method codes like LP-PI)
default ""
save_plots : bool
plot and save non-interactively, default True
fmt : str
["png", "svg", "pdf", "jpg"], default "svg"
Returns
---------
    type -  Tuple : (True or False indicating if conversion was successful, file name(s) written)
"""
dir_path = os.path.realpath(dir_path)
if not input_dir_path:
input_dir_path = dir_path
input_dir_path = os.path.realpath(input_dir_path)
# format plot_key
name_dict = {'loc': 'location', 'sit': 'site',
'sam': 'sample', 'spc': 'specimen'}
if plot_by not in name_dict.values():
try:
plot_key = name_dict[plot_by]
except KeyError:
print('Unrecognized plot_by {}, falling back to plot by location'.format(plot_by))
plot_key = "loc"
else:
plot_key = plot_by
# figure out what kind of experiment
LT = "LT-" + LT + "-Z"
print('LT', LT)
if LT == "LT-T-Z":
units, dmag_key = 'K', 'treat_temp'
elif LT == "LT-AF-Z":
units, dmag_key = 'T', 'treat_ac_field'
elif LT == 'LT-M-Z':
units, dmag_key = 'J', 'treat_mw_energy'
else:
units = 'U'
# init
FIG = {} # plot dictionary
FIG['demag'] = 1 # demag is figure 1
# create contribution and add required headers
fnames = {"specimens": spec_file, "samples": samp_file,
'sites': site_file, 'locations': loc_file}
if not os.path.exists(pmag.resolve_file_name(in_file, input_dir_path)):
print('-E- Could not find {}'.format(in_file))
return False, []
contribution = cb.Contribution(input_dir_path, single_file=in_file,
custom_filenames=fnames)
file_type = list(contribution.tables.keys())[0]
print(len(contribution.tables['measurements'].df), ' records read from ', in_file)
# add plot_key into measurements table
if plot_key not in contribution.tables['measurements'].df.columns:
#contribution.propagate_name_down(plot_key, 'measurements')
contribution.propagate_location_to_measurements()
data_container = contribution.tables[file_type]
# pare down to only records with useful data
# grab records that have the requested code
data_slice = data_container.get_records_for_code(LT)
# and don't have the offending code
data = data_container.get_records_for_code(XLP, incl=False, use_slice=True,
sli=data_slice, strict_match=False)
# make sure quality is in the dataframe
if 'quality' not in data.columns:
data['quality'] = 'g'
# get intensity key and make sure intensity data is not blank
intlist = ['magn_moment', 'magn_volume', 'magn_mass']
IntMeths = [col_name for col_name in data.columns if col_name in intlist]
# get rid of any entirely blank intensity columns
for col_name in IntMeths:
if not data[col_name].any():
data.drop(col_name, axis=1, inplace=True)
IntMeths = [col_name for col_name in data.columns if col_name in intlist]
if len(IntMeths) == 0:
print('-E- No intensity headers found')
return False, []
int_key = IntMeths[0] # plot first intensity method found - normalized to initial value anyway - doesn't matter which used
data = data[data[int_key].notnull()]
# make list of individual plots
# by default, will be by location_name
plotlist = data[plot_key].unique()
plotlist.sort()
pmagplotlib.plot_init(FIG['demag'], 5, 5)
last_plot = False
# iterate through and plot the data
for plot in plotlist:
if plot == plotlist[-1]:
last_plot = True
plot_data = data[data[plot_key] == plot].copy()
if not save_plots:
print(plot, 'plotting by: ', plot_key)
if len(plot_data) > 2:
title = plot
spcs = []
spcs = plot_data['specimen'].unique()
for spc in spcs:
INTblock = []
spec_data = plot_data[plot_data['specimen'] == spc]
for ind, rec in spec_data.iterrows():
INTblock.append([float(rec[dmag_key]), 0, 0, float(rec[int_key]), 1, rec['quality']])
if len(INTblock) > 2:
pmagplotlib.plot_mag(FIG['demag'], INTblock,
title, 0, units, norm)
if save_plots:
files = {}
for key in list(FIG.keys()):
if pmagplotlib.isServer:
files[key] = title + '_' + LT + '.' + fmt
incl_dir = False
else: # if not server, include directory in output path
files[key] = os.path.join(dir_path, title + '_' + LT + '.' + fmt)
incl_dir = True
pmagplotlib.save_plots(FIG, files, incl_directory=incl_dir)
else:
pmagplotlib.draw_figs(FIG)
prompt = " S[a]ve to save plot, [q]uit, Return to continue: "
ans = input(prompt)
if ans == 'q':
return True, []
if ans == "a":
files = {}
for key in list(FIG.keys()):
if pmagplotlib.isServer:
files[key] = title + '_' + LT + '.' + fmt
incl_dir = False
else: # if not server, include directory in output path
files[key] = os.path.join(dir_path, title + '_' + LT + '.' + fmt)
incl_dir = True
pmagplotlib.save_plots(FIG, files, incl_directory=incl_dir)
pmagplotlib.clearFIG(FIG['demag'])
if last_plot:
return True, [] |
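
A minimal usage sketch for the record above (hedged: it assumes dmag_magic is importable in the caller's namespace, e.g. via pmagpy's ipmag module, and that a MagIC-format measurements.txt sits in the working directory; all file names and options are illustrative):

ok, saved = dmag_magic(in_file="measurements.txt", dir_path=".",
                       plot_by="spc", LT="AF", norm=True,
                       save_plots=True, fmt="png")
if not ok:
    print("no demagnetization plots were written")
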
def icnr(x, scale=2, init=nn.init.kaiming_normal_):
"ICNR init of `x`, with `scale` and `init` function."
ni,nf,h,w = x.shape
ni2 = int(ni/(scale**2))
k = init(torch.zeros([ni2,nf,h,w])).transpose(0, 1)
k = k.contiguous().view(ni2, nf, -1)
k = k.repeat(1, 1, scale**2)
k = k.contiguous().view([nf,ni,h,w]).transpose(0, 1)
    x.data.copy_(k) | ICNR init of `x`, with `scale` and `init` function. | Below is the instruction that describes the task:
### Input:
ICNR init of `x`, with `scale` and `init` function.
### Response:
def icnr(x, scale=2, init=nn.init.kaiming_normal_):
"ICNR init of `x`, with `scale` and `init` function."
ni,nf,h,w = x.shape
ni2 = int(ni/(scale**2))
k = init(torch.zeros([ni2,nf,h,w])).transpose(0, 1)
k = k.contiguous().view(ni2, nf, -1)
k = k.repeat(1, 1, scale**2)
k = k.contiguous().view([nf,ni,h,w]).transpose(0, 1)
x.data.copy_(k) |
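
A short sketch of the usual pairing of icnr with nn.PixelShuffle (the layer sizes below are illustrative assumptions, not taken from the source):

import torch.nn as nn

conv = nn.Conv2d(64, 64 * 4, kernel_size=3, padding=1)  # feeds a 2x PixelShuffle
icnr(conv.weight, scale=2)                               # sub-pixel kernels start identical
upsample = nn.Sequential(conv, nn.PixelShuffle(2))
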
def vra(self,*args,**kwargs):
"""
NAME:
vra
PURPOSE:
return velocity in right ascension (km/s)
INPUT:
t - (optional) time at which to get vra (can be Quantity)
obs=[X,Y,Z,vx,vy,vz] - (optional) position and velocity of observer
in the Galactocentric frame
(in kpc and km/s) (default=[8.0,0.,0.,0.,220.,0.]; entries can be Quantity)
OR Orbit object that corresponds to the orbit
of the observer
Y is ignored and always assumed to be zero
ro= (Object-wide default) physical scale for distances to use to convert (can be Quantity)
vo= (Object-wide default) physical scale for velocities to use to convert (can be Quantity)
OUTPUT:
v_ra(t) in km/s
HISTORY:
2011-03-27 - Written - Bovy (NYU)
"""
from .OrbitTop import _check_roSet, _check_voSet
_check_roSet(self,kwargs,'vra')
_check_voSet(self,kwargs,'vra')
dist= self._orb.dist(*args,**kwargs)
if _APY_UNITS and isinstance(dist,units.Quantity):
out= units.Quantity(dist.to(units.kpc).value*_K*
self._orb.pmra(*args,**kwargs)\
.to(units.mas/units.yr).value,
unit=units.km/units.s)
else:
out= dist*_K*self._orb.pmra(*args,**kwargs)
if len(out) == 1: return out[0]
else: return out | NAME:
vra
PURPOSE:
return velocity in right ascension (km/s)
INPUT:
t - (optional) time at which to get vra (can be Quantity)
obs=[X,Y,Z,vx,vy,vz] - (optional) position and velocity of observer
in the Galactocentric frame
(in kpc and km/s) (default=[8.0,0.,0.,0.,220.,0.]; entries can be Quantity)
OR Orbit object that corresponds to the orbit
of the observer
Y is ignored and always assumed to be zero
ro= (Object-wide default) physical scale for distances to use to convert (can be Quantity)
vo= (Object-wide default) physical scale for velocities to use to convert (can be Quantity)
OUTPUT:
v_ra(t) in km/s
HISTORY:
       2011-03-27 - Written - Bovy (NYU) | Below is the instruction that describes the task:
### Input:
NAME:
vra
PURPOSE:
return velocity in right ascension (km/s)
INPUT:
t - (optional) time at which to get vra (can be Quantity)
obs=[X,Y,Z,vx,vy,vz] - (optional) position and velocity of observer
in the Galactocentric frame
(in kpc and km/s) (default=[8.0,0.,0.,0.,220.,0.]; entries can be Quantity)
OR Orbit object that corresponds to the orbit
of the observer
Y is ignored and always assumed to be zero
ro= (Object-wide default) physical scale for distances to use to convert (can be Quantity)
vo= (Object-wide default) physical scale for velocities to use to convert (can be Quantity)
OUTPUT:
v_ra(t) in km/s
HISTORY:
2011-03-27 - Written - Bovy (NYU)
### Response:
def vra(self,*args,**kwargs):
"""
NAME:
vra
PURPOSE:
return velocity in right ascension (km/s)
INPUT:
t - (optional) time at which to get vra (can be Quantity)
obs=[X,Y,Z,vx,vy,vz] - (optional) position and velocity of observer
in the Galactocentric frame
(in kpc and km/s) (default=[8.0,0.,0.,0.,220.,0.]; entries can be Quantity)
OR Orbit object that corresponds to the orbit
of the observer
Y is ignored and always assumed to be zero
ro= (Object-wide default) physical scale for distances to use to convert (can be Quantity)
vo= (Object-wide default) physical scale for velocities to use to convert (can be Quantity)
OUTPUT:
v_ra(t) in km/s
HISTORY:
2011-03-27 - Written - Bovy (NYU)
"""
from .OrbitTop import _check_roSet, _check_voSet
_check_roSet(self,kwargs,'vra')
_check_voSet(self,kwargs,'vra')
dist= self._orb.dist(*args,**kwargs)
if _APY_UNITS and isinstance(dist,units.Quantity):
out= units.Quantity(dist.to(units.kpc).value*_K*
self._orb.pmra(*args,**kwargs)\
.to(units.mas/units.yr).value,
unit=units.km/units.s)
else:
out= dist*_K*self._orb.pmra(*args,**kwargs)
if len(out) == 1: return out[0]
else: return out |
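
An illustrative call, assuming the surrounding galpy Orbit class is available (the initial conditions and scales below are arbitrary):

from galpy.orbit import Orbit

o = Orbit([1., 0.1, 1.1, 0., 0.1, 0.], ro=8., vo=220.)  # [R, vR, vT, z, vz, phi], arbitrary values
print(o.vra())  # velocity in right ascension, in km/s
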
def p_tag_ref(self, p):
'tag_ref : ID'
    p[0] = AstTagRef(self.path, p.lineno(1), p.lexpos(1), p[1]) | tag_ref : ID | Below is the instruction that describes the task:
### Input:
tag_ref : ID
### Response:
def p_tag_ref(self, p):
'tag_ref : ID'
p[0] = AstTagRef(self.path, p.lineno(1), p.lexpos(1), p[1]) |
def is_ipython_notebook(file_name):
"""
Return True if file_name matches a regexp for an ipython notebook. False otherwise.
:param file_name: file to test
"""
    if (not re.match(r"^.*checkpoint\.ipynb$", file_name)) and re.match(r"^.*\.ipynb$", file_name): return True
return False | Return True if file_name matches a regexp for an ipython notebook. False otherwise.
    :param file_name: file to test | Below is the instruction that describes the task:
### Input:
Return True if file_name matches a regexp for an ipython notebook. False otherwise.
:param file_name: file to test
### Response:
def is_ipython_notebook(file_name):
"""
Return True if file_name matches a regexp for an ipython notebook. False otherwise.
:param file_name: file to test
"""
    if (not re.match(r"^.*checkpoint\.ipynb$", file_name)) and re.match(r"^.*\.ipynb$", file_name): return True
return False |
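
A quick check of the intended behaviour (the file names are made up):

assert is_ipython_notebook("analysis.ipynb")
assert not is_ipython_notebook("analysis-checkpoint.ipynb")
assert not is_ipython_notebook("script.py")
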
def write(self, file_or_filename):
""" Writes case data to file in Excel format.
"""
self.book = Workbook()
self._write_data(None)
        self.book.save(file_or_filename) | Writes case data to file in Excel format. | Below is the instruction that describes the task:
### Input:
Writes case data to file in Excel format.
### Response:
def write(self, file_or_filename):
""" Writes case data to file in Excel format.
"""
self.book = Workbook()
self._write_data(None)
self.book.save(file_or_filename) |
def get_s3_client():
"""
A DRY place to make sure AWS credentials in settings override
environment based credentials. Boto3 will fall back to:
http://boto3.readthedocs.io/en/latest/guide/configuration.html
"""
session_kwargs = {}
if hasattr(settings, 'AWS_ACCESS_KEY_ID'):
session_kwargs['aws_access_key_id'] = settings.AWS_ACCESS_KEY_ID
if hasattr(settings, 'AWS_SECRET_ACCESS_KEY'):
session_kwargs['aws_secret_access_key'] = settings.AWS_SECRET_ACCESS_KEY
boto3.setup_default_session(**session_kwargs)
s3_kwargs = {}
if hasattr(settings, 'AWS_S3_ENDPOINT'):
s3_kwargs['endpoint_url'] = settings.AWS_S3_ENDPOINT
elif hasattr(settings, 'AWS_S3_HOST'):
if hasattr(settings, 'AWS_S3_USE_SSL') and settings.AWS_S3_USE_SSL is False:
protocol = "http://"
else:
protocol = "https://"
s3_kwargs['endpoint_url'] = "{}{}".format(
protocol,
settings.AWS_S3_HOST
)
if hasattr(settings, "AWS_REGION"):
s3_kwargs['region_name'] = settings.AWS_REGION
s3_client = boto3.client('s3', **s3_kwargs)
s3_resource = boto3.resource('s3', **s3_kwargs)
return s3_client, s3_resource | A DRY place to make sure AWS credentials in settings override
environment based credentials. Boto3 will fall back to:
    http://boto3.readthedocs.io/en/latest/guide/configuration.html | Below is the instruction that describes the task:
### Input:
A DRY place to make sure AWS credentials in settings override
environment based credentials. Boto3 will fall back to:
http://boto3.readthedocs.io/en/latest/guide/configuration.html
### Response:
def get_s3_client():
"""
A DRY place to make sure AWS credentials in settings override
environment based credentials. Boto3 will fall back to:
http://boto3.readthedocs.io/en/latest/guide/configuration.html
"""
session_kwargs = {}
if hasattr(settings, 'AWS_ACCESS_KEY_ID'):
session_kwargs['aws_access_key_id'] = settings.AWS_ACCESS_KEY_ID
if hasattr(settings, 'AWS_SECRET_ACCESS_KEY'):
session_kwargs['aws_secret_access_key'] = settings.AWS_SECRET_ACCESS_KEY
boto3.setup_default_session(**session_kwargs)
s3_kwargs = {}
if hasattr(settings, 'AWS_S3_ENDPOINT'):
s3_kwargs['endpoint_url'] = settings.AWS_S3_ENDPOINT
elif hasattr(settings, 'AWS_S3_HOST'):
if hasattr(settings, 'AWS_S3_USE_SSL') and settings.AWS_S3_USE_SSL is False:
protocol = "http://"
else:
protocol = "https://"
s3_kwargs['endpoint_url'] = "{}{}".format(
protocol,
settings.AWS_S3_HOST
)
if hasattr(settings, "AWS_REGION"):
s3_kwargs['region_name'] = settings.AWS_REGION
s3_client = boto3.client('s3', **s3_kwargs)
s3_resource = boto3.resource('s3', **s3_kwargs)
return s3_client, s3_resource |
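
A sketch of typical use; the bucket and key names are placeholders, and the Django settings consulted above must already be configured:

s3_client, s3_resource = get_s3_client()
s3_client.upload_file("local.csv", "my-bucket", "exports/local.csv")  # low-level client call
bucket = s3_resource.Bucket("my-bucket")                              # resource-level handle
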
def token_expired(self):
"""Provide access to flag indicating if token has expired."""
if self._token_timer is None:
return True
        return timeutil.is_newer_than(self._token_timer, timeutil.ONE_HOUR) | Provide access to flag indicating if token has expired. | Below is the instruction that describes the task:
### Input:
Provide access to flag indicating if token has expired.
### Response:
def token_expired(self):
"""Provide access to flag indicating if token has expired."""
if self._token_timer is None:
return True
return timeutil.is_newer_than(self._token_timer, timeutil.ONE_HOUR) |
def economic_qs(K, epsilon=sqrt(finfo(float).eps)):
r"""Economic eigen decomposition for symmetric matrices.
A symmetric matrix ``K`` can be decomposed in
:math:`\mathrm Q_0 \mathrm S_0 \mathrm Q_0^\intercal + \mathrm Q_1\
\mathrm S_1 \mathrm Q_1^ \intercal`, where :math:`\mathrm S_1` is a zero
matrix with size determined by ``K``'s rank deficiency.
Args:
K (array_like): Symmetric matrix.
epsilon (float): Eigen value threshold. Default is
``sqrt(finfo(float).eps)``.
Returns:
tuple: ``((Q0, Q1), S0)``.
"""
(S, Q) = eigh(K)
nok = abs(max(Q[0].min(), Q[0].max(), key=abs)) < epsilon
nok = nok and abs(max(K.min(), K.max(), key=abs)) >= epsilon
if nok:
from scipy.linalg import eigh as sp_eigh
(S, Q) = sp_eigh(K)
ok = S >= epsilon
nok = logical_not(ok)
S0 = S[ok]
Q0 = Q[:, ok]
Q1 = Q[:, nok]
return ((Q0, Q1), S0) | r"""Economic eigen decomposition for symmetric matrices.
A symmetric matrix ``K`` can be decomposed in
:math:`\mathrm Q_0 \mathrm S_0 \mathrm Q_0^\intercal + \mathrm Q_1\
\mathrm S_1 \mathrm Q_1^ \intercal`, where :math:`\mathrm S_1` is a zero
matrix with size determined by ``K``'s rank deficiency.
Args:
K (array_like): Symmetric matrix.
epsilon (float): Eigen value threshold. Default is
``sqrt(finfo(float).eps)``.
Returns:
        tuple: ``((Q0, Q1), S0)``. | Below is the instruction that describes the task:
### Input:
r"""Economic eigen decomposition for symmetric matrices.
A symmetric matrix ``K`` can be decomposed in
:math:`\mathrm Q_0 \mathrm S_0 \mathrm Q_0^\intercal + \mathrm Q_1\
\mathrm S_1 \mathrm Q_1^ \intercal`, where :math:`\mathrm S_1` is a zero
matrix with size determined by ``K``'s rank deficiency.
Args:
K (array_like): Symmetric matrix.
epsilon (float): Eigen value threshold. Default is
``sqrt(finfo(float).eps)``.
Returns:
tuple: ``((Q0, Q1), S0)``.
### Response:
def economic_qs(K, epsilon=sqrt(finfo(float).eps)):
r"""Economic eigen decomposition for symmetric matrices.
A symmetric matrix ``K`` can be decomposed in
:math:`\mathrm Q_0 \mathrm S_0 \mathrm Q_0^\intercal + \mathrm Q_1\
\mathrm S_1 \mathrm Q_1^ \intercal`, where :math:`\mathrm S_1` is a zero
matrix with size determined by ``K``'s rank deficiency.
Args:
K (array_like): Symmetric matrix.
epsilon (float): Eigen value threshold. Default is
``sqrt(finfo(float).eps)``.
Returns:
tuple: ``((Q0, Q1), S0)``.
"""
(S, Q) = eigh(K)
nok = abs(max(Q[0].min(), Q[0].max(), key=abs)) < epsilon
nok = nok and abs(max(K.min(), K.max(), key=abs)) >= epsilon
if nok:
from scipy.linalg import eigh as sp_eigh
(S, Q) = sp_eigh(K)
ok = S >= epsilon
nok = logical_not(ok)
S0 = S[ok]
Q0 = Q[:, ok]
Q1 = Q[:, nok]
return ((Q0, Q1), S0) |
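
A minimal numerical sketch, assuming the function above and its numpy imports are in scope (the random rank-deficient matrix is illustrative): because S1 is zero, K is recovered from Q0 and S0 alone.

import numpy as np

G = np.random.randn(6, 2)
K = G @ G.T                                    # symmetric, rank 2
(Q0, Q1), S0 = economic_qs(K)
assert np.allclose(K, Q0 @ np.diag(S0) @ Q0.T)
assert Q1.shape[1] == 4                        # null-space directions
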
def sngl_ifo_job_setup(workflow, ifo, out_files, curr_exe_job, science_segs,
datafind_outs, parents=None,
link_job_instance=None, allow_overlap=True,
compatibility_mode=True):
""" This function sets up a set of single ifo jobs. A basic overview of how this
works is as follows:
* (1) Identify the length of data that each job needs to read in, and what
part of that data the job is valid for.
* START LOOPING OVER SCIENCE SEGMENTS
* (2) Identify how many jobs are needed (if any) to cover the given science
segment and the time shift between jobs. If no jobs continue.
* START LOOPING OVER JOBS
* (3) Identify the time that the given job should produce valid output (ie.
inspiral triggers) over.
* (4) Identify the data range that the job will need to read in to produce
the aforementioned valid output.
* (5) Identify all parents/inputs of the job.
* (6) Add the job to the workflow
* END LOOPING OVER JOBS
* END LOOPING OVER SCIENCE SEGMENTS
Parameters
-----------
workflow: pycbc.workflow.core.Workflow
An instance of the Workflow class that manages the constructed workflow.
ifo : string
The name of the ifo to set up the jobs for
out_files : pycbc.workflow.core.FileList
The FileList containing the list of jobs. Jobs will be appended
to this list, and it does not need to be empty when supplied.
curr_exe_job : Job
        An instance of the Job class that has a get_valid_times method.
science_segs : ligo.segments.segmentlist
The list of times that the jobs should cover
datafind_outs : pycbc.workflow.core.FileList
The file list containing the datafind files.
parents : pycbc.workflow.core.FileList (optional, kwarg, default=None)
The FileList containing the list of jobs that are parents to
the one being set up.
link_job_instance : Job instance (optional),
Coordinate the valid times with another Executable.
allow_overlap : boolean (optional, kwarg, default = True)
If this is set the times that jobs are valid for will be allowed to
overlap. This may be desired for template banks which may have some
overlap in the times they cover. This may not be desired for inspiral
jobs, where you probably want triggers recorded by jobs to not overlap
at all.
    compatibility_mode : boolean (optional, kwarg, default = True)
If given the jobs will be tiled in the same method as used in inspiral
hipe. This requires that link_job_instance is also given. If not given
workflow's methods are used.
Returns
--------
out_files : pycbc.workflow.core.FileList
A list of the files that will be generated by this step in the
workflow.
"""
if compatibility_mode and not link_job_instance:
errMsg = "Compability mode requires a link_job_instance."
raise ValueError(errMsg)
########### (1) ############
# Get the times that can be analysed and needed data lengths
data_length, valid_chunk, valid_length = identify_needed_data(curr_exe_job,
link_job_instance=link_job_instance)
# Loop over science segments and set up jobs
for curr_seg in science_segs:
########### (2) ############
# Initialize the class that identifies how many jobs are needed and the
# shift between them.
segmenter = JobSegmenter(data_length, valid_chunk, valid_length,
curr_seg, curr_exe_job,
compatibility_mode=compatibility_mode)
for job_num in range(segmenter.num_jobs):
############## (3) #############
# Figure out over what times this job will be valid for
job_valid_seg = segmenter.get_valid_times_for_job(job_num,
allow_overlap=allow_overlap)
############## (4) #############
# Get the data that this job should read in
job_data_seg = segmenter.get_data_times_for_job(job_num)
############# (5) ############
# Identify parents/inputs to the job
if parents:
# Find the set of files with the best overlap
curr_parent = parents.find_outputs_in_range(ifo, job_valid_seg,
useSplitLists=True)
if not curr_parent:
err_string = ("No parent jobs found overlapping %d to %d."
%(job_valid_seg[0], job_valid_seg[1]))
err_string += "\nThis is a bad error! Contact a developer."
raise ValueError(err_string)
else:
curr_parent = [None]
curr_dfouts = None
if datafind_outs:
curr_dfouts = datafind_outs.find_all_output_in_range(ifo,
job_data_seg, useSplitLists=True)
if not curr_dfouts:
err_str = ("No datafind jobs found overlapping %d to %d."
%(job_data_seg[0],job_data_seg[1]))
err_str += "\nThis shouldn't happen. Contact a developer."
raise ValueError(err_str)
############## (6) #############
# Make node and add to workflow
# Note if I have more than one curr_parent I need to make more than
# one job. If there are no curr_parents it is set to [None] and I
# make a single job. This catches the case of a split template bank
# where I run a number of jobs to cover a single range of time.
# Sort parent jobs to ensure predictable order
sorted_parents = sorted(curr_parent,
key=lambda fobj: fobj.tagged_description)
for pnum, parent in enumerate(sorted_parents):
if len(curr_parent) != 1:
tag = ["JOB%d" %(pnum,)]
else:
tag = []
# To ensure output file uniqueness I add a tag
# We should generate unique names automatically, but it is a
# pain until we can set the output names for all Executables
node = curr_exe_job.create_node(job_data_seg, job_valid_seg,
parent=parent,
dfParents=curr_dfouts,
tags=tag)
workflow.add_node(node)
curr_out_files = node.output_files
# FIXME: Here we remove PSD files if they are coming through.
# This should be done in a better way. On to-do list.
curr_out_files = [i for i in curr_out_files if 'PSD_FILE'\
not in i.tags]
out_files += curr_out_files
return out_files | This function sets up a set of single ifo jobs. A basic overview of how this
works is as follows:
* (1) Identify the length of data that each job needs to read in, and what
part of that data the job is valid for.
* START LOOPING OVER SCIENCE SEGMENTS
* (2) Identify how many jobs are needed (if any) to cover the given science
segment and the time shift between jobs. If no jobs continue.
* START LOOPING OVER JOBS
* (3) Identify the time that the given job should produce valid output (ie.
inspiral triggers) over.
* (4) Identify the data range that the job will need to read in to produce
the aforementioned valid output.
* (5) Identify all parents/inputs of the job.
* (6) Add the job to the workflow
* END LOOPING OVER JOBS
* END LOOPING OVER SCIENCE SEGMENTS
Parameters
-----------
workflow: pycbc.workflow.core.Workflow
An instance of the Workflow class that manages the constructed workflow.
ifo : string
The name of the ifo to set up the jobs for
out_files : pycbc.workflow.core.FileList
The FileList containing the list of jobs. Jobs will be appended
to this list, and it does not need to be empty when supplied.
curr_exe_job : Job
        An instance of the Job class that has a get_valid_times method.
science_segs : ligo.segments.segmentlist
The list of times that the jobs should cover
datafind_outs : pycbc.workflow.core.FileList
The file list containing the datafind files.
parents : pycbc.workflow.core.FileList (optional, kwarg, default=None)
The FileList containing the list of jobs that are parents to
the one being set up.
link_job_instance : Job instance (optional),
Coordinate the valid times with another Executable.
allow_overlap : boolean (optional, kwarg, default = True)
If this is set the times that jobs are valid for will be allowed to
overlap. This may be desired for template banks which may have some
overlap in the times they cover. This may not be desired for inspiral
jobs, where you probably want triggers recorded by jobs to not overlap
at all.
    compatibility_mode : boolean (optional, kwarg, default = True)
If given the jobs will be tiled in the same method as used in inspiral
hipe. This requires that link_job_instance is also given. If not given
workflow's methods are used.
Returns
--------
out_files : pycbc.workflow.core.FileList
A list of the files that will be generated by this step in the
        workflow. | Below is the instruction that describes the task:
### Input:
This function sets up a set of single ifo jobs. A basic overview of how this
works is as follows:
* (1) Identify the length of data that each job needs to read in, and what
part of that data the job is valid for.
* START LOOPING OVER SCIENCE SEGMENTS
* (2) Identify how many jobs are needed (if any) to cover the given science
segment and the time shift between jobs. If no jobs continue.
* START LOOPING OVER JOBS
* (3) Identify the time that the given job should produce valid output (ie.
inspiral triggers) over.
* (4) Identify the data range that the job will need to read in to produce
the aforementioned valid output.
* (5) Identify all parents/inputs of the job.
* (6) Add the job to the workflow
* END LOOPING OVER JOBS
* END LOOPING OVER SCIENCE SEGMENTS
Parameters
-----------
workflow: pycbc.workflow.core.Workflow
An instance of the Workflow class that manages the constructed workflow.
ifo : string
The name of the ifo to set up the jobs for
out_files : pycbc.workflow.core.FileList
The FileList containing the list of jobs. Jobs will be appended
to this list, and it does not need to be empty when supplied.
curr_exe_job : Job
        An instance of the Job class that has a get_valid_times method.
science_segs : ligo.segments.segmentlist
The list of times that the jobs should cover
datafind_outs : pycbc.workflow.core.FileList
The file list containing the datafind files.
parents : pycbc.workflow.core.FileList (optional, kwarg, default=None)
The FileList containing the list of jobs that are parents to
the one being set up.
link_job_instance : Job instance (optional),
Coordinate the valid times with another Executable.
allow_overlap : boolean (optional, kwarg, default = True)
If this is set the times that jobs are valid for will be allowed to
overlap. This may be desired for template banks which may have some
overlap in the times they cover. This may not be desired for inspiral
jobs, where you probably want triggers recorded by jobs to not overlap
at all.
    compatibility_mode : boolean (optional, kwarg, default = True)
If given the jobs will be tiled in the same method as used in inspiral
hipe. This requires that link_job_instance is also given. If not given
workflow's methods are used.
Returns
--------
out_files : pycbc.workflow.core.FileList
A list of the files that will be generated by this step in the
workflow.
### Response:
def sngl_ifo_job_setup(workflow, ifo, out_files, curr_exe_job, science_segs,
datafind_outs, parents=None,
link_job_instance=None, allow_overlap=True,
compatibility_mode=True):
""" This function sets up a set of single ifo jobs. A basic overview of how this
works is as follows:
* (1) Identify the length of data that each job needs to read in, and what
part of that data the job is valid for.
* START LOOPING OVER SCIENCE SEGMENTS
* (2) Identify how many jobs are needed (if any) to cover the given science
segment and the time shift between jobs. If no jobs continue.
* START LOOPING OVER JOBS
* (3) Identify the time that the given job should produce valid output (ie.
inspiral triggers) over.
* (4) Identify the data range that the job will need to read in to produce
the aforementioned valid output.
* (5) Identify all parents/inputs of the job.
* (6) Add the job to the workflow
* END LOOPING OVER JOBS
* END LOOPING OVER SCIENCE SEGMENTS
Parameters
-----------
workflow: pycbc.workflow.core.Workflow
An instance of the Workflow class that manages the constructed workflow.
ifo : string
The name of the ifo to set up the jobs for
out_files : pycbc.workflow.core.FileList
The FileList containing the list of jobs. Jobs will be appended
to this list, and it does not need to be empty when supplied.
curr_exe_job : Job
An instanced of the Job class that has a get_valid times method.
science_segs : ligo.segments.segmentlist
The list of times that the jobs should cover
datafind_outs : pycbc.workflow.core.FileList
The file list containing the datafind files.
parents : pycbc.workflow.core.FileList (optional, kwarg, default=None)
The FileList containing the list of jobs that are parents to
the one being set up.
link_job_instance : Job instance (optional),
Coordinate the valid times with another Executable.
allow_overlap : boolean (optional, kwarg, default = True)
If this is set the times that jobs are valid for will be allowed to
overlap. This may be desired for template banks which may have some
overlap in the times they cover. This may not be desired for inspiral
jobs, where you probably want triggers recorded by jobs to not overlap
at all.
    compatibility_mode : boolean (optional, kwarg, default = True)
If given the jobs will be tiled in the same method as used in inspiral
hipe. This requires that link_job_instance is also given. If not given
workflow's methods are used.
Returns
--------
out_files : pycbc.workflow.core.FileList
A list of the files that will be generated by this step in the
workflow.
"""
if compatibility_mode and not link_job_instance:
errMsg = "Compability mode requires a link_job_instance."
raise ValueError(errMsg)
########### (1) ############
# Get the times that can be analysed and needed data lengths
data_length, valid_chunk, valid_length = identify_needed_data(curr_exe_job,
link_job_instance=link_job_instance)
# Loop over science segments and set up jobs
for curr_seg in science_segs:
########### (2) ############
# Initialize the class that identifies how many jobs are needed and the
# shift between them.
segmenter = JobSegmenter(data_length, valid_chunk, valid_length,
curr_seg, curr_exe_job,
compatibility_mode=compatibility_mode)
for job_num in range(segmenter.num_jobs):
############## (3) #############
# Figure out over what times this job will be valid for
job_valid_seg = segmenter.get_valid_times_for_job(job_num,
allow_overlap=allow_overlap)
############## (4) #############
# Get the data that this job should read in
job_data_seg = segmenter.get_data_times_for_job(job_num)
############# (5) ############
# Identify parents/inputs to the job
if parents:
# Find the set of files with the best overlap
curr_parent = parents.find_outputs_in_range(ifo, job_valid_seg,
useSplitLists=True)
if not curr_parent:
err_string = ("No parent jobs found overlapping %d to %d."
%(job_valid_seg[0], job_valid_seg[1]))
err_string += "\nThis is a bad error! Contact a developer."
raise ValueError(err_string)
else:
curr_parent = [None]
curr_dfouts = None
if datafind_outs:
curr_dfouts = datafind_outs.find_all_output_in_range(ifo,
job_data_seg, useSplitLists=True)
if not curr_dfouts:
err_str = ("No datafind jobs found overlapping %d to %d."
%(job_data_seg[0],job_data_seg[1]))
err_str += "\nThis shouldn't happen. Contact a developer."
raise ValueError(err_str)
############## (6) #############
# Make node and add to workflow
# Note if I have more than one curr_parent I need to make more than
# one job. If there are no curr_parents it is set to [None] and I
# make a single job. This catches the case of a split template bank
# where I run a number of jobs to cover a single range of time.
# Sort parent jobs to ensure predictable order
sorted_parents = sorted(curr_parent,
key=lambda fobj: fobj.tagged_description)
for pnum, parent in enumerate(sorted_parents):
if len(curr_parent) != 1:
tag = ["JOB%d" %(pnum,)]
else:
tag = []
# To ensure output file uniqueness I add a tag
# We should generate unique names automatically, but it is a
# pain until we can set the output names for all Executables
node = curr_exe_job.create_node(job_data_seg, job_valid_seg,
parent=parent,
dfParents=curr_dfouts,
tags=tag)
workflow.add_node(node)
curr_out_files = node.output_files
# FIXME: Here we remove PSD files if they are coming through.
# This should be done in a better way. On to-do list.
curr_out_files = [i for i in curr_out_files if 'PSD_FILE'\
not in i.tags]
out_files += curr_out_files
return out_files |
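
A schematic, non-runnable sketch of how this is typically driven from a workflow module; every object below (workflow, jobs, segments, file lists) comes from earlier pycbc setup and the names are placeholders:

# inspiral_outs = FileList([])
# for ifo in workflow.ifos:
#     sngl_ifo_job_setup(workflow, ifo, inspiral_outs, inspiral_exe_jobs[ifo],
#                        science_segs[ifo], datafind_files,
#                        parents=tmplt_banks, allow_overlap=False,
#                        compatibility_mode=False)
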
def obfn_reg(self):
"""Compute regularisation term and contribution to objective
function.
"""
rl1 = np.linalg.norm((self.wl1 * self.obfn_gvar()).ravel(), 1)
return (self.lmbda*rl1, rl1) | Compute regularisation term and contribution to objective
        function. | Below is the instruction that describes the task:
### Input:
Compute regularisation term and contribution to objective
function.
### Response:
def obfn_reg(self):
"""Compute regularisation term and contribution to objective
function.
"""
rl1 = np.linalg.norm((self.wl1 * self.obfn_gvar()).ravel(), 1)
return (self.lmbda*rl1, rl1) |
def get_post(self):
""" Returns the considered post if applicable. """
pk = self.kwargs.get(self.post_pk_url_kwarg, None)
if not pk:
return
if not hasattr(self, '_forum_post'):
self._forum_post = get_object_or_404(Post, pk=pk)
        return self._forum_post | Returns the considered post if applicable. | Below is the instruction that describes the task:
### Input:
Returns the considered post if applicable.
### Response:
def get_post(self):
""" Returns the considered post if applicable. """
pk = self.kwargs.get(self.post_pk_url_kwarg, None)
if not pk:
return
if not hasattr(self, '_forum_post'):
self._forum_post = get_object_or_404(Post, pk=pk)
return self._forum_post |
def read_csv(csvfile, options):
"""
Read csv and return molList, a list of mol objects
"""
# open file or exit
name, ext = os.path.splitext(csvfile)
try:
if ext == '.gz':
f = gzip.open(csvfile, 'rb')
else:
f = open(csvfile, 'rU')
except IOError:
print(" \n '{f}' could not be opened\n".format(f=os.path.basename(csvfile)))
sys.exit(1)
# read file
csv_reader = csv.reader(f)
molList = []
linenumber = 1
for line in csv_reader:
# get column labels from the first line
if linenumber == 1:
prop_indices = read_header(line, options)
# otherwise read line & append to MolList
else:
mol = Molecule()
mol = read_line(line, options, prop_indices, mol)
# if the line's junk, skip it
if mol == 1:
print(" skipping molecule 'm'\n".format(m=(linenumber - 1)))
else:
molList.append(mol)
linenumber += 1
# return molList
    return molList | Read csv and return molList, a list of mol objects | Below is the instruction that describes the task:
### Input:
Read csv and return molList, a list of mol objects
### Response:
def read_csv(csvfile, options):
"""
Read csv and return molList, a list of mol objects
"""
# open file or exit
name, ext = os.path.splitext(csvfile)
try:
if ext == '.gz':
f = gzip.open(csvfile, 'rb')
else:
f = open(csvfile, 'rU')
except IOError:
print(" \n '{f}' could not be opened\n".format(f=os.path.basename(csvfile)))
sys.exit(1)
# read file
csv_reader = csv.reader(f)
molList = []
linenumber = 1
for line in csv_reader:
# get column labels from the first line
if linenumber == 1:
prop_indices = read_header(line, options)
# otherwise read line & append to MolList
else:
mol = Molecule()
mol = read_line(line, options, prop_indices, mol)
# if the line's junk, skip it
if mol == 1:
print(" skipping molecule 'm'\n".format(m=(linenumber - 1)))
else:
molList.append(mol)
linenumber += 1
# return molList
return molList |
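
An illustrative call; the csv path is a placeholder and options is whatever argparse-style namespace the caller built (it carries the column-mapping flags that read_header expects):

molList = read_csv("ligands.csv.gz", options)
print(len(molList), "molecules parsed")
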
def location_2_json(self):
"""
transform ariane_clip3 location object to Ariane server JSON obj
:return: Ariane JSON obj
"""
LOGGER.debug("Location.location_2_json")
json_obj = {
'locationID': self.id,
'locationName': self.name,
'locationDescription': self.description,
'locationAddress': self.address,
'locationZipCode': self.zip_code,
'locationTown': self.town,
'locationType': self.type,
'locationCountry': self.country,
'locationGPSLat': self.gpsLatitude,
'locationGPSLng': self.gpsLongitude,
'locationRoutingAreasID': self.routing_area_ids,
'locationSubnetsID': self.subnet_ids
}
return json.dumps(json_obj) | transform ariane_clip3 location object to Ariane server JSON obj
    :return: Ariane JSON obj | Below is the instruction that describes the task:
### Input:
transform ariane_clip3 location object to Ariane server JSON obj
:return: Ariane JSON obj
### Response:
def location_2_json(self):
"""
transform ariane_clip3 location object to Ariane server JSON obj
:return: Ariane JSON obj
"""
LOGGER.debug("Location.location_2_json")
json_obj = {
'locationID': self.id,
'locationName': self.name,
'locationDescription': self.description,
'locationAddress': self.address,
'locationZipCode': self.zip_code,
'locationTown': self.town,
'locationType': self.type,
'locationCountry': self.country,
'locationGPSLat': self.gpsLatitude,
'locationGPSLng': self.gpsLongitude,
'locationRoutingAreasID': self.routing_area_ids,
'locationSubnetsID': self.subnet_ids
}
return json.dumps(json_obj) |
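
A sketch of round-tripping the payload; construction of the Location object is elided and some_location is a placeholder name:

import json

payload = some_location.location_2_json()
print(json.loads(payload)["locationName"])
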
def calculate_size(name, sequence):
""" Calculates the request payload size"""
data_size = 0
data_size += calculate_size_str(name)
data_size += LONG_SIZE_IN_BYTES
    return data_size | Calculates the request payload size | Below is the instruction that describes the task:
### Input:
Calculates the request payload size
### Response:
def calculate_size(name, sequence):
""" Calculates the request payload size"""
data_size = 0
data_size += calculate_size_str(name)
data_size += LONG_SIZE_IN_BYTES
return data_size |
def saccadic_momentum_effect(durations, forward_angle,
summary_stat=nanmean):
"""
Computes the mean fixation duration at forward angles.
"""
durations_per_da = np.nan * np.ones((len(e_angle) - 1,))
for i, (bo, b1) in enumerate(zip(e_angle[:-1], e_angle[1:])):
idx = (
bo <= forward_angle) & (
forward_angle < b1) & (
~np.isnan(durations))
durations_per_da[i] = summary_stat(durations[idx])
    return durations_per_da | Computes the mean fixation duration at forward angles. | Below is the instruction that describes the task:
### Input:
Computes the mean fixation duration at forward angles.
### Response:
def saccadic_momentum_effect(durations, forward_angle,
summary_stat=nanmean):
"""
Computes the mean fixation duration at forward angles.
"""
durations_per_da = np.nan * np.ones((len(e_angle) - 1,))
for i, (bo, b1) in enumerate(zip(e_angle[:-1], e_angle[1:])):
idx = (
bo <= forward_angle) & (
forward_angle < b1) & (
~np.isnan(durations))
durations_per_da[i] = summary_stat(durations[idx])
return durations_per_da |
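
A synthetic-data sketch; note that e_angle is a module-level array of forward-angle bin edges assumed by the function, and the fake data below is illustrative:

import numpy as np

durations = np.random.gamma(2.0, 100.0, size=1000)        # fake fixation durations (ms)
forward_angle = np.random.uniform(0.0, 180.0, size=1000)  # fake forward angles (deg)
curve = saccadic_momentum_effect(durations, forward_angle)
print(curve.shape)                                        # one mean duration per angle bin
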
def get_json_body(self, required=None, validators=None):
"""Get JSON from the request body
:param required: optionally provide a list of keys that should be
in the JSON body (raises a 400 HTTPError if any are missing)
    :param validators: optionally provide a dictionary of items that should
be in the body with a method that validates the item.
The method must be synchronous and return a boolean, no exceptions.
:raises: HTTPError
"""
content_type = self.request.headers.get('Content-Type',
'application/json')
if 'application/json' not in content_type.split(';'):
raise HTTPError(415, 'Content-Type should be application/json')
if not self.request.body:
error = 'Request body is empty'
logging.warning(error)
raise HTTPError(400, error)
try:
body = json.loads(self.request.body)
except (ValueError, TypeError):
error = 'Error parsing JSON'
logging.warning(error)
raise HTTPError(400, error)
if required:
_check_required(body, required)
if validators:
_validate(body, validators)
return body | Get JSON from the request body
:param required: optionally provide a list of keys that should be
in the JSON body (raises a 400 HTTPError if any are missing)
    :param validators: optionally provide a dictionary of items that should
be in the body with a method that validates the item.
The method must be synchronous and return a boolean, no exceptions.
    :raises: HTTPError | Below is the instruction that describes the task:
### Input:
Get JSON from the request body
:param required: optionally provide a list of keys that should be
in the JSON body (raises a 400 HTTPError if any are missing)
    :param validators: optionally provide a dictionary of items that should
be in the body with a method that validates the item.
The method must be synchronous and return a boolean, no exceptions.
:raises: HTTPError
### Response:
def get_json_body(self, required=None, validators=None):
"""Get JSON from the request body
:param required: optionally provide a list of keys that should be
in the JSON body (raises a 400 HTTPError if any are missing)
    :param validators: optionally provide a dictionary of items that should
be in the body with a method that validates the item.
The method must be synchronous and return a boolean, no exceptions.
:raises: HTTPError
"""
content_type = self.request.headers.get('Content-Type',
'application/json')
if 'application/json' not in content_type.split(';'):
raise HTTPError(415, 'Content-Type should be application/json')
if not self.request.body:
error = 'Request body is empty'
logging.warning(error)
raise HTTPError(400, error)
try:
body = json.loads(self.request.body)
except (ValueError, TypeError):
error = 'Error parsing JSON'
logging.warning(error)
raise HTTPError(400, error)
if required:
_check_required(body, required)
if validators:
_validate(body, validators)
return body |
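
A sketch of use inside a tornado RequestHandler; the handler, field name and length limit are illustrative:

# def post(self):
#     body = self.get_json_body(
#         required=['name'],
#         validators={'name': lambda v: isinstance(v, str) and 0 < len(v) < 64})
#     self.write({'received': body['name']})
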
def detection_multiplot(stream, template, times, streamcolour='k',
templatecolour='r', size=(10.5, 7.5), **kwargs):
"""
Plot a stream of data with a template on top of it at detection times.
:type stream: obspy.core.stream.Stream
:param stream: Stream of data to be plotted as the background.
:type template: obspy.core.stream.Stream
:param template: Template to be plotted on top of the base stream.
:type times: list
:param times: list of detection times, one for each event
:type streamcolour: str
:param streamcolour: String of matplotlib colour types for the stream
:type templatecolour: str
:param templatecolour: Colour to plot the template in.
:type size: tuple
:param size: Figure size.
:returns: :class:`matplotlib.figure.Figure`
.. rubric:: Example
>>> from obspy import read, read_events
>>> import os
>>> from eqcorrscan.core import template_gen
>>> from eqcorrscan.utils.plotting import detection_multiplot
>>> # Get the path to the test data
>>> import eqcorrscan
>>> import os
>>> TEST_PATH = os.path.dirname(eqcorrscan.__file__) + '/tests/test_data'
>>>
>>> test_file = os.path.join(TEST_PATH, 'REA',
... 'TEST_', '01-0411-15L.S201309')
>>> test_wavefile = os.path.join(
... TEST_PATH, 'WAV', 'TEST_', '2013-09-01-0410-35.DFDPC_024_00')
>>> event = read_events(test_file)[0]
>>> st = read(test_wavefile)
>>> st = st.filter('bandpass', freqmin=2.0, freqmax=15.0)
>>> for tr in st:
... tr = tr.trim(tr.stats.starttime + 30, tr.stats.endtime - 30)
... # Hack around seisan 2-letter channel naming
... tr.stats.channel = tr.stats.channel[0] + tr.stats.channel[-1]
>>> template = template_gen._template_gen(event.picks, st, 2)
>>> times = [min([pk.time -0.05 for pk in event.picks])]
>>> detection_multiplot(stream=st, template=template,
... times=times) # doctest: +SKIP
.. plot::
from obspy import read, read_events
import os
from eqcorrscan.core import template_gen
from eqcorrscan.utils.plotting import detection_multiplot
test_file = os.path.realpath('../../..') + \
'/tests/test_data/REA/TEST_/01-0411-15L.S201309'
test_wavefile = os.path.realpath('../../..') +\
'/tests/test_data/WAV/TEST_/' +\
'2013-09-01-0410-35.DFDPC_024_00'
event = read_events(test_file)[0]
st = read(test_wavefile)
st.filter('bandpass', freqmin=2.0, freqmax=15.0)
for tr in st:
tr.trim(tr.stats.starttime + 30, tr.stats.endtime - 30)
tr.stats.channel = tr.stats.channel[0] + tr.stats.channel[-1]
template = template_gen._template_gen(event.picks, st, 2)
times = [min([pk.time -0.05 for pk in event.picks])]
detection_multiplot(stream=st, template=template,
times=times)
"""
import matplotlib.pyplot as plt
# Only take traces that match in both accounting for streams shorter than
# templates
template_stachans = [(tr.stats.station, tr.stats.channel)
for tr in template]
stream_stachans = [(tr.stats.station, tr.stats.channel)
for tr in stream]
temp = Stream([tr for tr in template
if (tr.stats.station,
tr.stats.channel) in stream_stachans])
st = Stream([tr for tr in stream
if (tr.stats.station,
tr.stats.channel) in template_stachans])
ntraces = len(temp)
fig, axes = plt.subplots(ntraces, 1, sharex=True, figsize=size)
if len(temp) > 1:
axes = axes.ravel()
mintime = min([tr.stats.starttime for tr in temp])
temp.sort(keys=['starttime'])
for i, template_tr in enumerate(temp):
if len(temp) > 1:
axis = axes[i]
else:
axis = axes
image = st.select(station=template_tr.stats.station,
channel='*' + template_tr.stats.channel[-1])
if not image:
msg = ' '.join(['No data for', template_tr.stats.station,
template_tr.stats.channel])
print(msg)
continue
image = image.merge()[0]
# Downsample if needed
if image.stats.sampling_rate > 20 and image.stats.npts > 10000:
image.decimate(int(image.stats.sampling_rate // 20))
template_tr.decimate(int(template_tr.stats.sampling_rate // 20))
# Get a list of datetime objects
image_times = [image.stats.starttime.datetime +
dt.timedelta((j * image.stats.delta) / 86400)
for j in range(len(image.data))]
axis.plot(image_times, image.data / max(image.data),
streamcolour, linewidth=1.2)
for time in times:
lagged_time = UTCDateTime(time) + (template_tr.stats.starttime -
mintime)
lagged_time = lagged_time.datetime
template_times = [lagged_time +
dt.timedelta((j * template_tr.stats.delta) /
86400)
for j in range(len(template_tr.data))]
# Normalize the template according to the data detected in
try:
normalizer = max(image.data[int((template_times[0] -
image_times[0]).
total_seconds() /
image.stats.delta):
int((template_times[-1] -
image_times[0]).
total_seconds() /
image.stats.delta)] /
max(image.data))
except ValueError:
# Occurs when there is no data in the image at this time...
normalizer = max(image.data)
normalizer /= max(template_tr.data)
axis.plot(template_times,
template_tr.data * normalizer,
templatecolour, linewidth=1.2)
ylab = '.'.join([template_tr.stats.station,
template_tr.stats.channel])
axis.set_ylabel(ylab, rotation=0,
horizontalalignment='right')
if len(template) > 1:
axes[len(axes) - 1].set_xlabel('Time')
else:
axis.set_xlabel('Time')
plt.subplots_adjust(hspace=0, left=0.175, right=0.95, bottom=0.07)
plt.xticks(rotation=10)
fig = _finalise_figure(fig=fig, **kwargs) # pragma: no cover
return fig | Plot a stream of data with a template on top of it at detection times.
:type stream: obspy.core.stream.Stream
:param stream: Stream of data to be plotted as the background.
:type template: obspy.core.stream.Stream
:param template: Template to be plotted on top of the base stream.
:type times: list
:param times: list of detection times, one for each event
:type streamcolour: str
:param streamcolour: String of matplotlib colour types for the stream
:type templatecolour: str
:param templatecolour: Colour to plot the template in.
:type size: tuple
:param size: Figure size.
:returns: :class:`matplotlib.figure.Figure`
.. rubric:: Example
>>> from obspy import read, read_events
>>> import os
>>> from eqcorrscan.core import template_gen
>>> from eqcorrscan.utils.plotting import detection_multiplot
>>> # Get the path to the test data
>>> import eqcorrscan
>>> import os
>>> TEST_PATH = os.path.dirname(eqcorrscan.__file__) + '/tests/test_data'
>>>
>>> test_file = os.path.join(TEST_PATH, 'REA',
... 'TEST_', '01-0411-15L.S201309')
>>> test_wavefile = os.path.join(
... TEST_PATH, 'WAV', 'TEST_', '2013-09-01-0410-35.DFDPC_024_00')
>>> event = read_events(test_file)[0]
>>> st = read(test_wavefile)
>>> st = st.filter('bandpass', freqmin=2.0, freqmax=15.0)
>>> for tr in st:
... tr = tr.trim(tr.stats.starttime + 30, tr.stats.endtime - 30)
... # Hack around seisan 2-letter channel naming
... tr.stats.channel = tr.stats.channel[0] + tr.stats.channel[-1]
>>> template = template_gen._template_gen(event.picks, st, 2)
>>> times = [min([pk.time -0.05 for pk in event.picks])]
>>> detection_multiplot(stream=st, template=template,
... times=times) # doctest: +SKIP
.. plot::
from obspy import read, read_events
import os
from eqcorrscan.core import template_gen
from eqcorrscan.utils.plotting import detection_multiplot
test_file = os.path.realpath('../../..') + \
'/tests/test_data/REA/TEST_/01-0411-15L.S201309'
test_wavefile = os.path.realpath('../../..') +\
'/tests/test_data/WAV/TEST_/' +\
'2013-09-01-0410-35.DFDPC_024_00'
event = read_events(test_file)[0]
st = read(test_wavefile)
st.filter('bandpass', freqmin=2.0, freqmax=15.0)
for tr in st:
tr.trim(tr.stats.starttime + 30, tr.stats.endtime - 30)
tr.stats.channel = tr.stats.channel[0] + tr.stats.channel[-1]
template = template_gen._template_gen(event.picks, st, 2)
times = [min([pk.time -0.05 for pk in event.picks])]
detection_multiplot(stream=st, template=template,
                            times=times) | Below is the instruction that describes the task:
### Input:
Plot a stream of data with a template on top of it at detection times.
:type stream: obspy.core.stream.Stream
:param stream: Stream of data to be plotted as the background.
:type template: obspy.core.stream.Stream
:param template: Template to be plotted on top of the base stream.
:type times: list
:param times: list of detection times, one for each event
:type streamcolour: str
:param streamcolour: String of matplotlib colour types for the stream
:type templatecolour: str
:param templatecolour: Colour to plot the template in.
:type size: tuple
:param size: Figure size.
:returns: :class:`matplotlib.figure.Figure`
.. rubric:: Example
>>> from obspy import read, read_events
>>> import os
>>> from eqcorrscan.core import template_gen
>>> from eqcorrscan.utils.plotting import detection_multiplot
>>> # Get the path to the test data
>>> import eqcorrscan
>>> import os
>>> TEST_PATH = os.path.dirname(eqcorrscan.__file__) + '/tests/test_data'
>>>
>>> test_file = os.path.join(TEST_PATH, 'REA',
... 'TEST_', '01-0411-15L.S201309')
>>> test_wavefile = os.path.join(
... TEST_PATH, 'WAV', 'TEST_', '2013-09-01-0410-35.DFDPC_024_00')
>>> event = read_events(test_file)[0]
>>> st = read(test_wavefile)
>>> st = st.filter('bandpass', freqmin=2.0, freqmax=15.0)
>>> for tr in st:
... tr = tr.trim(tr.stats.starttime + 30, tr.stats.endtime - 30)
... # Hack around seisan 2-letter channel naming
... tr.stats.channel = tr.stats.channel[0] + tr.stats.channel[-1]
>>> template = template_gen._template_gen(event.picks, st, 2)
>>> times = [min([pk.time -0.05 for pk in event.picks])]
>>> detection_multiplot(stream=st, template=template,
... times=times) # doctest: +SKIP
.. plot::
from obspy import read, read_events
import os
from eqcorrscan.core import template_gen
from eqcorrscan.utils.plotting import detection_multiplot
test_file = os.path.realpath('../../..') + \
'/tests/test_data/REA/TEST_/01-0411-15L.S201309'
test_wavefile = os.path.realpath('../../..') +\
'/tests/test_data/WAV/TEST_/' +\
'2013-09-01-0410-35.DFDPC_024_00'
event = read_events(test_file)[0]
st = read(test_wavefile)
st.filter('bandpass', freqmin=2.0, freqmax=15.0)
for tr in st:
tr.trim(tr.stats.starttime + 30, tr.stats.endtime - 30)
tr.stats.channel = tr.stats.channel[0] + tr.stats.channel[-1]
template = template_gen._template_gen(event.picks, st, 2)
times = [min([pk.time -0.05 for pk in event.picks])]
detection_multiplot(stream=st, template=template,
times=times)
### Response:
def detection_multiplot(stream, template, times, streamcolour='k',
templatecolour='r', size=(10.5, 7.5), **kwargs):
"""
Plot a stream of data with a template on top of it at detection times.
:type stream: obspy.core.stream.Stream
:param stream: Stream of data to be plotted as the background.
:type template: obspy.core.stream.Stream
:param template: Template to be plotted on top of the base stream.
:type times: list
:param times: list of detection times, one for each event
:type streamcolour: str
:param streamcolour: String of matplotlib colour types for the stream
:type templatecolour: str
:param templatecolour: Colour to plot the template in.
:type size: tuple
:param size: Figure size.
:returns: :class:`matplotlib.figure.Figure`
.. rubric:: Example
>>> from obspy import read, read_events
>>> import os
>>> from eqcorrscan.core import template_gen
>>> from eqcorrscan.utils.plotting import detection_multiplot
>>> # Get the path to the test data
>>> import eqcorrscan
>>> import os
>>> TEST_PATH = os.path.dirname(eqcorrscan.__file__) + '/tests/test_data'
>>>
>>> test_file = os.path.join(TEST_PATH, 'REA',
... 'TEST_', '01-0411-15L.S201309')
>>> test_wavefile = os.path.join(
... TEST_PATH, 'WAV', 'TEST_', '2013-09-01-0410-35.DFDPC_024_00')
>>> event = read_events(test_file)[0]
>>> st = read(test_wavefile)
>>> st = st.filter('bandpass', freqmin=2.0, freqmax=15.0)
>>> for tr in st:
... tr = tr.trim(tr.stats.starttime + 30, tr.stats.endtime - 30)
... # Hack around seisan 2-letter channel naming
... tr.stats.channel = tr.stats.channel[0] + tr.stats.channel[-1]
>>> template = template_gen._template_gen(event.picks, st, 2)
>>> times = [min([pk.time -0.05 for pk in event.picks])]
>>> detection_multiplot(stream=st, template=template,
... times=times) # doctest: +SKIP
.. plot::
from obspy import read, read_events
import os
from eqcorrscan.core import template_gen
from eqcorrscan.utils.plotting import detection_multiplot
test_file = os.path.realpath('../../..') + \
'/tests/test_data/REA/TEST_/01-0411-15L.S201309'
test_wavefile = os.path.realpath('../../..') +\
'/tests/test_data/WAV/TEST_/' +\
'2013-09-01-0410-35.DFDPC_024_00'
event = read_events(test_file)[0]
st = read(test_wavefile)
st.filter('bandpass', freqmin=2.0, freqmax=15.0)
for tr in st:
tr.trim(tr.stats.starttime + 30, tr.stats.endtime - 30)
tr.stats.channel = tr.stats.channel[0] + tr.stats.channel[-1]
template = template_gen._template_gen(event.picks, st, 2)
times = [min([pk.time -0.05 for pk in event.picks])]
detection_multiplot(stream=st, template=template,
times=times)
"""
import matplotlib.pyplot as plt
# Only take traces that match in both accounting for streams shorter than
# templates
template_stachans = [(tr.stats.station, tr.stats.channel)
for tr in template]
stream_stachans = [(tr.stats.station, tr.stats.channel)
for tr in stream]
temp = Stream([tr for tr in template
if (tr.stats.station,
tr.stats.channel) in stream_stachans])
st = Stream([tr for tr in stream
if (tr.stats.station,
tr.stats.channel) in template_stachans])
ntraces = len(temp)
fig, axes = plt.subplots(ntraces, 1, sharex=True, figsize=size)
if len(temp) > 1:
axes = axes.ravel()
mintime = min([tr.stats.starttime for tr in temp])
temp.sort(keys=['starttime'])
for i, template_tr in enumerate(temp):
if len(temp) > 1:
axis = axes[i]
else:
axis = axes
image = st.select(station=template_tr.stats.station,
channel='*' + template_tr.stats.channel[-1])
if not image:
msg = ' '.join(['No data for', template_tr.stats.station,
template_tr.stats.channel])
print(msg)
continue
image = image.merge()[0]
# Downsample if needed
if image.stats.sampling_rate > 20 and image.stats.npts > 10000:
image.decimate(int(image.stats.sampling_rate // 20))
template_tr.decimate(int(template_tr.stats.sampling_rate // 20))
# Get a list of datetime objects
image_times = [image.stats.starttime.datetime +
dt.timedelta((j * image.stats.delta) / 86400)
for j in range(len(image.data))]
axis.plot(image_times, image.data / max(image.data),
streamcolour, linewidth=1.2)
for time in times:
lagged_time = UTCDateTime(time) + (template_tr.stats.starttime -
mintime)
lagged_time = lagged_time.datetime
template_times = [lagged_time +
dt.timedelta((j * template_tr.stats.delta) /
86400)
for j in range(len(template_tr.data))]
            # Normalize the template to the amplitude of the image data within the detection window
try:
normalizer = max(image.data[int((template_times[0] -
image_times[0]).
total_seconds() /
image.stats.delta):
int((template_times[-1] -
image_times[0]).
total_seconds() /
image.stats.delta)] /
max(image.data))
except ValueError:
# Occurs when there is no data in the image at this time...
normalizer = max(image.data)
normalizer /= max(template_tr.data)
axis.plot(template_times,
template_tr.data * normalizer,
templatecolour, linewidth=1.2)
ylab = '.'.join([template_tr.stats.station,
template_tr.stats.channel])
axis.set_ylabel(ylab, rotation=0,
horizontalalignment='right')
    if len(temp) > 1:
axes[len(axes) - 1].set_xlabel('Time')
else:
axis.set_xlabel('Time')
plt.subplots_adjust(hspace=0, left=0.175, right=0.95, bottom=0.07)
plt.xticks(rotation=10)
fig = _finalise_figure(fig=fig, **kwargs) # pragma: no cover
return fig |
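A note on the time axis used above: each sample index j is converted to a datetime by adding j * delta seconds, expressed as a fraction of a day. A minimal, standard-library-only sketch of that construction, with made-up sampling values:
import datetime as dt

# Hypothetical trace parameters, not taken from the example above.
starttime = dt.datetime(2013, 9, 1, 4, 10, 35)
delta = 0.01   # sample spacing in seconds (100 Hz)
npts = 5

# Same construction as image_times above: each sample is offset by
# j * delta seconds, written as a fraction of a day for dt.timedelta.
times = [starttime + dt.timedelta((j * delta) / 86400.0)
         for j in range(npts)]
print(times[0], times[-1])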
def add_items(self, items):
'''Adds ListItems to the XBMC interface. Each item in the
provided list should either be instances of xbmcswift2.ListItem,
or regular dictionaries that will be passed to
xbmcswift2.ListItem.from_dict. Returns the list of ListItems.
:param items: An iterable of items where each item is either a
dictionary with keys/values suitable for passing to
:meth:`xbmcswift2.ListItem.from_dict` or an instance of
:class:`xbmcswift2.ListItem`.
'''
_items = [self._listitemify(item) for item in items]
tuples = [item.as_tuple() for item in _items]
xbmcplugin.addDirectoryItems(self.handle, tuples, len(tuples))
# We need to keep track internally of added items so we can return them
# all at the end for testing purposes
self.added_items.extend(_items)
# Possibly need an if statement if only for debug mode
return _items | Adds ListItems to the XBMC interface. Each item in the
provided list should either be instances of xbmcswift2.ListItem,
or regular dictionaries that will be passed to
xbmcswift2.ListItem.from_dict. Returns the list of ListItems.
:param items: An iterable of items where each item is either a
dictionary with keys/values suitable for passing to
:meth:`xbmcswift2.ListItem.from_dict` or an instance of
:class:`xbmcswift2.ListItem`. | Below is the the instruction that describes the task:
### Input:
Adds ListItems to the XBMC interface. Each item in the
provided list should either be instances of xbmcswift2.ListItem,
or regular dictionaries that will be passed to
xbmcswift2.ListItem.from_dict. Returns the list of ListItems.
:param items: An iterable of items where each item is either a
dictionary with keys/values suitable for passing to
:meth:`xbmcswift2.ListItem.from_dict` or an instance of
:class:`xbmcswift2.ListItem`.
### Response:
def add_items(self, items):
'''Adds ListItems to the XBMC interface. Each item in the
provided list should either be instances of xbmcswift2.ListItem,
or regular dictionaries that will be passed to
xbmcswift2.ListItem.from_dict. Returns the list of ListItems.
:param items: An iterable of items where each item is either a
dictionary with keys/values suitable for passing to
:meth:`xbmcswift2.ListItem.from_dict` or an instance of
:class:`xbmcswift2.ListItem`.
'''
_items = [self._listitemify(item) for item in items]
tuples = [item.as_tuple() for item in _items]
xbmcplugin.addDirectoryItems(self.handle, tuples, len(tuples))
# We need to keep track internally of added items so we can return them
# all at the end for testing purposes
self.added_items.extend(_items)
# Possibly need an if statement if only for debug mode
return _items |
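add_items relies on self._listitemify, which is not shown here; presumably it promotes plain dicts to ListItem instances via from_dict and passes ready-made items through. A dependency-free sketch of that normalisation (FakeListItem and its fields are invented for illustration, not the Kodi API):
class FakeListItem:
    """Stand-in for xbmcswift2.ListItem, for illustration only."""
    def __init__(self, label=None, path=None):
        self.label = label
        self.path = path

    @classmethod
    def from_dict(cls, **kwargs):
        return cls(**kwargs)

def listitemify(item):
    # Plain dicts are promoted to items; ready-made items pass through.
    if isinstance(item, dict):
        return FakeListItem.from_dict(**item)
    return item

items = [FakeListItem(label='Home', path='plugin://home'),
         {'label': 'Search', 'path': 'plugin://search'}]
print([listitemify(i).label for i in items])   # ['Home', 'Search']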
def get_template_names(self):
"""
Return a list of template names to be used for the view.
"""
year = self.get_archive_part_value('year')
week = self.get_archive_part_value('week')
month = self.get_archive_part_value('month')
day = self.get_archive_part_value('day')
templates = []
path = 'zinnia/archives'
template_names = self.get_default_base_template_names()
for template_name in template_names:
templates.extend([template_name,
'zinnia/%s' % template_name,
'%s/%s' % (path, template_name)])
if year:
for template_name in template_names:
templates.append(
'%s/%s/%s' % (path, year, template_name))
if week:
for template_name in template_names:
templates.extend([
'%s/week/%s/%s' % (path, week, template_name),
'%s/%s/week/%s/%s' % (path, year, week, template_name)])
if month:
for template_name in template_names:
templates.extend([
'%s/month/%s/%s' % (path, month, template_name),
'%s/%s/month/%s/%s' % (path, year, month, template_name)])
if day:
for template_name in template_names:
templates.extend([
'%s/day/%s/%s' % (path, day, template_name),
'%s/%s/day/%s/%s' % (path, year, day, template_name),
'%s/month/%s/day/%s/%s' % (path, month, day,
template_name),
'%s/%s/%s/%s/%s' % (path, year, month, day,
template_name)])
if self.template_name is not None:
templates.append(self.template_name)
templates.reverse()
return templates | Return a list of template names to be used for the view. | Below is the the instruction that describes the task:
### Input:
Return a list of template names to be used for the view.
### Response:
def get_template_names(self):
"""
Return a list of template names to be used for the view.
"""
year = self.get_archive_part_value('year')
week = self.get_archive_part_value('week')
month = self.get_archive_part_value('month')
day = self.get_archive_part_value('day')
templates = []
path = 'zinnia/archives'
template_names = self.get_default_base_template_names()
for template_name in template_names:
templates.extend([template_name,
'zinnia/%s' % template_name,
'%s/%s' % (path, template_name)])
if year:
for template_name in template_names:
templates.append(
'%s/%s/%s' % (path, year, template_name))
if week:
for template_name in template_names:
templates.extend([
'%s/week/%s/%s' % (path, week, template_name),
'%s/%s/week/%s/%s' % (path, year, week, template_name)])
if month:
for template_name in template_names:
templates.extend([
'%s/month/%s/%s' % (path, month, template_name),
'%s/%s/month/%s/%s' % (path, year, month, template_name)])
if day:
for template_name in template_names:
templates.extend([
'%s/day/%s/%s' % (path, day, template_name),
'%s/%s/day/%s/%s' % (path, year, day, template_name),
'%s/month/%s/day/%s/%s' % (path, month, day,
template_name),
'%s/%s/%s/%s/%s' % (path, year, month, day,
template_name)])
if self.template_name is not None:
templates.append(self.template_name)
templates.reverse()
return templates |
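To see what the cascade yields, the same path-building can be rehearsed outside Django for one concrete date; the base template name below is an assumption and the week branch is omitted for brevity:
path = 'zinnia/archives'
year, month, day = '2024', '05', '17'
template_names = ['entry_archive.html']     # assumed default base names

templates = []
for name in template_names:
    templates.extend([name, 'zinnia/%s' % name, '%s/%s' % (path, name)])
for name in template_names:
    templates.append('%s/%s/%s' % (path, year, name))
for name in template_names:
    templates.extend(['%s/month/%s/%s' % (path, month, name),
                      '%s/%s/month/%s/%s' % (path, year, month, name)])
for name in template_names:
    templates.extend(['%s/day/%s/%s' % (path, day, name),
                      '%s/%s/day/%s/%s' % (path, year, day, name),
                      '%s/month/%s/day/%s/%s' % (path, month, day, name),
                      '%s/%s/%s/%s/%s' % (path, year, month, day, name)])
templates.reverse()
print(templates[0])   # 'zinnia/archives/2024/05/17/entry_archive.html', most specific first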
def node_set_to_surface(self, tag):
"""
Converts a node set to surface.
"""
# Create a dummy node with label 0
nodes = self.nodes.copy()
dummy = nodes.iloc[0].copy()
dummy["coords"] *= np.nan
dummy["sets"] = True
nodes.loc[0] = dummy
# Getting element surfaces
element_surfaces= self.split("surfaces").unstack()
# killer hack !
surf = pd.DataFrame(
nodes.sets[tag].loc[element_surfaces.values.flatten()]
.values.reshape(element_surfaces.shape)
.prod(axis = 1)
.astype(np.bool),
index = element_surfaces.index).unstack().fillna(False)
for k in surf.keys():
self.elements["surfaces", tag, "f{0}".format(k[1]+1) ] = surf.loc[:, k] | Converts a node set to surface. | Below is the the instruction that describes the task:
### Input:
Converts a node set to surface.
### Response:
def node_set_to_surface(self, tag):
"""
Converts a node set to surface.
"""
# Create a dummy node with label 0
nodes = self.nodes.copy()
dummy = nodes.iloc[0].copy()
dummy["coords"] *= np.nan
dummy["sets"] = True
nodes.loc[0] = dummy
# Getting element surfaces
element_surfaces= self.split("surfaces").unstack()
# killer hack !
surf = pd.DataFrame(
nodes.sets[tag].loc[element_surfaces.values.flatten()]
.values.reshape(element_surfaces.shape)
.prod(axis = 1)
.astype(np.bool),
index = element_surfaces.index).unstack().fillna(False)
for k in surf.keys():
self.elements["surfaces", tag, "f{0}".format(k[1]+1) ] = surf.loc[:, k] |
def copy(self, empty=False):
""" Create a copy of the graph (by default with nodes and edges).
"""
g = graph(self.layout.n, self.distance, self.layout.type)
g.layout = self.layout.copy(g)
g.styles = self.styles.copy(g)
g.events = self.events.copy(g)
if not empty:
for n in self.nodes:
g.add_node(n.id, n.r, n.style, n.category, n.label, (n == self.root), n.__dict__)
for e in self.edges:
g.add_edge(e.node1.id, e.node2.id, e.weight, e.length, e.label, e.__dict__)
return g | Create a copy of the graph (by default with nodes and edges). | Below is the the instruction that describes the task:
### Input:
Create a copy of the graph (by default with nodes and edges).
### Response:
def copy(self, empty=False):
""" Create a copy of the graph (by default with nodes and edges).
"""
g = graph(self.layout.n, self.distance, self.layout.type)
g.layout = self.layout.copy(g)
g.styles = self.styles.copy(g)
g.events = self.events.copy(g)
if not empty:
for n in self.nodes:
g.add_node(n.id, n.r, n.style, n.category, n.label, (n == self.root), n.__dict__)
for e in self.edges:
g.add_edge(e.node1.id, e.node2.id, e.weight, e.length, e.label, e.__dict__)
return g |
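The pattern is: build an empty container, copy the helper objects with a back-reference, then optionally re-add nodes and edges so the copy shares no mutable state. The same idea on a much simpler dict-based graph, standard library only:
class TinyGraph:
    def __init__(self):
        self.nodes = {}            # id -> attribute dict
        self.edges = []            # (id1, id2, weight) tuples

    def add_node(self, node_id, **attrs):
        self.nodes[node_id] = dict(attrs)

    def add_edge(self, id1, id2, weight=1.0):
        self.edges.append((id1, id2, weight))

    def copy(self, empty=False):
        g = TinyGraph()
        if not empty:
            for node_id, attrs in self.nodes.items():
                g.add_node(node_id, **attrs)     # re-add rather than alias
            for id1, id2, weight in self.edges:
                g.add_edge(id1, id2, weight)
        return g

g = TinyGraph()
g.add_node('a', label='A')
g.add_node('b', label='B')
g.add_edge('a', 'b', 2.0)
h = g.copy()
h.add_node('c')
print(len(g.nodes), len(h.nodes))   # 2 3, the copy is independent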
def get_X_gradients(self, X):
"""Get the gradients of the posterior distribution of X in its specific form."""
return X.mean.gradient, X.variance.gradient, X.binary_prob.gradient | Get the gradients of the posterior distribution of X in its specific form. | Below is the the instruction that describes the task:
### Input:
Get the gradients of the posterior distribution of X in its specific form.
### Response:
def get_X_gradients(self, X):
"""Get the gradients of the posterior distribution of X in its specific form."""
return X.mean.gradient, X.variance.gradient, X.binary_prob.gradient |
def add_inspection(name):
"""
Add a Jishaku object inspection
"""
# create the real decorator
def inspection_inner(func):
"""
Jishaku inspection decorator
"""
# pylint: disable=inconsistent-return-statements
# create an encapsulated version of the inspection that swallows exceptions
@functools.wraps(func)
def encapsulated(*args, **kwargs):
try:
return func(*args, **kwargs)
except (TypeError, AttributeError, ValueError, OSError):
return
INSPECTIONS.append((name, encapsulated))
return func
return inspection_inner | Add a Jishaku object inspection | Below is the the instruction that describes the task:
### Input:
Add a Jishaku object inspection
### Response:
def add_inspection(name):
"""
Add a Jishaku object inspection
"""
# create the real decorator
def inspection_inner(func):
"""
Jishaku inspection decorator
"""
# pylint: disable=inconsistent-return-statements
# create an encapsulated version of the inspection that swallows exceptions
@functools.wraps(func)
def encapsulated(*args, **kwargs):
try:
return func(*args, **kwargs)
except (TypeError, AttributeError, ValueError, OSError):
return
INSPECTIONS.append((name, encapsulated))
return func
return inspection_inner |
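The decorator does two things: it registers a wrapped, exception-swallowing version of the function under a display name and returns the original function unchanged. The same pattern reduced to a self-contained sketch (names are illustrative, not Jishaku's):
import functools

REGISTRY = []

def register(name):
    def outer(func):
        @functools.wraps(func)
        def safe(*args, **kwargs):
            try:
                return func(*args, **kwargs)
            except (TypeError, AttributeError, ValueError, OSError):
                return None          # swallow "inspection failed" errors
        REGISTRY.append((name, safe))
        return func                  # the undecorated function is handed back
    return outer

@register("Length")
def length_of(obj):
    return len(obj)

print([name for name, _ in REGISTRY])   # ['Length']
print(REGISTRY[0][1](42))               # None, the TypeError is swallowed
print(REGISTRY[0][1]("abc"))            # 3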
def gen_cannon_grad_spec(choose, coeffs, pivots):
""" Generate Cannon gradient spectra
Parameters
----------
    choose: which label to vary, given as its atomic number
        (0 for Teff, e.g. 6 for C, 7 for N)
    coeffs: Cannon model coefficient array, one row per pixel
    pivots: label pivots passed to train_model._get_lvec
"""
base_labels = [4800, 2.5, 0.03, 0.10, -0.17, -0.17, 0, -0.16,
-0.13, -0.15, 0.13, 0.08, 0.17, -0.062]
label_names = np.array(
['TEFF', 'LOGG', 'AK', 'Al', 'Ca', 'C', 'Fe', 'Mg', 'Mn',
'Ni', 'N', 'O', 'Si', 'Ti'])
label_atnum = np.array(
[0, 1, -1, 13, 20, 6, 26, 12, 25, 28, 7, 8, 14, 22])
# Generate Cannon gradient spectra
ind = np.where(label_atnum==choose)[0][0]
low_lab = copy.copy(base_labels)
high = base_labels[ind]
if choose > 0:
low = base_labels[ind] - 0.2
else: #temperature
if choose != 0: print("warning...")
low = base_labels[ind] - 200
low_lab[ind] = low
lvec = (train_model._get_lvec(np.array([low_lab]), pivots))[0]
model_low = np.dot(coeffs, lvec)
lvec = (train_model._get_lvec(np.array([base_labels]), pivots))[0]
model_high = np.dot(coeffs, lvec)
grad_spec = (model_high - model_low) / (high - low)
return grad_spec | Generate Cannon gradient spectra
Parameters
----------
choose: which label to vary, given as its atomic number
    (0 for Teff, e.g. 6 for C, 7 for N)
coeffs: Cannon model coefficient array, one row per pixel
pivots: label pivots passed to train_model._get_lvec
### Input:
Generate Cannon gradient spectra
Parameters
----------
choose: which label to vary, given as its atomic number
    (0 for Teff, e.g. 6 for C, 7 for N)
coeffs: Cannon model coefficient array, one row per pixel
pivots: label pivots passed to train_model._get_lvec
### Response:
def gen_cannon_grad_spec(choose, coeffs, pivots):
""" Generate Cannon gradient spectra
Parameters
----------
labels: default values for [teff, logg, feh, cfe, nfe, afe, ak]
choose: val of cfe or nfe, whatever you're varying
low: lowest val of cfe or nfe, whatever you're varying
high: highest val of cfe or nfe, whatever you're varying
"""
base_labels = [4800, 2.5, 0.03, 0.10, -0.17, -0.17, 0, -0.16,
-0.13, -0.15, 0.13, 0.08, 0.17, -0.062]
label_names = np.array(
['TEFF', 'LOGG', 'AK', 'Al', 'Ca', 'C', 'Fe', 'Mg', 'Mn',
'Ni', 'N', 'O', 'Si', 'Ti'])
label_atnum = np.array(
[0, 1, -1, 13, 20, 6, 26, 12, 25, 28, 7, 8, 14, 22])
# Generate Cannon gradient spectra
ind = np.where(label_atnum==choose)[0][0]
low_lab = copy.copy(base_labels)
high = base_labels[ind]
if choose > 0:
low = base_labels[ind] - 0.2
else: #temperature
if choose != 0: print("warning...")
low = base_labels[ind] - 200
low_lab[ind] = low
lvec = (train_model._get_lvec(np.array([low_lab]), pivots))[0]
model_low = np.dot(coeffs, lvec)
lvec = (train_model._get_lvec(np.array([base_labels]), pivots))[0]
model_high = np.dot(coeffs, lvec)
grad_spec = (model_high - model_low) / (high - low)
return grad_spec |
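The gradient spectrum is a finite difference of two linear-model predictions divided by the label step. A numpy-only sketch of that arithmetic; toy_lvec is an invented stand-in, since the real train_model._get_lvec design vector is not reproduced here:
import numpy as np

def toy_lvec(labels, pivots):
    # Simplified label vector: constant term plus pivot-subtracted labels.
    return np.concatenate(([1.0], np.asarray(labels) - pivots))

rng = np.random.default_rng(0)
n_labels, n_pix = 3, 5
pivots = np.zeros(n_labels)
coeffs = rng.normal(size=(n_pix, n_labels + 1))   # one row per pixel

base = np.array([4800.0, 2.5, 0.0])   # e.g. Teff, logg, [Fe/H]
low = base.copy()
low[2] -= 0.2                         # perturb one label downwards

model_low = coeffs @ toy_lvec(low, pivots)
model_high = coeffs @ toy_lvec(base, pivots)
grad_spec = (model_high - model_low) / (base[2] - low[2])
print(grad_spec)                      # d(flux)/d(label), one value per pixel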
def inferURILocalSymbol(aUri):
"""
    From a URI returns a tuple (uri-last-bit, namespace)
Eg
from <'http://www.w3.org/2008/05/skos#something'>
==> ('something', 'http://www.w3.org/2008/05/skos')
from <'http://www.w3.org/2003/01/geo/wgs84_pos'> we extract
==> ('wgs84_pos', 'http://www.w3.org/2003/01/geo/')
"""
# stringa = aUri.__str__()
stringa = aUri
try:
ns = stringa.split("#")[0]
name = stringa.split("#")[1]
except:
if "/" in stringa:
ns = stringa.rsplit("/", 1)[0]
name = stringa.rsplit("/", 1)[1]
else:
ns = ""
name = stringa
    return (name, ns) | From a URI returns a tuple (uri-last-bit, namespace)
Eg
from <'http://www.w3.org/2008/05/skos#something'>
==> ('something', 'http://www.w3.org/2008/05/skos')
from <'http://www.w3.org/2003/01/geo/wgs84_pos'> we extract
==> ('wgs84_pos', 'http://www.w3.org/2003/01/geo/') | Below is the the instruction that describes the task:
### Input:
From a URI returns a tuple (uri-last-bit, namespace)
Eg
from <'http://www.w3.org/2008/05/skos#something'>
==> ('something', 'http://www.w3.org/2008/05/skos')
from <'http://www.w3.org/2003/01/geo/wgs84_pos'> we extract
==> ('wgs84_pos', 'http://www.w3.org/2003/01/geo/')
### Response:
def inferURILocalSymbol(aUri):
"""
    From a URI returns a tuple (uri-last-bit, namespace)
Eg
from <'http://www.w3.org/2008/05/skos#something'>
==> ('something', 'http://www.w3.org/2008/05/skos')
from <'http://www.w3.org/2003/01/geo/wgs84_pos'> we extract
==> ('wgs84_pos', 'http://www.w3.org/2003/01/geo/')
"""
# stringa = aUri.__str__()
stringa = aUri
try:
ns = stringa.split("#")[0]
name = stringa.split("#")[1]
except:
if "/" in stringa:
ns = stringa.rsplit("/", 1)[0]
name = stringa.rsplit("/", 1)[1]
else:
ns = ""
name = stringa
return (name, ns) |
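A quick check of the two documented cases, assuming inferURILocalSymbol as defined above is in scope. Note that rsplit drops the separator, so the namespace of the second URI comes back without the trailing slash shown in the docstring example:
print(inferURILocalSymbol('http://www.w3.org/2008/05/skos#something'))
# ('something', 'http://www.w3.org/2008/05/skos')
print(inferURILocalSymbol('http://www.w3.org/2003/01/geo/wgs84_pos'))
# ('wgs84_pos', 'http://www.w3.org/2003/01/geo')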
def get_dict(self, name, default=None):
"""Retrieves an environment variable value as a dictionary.
Args:
name (str): The case-insensitive, unprefixed variable name.
default: If provided, a default value will be returned
instead of throwing ``EnvironmentError``.
Returns:
dict: The environment variable's value as a ``dict``.
Raises:
EnvironmentError: If the environment variable does not
exist, and ``default`` was not provided.
"""
if name not in self:
if default is not None:
return default
raise EnvironmentError.not_found(self._prefix, name)
return dict(**self.get(name)) | Retrieves an environment variable value as a dictionary.
Args:
name (str): The case-insensitive, unprefixed variable name.
default: If provided, a default value will be returned
instead of throwing ``EnvironmentError``.
Returns:
dict: The environment variable's value as a ``dict``.
Raises:
EnvironmentError: If the environment variable does not
exist, and ``default`` was not provided. | Below is the the instruction that describes the task:
### Input:
Retrieves an environment variable value as a dictionary.
Args:
name (str): The case-insensitive, unprefixed variable name.
default: If provided, a default value will be returned
instead of throwing ``EnvironmentError``.
Returns:
dict: The environment variable's value as a ``dict``.
Raises:
EnvironmentError: If the environment variable does not
exist, and ``default`` was not provided.
### Response:
def get_dict(self, name, default=None):
"""Retrieves an environment variable value as a dictionary.
Args:
name (str): The case-insensitive, unprefixed variable name.
default: If provided, a default value will be returned
instead of throwing ``EnvironmentError``.
Returns:
dict: The environment variable's value as a ``dict``.
Raises:
EnvironmentError: If the environment variable does not
exist, and ``default`` was not provided.
"""
if name not in self:
if default is not None:
return default
raise EnvironmentError.not_found(self._prefix, name)
return dict(**self.get(name)) |
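The lookup-with-default behaviour can be rehearsed without the surrounding environment machinery; here a plain dict stands in for the parsed environment and MissingVariable stands in for the library's EnvironmentError.not_found:
class MissingVariable(KeyError):
    """Local stand-in for the library's EnvironmentError."""

class TinyEnv:
    def __init__(self, values):
        self._values = dict(values)          # name -> already-parsed value

    def __contains__(self, name):
        return name in self._values

    def get(self, name):
        return self._values[name]

    def get_dict(self, name, default=None):
        # Same control flow as above: return the default if given,
        # raise if missing, otherwise hand back a fresh dict copy.
        if name not in self:
            if default is not None:
                return default
            raise MissingVariable(name)
        return dict(**self.get(name))

env = TinyEnv({'database': {'host': 'localhost', 'port': 5432}})
print(env.get_dict('database'))                    # a copy, safe to mutate
print(env.get_dict('cache', default={'ttl': 60}))  # {'ttl': 60}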
def export(self):
"""
Returns a dictionary with all album information.
Use the :meth:`from_export` method to recreate the
:class:`Album` object.
"""
return {'id' : self.id, 'name' : self.name, 'artist' : self._artist_name, 'artist_id' : self._artist_id, 'cover' : self._cover_url} | Returns a dictionary with all album information.
Use the :meth:`from_export` method to recreate the
:class:`Album` object. | Below is the the instruction that describes the task:
### Input:
Returns a dictionary with all album information.
Use the :meth:`from_export` method to recreate the
:class:`Album` object.
### Response:
def export(self):
"""
Returns a dictionary with all album information.
Use the :meth:`from_export` method to recreate the
:class:`Album` object.
"""
return {'id' : self.id, 'name' : self.name, 'artist' : self._artist_name, 'artist_id' : self._artist_id, 'cover' : self._cover_url} |
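export and the from_export method mentioned in the docstring form a plain to-dict / from-dict round trip. A minimal sketch with an invented Album-like class whose attribute names follow the dict above (from_export's real signature is not shown, so its shape here is an assumption):
class TinyAlbum:
    def __init__(self, id, name, artist, artist_id, cover):
        self.id = id
        self.name = name
        self._artist_name = artist
        self._artist_id = artist_id
        self._cover_url = cover

    def export(self):
        return {'id': self.id, 'name': self.name,
                'artist': self._artist_name, 'artist_id': self._artist_id,
                'cover': self._cover_url}

    @classmethod
    def from_export(cls, data):
        return cls(data['id'], data['name'], data['artist'],
                   data['artist_id'], data['cover'])

a = TinyAlbum(1, 'Blue Train', 'John Coltrane', 42, 'http://example.invalid/cover.jpg')
b = TinyAlbum.from_export(a.export())
print(b.export() == a.export())   # True, the round trip is lossless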
def make_optimize_tensor(self, model, session=None, var_list=None, **kwargs):
"""
Make SciPy optimization tensor.
The `make_optimize_tensor` method builds optimization tensor and initializes
all necessary variables created by optimizer.
:param model: GPflow model.
:param session: Tensorflow session.
:param var_list: List of variables for training.
:param kwargs: Scipy optional optimization parameters,
- `maxiter`, maximal number of iterations to perform.
- `disp`, if True, prints convergence messages.
:return: Tensorflow operation.
"""
session = model.enquire_session(session)
with session.as_default():
var_list = self._gen_var_list(model, var_list)
optimizer_kwargs = self._optimizer_kwargs.copy()
options = optimizer_kwargs.get('options', {})
options.update(kwargs)
optimizer_kwargs.update(dict(options=options))
objective = model.objective
optimizer = external_optimizer.ScipyOptimizerInterface(
objective, var_list=var_list, **optimizer_kwargs)
model.initialize(session=session)
return optimizer | Make SciPy optimization tensor.
The `make_optimize_tensor` method builds optimization tensor and initializes
all necessary variables created by optimizer.
:param model: GPflow model.
:param session: Tensorflow session.
:param var_list: List of variables for training.
:param kwargs: Scipy optional optimization parameters,
- `maxiter`, maximal number of iterations to perform.
- `disp`, if True, prints convergence messages.
:return: Tensorflow operation. | Below is the the instruction that describes the task:
### Input:
Make SciPy optimization tensor.
The `make_optimize_tensor` method builds optimization tensor and initializes
all necessary variables created by optimizer.
:param model: GPflow model.
:param session: Tensorflow session.
:param var_list: List of variables for training.
:param kwargs: Scipy optional optimization parameters,
- `maxiter`, maximal number of iterations to perform.
- `disp`, if True, prints convergence messages.
:return: Tensorflow operation.
### Response:
def make_optimize_tensor(self, model, session=None, var_list=None, **kwargs):
"""
Make SciPy optimization tensor.
The `make_optimize_tensor` method builds optimization tensor and initializes
all necessary variables created by optimizer.
:param model: GPflow model.
:param session: Tensorflow session.
:param var_list: List of variables for training.
:param kwargs: Scipy optional optimization parameters,
- `maxiter`, maximal number of iterations to perform.
- `disp`, if True, prints convergence messages.
:return: Tensorflow operation.
"""
session = model.enquire_session(session)
with session.as_default():
var_list = self._gen_var_list(model, var_list)
optimizer_kwargs = self._optimizer_kwargs.copy()
options = optimizer_kwargs.get('options', {})
options.update(kwargs)
optimizer_kwargs.update(dict(options=options))
objective = model.objective
optimizer = external_optimizer.ScipyOptimizerInterface(
objective, var_list=var_list, **optimizer_kwargs)
model.initialize(session=session)
return optimizer |
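One detail worth isolating is the kwargs handling: call-site kwargs are merged into a copy of the stored optimizer kwargs. Note that .copy() above is shallow, so the nested 'options' dict is still shared with the stored kwargs; the sketch below copies the nested dict as well:
stored_kwargs = {'method': 'L-BFGS-B', 'options': {'maxiter': 1000}}

def merged_options(stored, **call_kwargs):
    out = dict(stored)                       # shallow copy of the stored kwargs
    options = dict(out.get('options', {}))   # copy the nested dict as well
    options.update(call_kwargs)              # call-site values win
    out['options'] = options
    return out

print(merged_options(stored_kwargs, maxiter=50, disp=True))
print(stored_kwargs)   # stored defaults are left untouched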
def get_private_endpoint(id: str, guid: str) -> str:
"""Get remote endpoint for delivering private payloads."""
_username, domain = id.split("@")
return "https://%s/receive/users/%s" % (domain, guid) | Get remote endpoint for delivering private payloads. | Below is the the instruction that describes the task:
### Input:
Get remote endpoint for delivering private payloads.
### Response:
def get_private_endpoint(id: str, guid: str) -> str:
"""Get remote endpoint for delivering private payloads."""
_username, domain = id.split("@")
return "https://%s/receive/users/%s" % (domain, guid) |
def load_plume_package(package, plume_dir, accept_defaults):
"""Loads a canari package into Plume."""
from canari.commands.load_plume_package import load_plume_package
load_plume_package(package, plume_dir, accept_defaults) | Loads a canari package into Plume. | Below is the the instruction that describes the task:
### Input:
Loads a canari package into Plume.
### Response:
def load_plume_package(package, plume_dir, accept_defaults):
"""Loads a canari package into Plume."""
from canari.commands.load_plume_package import load_plume_package
load_plume_package(package, plume_dir, accept_defaults) |
def _parse_time_to_freeze(time_to_freeze_str):
"""Parses all the possible inputs for freeze_time
:returns: a naive ``datetime.datetime`` object
"""
if time_to_freeze_str is None:
time_to_freeze_str = datetime.datetime.utcnow()
if isinstance(time_to_freeze_str, datetime.datetime):
time_to_freeze = time_to_freeze_str
elif isinstance(time_to_freeze_str, datetime.date):
time_to_freeze = datetime.datetime.combine(time_to_freeze_str, datetime.time())
elif isinstance(time_to_freeze_str, datetime.timedelta):
time_to_freeze = datetime.datetime.utcnow() + time_to_freeze_str
else:
time_to_freeze = parser.parse(time_to_freeze_str)
return convert_to_timezone_naive(time_to_freeze) | Parses all the possible inputs for freeze_time
:returns: a naive ``datetime.datetime`` object | Below is the the instruction that describes the task:
### Input:
Parses all the possible inputs for freeze_time
:returns: a naive ``datetime.datetime`` object
### Response:
def _parse_time_to_freeze(time_to_freeze_str):
"""Parses all the possible inputs for freeze_time
:returns: a naive ``datetime.datetime`` object
"""
if time_to_freeze_str is None:
time_to_freeze_str = datetime.datetime.utcnow()
if isinstance(time_to_freeze_str, datetime.datetime):
time_to_freeze = time_to_freeze_str
elif isinstance(time_to_freeze_str, datetime.date):
time_to_freeze = datetime.datetime.combine(time_to_freeze_str, datetime.time())
elif isinstance(time_to_freeze_str, datetime.timedelta):
time_to_freeze = datetime.datetime.utcnow() + time_to_freeze_str
else:
time_to_freeze = parser.parse(time_to_freeze_str)
return convert_to_timezone_naive(time_to_freeze) |
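A simplified, dependency-free rehearsal of the same type dispatch; datetime.fromisoformat stands in for dateutil.parser.parse and the timezone-naive conversion is skipped:
import datetime

def parse_time_to_freeze(value=None):
    # Same dispatch order as above: None, datetime, date, timedelta, string.
    if value is None:
        value = datetime.datetime.utcnow()
    if isinstance(value, datetime.datetime):
        return value
    if isinstance(value, datetime.date):
        return datetime.datetime.combine(value, datetime.time())
    if isinstance(value, datetime.timedelta):
        return datetime.datetime.utcnow() + value
    return datetime.datetime.fromisoformat(value)   # simplified string parsing

print(parse_time_to_freeze(datetime.date(2020, 1, 1)))
print(parse_time_to_freeze(datetime.timedelta(hours=1)))
print(parse_time_to_freeze("2020-01-01 12:00:00"))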