code | docstring | text
---|---|---|
def master(master=None, connected=True):
'''
.. versionadded:: 2015.5.0
Fire an event if the minion gets disconnected from its master. This
function is meant to be run via a scheduled job from the minion. If
    master_ip is an FQDN/Hostname, it must be resolvable to a valid IPv4
address.
CLI Example:
.. code-block:: bash
salt '*' status.master
'''
def _win_remotes_on(port):
'''
Windows specific helper function.
Returns set of ipv4 host addresses of remote established connections
on local or remote tcp port.
Parses output of shell 'netstat' to get connections
PS C:> netstat -n -p TCP
Active Connections
Proto Local Address Foreign Address State
TCP 10.1.1.26:3389 10.1.1.1:4505 ESTABLISHED
TCP 10.1.1.26:56862 10.1.1.10:49155 TIME_WAIT
TCP 10.1.1.26:56868 169.254.169.254:80 CLOSE_WAIT
TCP 127.0.0.1:49197 127.0.0.1:49198 ESTABLISHED
TCP 127.0.0.1:49198 127.0.0.1:49197 ESTABLISHED
'''
remotes = set()
try:
data = subprocess.check_output(['netstat', '-n', '-p', 'TCP']) # pylint: disable=minimum-python-version
except subprocess.CalledProcessError:
log.error('Failed netstat')
raise
lines = salt.utils.stringutils.to_unicode(data).split('\n')
for line in lines:
if 'ESTABLISHED' not in line:
continue
chunks = line.split()
remote_host, remote_port = chunks[2].rsplit(':', 1)
if int(remote_port) != port:
continue
remotes.add(remote_host)
return remotes
# the default publishing port
port = 4505
master_ips = None
if master:
master_ips = _host_to_ips(master)
if not master_ips:
return
if __salt__['config.get']('publish_port') != '':
port = int(__salt__['config.get']('publish_port'))
master_connection_status = False
connected_ips = _win_remotes_on(port)
# Get connection status for master
for master_ip in master_ips:
if master_ip in connected_ips:
master_connection_status = True
break
# Connection to master is not as expected
if master_connection_status is not connected:
event = salt.utils.event.get_event('minion', opts=__opts__, listen=False)
if master_connection_status:
event.fire_event({'master': master}, salt.minion.master_event(type='connected'))
else:
event.fire_event({'master': master}, salt.minion.master_event(type='disconnected'))
return master_connection_status | .. versionadded:: 2015.5.0
Fire an event if the minion gets disconnected from its master. This
function is meant to be run via a scheduled job from the minion. If
    master_ip is an FQDN/Hostname, it must be resolvable to a valid IPv4
address.
CLI Example:
.. code-block:: bash
    salt '*' status.master | Below is the instruction that describes the task:
### Input:
.. versionadded:: 2015.5.0
Fire an event if the minion gets disconnected from its master. This
function is meant to be run via a scheduled job from the minion. If
    master_ip is an FQDN/Hostname, it must be resolvable to a valid IPv4
address.
CLI Example:
.. code-block:: bash
salt '*' status.master
### Response:
def master(master=None, connected=True):
'''
.. versionadded:: 2015.5.0
Fire an event if the minion gets disconnected from its master. This
function is meant to be run via a scheduled job from the minion. If
    master_ip is an FQDN/Hostname, it must be resolvable to a valid IPv4
address.
CLI Example:
.. code-block:: bash
salt '*' status.master
'''
def _win_remotes_on(port):
'''
Windows specific helper function.
Returns set of ipv4 host addresses of remote established connections
on local or remote tcp port.
Parses output of shell 'netstat' to get connections
PS C:> netstat -n -p TCP
Active Connections
Proto Local Address Foreign Address State
TCP 10.1.1.26:3389 10.1.1.1:4505 ESTABLISHED
TCP 10.1.1.26:56862 10.1.1.10:49155 TIME_WAIT
TCP 10.1.1.26:56868 169.254.169.254:80 CLOSE_WAIT
TCP 127.0.0.1:49197 127.0.0.1:49198 ESTABLISHED
TCP 127.0.0.1:49198 127.0.0.1:49197 ESTABLISHED
'''
remotes = set()
try:
data = subprocess.check_output(['netstat', '-n', '-p', 'TCP']) # pylint: disable=minimum-python-version
except subprocess.CalledProcessError:
log.error('Failed netstat')
raise
lines = salt.utils.stringutils.to_unicode(data).split('\n')
for line in lines:
if 'ESTABLISHED' not in line:
continue
chunks = line.split()
remote_host, remote_port = chunks[2].rsplit(':', 1)
if int(remote_port) != port:
continue
remotes.add(remote_host)
return remotes
# the default publishing port
port = 4505
master_ips = None
if master:
master_ips = _host_to_ips(master)
if not master_ips:
return
if __salt__['config.get']('publish_port') != '':
port = int(__salt__['config.get']('publish_port'))
master_connection_status = False
connected_ips = _win_remotes_on(port)
# Get connection status for master
for master_ip in master_ips:
if master_ip in connected_ips:
master_connection_status = True
break
# Connection to master is not as expected
if master_connection_status is not connected:
event = salt.utils.event.get_event('minion', opts=__opts__, listen=False)
if master_connection_status:
event.fire_event({'master': master}, salt.minion.master_event(type='connected'))
else:
event.fire_event({'master': master}, salt.minion.master_event(type='disconnected'))
return master_connection_status |
def _parse_videoname(cls, videoname):
""" parse videoname and return video info dict
video info contains:
- title, the name of video
- sub_title, the sub_title of video
- resolution,
- source,
-
- season, defaults to 0
- episode, defaults to 0
"""
info = {
'title': '',
'season': 0,
'episode': 0,
'sub_title': '',
'resolution': '',
'source': '',
'audio_encoding': '',
'video_encoding': '',
}
last_index = 0
m = cls.RE_SEASON_EPISODE.search(videoname)
if m:
info['season'] = int(m.group('season'))
info['episode'] = int(m.group('episode'))
s, e = m.span()
info['title'] = videoname[0:s].strip('.')
last_index = e
else:
m = cls.RE_SEASON.search(videoname)
if m:
info['season'] = int(m.group('season'))
s, e = m.span()
info['title'] = videoname[0:s].strip('.')
last_index = e
m = cls.RE_RESOLUTION.search(videoname)
if m:
info['resolution'] = m.group('resolution')
s, e = m.span()
if info['title'] == '':
info['title'] = videoname[0:s].strip('.')
if info['season'] > 0 and info['episode'] > 0:
info['sub_title'] = videoname[last_index:s].strip('.')
last_index = e
if info['title'] == '':
info['title'] = videoname
m = cls.RE_SOURCE.search(videoname)
if m:
info['source'] = m.group('source')
m = cls.RE_AUDIO_ENC.search(videoname)
if m:
info['audio_encoding'] = m.group('audio_encoding')
m = cls.RE_VIDEO_ENC.search(videoname)
if m:
info['video_encoding'] = m.group('video_encoding')
return info | parse videoname and return video info dict
video info contains:
- title, the name of video
- sub_title, the sub_title of video
- resolution,
- source,
-
- season, defaults to 0
    - episode, defaults to 0 | Below is the instruction that describes the task:
### Input:
parse videoname and return video info dict
video info contains:
- title, the name of video
- sub_title, the sub_title of video
- resolution,
- source,
-
- season, defaults to 0
- episode, defaults to 0
### Response:
def _parse_videoname(cls, videoname):
""" parse videoname and return video info dict
video info contains:
- title, the name of video
- sub_title, the sub_title of video
- resolution,
- source,
-
- season, defaults to 0
- episode, defaults to 0
"""
info = {
'title': '',
'season': 0,
'episode': 0,
'sub_title': '',
'resolution': '',
'source': '',
'audio_encoding': '',
'video_encoding': '',
}
last_index = 0
m = cls.RE_SEASON_EPISODE.search(videoname)
if m:
info['season'] = int(m.group('season'))
info['episode'] = int(m.group('episode'))
s, e = m.span()
info['title'] = videoname[0:s].strip('.')
last_index = e
else:
m = cls.RE_SEASON.search(videoname)
if m:
info['season'] = int(m.group('season'))
s, e = m.span()
info['title'] = videoname[0:s].strip('.')
last_index = e
m = cls.RE_RESOLUTION.search(videoname)
if m:
info['resolution'] = m.group('resolution')
s, e = m.span()
if info['title'] == '':
info['title'] = videoname[0:s].strip('.')
if info['season'] > 0 and info['episode'] > 0:
info['sub_title'] = videoname[last_index:s].strip('.')
last_index = e
if info['title'] == '':
info['title'] = videoname
m = cls.RE_SOURCE.search(videoname)
if m:
info['source'] = m.group('source')
m = cls.RE_AUDIO_ENC.search(videoname)
if m:
info['audio_encoding'] = m.group('audio_encoding')
m = cls.RE_VIDEO_ENC.search(videoname)
if m:
info['video_encoding'] = m.group('video_encoding')
return info |
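A rough usage sketch of the parser above, for reference. The RE_* class attributes are not shown in this entry, so the exact values depend on them; the call below assumes they match the conventional 'S01E02', '720p', 'WEB-DL' and 'x264' release-name tokens and is purely illustrative.
# Hypothetical call -- the class name and regex behaviour are assumptions.
info = VideoNameParser._parse_videoname('Some.Show.S01E02.Pilot.720p.WEB-DL.x264')
# Expected shape of the result (actual values depend on the real regexes):
# {'title': 'Some.Show', 'season': 1, 'episode': 2, 'sub_title': 'Pilot',
#  'resolution': '720p', 'source': 'WEB-DL', 'audio_encoding': '', 'video_encoding': 'x264'}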
def run(self):
"""Import the controller and run it.
This mimics the processing done by :func:`helper.start`
when a controller is run in the foreground. A new instance
of ``self.controller`` is created and run until a keyboard
        interrupt occurs or the controller stops of its own accord.
"""
segments = self.controller.split('.')
controller_class = reduce(getattr, segments[1:],
__import__('.'.join(segments[:-1])))
cmd_line = ['-f']
if self.configuration is not None:
cmd_line.extend(['-c', self.configuration])
args = parser.get().parse_args(cmd_line)
controller_instance = controller_class(args, platform)
try:
controller_instance.start()
except KeyboardInterrupt:
controller_instance.stop() | Import the controller and run it.
This mimics the processing done by :func:`helper.start`
when a controller is run in the foreground. A new instance
of ``self.controller`` is created and run until a keyboard
        interrupt occurs or the controller stops of its own accord. | Below is the instruction that describes the task:
### Input:
Import the controller and run it.
This mimics the processing done by :func:`helper.start`
when a controller is run in the foreground. A new instance
of ``self.controller`` is created and run until a keyboard
        interrupt occurs or the controller stops of its own accord.
### Response:
def run(self):
"""Import the controller and run it.
This mimics the processing done by :func:`helper.start`
when a controller is run in the foreground. A new instance
of ``self.controller`` is created and run until a keyboard
        interrupt occurs or the controller stops of its own accord.
"""
segments = self.controller.split('.')
controller_class = reduce(getattr, segments[1:],
__import__('.'.join(segments[:-1])))
cmd_line = ['-f']
if self.configuration is not None:
cmd_line.extend(['-c', self.configuration])
args = parser.get().parse_args(cmd_line)
controller_instance = controller_class(args, platform)
try:
controller_instance.start()
except KeyboardInterrupt:
controller_instance.stop() |
def _welch_anova(self, dv=None, between=None, export_filename=None):
"""Return one-way Welch ANOVA."""
aov = welch_anova(data=self, dv=dv, between=between,
export_filename=export_filename)
        return aov | Return one-way Welch ANOVA. | Below is the instruction that describes the task:
### Input:
Return one-way Welch ANOVA.
### Response:
def _welch_anova(self, dv=None, between=None, export_filename=None):
"""Return one-way Welch ANOVA."""
aov = welch_anova(data=self, dv=dv, between=between,
export_filename=export_filename)
return aov |
def handle_stage_changed(self, model):
'''
handle a stage change in the data model
:param model: the data model that was changed
'''
stages = model.get_stages()
if self.dataman:
self.dataman.set('stages', stages) | handle a stage change in the data model
    :param model: the data model that was changed | Below is the instruction that describes the task:
### Input:
handle a stage change in the data model
:param model: the data model that was changed
### Response:
def handle_stage_changed(self, model):
'''
handle a stage change in the data model
:param model: the data model that was changed
'''
stages = model.get_stages()
if self.dataman:
self.dataman.set('stages', stages) |
def get_Generic_itemtype(sq, simplify=True):
"""Retrieves the item type from a PEP 484 generic or subclass of such.
sq must be a typing.Tuple or (subclass of) typing.Iterable or typing.Container.
Consequently this also works with typing.List, typing.Set and typing.Dict.
Note that for typing.Dict and mapping types in general, the key type is regarded as item type.
For typing.Tuple all contained types are returned as a typing.Union.
If simplify == True some effort is taken to eliminate redundancies in such a union.
"""
if is_Tuple(sq):
if simplify:
itm_tps = [x for x in get_Tuple_params(sq)]
simplify_for_Union(itm_tps)
return Union[tuple(itm_tps)]
else:
return Union[get_Tuple_params(sq)]
else:
try:
res = _select_Generic_superclass_parameters(sq, typing.Container)
except TypeError:
res = None
if res is None:
try:
res = _select_Generic_superclass_parameters(sq, typing.Iterable)
except TypeError:
pass
if res is None:
raise TypeError("Has no itemtype: "+type_str(sq))
else:
return res[0] | Retrieves the item type from a PEP 484 generic or subclass of such.
sq must be a typing.Tuple or (subclass of) typing.Iterable or typing.Container.
Consequently this also works with typing.List, typing.Set and typing.Dict.
Note that for typing.Dict and mapping types in general, the key type is regarded as item type.
For typing.Tuple all contained types are returned as a typing.Union.
    If simplify == True some effort is taken to eliminate redundancies in such a union. | Below is the instruction that describes the task:
### Input:
Retrieves the item type from a PEP 484 generic or subclass of such.
sq must be a typing.Tuple or (subclass of) typing.Iterable or typing.Container.
Consequently this also works with typing.List, typing.Set and typing.Dict.
Note that for typing.Dict and mapping types in general, the key type is regarded as item type.
For typing.Tuple all contained types are returned as a typing.Union.
If simplify == True some effort is taken to eliminate redundancies in such a union.
### Response:
def get_Generic_itemtype(sq, simplify=True):
"""Retrieves the item type from a PEP 484 generic or subclass of such.
sq must be a typing.Tuple or (subclass of) typing.Iterable or typing.Container.
Consequently this also works with typing.List, typing.Set and typing.Dict.
Note that for typing.Dict and mapping types in general, the key type is regarded as item type.
For typing.Tuple all contained types are returned as a typing.Union.
If simplify == True some effort is taken to eliminate redundancies in such a union.
"""
if is_Tuple(sq):
if simplify:
itm_tps = [x for x in get_Tuple_params(sq)]
simplify_for_Union(itm_tps)
return Union[tuple(itm_tps)]
else:
return Union[get_Tuple_params(sq)]
else:
try:
res = _select_Generic_superclass_parameters(sq, typing.Container)
except TypeError:
res = None
if res is None:
try:
res = _select_Generic_superclass_parameters(sq, typing.Iterable)
except TypeError:
pass
if res is None:
raise TypeError("Has no itemtype: "+type_str(sq))
else:
return res[0] |
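A quick usage sketch that follows directly from the docstring above (illustrative; it assumes the surrounding helpers are importable as shown).
import typing

get_Generic_itemtype(typing.List[int])        # -> int
get_Generic_itemtype(typing.Dict[str, int])   # -> str, the key type counts as the item type
get_Generic_itemtype(typing.Tuple[int, str])  # -> typing.Union[int, str]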
def describe_target_groups(names=None,
target_group_arns=None,
load_balancer_arn=None,
region=None,
key=None,
keyid=None,
profile=None):
'''
Describes the specified target groups or all of your target groups. By default,
all target groups are described. Alternatively, you can specify one of the
following to filter the results: the ARN of the load balancer, the names of
one or more target groups, or the ARNs of one or more target groups.
Returns: list
CLI example:
.. code-block:: bash
salt myminion boto_elbv2.describe_target_groups
salt myminion boto_elbv2.describe_target_groups target_group_name
salt myminion boto_elbv2.describe_target_groups "[tg_name,tg_name]"
'''
if names and target_group_arns:
raise SaltInvocationError('At most one of names or target_group_arns may '
'be provided')
if names:
target_groups = names
elif target_group_arns:
target_groups = target_group_arns
else:
target_groups = None
tg_list = []
if target_groups:
if isinstance(target_groups, str) or isinstance(target_groups, six.text_type):
tg_list.append(target_groups)
else:
for group in target_groups:
tg_list.append(group)
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
try:
if names:
ret = conn.describe_target_groups(Names=tg_list)['TargetGroups']
elif target_group_arns:
ret = conn.describe_target_groups(TargetGroupArns=tg_list)['TargetGroups']
elif load_balancer_arn:
ret = conn.describe_target_groups(LoadBalancerArn=load_balancer_arn)['TargetGroups']
else:
ret = []
next_marker = ''
while True:
r = conn.describe_target_groups(Marker=next_marker)
for alb in r['TargetGroups']:
ret.append(alb)
if 'NextMarker' in r:
next_marker = r['NextMarker']
else:
break
return ret if ret else []
except ClientError as error:
log.warning(error)
return False | Describes the specified target groups or all of your target groups. By default,
all target groups are described. Alternatively, you can specify one of the
following to filter the results: the ARN of the load balancer, the names of
one or more target groups, or the ARNs of one or more target groups.
Returns: list
CLI example:
.. code-block:: bash
salt myminion boto_elbv2.describe_target_groups
salt myminion boto_elbv2.describe_target_groups target_group_name
        salt myminion boto_elbv2.describe_target_groups "[tg_name,tg_name]" | Below is the instruction that describes the task:
### Input:
Describes the specified target groups or all of your target groups. By default,
all target groups are described. Alternatively, you can specify one of the
following to filter the results: the ARN of the load balancer, the names of
one or more target groups, or the ARNs of one or more target groups.
Returns: list
CLI example:
.. code-block:: bash
salt myminion boto_elbv2.describe_target_groups
salt myminion boto_elbv2.describe_target_groups target_group_name
salt myminion boto_elbv2.describe_target_groups "[tg_name,tg_name]"
### Response:
def describe_target_groups(names=None,
target_group_arns=None,
load_balancer_arn=None,
region=None,
key=None,
keyid=None,
profile=None):
'''
Describes the specified target groups or all of your target groups. By default,
all target groups are described. Alternatively, you can specify one of the
following to filter the results: the ARN of the load balancer, the names of
one or more target groups, or the ARNs of one or more target groups.
Returns: list
CLI example:
.. code-block:: bash
salt myminion boto_elbv2.describe_target_groups
salt myminion boto_elbv2.describe_target_groups target_group_name
salt myminion boto_elbv2.describe_target_groups "[tg_name,tg_name]"
'''
if names and target_group_arns:
raise SaltInvocationError('At most one of names or target_group_arns may '
'be provided')
if names:
target_groups = names
elif target_group_arns:
target_groups = target_group_arns
else:
target_groups = None
tg_list = []
if target_groups:
if isinstance(target_groups, str) or isinstance(target_groups, six.text_type):
tg_list.append(target_groups)
else:
for group in target_groups:
tg_list.append(group)
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
try:
if names:
ret = conn.describe_target_groups(Names=tg_list)['TargetGroups']
elif target_group_arns:
ret = conn.describe_target_groups(TargetGroupArns=tg_list)['TargetGroups']
elif load_balancer_arn:
ret = conn.describe_target_groups(LoadBalancerArn=load_balancer_arn)['TargetGroups']
else:
ret = []
next_marker = ''
while True:
r = conn.describe_target_groups(Marker=next_marker)
for alb in r['TargetGroups']:
ret.append(alb)
if 'NextMarker' in r:
next_marker = r['NextMarker']
else:
break
return ret if ret else []
except ClientError as error:
log.warning(error)
return False |
def get_min_vertex_distance( coor, guess ):
"""Can miss the minimum, but is enough for our purposes."""
# Sort by x.
ix = nm.argsort( coor[:,0] )
scoor = coor[ix]
mvd = 1e16
# Get mvd in chunks potentially smaller than guess.
n_coor = coor.shape[0]
    print(n_coor)
i0 = i1 = 0
x0 = scoor[i0,0]
while 1:
while ((scoor[i1,0] - x0) < guess) and (i1 < (n_coor - 1)):
i1 += 1
# print i0, i1, x0, scoor[i1,0]
aim, aa1, aa2, aux = get_min_vertex_distance_naive( scoor[i0:i1+1] )
if aux < mvd:
im, a1, a2 = aim, aa1 + i0, aa2 + i0
mvd = min( mvd, aux )
i0 = i1 = int( 0.5 * (i1 + i0 ) ) + 1
# i0 += 1
x0 = scoor[i0,0]
# print '-', i0
if i1 == n_coor - 1: break
    print(im, ix[a1], ix[a2], a1, a2, scoor[a1], scoor[a2])
    return mvd | Can miss the minimum, but is enough for our purposes. | Below is the instruction that describes the task:
### Input:
Can miss the minimum, but is enough for our purposes.
### Response:
def get_min_vertex_distance( coor, guess ):
"""Can miss the minimum, but is enough for our purposes."""
# Sort by x.
ix = nm.argsort( coor[:,0] )
scoor = coor[ix]
mvd = 1e16
# Get mvd in chunks potentially smaller than guess.
n_coor = coor.shape[0]
    print(n_coor)
i0 = i1 = 0
x0 = scoor[i0,0]
while 1:
while ((scoor[i1,0] - x0) < guess) and (i1 < (n_coor - 1)):
i1 += 1
# print i0, i1, x0, scoor[i1,0]
aim, aa1, aa2, aux = get_min_vertex_distance_naive( scoor[i0:i1+1] )
if aux < mvd:
im, a1, a2 = aim, aa1 + i0, aa2 + i0
mvd = min( mvd, aux )
i0 = i1 = int( 0.5 * (i1 + i0 ) ) + 1
# i0 += 1
x0 = scoor[i0,0]
# print '-', i0
if i1 == n_coor - 1: break
    print(im, ix[a1], ix[a2], a1, a2, scoor[a1], scoor[a2])
return mvd |
def search(self, **kwargs):
"""
Method to search equipments based on extends search.
:param search: Dict containing QuerySets to find equipments.
:param include: Array containing fields to include on response.
:param exclude: Array containing fields to exclude on response.
:param fields: Array containing fields to override default fields.
:param kind: Determine if result will be detailed ('detail') or basic ('basic').
:return: Dict containing equipments
"""
return super(ApiEquipment, self).get(self.prepare_url('api/v3/equipment/',
kwargs)) | Method to search equipments based on extends search.
:param search: Dict containing QuerySets to find equipments.
:param include: Array containing fields to include on response.
:param exclude: Array containing fields to exclude on response.
:param fields: Array containing fields to override default fields.
:param kind: Determine if result will be detailed ('detail') or basic ('basic').
    :return: Dict containing equipments | Below is the instruction that describes the task:
### Input:
Method to search equipments based on extends search.
:param search: Dict containing QuerySets to find equipments.
:param include: Array containing fields to include on response.
:param exclude: Array containing fields to exclude on response.
:param fields: Array containing fields to override default fields.
:param kind: Determine if result will be detailed ('detail') or basic ('basic').
:return: Dict containing equipments
### Response:
def search(self, **kwargs):
"""
Method to search equipments based on extends search.
:param search: Dict containing QuerySets to find equipments.
:param include: Array containing fields to include on response.
:param exclude: Array containing fields to exclude on response.
:param fields: Array containing fields to override default fields.
:param kind: Determine if result will be detailed ('detail') or basic ('basic').
:return: Dict containing equipments
"""
return super(ApiEquipment, self).get(self.prepare_url('api/v3/equipment/',
kwargs)) |
def _parse_param(key, val):
""" Parse the query param looking for sparse fields params
Ensure the `val` or what will become the sparse fields
is always an array. If the query param is not a sparse
fields query param then return None.
:param key:
the query parameter key in the request (left of =)
:param val:
the query parameter val in the request (right of =)
:return:
tuple of resource type to implement the sparse
        fields on & an array of the fields.
"""
regex = re.compile(r'fields\[([A-Za-z]+)\]')
match = regex.match(key)
if match:
if not isinstance(val, list):
val = val.split(',')
fields = [field.lower() for field in val]
rtype = match.groups()[0].lower()
return rtype, fields | Parse the query param looking for sparse fields params
Ensure the `val` or what will become the sparse fields
is always an array. If the query param is not a sparse
fields query param then return None.
:param key:
the query parameter key in the request (left of =)
:param val:
the query parameter val in the request (right of =)
:return:
tuple of resource type to implement the sparse
        fields on & an array of the fields. | Below is the instruction that describes the task:
### Input:
Parse the query param looking for sparse fields params
Ensure the `val` or what will become the sparse fields
is always an array. If the query param is not a sparse
fields query param then return None.
:param key:
the query parameter key in the request (left of =)
:param val:
the query parameter val in the request (right of =)
:return:
tuple of resource type to implement the sparse
        fields on & an array of the fields.
### Response:
def _parse_param(key, val):
""" Parse the query param looking for sparse fields params
Ensure the `val` or what will become the sparse fields
is always an array. If the query param is not a sparse
fields query param then return None.
:param key:
the query parameter key in the request (left of =)
:param val:
the query parameter val in the request (right of =)
:return:
tuple of resource type to implement the sparse
        fields on & an array of the fields.
"""
regex = re.compile(r'fields\[([A-Za-z]+)\]')
match = regex.match(key)
if match:
if not isinstance(val, list):
val = val.split(',')
fields = [field.lower() for field in val]
rtype = match.groups()[0].lower()
return rtype, fields |
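A minimal usage sketch of _parse_param; the calls are hypothetical, but the results follow from the regex and split logic shown above.
_parse_param('fields[Articles]', 'title,Body')   # -> ('articles', ['title', 'body'])
_parse_param('fields[people]', ['name'])         # -> ('people', ['name'])
_parse_param('include', 'author')                # -> None (not a sparse fields param)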
def read_function(data, window, ij, g_args):
"""Takes an array, and sets any value above the mean to the max, the rest to 0"""
output = (data[0] > numpy.mean(data[0])).astype(data[0].dtype) * data[0].max()
    return output | Takes an array, and sets any value above the mean to the max, the rest to 0 | Below is the instruction that describes the task:
### Input:
Takes an array, and sets any value above the mean to the max, the rest to 0
### Response:
def read_function(data, window, ij, g_args):
"""Takes an array, and sets any value above the mean to the max, the rest to 0"""
output = (data[0] > numpy.mean(data[0])).astype(data[0].dtype) * data[0].max()
return output |
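A small worked example of the thresholding above (values are illustrative; window, ij and g_args are unused by the function).
import numpy

band = numpy.array([1, 2, 3, 4], dtype='uint8')
read_function([band], None, None, None)
# mean is 2.5, so values above it become the max (4) and the rest 0:
# array([0, 0, 4, 4], dtype=uint8)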
def json_response(obj):
"""
returns a json response from a json serializable python object
"""
return Response(
response=json.dumps(
obj, indent=4, cls=AirflowJsonEncoder),
status=200,
        mimetype="application/json") | returns a json response from a json serializable python object | Below is the instruction that describes the task:
### Input:
returns a json response from a json serializable python object
### Response:
def json_response(obj):
"""
returns a json response from a json serializable python object
"""
return Response(
response=json.dumps(
obj, indent=4, cls=AirflowJsonEncoder),
status=200,
mimetype="application/json") |
def _group_batches_shared(xs, caller_batch_fn, prep_data_fn):
"""Shared functionality for grouping by batches for variant calling and joint calling.
"""
singles = []
batch_groups = collections.defaultdict(list)
for args in xs:
data = utils.to_single_data(args)
caller, batch = caller_batch_fn(data)
region = _list_to_tuple(data["region"]) if "region" in data else ()
if batch is not None:
batches = batch if isinstance(batch, (list, tuple)) else [batch]
for b in batches:
batch_groups[(b, region, caller)].append(utils.deepish_copy(data))
else:
data = prep_data_fn(data, [data])
singles.append(data)
batches = []
for batch, items in batch_groups.items():
batch_data = utils.deepish_copy(_pick_lead_item(items))
# For nested primary batches, split permanently by batch
if tz.get_in(["metadata", "batch"], batch_data):
batch_name = batch[0]
batch_data["metadata"]["batch"] = batch_name
batch_data = prep_data_fn(batch_data, items)
batch_data["group_orig"] = _collapse_subitems(batch_data, items)
batch_data["group"] = batch
batches.append(batch_data)
    return singles + batches | Shared functionality for grouping by batches for variant calling and joint calling. | Below is the instruction that describes the task:
### Input:
Shared functionality for grouping by batches for variant calling and joint calling.
### Response:
def _group_batches_shared(xs, caller_batch_fn, prep_data_fn):
"""Shared functionality for grouping by batches for variant calling and joint calling.
"""
singles = []
batch_groups = collections.defaultdict(list)
for args in xs:
data = utils.to_single_data(args)
caller, batch = caller_batch_fn(data)
region = _list_to_tuple(data["region"]) if "region" in data else ()
if batch is not None:
batches = batch if isinstance(batch, (list, tuple)) else [batch]
for b in batches:
batch_groups[(b, region, caller)].append(utils.deepish_copy(data))
else:
data = prep_data_fn(data, [data])
singles.append(data)
batches = []
for batch, items in batch_groups.items():
batch_data = utils.deepish_copy(_pick_lead_item(items))
# For nested primary batches, split permanently by batch
if tz.get_in(["metadata", "batch"], batch_data):
batch_name = batch[0]
batch_data["metadata"]["batch"] = batch_name
batch_data = prep_data_fn(batch_data, items)
batch_data["group_orig"] = _collapse_subitems(batch_data, items)
batch_data["group"] = batch
batches.append(batch_data)
return singles + batches |
async def on_raw_cap(self, message):
""" Handle CAP message. """
target, subcommand = message.params[:2]
params = message.params[2:]
# Call handler.
attr = 'on_raw_cap_' + pydle.protocol.identifierify(subcommand)
if hasattr(self, attr):
await getattr(self, attr)(params)
else:
        self.logger.warning('Unknown CAP subcommand sent from server: %s', subcommand) | Handle CAP message. | Below is the instruction that describes the task:
### Input:
Handle CAP message.
### Response:
async def on_raw_cap(self, message):
""" Handle CAP message. """
target, subcommand = message.params[:2]
params = message.params[2:]
# Call handler.
attr = 'on_raw_cap_' + pydle.protocol.identifierify(subcommand)
if hasattr(self, attr):
await getattr(self, attr)(params)
else:
self.logger.warning('Unknown CAP subcommand sent from server: %s', subcommand) |
def is_authenticated(user):
"""Return whether or not a User is authenticated.
Function provides compatibility following deprecation of method call to
`is_authenticated()` in Django 2.0.
This is *only* required to support Django < v1.10 (i.e. v1.9 and earlier),
    as `is_authenticated` was introduced as a property in v1.10.
"""
if not hasattr(user, 'is_authenticated'):
return False
if callable(user.is_authenticated):
# Will be callable if django.version < 2.0, but is only necessary in
# v1.9 and earlier due to change introduced in v1.10 making
# `is_authenticated` a property instead of a callable.
return user.is_authenticated()
else:
return user.is_authenticated | Return whether or not a User is authenticated.
Function provides compatibility following deprecation of method call to
`is_authenticated()` in Django 2.0.
This is *only* required to support Django < v1.10 (i.e. v1.9 and earlier),
    as `is_authenticated` was introduced as a property in v1.10. | Below is the instruction that describes the task:
### Input:
Return whether or not a User is authenticated.
Function provides compatibility following deprecation of method call to
`is_authenticated()` in Django 2.0.
This is *only* required to support Django < v1.10 (i.e. v1.9 and earlier),
    as `is_authenticated` was introduced as a property in v1.10.
### Response:
def is_authenticated(user):
"""Return whether or not a User is authenticated.
Function provides compatibility following deprecation of method call to
`is_authenticated()` in Django 2.0.
This is *only* required to support Django < v1.10 (i.e. v1.9 and earlier),
    as `is_authenticated` was introduced as a property in v1.10.
"""
if not hasattr(user, 'is_authenticated'):
return False
if callable(user.is_authenticated):
# Will be callable if django.version < 2.0, but is only necessary in
# v1.9 and earlier due to change introduced in v1.10 making
# `is_authenticated` a property instead of a callable.
return user.is_authenticated()
else:
return user.is_authenticated |
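For illustration, the helper above behaves the same across Django versions; a brief sketch, assuming a standard Django install.
from django.contrib.auth.models import AnonymousUser, User

is_authenticated(AnonymousUser())           # False on all supported versions
is_authenticated(User(username='alice'))    # True -- property on 1.10+, method on 1.9 and earlier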
def buildNavigation(self):
"""
Chooses the appropriate layout navigation component based on user prefs
"""
if self.buildSpec['navigation'] == constants.TABBED:
navigation = Tabbar(self, self.buildSpec, self.configs)
else:
navigation = Sidebar(self, self.buildSpec, self.configs)
if self.buildSpec['navigation'] == constants.HIDDEN:
navigation.Hide()
        return navigation | Chooses the appropriate layout navigation component based on user prefs | Below is the instruction that describes the task:
### Input:
Chooses the appropriate layout navigation component based on user prefs
### Response:
def buildNavigation(self):
"""
Chooses the appropriate layout navigation component based on user prefs
"""
if self.buildSpec['navigation'] == constants.TABBED:
navigation = Tabbar(self, self.buildSpec, self.configs)
else:
navigation = Sidebar(self, self.buildSpec, self.configs)
if self.buildSpec['navigation'] == constants.HIDDEN:
navigation.Hide()
return navigation |
def _fill_function(func, globals, defaults, dict, module, closure_values):
""" Fills in the rest of function data into the skeleton function object
    that was created via _make_skel_func().
"""
func.__globals__.update(globals)
func.__defaults__ = defaults
func.__dict__ = dict
func.__module__ = module
cells = func.__closure__
if cells is not None:
for cell, value in zip(cells, closure_values):
if value is not _empty_cell_value:
cell_set(cell, value)
return func | Fills in the rest of function data into the skeleton function object
    that was created via _make_skel_func(). | Below is the instruction that describes the task:
### Input:
Fills in the rest of function data into the skeleton function object
    that was created via _make_skel_func().
### Response:
def _fill_function(func, globals, defaults, dict, module, closure_values):
""" Fills in the rest of function data into the skeleton function object
    that was created via _make_skel_func().
"""
func.__globals__.update(globals)
func.__defaults__ = defaults
func.__dict__ = dict
func.__module__ = module
cells = func.__closure__
if cells is not None:
for cell, value in zip(cells, closure_values):
if value is not _empty_cell_value:
cell_set(cell, value)
return func |
def form_lines_valid(self, form):
"""Handle a valid LineFormSet."""
handled = 0
for inner_form in form:
if not inner_form.cleaned_data.get(formsets.DELETION_FIELD_NAME):
handled += 1
self.handle_inner_form(inner_form)
self.log_and_notify_lines(handled)
        return http.HttpResponseRedirect(self.get_success_url()) | Handle a valid LineFormSet. | Below is the instruction that describes the task:
### Input:
Handle a valid LineFormSet.
### Response:
def form_lines_valid(self, form):
"""Handle a valid LineFormSet."""
handled = 0
for inner_form in form:
if not inner_form.cleaned_data.get(formsets.DELETION_FIELD_NAME):
handled += 1
self.handle_inner_form(inner_form)
self.log_and_notify_lines(handled)
return http.HttpResponseRedirect(self.get_success_url()) |
def submesh(mesh,
faces_sequence,
only_watertight=False,
append=False):
"""
Return a subset of a mesh.
Parameters
----------
mesh : Trimesh
Source mesh to take geometry from
faces_sequence : sequence (p,) int
Indexes of mesh.faces
only_watertight : bool
Only return submeshes which are watertight.
append : bool
Return a single mesh which has the faces appended,
if this flag is set, only_watertight is ignored
Returns
---------
if append : Trimesh object
else list of Trimesh objects
"""
# evaluate generators so we can escape early
faces_sequence = list(faces_sequence)
if len(faces_sequence) == 0:
return []
# check to make sure we're not doing a whole bunch of work
# to deliver a subset which ends up as the whole mesh
if len(faces_sequence[0]) == len(mesh.faces):
all_faces = np.array_equal(np.sort(faces_sequence),
np.arange(len(faces_sequence)))
if all_faces:
log.debug('entire mesh requested, returning copy')
return mesh.copy()
# avoid nuking the cache on the original mesh
original_faces = mesh.faces.view(np.ndarray)
original_vertices = mesh.vertices.view(np.ndarray)
faces = []
vertices = []
normals = []
visuals = []
# for reindexing faces
mask = np.arange(len(original_vertices))
for faces_index in faces_sequence:
# sanitize indices in case they are coming in as a set or tuple
faces_index = np.asanyarray(faces_index, dtype=np.int64)
if len(faces_index) == 0:
continue
faces_current = original_faces[faces_index]
unique = np.unique(faces_current.reshape(-1))
# redefine face indices from zero
mask[unique] = np.arange(len(unique))
normals.append(mesh.face_normals[faces_index])
faces.append(mask[faces_current])
vertices.append(original_vertices[unique])
visuals.append(mesh.visual.face_subset(faces_index))
# we use type(mesh) rather than importing Trimesh from base
# to avoid a circular import
trimesh_type = type_named(mesh, 'Trimesh')
if append:
if all(hasattr(i, 'concatenate')
for i in visuals):
visuals = np.array(visuals)
visual = visuals[0].concatenate(visuals[1:])
else:
visual = None
vertices, faces = append_faces(vertices, faces)
appended = trimesh_type(
vertices=vertices,
faces=faces,
face_normals=np.vstack(normals),
visual=visual,
process=False)
return appended
# generate a list of Trimesh objects
result = [trimesh_type(
vertices=v,
faces=f,
face_normals=n,
visual=c,
metadata=copy.deepcopy(mesh.metadata),
process=False) for v, f, n, c in zip(vertices,
faces,
normals,
visuals)]
result = np.array(result)
if len(result) > 0 and only_watertight:
# fill_holes will attempt a repair and returns the
# watertight status at the end of the repair attempt
watertight = np.array([i.fill_holes() and len(i.faces) >= 4
for i in result])
# remove unrepairable meshes
result = result[watertight]
return result | Return a subset of a mesh.
Parameters
----------
mesh : Trimesh
Source mesh to take geometry from
faces_sequence : sequence (p,) int
Indexes of mesh.faces
only_watertight : bool
Only return submeshes which are watertight.
append : bool
Return a single mesh which has the faces appended,
if this flag is set, only_watertight is ignored
Returns
---------
if append : Trimesh object
    else list of Trimesh objects | Below is the instruction that describes the task:
### Input:
Return a subset of a mesh.
Parameters
----------
mesh : Trimesh
Source mesh to take geometry from
faces_sequence : sequence (p,) int
Indexes of mesh.faces
only_watertight : bool
Only return submeshes which are watertight.
append : bool
Return a single mesh which has the faces appended,
if this flag is set, only_watertight is ignored
Returns
---------
if append : Trimesh object
else list of Trimesh objects
### Response:
def submesh(mesh,
faces_sequence,
only_watertight=False,
append=False):
"""
Return a subset of a mesh.
Parameters
----------
mesh : Trimesh
Source mesh to take geometry from
faces_sequence : sequence (p,) int
Indexes of mesh.faces
only_watertight : bool
Only return submeshes which are watertight.
append : bool
Return a single mesh which has the faces appended,
if this flag is set, only_watertight is ignored
Returns
---------
if append : Trimesh object
else list of Trimesh objects
"""
# evaluate generators so we can escape early
faces_sequence = list(faces_sequence)
if len(faces_sequence) == 0:
return []
# check to make sure we're not doing a whole bunch of work
# to deliver a subset which ends up as the whole mesh
if len(faces_sequence[0]) == len(mesh.faces):
all_faces = np.array_equal(np.sort(faces_sequence),
np.arange(len(faces_sequence)))
if all_faces:
log.debug('entire mesh requested, returning copy')
return mesh.copy()
# avoid nuking the cache on the original mesh
original_faces = mesh.faces.view(np.ndarray)
original_vertices = mesh.vertices.view(np.ndarray)
faces = []
vertices = []
normals = []
visuals = []
# for reindexing faces
mask = np.arange(len(original_vertices))
for faces_index in faces_sequence:
# sanitize indices in case they are coming in as a set or tuple
faces_index = np.asanyarray(faces_index, dtype=np.int64)
if len(faces_index) == 0:
continue
faces_current = original_faces[faces_index]
unique = np.unique(faces_current.reshape(-1))
# redefine face indices from zero
mask[unique] = np.arange(len(unique))
normals.append(mesh.face_normals[faces_index])
faces.append(mask[faces_current])
vertices.append(original_vertices[unique])
visuals.append(mesh.visual.face_subset(faces_index))
# we use type(mesh) rather than importing Trimesh from base
# to avoid a circular import
trimesh_type = type_named(mesh, 'Trimesh')
if append:
if all(hasattr(i, 'concatenate')
for i in visuals):
visuals = np.array(visuals)
visual = visuals[0].concatenate(visuals[1:])
else:
visual = None
vertices, faces = append_faces(vertices, faces)
appended = trimesh_type(
vertices=vertices,
faces=faces,
face_normals=np.vstack(normals),
visual=visual,
process=False)
return appended
# generate a list of Trimesh objects
result = [trimesh_type(
vertices=v,
faces=f,
face_normals=n,
visual=c,
metadata=copy.deepcopy(mesh.metadata),
process=False) for v, f, n, c in zip(vertices,
faces,
normals,
visuals)]
result = np.array(result)
if len(result) > 0 and only_watertight:
# fill_holes will attempt a repair and returns the
# watertight status at the end of the repair attempt
watertight = np.array([i.fill_holes() and len(i.faces) >= 4
for i in result])
# remove unrepairable meshes
result = result[watertight]
return result |
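A hedged usage sketch of submesh; it assumes `mesh` is an existing trimesh.Trimesh and simply exercises the two return modes described above.
piece = submesh(mesh, [[0, 1, 2, 3]])[0]               # list of Trimesh objects; take the first
merged = submesh(mesh, [[0, 1], [2, 3]], append=True)  # single Trimesh with both face groups appended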
def set_row_height(self, row, tab, height):
"""Sets row height"""
try:
old_height = self.row_heights.pop((row, tab))
except KeyError:
old_height = None
if height is not None:
            self.row_heights[(row, tab)] = float(height) | Sets row height | Below is the instruction that describes the task:
### Input:
Sets row height
### Response:
def set_row_height(self, row, tab, height):
"""Sets row height"""
try:
old_height = self.row_heights.pop((row, tab))
except KeyError:
old_height = None
if height is not None:
self.row_heights[(row, tab)] = float(height) |
def set_stable_spot_instance_settings(self, maximum_bid_price_percentage=None,
timeout_for_request=None,
allow_fallback=True):
"""
Purchase options for stable spot instances.
`maximum_bid_price_percentage`: Maximum value to bid for stable node spot
instances, expressed as a percentage of the base price
(applies to both master and slave nodes).
`timeout_for_request`: Timeout for a stable node spot instance request (Unit:
minutes)
`allow_fallback`: Whether to fallback to on-demand instances for
stable nodes if spot instances are not available
"""
self.hadoop_settings['stable_spot_instance_settings'] = {
'maximum_bid_price_percentage': maximum_bid_price_percentage,
'timeout_for_request': timeout_for_request,
'allow_fallback': allow_fallback} | Purchase options for stable spot instances.
`maximum_bid_price_percentage`: Maximum value to bid for stable node spot
instances, expressed as a percentage of the base price
(applies to both master and slave nodes).
`timeout_for_request`: Timeout for a stable node spot instance request (Unit:
minutes)
`allow_fallback`: Whether to fallback to on-demand instances for
        stable nodes if spot instances are not available | Below is the instruction that describes the task:
### Input:
Purchase options for stable spot instances.
`maximum_bid_price_percentage`: Maximum value to bid for stable node spot
instances, expressed as a percentage of the base price
(applies to both master and slave nodes).
`timeout_for_request`: Timeout for a stable node spot instance request (Unit:
minutes)
`allow_fallback`: Whether to fallback to on-demand instances for
stable nodes if spot instances are not available
### Response:
def set_stable_spot_instance_settings(self, maximum_bid_price_percentage=None,
timeout_for_request=None,
allow_fallback=True):
"""
Purchase options for stable spot instances.
`maximum_bid_price_percentage`: Maximum value to bid for stable node spot
instances, expressed as a percentage of the base price
(applies to both master and slave nodes).
`timeout_for_request`: Timeout for a stable node spot instance request (Unit:
minutes)
`allow_fallback`: Whether to fallback to on-demand instances for
stable nodes if spot instances are not available
"""
self.hadoop_settings['stable_spot_instance_settings'] = {
'maximum_bid_price_percentage': maximum_bid_price_percentage,
'timeout_for_request': timeout_for_request,
'allow_fallback': allow_fallback} |
def map_data(self):
"""
provides a mapping from the CSV file to the
aikif data structures.
"""
with open(self.src_file, "r") as f:
for line in f:
cols = line.split(',')
print(cols) | provides a mapping from the CSV file to the
    aikif data structures. | Below is the instruction that describes the task:
### Input:
provides a mapping from the CSV file to the
aikif data structures.
### Response:
def map_data(self):
"""
provides a mapping from the CSV file to the
aikif data structures.
"""
with open(self.src_file, "r") as f:
for line in f:
cols = line.split(',')
print(cols) |
def allocate_objects(self, eps = 0.01, noise_size = 1):
"""!
@brief Allocates object segments.
        @param[in] eps (double): Tolerance level that defines the maximal difference between phases of oscillators in one segment.
        @param[in] noise_size (uint): Threshold that defines noise - segments whose size (in pixels) is less than the threshold are considered noise.
        @return (list) Object segments where each object segment consists of indexes of pixels that form the object segment.
"""
if (self.__object_segment_analysers is None):
return [];
segments = [];
for object_segment_analyser in self.__object_segment_analysers:
indexes = object_segment_analyser['color_segment'];
analyser = object_segment_analyser['analyser'];
segments += analyser.allocate_clusters(eps, indexes);
real_segments = [segment for segment in segments if len(segment) > noise_size];
return real_segments; | !
@brief Allocates object segments.
        @param[in] eps (double): Tolerance level that defines the maximal difference between phases of oscillators in one segment.
        @param[in] noise_size (uint): Threshold that defines noise - segments whose size (in pixels) is less than the threshold are considered noise.
        @return (list) Object segments where each object segment consists of indexes of pixels that form the object segment. | Below is the instruction that describes the task:
### Input:
!
@brief Allocates object segments.
        @param[in] eps (double): Tolerance level that defines the maximal difference between phases of oscillators in one segment.
        @param[in] noise_size (uint): Threshold that defines noise - segments whose size (in pixels) is less than the threshold are considered noise.
        @return (list) Object segments where each object segment consists of indexes of pixels that form the object segment.
### Response:
def allocate_objects(self, eps = 0.01, noise_size = 1):
"""!
@brief Allocates object segments.
        @param[in] eps (double): Tolerance level that defines the maximal difference between phases of oscillators in one segment.
        @param[in] noise_size (uint): Threshold that defines noise - segments whose size (in pixels) is less than the threshold are considered noise.
        @return (list) Object segments where each object segment consists of indexes of pixels that form the object segment.
"""
if (self.__object_segment_analysers is None):
return [];
segments = [];
for object_segment_analyser in self.__object_segment_analysers:
indexes = object_segment_analyser['color_segment'];
analyser = object_segment_analyser['analyser'];
segments += analyser.allocate_clusters(eps, indexes);
real_segments = [segment for segment in segments if len(segment) > noise_size];
return real_segments; |
def init(config, workdir=None, logfile=None, loglevel=logging.INFO, **kwargs):
"""
Initialize the Lago environment
Args:
config(str): Path to LagoInitFile
        workdir(str): Path to initialize the workdir, defaults to "$PWD/.lago"
**kwargs(dict): Pass arguments to :func:`~lago.cmd.do_init`
logfile(str): A path to setup a log file.
loglevel(int): :mod:`logging` log level.
Returns:
        :class:`~lago.sdk.SDK`: Initialized Lago environment
Raises:
:exc:`~lago.utils.LagoException`: If initialization failed
"""
setup_sdk_logging(logfile, loglevel)
defaults = lago_config.get_section('init')
if workdir is None:
workdir = os.path.abspath('.lago')
defaults['workdir'] = workdir
defaults['virt_config'] = config
defaults.update(kwargs)
workdir, prefix = cmd.do_init(**defaults)
return SDK(workdir, prefix) | Initialize the Lago environment
Args:
config(str): Path to LagoInitFile
        workdir(str): Path to initialize the workdir, defaults to "$PWD/.lago"
**kwargs(dict): Pass arguments to :func:`~lago.cmd.do_init`
logfile(str): A path to setup a log file.
loglevel(int): :mod:`logging` log level.
Returns:
        :class:`~lago.sdk.SDK`: Initialized Lago environment
Raises:
        :exc:`~lago.utils.LagoException`: If initialization failed | Below is the instruction that describes the task:
### Input:
Initialize the Lago environment
Args:
config(str): Path to LagoInitFile
        workdir(str): Path to initialize the workdir, defaults to "$PWD/.lago"
**kwargs(dict): Pass arguments to :func:`~lago.cmd.do_init`
logfile(str): A path to setup a log file.
loglevel(int): :mod:`logging` log level.
Returns:
        :class:`~lago.sdk.SDK`: Initialized Lago environment
Raises:
:exc:`~lago.utils.LagoException`: If initialization failed
### Response:
def init(config, workdir=None, logfile=None, loglevel=logging.INFO, **kwargs):
"""
Initialize the Lago environment
Args:
config(str): Path to LagoInitFile
        workdir(str): Path to initialize the workdir, defaults to "$PWD/.lago"
**kwargs(dict): Pass arguments to :func:`~lago.cmd.do_init`
logfile(str): A path to setup a log file.
loglevel(int): :mod:`logging` log level.
Returns:
        :class:`~lago.sdk.SDK`: Initialized Lago environment
Raises:
:exc:`~lago.utils.LagoException`: If initialization failed
"""
setup_sdk_logging(logfile, loglevel)
defaults = lago_config.get_section('init')
if workdir is None:
workdir = os.path.abspath('.lago')
defaults['workdir'] = workdir
defaults['virt_config'] = config
defaults.update(kwargs)
workdir, prefix = cmd.do_init(**defaults)
return SDK(workdir, prefix) |
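A minimal, hypothetical call to the SDK entry point above (paths are placeholders):
import logging

env = init(config='LagoInitFile',
           workdir='/tmp/my-lago-env',
           logfile='/tmp/lago-sdk.log',
           loglevel=logging.DEBUG)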
def getDatetimeAxis():
"""
use datetime as x-axis
"""
dataSet = 'nyc_taxi'
filePath = './data/' + dataSet + '.csv'
data = pd.read_csv(filePath, header=0, skiprows=[1, 2],
names=['datetime', 'value', 'timeofday', 'dayofweek'])
xaxisDate = pd.to_datetime(data['datetime'])
    return xaxisDate | use datetime as x-axis | Below is the instruction that describes the task:
### Input:
use datetime as x-axis
### Response:
def getDatetimeAxis():
"""
use datetime as x-axis
"""
dataSet = 'nyc_taxi'
filePath = './data/' + dataSet + '.csv'
data = pd.read_csv(filePath, header=0, skiprows=[1, 2],
names=['datetime', 'value', 'timeofday', 'dayofweek'])
xaxisDate = pd.to_datetime(data['datetime'])
return xaxisDate |
def getSpec(cls):
"""
Return the Spec for ApicalTMSequenceRegion.
"""
spec = {
"description": ApicalTMSequenceRegion.__doc__,
"singleNodeOnly": True,
"inputs": {
"activeColumns": {
"description": ("An array of 0's and 1's representing the active "
"minicolumns, i.e. the input to the TemporalMemory"),
"dataType": "Real32",
"count": 0,
"required": True,
"regionLevel": True,
"isDefaultInput": True,
"requireSplitterMap": False
},
"resetIn": {
"description": ("A boolean flag that indicates whether"
" or not the input vector received in this compute cycle"
" represents the first presentation in a"
" new temporal sequence."),
"dataType": "Real32",
"count": 1,
"required": False,
"regionLevel": True,
"isDefaultInput": False,
"requireSplitterMap": False
},
"apicalInput": {
"description": "An array of 0's and 1's representing top down input."
" The input will be provided to apical dendrites.",
"dataType": "Real32",
"count": 0,
"required": False,
"regionLevel": True,
"isDefaultInput": False,
"requireSplitterMap": False
},
"apicalGrowthCandidates": {
"description": ("An array of 0's and 1's representing apical input "
"that can be learned on new synapses on apical "
"segments. If this input is a length-0 array, the "
"whole apicalInput is used."),
"dataType": "Real32",
"count": 0,
"required": False,
"regionLevel": True,
"isDefaultInput": False,
"requireSplitterMap": False
},
},
"outputs": {
"nextPredictedCells": {
"description": ("A binary output containing a 1 for every "
"cell that is predicted for the next timestep."),
"dataType": "Real32",
"count": 0,
"regionLevel": True,
"isDefaultOutput": False
},
"predictedActiveCells": {
"description": ("A binary output containing a 1 for every "
"cell that transitioned from predicted to active."),
"dataType": "Real32",
"count": 0,
"regionLevel": True,
"isDefaultOutput": False
},
"activeCells": {
"description": ("A binary output containing a 1 for every "
"cell that is currently active."),
"dataType": "Real32",
"count": 0,
"regionLevel": True,
"isDefaultOutput": True
},
"winnerCells": {
"description": ("A binary output containing a 1 for every "
"'winner' cell in the TM."),
"dataType": "Real32",
"count": 0,
"regionLevel": True,
"isDefaultOutput": False
},
},
"parameters": {
# Input sizes (the network API doesn't provide these during initialize)
"columnCount": {
"description": ("The size of the 'activeColumns' input " +
"(i.e. the number of columns)"),
"accessMode": "Read",
"dataType": "UInt32",
"count": 1,
"constraints": ""
},
"apicalInputWidth": {
"description": "The size of the 'apicalInput' input",
"accessMode": "Read",
"dataType": "UInt32",
"count": 1,
"constraints": ""
},
"learn": {
"description": "True if the TM should learn.",
"accessMode": "ReadWrite",
"dataType": "Bool",
"count": 1,
"defaultValue": "true"
},
"cellsPerColumn": {
"description": "Number of cells per column",
"accessMode": "Read",
"dataType": "UInt32",
"count": 1,
"constraints": ""
},
"activationThreshold": {
"description": ("If the number of active connected synapses on a "
"segment is at least this threshold, the segment "
"is said to be active."),
"accessMode": "Read",
"dataType": "UInt32",
"count": 1,
"constraints": ""
},
"reducedBasalThreshold": {
"description": ("Activation threshold of basal segments for cells "
"with active apical segments (with apicalTiebreak "
"implementation). "),
"accessMode": "Read",
"dataType": "UInt32",
"count": 1,
"constraints": ""
},
"initialPermanence": {
"description": "Initial permanence of a new synapse.",
"accessMode": "Read",
"dataType": "Real32",
"count": 1,
"constraints": ""
},
"connectedPermanence": {
"description": ("If the permanence value for a synapse is greater "
"than this value, it is said to be connected."),
"accessMode": "Read",
"dataType": "Real32",
"count": 1,
"constraints": ""
},
"minThreshold": {
"description": ("If the number of synapses active on a segment is at "
"least this threshold, it is selected as the best "
"matching cell in a bursting column."),
"accessMode": "Read",
"dataType": "UInt32",
"count": 1,
"constraints": ""
},
"sampleSize": {
"description": ("The desired number of active synapses for an " +
"active cell"),
"accessMode": "Read",
"dataType": "UInt32",
"count": 1
},
"learnOnOneCell": {
"description": ("If True, the winner cell for each column will be"
" fixed between resets."),
"accessMode": "Read",
"dataType": "Bool",
"count": 1,
"defaultValue": "false"
},
"maxSynapsesPerSegment": {
"description": "The maximum number of synapses per segment",
"accessMode": "Read",
"dataType": "UInt32",
"count": 1
},
"maxSegmentsPerCell": {
"description": "The maximum number of segments per cell",
"accessMode": "Read",
"dataType": "UInt32",
"count": 1
},
"permanenceIncrement": {
"description": ("Amount by which permanences of synapses are "
"incremented during learning."),
"accessMode": "Read",
"dataType": "Real32",
"count": 1
},
"permanenceDecrement": {
"description": ("Amount by which permanences of synapses are "
"decremented during learning."),
"accessMode": "Read",
"dataType": "Real32",
"count": 1
},
"basalPredictedSegmentDecrement": {
"description": ("Amount by which active permanences of synapses of "
"previously predicted but inactive segments are "
"decremented."),
"accessMode": "Read",
"dataType": "Real32",
"count": 1
},
"apicalPredictedSegmentDecrement": {
"description": ("Amount by which active permanences of synapses of "
"previously predicted but inactive segments are "
"decremented."),
"accessMode": "Read",
"dataType": "Real32",
"count": 1
},
"seed": {
"description": "Seed for the random number generator.",
"accessMode": "Read",
"dataType": "UInt32",
"count": 1
},
"implementation": {
"description": "Apical implementation",
"accessMode": "Read",
"dataType": "Byte",
"count": 0,
"constraints": ("enum: ApicalTiebreak, ApicalTiebreakCPP, ApicalDependent"),
"defaultValue": "ApicalTiebreakCPP"
},
},
}
    return spec | Return the Spec for ApicalTMSequenceRegion. | Below is the instruction that describes the task:
### Input:
Return the Spec for ApicalTMSequenceRegion.
### Response:
def getSpec(cls):
"""
Return the Spec for ApicalTMSequenceRegion.
"""
spec = {
"description": ApicalTMSequenceRegion.__doc__,
"singleNodeOnly": True,
"inputs": {
"activeColumns": {
"description": ("An array of 0's and 1's representing the active "
"minicolumns, i.e. the input to the TemporalMemory"),
"dataType": "Real32",
"count": 0,
"required": True,
"regionLevel": True,
"isDefaultInput": True,
"requireSplitterMap": False
},
"resetIn": {
"description": ("A boolean flag that indicates whether"
" or not the input vector received in this compute cycle"
" represents the first presentation in a"
" new temporal sequence."),
"dataType": "Real32",
"count": 1,
"required": False,
"regionLevel": True,
"isDefaultInput": False,
"requireSplitterMap": False
},
"apicalInput": {
"description": "An array of 0's and 1's representing top down input."
" The input will be provided to apical dendrites.",
"dataType": "Real32",
"count": 0,
"required": False,
"regionLevel": True,
"isDefaultInput": False,
"requireSplitterMap": False
},
"apicalGrowthCandidates": {
"description": ("An array of 0's and 1's representing apical input "
"that can be learned on new synapses on apical "
"segments. If this input is a length-0 array, the "
"whole apicalInput is used."),
"dataType": "Real32",
"count": 0,
"required": False,
"regionLevel": True,
"isDefaultInput": False,
"requireSplitterMap": False
},
},
"outputs": {
"nextPredictedCells": {
"description": ("A binary output containing a 1 for every "
"cell that is predicted for the next timestep."),
"dataType": "Real32",
"count": 0,
"regionLevel": True,
"isDefaultOutput": False
},
"predictedActiveCells": {
"description": ("A binary output containing a 1 for every "
"cell that transitioned from predicted to active."),
"dataType": "Real32",
"count": 0,
"regionLevel": True,
"isDefaultOutput": False
},
"activeCells": {
"description": ("A binary output containing a 1 for every "
"cell that is currently active."),
"dataType": "Real32",
"count": 0,
"regionLevel": True,
"isDefaultOutput": True
},
"winnerCells": {
"description": ("A binary output containing a 1 for every "
"'winner' cell in the TM."),
"dataType": "Real32",
"count": 0,
"regionLevel": True,
"isDefaultOutput": False
},
},
"parameters": {
# Input sizes (the network API doesn't provide these during initialize)
"columnCount": {
"description": ("The size of the 'activeColumns' input " +
"(i.e. the number of columns)"),
"accessMode": "Read",
"dataType": "UInt32",
"count": 1,
"constraints": ""
},
"apicalInputWidth": {
"description": "The size of the 'apicalInput' input",
"accessMode": "Read",
"dataType": "UInt32",
"count": 1,
"constraints": ""
},
"learn": {
"description": "True if the TM should learn.",
"accessMode": "ReadWrite",
"dataType": "Bool",
"count": 1,
"defaultValue": "true"
},
"cellsPerColumn": {
"description": "Number of cells per column",
"accessMode": "Read",
"dataType": "UInt32",
"count": 1,
"constraints": ""
},
"activationThreshold": {
"description": ("If the number of active connected synapses on a "
"segment is at least this threshold, the segment "
"is said to be active."),
"accessMode": "Read",
"dataType": "UInt32",
"count": 1,
"constraints": ""
},
"reducedBasalThreshold": {
"description": ("Activation threshold of basal segments for cells "
"with active apical segments (with apicalTiebreak "
"implementation). "),
"accessMode": "Read",
"dataType": "UInt32",
"count": 1,
"constraints": ""
},
"initialPermanence": {
"description": "Initial permanence of a new synapse.",
"accessMode": "Read",
"dataType": "Real32",
"count": 1,
"constraints": ""
},
"connectedPermanence": {
"description": ("If the permanence value for a synapse is greater "
"than this value, it is said to be connected."),
"accessMode": "Read",
"dataType": "Real32",
"count": 1,
"constraints": ""
},
"minThreshold": {
"description": ("If the number of synapses active on a segment is at "
"least this threshold, it is selected as the best "
"matching cell in a bursting column."),
"accessMode": "Read",
"dataType": "UInt32",
"count": 1,
"constraints": ""
},
"sampleSize": {
"description": ("The desired number of active synapses for an " +
"active cell"),
"accessMode": "Read",
"dataType": "UInt32",
"count": 1
},
"learnOnOneCell": {
"description": ("If True, the winner cell for each column will be"
" fixed between resets."),
"accessMode": "Read",
"dataType": "Bool",
"count": 1,
"defaultValue": "false"
},
"maxSynapsesPerSegment": {
"description": "The maximum number of synapses per segment",
"accessMode": "Read",
"dataType": "UInt32",
"count": 1
},
"maxSegmentsPerCell": {
"description": "The maximum number of segments per cell",
"accessMode": "Read",
"dataType": "UInt32",
"count": 1
},
"permanenceIncrement": {
"description": ("Amount by which permanences of synapses are "
"incremented during learning."),
"accessMode": "Read",
"dataType": "Real32",
"count": 1
},
"permanenceDecrement": {
"description": ("Amount by which permanences of synapses are "
"decremented during learning."),
"accessMode": "Read",
"dataType": "Real32",
"count": 1
},
"basalPredictedSegmentDecrement": {
"description": ("Amount by which active permanences of synapses of "
"previously predicted but inactive segments are "
"decremented."),
"accessMode": "Read",
"dataType": "Real32",
"count": 1
},
"apicalPredictedSegmentDecrement": {
"description": ("Amount by which active permanences of synapses of "
"previously predicted but inactive segments are "
"decremented."),
"accessMode": "Read",
"dataType": "Real32",
"count": 1
},
"seed": {
"description": "Seed for the random number generator.",
"accessMode": "Read",
"dataType": "UInt32",
"count": 1
},
"implementation": {
"description": "Apical implementation",
"accessMode": "Read",
"dataType": "Byte",
"count": 0,
"constraints": ("enum: ApicalTiebreak, ApicalTiebreakCPP, ApicalDependent"),
"defaultValue": "ApicalTiebreakCPP"
},
},
}
return spec |
def _map_input(self, input_stream):
"""
Iterate over input and call the mapper for each item.
If the job has a parser defined, the return values from the parser will
be passed as arguments to the mapper.
If the input is coded output from a previous run,
    the arguments will be split into key and value.
"""
for record in self.reader(input_stream):
for output in self.mapper(*record):
yield output
if self.final_mapper != NotImplemented:
for output in self.final_mapper():
yield output
self._flush_batch_incr_counter() | Iterate over input and call the mapper for each item.
If the job has a parser defined, the return values from the parser will
be passed as arguments to the mapper.
If the input is coded output from a previous run,
    the arguments will be split into key and value. | Below is the instruction that describes the task:
### Input:
Iterate over input and call the mapper for each item.
If the job has a parser defined, the return values from the parser will
be passed as arguments to the mapper.
If the input is coded output from a previous run,
    the arguments will be split into key and value.
### Response:
def _map_input(self, input_stream):
"""
Iterate over input and call the mapper for each item.
If the job has a parser defined, the return values from the parser will
be passed as arguments to the mapper.
If the input is coded output from a previous run,
    the arguments will be split into key and value.
"""
for record in self.reader(input_stream):
for output in self.mapper(*record):
yield output
if self.final_mapper != NotImplemented:
for output in self.final_mapper():
yield output
self._flush_batch_incr_counter() |
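A toy harness for the method above, showing the iteration order; the reader, mapper, and job class here are invented for illustration and are not part of the source:

class ToyJob:
    final_mapper = NotImplemented

    def reader(self, input_stream):
        # parse each tab-separated line into a (key, value) record
        for line in input_stream:
            yield line.strip().split('\t')

    def mapper(self, key, value):
        yield key, int(value)

    def _flush_batch_incr_counter(self):
        pass  # counters are not exercised in this sketch

ToyJob._map_input = _map_input  # reuse the function defined above as a method

job = ToyJob()
print(list(job._map_input(['a\t1', 'b\t2'])))  # [('a', 1), ('b', 2)]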
def get_dev_details(ip_address):
"""Takes string input of IP address to issue RESTUL call to HP IMC
:param ip_address: string object of dotted decimal notation of IPv4 address
:return: dictionary of device details
>>> get_dev_details('10.101.0.1')
{'symbolLevel': '2', 'typeName': 'Cisco 2811', 'location': 'changed this too', 'status': '1', 'sysName': 'Cisco2811.haw.int', 'id': '30', 'symbolType': '3', 'symbolId': '1032', 'sysDescription': '', 'symbolName': 'Cisco2811.haw.int', 'mask': '255.255.255.0', 'label': 'Cisco2811.haw.int', 'symbolDesc': '', 'sysOid': '1.3.6.1.4.1.9.1.576', 'contact': 'changed this too', 'statusDesc': 'Normal', 'parentId': '1', 'categoryId': '0', 'topoIconName': 'iconroute', 'mac': '00:1b:d4:47:1e:68', 'devCategoryImgSrc': 'router', 'link': {'@rel': 'self', '@href': 'http://10.101.0.202:8080/imcrs/plat/res/device/30', '@op': 'GET'}, 'ip': '10.101.0.1'}
>>> get_dev_details('8.8.8.8')
Device not found
'Device not found'
"""
# checks to see if the imc credentials are already available
if auth is None or url is None:
set_imc_creds()
global r
get_dev_details_url = "/imcrs/plat/res/device?resPrivilegeFilter=false&ip=" + \
str(ip_address) + "&start=0&size=1000&orderBy=id&desc=false&total=false"
f_url = url + get_dev_details_url
payload = None
# creates the URL using the payload variable as the contents
r = requests.get(f_url, auth=auth, headers=headers)
# r.status_code
if r.status_code == 200:
dev_details = (json.loads(r.text))
if len(dev_details) == 0:
print("Device not found")
return "Device not found"
elif type(dev_details['device']) == list:
for i in dev_details['device']:
if i['ip'] == ip_address:
dev_details = i
return dev_details
elif type(dev_details['device']) == dict:
return dev_details['device']
else:
print("dev_details: An Error has occured") | Takes string input of IP address to issue RESTUL call to HP IMC
:param ip_address: string object of dotted decimal notation of IPv4 address
:return: dictionary of device details
>>> get_dev_details('10.101.0.1')
{'symbolLevel': '2', 'typeName': 'Cisco 2811', 'location': 'changed this too', 'status': '1', 'sysName': 'Cisco2811.haw.int', 'id': '30', 'symbolType': '3', 'symbolId': '1032', 'sysDescription': '', 'symbolName': 'Cisco2811.haw.int', 'mask': '255.255.255.0', 'label': 'Cisco2811.haw.int', 'symbolDesc': '', 'sysOid': '1.3.6.1.4.1.9.1.576', 'contact': 'changed this too', 'statusDesc': 'Normal', 'parentId': '1', 'categoryId': '0', 'topoIconName': 'iconroute', 'mac': '00:1b:d4:47:1e:68', 'devCategoryImgSrc': 'router', 'link': {'@rel': 'self', '@href': 'http://10.101.0.202:8080/imcrs/plat/res/device/30', '@op': 'GET'}, 'ip': '10.101.0.1'}
>>> get_dev_details('8.8.8.8')
Device not found
    'Device not found' | Below is the instruction that describes the task:
### Input:
Takes string input of IP address to issue a RESTful call to HP IMC
:param ip_address: string object of dotted decimal notation of IPv4 address
:return: dictionary of device details
>>> get_dev_details('10.101.0.1')
{'symbolLevel': '2', 'typeName': 'Cisco 2811', 'location': 'changed this too', 'status': '1', 'sysName': 'Cisco2811.haw.int', 'id': '30', 'symbolType': '3', 'symbolId': '1032', 'sysDescription': '', 'symbolName': 'Cisco2811.haw.int', 'mask': '255.255.255.0', 'label': 'Cisco2811.haw.int', 'symbolDesc': '', 'sysOid': '1.3.6.1.4.1.9.1.576', 'contact': 'changed this too', 'statusDesc': 'Normal', 'parentId': '1', 'categoryId': '0', 'topoIconName': 'iconroute', 'mac': '00:1b:d4:47:1e:68', 'devCategoryImgSrc': 'router', 'link': {'@rel': 'self', '@href': 'http://10.101.0.202:8080/imcrs/plat/res/device/30', '@op': 'GET'}, 'ip': '10.101.0.1'}
>>> get_dev_details('8.8.8.8')
Device not found
'Device not found'
### Response:
def get_dev_details(ip_address):
"""Takes string input of IP address to issue RESTUL call to HP IMC
:param ip_address: string object of dotted decimal notation of IPv4 address
:return: dictionary of device details
>>> get_dev_details('10.101.0.1')
{'symbolLevel': '2', 'typeName': 'Cisco 2811', 'location': 'changed this too', 'status': '1', 'sysName': 'Cisco2811.haw.int', 'id': '30', 'symbolType': '3', 'symbolId': '1032', 'sysDescription': '', 'symbolName': 'Cisco2811.haw.int', 'mask': '255.255.255.0', 'label': 'Cisco2811.haw.int', 'symbolDesc': '', 'sysOid': '1.3.6.1.4.1.9.1.576', 'contact': 'changed this too', 'statusDesc': 'Normal', 'parentId': '1', 'categoryId': '0', 'topoIconName': 'iconroute', 'mac': '00:1b:d4:47:1e:68', 'devCategoryImgSrc': 'router', 'link': {'@rel': 'self', '@href': 'http://10.101.0.202:8080/imcrs/plat/res/device/30', '@op': 'GET'}, 'ip': '10.101.0.1'}
>>> get_dev_details('8.8.8.8')
Device not found
'Device not found'
"""
# checks to see if the imc credentials are already available
if auth is None or url is None:
set_imc_creds()
global r
get_dev_details_url = "/imcrs/plat/res/device?resPrivilegeFilter=false&ip=" + \
str(ip_address) + "&start=0&size=1000&orderBy=id&desc=false&total=false"
f_url = url + get_dev_details_url
payload = None
# creates the URL using the payload variable as the contents
r = requests.get(f_url, auth=auth, headers=headers)
# r.status_code
if r.status_code == 200:
dev_details = (json.loads(r.text))
if len(dev_details) == 0:
print("Device not found")
return "Device not found"
elif type(dev_details['device']) == list:
for i in dev_details['device']:
if i['ip'] == ip_address:
dev_details = i
return dev_details
elif type(dev_details['device']) == dict:
return dev_details['device']
else:
print("dev_details: An Error has occured") |
def get_mean_and_stddevs(self, sites, rup, dists, imt, stddev_types):
"""
See :meth:`superclass method
<.base.GroundShakingIntensityModel.get_mean_and_stddevs>`
for spec of input and result values.
"""
# extracting dictionary of coefficients specific to required
# intensity measure type.
C = self.COEFFS[imt]
mean = (self._get_magnitude_scaling(C, rup.mag) +
self._get_distance_scaling(C, dists, rup.mag) +
self._get_site_term(C, sites.vs30))
# Mean is returned in terms of m/s^2. Need to convert to g
mean -= np.log(g)
stddevs = self.get_stddevs(C, sites.vs30.shape, stddev_types)
return mean + self.adjustment_factor, stddevs | See :meth:`superclass method
<.base.GroundShakingIntensityModel.get_mean_and_stddevs>`
        for spec of input and result values. | Below is the instruction that describes the task:
### Input:
See :meth:`superclass method
<.base.GroundShakingIntensityModel.get_mean_and_stddevs>`
for spec of input and result values.
### Response:
def get_mean_and_stddevs(self, sites, rup, dists, imt, stddev_types):
"""
See :meth:`superclass method
<.base.GroundShakingIntensityModel.get_mean_and_stddevs>`
for spec of input and result values.
"""
# extracting dictionary of coefficients specific to required
# intensity measure type.
C = self.COEFFS[imt]
mean = (self._get_magnitude_scaling(C, rup.mag) +
self._get_distance_scaling(C, dists, rup.mag) +
self._get_site_term(C, sites.vs30))
# Mean is returned in terms of m/s^2. Need to convert to g
mean -= np.log(g)
stddevs = self.get_stddevs(C, sites.vs30.shape, stddev_types)
return mean + self.adjustment_factor, stddevs |
def colors_like(color, arr, colormap=DEFAULT_COLORMAP):
'''
Given an array of size NxM (usually Nx3), we accept color in the following ways:
- A string color name. The accepted names are roughly what's in X11's rgb.txt
- An explicit rgb triple, in (3, ), (3, 1), or (1, 3) shape
- A list of values (N, ), (N, 1), or (1, N) that are put through a colormap to get per vertex color
- An array of colors (N, 3) or (3, N)
There is a potential for conflict here if N == 3. In that case we assume a value is an rgb triple,
    not a colormap index. This is a sort of degenerate case, as a mesh with three vertices is just a single
triangle and not something we ever actually use in practice.
'''
import numpy as np
from blmath.numerics import is_empty_arraylike
if is_empty_arraylike(color):
return None
if isinstance(color, basestring):
from lace.color_names import name_to_rgb
color = name_to_rgb[color]
elif isinstance(color, list):
color = np.array(color)
color = np.squeeze(color)
num_verts = arr.shape[0]
if color.ndim == 1:
if color.shape[0] == 3: # rgb triple
return np.ones((num_verts, 3)) * np.array([color])
else:
from matplotlib import cm
return np.ones((num_verts, 3)) * cm.get_cmap(colormap)(color.flatten())[:, :3]
elif color.ndim == 2:
if color.shape[1] == num_verts:
color = color.T
return np.ones((num_verts, 3)) * color
else:
raise ValueError("Colors must be specified as one or two dimensions") | Given an array of size NxM (usually Nx3), we accept color in the following ways:
- A string color name. The accepted names are roughly what's in X11's rgb.txt
- An explicit rgb triple, in (3, ), (3, 1), or (1, 3) shape
- A list of values (N, ), (N, 1), or (1, N) that are put through a colormap to get per vertex color
- An array of colors (N, 3) or (3, N)
There is a potential for conflict here if N == 3. In that case we assume a value is an rgb triple,
    not a colormap index. This is a sort of degenerate case, as a mesh with three vertices is just a single
    triangle and not something we ever actually use in practice. | Below is the instruction that describes the task:
### Input:
Given an array of size NxM (usually Nx3), we accept color in the following ways:
- A string color name. The accepted names are roughly what's in X11's rgb.txt
- An explicit rgb triple, in (3, ), (3, 1), or (1, 3) shape
- A list of values (N, ), (N, 1), or (1, N) that are put through a colormap to get per vertex color
- An array of colors (N, 3) or (3, N)
There is a potential for conflict here if N == 3. In that case we assume a value is an rgb triple,
    not a colormap index. This is a sort of degenerate case, as a mesh with three vertices is just a single
triangle and not something we ever actually use in practice.
### Response:
def colors_like(color, arr, colormap=DEFAULT_COLORMAP):
'''
Given an array of size NxM (usually Nx3), we accept color in the following ways:
- A string color name. The accepted names are roughly what's in X11's rgb.txt
- An explicit rgb triple, in (3, ), (3, 1), or (1, 3) shape
- A list of values (N, ), (N, 1), or (1, N) that are put through a colormap to get per vertex color
- An array of colors (N, 3) or (3, N)
There is a potential for conflict here if N == 3. In that case we assume a value is an rgb triple,
    not a colormap index. This is a sort of degenerate case, as a mesh with three vertices is just a single
triangle and not something we ever actually use in practice.
'''
import numpy as np
from blmath.numerics import is_empty_arraylike
if is_empty_arraylike(color):
return None
if isinstance(color, basestring):
from lace.color_names import name_to_rgb
color = name_to_rgb[color]
elif isinstance(color, list):
color = np.array(color)
color = np.squeeze(color)
num_verts = arr.shape[0]
if color.ndim == 1:
if color.shape[0] == 3: # rgb triple
return np.ones((num_verts, 3)) * np.array([color])
else:
from matplotlib import cm
return np.ones((num_verts, 3)) * cm.get_cmap(colormap)(color.flatten())[:, :3]
elif color.ndim == 2:
if color.shape[1] == num_verts:
color = color.T
return np.ones((num_verts, 3)) * color
else:
raise ValueError("Colors must be specified as one or two dimensions") |
def sources(self):
"""
Get the sources for a given experience_id, which is tied to a specific language
:param experience_id: int; video content id
:return: sources dict
"""
api_url = self.sources_api_url.format(experience_id=self.experience_id)
res = self.get(api_url, params={"pinst_id": self.pinst_id})
return self.session.http.json(res) | Get the sources for a given experience_id, which is tied to a specific language
:param experience_id: int; video content id
        :return: sources dict | Below is the instruction that describes the task:
### Input:
Get the sources for a given experience_id, which is tied to a specific language
:param experience_id: int; video content id
:return: sources dict
### Response:
def sources(self):
"""
Get the sources for a given experience_id, which is tied to a specific language
:param experience_id: int; video content id
:return: sources dict
"""
api_url = self.sources_api_url.format(experience_id=self.experience_id)
res = self.get(api_url, params={"pinst_id": self.pinst_id})
return self.session.http.json(res) |
def drop_column(self, name):
"""Drop the column ``name``.
::
table.drop_column('created_at')
"""
if self.db.engine.dialect.name == 'sqlite':
raise RuntimeError("SQLite does not support dropping columns.")
name = normalize_column_name(name)
with self.db.lock:
if not self.exists or not self.has_column(name):
log.debug("Column does not exist: %s", name)
return
self._threading_warn()
self.db.op.drop_column(
self.table.name,
name,
self.table.schema
)
self._reflect_table() | Drop the column ``name``.
::
            table.drop_column('created_at') | Below is the instruction that describes the task:
### Input:
Drop the column ``name``.
::
table.drop_column('created_at')
### Response:
def drop_column(self, name):
"""Drop the column ``name``.
::
table.drop_column('created_at')
"""
if self.db.engine.dialect.name == 'sqlite':
raise RuntimeError("SQLite does not support dropping columns.")
name = normalize_column_name(name)
with self.db.lock:
if not self.exists or not self.has_column(name):
log.debug("Column does not exist: %s", name)
return
self._threading_warn()
self.db.op.drop_column(
self.table.name,
name,
self.table.schema
)
self._reflect_table() |
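A brief usage sketch with the dataset library; the connection URL, table, and column names are illustrative assumptions, and a non-SQLite engine is required since the method refuses to run on SQLite:

import dataset

db = dataset.connect('postgresql://scott:tiger@localhost/test')  # hypothetical database URL
table = db['events']
table.insert({'title': 'first', 'created_at': '2020-01-01'})
table.drop_column('created_at')  # silently returns if the column does not exist
print(table.columns)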
def get_random_edge(self):
"""This function should be run when there are no leaves, but there are still unscored nodes. It will introduce
a probabilistic element to the algorithm, where some edges are disregarded randomly to eventually get a score
for the network. This means that the score can be averaged over many runs for a given graph, and a better
data structure will have to be later developed that doesn't destroy the graph (instead, annotates which edges
have been disregarded, later)
1. get all un-scored
2. rank by in-degree
3. weighted probability over all in-edges where lower in-degree means higher probability
4. pick randomly which edge
:return: A random in-edge to the lowest in/out degree ratio node. This is a 3-tuple of (node, node, key)
:rtype: tuple
"""
nodes = [
(n, self.in_out_ratio(n))
for n in self.unscored_nodes_iter()
if n != self.target_node
]
node, deg = min(nodes, key=itemgetter(1))
log.log(5, 'checking %s (in/out ratio: %.3f)', node, deg)
possible_edges = self.graph.in_edges(node, keys=True)
log.log(5, 'possible edges: %s', possible_edges)
edge_to_remove = random.choice(possible_edges)
log.log(5, 'chose: %s', edge_to_remove)
return edge_to_remove | This function should be run when there are no leaves, but there are still unscored nodes. It will introduce
a probabilistic element to the algorithm, where some edges are disregarded randomly to eventually get a score
for the network. This means that the score can be averaged over many runs for a given graph, and a better
data structure will have to be later developed that doesn't destroy the graph (instead, annotates which edges
have been disregarded, later)
1. get all un-scored
2. rank by in-degree
3. weighted probability over all in-edges where lower in-degree means higher probability
4. pick randomly which edge
:return: A random in-edge to the lowest in/out degree ratio node. This is a 3-tuple of (node, node, key)
        :rtype: tuple | Below is the instruction that describes the task:
### Input:
This function should be run when there are no leaves, but there are still unscored nodes. It will introduce
a probabilistic element to the algorithm, where some edges are disregarded randomly to eventually get a score
for the network. This means that the score can be averaged over many runs for a given graph, and a better
data structure will have to be later developed that doesn't destroy the graph (instead, annotates which edges
have been disregarded, later)
1. get all un-scored
2. rank by in-degree
3. weighted probability over all in-edges where lower in-degree means higher probability
4. pick randomly which edge
:return: A random in-edge to the lowest in/out degree ratio node. This is a 3-tuple of (node, node, key)
:rtype: tuple
### Response:
def get_random_edge(self):
"""This function should be run when there are no leaves, but there are still unscored nodes. It will introduce
a probabilistic element to the algorithm, where some edges are disregarded randomly to eventually get a score
for the network. This means that the score can be averaged over many runs for a given graph, and a better
data structure will have to be later developed that doesn't destroy the graph (instead, annotates which edges
have been disregarded, later)
1. get all un-scored
2. rank by in-degree
3. weighted probability over all in-edges where lower in-degree means higher probability
4. pick randomly which edge
:return: A random in-edge to the lowest in/out degree ratio node. This is a 3-tuple of (node, node, key)
:rtype: tuple
"""
nodes = [
(n, self.in_out_ratio(n))
for n in self.unscored_nodes_iter()
if n != self.target_node
]
node, deg = min(nodes, key=itemgetter(1))
log.log(5, 'checking %s (in/out ratio: %.3f)', node, deg)
possible_edges = self.graph.in_edges(node, keys=True)
log.log(5, 'possible edges: %s', possible_edges)
edge_to_remove = random.choice(possible_edges)
log.log(5, 'chose: %s', edge_to_remove)
return edge_to_remove |
def setup_experiment(debug=True, verbose=False, app=None):
"""Check the app and, if it's compatible with Wallace, freeze its state."""
print_header()
# Verify that the package is usable.
log("Verifying that directory is compatible with Wallace...")
if not verify_package(verbose=verbose):
raise AssertionError(
"This is not a valid Wallace app. " +
"Fix the errors and then try running 'wallace verify'.")
# Verify that the Postgres server is running.
try:
psycopg2.connect(database="x", user="postgres", password="nada")
    except psycopg2.OperationalError as e:
if "could not connect to server" in str(e):
raise RuntimeError("The Postgres server isn't running.")
# Load psiTurk configuration.
config = PsiturkConfig()
config.load_config()
# Check that the demo-specific requirements are satisfied.
try:
with open("requirements.txt", "r") as f:
dependencies = f.readlines()
except:
dependencies = []
pkg_resources.require(dependencies)
# Generate a unique id for this experiment.
id = "w" + str(uuid.uuid4())[0:28]
# If the user provided an app name, use it everywhere that's user-facing.
if app:
id_long = id
id = str(app)
log("Running as experiment " + id + "...")
# Copy this directory into a temporary folder, ignoring .git
dst = os.path.join(tempfile.mkdtemp(), id)
to_ignore = shutil.ignore_patterns(
".git/*",
"*.db",
"snapshots",
"data",
"server.log"
)
shutil.copytree(os.getcwd(), dst, ignore=to_ignore)
click.echo(dst)
# Save the experiment id
with open(os.path.join(dst, "experiment_id.txt"), "w") as file:
if app:
file.write(id_long)
else:
file.write(id)
# Zip up the temporary directory and place it in the cwd.
if not debug:
log("Freezing the experiment package...")
shutil.make_archive(
os.path.join("snapshots", id + "-code"), "zip", dst)
# Change directory to the temporary folder.
cwd = os.getcwd()
os.chdir(dst)
# Check directories.
if not os.path.exists("static/scripts"):
os.makedirs("static/scripts")
if not os.path.exists("templates"):
os.makedirs("templates")
if not os.path.exists("static/css"):
os.makedirs("static/css")
    # Rename experiment.py to wallace_experiment.py to avoid psiTurk conflict.
os.rename(
os.path.join(dst, "experiment.py"),
os.path.join(dst, "wallace_experiment.py"))
# Copy files into this experiment package.
src = os.path.join(
os.path.dirname(os.path.realpath(__file__)),
"custom.py")
shutil.copy(src, os.path.join(dst, "custom.py"))
heroku_files = [
"Procfile",
"requirements.txt",
"psiturkapp.py",
"worker.py",
"clock.py",
]
for filename in heroku_files:
src = os.path.join(
os.path.dirname(os.path.realpath(__file__)),
"heroku",
filename)
shutil.copy(src, os.path.join(dst, filename))
clock_on = config.getboolean('Server Parameters', 'clock_on')
# If the clock process has been disabled, overwrite the Procfile.
if not clock_on:
src = os.path.join(
os.path.dirname(os.path.realpath(__file__)),
"heroku",
"Procfile_no_clock")
shutil.copy(src, os.path.join(dst, "Procfile"))
frontend_files = [
"static/css/wallace.css",
"static/scripts/wallace.js",
"static/scripts/reqwest.min.js",
"templates/error_wallace.html",
"templates/launch.html",
"templates/complete.html",
"static/robots.txt"
]
for filename in frontend_files:
src = os.path.join(
os.path.dirname(os.path.realpath(__file__)),
"frontend",
filename)
shutil.copy(src, os.path.join(dst, filename))
time.sleep(0.25)
os.chdir(cwd)
    return (id, dst) | Check the app and, if it's compatible with Wallace, freeze its state. | Below is the instruction that describes the task:
### Input:
Check the app and, if it's compatible with Wallace, freeze its state.
### Response:
def setup_experiment(debug=True, verbose=False, app=None):
"""Check the app and, if it's compatible with Wallace, freeze its state."""
print_header()
# Verify that the package is usable.
log("Verifying that directory is compatible with Wallace...")
if not verify_package(verbose=verbose):
raise AssertionError(
"This is not a valid Wallace app. " +
"Fix the errors and then try running 'wallace verify'.")
# Verify that the Postgres server is running.
try:
psycopg2.connect(database="x", user="postgres", password="nada")
    except psycopg2.OperationalError as e:
if "could not connect to server" in str(e):
raise RuntimeError("The Postgres server isn't running.")
# Load psiTurk configuration.
config = PsiturkConfig()
config.load_config()
# Check that the demo-specific requirements are satisfied.
try:
with open("requirements.txt", "r") as f:
dependencies = f.readlines()
except:
dependencies = []
pkg_resources.require(dependencies)
# Generate a unique id for this experiment.
id = "w" + str(uuid.uuid4())[0:28]
# If the user provided an app name, use it everywhere that's user-facing.
if app:
id_long = id
id = str(app)
log("Running as experiment " + id + "...")
# Copy this directory into a temporary folder, ignoring .git
dst = os.path.join(tempfile.mkdtemp(), id)
to_ignore = shutil.ignore_patterns(
".git/*",
"*.db",
"snapshots",
"data",
"server.log"
)
shutil.copytree(os.getcwd(), dst, ignore=to_ignore)
click.echo(dst)
# Save the experiment id
with open(os.path.join(dst, "experiment_id.txt"), "w") as file:
if app:
file.write(id_long)
else:
file.write(id)
# Zip up the temporary directory and place it in the cwd.
if not debug:
log("Freezing the experiment package...")
shutil.make_archive(
os.path.join("snapshots", id + "-code"), "zip", dst)
# Change directory to the temporary folder.
cwd = os.getcwd()
os.chdir(dst)
# Check directories.
if not os.path.exists("static/scripts"):
os.makedirs("static/scripts")
if not os.path.exists("templates"):
os.makedirs("templates")
if not os.path.exists("static/css"):
os.makedirs("static/css")
    # Rename experiment.py to wallace_experiment.py to avoid psiTurk conflict.
os.rename(
os.path.join(dst, "experiment.py"),
os.path.join(dst, "wallace_experiment.py"))
# Copy files into this experiment package.
src = os.path.join(
os.path.dirname(os.path.realpath(__file__)),
"custom.py")
shutil.copy(src, os.path.join(dst, "custom.py"))
heroku_files = [
"Procfile",
"requirements.txt",
"psiturkapp.py",
"worker.py",
"clock.py",
]
for filename in heroku_files:
src = os.path.join(
os.path.dirname(os.path.realpath(__file__)),
"heroku",
filename)
shutil.copy(src, os.path.join(dst, filename))
clock_on = config.getboolean('Server Parameters', 'clock_on')
# If the clock process has been disabled, overwrite the Procfile.
if not clock_on:
src = os.path.join(
os.path.dirname(os.path.realpath(__file__)),
"heroku",
"Procfile_no_clock")
shutil.copy(src, os.path.join(dst, "Procfile"))
frontend_files = [
"static/css/wallace.css",
"static/scripts/wallace.js",
"static/scripts/reqwest.min.js",
"templates/error_wallace.html",
"templates/launch.html",
"templates/complete.html",
"static/robots.txt"
]
for filename in frontend_files:
src = os.path.join(
os.path.dirname(os.path.realpath(__file__)),
"frontend",
filename)
shutil.copy(src, os.path.join(dst, filename))
time.sleep(0.25)
os.chdir(cwd)
return (id, dst) |
def copy(self):
"""
Return a new :class:`~pywbem.CIMClassName` object that is a copy
of this CIM class path.
Objects of this class have no mutable types in any attributes, so
modifications of the original object will not affect the returned copy,
and vice versa.
Note that the Python functions :func:`py:copy.copy` and
:func:`py:copy.deepcopy` can be used to create completely shallow or
completely deep copies of objects of this class.
"""
return CIMClassName(
self.classname,
host=self.host,
namespace=self.namespace) | Return a new :class:`~pywbem.CIMClassName` object that is a copy
of this CIM class path.
Objects of this class have no mutable types in any attributes, so
modifications of the original object will not affect the returned copy,
and vice versa.
Note that the Python functions :func:`py:copy.copy` and
:func:`py:copy.deepcopy` can be used to create completely shallow or
        completely deep copies of objects of this class. | Below is the instruction that describes the task:
### Input:
Return a new :class:`~pywbem.CIMClassName` object that is a copy
of this CIM class path.
Objects of this class have no mutable types in any attributes, so
modifications of the original object will not affect the returned copy,
and vice versa.
Note that the Python functions :func:`py:copy.copy` and
:func:`py:copy.deepcopy` can be used to create completely shallow or
completely deep copies of objects of this class.
### Response:
def copy(self):
"""
Return a new :class:`~pywbem.CIMClassName` object that is a copy
of this CIM class path.
Objects of this class have no mutable types in any attributes, so
modifications of the original object will not affect the returned copy,
and vice versa.
Note that the Python functions :func:`py:copy.copy` and
:func:`py:copy.deepcopy` can be used to create completely shallow or
completely deep copies of objects of this class.
"""
return CIMClassName(
self.classname,
host=self.host,
namespace=self.namespace) |
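A short sketch of the copy semantics described in the docstring, assuming pywbem's public CIMClassName constructor; the class and namespace names are illustrative:

from pywbem import CIMClassName

path = CIMClassName('CIM_ComputerSystem', namespace='root/cimv2')
clone = path.copy()
assert clone == path        # equal classname, host and namespace
assert clone is not path    # but a distinct object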
def extract_signature(docstring):
"""Extract the signature from a docstring.
:sig: (str) -> Optional[str]
:param docstring: Docstring to extract the signature from.
:return: Extracted signature, or ``None`` if there's no signature.
"""
root = publish_doctree(docstring, settings_overrides={"report_level": 5})
fields = get_fields(root)
return fields.get(SIG_FIELD) | Extract the signature from a docstring.
:sig: (str) -> Optional[str]
:param docstring: Docstring to extract the signature from.
    :return: Extracted signature, or ``None`` if there's no signature. | Below is the instruction that describes the task:
### Input:
Extract the signature from a docstring.
:sig: (str) -> Optional[str]
:param docstring: Docstring to extract the signature from.
:return: Extracted signature, or ``None`` if there's no signature.
### Response:
def extract_signature(docstring):
"""Extract the signature from a docstring.
:sig: (str) -> Optional[str]
:param docstring: Docstring to extract the signature from.
:return: Extracted signature, or ``None`` if there's no signature.
"""
root = publish_doctree(docstring, settings_overrides={"report_level": 5})
fields = get_fields(root)
return fields.get(SIG_FIELD) |
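A small sketch of how the function above could be exercised, assuming the module's SIG_FIELD constant names the :sig: docstring field; the sample docstring is made up for illustration:

sample_doc = """Add two integers.

:sig: (int, int) -> int
:param a: First operand.
"""
print(extract_signature(sample_doc))  # expected: '(int, int) -> int'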
def search_directory(self, **kwargs):
"""
        SearchAccount is deprecated; use SearchDirectory instead
:param query: Query string - should be an LDAP-style filter
string (RFC 2254)
:param limit: The maximum number of accounts to return
(0 is default and means all)
:param offset: The starting offset (0, 25, etc)
:param domain: The domain name to limit the search to
:param applyCos: applyCos - Flag whether or not to apply the COS
policy to account. Specify 0 (false) if only requesting attrs that
aren't inherited from COS
:param applyConfig: whether or not to apply the global config attrs to
account. specify 0 (false) if only requesting attrs that aren't
inherited from global config
:param sortBy: Name of attribute to sort on. Default is the account
name.
:param types: Comma-separated list of types to return. Legal values
are: accounts|distributionlists|aliases|resources|domains|coses
(default is accounts)
:param sortAscending: Whether to sort in ascending order. Default is
1 (true)
:param countOnly: Whether response should be count only. Default is
0 (false)
        :param attrs: Comma-separated list of attrs to return ("displayName",
"zimbraId", "zimbraAccountStatus")
:return: dict of list of "account" "alias" "dl" "calresource" "domain"
"cos"
"""
search_response = self.request('SearchDirectory', kwargs)
result = {}
items = {
"account": zobjects.Account.from_dict,
"domain": zobjects.Domain.from_dict,
"dl": zobjects.DistributionList.from_dict,
"cos": zobjects.COS.from_dict,
"calresource": zobjects.CalendarResource.from_dict
# "alias": TODO,
}
for obj_type, func in items.items():
if obj_type in search_response:
if isinstance(search_response[obj_type], list):
result[obj_type] = [
func(v) for v in search_response[obj_type]]
else:
result[obj_type] = func(search_response[obj_type])
        return result | SearchAccount is deprecated; use SearchDirectory instead
:param query: Query string - should be an LDAP-style filter
string (RFC 2254)
:param limit: The maximum number of accounts to return
(0 is default and means all)
:param offset: The starting offset (0, 25, etc)
:param domain: The domain name to limit the search to
:param applyCos: applyCos - Flag whether or not to apply the COS
policy to account. Specify 0 (false) if only requesting attrs that
aren't inherited from COS
:param applyConfig: whether or not to apply the global config attrs to
account. specify 0 (false) if only requesting attrs that aren't
inherited from global config
:param sortBy: Name of attribute to sort on. Default is the account
name.
:param types: Comma-separated list of types to return. Legal values
are: accounts|distributionlists|aliases|resources|domains|coses
(default is accounts)
:param sortAscending: Whether to sort in ascending order. Default is
1 (true)
:param countOnly: Whether response should be count only. Default is
0 (false)
        :param attrs: Comma-separated list of attrs to return ("displayName",
"zimbraId", "zimbraAccountStatus")
:return: dict of list of "account" "alias" "dl" "calresource" "domain"
"cos" | Below is the the instruction that describes the task:
### Input:
SearchAccount is deprecated; use SearchDirectory instead
:param query: Query string - should be an LDAP-style filter
string (RFC 2254)
:param limit: The maximum number of accounts to return
(0 is default and means all)
:param offset: The starting offset (0, 25, etc)
:param domain: The domain name to limit the search to
:param applyCos: applyCos - Flag whether or not to apply the COS
policy to account. Specify 0 (false) if only requesting attrs that
aren't inherited from COS
:param applyConfig: whether or not to apply the global config attrs to
account. specify 0 (false) if only requesting attrs that aren't
inherited from global config
:param sortBy: Name of attribute to sort on. Default is the account
name.
:param types: Comma-separated list of types to return. Legal values
are: accounts|distributionlists|aliases|resources|domains|coses
(default is accounts)
:param sortAscending: Whether to sort in ascending order. Default is
1 (true)
:param countOnly: Whether response should be count only. Default is
0 (false)
:param attrs: Comma-seperated list of attrs to return ("displayName",
"zimbraId", "zimbraAccountStatus")
:return: dict of list of "account" "alias" "dl" "calresource" "domain"
"cos"
### Response:
def search_directory(self, **kwargs):
"""
        SearchAccount is deprecated; use SearchDirectory instead
:param query: Query string - should be an LDAP-style filter
string (RFC 2254)
:param limit: The maximum number of accounts to return
(0 is default and means all)
:param offset: The starting offset (0, 25, etc)
:param domain: The domain name to limit the search to
:param applyCos: applyCos - Flag whether or not to apply the COS
policy to account. Specify 0 (false) if only requesting attrs that
aren't inherited from COS
:param applyConfig: whether or not to apply the global config attrs to
account. specify 0 (false) if only requesting attrs that aren't
inherited from global config
:param sortBy: Name of attribute to sort on. Default is the account
name.
:param types: Comma-separated list of types to return. Legal values
are: accounts|distributionlists|aliases|resources|domains|coses
(default is accounts)
:param sortAscending: Whether to sort in ascending order. Default is
1 (true)
:param countOnly: Whether response should be count only. Default is
0 (false)
        :param attrs: Comma-separated list of attrs to return ("displayName",
"zimbraId", "zimbraAccountStatus")
:return: dict of list of "account" "alias" "dl" "calresource" "domain"
"cos"
"""
search_response = self.request('SearchDirectory', kwargs)
result = {}
items = {
"account": zobjects.Account.from_dict,
"domain": zobjects.Domain.from_dict,
"dl": zobjects.DistributionList.from_dict,
"cos": zobjects.COS.from_dict,
"calresource": zobjects.CalendarResource.from_dict
# "alias": TODO,
}
for obj_type, func in items.items():
if obj_type in search_response:
if isinstance(search_response[obj_type], list):
result[obj_type] = [
func(v) for v in search_response[obj_type]]
else:
result[obj_type] = func(search_response[obj_type])
return result |
def add_tag(self, tag):
""" add a tag to the tag list """
if tag not in self._tags:
            self._tags[tag] = dict() | add a tag to the tag list | Below is the instruction that describes the task:
### Input:
add a tag to the tag list
### Response:
def add_tag(self, tag):
""" add a tag to the tag list """
if tag not in self._tags:
self._tags[tag] = dict() |
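A tiny sketch of the intended behaviour; the host class is not shown, so a throwaway class is used here purely for illustration:

class Tagged:
    def __init__(self):
        self._tags = {}

Tagged.add_tag = add_tag  # attach the function above as a method

t = Tagged()
t.add_tag('urgent')
t.add_tag('urgent')  # second call is a no-op
print(t._tags)  # {'urgent': {}}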
def dependency_items(self):
"""
Generates all containers' dependencies, i.e. an iterator on tuples in the format
``(container_name, used_containers)``, whereas the used containers are a set, and can be empty.
:return: Container dependencies.
:rtype: collections.Iterable
"""
def _get_used_items_np(u):
volume_config_name, __, volume_instance = u.name.partition('.')
attaching_config_name = attaching.get(volume_config_name)
if attaching_config_name:
used_c_name = attaching_config_name
used_instances = instances.get(attaching_config_name)
else:
used_c_name = volume_config_name
if volume_instance:
used_instances = (volume_instance, )
else:
used_instances = instances.get(volume_config_name)
return [MapConfigId(ItemType.CONTAINER, self._name, used_c_name, ai)
for ai in used_instances or (None, )]
def _get_used_items_ap(u):
volume_config_name, __, volume_instance = u.name.partition('.')
attaching_config = ext_map.get_existing(volume_config_name)
attaching_instances = instances.get(volume_config_name)
config_volumes = {a.name for a in attaching_config.attaches}
if not volume_instance or volume_instance in config_volumes:
used_instances = attaching_instances
else:
used_instances = (volume_instance, )
return [MapConfigId(ItemType.CONTAINER, self._name, volume_config_name, ai)
for ai in used_instances or (None, )]
def _get_linked_items(lc):
linked_config_name, __, linked_instance = lc.partition('.')
if linked_instance:
linked_instances = (linked_instance, )
else:
linked_instances = instances.get(linked_config_name)
return [MapConfigId(ItemType.CONTAINER, self._name, linked_config_name, li)
for li in linked_instances or (None, )]
def _get_network_mode_items(n):
net_config_name, net_instance = n
network_ref_config = ext_map.get_existing(net_config_name)
if network_ref_config:
if net_instance and net_instance in network_ref_config.instances:
network_instances = (net_instance, )
else:
network_instances = network_ref_config.instances or (None, )
return [MapConfigId(ItemType.CONTAINER, self._name, net_config_name, ni)
for ni in network_instances]
return []
def _get_network_items(n):
if n.network_name in DEFAULT_PRESET_NETWORKS:
return []
net_items = [MapConfigId(ItemType.NETWORK, self._name, n.network_name)]
if n.links:
net_items.extend(itertools.chain.from_iterable(_get_linked_items(l.container) for l in n.links))
return net_items
if self._extended:
ext_map = self
else:
ext_map = self.get_extended_map()
instances = {c_name: c_config.instances
for c_name, c_config in ext_map}
if not self.use_attached_parent_name:
attaching = {attaches.name: c_name
for c_name, c_config in ext_map
for attaches in c_config.attaches}
used_func = _get_used_items_np
else:
used_func = _get_used_items_ap
def _get_dep_list(name, config):
image, tag = self.get_image(config.image or name)
d = []
nw = config.network_mode
if isinstance(nw, tuple):
merge_list(d, _get_network_mode_items(nw))
merge_list(d, itertools.chain.from_iterable(map(_get_network_items, config.networks)))
merge_list(d, itertools.chain.from_iterable(map(used_func, config.uses)))
merge_list(d, itertools.chain.from_iterable(_get_linked_items(l.container) for l in config.links))
d.extend(MapConfigId(ItemType.VOLUME, self._name, name, a.name)
for a in config.attaches)
d.append(MapConfigId(ItemType.IMAGE, self._name, image, tag))
return d
for c_name, c_config in ext_map:
dep_list = _get_dep_list(c_name, c_config)
for c_instance in c_config.instances or (None, ):
yield MapConfigId(ItemType.CONTAINER, self._name, c_name, c_instance), dep_list | Generates all containers' dependencies, i.e. an iterator on tuples in the format
``(container_name, used_containers)``, whereas the used containers are a set, and can be empty.
:return: Container dependencies.
        :rtype: collections.Iterable | Below is the instruction that describes the task:
### Input:
Generates all containers' dependencies, i.e. an iterator on tuples in the format
``(container_name, used_containers)``, whereas the used containers are a set, and can be empty.
:return: Container dependencies.
:rtype: collections.Iterable
### Response:
def dependency_items(self):
"""
Generates all containers' dependencies, i.e. an iterator on tuples in the format
``(container_name, used_containers)``, whereas the used containers are a set, and can be empty.
:return: Container dependencies.
:rtype: collections.Iterable
"""
def _get_used_items_np(u):
volume_config_name, __, volume_instance = u.name.partition('.')
attaching_config_name = attaching.get(volume_config_name)
if attaching_config_name:
used_c_name = attaching_config_name
used_instances = instances.get(attaching_config_name)
else:
used_c_name = volume_config_name
if volume_instance:
used_instances = (volume_instance, )
else:
used_instances = instances.get(volume_config_name)
return [MapConfigId(ItemType.CONTAINER, self._name, used_c_name, ai)
for ai in used_instances or (None, )]
def _get_used_items_ap(u):
volume_config_name, __, volume_instance = u.name.partition('.')
attaching_config = ext_map.get_existing(volume_config_name)
attaching_instances = instances.get(volume_config_name)
config_volumes = {a.name for a in attaching_config.attaches}
if not volume_instance or volume_instance in config_volumes:
used_instances = attaching_instances
else:
used_instances = (volume_instance, )
return [MapConfigId(ItemType.CONTAINER, self._name, volume_config_name, ai)
for ai in used_instances or (None, )]
def _get_linked_items(lc):
linked_config_name, __, linked_instance = lc.partition('.')
if linked_instance:
linked_instances = (linked_instance, )
else:
linked_instances = instances.get(linked_config_name)
return [MapConfigId(ItemType.CONTAINER, self._name, linked_config_name, li)
for li in linked_instances or (None, )]
def _get_network_mode_items(n):
net_config_name, net_instance = n
network_ref_config = ext_map.get_existing(net_config_name)
if network_ref_config:
if net_instance and net_instance in network_ref_config.instances:
network_instances = (net_instance, )
else:
network_instances = network_ref_config.instances or (None, )
return [MapConfigId(ItemType.CONTAINER, self._name, net_config_name, ni)
for ni in network_instances]
return []
def _get_network_items(n):
if n.network_name in DEFAULT_PRESET_NETWORKS:
return []
net_items = [MapConfigId(ItemType.NETWORK, self._name, n.network_name)]
if n.links:
net_items.extend(itertools.chain.from_iterable(_get_linked_items(l.container) for l in n.links))
return net_items
if self._extended:
ext_map = self
else:
ext_map = self.get_extended_map()
instances = {c_name: c_config.instances
for c_name, c_config in ext_map}
if not self.use_attached_parent_name:
attaching = {attaches.name: c_name
for c_name, c_config in ext_map
for attaches in c_config.attaches}
used_func = _get_used_items_np
else:
used_func = _get_used_items_ap
def _get_dep_list(name, config):
image, tag = self.get_image(config.image or name)
d = []
nw = config.network_mode
if isinstance(nw, tuple):
merge_list(d, _get_network_mode_items(nw))
merge_list(d, itertools.chain.from_iterable(map(_get_network_items, config.networks)))
merge_list(d, itertools.chain.from_iterable(map(used_func, config.uses)))
merge_list(d, itertools.chain.from_iterable(_get_linked_items(l.container) for l in config.links))
d.extend(MapConfigId(ItemType.VOLUME, self._name, name, a.name)
for a in config.attaches)
d.append(MapConfigId(ItemType.IMAGE, self._name, image, tag))
return d
for c_name, c_config in ext_map:
dep_list = _get_dep_list(c_name, c_config)
for c_instance in c_config.instances or (None, ):
yield MapConfigId(ItemType.CONTAINER, self._name, c_name, c_instance), dep_list |
def slugify(text, sep='-'):
"""A simple slug generator."""
text = stringify(text)
if text is None:
return None
text = text.replace(sep, WS)
text = normalize(text, ascii=True)
if text is None:
return None
    return text.replace(WS, sep) | A simple slug generator. | Below is the instruction that describes the task:
### Input:
A simple slug generator.
### Response:
def slugify(text, sep='-'):
"""A simple slug generator."""
text = stringify(text)
if text is None:
return None
text = text.replace(sep, WS)
text = normalize(text, ascii=True)
if text is None:
return None
return text.replace(WS, sep) |
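A few illustrative calls, assuming the module's helpers (stringify, normalize, WS) are importable; the exact transliteration of non-ASCII input depends on normalize:

print(slugify('Hello World'))       # expected: 'hello-world'
print(slugify('Hello World', '_'))  # expected: 'hello_world'
print(slugify(None))                # None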
def get_sub_comp_info(source_info, comp):
"""Build and return information about a sub-component for a particular selection
"""
sub_comps = source_info.get('components', None)
if sub_comps is None:
return source_info.copy()
moving = source_info.get('moving', False)
selection_dependent = source_info.get('selection_dependent', False)
if selection_dependent:
key = comp.make_key('{ebin_name}_{evtype_name}')
elif moving:
key = "zmax%i" % comp.zmax
ret_dict = source_info.copy()
ret_dict.update(sub_comps[key])
    return ret_dict | Build and return information about a sub-component for a particular selection | Below is the instruction that describes the task:
### Input:
Build and return information about a sub-component for a particular selection
### Response:
def get_sub_comp_info(source_info, comp):
"""Build and return information about a sub-component for a particular selection
"""
sub_comps = source_info.get('components', None)
if sub_comps is None:
return source_info.copy()
moving = source_info.get('moving', False)
selection_dependent = source_info.get('selection_dependent', False)
if selection_dependent:
key = comp.make_key('{ebin_name}_{evtype_name}')
elif moving:
key = "zmax%i" % comp.zmax
ret_dict = source_info.copy()
ret_dict.update(sub_comps[key])
return ret_dict |
def set_sum_w2(self, w, ix, iy=0, iz=0):
"""
Sets the true number of entries in the bin weighted by w^2
"""
if self.GetSumw2N() == 0:
raise RuntimeError(
"Attempting to access Sumw2 in histogram "
"where weights were not stored")
xl = self.nbins(axis=0, overflow=True)
yl = self.nbins(axis=1, overflow=True)
idx = xl * yl * iz + xl * iy + ix
if not 0 <= idx < self.GetSumw2N():
raise IndexError("bin index out of range")
        self.GetSumw2().SetAt(w, idx) | Sets the true number of entries in the bin weighted by w^2 | Below is the instruction that describes the task:
### Input:
Sets the true number of entries in the bin weighted by w^2
### Response:
def set_sum_w2(self, w, ix, iy=0, iz=0):
"""
Sets the true number of entries in the bin weighted by w^2
"""
if self.GetSumw2N() == 0:
raise RuntimeError(
"Attempting to access Sumw2 in histogram "
"where weights were not stored")
xl = self.nbins(axis=0, overflow=True)
yl = self.nbins(axis=1, overflow=True)
idx = xl * yl * iz + xl * iy + ix
if not 0 <= idx < self.GetSumw2N():
raise IndexError("bin index out of range")
self.GetSumw2().SetAt(w, idx) |
def create_prefetch(self, addresses):
"""Create futures needed before starting the process of reading the
address's value from the merkle tree.
Args:
addresses (list of str): addresses in the txn's inputs that
aren't in any base context (or any in the chain).
"""
with self._lock:
for add in addresses:
self._state[add] = _ContextFuture(address=add,
wait_for_tree=True) | Create futures needed before starting the process of reading the
address's value from the merkle tree.
Args:
addresses (list of str): addresses in the txn's inputs that
                aren't in any base context (or any in the chain). | Below is the instruction that describes the task:
### Input:
Create futures needed before starting the process of reading the
address's value from the merkle tree.
Args:
addresses (list of str): addresses in the txn's inputs that
aren't in any base context (or any in the chain).
### Response:
def create_prefetch(self, addresses):
"""Create futures needed before starting the process of reading the
address's value from the merkle tree.
Args:
addresses (list of str): addresses in the txn's inputs that
aren't in any base context (or any in the chain).
"""
with self._lock:
for add in addresses:
self._state[add] = _ContextFuture(address=add,
wait_for_tree=True) |
def OxmlElement(nsptag_str, nsmap=None):
"""
Return a 'loose' lxml element having the tag specified by *nsptag_str*.
*nsptag_str* must contain the standard namespace prefix, e.g. 'a:tbl'.
The resulting element is an instance of the custom element class for this
tag name if one is defined.
"""
nsptag = NamespacePrefixedTag(nsptag_str)
nsmap = nsmap if nsmap is not None else nsptag.nsmap
return oxml_parser.makeelement(nsptag.clark_name, nsmap=nsmap) | Return a 'loose' lxml element having the tag specified by *nsptag_str*.
*nsptag_str* must contain the standard namespace prefix, e.g. 'a:tbl'.
The resulting element is an instance of the custom element class for this
    tag name if one is defined. | Below is the instruction that describes the task:
### Input:
Return a 'loose' lxml element having the tag specified by *nsptag_str*.
*nsptag_str* must contain the standard namespace prefix, e.g. 'a:tbl'.
The resulting element is an instance of the custom element class for this
tag name if one is defined.
### Response:
def OxmlElement(nsptag_str, nsmap=None):
"""
Return a 'loose' lxml element having the tag specified by *nsptag_str*.
*nsptag_str* must contain the standard namespace prefix, e.g. 'a:tbl'.
The resulting element is an instance of the custom element class for this
tag name if one is defined.
"""
nsptag = NamespacePrefixedTag(nsptag_str)
nsmap = nsmap if nsmap is not None else nsptag.nsmap
return oxml_parser.makeelement(nsptag.clark_name, nsmap=nsmap) |
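A minimal sketch of the factory in use, assuming the 'a' prefix is registered in the module's namespace map (as it is for DrawingML in python-pptx); the tag names are illustrative:

tbl = OxmlElement('a:tbl')  # returns the custom element class for a:tbl if one is registered
tr = OxmlElement('a:tr')
tbl.append(tr)
print(tbl.tag)  # Clark notation, e.g. '{http://schemas.openxmlformats.org/drawingml/2006/main}tbl'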
def withdict(parser, token):
"""
Take a complete context dict as extra layer.
"""
bits = token.split_contents()
if len(bits) != 2:
raise TemplateSyntaxError("{% withdict %} expects one argument")
nodelist = parser.parse(('endwithdict',))
parser.delete_first_token()
return WithDictNode(
nodelist=nodelist,
context_expr=parser.compile_filter(bits[1])
    ) | Take a complete context dict as extra layer. | Below is the instruction that describes the task:
### Input:
Take a complete context dict as extra layer.
### Response:
def withdict(parser, token):
"""
Take a complete context dict as extra layer.
"""
bits = token.split_contents()
if len(bits) != 2:
raise TemplateSyntaxError("{% withdict %} expects one argument")
nodelist = parser.parse(('endwithdict',))
parser.delete_first_token()
return WithDictNode(
nodelist=nodelist,
context_expr=parser.compile_filter(bits[1])
) |
def set_argsx(self, arguments, *args):
"""
Setup the command line arguments, the first item must be an (absolute) filename
to run. Variadic function, must be NULL terminated.
"""
return lib.zproc_set_argsx(self._as_parameter_, arguments, *args) | Setup the command line arguments, the first item must be an (absolute) filename
        to run. Variadic function, must be NULL terminated. | Below is the instruction that describes the task:
### Input:
Setup the command line arguments, the first item must be an (absolute) filename
to run. Variadic function, must be NULL terminated.
### Response:
def set_argsx(self, arguments, *args):
"""
Setup the command line arguments, the first item must be an (absolute) filename
to run. Variadic function, must be NULL terminated.
"""
return lib.zproc_set_argsx(self._as_parameter_, arguments, *args) |
def _parse_request_arguments(self, request):
"""Parses comma separated request arguments
Args:
request: A request that should contain 'inference_address', 'model_name',
'model_version', 'model_signature'.
Returns:
A tuple of lists for model parameters
"""
inference_addresses = request.args.get('inference_address').split(',')
model_names = request.args.get('model_name').split(',')
model_versions = request.args.get('model_version').split(',')
model_signatures = request.args.get('model_signature').split(',')
if len(model_names) != len(inference_addresses):
raise common_utils.InvalidUserInputError('Every model should have a ' +
'name and address.')
return inference_addresses, model_names, model_versions, model_signatures | Parses comma separated request arguments
Args:
request: A request that should contain 'inference_address', 'model_name',
'model_version', 'model_signature'.
Returns:
A tuple of lists for model parameters | Below is the the instruction that describes the task:
### Input:
Parses comma separated request arguments
Args:
request: A request that should contain 'inference_address', 'model_name',
'model_version', 'model_signature'.
Returns:
A tuple of lists for model parameters
### Response:
def _parse_request_arguments(self, request):
"""Parses comma separated request arguments
Args:
request: A request that should contain 'inference_address', 'model_name',
'model_version', 'model_signature'.
Returns:
A tuple of lists for model parameters
"""
inference_addresses = request.args.get('inference_address').split(',')
model_names = request.args.get('model_name').split(',')
model_versions = request.args.get('model_version').split(',')
model_signatures = request.args.get('model_signature').split(',')
if len(model_names) != len(inference_addresses):
raise common_utils.InvalidUserInputError('Every model should have a ' +
'name and address.')
return inference_addresses, model_names, model_versions, model_signatures |
def _shortcut_open(
uri,
mode,
ignore_ext=False,
buffering=-1,
encoding=None,
errors=None,
):
"""Try to open the URI using the standard library io.open function.
This can be much faster than the alternative of opening in binary mode and
then decoding.
This is only possible under the following conditions:
1. Opening a local file
2. Ignore extension is set to True
If it is not possible to use the built-in open for the specified URI, returns None.
:param str uri: A string indicating what to open.
:param str mode: The mode to pass to the open function.
:param dict kw:
:returns: The opened file
:rtype: file
"""
if not isinstance(uri, six.string_types):
return None
parsed_uri = _parse_uri(uri)
if parsed_uri.scheme != 'file':
return None
_, extension = P.splitext(parsed_uri.uri_path)
if extension in _COMPRESSOR_REGISTRY and not ignore_ext:
return None
open_kwargs = {}
if encoding is not None:
open_kwargs['encoding'] = encoding
mode = mode.replace('b', '')
#
# binary mode of the builtin/stdlib open function doesn't take an errors argument
#
if errors and 'b' not in mode:
open_kwargs['errors'] = errors
#
# Under Py3, the built-in open accepts kwargs, and it's OK to use that.
# Under Py2, the built-in open _doesn't_ accept kwargs, but we still use it
# whenever possible (see issue #207). If we're under Py2 and have to use
    # kwargs, then we have no option other than to use io.open.
#
if six.PY3:
return _builtin_open(parsed_uri.uri_path, mode, buffering=buffering, **open_kwargs)
elif not open_kwargs:
return _builtin_open(parsed_uri.uri_path, mode, buffering=buffering)
return io.open(parsed_uri.uri_path, mode, buffering=buffering, **open_kwargs) | Try to open the URI using the standard library io.open function.
This can be much faster than the alternative of opening in binary mode and
then decoding.
This is only possible under the following conditions:
1. Opening a local file
2. Ignore extension is set to True
If it is not possible to use the built-in open for the specified URI, returns None.
:param str uri: A string indicating what to open.
:param str mode: The mode to pass to the open function.
:param dict kw:
:returns: The opened file
:rtype: file | Below is the the instruction that describes the task:
### Input:
Try to open the URI using the standard library io.open function.
This can be much faster than the alternative of opening in binary mode and
then decoding.
This is only possible under the following conditions:
1. Opening a local file
2. Ignore extension is set to True
If it is not possible to use the built-in open for the specified URI, returns None.
:param str uri: A string indicating what to open.
:param str mode: The mode to pass to the open function.
:param dict kw:
:returns: The opened file
:rtype: file
### Response:
def _shortcut_open(
uri,
mode,
ignore_ext=False,
buffering=-1,
encoding=None,
errors=None,
):
"""Try to open the URI using the standard library io.open function.
This can be much faster than the alternative of opening in binary mode and
then decoding.
This is only possible under the following conditions:
1. Opening a local file
2. Ignore extension is set to True
If it is not possible to use the built-in open for the specified URI, returns None.
:param str uri: A string indicating what to open.
:param str mode: The mode to pass to the open function.
:param dict kw:
:returns: The opened file
:rtype: file
"""
if not isinstance(uri, six.string_types):
return None
parsed_uri = _parse_uri(uri)
if parsed_uri.scheme != 'file':
return None
_, extension = P.splitext(parsed_uri.uri_path)
if extension in _COMPRESSOR_REGISTRY and not ignore_ext:
return None
open_kwargs = {}
if encoding is not None:
open_kwargs['encoding'] = encoding
mode = mode.replace('b', '')
#
# binary mode of the builtin/stdlib open function doesn't take an errors argument
#
if errors and 'b' not in mode:
open_kwargs['errors'] = errors
#
# Under Py3, the built-in open accepts kwargs, and it's OK to use that.
# Under Py2, the built-in open _doesn't_ accept kwargs, but we still use it
# whenever possible (see issue #207). If we're under Py2 and have to use
    # kwargs, then we have no option other than to use io.open.
#
if six.PY3:
return _builtin_open(parsed_uri.uri_path, mode, buffering=buffering, **open_kwargs)
elif not open_kwargs:
return _builtin_open(parsed_uri.uri_path, mode, buffering=buffering)
return io.open(parsed_uri.uri_path, mode, buffering=buffering, **open_kwargs) |
def random(pages=1):
'''
Get a list of random Wikipedia article titles.
.. note:: Random only gets articles from namespace 0, meaning no Category, User talk, or other meta-Wikipedia pages.
Keyword arguments:
* pages - the number of random pages returned (max of 10)
'''
#http://en.wikipedia.org/w/api.php?action=query&list=random&rnlimit=5000&format=jsonfm
query_params = {
'list': 'random',
'rnnamespace': 0,
'rnlimit': pages,
}
request = _wiki_request(query_params)
titles = [page['title'] for page in request['query']['random']]
if len(titles) == 1:
return titles[0]
return titles | Get a list of random Wikipedia article titles.
.. note:: Random only gets articles from namespace 0, meaning no Category, User talk, or other meta-Wikipedia pages.
Keyword arguments:
* pages - the number of random pages returned (max of 10) | Below is the the instruction that describes the task:
### Input:
Get a list of random Wikipedia article titles.
.. note:: Random only gets articles from namespace 0, meaning no Category, User talk, or other meta-Wikipedia pages.
Keyword arguments:
* pages - the number of random pages returned (max of 10)
### Response:
def random(pages=1):
'''
Get a list of random Wikipedia article titles.
.. note:: Random only gets articles from namespace 0, meaning no Category, User talk, or other meta-Wikipedia pages.
Keyword arguments:
* pages - the number of random pages returned (max of 10)
'''
#http://en.wikipedia.org/w/api.php?action=query&list=random&rnlimit=5000&format=jsonfm
query_params = {
'list': 'random',
'rnnamespace': 0,
'rnlimit': pages,
}
request = _wiki_request(query_params)
titles = [page['title'] for page in request['query']['random']]
if len(titles) == 1:
return titles[0]
return titles |
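A short usage sketch, assuming this is the module-level helper of the `wikipedia` package and that network access is available; the printed titles are whatever the API returns.
# Hypothetical usage sketch.
title = random()           # a single random article title (str)
titles = random(pages=3)   # a list of three titles
print(title, titles)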
def _parse(self):
"""
Checks if the CPE Name is valid.
:returns: None
:exception: ValueError - bad-formed CPE Name
"""
# Check prefix and initial bracket of WFN
if self._str[0:5] != CPE2_3_WFN.CPE_PREFIX:
errmsg = "Bad-formed CPE Name: WFN prefix not found"
raise ValueError(errmsg)
    # Check final bracket
if self._str[-1:] != "]":
errmsg = "Bad-formed CPE Name: final bracket of WFN not found"
raise ValueError(errmsg)
content = self._str[5:-1]
if content != "":
# Dictionary with pairs attribute-value
components = dict()
# Split WFN in components
list_component = content.split(CPEComponent2_3_WFN.SEPARATOR_COMP)
# Adds the defined components
for e in list_component:
# Whitespace not valid in component names and values
if e.find(" ") != -1:
msg = "Bad-formed CPE Name: WFN with too many whitespaces"
raise ValueError(msg)
# Split pair attribute-value
pair = e.split(CPEComponent2_3_WFN.SEPARATOR_PAIR)
att_name = pair[0]
att_value = pair[1]
# Check valid attribute name
if att_name not in CPEComponent.CPE_COMP_KEYS_EXTENDED:
msg = "Bad-formed CPE Name: invalid attribute name '{0}'".format(
att_name)
raise ValueError(msg)
if att_name in components:
# Duplicate attribute
msg = "Bad-formed CPE Name: attribute '{0}' repeated".format(
att_name)
raise ValueError(msg)
if not (att_value.startswith('"') and
att_value.endswith('"')):
# Logical value
strUpper = att_value.upper()
if strUpper == CPEComponent2_3_WFN.VALUE_ANY:
comp = CPEComponentAnyValue()
elif strUpper == CPEComponent2_3_WFN.VALUE_NA:
comp = CPEComponentNotApplicable()
else:
msg = "Invalid logical value '{0}'".format(att_value)
raise ValueError(msg)
elif att_value.startswith('"') and att_value.endswith('"'):
# String value
comp = CPEComponent2_3_WFN(att_value, att_name)
else:
# Bad value
msg = "Bad-formed CPE Name: invalid value '{0}'".format(
att_value)
raise ValueError(msg)
components[att_name] = comp
# Adds the undefined components
for ck in CPEComponent.CPE_COMP_KEYS_EXTENDED:
if ck not in components:
components[ck] = CPEComponentUndefined()
# #######################
# Storage of CPE Name #
# #######################
part_comp = components[CPEComponent.ATT_PART]
if isinstance(part_comp, CPEComponentLogical):
elements = []
elements.append(components)
self[CPE.KEY_UNDEFINED] = elements
else:
# Create internal structure of CPE Name in parts:
# one of them is filled with identified components,
# the rest are empty
part_value = part_comp.get_value()
# Del double quotes of value
system = part_value[1:-1]
if system in CPEComponent.SYSTEM_VALUES:
self._create_cpe_parts(system, components)
else:
self._create_cpe_parts(CPEComponent.VALUE_PART_UNDEFINED,
components)
# Fills the empty parts of internal structure of CPE Name
for pk in CPE.CPE_PART_KEYS:
if pk not in self.keys():
self[pk] = [] | Checks if the CPE Name is valid.
:returns: None
:exception: ValueError - bad-formed CPE Name | Below is the the instruction that describes the task:
### Input:
Checks if the CPE Name is valid.
:returns: None
:exception: ValueError - bad-formed CPE Name
### Response:
def _parse(self):
"""
Checks if the CPE Name is valid.
:returns: None
:exception: ValueError - bad-formed CPE Name
"""
# Check prefix and initial bracket of WFN
if self._str[0:5] != CPE2_3_WFN.CPE_PREFIX:
errmsg = "Bad-formed CPE Name: WFN prefix not found"
raise ValueError(errmsg)
    # Check final bracket
if self._str[-1:] != "]":
errmsg = "Bad-formed CPE Name: final bracket of WFN not found"
raise ValueError(errmsg)
content = self._str[5:-1]
if content != "":
# Dictionary with pairs attribute-value
components = dict()
# Split WFN in components
list_component = content.split(CPEComponent2_3_WFN.SEPARATOR_COMP)
# Adds the defined components
for e in list_component:
# Whitespace not valid in component names and values
if e.find(" ") != -1:
msg = "Bad-formed CPE Name: WFN with too many whitespaces"
raise ValueError(msg)
# Split pair attribute-value
pair = e.split(CPEComponent2_3_WFN.SEPARATOR_PAIR)
att_name = pair[0]
att_value = pair[1]
# Check valid attribute name
if att_name not in CPEComponent.CPE_COMP_KEYS_EXTENDED:
msg = "Bad-formed CPE Name: invalid attribute name '{0}'".format(
att_name)
raise ValueError(msg)
if att_name in components:
# Duplicate attribute
msg = "Bad-formed CPE Name: attribute '{0}' repeated".format(
att_name)
raise ValueError(msg)
if not (att_value.startswith('"') and
att_value.endswith('"')):
# Logical value
strUpper = att_value.upper()
if strUpper == CPEComponent2_3_WFN.VALUE_ANY:
comp = CPEComponentAnyValue()
elif strUpper == CPEComponent2_3_WFN.VALUE_NA:
comp = CPEComponentNotApplicable()
else:
msg = "Invalid logical value '{0}'".format(att_value)
raise ValueError(msg)
elif att_value.startswith('"') and att_value.endswith('"'):
# String value
comp = CPEComponent2_3_WFN(att_value, att_name)
else:
# Bad value
msg = "Bad-formed CPE Name: invalid value '{0}'".format(
att_value)
raise ValueError(msg)
components[att_name] = comp
# Adds the undefined components
for ck in CPEComponent.CPE_COMP_KEYS_EXTENDED:
if ck not in components:
components[ck] = CPEComponentUndefined()
# #######################
# Storage of CPE Name #
# #######################
part_comp = components[CPEComponent.ATT_PART]
if isinstance(part_comp, CPEComponentLogical):
elements = []
elements.append(components)
self[CPE.KEY_UNDEFINED] = elements
else:
# Create internal structure of CPE Name in parts:
# one of them is filled with identified components,
# the rest are empty
part_value = part_comp.get_value()
# Del double quotes of value
system = part_value[1:-1]
if system in CPEComponent.SYSTEM_VALUES:
self._create_cpe_parts(system, components)
else:
self._create_cpe_parts(CPEComponent.VALUE_PART_UNDEFINED,
components)
# Fills the empty parts of internal structure of CPE Name
for pk in CPE.CPE_PART_KEYS:
if pk not in self.keys():
self[pk] = [] |
def mint_sub(client_salt, sector_id="", subject_type="public",
uid='', user_salt=''):
"""
Mint a new sub (subject identifier)
:param authn_event: Authentication event information
:param client_salt: client specific salt - used in pairwise
:param sector_id: Possible sector identifier
:param subject_type: 'public'/'pairwise'
:return: Subject identifier
"""
if subject_type == "public":
sub = hashlib.sha256(
"{}{}".format(uid, user_salt).encode("utf-8")).hexdigest()
else:
sub = pairwise_id(uid, sector_id,
"{}{}".format(client_salt, user_salt))
return sub | Mint a new sub (subject identifier)
:param authn_event: Authentication event information
:param client_salt: client specific salt - used in pairwise
:param sector_id: Possible sector identifier
:param subject_type: 'public'/'pairwise'
:return: Subject identifier | Below is the the instruction that describes the task:
### Input:
Mint a new sub (subject identifier)
:param authn_event: Authentication event information
:param client_salt: client specific salt - used in pairwise
:param sector_id: Possible sector identifier
:param subject_type: 'public'/'pairwise'
:return: Subject identifier
### Response:
def mint_sub(client_salt, sector_id="", subject_type="public",
uid='', user_salt=''):
"""
Mint a new sub (subject identifier)
:param authn_event: Authentication event information
:param client_salt: client specific salt - used in pairwise
:param sector_id: Possible sector identifier
:param subject_type: 'public'/'pairwise'
:return: Subject identifier
"""
if subject_type == "public":
sub = hashlib.sha256(
"{}{}".format(uid, user_salt).encode("utf-8")).hexdigest()
else:
sub = pairwise_id(uid, sector_id,
"{}{}".format(client_salt, user_salt))
return sub |
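A small usage sketch; only the 'public' branch is reproducible with hashlib alone, and the identifiers and salts below are made up.
import hashlib

# Hypothetical usage sketch for the helper above.
sub_public = mint_sub(client_salt='c-salt', uid='alice', user_salt='u-salt')
assert sub_public == hashlib.sha256(b'aliceu-salt').hexdigest()  # mirrors the public branch
sub_pairwise = mint_sub(client_salt='c-salt', sector_id='rp.example.org',
                        subject_type='pairwise', uid='alice', user_salt='u-salt')
# The pairwise value depends entirely on the library's pairwise_id() implementation.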
def get_num_shards(num_samples: int, samples_per_shard: int, min_num_shards: int) -> int:
"""
Returns the number of shards.
:param num_samples: Number of training data samples.
:param samples_per_shard: Samples per shard.
:param min_num_shards: Minimum number of shards.
:return: Number of shards.
"""
return max(int(math.ceil(num_samples / samples_per_shard)), min_num_shards) | Returns the number of shards.
:param num_samples: Number of training data samples.
:param samples_per_shard: Samples per shard.
:param min_num_shards: Minimum number of shards.
:return: Number of shards. | Below is the the instruction that describes the task:
### Input:
Returns the number of shards.
:param num_samples: Number of training data samples.
:param samples_per_shard: Samples per shard.
:param min_num_shards: Minimum number of shards.
:return: Number of shards.
### Response:
def get_num_shards(num_samples: int, samples_per_shard: int, min_num_shards: int) -> int:
"""
Returns the number of shards.
:param num_samples: Number of training data samples.
:param samples_per_shard: Samples per shard.
:param min_num_shards: Minimum number of shards.
:return: Number of shards.
"""
return max(int(math.ceil(num_samples / samples_per_shard)), min_num_shards) |
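A worked example of the arithmetic above; the sample counts are illustrative only.
# 1,000,000 samples at 400,000 per shard is ceil(2.5) = 3 shards,
# unless min_num_shards pushes the result higher.
assert get_num_shards(1_000_000, 400_000, min_num_shards=1) == 3
assert get_num_shards(1_000_000, 400_000, min_num_shards=4) == 4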
def invalid_config_error_message(action, key, val):
"""Returns a better error message when invalid configuration option
is provided."""
if action in ('store_true', 'store_false'):
return ("{0} is not a valid value for {1} option, "
"please specify a boolean value like yes/no, "
"true/false or 1/0 instead.").format(val, key)
return ("{0} is not a valid value for {1} option, "
"please specify a numerical value like 1/0 "
"instead.").format(val, key) | Returns a better error message when invalid configuration option
is provided. | Below is the the instruction that describes the task:
### Input:
Returns a better error message when invalid configuration option
is provided.
### Response:
def invalid_config_error_message(action, key, val):
"""Returns a better error message when invalid configuration option
is provided."""
if action in ('store_true', 'store_false'):
return ("{0} is not a valid value for {1} option, "
"please specify a boolean value like yes/no, "
"true/false or 1/0 instead.").format(val, key)
return ("{0} is not a valid value for {1} option, "
"please specify a numerical value like 1/0 "
"instead.").format(val, key) |
def independent_data(self):
"""
Read-only Property
:return: Data belonging to each independent variable as a dict with
variable names as key, data as value.
:rtype: collections.OrderedDict
"""
return OrderedDict((var, self.data[var]) for var in self.model.independent_vars) | Read-only Property
:return: Data belonging to each independent variable as a dict with
variable names as key, data as value.
:rtype: collections.OrderedDict | Below is the the instruction that describes the task:
### Input:
Read-only Property
:return: Data belonging to each independent variable as a dict with
variable names as key, data as value.
:rtype: collections.OrderedDict
### Response:
def independent_data(self):
"""
Read-only Property
:return: Data belonging to each independent variable as a dict with
variable names as key, data as value.
:rtype: collections.OrderedDict
"""
return OrderedDict((var, self.data[var]) for var in self.model.independent_vars) |
def OnSecondaryCheckbox(self, event):
"""Top Checkbox event handler"""
self.attrs["top"] = event.IsChecked()
self.attrs["right"] = event.IsChecked()
post_command_event(self, self.DrawChartMsg) | Top Checkbox event handler | Below is the the instruction that describes the task:
### Input:
Top Checkbox event handler
### Response:
def OnSecondaryCheckbox(self, event):
"""Top Checkbox event handler"""
self.attrs["top"] = event.IsChecked()
self.attrs["right"] = event.IsChecked()
post_command_event(self, self.DrawChartMsg) |
def write(self, path=None):
"""
Write all of the HostsEntry instances back to the hosts file
:param path: override the write path
:return: Dictionary containing counts
"""
written_count = 0
comments_written = 0
blanks_written = 0
ipv4_entries_written = 0
ipv6_entries_written = 0
if path:
output_file_path = path
else:
output_file_path = self.hosts_path
try:
with open(output_file_path, 'w') as hosts_file:
for written_count, line in enumerate(self.entries):
if line.entry_type == 'comment':
hosts_file.write(line.comment + "\n")
comments_written += 1
if line.entry_type == 'blank':
hosts_file.write("\n")
blanks_written += 1
if line.entry_type == 'ipv4':
hosts_file.write(
"{0}\t{1}\n".format(
line.address,
' '.join(line.names),
)
)
ipv4_entries_written += 1
if line.entry_type == 'ipv6':
hosts_file.write(
"{0}\t{1}\n".format(
line.address,
' '.join(line.names), ))
ipv6_entries_written += 1
except:
raise UnableToWriteHosts()
return {'total_written': written_count + 1,
'comments_written': comments_written,
'blanks_written': blanks_written,
'ipv4_entries_written': ipv4_entries_written,
'ipv6_entries_written': ipv6_entries_written} | Write all of the HostsEntry instances back to the hosts file
:param path: override the write path
:return: Dictionary containing counts | Below is the the instruction that describes the task:
### Input:
Write all of the HostsEntry instances back to the hosts file
:param path: override the write path
:return: Dictionary containing counts
### Response:
def write(self, path=None):
"""
Write all of the HostsEntry instances back to the hosts file
:param path: override the write path
:return: Dictionary containing counts
"""
written_count = 0
comments_written = 0
blanks_written = 0
ipv4_entries_written = 0
ipv6_entries_written = 0
if path:
output_file_path = path
else:
output_file_path = self.hosts_path
try:
with open(output_file_path, 'w') as hosts_file:
for written_count, line in enumerate(self.entries):
if line.entry_type == 'comment':
hosts_file.write(line.comment + "\n")
comments_written += 1
if line.entry_type == 'blank':
hosts_file.write("\n")
blanks_written += 1
if line.entry_type == 'ipv4':
hosts_file.write(
"{0}\t{1}\n".format(
line.address,
' '.join(line.names),
)
)
ipv4_entries_written += 1
if line.entry_type == 'ipv6':
hosts_file.write(
"{0}\t{1}\n".format(
line.address,
' '.join(line.names), ))
ipv6_entries_written += 1
except:
raise UnableToWriteHosts()
return {'total_written': written_count + 1,
'comments_written': comments_written,
'blanks_written': blanks_written,
'ipv4_entries_written': ipv4_entries_written,
'ipv6_entries_written': ipv6_entries_written} |
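A hedged usage sketch in the style of the python-hosts API this method appears to belong to; the entry constructor arguments and file paths are assumptions.
# Hypothetical usage sketch: add one IPv4 entry and write a copy instead of the live hosts file.
hosts = Hosts(path='/etc/hosts')
hosts.add([HostsEntry(entry_type='ipv4', address='10.0.0.5', names=['db.internal'])])
counts = hosts.write(path='/tmp/hosts.out')
print(counts['ipv4_entries_written'], counts['total_written'])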
def clear_data(self, queues=None, edge=None, edge_type=None):
"""Clears data from all queues.
If none of the parameters are given then every queue's data is
cleared.
Parameters
----------
queues : int or an iterable of int (optional)
The edge index (or an iterable of edge indices) identifying
the :class:`QueueServer(s)<.QueueServer>` whose data will
be cleared.
edge : 2-tuple of int or *array_like* (optional)
Explicitly specify which queues' data to clear. Must be
either:
* A 2-tuple of the edge's source and target vertex
indices, or
* An iterable of 2-tuples of the edge's source and
target vertex indices.
edge_type : int or an iterable of int (optional)
        An integer, or a collection of integers identifying which
edge types will have their data cleared.
"""
queues = _get_queues(self.g, queues, edge, edge_type)
for k in queues:
self.edge2queue[k].data = {} | Clears data from all queues.
If none of the parameters are given then every queue's data is
cleared.
Parameters
----------
queues : int or an iterable of int (optional)
The edge index (or an iterable of edge indices) identifying
the :class:`QueueServer(s)<.QueueServer>` whose data will
be cleared.
edge : 2-tuple of int or *array_like* (optional)
Explicitly specify which queues' data to clear. Must be
either:
* A 2-tuple of the edge's source and target vertex
indices, or
* An iterable of 2-tuples of the edge's source and
target vertex indices.
edge_type : int or an iterable of int (optional)
    An integer, or a collection of integers identifying which
edge types will have their data cleared. | Below is the the instruction that describes the task:
### Input:
Clears data from all queues.
If none of the parameters are given then every queue's data is
cleared.
Parameters
----------
queues : int or an iterable of int (optional)
The edge index (or an iterable of edge indices) identifying
the :class:`QueueServer(s)<.QueueServer>` whose data will
be cleared.
edge : 2-tuple of int or *array_like* (optional)
Explicitly specify which queues' data to clear. Must be
either:
* A 2-tuple of the edge's source and target vertex
indices, or
* An iterable of 2-tuples of the edge's source and
target vertex indices.
edge_type : int or an iterable of int (optional)
    An integer, or a collection of integers identifying which
edge types will have their data cleared.
### Response:
def clear_data(self, queues=None, edge=None, edge_type=None):
"""Clears data from all queues.
If none of the parameters are given then every queue's data is
cleared.
Parameters
----------
queues : int or an iterable of int (optional)
The edge index (or an iterable of edge indices) identifying
the :class:`QueueServer(s)<.QueueServer>` whose data will
be cleared.
edge : 2-tuple of int or *array_like* (optional)
Explicitly specify which queues' data to clear. Must be
either:
* A 2-tuple of the edge's source and target vertex
indices, or
* An iterable of 2-tuples of the edge's source and
target vertex indices.
edge_type : int or an iterable of int (optional)
        An integer, or a collection of integers identifying which
edge types will have their data cleared.
"""
queues = _get_queues(self.g, queues, edge, edge_type)
for k in queues:
self.edge2queue[k].data = {} |
def sanity(request, sysmeta_pyxb):
"""Check that sysmeta_pyxb is suitable for creating a new object and matches the
uploaded sciobj bytes."""
_does_not_contain_replica_sections(sysmeta_pyxb)
_is_not_archived(sysmeta_pyxb)
_obsoleted_by_not_specified(sysmeta_pyxb)
if 'HTTP_VENDOR_GMN_REMOTE_URL' in request.META:
return
_has_correct_file_size(request, sysmeta_pyxb)
_is_supported_checksum_algorithm(sysmeta_pyxb)
_is_correct_checksum(request, sysmeta_pyxb) | Check that sysmeta_pyxb is suitable for creating a new object and matches the
uploaded sciobj bytes. | Below is the the instruction that describes the task:
### Input:
Check that sysmeta_pyxb is suitable for creating a new object and matches the
uploaded sciobj bytes.
### Response:
def sanity(request, sysmeta_pyxb):
"""Check that sysmeta_pyxb is suitable for creating a new object and matches the
uploaded sciobj bytes."""
_does_not_contain_replica_sections(sysmeta_pyxb)
_is_not_archived(sysmeta_pyxb)
_obsoleted_by_not_specified(sysmeta_pyxb)
if 'HTTP_VENDOR_GMN_REMOTE_URL' in request.META:
return
_has_correct_file_size(request, sysmeta_pyxb)
_is_supported_checksum_algorithm(sysmeta_pyxb)
_is_correct_checksum(request, sysmeta_pyxb) |
def _prt_qualifiers(associations, prt=sys.stdout):
"""Print Qualifiers found in the annotations.
QUALIFIERS:
1,462 colocalizes_with
1,454 contributes_to
1,157 not
13 not colocalizes_with (TBD: CHK - Seen in gene2go, but not gafs)
4 not contributes_to (TBD: CHK - Seen in gene2go, but not gafs)
"""
prt.write('QUALIFIERS:\n')
for fld, cnt in cx.Counter(q for nt in associations for q in nt.Qualifier).most_common():
prt.write(' {N:6,} {FLD}\n'.format(N=cnt, FLD=fld)) | Print Qualifiers found in the annotations.
QUALIFIERS:
1,462 colocalizes_with
1,454 contributes_to
1,157 not
13 not colocalizes_with (TBD: CHK - Seen in gene2go, but not gafs)
4 not contributes_to (TBD: CHK - Seen in gene2go, but not gafs) | Below is the the instruction that describes the task:
### Input:
Print Qualifiers found in the annotations.
QUALIFIERS:
1,462 colocalizes_with
1,454 contributes_to
1,157 not
13 not colocalizes_with (TBD: CHK - Seen in gene2go, but not gafs)
4 not contributes_to (TBD: CHK - Seen in gene2go, but not gafs)
### Response:
def _prt_qualifiers(associations, prt=sys.stdout):
"""Print Qualifiers found in the annotations.
QUALIFIERS:
1,462 colocalizes_with
1,454 contributes_to
1,157 not
13 not colocalizes_with (TBD: CHK - Seen in gene2go, but not gafs)
4 not contributes_to (TBD: CHK - Seen in gene2go, but not gafs)
"""
prt.write('QUALIFIERS:\n')
for fld, cnt in cx.Counter(q for nt in associations for q in nt.Qualifier).most_common():
prt.write(' {N:6,} {FLD}\n'.format(N=cnt, FLD=fld)) |
def _compute_site_scaling(self, C, vs30):
"""
Returns the site scaling term as a simple coefficient
"""
site_term = np.zeros(len(vs30), dtype=float)
# For soil sites add on the site coefficient
site_term[vs30 < 760.0] = C["e"]
return site_term | Returns the site scaling term as a simple coefficient | Below is the the instruction that describes the task:
### Input:
Returns the site scaling term as a simple coefficient
### Response:
def _compute_site_scaling(self, C, vs30):
"""
Returns the site scaling term as a simple coefficient
"""
site_term = np.zeros(len(vs30), dtype=float)
# For soil sites add on the site coefficient
site_term[vs30 < 760.0] = C["e"]
return site_term |
def sort(self):
"""
Sort the fragments in the list.
:raises ValueError: if there is a fragment which violates
the list constraints
"""
if self.is_guaranteed_sorted:
self.log(u"Already sorted, returning")
return
self.log(u"Sorting...")
self.__fragments = sorted(self.__fragments)
self.log(u"Sorting... done")
self.log(u"Checking relative positions...")
for i in range(len(self) - 1):
current_interval = self[i].interval
next_interval = self[i + 1].interval
if current_interval.relative_position_of(next_interval) not in self.ALLOWED_POSITIONS:
self.log(u"Found overlapping fragments:")
self.log([u" Index %d => %s", i, current_interval])
self.log([u" Index %d => %s", i + 1, next_interval])
self.log_exc(u"The list contains two fragments overlapping in a forbidden way", None, True, ValueError)
self.log(u"Checking relative positions... done")
self.__sorted = True | Sort the fragments in the list.
:raises ValueError: if there is a fragment which violates
the list constraints | Below is the the instruction that describes the task:
### Input:
Sort the fragments in the list.
:raises ValueError: if there is a fragment which violates
the list constraints
### Response:
def sort(self):
"""
Sort the fragments in the list.
:raises ValueError: if there is a fragment which violates
the list constraints
"""
if self.is_guaranteed_sorted:
self.log(u"Already sorted, returning")
return
self.log(u"Sorting...")
self.__fragments = sorted(self.__fragments)
self.log(u"Sorting... done")
self.log(u"Checking relative positions...")
for i in range(len(self) - 1):
current_interval = self[i].interval
next_interval = self[i + 1].interval
if current_interval.relative_position_of(next_interval) not in self.ALLOWED_POSITIONS:
self.log(u"Found overlapping fragments:")
self.log([u" Index %d => %s", i, current_interval])
self.log([u" Index %d => %s", i + 1, next_interval])
self.log_exc(u"The list contains two fragments overlapping in a forbidden way", None, True, ValueError)
self.log(u"Checking relative positions... done")
self.__sorted = True |
def import_field(field_classpath):
"""
Imports a field by its dotted class path, prepending "django.db.models"
to raw class names and raising an exception if the import fails.
"""
if '.' in field_classpath:
fully_qualified = field_classpath
else:
fully_qualified = "django.db.models.%s" % field_classpath
try:
return import_dotted_path(fully_qualified)
except ImportError:
raise ImproperlyConfigured("The EXTRA_MODEL_FIELDS setting contains "
"the field '%s' which could not be "
"imported." % field_classpath) | Imports a field by its dotted class path, prepending "django.db.models"
to raw class names and raising an exception if the import fails. | Below is the the instruction that describes the task:
### Input:
Imports a field by its dotted class path, prepending "django.db.models"
to raw class names and raising an exception if the import fails.
### Response:
def import_field(field_classpath):
"""
Imports a field by its dotted class path, prepending "django.db.models"
to raw class names and raising an exception if the import fails.
"""
if '.' in field_classpath:
fully_qualified = field_classpath
else:
fully_qualified = "django.db.models.%s" % field_classpath
try:
return import_dotted_path(fully_qualified)
except ImportError:
raise ImproperlyConfigured("The EXTRA_MODEL_FIELDS setting contains "
"the field '%s' which could not be "
"imported." % field_classpath) |
def columns(x, rho, proxop):
"""Applies a proximal operator to the columns of a matrix"""
xnext = np.zeros_like(x)
for ix in range(x.shape[1]):
xnext[:, ix] = proxop(x[:, ix], rho)
return xnext | Applies a proximal operator to the columns of a matrix | Below is the the instruction that describes the task:
### Input:
Applies a proximal operator to the columns of a matrix
### Response:
def columns(x, rho, proxop):
"""Applies a proximal operator to the columns of a matrix"""
xnext = np.zeros_like(x)
for ix in range(x.shape[1]):
xnext[:, ix] = proxop(x[:, ix], rho)
return xnext |
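A runnable sketch of the column-wise helper above; the soft-thresholding operator is an illustrative stand-in for a proximal operator, not taken from the source library.
import numpy as np

def soft_threshold(v, rho):
    # illustrative proximal operator: shrink each entry toward zero by 1/rho
    return np.sign(v) * np.maximum(np.abs(v) - 1.0 / rho, 0.0)

x = np.array([[1.5, -0.2], [-3.0, 0.4]])
print(columns(x, rho=2.0, proxop=soft_threshold))  # each column is processed independently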
def read(self):
"""Iterate over all JSON input (Generator)"""
for line in self.io.read():
with self.parse_line(line) as j:
yield j | Iterate over all JSON input (Generator) | Below is the the instruction that describes the task:
### Input:
Iterate over all JSON input (Generator)
### Response:
def read(self):
"""Iterate over all JSON input (Generator)"""
for line in self.io.read():
with self.parse_line(line) as j:
yield j |
def check_hash(path, checksum, hash_type='md5'):
"""Validate a file using a cryptographic checksum.
:param str checksum: Value of the checksum used to validate the file.
:param str hash_type: Hash algorithm used to generate `checksum`.
        Can be any hash algorithm supported by :mod:`hashlib`,
such as md5, sha1, sha256, sha512, etc.
:raises ChecksumError: If the file fails the checksum
"""
actual_checksum = file_hash(path, hash_type)
if checksum != actual_checksum:
raise ChecksumError("'%s' != '%s'" % (checksum, actual_checksum)) | Validate a file using a cryptographic checksum.
:param str checksum: Value of the checksum used to validate the file.
:param str hash_type: Hash algorithm used to generate `checksum`.
    Can be any hash algorithm supported by :mod:`hashlib`,
such as md5, sha1, sha256, sha512, etc.
:raises ChecksumError: If the file fails the checksum | Below is the the instruction that describes the task:
### Input:
Validate a file using a cryptographic checksum.
:param str checksum: Value of the checksum used to validate the file.
:param str hash_type: Hash algorithm used to generate `checksum`.
    Can be any hash algorithm supported by :mod:`hashlib`,
such as md5, sha1, sha256, sha512, etc.
:raises ChecksumError: If the file fails the checksum
### Response:
def check_hash(path, checksum, hash_type='md5'):
"""Validate a file using a cryptographic checksum.
:param str checksum: Value of the checksum used to validate the file.
:param str hash_type: Hash algorithm used to generate `checksum`.
        Can be any hash algorithm supported by :mod:`hashlib`,
such as md5, sha1, sha256, sha512, etc.
:raises ChecksumError: If the file fails the checksum
"""
actual_checksum = file_hash(path, hash_type)
if checksum != actual_checksum:
raise ChecksumError("'%s' != '%s'" % (checksum, actual_checksum)) |
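A small usage sketch; it assumes `file_hash` behaves like the usual charmhelpers helper (a hashlib digest of the file contents), and the payload is made up.
import hashlib
import tempfile

# Hypothetical usage sketch: validate a temp file against a digest computed up front.
with tempfile.NamedTemporaryFile(delete=False) as f:
    f.write(b"hello")
    path = f.name
expected = hashlib.sha256(b"hello").hexdigest()
check_hash(path, expected, hash_type="sha256")  # silent on success, raises ChecksumError on mismatch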
def get_consensus_module(module_name):
"""Returns a consensus module by name.
Args:
module_name (str): The name of the module to load.
Returns:
module: The consensus module.
Raises:
UnknownConsensusModuleError: Raised if the given module_name does
not correspond to a consensus implementation.
"""
module_package = module_name
if module_name == 'genesis':
module_package = (
'sawtooth_validator.journal.consensus.genesis.'
'genesis_consensus'
)
elif module_name == 'devmode':
module_package = (
'sawtooth_validator.journal.consensus.dev_mode.'
'dev_mode_consensus'
)
try:
return importlib.import_module(module_package)
except ImportError:
raise UnknownConsensusModuleError(
'Consensus module "{}" does not exist.'.format(module_name)) | Returns a consensus module by name.
Args:
module_name (str): The name of the module to load.
Returns:
module: The consensus module.
Raises:
UnknownConsensusModuleError: Raised if the given module_name does
not correspond to a consensus implementation. | Below is the the instruction that describes the task:
### Input:
Returns a consensus module by name.
Args:
module_name (str): The name of the module to load.
Returns:
module: The consensus module.
Raises:
UnknownConsensusModuleError: Raised if the given module_name does
not correspond to a consensus implementation.
### Response:
def get_consensus_module(module_name):
"""Returns a consensus module by name.
Args:
module_name (str): The name of the module to load.
Returns:
module: The consensus module.
Raises:
UnknownConsensusModuleError: Raised if the given module_name does
not correspond to a consensus implementation.
"""
module_package = module_name
if module_name == 'genesis':
module_package = (
'sawtooth_validator.journal.consensus.genesis.'
'genesis_consensus'
)
elif module_name == 'devmode':
module_package = (
'sawtooth_validator.journal.consensus.dev_mode.'
'dev_mode_consensus'
)
try:
return importlib.import_module(module_package)
except ImportError:
raise UnknownConsensusModuleError(
'Consensus module "{}" does not exist.'.format(module_name)) |
def get_current_and_head_revision(
database_url: str,
alembic_config_filename: str,
alembic_base_dir: str = None,
version_table: str = DEFAULT_ALEMBIC_VERSION_TABLE) -> Tuple[str, str]:
"""
Returns a tuple of ``(current_revision, head_revision)``; see
:func:`get_current_revision` and :func:`get_head_revision_from_alembic`.
Arguments:
database_url: SQLAlchemy URL for the database
alembic_config_filename: config filename
alembic_base_dir: directory to start in, so relative paths in the
config file work.
version_table: table name for Alembic versions
"""
# Where we are
head_revision = get_head_revision_from_alembic(
alembic_config_filename=alembic_config_filename,
alembic_base_dir=alembic_base_dir,
version_table=version_table
)
log.info("Intended database version: {}", head_revision)
# Where we want to be
current_revision = get_current_revision(
database_url=database_url,
version_table=version_table
)
log.info("Current database version: {}", current_revision)
# Are we where we want to be?
return current_revision, head_revision | Returns a tuple of ``(current_revision, head_revision)``; see
:func:`get_current_revision` and :func:`get_head_revision_from_alembic`.
Arguments:
database_url: SQLAlchemy URL for the database
alembic_config_filename: config filename
alembic_base_dir: directory to start in, so relative paths in the
config file work.
version_table: table name for Alembic versions | Below is the the instruction that describes the task:
### Input:
Returns a tuple of ``(current_revision, head_revision)``; see
:func:`get_current_revision` and :func:`get_head_revision_from_alembic`.
Arguments:
database_url: SQLAlchemy URL for the database
alembic_config_filename: config filename
alembic_base_dir: directory to start in, so relative paths in the
config file work.
version_table: table name for Alembic versions
### Response:
def get_current_and_head_revision(
database_url: str,
alembic_config_filename: str,
alembic_base_dir: str = None,
version_table: str = DEFAULT_ALEMBIC_VERSION_TABLE) -> Tuple[str, str]:
"""
Returns a tuple of ``(current_revision, head_revision)``; see
:func:`get_current_revision` and :func:`get_head_revision_from_alembic`.
Arguments:
database_url: SQLAlchemy URL for the database
alembic_config_filename: config filename
alembic_base_dir: directory to start in, so relative paths in the
config file work.
version_table: table name for Alembic versions
"""
# Where we are
head_revision = get_head_revision_from_alembic(
alembic_config_filename=alembic_config_filename,
alembic_base_dir=alembic_base_dir,
version_table=version_table
)
log.info("Intended database version: {}", head_revision)
# Where we want to be
current_revision = get_current_revision(
database_url=database_url,
version_table=version_table
)
log.info("Current database version: {}", current_revision)
# Are we where we want to be?
return current_revision, head_revision |
def assert_valid(sysmeta_pyxb, pid):
"""Validate file at {sciobj_path} against schema selected via formatId and raise
InvalidRequest if invalid.
Validation is only performed when:
- SciMeta validation is enabled
- and Object size is below size limit for validation
- and formatId designates object as a Science Metadata object which is recognized
and parsed by DataONE CNs
- and XML Schema (XSD) files for formatId are present on local system
"""
if not (_is_validation_enabled() and _is_installed_scimeta_format_id(sysmeta_pyxb)):
return
if _is_above_size_limit(sysmeta_pyxb):
if _is_action_accept():
return
else:
raise d1_common.types.exceptions.InvalidRequest(
0,
'Science Metadata file is above size limit for validation and this '
'node has been configured to reject unvalidated Science Metadata '
'files. For more information, see the SCIMETA_VALIDATE* settings. '
'size={} size_limit={}'.format(
sysmeta_pyxb.size, django.conf.settings.SCIMETA_VALIDATION_MAX_SIZE
),
)
with d1_gmn.app.sciobj_store.open_sciobj_file_by_pid_ctx(pid) as sciobj_file:
try:
d1_scimeta.xml_schema.validate(sysmeta_pyxb.formatId, sciobj_file.read())
except d1_scimeta.xml_schema.SciMetaValidationError as e:
raise d1_common.types.exceptions.InvalidRequest(0, str(e)) | Validate file at {sciobj_path} against schema selected via formatId and raise
InvalidRequest if invalid.
Validation is only performed when:
- SciMeta validation is enabled
- and Object size is below size limit for validation
- and formatId designates object as a Science Metadata object which is recognized
and parsed by DataONE CNs
- and XML Schema (XSD) files for formatId are present on local system | Below is the the instruction that describes the task:
### Input:
Validate file at {sciobj_path} against schema selected via formatId and raise
InvalidRequest if invalid.
Validation is only performed when:
- SciMeta validation is enabled
- and Object size is below size limit for validation
- and formatId designates object as a Science Metadata object which is recognized
and parsed by DataONE CNs
- and XML Schema (XSD) files for formatId are present on local system
### Response:
def assert_valid(sysmeta_pyxb, pid):
"""Validate file at {sciobj_path} against schema selected via formatId and raise
InvalidRequest if invalid.
Validation is only performed when:
- SciMeta validation is enabled
- and Object size is below size limit for validation
- and formatId designates object as a Science Metadata object which is recognized
and parsed by DataONE CNs
- and XML Schema (XSD) files for formatId are present on local system
"""
if not (_is_validation_enabled() and _is_installed_scimeta_format_id(sysmeta_pyxb)):
return
if _is_above_size_limit(sysmeta_pyxb):
if _is_action_accept():
return
else:
raise d1_common.types.exceptions.InvalidRequest(
0,
'Science Metadata file is above size limit for validation and this '
'node has been configured to reject unvalidated Science Metadata '
'files. For more information, see the SCIMETA_VALIDATE* settings. '
'size={} size_limit={}'.format(
sysmeta_pyxb.size, django.conf.settings.SCIMETA_VALIDATION_MAX_SIZE
),
)
with d1_gmn.app.sciobj_store.open_sciobj_file_by_pid_ctx(pid) as sciobj_file:
try:
d1_scimeta.xml_schema.validate(sysmeta_pyxb.formatId, sciobj_file.read())
except d1_scimeta.xml_schema.SciMetaValidationError as e:
raise d1_common.types.exceptions.InvalidRequest(0, str(e)) |
def get_node_label(self, model):
"""
Defines how labels are constructed from models.
    Default - uses verbose name, line breaks where sensible
"""
if model.is_proxy:
label = "(P) %s" % (model.name.title())
else:
label = "%s" % (model.name.title())
line = ""
new_label = []
for w in label.split(" "):
if len(line + w) > 15:
new_label.append(line)
line = w
else:
line += " "
line += w
new_label.append(line)
return "\n".join(new_label) | Defines how labels are constructed from models.
Default - uses verbose name, line breaks where sensible | Below is the the instruction that describes the task:
### Input:
Defines how labels are constructed from models.
Default - uses verbose name, line breaks where sensible
### Response:
def get_node_label(self, model):
"""
Defines how labels are constructed from models.
    Default - uses verbose name, line breaks where sensible
"""
if model.is_proxy:
label = "(P) %s" % (model.name.title())
else:
label = "%s" % (model.name.title())
line = ""
new_label = []
for w in label.split(" "):
if len(line + w) > 15:
new_label.append(line)
line = w
else:
line += " "
line += w
new_label.append(line)
return "\n".join(new_label) |
def a_unexpected_prompt(ctx):
"""Provide message when received humphost prompt."""
prompt = ctx.ctrl.match.group(0)
ctx.msg = "Received the jump host prompt: '{}'".format(prompt)
ctx.device.connected = False
ctx.finished = True
raise ConnectionError("Unable to connect to the device.", ctx.ctrl.hostname) | Provide message when received humphost prompt. | Below is the the instruction that describes the task:
### Input:
Provide message when received jump host prompt.
### Response:
def a_unexpected_prompt(ctx):
"""Provide message when received humphost prompt."""
prompt = ctx.ctrl.match.group(0)
ctx.msg = "Received the jump host prompt: '{}'".format(prompt)
ctx.device.connected = False
ctx.finished = True
raise ConnectionError("Unable to connect to the device.", ctx.ctrl.hostname) |
def _generateAlias(self):
"""Return an unused auth level alias"""
for i in range(1000):
alias = 'cust%d' % (i, )
if alias not in self.auth_level_aliases:
return alias
raise RuntimeError('Could not find an unused alias (tried 1000!)') | Return an unused auth level alias | Below is the the instruction that describes the task:
### Input:
Return an unused auth level alias
### Response:
def _generateAlias(self):
"""Return an unused auth level alias"""
for i in range(1000):
alias = 'cust%d' % (i, )
if alias not in self.auth_level_aliases:
return alias
raise RuntimeError('Could not find an unused alias (tried 1000!)') |
def cumulative_min(self):
"""
Return the cumulative minimum value of the elements in the SArray.
Returns an SArray where each element in the output corresponds to the
minimum value of all the elements preceding and including it. The
SArray is expected to be of numeric type (int, float).
Returns
-------
out : SArray[int, float]
Notes
-----
- Missing values are ignored while performing the cumulative
aggregate operation.
Examples
--------
>>> sa = SArray([1, 2, 3, 4, 0])
>>> sa.cumulative_min()
dtype: int
    rows: 5
[1, 1, 1, 1, 0]
"""
from .. import extensions
agg_op = "__builtin__cum_min__"
return SArray(_proxy = self.__proxy__.builtin_cumulative_aggregate(agg_op)) | Return the cumulative minimum value of the elements in the SArray.
Returns an SArray where each element in the output corresponds to the
minimum value of all the elements preceding and including it. The
SArray is expected to be of numeric type (int, float).
Returns
-------
out : SArray[int, float]
Notes
-----
- Missing values are ignored while performing the cumulative
aggregate operation.
Examples
--------
>>> sa = SArray([1, 2, 3, 4, 0])
>>> sa.cumulative_min()
dtype: int
rows: 5
[1, 1, 1, 1, 0] | Below is the the instruction that describes the task:
### Input:
Return the cumulative minimum value of the elements in the SArray.
Returns an SArray where each element in the output corresponds to the
minimum value of all the elements preceding and including it. The
SArray is expected to be of numeric type (int, float).
Returns
-------
out : SArray[int, float]
Notes
-----
- Missing values are ignored while performing the cumulative
aggregate operation.
Examples
--------
>>> sa = SArray([1, 2, 3, 4, 0])
>>> sa.cumulative_min()
dtype: int
rows: 5
[1, 1, 1, 1, 0]
### Response:
def cumulative_min(self):
"""
Return the cumulative minimum value of the elements in the SArray.
Returns an SArray where each element in the output corresponds to the
minimum value of all the elements preceding and including it. The
SArray is expected to be of numeric type (int, float).
Returns
-------
out : SArray[int, float]
Notes
-----
- Missing values are ignored while performing the cumulative
aggregate operation.
Examples
--------
>>> sa = SArray([1, 2, 3, 4, 0])
>>> sa.cumulative_min()
dtype: int
    rows: 5
[1, 1, 1, 1, 0]
"""
from .. import extensions
agg_op = "__builtin__cum_min__"
return SArray(_proxy = self.__proxy__.builtin_cumulative_aggregate(agg_op)) |
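A short usage sketch, assuming the SArray type shown above is importable from its package; the input values are made up.
# Hypothetical usage sketch.
sa = SArray([3, 1, 4, 1, 5])
print(sa.cumulative_min())  # running minimum: [3, 1, 1, 1, 1]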
def on_commit(self, changes):
"""Method that gets called when a model is changed. This serves
to do the actual index writing.
"""
if _get_config(self)['enable_indexing'] is False:
return None
for wh in self.whoosheers:
if not wh.auto_update:
continue
writer = None
for change in changes:
if change[0].__class__ in wh.models:
method_name = '{0}_{1}'.format(change[1], change[0].__class__.__name__.lower())
method = getattr(wh, method_name, None)
if method:
if not writer:
writer = type(self).get_or_create_index(_get_app(self), wh).\
writer(timeout=_get_config(self)['writer_timeout'])
method(writer, change[0])
if writer:
writer.commit() | Method that gets called when a model is changed. This serves
to do the actual index writing. | Below is the the instruction that describes the task:
### Input:
Method that gets called when a model is changed. This serves
to do the actual index writing.
### Response:
def on_commit(self, changes):
"""Method that gets called when a model is changed. This serves
to do the actual index writing.
"""
if _get_config(self)['enable_indexing'] is False:
return None
for wh in self.whoosheers:
if not wh.auto_update:
continue
writer = None
for change in changes:
if change[0].__class__ in wh.models:
method_name = '{0}_{1}'.format(change[1], change[0].__class__.__name__.lower())
method = getattr(wh, method_name, None)
if method:
if not writer:
writer = type(self).get_or_create_index(_get_app(self), wh).\
writer(timeout=_get_config(self)['writer_timeout'])
method(writer, change[0])
if writer:
writer.commit() |
def generate_parameters(self, parameter_id):
"""Returns a dict of trial (hyper-)parameters, as a serializable object.
Parameters
----------
parameter_id : int
Returns
-------
config : dict
"""
if not self.population:
raise RuntimeError('The population is empty')
pos = -1
for i in range(len(self.population)):
if self.population[i].result is None:
pos = i
break
if pos != -1:
indiv = copy.deepcopy(self.population[pos])
self.population.pop(pos)
total_config = indiv.config
else:
random.shuffle(self.population)
if self.population[0].result < self.population[1].result:
self.population[0] = self.population[1]
# mutation
space = json2space(self.searchspace_json,
self.population[0].config)
is_rand = dict()
mutation_pos = space[random.randint(0, len(space)-1)]
for i in range(len(self.space)):
is_rand[self.space[i]] = (self.space[i] == mutation_pos)
config = json2paramater(
self.searchspace_json, is_rand, self.random_state, self.population[0].config)
self.population.pop(1)
# remove "_index" from config and save params-id
total_config = config
self.total_data[parameter_id] = total_config
config = _split_index(total_config)
return config | Returns a dict of trial (hyper-)parameters, as a serializable object.
Parameters
----------
parameter_id : int
Returns
-------
config : dict | Below is the the instruction that describes the task:
### Input:
Returns a dict of trial (hyper-)parameters, as a serializable object.
Parameters
----------
parameter_id : int
Returns
-------
config : dict
### Response:
def generate_parameters(self, parameter_id):
"""Returns a dict of trial (hyper-)parameters, as a serializable object.
Parameters
----------
parameter_id : int
Returns
-------
config : dict
"""
if not self.population:
raise RuntimeError('The population is empty')
pos = -1
for i in range(len(self.population)):
if self.population[i].result is None:
pos = i
break
if pos != -1:
indiv = copy.deepcopy(self.population[pos])
self.population.pop(pos)
total_config = indiv.config
else:
random.shuffle(self.population)
if self.population[0].result < self.population[1].result:
self.population[0] = self.population[1]
# mutation
space = json2space(self.searchspace_json,
self.population[0].config)
is_rand = dict()
mutation_pos = space[random.randint(0, len(space)-1)]
for i in range(len(self.space)):
is_rand[self.space[i]] = (self.space[i] == mutation_pos)
config = json2paramater(
self.searchspace_json, is_rand, self.random_state, self.population[0].config)
self.population.pop(1)
# remove "_index" from config and save params-id
total_config = config
self.total_data[parameter_id] = total_config
config = _split_index(total_config)
return config |
def lookup_facade(name, version):
"""
Given a facade name and version, attempt to pull that facade out
of the correct client<version>.py file.
"""
for _version in range(int(version), 0, -1):
try:
facade = getattr(CLIENTS[str(_version)], name)
return facade
except (KeyError, AttributeError):
continue
else:
raise ImportError("No supported version for facade: "
"{}".format(name)) | Given a facade name and version, attempt to pull that facade out
of the correct client<version>.py file. | Below is the the instruction that describes the task:
### Input:
Given a facade name and version, attempt to pull that facade out
of the correct client<version>.py file.
### Response:
def lookup_facade(name, version):
"""
Given a facade name and version, attempt to pull that facade out
of the correct client<version>.py file.
"""
for _version in range(int(version), 0, -1):
try:
facade = getattr(CLIENTS[str(_version)], name)
return facade
except (KeyError, AttributeError):
continue
else:
raise ImportError("No supported version for facade: "
"{}".format(name)) |
def merge_record_data(self, changes, orig_record=None):
"""This method merges PATCH requests with the db record to ensure no
data is lost. In addition, it is also a hook for other fields to
be overwritten, to ensure immutable fields aren't changed by a
request."""
current_app.logger.info("Merging request data with db record")
current_app.logger.debug("orig_record: {}".format(orig_record))
current_app.logger.debug("Changes".format(changes))
final_record = changes
if request.method == 'PATCH':
final_record = dict(orig_record)
final_record.update(changes)
elif request.method == 'PUT':
if '_id' in orig_record:
final_record['_id'] = orig_record['_id']
return final_record | This method merges PATCH requests with the db record to ensure no
data is lost. In addition, it is also a hook for other fields to
be overwritten, to ensure immutable fields aren't changed by a
request. | Below is the the instruction that describes the task:
### Input:
This method merges PATCH requests with the db record to ensure no
data is lost. In addition, it is also a hook for other fields to
be overwritten, to ensure immutable fields aren't changed by a
request.
### Response:
def merge_record_data(self, changes, orig_record=None):
"""This method merges PATCH requests with the db record to ensure no
data is lost. In addition, it is also a hook for other fields to
be overwritten, to ensure immutable fields aren't changed by a
request."""
current_app.logger.info("Merging request data with db record")
current_app.logger.debug("orig_record: {}".format(orig_record))
current_app.logger.debug("Changes".format(changes))
final_record = changes
if request.method == 'PATCH':
final_record = dict(orig_record)
final_record.update(changes)
elif request.method == 'PUT':
if '_id' in orig_record:
final_record['_id'] = orig_record['_id']
return final_record |
def _string_like(self, patterns):
"""
Wildcard fuzzy matching function equivalent to the SQL LIKE directive. Use
% as a multiple-character wildcard or _ (underscore) as a single-character
wildcard.
Use re_search or rlike for regex-based matching.
Parameters
----------
pattern : str or List[str]
A pattern or list of patterns to match. If `pattern` is a list, then if
**any** pattern matches the input then the corresponding row in the
output is ``True``.
Returns
-------
matched : ir.BooleanColumn
"""
return functools.reduce(
operator.or_,
(
ops.StringSQLLike(self, pattern).to_expr()
for pattern in util.promote_list(patterns)
),
) | Wildcard fuzzy matching function equivalent to the SQL LIKE directive. Use
% as a multiple-character wildcard or _ (underscore) as a single-character
wildcard.
Use re_search or rlike for regex-based matching.
Parameters
----------
pattern : str or List[str]
A pattern or list of patterns to match. If `pattern` is a list, then if
**any** pattern matches the input then the corresponding row in the
output is ``True``.
Returns
-------
matched : ir.BooleanColumn | Below is the the instruction that describes the task:
### Input:
Wildcard fuzzy matching function equivalent to the SQL LIKE directive. Use
% as a multiple-character wildcard or _ (underscore) as a single-character
wildcard.
Use re_search or rlike for regex-based matching.
Parameters
----------
pattern : str or List[str]
A pattern or list of patterns to match. If `pattern` is a list, then if
**any** pattern matches the input then the corresponding row in the
output is ``True``.
Returns
-------
matched : ir.BooleanColumn
### Response:
def _string_like(self, patterns):
"""
Wildcard fuzzy matching function equivalent to the SQL LIKE directive. Use
% as a multiple-character wildcard or _ (underscore) as a single-character
wildcard.
Use re_search or rlike for regex-based matching.
Parameters
----------
pattern : str or List[str]
A pattern or list of patterns to match. If `pattern` is a list, then if
**any** pattern matches the input then the corresponding row in the
output is ``True``.
Returns
-------
matched : ir.BooleanColumn
"""
return functools.reduce(
operator.or_,
(
ops.StringSQLLike(self, pattern).to_expr()
for pattern in util.promote_list(patterns)
),
) |
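A standalone analogue of the reduce/operator.or_ combination above, assuming plain Python strings rather than ibis expressions: each SQL LIKE pattern is translated to a regular expression ('%' becomes '.*', '_' becomes '.') and a value matches if any pattern does.

import functools
import operator
import re

def like_to_regex(pattern):
    parts = []
    for ch in pattern:
        if ch == '%':
            parts.append('.*')
        elif ch == '_':
            parts.append('.')
        else:
            parts.append(re.escape(ch))
    return re.compile('^' + ''.join(parts) + '$')

def string_like(value, patterns):
    if isinstance(patterns, str):
        patterns = [patterns]
    # OR together the per-pattern matches, mirroring the reduce above
    return functools.reduce(
        operator.or_,
        (bool(like_to_regex(p).match(value)) for p in patterns),
    )

print(string_like('report.csv', ['%.csv', '%.tsv']))  # True
print(string_like('report.txt', ['%.csv', '_.tsv']))  # False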
def update_file(self, file_id, upload_id):
"""
Send PUT request to /files/{file_id} to update the file contents to upload_id and sets a label.
:param file_id: str uuid of file
:param upload_id: str uuid of the upload where all the file chunks where uploaded
:param label: str short display label for the file
:return: requests.Response containing the successful result
"""
put_data = {
"upload[id]": upload_id,
}
return self._put("/files/" + file_id, put_data, content_type=ContentType.form) | Send PUT request to /files/{file_id} to update the file contents to upload_id and sets a label.
:param file_id: str uuid of file
:param upload_id: str uuid of the upload where all the file chunks where uploaded
:param label: str short display label for the file
:return: requests.Response containing the successful result | Below is the the instruction that describes the task:
### Input:
Send PUT request to /files/{file_id} to update the file contents to upload_id and sets a label.
:param file_id: str uuid of file
:param upload_id: str uuid of the upload where all the file chunks where uploaded
:param label: str short display label for the file
:return: requests.Response containing the successful result
### Response:
def update_file(self, file_id, upload_id):
"""
Send PUT request to /files/{file_id} to update the file contents to upload_id and sets a label.
:param file_id: str uuid of file
:param upload_id: str uuid of the upload where all the file chunks where uploaded
:param label: str short display label for the file
:return: requests.Response containing the successful result
"""
put_data = {
"upload[id]": upload_id,
}
return self._put("/files/" + file_id, put_data, content_type=ContentType.form) |
def fix_e125(self, result):
"""Fix indentation undistinguish from the next logical line."""
num_indent_spaces = int(result['info'].split()[1])
line_index = result['line'] - 1
target = self.source[line_index]
spaces_to_add = num_indent_spaces - len(_get_indentation(target))
indent = len(_get_indentation(target))
modified_lines = []
while len(_get_indentation(self.source[line_index])) >= indent:
self.source[line_index] = (' ' * spaces_to_add +
self.source[line_index])
modified_lines.append(1 + line_index) # Line indexed at 1.
line_index -= 1
return modified_lines | Fix indentation undistinguish from the next logical line. | Below is the the instruction that describes the task:
### Input:
Fix indentation undistinguish from the next logical line.
### Response:
def fix_e125(self, result):
"""Fix indentation undistinguish from the next logical line."""
num_indent_spaces = int(result['info'].split()[1])
line_index = result['line'] - 1
target = self.source[line_index]
spaces_to_add = num_indent_spaces - len(_get_indentation(target))
indent = len(_get_indentation(target))
modified_lines = []
while len(_get_indentation(self.source[line_index])) >= indent:
self.source[line_index] = (' ' * spaces_to_add +
self.source[line_index])
modified_lines.append(1 + line_index) # Line indexed at 1.
line_index -= 1
return modified_lines |
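A self-contained sketch of the same upward re-indentation walk on a plain list of source lines (the snippet and target indent are made up); a bounds check is added here so the walk stops at the top of the list.

def _get_indentation(line):
    return line[:len(line) - len(line.lstrip())]

def reindent_continuation(source, line_index, num_indent_spaces):
    target = source[line_index]
    spaces_to_add = num_indent_spaces - len(_get_indentation(target))
    indent = len(_get_indentation(target))
    modified_lines = []
    while line_index >= 0 and len(_get_indentation(source[line_index])) >= indent:
        source[line_index] = ' ' * spaces_to_add + source[line_index]
        modified_lines.append(1 + line_index)  # 1-indexed, as above
        line_index -= 1
    return modified_lines

lines = ['def f(a,\n', '    b):\n', '    return a + b\n']
print(reindent_continuation(lines, 1, 8))  # [2]
print(lines[1])                            # '        b):\n'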
def global_iterator(self):
"""
Return global iterator sympy expression
"""
global_iterator = sympy.Integer(0)
total_length = sympy.Integer(1)
for var_name, start, end, incr in reversed(self._loop_stack):
loop_var = symbol_pos_int(var_name)
length = end - start # FIXME is incr handled correct here?
global_iterator += (loop_var - start) * total_length
total_length *= length
return global_iterator | Return global iterator sympy expression | Below is the the instruction that describes the task:
### Input:
Return global iterator sympy expression
### Response:
def global_iterator(self):
"""
Return global iterator sympy expression
"""
global_iterator = sympy.Integer(0)
total_length = sympy.Integer(1)
for var_name, start, end, incr in reversed(self._loop_stack):
loop_var = symbol_pos_int(var_name)
length = end - start # FIXME is incr handled correct here?
global_iterator += (loop_var - start) * total_length
total_length *= length
return global_iterator |
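A standalone sketch of the same flattening arithmetic, assuming a loop stack of (name, start, end, increment) tuples with the outermost loop first; two nested loops i in [0, N) and j in [0, M) collapse to the row-major index M*i + j.

import sympy

def flatten_loops(loop_stack):
    gi = sympy.Integer(0)
    total_length = sympy.Integer(1)
    for var_name, start, end, incr in reversed(loop_stack):
        loop_var = sympy.Symbol(var_name, positive=True, integer=True)
        length = end - start
        gi += (loop_var - start) * total_length
        total_length *= length
    return gi

N, M = sympy.symbols('N M', positive=True, integer=True)
print(flatten_loops([('i', 0, N, 1), ('j', 0, M, 1)]))  # M*i + j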
def getImportPeople(self):
"""
Return an L{ImportPeopleWidget} which is a child of this fragment and
which will add people to C{self.organizer}.
"""
fragment = ImportPeopleWidget(self.organizer)
fragment.setFragmentParent(self)
return fragment | Return an L{ImportPeopleWidget} which is a child of this fragment and
which will add people to C{self.organizer}. | Below is the the instruction that describes the task:
### Input:
Return an L{ImportPeopleWidget} which is a child of this fragment and
which will add people to C{self.organizer}.
### Response:
def getImportPeople(self):
"""
Return an L{ImportPeopleWidget} which is a child of this fragment and
which will add people to C{self.organizer}.
"""
fragment = ImportPeopleWidget(self.organizer)
fragment.setFragmentParent(self)
return fragment |
def render(self, treewalker, encoding=None):
"""Serializes the stream from the treewalker into a string
:arg treewalker: the treewalker to serialize
:arg encoding: the string encoding to use
:returns: the serialized tree
Example:
>>> from html5lib import parse, getTreeWalker
>>> from html5lib.serializer import HTMLSerializer
>>> token_stream = parse('<html><body>Hi!</body></html>')
>>> walker = getTreeWalker('etree')
>>> serializer = HTMLSerializer(omit_optional_tags=False)
>>> serializer.render(walker(token_stream))
'<html><head></head><body>Hi!</body></html>'
"""
if encoding:
return b"".join(list(self.serialize(treewalker, encoding)))
else:
return "".join(list(self.serialize(treewalker))) | Serializes the stream from the treewalker into a string
:arg treewalker: the treewalker to serialize
:arg encoding: the string encoding to use
:returns: the serialized tree
Example:
>>> from html5lib import parse, getTreeWalker
>>> from html5lib.serializer import HTMLSerializer
>>> token_stream = parse('<html><body>Hi!</body></html>')
>>> walker = getTreeWalker('etree')
>>> serializer = HTMLSerializer(omit_optional_tags=False)
>>> serializer.render(walker(token_stream))
'<html><head></head><body>Hi!</body></html>' | Below is the the instruction that describes the task:
### Input:
Serializes the stream from the treewalker into a string
:arg treewalker: the treewalker to serialize
:arg encoding: the string encoding to use
:returns: the serialized tree
Example:
>>> from html5lib import parse, getTreeWalker
>>> from html5lib.serializer import HTMLSerializer
>>> token_stream = parse('<html><body>Hi!</body></html>')
>>> walker = getTreeWalker('etree')
>>> serializer = HTMLSerializer(omit_optional_tags=False)
>>> serializer.render(walker(token_stream))
'<html><head></head><body>Hi!</body></html>'
### Response:
def render(self, treewalker, encoding=None):
"""Serializes the stream from the treewalker into a string
:arg treewalker: the treewalker to serialize
:arg encoding: the string encoding to use
:returns: the serialized tree
Example:
>>> from html5lib import parse, getTreeWalker
>>> from html5lib.serializer import HTMLSerializer
>>> token_stream = parse('<html><body>Hi!</body></html>')
>>> walker = getTreeWalker('etree')
>>> serializer = HTMLSerializer(omit_optional_tags=False)
>>> serializer.render(walker(token_stream))
'<html><head></head><body>Hi!</body></html>'
"""
if encoding:
return b"".join(list(self.serialize(treewalker, encoding)))
else:
return "".join(list(self.serialize(treewalker))) |
def make_relative (self, other):
"""Return a new path that is the equivalent of this one relative to the path
*other*. Unlike :meth:`relative_to`, this will not throw an error if *self* is
not a sub-path of *other*; instead, it will use ``..`` to build a relative
path. This can result in invalid relative paths if *other* contains a
directory symbolic link.
If *self* is an absolute path, it is returned unmodified.
"""
if self.is_absolute ():
return self
from os.path import relpath
other = self.__class__ (other)
return self.__class__ (relpath (text_type (self), text_type (other))) | Return a new path that is the equivalent of this one relative to the path
*other*. Unlike :meth:`relative_to`, this will not throw an error if *self* is
not a sub-path of *other*; instead, it will use ``..`` to build a relative
path. This can result in invalid relative paths if *other* contains a
directory symbolic link.
If *self* is an absolute path, it is returned unmodified. | Below is the the instruction that describes the task:
### Input:
Return a new path that is the equivalent of this one relative to the path
*other*. Unlike :meth:`relative_to`, this will not throw an error if *self* is
not a sub-path of *other*; instead, it will use ``..`` to build a relative
path. This can result in invalid relative paths if *other* contains a
directory symbolic link.
If *self* is an absolute path, it is returned unmodified.
### Response:
def make_relative (self, other):
"""Return a new path that is the equivalent of this one relative to the path
*other*. Unlike :meth:`relative_to`, this will not throw an error if *self* is
not a sub-path of *other*; instead, it will use ``..`` to build a relative
path. This can result in invalid relative paths if *other* contains a
directory symbolic link.
If *self* is an absolute path, it is returned unmodified.
"""
if self.is_absolute ():
return self
from os.path import relpath
other = self.__class__ (other)
return self.__class__ (relpath (text_type (self), text_type (other))) |
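A minimal usage-style sketch of the same behaviour using os.path.relpath on plain strings (POSIX separators assumed; the paths are made up):

from os.path import isabs, relpath

def make_relative(path, other):
    if isabs(path):
        return path               # absolute paths pass through unchanged
    return relpath(path, other)

print(make_relative('data/run1/out.txt', 'data/run2'))  # ../run1/out.txt
print(make_relative('/etc/hosts', 'data/run2'))         # /etc/hosts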
def extract_from_image(self, image):
"""
Extract the image pixels within the polygon.
This function will zero-pad the image if the polygon is partially/fully outside of
the image.
Parameters
----------
image : (H,W) ndarray or (H,W,C) ndarray
The image from which to extract the pixels within the polygon.
Returns
-------
result : (H',W') ndarray or (H',W',C) ndarray
Pixels within the polygon. Zero-padded if the polygon is partially/fully
outside of the image.
"""
ia.do_assert(image.ndim in [2, 3])
if len(self.exterior) <= 2:
raise Exception("Polygon must be made up of at least 3 points to extract its area from an image.")
bb = self.to_bounding_box()
bb_area = bb.extract_from_image(image)
if self.is_out_of_image(image, fully=True, partly=False):
return bb_area
xx = self.xx_int
yy = self.yy_int
xx_mask = xx - np.min(xx)
yy_mask = yy - np.min(yy)
height_mask = np.max(yy_mask)
width_mask = np.max(xx_mask)
rr_face, cc_face = skimage.draw.polygon(yy_mask, xx_mask, shape=(height_mask, width_mask))
mask = np.zeros((height_mask, width_mask), dtype=np.bool)
mask[rr_face, cc_face] = True
if image.ndim == 3:
mask = np.tile(mask[:, :, np.newaxis], (1, 1, image.shape[2]))
return bb_area * mask | Extract the image pixels within the polygon.
This function will zero-pad the image if the polygon is partially/fully outside of
the image.
Parameters
----------
image : (H,W) ndarray or (H,W,C) ndarray
The image from which to extract the pixels within the polygon.
Returns
-------
result : (H',W') ndarray or (H',W',C) ndarray
Pixels within the polygon. Zero-padded if the polygon is partially/fully
outside of the image. | Below is the the instruction that describes the task:
### Input:
Extract the image pixels within the polygon.
This function will zero-pad the image if the polygon is partially/fully outside of
the image.
Parameters
----------
image : (H,W) ndarray or (H,W,C) ndarray
The image from which to extract the pixels within the polygon.
Returns
-------
result : (H',W') ndarray or (H',W',C) ndarray
Pixels within the polygon. Zero-padded if the polygon is partially/fully
outside of the image.
### Response:
def extract_from_image(self, image):
"""
Extract the image pixels within the polygon.
This function will zero-pad the image if the polygon is partially/fully outside of
the image.
Parameters
----------
image : (H,W) ndarray or (H,W,C) ndarray
The image from which to extract the pixels within the polygon.
Returns
-------
result : (H',W') ndarray or (H',W',C) ndarray
Pixels within the polygon. Zero-padded if the polygon is partially/fully
outside of the image.
"""
ia.do_assert(image.ndim in [2, 3])
if len(self.exterior) <= 2:
raise Exception("Polygon must be made up of at least 3 points to extract its area from an image.")
bb = self.to_bounding_box()
bb_area = bb.extract_from_image(image)
if self.is_out_of_image(image, fully=True, partly=False):
return bb_area
xx = self.xx_int
yy = self.yy_int
xx_mask = xx - np.min(xx)
yy_mask = yy - np.min(yy)
height_mask = np.max(yy_mask)
width_mask = np.max(xx_mask)
rr_face, cc_face = skimage.draw.polygon(yy_mask, xx_mask, shape=(height_mask, width_mask))
mask = np.zeros((height_mask, width_mask), dtype=np.bool)
mask[rr_face, cc_face] = True
if image.ndim == 3:
mask = np.tile(mask[:, :, np.newaxis], (1, 1, image.shape[2]))
return bb_area * mask |
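A standalone sketch of the masking step above: rasterise a polygon with skimage.draw.polygon and zero out the pixels that fall outside it (the toy image and corner coordinates are made up):

import numpy as np
import skimage.draw

image = np.arange(36, dtype=np.uint8).reshape(6, 6)
yy = np.array([1, 1, 4, 4])   # polygon corner rows
xx = np.array([1, 4, 4, 1])   # polygon corner columns
rr, cc = skimage.draw.polygon(yy, xx, shape=image.shape)
mask = np.zeros(image.shape, dtype=bool)
mask[rr, cc] = True
print(image * mask)           # everything outside the square polygon is zeroed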
def as_list(value):
"""clever string spliting:
.. code-block:: python
>>> print(as_list('value'))
['value']
>>> print(as_list('v1 v2'))
['v1', 'v2']
>>> print(as_list(None))
[]
>>> print(as_list(['v1']))
['v1']
"""
if isinstance(value, (list, tuple)):
return value
if not value:
return []
for c in '\n ':
if c in value:
value = value.split(c)
return [v.strip() for v in value if v.strip()]
return [value] | clever string spliting:
.. code-block:: python
>>> print(as_list('value'))
['value']
>>> print(as_list('v1 v2'))
['v1', 'v2']
>>> print(as_list(None))
[]
>>> print(as_list(['v1']))
['v1'] | Below is the the instruction that describes the task:
### Input:
clever string spliting:
.. code-block:: python
>>> print(as_list('value'))
['value']
>>> print(as_list('v1 v2'))
['v1', 'v2']
>>> print(as_list(None))
[]
>>> print(as_list(['v1']))
['v1']
### Response:
def as_list(value):
"""clever string spliting:
.. code-block:: python
>>> print(as_list('value'))
['value']
>>> print(as_list('v1 v2'))
['v1', 'v2']
>>> print(as_list(None))
[]
>>> print(as_list(['v1']))
['v1']
"""
if isinstance(value, (list, tuple)):
return value
if not value:
return []
for c in '\n ':
if c in value:
value = value.split(c)
return [v.strip() for v in value if v.strip()]
return [value] |
def _Rforce(self,R,z,phi=0.,t=0.):
"""
NAME:
Rforce
PURPOSE:
evaluate radial force K_R (R,z)
INPUT:
R - Cylindrical Galactocentric radius
z - vertical height
phi - azimuth
t - time
OUTPUT:
K_R (R,z)
HISTORY:
2010-04-16 - Written - Bovy (NYU)
DOCTEST:
"""
if True:
if isinstance(R,nu.ndarray):
if not isinstance(z,nu.ndarray): z= nu.ones_like(R)*z
out= nu.array([self._Rforce(rr,zz) for rr,zz in zip(R,z)])
return out
if (R > 16.*self._hr or R > 6.) and hasattr(self,'_kp'): return self._kp.Rforce(R,z)
if R < 1.: R4max= 1.
else: R4max= R
kmax= self._kmaxFac*self._beta
kmax= 2.*self._kmaxFac*self._beta
maxj1zeroIndx= nu.argmin((self._j1zeros-kmax*R4max)**2.) #close enough
ks= nu.array([0.5*(self._glx+1.)*self._dj1zeros[ii+1] + self._j1zeros[ii] for ii in range(maxj1zeroIndx)]).flatten()
weights= nu.array([self._glw*self._dj1zeros[ii+1] for ii in range(maxj1zeroIndx)]).flatten()
evalInt= ks*special.jn(1,ks*R)*(self._alpha**2.+ks**2.)**-1.5*(self._beta*nu.exp(-ks*nu.fabs(z))-ks*nu.exp(-self._beta*nu.fabs(z)))/(self._beta**2.-ks**2.)
return -2.*nu.pi*self._alpha*nu.sum(weights*evalInt) | NAME:
Rforce
PURPOSE:
evaluate radial force K_R (R,z)
INPUT:
R - Cylindrical Galactocentric radius
z - vertical height
phi - azimuth
t - time
OUTPUT:
K_R (R,z)
HISTORY:
2010-04-16 - Written - Bovy (NYU)
DOCTEST: | Below is the the instruction that describes the task:
### Input:
NAME:
Rforce
PURPOSE:
evaluate radial force K_R (R,z)
INPUT:
R - Cylindrical Galactocentric radius
z - vertical height
phi - azimuth
t - time
OUTPUT:
K_R (R,z)
HISTORY:
2010-04-16 - Written - Bovy (NYU)
DOCTEST:
### Response:
def _Rforce(self,R,z,phi=0.,t=0.):
"""
NAME:
Rforce
PURPOSE:
evaluate radial force K_R (R,z)
INPUT:
R - Cylindrical Galactocentric radius
z - vertical height
phi - azimuth
t - time
OUTPUT:
K_R (R,z)
HISTORY:
2010-04-16 - Written - Bovy (NYU)
DOCTEST:
"""
if True:
if isinstance(R,nu.ndarray):
if not isinstance(z,nu.ndarray): z= nu.ones_like(R)*z
out= nu.array([self._Rforce(rr,zz) for rr,zz in zip(R,z)])
return out
if (R > 16.*self._hr or R > 6.) and hasattr(self,'_kp'): return self._kp.Rforce(R,z)
if R < 1.: R4max= 1.
else: R4max= R
kmax= self._kmaxFac*self._beta
kmax= 2.*self._kmaxFac*self._beta
maxj1zeroIndx= nu.argmin((self._j1zeros-kmax*R4max)**2.) #close enough
ks= nu.array([0.5*(self._glx+1.)*self._dj1zeros[ii+1] + self._j1zeros[ii] for ii in range(maxj1zeroIndx)]).flatten()
weights= nu.array([self._glw*self._dj1zeros[ii+1] for ii in range(maxj1zeroIndx)]).flatten()
evalInt= ks*special.jn(1,ks*R)*(self._alpha**2.+ks**2.)**-1.5*(self._beta*nu.exp(-ks*nu.fabs(z))-ks*nu.exp(-self._beta*nu.fabs(z)))/(self._beta**2.-ks**2.)
return -2.*nu.pi*self._alpha*nu.sum(weights*evalInt) |
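The radial integral above is evaluated with Gauss-Legendre nodes mapped onto sub-intervals between Bessel-function zeros; a minimal standalone sketch of that quadrature pattern on a generic integrand (the node count and test integral are arbitrary, not part of the original code):

import numpy as np

glx, glw = np.polynomial.legendre.leggauss(20)   # nodes and weights on [-1, 1]

def gauss_legendre(f, a, b):
    x = 0.5 * (glx + 1.) * (b - a) + a           # map the nodes onto [a, b]
    return 0.5 * (b - a) * np.sum(glw * f(x))

print(gauss_legendre(np.sin, 0., np.pi))         # ~2.0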
def required(self, fn):
"""Request decorator. Forces authentication."""
@functools.wraps(fn)
def decorated(*args, **kwargs):
if (not self._check_auth()
# Don't try to force authentication if the request is part
# of the authentication process - otherwise we end up in a
# loop.
and request.blueprint != self.blueprint.name):
return redirect(url_for("%s.login" % self.blueprint.name,
next=request.url))
return fn(*args, **kwargs)
return decorated | Request decorator. Forces authentication. | Below is the the instruction that describes the task:
### Input:
Request decorator. Forces authentication.
### Response:
def required(self, fn):
"""Request decorator. Forces authentication."""
@functools.wraps(fn)
def decorated(*args, **kwargs):
if (not self._check_auth()
# Don't try to force authentication if the request is part
# of the authentication process - otherwise we end up in a
# loop.
and request.blueprint != self.blueprint.name):
return redirect(url_for("%s.login" % self.blueprint.name,
next=request.url))
return fn(*args, **kwargs)
return decorated |
def DocFileSuite(*paths, **kw):
"""A unittest suite for one or more doctest files.
The path to each doctest file is given as a string; the
interpretation of that string depends on the keyword argument
"module_relative".
A number of options may be provided as keyword arguments:
module_relative
If "module_relative" is True, then the given file paths are
interpreted as os-independent module-relative paths. By
default, these paths are relative to the calling module's
directory; but if the "package" argument is specified, then
they are relative to that package. To ensure os-independence,
"filename" should use "/" characters to separate path
segments, and may not be an absolute path (i.e., it may not
begin with "/").
If "module_relative" is False, then the given file paths are
interpreted as os-specific paths. These paths may be absolute
or relative (to the current working directory).
package
A Python package or the name of a Python package whose directory
should be used as the base directory for module relative paths.
If "package" is not specified, then the calling module's
directory is used as the base directory for module relative
filenames. It is an error to specify "package" if
"module_relative" is False.
setUp
A set-up function. This is called before running the
tests in each file. The setUp function will be passed a DocTest
object. The setUp function can access the test globals as the
globs attribute of the test passed.
tearDown
A tear-down function. This is called after running the
tests in each file. The tearDown function will be passed a DocTest
object. The tearDown function can access the test globals as the
globs attribute of the test passed.
globs
A dictionary containing initial global variables for the tests.
optionflags
A set of doctest option flags expressed as an integer.
parser
A DocTestParser (or subclass) that should be used to extract
tests from the files.
"""
suite = unittest.TestSuite()
# We do this here so that _normalize_module is called at the right
# level. If it were called in DocFileTest, then this function
# would be the caller and we might guess the package incorrectly.
if kw.get('module_relative', True):
kw['package'] = _normalize_module(kw.get('package'))
for path in paths:
suite.addTest(DocFileTest(path, **kw))
return suite | A unittest suite for one or more doctest files.
The path to each doctest file is given as a string; the
interpretation of that string depends on the keyword argument
"module_relative".
A number of options may be provided as keyword arguments:
module_relative
If "module_relative" is True, then the given file paths are
interpreted as os-independent module-relative paths. By
default, these paths are relative to the calling module's
directory; but if the "package" argument is specified, then
they are relative to that package. To ensure os-independence,
"filename" should use "/" characters to separate path
segments, and may not be an absolute path (i.e., it may not
begin with "/").
If "module_relative" is False, then the given file paths are
interpreted as os-specific paths. These paths may be absolute
or relative (to the current working directory).
package
A Python package or the name of a Python package whose directory
should be used as the base directory for module relative paths.
If "package" is not specified, then the calling module's
directory is used as the base directory for module relative
filenames. It is an error to specify "package" if
"module_relative" is False.
setUp
A set-up function. This is called before running the
tests in each file. The setUp function will be passed a DocTest
object. The setUp function can access the test globals as the
globs attribute of the test passed.
tearDown
A tear-down function. This is called after running the
tests in each file. The tearDown function will be passed a DocTest
object. The tearDown function can access the test globals as the
globs attribute of the test passed.
globs
A dictionary containing initial global variables for the tests.
optionflags
A set of doctest option flags expressed as an integer.
parser
A DocTestParser (or subclass) that should be used to extract
tests from the files. | Below is the the instruction that describes the task:
### Input:
A unittest suite for one or more doctest files.
The path to each doctest file is given as a string; the
interpretation of that string depends on the keyword argument
"module_relative".
A number of options may be provided as keyword arguments:
module_relative
If "module_relative" is True, then the given file paths are
interpreted as os-independent module-relative paths. By
default, these paths are relative to the calling module's
directory; but if the "package" argument is specified, then
they are relative to that package. To ensure os-independence,
"filename" should use "/" characters to separate path
segments, and may not be an absolute path (i.e., it may not
begin with "/").
If "module_relative" is False, then the given file paths are
interpreted as os-specific paths. These paths may be absolute
or relative (to the current working directory).
package
A Python package or the name of a Python package whose directory
should be used as the base directory for module relative paths.
If "package" is not specified, then the calling module's
directory is used as the base directory for module relative
filenames. It is an error to specify "package" if
"module_relative" is False.
setUp
A set-up function. This is called before running the
tests in each file. The setUp function will be passed a DocTest
object. The setUp function can access the test globals as the
globs attribute of the test passed.
tearDown
A tear-down function. This is called after running the
tests in each file. The tearDown function will be passed a DocTest
object. The tearDown function can access the test globals as the
globs attribute of the test passed.
globs
A dictionary containing initial global variables for the tests.
optionflags
A set of doctest option flags expressed as an integer.
parser
A DocTestParser (or subclass) that should be used to extract
tests from the files.
### Response:
def DocFileSuite(*paths, **kw):
"""A unittest suite for one or more doctest files.
The path to each doctest file is given as a string; the
interpretation of that string depends on the keyword argument
"module_relative".
A number of options may be provided as keyword arguments:
module_relative
If "module_relative" is True, then the given file paths are
interpreted as os-independent module-relative paths. By
default, these paths are relative to the calling module's
directory; but if the "package" argument is specified, then
they are relative to that package. To ensure os-independence,
"filename" should use "/" characters to separate path
segments, and may not be an absolute path (i.e., it may not
begin with "/").
If "module_relative" is False, then the given file paths are
interpreted as os-specific paths. These paths may be absolute
or relative (to the current working directory).
package
A Python package or the name of a Python package whose directory
should be used as the base directory for module relative paths.
If "package" is not specified, then the calling module's
directory is used as the base directory for module relative
filenames. It is an error to specify "package" if
"module_relative" is False.
setUp
A set-up function. This is called before running the
tests in each file. The setUp function will be passed a DocTest
object. The setUp function can access the test globals as the
globs attribute of the test passed.
tearDown
A tear-down function. This is called after running the
tests in each file. The tearDown function will be passed a DocTest
object. The tearDown function can access the test globals as the
globs attribute of the test passed.
globs
A dictionary containing initial global variables for the tests.
optionflags
A set of doctest option flags expressed as an integer.
parser
A DocTestParser (or subclass) that should be used to extract
tests from the files.
"""
suite = unittest.TestSuite()
# We do this here so that _normalize_module is called at the right
# level. If it were called in DocFileTest, then this function
# would be the caller and we might guess the package incorrectly.
if kw.get('module_relative', True):
kw['package'] = _normalize_module(kw.get('package'))
for path in paths:
suite.addTest(DocFileTest(path, **kw))
return suite |
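A hedged usage sketch: write a one-line doctest file into a temporary directory and run it through DocFileSuite with an os-specific path (module_relative=False). The standard-library doctest.DocFileSuite accepts the same keywords, so it stands in here.

import os
import tempfile
import unittest
import doctest

tmpdir = tempfile.mkdtemp()
path = os.path.join(tmpdir, 'example.txt')
with open(path, 'w') as fh:
    fh.write('>>> 1 + 1\n2\n')

suite = doctest.DocFileSuite(path, module_relative=False)
unittest.TextTestRunner(verbosity=0).run(suite)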
def parse_skip_comment(self):
"""Parse a definition comment for noqa skips."""
skipped_error_codes = ''
if self.current.kind == tk.COMMENT:
if 'noqa: ' in self.current.value:
skipped_error_codes = ''.join(
self.current.value.split('noqa: ')[1:])
elif self.current.value.startswith('# noqa'):
skipped_error_codes = 'all'
return skipped_error_codes | Parse a definition comment for noqa skips. | Below is the the instruction that describes the task:
### Input:
Parse a definition comment for noqa skips.
### Response:
def parse_skip_comment(self):
"""Parse a definition comment for noqa skips."""
skipped_error_codes = ''
if self.current.kind == tk.COMMENT:
if 'noqa: ' in self.current.value:
skipped_error_codes = ''.join(
self.current.value.split('noqa: ')[1:])
elif self.current.value.startswith('# noqa'):
skipped_error_codes = 'all'
return skipped_error_codes |
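A standalone sketch of the same 'noqa' parsing applied to raw comment strings, without the token stream used above:

def skip_codes(comment):
    skipped_error_codes = ''
    if 'noqa: ' in comment:
        skipped_error_codes = ''.join(comment.split('noqa: ')[1:])
    elif comment.startswith('# noqa'):
        skipped_error_codes = 'all'
    return skipped_error_codes

print(skip_codes('# noqa: D102,D103'))   # 'D102,D103'
print(skip_codes('# noqa'))              # 'all'
print(skip_codes('# ordinary comment'))  # ''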
def up(self, role, root_priority, root_times):
""" A port is started in the state of LISTEN. """
self.port_priority = root_priority
self.port_times = root_times
state = (PORT_STATE_LISTEN if self.config_enable
else PORT_STATE_DISABLE)
self._change_role(role)
self._change_status(state) | A port is started in the state of LISTEN. | Below is the the instruction that describes the task:
### Input:
A port is started in the state of LISTEN.
### Response:
def up(self, role, root_priority, root_times):
""" A port is started in the state of LISTEN. """
self.port_priority = root_priority
self.port_times = root_times
state = (PORT_STATE_LISTEN if self.config_enable
else PORT_STATE_DISABLE)
self._change_role(role)
self._change_status(state) |
def set_joystick(self, x, y, n):
"""
Receives joystick values from the SnakeBoard
x,y Coordinates
n Robot number to give it to
"""
self.robots[n].set_joystick(x, y) | Receives joystick values from the SnakeBoard
x,y Coordinates
n Robot number to give it to | Below is the the instruction that describes the task:
### Input:
Receives joystick values from the SnakeBoard
x,y Coordinates
n Robot number to give it to
### Response:
def set_joystick(self, x, y, n):
"""
Receives joystick values from the SnakeBoard
x,y Coordinates
n Robot number to give it to
"""
self.robots[n].set_joystick(x, y) |
def add_station(self, lv_station):
"""Adds a LV station to _station and grid graph if not already existing"""
if not isinstance(lv_station, LVStationDing0):
raise Exception('Given LV station is not a LVStationDing0 object.')
if self._station is None:
self._station = lv_station
self.graph_add_node(lv_station)
self.grid_district.lv_load_area.mv_grid_district.mv_grid.graph_add_node(lv_station) | Adds a LV station to _station and grid graph if not already existing | Below is the the instruction that describes the task:
### Input:
Adds a LV station to _station and grid graph if not already existing
### Response:
def add_station(self, lv_station):
"""Adds a LV station to _station and grid graph if not already existing"""
if not isinstance(lv_station, LVStationDing0):
raise Exception('Given LV station is not a LVStationDing0 object.')
if self._station is None:
self._station = lv_station
self.graph_add_node(lv_station)
self.grid_district.lv_load_area.mv_grid_district.mv_grid.graph_add_node(lv_station) |
def get_queryset(self):
'''
Parameters are already validated in the QuerySetPermission
'''
model_type = self.request.GET.get("type")
pk = self.request.GET.get("id")
content_type_model = ContentType.objects.get(model=model_type.lower())
Model = content_type_model.model_class()
model_obj = Model.objects.filter(id=pk).first()
return Comment.objects.filter_by_object(model_obj) | Parameters are already validated in the QuerySetPermission | Below is the the instruction that describes the task:
### Input:
Parameters are already validated in the QuerySetPermission
### Response:
def get_queryset(self):
'''
Parameters are already validated in the QuerySetPermission
'''
model_type = self.request.GET.get("type")
pk = self.request.GET.get("id")
content_type_model = ContentType.objects.get(model=model_type.lower())
Model = content_type_model.model_class()
model_obj = Model.objects.filter(id=pk).first()
return Comment.objects.filter_by_object(model_obj) |
def getColRowWithinChannel(self, ra, dec, ch, wantZeroOffset=False,
allowIllegalReturnValues=True):
"""Returns (col, row) given a (ra, dec) coordinate and channel number.
"""
# How close is a given ra/dec to the origin of a KeplerModule?
x, y = self.defaultMap.skyToPix(ra, dec)
kepModule = self.getChannelAsPolygon(ch)
r = np.array([x[0],y[0]]) - kepModule.polygon[0, :]
v1 = kepModule.polygon[1, :] - kepModule.polygon[0, :]
v3 = kepModule.polygon[3, :] - kepModule.polygon[0, :]
# Divide by |v|^2 because you're normalising v and r
colFrac = np.dot(r, v1) / np.linalg.norm(v1)**2
rowFrac = np.dot(r, v3) / np.linalg.norm(v3)**2
# This is where it gets a little hairy. The channel "corners"
# supplied to me actually represent points 5x5 pixels inside
# the science array. Which isn't what you'd expect.
# These magic numbers are the pixel numbers of the corner
# edges given in fov.txt
col = colFrac*(1106-17) + 17
row = rowFrac*(1038-25) + 25
if not allowIllegalReturnValues:
if not self.colRowIsOnSciencePixel(col, row):
msg = "Request position %7f %.7f " % (ra, dec)
msg += "does not lie on science pixels for channel %i " % (ch)
msg += "[ %.1f %.1f]" % (col, row)
raise ValueError(msg)
# Convert from zero-offset to one-offset coords
if not wantZeroOffset:
col += 1
row += 1
return (col, row) | Returns (col, row) given a (ra, dec) coordinate and channel number. | Below is the the instruction that describes the task:
### Input:
Returns (col, row) given a (ra, dec) coordinate and channel number.
### Response:
def getColRowWithinChannel(self, ra, dec, ch, wantZeroOffset=False,
allowIllegalReturnValues=True):
"""Returns (col, row) given a (ra, dec) coordinate and channel number.
"""
# How close is a given ra/dec to the origin of a KeplerModule?
x, y = self.defaultMap.skyToPix(ra, dec)
kepModule = self.getChannelAsPolygon(ch)
r = np.array([x[0],y[0]]) - kepModule.polygon[0, :]
v1 = kepModule.polygon[1, :] - kepModule.polygon[0, :]
v3 = kepModule.polygon[3, :] - kepModule.polygon[0, :]
# Divide by |v|^2 because you're normalising v and r
colFrac = np.dot(r, v1) / np.linalg.norm(v1)**2
rowFrac = np.dot(r, v3) / np.linalg.norm(v3)**2
# This is where it gets a little hairy. The channel "corners"
# supplied to me actually represent points 5x5 pixels inside
# the science array. Which isn't what you'd expect.
# These magic numbers are the pixel numbers of the corner
# edges given in fov.txt
col = colFrac*(1106-17) + 17
row = rowFrac*(1038-25) + 25
if not allowIllegalReturnValues:
if not self.colRowIsOnSciencePixel(col, row):
msg = "Request position %7f %.7f " % (ra, dec)
msg += "does not lie on science pixels for channel %i " % (ch)
msg += "[ %.1f %.1f]" % (col, row)
raise ValueError(msg)
# Convert from zero-offset to one-offset coords
if not wantZeroOffset:
col += 1
row += 1
return (col, row) |
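The column/row fractions above come from projecting the offset vector onto the two channel edge vectors and dividing by their squared lengths; a small numpy sketch of that projection with made-up corner coordinates:

import numpy as np

origin = np.array([0., 0.])
v1 = np.array([10., 0.])            # edge along the column direction
v3 = np.array([0., 5.])             # edge along the row direction
point = np.array([2.5, 4.0])

r = point - origin
col_frac = np.dot(r, v1) / np.linalg.norm(v1) ** 2
row_frac = np.dot(r, v3) / np.linalg.norm(v3) ** 2
print(col_frac, row_frac)           # 0.25 0.8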
def to_text(path):
"""Wraps Tesseract OCR.
Parameters
----------
path : str
path of electronic invoice in JPG or PNG format
Returns
-------
extracted_str : str
returns extracted text from image in JPG or PNG format
"""
import subprocess
from distutils import spawn
# Check for dependencies. Needs Tesseract and Imagemagick installed.
if not spawn.find_executable('tesseract'):
raise EnvironmentError('tesseract not installed.')
if not spawn.find_executable('convert'):
raise EnvironmentError('imagemagick not installed.')
# convert = "convert -density 350 %s -depth 8 tiff:-" % (path)
convert = ['convert', '-density', '350', path, '-depth', '8', 'png:-']
p1 = subprocess.Popen(convert, stdout=subprocess.PIPE)
tess = ['tesseract', 'stdin', 'stdout']
p2 = subprocess.Popen(tess, stdin=p1.stdout, stdout=subprocess.PIPE)
out, err = p2.communicate()
extracted_str = out
return extracted_str | Wraps Tesseract OCR.
Parameters
----------
path : str
path of electronic invoice in JPG or PNG format
Returns
-------
extracted_str : str
returns extracted text from image in JPG or PNG format | Below is the the instruction that describes the task:
### Input:
Wraps Tesseract OCR.
Parameters
----------
path : str
path of electronic invoice in JPG or PNG format
Returns
-------
extracted_str : str
returns extracted text from image in JPG or PNG format
### Response:
def to_text(path):
"""Wraps Tesseract OCR.
Parameters
----------
path : str
path of electronic invoice in JPG or PNG format
Returns
-------
extracted_str : str
returns extracted text from image in JPG or PNG format
"""
import subprocess
from distutils import spawn
# Check for dependencies. Needs Tesseract and Imagemagick installed.
if not spawn.find_executable('tesseract'):
raise EnvironmentError('tesseract not installed.')
if not spawn.find_executable('convert'):
raise EnvironmentError('imagemagick not installed.')
# convert = "convert -density 350 %s -depth 8 tiff:-" % (path)
convert = ['convert', '-density', '350', path, '-depth', '8', 'png:-']
p1 = subprocess.Popen(convert, stdout=subprocess.PIPE)
tess = ['tesseract', 'stdin', 'stdout']
p2 = subprocess.Popen(tess, stdin=p1.stdout, stdout=subprocess.PIPE)
out, err = p2.communicate()
extracted_str = out
return extracted_str |
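A generic sketch of the same Popen-to-Popen piping pattern, assuming a POSIX system where 'echo' and 'tr' stand in for imagemagick and tesseract:

import subprocess

p1 = subprocess.Popen(['echo', 'hello invoice'], stdout=subprocess.PIPE)
p2 = subprocess.Popen(['tr', 'a-z', 'A-Z'], stdin=p1.stdout, stdout=subprocess.PIPE)
out, err = p2.communicate()
print(out)                          # b'HELLO INVOICE\n'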
def cells(self) -> Generator[Tuple[int, int], None, None]:
"""Generate cells in span."""
yield from itertools.product(
range(self.row_start, self.row_end),
range(self.column_start, self.column_end)
) | Generate cells in span. | Below is the the instruction that describes the task:
### Input:
Generate cells in span.
### Response:
def cells(self) -> Generator[Tuple[int, int], None, None]:
"""Generate cells in span."""
yield from itertools.product(
range(self.row_start, self.row_end),
range(self.column_start, self.column_end)
) |
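A tiny standalone sketch of the same itertools.product enumeration for a span covering rows 0-1 and columns 2-3 (bounds are half-open, as above):

import itertools

row_start, row_end = 0, 2
column_start, column_end = 2, 4
print(list(itertools.product(range(row_start, row_end),
                             range(column_start, column_end))))
# [(0, 2), (0, 3), (1, 2), (1, 3)]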
def contract(self, jobs, result):
"""
Perform a contract on a number of jobs and block until a result is
retrieved for each job.
"""
for j in jobs:
WorkerPool.put(self, j)
r = []
for i in xrange(len(jobs)):
r.append(result.get())
return r | Perform a contract on a number of jobs and block until a result is
retrieved for each job. | Below is the the instruction that describes the task:
### Input:
Perform a contract on a number of jobs and block until a result is
retrieved for each job.
### Response:
def contract(self, jobs, result):
"""
Perform a contract on a number of jobs and block until a result is
retrieved for each job.
"""
for j in jobs:
WorkerPool.put(self, j)
r = []
for i in xrange(len(jobs)):
r.append(result.get())
return r |
def get_authservers(self, domainid, page=None):
"""Get Authentication servers"""
opts = {}
if page:
opts['page'] = page
return self.api_call(
ENDPOINTS['authservers']['list'],
dict(domainid=domainid), **opts) | Get Authentication servers | Below is the the instruction that describes the task:
### Input:
Get Authentication servers
### Response:
def get_authservers(self, domainid, page=None):
"""Get Authentication servers"""
opts = {}
if page:
opts['page'] = page
return self.api_call(
ENDPOINTS['authservers']['list'],
dict(domainid=domainid), **opts) |
def create_ref(self, ref, sha):
"""Create a reference in this repository.
:param str ref: (required), fully qualified name of the reference,
e.g. ``refs/heads/master``. If it doesn't start with ``refs`` and
contain at least two slashes, GitHub's API will reject it.
:param str sha: (required), SHA1 value to set the reference to
:returns: :class:`Reference <github3.git.Reference>` if successful
else None
"""
json = None
if ref and ref.count('/') >= 2 and sha:
data = {'ref': ref, 'sha': sha}
url = self._build_url('git', 'refs', base_url=self._api)
json = self._json(self._post(url, data=data), 201)
return Reference(json, self) if json else None | Create a reference in this repository.
:param str ref: (required), fully qualified name of the reference,
e.g. ``refs/heads/master``. If it doesn't start with ``refs`` and
contain at least two slashes, GitHub's API will reject it.
:param str sha: (required), SHA1 value to set the reference to
:returns: :class:`Reference <github3.git.Reference>` if successful
else None | Below is the the instruction that describes the task:
### Input:
Create a reference in this repository.
:param str ref: (required), fully qualified name of the reference,
e.g. ``refs/heads/master``. If it doesn't start with ``refs`` and
contain at least two slashes, GitHub's API will reject it.
:param str sha: (required), SHA1 value to set the reference to
:returns: :class:`Reference <github3.git.Reference>` if successful
else None
### Response:
def create_ref(self, ref, sha):
"""Create a reference in this repository.
:param str ref: (required), fully qualified name of the reference,
e.g. ``refs/heads/master``. If it doesn't start with ``refs`` and
contain at least two slashes, GitHub's API will reject it.
:param str sha: (required), SHA1 value to set the reference to
:returns: :class:`Reference <github3.git.Reference>` if successful
else None
"""
json = None
if ref and ref.count('/') >= 2 and sha:
data = {'ref': ref, 'sha': sha}
url = self._build_url('git', 'refs', base_url=self._api)
json = self._json(self._post(url, data=data), 201)
return Reference(json, self) if json else None |
def ping():
'''
Is the marathon api responding?
'''
try:
response = salt.utils.http.query(
"{0}/ping".format(CONFIG[CONFIG_BASE_URL]),
decode_type='plain',
decode=True,
)
log.debug(
'marathon.info returned successfully: %s',
response,
)
if 'text' in response and response['text'].strip() == 'pong':
return True
except Exception as ex:
log.error(
'error calling marathon.info with base_url %s: %s',
CONFIG[CONFIG_BASE_URL],
ex,
)
return False | Is the marathon api responding? | Below is the the instruction that describes the task:
### Input:
Is the marathon api responding?
### Response:
def ping():
'''
Is the marathon api responding?
'''
try:
response = salt.utils.http.query(
"{0}/ping".format(CONFIG[CONFIG_BASE_URL]),
decode_type='plain',
decode=True,
)
log.debug(
'marathon.info returned successfully: %s',
response,
)
if 'text' in response and response['text'].strip() == 'pong':
return True
except Exception as ex:
log.error(
'error calling marathon.info with base_url %s: %s',
CONFIG[CONFIG_BASE_URL],
ex,
)
return False |