code (string, lengths 75 to 104k) | docstring (string, lengths 1 to 46.9k) | text (string, lengths 164 to 112k) |
---|---|---|
def listMethods(self, pid, sdefpid=None):
'''List available service methods.
:param pid: object pid
:param sDefPid: service definition pid
:rtype: :class:`requests.models.Response`
'''
# /objects/{pid}/methods ? [format, datetime]
# /objects/{pid}/methods/{sdefpid} ? [format, datetime]
## NOTE: getting an error when sdefpid is specified; fedora issue?
uri = 'objects/%(pid)s/methods' % {'pid': pid}
if sdefpid:
uri += '/' + sdefpid
return self.get(uri, params=self.format_xml) | List available service methods.
:param pid: object pid
:param sDefPid: service definition pid
:rtype: :class:`requests.models.Response` | Below is the instruction that describes the task:
### Input:
List available service methods.
:param pid: object pid
:param sDefPid: service definition pid
:rtype: :class:`requests.models.Response`
### Response:
def listMethods(self, pid, sdefpid=None):
'''List available service methods.
:param pid: object pid
:param sDefPid: service definition pid
:rtype: :class:`requests.models.Response`
'''
# /objects/{pid}/methods ? [format, datetime]
# /objects/{pid}/methods/{sdefpid} ? [format, datetime]
## NOTE: getting an error when sdefpid is specified; fedora issue?
uri = 'objects/%(pid)s/methods' % {'pid': pid}
if sdefpid:
uri += '/' + sdefpid
return self.get(uri, params=self.format_xml) |
def permission_required(perms, login_url=None, raise_exception=False, redirect_field_name=REDIRECT_FIELD_NAME):
"""
this is analogous to django's builtin ``permission_required`` decorator, but
improved to check per-slug ACLRules and default permissions for
anonymous and logged in users.
If there is a rule affecting a slug, the user needs to be part of the
rule's allowed users. If there isn't a matching rule, default permissions
apply.
"""
def decorator(view_func):
@wraps(view_func, assigned=available_attrs(view_func))
def _wrapped_view(request, *args, **kwargs):
if check_perms(perms, request.user, kwargs['slug'], raise_exception=raise_exception):
return view_func(request, *args, **kwargs)
if is_authenticated(request.user):
if WALIKI_RENDER_403:
return render(request, 'waliki/403.html', kwargs, status=403)
else:
raise PermissionDenied
path = request.build_absolute_uri()
# urlparse chokes on lazy objects in Python 3, force to str
resolved_login_url = force_str(
resolve_url(login_url or settings.LOGIN_URL))
# If the login url is the same scheme and net location then just
# use the path as the "next" url.
login_scheme, login_netloc = urlparse(resolved_login_url)[:2]
current_scheme, current_netloc = urlparse(path)[:2]
if ((not login_scheme or login_scheme == current_scheme) and
(not login_netloc or login_netloc == current_netloc)):
path = request.get_full_path()
from django.contrib.auth.views import redirect_to_login
return redirect_to_login(
path, resolved_login_url, redirect_field_name)
return _wrapped_view
return decorator | this is analogous to django's builtin ``permission_required`` decorator, but
improved to check per-slug ACLRules and default permissions for
anonymous and logged in users.
If there is a rule affecting a slug, the user needs to be part of the
rule's allowed users. If there isn't a matching rule, default permissions
apply. | Below is the instruction that describes the task:
### Input:
this is analogous to django's builtin ``permission_required`` decorator, but
improved to check per-slug ACLRules and default permissions for
anonymous and logged in users.
If there is a rule affecting a slug, the user needs to be part of the
rule's allowed users. If there isn't a matching rule, default permissions
apply.
### Response:
def permission_required(perms, login_url=None, raise_exception=False, redirect_field_name=REDIRECT_FIELD_NAME):
"""
this is analogous to django's builtin ``permission_required`` decorator, but
improved to check per-slug ACLRules and default permissions for
anonymous and logged in users.
If there is a rule affecting a slug, the user needs to be part of the
rule's allowed users. If there isn't a matching rule, default permissions
apply.
"""
def decorator(view_func):
@wraps(view_func, assigned=available_attrs(view_func))
def _wrapped_view(request, *args, **kwargs):
if check_perms(perms, request.user, kwargs['slug'], raise_exception=raise_exception):
return view_func(request, *args, **kwargs)
if is_authenticated(request.user):
if WALIKI_RENDER_403:
return render(request, 'waliki/403.html', kwargs, status=403)
else:
raise PermissionDenied
path = request.build_absolute_uri()
# urlparse chokes on lazy objects in Python 3, force to str
resolved_login_url = force_str(
resolve_url(login_url or settings.LOGIN_URL))
# If the login url is the same scheme and net location then just
# use the path as the "next" url.
login_scheme, login_netloc = urlparse(resolved_login_url)[:2]
current_scheme, current_netloc = urlparse(path)[:2]
if ((not login_scheme or login_scheme == current_scheme) and
(not login_netloc or login_netloc == current_netloc)):
path = request.get_full_path()
from django.contrib.auth.views import redirect_to_login
return redirect_to_login(
path, resolved_login_url, redirect_field_name)
return _wrapped_view
return decorator |
def str2listtuple(self, string_message):
"Covert a string that is ready to be sent to graphite into a tuple"
if type(string_message).__name__ not in ('str', 'unicode'):
raise TypeError("Must provide a string or unicode")
if not string_message.endswith('\n'):
string_message += "\n"
tpl_list = []
for line in string_message.split('\n'):
line = line.strip()
if not line:
continue
path, metric, timestamp = (None, None, None)
try:
(path, metric, timestamp) = line.split()
except ValueError:
raise ValueError(
"message must contain - metric_name, value and timestamp '%s'"
% line)
try:
timestamp = float(timestamp)
except ValueError:
raise ValueError("Timestamp must be float or int")
tpl_list.append((path, (timestamp, metric)))
if len(tpl_list) == 0:
raise GraphiteSendException("No messages to send")
payload = pickle.dumps(tpl_list)
header = struct.pack("!L", len(payload))
message = header + payload
return message | Convert a string that is ready to be sent to graphite into a tuple | Below is the instruction that describes the task:
### Input:
Convert a string that is ready to be sent to graphite into a tuple
### Response:
def str2listtuple(self, string_message):
"Covert a string that is ready to be sent to graphite into a tuple"
if type(string_message).__name__ not in ('str', 'unicode'):
raise TypeError("Must provide a string or unicode")
if not string_message.endswith('\n'):
string_message += "\n"
tpl_list = []
for line in string_message.split('\n'):
line = line.strip()
if not line:
continue
path, metric, timestamp = (None, None, None)
try:
(path, metric, timestamp) = line.split()
except ValueError:
raise ValueError(
"message must contain - metric_name, value and timestamp '%s'"
% line)
try:
timestamp = float(timestamp)
except ValueError:
raise ValueError("Timestamp must be float or int")
tpl_list.append((path, (timestamp, metric)))
if len(tpl_list) == 0:
raise GraphiteSendException("No messages to send")
payload = pickle.dumps(tpl_list)
header = struct.pack("!L", len(payload))
message = header + payload
return message |
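A minimal sketch of how the length-prefixed pickle message built above could be unpacked on the receiving side (assumes the same 4-byte network-order header; the helper name is illustrative):

```python
import pickle
import struct

def unpack_graphite_message(message):
    # The first 4 bytes are a network-order unsigned length header.
    (length,) = struct.unpack("!L", message[:4])
    payload = message[4:4 + length]
    # The payload is a pickled list of (path, (timestamp, value)) tuples.
    return pickle.loads(payload)
```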
def calculate_heartbeats(shb, chb):
"""
Given a heartbeat string from the server, and a heartbeat tuple from the client,
calculate what the actual heartbeat settings should be.
:param (str,str) shb: server heartbeat numbers
:param (int,int) chb: client heartbeat numbers
:rtype: (int,int)
"""
(sx, sy) = shb
(cx, cy) = chb
x = 0
y = 0
if cx != 0 and sy != '0':
x = max(cx, int(sy))
if cy != 0 and sx != '0':
y = max(cy, int(sx))
return x, y | Given a heartbeat string from the server, and a heartbeat tuple from the client,
calculate what the actual heartbeat settings should be.
:param (str,str) shb: server heartbeat numbers
:param (int,int) chb: client heartbeat numbers
:rtype: (int,int) | Below is the instruction that describes the task:
### Input:
Given a heartbeat string from the server, and a heartbeat tuple from the client,
calculate what the actual heartbeat settings should be.
:param (str,str) shb: server heartbeat numbers
:param (int,int) chb: client heartbeat numbers
:rtype: (int,int)
### Response:
def calculate_heartbeats(shb, chb):
"""
Given a heartbeat string from the server, and a heartbeat tuple from the client,
calculate what the actual heartbeat settings should be.
:param (str,str) shb: server heartbeat numbers
:param (int,int) chb: client heartbeat numbers
:rtype: (int,int)
"""
(sx, sy) = shb
(cx, cy) = chb
x = 0
y = 0
if cx != 0 and sy != '0':
x = max(cx, int(sy))
if cy != 0 and sx != '0':
y = max(cy, int(sx))
return x, y |
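A brief usage sketch of the negotiation rule above (assuming calculate_heartbeats is in scope; the values are illustrative milliseconds):

```python
server_hb = ('3000', '5000')   # (sx, sy) as strings from the server frame
client_hb = (4000, 2000)       # (cx, cy) as ints from the client

# Each direction is enabled only if both sides want it, and the slower
# (larger) interval wins: here the result is (5000, 3000).
print(calculate_heartbeats(server_hb, client_hb))
```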
def get_unique_figname(dirname, root, ext):
"""
Append a number to "root" to form a filename that does not already exist
in "dirname".
"""
i = 1
figname = root + '_%d' % i + ext
while True:
if osp.exists(osp.join(dirname, figname)):
i += 1
figname = root + '_%d' % i + ext
else:
return osp.join(dirname, figname) | Append a number to "root" to form a filename that does not already exist
in "dirname". | Below is the the instruction that describes the task:
### Input:
Append a number to "root" to form a filename that does not already exist
in "dirname".
### Response:
def get_unique_figname(dirname, root, ext):
"""
Append a number to "root" to form a filename that does not already exist
in "dirname".
"""
i = 1
figname = root + '_%d' % i + ext
while True:
if osp.exists(osp.join(dirname, figname)):
i += 1
figname = root + '_%d' % i + ext
else:
return osp.join(dirname, figname) |
def virtualchain_set_opfields( op, **fields ):
"""
Pass along virtualchain-reserved fields to a virtualchain operation.
This layer of indirection is meant to help with future compatibility,
so virtualchain implementations do not try to set operation fields
directly.
"""
# warn about unsupported fields
for f in fields.keys():
if f not in indexer.RESERVED_KEYS:
log.warning("Unsupported virtualchain field '%s'" % f)
# propagate reserved fields
for f in fields.keys():
if f in indexer.RESERVED_KEYS:
op[f] = fields[f]
return op | Pass along virtualchain-reserved fields to a virtualchain operation.
This layer of indirection is meant to help with future compatibility,
so virtualchain implementations do not try to set operation fields
directly. | Below is the instruction that describes the task:
### Input:
Pass along virtualchain-reserved fields to a virtualchain operation.
This layer of indirection is meant to help with future compatibility,
so virtualchain implementations do not try to set operation fields
directly.
### Response:
def virtualchain_set_opfields( op, **fields ):
"""
Pass along virtualchain-reserved fields to a virtualchain operation.
This layer of indirection is meant to help with future compatibility,
so virtualchain implementations do not try to set operation fields
directly.
"""
# warn about unsupported fields
for f in fields.keys():
if f not in indexer.RESERVED_KEYS:
log.warning("Unsupported virtualchain field '%s'" % f)
# propagate reserved fields
for f in fields.keys():
if f in indexer.RESERVED_KEYS:
op[f] = fields[f]
return op |
def file(cls, uri_or_path):
"""
Given either a URI like s3://bucket/path.txt or a path like /path.txt,
return a file object for it.
"""
uri = urlparse(uri_or_path)
if not uri.scheme:
# Just a normal path
return open(uri_or_path, 'rb')
else:
it = cls(uri_or_path).download_iter(uri.path.lstrip('/'), skip_hash=True)
if not it:
raise ValueError('{0} not found'.format(uri_or_path))
tmp = tempfile.TemporaryFile()
for chunk in it:
tmp.write(chunk)
tmp.seek(0, 0)
return tmp | Given either a URI like s3://bucket/path.txt or a path like /path.txt,
return a file object for it. | Below is the instruction that describes the task:
### Input:
Given either a URI like s3://bucket/path.txt or a path like /path.txt,
return a file object for it.
### Response:
def file(cls, uri_or_path):
"""
Given either a URI like s3://bucket/path.txt or a path like /path.txt,
return a file object for it.
"""
uri = urlparse(uri_or_path)
if not uri.scheme:
# Just a normal path
return open(uri_or_path, 'rb')
else:
it = cls(uri_or_path).download_iter(uri.path.lstrip('/'), skip_hash=True)
if not it:
raise ValueError('{0} not found'.format(uri_or_path))
tmp = tempfile.TemporaryFile()
for chunk in it:
tmp.write(chunk)
tmp.seek(0, 0)
return tmp |
def start(self, rootJob):
"""
Invoke a Toil workflow with the given job as the root for an initial run. This method
must be called in the body of a ``with Toil(...) as toil:`` statement. This method should
not be called more than once for a workflow that has not finished.
:param toil.job.Job rootJob: The root job of the workflow
:return: The root job's return value
"""
self._assertContextManagerUsed()
self.writePIDFile()
if self.config.restart:
raise ToilRestartException('A Toil workflow can only be started once. Use '
'Toil.restart() to resume it.')
self._batchSystem = self.createBatchSystem(self.config)
self._setupAutoDeployment(rootJob.getUserScript())
try:
self._setBatchSystemEnvVars()
self._serialiseEnv()
self._cacheAllJobs()
# Pickle the promised return value of the root job, then write the pickled promise to
# a shared file, where we can find and unpickle it at the end of the workflow.
# Unpickling the promise will automatically substitute the promise for the actual
# return value.
with self._jobStore.writeSharedFileStream('rootJobReturnValue') as fH:
rootJob.prepareForPromiseRegistration(self._jobStore)
promise = rootJob.rv()
pickle.dump(promise, fH, protocol=pickle.HIGHEST_PROTOCOL)
# Setup the first wrapper and cache it
rootJobGraph = rootJob._serialiseFirstJob(self._jobStore)
self._cacheJob(rootJobGraph)
self._setProvisioner()
return self._runMainLoop(rootJobGraph)
finally:
self._shutdownBatchSystem() | Invoke a Toil workflow with the given job as the root for an initial run. This method
must be called in the body of a ``with Toil(...) as toil:`` statement. This method should
not be called more than once for a workflow that has not finished.
:param toil.job.Job rootJob: The root job of the workflow
:return: The root job's return value | Below is the instruction that describes the task:
### Input:
Invoke a Toil workflow with the given job as the root for an initial run. This method
must be called in the body of a ``with Toil(...) as toil:`` statement. This method should
not be called more than once for a workflow that has not finished.
:param toil.job.Job rootJob: The root job of the workflow
:return: The root job's return value
### Response:
def start(self, rootJob):
"""
Invoke a Toil workflow with the given job as the root for an initial run. This method
must be called in the body of a ``with Toil(...) as toil:`` statement. This method should
not be called more than once for a workflow that has not finished.
:param toil.job.Job rootJob: The root job of the workflow
:return: The root job's return value
"""
self._assertContextManagerUsed()
self.writePIDFile()
if self.config.restart:
raise ToilRestartException('A Toil workflow can only be started once. Use '
'Toil.restart() to resume it.')
self._batchSystem = self.createBatchSystem(self.config)
self._setupAutoDeployment(rootJob.getUserScript())
try:
self._setBatchSystemEnvVars()
self._serialiseEnv()
self._cacheAllJobs()
# Pickle the promised return value of the root job, then write the pickled promise to
# a shared file, where we can find and unpickle it at the end of the workflow.
# Unpickling the promise will automatically substitute the promise for the actual
# return value.
with self._jobStore.writeSharedFileStream('rootJobReturnValue') as fH:
rootJob.prepareForPromiseRegistration(self._jobStore)
promise = rootJob.rv()
pickle.dump(promise, fH, protocol=pickle.HIGHEST_PROTOCOL)
# Setup the first wrapper and cache it
rootJobGraph = rootJob._serialiseFirstJob(self._jobStore)
self._cacheJob(rootJobGraph)
self._setProvisioner()
return self._runMainLoop(rootJobGraph)
finally:
self._shutdownBatchSystem() |
def get_last_version(skip_tags=None) -> Optional[str]:
"""
Return last version from repo tags.
:return: A string containing the version number.
"""
debug('get_last_version skip_tags=', skip_tags)
check_repo()
skip_tags = skip_tags or []
def version_finder(tag):
if isinstance(tag.commit, TagObject):
return tag.tag.tagged_date
return tag.commit.committed_date
for i in sorted(repo.tags, reverse=True, key=version_finder):
if re.match(r'v\d+\.\d+\.\d+', i.name):
if i.name in skip_tags:
continue
return i.name[1:]
return None | Return last version from repo tags.
:return: A string containing the version number. | Below is the instruction that describes the task:
### Input:
Return last version from repo tags.
:return: A string containing the version number.
### Response:
def get_last_version(skip_tags=None) -> Optional[str]:
"""
Return last version from repo tags.
:return: A string containing the version number.
"""
debug('get_last_version skip_tags=', skip_tags)
check_repo()
skip_tags = skip_tags or []
def version_finder(tag):
if isinstance(tag.commit, TagObject):
return tag.tag.tagged_date
return tag.commit.committed_date
for i in sorted(repo.tags, reverse=True, key=version_finder):
if re.match(r'v\d+\.\d+\.\d+', i.name):
if i.name in skip_tags:
continue
return i.name[1:]
return None |
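For illustration, the tag filtering used by get_last_version can be exercised on its own (plain strings instead of git tag objects; the names are made up):

```python
import re

tags = ['v1.2.3', 'v1.2.4', 'not-a-release', 'v2.0.0']
skip = ['v2.0.0']

# Keep only 'vX.Y.Z' tags, drop skipped ones, strip the leading 'v'.
versions = [t[1:] for t in tags
            if re.match(r'v\d+\.\d+\.\d+', t) and t not in skip]
print(versions)  # ['1.2.3', '1.2.4']
```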
def t_escaped_CARRIAGE_RETURN_CHAR(self, t):
r'\x72' # 'r'
t.lexer.pop_state()
t.value = unichr(0x000d)
return t | r'\x72 | Below is the instruction that describes the task:
### Input:
r'\x72
### Response:
def t_escaped_CARRIAGE_RETURN_CHAR(self, t):
r'\x72' # 'r'
t.lexer.pop_state()
t.value = unichr(0x000d)
return t |
def call(self, request=None, *args, **kwargs):
"""
Calls multiple times - with retry.
:param request:
:return: response
"""
if request is not None:
self.request = request
retry = self.request.configuration.retry
if not isinstance(retry, SimpleRetry):
raise Error('Currently only the fast retry strategy is supported')
last_exception = None
for i in range(0, retry.max_retry):
try:
if i > 0:
retry.sleep_jitter()
self.call_once()
return self.response
except Exception as ex:
last_exception = RequestFailed(message='Request failed', cause=ex)
logger.debug("Request %d failed, exception: %s" % (i, ex))
# Last exception - throw it here to have a stack
if i+1 == retry.max_retry:
raise last_exception
raise last_exception | Calls multiple times - with retry.
:param request:
:return: response | Below is the instruction that describes the task:
### Input:
Calls multiple times - with retry.
:param request:
:return: response
### Response:
def call(self, request=None, *args, **kwargs):
"""
Calls multiple times - with retry.
:param request:
:return: response
"""
if request is not None:
self.request = request
retry = self.request.configuration.retry
if not isinstance(retry, SimpleRetry):
raise Error('Currently only the fast retry strategy is supported')
last_exception = None
for i in range(0, retry.max_retry):
try:
if i > 0:
retry.sleep_jitter()
self.call_once()
return self.response
except Exception as ex:
last_exception = RequestFailed(message='Request failed', cause=ex)
logger.debug("Request %d failed, exception: %s" % (i, ex))
# Last exception - throw it here to have a stack
if i+1 == retry.max_retry:
raise last_exception
raise last_exception |
def read(self, domain, type_name, search_command, body=None):
"""Read entry in ThreatConnect Data Store
Args:
domain (string): One of 'local', 'organization', or 'system'.
type_name (string): This is a free form index type name. The ThreatConnect API will use
this resource verbatim.
search_command (string): Search command to pass to ES.
body (str): JSON body
"""
return self._request(domain, type_name, search_command, 'GET', body) | Read entry in ThreatConnect Data Store
Args:
domain (string): One of 'local', 'organization', or 'system'.
type_name (string): This is a free form index type name. The ThreatConnect API will use
this resource verbatim.
search_command (string): Search command to pass to ES.
body (str): JSON body | Below is the instruction that describes the task:
### Input:
Read entry in ThreatConnect Data Store
Args:
domain (string): One of 'local', 'organization', or 'system'.
type_name (string): This is a free form index type name. The ThreatConnect API will use
this resource verbatim.
search_command (string): Search command to pass to ES.
body (str): JSON body
### Response:
def read(self, domain, type_name, search_command, body=None):
"""Read entry in ThreatConnect Data Store
Args:
domain (string): One of 'local', 'organization', or 'system'.
type_name (string): This is a free form index type name. The ThreatConnect API will use
this resource verbatim.
search_command (string): Search command to pass to ES.
body (str): JSON body
"""
return self._request(domain, type_name, search_command, 'GET', body) |
def validate_complex_list(prop, value, xpath_map=None):
""" Default validation for Attribute Details data structure """
if value is not None:
validate_type(prop, value, (dict, list))
if prop in _complex_definitions:
complex_keys = _complex_definitions[prop]
else:
complex_keys = {} if xpath_map is None else xpath_map
for idx, complex_struct in enumerate(wrap_value(value)):
cs_idx = prop + '[' + str(idx) + ']'
validate_type(cs_idx, complex_struct, dict)
for cs_prop, cs_val in iteritems(complex_struct):
cs_key = '.'.join((cs_idx, cs_prop))
if cs_prop not in complex_keys:
_validation_error(prop, None, value, ('keys: {0}'.format(','.join(complex_keys))))
if not isinstance(cs_val, list):
validate_type(cs_key, cs_val, (string_types, list))
else:
for list_idx, list_val in enumerate(cs_val):
list_prop = cs_key + '[' + str(list_idx) + ']'
validate_type(list_prop, list_val, string_types) | Default validation for Attribute Details data structure | Below is the instruction that describes the task:
### Input:
Default validation for Attribute Details data structure
### Response:
def validate_complex_list(prop, value, xpath_map=None):
""" Default validation for Attribute Details data structure """
if value is not None:
validate_type(prop, value, (dict, list))
if prop in _complex_definitions:
complex_keys = _complex_definitions[prop]
else:
complex_keys = {} if xpath_map is None else xpath_map
for idx, complex_struct in enumerate(wrap_value(value)):
cs_idx = prop + '[' + str(idx) + ']'
validate_type(cs_idx, complex_struct, dict)
for cs_prop, cs_val in iteritems(complex_struct):
cs_key = '.'.join((cs_idx, cs_prop))
if cs_prop not in complex_keys:
_validation_error(prop, None, value, ('keys: {0}'.format(','.join(complex_keys))))
if not isinstance(cs_val, list):
validate_type(cs_key, cs_val, (string_types, list))
else:
for list_idx, list_val in enumerate(cs_val):
list_prop = cs_key + '[' + str(list_idx) + ']'
validate_type(list_prop, list_val, string_types) |
def add_predicate(self, predicate):
"""
Add a new predicate callback to this handler.
"""
_predicate = predicate
if isinstance(predicate, partial):
_predicate = 'partial(%s, %s, %s)' % (predicate.func, predicate.args, predicate.keywords)
if LOG_OPTS['register']:
hookenv.log(' Adding predicate for %s: %s' % (self.id(), _predicate), level=hookenv.DEBUG)
self._predicates.append(predicate) | Add a new predicate callback to this handler. | Below is the instruction that describes the task:
### Input:
Add a new predicate callback to this handler.
### Response:
def add_predicate(self, predicate):
"""
Add a new predicate callback to this handler.
"""
_predicate = predicate
if isinstance(predicate, partial):
_predicate = 'partial(%s, %s, %s)' % (predicate.func, predicate.args, predicate.keywords)
if LOG_OPTS['register']:
hookenv.log(' Adding predicate for %s: %s' % (self.id(), _predicate), level=hookenv.DEBUG)
self._predicates.append(predicate) |
def ping(daemon, channel, data=None):
"""
Process the 'ping' control message.
:param daemon: The control daemon; used to get at the
configuration and the database.
:param channel: The publish channel to which to send the
response.
:param data: Optional extra data. Will be returned as the
second argument of the response.
Responds to the named channel with a command of 'pong' and
with the node_name (if configured) and provided data as
arguments.
"""
if not channel:
# No place to reply to
return
# Get our configured node name
node_name = daemon.config['control'].get('node_name')
# Format the response
reply = ['pong']
if node_name or data:
reply.append(node_name or '')
if data:
reply.append(data)
# And send it
with utils.ignore_except():
daemon.db.publish(channel, ':'.join(reply)) | Process the 'ping' control message.
:param daemon: The control daemon; used to get at the
configuration and the database.
:param channel: The publish channel to which to send the
response.
:param data: Optional extra data. Will be returned as the
second argument of the response.
Responds to the named channel with a command of 'pong' and
with the node_name (if configured) and provided data as
arguments. | Below is the instruction that describes the task:
### Input:
Process the 'ping' control message.
:param daemon: The control daemon; used to get at the
configuration and the database.
:param channel: The publish channel to which to send the
response.
:param data: Optional extra data. Will be returned as the
second argument of the response.
Responds to the named channel with a command of 'pong' and
with the node_name (if configured) and provided data as
arguments.
### Response:
def ping(daemon, channel, data=None):
"""
Process the 'ping' control message.
:param daemon: The control daemon; used to get at the
configuration and the database.
:param channel: The publish channel to which to send the
response.
:param data: Optional extra data. Will be returned as the
second argument of the response.
Responds to the named channel with a command of 'pong' and
with the node_name (if configured) and provided data as
arguments.
"""
if not channel:
# No place to reply to
return
# Get our configured node name
node_name = daemon.config['control'].get('node_name')
# Format the response
reply = ['pong']
if node_name or data:
reply.append(node_name or '')
if data:
reply.append(data)
# And send it
with utils.ignore_except():
daemon.db.publish(channel, ':'.join(reply)) |
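For reference, the reply published by ping is a colon-joined string; a hypothetical node name and payload give:

```python
# node_name='node1', data='abc123'  ->  'pong:node1:abc123'
print(':'.join(['pong', 'node1', 'abc123']))

# No node name but data present: the empty field keeps positions stable.
print(':'.join(['pong', '', 'abc123']))  # 'pong::abc123'
```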
def _dump_date(d, delim):
"""Used for `http_date` and `cookie_date`."""
if d is None:
d = gmtime()
elif isinstance(d, datetime):
d = d.utctimetuple()
elif isinstance(d, (integer_types, float)):
d = gmtime(d)
return "%s, %02d%s%s%s%s %02d:%02d:%02d GMT" % (
("Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun")[d.tm_wday],
d.tm_mday,
delim,
(
"Jan",
"Feb",
"Mar",
"Apr",
"May",
"Jun",
"Jul",
"Aug",
"Sep",
"Oct",
"Nov",
"Dec",
)[d.tm_mon - 1],
delim,
str(d.tm_year),
d.tm_hour,
d.tm_min,
d.tm_sec,
) | Used for `http_date` and `cookie_date`. | Below is the instruction that describes the task:
### Input:
Used for `http_date` and `cookie_date`.
### Response:
def _dump_date(d, delim):
"""Used for `http_date` and `cookie_date`."""
if d is None:
d = gmtime()
elif isinstance(d, datetime):
d = d.utctimetuple()
elif isinstance(d, (integer_types, float)):
d = gmtime(d)
return "%s, %02d%s%s%s%s %02d:%02d:%02d GMT" % (
("Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun")[d.tm_wday],
d.tm_mday,
delim,
(
"Jan",
"Feb",
"Mar",
"Apr",
"May",
"Jun",
"Jul",
"Aug",
"Sep",
"Oct",
"Nov",
"Dec",
)[d.tm_mon - 1],
delim,
str(d.tm_year),
d.tm_hour,
d.tm_min,
d.tm_sec,
) |
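A quick sketch of the two layouts this helper produces (assuming _dump_date is in scope; the ' ' delimiter corresponds to the HTTP-date form and '-' to the cookie-date form):

```python
from datetime import datetime

d = datetime(2020, 1, 2, 3, 4, 5)
print(_dump_date(d, ' '))  # Thu, 02 Jan 2020 03:04:05 GMT
print(_dump_date(d, '-'))  # Thu, 02-Jan-2020 03:04:05 GMT
```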
def index_to_time_seg(time_seg_idx, slide_step):
"""
Convert a time-slice index into a time-slice string.
:param time_seg_idx:
:param slide_step:
:return:
"""
assert (time_seg_idx * slide_step < const.MINUTES_IN_A_DAY)
return time_util.minutes_to_time_str(time_seg_idx * slide_step) | Convert a time-slice index into a time-slice string.
:param time_seg_idx:
:param slide_step:
:return: | Below is the instruction that describes the task:
### Input:
Convert a time-slice index into a time-slice string.
:param time_seg_idx:
:param slide_step:
:return:
### Response:
def index_to_time_seg(time_seg_idx, slide_step):
"""
Convert a time-slice index into a time-slice string.
:param time_seg_idx:
:param slide_step:
:return:
"""
assert (time_seg_idx * slide_step < const.MINUTES_IN_A_DAY)
return time_util.minutes_to_time_str(time_seg_idx * slide_step) |
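A tiny self-contained sketch of the index-to-time conversion above; const.MINUTES_IN_A_DAY and time_util.minutes_to_time_str are not shown in the row, so assumed equivalents are inlined here:

```python
MINUTES_IN_A_DAY = 1440  # assumed value of const.MINUTES_IN_A_DAY

def minutes_to_time_str(minutes):
    # assumed behaviour of time_util.minutes_to_time_str
    return '{:02d}:{:02d}'.format(minutes // 60, minutes % 60)

slide_step = 30
time_seg_idx = 3
assert time_seg_idx * slide_step < MINUTES_IN_A_DAY
print(minutes_to_time_str(time_seg_idx * slide_step))  # '01:30'
```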
def process(self):
"""!
@brief Performs cluster analysis in line with rules of CLARANS algorithm.
@see get_clusters()
@see get_medoids()
"""
random.seed()
for _ in range(0, self.__numlocal):
# set (current) random medoids
self.__current = random.sample(range(0, len(self.__pointer_data)), self.__number_clusters)
# update clusters in line with random allocated medoids
self.__update_clusters(self.__current)
# optimize configuration
self.__optimize_configuration()
# obtain cost of current cluster configuration and compare it with the best obtained
estimation = self.__calculate_estimation()
if estimation < self.__optimal_estimation:
self.__optimal_medoids = self.__current[:]
self.__optimal_estimation = estimation
self.__update_clusters(self.__optimal_medoids) | !
@brief Performs cluster analysis in line with rules of CLARANS algorithm.
@see get_clusters()
@see get_medoids() | Below is the instruction that describes the task:
### Input:
!
@brief Performs cluster analysis in line with rules of CLARANS algorithm.
@see get_clusters()
@see get_medoids()
### Response:
def process(self):
"""!
@brief Performs cluster analysis in line with rules of CLARANS algorithm.
@see get_clusters()
@see get_medoids()
"""
random.seed()
for _ in range(0, self.__numlocal):
# set (current) random medoids
self.__current = random.sample(range(0, len(self.__pointer_data)), self.__number_clusters)
# update clusters in line with random allocated medoids
self.__update_clusters(self.__current)
# optimize configuration
self.__optimize_configuration()
# obtain cost of current cluster configuration and compare it with the best obtained
estimation = self.__calculate_estimation()
if estimation < self.__optimal_estimation:
self.__optimal_medoids = self.__current[:]
self.__optimal_estimation = estimation
self.__update_clusters(self.__optimal_medoids) |
def ebic_select(self, gamma=0):
"""Uses Extended Bayesian Information Criteria for model selection.
Can only be used in path mode (doesn't really make sense otherwise).
See:
Extended Bayesian Information Criteria for Gaussian Graphical Models
R. Foygel and M. Drton
NIPS 2010
Parameters
----------
gamma : (float) \in (0, 1)
Choice of gamma=0 leads to classical BIC
Positive gamma leads to stronger penalization of large graphs.
Returns
-------
Lambda index with best ebic score. When multiple ebic scores are the
same, returns the smallest lambda (largest index) with minimum score.
"""
if not isinstance(self.precision_, list):
raise ValueError("EBIC requires multiple models to select from.")
return
if not self.is_fitted_:
return
ebic_scores = self.ebic(gamma=gamma)
min_indices = np.where(np.abs(ebic_scores - ebic_scores.min()) < 1e-10)
return np.max(min_indices) | Uses Extended Bayesian Information Criteria for model selection.
Can only be used in path mode (doesn't really make sense otherwise).
See:
Extended Bayesian Information Criteria for Gaussian Graphical Models
R. Foygel and M. Drton
NIPS 2010
Parameters
----------
gamma : (float) \in (0, 1)
Choice of gamma=0 leads to classical BIC
Positive gamma leads to stronger penalization of large graphs.
Returns
-------
Lambda index with best ebic score. When multiple ebic scores are the
same, returns the smallest lambda (largest index) with minimum score. | Below is the instruction that describes the task:
### Input:
Uses Extended Bayesian Information Criteria for model selection.
Can only be used in path mode (doesn't really make sense otherwise).
See:
Extended Bayesian Information Criteria for Gaussian Graphical Models
R. Foygel and M. Drton
NIPS 2010
Parameters
----------
gamma : (float) \in (0, 1)
Choice of gamma=0 leads to classical BIC
Positive gamma leads to stronger penalization of large graphs.
Returns
-------
Lambda index with best ebic score. When multiple ebic scores are the
same, returns the smallest lambda (largest index) with minimum score.
### Response:
def ebic_select(self, gamma=0):
"""Uses Extended Bayesian Information Criteria for model selection.
Can only be used in path mode (doesn't really make sense otherwise).
See:
Extended Bayesian Information Criteria for Gaussian Graphical Models
R. Foygel and M. Drton
NIPS 2010
Parameters
----------
gamma : (float) \in (0, 1)
Choice of gamma=0 leads to classical BIC
Positive gamma leads to stronger penalization of large graphs.
Returns
-------
Lambda index with best ebic score. When multiple ebic scores are the
same, returns the smallest lambda (largest index) with minimum score.
"""
if not isinstance(self.precision_, list):
raise ValueError("EBIC requires multiple models to select from.")
return
if not self.is_fitted_:
return
ebic_scores = self.ebic(gamma=gamma)
min_indices = np.where(np.abs(ebic_scores - ebic_scores.min()) < 1e-10)
return np.max(min_indices) |
def all_distinct(l, idx):
"""
Returns the list [l[i] for i in idx]
When needed, objects l[i] are replaced by a copy, to make sure that
the elements of the list are all distinct
Parameters
---------
l: iterable
idx: iterable that generates ints (e.g. ndarray of ints)
Returns
-------
a list
"""
out = []
deja_vu = [False for _ in l]
for i in idx:
to_add = cp.deepcopy(l[i]) if deja_vu[i] else l[i]
out.append(to_add)
deja_vu[i] = True
return out | Returns the list [l[i] for i in idx]
When needed, objects l[i] are replaced by a copy, to make sure that
the elements of the list are all distinct
Parameters
---------
l: iterable
idx: iterable that generates ints (e.g. ndarray of ints)
Returns
-------
a list | Below is the instruction that describes the task:
### Input:
Returns the list [l[i] for i in idx]
When needed, objects l[i] are replaced by a copy, to make sure that
the elements of the list are all distinct
Parameters
---------
l: iterable
idx: iterable that generates ints (e.g. ndarray of ints)
Returns
-------
a list
### Response:
def all_distinct(l, idx):
"""
Returns the list [l[i] for i in idx]
When needed, objects l[i] are replaced by a copy, to make sure that
the elements of the list are all distinct
Parameters
---------
l: iterable
idx: iterable that generates ints (e.g. ndarray of ints)
Returns
-------
a list
"""
out = []
deja_vu = [False for _ in l]
for i in idx:
to_add = cp.deepcopy(l[i]) if deja_vu[i] else l[i]
out.append(to_add)
deja_vu[i] = True
return out |
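A short illustration of the copy-on-repeat behaviour of all_distinct (assuming it is in scope): the first use of an index returns the original object, later repeats return deep copies.

```python
l = [{'a': 1}, {'a': 2}]
out = all_distinct(l, [0, 0, 1])

print(out[0] is l[0])  # True  - first occurrence is the original object
print(out[1] is l[0])  # False - the repeat is a deep copy
print(out[1] == l[0])  # True  - but equal in value
```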
def _decode_alt_names(self, alt_names):
"""Load SubjectAltName from a ASN.1 GeneralNames value.
:Values:
- `alt_names`: the SubjectAltNama extension value
:Types:
- `alt_name`: `GeneralNames`
"""
for alt_name in alt_names:
tname = alt_name.getName()
comp = alt_name.getComponent()
if tname == "dNSName":
key = "DNS"
value = _decode_asn1_string(comp)
elif tname == "uniformResourceIdentifier":
key = "URI"
value = _decode_asn1_string(comp)
elif tname == "otherName":
oid = comp.getComponentByName("type-id")
value = comp.getComponentByName("value")
if oid == XMPPADDR_OID:
key = "XmppAddr"
value = der_decoder.decode(value,
asn1Spec = UTF8String())[0]
value = _decode_asn1_string(value)
elif oid == SRVNAME_OID:
key = "SRVName"
value = der_decoder.decode(value,
asn1Spec = IA5String())[0]
value = _decode_asn1_string(value)
else:
logger.debug("Unknown other name: {0}".format(oid))
continue
else:
logger.debug("Unsupported general name: {0}"
.format(tname))
continue
self.alt_names[key].append(value) | Load SubjectAltName from an ASN.1 GeneralNames value.
:Values:
- `alt_names`: the SubjectAltName extension value
:Types:
- `alt_names`: `GeneralNames` | Below is the instruction that describes the task:
### Input:
Load SubjectAltName from an ASN.1 GeneralNames value.
:Values:
- `alt_names`: the SubjectAltName extension value
:Types:
- `alt_names`: `GeneralNames`
### Response:
def _decode_alt_names(self, alt_names):
"""Load SubjectAltName from a ASN.1 GeneralNames value.
:Values:
- `alt_names`: the SubjectAltNama extension value
:Types:
- `alt_name`: `GeneralNames`
"""
for alt_name in alt_names:
tname = alt_name.getName()
comp = alt_name.getComponent()
if tname == "dNSName":
key = "DNS"
value = _decode_asn1_string(comp)
elif tname == "uniformResourceIdentifier":
key = "URI"
value = _decode_asn1_string(comp)
elif tname == "otherName":
oid = comp.getComponentByName("type-id")
value = comp.getComponentByName("value")
if oid == XMPPADDR_OID:
key = "XmppAddr"
value = der_decoder.decode(value,
asn1Spec = UTF8String())[0]
value = _decode_asn1_string(value)
elif oid == SRVNAME_OID:
key = "SRVName"
value = der_decoder.decode(value,
asn1Spec = IA5String())[0]
value = _decode_asn1_string(value)
else:
logger.debug("Unknown other name: {0}".format(oid))
continue
else:
logger.debug("Unsupported general name: {0}"
.format(tname))
continue
self.alt_names[key].append(value) |
def delete(self, record):
"""Delete a record.
:param record: Record instance.
"""
index, doc_type = self.record_to_index(record)
return self.client.delete(
id=str(record.id),
index=index,
doc_type=doc_type,
) | Delete a record.
:param record: Record instance. | Below is the instruction that describes the task:
### Input:
Delete a record.
:param record: Record instance.
### Response:
def delete(self, record):
"""Delete a record.
:param record: Record instance.
"""
index, doc_type = self.record_to_index(record)
return self.client.delete(
id=str(record.id),
index=index,
doc_type=doc_type,
) |
def types(self, *args):
"""Used for debugging, returns type of each arg.
TYPES,ARG_1,...,ARG_N
%{TYPES:A,...,10} -> 'str(A) str(B) ... int(10)'
"""
return ', '.join(['{0}({1})'.format(type(arg).__name__, arg) for arg in args]) | Used for debugging, returns type of each arg.
TYPES,ARG_1,...,ARG_N
%{TYPES:A,...,10} -> 'str(A) str(B) ... int(10)' | Below is the instruction that describes the task:
### Input:
Used for debugging, returns type of each arg.
TYPES,ARG_1,...,ARG_N
%{TYPES:A,...,10} -> 'str(A) str(B) ... int(10)'
### Response:
def types(self, *args):
"""Used for debugging, returns type of each arg.
TYPES,ARG_1,...,ARG_N
%{TYPES:A,...,10} -> 'str(A) str(B) ... int(10)'
"""
return ', '.join(['{0}({1})'.format(type(arg).__name__, arg) for arg in args]) |
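The formatting used by types can be reproduced with plain values, since the method only inspects its arguments:

```python
args = ('A', 10, 3.5)
print(', '.join('{0}({1})'.format(type(a).__name__, a) for a in args))
# -> str(A), int(10), float(3.5)
```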
def _build_provider_list():
"""
Construct the provider registry, using the app settings.
"""
registry = None
if appsettings.FLUENT_OEMBED_SOURCE == 'basic':
registry = bootstrap_basic()
elif appsettings.FLUENT_OEMBED_SOURCE == 'embedly':
params = {}
if appsettings.MICAWBER_EMBEDLY_KEY:
params['key'] = appsettings.MICAWBER_EMBEDLY_KEY
registry = bootstrap_embedly(**params)
elif appsettings.FLUENT_OEMBED_SOURCE == 'noembed':
registry = bootstrap_noembed(nowrap=1)
elif appsettings.FLUENT_OEMBED_SOURCE == 'list':
# Fill list manually in the settings, e.g. to have a fixed set of supported secure providers.
registry = ProviderRegistry()
for regex, provider in appsettings.FLUENT_OEMBED_PROVIDER_LIST:
registry.register(regex, Provider(provider))
else:
raise ImproperlyConfigured("Invalid value of FLUENT_OEMBED_SOURCE, only 'basic', 'list', 'noembed' or 'embedly' is supported.")
# Add any extra providers defined in the settings
for regex, provider in appsettings.FLUENT_OEMBED_EXTRA_PROVIDERS:
registry.register(regex, Provider(provider))
return registry | Construct the provider registry, using the app settings. | Below is the the instruction that describes the task:
### Input:
Construct the provider registry, using the app settings.
### Response:
def _build_provider_list():
"""
Construct the provider registry, using the app settings.
"""
registry = None
if appsettings.FLUENT_OEMBED_SOURCE == 'basic':
registry = bootstrap_basic()
elif appsettings.FLUENT_OEMBED_SOURCE == 'embedly':
params = {}
if appsettings.MICAWBER_EMBEDLY_KEY:
params['key'] = appsettings.MICAWBER_EMBEDLY_KEY
registry = bootstrap_embedly(**params)
elif appsettings.FLUENT_OEMBED_SOURCE == 'noembed':
registry = bootstrap_noembed(nowrap=1)
elif appsettings.FLUENT_OEMBED_SOURCE == 'list':
# Fill list manually in the settings, e.g. to have a fixed set of supported secure providers.
registry = ProviderRegistry()
for regex, provider in appsettings.FLUENT_OEMBED_PROVIDER_LIST:
registry.register(regex, Provider(provider))
else:
raise ImproperlyConfigured("Invalid value of FLUENT_OEMBED_SOURCE, only 'basic', 'list', 'noembed' or 'embedly' is supported.")
# Add any extra providers defined in the settings
for regex, provider in appsettings.FLUENT_OEMBED_EXTRA_PROVIDERS:
registry.register(regex, Provider(provider))
return registry |
def system_config_files(self):
"""Get a list of absolute paths to the system config files."""
return [os.path.join(f, self.filename) for f in get_system_config_dirs(
self.app_name, self.app_author)] | Get a list of absolute paths to the system config files. | Below is the instruction that describes the task:
### Input:
Get a list of absolute paths to the system config files.
### Response:
def system_config_files(self):
"""Get a list of absolute paths to the system config files."""
return [os.path.join(f, self.filename) for f in get_system_config_dirs(
self.app_name, self.app_author)] |
def dew_point(self, db):
"""Get the dew point (C), which is constant throughout the day (except at saturation).
args:
db: The maximum dry bulb temperature over the day.
"""
if self._hum_type == 'Dewpoint':
return self._hum_value
elif self._hum_type == 'Wetbulb':
return dew_point_from_db_wb(
db, self._hum_value, self._barometric_pressure)
elif self._hum_type == 'HumidityRatio':
return dew_point_from_db_hr(
db, self._hum_value, self._barometric_pressure)
elif self._hum_type == 'Enthalpy':
return dew_point_from_db_enth(
db, self._hum_value / 1000, self._barometric_pressure) | Get the dew point (C), which is constant throughout the day (except at saturation).
args:
db: The maximum dry bulb temperature over the day. | Below is the instruction that describes the task:
### Input:
Get the dew point (C), which is constant throughout the day (except at saturation).
args:
db: The maximum dry bulb temperature over the day.
### Response:
def dew_point(self, db):
"""Get the dew point (C), which is constant throughout the day (except at saturation).
args:
db: The maximum dry bulb temperature over the day.
"""
if self._hum_type == 'Dewpoint':
return self._hum_value
elif self._hum_type == 'Wetbulb':
return dew_point_from_db_wb(
db, self._hum_value, self._barometric_pressure)
elif self._hum_type == 'HumidityRatio':
return dew_point_from_db_hr(
db, self._hum_value, self._barometric_pressure)
elif self._hum_type == 'Enthalpy':
return dew_point_from_db_enth(
db, self._hum_value / 1000, self._barometric_pressure) |
def update(self, of):
"""Update a file from another file, for copying"""
# The other values should be set when the file object is created with dataset.bsfile()
for p in ('mime_type', 'preference', 'state', 'hash', 'modified', 'size', 'contents', 'source_hash', 'data'):
setattr(self, p, getattr(of, p))
return self | Update a file from another file, for copying | Below is the instruction that describes the task:
### Input:
Update a file from another file, for copying
### Response:
def update(self, of):
"""Update a file from another file, for copying"""
# The other values should be set when the file object is created with dataset.bsfile()
for p in ('mime_type', 'preference', 'state', 'hash', 'modified', 'size', 'contents', 'source_hash', 'data'):
setattr(self, p, getattr(of, p))
return self |
def aggregations(self):
"""
Override parent method. Obtain list of the terms and their corresponding
values using "terms" aggregations for the previous time period.
:returns: a data frame containing terms and their corresponding values
"""
prev_month_start = get_prev_month(self.end, self.query.interval_)
self.query.since(prev_month_start)
self.query.get_terms("author_name")
return self.query.get_list(dataframe=True) | Override parent method. Obtain list of the terms and their corresponding
values using "terms" aggregations for the previous time period.
:returns: a data frame containing terms and their corresponding values | Below is the instruction that describes the task:
### Input:
Override parent method. Obtain list of the terms and their corresponding
values using "terms" aggregations for the previous time period.
:returns: a data frame containing terms and their corresponding values
### Response:
def aggregations(self):
"""
Override parent method. Obtain list of the terms and their corresponding
values using "terms" aggregations for the previous time period.
:returns: a data frame containing terms and their corresponding values
"""
prev_month_start = get_prev_month(self.end, self.query.interval_)
self.query.since(prev_month_start)
self.query.get_terms("author_name")
return self.query.get_list(dataframe=True) |
def TryLink( self, text, extension ):
"""Compiles the program given in text to an executable env.Program,
using extension as file extension (e.g. '.c'). Returns 1, if
compilation was successful, 0 otherwise. The target is saved in
self.lastTarget (for further processing).
"""
return self.TryBuild(self.env.Program, text, extension ) | Compiles the program given in text to an executable env.Program,
using extension as file extension (e.g. '.c'). Returns 1, if
compilation was successful, 0 otherwise. The target is saved in
self.lastTarget (for further processing). | Below is the instruction that describes the task:
### Input:
Compiles the program given in text to an executable env.Program,
using extension as file extension (e.g. '.c'). Returns 1, if
compilation was successful, 0 otherwise. The target is saved in
self.lastTarget (for further processing).
### Response:
def TryLink( self, text, extension ):
"""Compiles the program given in text to an executable env.Program,
using extension as file extension (e.g. '.c'). Returns 1, if
compilation was successful, 0 otherwise. The target is saved in
self.lastTarget (for further processing).
"""
return self.TryBuild(self.env.Program, text, extension ) |
def forms_valid(self, inlines):
"""
If the form and formsets are valid, save the associated models.
"""
for formset in inlines:
formset.save()
return HttpResponseRedirect(self.get_success_url()) | If the form and formsets are valid, save the associated models. | Below is the instruction that describes the task:
### Input:
If the form and formsets are valid, save the associated models.
### Response:
def forms_valid(self, inlines):
"""
If the form and formsets are valid, save the associated models.
"""
for formset in inlines:
formset.save()
return HttpResponseRedirect(self.get_success_url()) |
def _build_url(self, endpoint):
""" Build complete URL """
if not issubclass(type(self), ChatApiBase) and not self.subdomain:
raise ZenpyException("subdomain is required when accessing the Zendesk API!")
if self.subdomain:
endpoint.netloc = '{}.{}'.format(self.subdomain, self.domain)
else:
endpoint.netloc = self.domain
endpoint.prefix_path(self.api_prefix)
return endpoint.build() | Build complete URL | Below is the instruction that describes the task:
### Input:
Build complete URL
### Response:
def _build_url(self, endpoint):
""" Build complete URL """
if not issubclass(type(self), ChatApiBase) and not self.subdomain:
raise ZenpyException("subdomain is required when accessing the Zendesk API!")
if self.subdomain:
endpoint.netloc = '{}.{}'.format(self.subdomain, self.domain)
else:
endpoint.netloc = self.domain
endpoint.prefix_path(self.api_prefix)
return endpoint.build() |
def parse_args():
''' Parse the arguments '''
parser = argparse.ArgumentParser(prog='setup_tree_modules', usage='%(prog)s [opts]')
parser.add_argument('-v', '--verbose', action='store_true', dest='verbose',
help='Print extra information.', default=False)
parser.add_argument('-r', '--root', action='store', dest='root', default=os.getenv('SAS_BASE_DIR'),
help='Override the value of $SAS_BASE_DIR.', metavar='SAS_BASE_DIR')
parser.add_argument('-t', '--treedir', action='store', dest='treedir', default=os.getenv('TREE_DIR'),
help='Override the value of $TREE_DIR.', metavar='TREE_DIR')
parser.add_argument('-m', '--modulesdir', action='store', dest='modulesdir', default=os.getenv('MODULES_DIR'),
help='Your modules directory', metavar='MODULES_DIR')
parser.add_argument('-e', '--env', action='store_true', dest='env',
help='Create tree environment symlinks.', default=False)
parser.add_argument('-i', '--mirror', action='store_true', dest='mirror',
help='Use the mirror site (SAM) instead.')
parser.add_argument('-o', '--only', action='store', dest='only', metavar='[xxx].cfg',
default=None, help='create links for only the specified tree config.')
opts = parser.parse_args()
return opts | Parse the arguments | Below is the instruction that describes the task:
### Input:
Parse the arguments
### Response:
def parse_args():
''' Parse the arguments '''
parser = argparse.ArgumentParser(prog='setup_tree_modules', usage='%(prog)s [opts]')
parser.add_argument('-v', '--verbose', action='store_true', dest='verbose',
help='Print extra information.', default=False)
parser.add_argument('-r', '--root', action='store', dest='root', default=os.getenv('SAS_BASE_DIR'),
help='Override the value of $SAS_BASE_DIR.', metavar='SAS_BASE_DIR')
parser.add_argument('-t', '--treedir', action='store', dest='treedir', default=os.getenv('TREE_DIR'),
help='Override the value of $TREE_DIR.', metavar='TREE_DIR')
parser.add_argument('-m', '--modulesdir', action='store', dest='modulesdir', default=os.getenv('MODULES_DIR'),
help='Your modules directory', metavar='MODULES_DIR')
parser.add_argument('-e', '--env', action='store_true', dest='env',
help='Create tree environment symlinks.', default=False)
parser.add_argument('-i', '--mirror', action='store_true', dest='mirror',
help='Use the mirror site (SAM) instead.')
parser.add_argument('-o', '--only', action='store', dest='only', metavar='[xxx].cfg',
default=None, help='create links for only the specified tree config.')
opts = parser.parse_args()
return opts |
def strptime_fuzzy(s):
"""Fuzzy date string parsing
Note: this returns current date if not found. If only year is provided, will return current month, day
"""
import dateutil.parser
dt = dateutil.parser.parse(str(s), fuzzy=True)
return dt | Fuzzy date string parsing
Note: this returns current date if not found. If only year is provided, will return current month, day | Below is the instruction that describes the task:
### Input:
Fuzzy date string parsing
Note: this returns current date if not found. If only year is provided, will return current month, day
### Response:
def strptime_fuzzy(s):
"""Fuzzy date string parsing
Note: this returns current date if not found. If only year is provided, will return current month, day
"""
import dateutil.parser
dt = dateutil.parser.parse(str(s), fuzzy=True)
return dt |
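A brief example of the fuzzy parsing delegated to dateutil above (requires python-dateutil; fields missing from the input default to the current date):

```python
import dateutil.parser

# Unknown words are skipped when fuzzy=True.
print(dateutil.parser.parse('released on 2019-05-03', fuzzy=True))
# -> 2019-05-03 00:00:00
print(dateutil.parser.parse('sometime in 2019', fuzzy=True).year)  # 2019
```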
def compress_and_sort_index(self):
"""Sort index, add header, and compress.
:rtype: bool
:returns: True
"""
idx_fname = '{name}.{date}.mbox.csv'.format(**self.__dict__)
try:
reader = csv.reader(open(idx_fname), dialect='excel-tab')
except IOError:
return False
index = [x for x in reader if x]
sorted_index = sorted(index, key=itemgetter(0))
gzip_idx_fname = idx_fname + '.gz'
# Include UTF-8 BOM in header.
header = [
'\xef\xbb\xbf#date', 'msg_id', 'from', 'newsgroups', 'subject', 'references',
'start', 'length',
]
s = cStringIO.StringIO()
writer = csv.writer(s, dialect='excel-tab')
writer.writerow(header)
for line in sorted_index:
writer.writerow(line)
compressed_index = inline_compress_chunk(s.getvalue())
s.close()
with open(gzip_idx_fname, 'ab') as fp:
fp.write(compressed_index)
os.remove(idx_fname)
return True | Sort index, add header, and compress.
:rtype: bool
:returns: True | Below is the instruction that describes the task:
### Input:
Sort index, add header, and compress.
:rtype: bool
:returns: True
### Response:
def compress_and_sort_index(self):
"""Sort index, add header, and compress.
:rtype: bool
:returns: True
"""
idx_fname = '{name}.{date}.mbox.csv'.format(**self.__dict__)
try:
reader = csv.reader(open(idx_fname), dialect='excel-tab')
except IOError:
return False
index = [x for x in reader if x]
sorted_index = sorted(index, key=itemgetter(0))
gzip_idx_fname = idx_fname + '.gz'
# Include UTF-8 BOM in header.
header = [
'\xef\xbb\xbf#date', 'msg_id', 'from', 'newsgroups', 'subject', 'references',
'start', 'length',
]
s = cStringIO.StringIO()
writer = csv.writer(s, dialect='excel-tab')
writer.writerow(header)
for line in sorted_index:
writer.writerow(line)
compressed_index = inline_compress_chunk(s.getvalue())
s.close()
with open(gzip_idx_fname, 'ab') as fp:
fp.write(compressed_index)
os.remove(idx_fname)
return True |
def _parse(self, stream, context, path):
"""Parse until the end of objects data."""
num_players = context._._._.replay.num_players
start = stream.tell()
# Have to read everything to be able to use find()
read_bytes = stream.read()
# Try to find the first marker, a portion of the next player structure
marker_up14 = read_bytes.find(b"\x16\xc6\x00\x00\x00\x21")
marker_up15 = read_bytes.find(b"\x16\xf0\x00\x00\x00\x21")
marker = -1
if marker_up14 > 0 and marker_up15 < 0:
marker = marker_up14
elif marker_up15 > 0 and marker_up14 < 0:
marker = marker_up15
# If it exists, we're not on the last player yet
if marker > 0:
# Backtrack through the player name
count = 0
while struct.unpack("<H", read_bytes[marker-2:marker])[0] != count:
marker -= 1
count += 1
# Backtrack through the rest of the next player structure
backtrack = 43 + num_players
# Otherwise, this is the last player
else:
# Search for the scenario header
marker = read_bytes.find(b"\xf6\x28\x9c\x3f")
# Backtrack through the achievements and initial structure footer
backtrack = ((1817 * (num_players - 1)) + 4 + 19)
# Seek to the position we found
end = start + marker - backtrack
stream.seek(end)
return end | Parse until the end of objects data. | Below is the instruction that describes the task:
### Input:
Parse until the end of objects data.
### Response:
def _parse(self, stream, context, path):
"""Parse until the end of objects data."""
num_players = context._._._.replay.num_players
start = stream.tell()
# Have to read everything to be able to use find()
read_bytes = stream.read()
# Try to find the first marker, a portion of the next player structure
marker_up14 = read_bytes.find(b"\x16\xc6\x00\x00\x00\x21")
marker_up15 = read_bytes.find(b"\x16\xf0\x00\x00\x00\x21")
marker = -1
if marker_up14 > 0 and marker_up15 < 0:
marker = marker_up14
elif marker_up15 > 0 and marker_up14 < 0:
marker = marker_up15
# If it exists, we're not on the last player yet
if marker > 0:
# Backtrack through the player name
count = 0
while struct.unpack("<H", read_bytes[marker-2:marker])[0] != count:
marker -= 1
count += 1
# Backtrack through the rest of the next player structure
backtrack = 43 + num_players
# Otherwise, this is the last player
else:
# Search for the scenario header
marker = read_bytes.find(b"\xf6\x28\x9c\x3f")
# Backtrack through the achievements and initial structure footer
backtrack = ((1817 * (num_players - 1)) + 4 + 19)
# Seek to the position we found
end = start + marker - backtrack
stream.seek(end)
return end |
def a_torispherical(D, f, k):
r'''Calculates depth of a torispherical head according to [1]_.
.. math::
a = a_1 + a_2
.. math::
\alpha = \sin^{-1}\frac{1-2k}{2(f-k)}
.. math::
a_1 = fD(1-\cos\alpha)
.. math::
a_2 = kD\cos\alpha
Parameters
----------
D : float
Diameter of the main cylindrical section, [m]
f : float
Dish-radius parameter; fD = dish radius [1/m]
k : float
knuckle-radius parameter ; kD = knuckle radius [1/m]
Returns
-------
a : float
Depth of head [m]
Examples
--------
Example from [1]_.
>>> a_torispherical(D=96., f=0.9, k=0.2)
25.684268924767125
References
----------
.. [1] Jones, D. "Calculating Tank Volume." Text. Accessed December 22, 2015.
http://www.webcalc.com.br/blog/Tank_Volume.PDF'''
alpha = asin((1-2*k)/(2*(f-k)))
a1 = f*D*(1 - cos(alpha))
a2 = k*D*cos(alpha)
return a1 + a2 | r'''Calculates depth of a torispherical head according to [1]_.
.. math::
a = a_1 + a_2
.. math::
\alpha = \sin^{-1}\frac{1-2k}{2(f-k)}
.. math::
a_1 = fD(1-\cos\alpha)
.. math::
a_2 = kD\cos\alpha
Parameters
----------
D : float
Diameter of the main cylindrical section, [m]
f : float
Dish-radius parameter; fD = dish radius [1/m]
k : float
knuckle-radius parameter ; kD = knuckle radius [1/m]
Returns
-------
a : float
Depth of head [m]
Examples
--------
Example from [1]_.
>>> a_torispherical(D=96., f=0.9, k=0.2)
25.684268924767125
References
----------
.. [1] Jones, D. "Calculating Tank Volume." Text. Accessed December 22, 2015.
http://www.webcalc.com.br/blog/Tank_Volume.PDF | Below is the instruction that describes the task:
### Input:
r'''Calculates depth of a torispherical head according to [1]_.
.. math::
a = a_1 + a_2
.. math::
\alpha = \sin^{-1}\frac{1-2k}{2(f-k)}
.. math::
a_1 = fD(1-\cos\alpha)
.. math::
a_2 = kD\cos\alpha
Parameters
----------
D : float
Diameter of the main cylindrical section, [m]
f : float
Dish-radius parameter; fD = dish radius [1/m]
k : float
knuckle-radius parameter ; kD = knuckle radius [1/m]
Returns
-------
a : float
Depth of head [m]
Examples
--------
Example from [1]_.
>>> a_torispherical(D=96., f=0.9, k=0.2)
25.684268924767125
References
----------
.. [1] Jones, D. "Calculating Tank Volume." Text. Accessed December 22, 2015.
http://www.webcalc.com.br/blog/Tank_Volume.PDF
### Response:
def a_torispherical(D, f, k):
r'''Calculates depth of a torispherical head according to [1]_.
.. math::
a = a_1 + a_2
.. math::
\alpha = \sin^{-1}\frac{1-2k}{2(f-k)}
.. math::
a_1 = fD(1-\cos\alpha)
.. math::
a_2 = kD\cos\alpha
Parameters
----------
D : float
Diameter of the main cylindrical section, [m]
f : float
Dish-radius parameter; fD = dish radius [1/m]
k : float
knuckle-radius parameter ; kD = knuckle radius [1/m]
Returns
-------
a : float
Depth of head [m]
Examples
--------
Example from [1]_.
>>> a_torispherical(D=96., f=0.9, k=0.2)
25.684268924767125
References
----------
.. [1] Jones, D. "Calculating Tank Volume." Text. Accessed December 22, 2015.
http://www.webcalc.com.br/blog/Tank_Volume.PDF'''
alpha = asin((1-2*k)/(2*(f-k)))
a1 = f*D*(1 - cos(alpha))
a2 = k*D*cos(alpha)
return a1 + a2 |
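
A quick numeric sanity check of the formula, reproducing the doctest value step by step (illustrative only; the intermediate names mirror those in the function):

from math import asin, cos

D, f, k = 96.0, 0.9, 0.2
alpha = asin((1 - 2*k) / (2*(f - k)))   # asin(0.6/1.4), about 0.4429 rad
a1 = f*D*(1 - cos(alpha))               # dish contribution, about 8.34
a2 = k*D*cos(alpha)                     # knuckle contribution, about 17.35
print(a1 + a2)                          # 25.684268924767125
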
def _update_object_map(self, obj_map):
"""loop through all the keys in self.my_osid_object._my_map, and
see if any of them contain text like "AssetContent:<label>"
If so, assume it is markup (?), replace the string with asset_content.get_url()"""
# TODO: Look for <img> tags to add in alt-text and description
# TODO: Look for <video> and <audio> tags to add in description, transcripts and vtt files?
try:
super(FilesRecord, self)._update_object_map(obj_map)
except AttributeError:
pass
bypass_asset_content_authorization = False
acls = None
try:
config = self.my_osid_object._runtime.get_configuration()
parameter_id = Id('parameter:bypassAuthorizationForFilesRecordAssetContentLookup@json')
bypass_asset_content_authorization = config.get_value_by_parameter(parameter_id).get_boolean_value()
except (AttributeError, KeyError, NotFound):
pass
def replace_url_in_display_text(potential_display_text, dict_files_map):
if ('text' in potential_display_text and
potential_display_text['text'] is not None and
'AssetContent' in potential_display_text['text']):
# assume markup? Wrap this in case it's not a valid XML doc
# with a single parent object
            wrapped_text = '<wrapper>{0}</wrapper>'.format(potential_display_text['text'])
soup = BeautifulSoup(wrapped_text, 'xml')
media_file_elements = soup.find_all(src=media_regex)
media_file_elements += soup.find_all(data=media_regex)
for media_file_element in media_file_elements:
if 'src' in media_file_element.attrs:
media_key = 'src'
else:
media_key = 'data'
if ':' not in media_file_element[media_key]:
continue
media_label = media_file_element[media_key].split(':')[-1]
if media_label in dict_files_map:
asset_id = Id(dict_files_map[media_label]['assetId'])
ac_id = Id(dict_files_map[media_label]['assetContentId'])
if bypass_asset_content_authorization:
ac = acls.get_asset_content(ac_id)
else:
ac = self._get_asset_content(asset_id=asset_id, asset_content_id=ac_id)
if media_file_element.name == 'track':
try:
if not ac.has_files():
continue
except AttributeError:
# non-multi-language VTT files
media_file_element[media_key] = ac.get_url()
else:
media_file_element[media_key] = ac.get_url()
media_file_element['srclang'] = ac.get_vtt_locale_identifier().lower()[0:2]
media_file_element['label'] = ac.get_vtt_locale_label()
elif media_file_element.name == 'transcript':
if not ac.has_files():
continue
transcript_template_path = '{0}/osid/transcript_template.xml'.format(ABS_PATH)
with codecs.open(transcript_template_path, 'r', encoding='utf-8') as template_file:
template = template_file.read().format(media_label,
ac.get_transcript_locale_label().lower(),
ac.get_transcript_locale_label().title(),
ac.get_transcript_text())
new_template_tag = BeautifulSoup(template, 'xml').div
# media_file_element.replace_with(new_template_tag)
p_parent = None
for parent in media_file_element.parents:
if parent is not None and parent.name != 'p':
# insert the transcript after the top p tag
# so that we don't create invalid HTML by nesting
# <div> and <aside> inside of a <p> tag
p_parent.insert_after(new_template_tag)
break
p_parent = parent
media_file_element.extract()
else:
media_file_element[media_key] = ac.get_url()
# check for alt-tags
if 'alt' in media_file_element.attrs:
alt_tag_label = media_file_element['alt'].split(':')[-1]
if alt_tag_label in dict_files_map:
asset_id = Id(dict_files_map[alt_tag_label]['assetId'])
ac_id = Id(dict_files_map[alt_tag_label]['assetContentId'])
if bypass_asset_content_authorization:
ac = acls.get_asset_content(ac_id)
else:
ac = self._get_asset_content(asset_id=asset_id, asset_content_id=ac_id)
try:
media_file_element['alt'] = ac.get_alt_text().text
except AttributeError:
pass
potential_display_text['text'] = soup.wrapper.renderContents().decode('utf-8')
else:
for new_key, value in potential_display_text.items():
if isinstance(value, list):
new_files_map = dict_files_map
if 'fileIds' in potential_display_text:
new_files_map = potential_display_text['fileIds']
potential_display_text[new_key] = check_list_children(value, new_files_map)
return potential_display_text
def check_list_children(potential_text_list, list_files_map):
updated_list = []
for child in potential_text_list:
if isinstance(child, dict):
files_map = list_files_map
if 'fileIds' in child:
files_map = child['fileIds']
updated_list.append(replace_url_in_display_text(child, files_map))
elif isinstance(child, list):
updated_list.append(check_list_children(child, list_files_map))
else:
updated_list.append(child)
return updated_list
if bypass_asset_content_authorization:
# One assumption is that the object's catalogId can be used
# as the repositoryId
manager = self.my_osid_object._get_provider_manager('REPOSITORY')
try:
if self.my_osid_object._proxy is not None:
acls = manager.get_asset_content_lookup_session(proxy=self.my_osid_object._proxy)
else:
acls = manager.get_asset_content_lookup_session()
except AttributeError:
pass
else:
acls.use_federated_repository_view()
media_regex = re.compile('(AssetContent:)')
original_files_map = {}
if 'fileIds' in obj_map:
original_files_map = obj_map['fileIds']
for key, data in obj_map.items():
if isinstance(data, dict):
obj_map[key] = replace_url_in_display_text(data, original_files_map)
elif isinstance(data, list):
obj_map[key] = check_list_children(data, original_files_map) | loop through all the keys in self.my_osid_object._my_map, and
see if any of them contain text like "AssetContent:<label>"
If so, assume it is markup (?), replace the string with asset_content.get_url() | Below is the the instruction that describes the task:
### Input:
loop through all the keys in self.my_osid_object._my_map, and
see if any of them contain text like "AssetContent:<label>"
If so, assume it is markup (?), replace the string with asset_content.get_url()
### Response:
def _update_object_map(self, obj_map):
"""loop through all the keys in self.my_osid_object._my_map, and
see if any of them contain text like "AssetContent:<label>"
If so, assume it is markup (?), replace the string with asset_content.get_url()"""
# TODO: Look for <img> tags to add in alt-text and description
# TODO: Look for <video> and <audio> tags to add in description, transcripts and vtt files?
try:
super(FilesRecord, self)._update_object_map(obj_map)
except AttributeError:
pass
bypass_asset_content_authorization = False
acls = None
try:
config = self.my_osid_object._runtime.get_configuration()
parameter_id = Id('parameter:bypassAuthorizationForFilesRecordAssetContentLookup@json')
bypass_asset_content_authorization = config.get_value_by_parameter(parameter_id).get_boolean_value()
except (AttributeError, KeyError, NotFound):
pass
def replace_url_in_display_text(potential_display_text, dict_files_map):
if ('text' in potential_display_text and
potential_display_text['text'] is not None and
'AssetContent' in potential_display_text['text']):
# assume markup? Wrap this in case it's not a valid XML doc
# with a single parent object
            wrapped_text = '<wrapper>{0}</wrapper>'.format(potential_display_text['text'])
soup = BeautifulSoup(wrapped_text, 'xml')
media_file_elements = soup.find_all(src=media_regex)
media_file_elements += soup.find_all(data=media_regex)
for media_file_element in media_file_elements:
if 'src' in media_file_element.attrs:
media_key = 'src'
else:
media_key = 'data'
if ':' not in media_file_element[media_key]:
continue
media_label = media_file_element[media_key].split(':')[-1]
if media_label in dict_files_map:
asset_id = Id(dict_files_map[media_label]['assetId'])
ac_id = Id(dict_files_map[media_label]['assetContentId'])
if bypass_asset_content_authorization:
ac = acls.get_asset_content(ac_id)
else:
ac = self._get_asset_content(asset_id=asset_id, asset_content_id=ac_id)
if media_file_element.name == 'track':
try:
if not ac.has_files():
continue
except AttributeError:
# non-multi-language VTT files
media_file_element[media_key] = ac.get_url()
else:
media_file_element[media_key] = ac.get_url()
media_file_element['srclang'] = ac.get_vtt_locale_identifier().lower()[0:2]
media_file_element['label'] = ac.get_vtt_locale_label()
elif media_file_element.name == 'transcript':
if not ac.has_files():
continue
transcript_template_path = '{0}/osid/transcript_template.xml'.format(ABS_PATH)
with codecs.open(transcript_template_path, 'r', encoding='utf-8') as template_file:
template = template_file.read().format(media_label,
ac.get_transcript_locale_label().lower(),
ac.get_transcript_locale_label().title(),
ac.get_transcript_text())
new_template_tag = BeautifulSoup(template, 'xml').div
# media_file_element.replace_with(new_template_tag)
p_parent = None
for parent in media_file_element.parents:
if parent is not None and parent.name != 'p':
# insert the transcript after the top p tag
# so that we don't create invalid HTML by nesting
# <div> and <aside> inside of a <p> tag
p_parent.insert_after(new_template_tag)
break
p_parent = parent
media_file_element.extract()
else:
media_file_element[media_key] = ac.get_url()
# check for alt-tags
if 'alt' in media_file_element.attrs:
alt_tag_label = media_file_element['alt'].split(':')[-1]
if alt_tag_label in dict_files_map:
asset_id = Id(dict_files_map[alt_tag_label]['assetId'])
ac_id = Id(dict_files_map[alt_tag_label]['assetContentId'])
if bypass_asset_content_authorization:
ac = acls.get_asset_content(ac_id)
else:
ac = self._get_asset_content(asset_id=asset_id, asset_content_id=ac_id)
try:
media_file_element['alt'] = ac.get_alt_text().text
except AttributeError:
pass
potential_display_text['text'] = soup.wrapper.renderContents().decode('utf-8')
else:
for new_key, value in potential_display_text.items():
if isinstance(value, list):
new_files_map = dict_files_map
if 'fileIds' in potential_display_text:
new_files_map = potential_display_text['fileIds']
potential_display_text[new_key] = check_list_children(value, new_files_map)
return potential_display_text
def check_list_children(potential_text_list, list_files_map):
updated_list = []
for child in potential_text_list:
if isinstance(child, dict):
files_map = list_files_map
if 'fileIds' in child:
files_map = child['fileIds']
updated_list.append(replace_url_in_display_text(child, files_map))
elif isinstance(child, list):
updated_list.append(check_list_children(child, list_files_map))
else:
updated_list.append(child)
return updated_list
if bypass_asset_content_authorization:
# One assumption is that the object's catalogId can be used
# as the repositoryId
manager = self.my_osid_object._get_provider_manager('REPOSITORY')
try:
if self.my_osid_object._proxy is not None:
acls = manager.get_asset_content_lookup_session(proxy=self.my_osid_object._proxy)
else:
acls = manager.get_asset_content_lookup_session()
except AttributeError:
pass
else:
acls.use_federated_repository_view()
media_regex = re.compile('(AssetContent:)')
original_files_map = {}
if 'fileIds' in obj_map:
original_files_map = obj_map['fileIds']
for key, data in obj_map.items():
if isinstance(data, dict):
obj_map[key] = replace_url_in_display_text(data, original_files_map)
elif isinstance(data, list):
obj_map[key] = check_list_children(data, original_files_map) |
def AppConfigFlagHandler(feature=None):
""" This is the default handler. It checks for feature flags in the current app's configuration.
For example, to have 'unfinished_feature' hidden in production but active in development:
config.py
class ProductionConfig(Config):
FEATURE_FLAGS = {
'unfinished_feature' : False,
}
class DevelopmentConfig(Config):
FEATURE_FLAGS = {
'unfinished_feature' : True,
}
"""
if not current_app:
log.warn(u"Got a request to check for {feature} but we're outside the request context. Returning False".format(feature=feature))
return False
try:
return current_app.config[FEATURE_FLAGS_CONFIG][feature]
except (AttributeError, KeyError):
raise NoFeatureFlagFound() | This is the default handler. It checks for feature flags in the current app's configuration.
For example, to have 'unfinished_feature' hidden in production but active in development:
config.py
class ProductionConfig(Config):
FEATURE_FLAGS = {
'unfinished_feature' : False,
}
class DevelopmentConfig(Config):
FEATURE_FLAGS = {
'unfinished_feature' : True,
} | Below is the the instruction that describes the task:
### Input:
This is the default handler. It checks for feature flags in the current app's configuration.
For example, to have 'unfinished_feature' hidden in production but active in development:
config.py
class ProductionConfig(Config):
FEATURE_FLAGS = {
'unfinished_feature' : False,
}
class DevelopmentConfig(Config):
FEATURE_FLAGS = {
'unfinished_feature' : True,
}
### Response:
def AppConfigFlagHandler(feature=None):
""" This is the default handler. It checks for feature flags in the current app's configuration.
For example, to have 'unfinished_feature' hidden in production but active in development:
config.py
class ProductionConfig(Config):
FEATURE_FLAGS = {
'unfinished_feature' : False,
}
class DevelopmentConfig(Config):
FEATURE_FLAGS = {
'unfinished_feature' : True,
}
"""
if not current_app:
log.warn(u"Got a request to check for {feature} but we're outside the request context. Returning False".format(feature=feature))
return False
try:
return current_app.config[FEATURE_FLAGS_CONFIG][feature]
except (AttributeError, KeyError):
raise NoFeatureFlagFound() |
def _read_mode_ts(self, size, kind):
"""Read Time Stamp option.
Positional arguments:
* size - int, length of option
* kind - int, 68 (TS)
Returns:
* dict -- extracted Time Stamp (TS) option
Structure of Timestamp (TS) option [RFC 791]:
+--------+--------+--------+--------+
|01000100| length | pointer|oflw|flg|
+--------+--------+--------+--------+
| internet address |
+--------+--------+--------+--------+
| timestamp |
+--------+--------+--------+--------+
| . |
.
.
Octets Bits Name Description
              0           0     ip.ts.kind                Kind (68)
              0           0     ip.ts.type.copy           Copied Flag (0)
              0           1     ip.ts.type.class          Option Class (2)
              0           3     ip.ts.type.number         Option Number (4)
1 8 ip.ts.length Length (≤40)
2 16 ip.ts.pointer Pointer (≥5)
3 24 ip.ts.overflow Overflow Octets
3 28 ip.ts.flag Flag
4 32 ip.ts.ip Internet Address
8 64 ip.ts.timestamp Timestamp
"""
if size > 40 or size < 4:
raise ProtocolError(f'{self.alias}: [Optno {kind}] invalid format')
_tptr = self._read_unpack(1)
_oflg = self._read_binary(1)
_oflw = int(_oflg[:4], base=2)
_flag = int(_oflg[4:], base=2)
if _tptr < 5:
raise ProtocolError(f'{self.alias}: [Optno {kind}] invalid format')
data = dict(
kind=kind,
type=self._read_opt_type(kind),
length=size,
pointer=_tptr,
overflow=_oflw,
flag=_flag,
)
endpoint = min(_tptr, size)
if _flag == 0:
if (size - 4) % 4 != 0:
raise ProtocolError(f'{self.alias}: [Optno {kind}] invalid format')
counter = 5
timestamp = list()
while counter < endpoint:
counter += 4
time = self._read_unpack(4, lilendian=True)
timestamp.append(datetime.datetime.fromtimestamp(time))
data['timestamp'] = timestamp or None
elif _flag == 1 or _flag == 3:
if (size - 4) % 8 != 0:
raise ProtocolError(f'{self.alias}: [Optno {kind}] invalid format')
counter = 5
ipaddress = list()
timestamp = list()
while counter < endpoint:
counter += 8
ipaddress.append(self._read_ipv4_addr())
time = self._read_unpack(4, lilendian=True)
timestamp.append(datetime.datetime.fromtimestamp(time))
data['ip'] = ipaddress or None
data['timestamp'] = timestamp or None
else:
data['data'] = self._read_fileng(size - 4) or None
return data | Read Time Stamp option.
Positional arguments:
* size - int, length of option
* kind - int, 68 (TS)
Returns:
* dict -- extracted Time Stamp (TS) option
Structure of Timestamp (TS) option [RFC 791]:
+--------+--------+--------+--------+
|01000100| length | pointer|oflw|flg|
+--------+--------+--------+--------+
| internet address |
+--------+--------+--------+--------+
| timestamp |
+--------+--------+--------+--------+
| . |
.
.
Octets Bits Name Description
              0           0     ip.ts.kind                Kind (68)
              0           0     ip.ts.type.copy           Copied Flag (0)
              0           1     ip.ts.type.class          Option Class (2)
              0           3     ip.ts.type.number         Option Number (4)
1 8 ip.ts.length Length (≤40)
2 16 ip.ts.pointer Pointer (≥5)
3 24 ip.ts.overflow Overflow Octets
3 28 ip.ts.flag Flag
4 32 ip.ts.ip Internet Address
8 64 ip.ts.timestamp Timestamp | Below is the the instruction that describes the task:
### Input:
Read Time Stamp option.
Positional arguments:
* size - int, length of option
* kind - int, 68 (TS)
Returns:
* dict -- extracted Time Stamp (TS) option
Structure of Timestamp (TS) option [RFC 791]:
+--------+--------+--------+--------+
|01000100| length | pointer|oflw|flg|
+--------+--------+--------+--------+
| internet address |
+--------+--------+--------+--------+
| timestamp |
+--------+--------+--------+--------+
| . |
.
.
Octets Bits Name Description
              0           0     ip.ts.kind                Kind (68)
              0           0     ip.ts.type.copy           Copied Flag (0)
              0           1     ip.ts.type.class          Option Class (2)
              0           3     ip.ts.type.number         Option Number (4)
1 8 ip.ts.length Length (≤40)
2 16 ip.ts.pointer Pointer (≥5)
3 24 ip.ts.overflow Overflow Octets
3 28 ip.ts.flag Flag
4 32 ip.ts.ip Internet Address
8 64 ip.ts.timestamp Timestamp
### Response:
def _read_mode_ts(self, size, kind):
"""Read Time Stamp option.
Positional arguments:
* size - int, length of option
* kind - int, 68 (TS)
Returns:
* dict -- extracted Time Stamp (TS) option
Structure of Timestamp (TS) option [RFC 791]:
+--------+--------+--------+--------+
|01000100| length | pointer|oflw|flg|
+--------+--------+--------+--------+
| internet address |
+--------+--------+--------+--------+
| timestamp |
+--------+--------+--------+--------+
| . |
.
.
Octets Bits Name Description
              0           0     ip.ts.kind                Kind (68)
              0           0     ip.ts.type.copy           Copied Flag (0)
              0           1     ip.ts.type.class          Option Class (2)
              0           3     ip.ts.type.number         Option Number (4)
1 8 ip.ts.length Length (≤40)
2 16 ip.ts.pointer Pointer (≥5)
3 24 ip.ts.overflow Overflow Octets
3 28 ip.ts.flag Flag
4 32 ip.ts.ip Internet Address
8 64 ip.ts.timestamp Timestamp
"""
if size > 40 or size < 4:
raise ProtocolError(f'{self.alias}: [Optno {kind}] invalid format')
_tptr = self._read_unpack(1)
_oflg = self._read_binary(1)
_oflw = int(_oflg[:4], base=2)
_flag = int(_oflg[4:], base=2)
if _tptr < 5:
raise ProtocolError(f'{self.alias}: [Optno {kind}] invalid format')
data = dict(
kind=kind,
type=self._read_opt_type(kind),
length=size,
pointer=_tptr,
overflow=_oflw,
flag=_flag,
)
endpoint = min(_tptr, size)
if _flag == 0:
if (size - 4) % 4 != 0:
raise ProtocolError(f'{self.alias}: [Optno {kind}] invalid format')
counter = 5
timestamp = list()
while counter < endpoint:
counter += 4
time = self._read_unpack(4, lilendian=True)
timestamp.append(datetime.datetime.fromtimestamp(time))
data['timestamp'] = timestamp or None
elif _flag == 1 or _flag == 3:
if (size - 4) % 8 != 0:
raise ProtocolError(f'{self.alias}: [Optno {kind}] invalid format')
counter = 5
ipaddress = list()
timestamp = list()
while counter < endpoint:
counter += 8
ipaddress.append(self._read_ipv4_addr())
time = self._read_unpack(4, lilendian=True)
timestamp.append(datetime.datetime.fromtimestamp(time))
data['ip'] = ipaddress or None
data['timestamp'] = timestamp or None
else:
data['data'] = self._read_fileng(size - 4) or None
return data |
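
For orientation, a standalone sketch of the byte layout the method walks through, using a fabricated flag-1 option body with a single (address, timestamp) pair; it does not depend on the parser class:

import struct

raw = bytes([68, 12, 13, 0x01]) + struct.pack('>I', 0xC0A80001) + struct.pack('<I', 1)
kind, length, pointer, oflg = raw[0], raw[1], raw[2], raw[3]
overflow, flag = oflg >> 4, oflg & 0x0F          # high/low nibbles of the fourth octet
address = '.'.join(str(b) for b in raw[4:8])     # '192.168.0.1'
timestamp, = struct.unpack('<I', raw[8:12])      # little-endian, mirroring the read above
print(kind, length, pointer, overflow, flag, address, timestamp)
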
def _infer_shape(self, dimensions):
"""Replaces the -1 wildcard in the output shape vector.
This function infers the correct output shape given the input dimensions.
Args:
dimensions: List of input non-batch dimensions.
Returns:
Tuple of non-batch output dimensions.
"""
# Size of input
n = np.prod(dimensions)
# Size of output where defined
m = np.prod(abs(np.array(self._shape)))
# Replace wildcard
v = np.array(self._shape)
v[v == -1] = n // m
return tuple(v) | Replaces the -1 wildcard in the output shape vector.
This function infers the correct output shape given the input dimensions.
Args:
dimensions: List of input non-batch dimensions.
Returns:
Tuple of non-batch output dimensions. | Below is the the instruction that describes the task:
### Input:
Replaces the -1 wildcard in the output shape vector.
This function infers the correct output shape given the input dimensions.
Args:
dimensions: List of input non-batch dimensions.
Returns:
Tuple of non-batch output dimensions.
### Response:
def _infer_shape(self, dimensions):
"""Replaces the -1 wildcard in the output shape vector.
This function infers the correct output shape given the input dimensions.
Args:
dimensions: List of input non-batch dimensions.
Returns:
Tuple of non-batch output dimensions.
"""
# Size of input
n = np.prod(dimensions)
# Size of output where defined
m = np.prod(abs(np.array(self._shape)))
# Replace wildcard
v = np.array(self._shape)
v[v == -1] = n // m
return tuple(v) |
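
Worked example of the wildcard replacement with arbitrary numbers:

import numpy as np

shape = (-1, 3)                          # requested output shape with one wildcard
dimensions = [6, 4]                      # input non-batch dimensions
n = np.prod(dimensions)                  # 24 elements in the input
m = np.prod(abs(np.array(shape)))        # 3 elements fixed by the shape
v = np.array(shape)
v[v == -1] = n // m
print(v)                                 # [8 3], i.e. the inferred output shape (8, 3)
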
def _show_annotation_box(self, event):
"""Update an existing box or create an annotation box for an event."""
ax = event.artist.axes
# Get the pre-created annotation box for the axes or create a new one.
if self.display != 'multiple':
annotation = self.annotations[ax]
elif event.mouseevent in self.annotations:
# Avoid creating multiple datacursors for the same click event
# when several artists are selected.
annotation = self.annotations[event.mouseevent]
else:
annotation = self.annotate(ax, **self._annotation_kwargs)
self.annotations[event.mouseevent] = annotation
if self.display == 'single':
# Hide any other annotation boxes...
for ann in self.annotations.values():
ann.set_visible(False)
self.update(event, annotation) | Update an existing box or create an annotation box for an event. | Below is the the instruction that describes the task:
### Input:
Update an existing box or create an annotation box for an event.
### Response:
def _show_annotation_box(self, event):
"""Update an existing box or create an annotation box for an event."""
ax = event.artist.axes
# Get the pre-created annotation box for the axes or create a new one.
if self.display != 'multiple':
annotation = self.annotations[ax]
elif event.mouseevent in self.annotations:
# Avoid creating multiple datacursors for the same click event
# when several artists are selected.
annotation = self.annotations[event.mouseevent]
else:
annotation = self.annotate(ax, **self._annotation_kwargs)
self.annotations[event.mouseevent] = annotation
if self.display == 'single':
# Hide any other annotation boxes...
for ann in self.annotations.values():
ann.set_visible(False)
self.update(event, annotation) |
def post_freeze_hook(self):
"""Post :meth:`dtoolcore.ProtoDataSet.freeze` cleanup actions.
This method is called at the end of the
:meth:`dtoolcore.ProtoDataSet.freeze` method.
In the :class:`dtoolcore.storage_broker.DiskStorageBroker` it removes
the temporary directory for storing item metadata fragment files.
"""
if os.path.isdir(self._metadata_fragments_abspath):
shutil.rmtree(self._metadata_fragments_abspath) | Post :meth:`dtoolcore.ProtoDataSet.freeze` cleanup actions.
This method is called at the end of the
:meth:`dtoolcore.ProtoDataSet.freeze` method.
In the :class:`dtoolcore.storage_broker.DiskStorageBroker` it removes
the temporary directory for storing item metadata fragment files. | Below is the the instruction that describes the task:
### Input:
Post :meth:`dtoolcore.ProtoDataSet.freeze` cleanup actions.
This method is called at the end of the
:meth:`dtoolcore.ProtoDataSet.freeze` method.
In the :class:`dtoolcore.storage_broker.DiskStorageBroker` it removes
the temporary directory for storing item metadata fragment files.
### Response:
def post_freeze_hook(self):
"""Post :meth:`dtoolcore.ProtoDataSet.freeze` cleanup actions.
This method is called at the end of the
:meth:`dtoolcore.ProtoDataSet.freeze` method.
In the :class:`dtoolcore.storage_broker.DiskStorageBroker` it removes
the temporary directory for storing item metadata fragment files.
"""
if os.path.isdir(self._metadata_fragments_abspath):
shutil.rmtree(self._metadata_fragments_abspath) |
def com_google_fonts_check_whitespace_ink(ttFont):
"""Whitespace glyphs have ink?"""
from fontbakery.utils import get_glyph_name, glyph_has_ink
# code-points for all "whitespace" chars:
WHITESPACE_CHARACTERS = [
0x0009, 0x000A, 0x000B, 0x000C, 0x000D, 0x0020, 0x0085, 0x00A0, 0x1680,
0x2000, 0x2001, 0x2002, 0x2003, 0x2004, 0x2005, 0x2006, 0x2007, 0x2008,
0x2009, 0x200A, 0x2028, 0x2029, 0x202F, 0x205F, 0x3000, 0x180E, 0x200B,
0x2060, 0xFEFF
]
failed = False
for codepoint in WHITESPACE_CHARACTERS:
g = get_glyph_name(ttFont, codepoint)
if g is not None and glyph_has_ink(ttFont, g):
failed = True
yield FAIL, ("Glyph \"{}\" has ink."
" It needs to be replaced by"
" an empty glyph.").format(g)
if not failed:
yield PASS, "There is no whitespace glyph with ink." | Whitespace glyphs have ink? | Below is the the instruction that describes the task:
### Input:
Whitespace glyphs have ink?
### Response:
def com_google_fonts_check_whitespace_ink(ttFont):
"""Whitespace glyphs have ink?"""
from fontbakery.utils import get_glyph_name, glyph_has_ink
# code-points for all "whitespace" chars:
WHITESPACE_CHARACTERS = [
0x0009, 0x000A, 0x000B, 0x000C, 0x000D, 0x0020, 0x0085, 0x00A0, 0x1680,
0x2000, 0x2001, 0x2002, 0x2003, 0x2004, 0x2005, 0x2006, 0x2007, 0x2008,
0x2009, 0x200A, 0x2028, 0x2029, 0x202F, 0x205F, 0x3000, 0x180E, 0x200B,
0x2060, 0xFEFF
]
failed = False
for codepoint in WHITESPACE_CHARACTERS:
g = get_glyph_name(ttFont, codepoint)
if g is not None and glyph_has_ink(ttFont, g):
failed = True
yield FAIL, ("Glyph \"{}\" has ink."
" It needs to be replaced by"
" an empty glyph.").format(g)
if not failed:
yield PASS, "There is no whitespace glyph with ink." |
def get_html_attrs(kwargs=None):
"""Generate HTML attributes from the provided keyword arguments.
The output value is sorted by the passed keys, to provide consistent
output. Because of the frequent use of the normally reserved keyword
`class`, `classes` is used instead. Also, all underscores are translated
to regular dashes.
Set any property with a `True` value.
    >>> get_html_attrs({'id': 'text1', 'classes': 'myclass',
'data_id': 1, 'checked': True})
u'class="myclass" data-id="1" id="text1" checked'
"""
kwargs = kwargs or {}
attrs = []
props = []
classes = kwargs.get('classes', '').strip()
if classes:
classes = ' '.join(re.split(r'\s+', classes))
classes = to_unicode(quoteattr(classes))
attrs.append('class=%s' % classes)
try:
del kwargs['classes']
except KeyError:
pass
for key, value in iteritems(kwargs):
key = key.replace('_', '-')
key = to_unicode(key)
if isinstance(value, bool):
if value is True:
props.append(key)
else:
value = quoteattr(Markup(value))
attrs.append(u'%s=%s' % (key, value))
attrs.sort()
props.sort()
attrs.extend(props)
return u' '.join(attrs) | Generate HTML attributes from the provided keyword arguments.
The output value is sorted by the passed keys, to provide consistent
output. Because of the frequent use of the normally reserved keyword
`class`, `classes` is used instead. Also, all underscores are translated
to regular dashes.
Set any property with a `True` value.
>>> get_html_attrs({'id': 'text1', 'classes': 'myclass',
'data_id': 1, 'checked': True})
u'class="myclass" data-id="1" id="text1" checked' | Below is the the instruction that describes the task:
### Input:
Generate HTML attributes from the provided keyword arguments.
The output value is sorted by the passed keys, to provide consistent
output. Because of the frequent use of the normally reserved keyword
`class`, `classes` is used instead. Also, all underscores are translated
to regular dashes.
Set any property with a `True` value.
>>> get_html_attrs({'id': 'text1', 'classes': 'myclass',
'data_id': 1, 'checked': True})
u'class="myclass" data-id="1" id="text1" checked'
### Response:
def get_html_attrs(kwargs=None):
"""Generate HTML attributes from the provided keyword arguments.
The output value is sorted by the passed keys, to provide consistent
output. Because of the frequent use of the normally reserved keyword
`class`, `classes` is used instead. Also, all underscores are translated
to regular dashes.
Set any property with a `True` value.
    >>> get_html_attrs({'id': 'text1', 'classes': 'myclass',
'data_id': 1, 'checked': True})
u'class="myclass" data-id="1" id="text1" checked'
"""
kwargs = kwargs or {}
attrs = []
props = []
classes = kwargs.get('classes', '').strip()
if classes:
classes = ' '.join(re.split(r'\s+', classes))
classes = to_unicode(quoteattr(classes))
attrs.append('class=%s' % classes)
try:
del kwargs['classes']
except KeyError:
pass
for key, value in iteritems(kwargs):
key = key.replace('_', '-')
key = to_unicode(key)
if isinstance(value, bool):
if value is True:
props.append(key)
else:
value = quoteattr(Markup(value))
attrs.append(u'%s=%s' % (key, value))
attrs.sort()
props.sort()
attrs.extend(props)
return u' '.join(attrs) |
def get_dataset():
"""Create a dataset for machine learning of segmentations.
Returns
-------
tuple :
(X, y) where X is a list of tuples. Each tuple is a feature. y
is a list of labels (0 for 'not in one symbol' and 1 for 'in symbol')
"""
seg_data = "segmentation-X.npy"
seg_labels = "segmentation-y.npy"
# seg_ids = "segmentation-ids.npy"
if os.path.isfile(seg_data) and os.path.isfile(seg_labels):
X = numpy.load(seg_data)
y = numpy.load(seg_labels)
with open('datasets.pickle', 'rb') as f:
datasets = pickle.load(f)
return (X, y, datasets)
datasets = get_segmented_raw_data()
X, y = [], []
for i, data in enumerate(datasets):
if i % 10 == 0:
logging.info("[Create Dataset] i=%i/%i", i, len(datasets))
segmentation = json.loads(data['segmentation'])
recording = json.loads(data['data'])
X_symbol = [get_median_stroke_distance(recording)]
if len([p for s in recording for p in s if p['time'] is None]) > 0:
continue
combis = itertools.combinations(list(range(len(recording))), 2)
for strokeid1, strokeid2 in combis:
stroke1 = recording[strokeid1]
stroke2 = recording[strokeid2]
if len(stroke1) == 0 or len(stroke2) == 0:
logging.debug("stroke len 0. Skip.")
continue
X.append(get_stroke_features(recording, strokeid1, strokeid2) +
X_symbol)
same_symbol = (_get_symbol_index(strokeid1, segmentation) ==
_get_symbol_index(strokeid2, segmentation))
y.append(int(same_symbol))
X = numpy.array(X, dtype=numpy.float32)
y = numpy.array(y, dtype=numpy.int32)
numpy.save(seg_data, X)
numpy.save(seg_labels, y)
datasets = filter_recordings(datasets)
with open('datasets.pickle', 'wb') as f:
pickle.dump(datasets, f, protocol=pickle.HIGHEST_PROTOCOL)
return (X, y, datasets) | Create a dataset for machine learning of segmentations.
Returns
-------
tuple :
(X, y) where X is a list of tuples. Each tuple is a feature. y
is a list of labels (0 for 'not in one symbol' and 1 for 'in symbol') | Below is the the instruction that describes the task:
### Input:
Create a dataset for machine learning of segmentations.
Returns
-------
tuple :
(X, y) where X is a list of tuples. Each tuple is a feature. y
is a list of labels (0 for 'not in one symbol' and 1 for 'in symbol')
### Response:
def get_dataset():
"""Create a dataset for machine learning of segmentations.
Returns
-------
tuple :
(X, y) where X is a list of tuples. Each tuple is a feature. y
is a list of labels (0 for 'not in one symbol' and 1 for 'in symbol')
"""
seg_data = "segmentation-X.npy"
seg_labels = "segmentation-y.npy"
# seg_ids = "segmentation-ids.npy"
if os.path.isfile(seg_data) and os.path.isfile(seg_labels):
X = numpy.load(seg_data)
y = numpy.load(seg_labels)
with open('datasets.pickle', 'rb') as f:
datasets = pickle.load(f)
return (X, y, datasets)
datasets = get_segmented_raw_data()
X, y = [], []
for i, data in enumerate(datasets):
if i % 10 == 0:
logging.info("[Create Dataset] i=%i/%i", i, len(datasets))
segmentation = json.loads(data['segmentation'])
recording = json.loads(data['data'])
X_symbol = [get_median_stroke_distance(recording)]
if len([p for s in recording for p in s if p['time'] is None]) > 0:
continue
combis = itertools.combinations(list(range(len(recording))), 2)
for strokeid1, strokeid2 in combis:
stroke1 = recording[strokeid1]
stroke2 = recording[strokeid2]
if len(stroke1) == 0 or len(stroke2) == 0:
logging.debug("stroke len 0. Skip.")
continue
X.append(get_stroke_features(recording, strokeid1, strokeid2) +
X_symbol)
same_symbol = (_get_symbol_index(strokeid1, segmentation) ==
_get_symbol_index(strokeid2, segmentation))
y.append(int(same_symbol))
X = numpy.array(X, dtype=numpy.float32)
y = numpy.array(y, dtype=numpy.int32)
numpy.save(seg_data, X)
numpy.save(seg_labels, y)
datasets = filter_recordings(datasets)
with open('datasets.pickle', 'wb') as f:
pickle.dump(datasets, f, protocol=pickle.HIGHEST_PROTOCOL)
return (X, y, datasets) |
def enable_support_autoupload(self, **kwargs):
"""Set Spanning Tree state.
Args:
enabled (bool): Is Autoupload enabled? (True, False).
callback (function): A function executed upon completion of the
method. The only parameter passed to `callback` will be the
``ElementTree`` `config`.
Returns:
Return value of `callback`.
Examples:
>>> import pynos.device
>>> switches = ['10.24.39.211', '10.24.39.203']
>>> auth = ('admin', 'password')
>>> for switch in switches:
... conn = (switch, '22')
... with pynos.device.Device(conn=conn, auth=auth) as dev:
... enabled = True
... output = dev.ras.enable_support_autoupload(
... enabled=enabled)
... enabled = False
... output = dev.ras.enable_support_autoupload(
... enabled=enabled)
"""
enabled = kwargs.pop('enabled')
callback = kwargs.pop('callback', self._callback)
if not isinstance(enabled, bool):
raise ValueError('%s must be `True` or `False`.' % repr(enabled))
state_args = dict()
autoupload_state = getattr(self._ras, 'support_autoupload_enable')
config = autoupload_state(**state_args)
if not enabled:
shutdown = config.find('.//*enable')
shutdown.set('operation', 'delete')
    return callback(config) | Enable or disable support autoupload.
Args:
enabled (bool): Is Autoupload enabled? (True, False).
callback (function): A function executed upon completion of the
method. The only parameter passed to `callback` will be the
``ElementTree`` `config`.
Returns:
Return value of `callback`.
Examples:
>>> import pynos.device
>>> switches = ['10.24.39.211', '10.24.39.203']
>>> auth = ('admin', 'password')
>>> for switch in switches:
... conn = (switch, '22')
... with pynos.device.Device(conn=conn, auth=auth) as dev:
... enabled = True
... output = dev.ras.enable_support_autoupload(
... enabled=enabled)
... enabled = False
... output = dev.ras.enable_support_autoupload(
... enabled=enabled) | Below is the the instruction that describes the task:
### Input:
Enable or disable support autoupload.
Args:
enabled (bool): Is Autoupload enabled? (True, False).
callback (function): A function executed upon completion of the
method. The only parameter passed to `callback` will be the
``ElementTree`` `config`.
Returns:
Return value of `callback`.
Examples:
>>> import pynos.device
>>> switches = ['10.24.39.211', '10.24.39.203']
>>> auth = ('admin', 'password')
>>> for switch in switches:
... conn = (switch, '22')
... with pynos.device.Device(conn=conn, auth=auth) as dev:
... enabled = True
... output = dev.ras.enable_support_autoupload(
... enabled=enabled)
... enabled = False
... output = dev.ras.enable_support_autoupload(
... enabled=enabled)
### Response:
def enable_support_autoupload(self, **kwargs):
"""Set Spanning Tree state.
Args:
enabled (bool): Is Autoupload enabled? (True, False).
callback (function): A function executed upon completion of the
method. The only parameter passed to `callback` will be the
``ElementTree`` `config`.
Returns:
Return value of `callback`.
Examples:
>>> import pynos.device
>>> switches = ['10.24.39.211', '10.24.39.203']
>>> auth = ('admin', 'password')
>>> for switch in switches:
... conn = (switch, '22')
... with pynos.device.Device(conn=conn, auth=auth) as dev:
... enabled = True
... output = dev.ras.enable_support_autoupload(
... enabled=enabled)
... enabled = False
... output = dev.ras.enable_support_autoupload(
... enabled=enabled)
"""
enabled = kwargs.pop('enabled')
callback = kwargs.pop('callback', self._callback)
if not isinstance(enabled, bool):
raise ValueError('%s must be `True` or `False`.' % repr(enabled))
state_args = dict()
autoupload_state = getattr(self._ras, 'support_autoupload_enable')
config = autoupload_state(**state_args)
if not enabled:
shutdown = config.find('.//*enable')
shutdown.set('operation', 'delete')
return callback(config) |
def retryable_http_error( e ):
"""
Determine if an error encountered during an HTTP download is likely to go away if we try again.
"""
    if isinstance( e, urllib.error.HTTPError ) and e.code in (503, 408, 500):
# The server returned one of:
# 503 Service Unavailable
# 408 Request Timeout
# 500 Internal Server Error
return True
if isinstance( e, BadStatusLine ):
# The server didn't return a valid response at all
return True
return False | Determine if an error encountered during an HTTP download is likely to go away if we try again. | Below is the the instruction that describes the task:
### Input:
Determine if an error encountered during an HTTP download is likely to go away if we try again.
### Response:
def retryable_http_error( e ):
"""
Determine if an error encountered during an HTTP download is likely to go away if we try again.
"""
    if isinstance( e, urllib.error.HTTPError ) and e.code in (503, 408, 500):
# The server returned one of:
# 503 Service Unavailable
# 408 Request Timeout
# 500 Internal Server Error
return True
if isinstance( e, BadStatusLine ):
# The server didn't return a valid response at all
return True
return False |
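
A sketch of how such a predicate is typically consumed by a retry loop (the URL, attempt count and delay are placeholders):

import time
import urllib.request

def download_with_retries(url, attempts=3, delay=1.0):
    for attempt in range(attempts):
        try:
            return urllib.request.urlopen(url).read()
        except Exception as e:
            if attempt + 1 < attempts and retryable_http_error(e):
                time.sleep(delay)   # transient server-side failure: back off and retry
            else:
                raise
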
def insert(self, name, index, value):
"""Insert a value at the passed index in the named header."""
return self._sequence[name].insert(index, value) | Insert a value at the passed index in the named header. | Below is the the instruction that describes the task:
### Input:
Insert a value at the passed index in the named header.
### Response:
def insert(self, name, index, value):
"""Insert a value at the passed index in the named header."""
return self._sequence[name].insert(index, value) |
def close(self):
"""
Stop the client.
"""
self.stopped.set()
for event in self.to_be_stopped:
event.set()
if self._receiver_thread is not None:
self._receiver_thread.join()
self._socket.close() | Stop the client. | Below is the the instruction that describes the task:
### Input:
Stop the client.
### Response:
def close(self):
"""
Stop the client.
"""
self.stopped.set()
for event in self.to_be_stopped:
event.set()
if self._receiver_thread is not None:
self._receiver_thread.join()
self._socket.close() |
def merge(s, t):
"""Merge dictionary t into s."""
for k, v in t.items():
if isinstance(v, dict):
if k not in s:
s[k] = v
continue
s[k] = merge(s[k], v)
continue
s[k] = v
return s | Merge dictionary t into s. | Below is the the instruction that describes the task:
### Input:
Merge dictionary t into s.
### Response:
def merge(s, t):
"""Merge dictionary t into s."""
for k, v in t.items():
if isinstance(v, dict):
if k not in s:
s[k] = v
continue
s[k] = merge(s[k], v)
continue
s[k] = v
return s |
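
Illustration with two small nested dictionaries (note that s is modified in place and also returned):

s = {'a': 1, 'b': {'x': 1, 'y': 2}}
t = {'b': {'y': 20, 'z': 30}, 'c': 3}
print(merge(s, t))
# {'a': 1, 'b': {'x': 1, 'y': 20, 'z': 30}, 'c': 3}
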
def create_node(args):
''' Create a node.
'''
node = query(method='servers', args=args, http_method='POST')
action = query(
method='servers',
server_id=node['server']['id'],
command='action',
args={'action': 'poweron'},
http_method='POST'
)
return node | Create a node. | Below is the the instruction that describes the task:
### Input:
Create a node.
### Response:
def create_node(args):
''' Create a node.
'''
node = query(method='servers', args=args, http_method='POST')
action = query(
method='servers',
server_id=node['server']['id'],
command='action',
args={'action': 'poweron'},
http_method='POST'
)
return node |
def _validate_image_orientation(image_orientation):
'''
Ensure that the image orientation is supported
- The direction cosines have magnitudes of 1 (just in case)
- The direction cosines are perpendicular
'''
row_cosine, column_cosine, slice_cosine = _extract_cosines(image_orientation)
if not _almost_zero(np.dot(row_cosine, column_cosine), 1e-4):
raise DicomImportException("Non-orthogonal direction cosines: {}, {}".format(row_cosine, column_cosine))
elif not _almost_zero(np.dot(row_cosine, column_cosine), 1e-8):
logger.warning("Direction cosines aren't quite orthogonal: {}, {}".format(row_cosine, column_cosine))
if not _almost_one(np.linalg.norm(row_cosine), 1e-4):
raise DicomImportException("The row direction cosine's magnitude is not 1: {}".format(row_cosine))
elif not _almost_one(np.linalg.norm(row_cosine), 1e-8):
logger.warning("The row direction cosine's magnitude is not quite 1: {}".format(row_cosine))
if not _almost_one(np.linalg.norm(column_cosine), 1e-4):
raise DicomImportException("The column direction cosine's magnitude is not 1: {}".format(column_cosine))
elif not _almost_one(np.linalg.norm(column_cosine), 1e-8):
logger.warning("The column direction cosine's magnitude is not quite 1: {}".format(column_cosine)) | Ensure that the image orientation is supported
- The direction cosines have magnitudes of 1 (just in case)
- The direction cosines are perpendicular | Below is the the instruction that describes the task:
### Input:
Ensure that the image orientation is supported
- The direction cosines have magnitudes of 1 (just in case)
- The direction cosines are perpendicular
### Response:
def _validate_image_orientation(image_orientation):
'''
Ensure that the image orientation is supported
- The direction cosines have magnitudes of 1 (just in case)
- The direction cosines are perpendicular
'''
row_cosine, column_cosine, slice_cosine = _extract_cosines(image_orientation)
if not _almost_zero(np.dot(row_cosine, column_cosine), 1e-4):
raise DicomImportException("Non-orthogonal direction cosines: {}, {}".format(row_cosine, column_cosine))
elif not _almost_zero(np.dot(row_cosine, column_cosine), 1e-8):
logger.warning("Direction cosines aren't quite orthogonal: {}, {}".format(row_cosine, column_cosine))
if not _almost_one(np.linalg.norm(row_cosine), 1e-4):
raise DicomImportException("The row direction cosine's magnitude is not 1: {}".format(row_cosine))
elif not _almost_one(np.linalg.norm(row_cosine), 1e-8):
logger.warning("The row direction cosine's magnitude is not quite 1: {}".format(row_cosine))
if not _almost_one(np.linalg.norm(column_cosine), 1e-4):
raise DicomImportException("The column direction cosine's magnitude is not 1: {}".format(column_cosine))
elif not _almost_one(np.linalg.norm(column_cosine), 1e-8):
logger.warning("The column direction cosine's magnitude is not quite 1: {}".format(column_cosine)) |
def register_filter(self, filter_name, filter_ref, force=False):
"""
Add/register one filter.
Args:
filter_name (str): Filter name used inside :program:`Jinja2` tags.
filter_ref: Reference to the filter itself, i.e. the corresponding :program:`Python` function.
force (bool): If set to ``True``, forces the registration of a filter no matter if it already exists or not.
Note:
        The list of user added/registered filters can be retrieved with :meth:`registered_filters_list`
"""
if not force and (filter_name in self.filters_list()):
self.log_warning("Extension %s already exist, ignore redefinition." % ext_in)
return
self.__jinja2_environment.filters[filter_name] = filter_ref | Add/register one filter.
Args:
filter_name (str): Filter name used inside :program:`Jinja2` tags.
filter_ref: Reference to the filter itself, i.e. the corresponding :program:`Python` function.
force (bool): If set to ``True``, forces the registration of a filter no matter if it already exists or not.
Note:
The list of user added/registered filters can be retrieved with :meth:`registered_filters_list` | Below is the the instruction that describes the task:
### Input:
Add/register one filter.
Args:
filter_name (str): Filter name used inside :program:`Jinja2` tags.
filter_ref: Reference to the filter itself, i.e. the corresponding :program:`Python` function.
force (bool): If set to ``True``, forces the registration of a filter no matter if it already exists or not.
Note:
The list of user added/registered filters can be retrieved with :meth:`registered_filters_list`
### Response:
def register_filter(self, filter_name, filter_ref, force=False):
"""
Add/register one filter.
Args:
filter_name (str): Filter name used inside :program:`Jinja2` tags.
filter_ref: Reference to the filter itself, i.e. the corresponding :program:`Python` function.
force (bool): If set to ``True``, forces the registration of a filter no matter if it already exists or not.
Note:
        The list of user added/registered filters can be retrieved with :meth:`registered_filters_list`
"""
if not force and (filter_name in self.filters_list()):
self.log_warning("Extension %s already exist, ignore redefinition." % ext_in)
return
self.__jinja2_environment.filters[filter_name] = filter_ref |
def append_overhead_costs(costs, new_id, overhead_percentage=0.15):
"""
Adds 15% overhead costs to the list of costs.
Usage::
from rapid_prototyping.context.utils import append_overhead_costs
costs = [
....
]
costs = append_overhead_costs(costs, MAIN_ID + get_counter(counter)[0])
:param costs: Your final list of costs.
:param new_id: The id that this new item should get.
"""
total_time = 0
for item in costs:
total_time += item['time']
costs.append({
'id': new_id,
        'task': 'Overhead, Bugfixes & Iterations',
'time': total_time * overhead_percentage, },
)
return costs | Adds 15% overhead costs to the list of costs.
Usage::
from rapid_prototyping.context.utils import append_overhead_costs
costs = [
....
]
costs = append_overhead_costs(costs, MAIN_ID + get_counter(counter)[0])
:param costs: Your final list of costs.
:param new_id: The id that this new item should get. | Below is the the instruction that describes the task:
### Input:
Adds 15% overhead costs to the list of costs.
Usage::
from rapid_prototyping.context.utils import append_overhead_costs
costs = [
....
]
costs = append_overhead_costs(costs, MAIN_ID + get_counter(counter)[0])
:param costs: Your final list of costs.
:param new_id: The id that this new item should get.
### Response:
def append_overhead_costs(costs, new_id, overhead_percentage=0.15):
"""
Adds 15% overhead costs to the list of costs.
Usage::
from rapid_prototyping.context.utils import append_overhead_costs
costs = [
....
]
costs = append_overhead_costs(costs, MAIN_ID + get_counter(counter)[0])
:param costs: Your final list of costs.
:param new_id: The id that this new item should get.
"""
total_time = 0
for item in costs:
total_time += item['time']
costs.append({
'id': new_id,
        'task': 'Overhead, Bugfixes & Iterations',
'time': total_time * overhead_percentage, },
)
return costs |
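
Small worked example (task names and times invented): two items totalling 30 time units produce an overhead entry of 30 * 0.15 = 4.5.

costs = [
    {'id': 1, 'task': 'Design', 'time': 10},
    {'id': 2, 'task': 'Implementation', 'time': 20},
]
costs = append_overhead_costs(costs, new_id=3)
print(costs[-1]['time'])   # 4.5
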
def tournament_name2number(self, name):
"""Translate tournament name to tournament number.
Args:
name (str): tournament name to translate
Returns:
number (int): number of the tournament or `None` if unknown.
Examples:
>>> NumerAPI().tournament_name2number('delta')
4
>>> NumerAPI().tournament_name2number('foo')
None
"""
tournaments = self.get_tournaments()
d = {t['name']: t['tournament'] for t in tournaments}
return d.get(name, None) | Translate tournament name to tournament number.
Args:
name (str): tournament name to translate
Returns:
number (int): number of the tournament or `None` if unknown.
Examples:
>>> NumerAPI().tournament_name2number('delta')
4
>>> NumerAPI().tournament_name2number('foo')
None | Below is the the instruction that describes the task:
### Input:
Translate tournament name to tournament number.
Args:
name (str): tournament name to translate
Returns:
number (int): number of the tournament or `None` if unknown.
Examples:
>>> NumerAPI().tournament_name2number('delta')
4
>>> NumerAPI().tournament_name2number('foo')
None
### Response:
def tournament_name2number(self, name):
"""Translate tournament name to tournament number.
Args:
name (str): tournament name to translate
Returns:
number (int): number of the tournament or `None` if unknown.
Examples:
>>> NumerAPI().tournament_name2number('delta')
4
>>> NumerAPI().tournament_name2number('foo')
None
"""
tournaments = self.get_tournaments()
d = {t['name']: t['tournament'] for t in tournaments}
return d.get(name, None) |
def _find_package(self, root_package):
"""Finds package name of file
:param root_package: root package
:return: package name
"""
package = self.path.replace(root_package, "")
if package.endswith(".py"):
package = package[:-3]
package = package.replace(os.path.sep, MODULE_SEP)
root_package = get_folder_name(root_package)
package = root_package + package # add root
return package | Finds package name of file
:param root_package: root package
:return: package name | Below is the the instruction that describes the task:
### Input:
Finds package name of file
:param root_package: root package
:return: package name
### Response:
def _find_package(self, root_package):
"""Finds package name of file
:param root_package: root package
:return: package name
"""
package = self.path.replace(root_package, "")
if package.endswith(".py"):
package = package[:-3]
package = package.replace(os.path.sep, MODULE_SEP)
root_package = get_folder_name(root_package)
package = root_package + package # add root
return package |
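
Illustrative walk-through, assuming MODULE_SEP is '.' and get_folder_name returns the last path component (neither helper is shown in this snippet, so both are assumptions):

import os

root_package = os.path.join('home', 'user', 'myproj')
path = os.path.join(root_package, 'subpkg', 'module.py')
package = path.replace(root_package, '')        # os.sep + 'subpkg' + os.sep + 'module.py'
package = package[:-3]                          # drop the '.py' suffix
package = package.replace(os.path.sep, '.')     # '.subpkg.module'
package = 'myproj' + package                    # 'myproj.subpkg.module'
print(package)
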
def stem(self):
"""
Stem tokens with Porter Stemmer.
"""
def s(tokens):
return [PorterStemmer().stem(t) for t in tokens]
self.stems = list(map(s, self.tokens)) | Stem tokens with Porter Stemmer. | Below is the the instruction that describes the task:
### Input:
Stem tokens with Porter Stemmer.
### Response:
def stem(self):
"""
Stem tokens with Porter Stemmer.
"""
def s(tokens):
return [PorterStemmer().stem(t) for t in tokens]
self.stems = list(map(s, self.tokens)) |
def rank(self, dte):
'''The rank of a given *dte* in the timeseries'''
timestamp = self.pickler.dumps(dte)
return self.backend_structure().rank(timestamp) | The rank of a given *dte* in the timeseries | Below is the the instruction that describes the task:
### Input:
The rank of a given *dte* in the timeseries
### Response:
def rank(self, dte):
'''The rank of a given *dte* in the timeseries'''
timestamp = self.pickler.dumps(dte)
return self.backend_structure().rank(timestamp) |
def pre_save_title(instance, **kwargs):
''' Update article.languages
'''
if instance.article.languages:
languages = instance.article.languages.split(',')
else:
languages = []
if instance.language not in languages:
languages.append(instance.language)
instance.article.languages = ','.join(languages)
instance.article._publisher_keep_state = True
instance.article.save(no_signals=True) | Update article.languages | Below is the the instruction that describes the task:
### Input:
Update article.languages
### Response:
def pre_save_title(instance, **kwargs):
''' Update article.languages
'''
if instance.article.languages:
languages = instance.article.languages.split(',')
else:
languages = []
if instance.language not in languages:
languages.append(instance.language)
instance.article.languages = ','.join(languages)
instance.article._publisher_keep_state = True
instance.article.save(no_signals=True) |
def set_window_user_pointer(window, pointer):
"""
Sets the user pointer of the specified window. You may pass a normal python object into this function and it will
be wrapped automatically. The object will be kept in existence until the pointer is set to something else or
until the window is destroyed.
Wrapper for:
void glfwSetWindowUserPointer(GLFWwindow* window, void* pointer);
"""
data = (False, pointer)
if not isinstance(pointer, ctypes.c_void_p):
data = (True, pointer)
# Create a void pointer for the python object
pointer = ctypes.cast(ctypes.pointer(ctypes.py_object(pointer)), ctypes.c_void_p)
window_addr = ctypes.cast(ctypes.pointer(window),
ctypes.POINTER(ctypes.c_long)).contents.value
_window_user_data_repository[window_addr] = data
_glfw.glfwSetWindowUserPointer(window, pointer) | Sets the user pointer of the specified window. You may pass a normal python object into this function and it will
be wrapped automatically. The object will be kept in existence until the pointer is set to something else or
until the window is destroyed.
Wrapper for:
void glfwSetWindowUserPointer(GLFWwindow* window, void* pointer); | Below is the the instruction that describes the task:
### Input:
Sets the user pointer of the specified window. You may pass a normal python object into this function and it will
be wrapped automatically. The object will be kept in existence until the pointer is set to something else or
until the window is destroyed.
Wrapper for:
void glfwSetWindowUserPointer(GLFWwindow* window, void* pointer);
### Response:
def set_window_user_pointer(window, pointer):
"""
Sets the user pointer of the specified window. You may pass a normal python object into this function and it will
be wrapped automatically. The object will be kept in existence until the pointer is set to something else or
until the window is destroyed.
Wrapper for:
void glfwSetWindowUserPointer(GLFWwindow* window, void* pointer);
"""
data = (False, pointer)
if not isinstance(pointer, ctypes.c_void_p):
data = (True, pointer)
# Create a void pointer for the python object
pointer = ctypes.cast(ctypes.pointer(ctypes.py_object(pointer)), ctypes.c_void_p)
window_addr = ctypes.cast(ctypes.pointer(window),
ctypes.POINTER(ctypes.c_long)).contents.value
_window_user_data_repository[window_addr] = data
_glfw.glfwSetWindowUserPointer(window, pointer) |
def has_causal_out_edges(graph: BELGraph, node: BaseEntity) -> bool:
"""Return true if the node contains any out_edges that are causal."""
return any(
data[RELATION] in CAUSAL_RELATIONS
for _, _, data in graph.out_edges(node, data=True)
) | Return true if the node contains any out_edges that are causal. | Below is the the instruction that describes the task:
### Input:
Return true if the node contains any out_edges that are causal.
### Response:
def has_causal_out_edges(graph: BELGraph, node: BaseEntity) -> bool:
"""Return true if the node contains any out_edges that are causal."""
return any(
data[RELATION] in CAUSAL_RELATIONS
for _, _, data in graph.out_edges(node, data=True)
) |
def create_observations(params: Dict[str, Dict[str, Any]], access_token: str) -> List[Dict[str, Any]]:
"""Create a single or several (if passed an array) observations).
:param params:
:param access_token: the access token, as returned by :func:`get_access_token()`
:return: iNaturalist's JSON response, as a Python object
:raise: requests.HTTPError, if the call is not successful. iNaturalist returns an error 422 (unprocessable entity)
if it rejects the observation data (for example an observation date in the future or a latitude > 90. In
that case the exception's `response` attribute give details about the errors.
allowed params: see https://www.inaturalist.org/pages/api+reference#post-observations
Example:
params = {'observation':
{'species_guess': 'Pieris rapae'},
}
TODO investigate: according to the doc, we should be able to pass multiple observations (in an array, and in
renaming observation to observations, but as far as I saw they are not created (while a status of 200 is returned)
"""
response = requests.post(url="{base_url}/observations.json".format(base_url=INAT_BASE_URL),
json=params,
headers=_build_auth_header(access_token))
response.raise_for_status()
    return response.json() | Create a single observation or several observations (if passed an array).
:param params:
:param access_token: the access token, as returned by :func:`get_access_token()`
:return: iNaturalist's JSON response, as a Python object
:raise: requests.HTTPError, if the call is not successful. iNaturalist returns an error 422 (unprocessable entity)
if it rejects the observation data (for example an observation date in the future or a latitude > 90. In
that case the exception's `response` attribute give details about the errors.
allowed params: see https://www.inaturalist.org/pages/api+reference#post-observations
Example:
params = {'observation':
{'species_guess': 'Pieris rapae'},
}
TODO investigate: according to the doc, we should be able to pass multiple observations (in an array, renaming
'observation' to 'observations'), but as far as I saw they are not created (while a status of 200 is returned) | Below is the the instruction that describes the task:
### Input:
Create a single observation, or several if passed an array of observations.
:param params:
:param access_token: the access token, as returned by :func:`get_access_token()`
:return: iNaturalist's JSON response, as a Python object
:raise: requests.HTTPError, if the call is not successful. iNaturalist returns an error 422 (unprocessable entity)
    if it rejects the observation data (for example an observation date in the future or a latitude > 90). In
    that case the exception's `response` attribute gives details about the errors.
allowed params: see https://www.inaturalist.org/pages/api+reference#post-observations
Example:
params = {'observation':
{'species_guess': 'Pieris rapae'},
}
TODO investigate: according to the doc, we should be able to pass multiple observations (in an array, renaming
'observation' to 'observations'), but as far as I saw they are not created (while a status of 200 is returned)
### Response:
def create_observations(params: Dict[str, Dict[str, Any]], access_token: str) -> List[Dict[str, Any]]:
"""Create a single or several (if passed an array) observations).
:param params:
:param access_token: the access token, as returned by :func:`get_access_token()`
:return: iNaturalist's JSON response, as a Python object
:raise: requests.HTTPError, if the call is not successful. iNaturalist returns an error 422 (unprocessable entity)
    if it rejects the observation data (for example an observation date in the future or a latitude > 90). In
    that case the exception's `response` attribute gives details about the errors.
allowed params: see https://www.inaturalist.org/pages/api+reference#post-observations
Example:
params = {'observation':
{'species_guess': 'Pieris rapae'},
}
    TODO investigate: according to the doc, we should be able to pass multiple observations (in an array, renaming
    'observation' to 'observations'), but as far as I saw they are not created (while a status of 200 is returned)
"""
response = requests.post(url="{base_url}/observations.json".format(base_url=INAT_BASE_URL),
json=params,
headers=_build_auth_header(access_token))
response.raise_for_status()
return response.json() |
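A hedged usage sketch for create_observations; get_access_token is the companion helper referenced in the docstring, and every credential below is a placeholder.

# Hypothetical sketch: authenticate, then submit one observation.
token = get_access_token('my_username', 'my_password', 'my_app_id', 'my_app_secret')
params = {'observation': {'species_guess': 'Pieris rapae'}}
response = create_observations(params=params, access_token=token)
print(response[0]['id'])      # id of the newly created observation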
def create_repo(self, repo_name=None, feed=None, envs=[], checksum_type="sha256", query='/repositories/'):
"""
`repo_name` - Name of repository to create
`feed` - Repo URL to feed from
`checksum_type` - Used for generating meta-data
Create repository in specified environments, associate the
yum_distributor with it and publish the repo
"""
data = {'display_name': repo_name,
'notes': {
'_repo-type': 'rpm-repo',
}
}
juicer.utils.Log.log_debug("Create Repo: %s", repo_name)
for env in envs:
if juicer.utils.repo_exists_p(repo_name, self.connectors[env], env):
juicer.utils.Log.log_info("repo `%s` already exists in %s... skipping!",
(repo_name, env))
continue
else:
data['relative_path'] = '/%s/%s/' % (env, repo_name)
data['id'] = '-'.join([repo_name, env])
_r = self.connectors[env].post(query, data)
if _r.status_code == Constants.PULP_POST_CREATED:
imp_query = '/repositories/%s/importers/' % data['id']
imp_data = {
'importer_id': 'yum_importer',
'importer_type_id': 'yum_importer',
'importer_config': {},
}
if feed:
imp_data['importer_config']['feed_url'] = feed
_r = self.connectors[env].post(imp_query, imp_data)
dist_query = '/repositories/%s/distributors/' % data['id']
dist_data = {'distributor_id': 'yum_distributor',
'distributor_type_id': 'yum_distributor',
'distributor_config': {
'relative_url': '/%s/%s/' % (env, repo_name),
'http': True,
'https': True,
'checksum_type': checksum_type
},
'auto_publish': True,
'relative_path': '/%s/%s/' % (env, repo_name)
}
_r = self.connectors[env].post(dist_query, dist_data)
if _r.status_code == Constants.PULP_POST_CREATED:
pub_query = '/repositories/%s/actions/publish/' % data['id']
pub_data = {'id': 'yum_distributor'}
_r = self.connectors[env].post(pub_query, pub_data)
if _r.status_code == Constants.PULP_POST_ACCEPTED:
juicer.utils.Log.log_info("created repo `%s` in %s", repo_name, env)
else:
_r.raise_for_status()
else:
_r.raise_for_status()
return True | `repo_name` - Name of repository to create
`feed` - Repo URL to feed from
`checksum_type` - Used for generating meta-data
Create repository in specified environments, associate the
yum_distributor with it and publish the repo | Below is the the instruction that describes the task:
### Input:
`repo_name` - Name of repository to create
`feed` - Repo URL to feed from
`checksum_type` - Used for generating meta-data
Create repository in specified environments, associate the
yum_distributor with it and publish the repo
### Response:
def create_repo(self, repo_name=None, feed=None, envs=[], checksum_type="sha256", query='/repositories/'):
"""
`repo_name` - Name of repository to create
`feed` - Repo URL to feed from
`checksum_type` - Used for generating meta-data
Create repository in specified environments, associate the
yum_distributor with it and publish the repo
"""
data = {'display_name': repo_name,
'notes': {
'_repo-type': 'rpm-repo',
}
}
juicer.utils.Log.log_debug("Create Repo: %s", repo_name)
for env in envs:
if juicer.utils.repo_exists_p(repo_name, self.connectors[env], env):
juicer.utils.Log.log_info("repo `%s` already exists in %s... skipping!",
(repo_name, env))
continue
else:
data['relative_path'] = '/%s/%s/' % (env, repo_name)
data['id'] = '-'.join([repo_name, env])
_r = self.connectors[env].post(query, data)
if _r.status_code == Constants.PULP_POST_CREATED:
imp_query = '/repositories/%s/importers/' % data['id']
imp_data = {
'importer_id': 'yum_importer',
'importer_type_id': 'yum_importer',
'importer_config': {},
}
if feed:
imp_data['importer_config']['feed_url'] = feed
_r = self.connectors[env].post(imp_query, imp_data)
dist_query = '/repositories/%s/distributors/' % data['id']
dist_data = {'distributor_id': 'yum_distributor',
'distributor_type_id': 'yum_distributor',
'distributor_config': {
'relative_url': '/%s/%s/' % (env, repo_name),
'http': True,
'https': True,
'checksum_type': checksum_type
},
'auto_publish': True,
'relative_path': '/%s/%s/' % (env, repo_name)
}
_r = self.connectors[env].post(dist_query, dist_data)
if _r.status_code == Constants.PULP_POST_CREATED:
pub_query = '/repositories/%s/actions/publish/' % data['id']
pub_data = {'id': 'yum_distributor'}
_r = self.connectors[env].post(pub_query, pub_data)
if _r.status_code == Constants.PULP_POST_ACCEPTED:
juicer.utils.Log.log_info("created repo `%s` in %s", repo_name, env)
else:
_r.raise_for_status()
else:
_r.raise_for_status()
return True |
def start(self, container, **kwargs):
"""
Identical to :meth:`docker.api.container.ContainerApiMixin.start` with additional logging.
"""
self.push_log("Starting container '{0}'.".format(container))
super(DockerFabricClient, self).start(container, **kwargs) | Identical to :meth:`docker.api.container.ContainerApiMixin.start` with additional logging. | Below is the the instruction that describes the task:
### Input:
Identical to :meth:`docker.api.container.ContainerApiMixin.start` with additional logging.
### Response:
def start(self, container, **kwargs):
"""
Identical to :meth:`docker.api.container.ContainerApiMixin.start` with additional logging.
"""
self.push_log("Starting container '{0}'.".format(container))
super(DockerFabricClient, self).start(container, **kwargs) |
def new():
"""Create a new community."""
form = CommunityForm(formdata=request.values)
ctx = mycommunities_ctx()
ctx.update({
'form': form,
'is_new': True,
'community': None,
})
if form.validate_on_submit():
data = copy.deepcopy(form.data)
community_id = data.pop('identifier')
del data['logo']
community = Community.create(
community_id, current_user.get_id(), **data)
file = request.files.get('logo', None)
if file:
if not community.save_logo(file.stream, file.filename):
form.logo.errors.append(_(
'Cannot add this file as a logo. Supported formats: '
'PNG, JPG and SVG. Max file size: 1.5 MB.'))
db.session.rollback()
community = None
if community:
db.session.commit()
flash("Community was successfully created.", category='success')
return redirect(url_for('.edit', community_id=community.id))
return render_template(
current_app.config['COMMUNITIES_NEW_TEMPLATE'],
community_form=form,
**ctx
) | Create a new community. | Below is the the instruction that describes the task:
### Input:
Create a new community.
### Response:
def new():
"""Create a new community."""
form = CommunityForm(formdata=request.values)
ctx = mycommunities_ctx()
ctx.update({
'form': form,
'is_new': True,
'community': None,
})
if form.validate_on_submit():
data = copy.deepcopy(form.data)
community_id = data.pop('identifier')
del data['logo']
community = Community.create(
community_id, current_user.get_id(), **data)
file = request.files.get('logo', None)
if file:
if not community.save_logo(file.stream, file.filename):
form.logo.errors.append(_(
'Cannot add this file as a logo. Supported formats: '
'PNG, JPG and SVG. Max file size: 1.5 MB.'))
db.session.rollback()
community = None
if community:
db.session.commit()
flash("Community was successfully created.", category='success')
return redirect(url_for('.edit', community_id=community.id))
return render_template(
current_app.config['COMMUNITIES_NEW_TEMPLATE'],
community_form=form,
**ctx
) |
def write(self):
"""Write the configuration to :attr:`path`"""
with open(self.path, 'w') as f:
self.config.write(f) | Write the configuration to :attr:`path` | Below is the the instruction that describes the task:
### Input:
Write the configuration to :attr:`path`
### Response:
def write(self):
"""Write the configuration to :attr:`path`"""
with open(self.path, 'w') as f:
self.config.write(f) |
def _RegisterProcess(self, process):
"""Registers a process with the engine.
Args:
process (MultiProcessBaseProcess): process.
Raises:
KeyError: if the process is already registered with the engine.
ValueError: if the process is missing.
"""
if process is None:
raise ValueError('Missing process.')
if process.pid in self._processes_per_pid:
raise KeyError(
'Already managing process: {0!s} (PID: {1:d})'.format(
process.name, process.pid))
self._processes_per_pid[process.pid] = process | Registers a process with the engine.
Args:
process (MultiProcessBaseProcess): process.
Raises:
KeyError: if the process is already registered with the engine.
ValueError: if the process is missing. | Below is the the instruction that describes the task:
### Input:
Registers a process with the engine.
Args:
process (MultiProcessBaseProcess): process.
Raises:
KeyError: if the process is already registered with the engine.
ValueError: if the process is missing.
### Response:
def _RegisterProcess(self, process):
"""Registers a process with the engine.
Args:
process (MultiProcessBaseProcess): process.
Raises:
KeyError: if the process is already registered with the engine.
ValueError: if the process is missing.
"""
if process is None:
raise ValueError('Missing process.')
if process.pid in self._processes_per_pid:
raise KeyError(
'Already managing process: {0!s} (PID: {1:d})'.format(
process.name, process.pid))
self._processes_per_pid[process.pid] = process |
def legacy_events_view(request):
"""
View to see legacy events.
"""
events = TeacherEvent.objects.all()
event_count = events.count()
paginator = Paginator(events, 100)
page = request.GET.get('page')
try:
events = paginator.page(page)
except PageNotAnInteger:
events = paginator.page(1)
except EmptyPage:
events = paginator.page(paginator.num_pages)
return render_to_response(
'teacher_events.html',
{'page_name': "Legacy Events",
'events': events,
'event_count': event_count,},
context_instance=RequestContext(request)
) | View to see legacy events. | Below is the the instruction that describes the task:
### Input:
View to see legacy events.
### Response:
def legacy_events_view(request):
"""
View to see legacy events.
"""
events = TeacherEvent.objects.all()
event_count = events.count()
paginator = Paginator(events, 100)
page = request.GET.get('page')
try:
events = paginator.page(page)
except PageNotAnInteger:
events = paginator.page(1)
except EmptyPage:
events = paginator.page(paginator.num_pages)
return render_to_response(
'teacher_events.html',
{'page_name': "Legacy Events",
'events': events,
'event_count': event_count,},
context_instance=RequestContext(request)
) |
def _uses_db(func, self, *args, **kwargs):
""" Use as a decorator for operations on the database, to ensure connection setup and
teardown. Can only be used on methods on objects with a `self.session` attribute.
"""
if not self.session:
_logger.debug('Creating new db session')
self._init_db_session()
try:
ret = func(self, *args, **kwargs)
self.session.commit()
except:
self.session.rollback()
tb = traceback.format_exc()
_logger.debug(tb)
raise
finally:
_logger.debug('Closing db session')
self.session.close()
return ret | Use as a decorator for operations on the database, to ensure connection setup and
teardown. Can only be used on methods on objects with a `self.session` attribute. | Below is the the instruction that describes the task:
### Input:
Use as a decorator for operations on the database, to ensure connection setup and
teardown. Can only be used on methods on objects with a `self.session` attribute.
### Response:
def _uses_db(func, self, *args, **kwargs):
""" Use as a decorator for operations on the database, to ensure connection setup and
teardown. Can only be used on methods on objects with a `self.session` attribute.
"""
if not self.session:
_logger.debug('Creating new db session')
self._init_db_session()
try:
ret = func(self, *args, **kwargs)
self.session.commit()
except:
self.session.rollback()
tb = traceback.format_exc()
_logger.debug(tb)
raise
finally:
_logger.debug('Closing db session')
self.session.close()
return ret |
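The (func, self, *args, **kwargs) signature above matches the caller convention of the `decorator` package, so a plausible, purely hypothetical wiring is sketched below; the WidgetStore class and its session factory are invented for illustration.

# Hypothetical sketch only: apply _uses_db via the PyPI `decorator` package.
import decorator

uses_db = decorator.decorator(_uses_db)

class WidgetStore:
    def __init__(self, session_factory):
        self.session = None
        self._session_factory = session_factory          # e.g. a SQLAlchemy sessionmaker()

    def _init_db_session(self):
        self.session = self._session_factory()

    @uses_db
    def add_widget(self, widget):
        self.session.add(widget)                         # commit/rollback/close handled by _uses_db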
def orient_undirected_graph(self, data, umg, alg='HC'):
"""Orient the undirected graph using GNN and apply CGNN to improve the graph.
Args:
data (pandas.DataFrame): Observational data on which causal
discovery has to be performed.
umg (nx.Graph): Graph that provides the skeleton, on which the GNN
then the CGNN algorithm will be applied.
alg (str): Exploration heuristic to use, among ["HC", "HCr",
"tabu", "EHC"]
Returns:
networkx.DiGraph: Solution given by CGNN.
.. note::
GNN (``cdt.causality.pairwise.GNN``) is first used to orient the
undirected graph and output a DAG before applying CGNN.
"""
warnings.warn("The pairwise GNN model is computed on each edge of the UMG "
"to initialize the model and start CGNN with a DAG")
gnn = GNN(nh=self.nh, lr=self.lr)
og = gnn.orient_graph(data, umg, nb_runs=self.nb_runs, nb_max_runs=self.nb_runs,
nb_jobs=self.nb_jobs, train_epochs=self.train_epochs,
test_epochs=self.test_epochs, verbose=self.verbose, gpu=self.gpu) # Pairwise method
# print(nx.adj_matrix(og).todense().shape)
# print(list(og.edges()))
dag = dagify_min_edge(og)
# print(nx.adj_matrix(dag).todense().shape)
return self.orient_directed_graph(data, dag, alg=alg) | Orient the undirected graph using GNN and apply CGNN to improve the graph.
Args:
data (pandas.DataFrame): Observational data on which causal
discovery has to be performed.
umg (nx.Graph): Graph that provides the skeleton, on which the GNN
then the CGNN algorithm will be applied.
alg (str): Exploration heuristic to use, among ["HC", "HCr",
"tabu", "EHC"]
Returns:
networkx.DiGraph: Solution given by CGNN.
.. note::
GNN (``cdt.causality.pairwise.GNN``) is first used to orient the
undirected graph and output a DAG before applying CGNN. | Below is the the instruction that describes the task:
### Input:
Orient the undirected graph using GNN and apply CGNN to improve the graph.
Args:
data (pandas.DataFrame): Observational data on which causal
discovery has to be performed.
umg (nx.Graph): Graph that provides the skeleton, on which the GNN
then the CGNN algorithm will be applied.
alg (str): Exploration heuristic to use, among ["HC", "HCr",
"tabu", "EHC"]
Returns:
networkx.DiGraph: Solution given by CGNN.
.. note::
GNN (``cdt.causality.pairwise.GNN``) is first used to orient the
undirected graph and output a DAG before applying CGNN.
### Response:
def orient_undirected_graph(self, data, umg, alg='HC'):
"""Orient the undirected graph using GNN and apply CGNN to improve the graph.
Args:
data (pandas.DataFrame): Observational data on which causal
discovery has to be performed.
umg (nx.Graph): Graph that provides the skeleton, on which the GNN
then the CGNN algorithm will be applied.
alg (str): Exploration heuristic to use, among ["HC", "HCr",
"tabu", "EHC"]
Returns:
networkx.DiGraph: Solution given by CGNN.
.. note::
GNN (``cdt.causality.pairwise.GNN``) is first used to orient the
undirected graph and output a DAG before applying CGNN.
"""
warnings.warn("The pairwise GNN model is computed on each edge of the UMG "
"to initialize the model and start CGNN with a DAG")
gnn = GNN(nh=self.nh, lr=self.lr)
og = gnn.orient_graph(data, umg, nb_runs=self.nb_runs, nb_max_runs=self.nb_runs,
nb_jobs=self.nb_jobs, train_epochs=self.train_epochs,
test_epochs=self.test_epochs, verbose=self.verbose, gpu=self.gpu) # Pairwise method
# print(nx.adj_matrix(og).todense().shape)
# print(list(og.edges()))
dag = dagify_min_edge(og)
# print(nx.adj_matrix(dag).todense().shape)
return self.orient_directed_graph(data, dag, alg=alg) |
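A hedged end-to-end sketch of reaching this method through the CGNN class of the cdt package; the data file, skeleton edges and hyper-parameter values are placeholders.

# Hypothetical sketch: orient an undirected skeleton with CGNN.
import networkx as nx
import pandas as pd
from cdt.causality.graph import CGNN

data = pd.read_csv('observations.csv')                   # placeholder observational data
skeleton = nx.Graph()                                    # placeholder undirected skeleton over the columns
skeleton.add_edges_from([('A', 'B'), ('B', 'C')])

model = CGNN(nh=20, nb_runs=4, train_epochs=500, test_epochs=250)
dag = model.orient_undirected_graph(data, skeleton)      # returns a networkx.DiGraph
print(list(dag.edges()))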
def gradient(self, P, Q, Y, i):
"""Computes the gradient of KL divergence with respect to the i'th example of Y"""
return 4 * sum([
(P[i, j] - Q[i, j]) * (Y[i] - Y[j]) * (1 + np.linalg.norm(Y[i] - Y[j]) ** 2) ** -1 \
for j in range(Y.shape[0])
]) | Computes the gradient of KL divergence with respect to the i'th example of Y | Below is the the instruction that describes the task:
### Input:
Computes the gradient of KL divergence with respect to the i'th example of Y
### Response:
def gradient(self, P, Q, Y, i):
"""Computes the gradient of KL divergence with respect to the i'th example of Y"""
return 4 * sum([
(P[i, j] - Q[i, j]) * (Y[i] - Y[j]) * (1 + np.linalg.norm(Y[i] - Y[j]) ** 2) ** -1 \
for j in range(Y.shape[0])
]) |
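For reference, the comprehension above is a direct transcription of the standard t-SNE gradient of the KL divergence with respect to the low-dimensional point $y_i$:

$\frac{\partial C}{\partial y_i} = 4 \sum_j (p_{ij} - q_{ij})\,(y_i - y_j)\,\bigl(1 + \lVert y_i - y_j \rVert^2\bigr)^{-1}$

so each neighbour $j$ pulls or pushes $y_i$ in proportion to the mismatch between the high- and low-dimensional affinities $p_{ij}$ and $q_{ij}$.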
def read_file(input_filename):
'''Like read() except uses a file rather than an IO stream, for convenience'''
with open(input_filename) as f:
g = GreenGenesTaxonomy.read(f)
return g | Like read() except uses a file rather than an IO stream, for convenience | Below is the the instruction that describes the task:
### Input:
Like read() except uses a file rather than an IO stream, for convenience
### Response:
def read_file(input_filename):
'''Like read() except uses a file rather than an IO stream, for convenience'''
with open(input_filename) as f:
g = GreenGenesTaxonomy.read(f)
return g |
def _color_attr(self, ground, attr):
"""
Given a color attribute, set the current cursor appropriately.
"""
attr = colors[ground][attr]
attrs = self.cursor_attributes
if ground == "foreground":
self.cursor_attributes = (attrs[0], attr, attrs[2])
elif ground == "background":
self.cursor_attributes = (attrs[0], attrs[1], attr) | Given a color attribute, set the current cursor appropriately. | Below is the the instruction that describes the task:
### Input:
Given a color attribute, set the current cursor appropriately.
### Response:
def _color_attr(self, ground, attr):
"""
Given a color attribute, set the current cursor appropriately.
"""
attr = colors[ground][attr]
attrs = self.cursor_attributes
if ground == "foreground":
self.cursor_attributes = (attrs[0], attr, attrs[2])
elif ground == "background":
self.cursor_attributes = (attrs[0], attrs[1], attr) |
def query(self, msg_type, query, *parameters, **options):
'''Performs a query against a q service.
In typical use case, `query` is the name of the function to call and
`parameters` are its parameters. When `parameters` list is empty, the
query can be an arbitrary q expression (e.g. ``0 +/ til 100``).
    Calls an anonymous function with a single parameter:
>>> q.query(qconnection.MessageType.SYNC,'{til x}', 10)
Executes a q expression:
>>> q.query(qconnection.MessageType.SYNC,'til 10')
:Parameters:
- `msg_type` (one of the constants defined in :class:`.MessageType`) -
type of the query to be executed
- `query` (`string`) - query to be executed
- `parameters` (`list` or `None`) - parameters for the query
:Options:
- `single_char_strings` (`boolean`) - if ``True`` single char Python
strings are encoded as q strings instead of chars,
**Default**: ``False``
:raises: :class:`.QConnectionException`, :class:`.QWriterException`
'''
if not self._connection:
raise QConnectionException('Connection is not established.')
if parameters and len(parameters) > 8:
raise QWriterException('Too many parameters.')
if not parameters or len(parameters) == 0:
self._writer.write(query, msg_type, **self._options.union_dict(**options))
else:
self._writer.write([query] + list(parameters), msg_type, **self._options.union_dict(**options)) | Performs a query against a q service.
In typical use case, `query` is the name of the function to call and
`parameters` are its parameters. When `parameters` list is empty, the
query can be an arbitrary q expression (e.g. ``0 +/ til 100``).
    Calls an anonymous function with a single parameter:
>>> q.query(qconnection.MessageType.SYNC,'{til x}', 10)
Executes a q expression:
>>> q.query(qconnection.MessageType.SYNC,'til 10')
:Parameters:
- `msg_type` (one of the constants defined in :class:`.MessageType`) -
type of the query to be executed
- `query` (`string`) - query to be executed
- `parameters` (`list` or `None`) - parameters for the query
:Options:
- `single_char_strings` (`boolean`) - if ``True`` single char Python
strings are encoded as q strings instead of chars,
**Default**: ``False``
:raises: :class:`.QConnectionException`, :class:`.QWriterException` | Below is the the instruction that describes the task:
### Input:
Performs a query against a q service.
In typical use case, `query` is the name of the function to call and
`parameters` are its parameters. When `parameters` list is empty, the
query can be an arbitrary q expression (e.g. ``0 +/ til 100``).
Calls an anonymous function with a single parameter:
>>> q.query(qconnection.MessageType.SYNC,'{til x}', 10)
Executes a q expression:
>>> q.query(qconnection.MessageType.SYNC,'til 10')
:Parameters:
- `msg_type` (one of the constants defined in :class:`.MessageType`) -
type of the query to be executed
- `query` (`string`) - query to be executed
- `parameters` (`list` or `None`) - parameters for the query
:Options:
- `single_char_strings` (`boolean`) - if ``True`` single char Python
strings are encoded as q strings instead of chars,
**Default**: ``False``
:raises: :class:`.QConnectionException`, :class:`.QWriterException`
### Response:
def query(self, msg_type, query, *parameters, **options):
'''Performs a query against a q service.
In typical use case, `query` is the name of the function to call and
`parameters` are its parameters. When `parameters` list is empty, the
query can be an arbitrary q expression (e.g. ``0 +/ til 100``).
    Calls an anonymous function with a single parameter:
>>> q.query(qconnection.MessageType.SYNC,'{til x}', 10)
Executes a q expression:
>>> q.query(qconnection.MessageType.SYNC,'til 10')
:Parameters:
- `msg_type` (one of the constants defined in :class:`.MessageType`) -
type of the query to be executed
- `query` (`string`) - query to be executed
- `parameters` (`list` or `None`) - parameters for the query
:Options:
- `single_char_strings` (`boolean`) - if ``True`` single char Python
strings are encoded as q strings instead of chars,
**Default**: ``False``
:raises: :class:`.QConnectionException`, :class:`.QWriterException`
'''
if not self._connection:
raise QConnectionException('Connection is not established.')
if parameters and len(parameters) > 8:
raise QWriterException('Too many parameters.')
if not parameters or len(parameters) == 0:
self._writer.write(query, msg_type, **self._options.union_dict(**options))
else:
self._writer.write([query] + list(parameters), msg_type, **self._options.union_dict(**options)) |
def _add_getter(self):
"""
Add a read-only ``{prop_name}`` property to the element class for
this child element.
"""
property_ = property(self._getter, None, None)
# assign unconditionally to overwrite element name definition
setattr(self._element_cls, self._prop_name, property_) | Add a read-only ``{prop_name}`` property to the element class for
this child element. | Below is the the instruction that describes the task:
### Input:
Add a read-only ``{prop_name}`` property to the element class for
this child element.
### Response:
def _add_getter(self):
"""
Add a read-only ``{prop_name}`` property to the element class for
this child element.
"""
property_ = property(self._getter, None, None)
# assign unconditionally to overwrite element name definition
setattr(self._element_cls, self._prop_name, property_) |
def decorate(self, function_or_name):
'''Decorate a function to time the execution
The method can be called with or without a name. If no name is given
    the name defaults to the name of the decorated function.
:keyword function_or_name: The name to post to or the function to wrap
>>> from statsd import Timer
>>> timer = Timer('application_name')
>>>
>>> @timer.decorate
... def some_function():
... # resulting timer name: application_name.some_function
... pass
>>>
>>> @timer.decorate('my_timer')
... def some_other_function():
... # resulting timer name: application_name.my_timer
... pass
'''
if callable(function_or_name):
return self._decorate(function_or_name.__name__, function_or_name)
else:
return partial(self._decorate, function_or_name) | Decorate a function to time the execution
The method can be called with or without a name. If no name is given
    the name defaults to the name of the decorated function.
:keyword function_or_name: The name to post to or the function to wrap
>>> from statsd import Timer
>>> timer = Timer('application_name')
>>>
>>> @timer.decorate
... def some_function():
... # resulting timer name: application_name.some_function
... pass
>>>
>>> @timer.decorate('my_timer')
... def some_other_function():
... # resulting timer name: application_name.my_timer
... pass | Below is the the instruction that describes the task:
### Input:
Decorate a function to time the execution
The method can be called with or without a name. If no name is given
the name defaults to the name of the decorated function.
:keyword function_or_name: The name to post to or the function to wrap
>>> from statsd import Timer
>>> timer = Timer('application_name')
>>>
>>> @timer.decorate
... def some_function():
... # resulting timer name: application_name.some_function
... pass
>>>
>>> @timer.decorate('my_timer')
... def some_other_function():
... # resulting timer name: application_name.my_timer
... pass
### Response:
def decorate(self, function_or_name):
'''Decorate a function to time the execution
The method can be called with or without a name. If no name is given
    the name defaults to the name of the decorated function.
:keyword function_or_name: The name to post to or the function to wrap
>>> from statsd import Timer
>>> timer = Timer('application_name')
>>>
>>> @timer.decorate
... def some_function():
... # resulting timer name: application_name.some_function
... pass
>>>
>>> @timer.decorate('my_timer')
... def some_other_function():
... # resulting timer name: application_name.my_timer
... pass
'''
if callable(function_or_name):
return self._decorate(function_or_name.__name__, function_or_name)
else:
return partial(self._decorate, function_or_name) |
def setComponentByPosition(self, idx, value=noValue,
verifyConstraints=True,
matchTags=True,
matchConstraints=True):
"""Assign |ASN.1| type component by position.
Equivalent to Python sequence item assignment operation (e.g. `[]`).
Parameters
----------
idx: :class:`int`
Component index (zero-based). Must either refer to existing
component or to N+1 component. In the latter case a new component
type gets instantiated (if *componentType* is set, or given ASN.1
object is taken otherwise) and appended to the |ASN.1| sequence.
Keyword Args
------------
value: :class:`object` or :py:class:`~pyasn1.type.base.PyAsn1Item` derivative
A Python value to initialize |ASN.1| component with (if *componentType* is set)
or ASN.1 value object to assign to |ASN.1| component. Once a new value is
set to *idx* component, previous value is dropped.
verifyConstraints : :class:`bool`
If `False`, skip constraints validation
matchTags: :class:`bool`
If `False`, skip component tags matching
matchConstraints: :class:`bool`
If `False`, skip component constraints matching
Returns
-------
self
"""
oldIdx = self._currentIdx
Set.setComponentByPosition(self, idx, value, verifyConstraints, matchTags, matchConstraints)
self._currentIdx = idx
if oldIdx is not None and oldIdx != idx:
self._componentValues[oldIdx] = noValue
return self | Assign |ASN.1| type component by position.
Equivalent to Python sequence item assignment operation (e.g. `[]`).
Parameters
----------
idx: :class:`int`
Component index (zero-based). Must either refer to existing
component or to N+1 component. In the latter case a new component
type gets instantiated (if *componentType* is set, or given ASN.1
object is taken otherwise) and appended to the |ASN.1| sequence.
Keyword Args
------------
value: :class:`object` or :py:class:`~pyasn1.type.base.PyAsn1Item` derivative
A Python value to initialize |ASN.1| component with (if *componentType* is set)
or ASN.1 value object to assign to |ASN.1| component. Once a new value is
set to *idx* component, previous value is dropped.
verifyConstraints : :class:`bool`
If `False`, skip constraints validation
matchTags: :class:`bool`
If `False`, skip component tags matching
matchConstraints: :class:`bool`
If `False`, skip component constraints matching
Returns
-------
self | Below is the the instruction that describes the task:
### Input:
Assign |ASN.1| type component by position.
Equivalent to Python sequence item assignment operation (e.g. `[]`).
Parameters
----------
idx: :class:`int`
Component index (zero-based). Must either refer to existing
component or to N+1 component. In the latter case a new component
type gets instantiated (if *componentType* is set, or given ASN.1
object is taken otherwise) and appended to the |ASN.1| sequence.
Keyword Args
------------
value: :class:`object` or :py:class:`~pyasn1.type.base.PyAsn1Item` derivative
A Python value to initialize |ASN.1| component with (if *componentType* is set)
or ASN.1 value object to assign to |ASN.1| component. Once a new value is
set to *idx* component, previous value is dropped.
verifyConstraints : :class:`bool`
If `False`, skip constraints validation
matchTags: :class:`bool`
If `False`, skip component tags matching
matchConstraints: :class:`bool`
If `False`, skip component constraints matching
Returns
-------
self
### Response:
def setComponentByPosition(self, idx, value=noValue,
verifyConstraints=True,
matchTags=True,
matchConstraints=True):
"""Assign |ASN.1| type component by position.
Equivalent to Python sequence item assignment operation (e.g. `[]`).
Parameters
----------
idx: :class:`int`
Component index (zero-based). Must either refer to existing
component or to N+1 component. In the latter case a new component
type gets instantiated (if *componentType* is set, or given ASN.1
object is taken otherwise) and appended to the |ASN.1| sequence.
Keyword Args
------------
value: :class:`object` or :py:class:`~pyasn1.type.base.PyAsn1Item` derivative
A Python value to initialize |ASN.1| component with (if *componentType* is set)
or ASN.1 value object to assign to |ASN.1| component. Once a new value is
set to *idx* component, previous value is dropped.
verifyConstraints : :class:`bool`
If `False`, skip constraints validation
matchTags: :class:`bool`
If `False`, skip component tags matching
matchConstraints: :class:`bool`
If `False`, skip component constraints matching
Returns
-------
self
"""
oldIdx = self._currentIdx
Set.setComponentByPosition(self, idx, value, verifyConstraints, matchTags, matchConstraints)
self._currentIdx = idx
if oldIdx is not None and oldIdx != idx:
self._componentValues[oldIdx] = noValue
return self |
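The _currentIdx bookkeeping above (selecting one position and dropping the previously selected one) is what the pyasn1 Choice type layers on top of Set; a hypothetical sketch follows, with the Identifier type invented for illustration.

# Hypothetical sketch; Identifier is a made-up Choice type.
from pyasn1.type import char, namedtype, univ

class Identifier(univ.Choice):
    componentType = namedtype.NamedTypes(
        namedtype.NamedType('number', univ.Integer()),
        namedtype.NamedType('name', char.UTF8String())
    )

ident = Identifier()
ident.setComponentByPosition(0, 42)            # selects the 'number' alternative
ident.setComponentByPosition(1, 'router-1')    # re-selects; the previous 'number' value is dropped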
def update_user(self, user_id, **kwargs):
"""Update user properties of specified user.
:param str user_id: The ID of the user to update (Required)
:param str username: The unique username of the user
:param str email: The unique email of the user
:param str full_name: The full name of the user
:param str password: The password string of the user.
:param str phone_number: Phone number of the user
:param bool terms_accepted: Is 'General Terms & Conditions' accepted
:param bool marketing_accepted: Is receiving marketing information accepted?
:returns: the updated user object
:rtype: User
"""
api = self._get_api(iam.AccountAdminApi)
user = User._create_request_map(kwargs)
body = iam.UserUpdateReq(**user)
return User(api.update_user(user_id, body)) | Update user properties of specified user.
:param str user_id: The ID of the user to update (Required)
:param str username: The unique username of the user
:param str email: The unique email of the user
:param str full_name: The full name of the user
:param str password: The password string of the user.
:param str phone_number: Phone number of the user
:param bool terms_accepted: Is 'General Terms & Conditions' accepted
:param bool marketing_accepted: Is receiving marketing information accepted?
:returns: the updated user object
:rtype: User | Below is the the instruction that describes the task:
### Input:
Update user properties of specified user.
:param str user_id: The ID of the user to update (Required)
:param str username: The unique username of the user
:param str email: The unique email of the user
:param str full_name: The full name of the user
:param str password: The password string of the user.
:param str phone_number: Phone number of the user
:param bool terms_accepted: Is 'General Terms & Conditions' accepted
:param bool marketing_accepted: Is receiving marketing information accepted?
:returns: the updated user object
:rtype: User
### Response:
def update_user(self, user_id, **kwargs):
"""Update user properties of specified user.
:param str user_id: The ID of the user to update (Required)
:param str username: The unique username of the user
:param str email: The unique email of the user
:param str full_name: The full name of the user
:param str password: The password string of the user.
:param str phone_number: Phone number of the user
:param bool terms_accepted: Is 'General Terms & Conditions' accepted
:param bool marketing_accepted: Is receiving marketing information accepted?
:returns: the updated user object
:rtype: User
"""
api = self._get_api(iam.AccountAdminApi)
user = User._create_request_map(kwargs)
body = iam.UserUpdateReq(**user)
return User(api.update_user(user_id, body)) |
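A hedged usage sketch; AccountManagementAPI is assumed to be the public SDK wrapper that exposes update_user, and the user id and field values are placeholders.

# Hypothetical sketch: change a user's display name and phone number.
from mbed_cloud import AccountManagementAPI

api = AccountManagementAPI()                     # credentials resolved from env/config as usual for the SDK
updated = api.update_user(
    user_id='015f4ac587f500000000000100100249',  # placeholder id
    full_name='Jane Doe',
    phone_number='+44 20 7946 0000',
)
print(updated.full_name)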
def write_file_chunk(
file_path: str,
packed_chunk: str,
append: bool = True,
offset: int = -1
):
"""
Write or append the specified chunk data to the given file path, unpacking
the chunk before writing. If the file does not yet exist, it will be
created. Set the append argument to False if you do not want the chunk
to be appended to an existing file.
:param file_path:
The file where the chunk will be written or appended
:param packed_chunk:
The packed chunk data to write to the file. It will be unpacked before
the file is written.
:param append:
Whether or not the chunk should be appended to the existing file. If
False the chunk data will overwrite the existing file.
:param offset:
The byte offset in the file where the chunk should be written.
If the value is less than zero, the chunk will be written or appended
based on the `append` argument. Note that if you indicate an append
write mode and an offset, the mode will be forced to write instead of
append.
"""
mode = 'ab' if append else 'wb'
contents = unpack_chunk(packed_chunk)
writer.write_file(file_path, contents, mode=mode, offset=offset) | Write or append the specified chunk data to the given file path, unpacking
the chunk before writing. If the file does not yet exist, it will be
created. Set the append argument to False if you do not want the chunk
to be appended to an existing file.
:param file_path:
The file where the chunk will be written or appended
:param packed_chunk:
The packed chunk data to write to the file. It will be unpacked before
the file is written.
:param append:
Whether or not the chunk should be appended to the existing file. If
False the chunk data will overwrite the existing file.
:param offset:
The byte offset in the file where the chunk should be written.
If the value is less than zero, the chunk will be written or appended
based on the `append` argument. Note that if you indicate an append
write mode and an offset, the mode will be forced to write instead of
append. | Below is the the instruction that describes the task:
### Input:
Write or append the specified chunk data to the given file path, unpacking
the chunk before writing. If the file does not yet exist, it will be
created. Set the append argument to False if you do not want the chunk
to be appended to an existing file.
:param file_path:
The file where the chunk will be written or appended
:param packed_chunk:
The packed chunk data to write to the file. It will be unpacked before
the file is written.
:param append:
Whether or not the chunk should be appended to the existing file. If
False the chunk data will overwrite the existing file.
:param offset:
The byte offset in the file where the chunk should be written.
If the value is less than zero, the chunk will be written or appended
based on the `append` argument. Note that if you indicate an append
write mode and an offset, the mode will be forced to write instead of
append.
### Response:
def write_file_chunk(
file_path: str,
packed_chunk: str,
append: bool = True,
offset: int = -1
):
"""
Write or append the specified chunk data to the given file path, unpacking
the chunk before writing. If the file does not yet exist, it will be
created. Set the append argument to False if you do not want the chunk
to be appended to an existing file.
:param file_path:
The file where the chunk will be written or appended
:param packed_chunk:
The packed chunk data to write to the file. It will be unpacked before
the file is written.
:param append:
Whether or not the chunk should be appended to the existing file. If
False the chunk data will overwrite the existing file.
:param offset:
The byte offset in the file where the chunk should be written.
If the value is less than zero, the chunk will be written or appended
based on the `append` argument. Note that if you indicate an append
write mode and an offset, the mode will be forced to write instead of
append.
"""
mode = 'ab' if append else 'wb'
contents = unpack_chunk(packed_chunk)
writer.write_file(file_path, contents, mode=mode, offset=offset) |
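A hypothetical sketch of reassembling a file from packed chunks; fetch_packed_chunks stands in for whatever produced the chunks (a matching packing step is assumed elsewhere in the module).

# Hypothetical sketch: write the first chunk fresh, then append the rest in order.
destination = '/tmp/restored.dat'
packed_chunks = fetch_packed_chunks()            # hypothetical source of packed chunk strings
for index, packed in enumerate(packed_chunks):
    write_file_chunk(destination, packed, append=(index > 0))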
def update_region_to_controller(self, region):
"""
Update the controller string with dynamic region info.
Controller string should end up as `<name[-env]>.<region>.cloudgenix.com`
**Parameters:**
- **region:** region string.
**Returns:** No return value, mutates the controller in the class namespace
"""
# default region position in a list
region_position = 1
# Check for a global "ignore region" flag
if self.ignore_region:
# bypass
api_logger.debug("IGNORE_REGION set, not updating controller region.")
return
api_logger.debug("Updating Controller Region")
api_logger.debug("CONTROLLER = %s", self.controller)
api_logger.debug("CONTROLLER_ORIG = %s", self.controller_orig)
api_logger.debug("CONTROLLER_REGION = %s", self.controller_region)
# Check if this is an initial region use or an update region use
if self.controller_orig:
controller_base = self.controller_orig
else:
controller_base = self.controller
self.controller_orig = self.controller
# splice controller string
controller_full_part_list = controller_base.split('.')
for idx, part in enumerate(controller_full_part_list):
# is the region already in the controller string?
if region == part:
                # yes, controller already has appropriate region
api_logger.debug("REGION %s ALREADY IN CONTROLLER AT INDEX = %s", region, idx)
# update region if it is not already set.
if self.controller_region != region:
self.controller_region = region
api_logger.debug("UPDATED_CONTROLLER_REGION = %s", self.controller_region)
return
controller_part_count = len(controller_full_part_list)
# handle short domain case
if controller_part_count > 1:
# insert region
controller_full_part_list[region_position] = region
self.controller = ".".join(controller_full_part_list)
else:
# short domain, just add region
self.controller = ".".join(controller_full_part_list) + '.' + region
# update SDK vars with region info
self.controller_orig = controller_base
self.controller_region = region
api_logger.debug("UPDATED_CONTROLLER = %s", self.controller)
api_logger.debug("UPDATED_CONTROLLER_ORIG = %s", self.controller_orig)
api_logger.debug("UPDATED_CONTROLLER_REGION = %s", self.controller_region)
return | Update the controller string with dynamic region info.
Controller string should end up as `<name[-env]>.<region>.cloudgenix.com`
**Parameters:**
- **region:** region string.
**Returns:** No return value, mutates the controller in the class namespace | Below is the the instruction that describes the task:
### Input:
Update the controller string with dynamic region info.
Controller string should end up as `<name[-env]>.<region>.cloudgenix.com`
**Parameters:**
- **region:** region string.
**Returns:** No return value, mutates the controller in the class namespace
### Response:
def update_region_to_controller(self, region):
"""
Update the controller string with dynamic region info.
Controller string should end up as `<name[-env]>.<region>.cloudgenix.com`
**Parameters:**
- **region:** region string.
**Returns:** No return value, mutates the controller in the class namespace
"""
# default region position in a list
region_position = 1
# Check for a global "ignore region" flag
if self.ignore_region:
# bypass
api_logger.debug("IGNORE_REGION set, not updating controller region.")
return
api_logger.debug("Updating Controller Region")
api_logger.debug("CONTROLLER = %s", self.controller)
api_logger.debug("CONTROLLER_ORIG = %s", self.controller_orig)
api_logger.debug("CONTROLLER_REGION = %s", self.controller_region)
# Check if this is an initial region use or an update region use
if self.controller_orig:
controller_base = self.controller_orig
else:
controller_base = self.controller
self.controller_orig = self.controller
# splice controller string
controller_full_part_list = controller_base.split('.')
for idx, part in enumerate(controller_full_part_list):
# is the region already in the controller string?
if region == part:
                # yes, controller already has appropriate region
api_logger.debug("REGION %s ALREADY IN CONTROLLER AT INDEX = %s", region, idx)
# update region if it is not already set.
if self.controller_region != region:
self.controller_region = region
api_logger.debug("UPDATED_CONTROLLER_REGION = %s", self.controller_region)
return
controller_part_count = len(controller_full_part_list)
# handle short domain case
if controller_part_count > 1:
# insert region
controller_full_part_list[region_position] = region
self.controller = ".".join(controller_full_part_list)
else:
# short domain, just add region
self.controller = ".".join(controller_full_part_list) + '.' + region
# update SDK vars with region info
self.controller_orig = controller_base
self.controller_region = region
api_logger.debug("UPDATED_CONTROLLER = %s", self.controller)
api_logger.debug("UPDATED_CONTROLLER_ORIG = %s", self.controller_orig)
api_logger.debug("UPDATED_CONTROLLER_REGION = %s", self.controller_region)
return |
def clone(srcpath, destpath, vcs=None):
"""Clone an existing repository.
:param str srcpath: Path to an existing repository
:param str destpath: Desired path of new repository
:param str vcs: Either ``git``, ``hg``, or ``svn``
:returns VCSRepo: The newly cloned repository
If ``vcs`` is not given, then the repository type is discovered from
``srcpath`` via :func:`probe`.
"""
vcs = vcs or probe(srcpath)
cls = _get_repo_class(vcs)
return cls.clone(srcpath, destpath) | Clone an existing repository.
:param str srcpath: Path to an existing repository
:param str destpath: Desired path of new repository
:param str vcs: Either ``git``, ``hg``, or ``svn``
:returns VCSRepo: The newly cloned repository
If ``vcs`` is not given, then the repository type is discovered from
``srcpath`` via :func:`probe`. | Below is the the instruction that describes the task:
### Input:
Clone an existing repository.
:param str srcpath: Path to an existing repository
:param str destpath: Desired path of new repository
:param str vcs: Either ``git``, ``hg``, or ``svn``
:returns VCSRepo: The newly cloned repository
If ``vcs`` is not given, then the repository type is discovered from
``srcpath`` via :func:`probe`.
### Response:
def clone(srcpath, destpath, vcs=None):
"""Clone an existing repository.
:param str srcpath: Path to an existing repository
:param str destpath: Desired path of new repository
:param str vcs: Either ``git``, ``hg``, or ``svn``
:returns VCSRepo: The newly cloned repository
If ``vcs`` is not given, then the repository type is discovered from
``srcpath`` via :func:`probe`.
"""
vcs = vcs or probe(srcpath)
cls = _get_repo_class(vcs)
return cls.clone(srcpath, destpath) |
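A short usage sketch; both paths are placeholders and the concrete class of the returned repository depends on which backend probe() detects.

# Clone without knowing the VCS in advance; probe() picks the backend from srcpath.
repo = clone('/srv/checkouts/project', '/tmp/project-copy')
print(type(repo).__name__)

# Or force a specific backend explicitly.
repo = clone('/srv/checkouts/project', '/tmp/project-copy2', vcs='git')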
def select(self):
"""
Select the current item and run it
"""
self.selected_option = self.current_option
self.selected_item.set_up()
self.selected_item.action()
self.selected_item.clean_up()
self.returned_value = self.selected_item.get_return()
self.should_exit = self.selected_item.should_exit
if not self.should_exit:
self.draw() | Select the current item and run it | Below is the the instruction that describes the task:
### Input:
Select the current item and run it
### Response:
def select(self):
"""
Select the current item and run it
"""
self.selected_option = self.current_option
self.selected_item.set_up()
self.selected_item.action()
self.selected_item.clean_up()
self.returned_value = self.selected_item.get_return()
self.should_exit = self.selected_item.should_exit
if not self.should_exit:
self.draw() |
def record_set_create_or_update(name, zone_name, resource_group, record_type, **kwargs):
'''
.. versionadded:: Fluorine
Creates or updates a record set within a DNS zone.
:param name: The name of the record set, relative to the name of the zone.
:param zone_name: The name of the DNS zone (without a terminating dot).
:param resource_group: The name of the resource group.
:param record_type: The type of DNS record in this record set. Record sets of type SOA can be
updated but not created (they are created when the DNS zone is created).
Possible values include: 'A', 'AAAA', 'CAA', 'CNAME', 'MX', 'NS', 'PTR', 'SOA', 'SRV', 'TXT'
CLI Example:
.. code-block:: bash
salt-call azurearm_dns.record_set_create_or_update myhost myzone testgroup A
arecords='[{ipv4_address: 10.0.0.1}]' ttl=300
'''
dnsconn = __utils__['azurearm.get_client']('dns', **kwargs)
try:
record_set_model = __utils__['azurearm.create_object_model']('dns', 'RecordSet', **kwargs)
except TypeError as exc:
result = {'error': 'The object model could not be built. ({0})'.format(str(exc))}
return result
try:
record_set = dnsconn.record_sets.create_or_update(
relative_record_set_name=name,
zone_name=zone_name,
resource_group_name=resource_group,
record_type=record_type,
parameters=record_set_model,
if_match=kwargs.get('if_match'),
if_none_match=kwargs.get('if_none_match')
)
result = record_set.as_dict()
except CloudError as exc:
__utils__['azurearm.log_cloud_error']('dns', str(exc), **kwargs)
result = {'error': str(exc)}
except SerializationError as exc:
result = {'error': 'The object model could not be parsed. ({0})'.format(str(exc))}
return result | .. versionadded:: Fluorine
Creates or updates a record set within a DNS zone.
:param name: The name of the record set, relative to the name of the zone.
:param zone_name: The name of the DNS zone (without a terminating dot).
:param resource_group: The name of the resource group.
:param record_type: The type of DNS record in this record set. Record sets of type SOA can be
updated but not created (they are created when the DNS zone is created).
Possible values include: 'A', 'AAAA', 'CAA', 'CNAME', 'MX', 'NS', 'PTR', 'SOA', 'SRV', 'TXT'
CLI Example:
.. code-block:: bash
salt-call azurearm_dns.record_set_create_or_update myhost myzone testgroup A
arecords='[{ipv4_address: 10.0.0.1}]' ttl=300 | Below is the the instruction that describes the task:
### Input:
.. versionadded:: Fluorine
Creates or updates a record set within a DNS zone.
:param name: The name of the record set, relative to the name of the zone.
:param zone_name: The name of the DNS zone (without a terminating dot).
:param resource_group: The name of the resource group.
:param record_type: The type of DNS record in this record set. Record sets of type SOA can be
updated but not created (they are created when the DNS zone is created).
Possible values include: 'A', 'AAAA', 'CAA', 'CNAME', 'MX', 'NS', 'PTR', 'SOA', 'SRV', 'TXT'
CLI Example:
.. code-block:: bash
salt-call azurearm_dns.record_set_create_or_update myhost myzone testgroup A
arecords='[{ipv4_address: 10.0.0.1}]' ttl=300
### Response:
def record_set_create_or_update(name, zone_name, resource_group, record_type, **kwargs):
'''
.. versionadded:: Fluorine
Creates or updates a record set within a DNS zone.
:param name: The name of the record set, relative to the name of the zone.
:param zone_name: The name of the DNS zone (without a terminating dot).
:param resource_group: The name of the resource group.
:param record_type: The type of DNS record in this record set. Record sets of type SOA can be
updated but not created (they are created when the DNS zone is created).
Possible values include: 'A', 'AAAA', 'CAA', 'CNAME', 'MX', 'NS', 'PTR', 'SOA', 'SRV', 'TXT'
CLI Example:
.. code-block:: bash
salt-call azurearm_dns.record_set_create_or_update myhost myzone testgroup A
arecords='[{ipv4_address: 10.0.0.1}]' ttl=300
'''
dnsconn = __utils__['azurearm.get_client']('dns', **kwargs)
try:
record_set_model = __utils__['azurearm.create_object_model']('dns', 'RecordSet', **kwargs)
except TypeError as exc:
result = {'error': 'The object model could not be built. ({0})'.format(str(exc))}
return result
try:
record_set = dnsconn.record_sets.create_or_update(
relative_record_set_name=name,
zone_name=zone_name,
resource_group_name=resource_group,
record_type=record_type,
parameters=record_set_model,
if_match=kwargs.get('if_match'),
if_none_match=kwargs.get('if_none_match')
)
result = record_set.as_dict()
except CloudError as exc:
__utils__['azurearm.log_cloud_error']('dns', str(exc), **kwargs)
result = {'error': str(exc)}
except SerializationError as exc:
result = {'error': 'The object model could not be parsed. ({0})'.format(str(exc))}
return result |
def smart_truncate(string, max_length=0, word_boundary=False, separator=' ', save_order=False):
"""
Truncate a string.
:param string (str): string for modification
:param max_length (int): output string length
    :param word_boundary (bool): if True, truncate only at word (separator) boundaries
:param save_order (bool): if True then word order of output string is like input string
:param separator (str): separator between words
:return:
"""
string = string.strip(separator)
if not max_length:
return string
if len(string) < max_length:
return string
if not word_boundary:
return string[:max_length].strip(separator)
if separator not in string:
return string[:max_length]
truncated = ''
for word in string.split(separator):
if word:
next_len = len(truncated) + len(word)
if next_len < max_length:
truncated += '{0}{1}'.format(word, separator)
elif next_len == max_length:
truncated += '{0}'.format(word)
break
else:
if save_order:
break
if not truncated: # pragma: no cover
truncated = string[:max_length]
return truncated.strip(separator) | Truncate a string.
:param string (str): string for modification
:param max_length (int): output string length
:param word_boundary (bool): if True, truncate only at word (separator) boundaries
:param save_order (bool): if True then word order of output string is like input string
:param separator (str): separator between words
:return: | Below is the the instruction that describes the task:
### Input:
Truncate a string.
:param string (str): string for modification
:param max_length (int): output string length
:param word_boundary (bool): if True, truncate only at word (separator) boundaries
:param save_order (bool): if True then word order of output string is like input string
:param separator (str): separator between words
:return:
### Response:
def smart_truncate(string, max_length=0, word_boundary=False, separator=' ', save_order=False):
"""
Truncate a string.
:param string (str): string for modification
:param max_length (int): output string length
    :param word_boundary (bool): if True, truncate only at word (separator) boundaries
:param save_order (bool): if True then word order of output string is like input string
:param separator (str): separator between words
:return:
"""
string = string.strip(separator)
if not max_length:
return string
if len(string) < max_length:
return string
if not word_boundary:
return string[:max_length].strip(separator)
if separator not in string:
return string[:max_length]
truncated = ''
for word in string.split(separator):
if word:
next_len = len(truncated) + len(word)
if next_len < max_length:
truncated += '{0}{1}'.format(word, separator)
elif next_len == max_length:
truncated += '{0}'.format(word)
break
else:
if save_order:
break
if not truncated: # pragma: no cover
truncated = string[:max_length]
return truncated.strip(separator) |
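A few worked calls tracing the word_boundary and save_order behaviour straight from the code above.

smart_truncate('this is a test', max_length=7, word_boundary=True)                   # -> 'this is'
smart_truncate('this is a test', max_length=6, word_boundary=True)                   # -> 'this a' (a later, shorter word may still fit)
smart_truncate('this is a test', max_length=6, word_boundary=True, save_order=True)  # -> 'this'
smart_truncate('this is a test', max_length=6)                                       # -> 'this i'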
def grant_winsta_and_desktop(th):
'''
Grant the token's user access to the current process's window station and
desktop.
'''
current_sid = win32security.GetTokenInformation(th, win32security.TokenUser)[0]
# Add permissions for the sid to the current windows station and thread id.
# This prevents windows error 0xC0000142.
winsta = win32process.GetProcessWindowStation()
set_user_perm(winsta, WINSTA_ALL, current_sid)
desktop = win32service.GetThreadDesktop(win32api.GetCurrentThreadId())
set_user_perm(desktop, DESKTOP_ALL, current_sid) | Grant the token's user access to the current process's window station and
desktop. | Below is the the instruction that describes the task:
### Input:
Grant the token's user access to the current process's window station and
desktop.
### Response:
def grant_winsta_and_desktop(th):
'''
Grant the token's user access to the current process's window station and
desktop.
'''
current_sid = win32security.GetTokenInformation(th, win32security.TokenUser)[0]
# Add permissions for the sid to the current windows station and thread id.
# This prevents windows error 0xC0000142.
winsta = win32process.GetProcessWindowStation()
set_user_perm(winsta, WINSTA_ALL, current_sid)
desktop = win32service.GetThreadDesktop(win32api.GetCurrentThreadId())
set_user_perm(desktop, DESKTOP_ALL, current_sid) |
def _load(formula):
'''
Generates a list of salt://<formula>/defaults.(json|yaml) files
and fetches them from the Salt master.
Returns first defaults file as python dict.
'''
# Compute possibilities
_mk_client()
paths = []
for ext in ('yaml', 'json'):
source_url = salt.utils.url.create(formula + '/defaults.' + ext)
paths.append(source_url)
# Fetch files from master
defaults_files = __context__['cp.fileclient'].cache_files(paths)
for file_ in defaults_files:
if not file_:
# Skip empty string returned by cp.fileclient.cache_files.
continue
suffix = file_.rsplit('.', 1)[-1]
if suffix == 'yaml':
loader = salt.utils.yaml.safe_load
elif suffix == 'json':
loader = salt.utils.json.load
else:
log.debug("Failed to determine loader for %r", file_)
continue
if os.path.exists(file_):
log.debug("Reading defaults from %r", file_)
with salt.utils.files.fopen(file_) as fhr:
defaults = loader(fhr)
log.debug("Read defaults %r", defaults)
return defaults or {} | Generates a list of salt://<formula>/defaults.(json|yaml) files
and fetches them from the Salt master.
Returns first defaults file as python dict. | Below is the the instruction that describes the task:
### Input:
Generates a list of salt://<formula>/defaults.(json|yaml) files
and fetches them from the Salt master.
Returns first defaults file as python dict.
### Response:
def _load(formula):
'''
Generates a list of salt://<formula>/defaults.(json|yaml) files
and fetches them from the Salt master.
Returns first defaults file as python dict.
'''
# Compute possibilities
_mk_client()
paths = []
for ext in ('yaml', 'json'):
source_url = salt.utils.url.create(formula + '/defaults.' + ext)
paths.append(source_url)
# Fetch files from master
defaults_files = __context__['cp.fileclient'].cache_files(paths)
for file_ in defaults_files:
if not file_:
# Skip empty string returned by cp.fileclient.cache_files.
continue
suffix = file_.rsplit('.', 1)[-1]
if suffix == 'yaml':
loader = salt.utils.yaml.safe_load
elif suffix == 'json':
loader = salt.utils.json.load
else:
log.debug("Failed to determine loader for %r", file_)
continue
if os.path.exists(file_):
log.debug("Reading defaults from %r", file_)
with salt.utils.files.fopen(file_) as fhr:
defaults = loader(fhr)
log.debug("Read defaults %r", defaults)
return defaults or {} |
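A hedged usage sketch: this helper only works inside a running Salt minion, where __context__ and the master fileserver are available; the formula name 'apache' is a placeholder.

    defaults = _load('apache')       # fetches salt://apache/defaults.yaml or defaults.json
    port = defaults.get('port', 80)  # a plain dict ({} when the defaults file is empty)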
def grab_filt(self, filt, analyte=None):
"""
Flexible access to specific filter using any key format.
Parameters
----------
        filt : str, dict or bool
either logical filter expression, dict of expressions,
or a boolean
analyte : str
name of analyte the filter is for.
Returns
-------
array_like
boolean filter
"""
if isinstance(filt, str):
if filt in self.components:
if analyte is None:
return self.components[filt]
else:
if self.switches[analyte][filt]:
return self.components[filt]
else:
try:
ind = self.make_fromkey(filt)
except KeyError:
print(("\n\n***Filter key invalid. Please consult "
"manual and try again."))
elif isinstance(filt, dict):
try:
ind = self.make_fromkey(filt[analyte])
except ValueError:
print(("\n\n***Filter key invalid. Please consult manual "
"and try again.\nOR\nAnalyte missing from filter "
"key dict."))
elif filt:
ind = self.make(analyte)
else:
ind = ~np.zeros(self.size, dtype=bool)
return ind | Flexible access to specific filter using any key format.
Parameters
----------
filt : str, dict or bool
either logical filter expression, dict of expressions,
or a boolean
analyte : str
name of analyte the filter is for.
Returns
-------
array_like
boolean filter | Below is the the instruction that describes the task:
### Input:
Flexible access to specific filter using any key format.
Parameters
----------
filt : str, dict or bool
either logical filter expression, dict of expressions,
or a boolean
analyte : str
name of analyte the filter is for.
Returns
-------
array_like
boolean filter
### Response:
def grab_filt(self, filt, analyte=None):
"""
Flexible access to specific filter using any key format.
Parameters
----------
        filt : str, dict or bool
either logical filter expression, dict of expressions,
or a boolean
analyte : str
name of analyte the filter is for.
Returns
-------
array_like
boolean filter
"""
if isinstance(filt, str):
if filt in self.components:
if analyte is None:
return self.components[filt]
else:
if self.switches[analyte][filt]:
return self.components[filt]
else:
try:
ind = self.make_fromkey(filt)
except KeyError:
print(("\n\n***Filter key invalid. Please consult "
"manual and try again."))
elif isinstance(filt, dict):
try:
ind = self.make_fromkey(filt[analyte])
except ValueError:
print(("\n\n***Filter key invalid. Please consult manual "
"and try again.\nOR\nAnalyte missing from filter "
"key dict."))
elif filt:
ind = self.make(analyte)
else:
ind = ~np.zeros(self.size, dtype=bool)
return ind |
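A hypothetical sketch of typical calls; `filt` is assumed to be the filter collection attached to a latools-style data object, and the filter and analyte names are placeholders.

    mask = filt.grab_filt('above_threshold', analyte='Ca43')  # one named filter component
    mask_all = filt.grab_filt(True, analyte='Ca43')           # every switched-on filter for the analyte
    mask_none = filt.grab_filt(False)                         # all-True mask (nothing filtered out)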
def parse_deltat_data(fileobj):
"""Parse the United States Naval Observatory ``deltat.data`` file.
    Each line of the file gives the date and the value of Delta T::
2016 2 1 68.1577
This function returns a 2xN array of raw Julian dates and matching
Delta T values.
"""
array = np.loadtxt(fileobj)
year, month, day = array[-1,:3].astype(int)
expiration_date = date(year + 1, month, day)
year, month, day, delta_t = array.T
data = np.array((julian_date(year, month, day), delta_t))
return expiration_date, data | Parse the United States Naval Observatory ``deltat.data`` file.
Each line of the file gives the date and the value of Delta T::
2016 2 1 68.1577
This function returns a 2xN array of raw Julian dates and matching
Delta T values. | Below is the the instruction that describes the task:
### Input:
Parse the United States Naval Observatory ``deltat.data`` file.
Each line of the file gives the date and the value of Delta T::
2016 2 1 68.1577
This function returns a 2xN array of raw Julian dates and matching
Delta T values.
### Response:
def parse_deltat_data(fileobj):
"""Parse the United States Naval Observatory ``deltat.data`` file.
    Each line of the file gives the date and the value of Delta T::
2016 2 1 68.1577
This function returns a 2xN array of raw Julian dates and matching
Delta T values.
"""
array = np.loadtxt(fileobj)
year, month, day = array[-1,:3].astype(int)
expiration_date = date(year + 1, month, day)
year, month, day, delta_t = array.T
data = np.array((julian_date(year, month, day), delta_t))
return expiration_date, data |
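A self-contained sketch using an in-memory snippet of the file format; `julian_date` is assumed to be the helper defined alongside this parser in the same module.

    from io import BytesIO

    sample = b"2016  1  1  68.1024\n2016  2  1  68.1577\n"
    expiration_date, data = parse_deltat_data(BytesIO(sample))
    # expiration_date -> datetime.date(2017, 2, 1); data[0] holds Julian dates,
    # data[1] the matching Delta T values.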
def lu_solve_ATAI(A, rho, b, lu, piv, check_finite=True):
r"""
Solve the linear system :math:`(A^T A + \rho I)\mathbf{x} = \mathbf{b}`
or :math:`(A^T A + \rho I)X = B` using :func:`scipy.linalg.lu_solve`.
Parameters
----------
A : array_like
Matrix :math:`A`
rho : float
Scalar :math:`\rho`
b : array_like
Vector :math:`\mathbf{b}` or matrix :math:`B`
lu : array_like
Matrix containing U in its upper triangle, and L in its lower triangle,
as returned by :func:`scipy.linalg.lu_factor`
piv : array_like
Pivot indices representing the permutation matrix P, as returned by
:func:`scipy.linalg.lu_factor`
    check_finite : bool, optional (default True)
Flag indicating whether the input array should be checked for Inf
and NaN values
Returns
-------
x : ndarray
Solution to the linear system
"""
N, M = A.shape
if N >= M:
x = linalg.lu_solve((lu, piv), b, check_finite=check_finite)
else:
x = (b - A.T.dot(linalg.lu_solve((lu, piv), A.dot(b), 1,
check_finite=check_finite))) / rho
return x | r"""
Solve the linear system :math:`(A^T A + \rho I)\mathbf{x} = \mathbf{b}`
or :math:`(A^T A + \rho I)X = B` using :func:`scipy.linalg.lu_solve`.
Parameters
----------
A : array_like
Matrix :math:`A`
rho : float
Scalar :math:`\rho`
b : array_like
Vector :math:`\mathbf{b}` or matrix :math:`B`
lu : array_like
Matrix containing U in its upper triangle, and L in its lower triangle,
as returned by :func:`scipy.linalg.lu_factor`
piv : array_like
Pivot indices representing the permutation matrix P, as returned by
:func:`scipy.linalg.lu_factor`
check_finite : bool, optional (default True)
Flag indicating whether the input array should be checked for Inf
and NaN values
Returns
-------
x : ndarray
Solution to the linear system | Below is the the instruction that describes the task:
### Input:
r"""
Solve the linear system :math:`(A^T A + \rho I)\mathbf{x} = \mathbf{b}`
or :math:`(A^T A + \rho I)X = B` using :func:`scipy.linalg.lu_solve`.
Parameters
----------
A : array_like
Matrix :math:`A`
rho : float
Scalar :math:`\rho`
b : array_like
Vector :math:`\mathbf{b}` or matrix :math:`B`
lu : array_like
Matrix containing U in its upper triangle, and L in its lower triangle,
as returned by :func:`scipy.linalg.lu_factor`
piv : array_like
Pivot indices representing the permutation matrix P, as returned by
:func:`scipy.linalg.lu_factor`
check_finite : bool, optional (default True)
Flag indicating whether the input array should be checked for Inf
and NaN values
Returns
-------
x : ndarray
Solution to the linear system
### Response:
def lu_solve_ATAI(A, rho, b, lu, piv, check_finite=True):
r"""
Solve the linear system :math:`(A^T A + \rho I)\mathbf{x} = \mathbf{b}`
or :math:`(A^T A + \rho I)X = B` using :func:`scipy.linalg.lu_solve`.
Parameters
----------
A : array_like
Matrix :math:`A`
rho : float
Scalar :math:`\rho`
b : array_like
Vector :math:`\mathbf{b}` or matrix :math:`B`
lu : array_like
Matrix containing U in its upper triangle, and L in its lower triangle,
as returned by :func:`scipy.linalg.lu_factor`
piv : array_like
Pivot indices representing the permutation matrix P, as returned by
:func:`scipy.linalg.lu_factor`
    check_finite : bool, optional (default True)
Flag indicating whether the input array should be checked for Inf
and NaN values
Returns
-------
x : ndarray
Solution to the linear system
"""
N, M = A.shape
if N >= M:
x = linalg.lu_solve((lu, piv), b, check_finite=check_finite)
else:
x = (b - A.T.dot(linalg.lu_solve((lu, piv), A.dot(b), 1,
check_finite=check_finite))) / rho
return x |
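A sketch of the intended workflow for the N >= M case: factor A^T A + rho*I once with lu_factor, then reuse the factorization for each right-hand side.

    import numpy as np
    from scipy import linalg

    A = np.random.randn(8, 5)
    rho = 0.1
    b = np.random.randn(5)
    lu, piv = linalg.lu_factor(A.T.dot(A) + rho * np.eye(5))
    x = lu_solve_ATAI(A, rho, b, lu, piv)  # solves (A^T A + rho*I) x = b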
def remove(self, item, count=0):
""" Removes @item from the list for @count number of occurences """
        self._client.lrem(self.key_prefix, count, self._dumps(item)) | Removes @item from the list for @count number of occurrences | Below is the the instruction that describes the task:
### Input:
Removes @item from the list for @count number of occurrences
### Response:
def remove(self, item, count=0):
""" Removes @item from the list for @count number of occurences """
self._client.lrem(self.key_prefix, count, self._dumps(item)) |
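Hypothetical usage, assuming `queue` is an instance of the Redis-backed list wrapper this method belongs to.

    queue.remove('retry-job')     # count=0 removes every matching entry
    queue.remove('retry-job', 1)  # remove only the first occurrence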
def expandf(m, format): # noqa A002
"""Expand the string using the format replace pattern or function."""
_assert_expandable(format, True)
return _apply_replace_backrefs(m, format, flags=FORMAT) | Expand the string using the format replace pattern or function. | Below is the the instruction that describes the task:
### Input:
Expand the string using the format replace pattern or function.
### Response:
def expandf(m, format): # noqa A002
"""Expand the string using the format replace pattern or function."""
_assert_expandable(format, True)
return _apply_replace_backrefs(m, format, flags=FORMAT) |
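A minimal sketch of the format-style expansion this wrapper provides: with the FORMAT flag, capture groups are referenced as {1}, {2} rather than \1, \2 (behaviour assumed from the surrounding library).

    import re

    m = re.match(r'(\w+) (\w+)', 'hello world')
    print(expandf(m, '{2} {1}'))  # -> 'world hello'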
def smart_insert(cls, engine_or_session, data, minimal_size=5, op_counter=0):
"""
An optimized Insert strategy.
        :return: number of insert operations executed. Usually it is
            greatly smaller than ``len(data)``.
        **中文文档**
        For inserts, if it is known in advance that no IntegrityError can occur, a bulk
        insert is far faster than inserting rows one by one. When that cannot be known,
        the following strategy is used:
        1. Try a bulk insert first; nothing is committed until the end, so it is fast.
        2. If it fails, split the data into chunks of roughly the square root of the
           row count and apply the same logic to each chunk.
        3. If a chunk still fails, keep splitting; once a chunk is smaller than a
           certain size, fall back to inserting rows one by one until everything succeeds.
        This strategy needs roughly sqrt(nbytes) of extra memory, which is small
        compared with the original data, and on average it is the fastest option.
"""
ses, auto_close = ensure_session(engine_or_session)
if isinstance(data, list):
            # First, try a bulk insert
try:
ses.add_all(data)
ses.commit()
op_counter += 1
            # The bulk insert failed
except (IntegrityError, FlushError):
ses.rollback()
                # Check how many rows there are
n = len(data)
                # If there are more rows than the threshold
if n >= minimal_size ** 2:
                    # split the data into chunks
n_chunk = math.floor(math.sqrt(n))
for chunk in grouper_list(data, n_chunk):
op_counter = cls.smart_insert(
ses, chunk, minimal_size, op_counter)
                # otherwise insert the rows one by one
else:
for obj in data:
try:
ses.add(obj)
ses.commit()
op_counter += 1
except (IntegrityError, FlushError):
ses.rollback()
else:
try:
ses.add(data)
ses.commit()
except (IntegrityError, FlushError):
ses.rollback()
if auto_close:
ses.close()
return op_counter | An optimized Insert strategy.
:return: number of insert operations executed. Usually it is
    greatly smaller than ``len(data)``.
**中文文档**
For inserts, if it is known in advance that no IntegrityError can occur, a bulk
insert is far faster than inserting rows one by one. When that cannot be known,
the following strategy is used:
1. Try a bulk insert first; nothing is committed until the end, so it is fast.
2. If it fails, split the data into chunks of roughly the square root of the
row count and apply the same logic to each chunk.
3. If a chunk still fails, keep splitting; once a chunk is smaller than a
certain size, fall back to inserting rows one by one until everything succeeds.
This strategy needs roughly sqrt(nbytes) of extra memory, which is small
compared with the original data, and on average it is the fastest option. | Below is the the instruction that describes the task:
### Input:
An optimized Insert strategy.
:return: number of insert operations executed. Usually it is
    greatly smaller than ``len(data)``.
**中文文档**
For inserts, if it is known in advance that no IntegrityError can occur, a bulk
insert is far faster than inserting rows one by one. When that cannot be known,
the following strategy is used:
1. Try a bulk insert first; nothing is committed until the end, so it is fast.
2. If it fails, split the data into chunks of roughly the square root of the
row count and apply the same logic to each chunk.
3. If a chunk still fails, keep splitting; once a chunk is smaller than a
certain size, fall back to inserting rows one by one until everything succeeds.
This strategy needs roughly sqrt(nbytes) of extra memory, which is small
compared with the original data, and on average it is the fastest option.
### Response:
def smart_insert(cls, engine_or_session, data, minimal_size=5, op_counter=0):
"""
An optimized Insert strategy.
        :return: number of insert operations executed. Usually it is
            greatly smaller than ``len(data)``.
        **中文文档**
        For inserts, if it is known in advance that no IntegrityError can occur, a bulk
        insert is far faster than inserting rows one by one. When that cannot be known,
        the following strategy is used:
        1. Try a bulk insert first; nothing is committed until the end, so it is fast.
        2. If it fails, split the data into chunks of roughly the square root of the
           row count and apply the same logic to each chunk.
        3. If a chunk still fails, keep splitting; once a chunk is smaller than a
           certain size, fall back to inserting rows one by one until everything succeeds.
        This strategy needs roughly sqrt(nbytes) of extra memory, which is small
        compared with the original data, and on average it is the fastest option.
"""
ses, auto_close = ensure_session(engine_or_session)
if isinstance(data, list):
            # First, try a bulk insert
try:
ses.add_all(data)
ses.commit()
op_counter += 1
            # The bulk insert failed
except (IntegrityError, FlushError):
ses.rollback()
                # Check how many rows there are
n = len(data)
                # If there are more rows than the threshold
if n >= minimal_size ** 2:
                    # split the data into chunks
n_chunk = math.floor(math.sqrt(n))
for chunk in grouper_list(data, n_chunk):
op_counter = cls.smart_insert(
ses, chunk, minimal_size, op_counter)
                # otherwise insert the rows one by one
else:
for obj in data:
try:
ses.add(obj)
ses.commit()
op_counter += 1
except (IntegrityError, FlushError):
ses.rollback()
else:
try:
ses.add(data)
ses.commit()
except (IntegrityError, FlushError):
ses.rollback()
if auto_close:
ses.close()
return op_counter |
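A hedged sketch: `User` is a hypothetical declarative model assumed to inherit the mixin class that defines this classmethod, and `engine` an already-created SQLAlchemy engine; rows that would violate a unique constraint are skipped without aborting the whole batch.

    users = [User(id=i, name='user-%d' % i) for i in range(1000)]
    n_ops = User.smart_insert(engine, users)  # usually far fewer than 1000 commits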
def get_documents(self, doc_format='dict'):
""" Get the documents returned from Storege in this response.
Keyword args:
doc_format -- Specifies the doc_format for the returned documents.
Can be 'dict', 'etree' or 'string'. Default is 'dict'.
Returns:
A dict where keys are document ids and values depending of the required doc_format:
A dict representations of documents (see etree_to_dict());
A etree Element representing the document;
A raw XML document string.
Raises:
ParameterError -- The doc_format value is not allowed.
"""
def get_doc_id(root, rel_path):
if not rel_path:
return root.text
else:
child = root.find(rel_path[0])
if child is None:
return None
return get_doc_id(child, rel_path[1:])
if doc_format == 'dict':
return dict([(get_doc_id(document, self._id_xpath), etree_to_dict(document)['document']) for
document in self._get_doc_list()])
elif doc_format == 'etree':
return dict([(get_doc_id(document, self._id_xpath), document) for
document in self._get_doc_list()])
elif doc_format == 'list-etree':
return self._get_doc_list()
elif doc_format == 'list-string':
return list([(ET.tostring(document)) for
document in self._get_doc_list()])
elif doc_format in ('', None, 'string'):
return dict([(get_doc_id(document, self._id_xpath), ET.tostring(document)) for
document in self._get_doc_list()])
else:
            raise ParameterError("doc_format=" + doc_format) | Get the documents returned from Storage in this response.
Keyword args:
doc_format -- Specifies the doc_format for the returned documents.
Can be 'dict', 'etree' or 'string'. Default is 'dict'.
Returns:
A dict where keys are document ids and values depending of the required doc_format:
A dict representations of documents (see etree_to_dict());
A etree Element representing the document;
A raw XML document string.
Raises:
ParameterError -- The doc_format value is not allowed. | Below is the the instruction that describes the task:
### Input:
Get the documents returned from Storage in this response.
Keyword args:
doc_format -- Specifies the doc_format for the returned documents.
Can be 'dict', 'etree' or 'string'. Default is 'dict'.
Returns:
A dict where keys are document ids and values depending of the required doc_format:
A dict representations of documents (see etree_to_dict());
A etree Element representing the document;
A raw XML document string.
Raises:
ParameterError -- The doc_format value is not allowed.
### Response:
def get_documents(self, doc_format='dict'):
""" Get the documents returned from Storege in this response.
Keyword args:
doc_format -- Specifies the doc_format for the returned documents.
Can be 'dict', 'etree' or 'string'. Default is 'dict'.
Returns:
A dict where keys are document ids and values depending of the required doc_format:
A dict representations of documents (see etree_to_dict());
A etree Element representing the document;
A raw XML document string.
Raises:
ParameterError -- The doc_format value is not allowed.
"""
def get_doc_id(root, rel_path):
if not rel_path:
return root.text
else:
child = root.find(rel_path[0])
if child is None:
return None
return get_doc_id(child, rel_path[1:])
if doc_format == 'dict':
return dict([(get_doc_id(document, self._id_xpath), etree_to_dict(document)['document']) for
document in self._get_doc_list()])
elif doc_format == 'etree':
return dict([(get_doc_id(document, self._id_xpath), document) for
document in self._get_doc_list()])
elif doc_format == 'list-etree':
return self._get_doc_list()
elif doc_format == 'list-string':
return list([(ET.tostring(document)) for
document in self._get_doc_list()])
elif doc_format in ('', None, 'string'):
return dict([(get_doc_id(document, self._id_xpath), ET.tostring(document)) for
document in self._get_doc_list()])
else:
raise ParameterError("doc_format=" + doc_format) |
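Hypothetical usage, assuming `response` is an instance of this response class returned by an earlier storage query.

    docs = response.get_documents()                      # {doc_id: nested dict}
    trees = response.get_documents(doc_format='etree')   # {doc_id: Element}
    raw = response.get_documents(doc_format='string')    # {doc_id: serialized XML}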
def dist_string(self, val_meters):
'''return a distance as a string'''
if self.settings.dist_unit == 'nm':
return "%.1fnm" % (val_meters * 0.000539957)
if self.settings.dist_unit == 'miles':
return "%.1fmiles" % (val_meters * 0.000621371)
return "%um" % val_meters | return a distance as a string | Below is the the instruction that describes the task:
### Input:
return a distance as a string
### Response:
def dist_string(self, val_meters):
'''return a distance as a string'''
if self.settings.dist_unit == 'nm':
return "%.1fnm" % (val_meters * 0.000539957)
if self.settings.dist_unit == 'miles':
return "%.1fmiles" % (val_meters * 0.000621371)
return "%um" % val_meters |
def branches(self):
# type: () -> List[str]
""" List of all branches this commit is a part of. """
if self._branches is None:
cmd = 'git branch --contains {}'.format(self.sha1)
out = shell.run(
cmd,
capture=True,
never_pretend=True
).stdout.strip()
self._branches = [x.strip('* \t\n') for x in out.splitlines()]
return self._branches | List of all branches this commit is a part of. | Below is the the instruction that describes the task:
### Input:
List of all branches this commit is a part of.
### Response:
def branches(self):
# type: () -> List[str]
""" List of all branches this commit is a part of. """
if self._branches is None:
cmd = 'git branch --contains {}'.format(self.sha1)
out = shell.run(
cmd,
capture=True,
never_pretend=True
).stdout.strip()
self._branches = [x.strip('* \t\n') for x in out.splitlines()]
return self._branches |
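A hypothetical sketch, assuming `commit` is an instance of the commit wrapper this property belongs to, created inside a git checkout.

    if 'master' in commit.branches:
        print('{} already reached master'.format(commit.sha1))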
def _vax_to_ieee_single_float(data):
"""Converts a float in Vax format to IEEE format.
data should be a single string of chars that have been read in from
a binary file. These will be processed 4 at a time into float values.
Thus the total number of byte/chars in the string should be divisible
by 4.
Based on VAX data organization in a byte file, we need to do a bunch of
bitwise operations to separate out the numbers that correspond to the
sign, the exponent and the fraction portions of this floating point
number
role : S EEEEEEEE FFFFFFF FFFFFFFF FFFFFFFF
bits : 1 2 9 10 32
bytes : byte2 byte1 byte4 byte3
"""
f = []
nfloat = int(len(data) / 4)
for i in range(nfloat):
byte2 = data[0 + i*4]
byte1 = data[1 + i*4]
byte4 = data[2 + i*4]
byte3 = data[3 + i*4]
# hex 0x80 = binary mask 10000000
# hex 0x7f = binary mask 01111111
sign = (byte1 & 0x80) >> 7
expon = ((byte1 & 0x7f) << 1) + ((byte2 & 0x80) >> 7)
fract = ((byte2 & 0x7f) << 16) + (byte3 << 8) + byte4
if sign == 0:
sign_mult = 1.0
else:
sign_mult = -1.0
if 0 < expon:
# note 16777216.0 == 2^24
val = sign_mult * (0.5 + (fract/16777216.0)) * pow(2.0, expon - 128.0)
f.append(val)
elif expon == 0 and sign == 0:
f.append(0)
else:
f.append(0)
# may want to raise an exception here ...
return f | Converts a float in Vax format to IEEE format.
data should be a single string of chars that have been read in from
a binary file. These will be processed 4 at a time into float values.
Thus the total number of byte/chars in the string should be divisible
by 4.
Based on VAX data organization in a byte file, we need to do a bunch of
bitwise operations to separate out the numbers that correspond to the
sign, the exponent and the fraction portions of this floating point
number
role : S EEEEEEEE FFFFFFF FFFFFFFF FFFFFFFF
bits : 1 2 9 10 32
bytes : byte2 byte1 byte4 byte3 | Below is the the instruction that describes the task:
### Input:
Converts a float in Vax format to IEEE format.
data should be a single string of chars that have been read in from
a binary file. These will be processed 4 at a time into float values.
Thus the total number of byte/chars in the string should be divisible
by 4.
Based on VAX data organization in a byte file, we need to do a bunch of
bitwise operations to separate out the numbers that correspond to the
sign, the exponent and the fraction portions of this floating point
number
role : S EEEEEEEE FFFFFFF FFFFFFFF FFFFFFFF
bits : 1 2 9 10 32
bytes : byte2 byte1 byte4 byte3
### Response:
def _vax_to_ieee_single_float(data):
"""Converts a float in Vax format to IEEE format.
data should be a single string of chars that have been read in from
a binary file. These will be processed 4 at a time into float values.
Thus the total number of byte/chars in the string should be divisible
by 4.
Based on VAX data organization in a byte file, we need to do a bunch of
bitwise operations to separate out the numbers that correspond to the
sign, the exponent and the fraction portions of this floating point
number
role : S EEEEEEEE FFFFFFF FFFFFFFF FFFFFFFF
bits : 1 2 9 10 32
bytes : byte2 byte1 byte4 byte3
"""
f = []
nfloat = int(len(data) / 4)
for i in range(nfloat):
byte2 = data[0 + i*4]
byte1 = data[1 + i*4]
byte4 = data[2 + i*4]
byte3 = data[3 + i*4]
# hex 0x80 = binary mask 10000000
# hex 0x7f = binary mask 01111111
sign = (byte1 & 0x80) >> 7
expon = ((byte1 & 0x7f) << 1) + ((byte2 & 0x80) >> 7)
fract = ((byte2 & 0x7f) << 16) + (byte3 << 8) + byte4
if sign == 0:
sign_mult = 1.0
else:
sign_mult = -1.0
if 0 < expon:
# note 16777216.0 == 2^24
val = sign_mult * (0.5 + (fract/16777216.0)) * pow(2.0, expon - 128.0)
f.append(val)
elif expon == 0 and sign == 0:
f.append(0)
else:
f.append(0)
# may want to raise an exception here ...
return f |
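A worked example (assuming Python 3 bytes input, whose items index as ints): the VAX F-float encoding of 1.0 is the byte sequence 80 40 00 00 in file order.

    raw = bytes([0x80, 0x40, 0x00, 0x00])
    _vax_to_ieee_single_float(raw)  # -> [1.0]  (sign 0, exponent 129, fraction 0)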
def process_full_position(data, header, var_only=False):
"""
Return genetic data when all alleles called on same line.
    Returns a list containing one dict that includes, among other fields:
(string) chromosome
(string) start position (1-based)
(array of strings) matching dbSNP entries
(string) reference allele sequence
(array of strings) the genome's allele sequences
"""
feature_type = data[header['varType']]
# Skip unmatchable, uncovered, or pseudoautosomal-in-X
if (feature_type == 'no-ref' or feature_type.startswith('PAR-called-in-X')):
return None
if var_only and feature_type in ['no-call', 'ref']:
return None
filters = []
if feature_type == 'no-call':
filters.append('NOCALL')
if 'varQuality' in header:
if 'VQLOW' in data[header['varQuality']]:
filters.append('VQLOW')
else:
var_filter = data[header['varFilter']]
if var_filter and not var_filter == "PASS":
filters = filters + var_filter.split(';')
chrom = data[header['chromosome']]
start = data[header['begin']]
ref_allele = data[header['reference']]
alleles = [data[header['alleleSeq']]]
dbsnp_data = []
dbsnp_data = data[header['xRef']].split(';')
assert data[header['ploidy']] in ['1', '2']
if feature_type == 'ref' or feature_type == 'no-call':
return [{'chrom': chrom,
'start': start,
'dbsnp_data': dbsnp_data,
'ref_seq': ref_allele,
'alleles': alleles,
'allele_count': data[header['ploidy']],
'filters': filters,
'end': data[header['end']]}]
else:
return [{'chrom': chrom,
'start': start,
'dbsnp_data': dbsnp_data,
'ref_seq': ref_allele,
'alleles': alleles,
'allele_count': data[header['ploidy']],
'filters': filters}] | Return genetic data when all alleles called on same line.
Returns a list containing one dict that includes, among other fields:
(string) chromosome
(string) start position (1-based)
(array of strings) matching dbSNP entries
(string) reference allele sequence
(array of strings) the genome's allele sequences | Below is the the instruction that describes the task:
### Input:
Return genetic data when all alleles called on same line.
Returns a list containing one dict that includes, among other fields:
(string) chromosome
(string) start position (1-based)
(array of strings) matching dbSNP entries
(string) reference allele sequence
(array of strings) the genome's allele sequences
### Response:
def process_full_position(data, header, var_only=False):
"""
Return genetic data when all alleles called on same line.
    Returns a list containing one dict that includes, among other fields:
(string) chromosome
(string) start position (1-based)
(array of strings) matching dbSNP entries
(string) reference allele sequence
(array of strings) the genome's allele sequences
"""
feature_type = data[header['varType']]
# Skip unmatchable, uncovered, or pseudoautosomal-in-X
if (feature_type == 'no-ref' or feature_type.startswith('PAR-called-in-X')):
return None
if var_only and feature_type in ['no-call', 'ref']:
return None
filters = []
if feature_type == 'no-call':
filters.append('NOCALL')
if 'varQuality' in header:
if 'VQLOW' in data[header['varQuality']]:
filters.append('VQLOW')
else:
var_filter = data[header['varFilter']]
if var_filter and not var_filter == "PASS":
filters = filters + var_filter.split(';')
chrom = data[header['chromosome']]
start = data[header['begin']]
ref_allele = data[header['reference']]
alleles = [data[header['alleleSeq']]]
dbsnp_data = []
dbsnp_data = data[header['xRef']].split(';')
assert data[header['ploidy']] in ['1', '2']
if feature_type == 'ref' or feature_type == 'no-call':
return [{'chrom': chrom,
'start': start,
'dbsnp_data': dbsnp_data,
'ref_seq': ref_allele,
'alleles': alleles,
'allele_count': data[header['ploidy']],
'filters': filters,
'end': data[header['end']]}]
else:
return [{'chrom': chrom,
'start': start,
'dbsnp_data': dbsnp_data,
'ref_seq': ref_allele,
'alleles': alleles,
'allele_count': data[header['ploidy']],
'filters': filters}] |
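An illustrative call with a made-up column layout; real Complete Genomics var files define the actual column order in their own header line.

    header = {'varType': 0, 'chromosome': 1, 'begin': 2, 'end': 3, 'varFilter': 4,
              'reference': 5, 'alleleSeq': 6, 'xRef': 7, 'ploidy': 8}
    row = ['ref', 'chr1', '999', '1000', 'PASS', 'A', 'A', '', '2']
    process_full_position(row, header)
    # -> [{'chrom': 'chr1', 'start': '999', 'ref_seq': 'A', 'alleles': ['A'], ...}]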
def get_rotation_and_scale(header, skew_threshold=0.001):
"""Calculate rotation and CDELT."""
((xrot, yrot),
(cdelt1, cdelt2)) = get_xy_rotation_and_scale(header)
if math.fabs(xrot) - math.fabs(yrot) > skew_threshold:
raise ValueError("Skew detected: xrot=%.4f yrot=%.4f" % (
xrot, yrot))
rot = yrot
lonpole = float(header.get('LONPOLE', 180.0))
if lonpole != 180.0:
rot += 180.0 - lonpole
return (rot, cdelt1, cdelt2) | Calculate rotation and CDELT. | Below is the the instruction that describes the task:
### Input:
Calculate rotation and CDELT.
### Response:
def get_rotation_and_scale(header, skew_threshold=0.001):
"""Calculate rotation and CDELT."""
((xrot, yrot),
(cdelt1, cdelt2)) = get_xy_rotation_and_scale(header)
if math.fabs(xrot) - math.fabs(yrot) > skew_threshold:
raise ValueError("Skew detected: xrot=%.4f yrot=%.4f" % (
xrot, yrot))
rot = yrot
lonpole = float(header.get('LONPOLE', 180.0))
if lonpole != 180.0:
rot += 180.0 - lonpole
return (rot, cdelt1, cdelt2) |
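A hedged sketch assuming astropy is available and the FITS header carries a valid celestial WCS; the file name is a placeholder.

    from astropy.io import fits

    header = fits.getheader('image.fits')
    rot_deg, cdelt1, cdelt2 = get_rotation_and_scale(header)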
def read_network_file(self, path_to_network_file):
"""
Parameters
-----------
path_to_network_file : str
Expects a network file with columns "pw0" and "pw1." A "features"
column that specifies the features where the (pw0, pw1) edge is
present will assign a weight to the edge, though it is not required
(edge will have weight 1 if no "features" column exists).
"features" format: space-separated feature numbers,
e.g. 0.0 1.0 2.0
"""
network_df = pd.read_table(path_to_network_file)
network_edges = {}
for _, row in network_df.iterrows():
vertex0_id = self.add_pathway(row["pw0"])
vertex1_id = self.add_pathway(row["pw1"])
edge_id = self.edge_tuple(vertex0_id, vertex1_id)
if "features" in row:
network_edges[edge_id] = \
[int(float(f)) for f in row["features"].split(" ")]
else:
network_edges[edge_id] = []
self._augment_network(network_edges) | Parameters
-----------
path_to_network_file : str
Expects a network file with columns "pw0" and "pw1." A "features"
column that specifies the features where the (pw0, pw1) edge is
present will assign a weight to the edge, though it is not required
(edge will have weight 1 if no "features" column exists).
"features" format: space-separated feature numbers,
e.g. 0.0 1.0 2.0 | Below is the the instruction that describes the task:
### Input:
Parameters
-----------
path_to_network_file : str
Expects a network file with columns "pw0" and "pw1." A "features"
column that specifies the features where the (pw0, pw1) edge is
present will assign a weight to the edge, though it is not required
(edge will have weight 1 if no "features" column exists).
"features" format: space-separated feature numbers,
e.g. 0.0 1.0 2.0
### Response:
def read_network_file(self, path_to_network_file):
"""
Parameters
-----------
path_to_network_file : str
Expects a network file with columns "pw0" and "pw1." A "features"
column that specifies the features where the (pw0, pw1) edge is
present will assign a weight to the edge, though it is not required
(edge will have weight 1 if no "features" column exists).
"features" format: space-separated feature numbers,
e.g. 0.0 1.0 2.0
"""
network_df = pd.read_table(path_to_network_file)
network_edges = {}
for _, row in network_df.iterrows():
vertex0_id = self.add_pathway(row["pw0"])
vertex1_id = self.add_pathway(row["pw1"])
edge_id = self.edge_tuple(vertex0_id, vertex1_id)
if "features" in row:
network_edges[edge_id] = \
[int(float(f)) for f in row["features"].split(" ")]
else:
network_edges[edge_id] = []
self._augment_network(network_edges) |
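A hypothetical sketch of the expected input: a tab-separated file with pw0/pw1 columns and an optional features column, loaded into an existing network object of this class.

    # pathways.tsv (tab-separated):
    # pw0         pw1        features
    # glycolysis  tca_cycle  0.0 2.0
    network.read_network_file('pathways.tsv')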
def sample_from_largest_budget(self, info_dict):
"""We opted for a single multidimensional KDE compared to the
        hierarchy of one-dimensional KDEs used in TPE. The KDE models are
        separated by budget. This function samples a configuration from the
        largest budget. First we sample "num_samples" configurations,
then prefer one with the largest l(x)/g(x).
Parameters:
-----------
info_dict: dict
record the information of this configuration
Returns
-------
dict:
new configuration named sample
dict:
info_dict, record the information of this configuration
"""
best = np.inf
best_vector = None
budget = max(self.kde_models.keys())
l = self.kde_models[budget]['good'].pdf
g = self.kde_models[budget]['bad'].pdf
minimize_me = lambda x: max(1e-32, g(x))/max(l(x), 1e-32)
kde_good = self.kde_models[budget]['good']
kde_bad = self.kde_models[budget]['bad']
for i in range(self.num_samples):
idx = np.random.randint(0, len(kde_good.data))
datum = kde_good.data[idx]
vector = []
for m, bw, t in zip(datum, kde_good.bw, self.vartypes):
bw = max(bw, self.min_bandwidth)
if t == 0:
bw = self.bw_factor*bw
vector.append(sps.truncnorm.rvs(-m/bw, (1-m)/bw, loc=m, scale=bw))
else:
if np.random.rand() < (1-bw):
vector.append(int(m))
else:
vector.append(np.random.randint(t))
val = minimize_me(vector)
if not np.isfinite(val):
logger.warning('sampled vector: %s has EI value %s'%(vector, val))
logger.warning("data in the KDEs:\n%s\n%s"%(kde_good.data, kde_bad.data))
logger.warning("bandwidth of the KDEs:\n%s\n%s"%(kde_good.bw, kde_bad.bw))
logger.warning("l(x) = %s"%(l(vector)))
logger.warning("g(x) = %s"%(g(vector)))
# right now, this happens because a KDE does not contain all values for a categorical parameter
# this cannot be fixed with the statsmodels KDE, so for now, we are just going to evaluate this one
# if the good_kde has a finite value, i.e. there is no config with that value in the bad kde,
# so it shouldn't be terrible.
if np.isfinite(l(vector)):
best_vector = vector
break
if val < best:
best = val
best_vector = vector
if best_vector is None:
logger.debug("Sampling based optimization with %i samples failed -> using random configuration"%self.num_samples)
sample = self.configspace.sample_configuration().get_dictionary()
info_dict['model_based_pick'] = False
else:
logger.debug('best_vector: {}, {}, {}, {}'.format(best_vector, best, l(best_vector), g(best_vector)))
for i, hp_value in enumerate(best_vector):
if isinstance(
self.configspace.get_hyperparameter(
self.configspace.get_hyperparameter_by_idx(i)
),
ConfigSpace.hyperparameters.CategoricalHyperparameter
):
best_vector[i] = int(np.rint(best_vector[i]))
sample = ConfigSpace.Configuration(self.configspace, vector=best_vector).get_dictionary()
sample = ConfigSpace.util.deactivate_inactive_hyperparameters(
configuration_space=self.configspace,
configuration=sample)
info_dict['model_based_pick'] = True
return sample, info_dict | We opted for a single multidimensional KDE compared to the
hierarchy of one-dimensional KDEs used in TPE. The KDE models are
separated by budget. This function samples a configuration from the
largest budget. First we sample "num_samples" configurations,
then prefer one with the largest l(x)/g(x).
Parameters:
-----------
info_dict: dict
record the information of this configuration
Returns
-------
dict:
new configuration named sample
dict:
info_dict, record the information of this configuration | Below is the the instruction that describes the task:
### Input:
We opted for a single multidimensional KDE compared to the
hierarchy of one-dimensional KDEs used in TPE. The KDE models are
separated by budget. This function samples a configuration from the
largest budget. First we sample "num_samples" configurations,
then prefer one with the largest l(x)/g(x).
Parameters:
-----------
info_dict: dict
record the information of this configuration
Returns
-------
dict:
new configuration named sample
dict:
info_dict, record the information of this configuration
### Response:
def sample_from_largest_budget(self, info_dict):
"""We opted for a single multidimensional KDE compared to the
        hierarchy of one-dimensional KDEs used in TPE. The KDE models are
        separated by budget. This function samples a configuration from the
        largest budget. First we sample "num_samples" configurations,
then prefer one with the largest l(x)/g(x).
Parameters:
-----------
info_dict: dict
record the information of this configuration
Returns
-------
dict:
new configuration named sample
dict:
info_dict, record the information of this configuration
"""
best = np.inf
best_vector = None
budget = max(self.kde_models.keys())
l = self.kde_models[budget]['good'].pdf
g = self.kde_models[budget]['bad'].pdf
minimize_me = lambda x: max(1e-32, g(x))/max(l(x), 1e-32)
kde_good = self.kde_models[budget]['good']
kde_bad = self.kde_models[budget]['bad']
for i in range(self.num_samples):
idx = np.random.randint(0, len(kde_good.data))
datum = kde_good.data[idx]
vector = []
for m, bw, t in zip(datum, kde_good.bw, self.vartypes):
bw = max(bw, self.min_bandwidth)
if t == 0:
bw = self.bw_factor*bw
vector.append(sps.truncnorm.rvs(-m/bw, (1-m)/bw, loc=m, scale=bw))
else:
if np.random.rand() < (1-bw):
vector.append(int(m))
else:
vector.append(np.random.randint(t))
val = minimize_me(vector)
if not np.isfinite(val):
logger.warning('sampled vector: %s has EI value %s'%(vector, val))
logger.warning("data in the KDEs:\n%s\n%s"%(kde_good.data, kde_bad.data))
logger.warning("bandwidth of the KDEs:\n%s\n%s"%(kde_good.bw, kde_bad.bw))
logger.warning("l(x) = %s"%(l(vector)))
logger.warning("g(x) = %s"%(g(vector)))
# right now, this happens because a KDE does not contain all values for a categorical parameter
# this cannot be fixed with the statsmodels KDE, so for now, we are just going to evaluate this one
# if the good_kde has a finite value, i.e. there is no config with that value in the bad kde,
# so it shouldn't be terrible.
if np.isfinite(l(vector)):
best_vector = vector
break
if val < best:
best = val
best_vector = vector
if best_vector is None:
logger.debug("Sampling based optimization with %i samples failed -> using random configuration"%self.num_samples)
sample = self.configspace.sample_configuration().get_dictionary()
info_dict['model_based_pick'] = False
else:
logger.debug('best_vector: {}, {}, {}, {}'.format(best_vector, best, l(best_vector), g(best_vector)))
for i, hp_value in enumerate(best_vector):
if isinstance(
self.configspace.get_hyperparameter(
self.configspace.get_hyperparameter_by_idx(i)
),
ConfigSpace.hyperparameters.CategoricalHyperparameter
):
best_vector[i] = int(np.rint(best_vector[i]))
sample = ConfigSpace.Configuration(self.configspace, vector=best_vector).get_dictionary()
sample = ConfigSpace.util.deactivate_inactive_hyperparameters(
configuration_space=self.configspace,
configuration=sample)
info_dict['model_based_pick'] = True
return sample, info_dict |
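A hedged internal-usage sketch, called from another method of the same tuner class once good/bad KDE models exist for at least one budget.

    config, info = self.sample_from_largest_budget({})
    # info['model_based_pick'] indicates whether the KDE-based draw succeeded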