code (string, 75 to 104k chars) | docstring (string, 1 to 46.9k chars) | text (string, 164 to 112k chars) |
---|---|---|
def register_nonzero_counter(network, stats):
"""
Register forward hooks to count the number of nonzero floating-point
values from all the tensors used by the given network during inference.
:param network: The network to attach the counter
:param stats: Dictionary holding the counter.
"""
if hasattr(network, "__counter_nonzero__"):
raise ValueError("nonzero counter was already registered for this network")
if not isinstance(stats, dict):
raise ValueError("stats must be a dictionary")
network.__counter_nonzero__ = stats
handles = []
for name, module in network.named_modules():
handles.append(module.register_forward_hook(_nonzero_counter_hook))
if network != module:
if hasattr(module, "__counter_nonzero__"):
raise ValueError("nonzero counter was already registered for this module")
child_data = dict()
network.__counter_nonzero__[name] = child_data
module.__counter_nonzero__ = child_data
network.__counter_nonzero_handles__ = handles | Register forward hooks to count the number of nonzero floating-point
values from all the tensors used by the given network during inference.
:param network: The network to attach the counter
:param stats: Dictionary holding the counter. | Below is the instruction that describes the task:
### Input:
Register forward hooks to count the number of nonzero floating-point
values from all the tensors used by the given network during inference.
:param network: The network to attach the counter
:param stats: Dictionary holding the counter.
### Response:
def register_nonzero_counter(network, stats):
"""
Register forward hooks to count the number of nonzero floating-point
values from all the tensors used by the given network during inference.
:param network: The network to attach the counter
:param stats: Dictionary holding the counter.
"""
if hasattr(network, "__counter_nonzero__"):
raise ValueError("nonzero counter was already registered for this network")
if not isinstance(stats, dict):
raise ValueError("stats must be a dictionary")
network.__counter_nonzero__ = stats
handles = []
for name, module in network.named_modules():
handles.append(module.register_forward_hook(_nonzero_counter_hook))
if network != module:
if hasattr(module, "__counter_nonzero__"):
raise ValueError("nonzero counter was already registered for this module")
child_data = dict()
network.__counter_nonzero__[name] = child_data
module.__counter_nonzero__ = child_data
network.__counter_nonzero_handles__ = handles |
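A minimal usage sketch of the hook registration above, assuming PyTorch is available and that `register_nonzero_counter` (together with its companion `_nonzero_counter_hook`, not shown here) is importable from the module it lives in:

```python
# Usage sketch (assumptions: PyTorch installed; register_nonzero_counter and
# its _nonzero_counter_hook helper come from the module shown above).
import torch
import torch.nn as nn

net = nn.Sequential(nn.Linear(8, 4), nn.ReLU(), nn.Linear(4, 2))
stats = {}
register_nonzero_counter(net, stats)   # attaches forward hooks to every submodule

with torch.no_grad():
    net(torch.randn(1, 8))             # hooks fire during this inference pass

print(stats)                           # stats now holds whatever the hook recorded per module
```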
def get_final_version_string(release_mode, semver, commit_count=0):
"""Generates update dictionary entries for the version string"""
version_string = ".".join(semver)
maybe_dev_version_string = version_string
updates = {}
if release_mode:
# in production, we have something like `1.2.3`, as well as a flag e.g. PRODUCTION=True
updates[Constants.RELEASE_FIELD] = config.RELEASED_VALUE
else:
# in dev mode, we have a dev marker e.g. `1.2.3.dev678`
maybe_dev_version_string = config.DEVMODE_TEMPLATE.format(
version=version_string, count=commit_count
)
# make available all components of the semantic version including the full string
updates[Constants.VERSION_FIELD] = maybe_dev_version_string
updates[Constants.VERSION_STRICT_FIELD] = version_string
return updates | Generates update dictionary entries for the version string | Below is the instruction that describes the task:
### Input:
Generates update dictionary entries for the version string
### Response:
def get_final_version_string(release_mode, semver, commit_count=0):
"""Generates update dictionary entries for the version string"""
version_string = ".".join(semver)
maybe_dev_version_string = version_string
updates = {}
if release_mode:
# in production, we have something like `1.2.3`, as well as a flag e.g. PRODUCTION=True
updates[Constants.RELEASE_FIELD] = config.RELEASED_VALUE
else:
# in dev mode, we have a dev marker e.g. `1.2.3.dev678`
maybe_dev_version_string = config.DEVMODE_TEMPLATE.format(
version=version_string, count=commit_count
)
# make available all components of the semantic version including the full string
updates[Constants.VERSION_FIELD] = maybe_dev_version_string
updates[Constants.VERSION_STRICT_FIELD] = version_string
return updates |
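The `Constants` and `config` objects above come from the surrounding package; the following is a self-contained sketch of the same release/dev branching, where the field names and the dev template are illustrative stand-ins rather than the real values:

```python
# Stand-alone sketch of the release/dev branching; "version", "version_strict"
# and DEV_TEMPLATE are placeholders for the real Constants/config values.
DEV_TEMPLATE = "{version}.dev{count}"

def sketch_version(release_mode, semver=("1", "2", "3"), commit_count=678):
    version = ".".join(semver)
    updates = {"version_strict": version}
    if release_mode:
        updates["version"] = version  # e.g. 1.2.3 in production
    else:
        updates["version"] = DEV_TEMPLATE.format(version=version, count=commit_count)
    return updates

print(sketch_version(False))  # {'version_strict': '1.2.3', 'version': '1.2.3.dev678'}
```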
def clone(self, id, name=None):
"""Clone a gist
Arguments:
id: the gist identifier
name: the name to give the cloned repo
"""
url = '[email protected]:/{}'.format(id)
if name is None:
os.system('git clone {}'.format(url))
else:
os.system('git clone {} {}'.format(url, name)) | Clone a gist
Arguments:
id: the gist identifier
name: the name to give the cloned repo | Below is the instruction that describes the task:
### Input:
Clone a gist
Arguments:
id: the gist identifier
name: the name to give the cloned repo
### Response:
def clone(self, id, name=None):
"""Clone a gist
Arguments:
id: the gist identifier
name: the name to give the cloned repo
"""
url = '[email protected]:/{}'.format(id)
if name is None:
os.system('git clone {}'.format(url))
else:
os.system('git clone {} {}'.format(url, name)) |
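Because `os.system` splices `id` and `name` straight into a shell string, an alternative sketch using an argument list avoids shell-quoting surprises; the obfuscated `[email protected]` host placeholder is kept exactly as in the original:

```python
# Alternative sketch: subprocess with an argument list instead of os.system,
# so names containing spaces or shell metacharacters are passed through safely.
import subprocess

def clone_gist(gist_id, name=None):
    url = '[email protected]:/{}'.format(gist_id)  # host placeholder kept from the original
    cmd = ['git', 'clone', url] + ([name] if name else [])
    subprocess.run(cmd, check=False)
```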
def call_with_context(func, context, *args):
"""
Check if given function has more arguments than given. Call it with context
as last argument or without it.
"""
return make_context_aware(func, len(args))(*args + (context,)) | Check if given function has more arguments than given. Call it with context
as last argument or without it. | Below is the instruction that describes the task:
### Input:
Check if given function has more arguments than given. Call it with context
as last argument or without it.
### Response:
def call_with_context(func, context, *args):
"""
Check if given function has more arguments than given. Call it with context
as last argument or without it.
"""
return make_context_aware(func, len(args))(*args + (context,)) |
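`make_context_aware` is not shown above; the following is a hedged sketch of what it plausibly does based on the docstring, i.e. pass the context as an extra trailing argument only when the function can accept it:

```python
# Hypothetical sketch of a make_context_aware-style helper: if func takes more
# positional parameters than num_args, keep the trailing context, else drop it.
import inspect

def make_context_aware_sketch(func, num_args):
    takes_context = len(inspect.signature(func).parameters) > num_args

    def wrapper(*args_with_context):
        if takes_context:
            return func(*args_with_context)
        return func(*args_with_context[:num_args])

    return wrapper

# make_context_aware_sketch(lambda a, b: a + b, 2)(1, 2, "ctx")      -> 3
# make_context_aware_sketch(lambda a, b, ctx: ctx, 2)(1, 2, "ctx")   -> 'ctx'
```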
def authenticate(self, req, resp, resource):
"""
Extract basic auth token from request `authorization` header, decode the
token, verify the username/password and return either a ``user``
object if successful else raise a `falcon.HTTPUnauthorized` exception
"""
username, password = self._extract_credentials(req)
user = self.user_loader(username, password)
if not user:
raise falcon.HTTPUnauthorized(
description='Invalid Username/Password')
return user | Extract basic auth token from request `authorization` header, decode the
token, verify the username/password and return either a ``user``
object if successful else raise a `falcon.HTTPUnauthorized` exception | Below is the instruction that describes the task:
### Input:
Extract basic auth token from request `authorization` header, decode the
token, verify the username/password and return either a ``user``
object if successful else raise a `falcon.HTTPUnauthorized` exception
### Response:
def authenticate(self, req, resp, resource):
"""
Extract basic auth token from request `authorization` header, decode the
token, verify the username/password and return either a ``user``
object if successful else raise a `falcon.HTTPUnauthorized` exception
"""
username, password = self._extract_credentials(req)
user = self.user_loader(username, password)
if not user:
raise falcon.HTTPUnauthorized(
description='Invalid Username/Password')
return user |
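The `user_loader` this middleware calls is any callable the application supplies; an illustrative stand-in (all names and data below are made up) looks like this:

```python
# Illustrative user_loader stand-in: any callable mapping (username, password)
# to a user object, or to None/falsy on failure, satisfies the contract above.
USERS = {"alice": "s3cret"}

def user_loader(username, password):
    if USERS.get(username) == password:
        return {"username": username}
    return None
```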
def twofilter_smoothing(self, t, info, phi, loggamma, linear_cost=False,
return_ess=False, modif_forward=None,
modif_info=None):
"""Two-filter smoothing.
Parameters
----------
t: time, in range 0 <= t < T-1
info: SMC object
the information filter
phi: function
test function, a function of (X_t,X_{t+1})
loggamma: function
a function of (X_{t+1})
linear_cost: bool
if True, use the O(N) variant (basic version is O(N^2))
Returns
-------
Two-filter estimate of the smoothing expectation of phi(X_t,x_{t+1})
"""
ti = self.T - 2 - t # t+1 in reverse
if t < 0 or t >= self.T - 1:
raise ValueError(
'two-filter smoothing: t must be in range 0,...,T-2')
lwinfo = info.hist.wgt[ti].lw - loggamma(info.hist.X[ti])
if linear_cost:
return self._twofilter_smoothing_ON(t, ti, info, phi, lwinfo,
return_ess,
modif_forward, modif_info)
else:
return self._twofilter_smoothing_ON2(t, ti, info, phi, lwinfo) | Two-filter smoothing.
Parameters
----------
t: time, in range 0 <= t < T-1
info: SMC object
the information filter
phi: function
test function, a function of (X_t,X_{t+1})
loggamma: function
a function of (X_{t+1})
linear_cost: bool
if True, use the O(N) variant (basic version is O(N^2))
Returns
-------
Two-filter estimate of the smoothing expectation of phi(X_t,x_{t+1}) | Below is the instruction that describes the task:
### Input:
Two-filter smoothing.
Parameters
----------
t: time, in range 0 <= t < T-1
info: SMC object
the information filter
phi: function
test function, a function of (X_t,X_{t+1})
loggamma: function
a function of (X_{t+1})
linear_cost: bool
if True, use the O(N) variant (basic version is O(N^2))
Returns
-------
Two-filter estimate of the smoothing expectation of phi(X_t,x_{t+1})
### Response:
def twofilter_smoothing(self, t, info, phi, loggamma, linear_cost=False,
return_ess=False, modif_forward=None,
modif_info=None):
"""Two-filter smoothing.
Parameters
----------
t: time, in range 0 <= t < T-1
info: SMC object
the information filter
phi: function
test function, a function of (X_t,X_{t+1})
loggamma: function
a function of (X_{t+1})
linear_cost: bool
if True, use the O(N) variant (basic version is O(N^2))
Returns
-------
Two-filter estimate of the smoothing expectation of phi(X_t,x_{t+1})
"""
ti = self.T - 2 - t # t+1 in reverse
if t < 0 or t >= self.T - 1:
raise ValueError(
'two-filter smoothing: t must be in range 0,...,T-2')
lwinfo = info.hist.wgt[ti].lw - loggamma(info.hist.X[ti])
if linear_cost:
return self._twofilter_smoothing_ON(t, ti, info, phi, lwinfo,
return_ess,
modif_forward, modif_info)
else:
return self._twofilter_smoothing_ON2(t, ti, info, phi, lwinfo) |
def account_update(self, data, **kwargs):
"https://developer.zendesk.com/rest_api/docs/chat/accounts#update-account"
api_path = "/api/v2/account"
return self.call(api_path, method="PUT", data=data, **kwargs) | https://developer.zendesk.com/rest_api/docs/chat/accounts#update-account | Below is the instruction that describes the task:
### Input:
https://developer.zendesk.com/rest_api/docs/chat/accounts#update-account
### Response:
def account_update(self, data, **kwargs):
"https://developer.zendesk.com/rest_api/docs/chat/accounts#update-account"
api_path = "/api/v2/account"
return self.call(api_path, method="PUT", data=data, **kwargs) |
def list_objects(self, bucket_name=None, **kwargs):
"""
This method is primarily for illustration and just calls the
boto3 client implementation of list_objects but is a common task
for first time Predix BlobStore users.
"""
if not bucket_name: bucket_name = self.bucket_name
return self.client.list_objects(Bucket=bucket_name, **kwargs) | This method is primarily for illustration and just calls the
boto3 client implementation of list_objects but is a common task
for first time Predix BlobStore users. | Below is the instruction that describes the task:
### Input:
This method is primarily for illustration and just calls the
boto3 client implementation of list_objects but is a common task
for first time Predix BlobStore users.
### Response:
def list_objects(self, bucket_name=None, **kwargs):
"""
This method is primarily for illustration and just calls the
boto3 client implementation of list_objects but is a common task
for first time Predix BlobStore users.
"""
if not bucket_name: bucket_name = self.bucket_name
return self.client.list_objects(Bucket=bucket_name, **kwargs) |
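A hedged usage sketch, assuming `bs` is an already-configured BlobStore instance from the surrounding library (so `bs.client` is a boto3 S3 client and `bs.bucket_name` is set); the return value is the standard S3 `list_objects` payload:

```python
# Usage sketch: `bs` is assumed to be a configured BlobStore instance.
# list_objects returns the usual S3 payload; object entries live under 'Contents'.
for entry in bs.list_objects().get('Contents', []):
    print(entry['Key'], entry['Size'])
```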
def create_vpnservice(subnet, router, name, admin_state_up=True, profile=None):
'''
Creates a new VPN service
CLI Example:
.. code-block:: bash
salt '*' neutron.create_vpnservice router-name name
:param subnet: Subnet unique identifier for the VPN service deployment
:param router: Router unique identifier for the VPN service
:param name: Set a name for the VPN service
:param admin_state_up: Set admin state up to true or false,
default:True (Optional)
:param profile: Profile to build on (Optional)
:return: Created VPN service information
'''
conn = _auth(profile)
return conn.create_vpnservice(subnet, router, name, admin_state_up) | Creates a new VPN service
CLI Example:
.. code-block:: bash
salt '*' neutron.create_vpnservice router-name name
:param subnet: Subnet unique identifier for the VPN service deployment
:param router: Router unique identifier for the VPN service
:param name: Set a name for the VPN service
:param admin_state_up: Set admin state up to true or false,
default:True (Optional)
:param profile: Profile to build on (Optional)
:return: Created VPN service information | Below is the instruction that describes the task:
### Input:
Creates a new VPN service
CLI Example:
.. code-block:: bash
salt '*' neutron.create_vpnservice router-name name
:param subnet: Subnet unique identifier for the VPN service deployment
:param router: Router unique identifier for the VPN service
:param name: Set a name for the VPN service
:param admin_state_up: Set admin state up to true or false,
default:True (Optional)
:param profile: Profile to build on (Optional)
:return: Created VPN service information
### Response:
def create_vpnservice(subnet, router, name, admin_state_up=True, profile=None):
'''
Creates a new VPN service
CLI Example:
.. code-block:: bash
salt '*' neutron.create_vpnservice router-name name
:param subnet: Subnet unique identifier for the VPN service deployment
:param router: Router unique identifier for the VPN service
:param name: Set a name for the VPN service
:param admin_state_up: Set admin state up to true or false,
default:True (Optional)
:param profile: Profile to build on (Optional)
:return: Created VPN service information
'''
conn = _auth(profile)
return conn.create_vpnservice(subnet, router, name, admin_state_up) |
def settings(self):
'''Generator which returns all of the statements in all of the settings tables'''
for table in self.tables:
if isinstance(table, SettingTable):
for statement in table.statements:
yield statement | Generator which returns all of the statements in all of the settings tables | Below is the instruction that describes the task:
### Input:
Generator which returns all of the statements in all of the settings tables
### Response:
def settings(self):
'''Generator which returns all of the statements in all of the settings tables'''
for table in self.tables:
if isinstance(table, SettingTable):
for statement in table.statements:
yield statement |
def install_package(self, name, index=None, force=False, update=False):
"""Install a given package.
Args:
name (str): The package name to install. This can be any valid
pip package specification.
index (str): The URL for a pypi index to use.
force (bool): Force the reinstall of packages during updates.
update (bool): Update the package if it is out of date.
"""
cmd = 'install'
if force:
cmd = '{0} {1}'.format(cmd, '--force-reinstall')
if update:
cmd = '{0} {1}'.format(cmd, '--upgrade')
if index:
cmd = '{0} {1}'.format(cmd, '--index-url {0}'.format(index))
self.pip('{0} {1}'.format(cmd, name)) | Install a given package.
Args:
name (str): The package name to install. This can be any valid
pip package specification.
index (str): The URL for a pypi index to use.
force (bool): Force the reinstall of packages during updates.
update (bool): Update the package if it is out of date. | Below is the instruction that describes the task:
### Input:
Install a given package.
Args:
name (str): The package name to install. This can be any valid
pip package specification.
index (str): The URL for a pypi index to use.
force (bool): Force the reinstall of packages during updates.
update (bool): Update the package if it is out of date.
### Response:
def install_package(self, name, index=None, force=False, update=False):
"""Install a given package.
Args:
name (str): The package name to install. This can be any valid
pip package specification.
index (str): The URL for a pypi index to use.
force (bool): Force the reinstall of packages during updates.
update (bool): Update the package if it is out of date.
"""
cmd = 'install'
if force:
cmd = '{0} {1}'.format(cmd, '--force-reinstall')
if update:
cmd = '{0} {1}'.format(cmd, '--upgrade')
if index:
cmd = '{0} {1}'.format(cmd, '--index-url {0}'.format(index))
self.pip('{0} {1}'.format(cmd, name)) |
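A short usage sketch, assuming `env` is an instance of the surrounding environment class whose `pip` method shells out to the pip CLI:

```python
# Usage sketch: `env` is assumed to expose the install_package method above.
env.install_package('requests')                                        # plain install
env.install_package('requests', update=True)                           # upgrade if outdated
env.install_package('mypkg', index='https://pypi.example.org/simple')  # custom index
```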
def rot(vm):
"""Rotate topmost three items once to the left. ( a b c -- b c a )"""
c = vm.pop()
b = vm.pop()
a = vm.pop()
vm.push(b)
vm.push(c)
vm.push(a) | Rotate topmost three items once to the left. ( a b c -- b c a ) | Below is the instruction that describes the task:
### Input:
Rotate topmost three items once to the left. ( a b c -- b c a )
### Response:
def rot(vm):
"""Rotate topmost three items once to the left. ( a b c -- b c a )"""
c = vm.pop()
b = vm.pop()
a = vm.pop()
vm.push(b)
vm.push(c)
vm.push(a) |
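A quick check of `rot` against a throwaway stack VM; the real `vm` type is assumed only to provide `push` and `pop`:

```python
# Minimal list-backed stand-in for the VM, just enough to exercise rot().
class TinyVM:
    def __init__(self):
        self.stack = []

    def push(self, value):
        self.stack.append(value)

    def pop(self):
        return self.stack.pop()

vm = TinyVM()
for item in ('a', 'b', 'c'):
    vm.push(item)
rot(vm)
print(vm.stack)  # ['b', 'c', 'a']
```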
def parsecommonarguments(object, doc, annotationtype, required, allowed, **kwargs):
"""Internal function to parse common FoLiA attributes and sets up the instance accordingly. Do not invoke directly."""
object.doc = doc #The FoLiA root document
if required is None:
required = tuple()
if allowed is None:
allowed = tuple()
supported = required + allowed
if 'generate_id_in' in kwargs:
try:
kwargs['id'] = kwargs['generate_id_in'].generate_id(object.__class__)
except GenerateIDException:
pass #ID could not be generated, just skip
del kwargs['generate_id_in']
if 'id' in kwargs:
if Attrib.ID not in supported:
raise ValueError("ID is not supported on " + object.__class__.__name__)
isncname(kwargs['id'])
object.id = kwargs['id']
del kwargs['id']
elif Attrib.ID in required:
raise ValueError("ID is required for " + object.__class__.__name__)
else:
object.id = None
if 'set' in kwargs:
if Attrib.CLASS not in supported and not object.SETONLY:
raise ValueError("Set is not supported on " + object.__class__.__name__)
if not kwargs['set']:
object.set ="undefined"
else:
object.set = kwargs['set']
del kwargs['set']
if object.set:
if doc and (not (annotationtype in doc.annotationdefaults) or not (object.set in doc.annotationdefaults[annotationtype])):
if object.set in doc.alias_set:
object.set = doc.alias_set[object.set]
elif doc.autodeclare:
doc.annotations.append( (annotationtype, object.set ) )
doc.annotationdefaults[annotationtype] = {object.set: {} }
else:
raise ValueError("Set '" + object.set + "' is used for " + object.__class__.__name__ + ", but has no declaration!")
elif annotationtype in doc.annotationdefaults and len(doc.annotationdefaults[annotationtype]) == 1:
object.set = list(doc.annotationdefaults[annotationtype].keys())[0]
elif object.ANNOTATIONTYPE == AnnotationType.TEXT:
object.set = "undefined" #text content needs never be declared (for backward compatibility) and is in set 'undefined'
elif Attrib.CLASS in required: #or (hasattr(object,'SETONLY') and object.SETONLY):
raise ValueError("Set is required for " + object.__class__.__name__)
if 'class' in kwargs:
if not Attrib.CLASS in supported:
raise ValueError("Class is not supported for " + object.__class__.__name__)
object.cls = kwargs['class']
del kwargs['class']
elif 'cls' in kwargs:
if not Attrib.CLASS in supported:
raise ValueError("Class is not supported on " + object.__class__.__name__)
object.cls = kwargs['cls']
del kwargs['cls']
elif Attrib.CLASS in required:
raise ValueError("Class is required for " + object.__class__.__name__)
if object.cls and not object.set:
if doc and doc.autodeclare:
if not (annotationtype, 'undefined') in doc.annotations:
doc.annotations.append( (annotationtype, 'undefined') )
doc.annotationdefaults[annotationtype] = {'undefined': {} }
object.set = 'undefined'
else:
raise ValueError("Set is required for " + object.__class__.__name__ + ". Class '" + object.cls + "' assigned without set.")
if 'annotator' in kwargs:
if not Attrib.ANNOTATOR in supported:
raise ValueError("Annotator is not supported for " + object.__class__.__name__)
object.annotator = kwargs['annotator']
del kwargs['annotator']
elif doc and annotationtype in doc.annotationdefaults and object.set in doc.annotationdefaults[annotationtype] and 'annotator' in doc.annotationdefaults[annotationtype][object.set]:
object.annotator = doc.annotationdefaults[annotationtype][object.set]['annotator']
elif Attrib.ANNOTATOR in required:
raise ValueError("Annotator is required for " + object.__class__.__name__)
if 'annotatortype' in kwargs:
if not Attrib.ANNOTATOR in supported:
raise ValueError("Annotatortype is not supported for " + object.__class__.__name__)
if kwargs['annotatortype'] == 'auto' or kwargs['annotatortype'] == AnnotatorType.AUTO:
object.annotatortype = AnnotatorType.AUTO
elif kwargs['annotatortype'] == 'manual' or kwargs['annotatortype'] == AnnotatorType.MANUAL:
object.annotatortype = AnnotatorType.MANUAL
else:
raise ValueError("annotatortype must be 'auto' or 'manual', got " + repr(kwargs['annotatortype']))
del kwargs['annotatortype']
elif doc and annotationtype in doc.annotationdefaults and object.set in doc.annotationdefaults[annotationtype] and 'annotatortype' in doc.annotationdefaults[annotationtype][object.set]:
object.annotatortype = doc.annotationdefaults[annotationtype][object.set]['annotatortype']
elif Attrib.ANNOTATOR in required:
raise ValueError("Annotatortype is required for " + object.__class__.__name__)
if 'confidence' in kwargs:
if not Attrib.CONFIDENCE in supported:
raise ValueError("Confidence is not supported")
if kwargs['confidence'] is not None:
try:
object.confidence = float(kwargs['confidence'])
assert object.confidence >= 0.0 and object.confidence <= 1.0
except:
raise ValueError("Confidence must be a floating point number between 0 and 1, got " + repr(kwargs['confidence']) )
del kwargs['confidence']
elif Attrib.CONFIDENCE in required:
raise ValueError("Confidence is required for " + object.__class__.__name__)
if 'n' in kwargs:
if not Attrib.N in supported:
raise ValueError("N is not supported for " + object.__class__.__name__)
object.n = kwargs['n']
del kwargs['n']
elif Attrib.N in required:
raise ValueError("N is required for " + object.__class__.__name__)
if 'datetime' in kwargs:
if not Attrib.DATETIME in supported:
raise ValueError("Datetime is not supported")
if isinstance(kwargs['datetime'], datetime):
object.datetime = kwargs['datetime']
else:
#try:
object.datetime = parse_datetime(kwargs['datetime'])
#except:
# raise ValueError("Unable to parse datetime: " + str(repr(kwargs['datetime'])))
del kwargs['datetime']
elif doc and annotationtype in doc.annotationdefaults and object.set in doc.annotationdefaults[annotationtype] and 'datetime' in doc.annotationdefaults[annotationtype][object.set]:
object.datetime = doc.annotationdefaults[annotationtype][object.set]['datetime']
elif Attrib.DATETIME in required:
raise ValueError("Datetime is required for " + object.__class__.__name__)
if 'src' in kwargs:
if not Attrib.SRC in supported:
raise ValueError("Source is not supported for " + object.__class__.__name__)
object.src = kwargs['src']
del kwargs['src']
elif Attrib.SRC in required:
raise ValueError("Source is required for " + object.__class__.__name__)
if 'begintime' in kwargs:
if not Attrib.BEGINTIME in supported:
raise ValueError("Begintime is not supported for " + object.__class__.__name__)
object.begintime = parsetime(kwargs['begintime'])
del kwargs['begintime']
elif Attrib.BEGINTIME in required:
raise ValueError("Begintime is required for " + object.__class__.__name__)
if 'endtime' in kwargs:
if not Attrib.ENDTIME in supported:
raise ValueError("Endtime is not supported for " + object.__class__.__name__)
object.endtime = parsetime(kwargs['endtime'])
del kwargs['endtime']
elif Attrib.ENDTIME in required:
raise ValueError("Endtime is required for " + object.__class__.__name__)
if 'speaker' in kwargs:
if not Attrib.SPEAKER in supported:
raise ValueError("Speaker is not supported for " + object.__class__.__name__)
object.speaker = kwargs['speaker']
del kwargs['speaker']
elif Attrib.SPEAKER in required:
raise ValueError("Speaker is required for " + object.__class__.__name__)
if 'auth' in kwargs:
if kwargs['auth'] in ('no','false'):
object.auth = False
else:
object.auth = bool(kwargs['auth'])
del kwargs['auth']
else:
object.auth = object.__class__.AUTH
if 'text' in kwargs:
if kwargs['text']:
object.settext(kwargs['text'])
del kwargs['text']
if 'phon' in kwargs:
if kwargs['phon']:
object.setphon(kwargs['phon'])
del kwargs['phon']
if 'textclass' in kwargs:
if not Attrib.TEXTCLASS in supported:
raise ValueError("Textclass is not supported for " + object.__class__.__name__)
object.textclass = kwargs['textclass']
del kwargs['textclass']
else:
if Attrib.TEXTCLASS in supported:
object.textclass = "current"
if 'metadata' in kwargs:
if not Attrib.METADATA in supported:
raise ValueError("Metadata is not supported for " + object.__class__.__name__)
object.metadata = kwargs['metadata']
if doc:
try:
doc.submetadata[kwargs['metadata']]
except KeyError:
raise KeyError("No such metadata defined: " + kwargs['metadata'])
del kwargs['metadata']
if object.XLINK:
if 'href' in kwargs:
object.href =kwargs['href']
del kwargs['href']
if 'xlinktype' in kwargs:
object.xlinktype = kwargs['xlinktype']
del kwargs['xlinktype']
if 'xlinkrole' in kwargs:
object.xlinkrole = kwargs['xlinkrole']
del kwargs['xlinkrole']
if 'xlinklabel' in kwargs:
object.xlinklabel = kwargs['xlinklabel']
del kwargs['xlinklabel']
if 'xlinkshow' in kwargs:
object.xlinkshow = kwargs['xlinkshow']
del kwargs['xlinkshow']
if 'xlinktitle' in kwargs:
object.xlinktitle = kwargs['xlinktitle']
del kwargs['xlinktitle']
if doc and doc.debug >= 2:
print(" @id = ", repr(object.id),file=stderr)
print(" @set = ", repr(object.set),file=stderr)
print(" @class = ", repr(object.cls),file=stderr)
print(" @annotator = ", repr(object.annotator),file=stderr)
print(" @annotatortype= ", repr(object.annotatortype),file=stderr)
print(" @confidence = ", repr(object.confidence),file=stderr)
print(" @n = ", repr(object.n),file=stderr)
print(" @datetime = ", repr(object.datetime),file=stderr)
#set index
if object.id and doc:
if object.id in doc.index:
if doc.debug >= 1: print("[PyNLPl FoLiA DEBUG] Duplicate ID not permitted:" + object.id,file=stderr)
raise DuplicateIDError("Duplicate ID not permitted: " + object.id)
else:
if doc.debug >= 1: print("[PyNLPl FoLiA DEBUG] Adding to index: " + object.id,file=stderr)
doc.index[object.id] = object
#Parse feature attributes (shortcut for feature specification for some elements)
for c in object.ACCEPTED_DATA:
if issubclass(c, Feature):
if c.SUBSET in kwargs:
if kwargs[c.SUBSET]:
object.append(c,cls=kwargs[c.SUBSET])
del kwargs[c.SUBSET]
return kwargs | Internal function to parse common FoLiA attributes and sets up the instance accordingly. Do not invoke directly. | Below is the instruction that describes the task:
### Input:
Internal function to parse common FoLiA attributes and sets up the instance accordingly. Do not invoke directly.
### Response:
def parsecommonarguments(object, doc, annotationtype, required, allowed, **kwargs):
"""Internal function to parse common FoLiA attributes and sets up the instance accordingly. Do not invoke directly."""
object.doc = doc #The FoLiA root document
if required is None:
required = tuple()
if allowed is None:
allowed = tuple()
supported = required + allowed
if 'generate_id_in' in kwargs:
try:
kwargs['id'] = kwargs['generate_id_in'].generate_id(object.__class__)
except GenerateIDException:
pass #ID could not be generated, just skip
del kwargs['generate_id_in']
if 'id' in kwargs:
if Attrib.ID not in supported:
raise ValueError("ID is not supported on " + object.__class__.__name__)
isncname(kwargs['id'])
object.id = kwargs['id']
del kwargs['id']
elif Attrib.ID in required:
raise ValueError("ID is required for " + object.__class__.__name__)
else:
object.id = None
if 'set' in kwargs:
if Attrib.CLASS not in supported and not object.SETONLY:
raise ValueError("Set is not supported on " + object.__class__.__name__)
if not kwargs['set']:
object.set ="undefined"
else:
object.set = kwargs['set']
del kwargs['set']
if object.set:
if doc and (not (annotationtype in doc.annotationdefaults) or not (object.set in doc.annotationdefaults[annotationtype])):
if object.set in doc.alias_set:
object.set = doc.alias_set[object.set]
elif doc.autodeclare:
doc.annotations.append( (annotationtype, object.set ) )
doc.annotationdefaults[annotationtype] = {object.set: {} }
else:
raise ValueError("Set '" + object.set + "' is used for " + object.__class__.__name__ + ", but has no declaration!")
elif annotationtype in doc.annotationdefaults and len(doc.annotationdefaults[annotationtype]) == 1:
object.set = list(doc.annotationdefaults[annotationtype].keys())[0]
elif object.ANNOTATIONTYPE == AnnotationType.TEXT:
object.set = "undefined" #text content needs never be declared (for backward compatibility) and is in set 'undefined'
elif Attrib.CLASS in required: #or (hasattr(object,'SETONLY') and object.SETONLY):
raise ValueError("Set is required for " + object.__class__.__name__)
if 'class' in kwargs:
if not Attrib.CLASS in supported:
raise ValueError("Class is not supported for " + object.__class__.__name__)
object.cls = kwargs['class']
del kwargs['class']
elif 'cls' in kwargs:
if not Attrib.CLASS in supported:
raise ValueError("Class is not supported on " + object.__class__.__name__)
object.cls = kwargs['cls']
del kwargs['cls']
elif Attrib.CLASS in required:
raise ValueError("Class is required for " + object.__class__.__name__)
if object.cls and not object.set:
if doc and doc.autodeclare:
if not (annotationtype, 'undefined') in doc.annotations:
doc.annotations.append( (annotationtype, 'undefined') )
doc.annotationdefaults[annotationtype] = {'undefined': {} }
object.set = 'undefined'
else:
raise ValueError("Set is required for " + object.__class__.__name__ + ". Class '" + object.cls + "' assigned without set.")
if 'annotator' in kwargs:
if not Attrib.ANNOTATOR in supported:
raise ValueError("Annotator is not supported for " + object.__class__.__name__)
object.annotator = kwargs['annotator']
del kwargs['annotator']
elif doc and annotationtype in doc.annotationdefaults and object.set in doc.annotationdefaults[annotationtype] and 'annotator' in doc.annotationdefaults[annotationtype][object.set]:
object.annotator = doc.annotationdefaults[annotationtype][object.set]['annotator']
elif Attrib.ANNOTATOR in required:
raise ValueError("Annotator is required for " + object.__class__.__name__)
if 'annotatortype' in kwargs:
if not Attrib.ANNOTATOR in supported:
raise ValueError("Annotatortype is not supported for " + object.__class__.__name__)
if kwargs['annotatortype'] == 'auto' or kwargs['annotatortype'] == AnnotatorType.AUTO:
object.annotatortype = AnnotatorType.AUTO
elif kwargs['annotatortype'] == 'manual' or kwargs['annotatortype'] == AnnotatorType.MANUAL:
object.annotatortype = AnnotatorType.MANUAL
else:
raise ValueError("annotatortype must be 'auto' or 'manual', got " + repr(kwargs['annotatortype']))
del kwargs['annotatortype']
elif doc and annotationtype in doc.annotationdefaults and object.set in doc.annotationdefaults[annotationtype] and 'annotatortype' in doc.annotationdefaults[annotationtype][object.set]:
object.annotatortype = doc.annotationdefaults[annotationtype][object.set]['annotatortype']
elif Attrib.ANNOTATOR in required:
raise ValueError("Annotatortype is required for " + object.__class__.__name__)
if 'confidence' in kwargs:
if not Attrib.CONFIDENCE in supported:
raise ValueError("Confidence is not supported")
if kwargs['confidence'] is not None:
try:
object.confidence = float(kwargs['confidence'])
assert object.confidence >= 0.0 and object.confidence <= 1.0
except:
raise ValueError("Confidence must be a floating point number between 0 and 1, got " + repr(kwargs['confidence']) )
del kwargs['confidence']
elif Attrib.CONFIDENCE in required:
raise ValueError("Confidence is required for " + object.__class__.__name__)
if 'n' in kwargs:
if not Attrib.N in supported:
raise ValueError("N is not supported for " + object.__class__.__name__)
object.n = kwargs['n']
del kwargs['n']
elif Attrib.N in required:
raise ValueError("N is required for " + object.__class__.__name__)
if 'datetime' in kwargs:
if not Attrib.DATETIME in supported:
raise ValueError("Datetime is not supported")
if isinstance(kwargs['datetime'], datetime):
object.datetime = kwargs['datetime']
else:
#try:
object.datetime = parse_datetime(kwargs['datetime'])
#except:
# raise ValueError("Unable to parse datetime: " + str(repr(kwargs['datetime'])))
del kwargs['datetime']
elif doc and annotationtype in doc.annotationdefaults and object.set in doc.annotationdefaults[annotationtype] and 'datetime' in doc.annotationdefaults[annotationtype][object.set]:
object.datetime = doc.annotationdefaults[annotationtype][object.set]['datetime']
elif Attrib.DATETIME in required:
raise ValueError("Datetime is required for " + object.__class__.__name__)
if 'src' in kwargs:
if not Attrib.SRC in supported:
raise ValueError("Source is not supported for " + object.__class__.__name__)
object.src = kwargs['src']
del kwargs['src']
elif Attrib.SRC in required:
raise ValueError("Source is required for " + object.__class__.__name__)
if 'begintime' in kwargs:
if not Attrib.BEGINTIME in supported:
raise ValueError("Begintime is not supported for " + object.__class__.__name__)
object.begintime = parsetime(kwargs['begintime'])
del kwargs['begintime']
elif Attrib.BEGINTIME in required:
raise ValueError("Begintime is required for " + object.__class__.__name__)
if 'endtime' in kwargs:
if not Attrib.ENDTIME in supported:
raise ValueError("Endtime is not supported for " + object.__class__.__name__)
object.endtime = parsetime(kwargs['endtime'])
del kwargs['endtime']
elif Attrib.ENDTIME in required:
raise ValueError("Endtime is required for " + object.__class__.__name__)
if 'speaker' in kwargs:
if not Attrib.SPEAKER in supported:
raise ValueError("Speaker is not supported for " + object.__class__.__name__)
object.speaker = kwargs['speaker']
del kwargs['speaker']
elif Attrib.SPEAKER in required:
raise ValueError("Speaker is required for " + object.__class__.__name__)
if 'auth' in kwargs:
if kwargs['auth'] in ('no','false'):
object.auth = False
else:
object.auth = bool(kwargs['auth'])
del kwargs['auth']
else:
object.auth = object.__class__.AUTH
if 'text' in kwargs:
if kwargs['text']:
object.settext(kwargs['text'])
del kwargs['text']
if 'phon' in kwargs:
if kwargs['phon']:
object.setphon(kwargs['phon'])
del kwargs['phon']
if 'textclass' in kwargs:
if not Attrib.TEXTCLASS in supported:
raise ValueError("Textclass is not supported for " + object.__class__.__name__)
object.textclass = kwargs['textclass']
del kwargs['textclass']
else:
if Attrib.TEXTCLASS in supported:
object.textclass = "current"
if 'metadata' in kwargs:
if not Attrib.METADATA in supported:
raise ValueError("Metadata is not supported for " + object.__class__.__name__)
object.metadata = kwargs['metadata']
if doc:
try:
doc.submetadata[kwargs['metadata']]
except KeyError:
raise KeyError("No such metadata defined: " + kwargs['metadata'])
del kwargs['metadata']
if object.XLINK:
if 'href' in kwargs:
object.href =kwargs['href']
del kwargs['href']
if 'xlinktype' in kwargs:
object.xlinktype = kwargs['xlinktype']
del kwargs['xlinktype']
if 'xlinkrole' in kwargs:
object.xlinkrole = kwargs['xlinkrole']
del kwargs['xlinkrole']
if 'xlinklabel' in kwargs:
object.xlinklabel = kwargs['xlinklabel']
del kwargs['xlinklabel']
if 'xlinkshow' in kwargs:
object.xlinkshow = kwargs['xlinkshow']
del kwargs['xlinkshow']
if 'xlinktitle' in kwargs:
object.xlinktitle = kwargs['xlinktitle']
del kwargs['xlinktitle']
if doc and doc.debug >= 2:
print(" @id = ", repr(object.id),file=stderr)
print(" @set = ", repr(object.set),file=stderr)
print(" @class = ", repr(object.cls),file=stderr)
print(" @annotator = ", repr(object.annotator),file=stderr)
print(" @annotatortype= ", repr(object.annotatortype),file=stderr)
print(" @confidence = ", repr(object.confidence),file=stderr)
print(" @n = ", repr(object.n),file=stderr)
print(" @datetime = ", repr(object.datetime),file=stderr)
#set index
if object.id and doc:
if object.id in doc.index:
if doc.debug >= 1: print("[PyNLPl FoLiA DEBUG] Duplicate ID not permitted:" + object.id,file=stderr)
raise DuplicateIDError("Duplicate ID not permitted: " + object.id)
else:
if doc.debug >= 1: print("[PyNLPl FoLiA DEBUG] Adding to index: " + object.id,file=stderr)
doc.index[object.id] = object
#Parse feature attributes (shortcut for feature specification for some elements)
for c in object.ACCEPTED_DATA:
if issubclass(c, Feature):
if c.SUBSET in kwargs:
if kwargs[c.SUBSET]:
object.append(c,cls=kwargs[c.SUBSET])
del kwargs[c.SUBSET]
return kwargs |
def setup_signals(self, ):
"""Connect the signals with the slots to make the ui functional
:returns: None
:rtype: None
:raises: None
"""
self.browser.shot_taskfile_sel_changed.connect(self.shot_taskfile_sel_changed)
self.browser.asset_taskfile_sel_changed.connect(self.asset_taskfile_sel_changed)
self.shot_open_pb.clicked.connect(self.shot_open_callback)
self.asset_open_pb.clicked.connect(self.asset_open_callback)
self.shot_save_pb.clicked.connect(self.shot_save_callback)
self.asset_save_pb.clicked.connect(self.asset_save_callback) | Connect the signals with the slots to make the ui functional
:returns: None
:rtype: None
:raises: None | Below is the instruction that describes the task:
### Input:
Connect the signals with the slots to make the ui functional
:returns: None
:rtype: None
:raises: None
### Response:
def setup_signals(self, ):
"""Connect the signals with the slots to make the ui functional
:returns: None
:rtype: None
:raises: None
"""
self.browser.shot_taskfile_sel_changed.connect(self.shot_taskfile_sel_changed)
self.browser.asset_taskfile_sel_changed.connect(self.asset_taskfile_sel_changed)
self.shot_open_pb.clicked.connect(self.shot_open_callback)
self.asset_open_pb.clicked.connect(self.asset_open_callback)
self.shot_save_pb.clicked.connect(self.shot_save_callback)
self.asset_save_pb.clicked.connect(self.asset_save_callback) |
def _arith_method_SERIES(cls, op, special):
"""
Wrapper function for Series arithmetic operations, to avoid
code duplication.
"""
str_rep = _get_opstr(op, cls)
op_name = _get_op_name(op, special)
eval_kwargs = _gen_eval_kwargs(op_name)
fill_zeros = _gen_fill_zeros(op_name)
construct_result = (_construct_divmod_result
if op in [divmod, rdivmod] else _construct_result)
def na_op(x, y):
import pandas.core.computation.expressions as expressions
try:
result = expressions.evaluate(op, str_rep, x, y, **eval_kwargs)
except TypeError:
result = masked_arith_op(x, y, op)
result = missing.fill_zeros(result, x, y, op_name, fill_zeros)
return result
def safe_na_op(lvalues, rvalues):
"""
return the result of evaluating na_op on the passed in values
try coercion to object type if the native types are not compatible
Parameters
----------
lvalues : array-like
rvalues : array-like
Raises
------
TypeError: invalid operation
"""
try:
with np.errstate(all='ignore'):
return na_op(lvalues, rvalues)
except Exception:
if is_object_dtype(lvalues):
return libalgos.arrmap_object(lvalues,
lambda x: op(x, rvalues))
raise
def wrapper(left, right):
if isinstance(right, ABCDataFrame):
return NotImplemented
left, right = _align_method_SERIES(left, right)
res_name = get_op_result_name(left, right)
right = maybe_upcast_for_op(right)
if is_categorical_dtype(left):
raise TypeError("{typ} cannot perform the operation "
"{op}".format(typ=type(left).__name__, op=str_rep))
elif is_datetime64_dtype(left) or is_datetime64tz_dtype(left):
# Give dispatch_to_index_op a chance for tests like
# test_dt64_series_add_intlike, which the index dispatching handles
# specifically.
result = dispatch_to_index_op(op, left, right, pd.DatetimeIndex)
return construct_result(left, result,
index=left.index, name=res_name,
dtype=result.dtype)
elif (is_extension_array_dtype(left) or
(is_extension_array_dtype(right) and not is_scalar(right))):
# GH#22378 disallow scalar to exclude e.g. "category", "Int64"
return dispatch_to_extension_op(op, left, right)
elif is_timedelta64_dtype(left):
result = dispatch_to_index_op(op, left, right, pd.TimedeltaIndex)
return construct_result(left, result,
index=left.index, name=res_name)
elif is_timedelta64_dtype(right):
# We should only get here with non-scalar or timedelta64('NaT')
# values for right
# Note: we cannot use dispatch_to_index_op because
# that may incorrectly raise TypeError when we
# should get NullFrequencyError
result = op(pd.Index(left), right)
return construct_result(left, result,
index=left.index, name=res_name,
dtype=result.dtype)
lvalues = left.values
rvalues = right
if isinstance(rvalues, ABCSeries):
rvalues = rvalues.values
result = safe_na_op(lvalues, rvalues)
return construct_result(left, result,
index=left.index, name=res_name, dtype=None)
wrapper.__name__ = op_name
return wrapper | Wrapper function for Series arithmetic operations, to avoid
code duplication. | Below is the instruction that describes the task:
### Input:
Wrapper function for Series arithmetic operations, to avoid
code duplication.
### Response:
def _arith_method_SERIES(cls, op, special):
"""
Wrapper function for Series arithmetic operations, to avoid
code duplication.
"""
str_rep = _get_opstr(op, cls)
op_name = _get_op_name(op, special)
eval_kwargs = _gen_eval_kwargs(op_name)
fill_zeros = _gen_fill_zeros(op_name)
construct_result = (_construct_divmod_result
if op in [divmod, rdivmod] else _construct_result)
def na_op(x, y):
import pandas.core.computation.expressions as expressions
try:
result = expressions.evaluate(op, str_rep, x, y, **eval_kwargs)
except TypeError:
result = masked_arith_op(x, y, op)
result = missing.fill_zeros(result, x, y, op_name, fill_zeros)
return result
def safe_na_op(lvalues, rvalues):
"""
return the result of evaluating na_op on the passed in values
try coercion to object type if the native types are not compatible
Parameters
----------
lvalues : array-like
rvalues : array-like
Raises
------
TypeError: invalid operation
"""
try:
with np.errstate(all='ignore'):
return na_op(lvalues, rvalues)
except Exception:
if is_object_dtype(lvalues):
return libalgos.arrmap_object(lvalues,
lambda x: op(x, rvalues))
raise
def wrapper(left, right):
if isinstance(right, ABCDataFrame):
return NotImplemented
left, right = _align_method_SERIES(left, right)
res_name = get_op_result_name(left, right)
right = maybe_upcast_for_op(right)
if is_categorical_dtype(left):
raise TypeError("{typ} cannot perform the operation "
"{op}".format(typ=type(left).__name__, op=str_rep))
elif is_datetime64_dtype(left) or is_datetime64tz_dtype(left):
# Give dispatch_to_index_op a chance for tests like
# test_dt64_series_add_intlike, which the index dispatching handles
# specifically.
result = dispatch_to_index_op(op, left, right, pd.DatetimeIndex)
return construct_result(left, result,
index=left.index, name=res_name,
dtype=result.dtype)
elif (is_extension_array_dtype(left) or
(is_extension_array_dtype(right) and not is_scalar(right))):
# GH#22378 disallow scalar to exclude e.g. "category", "Int64"
return dispatch_to_extension_op(op, left, right)
elif is_timedelta64_dtype(left):
result = dispatch_to_index_op(op, left, right, pd.TimedeltaIndex)
return construct_result(left, result,
index=left.index, name=res_name)
elif is_timedelta64_dtype(right):
# We should only get here with non-scalar or timedelta64('NaT')
# values for right
# Note: we cannot use dispatch_to_index_op because
# that may incorrectly raise TypeError when we
# should get NullFrequencyError
result = op(pd.Index(left), right)
return construct_result(left, result,
index=left.index, name=res_name,
dtype=result.dtype)
lvalues = left.values
rvalues = right
if isinstance(rvalues, ABCSeries):
rvalues = rvalues.values
result = safe_na_op(lvalues, rvalues)
return construct_result(left, result,
index=left.index, name=res_name, dtype=None)
wrapper.__name__ = op_name
return wrapper |
def _fill_cropping(self, image_size, view_size):
"""
Return a (left, top, right, bottom) 4-tuple containing the cropping
values required to display an image of *image_size* in *view_size*
when stretched proportionately. Each value is a percentage expressed
as a fraction of 1.0, e.g. 0.425 represents 42.5%. *image_size* and
*view_size* are each (width, height) pairs.
"""
def aspect_ratio(width, height):
return width / height
ar_view = aspect_ratio(*view_size)
ar_image = aspect_ratio(*image_size)
if ar_view < ar_image: # image too wide
crop = (1.0 - (ar_view/ar_image)) / 2.0
return (crop, 0.0, crop, 0.0)
if ar_view > ar_image: # image too tall
crop = (1.0 - (ar_image/ar_view)) / 2.0
return (0.0, crop, 0.0, crop)
return (0.0, 0.0, 0.0, 0.0) | Return a (left, top, right, bottom) 4-tuple containing the cropping
values required to display an image of *image_size* in *view_size*
when stretched proportionately. Each value is a percentage expressed
as a fraction of 1.0, e.g. 0.425 represents 42.5%. *image_size* and
*view_size* are each (width, height) pairs. | Below is the instruction that describes the task:
### Input:
Return a (left, top, right, bottom) 4-tuple containing the cropping
values required to display an image of *image_size* in *view_size*
when stretched proportionately. Each value is a percentage expressed
as a fraction of 1.0, e.g. 0.425 represents 42.5%. *image_size* and
*view_size* are each (width, height) pairs.
### Response:
def _fill_cropping(self, image_size, view_size):
"""
Return a (left, top, right, bottom) 4-tuple containing the cropping
values required to display an image of *image_size* in *view_size*
when stretched proportionately. Each value is a percentage expressed
as a fraction of 1.0, e.g. 0.425 represents 42.5%. *image_size* and
*view_size* are each (width, height) pairs.
"""
def aspect_ratio(width, height):
return width / height
ar_view = aspect_ratio(*view_size)
ar_image = aspect_ratio(*image_size)
if ar_view < ar_image: # image too wide
crop = (1.0 - (ar_view/ar_image)) / 2.0
return (crop, 0.0, crop, 0.0)
if ar_view > ar_image: # image too tall
crop = (1.0 - (ar_image/ar_view)) / 2.0
return (0.0, crop, 0.0, crop)
return (0.0, 0.0, 0.0, 0.0) |
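A worked check of the arithmetic (the method never touches `self`, so it can be exercised with a dummy first argument): a 400x200 image in a 200x200 view has aspect ratios 2.0 vs 1.0, so (1 - 1/2) / 2 = 0.25 is cropped from each side.

```python
# Worked example: image too wide for the view, so 25% is trimmed left and right.
print(_fill_cropping(None, image_size=(400, 200), view_size=(200, 200)))
# (0.25, 0.0, 0.25, 0.0)
```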
def c_rho0(self, rho0):
"""
computes the concentration given a comoving overdensity rho0 (inverse of function rho0_c)
:param rho0: density normalization in h^2/Mpc^3 (comoving)
:return: concentration parameter c
"""
if not hasattr(self, '_c_rho0_interp'):
c_array = np.linspace(0.1, 10, 100)
rho0_array = self.rho0_c(c_array)
from scipy import interpolate
self._c_rho0_interp = interpolate.InterpolatedUnivariateSpline(rho0_array, c_array, w=None, bbox=[None, None], k=3)
return self._c_rho0_interp(rho0) | computes the concentration given a comoving overdensity rho0 (inverse of function rho0_c)
:param rho0: density normalization in h^2/Mpc^3 (comoving)
:return: concentration parameter c | Below is the instruction that describes the task:
### Input:
computes the concentration given a comoving overdensity rho0 (inverse of function rho0_c)
:param rho0: density normalization in h^2/Mpc^3 (comoving)
:return: concentration parameter c
### Response:
def c_rho0(self, rho0):
"""
computes the concentration given a comoving overdensity rho0 (inverse of function rho0_c)
:param rho0: density normalization in h^2/Mpc^3 (comoving)
:return: concentration parameter c
"""
if not hasattr(self, '_c_rho0_interp'):
c_array = np.linspace(0.1, 10, 100)
rho0_array = self.rho0_c(c_array)
from scipy import interpolate
self._c_rho0_interp = interpolate.InterpolatedUnivariateSpline(rho0_array, c_array, w=None, bbox=[None, None], k=3)
return self._c_rho0_interp(rho0) |
def hierarchy_name(self, adjust_for_printing=True):
"""
return the name for this object with the parents names attached by dots.
:param bool adjust_for_printing: whether to call :func:`~adjust_for_printing()`
on the names, recursively
"""
if adjust_for_printing: adjust = lambda x: adjust_name_for_printing(x)
else: adjust = lambda x: x
if self.has_parent():
return self._parent_.hierarchy_name() + "." + adjust(self.name)
return adjust(self.name) | return the name for this object with the parents names attached by dots.
:param bool adjust_for_printing: whether to call :func:`~adjust_for_printing()`
on the names, recursively | Below is the instruction that describes the task:
### Input:
return the name for this object with the parents names attached by dots.
:param bool adjust_for_printing: whether to call :func:`~adjust_for_printing()`
on the names, recursively
### Response:
def hierarchy_name(self, adjust_for_printing=True):
"""
return the name for this object with the parents names attached by dots.
:param bool adjust_for_printing: whether to call :func:`~adjust_for_printing()`
on the names, recursively
"""
if adjust_for_printing: adjust = lambda x: adjust_name_for_printing(x)
else: adjust = lambda x: x
if self.has_parent():
return self._parent_.hierarchy_name() + "." + adjust(self.name)
return adjust(self.name) |
def delete_free_shipping_coupon_by_id(cls, free_shipping_coupon_id, **kwargs):
"""Delete FreeShippingCoupon
Delete an instance of FreeShippingCoupon by its ID.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.delete_free_shipping_coupon_by_id(free_shipping_coupon_id, async=True)
>>> result = thread.get()
:param async bool
:param str free_shipping_coupon_id: ID of freeShippingCoupon to delete. (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return cls._delete_free_shipping_coupon_by_id_with_http_info(free_shipping_coupon_id, **kwargs)
else:
(data) = cls._delete_free_shipping_coupon_by_id_with_http_info(free_shipping_coupon_id, **kwargs)
return data | Delete FreeShippingCoupon
Delete an instance of FreeShippingCoupon by its ID.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.delete_free_shipping_coupon_by_id(free_shipping_coupon_id, async=True)
>>> result = thread.get()
:param async bool
:param str free_shipping_coupon_id: ID of freeShippingCoupon to delete. (required)
:return: None
If the method is called asynchronously,
returns the request thread. | Below is the instruction that describes the task:
### Input:
Delete FreeShippingCoupon
Delete an instance of FreeShippingCoupon by its ID.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.delete_free_shipping_coupon_by_id(free_shipping_coupon_id, async=True)
>>> result = thread.get()
:param async bool
:param str free_shipping_coupon_id: ID of freeShippingCoupon to delete. (required)
:return: None
If the method is called asynchronously,
returns the request thread.
### Response:
def delete_free_shipping_coupon_by_id(cls, free_shipping_coupon_id, **kwargs):
"""Delete FreeShippingCoupon
Delete an instance of FreeShippingCoupon by its ID.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.delete_free_shipping_coupon_by_id(free_shipping_coupon_id, async=True)
>>> result = thread.get()
:param async bool
:param str free_shipping_coupon_id: ID of freeShippingCoupon to delete. (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return cls._delete_free_shipping_coupon_by_id_with_http_info(free_shipping_coupon_id, **kwargs)
else:
(data) = cls._delete_free_shipping_coupon_by_id_with_http_info(free_shipping_coupon_id, **kwargs)
return data |
def jsName(path,name):
'''Returns a name string without \, -, and . so that
the string will play nicely with javascript.'''
shortPath=path.replace(
"C:\\Users\\scheinerbock\\Desktop\\"+
"ideogram\\scrapeSource\\test\\","")
noDash = shortPath.replace("-","_dash_")
jsPath=noDash.replace("\\","_slash_").replace(".","_dot_")
jsName=jsPath+'_slash_'+name
return jsName | Returns a name string without \, -, and . so that
the string will play nicely with javascript. | Below is the instruction that describes the task:
### Input:
Returns a name string without \, -, and . so that
the string will play nicely with javascript.
### Response:
def jsName(path,name):
'''Returns a name string without \, -, and . so that
the string will play nicely with javascript.'''
shortPath=path.replace(
"C:\\Users\\scheinerbock\\Desktop\\"+
"ideogram\\scrapeSource\\test\\","")
noDash = shortPath.replace("-","_dash_")
jsPath=noDash.replace("\\","_slash_").replace(".","_dot_")
jsName=jsPath+'_slash_'+name
return jsName |
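A quick check of the escaping rules on a path that does not contain the hard-coded desktop prefix:

```python
# Example: backslashes, dashes and dots are all rewritten to safe tokens.
print(jsName("scrape\\my-module.py", "parse"))
# scrape_slash_my_dash_module_dot_py_slash_parse
```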
def get_config(cls, key, default=None):
"""
Shortcut to access the application's config in your class
:param key: The key to access
:param default: The default value when None
:returns mixed:
"""
return cls._app.config.get(key, default) | Shortcut to access the application's config in your class
:param key: The key to access
:param default: The default value when None
:returns mixed: | Below is the instruction that describes the task:
### Input:
Shortcut to access the application's config in your class
:param key: The key to access
:param default: The default value when None
:returns mixed:
### Response:
def get_config(cls, key, default=None):
"""
Shortcut to access the application's config in your class
:param key: The key to access
:param default: The default value when None
:returns mixed:
"""
return cls._app.config.get(key, default) |
def add_done_callback(self, callback):
"""
Add a callback to run after the task completes.
The callable must take 1 argument which will be
the completed Task.
:param callback: a callable that takes a single argument which
will be the completed Task.
"""
if self._done is None or self._done.is_set():
raise ValueError('Task has already finished')
if callable(callback):
self.callbacks.append(callback) | Add a callback to run after the task completes.
The callable must take 1 argument which will be
the completed Task.
:param callback: a callable that takes a single argument which
will be the completed Task. | Below is the instruction that describes the task:
### Input:
Add a callback to run after the task completes.
The callable must take 1 argument which will be
the completed Task.
:param callback: a callable that takes a single argument which
will be the completed Task.
### Response:
def add_done_callback(self, callback):
"""
Add a callback to run after the task completes.
The callable must take 1 argument which will be
the completed Task.
:param callback: a callable that takes a single argument which
will be the completed Task.
"""
if self._done is None or self._done.is_set():
raise ValueError('Task has already finished')
if callable(callback):
self.callbacks.append(callback) |
async def from_href(self):
"""Get the full object from spotify with a `href` attribute."""
if not hasattr(self, 'href'):
raise TypeError('Spotify object has no `href` attribute, therefore cannot be retrieved')
elif hasattr(self, 'http'):
return await self.http.request(('GET', self.href))
else:
cls = type(self)
try:
client = getattr(self, '_{0}__client'.format(cls.__name__))
except AttributeError:
raise TypeError('Spotify object has no way to access a HTTPClient.')
else:
http = client.http
data = await http.request(('GET', self.href))
return cls(client, data) | Get the full object from spotify with a `href` attribute. | Below is the instruction that describes the task:
### Input:
Get the full object from spotify with a `href` attribute.
### Response:
async def from_href(self):
"""Get the full object from spotify with a `href` attribute."""
if not hasattr(self, 'href'):
raise TypeError('Spotify object has no `href` attribute, therefore cannot be retrieved')
elif hasattr(self, 'http'):
return await self.http.request(('GET', self.href))
else:
cls = type(self)
try:
client = getattr(self, '_{0}__client'.format(cls.__name__))
except AttributeError:
raise TypeError('Spotify object has no way to access a HTTPClient.')
else:
http = client.http
data = await http.request(('GET', self.href))
return cls(client, data) |
def get_version(self, version_id=None):
"""Return specific version ``ObjectVersion`` instance or HEAD.
:param version_id: Version ID of the object.
:returns: :class:`~invenio_files_rest.models.ObjectVersion` instance or
HEAD of the stored object.
"""
return ObjectVersion.get(bucket=self.obj.bucket, key=self.obj.key,
version_id=version_id) | Return specific version ``ObjectVersion`` instance or HEAD.
:param version_id: Version ID of the object.
:returns: :class:`~invenio_files_rest.models.ObjectVersion` instance or
HEAD of the stored object. | Below is the instruction that describes the task:
### Input:
Return specific version ``ObjectVersion`` instance or HEAD.
:param version_id: Version ID of the object.
:returns: :class:`~invenio_files_rest.models.ObjectVersion` instance or
HEAD of the stored object.
### Response:
def get_version(self, version_id=None):
"""Return specific version ``ObjectVersion`` instance or HEAD.
:param version_id: Version ID of the object.
:returns: :class:`~invenio_files_rest.models.ObjectVersion` instance or
HEAD of the stored object.
"""
return ObjectVersion.get(bucket=self.obj.bucket, key=self.obj.key,
version_id=version_id) |
def alter_targets(self):
"""Return any corresponding targets in a variant directory.
"""
if self.is_derived():
return [], None
return self.fs.variant_dir_target_climb(self, self.dir, [self.name]) | Return any corresponding targets in a variant directory. | Below is the the instruction that describes the task:
### Input:
Return any corresponding targets in a variant directory.
### Response:
def alter_targets(self):
"""Return any corresponding targets in a variant directory.
"""
if self.is_derived():
return [], None
return self.fs.variant_dir_target_climb(self, self.dir, [self.name]) |
def showDescription( self ):
"""
Shows the description for the current plugin in the interface.
"""
plugin = self.currentPlugin()
if ( not plugin ):
self.uiDescriptionTXT.setText('')
else:
self.uiDescriptionTXT.setText(plugin.description()) | Shows the description for the current plugin in the interface. | Below is the the instruction that describes the task:
### Input:
Shows the description for the current plugin in the interface.
### Response:
def showDescription( self ):
"""
Shows the description for the current plugin in the interface.
"""
plugin = self.currentPlugin()
if ( not plugin ):
self.uiDescriptionTXT.setText('')
else:
self.uiDescriptionTXT.setText(plugin.description()) |
def show_hist(self, props=[], bins=20, **kwargs):
r"""
Show a quick plot of key property distributions.
Parameters
----------
props : string or list of strings
The pore and/or throat properties to be plotted as histograms
bins : int or array_like
The number of bins to use when generating the histogram. If an
array is given they are used as the bin spacing instead.
Notes
-----
Other keyword arguments are passed to the ``matplotlib.pyplot.hist``
function.
"""
if type(props) is str:
props = [props]
N = len(props)
if N == 1:
r = 1
c = 1
elif N < 4:
r = 1
c = N
else:
r = int(sp.ceil(N**0.5))
        c = int(sp.ceil(N**0.5))  # ceil (not floor) so the grid always has at least N slots
for i in range(len(props)):
plt.subplot(r, c, i+1)
plt.hist(self[props[i]], bins=bins, **kwargs) | r"""
Show a quick plot of key property distributions.
Parameters
----------
props : string or list of strings
The pore and/or throat properties to be plotted as histograms
bins : int or array_like
The number of bins to use when generating the histogram. If an
array is given they are used as the bin spacing instead.
Notes
-----
Other keyword arguments are passed to the ``matplotlib.pyplot.hist``
function. | Below is the the instruction that describes the task:
### Input:
r"""
Show a quick plot of key property distributions.
Parameters
----------
props : string or list of strings
The pore and/or throat properties to be plotted as histograms
bins : int or array_like
The number of bins to use when generating the histogram. If an
array is given they are used as the bin spacing instead.
Notes
-----
Other keyword arguments are passed to the ``matplotlib.pyplot.hist``
function.
### Response:
def show_hist(self, props=[], bins=20, **kwargs):
r"""
Show a quick plot of key property distributions.
Parameters
----------
props : string or list of strings
The pore and/or throat properties to be plotted as histograms
bins : int or array_like
The number of bins to use when generating the histogram. If an
array is given they are used as the bin spacing instead.
Notes
-----
Other keyword arguments are passed to the ``matplotlib.pyplot.hist``
function.
"""
if type(props) is str:
props = [props]
N = len(props)
if N == 1:
r = 1
c = 1
elif N < 4:
r = 1
c = N
else:
r = int(sp.ceil(N**0.5))
        c = int(sp.ceil(N**0.5))  # ceil (not floor) so the grid always has at least N slots
for i in range(len(props)):
plt.subplot(r, c, i+1)
plt.hist(self[props[i]], bins=bins, **kwargs) |
def fetch_more(self, rows=False, columns=False):
"""Get more columns or rows (based on axis)."""
if self.axis == 1 and self.total_rows > self.rows_loaded:
reminder = self.total_rows - self.rows_loaded
items_to_fetch = min(reminder, ROWS_TO_LOAD)
self.beginInsertRows(QModelIndex(), self.rows_loaded,
self.rows_loaded + items_to_fetch - 1)
self.rows_loaded += items_to_fetch
self.endInsertRows()
if self.axis == 0 and self.total_cols > self.cols_loaded:
reminder = self.total_cols - self.cols_loaded
items_to_fetch = min(reminder, COLS_TO_LOAD)
self.beginInsertColumns(QModelIndex(), self.cols_loaded,
self.cols_loaded + items_to_fetch - 1)
self.cols_loaded += items_to_fetch
self.endInsertColumns() | Get more columns or rows (based on axis). | Below is the the instruction that describes the task:
### Input:
Get more columns or rows (based on axis).
### Response:
def fetch_more(self, rows=False, columns=False):
"""Get more columns or rows (based on axis)."""
if self.axis == 1 and self.total_rows > self.rows_loaded:
reminder = self.total_rows - self.rows_loaded
items_to_fetch = min(reminder, ROWS_TO_LOAD)
self.beginInsertRows(QModelIndex(), self.rows_loaded,
self.rows_loaded + items_to_fetch - 1)
self.rows_loaded += items_to_fetch
self.endInsertRows()
if self.axis == 0 and self.total_cols > self.cols_loaded:
reminder = self.total_cols - self.cols_loaded
items_to_fetch = min(reminder, COLS_TO_LOAD)
self.beginInsertColumns(QModelIndex(), self.cols_loaded,
self.cols_loaded + items_to_fetch - 1)
self.cols_loaded += items_to_fetch
self.endInsertColumns() |
def boolean(value):
"""
Configuration-friendly boolean type converter.
Supports both boolean-valued and string-valued inputs (e.g. from env vars).
"""
if isinstance(value, bool):
return value
if value == "":
return False
return strtobool(value) | Configuration-friendly boolean type converter.
Supports both boolean-valued and string-valued inputs (e.g. from env vars). | Below is the the instruction that describes the task:
### Input:
Configuration-friendly boolean type converter.
Supports both boolean-valued and string-valued inputs (e.g. from env vars).
### Response:
def boolean(value):
"""
Configuration-friendly boolean type converter.
Supports both boolean-valued and string-valued inputs (e.g. from env vars).
"""
if isinstance(value, bool):
return value
if value == "":
return False
return strtobool(value) |
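A small usage sketch for the converter above (note that `strtobool`, imported from distutils.util in the original code, returns 1/0 rather than True/False for string inputs):

assert boolean(True) is True
assert boolean("") is False
assert boolean("yes") == 1   # strtobool("yes") -> 1
assert boolean("0") == 0     # strtobool("0") -> 0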
def base64ToImage(imgData, out_path, out_file):
""" converts a base64 string to a file """
fh = open(os.path.join(out_path, out_file), "wb")
fh.write(imgData.decode('base64'))
fh.close()
del fh
return os.path.join(out_path, out_file) | converts a base64 string to a file | Below is the the instruction that describes the task:
### Input:
converts a base64 string to a file
### Response:
def base64ToImage(imgData, out_path, out_file):
""" converts a base64 string to a file """
fh = open(os.path.join(out_path, out_file), "wb")
fh.write(imgData.decode('base64'))
fh.close()
del fh
return os.path.join(out_path, out_file) |
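The implementation above relies on Python 2's str.decode('base64'), which does not exist in Python 3. A hedged Python 3 sketch of the same idea (function name is illustrative, not from the original project):

import base64
import os

def base64_to_image_py3(img_data, out_path, out_file):
    # Decode the base64 payload and write it out as a binary file.
    target = os.path.join(out_path, out_file)
    with open(target, "wb") as fh:
        fh.write(base64.b64decode(img_data))
    return target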
def convertDate(date):
"""Convert DATE string into a decimal year."""
d, t = date.split('T')
return decimal_date(d, timeobs=t) | Convert DATE string into a decimal year. | Below is the the instruction that describes the task:
### Input:
Convert DATE string into a decimal year.
### Response:
def convertDate(date):
"""Convert DATE string into a decimal year."""
d, t = date.split('T')
return decimal_date(d, timeobs=t) |
def settingsAsFacts(self, settings):
"""
Parses a string of settings.
        :param settings: String of settings in the form:
        ``set(name1, val1), set(name2, val2)...``
        """
        pattern = re.compile(r'set\(([a-zA-Z0-9_]+),([a-zA-Z0-9_]+)\)')
pairs = pattern.findall(settings)
for name, val in pairs:
self.set(name, val) | Parses a string of settings.
:param setting: String of settings in the form:
``set(name1, val1), set(name2, val2)...`` | Below is the the instruction that describes the task:
### Input:
Parses a string of settings.
:param settings: String of settings in the form:
``set(name1, val1), set(name2, val2)...``
### Response:
def settingsAsFacts(self, settings):
"""
Parses a string of settings.
        :param settings: String of settings in the form:
        ``set(name1, val1), set(name2, val2)...``
        """
        pattern = re.compile(r'set\(([a-zA-Z0-9_]+),([a-zA-Z0-9_]+)\)')
pairs = pattern.findall(settings)
for name, val in pairs:
self.set(name, val) |
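A standalone illustration of the (corrected) pattern used above, with a made-up settings string:

import re

pattern = re.compile(r'set\(([a-zA-Z0-9_]+),([a-zA-Z0-9_]+)\)')
print(pattern.findall("set(verbosity,3),set(mode,fast)"))
# -> [('verbosity', '3'), ('mode', 'fast')]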
def scalar(self, tag, value, step=None):
"""Saves scalar value.
Args:
tag: str: label for this data
value: int/float: number to log
step: int: training step
"""
value = float(onp.array(value))
if step is None:
step = self._step
else:
self._step = step
summary = Summary(value=[Summary.Value(tag=tag, simple_value=value)])
self.add_summary(summary, step) | Saves scalar value.
Args:
tag: str: label for this data
value: int/float: number to log
step: int: training step | Below is the the instruction that describes the task:
### Input:
Saves scalar value.
Args:
tag: str: label for this data
value: int/float: number to log
step: int: training step
### Response:
def scalar(self, tag, value, step=None):
"""Saves scalar value.
Args:
tag: str: label for this data
value: int/float: number to log
step: int: training step
"""
value = float(onp.array(value))
if step is None:
step = self._step
else:
self._step = step
summary = Summary(value=[Summary.Value(tag=tag, simple_value=value)])
self.add_summary(summary, step) |
def StoreStat(self, responses):
"""Stores stat entry in the flow's state."""
index = responses.request_data["index"]
if not responses.success:
self.Log("Failed to stat file: %s", responses.status)
# Report failure.
self._FileFetchFailed(index, responses.request_data["request_name"])
return
tracker = self.state.pending_hashes[index]
tracker["stat_entry"] = responses.First() | Stores stat entry in the flow's state. | Below is the the instruction that describes the task:
### Input:
Stores stat entry in the flow's state.
### Response:
def StoreStat(self, responses):
"""Stores stat entry in the flow's state."""
index = responses.request_data["index"]
if not responses.success:
self.Log("Failed to stat file: %s", responses.status)
# Report failure.
self._FileFetchFailed(index, responses.request_data["request_name"])
return
tracker = self.state.pending_hashes[index]
tracker["stat_entry"] = responses.First() |
def config_default(option, default=None, type=None, section=cli.name):
"""Guesses a default value of a CLI option from the configuration.
::
@click.option('--locale', default=config_default('locale'))
"""
def f(option=option, default=default, type=type, section=section):
config = read_config()
if type is None and default is not None:
# detect type from default.
type = builtins.type(default)
get_option = option_getter(type)
try:
return get_option(config, section, option)
except (NoOptionError, NoSectionError):
return default
return f | Guesses a default value of a CLI option from the configuration.
::
@click.option('--locale', default=config_default('locale')) | Below is the the instruction that describes the task:
### Input:
Guesses a default value of a CLI option from the configuration.
::
@click.option('--locale', default=config_default('locale'))
### Response:
def config_default(option, default=None, type=None, section=cli.name):
"""Guesses a default value of a CLI option from the configuration.
::
@click.option('--locale', default=config_default('locale'))
"""
def f(option=option, default=default, type=type, section=section):
config = read_config()
if type is None and default is not None:
# detect type from default.
type = builtins.type(default)
get_option = option_getter(type)
try:
return get_option(config, section, option)
except (NoOptionError, NoSectionError):
return default
return f |
def spherical(cls, mag, theta, phi=0):
'''Returns a Vector instance from spherical coordinates'''
return cls(
mag * math.sin(phi) * math.cos(theta), # X
mag * math.sin(phi) * math.sin(theta), # Y
mag * math.cos(phi) # Z
) | Returns a Vector instance from spherical coordinates | Below is the the instruction that describes the task:
### Input:
Returns a Vector instance from spherical coordinates
### Response:
def spherical(cls, mag, theta, phi=0):
'''Returns a Vector instance from spherical coordinates'''
return cls(
mag * math.sin(phi) * math.cos(theta), # X
mag * math.sin(phi) * math.sin(theta), # Y
mag * math.cos(phi) # Z
) |
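A quick numeric check of the spherical-to-Cartesian mapping above, done without the Vector class: mag=1, theta=0, phi=pi/2 should land on the positive x axis.

import math

mag, theta, phi = 1.0, 0.0, math.pi / 2
x = mag * math.sin(phi) * math.cos(theta)   # 1.0
y = mag * math.sin(phi) * math.sin(theta)   # 0.0
z = mag * math.cos(phi)                     # ~0.0 (floating-point epsilon)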
def GetAttributes(self, urns, age=NEWEST_TIME):
"""Retrieves all the attributes for all the urns."""
urns = set([utils.SmartUnicode(u) for u in urns])
to_read = {urn: self._MakeCacheInvariant(urn, age) for urn in urns}
# Urns not present in the cache we need to get from the database.
if to_read:
for subject, values in data_store.DB.MultiResolvePrefix(
to_read,
AFF4_PREFIXES,
timestamp=self.ParseAgeSpecification(age),
limit=None):
# Ensure the values are sorted.
values.sort(key=lambda x: x[-1], reverse=True)
yield utils.SmartUnicode(subject), values | Retrieves all the attributes for all the urns. | Below is the the instruction that describes the task:
### Input:
Retrieves all the attributes for all the urns.
### Response:
def GetAttributes(self, urns, age=NEWEST_TIME):
"""Retrieves all the attributes for all the urns."""
urns = set([utils.SmartUnicode(u) for u in urns])
to_read = {urn: self._MakeCacheInvariant(urn, age) for urn in urns}
# Urns not present in the cache we need to get from the database.
if to_read:
for subject, values in data_store.DB.MultiResolvePrefix(
to_read,
AFF4_PREFIXES,
timestamp=self.ParseAgeSpecification(age),
limit=None):
# Ensure the values are sorted.
values.sort(key=lambda x: x[-1], reverse=True)
yield utils.SmartUnicode(subject), values |
def mimebundle_to_html(bundle):
"""
Converts a MIME bundle into HTML.
"""
if isinstance(bundle, tuple):
data, metadata = bundle
else:
data = bundle
html = data.get('text/html', '')
if 'application/javascript' in data:
js = data['application/javascript']
html += '\n<script type="application/javascript">{js}</script>'.format(js=js)
return html | Converts a MIME bundle into HTML. | Below is the the instruction that describes the task:
### Input:
Converts a MIME bundle into HTML.
### Response:
def mimebundle_to_html(bundle):
"""
Converts a MIME bundle into HTML.
"""
if isinstance(bundle, tuple):
data, metadata = bundle
else:
data = bundle
html = data.get('text/html', '')
if 'application/javascript' in data:
js = data['application/javascript']
html += '\n<script type="application/javascript">{js}</script>'.format(js=js)
return html |
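A usage sketch with a hand-made MIME bundle (data is hypothetical):

bundle = (
    {"text/html": "<div>hello</div>",
     "application/javascript": "console.log('hi');"},
    {},  # metadata, unused by the function
)
print(mimebundle_to_html(bundle))
# <div>hello</div>
# <script type="application/javascript">console.log('hi');</script>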
def save(self, p_todolist):
"""
Saves a tuple with archive, todolist and command with its arguments
into the backup file with unix timestamp as the key. Tuple is then
indexed in backup file with combination of hash calculated from
p_todolist and unix timestamp. Backup file is closed afterwards.
"""
self._trim()
current_hash = hash_todolist(p_todolist)
list_todo = (self.todolist.print_todos()+'\n').splitlines(True)
try:
list_archive = (self.archive.print_todos()+'\n').splitlines(True)
except AttributeError:
list_archive = []
self.backup_dict[self.timestamp] = (list_todo, list_archive, self.label)
index = self._get_index()
index.insert(0, (self.timestamp, current_hash))
self._save_index(index)
self._write()
self.close() | Saves a tuple with archive, todolist and command with its arguments
into the backup file with unix timestamp as the key. Tuple is then
indexed in backup file with combination of hash calculated from
p_todolist and unix timestamp. Backup file is closed afterwards. | Below is the the instruction that describes the task:
### Input:
Saves a tuple with archive, todolist and command with its arguments
into the backup file with unix timestamp as the key. Tuple is then
indexed in backup file with combination of hash calculated from
p_todolist and unix timestamp. Backup file is closed afterwards.
### Response:
def save(self, p_todolist):
"""
Saves a tuple with archive, todolist and command with its arguments
into the backup file with unix timestamp as the key. Tuple is then
indexed in backup file with combination of hash calculated from
p_todolist and unix timestamp. Backup file is closed afterwards.
"""
self._trim()
current_hash = hash_todolist(p_todolist)
list_todo = (self.todolist.print_todos()+'\n').splitlines(True)
try:
list_archive = (self.archive.print_todos()+'\n').splitlines(True)
except AttributeError:
list_archive = []
self.backup_dict[self.timestamp] = (list_todo, list_archive, self.label)
index = self._get_index()
index.insert(0, (self.timestamp, current_hash))
self._save_index(index)
self._write()
self.close() |
def add_path(self, w, h):
"""Return a newly created `a:path` child element."""
path = self._add_path()
path.w, path.h = w, h
return path | Return a newly created `a:path` child element. | Below is the the instruction that describes the task:
### Input:
Return a newly created `a:path` child element.
### Response:
def add_path(self, w, h):
"""Return a newly created `a:path` child element."""
path = self._add_path()
path.w, path.h = w, h
return path |
def instruction_PUL(self, opcode, m, register):
"""
All, some, or none of the processor registers are pulled from stack
(with the exception of stack pointer itself).
A single register may be pulled from the stack with condition codes set
by doing an autoincrement load from the stack (example: LDX ,S++).
source code forms: b7 b6 b5 b4 b3 b2 b1 b0 PC U Y X DP B A CC = pull
order
CC bits "HNZVC": ccccc
"""
assert register in (self.system_stack_pointer, self.user_stack_pointer)
def pull(register_str, stack_pointer):
reg_obj = self.register_str2object[register_str]
reg_width = reg_obj.WIDTH # 8 / 16
if reg_width == 8:
data = self.pull_byte(stack_pointer)
else:
assert reg_width == 16
data = self.pull_word(stack_pointer)
reg_obj.set(data)
# log.debug("$%x PUL%s:", self.program_counter, register.name)
# m = postbyte
if m & 0x01: pull(REG_CC, register) # 8 bit condition code register
if m & 0x02: pull(REG_A, register) # 8 bit accumulator
if m & 0x04: pull(REG_B, register) # 8 bit accumulator
if m & 0x08: pull(REG_DP, register) # 8 bit direct page register
if m & 0x10: pull(REG_X, register) # 16 bit index register
if m & 0x20: pull(REG_Y, register) # 16 bit index register
if m & 0x40: pull(REG_U, register) # 16 bit user-stack pointer
if m & 0x80: pull(REG_PC, register) | All, some, or none of the processor registers are pulled from stack
(with the exception of stack pointer itself).
A single register may be pulled from the stack with condition codes set
by doing an autoincrement load from the stack (example: LDX ,S++).
source code forms: b7 b6 b5 b4 b3 b2 b1 b0 PC U Y X DP B A CC = pull
order
CC bits "HNZVC": ccccc | Below is the the instruction that describes the task:
### Input:
All, some, or none of the processor registers are pulled from stack
(with the exception of stack pointer itself).
A single register may be pulled from the stack with condition codes set
by doing an autoincrement load from the stack (example: LDX ,S++).
source code forms: b7 b6 b5 b4 b3 b2 b1 b0 PC U Y X DP B A CC = pull
order
CC bits "HNZVC": ccccc
### Response:
def instruction_PUL(self, opcode, m, register):
"""
All, some, or none of the processor registers are pulled from stack
(with the exception of stack pointer itself).
A single register may be pulled from the stack with condition codes set
by doing an autoincrement load from the stack (example: LDX ,S++).
source code forms: b7 b6 b5 b4 b3 b2 b1 b0 PC U Y X DP B A CC = pull
order
CC bits "HNZVC": ccccc
"""
assert register in (self.system_stack_pointer, self.user_stack_pointer)
def pull(register_str, stack_pointer):
reg_obj = self.register_str2object[register_str]
reg_width = reg_obj.WIDTH # 8 / 16
if reg_width == 8:
data = self.pull_byte(stack_pointer)
else:
assert reg_width == 16
data = self.pull_word(stack_pointer)
reg_obj.set(data)
# log.debug("$%x PUL%s:", self.program_counter, register.name)
# m = postbyte
if m & 0x01: pull(REG_CC, register) # 8 bit condition code register
if m & 0x02: pull(REG_A, register) # 8 bit accumulator
if m & 0x04: pull(REG_B, register) # 8 bit accumulator
if m & 0x08: pull(REG_DP, register) # 8 bit direct page register
if m & 0x10: pull(REG_X, register) # 16 bit index register
if m & 0x20: pull(REG_Y, register) # 16 bit index register
if m & 0x40: pull(REG_U, register) # 16 bit user-stack pointer
if m & 0x80: pull(REG_PC, register) |
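A small sketch that decodes a pull postbyte the same way the bit tests above do (postbyte value chosen for illustration):

m = 0x86  # 0b10000110 -> bits for A, B and PC are set
registers = [(0x01, "CC"), (0x02, "A"), (0x04, "B"), (0x08, "DP"),
             (0x10, "X"), (0x20, "Y"), (0x40, "U"), (0x80, "PC")]
pulled = [name for bit, name in registers if m & bit]
print(pulled)  # ['A', 'B', 'PC']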
def addfield(self, pkt, s, val):
# type: (Optional[packet.Packet], Union[str, Tuple[str, int, int]], int) -> str # noqa: E501
""" An AbstractUVarIntField prefix always consumes the remaining bits
of a BitField;if no current BitField is in use (no tuple in
entry) then the prefix length is 8 bits and the whole byte is to
be consumed
@param packet.Packet|None pkt: the packet containing this field. Probably unused. # noqa: E501
@param str|(str, int, long) s: the string to append this field to. A tuple indicates that some bits were already # noqa: E501
generated by another bitfield-compatible field. This MUST be the case if "size" is not 8. The int is the # noqa: E501
number of bits already generated in the first byte of the str. The long is the value that was generated by the # noqa: E501
previous bitfield-compatible fields.
@param int val: the positive or null value to be added.
@return str: s concatenated with the machine representation of this field. # noqa: E501
@raise AssertionError
"""
assert(val >= 0)
if isinstance(s, bytes):
assert self.size == 8, 'EINVAL: s: tuple expected when prefix_len is not a full byte' # noqa: E501
return s + self.i2m(pkt, val)
# s is a tuple
# assert(s[1] >= 0)
# assert(s[2] >= 0)
# assert (8 - s[1]) == self.size, 'EINVAL: s: not enough bits remaining in current byte to read the prefix' # noqa: E501
if val >= self._max_value:
return s[0] + chb((s[2] << self.size) + self._max_value) + self.i2m(pkt, val)[1:] # noqa: E501
# This AbstractUVarIntField is only one byte long; setting the prefix value # noqa: E501
# and appending the resulting byte to the string
return s[0] + chb((s[2] << self.size) + orb(self.i2m(pkt, val))) | An AbstractUVarIntField prefix always consumes the remaining bits
of a BitField;if no current BitField is in use (no tuple in
entry) then the prefix length is 8 bits and the whole byte is to
be consumed
@param packet.Packet|None pkt: the packet containing this field. Probably unused. # noqa: E501
@param str|(str, int, long) s: the string to append this field to. A tuple indicates that some bits were already # noqa: E501
generated by another bitfield-compatible field. This MUST be the case if "size" is not 8. The int is the # noqa: E501
number of bits already generated in the first byte of the str. The long is the value that was generated by the # noqa: E501
previous bitfield-compatible fields.
@param int val: the positive or null value to be added.
@return str: s concatenated with the machine representation of this field. # noqa: E501
@raise AssertionError | Below is the the instruction that describes the task:
### Input:
An AbstractUVarIntField prefix always consumes the remaining bits
of a BitField;if no current BitField is in use (no tuple in
entry) then the prefix length is 8 bits and the whole byte is to
be consumed
@param packet.Packet|None pkt: the packet containing this field. Probably unused. # noqa: E501
@param str|(str, int, long) s: the string to append this field to. A tuple indicates that some bits were already # noqa: E501
generated by another bitfield-compatible field. This MUST be the case if "size" is not 8. The int is the # noqa: E501
number of bits already generated in the first byte of the str. The long is the value that was generated by the # noqa: E501
previous bitfield-compatible fields.
@param int val: the positive or null value to be added.
@return str: s concatenated with the machine representation of this field. # noqa: E501
@raise AssertionError
### Response:
def addfield(self, pkt, s, val):
# type: (Optional[packet.Packet], Union[str, Tuple[str, int, int]], int) -> str # noqa: E501
""" An AbstractUVarIntField prefix always consumes the remaining bits
of a BitField;if no current BitField is in use (no tuple in
entry) then the prefix length is 8 bits and the whole byte is to
be consumed
@param packet.Packet|None pkt: the packet containing this field. Probably unused. # noqa: E501
@param str|(str, int, long) s: the string to append this field to. A tuple indicates that some bits were already # noqa: E501
generated by another bitfield-compatible field. This MUST be the case if "size" is not 8. The int is the # noqa: E501
number of bits already generated in the first byte of the str. The long is the value that was generated by the # noqa: E501
previous bitfield-compatible fields.
@param int val: the positive or null value to be added.
@return str: s concatenated with the machine representation of this field. # noqa: E501
@raise AssertionError
"""
assert(val >= 0)
if isinstance(s, bytes):
assert self.size == 8, 'EINVAL: s: tuple expected when prefix_len is not a full byte' # noqa: E501
return s + self.i2m(pkt, val)
# s is a tuple
# assert(s[1] >= 0)
# assert(s[2] >= 0)
# assert (8 - s[1]) == self.size, 'EINVAL: s: not enough bits remaining in current byte to read the prefix' # noqa: E501
if val >= self._max_value:
return s[0] + chb((s[2] << self.size) + self._max_value) + self.i2m(pkt, val)[1:] # noqa: E501
# This AbstractUVarIntField is only one byte long; setting the prefix value # noqa: E501
# and appending the resulting byte to the string
return s[0] + chb((s[2] << self.size) + orb(self.i2m(pkt, val))) |
def volume_list(search_opts=None, profile=None, **kwargs):
'''
List storage volumes
search_opts
Dictionary of search options
profile
Profile to use
CLI Example:
.. code-block:: bash
salt '*' nova.volume_list search_opts='{"display_name": "myblock"}' profile=openstack
'''
conn = _auth(profile, **kwargs)
return conn.volume_list(search_opts=search_opts) | List storage volumes
search_opts
Dictionary of search options
profile
Profile to use
CLI Example:
.. code-block:: bash
salt '*' nova.volume_list search_opts='{"display_name": "myblock"}' profile=openstack | Below is the the instruction that describes the task:
### Input:
List storage volumes
search_opts
Dictionary of search options
profile
Profile to use
CLI Example:
.. code-block:: bash
salt '*' nova.volume_list search_opts='{"display_name": "myblock"}' profile=openstack
### Response:
def volume_list(search_opts=None, profile=None, **kwargs):
'''
List storage volumes
search_opts
Dictionary of search options
profile
Profile to use
CLI Example:
.. code-block:: bash
salt '*' nova.volume_list search_opts='{"display_name": "myblock"}' profile=openstack
'''
conn = _auth(profile, **kwargs)
return conn.volume_list(search_opts=search_opts) |
def expand_dates(df, columns=[]):
"""
generate year, month, day features from specified date features
"""
columns = df.columns.intersection(columns)
df2 = df.reindex(columns=set(df.columns).difference(columns))
for column in columns:
df2[column + '_year'] = df[column].apply(lambda x: x.year)
df2[column + '_month'] = df[column].apply(lambda x: x.month)
df2[column + '_day'] = df[column].apply(lambda x: x.day)
return df2 | generate year, month, day features from specified date features | Below is the the instruction that describes the task:
### Input:
generate year, month, day features from specified date features
### Response:
def expand_dates(df, columns=[]):
"""
generate year, month, day features from specified date features
"""
columns = df.columns.intersection(columns)
df2 = df.reindex(columns=set(df.columns).difference(columns))
for column in columns:
df2[column + '_year'] = df[column].apply(lambda x: x.year)
df2[column + '_month'] = df[column].apply(lambda x: x.month)
df2[column + '_day'] = df[column].apply(lambda x: x.day)
return df2 |
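A usage sketch with a small pandas frame (assumes pandas is importable; column names are made up):

import pandas as pd

df = pd.DataFrame({"opened": pd.to_datetime(["2020-01-05", "2021-07-19"]),
                   "amount": [10, 20]})
out = expand_dates(df, columns=["opened"])
# `out` keeps `amount` and gains opened_year, opened_month and opened_day columns.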
def healpixMap(nside, lon, lat, fill_value=0., nest=False):
"""
Input (lon, lat) in degrees instead of (theta, phi) in radians.
Returns HEALPix map at the desired resolution
"""
lon_median, lat_median = np.median(lon), np.median(lat)
max_angsep = np.max(ugali.utils.projector.angsep(lon, lat, lon_median, lat_median))
pix = angToPix(nside, lon, lat, nest=nest)
if max_angsep < 10:
        # More efficient histogramming for small regions of sky
m = np.tile(fill_value, healpy.nside2npix(nside))
pix_subset = ugali.utils.healpix.angToDisc(nside, lon_median, lat_median, max_angsep, nest=nest)
bins = np.arange(np.min(pix_subset), np.max(pix_subset) + 1)
m_subset = np.histogram(pix, bins=bins - 0.5)[0].astype(float)
m[bins[0:-1]] = m_subset
else:
m = np.histogram(pix, np.arange(hp.nside2npix(nside) + 1))[0].astype(float)
if fill_value != 0.:
m[m == 0.] = fill_value
return m | Input (lon, lat) in degrees instead of (theta, phi) in radians.
Returns HEALPix map at the desired resolution | Below is the the instruction that describes the task:
### Input:
Input (lon, lat) in degrees instead of (theta, phi) in radians.
Returns HEALPix map at the desired resolution
### Response:
def healpixMap(nside, lon, lat, fill_value=0., nest=False):
"""
Input (lon, lat) in degrees instead of (theta, phi) in radians.
Returns HEALPix map at the desired resolution
"""
lon_median, lat_median = np.median(lon), np.median(lat)
max_angsep = np.max(ugali.utils.projector.angsep(lon, lat, lon_median, lat_median))
pix = angToPix(nside, lon, lat, nest=nest)
if max_angsep < 10:
        # More efficient histogramming for small regions of sky
m = np.tile(fill_value, healpy.nside2npix(nside))
pix_subset = ugali.utils.healpix.angToDisc(nside, lon_median, lat_median, max_angsep, nest=nest)
bins = np.arange(np.min(pix_subset), np.max(pix_subset) + 1)
m_subset = np.histogram(pix, bins=bins - 0.5)[0].astype(float)
m[bins[0:-1]] = m_subset
else:
m = np.histogram(pix, np.arange(hp.nside2npix(nside) + 1))[0].astype(float)
if fill_value != 0.:
m[m == 0.] = fill_value
return m |
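A hedged usage sketch (assumes healpy and the ugali helpers imported by this module are available; coordinates are made up):

import numpy as np

lon = np.array([10.0, 10.1, 10.2])       # degrees
lat = np.array([-20.0, -20.05, -19.95])  # degrees
counts_map = healpixMap(64, lon, lat, nest=False)
# counts_map is a full-sky HEALPix array (npix = 12 * 64**2 = 49152) of object counts.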
def getPermanence(self, columnIndex, permanence):
"""
Returns the permanence values for a given column. ``permanence`` size
must match the number of inputs.
:param columnIndex: (int) column index to get permanence for.
:param permanence: (list) will be overwritten with permanences.
"""
assert(columnIndex < self._numColumns)
permanence[:] = self._permanences[columnIndex] | Returns the permanence values for a given column. ``permanence`` size
must match the number of inputs.
:param columnIndex: (int) column index to get permanence for.
:param permanence: (list) will be overwritten with permanences. | Below is the the instruction that describes the task:
### Input:
Returns the permanence values for a given column. ``permanence`` size
must match the number of inputs.
:param columnIndex: (int) column index to get permanence for.
:param permanence: (list) will be overwritten with permanences.
### Response:
def getPermanence(self, columnIndex, permanence):
"""
Returns the permanence values for a given column. ``permanence`` size
must match the number of inputs.
:param columnIndex: (int) column index to get permanence for.
:param permanence: (list) will be overwritten with permanences.
"""
assert(columnIndex < self._numColumns)
permanence[:] = self._permanences[columnIndex] |
def getName(obj):
"""This method finds the first parent class which is within the buildbot namespace
it prepends the name with as many ">" as the class is subclassed
"""
# elastic search does not like '.' in dict keys, so we replace by /
def sanitize(name):
return name.replace(".", "/")
if isinstance(obj, _BuildStepFactory):
klass = obj.factory
else:
klass = type(obj)
name = ""
klasses = (klass, ) + inspect.getmro(klass)
for klass in klasses:
if hasattr(klass, "__module__") and klass.__module__.startswith("buildbot."):
return sanitize(name + klass.__module__ + "." + klass.__name__)
else:
name += ">"
return sanitize(type(obj).__name__) | This method finds the first parent class which is within the buildbot namespace
it prepends the name with as many ">" as the class is subclassed | Below is the the instruction that describes the task:
### Input:
This method finds the first parent class which is within the buildbot namespace
it prepends the name with as many ">" as the class is subclassed
### Response:
def getName(obj):
"""This method finds the first parent class which is within the buildbot namespace
it prepends the name with as many ">" as the class is subclassed
"""
# elastic search does not like '.' in dict keys, so we replace by /
def sanitize(name):
return name.replace(".", "/")
if isinstance(obj, _BuildStepFactory):
klass = obj.factory
else:
klass = type(obj)
name = ""
klasses = (klass, ) + inspect.getmro(klass)
for klass in klasses:
if hasattr(klass, "__module__") and klass.__module__.startswith("buildbot."):
return sanitize(name + klass.__module__ + "." + klass.__name__)
else:
name += ">"
return sanitize(type(obj).__name__) |
def run(self, cmd, *args, **kwargs):
"""Run a command."""
runner = self.ctx.run if self.ctx else None
return run(cmd, runner=runner, *args, **kwargs) | Run a command. | Below is the the instruction that describes the task:
### Input:
Run a command.
### Response:
def run(self, cmd, *args, **kwargs):
"""Run a command."""
runner = self.ctx.run if self.ctx else None
return run(cmd, runner=runner, *args, **kwargs) |
def eccentricity(self, **kw):
r"""
Returns the eccentricity computed from the mean apocenter and
mean pericenter.
.. math::
e = \frac{r_{\rm apo} - r_{\rm per}}{r_{\rm apo} + r_{\rm per}}
Parameters
----------
**kw
Any keyword arguments passed to ``apocenter()`` and
``pericenter()``. For example, ``approximate=True``.
Returns
-------
ecc : float
The orbital eccentricity.
"""
ra = self.apocenter(**kw)
rp = self.pericenter(**kw)
return (ra - rp) / (ra + rp) | r"""
Returns the eccentricity computed from the mean apocenter and
mean pericenter.
.. math::
e = \frac{r_{\rm apo} - r_{\rm per}}{r_{\rm apo} + r_{\rm per}}
Parameters
----------
**kw
Any keyword arguments passed to ``apocenter()`` and
``pericenter()``. For example, ``approximate=True``.
Returns
-------
ecc : float
The orbital eccentricity. | Below is the the instruction that describes the task:
### Input:
r"""
Returns the eccentricity computed from the mean apocenter and
mean pericenter.
.. math::
e = \frac{r_{\rm apo} - r_{\rm per}}{r_{\rm apo} + r_{\rm per}}
Parameters
----------
**kw
Any keyword arguments passed to ``apocenter()`` and
``pericenter()``. For example, ``approximate=True``.
Returns
-------
ecc : float
The orbital eccentricity.
### Response:
def eccentricity(self, **kw):
r"""
Returns the eccentricity computed from the mean apocenter and
mean pericenter.
.. math::
e = \frac{r_{\rm apo} - r_{\rm per}}{r_{\rm apo} + r_{\rm per}}
Parameters
----------
**kw
Any keyword arguments passed to ``apocenter()`` and
``pericenter()``. For example, ``approximate=True``.
Returns
-------
ecc : float
The orbital eccentricity.
"""
ra = self.apocenter(**kw)
rp = self.pericenter(**kw)
return (ra - rp) / (ra + rp) |
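A worked number for the formula above: with a mean apocenter of 30 kpc and a mean pericenter of 10 kpc,

r_apo, r_per = 30.0, 10.0
ecc = (r_apo - r_per) / (r_apo + r_per)   # 20 / 40 = 0.5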
def set_title(self, title=None):
"""
Sets the editor title.
:param title: Editor title.
:type title: unicode
:return: Method success.
:rtype: bool
"""
if not title:
# TODO: https://bugreports.qt-project.org/browse/QTBUG-27084
# titleTemplate = self.is_modified() and "{0} *" or "{0}"
# title = titleTemplate.format(self.get_file_short_name())
title = self.get_file_short_name()
LOGGER.debug("> Setting editor title to '{0}'.".format(title))
self.__title = title
self.setWindowTitle(title)
self.title_changed.emit()
return True | Sets the editor title.
:param title: Editor title.
:type title: unicode
:return: Method success.
:rtype: bool | Below is the the instruction that describes the task:
### Input:
Sets the editor title.
:param title: Editor title.
:type title: unicode
:return: Method success.
:rtype: bool
### Response:
def set_title(self, title=None):
"""
Sets the editor title.
:param title: Editor title.
:type title: unicode
:return: Method success.
:rtype: bool
"""
if not title:
# TODO: https://bugreports.qt-project.org/browse/QTBUG-27084
# titleTemplate = self.is_modified() and "{0} *" or "{0}"
# title = titleTemplate.format(self.get_file_short_name())
title = self.get_file_short_name()
LOGGER.debug("> Setting editor title to '{0}'.".format(title))
self.__title = title
self.setWindowTitle(title)
self.title_changed.emit()
return True |
def map(func):
"""Apply function to each value inside the sequence or dict.
Supports both dicts and sequences, key/value pairs are
expanded when applied to a dict.
"""
# text is an alias for basestring on Python 2, which cannot be
# instantiated and therefore can't be used to transform the value,
# so we force to unicode instead.
if is_py2 and text == func:
func = unicode
def expand_kv(kv):
return func(*kv)
def map_values(value):
cls = type(value)
if isinstance(value, dict):
return cls(_map(expand_kv, value.items()))
else:
return cls(_map(func, value))
return transform(map_values) | Apply function to each value inside the sequence or dict.
Supports both dicts and sequences, key/value pairs are
expanded when applied to a dict. | Below is the the instruction that describes the task:
### Input:
Apply function to each value inside the sequence or dict.
Supports both dicts and sequences, key/value pairs are
expanded when applied to a dict.
### Response:
def map(func):
"""Apply function to each value inside the sequence or dict.
Supports both dicts and sequences, key/value pairs are
expanded when applied to a dict.
"""
# text is an alias for basestring on Python 2, which cannot be
# instantiated and therefore can't be used to transform the value,
# so we force to unicode instead.
if is_py2 and text == func:
func = unicode
def expand_kv(kv):
return func(*kv)
def map_values(value):
cls = type(value)
if isinstance(value, dict):
return cls(_map(expand_kv, value.items()))
else:
return cls(_map(func, value))
return transform(map_values) |
def clean(self):
"""
Check unauthenticated user's cookie as a light check to
prevent duplicate votes.
"""
bits = (self.data["content_type"], self.data["object_pk"])
request = self.request
self.current = "%s.%s" % bits
self.previous = request.COOKIES.get("yacms-rating", "").split(",")
already_rated = self.current in self.previous
if already_rated and not self.request.user.is_authenticated():
raise forms.ValidationError(ugettext("Already rated."))
return self.cleaned_data | Check unauthenticated user's cookie as a light check to
prevent duplicate votes. | Below is the the instruction that describes the task:
### Input:
Check unauthenticated user's cookie as a light check to
prevent duplicate votes.
### Response:
def clean(self):
"""
Check unauthenticated user's cookie as a light check to
prevent duplicate votes.
"""
bits = (self.data["content_type"], self.data["object_pk"])
request = self.request
self.current = "%s.%s" % bits
self.previous = request.COOKIES.get("yacms-rating", "").split(",")
already_rated = self.current in self.previous
if already_rated and not self.request.user.is_authenticated():
raise forms.ValidationError(ugettext("Already rated."))
return self.cleaned_data |
def _build_context(self, request, customer_uuid):
"""
Build common context parts used by different handlers in this view.
"""
# TODO: pylint acts stupid - find a way around it without suppressing
enterprise_customer = EnterpriseCustomer.objects.get(uuid=customer_uuid) # pylint: disable=no-member
search_keyword = self.get_search_keyword(request)
linked_learners = self.get_enterprise_customer_user_queryset(request, search_keyword, customer_uuid)
pending_linked_learners = self.get_pending_users_queryset(search_keyword, customer_uuid)
context = {
self.ContextParameters.ENTERPRISE_CUSTOMER: enterprise_customer,
self.ContextParameters.PENDING_LEARNERS: pending_linked_learners,
self.ContextParameters.LEARNERS: linked_learners,
self.ContextParameters.SEARCH_KEYWORD: search_keyword or '',
self.ContextParameters.ENROLLMENT_URL: settings.LMS_ENROLLMENT_API_PATH,
}
context.update(admin.site.each_context(request))
context.update(self._build_admin_context(request, enterprise_customer))
return context | Build common context parts used by different handlers in this view. | Below is the the instruction that describes the task:
### Input:
Build common context parts used by different handlers in this view.
### Response:
def _build_context(self, request, customer_uuid):
"""
Build common context parts used by different handlers in this view.
"""
# TODO: pylint acts stupid - find a way around it without suppressing
enterprise_customer = EnterpriseCustomer.objects.get(uuid=customer_uuid) # pylint: disable=no-member
search_keyword = self.get_search_keyword(request)
linked_learners = self.get_enterprise_customer_user_queryset(request, search_keyword, customer_uuid)
pending_linked_learners = self.get_pending_users_queryset(search_keyword, customer_uuid)
context = {
self.ContextParameters.ENTERPRISE_CUSTOMER: enterprise_customer,
self.ContextParameters.PENDING_LEARNERS: pending_linked_learners,
self.ContextParameters.LEARNERS: linked_learners,
self.ContextParameters.SEARCH_KEYWORD: search_keyword or '',
self.ContextParameters.ENROLLMENT_URL: settings.LMS_ENROLLMENT_API_PATH,
}
context.update(admin.site.each_context(request))
context.update(self._build_admin_context(request, enterprise_customer))
return context |
def makevAndvPfuncs(self,policyFunc):
'''
Constructs the marginal value function for this period.
Parameters
----------
policyFunc : function
Consumption and medical care function for this period, defined over
market resources, permanent income level, and the medical need shock.
Returns
-------
vFunc : function
Value function for this period, defined over market resources and
permanent income.
vPfunc : function
Marginal value (of market resources) function for this period, defined
over market resources and permanent income.
'''
# Get state dimension sizes
mCount = self.aXtraGrid.size
pCount = self.pLvlGrid.size
MedCount = self.MedShkVals.size
# Make temporary grids to evaluate the consumption function
temp_grid = np.tile(np.reshape(self.aXtraGrid,(mCount,1,1)),(1,pCount,MedCount))
aMinGrid = np.tile(np.reshape(self.mLvlMinNow(self.pLvlGrid),(1,pCount,1)),
(mCount,1,MedCount))
pGrid = np.tile(np.reshape(self.pLvlGrid,(1,pCount,1)),(mCount,1,MedCount))
mGrid = temp_grid*pGrid + aMinGrid
if self.pLvlGrid[0] == 0:
mGrid[:,0,:] = np.tile(np.reshape(self.aXtraGrid,(mCount,1)),(1,MedCount))
MedShkGrid = np.tile(np.reshape(self.MedShkVals,(1,1,MedCount)),(mCount,pCount,1))
probsGrid = np.tile(np.reshape(self.MedShkPrbs,(1,1,MedCount)),(mCount,pCount,1))
# Get optimal consumption (and medical care) for each state
cGrid,MedGrid = policyFunc(mGrid,pGrid,MedShkGrid)
# Calculate expected value by "integrating" across medical shocks
if self.vFuncBool:
MedGrid = np.maximum(MedGrid,1e-100) # interpolation error sometimes makes Med < 0 (barely)
aGrid = np.maximum(mGrid - cGrid - self.MedPrice*MedGrid, aMinGrid) # interpolation error sometimes makes tiny violations
vGrid = self.u(cGrid) + MedShkGrid*self.uMed(MedGrid) + self.EndOfPrdvFunc(aGrid,pGrid)
vNow = np.sum(vGrid*probsGrid,axis=2)
# Calculate expected marginal value by "integrating" across medical shocks
vPgrid = self.uP(cGrid)
vPnow = np.sum(vPgrid*probsGrid,axis=2)
# Add vPnvrs=0 at m=mLvlMin to close it off at the bottom (and vNvrs=0)
mGrid_small = np.concatenate((np.reshape(self.mLvlMinNow(self.pLvlGrid),(1,pCount)),mGrid[:,:,0]))
vPnvrsNow = np.concatenate((np.zeros((1,pCount)),self.uPinv(vPnow)))
if self.vFuncBool:
vNvrsNow = np.concatenate((np.zeros((1,pCount)),self.uinv(vNow)),axis=0)
vNvrsPnow = vPnow*self.uinvP(vNow)
vNvrsPnow = np.concatenate((np.zeros((1,pCount)),vNvrsPnow),axis=0)
# Construct the pseudo-inverse value and marginal value functions over mLvl,pLvl
vPnvrsFunc_by_pLvl = []
vNvrsFunc_by_pLvl = []
for j in range(pCount): # Make a pseudo inverse marginal value function for each pLvl
pLvl = self.pLvlGrid[j]
m_temp = mGrid_small[:,j] - self.mLvlMinNow(pLvl)
vPnvrs_temp = vPnvrsNow[:,j]
vPnvrsFunc_by_pLvl.append(LinearInterp(m_temp,vPnvrs_temp))
if self.vFuncBool:
vNvrs_temp = vNvrsNow[:,j]
vNvrsP_temp = vNvrsPnow[:,j]
vNvrsFunc_by_pLvl.append(CubicInterp(m_temp,vNvrs_temp,vNvrsP_temp))
vPnvrsFuncBase = LinearInterpOnInterp1D(vPnvrsFunc_by_pLvl,self.pLvlGrid)
vPnvrsFunc = VariableLowerBoundFunc2D(vPnvrsFuncBase,self.mLvlMinNow) # adjust for the lower bound of mLvl
if self.vFuncBool:
vNvrsFuncBase = LinearInterpOnInterp1D(vNvrsFunc_by_pLvl,self.pLvlGrid)
vNvrsFunc = VariableLowerBoundFunc2D(vNvrsFuncBase,self.mLvlMinNow) # adjust for the lower bound of mLvl
# "Re-curve" the (marginal) value function
vPfunc = MargValueFunc2D(vPnvrsFunc,self.CRRA)
if self.vFuncBool:
vFunc = ValueFunc2D(vNvrsFunc,self.CRRA)
else:
vFunc = NullFunc()
return vFunc, vPfunc | Constructs the marginal value function for this period.
Parameters
----------
policyFunc : function
Consumption and medical care function for this period, defined over
market resources, permanent income level, and the medical need shock.
Returns
-------
vFunc : function
Value function for this period, defined over market resources and
permanent income.
vPfunc : function
Marginal value (of market resources) function for this period, defined
over market resources and permanent income. | Below is the the instruction that describes the task:
### Input:
Constructs the marginal value function for this period.
Parameters
----------
policyFunc : function
Consumption and medical care function for this period, defined over
market resources, permanent income level, and the medical need shock.
Returns
-------
vFunc : function
Value function for this period, defined over market resources and
permanent income.
vPfunc : function
Marginal value (of market resources) function for this period, defined
over market resources and permanent income.
### Response:
def makevAndvPfuncs(self,policyFunc):
'''
Constructs the marginal value function for this period.
Parameters
----------
policyFunc : function
Consumption and medical care function for this period, defined over
market resources, permanent income level, and the medical need shock.
Returns
-------
vFunc : function
Value function for this period, defined over market resources and
permanent income.
vPfunc : function
Marginal value (of market resources) function for this period, defined
over market resources and permanent income.
'''
# Get state dimension sizes
mCount = self.aXtraGrid.size
pCount = self.pLvlGrid.size
MedCount = self.MedShkVals.size
# Make temporary grids to evaluate the consumption function
temp_grid = np.tile(np.reshape(self.aXtraGrid,(mCount,1,1)),(1,pCount,MedCount))
aMinGrid = np.tile(np.reshape(self.mLvlMinNow(self.pLvlGrid),(1,pCount,1)),
(mCount,1,MedCount))
pGrid = np.tile(np.reshape(self.pLvlGrid,(1,pCount,1)),(mCount,1,MedCount))
mGrid = temp_grid*pGrid + aMinGrid
if self.pLvlGrid[0] == 0:
mGrid[:,0,:] = np.tile(np.reshape(self.aXtraGrid,(mCount,1)),(1,MedCount))
MedShkGrid = np.tile(np.reshape(self.MedShkVals,(1,1,MedCount)),(mCount,pCount,1))
probsGrid = np.tile(np.reshape(self.MedShkPrbs,(1,1,MedCount)),(mCount,pCount,1))
# Get optimal consumption (and medical care) for each state
cGrid,MedGrid = policyFunc(mGrid,pGrid,MedShkGrid)
# Calculate expected value by "integrating" across medical shocks
if self.vFuncBool:
MedGrid = np.maximum(MedGrid,1e-100) # interpolation error sometimes makes Med < 0 (barely)
aGrid = np.maximum(mGrid - cGrid - self.MedPrice*MedGrid, aMinGrid) # interpolation error sometimes makes tiny violations
vGrid = self.u(cGrid) + MedShkGrid*self.uMed(MedGrid) + self.EndOfPrdvFunc(aGrid,pGrid)
vNow = np.sum(vGrid*probsGrid,axis=2)
# Calculate expected marginal value by "integrating" across medical shocks
vPgrid = self.uP(cGrid)
vPnow = np.sum(vPgrid*probsGrid,axis=2)
# Add vPnvrs=0 at m=mLvlMin to close it off at the bottom (and vNvrs=0)
mGrid_small = np.concatenate((np.reshape(self.mLvlMinNow(self.pLvlGrid),(1,pCount)),mGrid[:,:,0]))
vPnvrsNow = np.concatenate((np.zeros((1,pCount)),self.uPinv(vPnow)))
if self.vFuncBool:
vNvrsNow = np.concatenate((np.zeros((1,pCount)),self.uinv(vNow)),axis=0)
vNvrsPnow = vPnow*self.uinvP(vNow)
vNvrsPnow = np.concatenate((np.zeros((1,pCount)),vNvrsPnow),axis=0)
# Construct the pseudo-inverse value and marginal value functions over mLvl,pLvl
vPnvrsFunc_by_pLvl = []
vNvrsFunc_by_pLvl = []
for j in range(pCount): # Make a pseudo inverse marginal value function for each pLvl
pLvl = self.pLvlGrid[j]
m_temp = mGrid_small[:,j] - self.mLvlMinNow(pLvl)
vPnvrs_temp = vPnvrsNow[:,j]
vPnvrsFunc_by_pLvl.append(LinearInterp(m_temp,vPnvrs_temp))
if self.vFuncBool:
vNvrs_temp = vNvrsNow[:,j]
vNvrsP_temp = vNvrsPnow[:,j]
vNvrsFunc_by_pLvl.append(CubicInterp(m_temp,vNvrs_temp,vNvrsP_temp))
vPnvrsFuncBase = LinearInterpOnInterp1D(vPnvrsFunc_by_pLvl,self.pLvlGrid)
vPnvrsFunc = VariableLowerBoundFunc2D(vPnvrsFuncBase,self.mLvlMinNow) # adjust for the lower bound of mLvl
if self.vFuncBool:
vNvrsFuncBase = LinearInterpOnInterp1D(vNvrsFunc_by_pLvl,self.pLvlGrid)
vNvrsFunc = VariableLowerBoundFunc2D(vNvrsFuncBase,self.mLvlMinNow) # adjust for the lower bound of mLvl
# "Re-curve" the (marginal) value function
vPfunc = MargValueFunc2D(vPnvrsFunc,self.CRRA)
if self.vFuncBool:
vFunc = ValueFunc2D(vNvrsFunc,self.CRRA)
else:
vFunc = NullFunc()
return vFunc, vPfunc |
def purge(self, jid, node):
"""
Delete all items from a node.
:param jid: JID of the PubSub service
:param node: Name of the PubSub node
:type node: :class:`str`
Requires :attr:`.xso.Feature.PURGE`.
"""
iq = aioxmpp.stanza.IQ(
type_=aioxmpp.structs.IQType.SET,
to=jid,
payload=pubsub_xso.OwnerRequest(
pubsub_xso.OwnerPurge(
node
)
)
)
yield from self.client.send(iq) | Delete all items from a node.
:param jid: JID of the PubSub service
:param node: Name of the PubSub node
:type node: :class:`str`
Requires :attr:`.xso.Feature.PURGE`. | Below is the the instruction that describes the task:
### Input:
Delete all items from a node.
:param jid: JID of the PubSub service
:param node: Name of the PubSub node
:type node: :class:`str`
Requires :attr:`.xso.Feature.PURGE`.
### Response:
def purge(self, jid, node):
"""
Delete all items from a node.
:param jid: JID of the PubSub service
:param node: Name of the PubSub node
:type node: :class:`str`
Requires :attr:`.xso.Feature.PURGE`.
"""
iq = aioxmpp.stanza.IQ(
type_=aioxmpp.structs.IQType.SET,
to=jid,
payload=pubsub_xso.OwnerRequest(
pubsub_xso.OwnerPurge(
node
)
)
)
yield from self.client.send(iq) |
def is_readable(filename):
"""Check if file is a regular file and is readable."""
return os.path.isfile(filename) and os.access(filename, os.R_OK) | Check if file is a regular file and is readable. | Below is the the instruction that describes the task:
### Input:
Check if file is a regular file and is readable.
### Response:
def is_readable(filename):
"""Check if file is a regular file and is readable."""
return os.path.isfile(filename) and os.access(filename, os.R_OK) |
def _add(self, *rules):
# type: (Iterable[Type[Rule]]) -> Generator[Type[Rule]]
"""
Add rules into the set. Each rule is validated and split if needed.
The method add the rules into dictionary, so the rule can be deleted with terminals or nonterminals.
:param rules: Rules to insert.
:return: Inserted rules.
:raise NotRuleException: If the parameter doesn't inherit from Rule.
:raise RuleException: If the syntax of the rule is invalid.
"""
for rule in rules:
if rule in self:
continue
self._validate_rule(rule)
for rule in rules:
for r in self._split_rules(rule):
for side in r.rule:
for s in side:
self._assign_map[s].add(r)
super().add(r)
yield r | Add rules into the set. Each rule is validated and split if needed.
The method add the rules into dictionary, so the rule can be deleted with terminals or nonterminals.
:param rules: Rules to insert.
:return: Inserted rules.
:raise NotRuleException: If the parameter doesn't inherit from Rule.
:raise RuleException: If the syntax of the rule is invalid. | Below is the the instruction that describes the task:
### Input:
Add rules into the set. Each rule is validated and split if needed.
The method add the rules into dictionary, so the rule can be deleted with terminals or nonterminals.
:param rules: Rules to insert.
:return: Inserted rules.
:raise NotRuleException: If the parameter doesn't inherit from Rule.
:raise RuleException: If the syntax of the rule is invalid.
### Response:
def _add(self, *rules):
# type: (Iterable[Type[Rule]]) -> Generator[Type[Rule]]
"""
Add rules into the set. Each rule is validated and split if needed.
The method add the rules into dictionary, so the rule can be deleted with terminals or nonterminals.
:param rules: Rules to insert.
:return: Inserted rules.
:raise NotRuleException: If the parameter doesn't inherit from Rule.
:raise RuleException: If the syntax of the rule is invalid.
"""
for rule in rules:
if rule in self:
continue
self._validate_rule(rule)
for rule in rules:
for r in self._split_rules(rule):
for side in r.rule:
for s in side:
self._assign_map[s].add(r)
super().add(r)
yield r |
def write_json_to_temp_file(data):
"""Writes JSON data to a temporary file and returns the path to it"""
fp = tempfile.NamedTemporaryFile(delete=False)
fp.write(json.dumps(data).encode('utf-8'))
fp.close()
return fp.name | Writes JSON data to a temporary file and returns the path to it | Below is the the instruction that describes the task:
### Input:
Writes JSON data to a temporary file and returns the path to it
### Response:
def write_json_to_temp_file(data):
"""Writes JSON data to a temporary file and returns the path to it"""
fp = tempfile.NamedTemporaryFile(delete=False)
fp.write(json.dumps(data).encode('utf-8'))
fp.close()
return fp.name |
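A usage sketch (writes into the system temp directory; assumes `os` and the function above are in scope):

path = write_json_to_temp_file({"job": "demo", "retries": 3})
with open(path) as fh:
    print(fh.read())   # {"job": "demo", "retries": 3}
os.remove(path)        # the caller is responsible for cleanup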
def delete_subtrie(self, key):
"""
Given a key prefix, delete the whole subtrie that starts with the key prefix.
Key will be encoded into binary array format first.
It will call `_set` with `if_delete_subtrie` set to True.
"""
validate_is_bytes(key)
self.root_hash = self._set(
self.root_hash,
encode_to_bin(key),
value=b'',
if_delete_subtrie=True,
) | Given a key prefix, delete the whole subtrie that starts with the key prefix.
Key will be encoded into binary array format first.
It will call `_set` with `if_delete_subtrie` set to True. | Below is the the instruction that describes the task:
### Input:
Given a key prefix, delete the whole subtrie that starts with the key prefix.
Key will be encoded into binary array format first.
It will call `_set` with `if_delete_subtrie` set to True.
### Response:
def delete_subtrie(self, key):
"""
Given a key prefix, delete the whole subtrie that starts with the key prefix.
Key will be encoded into binary array format first.
It will call `_set` with `if_delete_subtrie` set to True.
"""
validate_is_bytes(key)
self.root_hash = self._set(
self.root_hash,
encode_to_bin(key),
value=b'',
if_delete_subtrie=True,
) |
def AddTask(self,
target,
args=(),
name="Unnamed task",
blocking=True,
inline=True):
"""Adds a task to be processed later.
Args:
target: A callable which should be processed by one of the workers.
args: A tuple of arguments to target.
name: The name of this task. Used to identify tasks in the log.
blocking: If True we block until the task is finished, otherwise we raise
queue.Full
inline: If set, process the task inline when the queue is full. This
implies no blocking. Specifying inline helps if the worker tasks are
blocked because it still ensures some progress is made. However, this
can generally block the calling thread even after the threadpool is
available again and therefore decrease efficiency.
Raises:
ThreadPoolNotStartedError: if the pool was not started yet.
queue.Full: if the pool is full and can not accept new jobs.
"""
if not self.started:
raise ThreadPoolNotStartedError(self.name)
# This pool should have no worker threads - just run the task inline.
if self.max_threads == 0:
target(*args)
return
if inline:
blocking = False
with self.lock:
while True:
# This check makes sure that the threadpool will add new workers
# even if the queue is not full. This is needed for a scenario when
# a fresh threadpool is created (say, with min_threads=1 and
# max_threads=10) and 2 long-running tasks are added. The code below
# will spawn a new worker for a second long-running task.
if len(self) < self.max_threads:
try:
self._AddWorker()
except (RuntimeError, threading.ThreadError) as e:
logging.error(
"Threadpool exception: "
"Could not spawn worker threads: %s", e)
try:
# Push the task on the queue but raise if unsuccessful.
self._queue.put((target, args, name, time.time()), block=False)
return
except queue.Full:
# We increase the number of active threads if we do not exceed the
# maximum _and_ our process CPU utilization is not too high. This
# ensures that if the workers are waiting on IO we add more workers,
# but we do not waste workers when tasks are CPU bound.
if len(self) < self.max_threads:
try:
self._AddWorker()
continue
# If we fail to add a worker we should keep going anyway.
except (RuntimeError, threading.ThreadError) as e:
logging.error(
"Threadpool exception: "
"Could not spawn worker threads: %s", e)
# If we need to process the task inline just break out of the loop,
# therefore releasing the lock and run the task inline.
if inline:
break
# We should block and try again soon.
elif blocking:
try:
self._queue.put((target, args, name, time.time()),
block=True,
timeout=1)
return
except queue.Full:
continue
else:
raise Full()
# We don't want to hold the lock while running the task inline
if inline:
target(*args) | Adds a task to be processed later.
Args:
target: A callable which should be processed by one of the workers.
args: A tuple of arguments to target.
name: The name of this task. Used to identify tasks in the log.
blocking: If True we block until the task is finished, otherwise we raise
queue.Full
inline: If set, process the task inline when the queue is full. This
implies no blocking. Specifying inline helps if the worker tasks are
blocked because it still ensures some progress is made. However, this
can generally block the calling thread even after the threadpool is
available again and therefore decrease efficiency.
Raises:
ThreadPoolNotStartedError: if the pool was not started yet.
queue.Full: if the pool is full and can not accept new jobs. | Below is the the instruction that describes the task:
### Input:
Adds a task to be processed later.
Args:
target: A callable which should be processed by one of the workers.
args: A tuple of arguments to target.
name: The name of this task. Used to identify tasks in the log.
blocking: If True we block until the task is finished, otherwise we raise
queue.Full
inline: If set, process the task inline when the queue is full. This
implies no blocking. Specifying inline helps if the worker tasks are
blocked because it still ensures some progress is made. However, this
can generally block the calling thread even after the threadpool is
available again and therefore decrease efficiency.
Raises:
ThreadPoolNotStartedError: if the pool was not started yet.
queue.Full: if the pool is full and can not accept new jobs.
### Response:
def AddTask(self,
target,
args=(),
name="Unnamed task",
blocking=True,
inline=True):
"""Adds a task to be processed later.
Args:
target: A callable which should be processed by one of the workers.
args: A tuple of arguments to target.
name: The name of this task. Used to identify tasks in the log.
blocking: If True we block until the task is finished, otherwise we raise
queue.Full
inline: If set, process the task inline when the queue is full. This
implies no blocking. Specifying inline helps if the worker tasks are
blocked because it still ensures some progress is made. However, this
can generally block the calling thread even after the threadpool is
available again and therefore decrease efficiency.
Raises:
ThreadPoolNotStartedError: if the pool was not started yet.
queue.Full: if the pool is full and can not accept new jobs.
"""
if not self.started:
raise ThreadPoolNotStartedError(self.name)
# This pool should have no worker threads - just run the task inline.
if self.max_threads == 0:
target(*args)
return
if inline:
blocking = False
with self.lock:
while True:
# This check makes sure that the threadpool will add new workers
# even if the queue is not full. This is needed for a scenario when
# a fresh threadpool is created (say, with min_threads=1 and
# max_threads=10) and 2 long-running tasks are added. The code below
# will spawn a new worker for a second long-running task.
if len(self) < self.max_threads:
try:
self._AddWorker()
except (RuntimeError, threading.ThreadError) as e:
logging.error(
"Threadpool exception: "
"Could not spawn worker threads: %s", e)
try:
# Push the task on the queue but raise if unsuccessful.
self._queue.put((target, args, name, time.time()), block=False)
return
except queue.Full:
# We increase the number of active threads if we do not exceed the
# maximum _and_ our process CPU utilization is not too high. This
# ensures that if the workers are waiting on IO we add more workers,
# but we do not waste workers when tasks are CPU bound.
if len(self) < self.max_threads:
try:
self._AddWorker()
continue
# If we fail to add a worker we should keep going anyway.
except (RuntimeError, threading.ThreadError) as e:
logging.error(
"Threadpool exception: "
"Could not spawn worker threads: %s", e)
# If we need to process the task inline just break out of the loop,
# therefore releasing the lock and run the task inline.
if inline:
break
# We should block and try again soon.
elif blocking:
try:
self._queue.put((target, args, name, time.time()),
block=True,
timeout=1)
return
except queue.Full:
continue
else:
raise Full()
# We don't want to hold the lock while running the task inline
if inline:
target(*args) |
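A hedged sketch of queueing work on the pool; the constructor arguments and Start() call are assumptions, only AddTask follows the method above.
# Hypothetical usage (constructor signature and Start() are assumed API):
pool = ThreadPool("example-pool", min_threads=2, max_threads=10)
pool.Start()
results = []
for i in range(5):
    pool.AddTask(target=results.append, args=(i,), name="append-%d" % i)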
def partial_update(self, request, *args, **kwargs):
""" We do not include the mixin as we want only PATCH and no PUT """
instance = self.get_object()
serializer = self.get_serializer(instance, data=request.data, partial=True, context=self.get_serializer_context())
serializer.is_valid(raise_exception=True)
serializer.save()
if getattr(instance, '_prefetched_objects_cache', None): #pragma: no cover
instance = self.get_object()
serializer = self.get_serializer(instance)
return response.Response(serializer.data) | We do not include the mixin as we want only PATCH and no PUT | Below is the the instruction that describes the task:
### Input:
We do not include the mixin as we want only PATCH and no PUT
### Response:
def partial_update(self, request, *args, **kwargs):
""" We do not include the mixin as we want only PATCH and no PUT """
instance = self.get_object()
serializer = self.get_serializer(instance, data=request.data, partial=True, context=self.get_serializer_context())
serializer.is_valid(raise_exception=True)
serializer.save()
if getattr(instance, '_prefetched_objects_cache', None): #pragma: no cover
instance = self.get_object()
serializer = self.get_serializer(instance)
return response.Response(serializer.data) |
def open_image(fn):
""" Opens an image using OpenCV given the file path.
Arguments:
fn: the file path of the image
Returns:
The image in RGB format as numpy array of floats normalized to range between 0.0 - 1.0
"""
flags = cv2.IMREAD_UNCHANGED+cv2.IMREAD_ANYDEPTH+cv2.IMREAD_ANYCOLOR
if not os.path.exists(fn) and not str(fn).startswith("http"):
raise OSError('No such file or directory: {}'.format(fn))
elif os.path.isdir(fn) and not str(fn).startswith("http"):
raise OSError('Is a directory: {}'.format(fn))
elif isdicom(fn):
slice = pydicom.read_file(fn)
if slice.PhotometricInterpretation.startswith('MONOCHROME'):
# Make a fake RGB image
im = np.stack([slice.pixel_array]*3,-1)
return im / ((1 << slice.BitsStored)-1)
else:
# No support for RGB yet, as it involves various color spaces.
# It shouldn't be too difficult to add though, if needed.
raise OSError('Unsupported DICOM image with PhotometricInterpretation=={}'.format(slice.PhotometricInterpretation))
else:
#res = np.array(Image.open(fn), dtype=np.float32)/255
#if len(res.shape)==2: res = np.repeat(res[...,None],3,2)
#return res
try:
if str(fn).startswith("http"):
req = urllib.urlopen(str(fn))
image = np.asarray(bytearray(req.read()), dtype="uint8")
im = cv2.imdecode(image, flags).astype(np.float32)/255
else:
im = cv2.imread(str(fn), flags).astype(np.float32)/255
if im is None: raise OSError(f'File not recognized by opencv: {fn}')
return cv2.cvtColor(im, cv2.COLOR_BGR2RGB)
except Exception as e:
raise OSError('Error handling image at: {}'.format(fn)) from e | Opens an image using OpenCV given the file path.
Arguments:
fn: the file path of the image
Returns:
The image in RGB format as numpy array of floats normalized to range between 0.0 - 1.0 | Below is the the instruction that describes the task:
### Input:
Opens an image using OpenCV given the file path.
Arguments:
fn: the file path of the image
Returns:
The image in RGB format as numpy array of floats normalized to range between 0.0 - 1.0
### Response:
def open_image(fn):
""" Opens an image using OpenCV given the file path.
Arguments:
fn: the file path of the image
Returns:
The image in RGB format as numpy array of floats normalized to range between 0.0 - 1.0
"""
flags = cv2.IMREAD_UNCHANGED+cv2.IMREAD_ANYDEPTH+cv2.IMREAD_ANYCOLOR
if not os.path.exists(fn) and not str(fn).startswith("http"):
raise OSError('No such file or directory: {}'.format(fn))
elif os.path.isdir(fn) and not str(fn).startswith("http"):
raise OSError('Is a directory: {}'.format(fn))
elif isdicom(fn):
slice = pydicom.read_file(fn)
if slice.PhotometricInterpretation.startswith('MONOCHROME'):
# Make a fake RGB image
im = np.stack([slice.pixel_array]*3,-1)
return im / ((1 << slice.BitsStored)-1)
else:
# No support for RGB yet, as it involves various color spaces.
# It shouldn't be too difficult to add though, if needed.
raise OSError('Unsupported DICOM image with PhotometricInterpretation=={}'.format(slice.PhotometricInterpretation))
else:
#res = np.array(Image.open(fn), dtype=np.float32)/255
#if len(res.shape)==2: res = np.repeat(res[...,None],3,2)
#return res
try:
if str(fn).startswith("http"):
req = urllib.urlopen(str(fn))
image = np.asarray(bytearray(req.read()), dtype="uint8")
im = cv2.imdecode(image, flags).astype(np.float32)/255
else:
im = cv2.imread(str(fn), flags).astype(np.float32)/255
if im is None: raise OSError(f'File not recognized by opencv: {fn}')
return cv2.cvtColor(im, cv2.COLOR_BGR2RGB)
except Exception as e:
raise OSError('Error handling image at: {}'.format(fn)) from e |
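A quick usage sketch; the file path is a placeholder.
# Usage sketch: load an image and inspect the normalized array.
im = open_image('photo.jpg')  # placeholder path
print(im.shape, im.dtype)     # (H, W, 3) float32, values in [0.0, 1.0]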
def update(self, request, *args, **kwargs):
"""
Run **PATCH** request against */api/price-list-items/<uuid>/* to update price list item.
Only item_type, key value and units can be updated.
Only customer owner and staff can update price items.
"""
return super(PriceListItemViewSet, self).update(request, *args, **kwargs) | Run **PATCH** request against */api/price-list-items/<uuid>/* to update price list item.
Only item_type, key value and units can be updated.
Only customer owner and staff can update price items. | Below is the the instruction that describes the task:
### Input:
Run **PATCH** request against */api/price-list-items/<uuid>/* to update price list item.
Only item_type, key value and units can be updated.
Only customer owner and staff can update price items.
### Response:
def update(self, request, *args, **kwargs):
"""
Run **PATCH** request against */api/price-list-items/<uuid>/* to update price list item.
Only item_type, key value and units can be updated.
Only customer owner and staff can update price items.
"""
return super(PriceListItemViewSet, self).update(request, *args, **kwargs) |
def date_from_quarter(base_date, ordinal, year):
"""
Extract date from quarter of a year
"""
interval = 3
month_start = interval * (ordinal - 1)
if month_start < 0:
month_start = 9
month_end = month_start + interval
if month_start == 0:
month_start = 1
return [
datetime(year, month_start, 1),
datetime(year, month_end, calendar.monthrange(year, month_end)[1])
] | Extract date from quarter of a year | Below is the the instruction that describes the task:
### Input:
Extract date from quarter of a year
### Response:
def date_from_quarter(base_date, ordinal, year):
"""
Extract date from quarter of a year
"""
interval = 3
month_start = interval * (ordinal - 1)
if month_start < 0:
month_start = 9
month_end = month_start + interval
if month_start == 0:
month_start = 1
return [
datetime(year, month_start, 1),
datetime(year, month_end, calendar.monthrange(year, month_end)[1])
] |
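A usage sketch for the first quarter; base_date is unused by the function, so None is passed.
# Usage sketch: Q1 of 2021.
start, end = date_from_quarter(None, ordinal=1, year=2021)
# start == datetime(2021, 1, 1); end == datetime(2021, 3, 31)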
def fetch_all_droplet_neighbors(self):
r"""
Returns a generator of all sets of multiple droplets that are running
on the same physical hardware
:rtype: generator of lists of `Droplet`\ s
:raises DOAPIError: if the API endpoint replies with an error
"""
for hood in self.paginate('/v2/reports/droplet_neighbors', 'neighbors'):
yield list(map(self._droplet, hood)) | r"""
Returns a generator of all sets of multiple droplets that are running
on the same physical hardware
:rtype: generator of lists of `Droplet`\ s
:raises DOAPIError: if the API endpoint replies with an error | Below is the the instruction that describes the task:
### Input:
r"""
Returns a generator of all sets of multiple droplets that are running
on the same physical hardware
:rtype: generator of lists of `Droplet`\ s
:raises DOAPIError: if the API endpoint replies with an error
### Response:
def fetch_all_droplet_neighbors(self):
r"""
Returns a generator of all sets of multiple droplets that are running
on the same physical hardware
:rtype: generator of lists of `Droplet`\ s
:raises DOAPIError: if the API endpoint replies with an error
"""
for hood in self.paginate('/v2/reports/droplet_neighbors', 'neighbors'):
yield list(map(self._droplet, hood)) |
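A hedged usage sketch; the client constructor and the droplet attribute name are assumptions beyond the generator shown above.
# Hypothetical usage (doapi constructor and .name attribute are assumed):
client = doapi(api_token='...')
for neighbors in client.fetch_all_droplet_neighbors():
    print([d.name for d in neighbors])  # droplets sharing the same physical hardware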
def create_ramp_plan(err, ramp):
"""
Formulate and execute on a plan to slowly add heat or cooling to the system
`err` initial error (PV - SP)
`ramp` the size of the ramp
A ramp plan might yield MVs in this order at every timestep:
[5, 0, 4, 0, 3, 0, 2, 0, 1]
where err == 5 + 4 + 3 + 2 + 1
"""
if ramp == 1: # basecase
yield int(err)
while True:
yield 0
# np.arange(n).sum() == err
# --> solve for n
# err = (n - 1) * (n // 2) == .5 * n**2 - .5 * n
# 0 = n**2 - n --> solve for n
n = np.abs(np.roots([.5, -.5, 0]).max())
niter = int(ramp // (2 * n)) # 2 means add all MV in first half of ramp
MV = n
log.info('Initializing a ramp plan', extra=dict(
ramp_size=ramp, err=err, niter=niter))
for x in range(int(n)):
budget = MV
for x in range(niter):
budget -= MV // niter
yield int(np.sign(err) * (MV // niter))
yield int(budget * np.sign(err))
MV -= 1
while True:
yield 0 | Formulate and execute on a plan to slowly add heat or cooling to the system
`err` initial error (PV - SP)
`ramp` the size of the ramp
A ramp plan might yield MVs in this order at every timestep:
[5, 0, 4, 0, 3, 0, 2, 0, 1]
where err == 5 + 4 + 3 + 2 + 1 | Below is the the instruction that describes the task:
### Input:
Formulate and execute on a plan to slowly add heat or cooling to the system
`err` initial error (PV - SP)
`ramp` the size of the ramp
A ramp plan might yield MVs in this order at every timestep:
[5, 0, 4, 0, 3, 0, 2, 0, 1]
where err == 5 + 4 + 3 + 2 + 1
### Response:
def create_ramp_plan(err, ramp):
"""
Formulate and execute on a plan to slowly add heat or cooling to the system
`err` initial error (PV - SP)
`ramp` the size of the ramp
A ramp plan might yield MVs in this order at every timestep:
[5, 0, 4, 0, 3, 0, 2, 0, 1]
where err == 5 + 4 + 3 + 2 + 1
"""
if ramp == 1: # basecase
yield int(err)
while True:
yield 0
# np.arange(n).sum() == err
# --> solve for n
# err = (n - 1) * (n // 2) == .5 * n**2 - .5 * n
# 0 = n**2 - n --> solve for n
n = np.abs(np.roots([.5, -.5, 0]).max())
niter = int(ramp // (2 * n)) # 2 means add all MV in first half of ramp
MV = n
log.info('Initializing a ramp plan', extra=dict(
ramp_size=ramp, err=err, niter=niter))
for x in range(int(n)):
budget = MV
for x in range(niter):
budget -= MV // niter
yield int(np.sign(err) * (MV // niter))
yield int(budget * np.sign(err))
MV -= 1
while True:
yield 0 |
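A usage sketch that pulls the first few manipulated-variable steps from a plan; the numbers are illustrative.
# Usage sketch: the generator yields MV adjustments, then zeros forever.
from itertools import islice
plan = create_ramp_plan(err=15, ramp=6)
first_steps = list(islice(plan, 6))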
def create_attribute(self,column=None,listType=None,namespace=None, network=None, atype=None, verbose=False):
"""
Creates a new edge column.
:param column (string, optional): Unique name of column
:param listType (string, optional): Can be one of integer, long, double,
or string.
:param namespace (string, optional): Node, Edge, and Network objects
support the default, local, and hidden namespaces. Root networks
also support the shared namespace. Custom namespaces may be specified
by Apps.
:param network (string, optional): Specifies a network by name, or by
SUID if the prefix SUID: is used. The keyword CURRENT, or a blank
value can also be used to specify the current network.
:param atype (string, optional): Can be one of integer, long, double,
string, or list.
:param verbose: print more
"""
network=check_network(self,network,verbose=verbose)
PARAMS=set_param(["column","listType","namespace","network","type"],[column,listType,namespace,network,atype])
response=api(url=self.__url+"/create attribute", PARAMS=PARAMS, method="POST", verbose=verbose)
return response | Creates a new edge column.
:param column (string, optional): Unique name of column
:param listType (string, optional): Can be one of integer, long, double,
or string.
:param namespace (string, optional): Node, Edge, and Network objects
support the default, local, and hidden namespaces. Root networks
also support the shared namespace. Custom namespaces may be specified
by Apps.
:param network (string, optional): Specifies a network by name, or by
SUID if the prefix SUID: is used. The keyword CURRENT, or a blank
value can also be used to specify the current network.
:param atype (string, optional): Can be one of integer, long, double,
string, or list.
:param verbose: print more | Below is the the instruction that describes the task:
### Input:
Creates a new edge column.
:param column (string, optional): Unique name of column
:param listType (string, optional): Can be one of integer, long, double,
or string.
:param namespace (string, optional): Node, Edge, and Network objects
support the default, local, and hidden namespaces. Root networks
also support the shared namespace. Custom namespaces may be specified
by Apps.
:param network (string, optional): Specifies a network by name, or by
SUID if the prefix SUID: is used. The keyword CURRENT, or a blank
value can also be used to specify the current network.
:param atype (string, optional): Can be one of integer, long, double,
string, or list.
:param verbose: print more
### Response:
def create_attribute(self,column=None,listType=None,namespace=None, network=None, atype=None, verbose=False):
"""
Creates a new edge column.
:param column (string, optional): Unique name of column
:param listType (string, optional): Can be one of integer, long, double,
or string.
:param namespace (string, optional): Node, Edge, and Network objects
support the default, local, and hidden namespaces. Root networks
also support the shared namespace. Custom namespaces may be specified
by Apps.
:param network (string, optional): Specifies a network by name, or by
SUID if the prefix SUID: is used. The keyword CURRENT, or a blank
value can also be used to specify the current network.
:param atype (string, optional): Can be one of integer, long, double,
string, or list.
:param verbose: print more
"""
network=check_network(self,network,verbose=verbose)
PARAMS=set_param(["column","listType","namespace","network","type"],[column,listType,namespace,network,atype])
response=api(url=self.__url+"/create attribute", PARAMS=PARAMS, method="POST", verbose=verbose)
return response |
def eValueToBitScore(eValue, dbSize, dbSequenceCount, queryLength,
lengthAdjustment):
"""
Convert an e-value to a bit score.
@param eValue: The C{float} e-value to convert.
@param dbSize: The C{int} total size of the database (i.e., the sum of
the lengths of all sequences in the BLAST database).
@param dbSequenceCount: The C{int} number of sequences in the database.
@param queryLength: The C{int} length of the query.
@param lengthAdjustment: The C{int} length adjustment (BLAST XML output
calls this the Statistics_hsp-len).
@return: A C{float} bit score.
"""
effectiveDbSize = (
(dbSize - dbSequenceCount * lengthAdjustment) *
(queryLength - lengthAdjustment)
)
return -1.0 * (log(eValue / effectiveDbSize) / _LOG2) | Convert an e-value to a bit score.
@param eValue: The C{float} e-value to convert.
@param dbSize: The C{int} total size of the database (i.e., the sum of
the lengths of all sequences in the BLAST database).
@param dbSequenceCount: The C{int} number of sequences in the database.
@param queryLength: The C{int} length of the query.
@param lengthAdjustment: The C{int} length adjustment (BLAST XML output
calls this the Statistics_hsp-len).
@return: A C{float} bit score. | Below is the the instruction that describes the task:
### Input:
Convert an e-value to a bit score.
@param eValue: The C{float} e-value to convert.
@param dbSize: The C{int} total size of the database (i.e., the sum of
the lengths of all sequences in the BLAST database).
@param dbSequenceCount: The C{int} number of sequences in the database.
@param queryLength: The C{int} length of the query.
@param lengthAdjustment: The C{int} length adjustment (BLAST XML output
calls this the Statistics_hsp-len).
@return: A C{float} bit score.
### Response:
def eValueToBitScore(eValue, dbSize, dbSequenceCount, queryLength,
lengthAdjustment):
"""
Convert an e-value to a bit score.
@param eValue: The C{float} e-value to convert.
@param dbSize: The C{int} total size of the database (i.e., the sum of
the lengths of all sequences in the BLAST database).
@param dbSequenceCount: The C{int} number of sequences in the database.
@param queryLength: The C{int} length of the query.
@param lengthAdjustment: The C{int} length adjustment (BLAST XML output
calls this the Statistics_hsp-len).
@return: A C{float} bit score.
"""
effectiveDbSize = (
(dbSize - dbSequenceCount * lengthAdjustment) *
(queryLength - lengthAdjustment)
)
return -1.0 * (log(eValue / effectiveDbSize) / _LOG2) |
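An illustrative call with made-up database statistics.
# Usage sketch (all numbers are illustrative):
bitScore = eValueToBitScore(eValue=1e-30, dbSize=10000000,
                            dbSequenceCount=5000, queryLength=300,
                            lengthAdjustment=30)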
def _draw_content(self):
"""
Loop through submissions and fill up the content page.
"""
n_rows, n_cols = self.term.stdscr.getmaxyx()
window = self.term.stdscr.derwin(n_rows - self._row - 1, n_cols, self._row, 0)
window.erase()
win_n_rows, win_n_cols = window.getmaxyx()
self._subwindows = []
page_index, cursor_index, inverted = self.nav.position
step = self.nav.step
# If not inverted, align the first submission with the top and draw
# downwards. If inverted, align the first submission with the bottom
# and draw upwards.
cancel_inverted = True
current_row = (win_n_rows - 1) if inverted else 0
available_rows = win_n_rows
top_item_height = None if inverted else self.nav.top_item_height
for data in self.content.iterate(page_index, step, win_n_cols - 2):
subwin_n_rows = min(available_rows, data['n_rows'])
subwin_inverted = inverted
if top_item_height is not None:
# Special case: draw the page as non-inverted, except for the
# top element. This element will be drawn as inverted with a
# restricted height
subwin_n_rows = min(subwin_n_rows, top_item_height)
subwin_inverted = True
top_item_height = None
subwin_n_cols = win_n_cols - data['h_offset']
start = current_row - subwin_n_rows + 1 if inverted else current_row
subwindow = window.derwin(subwin_n_rows, subwin_n_cols, start, data['h_offset'])
self._subwindows.append((subwindow, data, subwin_inverted))
available_rows -= (subwin_n_rows + 1) # Add one for the blank line
current_row += step * (subwin_n_rows + 1)
if available_rows <= 0:
# Indicate the page is full and we can keep the inverted screen.
cancel_inverted = False
break
if len(self._subwindows) == 1:
# Never draw inverted if only one subwindow. The top of the
# subwindow should always be aligned with the top of the screen.
cancel_inverted = True
if cancel_inverted and self.nav.inverted:
# In some cases we need to make sure that the screen is NOT
# inverted. Unfortunately, this currently means drawing the whole
# page over again. Could not think of a better way to pre-determine
# if the content will fill up the page, given that it is dependent
# on the size of the terminal.
self.nav.flip((len(self._subwindows) - 1))
self._draw_content()
return
if self.nav.cursor_index >= len(self._subwindows):
# Don't allow the cursor to go over the number of subwindows
# This could happen if the window is resized and the cursor index is
# pushed out of bounds
self.nav.cursor_index = len(self._subwindows) - 1
# Now that the windows are setup, we can take a second pass through
# to draw the text onto each subwindow
for index, (win, data, inverted) in enumerate(self._subwindows):
if self.nav.absolute_index >= 0 and index == self.nav.cursor_index:
win.bkgd(str(' '), self.term.attr('Selected'))
with self.term.theme.turn_on_selected():
self._draw_item(win, data, inverted)
else:
win.bkgd(str(' '), self.term.attr('Normal'))
self._draw_item(win, data, inverted)
self._row += win_n_rows | Loop through submissions and fill up the content page. | Below is the the instruction that describes the task:
### Input:
Loop through submissions and fill up the content page.
### Response:
def _draw_content(self):
"""
Loop through submissions and fill up the content page.
"""
n_rows, n_cols = self.term.stdscr.getmaxyx()
window = self.term.stdscr.derwin(n_rows - self._row - 1, n_cols, self._row, 0)
window.erase()
win_n_rows, win_n_cols = window.getmaxyx()
self._subwindows = []
page_index, cursor_index, inverted = self.nav.position
step = self.nav.step
# If not inverted, align the first submission with the top and draw
# downwards. If inverted, align the first submission with the bottom
# and draw upwards.
cancel_inverted = True
current_row = (win_n_rows - 1) if inverted else 0
available_rows = win_n_rows
top_item_height = None if inverted else self.nav.top_item_height
for data in self.content.iterate(page_index, step, win_n_cols - 2):
subwin_n_rows = min(available_rows, data['n_rows'])
subwin_inverted = inverted
if top_item_height is not None:
# Special case: draw the page as non-inverted, except for the
# top element. This element will be drawn as inverted with a
# restricted height
subwin_n_rows = min(subwin_n_rows, top_item_height)
subwin_inverted = True
top_item_height = None
subwin_n_cols = win_n_cols - data['h_offset']
start = current_row - subwin_n_rows + 1 if inverted else current_row
subwindow = window.derwin(subwin_n_rows, subwin_n_cols, start, data['h_offset'])
self._subwindows.append((subwindow, data, subwin_inverted))
available_rows -= (subwin_n_rows + 1) # Add one for the blank line
current_row += step * (subwin_n_rows + 1)
if available_rows <= 0:
# Indicate the page is full and we can keep the inverted screen.
cancel_inverted = False
break
if len(self._subwindows) == 1:
# Never draw inverted if only one subwindow. The top of the
# subwindow should always be aligned with the top of the screen.
cancel_inverted = True
if cancel_inverted and self.nav.inverted:
# In some cases we need to make sure that the screen is NOT
# inverted. Unfortunately, this currently means drawing the whole
# page over again. Could not think of a better way to pre-determine
# if the content will fill up the page, given that it is dependent
# on the size of the terminal.
self.nav.flip((len(self._subwindows) - 1))
self._draw_content()
return
if self.nav.cursor_index >= len(self._subwindows):
# Don't allow the cursor to go over the number of subwindows
# This could happen if the window is resized and the cursor index is
# pushed out of bounds
self.nav.cursor_index = len(self._subwindows) - 1
# Now that the windows are setup, we can take a second pass through
# to draw the text onto each subwindow
for index, (win, data, inverted) in enumerate(self._subwindows):
if self.nav.absolute_index >= 0 and index == self.nav.cursor_index:
win.bkgd(str(' '), self.term.attr('Selected'))
with self.term.theme.turn_on_selected():
self._draw_item(win, data, inverted)
else:
win.bkgd(str(' '), self.term.attr('Normal'))
self._draw_item(win, data, inverted)
self._row += win_n_rows |
def is_associated_file(self):
# type: () -> bool
'''
A method to determine whether this file is 'associated' with another file
on the ISO.
Parameters:
None.
Returns:
True if this file is associated with another file on the ISO, False
otherwise.
'''
if not self._initialized:
raise pycdlibexception.PyCdlibInternalError('Directory Record not yet initialized')
return self.file_flags & (1 << self.FILE_FLAG_ASSOCIATED_FILE_BIT) | A method to determine whether this file is 'associated' with another file
on the ISO.
Parameters:
None.
Returns:
True if this file is associated with another file on the ISO, False
otherwise. | Below is the the instruction that describes the task:
### Input:
A method to determine whether this file is 'associated' with another file
on the ISO.
Parameters:
None.
Returns:
True if this file is associated with another file on the ISO, False
otherwise.
### Response:
def is_associated_file(self):
# type: () -> bool
'''
A method to determine whether this file is 'associated' with another file
on the ISO.
Parameters:
None.
Returns:
True if this file is associated with another file on the ISO, False
otherwise.
'''
if not self._initialized:
raise pycdlibexception.PyCdlibInternalError('Directory Record not yet initialized')
return self.file_flags & (1 << self.FILE_FLAG_ASSOCIATED_FILE_BIT) |
async def connect(host, port, ssl=None):
'''
Async connect and return a Link().
'''
info = {'host': host, 'port': port, 'ssl': ssl}
reader, writer = await asyncio.open_connection(host, port, ssl=ssl)
return await Link.anit(reader, writer, info=info) | Async connect and return a Link(). | Below is the the instruction that describes the task:
### Input:
Async connect and return a Link().
### Response:
async def connect(host, port, ssl=None):
'''
Async connect and return a Link().
'''
info = {'host': host, 'port': port, 'ssl': ssl}
reader, writer = await asyncio.open_connection(host, port, ssl=ssl)
return await Link.anit(reader, writer, info=info) |
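A hedged usage sketch inside an event loop; the host and port are placeholders.
# Hypothetical usage; what is done with the Link afterwards depends on the library.
import asyncio
async def main():
    link = await connect('127.0.0.1', 27492)
    # ... exchange messages over the link ...
asyncio.run(main())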
def axis(origin_size=0.04,
transform=None,
origin_color=None,
axis_radius=None,
axis_length=None):
"""
Return an XYZ axis marker as a Trimesh, which represents position
and orientation. If you set the origin size the other parameters
will be set relative to it.
Parameters
----------
transform : (4, 4) float
Transformation matrix
origin_size : float
Radius of sphere that represents the origin
origin_color : (3,) float or int, uint8 or float
Color of the origin
axis_radius : float
Radius of cylinder that represents x, y, z axis
axis_length: float
Length of cylinder that represents x, y, z axis
Returns
-------
marker : trimesh.Trimesh
Mesh geometry of axis indicators
"""
# the size of the ball representing the origin
origin_size = float(origin_size)
# set the transform and use origin-relative
# sized for other parameters if not specified
if transform is None:
transform = np.eye(4)
if origin_color is None:
origin_color = [255, 255, 255, 255]
if axis_radius is None:
axis_radius = origin_size / 5.0
if axis_length is None:
axis_length = origin_size * 10.0
# generate a ball for the origin
axis_origin = uv_sphere(radius=origin_size,
count=[10, 10])
axis_origin.apply_transform(transform)
# apply color to the origin ball
axis_origin.visual.face_colors = origin_color
# create the cylinder for the z-axis
translation = transformations.translation_matrix(
[0, 0, axis_length / 2])
z_axis = cylinder(
radius=axis_radius,
height=axis_length,
transform=transform.dot(translation))
# XYZ->RGB, Z is blue
z_axis.visual.face_colors = [0, 0, 255]
# create the cylinder for the y-axis
translation = transformations.translation_matrix(
[0, 0, axis_length / 2])
rotation = transformations.rotation_matrix(np.radians(-90),
[1, 0, 0])
y_axis = cylinder(
radius=axis_radius,
height=axis_length,
transform=transform.dot(rotation).dot(translation))
# XYZ->RGB, Y is green
y_axis.visual.face_colors = [0, 255, 0]
# create the cylinder for the x-axis
translation = transformations.translation_matrix(
[0, 0, axis_length / 2])
rotation = transformations.rotation_matrix(np.radians(90),
[0, 1, 0])
x_axis = cylinder(
radius=axis_radius,
height=axis_length,
transform=transform.dot(rotation).dot(translation))
# XYZ->RGB, X is red
x_axis.visual.face_colors = [255, 0, 0]
# append the sphere and three cylinders
marker = util.concatenate([axis_origin,
x_axis,
y_axis,
z_axis])
return marker | Return an XYZ axis marker as a Trimesh, which represents position
and orientation. If you set the origin size the other parameters
will be set relative to it.
Parameters
----------
transform : (4, 4) float
Transformation matrix
origin_size : float
Radius of sphere that represents the origin
origin_color : (3,) float or int, uint8 or float
Color of the origin
axis_radius : float
Radius of cylinder that represents x, y, z axis
axis_length: float
Length of cylinder that represents x, y, z axis
Returns
-------
marker : trimesh.Trimesh
Mesh geometry of axis indicators | Below is the the instruction that describes the task:
### Input:
Return an XYZ axis marker as a Trimesh, which represents position
and orientation. If you set the origin size the other parameters
will be set relative to it.
Parameters
----------
transform : (4, 4) float
Transformation matrix
origin_size : float
Radius of sphere that represents the origin
origin_color : (3,) float or int, uint8 or float
Color of the origin
axis_radius : float
Radius of cylinder that represents x, y, z axis
axis_length: float
Length of cylinder that represents x, y, z axis
Returns
-------
marker : trimesh.Trimesh
Mesh geometry of axis indicators
### Response:
def axis(origin_size=0.04,
transform=None,
origin_color=None,
axis_radius=None,
axis_length=None):
"""
Return an XYZ axis marker as a Trimesh, which represents position
and orientation. If you set the origin size the other parameters
will be set relative to it.
Parameters
----------
transform : (4, 4) float
Transformation matrix
origin_size : float
Radius of sphere that represents the origin
origin_color : (3,) float or int, uint8 or float
Color of the origin
axis_radius : float
Radius of cylinder that represents x, y, z axis
axis_length: float
Length of cylinder that represents x, y, z axis
Returns
-------
marker : trimesh.Trimesh
Mesh geometry of axis indicators
"""
# the size of the ball representing the origin
origin_size = float(origin_size)
# set the transform and use origin-relative
# sized for other parameters if not specified
if transform is None:
transform = np.eye(4)
if origin_color is None:
origin_color = [255, 255, 255, 255]
if axis_radius is None:
axis_radius = origin_size / 5.0
if axis_length is None:
axis_length = origin_size * 10.0
# generate a ball for the origin
axis_origin = uv_sphere(radius=origin_size,
count=[10, 10])
axis_origin.apply_transform(transform)
# apply color to the origin ball
axis_origin.visual.face_colors = origin_color
# create the cylinder for the z-axis
translation = transformations.translation_matrix(
[0, 0, axis_length / 2])
z_axis = cylinder(
radius=axis_radius,
height=axis_length,
transform=transform.dot(translation))
# XYZ->RGB, Z is blue
z_axis.visual.face_colors = [0, 0, 255]
# create the cylinder for the y-axis
translation = transformations.translation_matrix(
[0, 0, axis_length / 2])
rotation = transformations.rotation_matrix(np.radians(-90),
[1, 0, 0])
y_axis = cylinder(
radius=axis_radius,
height=axis_length,
transform=transform.dot(rotation).dot(translation))
# XYZ->RGB, Y is green
y_axis.visual.face_colors = [0, 255, 0]
# create the cylinder for the x-axis
translation = transformations.translation_matrix(
[0, 0, axis_length / 2])
rotation = transformations.rotation_matrix(np.radians(90),
[0, 1, 0])
x_axis = cylinder(
radius=axis_radius,
height=axis_length,
transform=transform.dot(rotation).dot(translation))
# XYZ->RGB, X is red
x_axis.visual.face_colors = [255, 0, 0]
# append the sphere and three cylinders
marker = util.concatenate([axis_origin,
x_axis,
y_axis,
z_axis])
return marker |
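A usage sketch; .show() only works when a trimesh viewer backend is installed.
# Usage sketch: build an axis marker and preview it.
marker = axis(origin_size=0.05)
marker.show()  # assumes a viewer backend (e.g. pyglet) is available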
def build_authorization_arg(authdict):
"""
Create an "Authorization" header value from an authdict (created by generate_response()).
"""
vallist = []
for k in authdict.keys():
vallist += ['%s=%s' % (k,authdict[k])]
return 'Digest '+', '.join(vallist) | Create an "Authorization" header value from an authdict (created by generate_response()). | Below is the the instruction that describes the task:
### Input:
Create an "Authorization" header value from an authdict (created by generate_response()).
### Response:
def build_authorization_arg(authdict):
"""
Create an "Authorization" header value from an authdict (created by generate_response()).
"""
vallist = []
for k in authdict.keys():
vallist += ['%s=%s' % (k,authdict[k])]
return 'Digest '+', '.join(vallist) |
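A usage sketch with an illustrative authdict; key order in the output follows dict iteration order.
# Usage sketch (values are illustrative and already quoted, as Digest auth expects):
authdict = {'username': '"alice"', 'realm': '"example"', 'nonce': '"abc123"'}
header = build_authorization_arg(authdict)
# -> 'Digest username="alice", realm="example", nonce="abc123"'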
def strict_logical(self, value):
"""Validate and set the strict logical flag."""
if value is not None:
if not isinstance(value, bool):
raise TypeError(
'f90nml: error: strict_logical must be a logical value.')
else:
self._strict_logical = value | Validate and set the strict logical flag. | Below is the the instruction that describes the task:
### Input:
Validate and set the strict logical flag.
### Response:
def strict_logical(self, value):
"""Validate and set the strict logical flag."""
if value is not None:
if not isinstance(value, bool):
raise TypeError(
'f90nml: error: strict_logical must be a logical value.')
else:
self._strict_logical = value |
def _generate_examples(self, images_dir_path):
"""Generate flower images and labels given the image directory path.
Args:
images_dir_path: path to the directory where the images are stored.
Yields:
The image path and its corresponding label.
"""
parent_dir = tf.io.gfile.listdir(images_dir_path)[0]
walk_dir = os.path.join(images_dir_path, parent_dir)
dirs = tf.io.gfile.listdir(walk_dir)
for d in dirs:
if tf.io.gfile.isdir(os.path.join(walk_dir, d)):
for full_path, _, fname in tf.io.gfile.walk(os.path.join(walk_dir, d)):
for image_file in fname:
if image_file.endswith(".jpg"):
image_path = os.path.join(full_path, image_file)
yield {
"image": image_path,
"label": d.lower(),
} | Generate flower images and labels given the image directory path.
Args:
images_dir_path: path to the directory where the images are stored.
Yields:
The image path and its corresponding label. | Below is the the instruction that describes the task:
### Input:
Generate flower images and labels given the image directory path.
Args:
images_dir_path: path to the directory where the images are stored.
Yields:
The image path and its corresponding label.
### Response:
def _generate_examples(self, images_dir_path):
"""Generate flower images and labels given the image directory path.
Args:
images_dir_path: path to the directory where the images are stored.
Yields:
The image path and its corresponding label.
"""
parent_dir = tf.io.gfile.listdir(images_dir_path)[0]
walk_dir = os.path.join(images_dir_path, parent_dir)
dirs = tf.io.gfile.listdir(walk_dir)
for d in dirs:
if tf.io.gfile.isdir(os.path.join(walk_dir, d)):
for full_path, _, fname in tf.io.gfile.walk(os.path.join(walk_dir, d)):
for image_file in fname:
if image_file.endswith(".jpg"):
image_path = os.path.join(full_path, image_file)
yield {
"image": image_path,
"label": d.lower(),
} |
def blocks_to_mark_complete_on_view(self, blocks):
"""
Returns a set of blocks which should be marked complete on view and haven't been yet.
"""
blocks = {block for block in blocks if self.can_mark_block_complete_on_view(block)}
completions = self.get_completions({block.location for block in blocks})
return {block for block in blocks if completions.get(block.location, 0) < 1.0} | Returns a set of blocks which should be marked complete on view and haven't been yet. | Below is the the instruction that describes the task:
### Input:
Returns a set of blocks which should be marked complete on view and haven't been yet.
### Response:
def blocks_to_mark_complete_on_view(self, blocks):
"""
Returns a set of blocks which should be marked complete on view and haven't been yet.
"""
blocks = {block for block in blocks if self.can_mark_block_complete_on_view(block)}
completions = self.get_completions({block.location for block in blocks})
return {block for block in blocks if completions.get(block.location, 0) < 1.0} |
def _find_usage_security_groups(self):
"""find usage for security groups"""
vpc_count = 0
paginator = self.conn.get_paginator('describe_db_security_groups')
for page in paginator.paginate():
for group in page['DBSecurityGroups']:
if 'VpcId' in group and group['VpcId'] is not None:
vpc_count += 1
self.limits['Max auths per security group']._add_current_usage(
len(group["EC2SecurityGroups"]) + len(group["IPRanges"]),
aws_type='AWS::RDS::DBSecurityGroup',
resource_id=group['DBSecurityGroupName']
)
self.limits['VPC Security Groups']._add_current_usage(
vpc_count,
aws_type='AWS::RDS::DBSecurityGroup',
) | find usage for security groups | Below is the the instruction that describes the task:
### Input:
find usage for security groups
### Response:
def _find_usage_security_groups(self):
"""find usage for security groups"""
vpc_count = 0
paginator = self.conn.get_paginator('describe_db_security_groups')
for page in paginator.paginate():
for group in page['DBSecurityGroups']:
if 'VpcId' in group and group['VpcId'] is not None:
vpc_count += 1
self.limits['Max auths per security group']._add_current_usage(
len(group["EC2SecurityGroups"]) + len(group["IPRanges"]),
aws_type='AWS::RDS::DBSecurityGroup',
resource_id=group['DBSecurityGroupName']
)
self.limits['VPC Security Groups']._add_current_usage(
vpc_count,
aws_type='AWS::RDS::DBSecurityGroup',
) |
def pem(self):
"""
Serialize in PEM format
"""
bio = Membio()
if not libcrypto.PEM_write_bio_CMS(bio.bio, self.ptr):
raise CMSError("writing CMS to PEM")
return str(bio) | Serialize in PEM format | Below is the the instruction that describes the task:
### Input:
Serialize in PEM format
### Response:
def pem(self):
"""
Serialize in PEM format
"""
bio = Membio()
if not libcrypto.PEM_write_bio_CMS(bio.bio, self.ptr):
raise CMSError("writing CMS to PEM")
return str(bio) |
def Read(f):
"""Reads and returns Config data from a yaml file.
Args:
f: Yaml file to parse.
Returns:
Config object as defined in this file.
Raises:
Error (some subclass): If there is a problem loading or parsing the file.
"""
try:
yaml_data = yaml.load(f)
except yaml.YAMLError as e:
raise ParseError('%s' % e)
except IOError as e:
raise YAMLLoadError('%s' % e)
_CheckData(yaml_data)
try:
return Config(
yaml_data.get('blacklist', ()),
yaml_data.get('whitelist', ('*')))
except UnicodeDecodeError as e:
raise YAMLLoadError('%s' % e) | Reads and returns Config data from a yaml file.
Args:
f: Yaml file to parse.
Returns:
Config object as defined in this file.
Raises:
Error (some subclass): If there is a problem loading or parsing the file. | Below is the the instruction that describes the task:
### Input:
Reads and returns Config data from a yaml file.
Args:
f: Yaml file to parse.
Returns:
Config object as defined in this file.
Raises:
Error (some subclass): If there is a problem loading or parsing the file.
### Response:
def Read(f):
"""Reads and returns Config data from a yaml file.
Args:
f: Yaml file to parse.
Returns:
Config object as defined in this file.
Raises:
Error (some subclass): If there is a problem loading or parsing the file.
"""
try:
yaml_data = yaml.load(f)
except yaml.YAMLError as e:
raise ParseError('%s' % e)
except IOError as e:
raise YAMLLoadError('%s' % e)
_CheckData(yaml_data)
try:
return Config(
yaml_data.get('blacklist', ()),
yaml_data.get('whitelist', ('*')))
except UnicodeDecodeError as e:
raise YAMLLoadError('%s' % e) |
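A hedged usage sketch; the filename and the Config attribute names are assumptions.
# Hypothetical usage (filename and attribute names are assumed):
with open('scrub_config.yaml') as f:
    config = Read(f)
# config.blacklist / config.whitelist would hold the parsed patterns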
def clipped_area(ts, thresh=0, integrator=integrate.trapz):
"""Total value * time above the starting value within a TimeSeries
Arguments:
ts (pandas.Series): Time series to be integrated.
thresh (float): Value to clip the tops off at (crossings will be interpolated)
References:
http://nbviewer.ipython.org/gist/kermit666/5720498
>>> t = ['2014-12-09T00:00', '2014-12-09T00:15', '2014-12-09T00:30', '2014-12-09T00:45',
... '2014-12-09T01:00', '2014-12-09T01:15', '2014-12-09T01:30', '2014-12-09T01:45']
>>> import pandas as pd
>>> ts = pd.Series([217, 234, 235, 231, 219, 219, 231, 232], index=pd.to_datetime(t))
>>> clipped_area(ts, thresh=230) # doctest: +ELLIPSIS
8598.52941...
>>> clipped_area(ts, thresh=234) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
562.5
>>> clipped_area(pd.Series(ts.values, index=ts.index.values.astype(pd.np.int64)),
... thresh=234) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
562.5
"""
integrator = get_integrator(integrator or 0)
ts = insert_crossings(ts, thresh) - thresh
ts = ts[ts >= 0]
# timestamp is in nanoseconds (since 1/1/1970) but this converts it to seconds (SI units)
return integrator(ts, ts.index.astype(np.int64)) / 1.0e9 | Total value * time above the starting value within a TimeSeries
Arguments:
ts (pandas.Series): Time series to be integrated.
thresh (float): Value to clip the tops off at (crossings will be interpolated)
References:
http://nbviewer.ipython.org/gist/kermit666/5720498
>>> t = ['2014-12-09T00:00', '2014-12-09T00:15', '2014-12-09T00:30', '2014-12-09T00:45',
... '2014-12-09T01:00', '2014-12-09T01:15', '2014-12-09T01:30', '2014-12-09T01:45']
>>> import pandas as pd
>>> ts = pd.Series([217, 234, 235, 231, 219, 219, 231, 232], index=pd.to_datetime(t))
>>> clipped_area(ts, thresh=230) # doctest: +ELLIPSIS
8598.52941...
>>> clipped_area(ts, thresh=234) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
562.5
>>> clipped_area(pd.Series(ts.values, index=ts.index.values.astype(pd.np.int64)),
... thresh=234) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
562.5 | Below is the the instruction that describes the task:
### Input:
Total value * time above the starting value within a TimeSeries
Arguments:
ts (pandas.Series): Time series to be integrated.
thresh (float): Value to clip the tops off at (crossings will be interpolated)
References:
http://nbviewer.ipython.org/gist/kermit666/5720498
>>> t = ['2014-12-09T00:00', '2014-12-09T00:15', '2014-12-09T00:30', '2014-12-09T00:45',
... '2014-12-09T01:00', '2014-12-09T01:15', '2014-12-09T01:30', '2014-12-09T01:45']
>>> import pandas as pd
>>> ts = pd.Series([217, 234, 235, 231, 219, 219, 231, 232], index=pd.to_datetime(t))
>>> clipped_area(ts, thresh=230) # doctest: +ELLIPSIS
8598.52941...
>>> clipped_area(ts, thresh=234) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
562.5
>>> clipped_area(pd.Series(ts.values, index=ts.index.values.astype(pd.np.int64)),
... thresh=234) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
562.5
### Response:
def clipped_area(ts, thresh=0, integrator=integrate.trapz):
"""Total value * time above the starting value within a TimeSeries
Arguments:
ts (pandas.Series): Time series to be integrated.
thresh (float): Value to clip the tops off at (crossings will be interpolated)
References:
http://nbviewer.ipython.org/gist/kermit666/5720498
>>> t = ['2014-12-09T00:00', '2014-12-09T00:15', '2014-12-09T00:30', '2014-12-09T00:45',
... '2014-12-09T01:00', '2014-12-09T01:15', '2014-12-09T01:30', '2014-12-09T01:45']
>>> import pandas as pd
>>> ts = pd.Series([217, 234, 235, 231, 219, 219, 231, 232], index=pd.to_datetime(t))
>>> clipped_area(ts, thresh=230) # doctest: +ELLIPSIS
8598.52941...
>>> clipped_area(ts, thresh=234) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
562.5
>>> clipped_area(pd.Series(ts.values, index=ts.index.values.astype(pd.np.int64)),
... thresh=234) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
562.5
"""
integrator = get_integrator(integrator or 0)
ts = insert_crossings(ts, thresh) - thresh
ts = ts[ts >= 0]
# timestamp is in nanoseconds (since 1/1/1970) but this converts it to seconds (SI units)
return integrator(ts, ts.index.astype(np.int64)) / 1.0e9 |
def _filter_by_statement(self, statement):
"""Filter the data collection based on a conditional statement."""
self.__class__._check_conditional_statement(statement, 1)
_filt_values, _filt_datetimes = [], []
for i, a in enumerate(self._values):
if eval(statement, {'a': a}):
_filt_values.append(a)
_filt_datetimes.append(self.datetimes[i])
return _filt_values, _filt_datetimes | Filter the data collection based on a conditional statement. | Below is the the instruction that describes the task:
### Input:
Filter the data collection based on a conditional statement.
### Response:
def _filter_by_statement(self, statement):
"""Filter the data collection based on a conditional statement."""
self.__class__._check_conditional_statement(statement, 1)
_filt_values, _filt_datetimes = [], []
for i, a in enumerate(self._values):
if eval(statement, {'a': a}):
_filt_values.append(a)
_filt_datetimes.append(self.datetimes[i])
return _filt_values, _filt_datetimes |
def query(self, query, media=None, year=None, fields=None, extended=None, **kwargs):
"""Search by titles, descriptions, translated titles, aliases, and people.
**Note:** Results are ordered by the most relevant score.
:param query: Search title or description
:type query: :class:`~python:str`
:param media: Desired media type (or :code:`None` to return all matching items)
**Possible values:**
- :code:`movie`
- :code:`show`
- :code:`episode`
- :code:`person`
- :code:`list`
:type media: :class:`~python:str` or :class:`~python:list` of :class:`~python:str`
:param year: Desired media year (or :code:`None` to return all matching items)
:type year: :class:`~python:str` or :class:`~python:int`
:param fields: Fields to search for :code:`query` (or :code:`None` to search all fields)
:type fields: :class:`~python:str` or :class:`~python:list`
:param extended: Level of information to include in response
**Possible values:**
- :code:`None`: Minimal (e.g. title, year, ids) **(default)**
- :code:`full`: Complete
:type extended: :class:`~python:str`
:param kwargs: Extra request options
:type kwargs: :class:`~python:dict`
:return: Results
:rtype: :class:`~python:list` of :class:`trakt.objects.media.Media`
"""
# Validate parameters
if not media:
warnings.warn(
"\"media\" parameter is now required on the Trakt['search'].query() method",
DeprecationWarning, stacklevel=2
)
if fields and not media:
raise ValueError('"fields" can only be used when the "media" parameter is defined')
# Build query
query = {
'query': query
}
if year:
query['year'] = year
if fields:
query['fields'] = fields
if extended:
query['extended'] = extended
# Serialize media items
if isinstance(media, list):
media = ','.join(media)
# Send request
response = self.http.get(
params=[media],
query=query
)
# Parse response
items = self.get_data(response, **kwargs)
if isinstance(items, requests.Response):
return items
if items is not None:
return SearchMapper.process_many(self.client, items)
return None | Search by titles, descriptions, translated titles, aliases, and people.
**Note:** Results are ordered by the most relevant score.
:param query: Search title or description
:type query: :class:`~python:str`
:param media: Desired media type (or :code:`None` to return all matching items)
**Possible values:**
- :code:`movie`
- :code:`show`
- :code:`episode`
- :code:`person`
- :code:`list`
:type media: :class:`~python:str` or :class:`~python:list` of :class:`~python:str`
:param year: Desired media year (or :code:`None` to return all matching items)
:type year: :class:`~python:str` or :class:`~python:int`
:param fields: Fields to search for :code:`query` (or :code:`None` to search all fields)
:type fields: :class:`~python:str` or :class:`~python:list`
:param extended: Level of information to include in response
**Possible values:**
- :code:`None`: Minimal (e.g. title, year, ids) **(default)**
- :code:`full`: Complete
:type extended: :class:`~python:str`
:param kwargs: Extra request options
:type kwargs: :class:`~python:dict`
:return: Results
:rtype: :class:`~python:list` of :class:`trakt.objects.media.Media` | Below is the the instruction that describes the task:
### Input:
Search by titles, descriptions, translated titles, aliases, and people.
**Note:** Results are ordered by the most relevant score.
:param query: Search title or description
:type query: :class:`~python:str`
:param media: Desired media type (or :code:`None` to return all matching items)
**Possible values:**
- :code:`movie`
- :code:`show`
- :code:`episode`
- :code:`person`
- :code:`list`
:type media: :class:`~python:str` or :class:`~python:list` of :class:`~python:str`
:param year: Desired media year (or :code:`None` to return all matching items)
:type year: :class:`~python:str` or :class:`~python:int`
:param fields: Fields to search for :code:`query` (or :code:`None` to search all fields)
:type fields: :class:`~python:str` or :class:`~python:list`
:param extended: Level of information to include in response
**Possible values:**
- :code:`None`: Minimal (e.g. title, year, ids) **(default)**
- :code:`full`: Complete
:type extended: :class:`~python:str`
:param kwargs: Extra request options
:type kwargs: :class:`~python:dict`
:return: Results
:rtype: :class:`~python:list` of :class:`trakt.objects.media.Media`
### Response:
def query(self, query, media=None, year=None, fields=None, extended=None, **kwargs):
"""Search by titles, descriptions, translated titles, aliases, and people.
**Note:** Results are ordered by the most relevant score.
:param query: Search title or description
:type query: :class:`~python:str`
:param media: Desired media type (or :code:`None` to return all matching items)
**Possible values:**
- :code:`movie`
- :code:`show`
- :code:`episode`
- :code:`person`
- :code:`list`
:type media: :class:`~python:str` or :class:`~python:list` of :class:`~python:str`
:param year: Desired media year (or :code:`None` to return all matching items)
:type year: :class:`~python:str` or :class:`~python:int`
:param fields: Fields to search for :code:`query` (or :code:`None` to search all fields)
:type fields: :class:`~python:str` or :class:`~python:list`
:param extended: Level of information to include in response
**Possible values:**
- :code:`None`: Minimal (e.g. title, year, ids) **(default)**
- :code:`full`: Complete
:type extended: :class:`~python:str`
:param kwargs: Extra request options
:type kwargs: :class:`~python:dict`
:return: Results
:rtype: :class:`~python:list` of :class:`trakt.objects.media.Media`
"""
# Validate parameters
if not media:
warnings.warn(
"\"media\" parameter is now required on the Trakt['search'].query() method",
DeprecationWarning, stacklevel=2
)
if fields and not media:
raise ValueError('"fields" can only be used when the "media" parameter is defined')
# Build query
query = {
'query': query
}
if year:
query['year'] = year
if fields:
query['fields'] = fields
if extended:
query['extended'] = extended
# Serialize media items
if isinstance(media, list):
media = ','.join(media)
# Send request
response = self.http.get(
params=[media],
query=query
)
# Parse response
items = self.get_data(response, **kwargs)
if isinstance(items, requests.Response):
return items
if items is not None:
return SearchMapper.process_many(self.client, items)
return None |
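A hedged usage sketch through the client interface referenced in the deprecation warning above; the .title attribute is an assumption.
# Hypothetical usage:
results = Trakt['search'].query('the martian', media='movie', year=2015)
for item in results:
    print(item.title)  # attribute name assumed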
def resize_file_to(self, in_path, out_path, keep_filename=False):
""" Given a filename, resize and save the image per the specification into out_path
:param in_path: path to image file to save. Must be supported by PIL
:param out_path: path to the directory root for the outputted thumbnails to be stored
:return: None
"""
if keep_filename:
filename = path.join(out_path, path.basename(in_path))
else:
filename = path.join(out_path, self.get_thumbnail_name(in_path))
out_path = path.dirname(filename)
if not path.exists(out_path):
os.makedirs(out_path)
if not path.exists(filename):
try:
image = Image.open(in_path)
thumbnail = self.resize(image)
thumbnail.save(filename)
logger.info("Generated Thumbnail {0}".format(path.basename(filename)))
except IOError:
logger.info("Generating Thumbnail for {0} skipped".format(path.basename(filename))) | Given a filename, resize and save the image per the specification into out_path
:param in_path: path to image file to save. Must be supported by PIL
:param out_path: path to the directory root for the outputted thumbnails to be stored
:return: None | Below is the the instruction that describes the task:
### Input:
Given a filename, resize and save the image per the specification into out_path
:param in_path: path to image file to save. Must be supported by PIL
:param out_path: path to the directory root for the outputted thumbnails to be stored
:return: None
### Response:
def resize_file_to(self, in_path, out_path, keep_filename=False):
""" Given a filename, resize and save the image per the specification into out_path
:param in_path: path to image file to save. Must be supported by PIL
:param out_path: path to the directory root for the outputted thumbnails to be stored
:return: None
"""
if keep_filename:
filename = path.join(out_path, path.basename(in_path))
else:
filename = path.join(out_path, self.get_thumbnail_name(in_path))
out_path = path.dirname(filename)
if not path.exists(out_path):
os.makedirs(out_path)
if not path.exists(filename):
try:
image = Image.open(in_path)
thumbnail = self.resize(image)
thumbnail.save(filename)
logger.info("Generated Thumbnail {0}".format(path.basename(filename)))
except IOError:
logger.info("Generating Thumbnail for {0} skipped".format(path.basename(filename))) |
def get_number_unit(number):
"""get the unit of number"""
n = str(float(number))
mult, submult = n.split('.')
if float(submult) != 0:
unit = '0.' + (len(submult)-1)*'0' + '1'
return float(unit)
else:
return float(1) | get the unit of number | Below is the the instruction that describes the task:
### Input:
get the unit of number
### Response:
def get_number_unit(number):
"""get the unit of number"""
n = str(float(number))
mult, submult = n.split('.')
if float(submult) != 0:
unit = '0.' + (len(submult)-1)*'0' + '1'
return float(unit)
else:
return float(1) |
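A minimal usage sketch (assumes get_number_unit is importable; expected outputs shown as comments):

print(get_number_unit(12.5))    # 0.1   -> smallest decimal place present in 12.5
print(get_number_unit(3.125))   # 0.001
print(get_number_unit(7))       # 1.0   -> integral values map to a unit of 1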
def html_scientific_notation_rate(rate):
"""Helper for convert decimal rate using scientific notation.
For example we want to show the very detail value of fatality rate
because it might be a very small number.
:param rate: Rate value
:type rate: float
:return: Rate value with html tag to show the exponent
:rtype: str
"""
precision = '%.3f'
if rate * 100 > 0:
decimal_rate = Decimal(precision % (rate * 100))
if decimal_rate == Decimal((precision % 0)):
decimal_rate = Decimal(str(rate * 100))
else:
decimal_rate = Decimal(str(rate * 100))
if decimal_rate.as_tuple().exponent >= -3:
rate_percentage = str(decimal_rate)
else:
rate = '%.2E' % decimal_rate
html_rate = rate.split('E')
# we use html tag to show exponent
html_rate[1] = '10<sup>{exponent}</sup>'.format(
exponent=html_rate[1])
html_rate.insert(1, 'x')
rate_percentage = ''.join(html_rate)
    return rate_percentage | Helper to convert a decimal rate using scientific notation.
For example, we want to show the fully detailed value of a fatality rate
because it might be a very small number.
:param rate: Rate value
:type rate: float
:return: Rate value with html tag to show the exponent
:rtype: str | Below is the the instruction that describes the task:
### Input:
Helper to convert a decimal rate using scientific notation.
For example, we want to show the fully detailed value of a fatality rate
because it might be a very small number.
:param rate: Rate value
:type rate: float
:return: Rate value with html tag to show the exponent
:rtype: str
### Response:
def html_scientific_notation_rate(rate):
"""Helper for convert decimal rate using scientific notation.
For example we want to show the very detail value of fatality rate
because it might be a very small number.
:param rate: Rate value
:type rate: float
:return: Rate value with html tag to show the exponent
:rtype: str
"""
precision = '%.3f'
if rate * 100 > 0:
decimal_rate = Decimal(precision % (rate * 100))
if decimal_rate == Decimal((precision % 0)):
decimal_rate = Decimal(str(rate * 100))
else:
decimal_rate = Decimal(str(rate * 100))
if decimal_rate.as_tuple().exponent >= -3:
rate_percentage = str(decimal_rate)
else:
rate = '%.2E' % decimal_rate
html_rate = rate.split('E')
# we use html tag to show exponent
html_rate[1] = '10<sup>{exponent}</sup>'.format(
exponent=html_rate[1])
html_rate.insert(1, 'x')
rate_percentage = ''.join(html_rate)
return rate_percentage |
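A hedged usage sketch (assumes the function above is importable; the exact digits of the second output depend on floating-point repr):

print(html_scientific_notation_rate(0.032))      # '3.200' -- larger rates keep the plain decimal form
print(html_scientific_notation_rate(0.0000002))  # roughly '2.00x10<sup>-05</sup>' -- tiny rates get the exponent markup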
def docker_installed_rpms(broker):
""" Command: /usr/bin/rpm -qa --root `%s` --qf `%s`"""
ctx = broker[DockerImageContext]
root = ctx.root
fmt = DefaultSpecs.rpm_format
cmd = "/usr/bin/rpm -qa --root %s --qf '%s'" % (root, fmt)
result = ctx.shell_out(cmd)
return CommandOutputProvider(cmd, ctx, content=result) | Command: /usr/bin/rpm -qa --root `%s` --qf `%s` | Below is the the instruction that describes the task:
### Input:
Command: /usr/bin/rpm -qa --root `%s` --qf `%s`
### Response:
def docker_installed_rpms(broker):
""" Command: /usr/bin/rpm -qa --root `%s` --qf `%s`"""
ctx = broker[DockerImageContext]
root = ctx.root
fmt = DefaultSpecs.rpm_format
cmd = "/usr/bin/rpm -qa --root %s --qf '%s'" % (root, fmt)
result = ctx.shell_out(cmd)
return CommandOutputProvider(cmd, ctx, content=result) |
def as_batch_body(self):
''' return the current message as expected by batch body format'''
if sys.version_info >= (3,) and isinstance(self.body, bytes):
# It HAS to be string to be serialized in JSON
body = self.body.decode('utf-8')
else:
# Python 2.7 people handle this themself
body = self.body
result = {'Body': body}
# Adds custom properties
if self.custom_properties:
result['UserProperties'] = {name: self._serialize_basic_properties_value(value)
for name, value
in self.custom_properties.items()}
# Adds BrokerProperties
if self.broker_properties:
result['BrokerProperties'] = {name: self._serialize_basic_properties_value(value)
for name, value
in self.broker_properties.items()}
return result | return the current message as expected by batch body format | Below is the the instruction that describes the task:
### Input:
return the current message as expected by batch body format
### Response:
def as_batch_body(self):
''' return the current message as expected by batch body format'''
if sys.version_info >= (3,) and isinstance(self.body, bytes):
# It HAS to be string to be serialized in JSON
body = self.body.decode('utf-8')
else:
# Python 2.7 people handle this themself
body = self.body
result = {'Body': body}
# Adds custom properties
if self.custom_properties:
result['UserProperties'] = {name: self._serialize_basic_properties_value(value)
for name, value
in self.custom_properties.items()}
# Adds BrokerProperties
if self.broker_properties:
result['BrokerProperties'] = {name: self._serialize_basic_properties_value(value)
for name, value
in self.broker_properties.items()}
return result |
def get_command_line_key_for_unknown_config_file_setting(self, key):
"""Compute a commandline arg key to be used for a config file setting
that doesn't correspond to any defined configargparse arg (and so
doesn't have a user-specified commandline arg key).
Args:
key: The config file key that was being set.
"""
key_without_prefix_chars = key.strip(self.prefix_chars)
command_line_key = self.prefix_chars[0]*2 + key_without_prefix_chars
return command_line_key | Compute a commandline arg key to be used for a config file setting
that doesn't correspond to any defined configargparse arg (and so
doesn't have a user-specified commandline arg key).
Args:
key: The config file key that was being set. | Below is the the instruction that describes the task:
### Input:
Compute a commandline arg key to be used for a config file setting
that doesn't correspond to any defined configargparse arg (and so
doesn't have a user-specified commandline arg key).
Args:
key: The config file key that was being set.
### Response:
def get_command_line_key_for_unknown_config_file_setting(self, key):
"""Compute a commandline arg key to be used for a config file setting
that doesn't correspond to any defined configargparse arg (and so
doesn't have a user-specified commandline arg key).
Args:
key: The config file key that was being set.
"""
key_without_prefix_chars = key.strip(self.prefix_chars)
command_line_key = self.prefix_chars[0]*2 + key_without_prefix_chars
return command_line_key |
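The transformation reduces to two string operations; a standalone sketch (assuming the conventional prefix_chars of "-"):

prefix_chars = "-"
key = "log-level"                                                # hypothetical config-file key
command_line_key = prefix_chars[0] * 2 + key.strip(prefix_chars)
print(command_line_key)                                          # --log-level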
def compare_content_type(url, content_type):
'''
    Compare the content type header of the url param with the content_type param and return a boolean
@param url -> string e.g. http://127.0.0.1/index
@param content_type -> string e.g. text/html
'''
try:
response = urllib2.urlopen(url)
except:
return False
    return response.headers.type == content_type | Compare the content type header of the url param with the content_type param and return a boolean
@param url -> string e.g. http://127.0.0.1/index
@param content_type -> string e.g. text/html | Below is the the instruction that describes the task:
### Input:
Compare the content type header of the url param with the content_type param and return a boolean
@param url -> string e.g. http://127.0.0.1/index
@param content_type -> string e.g. text/html
### Response:
def compare_content_type(url, content_type):
'''
    Compare the content type header of the url param with the content_type param and return a boolean
@param url -> string e.g. http://127.0.0.1/index
@param content_type -> string e.g. text/html
'''
try:
response = urllib2.urlopen(url)
except:
return False
return response.headers.type == content_type |
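A minimal call sketch (hypothetical local URL; note the helper relies on Python 2's urllib2 and swallows all request errors):

is_html = compare_content_type('http://127.0.0.1/index', 'text/html')
print(is_html)   # True only if the server answers with Content-Type: text/html; False otherwise or on any error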
def match(self, keys, partial=True):
"""
Check if the value of this namespace is matched by
keys
'*' is treated as wildcard
Arguments:
keys -- list of keys
Examples:
ns = Namespace("a.b.c")
ns.match(["a"]) #True
ns.match(["a","b"]) #True
ns.match(["a","b","c"]) #True
ns.match(["a","*","c"]) #True
ns.match(["b","b","c"]) #False
"""
if not partial and len(keys) != self.length:
return False
c = 0
for k in keys:
if c >= self.length:
return False
a = self.keys[c]
if a != "*" and k != "*" and k != a:
return False
c += 1
return True | Check if the value of this namespace is matched by
keys
'*' is treated as wildcard
Arguments:
keys -- list of keys
Examples:
ns = Namespace("a.b.c")
ns.match(["a"]) #True
ns.match(["a","b"]) #True
ns.match(["a","b","c"]) #True
ns.match(["a","*","c"]) #True
ns.match(["b","b","c"]) #False | Below is the the instruction that describes the task:
### Input:
Check if the value of this namespace is matched by
keys
'*' is treated as wildcard
Arguments:
keys -- list of keys
Examples:
ns = Namespace("a.b.c")
ns.match(["a"]) #True
ns.match(["a","b"]) #True
ns.match(["a","b","c"]) #True
ns.match(["a","*","c"]) #True
ns.match(["b","b","c"]) #False
### Response:
def match(self, keys, partial=True):
"""
Check if the value of this namespace is matched by
keys
'*' is treated as wildcard
Arguments:
keys -- list of keys
Examples:
ns = Namespace("a.b.c")
ns.match(["a"]) #True
ns.match(["a","b"]) #True
ns.match(["a","b","c"]) #True
ns.match(["a","*","c"]) #True
ns.match(["b","b","c"]) #False
"""
if not partial and len(keys) != self.length:
return False
c = 0
for k in keys:
if c >= self.length:
return False
a = self.keys[c]
if a != "*" and k != "*" and k != a:
return False
c += 1
return True |
def remove_lb_nodes(self, lb_id, node_ids):
"""
Remove one or more nodes
:param string lb_id: Balancer id
:param list node_ids: List of node ids
"""
log.info("Removing load balancer nodes %s" % node_ids)
for node_id in node_ids:
self._request('delete', '/loadbalancers/%s/nodes/%s' % (lb_id, node_id)) | Remove one or more nodes
:param string lb_id: Balancer id
:param list node_ids: List of node ids | Below is the the instruction that describes the task:
### Input:
Remove one or more nodes
:param string lb_id: Balancer id
:param list node_ids: List of node ids
### Response:
def remove_lb_nodes(self, lb_id, node_ids):
"""
Remove one or more nodes
:param string lb_id: Balancer id
:param list node_ids: List of node ids
"""
log.info("Removing load balancer nodes %s" % node_ids)
for node_id in node_ids:
self._request('delete', '/loadbalancers/%s/nodes/%s' % (lb_id, node_id)) |
def tuple_of(*generators):
"""
Generates a tuple by generating values for each of the specified
generators.
This is a class factory, it makes a class which is a closure around the
specified generators.
"""
class TupleOfGenerators(ArbitraryInterface):
"""
A closure class around the generators specified above, which
generates a tuple of the generators.
"""
@classmethod
def arbitrary(cls):
"""
Generate a tuple of the enclosed generators.
"""
return tuple([
arbitrary(generator) for generator in generators
if generator is not tuple
])
TupleOfGenerators.__name__ = ''.join([
'tuple_of(', ', '.join(generator.__name__ for generator in generators),
')'
])
return TupleOfGenerators | Generates a tuple by generating values for each of the specified
generators.
This is a class factory, it makes a class which is a closure around the
specified generators. | Below is the the instruction that describes the task:
### Input:
Generates a tuple by generating values for each of the specified
generators.
This is a class factory, it makes a class which is a closure around the
specified generators.
### Response:
def tuple_of(*generators):
"""
Generates a tuple by generating values for each of the specified
generators.
This is a class factory, it makes a class which is a closure around the
specified generators.
"""
class TupleOfGenerators(ArbitraryInterface):
"""
A closure class around the generators specified above, which
generates a tuple of the generators.
"""
@classmethod
def arbitrary(cls):
"""
Generate a tuple of the enclosed generators.
"""
return tuple([
arbitrary(generator) for generator in generators
if generator is not tuple
])
TupleOfGenerators.__name__ = ''.join([
'tuple_of(', ', '.join(generator.__name__ for generator in generators),
')'
])
return TupleOfGenerators |
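A hedged usage sketch (assumes the surrounding framework's arbitrary() dispatcher accepts both built-in types and the classes this factory returns):

pair_generator = tuple_of(int, str)
value = arbitrary(pair_generator)   # e.g. (42, 'kjwje') -- actual values are random
print(type(value))                  # <class 'tuple'>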
def uncompress_file(input_file_name, file_extension, dest_dir):
"""
Uncompress gz and bz2 files
"""
if file_extension.lower() not in ('.gz', '.bz2'):
raise NotImplementedError("Received {} format. Only gz and bz2 "
"files can currently be uncompressed."
.format(file_extension))
if file_extension.lower() == '.gz':
fmodule = gzip.GzipFile
elif file_extension.lower() == '.bz2':
fmodule = bz2.BZ2File
with fmodule(input_file_name, mode='rb') as f_compressed,\
NamedTemporaryFile(dir=dest_dir,
mode='wb',
delete=False) as f_uncompressed:
shutil.copyfileobj(f_compressed, f_uncompressed)
return f_uncompressed.name | Uncompress gz and bz2 files | Below is the the instruction that describes the task:
### Input:
Uncompress gz and bz2 files
### Response:
def uncompress_file(input_file_name, file_extension, dest_dir):
"""
Uncompress gz and bz2 files
"""
if file_extension.lower() not in ('.gz', '.bz2'):
raise NotImplementedError("Received {} format. Only gz and bz2 "
"files can currently be uncompressed."
.format(file_extension))
if file_extension.lower() == '.gz':
fmodule = gzip.GzipFile
elif file_extension.lower() == '.bz2':
fmodule = bz2.BZ2File
with fmodule(input_file_name, mode='rb') as f_compressed,\
NamedTemporaryFile(dir=dest_dir,
mode='wb',
delete=False) as f_uncompressed:
shutil.copyfileobj(f_compressed, f_uncompressed)
return f_uncompressed.name |
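A minimal round-trip sketch (assumes uncompress_file is importable from its module):

import gzip
import os
import tempfile

work_dir = tempfile.mkdtemp()
gz_path = os.path.join(work_dir, "data.txt.gz")
with gzip.open(gz_path, "wb") as f:              # build a small gz fixture
    f.write(b"hello world\n")

out_name = uncompress_file(gz_path, ".gz", dest_dir=work_dir)
with open(out_name, "rb") as f:
    print(f.read())                              # b'hello world\n'; the output gets a random temporary name inside dest_dir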
def reverse_lookup_from_nested_dict(values_dict):
"""
    Create reverse-lookup dictionary mapping each row key to a list of pairs:
[(column key, value), ...]
Parameters
----------
nested_values_dict : dict
column_key -> row_key -> value
weights_dict : dict
column_key -> row_key -> sample weight
Returns dictionary mapping row_key -> [(column key, value)]
"""
reverse_lookup = defaultdict(list)
for column_key, column_dict in values_dict.items():
for row_key, value in column_dict.items():
entry = (column_key, value)
reverse_lookup[row_key].append(entry)
    return reverse_lookup | Create reverse-lookup dictionary mapping each row key to a list of pairs:
[(column key, value), ...]
Parameters
----------
nested_values_dict : dict
column_key -> row_key -> value
weights_dict : dict
column_key -> row_key -> sample weight
Returns dictionary mapping row_key -> [(column key, value)] | Below is the the instruction that describes the task:
### Input:
Create reverse-lookup dictionary mapping each row key to a list of pairs:
[(column key, value), ...]
Parameters
----------
nested_values_dict : dict
column_key -> row_key -> value
weights_dict : dict
column_key -> row_key -> sample weight
Returns dictionary mapping row_key -> [(column key, value)]
### Response:
def reverse_lookup_from_nested_dict(values_dict):
"""
    Create reverse-lookup dictionary mapping each row key to a list of pairs:
[(column key, value), ...]
Parameters
----------
nested_values_dict : dict
column_key -> row_key -> value
weights_dict : dict
column_key -> row_key -> sample weight
Returns dictionary mapping row_key -> [(column key, value)]
"""
reverse_lookup = defaultdict(list)
for column_key, column_dict in values_dict.items():
for row_key, value in column_dict.items():
entry = (column_key, value)
reverse_lookup[row_key].append(entry)
return reverse_lookup |
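A minimal usage sketch (assumes the function is importable; list order follows the insertion order of the input dicts):

values = {
    "math": {"alice": 90, "bob": 75},
    "physics": {"alice": 85},
}
print(dict(reverse_lookup_from_nested_dict(values)))
# {'alice': [('math', 90), ('physics', 85)], 'bob': [('math', 75)]}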
def selected(self):
"""Action to be executed when a valid item has been selected"""
self.selected_text = self.currentText()
self.valid.emit(True, True)
self.open_dir.emit(self.selected_text) | Action to be executed when a valid item has been selected | Below is the the instruction that describes the task:
### Input:
Action to be executed when a valid item has been selected
### Response:
def selected(self):
"""Action to be executed when a valid item has been selected"""
self.selected_text = self.currentText()
self.valid.emit(True, True)
self.open_dir.emit(self.selected_text) |
def load_from_json(data):
"""
        Load a :class:`RegistryResponse` from a dictionary or a string (that
will be parsed as json).
"""
if isinstance(data, str):
data = json.loads(data)
applications = [
ApplicationResponse.load_from_json(a) for a in data['applications']
] if data['applications'] is not None else []
return RegistryResponse(
data['query_uri'], data['success'],
data['has_references'], data['count'], applications
        ) | Load a :class:`RegistryResponse` from a dictionary or a string (that
will be parsed as json). | Below is the the instruction that describes the task:
### Input:
Load a :class:`RegistryResponse` from a dictionary or a string (that
will be parsed as json).
### Response:
def load_from_json(data):
"""
        Load a :class:`RegistryResponse` from a dictionary or a string (that
will be parsed as json).
"""
if isinstance(data, str):
data = json.loads(data)
applications = [
ApplicationResponse.load_from_json(a) for a in data['applications']
] if data['applications'] is not None else []
return RegistryResponse(
data['query_uri'], data['success'],
data['has_references'], data['count'], applications
) |
def digest_content(self, rule):
"""
Walk on rule content tokens to return a dict of properties.
This is pretty naive and will choke/fail on everything that is more
evolved than simple ``ident(string):value(string)``
Arguments:
rule (tinycss2.ast.QualifiedRule): Qualified rule object as
returned by tinycss2.
Returns:
            dict: Dictionary of retrieved variables and properties.
"""
data = OrderedDict()
current_key = None
for token in rule.content:
# Assume first identity token is the property name
if token.type == 'ident':
# Ignore starting '-' from css variables
name = token.value
if name.startswith('-'):
name = name[1:]
current_key = name
data[current_key] = None
# Assume first following string token is the property value.
if token.type == 'string':
data[current_key] = token.value
return data | Walk on rule content tokens to return a dict of properties.
This is pretty naive and will choke/fail on everything that is more
evolved than simple ``ident(string):value(string)``
Arguments:
rule (tinycss2.ast.QualifiedRule): Qualified rule object as
returned by tinycss2.
Returns:
    dict: Dictionary of retrieved variables and properties. | Below is the the instruction that describes the task:
### Input:
Walk on rule content tokens to return a dict of properties.
This is pretty naive and will choke/fail on everything that is more
evolved than simple ``ident(string):value(string)``
Arguments:
rule (tinycss2.ast.QualifiedRule): Qualified rule object as
returned by tinycss2.
Returns:
    dict: Dictionary of retrieved variables and properties.
### Response:
def digest_content(self, rule):
"""
Walk on rule content tokens to return a dict of properties.
This is pretty naive and will choke/fail on everything that is more
evolved than simple ``ident(string):value(string)``
Arguments:
rule (tinycss2.ast.QualifiedRule): Qualified rule object as
returned by tinycss2.
Returns:
            dict: Dictionary of retrieved variables and properties.
"""
data = OrderedDict()
current_key = None
for token in rule.content:
# Assume first identity token is the property name
if token.type == 'ident':
# Ignore starting '-' from css variables
name = token.value
if name.startswith('-'):
name = name[1:]
current_key = name
data[current_key] = None
# Assume first following string token is the property value.
if token.type == 'string':
data[current_key] = token.value
return data |
def walk_direction_preheel(self, data_frame):
"""
Estimate local walk (not cardinal) direction with pre-heel strike phase.
Inspired by Nirupam Roy's B.E. thesis: "WalkCompass: Finding Walking Direction Leveraging Smartphone's Inertial Sensors"
:param data_frame: The data frame. It should have x, y, and z columns.
:type data_frame: pandas.DataFrame
:return: Unit vector of local walk (not cardinal) direction.
:rtype: numpy.ndarray
"""
# Sum of absolute values across accelerometer axes:
data = data_frame.x.abs() + data_frame.y.abs() + data_frame.z.abs()
# Find maximum peaks of smoothed data:
dummy, ipeaks_smooth = self.heel_strikes(data)
data = data.values
# Compute number of samples between peaks using the real part of the FFT:
interpeak = compute_interpeak(data, self.sampling_frequency)
decel = np.int(np.round(self.stride_fraction * interpeak))
# Find maximum peaks close to maximum peaks of smoothed data:
ipeaks = []
for ipeak_smooth in ipeaks_smooth:
ipeak = np.argmax(data[ipeak_smooth - decel:ipeak_smooth + decel])
ipeak += ipeak_smooth - decel
ipeaks.append(ipeak)
# Compute the average vector for each deceleration phase:
vectors = []
for ipeak in ipeaks:
decel_vectors = np.asarray([[data_frame.x[i], data_frame.y[i], data_frame.z[i]]
for i in range(ipeak - decel, ipeak)])
vectors.append(np.mean(decel_vectors, axis=0))
# Compute the average deceleration vector and take the opposite direction:
direction = -1 * np.mean(vectors, axis=0)
# Return the unit vector in this direction:
direction /= np.sqrt(direction.dot(direction))
return direction | Estimate local walk (not cardinal) direction with pre-heel strike phase.
Inspired by Nirupam Roy's B.E. thesis: "WalkCompass: Finding Walking Direction Leveraging Smartphone's Inertial Sensors"
:param data_frame: The data frame. It should have x, y, and z columns.
:type data_frame: pandas.DataFrame
:return: Unit vector of local walk (not cardinal) direction.
:rtype: numpy.ndarray | Below is the the instruction that describes the task:
### Input:
Estimate local walk (not cardinal) direction with pre-heel strike phase.
Inspired by Nirupam Roy's B.E. thesis: "WalkCompass: Finding Walking Direction Leveraging Smartphone's Inertial Sensors"
:param data_frame: The data frame. It should have x, y, and z columns.
:type data_frame: pandas.DataFrame
:return: Unit vector of local walk (not cardinal) direction.
:rtype: numpy.ndarray
### Response:
def walk_direction_preheel(self, data_frame):
"""
Estimate local walk (not cardinal) direction with pre-heel strike phase.
Inspired by Nirupam Roy's B.E. thesis: "WalkCompass: Finding Walking Direction Leveraging Smartphone's Inertial Sensors"
:param data_frame: The data frame. It should have x, y, and z columns.
:type data_frame: pandas.DataFrame
:return: Unit vector of local walk (not cardinal) direction.
:rtype: numpy.ndarray
"""
# Sum of absolute values across accelerometer axes:
data = data_frame.x.abs() + data_frame.y.abs() + data_frame.z.abs()
# Find maximum peaks of smoothed data:
dummy, ipeaks_smooth = self.heel_strikes(data)
data = data.values
# Compute number of samples between peaks using the real part of the FFT:
interpeak = compute_interpeak(data, self.sampling_frequency)
decel = np.int(np.round(self.stride_fraction * interpeak))
# Find maximum peaks close to maximum peaks of smoothed data:
ipeaks = []
for ipeak_smooth in ipeaks_smooth:
ipeak = np.argmax(data[ipeak_smooth - decel:ipeak_smooth + decel])
ipeak += ipeak_smooth - decel
ipeaks.append(ipeak)
# Compute the average vector for each deceleration phase:
vectors = []
for ipeak in ipeaks:
decel_vectors = np.asarray([[data_frame.x[i], data_frame.y[i], data_frame.z[i]]
for i in range(ipeak - decel, ipeak)])
vectors.append(np.mean(decel_vectors, axis=0))
# Compute the average deceleration vector and take the opposite direction:
direction = -1 * np.mean(vectors, axis=0)
# Return the unit vector in this direction:
direction /= np.sqrt(direction.dot(direction))
return direction |
def ctcp_reply(self, command, dst, message=None):
"""
Sends a reply to a CTCP request.
:param command: CTCP command to use.
:type command: str
:param dst: sender of the initial request.
:type dst: str
:param message: data to attach to the reply.
:type message: str
"""
if message is None:
raw_cmd = u'\x01{0}\x01'.format(command)
else:
raw_cmd = u'\x01{0} {1}\x01'.format(command, message)
self.notice(dst, raw_cmd) | Sends a reply to a CTCP request.
:param command: CTCP command to use.
:type command: str
:param dst: sender of the initial request.
:type dst: str
:param message: data to attach to the reply.
:type message: str | Below is the the instruction that describes the task:
### Input:
Sends a reply to a CTCP request.
:param command: CTCP command to use.
:type command: str
:param dst: sender of the initial request.
:type dst: str
:param message: data to attach to the reply.
:type message: str
### Response:
def ctcp_reply(self, command, dst, message=None):
"""
Sends a reply to a CTCP request.
:param command: CTCP command to use.
:type command: str
:param dst: sender of the initial request.
:type dst: str
:param message: data to attach to the reply.
:type message: str
"""
if message is None:
raw_cmd = u'\x01{0}\x01'.format(command)
else:
raw_cmd = u'\x01{0} {1}\x01'.format(command, message)
self.notice(dst, raw_cmd) |
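The CTCP framing itself is independent of any IRC connection; a standalone sketch of the payload built above:

command, message = 'VERSION', 'mybot 1.0'              # hypothetical CTCP exchange
raw_cmd = u'\x01{0} {1}\x01'.format(command, message)
print(repr(raw_cmd))                                   # '\x01VERSION mybot 1.0\x01'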
def __set_style_sheet(self):
"""
Sets the Widget stylesheet.
"""
colors = map(
lambda x: "rgb({0}, {1}, {2}, {3})".format(x.red(), x.green(), x.blue(), int(self.__opacity * 255)),
(self.__color, self.__background_color, self.__border_color))
self.setStyleSheet(self.__style.format(*colors)) | Sets the Widget stylesheet. | Below is the the instruction that describes the task:
### Input:
Sets the Widget stylesheet.
### Response:
def __set_style_sheet(self):
"""
Sets the Widget stylesheet.
"""
colors = map(
lambda x: "rgb({0}, {1}, {2}, {3})".format(x.red(), x.green(), x.blue(), int(self.__opacity * 255)),
(self.__color, self.__background_color, self.__border_color))
self.setStyleSheet(self.__style.format(*colors)) |