code (string, 75 to 104k chars) | docstring (string, 1 to 46.9k chars) | text (string, 164 to 112k chars)
---|---|---|
def add_callback(self, method):
"""
Attaches a method that will be called when the future finishes.
:param method: A callable from an actor that will be called
when the future completes. The only argument for that
method must be the future itself from which you can get the
result through :meth:`future.result()`. If the future has
already completed, then the callable will be called
immediately.
.. note:: This functionality only works when called from an actor,
specifying a method from the same actor.
"""
from_actor = get_current()
if from_actor is not None:
callback = (method, from_actor.channel, from_actor.url)
with self.__condition:
if self.__state is not FINISHED:
self.__callbacks.append(callback)
return
# Invoke the callback directly
# msg = TellRequest(TELL, method, [self], from_actor.url)
msg = {TYPE: TELL, METHOD: method, PARAMS: ([self], {}),
TO: from_actor.url}
from_actor.channel.send(msg)
else:
raise FutureError("add_callback only works when called " +
"from inside an actor") | Attaches a mehtod that will be called when the future finishes.
:param method: A callable from an actor that will be called
when the future completes. The only argument for that
method must be the future itself from wich you can get the
result though `future.:meth:`result()``. If the future has
already completed, then the callable will be called
immediately.
.. note:: This functionallity only works when called from an actor,
specifying a method from the same actor. | Below is the the instruction that describes the task:
### Input:
Attaches a method that will be called when the future finishes.
:param method: A callable from an actor that will be called
when the future completes. The only argument for that
method must be the future itself from which you can get the
result through :meth:`future.result()`. If the future has
already completed, then the callable will be called
immediately.
.. note:: This functionality only works when called from an actor,
specifying a method from the same actor.
### Response:
def add_callback(self, method):
"""
Attaches a method that will be called when the future finishes.
:param method: A callable from an actor that will be called
when the future completes. The only argument for that
method must be the future itself from which you can get the
result through :meth:`future.result()`. If the future has
already completed, then the callable will be called
immediately.
.. note:: This functionality only works when called from an actor,
specifying a method from the same actor.
"""
from_actor = get_current()
if from_actor is not None:
callback = (method, from_actor.channel, from_actor.url)
with self.__condition:
if self.__state is not FINISHED:
self.__callbacks.append(callback)
return
# Invoke the callback directly
# msg = TellRequest(TELL, method, [self], from_actor.url)
msg = {TYPE: TELL, METHOD: method, PARAMS: ([self], {}),
TO: from_actor.url}
from_actor.channel.send(msg)
else:
raise FutureError("add_callback only works when called " +
"from inside an actor") |
def _file_num_records_cached(filename):
"""Return the number of TFRecords in a file."""
# Cache the result, as this is expensive to compute
if filename in _file_num_records_cache:
return _file_num_records_cache[filename]
ret = 0
for _ in tf.python_io.tf_record_iterator(filename):
ret += 1
_file_num_records_cache[filename] = ret
    return ret | Return the number of TFRecords in a file. | Below is the instruction that describes the task:
### Input:
Return the number of TFRecords in a file.
### Response:
def _file_num_records_cached(filename):
"""Return the number of TFRecords in a file."""
# Cache the result, as this is expensive to compute
if filename in _file_num_records_cache:
return _file_num_records_cache[filename]
ret = 0
for _ in tf.python_io.tf_record_iterator(filename):
ret += 1
_file_num_records_cache[filename] = ret
return ret |
def _PreParse(self, key, value):
"""Executed against each field of each row read from index table."""
if key == "Command":
return re.sub(r"(\[\[.+?\]\])", self._Completion, value)
else:
        return value | Executed against each field of each row read from index table. | Below is the instruction that describes the task:
### Input:
Executed against each field of each row read from index table.
### Response:
def _PreParse(self, key, value):
"""Executed against each field of each row read from index table."""
if key == "Command":
return re.sub(r"(\[\[.+?\]\])", self._Completion, value)
else:
return value |
def power(a,b):
'''
power(a,b) is equivalent to a**b except that, like the neuropythy.util.times function, it
threads over the earliest dimension possible rather than the latest, as numpy's power function
and ** syntax do. The power() function also works with sparse arrays; though it must reify
them during the process.
'''
(a,b) = unbroadcast(a,b)
return cpower(a,b) | power(a,b) is equivalent to a**b except that, like the neuropythy.util.times function, it
threads over the earliest dimension possible rather than the latest, as numpy's power function
and ** syntax do. The power() function also works with sparse arrays; though it must reify
them during the process. | Below is the instruction that describes the task:
### Input:
power(a,b) is equivalent to a**b except that, like the neuropythy.util.times function, it
threads over the earliest dimension possible rather than the latest, as numpy's power function
and ** syntax do. The power() function also works with sparse arrays; though it must reify
them during the process.
### Response:
def power(a,b):
'''
power(a,b) is equivalent to a**b except that, like the neuropythy.util.times function, it
threads over the earliest dimension possible rather than the latest, as numpy's power function
and ** syntax do. The power() function also works with sparse arrays; though it must reify
them during the process.
'''
(a,b) = unbroadcast(a,b)
return cpower(a,b) |
def create(self, equipments):
"""
Method to create equipments
:param equipments: List containing equipments desired to be created on database
:return: None
"""
data = {'equipments': equipments}
return super(ApiEquipment, self).post('api/v3/equipment/', data) | Method to create equipments
:param equipments: List containing equipments desired to be created on database
    :return: None | Below is the instruction that describes the task:
### Input:
Method to create equipments
:param equipments: List containing equipments desired to be created on database
:return: None
### Response:
def create(self, equipments):
"""
Method to create equipments
:param equipments: List containing equipments desired to be created on database
:return: None
"""
data = {'equipments': equipments}
return super(ApiEquipment, self).post('api/v3/equipment/', data) |
def transform(self, job_name, model_name, strategy, max_concurrent_transforms, max_payload, env,
input_config, output_config, resource_config, tags):
"""Create an Amazon SageMaker transform job.
Args:
job_name (str): Name of the transform job being created.
model_name (str): Name of the SageMaker model being used for the transform job.
strategy (str): The strategy used to decide how to batch records in a single request.
Possible values are 'MULTI_RECORD' and 'SINGLE_RECORD'.
max_concurrent_transforms (int): The maximum number of HTTP requests to be made to
each individual transform container at one time.
max_payload (int): Maximum size of the payload in a single HTTP request to the container in MB.
env (dict): Environment variables to be set for use during the transform job.
input_config (dict): A dictionary describing the input data (and its location) for the job.
output_config (dict): A dictionary describing the output location for the job.
resource_config (dict): A dictionary describing the resources to complete the job.
tags (list[dict]): List of tags for labeling a training job. For more, see
https://docs.aws.amazon.com/sagemaker/latest/dg/API_Tag.html.
"""
transform_request = {
'TransformJobName': job_name,
'ModelName': model_name,
'TransformInput': input_config,
'TransformOutput': output_config,
'TransformResources': resource_config,
}
if strategy is not None:
transform_request['BatchStrategy'] = strategy
if max_concurrent_transforms is not None:
transform_request['MaxConcurrentTransforms'] = max_concurrent_transforms
if max_payload is not None:
transform_request['MaxPayloadInMB'] = max_payload
if env is not None:
transform_request['Environment'] = env
if tags is not None:
transform_request['Tags'] = tags
LOGGER.info('Creating transform job with name: {}'.format(job_name))
LOGGER.debug('Transform request: {}'.format(json.dumps(transform_request, indent=4)))
self.sagemaker_client.create_transform_job(**transform_request) | Create an Amazon SageMaker transform job.
Args:
job_name (str): Name of the transform job being created.
model_name (str): Name of the SageMaker model being used for the transform job.
strategy (str): The strategy used to decide how to batch records in a single request.
Possible values are 'MULTI_RECORD' and 'SINGLE_RECORD'.
max_concurrent_transforms (int): The maximum number of HTTP requests to be made to
each individual transform container at one time.
max_payload (int): Maximum size of the payload in a single HTTP request to the container in MB.
env (dict): Environment variables to be set for use during the transform job.
input_config (dict): A dictionary describing the input data (and its location) for the job.
output_config (dict): A dictionary describing the output location for the job.
resource_config (dict): A dictionary describing the resources to complete the job.
tags (list[dict]): List of tags for labeling a training job. For more, see
            https://docs.aws.amazon.com/sagemaker/latest/dg/API_Tag.html. | Below is the instruction that describes the task:
### Input:
Create an Amazon SageMaker transform job.
Args:
job_name (str): Name of the transform job being created.
model_name (str): Name of the SageMaker model being used for the transform job.
strategy (str): The strategy used to decide how to batch records in a single request.
Possible values are 'MULTI_RECORD' and 'SINGLE_RECORD'.
max_concurrent_transforms (int): The maximum number of HTTP requests to be made to
each individual transform container at one time.
max_payload (int): Maximum size of the payload in a single HTTP request to the container in MB.
env (dict): Environment variables to be set for use during the transform job.
input_config (dict): A dictionary describing the input data (and its location) for the job.
output_config (dict): A dictionary describing the output location for the job.
resource_config (dict): A dictionary describing the resources to complete the job.
tags (list[dict]): List of tags for labeling a training job. For more, see
https://docs.aws.amazon.com/sagemaker/latest/dg/API_Tag.html.
### Response:
def transform(self, job_name, model_name, strategy, max_concurrent_transforms, max_payload, env,
input_config, output_config, resource_config, tags):
"""Create an Amazon SageMaker transform job.
Args:
job_name (str): Name of the transform job being created.
model_name (str): Name of the SageMaker model being used for the transform job.
strategy (str): The strategy used to decide how to batch records in a single request.
Possible values are 'MULTI_RECORD' and 'SINGLE_RECORD'.
max_concurrent_transforms (int): The maximum number of HTTP requests to be made to
each individual transform container at one time.
max_payload (int): Maximum size of the payload in a single HTTP request to the container in MB.
env (dict): Environment variables to be set for use during the transform job.
input_config (dict): A dictionary describing the input data (and its location) for the job.
output_config (dict): A dictionary describing the output location for the job.
resource_config (dict): A dictionary describing the resources to complete the job.
tags (list[dict]): List of tags for labeling a training job. For more, see
https://docs.aws.amazon.com/sagemaker/latest/dg/API_Tag.html.
"""
transform_request = {
'TransformJobName': job_name,
'ModelName': model_name,
'TransformInput': input_config,
'TransformOutput': output_config,
'TransformResources': resource_config,
}
if strategy is not None:
transform_request['BatchStrategy'] = strategy
if max_concurrent_transforms is not None:
transform_request['MaxConcurrentTransforms'] = max_concurrent_transforms
if max_payload is not None:
transform_request['MaxPayloadInMB'] = max_payload
if env is not None:
transform_request['Environment'] = env
if tags is not None:
transform_request['Tags'] = tags
LOGGER.info('Creating transform job with name: {}'.format(job_name))
LOGGER.debug('Transform request: {}'.format(json.dumps(transform_request, indent=4)))
self.sagemaker_client.create_transform_job(**transform_request) |
def get_queryset(self):
"""
This method is repeated because some managers that don't use super() or alter queryset class
        may return a queryset that is not a subclass of MultilingualQuerySet.
"""
qs = super(MultilingualManager, self).get_queryset()
if isinstance(qs, MultilingualQuerySet):
# Is already patched by MultilingualQuerysetManager - in most of the cases
# when custom managers use super() properly in get_queryset.
return qs
return self._patch_queryset(qs) | This method is repeated because some managers that don't use super() or alter queryset class
may return a queryset that is not a subclass of MultilingualQuerySet. | Below is the instruction that describes the task:
### Input:
This method is repeated because some managers that don't use super() or alter queryset class
may return a queryset that is not a subclass of MultilingualQuerySet.
### Response:
def get_queryset(self):
"""
This method is repeated because some managers that don't use super() or alter queryset class
        may return a queryset that is not a subclass of MultilingualQuerySet.
"""
qs = super(MultilingualManager, self).get_queryset()
if isinstance(qs, MultilingualQuerySet):
# Is already patched by MultilingualQuerysetManager - in most of the cases
# when custom managers use super() properly in get_queryset.
return qs
return self._patch_queryset(qs) |
def _create_file():
"""
Returns a file handle which is used to record audio
"""
f = wave.open('audio.wav', mode='wb')
f.setnchannels(2)
p = pyaudio.PyAudio()
f.setsampwidth(p.get_sample_size(pyaudio.paInt16))
f.setframerate(p.get_default_input_device_info()['defaultSampleRate'])
try:
yield f
finally:
        f.close() | Returns a file handle which is used to record audio | Below is the instruction that describes the task:
### Input:
Returns a file handle which is used to record audio
### Response:
def _create_file():
"""
Returns a file handle which is used to record audio
"""
f = wave.open('audio.wav', mode='wb')
f.setnchannels(2)
p = pyaudio.PyAudio()
f.setsampwidth(p.get_sample_size(pyaudio.paInt16))
f.setframerate(p.get_default_input_device_info()['defaultSampleRate'])
try:
yield f
finally:
f.close() |
def compute_payments(self, precision=None):
'''
Returns the total amount of payments made to this invoice.
@param precision:int Number of decimal places
@return: Decimal
'''
return quantize(sum([payment.amount for payment in self.__payments]),
precision) | Returns the total amount of payments made to this invoice.
@param precision:int Number of decimal places
    @return: Decimal | Below is the instruction that describes the task:
### Input:
Returns the total amount of payments made to this invoice.
@param precision:int Number of decimal places
@return: Decimal
### Response:
def compute_payments(self, precision=None):
'''
Returns the total amount of payments made to this invoice.
@param precision:int Number of decimal places
@return: Decimal
'''
return quantize(sum([payment.amount for payment in self.__payments]),
precision) |
def write_smet(filename, data, metadata, nodata_value=-999, mode='h', check_nan=True):
"""writes smet files
Parameters
----
filename : filename/location of output
data : data to write as pandas df
metadata: header to write, input as dict
nodata_value: Nodata Value to write/use
mode: defines whether to write daily ("d") or continuous data (default 'h')
check_nan: will check if a column contains only NaNs and, if True, will not write such columns (default True)
"""
# dictionary
    # based on smet spec V.1.1 and self-defined
# daily data
dict_d= {'tmean':'TA',
             'tmin':'TMIN', #no spec
             'tmax':'TMAX', #no spec
'precip':'PSUM',
'glob':'ISWR', #no spec
'hum':'RH',
'wind':'VW'
}
#hourly data
dict_h= {'temp':'TA',
'precip':'PSUM',
'glob':'ISWR', #no spec
'hum':'RH',
'wind':'VW'
}
#rename columns
if mode == "d":
data = data.rename(columns=dict_d)
if mode == "h":
data = data.rename(columns=dict_h)
if check_nan:
        #get all columns with data
datas_in = data.sum().dropna().to_frame().T
        #get columns with no data
drop = [data_nan for data_nan in data.columns if data_nan not in datas_in]
#delete columns
data = data.drop(drop, axis=1)
with open(filename, 'w') as f:
#preparing data
        #convert date_times to SMET timestamps
if mode == "d":
t = '%Y-%m-%dT00:00'
if mode == "h":
t = '%Y-%m-%dT%H:%M'
data['timestamp'] = [d.strftime(t) for d in data.index]
cols = data.columns.tolist()
cols = cols[-1:] + cols[:-1]
data = data[cols]
        #metadata update
metadata['fields'] = ' '.join(data.columns)
metadata["units_multiplier"] = len(metadata['fields'].split())*"1 "
#writing data
#metadata
f.write('SMET 1.1 ASCII\n')
f.write('[HEADER]\n')
for k, v in metadata.items():
f.write('{} = {}\n'.format(k, v))
#data
f.write('[DATA]\n')
data_str = data.fillna(nodata_value).to_string(
header=False,
index=False,
float_format=lambda x: '{:.2f}'.format(x),
)
f.write(data_str) | writes smet files
Parameters
----
filename : filename/location of output
data : data to write as pandas df
metadata: header to write, input as dict
nodata_value: Nodata Value to write/use
mode: defines whether to write daily ("d") or continuous data (default 'h')
check_nan: will check if a column contains only NaNs and, if True, will not write such columns (default True) | Below is the instruction that describes the task:
### Input:
writes smet files
Parameters
----
filename : filename/location of output
data : data to write as pandas df
metadata: header to write, input as dict
nodata_value: Nodata Value to write/use
mode: defines whether to write daily ("d") or continuous data (default 'h')
check_nan: will check if a column contains only NaNs and, if True, will not write such columns (default True)
### Response:
def write_smet(filename, data, metadata, nodata_value=-999, mode='h', check_nan=True):
"""writes smet files
Parameters
----
filename : filename/location of output
data : data to write as pandas df
metadata: header to write, input as dict
nodata_value: Nodata Value to write/use
mode: defines whether to write daily ("d") or continuous data (default 'h')
check_nan: will check if a column contains only NaNs and, if True, will not write such columns (default True)
"""
# dictionary
    # based on smet spec V.1.1 and self-defined
# daily data
dict_d= {'tmean':'TA',
             'tmin':'TMIN', #no spec
             'tmax':'TMAX', #no spec
'precip':'PSUM',
'glob':'ISWR', #no spec
'hum':'RH',
'wind':'VW'
}
#hourly data
dict_h= {'temp':'TA',
'precip':'PSUM',
'glob':'ISWR', #no spec
'hum':'RH',
'wind':'VW'
}
#rename columns
if mode == "d":
data = data.rename(columns=dict_d)
if mode == "h":
data = data.rename(columns=dict_h)
if check_nan:
        #get all columns with data
datas_in = data.sum().dropna().to_frame().T
        #get columns with no data
drop = [data_nan for data_nan in data.columns if data_nan not in datas_in]
#delete columns
data = data.drop(drop, axis=1)
with open(filename, 'w') as f:
#preparing data
        #convert date_times to SMET timestamps
if mode == "d":
t = '%Y-%m-%dT00:00'
if mode == "h":
t = '%Y-%m-%dT%H:%M'
data['timestamp'] = [d.strftime(t) for d in data.index]
cols = data.columns.tolist()
cols = cols[-1:] + cols[:-1]
data = data[cols]
        #metadata update
metadata['fields'] = ' '.join(data.columns)
metadata["units_multiplier"] = len(metadata['fields'].split())*"1 "
#writing data
#metadata
f.write('SMET 1.1 ASCII\n')
f.write('[HEADER]\n')
for k, v in metadata.items():
f.write('{} = {}\n'.format(k, v))
#data
f.write('[DATA]\n')
data_str = data.fillna(nodata_value).to_string(
header=False,
index=False,
float_format=lambda x: '{:.2f}'.format(x),
)
f.write(data_str) |
def remove_colormap(self, removal_type):
"""Remove a palette (colormap); if no colormap, returns a copy of this
image
removal_type - any of lept.REMOVE_CMAP_*
"""
with _LeptonicaErrorTrap():
return Pix(
lept.pixRemoveColormapGeneral(self._cdata, removal_type, lept.L_COPY)
) | Remove a palette (colormap); if no colormap, returns a copy of this
image
    removal_type - any of lept.REMOVE_CMAP_* | Below is the instruction that describes the task:
### Input:
Remove a palette (colormap); if no colormap, returns a copy of this
image
removal_type - any of lept.REMOVE_CMAP_*
### Response:
def remove_colormap(self, removal_type):
"""Remove a palette (colormap); if no colormap, returns a copy of this
image
removal_type - any of lept.REMOVE_CMAP_*
"""
with _LeptonicaErrorTrap():
return Pix(
lept.pixRemoveColormapGeneral(self._cdata, removal_type, lept.L_COPY)
) |
def get_aws_s3_handle(config_map):
"""Convenience function for getting AWS S3 objects
Added by [email protected], Jan 9, 2015
Added to aws_adapter build by [email protected], Jan 25, 2015, and
added support for Configuration
May 25, 2017: Switch to boto3
"""
url = 'https://' + config_map['s3_bucket'] + '.s3.amazonaws.com'
if not AWS_CLIENT.is_aws_s3_client_set():
client = boto3.client(
's3',
aws_access_key_id=config_map['put_public_key'],
aws_secret_access_key=config_map['put_private_key']
)
AWS_CLIENT.set_aws_s3_client(client)
else:
client = AWS_CLIENT.s3
return client, url | Convenience function for getting AWS S3 objects
Added by [email protected], Jan 9, 2015
Added to aws_adapter build by [email protected], Jan 25, 2015, and
added support for Configuration
    May 25, 2017: Switch to boto3 | Below is the instruction that describes the task:
### Input:
Convenience function for getting AWS S3 objects
Added by [email protected], Jan 9, 2015
Added to aws_adapter build by [email protected], Jan 25, 2015, and
added support for Configuration
May 25, 2017: Switch to boto3
### Response:
def get_aws_s3_handle(config_map):
"""Convenience function for getting AWS S3 objects
Added by [email protected], Jan 9, 2015
Added to aws_adapter build by [email protected], Jan 25, 2015, and
added support for Configuration
May 25, 2017: Switch to boto3
"""
url = 'https://' + config_map['s3_bucket'] + '.s3.amazonaws.com'
if not AWS_CLIENT.is_aws_s3_client_set():
client = boto3.client(
's3',
aws_access_key_id=config_map['put_public_key'],
aws_secret_access_key=config_map['put_private_key']
)
AWS_CLIENT.set_aws_s3_client(client)
else:
client = AWS_CLIENT.s3
return client, url |
def process_insert_get_id(self, query, sql, values, sequence=None):
"""
Process an "insert get ID" query.
:param query: A QueryBuilder instance
:type query: QueryBuilder
:param sql: The sql query to execute
:type sql: str
:param values: The value bindings
:type values: list
:param sequence: The ids sequence
:type sequence: str
:return: The inserted row id
:rtype: int
"""
result = query.get_connection().select_from_write_connection(sql, values)
id = result[0][0]
if isinstance(id, int):
return id
if str(id).isdigit():
return int(id)
return id | Process an "insert get ID" query.
:param query: A QueryBuilder instance
:type query: QueryBuilder
:param sql: The sql query to execute
:type sql: str
:param values: The value bindings
:type values: list
:param sequence: The ids sequence
:type sequence: str
:return: The inserted row id
    :rtype: int | Below is the instruction that describes the task:
### Input:
Process an "insert get ID" query.
:param query: A QueryBuilder instance
:type query: QueryBuilder
:param sql: The sql query to execute
:type sql: str
:param values: The value bindings
:type values: list
:param sequence: The ids sequence
:type sequence: str
:return: The inserted row id
:rtype: int
### Response:
def process_insert_get_id(self, query, sql, values, sequence=None):
"""
Process an "insert get ID" query.
:param query: A QueryBuilder instance
:type query: QueryBuilder
:param sql: The sql query to execute
:type sql: str
:param values: The value bindings
:type values: list
:param sequence: The ids sequence
:type sequence: str
:return: The inserted row id
:rtype: int
"""
result = query.get_connection().select_from_write_connection(sql, values)
id = result[0][0]
if isinstance(id, int):
return id
if str(id).isdigit():
return int(id)
return id |
def diff_prettyHtml(self, diffs):
"""Convert a diff array into a pretty HTML report.
Args:
diffs: Array of diff tuples.
Returns:
HTML representation.
"""
html = []
for (op, data) in diffs:
text = (data.replace("&", "&").replace("<", "<")
.replace(">", ">").replace("\n", "¶<br>"))
if op == self.DIFF_INSERT:
html.append("<ins style=\"background:#e6ffe6;\">%s</ins>" % text)
elif op == self.DIFF_DELETE:
html.append("<del style=\"background:#ffe6e6;\">%s</del>" % text)
elif op == self.DIFF_EQUAL:
html.append("<span>%s</span>" % text)
return "".join(html) | Convert a diff array into a pretty HTML report.
Args:
diffs: Array of diff tuples.
Returns:
      HTML representation. | Below is the instruction that describes the task:
### Input:
Convert a diff array into a pretty HTML report.
Args:
diffs: Array of diff tuples.
Returns:
HTML representation.
### Response:
def diff_prettyHtml(self, diffs):
"""Convert a diff array into a pretty HTML report.
Args:
diffs: Array of diff tuples.
Returns:
HTML representation.
"""
html = []
for (op, data) in diffs:
text = (data.replace("&", "&").replace("<", "<")
.replace(">", ">").replace("\n", "¶<br>"))
if op == self.DIFF_INSERT:
html.append("<ins style=\"background:#e6ffe6;\">%s</ins>" % text)
elif op == self.DIFF_DELETE:
html.append("<del style=\"background:#ffe6e6;\">%s</del>" % text)
elif op == self.DIFF_EQUAL:
html.append("<span>%s</span>" % text)
return "".join(html) |
def get_config(context):
"""
Return the formatted javascript for any disqus config variables.
"""
conf_vars = ['disqus_developer',
'disqus_identifier',
'disqus_url',
'disqus_title',
'disqus_category_id'
]
js = '\tvar {} = "{}";'
output = [js.format(item, context[item]) for item in conf_vars \
if item in context]
    return '\n'.join(output) | Return the formatted javascript for any disqus config variables. | Below is the instruction that describes the task:
### Input:
Return the formatted javascript for any disqus config variables.
### Response:
def get_config(context):
"""
Return the formatted javascript for any disqus config variables.
"""
conf_vars = ['disqus_developer',
'disqus_identifier',
'disqus_url',
'disqus_title',
'disqus_category_id'
]
js = '\tvar {} = "{}";'
output = [js.format(item, context[item]) for item in conf_vars \
if item in context]
return '\n'.join(output) |
def point_rotate(pt, ax, theta):
""" Rotate a 3-D point around a 3-D axis through the origin.
Handedness is a counter-clockwise rotation when viewing the rotation
axis as pointing at the observer. Thus, in a right-handed x-y-z frame,
a 90deg rotation of (1,0,0) around the z-axis (0,0,1) yields a point at
(0,1,0).
.. todo:: Complete point_rotate docstring
Raises
------
ValueError : If theta is nonscalar
ValueError : If pt or ax are not reducible to 3-D vectors
ValueError : If norm of ax is too small
"""
# Imports
import numpy as np
# Ensure pt is reducible to 3-D vector.
pt = make_nd_vec(pt, nd=3, t=np.float64, norm=False)
# Calculate the rotation
rot_pt = np.dot(mtx_rot(ax, theta, reps=1), pt)
# Should be ready to return
return rot_pt | Rotate a 3-D point around a 3-D axis through the origin.
Handedness is a counter-clockwise rotation when viewing the rotation
axis as pointing at the observer. Thus, in a right-handed x-y-z frame,
a 90deg rotation of (1,0,0) around the z-axis (0,0,1) yields a point at
(0,1,0).
.. todo:: Complete point_rotate docstring
Raises
------
ValueError : If theta is nonscalar
ValueError : If pt or ax are not reducible to 3-D vectors
    ValueError : If norm of ax is too small | Below is the instruction that describes the task:
### Input:
Rotate a 3-D point around a 3-D axis through the origin.
Handedness is a counter-clockwise rotation when viewing the rotation
axis as pointing at the observer. Thus, in a right-handed x-y-z frame,
a 90deg rotation of (1,0,0) around the z-axis (0,0,1) yields a point at
(0,1,0).
.. todo:: Complete point_rotate docstring
Raises
------
ValueError : If theta is nonscalar
ValueError : If pt or ax are not reducible to 3-D vectors
ValueError : If norm of ax is too small
### Response:
def point_rotate(pt, ax, theta):
""" Rotate a 3-D point around a 3-D axis through the origin.
Handedness is a counter-clockwise rotation when viewing the rotation
axis as pointing at the observer. Thus, in a right-handed x-y-z frame,
a 90deg rotation of (1,0,0) around the z-axis (0,0,1) yields a point at
(0,1,0).
.. todo:: Complete point_rotate docstring
Raises
------
ValueError : If theta is nonscalar
ValueError : If pt or ax are not reducible to 3-D vectors
ValueError : If norm of ax is too small
"""
# Imports
import numpy as np
# Ensure pt is reducible to 3-D vector.
pt = make_nd_vec(pt, nd=3, t=np.float64, norm=False)
# Calculate the rotation
rot_pt = np.dot(mtx_rot(ax, theta, reps=1), pt)
# Should be ready to return
return rot_pt |
def _request_devices(self, url, _type):
"""Request list of devices."""
res = self._request(url)
        return res.get(_type) if res else {} | Request list of devices. | Below is the instruction that describes the task:
### Input:
Request list of devices.
### Response:
def _request_devices(self, url, _type):
"""Request list of devices."""
res = self._request(url)
return res.get(_type) if res else {} |
def set_spectator_mode(self, mode=True):
"""
When the flow is in spectator_mode, we have to disable signals, pickle dump and possible callbacks
A spectator can still operate on the flow but the new status of the flow won't be saved in
the pickle file. Usually the flow is in spectator mode when we are already running it via
the scheduler or other means and we should not interfere with its evolution.
This is the reason why signals and callbacks must be disabled.
Unfortunately preventing client-code from calling methods with side-effects when
the flow is in spectator mode is not easy (e.g. flow.cancel will cancel the tasks submitted to the
queue and the flow used by the scheduler won't see this change!
"""
# Set the flags of all the nodes in the flow.
mode = bool(mode)
self.in_spectator_mode = mode
for node in self.iflat_nodes():
node.in_spectator_mode = mode
# connect/disconnect signals depending on mode.
if not mode:
self.connect_signals()
else:
self.disconnect_signals() | When the flow is in spectator_mode, we have to disable signals, pickle dump and possible callbacks
A spectator can still operate on the flow but the new status of the flow won't be saved in
the pickle file. Usually the flow is in spectator mode when we are already running it via
the scheduler or other means and we should not interfere with its evolution.
This is the reason why signals and callbacks must be disabled.
Unfortunately preventing client-code from calling methods with side-effects when
the flow is in spectator mode is not easy (e.g. flow.cancel will cancel the tasks submitted to the
    queue and the flow used by the scheduler won't see this change! | Below is the instruction that describes the task:
### Input:
When the flow is in spectator_mode, we have to disable signals, pickle dump and possible callbacks
A spectator can still operate on the flow but the new status of the flow won't be saved in
the pickle file. Usually the flow is in spectator mode when we are already running it via
the scheduler or other means and we should not interfere with its evolution.
This is the reason why signals and callbacks must be disabled.
Unfortunately preventing client-code from calling methods with side-effects when
the flow is in spectator mode is not easy (e.g. flow.cancel will cancel the tasks submitted to the
queue and the flow used by the scheduler won't see this change!
### Response:
def set_spectator_mode(self, mode=True):
"""
When the flow is in spectator_mode, we have to disable signals, pickle dump and possible callbacks
A spectator can still operate on the flow but the new status of the flow won't be saved in
the pickle file. Usually the flow is in spectator mode when we are already running it via
the scheduler or other means and we should not interfere with its evolution.
This is the reason why signals and callbacks must be disabled.
Unfortunately preventing client-code from calling methods with side-effects when
the flow is in spectator mode is not easy (e.g. flow.cancel will cancel the tasks submitted to the
queue and the flow used by the scheduler won't see this change!
"""
# Set the flags of all the nodes in the flow.
mode = bool(mode)
self.in_spectator_mode = mode
for node in self.iflat_nodes():
node.in_spectator_mode = mode
# connect/disconnect signals depending on mode.
if not mode:
self.connect_signals()
else:
self.disconnect_signals() |
def save(self):
"""Save the changes to the instance and any related objects."""
# first call save with commit=False for all Forms
for form in self._forms:
if isinstance(form, BaseForm):
form.save(commit=False)
# call save on the instance
self.instance.save()
# call any post-commit hooks that have been stashed on Forms
for form in self.forms:
if isinstance(form, BaseForm):
if hasattr(form, 'save_m2m'):
form.save_m2m()
if hasattr(form, 'save_related'):
form.save_related()
# call save on any formsets
for form in self._forms:
if isinstance(form, BaseFormSet):
form.save(commit=True)
    return self.instance | Save the changes to the instance and any related objects. | Below is the instruction that describes the task:
### Input:
Save the changes to the instance and any related objects.
### Response:
def save(self):
"""Save the changes to the instance and any related objects."""
# first call save with commit=False for all Forms
for form in self._forms:
if isinstance(form, BaseForm):
form.save(commit=False)
# call save on the instance
self.instance.save()
# call any post-commit hooks that have been stashed on Forms
for form in self.forms:
if isinstance(form, BaseForm):
if hasattr(form, 'save_m2m'):
form.save_m2m()
if hasattr(form, 'save_related'):
form.save_related()
# call save on any formsets
for form in self._forms:
if isinstance(form, BaseFormSet):
form.save(commit=True)
return self.instance |
def sort_func(self, key):
"""Logic for sorting keys in a `Spectrum` relative to one another."""
if key == self._KEYS.TIME:
return 'aaa'
if key == self._KEYS.DATA:
return 'zzy'
if key == self._KEYS.SOURCE:
return 'zzz'
        return key | Logic for sorting keys in a `Spectrum` relative to one another. | Below is the instruction that describes the task:
### Input:
Logic for sorting keys in a `Spectrum` relative to one another.
### Response:
def sort_func(self, key):
"""Logic for sorting keys in a `Spectrum` relative to one another."""
if key == self._KEYS.TIME:
return 'aaa'
if key == self._KEYS.DATA:
return 'zzy'
if key == self._KEYS.SOURCE:
return 'zzz'
return key |
def mask_cmp_op(x, y, op, allowed_types):
"""
Apply the function `op` to only non-null points in x and y.
Parameters
----------
x : array-like
y : array-like
op : binary operation
allowed_types : class or tuple of classes
Returns
-------
result : ndarray[bool]
"""
# TODO: Can we make the allowed_types arg unnecessary?
xrav = x.ravel()
result = np.empty(x.size, dtype=bool)
if isinstance(y, allowed_types):
yrav = y.ravel()
mask = notna(xrav) & notna(yrav)
result[mask] = op(np.array(list(xrav[mask])),
np.array(list(yrav[mask])))
else:
mask = notna(xrav)
result[mask] = op(np.array(list(xrav[mask])), y)
if op == operator.ne: # pragma: no cover
np.putmask(result, ~mask, True)
else:
np.putmask(result, ~mask, False)
result = result.reshape(x.shape)
return result | Apply the function `op` to only non-null points in x and y.
Parameters
----------
x : array-like
y : array-like
op : binary operation
allowed_types : class or tuple of classes
Returns
-------
    result : ndarray[bool] | Below is the instruction that describes the task:
### Input:
Apply the function `op` to only non-null points in x and y.
Parameters
----------
x : array-like
y : array-like
op : binary operation
allowed_types : class or tuple of classes
Returns
-------
result : ndarray[bool]
### Response:
def mask_cmp_op(x, y, op, allowed_types):
"""
Apply the function `op` to only non-null points in x and y.
Parameters
----------
x : array-like
y : array-like
op : binary operation
allowed_types : class or tuple of classes
Returns
-------
result : ndarray[bool]
"""
# TODO: Can we make the allowed_types arg unnecessary?
xrav = x.ravel()
result = np.empty(x.size, dtype=bool)
if isinstance(y, allowed_types):
yrav = y.ravel()
mask = notna(xrav) & notna(yrav)
result[mask] = op(np.array(list(xrav[mask])),
np.array(list(yrav[mask])))
else:
mask = notna(xrav)
result[mask] = op(np.array(list(xrav[mask])), y)
if op == operator.ne: # pragma: no cover
np.putmask(result, ~mask, True)
else:
np.putmask(result, ~mask, False)
result = result.reshape(x.shape)
return result |
def north_arrow_path(feature, parent):
"""Retrieve the full path of default north arrow logo."""
_ = feature, parent # NOQA
north_arrow_file = setting(inasafe_north_arrow_path['setting_key'])
if os.path.exists(north_arrow_file):
return north_arrow_file
else:
        LOGGER.info(
            'The custom north arrow is not found in {north_arrow_file}. '
            'Default north arrow will be used.'.format(
                north_arrow_file=north_arrow_file))
        return inasafe_default_settings['north_arrow_path'] | Retrieve the full path of default north arrow logo. | Below is the instruction that describes the task:
### Input:
Retrieve the full path of default north arrow logo.
### Response:
def north_arrow_path(feature, parent):
"""Retrieve the full path of default north arrow logo."""
_ = feature, parent # NOQA
north_arrow_file = setting(inasafe_north_arrow_path['setting_key'])
if os.path.exists(north_arrow_file):
return north_arrow_file
else:
        LOGGER.info(
            'The custom north arrow is not found in {north_arrow_file}. '
            'Default north arrow will be used.'.format(
                north_arrow_file=north_arrow_file))
return inasafe_default_settings['north_arrow_path'] |
def select_dtypes(self, include=None, exclude=None):
"""
Return a subset of the DataFrame's columns based on the column dtypes.
Parameters
----------
include, exclude : scalar or list-like
A selection of dtypes or strings to be included/excluded. At least
one of these parameters must be supplied.
Returns
-------
DataFrame
The subset of the frame including the dtypes in ``include`` and
excluding the dtypes in ``exclude``.
Raises
------
ValueError
* If both of ``include`` and ``exclude`` are empty
* If ``include`` and ``exclude`` have overlapping elements
* If any kind of string dtype is passed in.
Notes
-----
* To select all *numeric* types, use ``np.number`` or ``'number'``
* To select strings you must use the ``object`` dtype, but note that
this will return *all* object dtype columns
* See the `numpy dtype hierarchy
<http://docs.scipy.org/doc/numpy/reference/arrays.scalars.html>`__
* To select datetimes, use ``np.datetime64``, ``'datetime'`` or
``'datetime64'``
* To select timedeltas, use ``np.timedelta64``, ``'timedelta'`` or
``'timedelta64'``
* To select Pandas categorical dtypes, use ``'category'``
* To select Pandas datetimetz dtypes, use ``'datetimetz'`` (new in
0.20.0) or ``'datetime64[ns, tz]'``
Examples
--------
>>> df = pd.DataFrame({'a': [1, 2] * 3,
... 'b': [True, False] * 3,
... 'c': [1.0, 2.0] * 3})
>>> df
a b c
0 1 True 1.0
1 2 False 2.0
2 1 True 1.0
3 2 False 2.0
4 1 True 1.0
5 2 False 2.0
>>> df.select_dtypes(include='bool')
b
0 True
1 False
2 True
3 False
4 True
5 False
>>> df.select_dtypes(include=['float64'])
c
0 1.0
1 2.0
2 1.0
3 2.0
4 1.0
5 2.0
>>> df.select_dtypes(exclude=['int'])
b c
0 True 1.0
1 False 2.0
2 True 1.0
3 False 2.0
4 True 1.0
5 False 2.0
"""
def _get_info_slice(obj, indexer):
"""Slice the info axis of `obj` with `indexer`."""
if not hasattr(obj, '_info_axis_number'):
msg = 'object of type {typ!r} has no info axis'
raise TypeError(msg.format(typ=type(obj).__name__))
slices = [slice(None)] * obj.ndim
slices[obj._info_axis_number] = indexer
return tuple(slices)
if not is_list_like(include):
include = (include,) if include is not None else ()
if not is_list_like(exclude):
exclude = (exclude,) if exclude is not None else ()
selection = tuple(map(frozenset, (include, exclude)))
if not any(selection):
raise ValueError('at least one of include or exclude must be '
'nonempty')
# convert the myriad valid dtypes object to a single representation
include, exclude = map(
lambda x: frozenset(map(infer_dtype_from_object, x)), selection)
for dtypes in (include, exclude):
invalidate_string_dtypes(dtypes)
# can't both include AND exclude!
if not include.isdisjoint(exclude):
raise ValueError('include and exclude overlap on {inc_ex}'.format(
inc_ex=(include & exclude)))
# empty include/exclude -> defaults to True
# three cases (we've already raised if both are empty)
# case 1: empty include, nonempty exclude
# we have True, True, ... True for include, same for exclude
# in the loop below we get the excluded
# and when we call '&' below we get only the excluded
# case 2: nonempty include, empty exclude
# same as case 1, but with include
# case 3: both nonempty
# the "union" of the logic of case 1 and case 2:
# we get the included and excluded, and return their logical and
include_these = Series(not bool(include), index=self.columns)
exclude_these = Series(not bool(exclude), index=self.columns)
def is_dtype_instance_mapper(idx, dtype):
return idx, functools.partial(issubclass, dtype.type)
for idx, f in itertools.starmap(is_dtype_instance_mapper,
enumerate(self.dtypes)):
if include: # checks for the case of empty include or exclude
include_these.iloc[idx] = any(map(f, include))
if exclude:
exclude_these.iloc[idx] = not any(map(f, exclude))
dtype_indexer = include_these & exclude_these
return self.loc[_get_info_slice(self, dtype_indexer)] | Return a subset of the DataFrame's columns based on the column dtypes.
Parameters
----------
include, exclude : scalar or list-like
A selection of dtypes or strings to be included/excluded. At least
one of these parameters must be supplied.
Returns
-------
DataFrame
The subset of the frame including the dtypes in ``include`` and
excluding the dtypes in ``exclude``.
Raises
------
ValueError
* If both of ``include`` and ``exclude`` are empty
* If ``include`` and ``exclude`` have overlapping elements
* If any kind of string dtype is passed in.
Notes
-----
* To select all *numeric* types, use ``np.number`` or ``'number'``
* To select strings you must use the ``object`` dtype, but note that
this will return *all* object dtype columns
* See the `numpy dtype hierarchy
<http://docs.scipy.org/doc/numpy/reference/arrays.scalars.html>`__
* To select datetimes, use ``np.datetime64``, ``'datetime'`` or
``'datetime64'``
* To select timedeltas, use ``np.timedelta64``, ``'timedelta'`` or
``'timedelta64'``
* To select Pandas categorical dtypes, use ``'category'``
* To select Pandas datetimetz dtypes, use ``'datetimetz'`` (new in
0.20.0) or ``'datetime64[ns, tz]'``
Examples
--------
>>> df = pd.DataFrame({'a': [1, 2] * 3,
... 'b': [True, False] * 3,
... 'c': [1.0, 2.0] * 3})
>>> df
a b c
0 1 True 1.0
1 2 False 2.0
2 1 True 1.0
3 2 False 2.0
4 1 True 1.0
5 2 False 2.0
>>> df.select_dtypes(include='bool')
b
0 True
1 False
2 True
3 False
4 True
5 False
>>> df.select_dtypes(include=['float64'])
c
0 1.0
1 2.0
2 1.0
3 2.0
4 1.0
5 2.0
>>> df.select_dtypes(exclude=['int'])
b c
0 True 1.0
1 False 2.0
2 True 1.0
3 False 2.0
4 True 1.0
    5 False 2.0 | Below is the instruction that describes the task:
### Input:
Return a subset of the DataFrame's columns based on the column dtypes.
Parameters
----------
include, exclude : scalar or list-like
A selection of dtypes or strings to be included/excluded. At least
one of these parameters must be supplied.
Returns
-------
DataFrame
The subset of the frame including the dtypes in ``include`` and
excluding the dtypes in ``exclude``.
Raises
------
ValueError
* If both of ``include`` and ``exclude`` are empty
* If ``include`` and ``exclude`` have overlapping elements
* If any kind of string dtype is passed in.
Notes
-----
* To select all *numeric* types, use ``np.number`` or ``'number'``
* To select strings you must use the ``object`` dtype, but note that
this will return *all* object dtype columns
* See the `numpy dtype hierarchy
<http://docs.scipy.org/doc/numpy/reference/arrays.scalars.html>`__
* To select datetimes, use ``np.datetime64``, ``'datetime'`` or
``'datetime64'``
* To select timedeltas, use ``np.timedelta64``, ``'timedelta'`` or
``'timedelta64'``
* To select Pandas categorical dtypes, use ``'category'``
* To select Pandas datetimetz dtypes, use ``'datetimetz'`` (new in
0.20.0) or ``'datetime64[ns, tz]'``
Examples
--------
>>> df = pd.DataFrame({'a': [1, 2] * 3,
... 'b': [True, False] * 3,
... 'c': [1.0, 2.0] * 3})
>>> df
a b c
0 1 True 1.0
1 2 False 2.0
2 1 True 1.0
3 2 False 2.0
4 1 True 1.0
5 2 False 2.0
>>> df.select_dtypes(include='bool')
b
0 True
1 False
2 True
3 False
4 True
5 False
>>> df.select_dtypes(include=['float64'])
c
0 1.0
1 2.0
2 1.0
3 2.0
4 1.0
5 2.0
>>> df.select_dtypes(exclude=['int'])
b c
0 True 1.0
1 False 2.0
2 True 1.0
3 False 2.0
4 True 1.0
5 False 2.0
### Response:
def select_dtypes(self, include=None, exclude=None):
"""
Return a subset of the DataFrame's columns based on the column dtypes.
Parameters
----------
include, exclude : scalar or list-like
A selection of dtypes or strings to be included/excluded. At least
one of these parameters must be supplied.
Returns
-------
DataFrame
The subset of the frame including the dtypes in ``include`` and
excluding the dtypes in ``exclude``.
Raises
------
ValueError
* If both of ``include`` and ``exclude`` are empty
* If ``include`` and ``exclude`` have overlapping elements
* If any kind of string dtype is passed in.
Notes
-----
* To select all *numeric* types, use ``np.number`` or ``'number'``
* To select strings you must use the ``object`` dtype, but note that
this will return *all* object dtype columns
* See the `numpy dtype hierarchy
<http://docs.scipy.org/doc/numpy/reference/arrays.scalars.html>`__
* To select datetimes, use ``np.datetime64``, ``'datetime'`` or
``'datetime64'``
* To select timedeltas, use ``np.timedelta64``, ``'timedelta'`` or
``'timedelta64'``
* To select Pandas categorical dtypes, use ``'category'``
* To select Pandas datetimetz dtypes, use ``'datetimetz'`` (new in
0.20.0) or ``'datetime64[ns, tz]'``
Examples
--------
>>> df = pd.DataFrame({'a': [1, 2] * 3,
... 'b': [True, False] * 3,
... 'c': [1.0, 2.0] * 3})
>>> df
a b c
0 1 True 1.0
1 2 False 2.0
2 1 True 1.0
3 2 False 2.0
4 1 True 1.0
5 2 False 2.0
>>> df.select_dtypes(include='bool')
b
0 True
1 False
2 True
3 False
4 True
5 False
>>> df.select_dtypes(include=['float64'])
c
0 1.0
1 2.0
2 1.0
3 2.0
4 1.0
5 2.0
>>> df.select_dtypes(exclude=['int'])
b c
0 True 1.0
1 False 2.0
2 True 1.0
3 False 2.0
4 True 1.0
5 False 2.0
"""
def _get_info_slice(obj, indexer):
"""Slice the info axis of `obj` with `indexer`."""
if not hasattr(obj, '_info_axis_number'):
msg = 'object of type {typ!r} has no info axis'
raise TypeError(msg.format(typ=type(obj).__name__))
slices = [slice(None)] * obj.ndim
slices[obj._info_axis_number] = indexer
return tuple(slices)
if not is_list_like(include):
include = (include,) if include is not None else ()
if not is_list_like(exclude):
exclude = (exclude,) if exclude is not None else ()
selection = tuple(map(frozenset, (include, exclude)))
if not any(selection):
raise ValueError('at least one of include or exclude must be '
'nonempty')
# convert the myriad valid dtypes object to a single representation
include, exclude = map(
lambda x: frozenset(map(infer_dtype_from_object, x)), selection)
for dtypes in (include, exclude):
invalidate_string_dtypes(dtypes)
# can't both include AND exclude!
if not include.isdisjoint(exclude):
raise ValueError('include and exclude overlap on {inc_ex}'.format(
inc_ex=(include & exclude)))
# empty include/exclude -> defaults to True
# three cases (we've already raised if both are empty)
# case 1: empty include, nonempty exclude
# we have True, True, ... True for include, same for exclude
# in the loop below we get the excluded
# and when we call '&' below we get only the excluded
# case 2: nonempty include, empty exclude
# same as case 1, but with include
# case 3: both nonempty
# the "union" of the logic of case 1 and case 2:
# we get the included and excluded, and return their logical and
include_these = Series(not bool(include), index=self.columns)
exclude_these = Series(not bool(exclude), index=self.columns)
def is_dtype_instance_mapper(idx, dtype):
return idx, functools.partial(issubclass, dtype.type)
for idx, f in itertools.starmap(is_dtype_instance_mapper,
enumerate(self.dtypes)):
if include: # checks for the case of empty include or exclude
include_these.iloc[idx] = any(map(f, include))
if exclude:
exclude_these.iloc[idx] = not any(map(f, exclude))
dtype_indexer = include_these & exclude_these
return self.loc[_get_info_slice(self, dtype_indexer)] |
def _parse(root):
"""Recursively convert an Element into python data types"""
if root.tag == "nil-classes":
return []
elif root.get("type") == "array":
return [_parse(child) for child in root]
d = {}
for child in root:
type = child.get("type") or "string"
if child.get("nil"):
value = None
elif type == "boolean":
value = True if child.text.lower() == "true" else False
elif type == "dateTime":
value = iso8601.parse_date(child.text)
elif type == "decimal":
value = decimal.Decimal(child.text)
elif type == "integer":
value = int(child.text)
else:
value = child.text
d[child.tag] = value
    return d | Recursively convert an Element into python data types | Below is the instruction that describes the task:
### Input:
Recursively convert an Element into python data types
### Response:
def _parse(root):
"""Recursively convert an Element into python data types"""
if root.tag == "nil-classes":
return []
elif root.get("type") == "array":
return [_parse(child) for child in root]
d = {}
for child in root:
type = child.get("type") or "string"
if child.get("nil"):
value = None
elif type == "boolean":
value = True if child.text.lower() == "true" else False
elif type == "dateTime":
value = iso8601.parse_date(child.text)
elif type == "decimal":
value = decimal.Decimal(child.text)
elif type == "integer":
value = int(child.text)
else:
value = child.text
d[child.tag] = value
return d |
def _get_parser_call_method(self, parser_to_method):
"""Return the parser special method 'call' that handles sub-command
calling.
Args:
parser_to_method: mapping of the parser registered name
to the method it is linked to
"""
def inner_call(args=None, instance=None):
"""Allows to call the method invoked from the command line or
provided argument.
Args:
args: list of arguments to parse, defaults to command line
arguments
instance: an instance of the decorated class. If instance is
None, the default, and __init__ is decorated the object will be
instantiated on the fly from the command line arguments
"""
parser = self._cls.parser
namespace = parser.parse_args(_get_args_to_parse(args, sys.argv))
if instance is None:
# If the __init__ method is not part of the method to
# decorate we cannot instantiate the class
if "__init__" not in parser_to_method:
raise ParseThisError(("'__init__' method is not decorated. "
"Please provide an instance to "
"'{}.parser.call' or decorate the "
"'__init___' method with "
"'create_parser'"
.format(self._cls.__name__)))
# We instantiate the class from the command line arguments
instance = _call_method_from_namespace(self._cls, "__init__",
namespace)
method_name = parser_to_method[namespace.method]
return _call_method_from_namespace(instance, method_name, namespace)
return inner_call | Return the parser special method 'call' that handles sub-command
calling.
Args:
parser_to_method: mapping of the parser registered name
    to the method it is linked to | Below is the instruction that describes the task:
### Input:
Return the parser special method 'call' that handles sub-command
calling.
Args:
parser_to_method: mapping of the parser registered name
to the method it is linked to
### Response:
def _get_parser_call_method(self, parser_to_method):
"""Return the parser special method 'call' that handles sub-command
calling.
Args:
parser_to_method: mapping of the parser registered name
to the method it is linked to
"""
def inner_call(args=None, instance=None):
"""Allows to call the method invoked from the command line or
provided argument.
Args:
args: list of arguments to parse, defaults to command line
arguments
instance: an instance of the decorated class. If instance is
None, the default, and __init__ is decorated the object will be
instantiated on the fly from the command line arguments
"""
parser = self._cls.parser
namespace = parser.parse_args(_get_args_to_parse(args, sys.argv))
if instance is None:
# If the __init__ method is not part of the method to
# decorate we cannot instantiate the class
if "__init__" not in parser_to_method:
raise ParseThisError(("'__init__' method is not decorated. "
"Please provide an instance to "
"'{}.parser.call' or decorate the "
"'__init___' method with "
"'create_parser'"
.format(self._cls.__name__)))
# We instantiate the class from the command line arguments
instance = _call_method_from_namespace(self._cls, "__init__",
namespace)
method_name = parser_to_method[namespace.method]
return _call_method_from_namespace(instance, method_name, namespace)
return inner_call |
def describe_root(record, root, indent=0, suppress_values=False):
"""
Args:
record (Evtx.Record):
indent (int):
"""
def format_node(n, extra=None, indent=0):
"""
Depends on closure over `record` and `suppress_values`.
Args:
n (Evtx.Nodes.BXmlNode):
extra (str):
Returns:
str:
"""
ret = ""
indent_s = ' ' * indent
name = n.__class__.__name__
offset = n.offset() - record.offset()
if extra is not None:
ret = "%s%s(offset=%s, %s)" % (indent_s, name, hex(offset), extra)
else:
ret = "%s%s(offset=%s)" % (indent_s, name, hex(offset))
if not suppress_values and isinstance(n, VariantTypeNode):
ret += " --> %s" % (n.string())
if isinstance(n, BXmlTypeNode):
ret += "\n"
ret += describe_root(record, n._root, indent=indent + 1)
return ret
def rec(node, indent=0):
"""
Args:
node (Evtx.Nodes.BXmlNode):
indent (int):
Returns:
str:
"""
ret = ""
if isinstance(node, TemplateInstanceNode):
if node.is_resident_template():
extra = "resident=True, length=%s" % (hex(node.template().data_length()))
ret += "%s\n" % (format_node(node, extra=extra, indent=indent))
ret += rec(node.template(), indent=indent + 1)
else:
ret += "%s\n" % (format_node(node, extra="resident=False", indent=indent))
else:
ret += "%s\n" % (format_node(node, indent=indent))
for child in node.children():
ret += rec(child, indent=indent + 1)
if isinstance(node, RootNode):
ofs = node.tag_and_children_length()
indent_s = ' ' * (indent + 1)
offset = node.offset() - record.offset() + ofs
ret += "%sSubstitutions(offset=%s)\n" % (indent_s, hex(offset))
for sub in node.substitutions():
ret += "%s\n" % (format_node(sub, indent=indent + 2))
return ret
ret = ""
ret += rec(root, indent=indent)
return ret | Args:
record (Evtx.Record):
    indent (int): | Below is the instruction that describes the task:
### Input:
Args:
record (Evtx.Record):
indent (int):
### Response:
def describe_root(record, root, indent=0, suppress_values=False):
"""
Args:
record (Evtx.Record):
indent (int):
"""
def format_node(n, extra=None, indent=0):
"""
Depends on closure over `record` and `suppress_values`.
Args:
n (Evtx.Nodes.BXmlNode):
extra (str):
Returns:
str:
"""
ret = ""
indent_s = ' ' * indent
name = n.__class__.__name__
offset = n.offset() - record.offset()
if extra is not None:
ret = "%s%s(offset=%s, %s)" % (indent_s, name, hex(offset), extra)
else:
ret = "%s%s(offset=%s)" % (indent_s, name, hex(offset))
if not suppress_values and isinstance(n, VariantTypeNode):
ret += " --> %s" % (n.string())
if isinstance(n, BXmlTypeNode):
ret += "\n"
ret += describe_root(record, n._root, indent=indent + 1)
return ret
def rec(node, indent=0):
"""
Args:
node (Evtx.Nodes.BXmlNode):
indent (int):
Returns:
str:
"""
ret = ""
if isinstance(node, TemplateInstanceNode):
if node.is_resident_template():
extra = "resident=True, length=%s" % (hex(node.template().data_length()))
ret += "%s\n" % (format_node(node, extra=extra, indent=indent))
ret += rec(node.template(), indent=indent + 1)
else:
ret += "%s\n" % (format_node(node, extra="resident=False", indent=indent))
else:
ret += "%s\n" % (format_node(node, indent=indent))
for child in node.children():
ret += rec(child, indent=indent + 1)
if isinstance(node, RootNode):
ofs = node.tag_and_children_length()
indent_s = ' ' * (indent + 1)
offset = node.offset() - record.offset() + ofs
ret += "%sSubstitutions(offset=%s)\n" % (indent_s, hex(offset))
for sub in node.substitutions():
ret += "%s\n" % (format_node(sub, indent=indent + 2))
return ret
ret = ""
ret += rec(root, indent=indent)
return ret |
def remove_member_from(self, leaderboard_name, member):
'''
Remove the optional member data for a given member in the named leaderboard.
@param leaderboard_name [String] Name of the leaderboard.
@param member [String] Member name.
'''
pipeline = self.redis_connection.pipeline()
pipeline.zrem(leaderboard_name, member)
pipeline.hdel(self._member_data_key(leaderboard_name), member)
pipeline.execute() | Remove the optional member data for a given member in the named leaderboard.
@param leaderboard_name [String] Name of the leaderboard.
@param member [String] Member name. | Below is the the instruction that describes the task:
### Input:
Remove the optional member data for a given member in the named leaderboard.
@param leaderboard_name [String] Name of the leaderboard.
@param member [String] Member name.
### Response:
def remove_member_from(self, leaderboard_name, member):
'''
Remove the optional member data for a given member in the named leaderboard.
@param leaderboard_name [String] Name of the leaderboard.
@param member [String] Member name.
'''
pipeline = self.redis_connection.pipeline()
pipeline.zrem(leaderboard_name, member)
pipeline.hdel(self._member_data_key(leaderboard_name), member)
pipeline.execute() |
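A minimal standalone sketch of the same removal pattern, written directly against redis-py rather than the leaderboard class above; the connection, the leaderboard name, and the ':member_data' key suffix are assumptions for illustration only.

import redis

conn = redis.Redis()                                   # assumed local Redis instance
leaderboard_name = 'highscores'
member_data_key = leaderboard_name + ':member_data'    # assumed member-data hash key

pipeline = conn.pipeline()
pipeline.zrem(leaderboard_name, 'alice')   # drop the member from the sorted set
pipeline.hdel(member_data_key, 'alice')    # drop the member's optional data from the hash
pipeline.execute()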
def get_region():
"""Use the environment to get the current region"""
global _REGION
if _REGION is None:
region_name = os.getenv("AWS_DEFAULT_REGION") or "us-east-1"
region_dict = {r.name: r for r in boto.regioninfo.get_regions("ec2")}
if region_name not in region_dict:
raise ValueError("No such EC2 region: {}. Check AWS_DEFAULT_REGION "
"environment variable".format(region_name))
_REGION = region_dict[region_name]
return _REGION | Use the environment to get the current region | Below is the the instruction that describes the task:
### Input:
Use the environment to get the current region
### Response:
def get_region():
"""Use the environment to get the current region"""
global _REGION
if _REGION is None:
region_name = os.getenv("AWS_DEFAULT_REGION") or "us-east-1"
region_dict = {r.name: r for r in boto.regioninfo.get_regions("ec2")}
if region_name not in region_dict:
raise ValueError("No such EC2 region: {}. Check AWS_DEFAULT_REGION "
"environment variable".format(region_name))
_REGION = region_dict[region_name]
return _REGION |
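A hedged usage sketch of the helper above; it assumes old-style boto is installed and that get_region and its module-level _REGION cache are importable, and the region name is only an example.

import os

os.environ['AWS_DEFAULT_REGION'] = 'us-west-2'   # example region
region = get_region()                            # resolves and caches the boto RegionInfo
print(region.name)                               # 'us-west-2' on a machine where boto can enumerate EC2 regions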
def parse(self, method, endpoint, body):
''' calls parse on list or detail '''
if isinstance(body, dict): # request body was already parsed
return body
if endpoint == 'list':
return self.parse_list(body)
return self.parse_detail(body) | calls parse on list or detail | Below is the the instruction that describes the task:
### Input:
calls parse on list or detail
### Response:
def parse(self, method, endpoint, body):
''' calls parse on list or detail '''
if isinstance(body, dict): # request body was already parsed
return body
if endpoint == 'list':
return self.parse_list(body)
return self.parse_detail(body) |
def infile_path(self) -> Optional[PurePath]:
"""
Read-only property.
:return: A ``pathlib.PurePath`` object or ``None``.
"""
        if self.__infile_path:
return Path(self.__infile_path).expanduser()
return None | Read-only property.
:return: A ``pathlib.PurePath`` object or ``None``. | Below is the the instruction that describes the task:
### Input:
Read-only property.
:return: A ``pathlib.PurePath`` object or ``None``.
### Response:
def infile_path(self) -> Optional[PurePath]:
"""
Read-only property.
:return: A ``pathlib.PurePath`` object or ``None``.
"""
        if self.__infile_path:
return Path(self.__infile_path).expanduser()
return None |
def when_value_edited(self, *args, **kargs):
""" Overrided to prevent user from selecting too many instances """
if len(self.value) > self.instance_num:
self.value.pop(-2)
        self.display() | Overridden to prevent user from selecting too many instances | Below is the the instruction that describes the task:
### Input:
Overridden to prevent user from selecting too many instances
### Response:
def when_value_edited(self, *args, **kargs):
""" Overrided to prevent user from selecting too many instances """
if len(self.value) > self.instance_num:
self.value.pop(-2)
self.display() |
def linefeed(self):
"""Perform an index and, if :data:`~pyte.modes.LNM` is set, a
carriage return.
"""
self.index()
if mo.LNM in self.mode:
self.carriage_return() | Perform an index and, if :data:`~pyte.modes.LNM` is set, a
carriage return. | Below is the the instruction that describes the task:
### Input:
Perform an index and, if :data:`~pyte.modes.LNM` is set, a
carriage return.
### Response:
def linefeed(self):
"""Perform an index and, if :data:`~pyte.modes.LNM` is set, a
carriage return.
"""
self.index()
if mo.LNM in self.mode:
self.carriage_return() |
def receive_nak_requesting(self, pkt):
"""Receive NAK in REQUESTING state."""
logger.debug("C3.1. Received NAK?, in REQUESTING state.")
if self.process_received_nak(pkt):
logger.debug("C3.1: T. Received NAK, in REQUESTING state, "
"raise INIT.")
raise self.INIT() | Receive NAK in REQUESTING state. | Below is the the instruction that describes the task:
### Input:
Receive NAK in REQUESTING state.
### Response:
def receive_nak_requesting(self, pkt):
"""Receive NAK in REQUESTING state."""
logger.debug("C3.1. Received NAK?, in REQUESTING state.")
if self.process_received_nak(pkt):
logger.debug("C3.1: T. Received NAK, in REQUESTING state, "
"raise INIT.")
raise self.INIT() |
def clear_all(tgt=None, tgt_type='glob'):
'''
.. versionchanged:: 2017.7.0
The ``expr_form`` argument has been renamed to ``tgt_type``, earlier
releases must use ``expr_form``.
Clear the cached pillar, grains, and mine data of the targeted minions
CLI Example:
.. code-block:: bash
salt-run cache.clear_all
'''
return _clear_cache(tgt,
tgt_type,
clear_pillar_flag=True,
clear_grains_flag=True,
clear_mine_flag=True) | .. versionchanged:: 2017.7.0
The ``expr_form`` argument has been renamed to ``tgt_type``, earlier
releases must use ``expr_form``.
Clear the cached pillar, grains, and mine data of the targeted minions
CLI Example:
.. code-block:: bash
salt-run cache.clear_all | Below is the the instruction that describes the task:
### Input:
.. versionchanged:: 2017.7.0
The ``expr_form`` argument has been renamed to ``tgt_type``, earlier
releases must use ``expr_form``.
Clear the cached pillar, grains, and mine data of the targeted minions
CLI Example:
.. code-block:: bash
salt-run cache.clear_all
### Response:
def clear_all(tgt=None, tgt_type='glob'):
'''
.. versionchanged:: 2017.7.0
The ``expr_form`` argument has been renamed to ``tgt_type``, earlier
releases must use ``expr_form``.
Clear the cached pillar, grains, and mine data of the targeted minions
CLI Example:
.. code-block:: bash
salt-run cache.clear_all
'''
return _clear_cache(tgt,
tgt_type,
clear_pillar_flag=True,
clear_grains_flag=True,
clear_mine_flag=True) |
def load_graphml(filename, folder=None, node_type=int):
"""
Load a GraphML file from disk and convert the node/edge attributes to
correct data types.
Parameters
----------
filename : string
the name of the graphml file (including file extension)
folder : string
the folder containing the file, if None, use default data folder
node_type : type
(Python type (default: int)) - Convert node ids to this type
Returns
-------
networkx multidigraph
"""
start_time = time.time()
# read the graph from disk
if folder is None:
folder = settings.data_folder
path = os.path.join(folder, filename)
G = nx.MultiDiGraph(nx.read_graphml(path, node_type=node_type))
# convert graph crs attribute from saved string to correct dict data type
G.graph['crs'] = ast.literal_eval(G.graph['crs'])
if 'streets_per_node' in G.graph:
G.graph['streets_per_node'] = ast.literal_eval(G.graph['streets_per_node'])
# convert numeric node tags from string to numeric data types
log('Converting node and edge attribute data types')
for _, data in G.nodes(data=True):
data['osmid'] = node_type(data['osmid'])
data['x'] = float(data['x'])
data['y'] = float(data['y'])
# convert numeric, bool, and list node tags from string to correct data types
for _, _, data in G.edges(data=True, keys=False):
# first parse oneway to bool and length to float - they should always
# have only 1 value each
data['oneway'] = ast.literal_eval(data['oneway'])
data['length'] = float(data['length'])
# these attributes might have a single value, or a list if edge's
# topology was simplified
for attr in ['highway', 'name', 'bridge', 'tunnel', 'lanes', 'ref', 'maxspeed', 'service', 'access', 'area', 'landuse', 'width', 'est_width']:
# if this edge has this attribute, and it starts with '[' and ends
# with ']', then it's a list to be parsed
if attr in data and data[attr][0] == '[' and data[attr][-1] == ']':
# try to convert the string list to a list type, else leave as
# single-value string (and leave as string if error)
try:
data[attr] = ast.literal_eval(data[attr])
except:
pass
# osmid might have a single value or a list
if 'osmid' in data:
if data['osmid'][0] == '[' and data['osmid'][-1] == ']':
# if it's a list, eval the list then convert each element to node_type
data['osmid'] = [node_type(i) for i in ast.literal_eval(data['osmid'])]
else:
# if it's not a list, convert it to the node_type
data['osmid'] = node_type(data['osmid'])
# if geometry attribute exists, load the string as well-known text to
# shapely LineString
if 'geometry' in data:
data['geometry'] = wkt.loads(data['geometry'])
# remove node_default and edge_default metadata keys if they exist
if 'node_default' in G.graph:
del G.graph['node_default']
if 'edge_default' in G.graph:
del G.graph['edge_default']
log('Loaded graph with {:,} nodes and {:,} edges in {:,.2f} seconds from "{}"'.format(len(list(G.nodes())),
len(list(G.edges())),
time.time()-start_time,
path))
return G | Load a GraphML file from disk and convert the node/edge attributes to
correct data types.
Parameters
----------
filename : string
the name of the graphml file (including file extension)
folder : string
the folder containing the file, if None, use default data folder
node_type : type
(Python type (default: int)) - Convert node ids to this type
Returns
-------
networkx multidigraph | Below is the the instruction that describes the task:
### Input:
Load a GraphML file from disk and convert the node/edge attributes to
correct data types.
Parameters
----------
filename : string
the name of the graphml file (including file extension)
folder : string
the folder containing the file, if None, use default data folder
node_type : type
(Python type (default: int)) - Convert node ids to this type
Returns
-------
networkx multidigraph
### Response:
def load_graphml(filename, folder=None, node_type=int):
"""
Load a GraphML file from disk and convert the node/edge attributes to
correct data types.
Parameters
----------
filename : string
the name of the graphml file (including file extension)
folder : string
the folder containing the file, if None, use default data folder
node_type : type
(Python type (default: int)) - Convert node ids to this type
Returns
-------
networkx multidigraph
"""
start_time = time.time()
# read the graph from disk
if folder is None:
folder = settings.data_folder
path = os.path.join(folder, filename)
G = nx.MultiDiGraph(nx.read_graphml(path, node_type=node_type))
# convert graph crs attribute from saved string to correct dict data type
G.graph['crs'] = ast.literal_eval(G.graph['crs'])
if 'streets_per_node' in G.graph:
G.graph['streets_per_node'] = ast.literal_eval(G.graph['streets_per_node'])
# convert numeric node tags from string to numeric data types
log('Converting node and edge attribute data types')
for _, data in G.nodes(data=True):
data['osmid'] = node_type(data['osmid'])
data['x'] = float(data['x'])
data['y'] = float(data['y'])
# convert numeric, bool, and list node tags from string to correct data types
for _, _, data in G.edges(data=True, keys=False):
# first parse oneway to bool and length to float - they should always
# have only 1 value each
data['oneway'] = ast.literal_eval(data['oneway'])
data['length'] = float(data['length'])
# these attributes might have a single value, or a list if edge's
# topology was simplified
for attr in ['highway', 'name', 'bridge', 'tunnel', 'lanes', 'ref', 'maxspeed', 'service', 'access', 'area', 'landuse', 'width', 'est_width']:
# if this edge has this attribute, and it starts with '[' and ends
# with ']', then it's a list to be parsed
if attr in data and data[attr][0] == '[' and data[attr][-1] == ']':
# try to convert the string list to a list type, else leave as
# single-value string (and leave as string if error)
try:
data[attr] = ast.literal_eval(data[attr])
except:
pass
# osmid might have a single value or a list
if 'osmid' in data:
if data['osmid'][0] == '[' and data['osmid'][-1] == ']':
# if it's a list, eval the list then convert each element to node_type
data['osmid'] = [node_type(i) for i in ast.literal_eval(data['osmid'])]
else:
# if it's not a list, convert it to the node_type
data['osmid'] = node_type(data['osmid'])
# if geometry attribute exists, load the string as well-known text to
# shapely LineString
if 'geometry' in data:
data['geometry'] = wkt.loads(data['geometry'])
# remove node_default and edge_default metadata keys if they exist
if 'node_default' in G.graph:
del G.graph['node_default']
if 'edge_default' in G.graph:
del G.graph['edge_default']
log('Loaded graph with {:,} nodes and {:,} edges in {:,.2f} seconds from "{}"'.format(len(list(G.nodes())),
len(list(G.edges())),
time.time()-start_time,
path))
return G |
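A short, hypothetical round trip showing how the loader above is typically called; the file name and folder are placeholders and assume a graph previously written in this GraphML layout.

G = load_graphml('network.graphml', folder='data', node_type=int)
print(len(G.nodes()), len(G.edges()))   # basic sanity check on the reloaded multidigraph
print(G.graph['crs'])                   # the crs string has been parsed back into a dict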
def _resizeColumnToContents(self, header, data, col, limit_ms):
"""Resize a column by its contents."""
hdr_width = self._sizeHintForColumn(header, col, limit_ms)
data_width = self._sizeHintForColumn(data, col, limit_ms)
if data_width > hdr_width:
width = min(self.max_width, data_width)
elif hdr_width > data_width * 2:
width = max(min(hdr_width, self.min_trunc), min(self.max_width,
data_width))
else:
width = max(min(self.max_width, hdr_width), self.min_trunc)
header.setColumnWidth(col, width) | Resize a column by its contents. | Below is the the instruction that describes the task:
### Input:
Resize a column by its contents.
### Response:
def _resizeColumnToContents(self, header, data, col, limit_ms):
"""Resize a column by its contents."""
hdr_width = self._sizeHintForColumn(header, col, limit_ms)
data_width = self._sizeHintForColumn(data, col, limit_ms)
if data_width > hdr_width:
width = min(self.max_width, data_width)
elif hdr_width > data_width * 2:
width = max(min(hdr_width, self.min_trunc), min(self.max_width,
data_width))
else:
width = max(min(self.max_width, hdr_width), self.min_trunc)
header.setColumnWidth(col, width) |
def file_path(self, request, response=None, info=None):
"""
        When a crawled resource is stored in Qiniu, what key should be used? The returned path is a JSON string that contains the bucket and key information.
"""
        return json.dumps(self._extract_key_info(request)) | When a crawled resource is stored in Qiniu, what key should be used? The returned path is a JSON string that contains the bucket and key information. | Below is the the instruction that describes the task:
### Input:
When a crawled resource is stored in Qiniu, what key should be used? The returned path is a JSON string that contains the bucket and key information.
### Response:
def file_path(self, request, response=None, info=None):
"""
        When a crawled resource is stored in Qiniu, what key should be used? The returned path is a JSON string that contains the bucket and key information.
"""
return json.dumps(self._extract_key_info(request)) |
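To make the translated docstring concrete, here is a tiny illustration of the JSON "path" idea; the dict shape returned by _extract_key_info is an assumption based on the docstring, not taken from the pipeline's source.

import json

info = {'bucket': 'crawl-assets', 'key': 'img/2018/logo.png'}   # assumed shape of _extract_key_info(request)
path = json.dumps(info)               # what file_path() would hand back to the pipeline
print(json.loads(path)['bucket'])     # downstream code can split the bucket and key back out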
def invoke(self, results):
"""
Handles invocation of the component. The default implementation invokes
it with positional arguments based on order of dependency declaration.
"""
args = [results.get(d) for d in self.deps]
return self.component(*args) | Handles invocation of the component. The default implementation invokes
it with positional arguments based on order of dependency declaration. | Below is the the instruction that describes the task:
### Input:
Handles invocation of the component. The default implementation invokes
it with positional arguments based on order of dependency declaration.
### Response:
def invoke(self, results):
"""
Handles invocation of the component. The default implementation invokes
it with positional arguments based on order of dependency declaration.
"""
args = [results.get(d) for d in self.deps]
return self.component(*args) |
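A self-contained sketch of the calling convention described above, using a stand-in wrapper class; the Wrapper name and constructor are invented here purely to show how the deps order maps to positional arguments.

class Wrapper:
    def __init__(self, component, deps):
        self.component = component
        self.deps = deps

    def invoke(self, results):
        # same convention as above: one positional argument per declared dependency
        args = [results.get(d) for d in self.deps]
        return self.component(*args)

sub = Wrapper(lambda a, b: a - b, deps=['x', 'y'])
print(sub.invoke({'x': 5, 'y': 3}))   # 2: 'x' is bound first because it is declared first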
def get_device_model(self, cat, sub_cat, key=''):
"""Return the model name given cat/subcat or product key"""
if cat + ':' + sub_cat in self.device_models:
return self.device_models[cat + ':' + sub_cat]
else:
for i_key, i_val in self.device_models.items():
if 'key' in i_val:
if i_val['key'] == key:
return i_val
return False | Return the model name given cat/subcat or product key | Below is the the instruction that describes the task:
### Input:
Return the model name given cat/subcat or product key
### Response:
def get_device_model(self, cat, sub_cat, key=''):
"""Return the model name given cat/subcat or product key"""
if cat + ':' + sub_cat in self.device_models:
return self.device_models[cat + ':' + sub_cat]
else:
for i_key, i_val in self.device_models.items():
if 'key' in i_val:
if i_val['key'] == key:
return i_val
return False |
def chi_eff(self):
"""Returns the effective spin."""
return conversions.chi_eff(self.mass1, self.mass2, self.spin1z,
self.spin2z) | Returns the effective spin. | Below is the the instruction that describes the task:
### Input:
Returns the effective spin.
### Response:
def chi_eff(self):
"""Returns the effective spin."""
return conversions.chi_eff(self.mass1, self.mass2, self.spin1z,
self.spin2z) |
def get(self, name):
"""
Returns the struct, enum, or interface with the given name, or raises RpcException if
no elements match that name.
:Parameters:
name
Name of struct/enum/interface to return
"""
if self.structs.has_key(name):
return self.structs[name]
elif self.enums.has_key(name):
return self.enums[name]
elif self.interfaces.has_key(name):
return self.interfaces[name]
else:
raise RpcException(ERR_INVALID_PARAMS, "Unknown entity: '%s'" % name) | Returns the struct, enum, or interface with the given name, or raises RpcException if
no elements match that name.
:Parameters:
name
Name of struct/enum/interface to return | Below is the the instruction that describes the task:
### Input:
Returns the struct, enum, or interface with the given name, or raises RpcException if
no elements match that name.
:Parameters:
name
Name of struct/enum/interface to return
### Response:
def get(self, name):
"""
Returns the struct, enum, or interface with the given name, or raises RpcException if
no elements match that name.
:Parameters:
name
Name of struct/enum/interface to return
"""
if self.structs.has_key(name):
return self.structs[name]
elif self.enums.has_key(name):
return self.enums[name]
elif self.interfaces.has_key(name):
return self.interfaces[name]
else:
raise RpcException(ERR_INVALID_PARAMS, "Unknown entity: '%s'" % name) |
def AddShapePointObjectUnsorted(self, shapepoint, problems):
"""Insert a point into a correct position by sequence. """
if (len(self.sequence) == 0 or
shapepoint.shape_pt_sequence >= self.sequence[-1]):
index = len(self.sequence)
elif shapepoint.shape_pt_sequence <= self.sequence[0]:
index = 0
else:
index = bisect.bisect(self.sequence, shapepoint.shape_pt_sequence)
if shapepoint.shape_pt_sequence in self.sequence:
problems.InvalidValue('shape_pt_sequence', shapepoint.shape_pt_sequence,
'The sequence number %d occurs more than once in '
'shape %s.' %
(shapepoint.shape_pt_sequence, self.shape_id))
if shapepoint.shape_dist_traveled is not None and len(self.sequence) > 0:
if (index != len(self.sequence) and
shapepoint.shape_dist_traveled > self.distance[index]):
problems.InvalidValue('shape_dist_traveled',
shapepoint.shape_dist_traveled,
'Each subsequent point in a shape should have '
'a distance value that shouldn\'t be larger '
'than the next ones. In this case, the next '
'distance was %f.' % self.distance[index])
if (index > 0 and
shapepoint.shape_dist_traveled < self.distance[index - 1]):
problems.InvalidValue('shape_dist_traveled',
shapepoint.shape_dist_traveled,
'Each subsequent point in a shape should have '
'a distance value that\'s at least as large as '
'the previous ones. In this case, the previous '
'distance was %f.' % self.distance[index - 1])
if shapepoint.shape_dist_traveled > self.max_distance:
self.max_distance = shapepoint.shape_dist_traveled
self.sequence.insert(index, shapepoint.shape_pt_sequence)
self.distance.insert(index, shapepoint.shape_dist_traveled)
self.points.insert(index, (shapepoint.shape_pt_lat,
shapepoint.shape_pt_lon,
shapepoint.shape_dist_traveled)) | Insert a point into a correct position by sequence. | Below is the the instruction that describes the task:
### Input:
Insert a point into a correct position by sequence.
### Response:
def AddShapePointObjectUnsorted(self, shapepoint, problems):
"""Insert a point into a correct position by sequence. """
if (len(self.sequence) == 0 or
shapepoint.shape_pt_sequence >= self.sequence[-1]):
index = len(self.sequence)
elif shapepoint.shape_pt_sequence <= self.sequence[0]:
index = 0
else:
index = bisect.bisect(self.sequence, shapepoint.shape_pt_sequence)
if shapepoint.shape_pt_sequence in self.sequence:
problems.InvalidValue('shape_pt_sequence', shapepoint.shape_pt_sequence,
'The sequence number %d occurs more than once in '
'shape %s.' %
(shapepoint.shape_pt_sequence, self.shape_id))
if shapepoint.shape_dist_traveled is not None and len(self.sequence) > 0:
if (index != len(self.sequence) and
shapepoint.shape_dist_traveled > self.distance[index]):
problems.InvalidValue('shape_dist_traveled',
shapepoint.shape_dist_traveled,
'Each subsequent point in a shape should have '
'a distance value that shouldn\'t be larger '
'than the next ones. In this case, the next '
'distance was %f.' % self.distance[index])
if (index > 0 and
shapepoint.shape_dist_traveled < self.distance[index - 1]):
problems.InvalidValue('shape_dist_traveled',
shapepoint.shape_dist_traveled,
'Each subsequent point in a shape should have '
'a distance value that\'s at least as large as '
'the previous ones. In this case, the previous '
'distance was %f.' % self.distance[index - 1])
if shapepoint.shape_dist_traveled > self.max_distance:
self.max_distance = shapepoint.shape_dist_traveled
self.sequence.insert(index, shapepoint.shape_pt_sequence)
self.distance.insert(index, shapepoint.shape_dist_traveled)
self.points.insert(index, (shapepoint.shape_pt_lat,
shapepoint.shape_pt_lon,
shapepoint.shape_dist_traveled)) |
def hgetall(key, host=None, port=None, db=None, password=None):
'''
Get all fields and values from a redis hash, returns dict
CLI Example:
.. code-block:: bash
salt '*' redis.hgetall foo_hash
'''
server = _connect(host, port, db, password)
return server.hgetall(key) | Get all fields and values from a redis hash, returns dict
CLI Example:
.. code-block:: bash
salt '*' redis.hgetall foo_hash | Below is the the instruction that describes the task:
### Input:
Get all fields and values from a redis hash, returns dict
CLI Example:
.. code-block:: bash
salt '*' redis.hgetall foo_hash
### Response:
def hgetall(key, host=None, port=None, db=None, password=None):
'''
Get all fields and values from a redis hash, returns dict
CLI Example:
.. code-block:: bash
salt '*' redis.hgetall foo_hash
'''
server = _connect(host, port, db, password)
return server.hgetall(key) |
def connection_delay(self):
"""
Return the number of milliseconds to wait, based on the connection
state, before attempting to send data. When disconnected, this respects
the reconnect backoff time. When connecting, returns 0 to allow
non-blocking connect to finish. When connected, returns a very large
number to handle slow/stalled connections.
"""
time_waited = time.time() - (self.last_attempt or 0)
if self.state is ConnectionStates.DISCONNECTED:
return max(self._reconnect_backoff - time_waited, 0) * 1000
elif self.connecting():
return 0
else:
return float('inf') | Return the number of milliseconds to wait, based on the connection
state, before attempting to send data. When disconnected, this respects
the reconnect backoff time. When connecting, returns 0 to allow
non-blocking connect to finish. When connected, returns a very large
number to handle slow/stalled connections. | Below is the the instruction that describes the task:
### Input:
Return the number of milliseconds to wait, based on the connection
state, before attempting to send data. When disconnected, this respects
the reconnect backoff time. When connecting, returns 0 to allow
non-blocking connect to finish. When connected, returns a very large
number to handle slow/stalled connections.
### Response:
def connection_delay(self):
"""
Return the number of milliseconds to wait, based on the connection
state, before attempting to send data. When disconnected, this respects
the reconnect backoff time. When connecting, returns 0 to allow
non-blocking connect to finish. When connected, returns a very large
number to handle slow/stalled connections.
"""
time_waited = time.time() - (self.last_attempt or 0)
if self.state is ConnectionStates.DISCONNECTED:
return max(self._reconnect_backoff - time_waited, 0) * 1000
elif self.connecting():
return 0
else:
return float('inf') |
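A back-of-the-envelope check of the DISCONNECTED branch with made-up numbers, to show how the backoff converts to milliseconds.

reconnect_backoff = 0.05   # seconds, hypothetical value of self._reconnect_backoff
time_waited = 0.02         # seconds since last_attempt
delay_ms = max(reconnect_backoff - time_waited, 0) * 1000
print(delay_ms)            # 30.0 -> wait another 30 ms before retrying the connect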
def merge_dicts(*dicts, **copy_check):
'''
Combines dictionaries into a single dictionary. If the 'copy' keyword is passed
then the first dictionary is copied before update.
merge_dicts({'a': 1, 'c': 1}, {'a': 2, 'b': 1})
# => {'a': 2, 'b': 1, 'c': 1}
'''
merged = {}
if not dicts:
return merged
for index, merge_dict in enumerate(dicts):
if index == 0 and not copy_check.get('copy'):
merged = merge_dict
else:
merged.update(merge_dict)
return merged | Combines dictionaries into a single dictionary. If the 'copy' keyword is passed
then the first dictionary is copied before update.
merge_dicts({'a': 1, 'c': 1}, {'a': 2, 'b': 1})
# => {'a': 2, 'b': 1, 'c': 1} | Below is the the instruction that describes the task:
### Input:
Combines dictionaries into a single dictionary. If the 'copy' keyword is passed
then the first dictionary is copied before update.
merge_dicts({'a': 1, 'c': 1}, {'a': 2, 'b': 1})
# => {'a': 2, 'b': 1, 'c': 1}
### Response:
def merge_dicts(*dicts, **copy_check):
'''
Combines dictionaries into a single dictionary. If the 'copy' keyword is passed
then the first dictionary is copied before update.
merge_dicts({'a': 1, 'c': 1}, {'a': 2, 'b': 1})
# => {'a': 2, 'b': 1, 'c': 1}
'''
merged = {}
if not dicts:
return merged
for index, merge_dict in enumerate(dicts):
if index == 0 and not copy_check.get('copy'):
merged = merge_dict
else:
merged.update(merge_dict)
return merged |
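A quick usage example of merge_dicts; the dictionaries are arbitrary and only meant to show how the copy keyword protects the first argument from mutation.

defaults = {'host': 'localhost', 'port': 6379}
overrides = {'port': 6380}

merged = merge_dicts(defaults, overrides, copy=True)
print(merged)     # {'host': 'localhost', 'port': 6380}
print(defaults)   # still {'host': 'localhost', 'port': 6379}, because copy=True avoids aliasing

in_place = merge_dicts(defaults, overrides)
print(in_place is defaults)   # True: without copy, the first dict is updated and returned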
def replace(self, photo_file, **kwds):
"""
Endpoint: /photo/<id>/replace.json
Uploads the specified photo file to replace this photo.
"""
result = self._client.photo.replace(self, photo_file, **kwds)
self._replace_fields(result.get_fields()) | Endpoint: /photo/<id>/replace.json
Uploads the specified photo file to replace this photo. | Below is the the instruction that describes the task:
### Input:
Endpoint: /photo/<id>/replace.json
Uploads the specified photo file to replace this photo.
### Response:
def replace(self, photo_file, **kwds):
"""
Endpoint: /photo/<id>/replace.json
Uploads the specified photo file to replace this photo.
"""
result = self._client.photo.replace(self, photo_file, **kwds)
self._replace_fields(result.get_fields()) |
def _recurse_config_to_dict(t_data):
'''
helper function to recurse through a vim object and attempt to return all child objects
'''
if not isinstance(t_data, type(None)):
if isinstance(t_data, list):
t_list = []
for i in t_data:
t_list.append(_recurse_config_to_dict(i))
return t_list
elif isinstance(t_data, dict):
t_dict = {}
for k, v in six.iteritems(t_data):
t_dict[k] = _recurse_config_to_dict(v)
return t_dict
else:
if hasattr(t_data, '__dict__'):
return _recurse_config_to_dict(t_data.__dict__)
else:
return _serializer(t_data) | helper function to recurse through a vim object and attempt to return all child objects | Below is the the instruction that describes the task:
### Input:
helper function to recurse through a vim object and attempt to return all child objects
### Response:
def _recurse_config_to_dict(t_data):
'''
helper function to recurse through a vim object and attempt to return all child objects
'''
if not isinstance(t_data, type(None)):
if isinstance(t_data, list):
t_list = []
for i in t_data:
t_list.append(_recurse_config_to_dict(i))
return t_list
elif isinstance(t_data, dict):
t_dict = {}
for k, v in six.iteritems(t_data):
t_dict[k] = _recurse_config_to_dict(v)
return t_dict
else:
if hasattr(t_data, '__dict__'):
return _recurse_config_to_dict(t_data.__dict__)
else:
return _serializer(t_data) |
def get_signing_key(self, key_type="", owner="", kid=None, **kwargs):
"""
Shortcut to use for signing keys only.
:param key_type: Type of key (rsa, ec, oct, ..)
:param owner: Who is the owner of the keys, "" == me (default)
:param kid: A Key Identifier
:param kwargs: Extra key word arguments
:return: A possibly empty list of keys
"""
return self.get("sig", key_type, owner, kid, **kwargs) | Shortcut to use for signing keys only.
:param key_type: Type of key (rsa, ec, oct, ..)
:param owner: Who is the owner of the keys, "" == me (default)
:param kid: A Key Identifier
:param kwargs: Extra key word arguments
:return: A possibly empty list of keys | Below is the the instruction that describes the task:
### Input:
Shortcut to use for signing keys only.
:param key_type: Type of key (rsa, ec, oct, ..)
:param owner: Who is the owner of the keys, "" == me (default)
:param kid: A Key Identifier
:param kwargs: Extra key word arguments
:return: A possibly empty list of keys
### Response:
def get_signing_key(self, key_type="", owner="", kid=None, **kwargs):
"""
Shortcut to use for signing keys only.
:param key_type: Type of key (rsa, ec, oct, ..)
:param owner: Who is the owner of the keys, "" == me (default)
:param kid: A Key Identifier
:param kwargs: Extra key word arguments
:return: A possibly empty list of keys
"""
return self.get("sig", key_type, owner, kid, **kwargs) |
def _outfp_write_with_check(self, outfp, data, enable_overwrite_check=True):
# type: (BinaryIO, bytes, bool) -> None
'''
Internal method to write data out to the output file descriptor,
ensuring that it doesn't go beyond the bounds of the ISO.
Parameters:
outfp - The file object to write to.
data - The actual data to write.
enable_overwrite_check - Whether to do overwrite checking if it is enabled. Some pieces of code explicitly want to overwrite data, so this allows them to disable the checking.
Returns:
Nothing.
'''
start = outfp.tell()
outfp.write(data)
if self._track_writes:
# After the write, double check that we didn't write beyond the
# boundary of the PVD, and raise a PyCdlibException if we do.
end = outfp.tell()
if end > self.pvd.space_size * self.pvd.logical_block_size():
raise pycdlibexception.PyCdlibInternalError('Wrote past the end of the ISO! (%d > %d)' % (end, self.pvd.space_size * self.pvd.logical_block_size()))
if enable_overwrite_check:
bisect.insort_left(self._write_check_list, self._WriteRange(start, end - 1)) | Internal method to write data out to the output file descriptor,
ensuring that it doesn't go beyond the bounds of the ISO.
Parameters:
outfp - The file object to write to.
data - The actual data to write.
enable_overwrite_check - Whether to do overwrite checking if it is enabled. Some pieces of code explicitly want to overwrite data, so this allows them to disable the checking.
Returns:
Nothing. | Below is the the instruction that describes the task:
### Input:
Internal method to write data out to the output file descriptor,
ensuring that it doesn't go beyond the bounds of the ISO.
Parameters:
outfp - The file object to write to.
data - The actual data to write.
enable_overwrite_check - Whether to do overwrite checking if it is enabled. Some pieces of code explicitly want to overwrite data, so this allows them to disable the checking.
Returns:
Nothing.
### Response:
def _outfp_write_with_check(self, outfp, data, enable_overwrite_check=True):
# type: (BinaryIO, bytes, bool) -> None
'''
Internal method to write data out to the output file descriptor,
ensuring that it doesn't go beyond the bounds of the ISO.
Parameters:
outfp - The file object to write to.
data - The actual data to write.
enable_overwrite_check - Whether to do overwrite checking if it is enabled. Some pieces of code explicitly want to overwrite data, so this allows them to disable the checking.
Returns:
Nothing.
'''
start = outfp.tell()
outfp.write(data)
if self._track_writes:
# After the write, double check that we didn't write beyond the
# boundary of the PVD, and raise a PyCdlibException if we do.
end = outfp.tell()
if end > self.pvd.space_size * self.pvd.logical_block_size():
raise pycdlibexception.PyCdlibInternalError('Wrote past the end of the ISO! (%d > %d)' % (end, self.pvd.space_size * self.pvd.logical_block_size()))
if enable_overwrite_check:
bisect.insort_left(self._write_check_list, self._WriteRange(start, end - 1)) |
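The range bookkeeping above leans on bisect.insort_left to keep written extents sorted; a stripped-down sketch of just that idea, with invented offsets and plain tuples standing in for the _WriteRange objects, looks like this.

import bisect

written = []                                  # stand-in for self._write_check_list
def note_write(start, end):                   # records [start, end - 1], mirroring the code above
    bisect.insort_left(written, (start, end - 1))

note_write(32768, 34816)
note_write(0, 2048)
print(written)   # [(0, 2047), (32768, 34815)] -- kept sorted regardless of write order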
def add_activation_summary(x, types=None, name=None, collections=None):
"""
Call :func:`add_tensor_summary` under a reused 'activation-summary' name scope.
This function is a no-op if not calling from main training tower.
Args:
x (tf.Tensor): the tensor to summary.
types (list[str]): summary types, defaults to ``['sparsity', 'rms', 'histogram']``.
name (str): if is None, use x.name.
collections (list[str]): collections of the summary ops.
"""
ndim = x.get_shape().ndims
if ndim < 2:
logger.warn("Cannot summarize scalar activation {}".format(x.name))
return
if types is None:
types = ['sparsity', 'rms', 'histogram']
with cached_name_scope('activation-summary'):
add_tensor_summary(x, types, name=name, collections=collections) | Call :func:`add_tensor_summary` under a reused 'activation-summary' name scope.
This function is a no-op if not calling from main training tower.
Args:
x (tf.Tensor): the tensor to summary.
types (list[str]): summary types, defaults to ``['sparsity', 'rms', 'histogram']``.
name (str): if is None, use x.name.
collections (list[str]): collections of the summary ops. | Below is the the instruction that describes the task:
### Input:
Call :func:`add_tensor_summary` under a reused 'activation-summary' name scope.
This function is a no-op if not calling from main training tower.
Args:
x (tf.Tensor): the tensor to summary.
types (list[str]): summary types, defaults to ``['sparsity', 'rms', 'histogram']``.
name (str): if is None, use x.name.
collections (list[str]): collections of the summary ops.
### Response:
def add_activation_summary(x, types=None, name=None, collections=None):
"""
Call :func:`add_tensor_summary` under a reused 'activation-summary' name scope.
This function is a no-op if not calling from main training tower.
Args:
x (tf.Tensor): the tensor to summary.
types (list[str]): summary types, defaults to ``['sparsity', 'rms', 'histogram']``.
name (str): if is None, use x.name.
collections (list[str]): collections of the summary ops.
"""
ndim = x.get_shape().ndims
if ndim < 2:
logger.warn("Cannot summarize scalar activation {}".format(x.name))
return
if types is None:
types = ['sparsity', 'rms', 'histogram']
with cached_name_scope('activation-summary'):
add_tensor_summary(x, types, name=name, collections=collections) |
def switch_axis_limits(ax, which_axis):
'''
Switch the axis limits of either x or y. Or both!
'''
for a in which_axis:
assert a in ('x', 'y')
ax_limits = ax.axis()
if a == 'x':
ax.set_xlim(ax_limits[1], ax_limits[0])
else:
ax.set_ylim(ax_limits[3], ax_limits[2]) | Switch the axis limits of either x or y. Or both! | Below is the the instruction that describes the task:
### Input:
Switch the axis limits of either x or y. Or both!
### Response:
def switch_axis_limits(ax, which_axis):
'''
Switch the axis limits of either x or y. Or both!
'''
for a in which_axis:
assert a in ('x', 'y')
ax_limits = ax.axis()
if a == 'x':
ax.set_xlim(ax_limits[1], ax_limits[0])
else:
ax.set_ylim(ax_limits[3], ax_limits[2]) |
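A small matplotlib example of the helper above; the plotted data is arbitrary and only serves to show the axes being flipped.

import matplotlib.pyplot as plt

fig, ax = plt.subplots()
ax.plot([0, 1, 2], [0, 1, 4])
switch_axis_limits(ax, 'y')    # flip only the y-axis so larger values point downward
switch_axis_limits(ax, 'xy')   # flip both axes in a single call
plt.close(fig)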
def remove_all_network_profiles(self, obj):
"""Remove all the AP profiles."""
profile_name_list = self.network_profile_name_list(obj)
for profile_name in profile_name_list:
self._logger.debug("delete profile: %s", profile_name)
str_buf = create_unicode_buffer(profile_name)
ret = self._wlan_delete_profile(self._handle, obj['guid'], str_buf)
self._logger.debug("delete result %d", ret) | Remove all the AP profiles. | Below is the the instruction that describes the task:
### Input:
Remove all the AP profiles.
### Response:
def remove_all_network_profiles(self, obj):
"""Remove all the AP profiles."""
profile_name_list = self.network_profile_name_list(obj)
for profile_name in profile_name_list:
self._logger.debug("delete profile: %s", profile_name)
str_buf = create_unicode_buffer(profile_name)
ret = self._wlan_delete_profile(self._handle, obj['guid'], str_buf)
self._logger.debug("delete result %d", ret) |
def handle_exception(self, exc_info=None, state=None, tags=None, return_feedback_urls=False,
dry_run=False):
"""
Call this method from within a try/except clause to generate a call to Stack Sentinel.
:param exc_info: Return value of sys.exc_info(). If you pass None, handle_exception will call sys.exc_info() itself
:param state: Dictionary of state information associated with the error. This could be form data, cookie data, whatnot. NOTE: sys and machine are added to this dictionary if they are not already included.
:param tags: Any string tags you want associated with the exception report.
:param return_feedback_urls: If True, Stack Sentinel will return feedback URLs you can present to the user for extra debugging information.
:param dry_run: If True, method will not actively send in error information to API. Instead, it will return a request object and payload. Used in unittests.
"""
if not exc_info:
exc_info = sys.exc_info()
if exc_info is None:
raise StackSentinelError("handle_exception called outside of exception handler")
(etype, value, tb) = exc_info
try:
msg = value.args[0]
except:
msg = repr(value)
if not isinstance(tags, list):
tags = [tags]
limit = None
new_tb = []
n = 0
while tb is not None and (limit is None or n < limit):
f = tb.tb_frame
lineno = tb.tb_lineno
co = f.f_code
filename = co.co_filename
name = co.co_name
tb = tb.tb_next
n = n + 1
new_tb.append({'line': lineno, 'module': filename, 'method': name})
if state is None:
state = {}
if 'sys' not in state:
try:
state['sys'] = self._get_sys_info()
except Exception as e:
state['sys'] = '<Unable to get sys: %r>' % e
if 'machine' not in state:
try:
state['machine'] = self._get_machine_info()
except Exception as e:
                state['machine'] = '<Unable to get machine: %r>' % e
if tags is None:
tags = []
# The joy of Unicode
if sys.version_info.major > 2:
error_type = str(etype.__name__)
error_message = str(value)
else:
error_type = unicode(etype.__name__)
error_message = unicode(value)
send_error_args = dict(error_type=error_type,
error_message=error_message,
traceback=new_tb,
environment=self.environment,
state=state,
tags=self.tags + tags,
return_feedback_urls=return_feedback_urls)
if dry_run:
return send_error_args
else:
return self.send_error(**send_error_args) | Call this method from within a try/except clause to generate a call to Stack Sentinel.
:param exc_info: Return value of sys.exc_info(). If you pass None, handle_exception will call sys.exc_info() itself
:param state: Dictionary of state information associated with the error. This could be form data, cookie data, whatnot. NOTE: sys and machine are added to this dictionary if they are not already included.
:param tags: Any string tags you want associated with the exception report.
:param return_feedback_urls: If True, Stack Sentinel will return feedback URLs you can present to the user for extra debugging information.
:param dry_run: If True, method will not actively send in error information to API. Instead, it will return a request object and payload. Used in unittests. | Below is the the instruction that describes the task:
### Input:
Call this method from within a try/except clause to generate a call to Stack Sentinel.
:param exc_info: Return value of sys.exc_info(). If you pass None, handle_exception will call sys.exc_info() itself
:param state: Dictionary of state information associated with the error. This could be form data, cookie data, whatnot. NOTE: sys and machine are added to this dictionary if they are not already included.
:param tags: Any string tags you want associated with the exception report.
:param return_feedback_urls: If True, Stack Sentinel will return feedback URLs you can present to the user for extra debugging information.
:param dry_run: If True, method will not actively send in error information to API. Instead, it will return a request object and payload. Used in unittests.
### Response:
def handle_exception(self, exc_info=None, state=None, tags=None, return_feedback_urls=False,
dry_run=False):
"""
Call this method from within a try/except clause to generate a call to Stack Sentinel.
:param exc_info: Return value of sys.exc_info(). If you pass None, handle_exception will call sys.exc_info() itself
:param state: Dictionary of state information associated with the error. This could be form data, cookie data, whatnot. NOTE: sys and machine are added to this dictionary if they are not already included.
:param tags: Any string tags you want associated with the exception report.
:param return_feedback_urls: If True, Stack Sentinel will return feedback URLs you can present to the user for extra debugging information.
:param dry_run: If True, method will not actively send in error information to API. Instead, it will return a request object and payload. Used in unittests.
"""
if not exc_info:
exc_info = sys.exc_info()
if exc_info is None:
raise StackSentinelError("handle_exception called outside of exception handler")
(etype, value, tb) = exc_info
try:
msg = value.args[0]
except:
msg = repr(value)
if not isinstance(tags, list):
tags = [tags]
limit = None
new_tb = []
n = 0
while tb is not None and (limit is None or n < limit):
f = tb.tb_frame
lineno = tb.tb_lineno
co = f.f_code
filename = co.co_filename
name = co.co_name
tb = tb.tb_next
n = n + 1
new_tb.append({'line': lineno, 'module': filename, 'method': name})
if state is None:
state = {}
if 'sys' not in state:
try:
state['sys'] = self._get_sys_info()
except Exception as e:
state['sys'] = '<Unable to get sys: %r>' % e
if 'machine' not in state:
try:
state['machine'] = self._get_machine_info()
except Exception as e:
                state['machine'] = '<Unable to get machine: %r>' % e
if tags is None:
tags = []
# The joy of Unicode
if sys.version_info.major > 2:
error_type = str(etype.__name__)
error_message = str(value)
else:
error_type = unicode(etype.__name__)
error_message = unicode(value)
send_error_args = dict(error_type=error_type,
error_message=error_message,
traceback=new_tb,
environment=self.environment,
state=state,
tags=self.tags + tags,
return_feedback_urls=return_feedback_urls)
if dry_run:
return send_error_args
else:
return self.send_error(**send_error_args) |
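A hedged usage sketch for the method above; `sentinel` stands for an already-configured client object exposing handle_exception, and the state, tags, and dry_run values are placeholders.

try:
    1 / 0
except ZeroDivisionError:
    payload = sentinel.handle_exception(
        state={'form': {'user_id': 42}},   # arbitrary request state to aid debugging
        tags=['billing'],                  # merged with the client's default tags
        dry_run=True,                      # return the would-be API payload instead of sending it
    )
    print(payload['error_type'])           # 'ZeroDivisionError'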
def tokenize_math(text):
r"""Prevents math from being tokenized.
:param Buffer text: iterator over line, with current position
>>> b = Buffer(r'$\min_x$ \command')
>>> tokenize_math(b)
'$'
>>> b = Buffer(r'$$\min_x$$ \command')
>>> tokenize_math(b)
'$$'
"""
if text.startswith('$') and (
text.position == 0 or text.peek(-1) != '\\' or text.endswith(r'\\')):
starter = '$$' if text.startswith('$$') else '$'
return TokenWithPosition(text.forward(len(starter)), text.position) | r"""Prevents math from being tokenized.
:param Buffer text: iterator over line, with current position
>>> b = Buffer(r'$\min_x$ \command')
>>> tokenize_math(b)
'$'
>>> b = Buffer(r'$$\min_x$$ \command')
>>> tokenize_math(b)
'$$' | Below is the the instruction that describes the task:
### Input:
r"""Prevents math from being tokenized.
:param Buffer text: iterator over line, with current position
>>> b = Buffer(r'$\min_x$ \command')
>>> tokenize_math(b)
'$'
>>> b = Buffer(r'$$\min_x$$ \command')
>>> tokenize_math(b)
'$$'
### Response:
def tokenize_math(text):
r"""Prevents math from being tokenized.
:param Buffer text: iterator over line, with current position
>>> b = Buffer(r'$\min_x$ \command')
>>> tokenize_math(b)
'$'
>>> b = Buffer(r'$$\min_x$$ \command')
>>> tokenize_math(b)
'$$'
"""
if text.startswith('$') and (
text.position == 0 or text.peek(-1) != '\\' or text.endswith(r'\\')):
starter = '$$' if text.startswith('$$') else '$'
return TokenWithPosition(text.forward(len(starter)), text.position) |
def parseFullScan(self, i, modifications=False):
"""
parses scan info for giving a Spectrum Obj for plotting. takes significantly longer since it has to unzip/parse xml
"""
scanObj = PeptideObject()
peptide = str(i[1])
pid=i[2]
scanObj.acc = self.protein_map.get(i[4], i[4])
if pid is None:
return None
if modifications:
sql = 'select aam.ModificationName,pam.Position,aam.DeltaMass from peptidesaminoacidmodifications pam left join aminoacidmodifications aam on (aam.AminoAcidModificationID=pam.AminoAcidModificationID) where pam.PeptideID=%s'%pid
for row in self.conn.execute(sql):
scanObj.addModification(peptide[row[1]], str(row[1]), str(row[2]), row[0])
else:
mods = self.mods.get(int(pid))
if mods is not None:
for modId, modPosition in zip(mods[0].split(','),mods[1].split(',')):
modEntry = self.modTable[str(modId)]
scanObj.addModification(peptide[int(modPosition)], modPosition, modEntry[1], modEntry[0])
tmods = self.tmods.get(int(pid))
if tmods is not None:
for modIds in tmods:
for modId in modIds.split(','):
modEntry = self.modTable[str(modId)]
scanObj.addModification('[', 0, modEntry[1], modEntry[0])
scanObj.peptide = peptide
if self.decompressScanInfo(scanObj, i[0]):
return scanObj
return None | parses scan info for giving a Spectrum Obj for plotting. takes significantly longer since it has to unzip/parse xml | Below is the the instruction that describes the task:
### Input:
parses scan info for giving a Spectrum Obj for plotting. takes significantly longer since it has to unzip/parse xml
### Response:
def parseFullScan(self, i, modifications=False):
"""
parses scan info for giving a Spectrum Obj for plotting. takes significantly longer since it has to unzip/parse xml
"""
scanObj = PeptideObject()
peptide = str(i[1])
pid=i[2]
scanObj.acc = self.protein_map.get(i[4], i[4])
if pid is None:
return None
if modifications:
sql = 'select aam.ModificationName,pam.Position,aam.DeltaMass from peptidesaminoacidmodifications pam left join aminoacidmodifications aam on (aam.AminoAcidModificationID=pam.AminoAcidModificationID) where pam.PeptideID=%s'%pid
for row in self.conn.execute(sql):
scanObj.addModification(peptide[row[1]], str(row[1]), str(row[2]), row[0])
else:
mods = self.mods.get(int(pid))
if mods is not None:
for modId, modPosition in zip(mods[0].split(','),mods[1].split(',')):
modEntry = self.modTable[str(modId)]
scanObj.addModification(peptide[int(modPosition)], modPosition, modEntry[1], modEntry[0])
tmods = self.tmods.get(int(pid))
if tmods is not None:
for modIds in tmods:
for modId in modIds.split(','):
modEntry = self.modTable[str(modId)]
scanObj.addModification('[', 0, modEntry[1], modEntry[0])
scanObj.peptide = peptide
if self.decompressScanInfo(scanObj, i[0]):
return scanObj
return None |
def valueFromString(self, value, context=None):
"""
Converts the inputted string text to a value that matches the type from
this column type.
:param value | <str>
"""
if value in ('today', 'now'):
            return datetime.datetime.utcnow()
try:
return datetime.datetime.fromtimestamp(float(value))
except StandardError:
if dateutil_parser:
return dateutil_parser.parse(value)
else:
                return datetime.datetime.min | Converts the inputted string text to a value that matches the type from
this column type.
:param value | <str> | Below is the the instruction that describes the task:
### Input:
Converts the inputted string text to a value that matches the type from
this column type.
:param value | <str>
### Response:
def valueFromString(self, value, context=None):
"""
Converts the inputted string text to a value that matches the type from
this column type.
:param value | <str>
"""
if value in ('today', 'now'):
            return datetime.datetime.utcnow()
try:
return datetime.datetime.fromtimestamp(float(value))
except StandardError:
if dateutil_parser:
return dateutil_parser.parse(value)
else:
                return datetime.datetime.min
def output(self, to=None, formatted=False, indent=0, indentation=' ', *args, **kwargs):
'''Outputs to a stream (like a file or request)'''
if formatted:
to.write(self.start_tag)
to.write('\n')
if not self.tag_self_closes:
for blok in self.blox:
to.write(indentation * (indent + 1))
blok.output(to=to, indent=indent + 1, formatted=True, indentation=indentation, *args, **kwargs)
to.write('\n')
to.write(indentation * indent)
to.write(self.end_tag)
if not indentation:
to.write('\n')
else:
to.write(self.start_tag)
if not self.tag_self_closes:
for blok in self.blox:
blok.output(to=to, *args, **kwargs)
to.write(self.end_tag) | Outputs to a stream (like a file or request) | Below is the the instruction that describes the task:
### Input:
Outputs to a stream (like a file or request)
### Response:
def output(self, to=None, formatted=False, indent=0, indentation=' ', *args, **kwargs):
'''Outputs to a stream (like a file or request)'''
if formatted:
to.write(self.start_tag)
to.write('\n')
if not self.tag_self_closes:
for blok in self.blox:
to.write(indentation * (indent + 1))
blok.output(to=to, indent=indent + 1, formatted=True, indentation=indentation, *args, **kwargs)
to.write('\n')
to.write(indentation * indent)
to.write(self.end_tag)
if not indentation:
to.write('\n')
else:
to.write(self.start_tag)
if not self.tag_self_closes:
for blok in self.blox:
blok.output(to=to, *args, **kwargs)
to.write(self.end_tag) |
def update_listener(self, lbaas_listener, body=None):
"""Updates a lbaas_listener."""
return self.put(self.lbaas_listener_path % (lbaas_listener),
body=body) | Updates a lbaas_listener. | Below is the the instruction that describes the task:
### Input:
Updates a lbaas_listener.
### Response:
def update_listener(self, lbaas_listener, body=None):
"""Updates a lbaas_listener."""
return self.put(self.lbaas_listener_path % (lbaas_listener),
body=body) |
def directions(self, features, profile='mapbox/driving',
alternatives=None, geometries=None, overview=None, steps=None,
continue_straight=None, waypoint_snapping=None, annotations=None,
language=None, **kwargs):
"""Request directions for waypoints encoded as GeoJSON features.
Parameters
----------
features : iterable
            A collection of GeoJSON features
profile : str
Name of a Mapbox profile such as 'mapbox.driving'
alternatives : bool
Whether to try to return alternative routes, default: False
geometries : string
Type of geometry returned (geojson, polyline, polyline6)
overview : string or False
Type of returned overview geometry: 'full', 'simplified',
or False
steps : bool
Whether to return steps and turn-by-turn instructions,
default: False
continue_straight : bool
Direction of travel when departing intermediate waypoints
radiuses : iterable of numbers or 'unlimited'
Must be same length as features
waypoint_snapping : list
Controls snapping of waypoints
The list is zipped with the features collection and must
have the same length. Elements of the list must be one of:
            - A number (interpreted as a snapping radius)
- The string 'unlimited' (unlimited snapping radius)
- A 3-element tuple consisting of (radius, angle, range)
- None (no snapping parameters specified for that waypoint)
annotations : str
Whether or not to return additional metadata along the route
Possible values are: 'duration', 'distance', 'speed', and
'congestion'. Several annotations can be used by joining
them with ','.
language : str
Language of returned turn-by-turn text instructions,
default: 'en'
Returns
-------
requests.Response
The response object has a geojson() method for access to
the route(s) as a GeoJSON-like FeatureCollection
dictionary.
"""
# backwards compatible, deprecated
if 'geometry' in kwargs and geometries is None:
geometries = kwargs['geometry']
warnings.warn('Use `geometries` instead of `geometry`',
errors.MapboxDeprecationWarning)
annotations = self._validate_annotations(annotations)
coordinates = encode_coordinates(
features, precision=6, min_limit=2, max_limit=25)
geometries = self._validate_geom_encoding(geometries)
overview = self._validate_geom_overview(overview)
profile = self._validate_profile(profile)
bearings, radii = self._validate_snapping(waypoint_snapping, features)
params = {}
if alternatives is not None:
params.update(
{'alternatives': 'true' if alternatives is True else 'false'})
if geometries is not None:
params.update({'geometries': geometries})
if overview is not None:
params.update(
{'overview': 'false' if overview is False else overview})
if steps is not None:
params.update(
{'steps': 'true' if steps is True else 'false'})
if continue_straight is not None:
params.update(
                {'continue_straight': 'true' if continue_straight is True else 'false'})
if annotations is not None:
params.update({'annotations': ','.join(annotations)})
if language is not None:
params.update({'language': language})
if radii is not None:
params.update(
{'radiuses': ';'.join(str(r) for r in radii)})
if bearings is not None:
params.update(
{'bearings': ';'.join(self._encode_bearing(b) for b in bearings)})
profile_ns, profile_name = profile.split('/')
uri = URITemplate(
self.baseuri + '/{profile_ns}/{profile_name}/{coordinates}.json').expand(
profile_ns=profile_ns, profile_name=profile_name, coordinates=coordinates)
resp = self.session.get(uri, params=params)
self.handle_http_error(resp)
def geojson():
return self._geojson(resp.json(), geom_format=geometries)
resp.geojson = geojson
return resp | Request directions for waypoints encoded as GeoJSON features.
Parameters
----------
features : iterable
            A collection of GeoJSON features
profile : str
Name of a Mapbox profile such as 'mapbox.driving'
alternatives : bool
Whether to try to return alternative routes, default: False
geometries : string
Type of geometry returned (geojson, polyline, polyline6)
overview : string or False
Type of returned overview geometry: 'full', 'simplified',
or False
steps : bool
Whether to return steps and turn-by-turn instructions,
default: False
continue_straight : bool
Direction of travel when departing intermediate waypoints
radiuses : iterable of numbers or 'unlimited'
Must be same length as features
waypoint_snapping : list
Controls snapping of waypoints
The list is zipped with the features collection and must
have the same length. Elements of the list must be one of:
            - A number (interpreted as a snapping radius)
- The string 'unlimited' (unlimited snapping radius)
- A 3-element tuple consisting of (radius, angle, range)
- None (no snapping parameters specified for that waypoint)
annotations : str
Whether or not to return additional metadata along the route
Possible values are: 'duration', 'distance', 'speed', and
'congestion'. Several annotations can be used by joining
them with ','.
language : str
Language of returned turn-by-turn text instructions,
default: 'en'
Returns
-------
requests.Response
The response object has a geojson() method for access to
the route(s) as a GeoJSON-like FeatureCollection
dictionary. | Below is the the instruction that describes the task:
### Input:
Request directions for waypoints encoded as GeoJSON features.
Parameters
----------
features : iterable
            A collection of GeoJSON features
profile : str
Name of a Mapbox profile such as 'mapbox.driving'
alternatives : bool
Whether to try to return alternative routes, default: False
geometries : string
Type of geometry returned (geojson, polyline, polyline6)
overview : string or False
Type of returned overview geometry: 'full', 'simplified',
or False
steps : bool
Whether to return steps and turn-by-turn instructions,
default: False
continue_straight : bool
Direction of travel when departing intermediate waypoints
radiuses : iterable of numbers or 'unlimited'
Must be same length as features
waypoint_snapping : list
Controls snapping of waypoints
The list is zipped with the features collection and must
have the same length. Elements of the list must be one of:
            - A number (interpreted as a snapping radius)
- The string 'unlimited' (unlimited snapping radius)
- A 3-element tuple consisting of (radius, angle, range)
- None (no snapping parameters specified for that waypoint)
annotations : str
Whether or not to return additional metadata along the route
Possible values are: 'duration', 'distance', 'speed', and
'congestion'. Several annotations can be used by joining
them with ','.
language : str
Language of returned turn-by-turn text instructions,
default: 'en'
Returns
-------
requests.Response
The response object has a geojson() method for access to
the route(s) as a GeoJSON-like FeatureCollection
dictionary.
### Response:
def directions(self, features, profile='mapbox/driving',
alternatives=None, geometries=None, overview=None, steps=None,
continue_straight=None, waypoint_snapping=None, annotations=None,
language=None, **kwargs):
"""Request directions for waypoints encoded as GeoJSON features.
Parameters
----------
features : iterable
A collection of GeoJSON features
profile : str
Name of a Mapbox profile such as 'mapbox/driving'
alternatives : bool
Whether to try to return alternative routes, default: False
geometries : string
Type of geometry returned (geojson, polyline, polyline6)
overview : string or False
Type of returned overview geometry: 'full', 'simplified',
or False
steps : bool
Whether to return steps and turn-by-turn instructions,
default: False
continue_straight : bool
Direction of travel when departing intermediate waypoints
radiuses : iterable of numbers or 'unlimited'
Must be same length as features
waypoint_snapping : list
Controls snapping of waypoints
The list is zipped with the features collection and must
have the same length. Elements of the list must be one of:
- A number (interpreted as a snapping radius)
- The string 'unlimited' (unlimited snapping radius)
- A 3-element tuple consisting of (radius, angle, range)
- None (no snapping parameters specified for that waypoint)
annotations : str
Whether or not to return additional metadata along the route
Possible values are: 'duration', 'distance', 'speed', and
'congestion'. Several annotations can be used by joining
them with ','.
language : str
Language of returned turn-by-turn text instructions,
default: 'en'
Returns
-------
requests.Response
The response object has a geojson() method for access to
the route(s) as a GeoJSON-like FeatureCollection
dictionary.
"""
# backwards compatible, deprecated
if 'geometry' in kwargs and geometries is None:
geometries = kwargs['geometry']
warnings.warn('Use `geometries` instead of `geometry`',
errors.MapboxDeprecationWarning)
annotations = self._validate_annotations(annotations)
coordinates = encode_coordinates(
features, precision=6, min_limit=2, max_limit=25)
geometries = self._validate_geom_encoding(geometries)
overview = self._validate_geom_overview(overview)
profile = self._validate_profile(profile)
bearings, radii = self._validate_snapping(waypoint_snapping, features)
params = {}
if alternatives is not None:
params.update(
{'alternatives': 'true' if alternatives is True else 'false'})
if geometries is not None:
params.update({'geometries': geometries})
if overview is not None:
params.update(
{'overview': 'false' if overview is False else overview})
if steps is not None:
params.update(
{'steps': 'true' if steps is True else 'false'})
if continue_straight is not None:
params.update(
{'continue_straight': 'true' if continue_straight is True else 'false'})
if annotations is not None:
params.update({'annotations': ','.join(annotations)})
if language is not None:
params.update({'language': language})
if radii is not None:
params.update(
{'radiuses': ';'.join(str(r) for r in radii)})
if bearings is not None:
params.update(
{'bearings': ';'.join(self._encode_bearing(b) for b in bearings)})
profile_ns, profile_name = profile.split('/')
uri = URITemplate(
self.baseuri + '/{profile_ns}/{profile_name}/{coordinates}.json').expand(
profile_ns=profile_ns, profile_name=profile_name, coordinates=coordinates)
resp = self.session.get(uri, params=params)
self.handle_http_error(resp)
def geojson():
return self._geojson(resp.json(), geom_format=geometries)
resp.geojson = geojson
return resp |
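A minimal usage sketch for the directions() method above, assuming the Mapbox Python SDK's Directions service; the access token and waypoint coordinates below are placeholders, not values taken from this record:
from mapbox import Directions

origin = {"type": "Feature",
          "geometry": {"type": "Point", "coordinates": [-122.7282, 45.5801]}}
destination = {"type": "Feature",
               "geometry": {"type": "Point", "coordinates": [-121.3153, 44.0582]}}

service = Directions(access_token="pk.placeholder-token")
response = service.directions([origin, destination], profile='mapbox/driving',
                              steps=True, annotations=['duration', 'distance'])
if response.status_code == 200:
    routes = response.geojson()  # GeoJSON-like FeatureCollection of route(s)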
def from_json(cls, data, result=None):
"""
Create new Way element from JSON data
:param data: Element data from JSON
:type data: Dict
:param result: The result this element belongs to
:type result: overpy.Result
:return: New instance of Way
:rtype: overpy.Way
:raises overpy.exception.ElementDataWrongType: If type value of the passed JSON data does not match.
"""
if data.get("type") != cls._type_value:
raise exception.ElementDataWrongType(
type_expected=cls._type_value,
type_provided=data.get("type")
)
tags = data.get("tags", {})
way_id = data.get("id")
node_ids = data.get("nodes")
(center_lat, center_lon) = cls.get_center_from_json(data=data)
attributes = {}
ignore = ["center", "id", "nodes", "tags", "type"]
for n, v in data.items():
if n in ignore:
continue
attributes[n] = v
return cls(
attributes=attributes,
center_lat=center_lat,
center_lon=center_lon,
node_ids=node_ids,
tags=tags,
result=result,
way_id=way_id
) | Create new Way element from JSON data
:param data: Element data from JSON
:type data: Dict
:param result: The result this element belongs to
:type result: overpy.Result
:return: New instance of Way
:rtype: overpy.Way
:raises overpy.exception.ElementDataWrongType: If type value of the passed JSON data does not match. | Below is the instruction that describes the task:
### Input:
Create new Way element from JSON data
:param data: Element data from JSON
:type data: Dict
:param result: The result this element belongs to
:type result: overpy.Result
:return: New instance of Way
:rtype: overpy.Way
:raises overpy.exception.ElementDataWrongType: If type value of the passed JSON data does not match.
### Response:
def from_json(cls, data, result=None):
"""
Create new Way element from JSON data
:param data: Element data from JSON
:type data: Dict
:param result: The result this element belongs to
:type result: overpy.Result
:return: New instance of Way
:rtype: overpy.Way
:raises overpy.exception.ElementDataWrongType: If type value of the passed JSON data does not match.
"""
if data.get("type") != cls._type_value:
raise exception.ElementDataWrongType(
type_expected=cls._type_value,
type_provided=data.get("type")
)
tags = data.get("tags", {})
way_id = data.get("id")
node_ids = data.get("nodes")
(center_lat, center_lon) = cls.get_center_from_json(data=data)
attributes = {}
ignore = ["center", "id", "nodes", "tags", "type"]
for n, v in data.items():
if n in ignore:
continue
attributes[n] = v
return cls(
attributes=attributes,
center_lat=center_lat,
center_lon=center_lon,
node_ids=node_ids,
tags=tags,
result=result,
way_id=way_id
) |
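A small illustration of the JSON shape from_json() expects, assuming the overpy package is installed; the element id, nodes, and tags are invented:
import overpy

data = {
    "type": "way",
    "id": 4962890,
    "nodes": [1615531689, 1615531745],
    "tags": {"highway": "residential"},
    "center": {"lat": 50.7462, "lon": 7.1547},
}
way = overpy.Way.from_json(data)   # Way instance with tags, node ids and center
# passing an element whose "type" is not "way" raises
# overpy.exception.ElementDataWrongType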
def _from_dict(cls, _dict):
"""Initialize a ConsumptionPreferencesCategory object from a json dictionary."""
args = {}
if 'consumption_preference_category_id' in _dict:
args['consumption_preference_category_id'] = _dict.get(
'consumption_preference_category_id')
else:
raise ValueError(
'Required property \'consumption_preference_category_id\' not present in ConsumptionPreferencesCategory JSON'
)
if 'name' in _dict:
args['name'] = _dict.get('name')
else:
raise ValueError(
'Required property \'name\' not present in ConsumptionPreferencesCategory JSON'
)
if 'consumption_preferences' in _dict:
args['consumption_preferences'] = [
ConsumptionPreferences._from_dict(x)
for x in (_dict.get('consumption_preferences'))
]
else:
raise ValueError(
'Required property \'consumption_preferences\' not present in ConsumptionPreferencesCategory JSON'
)
return cls(**args) | Initialize a ConsumptionPreferencesCategory object from a json dictionary. | Below is the instruction that describes the task:
### Input:
Initialize a ConsumptionPreferencesCategory object from a json dictionary.
### Response:
def _from_dict(cls, _dict):
"""Initialize a ConsumptionPreferencesCategory object from a json dictionary."""
args = {}
if 'consumption_preference_category_id' in _dict:
args['consumption_preference_category_id'] = _dict.get(
'consumption_preference_category_id')
else:
raise ValueError(
'Required property \'consumption_preference_category_id\' not present in ConsumptionPreferencesCategory JSON'
)
if 'name' in _dict:
args['name'] = _dict.get('name')
else:
raise ValueError(
'Required property \'name\' not present in ConsumptionPreferencesCategory JSON'
)
if 'consumption_preferences' in _dict:
args['consumption_preferences'] = [
ConsumptionPreferences._from_dict(x)
for x in (_dict.get('consumption_preferences'))
]
else:
raise ValueError(
'Required property \'consumption_preferences\' not present in ConsumptionPreferencesCategory JSON'
)
return cls(**args) |
def load_from_path(path, filetype=None, has_filetype=True):
""" load file content from a file specified as dot-separated
The file is located according to logic in normalize_path,
and the contents are returned. (See Note 1)
Parameters: (see normalize_path)
path - dot-separated path
filetype - optional filetype
has_filetype - if True, treat last dot-delimited token as filetype
Notes:
1. If path is a file-like object, then data is read directly
from path, without trying to open it.
2. Non-string paths are returned immediately (excluding the
case in Note 1).
3. If has_filetype is True, filetype does not have to be specified.
If filetype is specified, has_filetype is ignored, and filetype
must match the last dot-delimited token exactly.
"""
if not isinstance(path, str):
try:
return path.read()
except AttributeError:
return path
path = normalize_path(path, filetype, has_filetype)
with open(path) as data:
return data.read() | load file content from a file specified as dot-separated
The file is located according to logic in normalize_path,
and the contents are returned. (See Note 1)
Parameters: (see normalize_path)
path - dot-separated path
filetype - optional filetype
has_filetype - if True, treat last dot-delimited token as filetype
Notes:
1. If path is a file-like object, then data is read directly
from path, without trying to open it.
2. Non-string paths are returned immediately (excluding the
case in Note 1).
3. If has_filetype is True, filetype does not have to be specified.
If filetype is specified, has_filetype is ignored, and filetype
must match the last dot-delimited token exactly. | Below is the instruction that describes the task:
### Input:
load file content from a file specified as dot-separated
The file is located according to logic in normalize_path,
and the contents are returned. (See Note 1)
Parameters: (see normalize_path)
path - dot-separated path
filetype - optional filetype
has_filetype - if True, treat last dot-delimited token as filetype
Notes:
1. If path is a file-like object, then data is read directly
from path, without trying to open it.
2. Non-string paths are returned immediately (excluding the
case in Note 1).
3. If has_filetype is True, filetype does not have to be specified.
If filetype is specified, has_filetype is ignored, and filetype
must match the last dot-delimited token exactly.
### Response:
def load_from_path(path, filetype=None, has_filetype=True):
""" load file content from a file specified as dot-separated
The file is located according to logic in normalize_path,
and the contents are returned. (See Note 1)
Parameters: (see normalize_path)
path - dot-separated path
filetype - optional filetype
has_filetype - if True, treat last dot-delimited token as filetype
Notes:
1. If path is a file-like object, then data is read directly
from path, without trying to open it.
2. Non-string paths are returned immediately (excluding the
case in Note 1).
3. If has_filetype is True, filetype does not have to be specified.
If filetype is specified, has_filetype is ignored, and filetype
must match the last dot-delimited token exactly.
"""
if not isinstance(path, str):
try:
return path.read()
except AttributeError:
return path
path = normalize_path(path, filetype, has_filetype)
with open(path) as data:
return data.read() |
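A usage sketch for load_from_path(); the dot-separated paths below and the file layout they resolve to are assumptions, since normalize_path is not shown in this record:
# last dot-delimited token treated as the filetype by default
text = load_from_path('config.settings.json')
# or name the filetype explicitly
text = load_from_path('config.settings', filetype='json')

# file-like objects are read directly; non-strings are returned unchanged
from io import StringIO
text = load_from_path(StringIO('{"debug": true}'))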
def get_ISSNs(self):
"""
Get list of VALID ISSNs (``022a``).
Returns:
list: List with *valid* ISSN strings.
"""
invalid_issns = set(self.get_invalid_ISSNs())
return [
self._clean_isbn(issn)
for issn in self["022a"]
if self._clean_isbn(issn) not in invalid_issns
] | Get list of VALID ISSNs (``022a``).
Returns:
list: List with *valid* ISSN strings. | Below is the instruction that describes the task:
### Input:
Get list of VALID ISSNs (``022a``).
Returns:
list: List with *valid* ISSN strings.
### Response:
def get_ISSNs(self):
"""
Get list of VALID ISSNs (``022a``).
Returns:
list: List with *valid* ISSN strings.
"""
invalid_issns = set(self.get_invalid_ISSNs())
return [
self._clean_isbn(issn)
for issn in self["022a"]
if self._clean_isbn(issn) not in invalid_issns
] |
def _set_fcoe(self, v, load=False):
"""
Setter method for fcoe, mapped from YANG variable /interface/fcoe (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_fcoe is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_fcoe() directly.
YANG Description: The list of FCoE interfaces. Each row contains FCoE
interface name and its status.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGListType("fcoe_interface_name",fcoe.fcoe, yang_name="fcoe", rest_name="Fcoe", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='fcoe-interface-name', extensions={u'tailf-common': {u'info': u'The list of FCoE Logical interfaces', u'cli-no-key-completion': None, u'alt-name': u'Fcoe', u'sort-priority': u'RUNNCFG_LEVEL_INTERFACE_TYPE_LOGICAL', u'cli-custom-range-actionpoint': u'FcoeRangeCliActionpoint', u'cli-custom-range-enumerator': u'FcoeRangeCliActionpoint', u'display-when': u'/vcsmode/vcs-mode = "true"', u'cli-full-command': None, u'callpoint': u'fcoe_interface_cp'}}), is_container='list', yang_name="fcoe", rest_name="Fcoe", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'The list of FCoE Logical interfaces', u'cli-no-key-completion': None, u'alt-name': u'Fcoe', u'sort-priority': u'RUNNCFG_LEVEL_INTERFACE_TYPE_LOGICAL', u'cli-custom-range-actionpoint': u'FcoeRangeCliActionpoint', u'cli-custom-range-enumerator': u'FcoeRangeCliActionpoint', u'display-when': u'/vcsmode/vcs-mode = "true"', u'cli-full-command': None, u'callpoint': u'fcoe_interface_cp'}}, namespace='urn:brocade.com:mgmt:brocade-fcoe', defining_module='brocade-fcoe', yang_type='list', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """fcoe must be of a type compatible with list""",
'defined-type': "list",
'generated-type': """YANGDynClass(base=YANGListType("fcoe_interface_name",fcoe.fcoe, yang_name="fcoe", rest_name="Fcoe", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='fcoe-interface-name', extensions={u'tailf-common': {u'info': u'The list of FCoE Logical interfaces', u'cli-no-key-completion': None, u'alt-name': u'Fcoe', u'sort-priority': u'RUNNCFG_LEVEL_INTERFACE_TYPE_LOGICAL', u'cli-custom-range-actionpoint': u'FcoeRangeCliActionpoint', u'cli-custom-range-enumerator': u'FcoeRangeCliActionpoint', u'display-when': u'/vcsmode/vcs-mode = "true"', u'cli-full-command': None, u'callpoint': u'fcoe_interface_cp'}}), is_container='list', yang_name="fcoe", rest_name="Fcoe", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'The list of FCoE Logical interfaces', u'cli-no-key-completion': None, u'alt-name': u'Fcoe', u'sort-priority': u'RUNNCFG_LEVEL_INTERFACE_TYPE_LOGICAL', u'cli-custom-range-actionpoint': u'FcoeRangeCliActionpoint', u'cli-custom-range-enumerator': u'FcoeRangeCliActionpoint', u'display-when': u'/vcsmode/vcs-mode = "true"', u'cli-full-command': None, u'callpoint': u'fcoe_interface_cp'}}, namespace='urn:brocade.com:mgmt:brocade-fcoe', defining_module='brocade-fcoe', yang_type='list', is_config=True)""",
})
self.__fcoe = t
if hasattr(self, '_set'):
self._set() | Setter method for fcoe, mapped from YANG variable /interface/fcoe (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_fcoe is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_fcoe() directly.
YANG Description: The list of FCoE interfaces. Each row contains FCoE
interface name and its status. | Below is the instruction that describes the task:
### Input:
Setter method for fcoe, mapped from YANG variable /interface/fcoe (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_fcoe is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_fcoe() directly.
YANG Description: The list of FCoE interfaces. Each row contains FCoE
interface name and its status.
### Response:
def _set_fcoe(self, v, load=False):
"""
Setter method for fcoe, mapped from YANG variable /interface/fcoe (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_fcoe is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_fcoe() directly.
YANG Description: The list of FCoE interfaces. Each row contains FCoE
interface name and its status.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGListType("fcoe_interface_name",fcoe.fcoe, yang_name="fcoe", rest_name="Fcoe", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='fcoe-interface-name', extensions={u'tailf-common': {u'info': u'The list of FCoE Logical interfaces', u'cli-no-key-completion': None, u'alt-name': u'Fcoe', u'sort-priority': u'RUNNCFG_LEVEL_INTERFACE_TYPE_LOGICAL', u'cli-custom-range-actionpoint': u'FcoeRangeCliActionpoint', u'cli-custom-range-enumerator': u'FcoeRangeCliActionpoint', u'display-when': u'/vcsmode/vcs-mode = "true"', u'cli-full-command': None, u'callpoint': u'fcoe_interface_cp'}}), is_container='list', yang_name="fcoe", rest_name="Fcoe", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'The list of FCoE Logical interfaces', u'cli-no-key-completion': None, u'alt-name': u'Fcoe', u'sort-priority': u'RUNNCFG_LEVEL_INTERFACE_TYPE_LOGICAL', u'cli-custom-range-actionpoint': u'FcoeRangeCliActionpoint', u'cli-custom-range-enumerator': u'FcoeRangeCliActionpoint', u'display-when': u'/vcsmode/vcs-mode = "true"', u'cli-full-command': None, u'callpoint': u'fcoe_interface_cp'}}, namespace='urn:brocade.com:mgmt:brocade-fcoe', defining_module='brocade-fcoe', yang_type='list', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """fcoe must be of a type compatible with list""",
'defined-type': "list",
'generated-type': """YANGDynClass(base=YANGListType("fcoe_interface_name",fcoe.fcoe, yang_name="fcoe", rest_name="Fcoe", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='fcoe-interface-name', extensions={u'tailf-common': {u'info': u'The list of FCoE Logical interfaces', u'cli-no-key-completion': None, u'alt-name': u'Fcoe', u'sort-priority': u'RUNNCFG_LEVEL_INTERFACE_TYPE_LOGICAL', u'cli-custom-range-actionpoint': u'FcoeRangeCliActionpoint', u'cli-custom-range-enumerator': u'FcoeRangeCliActionpoint', u'display-when': u'/vcsmode/vcs-mode = "true"', u'cli-full-command': None, u'callpoint': u'fcoe_interface_cp'}}), is_container='list', yang_name="fcoe", rest_name="Fcoe", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'The list of FCoE Logical interfaces', u'cli-no-key-completion': None, u'alt-name': u'Fcoe', u'sort-priority': u'RUNNCFG_LEVEL_INTERFACE_TYPE_LOGICAL', u'cli-custom-range-actionpoint': u'FcoeRangeCliActionpoint', u'cli-custom-range-enumerator': u'FcoeRangeCliActionpoint', u'display-when': u'/vcsmode/vcs-mode = "true"', u'cli-full-command': None, u'callpoint': u'fcoe_interface_cp'}}, namespace='urn:brocade.com:mgmt:brocade-fcoe', defining_module='brocade-fcoe', yang_type='list', is_config=True)""",
})
self.__fcoe = t
if hasattr(self, '_set'):
self._set() |
def get_build_logs_zip(self, project, build_id, **kwargs):
"""GetBuildLogsZip.
Gets the logs for a build.
:param str project: Project ID or project name
:param int build_id: The ID of the build.
:rtype: object
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if build_id is not None:
route_values['buildId'] = self._serialize.url('build_id', build_id, 'int')
response = self._send(http_method='GET',
location_id='35a80daf-7f30-45fc-86e8-6b813d9c90df',
version='5.0',
route_values=route_values,
accept_media_type='application/zip')
if "callback" in kwargs:
callback = kwargs["callback"]
else:
callback = None
return self._client.stream_download(response, callback=callback) | GetBuildLogsZip.
Gets the logs for a build.
:param str project: Project ID or project name
:param int build_id: The ID of the build.
:rtype: object | Below is the instruction that describes the task:
### Input:
GetBuildLogsZip.
Gets the logs for a build.
:param str project: Project ID or project name
:param int build_id: The ID of the build.
:rtype: object
### Response:
def get_build_logs_zip(self, project, build_id, **kwargs):
"""GetBuildLogsZip.
Gets the logs for a build.
:param str project: Project ID or project name
:param int build_id: The ID of the build.
:rtype: object
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if build_id is not None:
route_values['buildId'] = self._serialize.url('build_id', build_id, 'int')
response = self._send(http_method='GET',
location_id='35a80daf-7f30-45fc-86e8-6b813d9c90df',
version='5.0',
route_values=route_values,
accept_media_type='application/zip')
if "callback" in kwargs:
callback = kwargs["callback"]
else:
callback = None
return self._client.stream_download(response, callback=callback) |
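A hedged sketch of saving the streamed log archive with the Azure DevOps Python API; the organization URL, personal access token, project name, and build id are placeholders:
from azure.devops.connection import Connection
from msrest.authentication import BasicAuthentication

connection = Connection(base_url="https://dev.azure.com/your-org",
                        creds=BasicAuthentication('', "your-pat"))
build_client = connection.clients.get_build_client()

# get_build_logs_zip yields the zip archive as byte chunks
with open("build_logs.zip", "wb") as f:
    for chunk in build_client.get_build_logs_zip("MyProject", 1234):
        f.write(chunk)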
def avatar(self, blogname, size=64):
"""
Retrieves the url of the blog's avatar
:param blogname: a string, the blog you want the avatar for
:returns: A dict created from the JSON response
"""
url = "/v2/blog/{}/avatar/{}".format(blogname, size)
return self.send_api_request("get", url) | Retrieves the url of the blog's avatar
:param blogname: a string, the blog you want the avatar for
:returns: A dict created from the JSON response | Below is the instruction that describes the task:
### Input:
Retrieves the url of the blog's avatar
:param blogname: a string, the blog you want the avatar for
:returns: A dict created from the JSON response
### Response:
def avatar(self, blogname, size=64):
"""
Retrieves the url of the blog's avatar
:param blogname: a string, the blog you want the avatar for
:returns: A dict created from the JSON response
"""
url = "/v2/blog/{}/avatar/{}".format(blogname, size)
return self.send_api_request("get", url) |
def plot_calibration_purchases_vs_holdout_purchases(
model, calibration_holdout_matrix, kind="frequency_cal", n=7, **kwargs
):
"""
Plot calibration purchases vs holdout.
This currently relies too much on the lifetimes.util calibration_and_holdout_data function.
Parameters
----------
model: lifetimes model
A fitted lifetimes model.
calibration_holdout_matrix: pandas DataFrame
DataFrame from calibration_and_holdout_data function.
kind: str, optional
x-axis :"frequency_cal". Purchases in calibration period,
"recency_cal". Age of customer at last purchase,
"T_cal". Age of customer at the end of calibration period,
"time_since_last_purchase". Time since user made last purchase
n: int, optional
Number of ticks on the x axis
Returns
-------
axes: matplotlib.AxesSubplot
"""
from matplotlib import pyplot as plt
x_labels = {
"frequency_cal": "Purchases in calibration period",
"recency_cal": "Age of customer at last purchase",
"T_cal": "Age of customer at the end of calibration period",
"time_since_last_purchase": "Time since user made last purchase",
}
summary = calibration_holdout_matrix.copy()
duration_holdout = summary.iloc[0]["duration_holdout"]
summary["model_predictions"] = model.conditional_expected_number_of_purchases_up_to_time(
duration_holdout, summary["frequency_cal"], summary["recency_cal"], summary["T_cal"])
if kind == "time_since_last_purchase":
summary["time_since_last_purchase"] = summary["T_cal"] - summary["recency_cal"]
ax = (
summary.groupby(["time_since_last_purchase"])[["frequency_holdout", "model_predictions"]]
.mean()
.iloc[:n]
.plot(**kwargs)
)
else:
ax = summary.groupby(kind)[["frequency_holdout", "model_predictions"]].mean().iloc[:n].plot(**kwargs)
plt.title("Actual Purchases in Holdout Period vs Predicted Purchases")
plt.xlabel(x_labels[kind])
plt.ylabel("Average of Purchases in Holdout Period")
plt.legend()
return ax | Plot calibration purchases vs holdout.
This currently relies too much on the lifetimes.util calibration_and_holdout_data function.
Parameters
----------
model: lifetimes model
A fitted lifetimes model.
calibration_holdout_matrix: pandas DataFrame
DataFrame from calibration_and_holdout_data function.
kind: str, optional
x-axis :"frequency_cal". Purchases in calibration period,
"recency_cal". Age of customer at last purchase,
"T_cal". Age of customer at the end of calibration period,
"time_since_last_purchase". Time since user made last purchase
n: int, optional
Number of ticks on the x axis
Returns
-------
axes: matplotlib.AxesSubplot | Below is the instruction that describes the task:
### Input:
Plot calibration purchases vs holdout.
This currently relies too much on the lifetimes.util calibration_and_holdout_data function.
Parameters
----------
model: lifetimes model
A fitted lifetimes model.
calibration_holdout_matrix: pandas DataFrame
DataFrame from calibration_and_holdout_data function.
kind: str, optional
x-axis :"frequency_cal". Purchases in calibration period,
"recency_cal". Age of customer at last purchase,
"T_cal". Age of customer at the end of calibration period,
"time_since_last_purchase". Time since user made last purchase
n: int, optional
Number of ticks on the x axis
Returns
-------
axes: matplotlib.AxesSubplot
### Response:
def plot_calibration_purchases_vs_holdout_purchases(
model, calibration_holdout_matrix, kind="frequency_cal", n=7, **kwargs
):
"""
Plot calibration purchases vs holdout.
This currently relies too much on the lifetimes.util calibration_and_holdout_data function.
Parameters
----------
model: lifetimes model
A fitted lifetimes model.
calibration_holdout_matrix: pandas DataFrame
DataFrame from calibration_and_holdout_data function.
kind: str, optional
x-axis :"frequency_cal". Purchases in calibration period,
"recency_cal". Age of customer at last purchase,
"T_cal". Age of customer at the end of calibration period,
"time_since_last_purchase". Time since user made last purchase
n: int, optional
Number of ticks on the x axis
Returns
-------
axes: matplotlib.AxesSubplot
"""
from matplotlib import pyplot as plt
x_labels = {
"frequency_cal": "Purchases in calibration period",
"recency_cal": "Age of customer at last purchase",
"T_cal": "Age of customer at the end of calibration period",
"time_since_last_purchase": "Time since user made last purchase",
}
summary = calibration_holdout_matrix.copy()
duration_holdout = summary.iloc[0]["duration_holdout"]
summary["model_predictions"] = model.conditional_expected_number_of_purchases_up_to_time(
duration_holdout, summary["frequency_cal"], summary["recency_cal"], summary["T_cal"])
if kind == "time_since_last_purchase":
summary["time_since_last_purchase"] = summary["T_cal"] - summary["recency_cal"]
ax = (
summary.groupby(["time_since_last_purchase"])[["frequency_holdout", "model_predictions"]]
.mean()
.iloc[:n]
.plot(**kwargs)
)
else:
ax = summary.groupby(kind)[["frequency_holdout", "model_predictions"]].mean().iloc[:n].plot(**kwargs)
plt.title("Actual Purchases in Holdout Period vs Predicted Purchases")
plt.xlabel(x_labels[kind])
plt.ylabel("Average of Purchases in Holdout Period")
plt.legend()
return ax |
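A usage sketch with the lifetimes package, assuming a transactions DataFrame with 'customer_id' and 'date' columns; the cut-off dates are illustrative:
from lifetimes import BetaGeoFitter
from lifetimes.utils import calibration_and_holdout_data
from lifetimes.plotting import plot_calibration_purchases_vs_holdout_purchases

summary_cal_holdout = calibration_and_holdout_data(
    transactions, 'customer_id', 'date',
    calibration_period_end='2014-09-01',
    observation_period_end='2014-12-31')

bgf = BetaGeoFitter()
bgf.fit(summary_cal_holdout['frequency_cal'],
        summary_cal_holdout['recency_cal'],
        summary_cal_holdout['T_cal'])

ax = plot_calibration_purchases_vs_holdout_purchases(bgf, summary_cal_holdout)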
def _is_admin(user_id):
"""
Is the specified user an admin
"""
user = get_session().query(User).filter(User.id==user_id).one()
if user.is_admin():
return True
else:
return False | Is the specified user an admin | Below is the instruction that describes the task:
### Input:
Is the specified user an admin
### Response:
def _is_admin(user_id):
"""
Is the specified user an admin
"""
user = get_session().query(User).filter(User.id==user_id).one()
if user.is_admin():
return True
else:
return False |
def FlagsIntoString(self):
"""Returns a string with the flags assignments from this FlagValues object.
This function ignores flags whose value is None. Each flag
assignment is separated by a newline.
NOTE: MUST mirror the behavior of the C++ CommandlineFlagsIntoString
from http://code.google.com/p/google-gflags
Returns:
string with the flags assignments from this FlagValues object.
"""
s = ''
for flag in self.FlagDict().values():
if flag.value is not None:
s += flag.serialize() + '\n'
return s | Returns a string with the flags assignments from this FlagValues object.
This function ignores flags whose value is None. Each flag
assignment is separated by a newline.
NOTE: MUST mirror the behavior of the C++ CommandlineFlagsIntoString
from http://code.google.com/p/google-gflags
Returns:
string with the flags assignments from this FlagValues object. | Below is the instruction that describes the task:
### Input:
Returns a string with the flags assignments from this FlagValues object.
This function ignores flags whose value is None. Each flag
assignment is separated by a newline.
NOTE: MUST mirror the behavior of the C++ CommandlineFlagsIntoString
from http://code.google.com/p/google-gflags
Returns:
string with the flags assignments from this FlagValues object.
### Response:
def FlagsIntoString(self):
"""Returns a string with the flags assignments from this FlagValues object.
This function ignores flags whose value is None. Each flag
assignment is separated by a newline.
NOTE: MUST mirror the behavior of the C++ CommandlineFlagsIntoString
from http://code.google.com/p/google-gflags
Returns:
string with the flags assignments from this FlagValues object.
"""
s = ''
for flag in self.FlagDict().values():
if flag.value is not None:
s += flag.serialize() + '\n'
return s |
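A short sketch assuming the python-gflags package, whose FlagValues defines this method; the flag names and the printed output are illustrative:
import gflags

gflags.DEFINE_string('name', 'world', 'Who to greet')
gflags.DEFINE_integer('retries', 3, 'Retry count')

FLAGS = gflags.FLAGS
FLAGS(['prog', '--name=dev'])        # parse a fake argv

print(FLAGS.FlagsIntoString())
# --name=dev
# --retries=3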
async def dispatch(self, *args, **kwargs):
'''
This method handles the actual request to the resource.
It performs all the necessary checks and then executes the relevant member method which is mapped to the method name.
Handles authentication and de-serialization before calling the required method.
Handles the serialization of the response
'''
method = self.request_method()
# get the db object associated with the app and assign to resource
if hasattr(self.request.app, 'db'):
setattr(self, 'db', self.request.app.db)
# check if method is allowed
if method not in self.methods.get(self.endpoint, {}):
raise MethodNotImplemented("Unsupported method '{0}' for {1} endpoint.".format(method, self.endpoint))
if self.is_method_allowed(self.endpoint, method) is False:
raise MethodNotAllowed("Unsupported method '{0}' for {1} endpoint.".format(method, self.endpoint))
# check user authentication
if not await self._meta.authentication.is_authenticated(self.request):
raise Unauthorized()
# deserialize request data
body = await self.request_body()
self.data = self.parse(method, self.endpoint, body)
# if method != 'GET':
# self.data.update(kwargs)
kwargs.update(self.request_args())
view_method = getattr(self, self.methods[self.endpoint][method])
# call request method
data = await view_method(*args, **kwargs)
# add hypermedia to the response, if response is not empty
if data and self._meta.hypermedia is True:
if self.endpoint == 'list' and method == 'GET':
for item in data['objects']:
self.add_hypermedia(item)
else:
self.add_hypermedia(data)
return data | This method handles the actual request to the resource.
It performs all the necessary checks and then executes the relevant member method which is mapped to the method name.
Handles authentication and de-serialization before calling the required method.
Handles the serialization of the response | Below is the instruction that describes the task:
### Input:
This method handles the actual request to the resource.
It performs all the necessary checks and then executes the relevant member method which is mapped to the method name.
Handles authentication and de-serialization before calling the required method.
Handles the serialization of the response
### Response:
async def dispatch(self, *args, **kwargs):
'''
This method handles the actual request to the resource.
It performs all the necessary checks and then executes the relevant member method which is mapped to the method name.
Handles authentication and de-serialization before calling the required method.
Handles the serialization of the response
'''
method = self.request_method()
# get the db object associated with the app and assign to resource
if hasattr(self.request.app, 'db'):
setattr(self, 'db', self.request.app.db)
# check if method is allowed
if method not in self.methods.get(self.endpoint, {}):
raise MethodNotImplemented("Unsupported method '{0}' for {1} endpoint.".format(method, self.endpoint))
if self.is_method_allowed(self.endpoint, method) is False:
raise MethodNotAllowed("Unsupported method '{0}' for {1} endpoint.".format(method, self.endpoint))
# check user authentication
if not await self._meta.authentication.is_authenticated(self.request):
raise Unauthorized()
# deserialize request data
body = await self.request_body()
self.data = self.parse(method, self.endpoint, body)
# if method != 'GET':
# self.data.update(kwargs)
kwargs.update(self.request_args())
view_method = getattr(self, self.methods[self.endpoint][method])
# call request method
data = await view_method(*args, **kwargs)
# add hypermedia to the response, if response is not empty
if data and self._meta.hypermedia is True:
if self.endpoint == 'list' and method == 'GET':
for item in data['objects']:
self.add_hypermedia(item)
else:
self.add_hypermedia(data)
return data |
def _get_trailing_whitespace(marker, s):
"""Return the whitespace content trailing the given 'marker' in string 's',
up to and including a newline.
"""
suffix = ''
start = s.index(marker) + len(marker)
i = start
while i < len(s):
if s[i] in ' \t':
suffix += s[i]
elif s[i] in '\r\n':
suffix += s[i]
if s[i] == '\r' and i + 1 < len(s) and s[i + 1] == '\n':
suffix += s[i + 1]
break
else:
break
i += 1
return suffix | Return the whitespace content trailing the given 'marker' in string 's',
up to and including a newline. | Below is the instruction that describes the task:
### Input:
Return the whitespace content trailing the given 'marker' in string 's',
up to and including a newline.
### Response:
def _get_trailing_whitespace(marker, s):
"""Return the whitespace content trailing the given 'marker' in string 's',
up to and including a newline.
"""
suffix = ''
start = s.index(marker) + len(marker)
i = start
while i < len(s):
if s[i] in ' \t':
suffix += s[i]
elif s[i] in '\r\n':
suffix += s[i]
if s[i] == '\r' and i + 1 < len(s) and s[i + 1] == '\n':
suffix += s[i + 1]
break
else:
break
i += 1
return suffix |
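Tracing the helper above by hand on a short string (assuming it is imported into scope; the marker and text are made up):
s = "name = value;   \r\n    next_line"
_get_trailing_whitespace(";", s)   # '   \r\n' -- spaces, then CRLF, stops after the newline
_get_trailing_whitespace("=", s)   # ' '       -- a single space, stops at 'v'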
def set_itunes_element(self):
"""Set each of the itunes elements."""
self.set_itunes_author_name()
self.set_itunes_block()
self.set_itunes_closed_captioned()
self.set_itunes_duration()
self.set_itunes_explicit()
self.set_itune_image()
self.set_itunes_order()
self.set_itunes_subtitle()
self.set_itunes_summary() | Set each of the itunes elements. | Below is the instruction that describes the task:
### Input:
Set each of the itunes elements.
### Response:
def set_itunes_element(self):
"""Set each of the itunes elements."""
self.set_itunes_author_name()
self.set_itunes_block()
self.set_itunes_closed_captioned()
self.set_itunes_duration()
self.set_itunes_explicit()
self.set_itune_image()
self.set_itunes_order()
self.set_itunes_subtitle()
self.set_itunes_summary() |
def select_postponed_date(self):
"""
The time intervals at which the workflow is to be extended are determined.
.. code-block:: python
# request:
{
'task_inv_key': string,
}
"""
_form = forms.JsonForm(title="Postponed Workflow")
_form.start_date = fields.DateTime("Start Date")
_form.finish_date = fields.DateTime("Finish Date")
_form.save_button = fields.Button("Save")
self.form_out(_form) | The time intervals at which the workflow is to be extended are determined.
.. code-block:: python
# request:
{
'task_inv_key': string,
} | Below is the instruction that describes the task:
### Input:
The time intervals at which the workflow is to be extended are determined.
.. code-block:: python
# request:
{
'task_inv_key': string,
}
### Response:
def select_postponed_date(self):
"""
The time intervals at which the workflow is to be extended are determined.
.. code-block:: python
# request:
{
'task_inv_key': string,
}
"""
_form = forms.JsonForm(title="Postponed Workflow")
_form.start_date = fields.DateTime("Start Date")
_form.finish_date = fields.DateTime("Finish Date")
_form.save_button = fields.Button("Save")
self.form_out(_form) |
def powerUp(self, powerup, interface=None, priority=0):
"""
Installs a powerup (e.g. plugin) on an item or store.
Powerups will be returned in an iterator when queried for using the
'powerupsFor' method. Normally they will be returned in order of
installation [this may change in future versions, so please don't
depend on it]. Higher priorities are returned first. If you have
something that should run before "normal" powerups, pass
POWERUP_BEFORE; if you have something that should run after, pass
POWERUP_AFTER. We suggest not depending too heavily on order of
execution of your powerups, but if finer-grained control is necessary
you may pass any integer. Normal (unspecified) priority is zero.
Powerups will only be installed once on a given item. If you install a
powerup for a given interface with priority 1, then again with priority
30, the powerup will be adjusted to priority 30 but future calls to
powerupsFor will still only return that powerup once.
If no interface or priority are specified, and the class of the
powerup has a "powerupInterfaces" attribute (containing
either a sequence of interfaces, or a sequence of
(interface, priority) tuples), this object will be powered up
with the powerup object on those interfaces.
If no interface or priority are specified and the powerup has
a "__getPowerupInterfaces__" method, it will be called with
an iterable of (interface, priority) tuples, collected from the
"powerupInterfaces" attribute described above. The iterable of
(interface, priority) tuples it returns will then be
installed.
@param powerup: an Item that implements C{interface} (if specified)
@param interface: a zope interface, or None
@param priority: An int; preferably either POWERUP_BEFORE,
POWERUP_AFTER, or unspecified.
@raise TypeError: raises if interface is IPowerupIndirector You may not
install a powerup for IPowerupIndirector because that would be
nonsensical.
"""
if interface is None:
for iface, priority in powerup._getPowerupInterfaces():
self.powerUp(powerup, iface, priority)
elif interface is IPowerupIndirector:
raise TypeError(
"You cannot install a powerup for IPowerupIndirector: " +
powerup)
else:
forc = self.store.findOrCreate(_PowerupConnector,
item=self,
interface=unicode(qual(interface)),
powerup=powerup)
forc.priority = priority | Installs a powerup (e.g. plugin) on an item or store.
Powerups will be returned in an iterator when queried for using the
'powerupsFor' method. Normally they will be returned in order of
installation [this may change in future versions, so please don't
depend on it]. Higher priorities are returned first. If you have
something that should run before "normal" powerups, pass
POWERUP_BEFORE; if you have something that should run after, pass
POWERUP_AFTER. We suggest not depending too heavily on order of
execution of your powerups, but if finer-grained control is necessary
you may pass any integer. Normal (unspecified) priority is zero.
Powerups will only be installed once on a given item. If you install a
powerup for a given interface with priority 1, then again with priority
30, the powerup will be adjusted to priority 30 but future calls to
powerupsFor will still only return that powerup once.
If no interface or priority are specified, and the class of the
powerup has a "powerupInterfaces" attribute (containing
either a sequence of interfaces, or a sequence of
(interface, priority) tuples), this object will be powered up
with the powerup object on those interfaces.
If no interface or priority are specified and the powerup has
a "__getPowerupInterfaces__" method, it will be called with
an iterable of (interface, priority) tuples, collected from the
"powerupInterfaces" attribute described above. The iterable of
(interface, priority) tuples it returns will then be
installed.
@param powerup: an Item that implements C{interface} (if specified)
@param interface: a zope interface, or None
@param priority: An int; preferably either POWERUP_BEFORE,
POWERUP_AFTER, or unspecified.
@raise TypeError: raises if interface is IPowerupIndirector You may not
install a powerup for IPowerupIndirector because that would be
nonsensical. | Below is the instruction that describes the task:
### Input:
Installs a powerup (e.g. plugin) on an item or store.
Powerups will be returned in an iterator when queried for using the
'powerupsFor' method. Normally they will be returned in order of
installation [this may change in future versions, so please don't
depend on it]. Higher priorities are returned first. If you have
something that should run before "normal" powerups, pass
POWERUP_BEFORE; if you have something that should run after, pass
POWERUP_AFTER. We suggest not depending too heavily on order of
execution of your powerups, but if finer-grained control is necessary
you may pass any integer. Normal (unspecified) priority is zero.
Powerups will only be installed once on a given item. If you install a
powerup for a given interface with priority 1, then again with priority
30, the powerup will be adjusted to priority 30 but future calls to
powerupsFor will still only return that powerup once.
If no interface or priority are specified, and the class of the
powerup has a "powerupInterfaces" attribute (containing
either a sequence of interfaces, or a sequence of
(interface, priority) tuples), this object will be powered up
with the powerup object on those interfaces.
If no interface or priority are specified and the powerup has
a "__getPowerupInterfaces__" method, it will be called with
an iterable of (interface, priority) tuples, collected from the
"powerupInterfaces" attribute described above. The iterable of
(interface, priority) tuples it returns will then be
installed.
@param powerup: an Item that implements C{interface} (if specified)
@param interface: a zope interface, or None
@param priority: An int; preferably either POWERUP_BEFORE,
POWERUP_AFTER, or unspecified.
@raise TypeError: raises if interface is IPowerupIndirector You may not
install a powerup for IPowerupIndirector because that would be
nonsensical.
### Response:
def powerUp(self, powerup, interface=None, priority=0):
"""
Installs a powerup (e.g. plugin) on an item or store.
Powerups will be returned in an iterator when queried for using the
'powerupsFor' method. Normally they will be returned in order of
installation [this may change in future versions, so please don't
depend on it]. Higher priorities are returned first. If you have
something that should run before "normal" powerups, pass
POWERUP_BEFORE; if you have something that should run after, pass
POWERUP_AFTER. We suggest not depending too heavily on order of
execution of your powerups, but if finer-grained control is necessary
you may pass any integer. Normal (unspecified) priority is zero.
Powerups will only be installed once on a given item. If you install a
powerup for a given interface with priority 1, then again with priority
30, the powerup will be adjusted to priority 30 but future calls to
powerupsFor will still only return that powerup once.
If no interface or priority are specified, and the class of the
powerup has a "powerupInterfaces" attribute (containing
either a sequence of interfaces, or a sequence of
(interface, priority) tuples), this object will be powered up
with the powerup object on those interfaces.
If no interface or priority are specified and the powerup has
a "__getPowerupInterfaces__" method, it will be called with
an iterable of (interface, priority) tuples, collected from the
"powerupInterfaces" attribute described above. The iterable of
(interface, priority) tuples it returns will then be
installed.
@param powerup: an Item that implements C{interface} (if specified)
@param interface: a zope interface, or None
@param priority: An int; preferably either POWERUP_BEFORE,
POWERUP_AFTER, or unspecified.
@raise TypeError: raises if interface is IPowerupIndirector You may not
install a powerup for IPowerupIndirector because that would be
nonsensical.
"""
if interface is None:
for iface, priority in powerup._getPowerupInterfaces():
self.powerUp(powerup, iface, priority)
elif interface is IPowerupIndirector:
raise TypeError(
"You cannot install a powerup for IPowerupIndirector: " +
powerup)
else:
forc = self.store.findOrCreate(_PowerupConnector,
item=self,
interface=unicode(qual(interface)),
powerup=powerup)
forc.priority = priority |
def start_gen(port, ser='msgpack', version='2.2', detector='AGIPD',
raw=False, nsources=1, datagen='random', *,
debug=True):
""""Karabo bridge server simulation.
Simulate a Karabo Bridge server and send random data from a detector,
either AGIPD or LPD.
Parameters
----------
port: str
The port on which the server is bound.
ser: str, optional
The serialization algorithm, default is msgpack.
version: str, optional
The container version of the serialized data.
detector: str, optional
The data format to send, default is AGIPD detector.
raw: bool, optional
Generate raw data output if True, else CORRECTED. Default is False.
nsources: int, optional
Number of sources.
datagen: string, optional
Generator function used to generate detector data. Default is random.
"""
context = zmq.Context()
socket = context.socket(zmq.REP)
socket.setsockopt(zmq.LINGER, 0)
socket.bind('tcp://*:{}'.format(port))
if ser != 'msgpack':
raise ValueError("Unknown serialisation format %s" % ser)
serialize = partial(msgpack.dumps, use_bin_type=True)
det = Detector.getDetector(detector, raw=raw, gen=datagen)
generator = generate(det, nsources)
print('Simulated Karabo-bridge server started on:\ntcp://{}:{}'.format(
uname().nodename, port))
t_prev = time()
n = 0
try:
while True:
msg = socket.recv()
if msg == b'next':
train = next(generator)
msg = containize(train, ser, serialize, version)
socket.send_multipart(msg, copy=False)
if debug:
print('Server : emitted train:',
train[1][list(train[1].keys())[0]]['timestamp.tid'])
n += 1
if n % TIMING_INTERVAL == 0:
t_now = time()
print('Sent {} trains in {:.2f} seconds ({:.2f} Hz)'
''.format(TIMING_INTERVAL, t_now - t_prev,
TIMING_INTERVAL / (t_now - t_prev)))
t_prev = t_now
else:
print('wrong request')
break
except KeyboardInterrupt:
print('\nStopped.')
finally:
socket.close()
context.destroy() | Karabo bridge server simulation.
Simulate a Karabo Bridge server and send random data from a detector,
either AGIPD or LPD.
Parameters
----------
port: str
The port on which the server is bound.
ser: str, optional
The serialization algorithm, default is msgpack.
version: str, optional
The container version of the serialized data.
detector: str, optional
The data format to send, default is AGIPD detector.
raw: bool, optional
Generate raw data output if True, else CORRECTED. Default is False.
nsources: int, optional
Number of sources.
datagen: string, optional
Generator function used to generate detector data. Default is random. | Below is the instruction that describes the task:
### Input:
Karabo bridge server simulation.
Simulate a Karabo Bridge server and send random data from a detector,
either AGIPD or LPD.
Parameters
----------
port: str
The port on which the server is bound.
ser: str, optional
The serialization algorithm, default is msgpack.
version: str, optional
The container version of the serialized data.
detector: str, optional
The data format to send, default is AGIPD detector.
raw: bool, optional
Generate raw data output if True, else CORRECTED. Default is False.
nsources: int, optional
Number of sources.
datagen: string, optional
Generator function used to generate detector data. Default is random.
### Response:
def start_gen(port, ser='msgpack', version='2.2', detector='AGIPD',
raw=False, nsources=1, datagen='random', *,
debug=True):
""""Karabo bridge server simulation.
Simulate a Karabo Bridge server and send random data from a detector,
either AGIPD or LPD.
Parameters
----------
port: str
The port on which the server is bound.
ser: str, optional
The serialization algorithm, default is msgpack.
version: str, optional
The container version of the serialized data.
detector: str, optional
The data format to send, default is AGIPD detector.
raw: bool, optional
Generate raw data output if True, else CORRECTED. Default is False.
nsources: int, optional
Number of sources.
datagen: string, optional
Generator function used to generate detector data. Default is random.
"""
context = zmq.Context()
socket = context.socket(zmq.REP)
socket.setsockopt(zmq.LINGER, 0)
socket.bind('tcp://*:{}'.format(port))
if ser != 'msgpack':
raise ValueError("Unknown serialisation format %s" % ser)
serialize = partial(msgpack.dumps, use_bin_type=True)
det = Detector.getDetector(detector, raw=raw, gen=datagen)
generator = generate(det, nsources)
print('Simulated Karabo-bridge server started on:\ntcp://{}:{}'.format(
uname().nodename, port))
t_prev = time()
n = 0
try:
while True:
msg = socket.recv()
if msg == b'next':
train = next(generator)
msg = containize(train, ser, serialize, version)
socket.send_multipart(msg, copy=False)
if debug:
print('Server : emitted train:',
train[1][list(train[1].keys())[0]]['timestamp.tid'])
n += 1
if n % TIMING_INTERVAL == 0:
t_now = time()
print('Sent {} trains in {:.2f} seconds ({:.2f} Hz)'
''.format(TIMING_INTERVAL, t_now - t_prev,
TIMING_INTERVAL / (t_now - t_prev)))
t_prev = t_now
else:
print('wrong request')
break
except KeyboardInterrupt:
print('\nStopped.')
finally:
socket.close()
context.destroy() |
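A sketch of exercising the simulated server with the karabo_bridge client; the import path for start_gen and the port are assumptions:
from multiprocessing import Process
from karabo_bridge import Client
from karabo_bridge.simulation import start_gen   # import path assumed

server = Process(target=start_gen, args=(4545,), kwargs={'detector': 'LPD'})
server.start()

client = Client('tcp://localhost:4545')
data, metadata = client.next()     # one simulated train per 'next' request
print(sorted(data.keys()))         # source names of the simulated detector
server.terminate()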
def rank(self):
""" Returns the rank of this worker node.
Returns
-------
rank : int
The rank of this node, which is in range [0, num_workers())
"""
rank = ctypes.c_int()
check_call(_LIB.MXKVStoreGetRank(self.handle, ctypes.byref(rank)))
return rank.value | Returns the rank of this worker node.
Returns
-------
rank : int
The rank of this node, which is in range [0, num_workers()) | Below is the instruction that describes the task:
### Input:
Returns the rank of this worker node.
Returns
-------
rank : int
The rank of this node, which is in range [0, num_workers())
### Response:
def rank(self):
""" Returns the rank of this worker node.
Returns
-------
rank : int
The rank of this node, which is in range [0, num_workers())
"""
rank = ctypes.c_int()
check_call(_LIB.MXKVStoreGetRank(self.handle, ctypes.byref(rank)))
return rank.value |
def state_args(id_, state, high):
'''
Return a set of the arguments passed to the named state
'''
args = set()
if id_ not in high:
return args
if state not in high[id_]:
return args
for item in high[id_][state]:
if not isinstance(item, dict):
continue
if len(item) != 1:
continue
args.add(next(iter(item)))
return args | Return a set of the arguments passed to the named state | Below is the the instruction that describes the task:
### Input:
Return a set of the arguments passed to the named state
### Response:
def state_args(id_, state, high):
'''
Return a set of the arguments passed to the named state
'''
args = set()
if id_ not in high:
return args
if state not in high[id_]:
return args
for item in high[id_][state]:
if not isinstance(item, dict):
continue
if len(item) != 1:
continue
args.add(next(iter(item)))
return args |
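A worked example of the highstate structure state_args() walks; the SLS content is invented and the call assumes the function is in scope:
high = {
    'apache': {
        'pkg': [{'name': 'httpd'}, {'version': '2.4'}, 'installed'],
        'service': ['running'],
    }
}

state_args('apache', 'pkg', high)      # {'name', 'version'}
state_args('apache', 'service', high)  # set()  (no dict arguments)
state_args('nginx', 'pkg', high)       # set()  (unknown id)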
def edit_config_input_target_config_target_candidate_candidate(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
edit_config = ET.Element("edit_config")
config = edit_config
input = ET.SubElement(edit_config, "input")
target = ET.SubElement(input, "target")
config_target = ET.SubElement(target, "config-target")
candidate = ET.SubElement(config_target, "candidate")
candidate = ET.SubElement(candidate, "candidate")
callback = kwargs.pop('callback', self._callback)
return callback(config) | Auto Generated Code | Below is the the instruction that describes the task:
### Input:
Auto Generated Code
### Response:
def edit_config_input_target_config_target_candidate_candidate(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
edit_config = ET.Element("edit_config")
config = edit_config
input = ET.SubElement(edit_config, "input")
target = ET.SubElement(input, "target")
config_target = ET.SubElement(target, "config-target")
candidate = ET.SubElement(config_target, "candidate")
candidate = ET.SubElement(candidate, "candidate")
callback = kwargs.pop('callback', self._callback)
return callback(config) |
def initialize_state(self):
""" Call this to initialize the state of the UI after everything has been connected. """
if self.__hardware_source:
self.__data_item_states_changed_event_listener = self.__hardware_source.data_item_states_changed_event.listen(self.__data_item_states_changed)
self.__acquisition_state_changed_event_listener = self.__hardware_source.acquisition_state_changed_event.listen(self.__acquisition_state_changed)
if self.on_display_name_changed:
self.on_display_name_changed(self.display_name)
self.__update_buttons()
if self.on_data_item_states_changed:
self.on_data_item_states_changed(list()) | Call this to initialize the state of the UI after everything has been connected. | Below is the the instruction that describes the task:
### Input:
Call this to initialize the state of the UI after everything has been connected.
### Response:
def initialize_state(self):
""" Call this to initialize the state of the UI after everything has been connected. """
if self.__hardware_source:
self.__data_item_states_changed_event_listener = self.__hardware_source.data_item_states_changed_event.listen(self.__data_item_states_changed)
self.__acquisition_state_changed_event_listener = self.__hardware_source.acquisition_state_changed_event.listen(self.__acquisition_state_changed)
if self.on_display_name_changed:
self.on_display_name_changed(self.display_name)
self.__update_buttons()
if self.on_data_item_states_changed:
self.on_data_item_states_changed(list()) |
def weekday_to_str(
weekday: Union[int, str], *, inverse: bool = False
) -> Union[int, str]:
"""
Given a weekday number (integer in the range 0, 1, ..., 6),
return its corresponding weekday name as a lowercase string.
Here 0 -> 'monday', 1 -> 'tuesday', and so on.
If ``inverse``, then perform the inverse operation.
"""
s = [
"monday",
"tuesday",
"wednesday",
"thursday",
"friday",
"saturday",
"sunday",
]
if not inverse:
try:
return s[weekday]
except:
return
else:
try:
return s.index(weekday)
except:
return | Given a weekday number (integer in the range 0, 1, ..., 6),
return its corresponding weekday name as a lowercase string.
Here 0 -> 'monday', 1 -> 'tuesday', and so on.
If ``inverse``, then perform the inverse operation. | Below is the instruction that describes the task:
### Input:
Given a weekday number (integer in the range 0, 1, ..., 6),
return its corresponding weekday name as a lowercase string.
Here 0 -> 'monday', 1 -> 'tuesday', and so on.
If ``inverse``, then perform the inverse operation.
### Response:
def weekday_to_str(
weekday: Union[int, str], *, inverse: bool = False
) -> Union[int, str]:
"""
Given a weekday number (integer in the range 0, 1, ..., 6),
return its corresponding weekday name as a lowercase string.
Here 0 -> 'monday', 1 -> 'tuesday', and so on.
If ``inverse``, then perform the inverse operation.
"""
s = [
"monday",
"tuesday",
"wednesday",
"thursday",
"friday",
"saturday",
"sunday",
]
if not inverse:
try:
return s[weekday]
except:
return
else:
try:
return s.index(weekday)
except:
return |
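Behaviour of weekday_to_str() on a few inputs, assuming it is imported from its module:
weekday_to_str(0)                       # 'monday'
weekday_to_str(6)                       # 'sunday'
weekday_to_str('friday', inverse=True)  # 4
weekday_to_str(9)                       # None  (index errors are swallowed)
weekday_to_str('Friday', inverse=True)  # None  (names must be lowercase)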
def selector_production(self, tokens):
"""Production for a full selector."""
validators = []
# the following productions should return predicate functions.
if self.peek(tokens, 'type'):
type_ = self.match(tokens, 'type')
validators.append(self.type_production(type_))
if self.peek(tokens, 'identifier'):
key = self.match(tokens, 'identifier')
validators.append(self.key_production(key))
if self.peek(tokens, 'pclass'):
pclass = self.match(tokens, 'pclass')
validators.append(self.pclass_production(pclass))
if self.peek(tokens, 'nth_func'):
nth_func = self.match(tokens, 'nth_func')
validators.append(self.nth_child_production(nth_func, tokens))
if self.peek(tokens, 'pclass_func'):
pclass_func = self.match(tokens, 'pclass_func')
validators.append(self.pclass_func_production(pclass_func, tokens))
if not len(validators):
raise SelectorSyntaxError('no selector recognized.')
# apply validators from a selector expression to self.obj
results = self._match_nodes(validators, self.obj)
if self.peek(tokens, 'operator'):
operator = self.match(tokens, 'operator')
rvals = self.selector_production(tokens)
if operator == ',':
results.extend(rvals)
elif operator == '>':
results = self.parents(results, rvals)
elif operator == '~':
results = self.siblings(results, rvals)
elif operator == ' ':
results = self.ancestors(results, rvals)
else:
raise SelectorSyntaxError("unrecognized operator '%s'"
% operator)
else:
if len(tokens):
rvals = self.selector_production(tokens)
results = self.ancestors(results, rvals)
return results | Production for a full selector. | Below is the the instruction that describes the task:
### Input:
Production for a full selector.
### Response:
def selector_production(self, tokens):
"""Production for a full selector."""
validators = []
# the following productions should return predicate functions.
if self.peek(tokens, 'type'):
type_ = self.match(tokens, 'type')
validators.append(self.type_production(type_))
if self.peek(tokens, 'identifier'):
key = self.match(tokens, 'identifier')
validators.append(self.key_production(key))
if self.peek(tokens, 'pclass'):
pclass = self.match(tokens, 'pclass')
validators.append(self.pclass_production(pclass))
if self.peek(tokens, 'nth_func'):
nth_func = self.match(tokens, 'nth_func')
validators.append(self.nth_child_production(nth_func, tokens))
if self.peek(tokens, 'pclass_func'):
pclass_func = self.match(tokens, 'pclass_func')
validators.append(self.pclass_func_production(pclass_func, tokens))
if not len(validators):
raise SelectorSyntaxError('no selector recognized.')
# apply validators from a selector expression to self.obj
results = self._match_nodes(validators, self.obj)
if self.peek(tokens, 'operator'):
operator = self.match(tokens, 'operator')
rvals = self.selector_production(tokens)
if operator == ',':
results.extend(rvals)
elif operator == '>':
results = self.parents(results, rvals)
elif operator == '~':
results = self.siblings(results, rvals)
elif operator == ' ':
results = self.ancestors(results, rvals)
else:
raise SelectorSyntaxError("unrecognized operator '%s'"
% operator)
else:
if len(tokens):
rvals = self.selector_production(tokens)
results = self.ancestors(results, rvals)
return results |
def write_file(filename, contents):
"""Create a file with the specified name and write 'contents' (a
sequence of strings without line terminators) to it.
"""
contents = "\n".join(contents)
# assuming the contents has been vetted for utf-8 encoding
contents = contents.encode("utf-8")
with open(filename, "wb") as f: # always write POSIX-style manifest
f.write(contents) | Create a file with the specified name and write 'contents' (a
sequence of strings without line terminators) to it. | Below is the the instruction that describes the task:
### Input:
Create a file with the specified name and write 'contents' (a
sequence of strings without line terminators) to it.
### Response:
def write_file(filename, contents):
"""Create a file with the specified name and write 'contents' (a
sequence of strings without line terminators) to it.
"""
contents = "\n".join(contents)
# assuming the contents has been vetted for utf-8 encoding
contents = contents.encode("utf-8")
with open(filename, "wb") as f: # always write POSIX-style manifest
f.write(contents) |
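A minimal usage sketch; the file name and lines are made up:

write_file('MANIFEST.in', ['include README.rst', 'recursive-include docs *.txt'])
# the lines are joined with '\n', encoded as UTF-8 and written in binary mode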
def get_top_edge_depth(self):
"""
Return minimum depth of surface's top edge.
:returns:
Float value, the vertical distance between the earth surface
and the shallowest point in surface's top edge in km.
"""
top_edge = self.mesh[0:1]
if top_edge.depths is None:
return 0
else:
return numpy.min(top_edge.depths) | Return minimum depth of surface's top edge.
:returns:
Float value, the vertical distance between the earth surface
and the shallowest point in surface's top edge in km. | Below is the the instruction that describes the task:
### Input:
Return minimum depth of surface's top edge.
:returns:
Float value, the vertical distance between the earth surface
and the shallowest point in surface's top edge in km.
### Response:
def get_top_edge_depth(self):
"""
Return minimum depth of surface's top edge.
:returns:
Float value, the vertical distance between the earth surface
and the shallowest point in surface's top edge in km.
"""
top_edge = self.mesh[0:1]
if top_edge.depths is None:
return 0
else:
return numpy.min(top_edge.depths) |
def measure_all(self, *qubit_reg_pairs):
"""
Measures many qubits into their specified classical bits, in the order
they were entered. If no qubit/register pairs are provided, measure all qubits present in
the program into classical addresses of the same index.
:param Tuple qubit_reg_pairs: Tuples of qubit indices paired with classical bits.
:return: The Quil Program with the appropriate measure instructions appended, e.g.
.. code::
MEASURE 0 [1]
MEASURE 1 [2]
MEASURE 2 [3]
:rtype: Program
"""
if qubit_reg_pairs == ():
qubit_inds = self.get_qubits(indices=True)
if len(qubit_inds) == 0:
return self
ro = self.declare('ro', 'BIT', max(qubit_inds) + 1)
for qi in qubit_inds:
self.inst(MEASURE(qi, ro[qi]))
else:
for qubit_index, classical_reg in qubit_reg_pairs:
self.inst(MEASURE(qubit_index, classical_reg))
return self | Measures many qubits into their specified classical bits, in the order
they were entered. If no qubit/register pairs are provided, measure all qubits present in
the program into classical addresses of the same index.
:param Tuple qubit_reg_pairs: Tuples of qubit indices paired with classical bits.
:return: The Quil Program with the appropriate measure instructions appended, e.g.
.. code::
MEASURE 0 [1]
MEASURE 1 [2]
MEASURE 2 [3]
:rtype: Program | Below is the the instruction that describes the task:
### Input:
Measures many qubits into their specified classical bits, in the order
they were entered. If no qubit/register pairs are provided, measure all qubits present in
the program into classical addresses of the same index.
:param Tuple qubit_reg_pairs: Tuples of qubit indices paired with classical bits.
:return: The Quil Program with the appropriate measure instructions appended, e.g.
.. code::
MEASURE 0 [1]
MEASURE 1 [2]
MEASURE 2 [3]
:rtype: Program
### Response:
def measure_all(self, *qubit_reg_pairs):
"""
Measures many qubits into their specified classical bits, in the order
they were entered. If no qubit/register pairs are provided, measure all qubits present in
the program into classical addresses of the same index.
:param Tuple qubit_reg_pairs: Tuples of qubit indices paired with classical bits.
:return: The Quil Program with the appropriate measure instructions appended, e.g.
.. code::
MEASURE 0 [1]
MEASURE 1 [2]
MEASURE 2 [3]
:rtype: Program
"""
if qubit_reg_pairs == ():
qubit_inds = self.get_qubits(indices=True)
if len(qubit_inds) == 0:
return self
ro = self.declare('ro', 'BIT', max(qubit_inds) + 1)
for qi in qubit_inds:
self.inst(MEASURE(qi, ro[qi]))
else:
for qubit_index, classical_reg in qubit_reg_pairs:
self.inst(MEASURE(qubit_index, classical_reg))
return self |
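A hedged pyQuil-style usage sketch, assuming the usual Program and gate imports (which are not part of the row above):

from pyquil import Program
from pyquil.gates import H, CNOT

p = Program(H(0), CNOT(0, 1)).measure_all()
# declares ro as BIT[2] and appends MEASURE 0 ro[0] and MEASURE 1 ro[1]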
async def update_template_context(self, context: dict) -> None:
"""Update the provided template context.
This adds additional context from the various template context
processors.
Arguments:
context: The context to update (mutate).
"""
processors = self.template_context_processors[None]
if has_request_context():
blueprint = _request_ctx_stack.top.request.blueprint
if blueprint is not None and blueprint in self.template_context_processors:
processors = chain(processors, self.template_context_processors[blueprint]) # type: ignore # noqa
extra_context: dict = {}
for processor in processors:
extra_context.update(await processor())
original = context.copy()
context.update(extra_context)
context.update(original) | Update the provided template context.
This adds additional context from the various template context
processors.
Arguments:
context: The context to update (mutate). | Below is the the instruction that describes the task:
### Input:
Update the provided template context.
This adds additional context from the various template context
processors.
Arguments:
context: The context to update (mutate).
### Response:
async def update_template_context(self, context: dict) -> None:
"""Update the provided template context.
This adds additional context from the various template context
processors.
Arguments:
context: The context to update (mutate).
"""
processors = self.template_context_processors[None]
if has_request_context():
blueprint = _request_ctx_stack.top.request.blueprint
if blueprint is not None and blueprint in self.template_context_processors:
processors = chain(processors, self.template_context_processors[blueprint]) # type: ignore # noqa
extra_context: dict = {}
for processor in processors:
extra_context.update(await processor())
original = context.copy()
context.update(extra_context)
context.update(original) |
def getoptT(X, W, Y, Z, S, M_E, E, m0, rho):
''' Perform line search
'''
iter_max = 20
norm2WZ = np.linalg.norm(W, ord='fro')**2 + np.linalg.norm(Z, ord='fro')**2
f = np.zeros(iter_max + 1)
f[0] = F_t(X, Y, S, M_E, E, m0, rho)
t = -1e-1
for i in range(iter_max):
f[i + 1] = F_t(X + t * W, Y + t * Z, S, M_E, E, m0, rho)
if f[i + 1] - f[0] <= 0.5 * t * norm2WZ:
return t
t /= 2
return t | Perform line search | Below is the the instruction that describes the task:
### Input:
Perform line search
### Response:
def getoptT(X, W, Y, Z, S, M_E, E, m0, rho):
''' Perform line search
'''
iter_max = 20
norm2WZ = np.linalg.norm(W, ord='fro')**2 + np.linalg.norm(Z, ord='fro')**2
f = np.zeros(iter_max + 1)
f[0] = F_t(X, Y, S, M_E, E, m0, rho)
t = -1e-1
for i in range(iter_max):
f[i + 1] = F_t(X + t * W, Y + t * Z, S, M_E, E, m0, rho)
if f[i + 1] - f[0] <= 0.5 * t * norm2WZ:
return t
t /= 2
return t |
def liftover(args):
"""
%prog liftover lobstr_v3.0.2_hg38_ref.bed hg38.upper.fa
LiftOver CODIS/Y-STR markers.
"""
p = OptionParser(liftover.__doc__)
p.add_option("--checkvalid", default=False, action="store_true",
help="Check minscore, period and length")
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
refbed, fastafile = args
genome = pyfasta.Fasta(fastafile)
edits = []
fp = open(refbed)
for i, row in enumerate(fp):
s = STRLine(row)
seq = genome[s.seqid][s.start - 1: s.end].upper()
s.motif = get_motif(seq, len(s.motif))
s.fix_counts(seq)
if opts.checkvalid and not s.is_valid():
continue
edits.append(s)
if i % 10000 == 0:
print(i, "lines read", file=sys.stderr)
edits = natsorted(edits, key=lambda x: (x.seqid, x.start))
for e in edits:
print(str(e)) | %prog liftover lobstr_v3.0.2_hg38_ref.bed hg38.upper.fa
LiftOver CODIS/Y-STR markers. | Below is the the instruction that describes the task:
### Input:
%prog liftover lobstr_v3.0.2_hg38_ref.bed hg38.upper.fa
LiftOver CODIS/Y-STR markers.
### Response:
def liftover(args):
"""
%prog liftover lobstr_v3.0.2_hg38_ref.bed hg38.upper.fa
LiftOver CODIS/Y-STR markers.
"""
p = OptionParser(liftover.__doc__)
p.add_option("--checkvalid", default=False, action="store_true",
help="Check minscore, period and length")
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
refbed, fastafile = args
genome = pyfasta.Fasta(fastafile)
edits = []
fp = open(refbed)
for i, row in enumerate(fp):
s = STRLine(row)
seq = genome[s.seqid][s.start - 1: s.end].upper()
s.motif = get_motif(seq, len(s.motif))
s.fix_counts(seq)
if opts.checkvalid and not s.is_valid():
continue
edits.append(s)
if i % 10000 == 0:
print(i, "lines read", file=sys.stderr)
edits = natsorted(edits, key=lambda x: (x.seqid, x.start))
for e in edits:
print(str(e)) |
def actors(context):
"""Display a list of actors"""
fritz = context.obj
fritz.login()
for actor in fritz.get_actors():
click.echo("{} ({} {}; AIN {} )".format(
actor.name,
actor.manufacturer,
actor.productname,
actor.actor_id,
))
if actor.has_temperature:
click.echo("Temp: act {} target {}; battery (low): {}".format(
actor.temperature,
actor.target_temperature,
actor.battery_low,
))
click.echo("Temp (via get): act {} target {}".format(
actor.get_temperature(),
actor.get_target_temperature(),
)) | Display a list of actors | Below is the the instruction that describes the task:
### Input:
Display a list of actors
### Response:
def actors(context):
"""Display a list of actors"""
fritz = context.obj
fritz.login()
for actor in fritz.get_actors():
click.echo("{} ({} {}; AIN {} )".format(
actor.name,
actor.manufacturer,
actor.productname,
actor.actor_id,
))
if actor.has_temperature:
click.echo("Temp: act {} target {}; battery (low): {}".format(
actor.temperature,
actor.target_temperature,
actor.battery_low,
))
click.echo("Temp (via get): act {} target {}".format(
actor.get_temperature(),
actor.get_target_temperature(),
)) |
def insert_volume(self, metadata, attachments=[]):
'''Insert a new volume
Returns the ID of the added volume
`metadata` must be a dict containing metadata of the volume::
{
"_language" : "it", # language of the metadata
"key1" : "value1", # attribute
"key2" : "value2",
...
"keyN" : "valueN"
}
The only required key is `_language`
`attachments` must be an array of dict::
{
"file" : "/prova/una/path/a/caso" # path or fp
"name" : "nome_buffo.ext" # name of the file (extension included) [optional if a path was given]
"mime" : "application/json" # mime type of the file [optional]
"notes" : "this file is awesome" # notes that will be attached to this file [optional]
}
'''
log.debug("adding new volume:\n\tdata: {}\n\tfiles: {}".format(metadata, attachments))
requiredFields = ['_language']
for requiredField in requiredFields:
if requiredField not in metadata:
raise KeyError("Required field '{}' is missing".format(requiredField))
volume = deepcopy(metadata)
attsData = []
for index, a in enumerate(attachments):
try:
attData = self._assemble_attachment(a['file'], a)
attsData.append(attData)
except Exception:
log.exception("Error while elaborating attachments array at index: {}".format(index))
raise
volume['_attachments'] = attsData
log.debug('constructed volume for insertion: {}'.format(volume))
addedVolume = self._db.add_book(body=volume)
log.debug("added new volume: '{}'".format(addedVolume['_id']))
return addedVolume['_id'] | Insert a new volume
Returns the ID of the added volume
`metadata` must be a dict containing metadata of the volume::
{
"_language" : "it", # language of the metadata
"key1" : "value1", # attribute
"key2" : "value2",
...
"keyN" : "valueN"
}
The only required key is `_language`
`attachments` must be an array of dict::
{
"file" : "/prova/una/path/a/caso" # path or fp
"name" : "nome_buffo.ext" # name of the file (extension included) [optional if a path was given]
"mime" : "application/json" # mime type of the file [optional]
"notes" : "this file is awesome" # notes that will be attached to this file [optional]
} | Below is the the instruction that describes the task:
### Input:
Insert a new volume
Returns the ID of the added volume
`metadata` must be a dict containing metadata of the volume::
{
"_language" : "it", # language of the metadata
"key1" : "value1", # attribute
"key2" : "value2",
...
"keyN" : "valueN"
}
The only required key is `_language`
`attachments` must be an array of dict::
{
"file" : "/prova/una/path/a/caso" # path or fp
"name" : "nome_buffo.ext" # name of the file (extension included) [optional if a path was given]
"mime" : "application/json" # mime type of the file [optional]
"notes" : "this file is awesome" # notes that will be attached to this file [optional]
}
### Response:
def insert_volume(self, metadata, attachments=[]):
'''Insert a new volume
Returns the ID of the added volume
`metadata` must be a dict containing metadata of the volume::
{
"_language" : "it", # language of the metadata
"key1" : "value1", # attribute
"key2" : "value2",
...
"keyN" : "valueN"
}
The only required key is `_language`
`attachments` must be an array of dict::
{
"file" : "/prova/una/path/a/caso" # path or fp
"name" : "nome_buffo.ext" # name of the file (extension included) [optional if a path was given]
"mime" : "application/json" # mime type of the file [optional]
"notes" : "this file is awesome" # notes that will be attached to this file [optional]
}
'''
log.debug("adding new volume:\n\tdata: {}\n\tfiles: {}".format(metadata, attachments))
requiredFields = ['_language']
for requiredField in requiredFields:
if requiredField not in metadata:
raise KeyError("Required field '{}' is missing".format(requiredField))
volume = deepcopy(metadata)
attsData = []
for index, a in enumerate(attachments):
try:
attData = self._assemble_attachment(a['file'], a)
attsData.append(attData)
except Exception:
log.exception("Error while elaborating attachments array at index: {}".format(index))
raise
volume['_attachments'] = attsData
log.debug('constructed volume for insertion: {}'.format(volume))
addedVolume = self._db.add_book(body=volume)
log.debug("added new volume: '{}'".format(addedVolume['_id']))
return addedVolume['_id'] |
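A usage sketch with a hypothetical archive instance and attachment path:

volume_id = archive.insert_volume(
    {'_language': 'en', 'title': 'Example volume'},
    attachments=[{'file': '/tmp/cover.png', 'notes': 'cover image'}])
# raises KeyError if the required '_language' key is missing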
def add_left_child(self, n, parent, **attrs):
'''
API: add_left_child(self, n, parent, **attrs)
Description:
Adds left child n to node parent.
Pre:
Left child of parent should not exist.
Input:
n: Node name.
parent: Parent node name.
attrs: Attributes of node n.
'''
if self.get_left_child(parent) is not None:
msg = "Right child already exists for node " + str(parent)
raise Exception(msg)
attrs['direction'] = 'L'
self.set_node_attr(parent, 'Lchild', n)
self.add_child(n, parent, **attrs) | API: add_left_child(self, n, parent, **attrs)
Description:
Adds left child n to node parent.
Pre:
Left child of parent should not exist.
Input:
n: Node name.
parent: Parent node name.
attrs: Attributes of node n. | Below is the the instruction that describes the task:
### Input:
API: add_left_child(self, n, parent, **attrs)
Description:
Adds left child n to node parent.
Pre:
Left child of parent should not exist.
Input:
n: Node name.
parent: Parent node name.
attrs: Attributes of node n.
### Response:
def add_left_child(self, n, parent, **attrs):
'''
API: add_left_child(self, n, parent, **attrs)
Description:
Adds left child n to node parent.
Pre:
Left child of parent should not exist.
Input:
n: Node name.
parent: Parent node name.
attrs: Attributes of node n.
'''
if self.get_left_child(parent) is not None:
msg = "Right child already exists for node " + str(parent)
raise Exception(msg)
attrs['direction'] = 'L'
self.set_node_attr(parent, 'Lchild', n)
self.add_child(n, parent, **attrs) |
def sum_of_gaussian_factory(N):
"""Return a model of the sum of N Gaussians and a constant background."""
name = "SumNGauss%d" % N
attr = {}
# parameters
for i in range(N):
key = "amplitude_%d" % i
attr[key] = Parameter(key)
key = "center_%d" % i
attr[key] = Parameter(key)
key = "stddev_%d" % i
attr[key] = Parameter(key)
attr['background'] = Parameter('background', default=0.0)
def fit_eval(self, x, *args):
result = x * 0 + args[-1]
for i in range(N):
result += args[3 * i] * \
np.exp(- 0.5 * (x - args[3 * i + 1])
** 2 / args[3 * i + 2] ** 2)
return result
attr['evaluate'] = fit_eval
def deriv(self, x, *args):
d_result = np.ones(((3 * N + 1), len(x)))
for i in range(N):
d_result[3 * i] = (np.exp(-0.5 / args[3 * i + 2] ** 2 *
(x - args[3 * i + 1]) ** 2))
d_result[3 * i + 1] = args[3 * i] * d_result[3 * i] * \
(x - args[3 * i + 1]) / args[3 * i + 2] ** 2
d_result[3 * i + 2] = args[3 * i] * d_result[3 * i] * \
(x - args[3 * i + 1]) ** 2 / args[3 * i + 2] ** 3
return d_result
attr['fit_deriv'] = deriv
klass = type(name, (Fittable1DModel, ), attr)
return klass | Return a model of the sum of N Gaussians and a constant background. | Below is the the instruction that describes the task:
### Input:
Return a model of the sum of N Gaussians and a constant background.
### Response:
def sum_of_gaussian_factory(N):
"""Return a model of the sum of N Gaussians and a constant background."""
name = "SumNGauss%d" % N
attr = {}
# parameters
for i in range(N):
key = "amplitude_%d" % i
attr[key] = Parameter(key)
key = "center_%d" % i
attr[key] = Parameter(key)
key = "stddev_%d" % i
attr[key] = Parameter(key)
attr['background'] = Parameter('background', default=0.0)
def fit_eval(self, x, *args):
result = x * 0 + args[-1]
for i in range(N):
result += args[3 * i] * \
np.exp(- 0.5 * (x - args[3 * i + 1])
** 2 / args[3 * i + 2] ** 2)
return result
attr['evaluate'] = fit_eval
def deriv(self, x, *args):
d_result = np.ones(((3 * N + 1), len(x)))
for i in range(N):
d_result[3 * i] = (np.exp(-0.5 / args[3 * i + 2] ** 2 *
(x - args[3 * i + 1]) ** 2))
d_result[3 * i + 1] = args[3 * i] * d_result[3 * i] * \
(x - args[3 * i + 1]) / args[3 * i + 2] ** 2
d_result[3 * i + 2] = args[3 * i] * d_result[3 * i] * \
(x - args[3 * i + 1]) ** 2 / args[3 * i + 2] ** 3
return d_result
attr['fit_deriv'] = deriv
klass = type(name, (Fittable1DModel, ), attr)
return klass |
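A minimal sketch of using the generated class, assuming astropy.modeling is importable and using made-up parameter values:

import numpy as np

TwoGauss = sum_of_gaussian_factory(2)
model = TwoGauss(amplitude_0=1.0, center_0=-1.0, stddev_0=0.5,
                 amplitude_1=0.5, center_1=2.0, stddev_1=1.0,
                 background=0.1)
y = model(np.linspace(-5, 5, 200))   # two-Gaussian sum plus constant background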
def present(name, value, config=None):
'''
Ensure that the named sysctl value is set in memory and persisted to the
named configuration file. The default sysctl configuration file is
/etc/sysctl.conf
name
The name of the sysctl value to edit
value
The sysctl value to apply
config
The location of the sysctl configuration file. If not specified, the
proper location will be detected based on platform.
'''
ret = {'name': name,
'result': True,
'changes': {},
'comment': ''}
if config is None:
# Certain linux systems will ignore /etc/sysctl.conf, get the right
# default configuration file.
if 'sysctl.default_config' in __salt__:
config = __salt__['sysctl.default_config']()
else:
config = '/etc/sysctl.conf'
if __opts__['test']:
current = __salt__['sysctl.show']()
configured = __salt__['sysctl.show'](config_file=config)
if configured is None:
ret['result'] = None
ret['comment'] = (
'Sysctl option {0} might be changed, we failed to check '
'config file at {1}. The file is either unreadable, or '
'missing.'.format(name, config)
)
return ret
if name in current and name not in configured:
if re.sub(' +|\t+', ' ', current[name]) != \
re.sub(' +|\t+', ' ', six.text_type(value)):
ret['result'] = None
ret['comment'] = (
'Sysctl option {0} set to be changed to {1}'
.format(name, value)
)
return ret
else:
ret['result'] = None
ret['comment'] = (
'Sysctl value is currently set on the running system but '
'not in a config file. Sysctl option {0} set to be '
'changed to {1} in config file.'.format(name, value)
)
return ret
elif name in configured and name not in current:
ret['result'] = None
ret['comment'] = (
'Sysctl value {0} is present in configuration file but is not '
'present in the running config. The value {0} is set to be '
'changed to {1}'.format(name, value)
)
return ret
elif name in configured and name in current:
if six.text_type(value).split() == __salt__['sysctl.get'](name).split():
ret['result'] = True
ret['comment'] = (
'Sysctl value {0} = {1} is already set'
.format(name, value)
)
return ret
# otherwise, we don't have it set anywhere and need to set it
ret['result'] = None
ret['comment'] = (
'Sysctl option {0} would be changed to {1}'.format(name, value)
)
return ret
try:
update = __salt__['sysctl.persist'](name, value, config)
except CommandExecutionError as exc:
ret['result'] = False
ret['comment'] = (
'Failed to set {0} to {1}: {2}'.format(name, value, exc)
)
return ret
if update == 'Updated':
ret['changes'] = {name: value}
ret['comment'] = 'Updated sysctl value {0} = {1}'.format(name, value)
elif update == 'Already set':
ret['comment'] = (
'Sysctl value {0} = {1} is already set'
.format(name, value)
)
return ret | Ensure that the named sysctl value is set in memory and persisted to the
named configuration file. The default sysctl configuration file is
/etc/sysctl.conf
name
The name of the sysctl value to edit
value
The sysctl value to apply
config
The location of the sysctl configuration file. If not specified, the
proper location will be detected based on platform. | Below is the the instruction that describes the task:
### Input:
Ensure that the named sysctl value is set in memory and persisted to the
named configuration file. The default sysctl configuration file is
/etc/sysctl.conf
name
The name of the sysctl value to edit
value
The sysctl value to apply
config
The location of the sysctl configuration file. If not specified, the
proper location will be detected based on platform.
### Response:
def present(name, value, config=None):
'''
Ensure that the named sysctl value is set in memory and persisted to the
named configuration file. The default sysctl configuration file is
/etc/sysctl.conf
name
The name of the sysctl value to edit
value
The sysctl value to apply
config
The location of the sysctl configuration file. If not specified, the
proper location will be detected based on platform.
'''
ret = {'name': name,
'result': True,
'changes': {},
'comment': ''}
if config is None:
# Certain linux systems will ignore /etc/sysctl.conf, get the right
# default configuration file.
if 'sysctl.default_config' in __salt__:
config = __salt__['sysctl.default_config']()
else:
config = '/etc/sysctl.conf'
if __opts__['test']:
current = __salt__['sysctl.show']()
configured = __salt__['sysctl.show'](config_file=config)
if configured is None:
ret['result'] = None
ret['comment'] = (
'Sysctl option {0} might be changed, we failed to check '
'config file at {1}. The file is either unreadable, or '
'missing.'.format(name, config)
)
return ret
if name in current and name not in configured:
if re.sub(' +|\t+', ' ', current[name]) != \
re.sub(' +|\t+', ' ', six.text_type(value)):
ret['result'] = None
ret['comment'] = (
'Sysctl option {0} set to be changed to {1}'
.format(name, value)
)
return ret
else:
ret['result'] = None
ret['comment'] = (
'Sysctl value is currently set on the running system but '
'not in a config file. Sysctl option {0} set to be '
'changed to {1} in config file.'.format(name, value)
)
return ret
elif name in configured and name not in current:
ret['result'] = None
ret['comment'] = (
'Sysctl value {0} is present in configuration file but is not '
'present in the running config. The value {0} is set to be '
'changed to {1}'.format(name, value)
)
return ret
elif name in configured and name in current:
if six.text_type(value).split() == __salt__['sysctl.get'](name).split():
ret['result'] = True
ret['comment'] = (
'Sysctl value {0} = {1} is already set'
.format(name, value)
)
return ret
# otherwise, we don't have it set anywhere and need to set it
ret['result'] = None
ret['comment'] = (
'Sysctl option {0} would be changed to {1}'.format(name, value)
)
return ret
try:
update = __salt__['sysctl.persist'](name, value, config)
except CommandExecutionError as exc:
ret['result'] = False
ret['comment'] = (
'Failed to set {0} to {1}: {2}'.format(name, value, exc)
)
return ret
if update == 'Updated':
ret['changes'] = {name: value}
ret['comment'] = 'Updated sysctl value {0} = {1}'.format(name, value)
elif update == 'Already set':
ret['comment'] = (
'Sysctl value {0} = {1} is already set'
.format(name, value)
)
return ret |
def tagAttributes_while(fdef_master_list,root):
'''Tag each node under root with the appropriate depth. '''
depth = 0
current = root
untagged_nodes = [root]
while untagged_nodes:
current = untagged_nodes.pop()
for x in fdef_master_list:
if jsName(x.path,x.name) == current['name']:
current['path'] = x.path
if 'children' in current:
for child in current['children']:
child["depth"] = depth
untagged_nodes.append(child)
if 'depth' not in current:
current["depth"] = depth
depth += 1
return root | Tag each node under root with the appropriate depth. | Below is the the instruction that describes the task:
### Input:
Tag each node under root with the appropriate depth.
### Response:
def tagAttributes_while(fdef_master_list,root):
'''Tag each node under root with the appropriate depth. '''
depth = 0
current = root
untagged_nodes = [root]
while untagged_nodes:
current = untagged_nodes.pop()
for x in fdef_master_list:
if jsName(x.path,x.name) == current['name']:
current['path'] = x.path
if 'children' in current:
for child in current['children']:
child["depth"] = depth
untagged_nodes.append(child)
if 'depth' not in current:
current["depth"] = depth
depth += 1
return root |
def kill(self, exit_code: Any = None):
"""
Stops the behaviour
Args:
exit_code (object, optional): the exit code of the behaviour (Default value = None)
"""
self._force_kill.set()
if exit_code is not None:
self._exit_code = exit_code
logger.info("Killing behavior {0} with exit code: {1}".format(self, exit_code)) | Stops the behaviour
Args:
exit_code (object, optional): the exit code of the behaviour (Default value = None) | Below is the the instruction that describes the task:
### Input:
Stops the behaviour
Args:
exit_code (object, optional): the exit code of the behaviour (Default value = None)
### Response:
def kill(self, exit_code: Any = None):
"""
Stops the behaviour
Args:
exit_code (object, optional): the exit code of the behaviour (Default value = None)
"""
self._force_kill.set()
if exit_code is not None:
self._exit_code = exit_code
logger.info("Killing behavior {0} with exit code: {1}".format(self, exit_code)) |
def get_supported_binary_ops():
'''
Returns a dictionary of the Weld supported binary ops, with values being their Weld symbol.
'''
binary_ops = {}
binary_ops[np.add.__name__] = '+'
binary_ops[np.subtract.__name__] = '-'
binary_ops[np.multiply.__name__] = '*'
binary_ops[np.divide.__name__] = '/'
return binary_ops | Returns a dictionary of the Weld supported binary ops, with values being their Weld symbol. | Below is the the instruction that describes the task:
### Input:
Returns a dictionary of the Weld supported binary ops, with values being their Weld symbol.
### Response:
def get_supported_binary_ops():
'''
Returns a dictionary of the Weld supported binary ops, with values being their Weld symbol.
'''
binary_ops = {}
binary_ops[np.add.__name__] = '+'
binary_ops[np.subtract.__name__] = '-'
binary_ops[np.multiply.__name__] = '*'
binary_ops[np.divide.__name__] = '/'
return binary_ops |
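For illustration, the returned mapping is keyed by NumPy ufunc names:

import numpy as np

ops = get_supported_binary_ops()
ops[np.add.__name__]        # '+'
ops[np.multiply.__name__]   # '*'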
def process_post_categories(self, bulk_mode, api_post, post_categories):
"""
Create or update Categories related to a post.
:param bulk_mode: If True, minimize db operations by bulk creating post objects
:param api_post: the API data for the post
:param post_categories: a mapping of Categories keyed by post ID
:return: None
"""
post_categories[api_post["ID"]] = []
for api_category in six.itervalues(api_post["categories"]):
category = self.process_post_category(bulk_mode, api_category)
if category:
post_categories[api_post["ID"]].append(category) | Create or update Categories related to a post.
:param bulk_mode: If True, minimize db operations by bulk creating post objects
:param api_post: the API data for the post
:param post_categories: a mapping of Categories keyed by post ID
:return: None | Below is the the instruction that describes the task:
### Input:
Create or update Categories related to a post.
:param bulk_mode: If True, minimize db operations by bulk creating post objects
:param api_post: the API data for the post
:param post_categories: a mapping of Categories keyed by post ID
:return: None
### Response:
def process_post_categories(self, bulk_mode, api_post, post_categories):
"""
Create or update Categories related to a post.
:param bulk_mode: If True, minimize db operations by bulk creating post objects
:param api_post: the API data for the post
:param post_categories: a mapping of Categories keyed by post ID
:return: None
"""
post_categories[api_post["ID"]] = []
for api_category in six.itervalues(api_post["categories"]):
category = self.process_post_category(bulk_mode, api_category)
if category:
post_categories[api_post["ID"]].append(category) |
def _extract_centerdistance(image, mask = slice(None), voxelspacing = None):
"""
Internal, single-image version of `centerdistance`.
"""
image = numpy.array(image, copy=False)
if voxelspacing is None:
voxelspacing = [1.] * image.ndim
# get image center and an array holding the images indices
centers = [(x - 1) / 2. for x in image.shape]
indices = numpy.indices(image.shape, dtype=float)
# shift to center of image and correct spacing to real world coordinates
for dim_indices, c, vs in zip(indices, centers, voxelspacing):
dim_indices -= c
dim_indices *= vs
# compute euclidean distance to image center
return numpy.sqrt(numpy.sum(numpy.square(indices), 0))[mask].ravel() | Internal, single-image version of `centerdistance`. | Below is the the instruction that describes the task:
### Input:
Internal, single-image version of `centerdistance`.
### Response:
def _extract_centerdistance(image, mask = slice(None), voxelspacing = None):
"""
Internal, single-image version of `centerdistance`.
"""
image = numpy.array(image, copy=False)
if voxelspacing is None:
voxelspacing = [1.] * image.ndim
# get image center and an array holding the images indices
centers = [(x - 1) / 2. for x in image.shape]
indices = numpy.indices(image.shape, dtype=float)
# shift to center of image and correct spacing to real world coordinates
for dim_indices, c, vs in zip(indices, centers, voxelspacing):
dim_indices -= c
dim_indices *= vs
# compute euclidean distance to image center
return numpy.sqrt(numpy.sum(numpy.square(indices), 0))[mask].ravel() |
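For intuition, a small example on a 3x3 image with the default unit spacing:

import numpy
_extract_centerdistance(numpy.zeros((3, 3)))
# returns 9 distances: 0.0 for the centre pixel, 1.0 for its edge
# neighbours and sqrt(2) for the four corners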
def create_scalar_summary(name, v):
"""
Args:
name (str):
v (float): scalar value
Returns:
tf.Summary: a tf.Summary object with name and simple scalar value v.
"""
assert isinstance(name, six.string_types), type(name)
v = float(v)
s = tf.Summary()
s.value.add(tag=name, simple_value=v)
return s | Args:
name (str):
v (float): scalar value
Returns:
tf.Summary: a tf.Summary object with name and simple scalar value v. | Below is the the instruction that describes the task:
### Input:
Args:
name (str):
v (float): scalar value
Returns:
tf.Summary: a tf.Summary object with name and simple scalar value v.
### Response:
def create_scalar_summary(name, v):
"""
Args:
name (str):
v (float): scalar value
Returns:
tf.Summary: a tf.Summary object with name and simple scalar value v.
"""
assert isinstance(name, six.string_types), type(name)
v = float(v)
s = tf.Summary()
s.value.add(tag=name, simple_value=v)
return s |
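A hedged TensorFlow 1.x usage sketch (the log directory and step are illustrative):

summary = create_scalar_summary('val/loss', 0.42)
writer = tf.summary.FileWriter('/tmp/logs')
writer.add_summary(summary, global_step=100)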
def geotiff_tags(self):
"""Return consolidated metadata from GeoTIFF tags as dict."""
if not self.is_geotiff:
return None
tags = self.tags
gkd = tags['GeoKeyDirectoryTag'].value
if gkd[0] != 1:
log.warning('GeoTIFF tags: invalid GeoKeyDirectoryTag')
return {}
result = {
'KeyDirectoryVersion': gkd[0],
'KeyRevision': gkd[1],
'KeyRevisionMinor': gkd[2],
# 'NumberOfKeys': gkd[3],
}
# deltags = ['GeoKeyDirectoryTag']
geokeys = TIFF.GEO_KEYS
geocodes = TIFF.GEO_CODES
for index in range(gkd[3]):
keyid, tagid, count, offset = gkd[4 + index * 4: index * 4 + 8]
keyid = geokeys.get(keyid, keyid)
if tagid == 0:
value = offset
else:
tagname = TIFF.TAGS[tagid]
# deltags.append(tagname)
value = tags[tagname].value[offset: offset + count]
if tagid == 34737 and count > 1 and value[-1] == '|':
value = value[:-1]
value = value if count > 1 else value[0]
if keyid in geocodes:
try:
value = geocodes[keyid](value)
except Exception:
pass
result[keyid] = value
if 'IntergraphMatrixTag' in tags:
value = tags['IntergraphMatrixTag'].value
value = numpy.array(value)
if len(value) == 16:
value = value.reshape((4, 4)).tolist()
result['IntergraphMatrix'] = value
if 'ModelPixelScaleTag' in tags:
value = numpy.array(tags['ModelPixelScaleTag'].value).tolist()
result['ModelPixelScale'] = value
if 'ModelTiepointTag' in tags:
value = tags['ModelTiepointTag'].value
value = numpy.array(value).reshape((-1, 6)).squeeze().tolist()
result['ModelTiepoint'] = value
if 'ModelTransformationTag' in tags:
value = tags['ModelTransformationTag'].value
value = numpy.array(value).reshape((4, 4)).tolist()
result['ModelTransformation'] = value
# if 'ModelPixelScaleTag' in tags and 'ModelTiepointTag' in tags:
# sx, sy, sz = tags['ModelPixelScaleTag'].value
# tiepoints = tags['ModelTiepointTag'].value
# transforms = []
# for tp in range(0, len(tiepoints), 6):
# i, j, k, x, y, z = tiepoints[tp:tp+6]
# transforms.append([
# [sx, 0.0, 0.0, x - i * sx],
# [0.0, -sy, 0.0, y + j * sy],
# [0.0, 0.0, sz, z - k * sz],
# [0.0, 0.0, 0.0, 1.0]])
# if len(tiepoints) == 6:
# transforms = transforms[0]
# result['ModelTransformation'] = transforms
if 'RPCCoefficientTag' in tags:
rpcc = tags['RPCCoefficientTag'].value
result['RPCCoefficient'] = {
'ERR_BIAS': rpcc[0],
'ERR_RAND': rpcc[1],
'LINE_OFF': rpcc[2],
'SAMP_OFF': rpcc[3],
'LAT_OFF': rpcc[4],
'LONG_OFF': rpcc[5],
'HEIGHT_OFF': rpcc[6],
'LINE_SCALE': rpcc[7],
'SAMP_SCALE': rpcc[8],
'LAT_SCALE': rpcc[9],
'LONG_SCALE': rpcc[10],
'HEIGHT_SCALE': rpcc[11],
'LINE_NUM_COEFF': rpcc[12:33],
'LINE_DEN_COEFF ': rpcc[33:53],
'SAMP_NUM_COEFF': rpcc[53:73],
'SAMP_DEN_COEFF': rpcc[73:]}
return result | Return consolidated metadata from GeoTIFF tags as dict. | Below is the the instruction that describes the task:
### Input:
Return consolidated metadata from GeoTIFF tags as dict.
### Response:
def geotiff_tags(self):
"""Return consolidated metadata from GeoTIFF tags as dict."""
if not self.is_geotiff:
return None
tags = self.tags
gkd = tags['GeoKeyDirectoryTag'].value
if gkd[0] != 1:
log.warning('GeoTIFF tags: invalid GeoKeyDirectoryTag')
return {}
result = {
'KeyDirectoryVersion': gkd[0],
'KeyRevision': gkd[1],
'KeyRevisionMinor': gkd[2],
# 'NumberOfKeys': gkd[3],
}
# deltags = ['GeoKeyDirectoryTag']
geokeys = TIFF.GEO_KEYS
geocodes = TIFF.GEO_CODES
for index in range(gkd[3]):
keyid, tagid, count, offset = gkd[4 + index * 4: index * 4 + 8]
keyid = geokeys.get(keyid, keyid)
if tagid == 0:
value = offset
else:
tagname = TIFF.TAGS[tagid]
# deltags.append(tagname)
value = tags[tagname].value[offset: offset + count]
if tagid == 34737 and count > 1 and value[-1] == '|':
value = value[:-1]
value = value if count > 1 else value[0]
if keyid in geocodes:
try:
value = geocodes[keyid](value)
except Exception:
pass
result[keyid] = value
if 'IntergraphMatrixTag' in tags:
value = tags['IntergraphMatrixTag'].value
value = numpy.array(value)
if len(value) == 16:
value = value.reshape((4, 4)).tolist()
result['IntergraphMatrix'] = value
if 'ModelPixelScaleTag' in tags:
value = numpy.array(tags['ModelPixelScaleTag'].value).tolist()
result['ModelPixelScale'] = value
if 'ModelTiepointTag' in tags:
value = tags['ModelTiepointTag'].value
value = numpy.array(value).reshape((-1, 6)).squeeze().tolist()
result['ModelTiepoint'] = value
if 'ModelTransformationTag' in tags:
value = tags['ModelTransformationTag'].value
value = numpy.array(value).reshape((4, 4)).tolist()
result['ModelTransformation'] = value
# if 'ModelPixelScaleTag' in tags and 'ModelTiepointTag' in tags:
# sx, sy, sz = tags['ModelPixelScaleTag'].value
# tiepoints = tags['ModelTiepointTag'].value
# transforms = []
# for tp in range(0, len(tiepoints), 6):
# i, j, k, x, y, z = tiepoints[tp:tp+6]
# transforms.append([
# [sx, 0.0, 0.0, x - i * sx],
# [0.0, -sy, 0.0, y + j * sy],
# [0.0, 0.0, sz, z - k * sz],
# [0.0, 0.0, 0.0, 1.0]])
# if len(tiepoints) == 6:
# transforms = transforms[0]
# result['ModelTransformation'] = transforms
if 'RPCCoefficientTag' in tags:
rpcc = tags['RPCCoefficientTag'].value
result['RPCCoefficient'] = {
'ERR_BIAS': rpcc[0],
'ERR_RAND': rpcc[1],
'LINE_OFF': rpcc[2],
'SAMP_OFF': rpcc[3],
'LAT_OFF': rpcc[4],
'LONG_OFF': rpcc[5],
'HEIGHT_OFF': rpcc[6],
'LINE_SCALE': rpcc[7],
'SAMP_SCALE': rpcc[8],
'LAT_SCALE': rpcc[9],
'LONG_SCALE': rpcc[10],
'HEIGHT_SCALE': rpcc[11],
'LINE_NUM_COEFF': rpcc[12:33],
'LINE_DEN_COEFF ': rpcc[33:53],
'SAMP_NUM_COEFF': rpcc[53:73],
'SAMP_DEN_COEFF': rpcc[73:]}
return result |
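A usage sketch assuming tifffile's TiffFile and a hypothetical GeoTIFF path; recent tifffile releases expose this as a page property:

with TiffFile('example_geotiff.tif') as tif:
    meta = tif.pages[0].geotiff_tags
# None for non-GeoTIFF pages, otherwise a dict with keys such as
# 'ModelPixelScale' and 'ModelTiepoint'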
def clean_path_middleware(environ, start_response=None):
'''Clean url from double slashes and redirect if needed.'''
path = environ['PATH_INFO']
if path and '//' in path:
url = re.sub("/+", '/', path)
if not url.startswith('/'):
url = '/%s' % url
qs = environ['QUERY_STRING']
if qs:
url = '%s?%s' % (url, qs)
raise HttpRedirect(url) | Clean url from double slashes and redirect if needed. | Below is the the instruction that describes the task:
### Input:
Clean url from double slashes and redirect if needed.
### Response:
def clean_path_middleware(environ, start_response=None):
'''Clean url from double slashes and redirect if needed.'''
path = environ['PATH_INFO']
if path and '//' in path:
url = re.sub("/+", '/', path)
if not url.startswith('/'):
url = '/%s' % url
qs = environ['QUERY_STRING']
if qs:
url = '%s?%s' % (url, qs)
raise HttpRedirect(url) |
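An illustration of the path cleaning with a hand-built WSGI environ; HttpRedirect is assumed to come from the same framework that defines this middleware:

environ = {'PATH_INFO': '//foo///bar', 'QUERY_STRING': 'x=1'}
try:
    clean_path_middleware(environ)
except HttpRedirect:
    pass   # the exception carries the cleaned location '/foo/bar?x=1'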