code (string, lengths 75–104k) | docstring (string, lengths 1–46.9k) | text (string, lengths 164–112k) |
---|---|---|
def start_proxy(self):
"""
Starts Cloud SQL Proxy.
You have to remember to stop the proxy if you started it!
"""
self._download_sql_proxy_if_needed()
if self.sql_proxy_process:
raise AirflowException("The sql proxy is already running: {}".format(
self.sql_proxy_process))
else:
command_to_run = [self.sql_proxy_path]
command_to_run.extend(self.command_line_parameters)
try:
self.log.info("Creating directory %s",
self.cloud_sql_proxy_socket_directory)
os.makedirs(self.cloud_sql_proxy_socket_directory)
except OSError:
# Needed for python 2 compatibility (exists_ok missing)
pass
command_to_run.extend(self._get_credential_parameters())
self.log.info("Running the command: `%s`", " ".join(command_to_run))
self.sql_proxy_process = Popen(command_to_run,
stdin=PIPE, stdout=PIPE, stderr=PIPE)
self.log.info("The pid of cloud_sql_proxy: %s", self.sql_proxy_process.pid)
while True:
line = self.sql_proxy_process.stderr.readline().decode('utf-8')
return_code = self.sql_proxy_process.poll()
if line == '' and return_code is not None:
self.sql_proxy_process = None
raise AirflowException(
"The cloud_sql_proxy finished early with return code {}!".format(
return_code))
if line != '':
self.log.info(line)
if "googleapi: Error" in line or "invalid instance name:" in line:
self.stop_proxy()
raise AirflowException(
"Error when starting the cloud_sql_proxy {}!".format(
line))
if "Ready for new connections" in line:
return | Starts Cloud SQL Proxy.
You have to remember to stop the proxy if you started it! | Below is the instruction that describes the task:
### Input:
Starts Cloud SQL Proxy.
You have to remember to stop the proxy if you started it!
### Response:
def start_proxy(self):
"""
Starts Cloud SQL Proxy.
You have to remember to stop the proxy if you started it!
"""
self._download_sql_proxy_if_needed()
if self.sql_proxy_process:
raise AirflowException("The sql proxy is already running: {}".format(
self.sql_proxy_process))
else:
command_to_run = [self.sql_proxy_path]
command_to_run.extend(self.command_line_parameters)
try:
self.log.info("Creating directory %s",
self.cloud_sql_proxy_socket_directory)
os.makedirs(self.cloud_sql_proxy_socket_directory)
except OSError:
# Needed for python 2 compatibility (exists_ok missing)
pass
command_to_run.extend(self._get_credential_parameters())
self.log.info("Running the command: `%s`", " ".join(command_to_run))
self.sql_proxy_process = Popen(command_to_run,
stdin=PIPE, stdout=PIPE, stderr=PIPE)
self.log.info("The pid of cloud_sql_proxy: %s", self.sql_proxy_process.pid)
while True:
line = self.sql_proxy_process.stderr.readline().decode('utf-8')
return_code = self.sql_proxy_process.poll()
if line == '' and return_code is not None:
self.sql_proxy_process = None
raise AirflowException(
"The cloud_sql_proxy finished early with return code {}!".format(
return_code))
if line != '':
self.log.info(line)
if "googleapi: Error" in line or "invalid instance name:" in line:
self.stop_proxy()
raise AirflowException(
"Error when starting the cloud_sql_proxy {}!".format(
line))
if "Ready for new connections" in line:
return |
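The hook above follows a general pattern: spawn the proxy as a subprocess, then poll its stderr line by line until either a readiness marker or an error marker appears. Below is a minimal, self-contained sketch of that wait-for-ready loop with a generic command and made-up marker strings; it is an illustration of the pattern, not part of the Airflow hook.

```python
import subprocess

def wait_for_ready(command, ready_marker, error_markers=()):
    """Start `command` and block until its stderr reports readiness."""
    proc = subprocess.Popen(command, stdin=subprocess.PIPE,
                            stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    while True:
        line = proc.stderr.readline().decode("utf-8")
        if line == "" and proc.poll() is not None:
            # EOF plus an exit code means the process died before becoming ready
            raise RuntimeError("process exited early with code %s" % proc.returncode)
        if any(marker in line for marker in error_markers):
            proc.terminate()
            raise RuntimeError("startup error: %s" % line.strip())
        if ready_marker in line:
            return proc  # caller is responsible for stopping it later
```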
def _AddStrMethod(message_descriptor, cls):
"""Helper for _AddMessageMethods()."""
def __str__(self):
return text_format.MessageToString(self)
cls.__str__ = __str__ | Helper for _AddMessageMethods(). | Below is the instruction that describes the task:
### Input:
Helper for _AddMessageMethods().
### Response:
def _AddStrMethod(message_descriptor, cls):
"""Helper for _AddMessageMethods()."""
def __str__(self):
return text_format.MessageToString(self)
cls.__str__ = __str__ |
def _validated(self, value):
"""Format the value or raise a :exc:`ValidationError` if an error occurs."""
if value is None:
return None
try:
return self._format_num(value)
except (TypeError, ValueError):
self.fail('invalid', input=value)
except OverflowError:
self.fail('too_large', input=value) | Format the value or raise a :exc:`ValidationError` if an error occurs. | Below is the instruction that describes the task:
### Input:
Format the value or raise a :exc:`ValidationError` if an error occurs.
### Response:
def _validated(self, value):
"""Format the value or raise a :exc:`ValidationError` if an error occurs."""
if value is None:
return None
try:
return self._format_num(value)
except (TypeError, ValueError):
self.fail('invalid', input=value)
except OverflowError:
self.fail('too_large', input=value) |
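The method above mirrors a common "convert or fail" validation pattern (it appears to come from a marshmallow-style Number field, where `_format_num` and `fail` are supplied by the field class). A standalone sketch of the same flow using plain built-ins, purely for illustration:

```python
def validated_int(value):
    """Return int(value) or raise ValueError, mirroring the flow above."""
    if value is None:
        return None
    try:
        return int(value)
    except (TypeError, ValueError):
        raise ValueError("invalid input: %r" % (value,))
    except OverflowError:
        raise ValueError("input too large: %r" % (value,))
```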
def flatten_element(p):
"""
Convenience function to return record-style time series representation
from elements ('p') members in station element.
member['standard'] is a standard_name parameter name, typically CF based.
Ideally, member['value'] should already be floating point value,
so it's ready to use.
Useful with most pyoos collectors.
"""
rd = {"time": p.time}
for member in p.members:
rd[member["standard"]] = member["value"]
return rd | Convenience function to return record-style time series representation
from elements ('p') members in station element.
member['standard'] is a standard_name parameter name, typically CF based.
Ideally, member['value'] should already be floating point value,
so it's ready to use.
Useful with most pyoos collectors. | Below is the instruction that describes the task:
### Input:
Convenience function to return record-style time series representation
from elements ('p') members in station element.
member['standard'] is a standard_name parameter name, typically CF based.
Ideally, member['value'] should already be floating point value,
so it's ready to use.
Useful with most pyoos collectors.
### Response:
def flatten_element(p):
"""
Convenience function to return record-style time series representation
from elements ('p') members in station element.
member['standard'] is a standard_name parameter name, typically CF based.
Ideally, member['value'] should already be floating point value,
so it's ready to use.
Useful with most pyoos collectors.
"""
rd = {"time": p.time}
for member in p.members:
rd[member["standard"]] = member["value"]
return rd |
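A hypothetical usage sketch with a minimal stand-in for a pyoos element; real elements come from a pyoos collector, and this namedtuple only mimics the two attributes the function reads (`time` and `members`):

```python
from collections import namedtuple

Element = namedtuple("Element", ["time", "members"])

p = Element(
    time="2019-01-01T00:00:00Z",
    members=[
        {"standard": "air_temperature", "value": 5.2},
        {"standard": "wind_speed", "value": 3.1},
    ],
)
flatten_element(p)
# -> {'time': '2019-01-01T00:00:00Z', 'air_temperature': 5.2, 'wind_speed': 3.1}
```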
def evaluate_model_single_recording(model_file, recording):
"""
Evaluate a model for a single recording.
Parameters
----------
model_file : string
Model file (.tar)
recording :
The handwritten recording.
"""
(preprocessing_queue, feature_list, model,
output_semantics) = load_model(model_file)
results = evaluate_model_single_recording_preloaded(preprocessing_queue,
feature_list,
model,
output_semantics,
recording)
return results | Evaluate a model for a single recording.
Parameters
----------
model_file : string
Model file (.tar)
recording :
The handwritten recording. | Below is the instruction that describes the task:
### Input:
Evaluate a model for a single recording.
Parameters
----------
model_file : string
Model file (.tar)
recording :
The handwritten recording.
### Response:
def evaluate_model_single_recording(model_file, recording):
"""
Evaluate a model for a single recording.
Parameters
----------
model_file : string
Model file (.tar)
recording :
The handwritten recording.
"""
(preprocessing_queue, feature_list, model,
output_semantics) = load_model(model_file)
results = evaluate_model_single_recording_preloaded(preprocessing_queue,
feature_list,
model,
output_semantics,
recording)
return results |
def undecorate(cls, function):
'''
Remove validator decoration from a function.
The `function` argument is the function to be cleaned from
the validator decorator.
'''
if cls.is_function_validated(function):
return cls.get_function_validator(function).function
return function | Remove validator decoration from a function.
The `function` argument is the function to be cleaned from
the validator decorator. | Below is the instruction that describes the task:
### Input:
Remove validator decoration from a function.
The `function` argument is the function to be cleaned from
the validator decorator.
### Response:
def undecorate(cls, function):
'''
Remove validator decoration from a function.
The `function` argument is the function to be cleaned from
the validator decorator.
'''
if cls.is_function_validated(function):
return cls.get_function_validator(function).function
return function |
def _from_json(json_data):
"""
Creates a Report from json data.
:param json_data: The raw json data to parse
:type json_data: dict
:returns: Report
"""
if 'bbox' in json_data:
box = BoundingBox._from_json(json_data['bbox'])
else:
box = BoundingBox(Coordinate(0.,0.,0.), Coordinate(0.,0.,0.))
if 'features' in json_data and json_data['features']:
quakes = list(map(Earthquake._from_json, json_data['features']))
else:
quakes = []
try:
title = json_data['metadata']['title']
except KeyError:
raise USGSException("No report title information returned by server")
return Report(box, quakes, title) | Creates a Report from json data.
:param json_data: The raw json data to parse
:type json_data: dict
:returns: Report | Below is the instruction that describes the task:
### Input:
Creates a Report from json data.
:param json_data: The raw json data to parse
:type json_data: dict
:returns: Report
### Response:
def _from_json(json_data):
"""
Creates a Report from json data.
:param json_data: The raw json data to parse
:type json_data: dict
:returns: Report
"""
if 'bbox' in json_data:
box = BoundingBox._from_json(json_data['bbox'])
else:
box = BoundingBox(Coordinate(0.,0.,0.), Coordinate(0.,0.,0.))
if 'features' in json_data and json_data['features']:
quakes = list(map(Earthquake._from_json, json_data['features']))
else:
quakes = []
try:
title = json_data['metadata']['title']
except KeyError:
raise USGSException("No report title information returned by server")
return Report(box, quakes, title) |
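A sketch of the minimal input shape this parser expects (a trimmed USGS GeoJSON feed); the field values are invented for illustration, and the call assumes the surrounding `Report`, `BoundingBox`, `Coordinate`, and `Earthquake` classes from the same module:

```python
sample = {
    # no 'bbox' key -> falls back to the zeroed BoundingBox
    "features": [],  # each entry here would be parsed by Earthquake._from_json
    "metadata": {"title": "USGS Magnitude 2.5+ Earthquakes, Past Day"},
}
report = _from_json(sample)  # Report with an empty quake list
```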
def set_source(self, source_id):
"""Sets the source.
arg: source_id (osid.id.Id): the new publisher
raise: InvalidArgument - ``source_id`` is invalid
raise: NoAccess - ``Metadata.isReadOnly()`` is ``true``
raise: NullArgument - ``source_id`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.resource.ResourceForm.set_avatar_template
if self.get_source_metadata().is_read_only():
raise errors.NoAccess()
if not self._is_valid_id(source_id):
raise errors.InvalidArgument()
self._my_map['sourceId'] = str(source_id) | Sets the source.
arg: source_id (osid.id.Id): the new publisher
raise: InvalidArgument - ``source_id`` is invalid
raise: NoAccess - ``Metadata.isReadOnly()`` is ``true``
raise: NullArgument - ``source_id`` is ``null``
*compliance: mandatory -- This method must be implemented.* | Below is the instruction that describes the task:
### Input:
Sets the source.
arg: source_id (osid.id.Id): the new publisher
raise: InvalidArgument - ``source_id`` is invalid
raise: NoAccess - ``Metadata.isReadOnly()`` is ``true``
raise: NullArgument - ``source_id`` is ``null``
*compliance: mandatory -- This method must be implemented.*
### Response:
def set_source(self, source_id):
"""Sets the source.
arg: source_id (osid.id.Id): the new publisher
raise: InvalidArgument - ``source_id`` is invalid
raise: NoAccess - ``Metadata.isReadOnly()`` is ``true``
raise: NullArgument - ``source_id`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.resource.ResourceForm.set_avatar_template
if self.get_source_metadata().is_read_only():
raise errors.NoAccess()
if not self._is_valid_id(source_id):
raise errors.InvalidArgument()
self._my_map['sourceId'] = str(source_id) |
def value(board, who='x'):
"""Returns the value of a board
>>> b = Board(); b._rows = [['x', 'x', 'x'], ['x', 'x', 'x'], ['x', 'x', 'x']]
>>> value(b)
1
>>> b = Board(); b._rows = [['o', 'o', 'o'], ['o', 'o', 'o'], ['o', 'o', 'o']]
>>> value(b)
-1
>>> b = Board(); b._rows = [['x', 'o', ' '], ['x', 'o', ' '], [' ', ' ', ' ']]
>>> value(b)
1
>>> b._rows[0][2] = 'x'
>>> value(b)
-1
"""
w = board.winner()
if w == who:
return 1
if w == opp(who):
return -1
if board.turn == 9:
return 0
if who == board.whose_turn:
return max([value(b, who) for b in board.possible()])
else:
return min([value(b, who) for b in board.possible()]) | Returns the value of a board
>>> b = Board(); b._rows = [['x', 'x', 'x'], ['x', 'x', 'x'], ['x', 'x', 'x']]
>>> value(b)
1
>>> b = Board(); b._rows = [['o', 'o', 'o'], ['o', 'o', 'o'], ['o', 'o', 'o']]
>>> value(b)
-1
>>> b = Board(); b._rows = [['x', 'o', ' '], ['x', 'o', ' '], [' ', ' ', ' ']]
>>> value(b)
1
>>> b._rows[0][2] = 'x'
>>> value(b)
-1 | Below is the instruction that describes the task:
### Input:
Returns the value of a board
>>> b = Board(); b._rows = [['x', 'x', 'x'], ['x', 'x', 'x'], ['x', 'x', 'x']]
>>> value(b)
1
>>> b = Board(); b._rows = [['o', 'o', 'o'], ['o', 'o', 'o'], ['o', 'o', 'o']]
>>> value(b)
-1
>>> b = Board(); b._rows = [['x', 'o', ' '], ['x', 'o', ' '], [' ', ' ', ' ']]
>>> value(b)
1
>>> b._rows[0][2] = 'x'
>>> value(b)
-1
### Response:
def value(board, who='x'):
"""Returns the value of a board
>>> b = Board(); b._rows = [['x', 'x', 'x'], ['x', 'x', 'x'], ['x', 'x', 'x']]
>>> value(b)
1
>>> b = Board(); b._rows = [['o', 'o', 'o'], ['o', 'o', 'o'], ['o', 'o', 'o']]
>>> value(b)
-1
>>> b = Board(); b._rows = [['x', 'o', ' '], ['x', 'o', ' '], [' ', ' ', ' ']]
>>> value(b)
1
>>> b._rows[0][2] = 'x'
>>> value(b)
-1
"""
w = board.winner()
if w == who:
return 1
if w == opp(who):
return -1
if board.turn == 9:
return 0
if who == board.whose_turn:
return max([value(b, who) for b in board.possible()])
else:
return min([value(b, who) for b in board.possible()]) |
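`value` is plain minimax over the full game tree: +1/-1 for a decided game, 0 for a draw, and otherwise the max (own turn) or min (opponent's turn) over all successor boards. A generic sketch of the same recursion, independent of the `Board` class; the `state` object with `winner()`, `is_full()` and `moves()` is hypothetical:

```python
def minimax(state, maximizing, me, opponent):
    w = state.winner()
    if w == me:
        return 1
    if w == opponent:
        return -1
    if state.is_full():
        return 0  # draw
    children = [minimax(s, not maximizing, me, opponent) for s in state.moves()]
    return max(children) if maximizing else min(children)
```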
def _init_properties(self):
""" Loop through the list of Properties,
extract the derived and required properties and do the
appropriate book-keeping
"""
self._missing = {}
for k, p in self.params.items():
if p.required:
self._missing[k] = p
if isinstance(p, Derived):
if p.loader is None:
# Default to using _<param_name>
p.loader = self.__getattribute__("_%s" % k)
elif isinstance(p.loader, str):
p.loader = self.__getattribute__(p.loader) | Loop through the list of Properties,
extract the derived and required properties and do the
appropriate book-keeping | Below is the instruction that describes the task:
### Input:
Loop through the list of Properties,
extract the derived and required properties and do the
appropriate book-keeping
### Response:
def _init_properties(self):
""" Loop through the list of Properties,
extract the derived and required properties and do the
appropriate book-keeping
"""
self._missing = {}
for k, p in self.params.items():
if p.required:
self._missing[k] = p
if isinstance(p, Derived):
if p.loader is None:
# Default to using _<param_name>
p.loader = self.__getattribute__("_%s" % k)
elif isinstance(p.loader, str):
p.loader = self.__getattribute__(p.loader) |
def check_hash(self, checker, filename, tfp):
"""
checker is a ContentChecker
"""
checker.report(
self.debug,
"Validating %%s checksum for %s" % filename)
if not checker.is_valid():
tfp.close()
os.unlink(filename)
raise DistutilsError(
"%s validation failed for %s; "
"possible download problem?"
% (checker.hash.name, os.path.basename(filename))
) | checker is a ContentChecker | Below is the instruction that describes the task:
### Input:
checker is a ContentChecker
### Response:
def check_hash(self, checker, filename, tfp):
"""
checker is a ContentChecker
"""
checker.report(
self.debug,
"Validating %%s checksum for %s" % filename)
if not checker.is_valid():
tfp.close()
os.unlink(filename)
raise DistutilsError(
"%s validation failed for %s; "
"possible download problem?"
% (checker.hash.name, os.path.basename(filename))
) |
def dimension_values(self, dimension, expanded=True, flat=True):
"""Return the values along the requested dimension.
Applies to the main object in the AdjointLayout.
Args:
dimension: The dimension to return values for
expanded (bool, optional): Whether to expand values
Whether to return the expanded values, behavior depends
on the type of data:
* Columnar: If false returns unique values
* Geometry: If false returns scalar values per geometry
* Gridded: If false returns 1D coordinates
flat (bool, optional): Whether to flatten array
Returns:
NumPy array of values along the requested dimension
"""
dimension = self.get_dimension(dimension, strict=True).name
return self.main.dimension_values(dimension, expanded, flat) | Return the values along the requested dimension.
Applies to the main object in the AdjointLayout.
Args:
dimension: The dimension to return values for
expanded (bool, optional): Whether to expand values
Whether to return the expanded values, behavior depends
on the type of data:
* Columnar: If false returns unique values
* Geometry: If false returns scalar values per geometry
* Gridded: If false returns 1D coordinates
flat (bool, optional): Whether to flatten array
Returns:
NumPy array of values along the requested dimension | Below is the instruction that describes the task:
### Input:
Return the values along the requested dimension.
Applies to the main object in the AdjointLayout.
Args:
dimension: The dimension to return values for
expanded (bool, optional): Whether to expand values
Whether to return the expanded values, behavior depends
on the type of data:
* Columnar: If false returns unique values
* Geometry: If false returns scalar values per geometry
* Gridded: If false returns 1D coordinates
flat (bool, optional): Whether to flatten array
Returns:
NumPy array of values along the requested dimension
### Response:
def dimension_values(self, dimension, expanded=True, flat=True):
"""Return the values along the requested dimension.
Applies to the main object in the AdjointLayout.
Args:
dimension: The dimension to return values for
expanded (bool, optional): Whether to expand values
Whether to return the expanded values, behavior depends
on the type of data:
* Columnar: If false returns unique values
* Geometry: If false returns scalar values per geometry
* Gridded: If false returns 1D coordinates
flat (bool, optional): Whether to flatten array
Returns:
NumPy array of values along the requested dimension
"""
dimension = self.get_dimension(dimension, strict=True).name
return self.main.dimension_values(dimension, expanded, flat) |
def update_channels(cls, installation_id, channels_to_add=set(),
channels_to_remove=set(), **kw):
"""
Allow an application to manually subscribe or unsubscribe an
installation to a certain push channel in a unified operation.
this is based on:
https://www.parse.com/docs/rest#installations-updating
installation_id: the installation id you'd like to add a channel to
channels_to_add: the name of the channel you'd like to subscribe the user to
channels_to_remove: the name of the channel you'd like to unsubscribe the user from
"""
installation_url = cls._get_installation_url(installation_id)
current_config = cls.GET(installation_url)
new_channels = list(set(current_config['channels']).union(channels_to_add).difference(channels_to_remove))
cls.PUT(installation_url, channels=new_channels) | Allow an application to manually subscribe or unsubscribe an
installation to a certain push channel in a unified operation.
this is based on:
https://www.parse.com/docs/rest#installations-updating
installation_id: the installation id you'd like to add a channel to
channels_to_add: the name of the channel you'd like to subscribe the user to
channels_to_remove: the name of the channel you'd like to unsubscribe the user from | Below is the instruction that describes the task:
### Input:
Allow an application to manually subscribe or unsubscribe an
installation to a certain push channel in a unified operation.
this is based on:
https://www.parse.com/docs/rest#installations-updating
installation_id: the installation id you'd like to add a channel to
channels_to_add: the name of the channel you'd like to subscribe the user to
channels_to_remove: the name of the channel you'd like to unsubscribe the user from
### Response:
def update_channels(cls, installation_id, channels_to_add=set(),
channels_to_remove=set(), **kw):
"""
Allow an application to manually subscribe or unsubscribe an
installation to a certain push channel in a unified operation.
this is based on:
https://www.parse.com/docs/rest#installations-updating
installation_id: the installation id you'd like to add a channel to
channels_to_add: the name of the channel you'd like to subscribe the user to
channels_to_remove: the name of the channel you'd like to unsubscribe the user from
"""
installation_url = cls._get_installation_url(installation_id)
current_config = cls.GET(installation_url)
new_channels = list(set(current_config['channels']).union(channels_to_add).difference(channels_to_remove))
cls.PUT(installation_url, channels=new_channels) |
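The core of the update is ordinary set arithmetic on the channel list; a quick illustration with made-up channel names:

```python
current = {"news", "alerts", "beta"}
to_add = {"promotions"}
to_remove = {"beta"}

new_channels = list(current.union(to_add).difference(to_remove))
# e.g. ['alerts', 'news', 'promotions']  (set order is not guaranteed)
```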
def get_instance(self, payload):
"""
Build an instance of UsageInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.api.v2010.account.usage.UsageInstance
:rtype: twilio.rest.api.v2010.account.usage.UsageInstance
"""
return UsageInstance(self._version, payload, account_sid=self._solution['account_sid'], ) | Build an instance of UsageInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.api.v2010.account.usage.UsageInstance
:rtype: twilio.rest.api.v2010.account.usage.UsageInstance | Below is the instruction that describes the task:
### Input:
Build an instance of UsageInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.api.v2010.account.usage.UsageInstance
:rtype: twilio.rest.api.v2010.account.usage.UsageInstance
### Response:
def get_instance(self, payload):
"""
Build an instance of UsageInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.api.v2010.account.usage.UsageInstance
:rtype: twilio.rest.api.v2010.account.usage.UsageInstance
"""
return UsageInstance(self._version, payload, account_sid=self._solution['account_sid'], ) |
def total(self):
'''
Returns sum of all counts in all features that are multisets.
'''
feats = imap(lambda name: self[name], self._counters())
return sum(chain(*map(lambda mset: map(abs, mset.values()), feats))) | Returns sum of all counts in all features that are multisets. | Below is the the instruction that describes the task:
### Input:
Returns sum of all counts in all features that are multisets.
### Response:
def total(self):
'''
Returns sum of all counts in all features that are multisets.
'''
feats = imap(lambda name: self[name], self._counters())
return sum(chain(*map(lambda mset: map(abs, mset.values()), feats))) |
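The implementation above relies on Python 2's `imap`; an equivalent Python 3 sketch using generator expressions, assuming `self._counters()` yields feature names and each such feature behaves like a `Counter`/multiset:

```python
def total(self):
    """Sum of absolute counts across all multiset features (Python 3 sketch)."""
    return sum(
        abs(count)
        for name in self._counters()
        for count in self[name].values()
    )
```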
def read_pixel_register(self, pix_regs=None, dcs=range(40), overwrite_config=False):
'''The function reads the pixel register, interprets the data and returns masked numpy arrays with the data for the chosen pixel register.
Pixels without any data are masked.
Parameters
----------
pix_regs : iterable, string
List of pixel register to read (e.g. Enable, C_High, ...).
If None all are read: "EnableDigInj", "Imon", "Enable", "C_High", "C_Low", "TDAC", "FDAC"
dcs : iterable, int
List of double columns to read.
overwrite_config : bool
The read values overwrite the config in RAM if true.
Returns
-------
list of masked numpy.ndarrays
'''
if pix_regs is None:
pix_regs = ["EnableDigInj", "Imon", "Enable", "C_High", "C_Low", "TDAC", "FDAC"]
self.register_utils.send_commands(self.register.get_commands("ConfMode"))
result = []
for pix_reg in pix_regs:
pixel_data = np.ma.masked_array(np.zeros(shape=(80, 336), dtype=np.uint32), mask=True) # the result pixel array, only pixel with data are not masked
for dc in dcs:
with self.readout(fill_buffer=True, callback=None, errback=None):
self.register_utils.send_commands(self.register.get_commands("RdFrontEnd", name=[pix_reg], dcs=[dc]))
data = self.read_data()
interpret_pixel_data(data, dc, pixel_data, invert=False if pix_reg == "EnableDigInj" else True)
if overwrite_config:
self.register.set_pixel_register(pix_reg, pixel_data.data)
result.append(pixel_data)
return result | The function reads the pixel register, interprets the data and returns masked numpy arrays with the data for the chosen pixel register.
Pixels without any data are masked.
Parameters
----------
pix_regs : iterable, string
List of pixel register to read (e.g. Enable, C_High, ...).
If None all are read: "EnableDigInj", "Imon", "Enable", "C_High", "C_Low", "TDAC", "FDAC"
dcs : iterable, int
List of double columns to read.
overwrite_config : bool
The read values overwrite the config in RAM if true.
Returns
-------
list of masked numpy.ndarrays | Below is the instruction that describes the task:
### Input:
The function reads the pixel register, interprets the data and returns masked numpy arrays with the data for the chosen pixel register.
Pixels without any data are masked.
Parameters
----------
pix_regs : iterable, string
List of pixel register to read (e.g. Enable, C_High, ...).
If None all are read: "EnableDigInj", "Imon", "Enable", "C_High", "C_Low", "TDAC", "FDAC"
dcs : iterable, int
List of double columns to read.
overwrite_config : bool
The read values overwrite the config in RAM if true.
Returns
-------
list of masked numpy.ndarrays
### Response:
def read_pixel_register(self, pix_regs=None, dcs=range(40), overwrite_config=False):
'''The function reads the pixel register, interprets the data and returns masked numpy arrays with the data for the chosen pixel register.
Pixels without any data are masked.
Parameters
----------
pix_regs : iterable, string
List of pixel register to read (e.g. Enable, C_High, ...).
If None all are read: "EnableDigInj", "Imon", "Enable", "C_High", "C_Low", "TDAC", "FDAC"
dcs : iterable, int
List of double columns to read.
overwrite_config : bool
The read values overwrite the config in RAM if true.
Returns
-------
list of masked numpy.ndarrays
'''
if pix_regs is None:
pix_regs = ["EnableDigInj", "Imon", "Enable", "C_High", "C_Low", "TDAC", "FDAC"]
self.register_utils.send_commands(self.register.get_commands("ConfMode"))
result = []
for pix_reg in pix_regs:
pixel_data = np.ma.masked_array(np.zeros(shape=(80, 336), dtype=np.uint32), mask=True) # the result pixel array, only pixel with data are not masked
for dc in dcs:
with self.readout(fill_buffer=True, callback=None, errback=None):
self.register_utils.send_commands(self.register.get_commands("RdFrontEnd", name=[pix_reg], dcs=[dc]))
data = self.read_data()
interpret_pixel_data(data, dc, pixel_data, invert=False if pix_reg == "EnableDigInj" else True)
if overwrite_config:
self.register.set_pixel_register(pix_reg, pixel_data.data)
result.append(pixel_data)
return result |
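The key trick is the fully masked result array: every pixel starts masked, and only the pixels that actually produced register data are unmasked when `interpret_pixel_data` assigns to them. The masking behaviour in isolation (NumPy only, with illustrative indices):

```python
import numpy as np

pixel_data = np.ma.masked_array(np.zeros((80, 336), dtype=np.uint32), mask=True)
pixel_data[10, 20] = 7                    # assignment unmasks this element
print(pixel_data[10, 20])                 # 7
print(pixel_data[0, 0] is np.ma.masked)   # True -> still masked (no data)
```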
def flux(self, photon_energy, distance=1 * u.kpc, seed=None):
"""Differential flux at a given distance from the source from a single
seed photon field
Parameters
----------
photon_energy : :class:`~astropy.units.Quantity` float or array
Photon energy array.
distance : :class:`~astropy.units.Quantity` float, optional
Distance to the source. If set to 0, the intrinsic luminosity will
be returned. Default is 1 kpc.
seed : int, str or None
Number or name of seed photon field for which the IC contribution
is required. If set to None it will return the sum of all
contributions (default).
"""
model = super(InverseCompton, self).flux(
photon_energy, distance=distance
)
if seed is not None:
# Test seed argument
if not isinstance(seed, int):
if seed not in self.seed_photon_fields:
raise ValueError(
"Provided seed photon field name is not in"
" the definition of the InverseCompton instance"
)
else:
seed = list(self.seed_photon_fields.keys()).index(seed)
elif seed > len(self.seed_photon_fields):
raise ValueError(
"Provided seed photon field number is larger"
" than the number of seed photon fields defined in the"
" InverseCompton instance"
)
if distance != 0:
distance = validate_scalar(
"distance", distance, physical_type="length"
)
dfac = 4 * np.pi * distance.to("cm") ** 2
out_unit = "1/(s cm2 eV)"
else:
dfac = 1
out_unit = "1/(s eV)"
model = (self.specic[seed] / dfac).to(out_unit)
return model | Differential flux at a given distance from the source from a single
seed photon field
Parameters
----------
photon_energy : :class:`~astropy.units.Quantity` float or array
Photon energy array.
distance : :class:`~astropy.units.Quantity` float, optional
Distance to the source. If set to 0, the intrinsic luminosity will
be returned. Default is 1 kpc.
seed : int, str or None
Number or name of seed photon field for which the IC contribution
is required. If set to None it will return the sum of all
contributions (default). | Below is the instruction that describes the task:
### Input:
Differential flux at a given distance from the source from a single
seed photon field
Parameters
----------
photon_energy : :class:`~astropy.units.Quantity` float or array
Photon energy array.
distance : :class:`~astropy.units.Quantity` float, optional
Distance to the source. If set to 0, the intrinsic luminosity will
be returned. Default is 1 kpc.
seed : int, str or None
Number or name of seed photon field for which the IC contribution
is required. If set to None it will return the sum of all
contributions (default).
### Response:
def flux(self, photon_energy, distance=1 * u.kpc, seed=None):
"""Differential flux at a given distance from the source from a single
seed photon field
Parameters
----------
photon_energy : :class:`~astropy.units.Quantity` float or array
Photon energy array.
distance : :class:`~astropy.units.Quantity` float, optional
Distance to the source. If set to 0, the intrinsic luminosity will
be returned. Default is 1 kpc.
seed : int, str or None
Number or name of seed photon field for which the IC contribution
is required. If set to None it will return the sum of all
contributions (default).
"""
model = super(InverseCompton, self).flux(
photon_energy, distance=distance
)
if seed is not None:
# Test seed argument
if not isinstance(seed, int):
if seed not in self.seed_photon_fields:
raise ValueError(
"Provided seed photon field name is not in"
" the definition of the InverseCompton instance"
)
else:
seed = list(self.seed_photon_fields.keys()).index(seed)
elif seed > len(self.seed_photon_fields):
raise ValueError(
"Provided seed photon field number is larger"
" than the number of seed photon fields defined in the"
" InverseCompton instance"
)
if distance != 0:
distance = validate_scalar(
"distance", distance, physical_type="length"
)
dfac = 4 * np.pi * distance.to("cm") ** 2
out_unit = "1/(s cm2 eV)"
else:
dfac = 1
out_unit = "1/(s eV)"
model = (self.specic[seed] / dfac).to(out_unit)
return model |
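The distance handling boils down to an astropy `Quantity` conversion: the luminosity-to-flux factor is 4*pi*d^2 in cm^2. A small sketch of that step on its own, with an invented spectrum value:

```python
import numpy as np
import astropy.units as u

distance = 1 * u.kpc
dfac = 4 * np.pi * distance.to("cm") ** 2        # ~1.2e44 cm2

luminosity = 1e33 * u.Unit("1/(s eV)")           # made-up intrinsic value
flux = (luminosity / dfac).to("1/(s cm2 eV)")
```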
def cli(execute, region, aws_access_key_id, aws_secret_access_key,
s3_staging_dir, athenaclirc, profile, database):
'''An Athena terminal client with auto-completion and syntax highlighting.
\b
Examples:
- athenacli
- athenacli my_database
'''
if (athenaclirc == ATHENACLIRC) and (not os.path.exists(os.path.expanduser(ATHENACLIRC))):
err_msg = '''
Welcome to athenacli!
It seems this is your first time to run athenacli,
we generated a default config file for you
%s
Please change it accordingly, and run athenacli again.
''' % ATHENACLIRC
print(err_msg)
write_default_config(DEFAULT_CONFIG_FILE, ATHENACLIRC)
sys.exit(1)
if profile != 'default':
os.environ['AWS_PROFILE'] = profile
athenacli = AthenaCli(
region=region,
aws_access_key_id=aws_access_key_id,
aws_secret_access_key= aws_secret_access_key,
s3_staging_dir=s3_staging_dir,
athenaclirc=athenaclirc,
profile=profile,
database=database
)
# --execute argument
if execute:
if os.path.exists(execute):
with open(execute) as f:
query = f.read()
else:
query = execute
try:
athenacli.formatter.format_name = 'csv'
athenacli.run_query(query)
exit(0)
except Exception as e:
click.secho(str(e), err=True, fg='red')
exit(1)
athenacli.run_cli() | An Athena terminal client with auto-completion and syntax highlighting.
\b
Examples:
- athenacli
- athenacli my_database | Below is the instruction that describes the task:
### Input:
An Athena terminal client with auto-completion and syntax highlighting.
\b
Examples:
- athenacli
- athenacli my_database
### Response:
def cli(execute, region, aws_access_key_id, aws_secret_access_key,
s3_staging_dir, athenaclirc, profile, database):
'''An Athena terminal client with auto-completion and syntax highlighting.
\b
Examples:
- athenacli
- athenacli my_database
'''
if (athenaclirc == ATHENACLIRC) and (not os.path.exists(os.path.expanduser(ATHENACLIRC))):
err_msg = '''
Welcome to athenacli!
It seems this is your first time to run athenacli,
we generated a default config file for you
%s
Please change it accordingly, and run athenacli again.
''' % ATHENACLIRC
print(err_msg)
write_default_config(DEFAULT_CONFIG_FILE, ATHENACLIRC)
sys.exit(1)
if profile != 'default':
os.environ['AWS_PROFILE'] = profile
athenacli = AthenaCli(
region=region,
aws_access_key_id=aws_access_key_id,
aws_secret_access_key= aws_secret_access_key,
s3_staging_dir=s3_staging_dir,
athenaclirc=athenaclirc,
profile=profile,
database=database
)
# --execute argument
if execute:
if os.path.exists(execute):
with open(execute) as f:
query = f.read()
else:
query = execute
try:
athenacli.formatter.format_name = 'csv'
athenacli.run_query(query)
exit(0)
except Exception as e:
click.secho(str(e), err=True, fg='red')
exit(1)
athenacli.run_cli() |
def upload_object(bucket_path, bucket, content='',
metadata=None, acl=None, cache_control=None,
content_type=None):
"""Upload an arbitrary object to an S3 bucket.
Parameters
----------
bucket_path : `str`
Destination path (also known as the key name) of the file in the
S3 bucket.
content : `str` or `bytes`, optional
Object content.
bucket : boto3 Bucket instance
S3 bucket.
metadata : `dict`, optional
Header metadata values. These keys will appear in headers as
``x-amz-meta-*``.
acl : `str`, optional
A pre-canned access control list. See
https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl
Default is `None`, meaning that no ACL is applied to the object.
cache_control : `str`, optional
The cache-control header value. For example, ``'max-age=31536000'``.
content_type : `str`, optional
The object's content type (such as ``text/html``). If left unset,
no MIME type is passed to boto3 (which defaults to
``binary/octet-stream``).
"""
obj = bucket.Object(bucket_path)
# Object.put seems to be sensitive to None-type kwargs, so we filter first
args = {}
if metadata is not None and len(metadata) > 0: # avoid empty Metadata
args['Metadata'] = metadata
if acl is not None:
args['ACL'] = acl
if cache_control is not None:
args['CacheControl'] = cache_control
if content_type is not None:
args['ContentType'] = content_type
obj.put(Body=content, **args) | Upload an arbitrary object to an S3 bucket.
Parameters
----------
bucket_path : `str`
Destination path (also known as the key name) of the file in the
S3 bucket.
content : `str` or `bytes`, optional
Object content.
bucket : boto3 Bucket instance
S3 bucket.
metadata : `dict`, optional
Header metadata values. These keys will appear in headers as
``x-amz-meta-*``.
acl : `str`, optional
A pre-canned access control list. See
https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl
Default is `None`, meaning that no ACL is applied to the object.
cache_control : `str`, optional
The cache-control header value. For example, ``'max-age=31536000'``.
content_type : `str`, optional
The object's content type (such as ``text/html``). If left unset,
no MIME type is passed to boto3 (which defaults to
``binary/octet-stream``). | Below is the instruction that describes the task:
### Input:
Upload an arbitrary object to an S3 bucket.
Parameters
----------
bucket_path : `str`
Destination path (also known as the key name) of the file in the
S3 bucket.
content : `str` or `bytes`, optional
Object content.
bucket : boto3 Bucket instance
S3 bucket.
metadata : `dict`, optional
Header metadata values. These keys will appear in headers as
``x-amz-meta-*``.
acl : `str`, optional
A pre-canned access control list. See
https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl
Default is `None`, meaning that no ACL is applied to the object.
cache_control : `str`, optional
The cache-control header value. For example, ``'max-age=31536000'``.
content_type : `str`, optional
The object's content type (such as ``text/html``). If left unset,
no MIME type is passed to boto3 (which defaults to
``binary/octet-stream``).
### Response:
def upload_object(bucket_path, bucket, content='',
metadata=None, acl=None, cache_control=None,
content_type=None):
"""Upload an arbitrary object to an S3 bucket.
Parameters
----------
bucket_path : `str`
Destination path (also known as the key name) of the file in the
S3 bucket.
content : `str` or `bytes`, optional
Object content.
bucket : boto3 Bucket instance
S3 bucket.
metadata : `dict`, optional
Header metadata values. These keys will appear in headers as
``x-amz-meta-*``.
acl : `str`, optional
A pre-canned access control list. See
https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl
Default is `None`, meaning that no ACL is applied to the object.
cache_control : `str`, optional
The cache-control header value. For example, ``'max-age=31536000'``.
content_type : `str`, optional
The object's content type (such as ``text/html``). If left unset,
no MIME type is passed to boto3 (which defaults to
``binary/octet-stream``).
"""
obj = bucket.Object(bucket_path)
# Object.put seems to be sensitive to None-type kwargs, so we filter first
args = {}
if metadata is not None and len(metadata) > 0: # avoid empty Metadata
args['Metadata'] = metadata
if acl is not None:
args['ACL'] = acl
if cache_control is not None:
args['CacheControl'] = cache_control
if content_type is not None:
args['ContentType'] = content_type
obj.put(Body=content, **args) |
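A hypothetical call site; the bucket and key names are placeholders and AWS credentials are assumed to be configured in the environment:

```python
import boto3

s3 = boto3.resource("s3")
bucket = s3.Bucket("example-bucket")

upload_object(
    "builds/v1/index.html",
    bucket,
    content="<html>...</html>",
    cache_control="max-age=31536000",
    content_type="text/html",
)
```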
def insert(self, key, minhash, check_duplication=True):
'''
Insert a key to the index, together
with a MinHash (or weighted MinHash) of the set referenced by
the key.
:param str key: The identifier of the set.
:param datasketch.MinHash minhash: The MinHash of the set.
:param bool check_duplication: To avoid duplicate keys in the storage (`default=True`).
It's recommended to not change the default, but
if you want to avoid the overhead during insert
you can set `check_duplication = False`.
'''
self._insert(key, minhash, check_duplication=check_duplication, buffer=False) | Insert a key to the index, together
with a MinHash (or weighted MinHash) of the set referenced by
the key.
:param str key: The identifier of the set.
:param datasketch.MinHash minhash: The MinHash of the set.
:param bool check_duplication: To avoid duplicate keys in the storage (`default=True`).
It's recommended to not change the default, but
if you want to avoid the overhead during insert
you can set `check_duplication = False`. | Below is the instruction that describes the task:
### Input:
Insert a key to the index, together
with a MinHash (or weighted MinHash) of the set referenced by
the key.
:param str key: The identifier of the set.
:param datasketch.MinHash minhash: The MinHash of the set.
:param bool check_duplication: To avoid duplicate keys in the storage (`default=True`).
It's recommended to not change the default, but
if you want to avoid the overhead during insert
you can set `check_duplication = False`.
### Response:
def insert(self, key, minhash, check_duplication=True):
'''
Insert a key to the index, together
with a MinHash (or weighted MinHash) of the set referenced by
the key.
:param str key: The identifier of the set.
:param datasketch.MinHash minhash: The MinHash of the set.
:param bool check_duplication: To avoid duplicate keys in the storage (`default=True`).
It's recommended to not change the default, but
if you want to avoid the overhead during insert
you can set `check_duplication = False`.
'''
self._insert(key, minhash, check_duplication=check_duplication, buffer=False) |
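Typical usage around this method in datasketch (a sketch; the threshold, permutation count, and documents are illustrative): build a MinHash per document, insert each under a key, then query with another MinHash:

```python
from datasketch import MinHash, MinHashLSH

def make_minhash(tokens, num_perm=128):
    m = MinHash(num_perm=num_perm)
    for token in tokens:
        m.update(token.encode("utf-8"))
    return m

lsh = MinHashLSH(threshold=0.5, num_perm=128)
lsh.insert("doc1", make_minhash("a quick brown fox".split()))
lsh.insert("doc2", make_minhash("a quick brown dog".split()))

print(lsh.query(make_minhash("quick brown fox".split())))  # likely ['doc1']
```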
def check(self, values, namespace):
"""specifying a plain tuple allows arguments that are tuples or lists;
specifying a specialized (subclassed) tuple allows only that type;
specifying a list allows only that list type."""
is_tuplish_type = (issubclass(self._cls, tg.Tuple) or
issubclass(type(values), self._cls))
if (not _is_sequence(values) or not is_tuplish_type or
len(values) != len(self._checks)):
return False
for thischeck, thisvalue in zip(self._checks, values):
if not thischeck(thisvalue, namespace):
return False
return True | specifying a plain tuple allows arguments that are tuples or lists;
specifying a specialized (subclassed) tuple allows only that type;
specifying a list allows only that list type. | Below is the instruction that describes the task:
### Input:
specifying a plain tuple allows arguments that are tuples or lists;
specifying a specialized (subclassed) tuple allows only that type;
specifying a list allows only that list type.
### Response:
def check(self, values, namespace):
"""specifying a plain tuple allows arguments that are tuples or lists;
specifying a specialized (subclassed) tuple allows only that type;
specifying a list allows only that list type."""
is_tuplish_type = (issubclass(self._cls, tg.Tuple) or
issubclass(type(values), self._cls))
if (not _is_sequence(values) or not is_tuplish_type or
len(values) != len(self._checks)):
return False
for thischeck, thisvalue in zip(self._checks, values):
if not thischeck(thisvalue, namespace):
return False
return True |
def _consolidate_slices(slices):
"""Consolidate adjacent slices in a list of slices.
"""
result = []
last_slice = slice(None)
for slice_ in slices:
if not isinstance(slice_, slice):
raise ValueError('list element is not a slice: %r' % slice_)
if (result and last_slice.stop == slice_.start and
_is_one_or_none(last_slice.step) and
_is_one_or_none(slice_.step)):
last_slice = slice(last_slice.start, slice_.stop, slice_.step)
result[-1] = last_slice
else:
result.append(slice_)
last_slice = slice_
return result | Consolidate adjacent slices in a list of slices. | Below is the the instruction that describes the task:
### Input:
Consolidate adjacent slices in a list of slices.
### Response:
def _consolidate_slices(slices):
"""Consolidate adjacent slices in a list of slices.
"""
result = []
last_slice = slice(None)
for slice_ in slices:
if not isinstance(slice_, slice):
raise ValueError('list element is not a slice: %r' % slice_)
if (result and last_slice.stop == slice_.start and
_is_one_or_none(last_slice.step) and
_is_one_or_none(slice_.step)):
last_slice = slice(last_slice.start, slice_.stop, slice_.step)
result[-1] = last_slice
else:
result.append(slice_)
last_slice = slice_
return result |
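A quick illustration of the consolidation (it assumes the module's `_is_one_or_none` helper, which treats a step of `None` or `1` as mergeable):

```python
slices = [slice(0, 3), slice(3, 7), slice(9, 12)]
_consolidate_slices(slices)
# -> [slice(0, 7, None), slice(9, 12, None)]
```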
def _colorize(self, depth_im, color_im):
"""Colorize a depth image from the PhoXi using a color image from the webcam.
Parameters
----------
depth_im : DepthImage
The PhoXi depth image.
color_im : ColorImage
Corresponding color image.
Returns
-------
ColorImage
A colorized image corresponding to the PhoXi depth image.
"""
# Project the point cloud into the webcam's frame
target_shape = (depth_im.data.shape[0], depth_im.data.shape[1], 3)
pc_depth = self._phoxi.ir_intrinsics.deproject(depth_im)
pc_color = self._T_webcam_world.inverse().dot(self._T_phoxi_world).apply(pc_depth)
# Sort the points by their distance from the webcam's aperture
pc_data = pc_color.data.T
dists = np.linalg.norm(pc_data, axis=1)
order = np.argsort(dists)
pc_data = pc_data[order]
pc_color = PointCloud(pc_data.T, frame=self._webcam.color_intrinsics.frame)
sorted_dists = dists[order]
sorted_depths = depth_im.data.flatten()[order]
# Generate image coordinates for each sorted point
icds = self._webcam.color_intrinsics.project(pc_color).data.T
# Create mask for points that are masked by others
rounded_icds = np.array(icds / 3.0, dtype=np.uint32)
unique_icds, unique_inds, unique_inv = np.unique(rounded_icds, axis=0, return_index=True, return_inverse=True)
icd_depths = sorted_dists[unique_inds]
min_depths_pp = icd_depths[unique_inv]
depth_delta_mask = np.abs(min_depths_pp - sorted_dists) < 5e-3
# Create mask for points with missing depth or that lie outside the image
valid_mask = np.logical_and(np.logical_and(icds[:,0] >= 0, icds[:,0] < self._webcam.color_intrinsics.width),
np.logical_and(icds[:,1] >= 0, icds[:,1] < self._webcam.color_intrinsics.height))
valid_mask = np.logical_and(valid_mask, sorted_depths != 0.0)
valid_mask = np.logical_and(valid_mask, depth_delta_mask)
valid_icds = icds[valid_mask]
colors = color_im.data[valid_icds[:,1],valid_icds[:,0],:]
color_im_data = np.zeros((target_shape[0] * target_shape[1], target_shape[2]), dtype=np.uint8)
color_im_data[valid_mask] = colors
color_im_data[order] = color_im_data.copy()
color_im_data = color_im_data.reshape(target_shape)
return ColorImage(color_im_data, frame=self._frame) | Colorize a depth image from the PhoXi using a color image from the webcam.
Parameters
----------
depth_im : DepthImage
The PhoXi depth image.
color_im : ColorImage
Corresponding color image.
Returns
-------
ColorImage
A colorized image corresponding to the PhoXi depth image. | Below is the instruction that describes the task:
### Input:
Colorize a depth image from the PhoXi using a color image from the webcam.
Parameters
----------
depth_im : DepthImage
The PhoXi depth image.
color_im : ColorImage
Corresponding color image.
Returns
-------
ColorImage
A colorized image corresponding to the PhoXi depth image.
### Response:
def _colorize(self, depth_im, color_im):
"""Colorize a depth image from the PhoXi using a color image from the webcam.
Parameters
----------
depth_im : DepthImage
The PhoXi depth image.
color_im : ColorImage
Corresponding color image.
Returns
-------
ColorImage
A colorized image corresponding to the PhoXi depth image.
"""
# Project the point cloud into the webcam's frame
target_shape = (depth_im.data.shape[0], depth_im.data.shape[1], 3)
pc_depth = self._phoxi.ir_intrinsics.deproject(depth_im)
pc_color = self._T_webcam_world.inverse().dot(self._T_phoxi_world).apply(pc_depth)
# Sort the points by their distance from the webcam's aperture
pc_data = pc_color.data.T
dists = np.linalg.norm(pc_data, axis=1)
order = np.argsort(dists)
pc_data = pc_data[order]
pc_color = PointCloud(pc_data.T, frame=self._webcam.color_intrinsics.frame)
sorted_dists = dists[order]
sorted_depths = depth_im.data.flatten()[order]
# Generate image coordinates for each sorted point
icds = self._webcam.color_intrinsics.project(pc_color).data.T
# Create mask for points that are masked by others
rounded_icds = np.array(icds / 3.0, dtype=np.uint32)
unique_icds, unique_inds, unique_inv = np.unique(rounded_icds, axis=0, return_index=True, return_inverse=True)
icd_depths = sorted_dists[unique_inds]
min_depths_pp = icd_depths[unique_inv]
depth_delta_mask = np.abs(min_depths_pp - sorted_dists) < 5e-3
# Create mask for points with missing depth or that lie outside the image
valid_mask = np.logical_and(np.logical_and(icds[:,0] >= 0, icds[:,0] < self._webcam.color_intrinsics.width),
np.logical_and(icds[:,1] >= 0, icds[:,1] < self._webcam.color_intrinsics.height))
valid_mask = np.logical_and(valid_mask, sorted_depths != 0.0)
valid_mask = np.logical_and(valid_mask, depth_delta_mask)
valid_icds = icds[valid_mask]
colors = color_im.data[valid_icds[:,1],valid_icds[:,0],:]
color_im_data = np.zeros((target_shape[0] * target_shape[1], target_shape[2]), dtype=np.uint8)
color_im_data[valid_mask] = colors
color_im_data[order] = color_im_data.copy()
color_im_data = color_im_data.reshape(target_shape)
return ColorImage(color_im_data, frame=self._frame) |
def imshow(data, photometric=None, planarconfig=None, bitspersample=None,
interpolation=None, cmap=None, vmin=None, vmax=None,
figure=None, title=None, dpi=96, subplot=None, maxdim=None,
**kwargs):
"""Plot n-dimensional images using matplotlib.pyplot.
Return figure, subplot and plot axis.
Requires pyplot already imported C{from matplotlib import pyplot}.
Parameters
----------
data : nd array
The image data.
photometric : {'MINISWHITE', 'MINISBLACK', 'RGB', or 'PALETTE'}
The color space of the image data.
planarconfig : {'CONTIG' or 'SEPARATE'}
Defines how components of each pixel are stored.
bitspersample : int
Number of bits per channel in integer RGB images.
interpolation : str
The image interpolation method used in matplotlib.imshow. By default,
'nearest' will be used for image dimensions <= 512, else 'bilinear'.
cmap : str or matplotlib.colors.Colormap
The colormap maps non-RGBA scalar data to colors.
vmin, vmax : scalar
Data range covered by the colormap. By default, the complete
range of the data is covered.
figure : matplotlib.figure.Figure
Matplotlib figure to use for plotting.
title : str
Window and subplot title.
subplot : int
A matplotlib.pyplot.subplot axis.
maxdim : int
Maximum image width and length.
kwargs : dict
Additional arguments for matplotlib.pyplot.imshow.
"""
# TODO: rewrite detection of isrgb, iscontig
# TODO: use planarconfig
if photometric is None:
photometric = 'RGB'
if maxdim is None:
maxdim = 2**16
isrgb = photometric in ('RGB', 'YCBCR') # 'PALETTE', 'YCBCR'
if data.dtype == 'float16':
data = data.astype('float32')
if data.dtype.kind == 'b':
isrgb = False
if isrgb and not (data.shape[-1] in (3, 4) or (
data.ndim > 2 and data.shape[-3] in (3, 4))):
isrgb = False
photometric = 'MINISBLACK'
data = data.squeeze()
if photometric in ('MINISWHITE', 'MINISBLACK', None):
data = reshape_nd(data, 2)
else:
data = reshape_nd(data, 3)
dims = data.ndim
if dims < 2:
raise ValueError('not an image')
if dims == 2:
dims = 0
isrgb = False
else:
if isrgb and data.shape[-3] in (3, 4):
data = numpy.swapaxes(data, -3, -2)
data = numpy.swapaxes(data, -2, -1)
elif not isrgb and (data.shape[-1] < data.shape[-2] // 8 and
data.shape[-1] < data.shape[-3] // 8 and
data.shape[-1] < 5):
data = numpy.swapaxes(data, -3, -1)
data = numpy.swapaxes(data, -2, -1)
isrgb = isrgb and data.shape[-1] in (3, 4)
dims -= 3 if isrgb else 2
if interpolation is None:
threshold = 512
elif isinstance(interpolation, int):
threshold = interpolation
else:
threshold = 0
if isrgb:
data = data[..., :maxdim, :maxdim, :maxdim]
if threshold:
if (data.shape[-2] > threshold or data.shape[-3] > threshold):
interpolation = 'bilinear'
else:
interpolation = 'nearest'
else:
data = data[..., :maxdim, :maxdim]
if threshold:
if (data.shape[-1] > threshold or data.shape[-2] > threshold):
interpolation = 'bilinear'
else:
interpolation = 'nearest'
if photometric == 'PALETTE' and isrgb:
datamax = data.max()
if datamax > 255:
data = data >> 8 # possible precision loss
data = data.astype('B')
elif data.dtype.kind in 'ui':
if not (isrgb and data.dtype.itemsize <= 1) or bitspersample is None:
try:
bitspersample = int(math.ceil(math.log(data.max(), 2)))
except Exception:
bitspersample = data.dtype.itemsize * 8
elif not isinstance(bitspersample, inttypes):
# bitspersample can be tuple, e.g. (5, 6, 5)
bitspersample = data.dtype.itemsize * 8
datamax = 2**bitspersample
if isrgb:
if bitspersample < 8:
data = data << (8 - bitspersample)
elif bitspersample > 8:
data = data >> (bitspersample - 8) # precision loss
data = data.astype('B')
elif data.dtype.kind == 'f':
datamax = data.max()
if isrgb and datamax > 1.0:
if data.dtype.char == 'd':
data = data.astype('f')
data /= datamax
else:
data = data / datamax
elif data.dtype.kind == 'b':
datamax = 1
elif data.dtype.kind == 'c':
data = numpy.absolute(data)
datamax = data.max()
if isrgb:
vmin = 0
else:
if vmax is None:
vmax = datamax
if vmin is None:
if data.dtype.kind == 'i':
dtmin = numpy.iinfo(data.dtype).min
vmin = numpy.min(data)
if vmin == dtmin:
vmin = numpy.min(data[data > dtmin])
elif data.dtype.kind == 'f':
dtmin = numpy.finfo(data.dtype).min
vmin = numpy.min(data)
if vmin == dtmin:
vmin = numpy.min(data[data > dtmin])
else:
vmin = 0
pyplot = sys.modules['matplotlib.pyplot']
if figure is None:
pyplot.rc('font', family='sans-serif', weight='normal', size=8)
figure = pyplot.figure(dpi=dpi, figsize=(10.3, 6.3), frameon=True,
facecolor='1.0', edgecolor='w')
try:
figure.canvas.manager.window.title(title)
except Exception:
pass
size = len(title.splitlines()) if title else 1
pyplot.subplots_adjust(bottom=0.03*(dims+2), top=0.98-size*0.03,
left=0.1, right=0.95, hspace=0.05, wspace=0.0)
if subplot is None:
subplot = 111
subplot = pyplot.subplot(subplot)
subplot.set_facecolor((0, 0, 0))
if title:
try:
title = unicode(title, 'Windows-1252')
except TypeError:
pass
pyplot.title(title, size=11)
if cmap is None:
if data.dtype.char == '?':
cmap = 'gray'
elif data.dtype.kind in 'buf' or vmin == 0:
cmap = 'viridis'
else:
cmap = 'coolwarm'
if photometric == 'MINISWHITE':
cmap += '_r'
image = pyplot.imshow(numpy.atleast_2d(data[(0,) * dims].squeeze()),
vmin=vmin, vmax=vmax, cmap=cmap,
interpolation=interpolation, **kwargs)
if not isrgb:
pyplot.colorbar() # panchor=(0.55, 0.5), fraction=0.05
def format_coord(x, y):
# callback function to format coordinate display in toolbar
x = int(x + 0.5)
y = int(y + 0.5)
try:
if dims:
return '%s @ %s [%4i, %4i]' % (
curaxdat[1][y, x], current, y, x)
return '%s @ [%4i, %4i]' % (data[y, x], y, x)
except IndexError:
return ''
def none(event):
return ''
subplot.format_coord = format_coord
image.get_cursor_data = none
image.format_cursor_data = none
if dims:
current = list((0,) * dims)
curaxdat = [0, data[tuple(current)].squeeze()]
sliders = [pyplot.Slider(
pyplot.axes([0.125, 0.03*(axis+1), 0.725, 0.025]),
'Dimension %i' % axis, 0, data.shape[axis]-1, 0, facecolor='0.5',
valfmt='%%.0f [%i]' % data.shape[axis]) for axis in range(dims)]
for slider in sliders:
slider.drawon = False
def set_image(current, sliders=sliders, data=data):
# change image and redraw canvas
curaxdat[1] = data[tuple(current)].squeeze()
image.set_data(curaxdat[1])
for ctrl, index in zip(sliders, current):
ctrl.eventson = False
ctrl.set_val(index)
ctrl.eventson = True
figure.canvas.draw()
def on_changed(index, axis, data=data, current=current):
# callback function for slider change event
index = int(round(index))
curaxdat[0] = axis
if index == current[axis]:
return
if index >= data.shape[axis]:
index = 0
elif index < 0:
index = data.shape[axis] - 1
current[axis] = index
set_image(current)
def on_keypressed(event, data=data, current=current):
# callback function for key press event
key = event.key
axis = curaxdat[0]
if str(key) in '0123456789':
on_changed(key, axis)
elif key == 'right':
on_changed(current[axis] + 1, axis)
elif key == 'left':
on_changed(current[axis] - 1, axis)
elif key == 'up':
curaxdat[0] = 0 if axis == len(data.shape)-1 else axis + 1
elif key == 'down':
curaxdat[0] = len(data.shape)-1 if axis == 0 else axis - 1
elif key == 'end':
on_changed(data.shape[axis] - 1, axis)
elif key == 'home':
on_changed(0, axis)
figure.canvas.mpl_connect('key_press_event', on_keypressed)
for axis, ctrl in enumerate(sliders):
ctrl.on_changed(lambda k, a=axis: on_changed(k, a))
return figure, subplot, image | Plot n-dimensional images using matplotlib.pyplot.
Return figure, subplot and plot axis.
Requires pyplot already imported C{from matplotlib import pyplot}.
Parameters
----------
data : nd array
The image data.
photometric : {'MINISWHITE', 'MINISBLACK', 'RGB', or 'PALETTE'}
The color space of the image data.
planarconfig : {'CONTIG' or 'SEPARATE'}
Defines how components of each pixel are stored.
bitspersample : int
Number of bits per channel in integer RGB images.
interpolation : str
The image interpolation method used in matplotlib.imshow. By default,
'nearest' will be used for image dimensions <= 512, else 'bilinear'.
cmap : str or matplotlib.colors.Colormap
The colormap maps non-RGBA scalar data to colors.
vmin, vmax : scalar
Data range covered by the colormap. By default, the complete
range of the data is covered.
figure : matplotlib.figure.Figure
Matplotlib figure to use for plotting.
title : str
Window and subplot title.
subplot : int
A matplotlib.pyplot.subplot axis.
maxdim : int
Maximum image width and length.
kwargs : dict
Additional arguments for matplotlib.pyplot.imshow. | Below is the instruction that describes the task:
### Input:
Plot n-dimensional images using matplotlib.pyplot.
Return figure, subplot and plot axis.
Requires pyplot already imported C{from matplotlib import pyplot}.
Parameters
----------
data : nd array
The image data.
photometric : {'MINISWHITE', 'MINISBLACK', 'RGB', or 'PALETTE'}
The color space of the image data.
planarconfig : {'CONTIG' or 'SEPARATE'}
Defines how components of each pixel are stored.
bitspersample : int
Number of bits per channel in integer RGB images.
interpolation : str
The image interpolation method used in matplotlib.imshow. By default,
'nearest' will be used for image dimensions <= 512, else 'bilinear'.
cmap : str or matplotlib.colors.Colormap
The colormap maps non-RGBA scalar data to colors.
vmin, vmax : scalar
Data range covered by the colormap. By default, the complete
range of the data is covered.
figure : matplotlib.figure.Figure
Matplotlib figure to use for plotting.
title : str
Window and subplot title.
subplot : int
A matplotlib.pyplot.subplot axis.
maxdim : int
Maximum image width and length.
kwargs : dict
Additional arguments for matplotlib.pyplot.imshow.
### Response:
def imshow(data, photometric=None, planarconfig=None, bitspersample=None,
interpolation=None, cmap=None, vmin=None, vmax=None,
figure=None, title=None, dpi=96, subplot=None, maxdim=None,
**kwargs):
"""Plot n-dimensional images using matplotlib.pyplot.
Return figure, subplot and plot axis.
Requires pyplot already imported C{from matplotlib import pyplot}.
Parameters
----------
data : nd array
The image data.
photometric : {'MINISWHITE', 'MINISBLACK', 'RGB', or 'PALETTE'}
The color space of the image data.
planarconfig : {'CONTIG' or 'SEPARATE'}
Defines how components of each pixel are stored.
bitspersample : int
Number of bits per channel in integer RGB images.
interpolation : str
The image interpolation method used in matplotlib.imshow. By default,
'nearest' will be used for image dimensions <= 512, else 'bilinear'.
cmap : str or matplotlib.colors.Colormap
The colormap maps non-RGBA scalar data to colors.
vmin, vmax : scalar
Data range covered by the colormap. By default, the complete
range of the data is covered.
figure : matplotlib.figure.Figure
Matplotlib figure to use for plotting.
title : str
Window and subplot title.
subplot : int
A matplotlib.pyplot.subplot axis.
maxdim : int
Maximum image width and length.
kwargs : dict
Additional arguments for matplotlib.pyplot.imshow.
"""
# TODO: rewrite detection of isrgb, iscontig
# TODO: use planarconfig
if photometric is None:
photometric = 'RGB'
if maxdim is None:
maxdim = 2**16
isrgb = photometric in ('RGB', 'YCBCR') # 'PALETTE', 'YCBCR'
if data.dtype == 'float16':
data = data.astype('float32')
if data.dtype.kind == 'b':
isrgb = False
if isrgb and not (data.shape[-1] in (3, 4) or (
data.ndim > 2 and data.shape[-3] in (3, 4))):
isrgb = False
photometric = 'MINISBLACK'
data = data.squeeze()
if photometric in ('MINISWHITE', 'MINISBLACK', None):
data = reshape_nd(data, 2)
else:
data = reshape_nd(data, 3)
dims = data.ndim
if dims < 2:
raise ValueError('not an image')
if dims == 2:
dims = 0
isrgb = False
else:
if isrgb and data.shape[-3] in (3, 4):
data = numpy.swapaxes(data, -3, -2)
data = numpy.swapaxes(data, -2, -1)
elif not isrgb and (data.shape[-1] < data.shape[-2] // 8 and
data.shape[-1] < data.shape[-3] // 8 and
data.shape[-1] < 5):
data = numpy.swapaxes(data, -3, -1)
data = numpy.swapaxes(data, -2, -1)
isrgb = isrgb and data.shape[-1] in (3, 4)
dims -= 3 if isrgb else 2
if interpolation is None:
threshold = 512
elif isinstance(interpolation, int):
threshold = interpolation
else:
threshold = 0
if isrgb:
data = data[..., :maxdim, :maxdim, :maxdim]
if threshold:
if (data.shape[-2] > threshold or data.shape[-3] > threshold):
interpolation = 'bilinear'
else:
interpolation = 'nearest'
else:
data = data[..., :maxdim, :maxdim]
if threshold:
if (data.shape[-1] > threshold or data.shape[-2] > threshold):
interpolation = 'bilinear'
else:
interpolation = 'nearest'
if photometric == 'PALETTE' and isrgb:
datamax = data.max()
if datamax > 255:
data = data >> 8 # possible precision loss
data = data.astype('B')
elif data.dtype.kind in 'ui':
if not (isrgb and data.dtype.itemsize <= 1) or bitspersample is None:
try:
bitspersample = int(math.ceil(math.log(data.max(), 2)))
except Exception:
bitspersample = data.dtype.itemsize * 8
elif not isinstance(bitspersample, inttypes):
# bitspersample can be tuple, e.g. (5, 6, 5)
bitspersample = data.dtype.itemsize * 8
datamax = 2**bitspersample
if isrgb:
if bitspersample < 8:
data = data << (8 - bitspersample)
elif bitspersample > 8:
data = data >> (bitspersample - 8) # precision loss
data = data.astype('B')
elif data.dtype.kind == 'f':
datamax = data.max()
if isrgb and datamax > 1.0:
if data.dtype.char == 'd':
data = data.astype('f')
data /= datamax
else:
data = data / datamax
elif data.dtype.kind == 'b':
datamax = 1
elif data.dtype.kind == 'c':
data = numpy.absolute(data)
datamax = data.max()
if isrgb:
vmin = 0
else:
if vmax is None:
vmax = datamax
if vmin is None:
if data.dtype.kind == 'i':
dtmin = numpy.iinfo(data.dtype).min
vmin = numpy.min(data)
if vmin == dtmin:
vmin = numpy.min(data[data > dtmin])
elif data.dtype.kind == 'f':
dtmin = numpy.finfo(data.dtype).min
vmin = numpy.min(data)
if vmin == dtmin:
vmin = numpy.min(data[data > dtmin])
else:
vmin = 0
pyplot = sys.modules['matplotlib.pyplot']
if figure is None:
pyplot.rc('font', family='sans-serif', weight='normal', size=8)
figure = pyplot.figure(dpi=dpi, figsize=(10.3, 6.3), frameon=True,
facecolor='1.0', edgecolor='w')
try:
figure.canvas.manager.window.title(title)
except Exception:
pass
size = len(title.splitlines()) if title else 1
pyplot.subplots_adjust(bottom=0.03*(dims+2), top=0.98-size*0.03,
left=0.1, right=0.95, hspace=0.05, wspace=0.0)
if subplot is None:
subplot = 111
subplot = pyplot.subplot(subplot)
subplot.set_facecolor((0, 0, 0))
if title:
try:
title = unicode(title, 'Windows-1252')
except TypeError:
pass
pyplot.title(title, size=11)
if cmap is None:
if data.dtype.char == '?':
cmap = 'gray'
elif data.dtype.kind in 'buf' or vmin == 0:
cmap = 'viridis'
else:
cmap = 'coolwarm'
if photometric == 'MINISWHITE':
cmap += '_r'
image = pyplot.imshow(numpy.atleast_2d(data[(0,) * dims].squeeze()),
vmin=vmin, vmax=vmax, cmap=cmap,
interpolation=interpolation, **kwargs)
if not isrgb:
pyplot.colorbar() # panchor=(0.55, 0.5), fraction=0.05
def format_coord(x, y):
# callback function to format coordinate display in toolbar
x = int(x + 0.5)
y = int(y + 0.5)
try:
if dims:
return '%s @ %s [%4i, %4i]' % (
curaxdat[1][y, x], current, y, x)
return '%s @ [%4i, %4i]' % (data[y, x], y, x)
except IndexError:
return ''
def none(event):
return ''
subplot.format_coord = format_coord
image.get_cursor_data = none
image.format_cursor_data = none
if dims:
current = list((0,) * dims)
curaxdat = [0, data[tuple(current)].squeeze()]
sliders = [pyplot.Slider(
pyplot.axes([0.125, 0.03*(axis+1), 0.725, 0.025]),
'Dimension %i' % axis, 0, data.shape[axis]-1, 0, facecolor='0.5',
valfmt='%%.0f [%i]' % data.shape[axis]) for axis in range(dims)]
for slider in sliders:
slider.drawon = False
def set_image(current, sliders=sliders, data=data):
# change image and redraw canvas
curaxdat[1] = data[tuple(current)].squeeze()
image.set_data(curaxdat[1])
for ctrl, index in zip(sliders, current):
ctrl.eventson = False
ctrl.set_val(index)
ctrl.eventson = True
figure.canvas.draw()
def on_changed(index, axis, data=data, current=current):
# callback function for slider change event
index = int(round(index))
curaxdat[0] = axis
if index == current[axis]:
return
if index >= data.shape[axis]:
index = 0
elif index < 0:
index = data.shape[axis] - 1
current[axis] = index
set_image(current)
def on_keypressed(event, data=data, current=current):
# callback function for key press event
key = event.key
axis = curaxdat[0]
if str(key) in '0123456789':
on_changed(key, axis)
elif key == 'right':
on_changed(current[axis] + 1, axis)
elif key == 'left':
on_changed(current[axis] - 1, axis)
elif key == 'up':
curaxdat[0] = 0 if axis == len(data.shape)-1 else axis + 1
elif key == 'down':
curaxdat[0] = len(data.shape)-1 if axis == 0 else axis - 1
elif key == 'end':
on_changed(data.shape[axis] - 1, axis)
elif key == 'home':
on_changed(0, axis)
figure.canvas.mpl_connect('key_press_event', on_keypressed)
for axis, ctrl in enumerate(sliders):
ctrl.on_changed(lambda k, a=axis: on_changed(k, a))
return figure, subplot, image |
def update_note(note, **kwargs):
"""
Update a note
"""
note_i = _get_note(note.id)
if note.ref_key != note_i.ref_key:
raise HydraError("Cannot convert a %s note to a %s note. Please create a new note instead."%(note_i.ref_key, note.ref_key))
note_i.set_ref(note.ref_key, note.ref_id)
note_i.value = note.value
db.DBSession.flush()
    return note_i | Update a note | Below is the instruction that describes the task:
### Input:
Update a note
### Response:
def update_note(note, **kwargs):
"""
Update a note
"""
note_i = _get_note(note.id)
if note.ref_key != note_i.ref_key:
raise HydraError("Cannot convert a %s note to a %s note. Please create a new note instead."%(note_i.ref_key, note.ref_key))
note_i.set_ref(note.ref_key, note.ref_id)
note_i.value = note.value
db.DBSession.flush()
return note_i |
def analyse_text(text):
"""
        Check if code contains a REBOL header and so it is probably not R code
"""
if re.match(r'^\s*REBOL\s*\[', text, re.IGNORECASE):
# The code starts with REBOL header
return 1.0
        elif re.search(r'\s*REBOL\s*\[', text, re.IGNORECASE):
# The code contains REBOL header but also some text before it
            return 0.5 | Check if code contains a REBOL header and so it is probably not R code | Below is the instruction that describes the task:
### Input:
Check if code contains a REBOL header and so it is probably not R code
### Response:
def analyse_text(text):
"""
        Check if code contains a REBOL header and so it is probably not R code
"""
if re.match(r'^\s*REBOL\s*\[', text, re.IGNORECASE):
# The code starts with REBOL header
return 1.0
        elif re.search(r'\s*REBOL\s*\[', text, re.IGNORECASE):
# The code contains REBOL header but also some text before it
return 0.5 |
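A quick check of the scoring above; this assumes analyse_text() and the re module are in scope exactly as in the snippet.

print(analyse_text('REBOL [Title: "demo"]\nprint "hi"'))  # 1.0 - the source starts with a REBOL header
print(analyse_text('x <- c(1, 2, 3)'))                    # None - no REBOL header, likely plain R code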
def pageassert(func):
'''
    Decorator that asserts the page number
'''
@wraps(func)
def wrapper(*args, **kwargs):
if args[0] < 1 or args[0] > 40:
raise ValueError('Page Number not found')
return func(*args, **kwargs)
    return wrapper | Decorator that asserts the page number | Below is the instruction that describes the task:
### Input:
Decorator that asserts the page number
### Response:
def pageassert(func):
'''
    Decorator that asserts the page number
'''
@wraps(func)
def wrapper(*args, **kwargs):
if args[0] < 1 or args[0] > 40:
raise ValueError('Page Number not found')
return func(*args, **kwargs)
return wrapper |
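A sketch of the decorator in use; fetch_page() is a made-up function used only for illustration.

@pageassert
def fetch_page(page, per_page=20):
    return {'page': page, 'per_page': per_page}

print(fetch_page(3))      # {'page': 3, 'per_page': 20}
try:
    fetch_page(0)         # pages outside 1..40 are rejected
except ValueError as exc:
    print(exc)            # Page Number not found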
def build_locator(selector):
"""
- ID = "#valid_id"
- CLASS_NAME = ".valid_class_name"
- TAG_NAME = "valid_tag_name"
- XPATH = start with "./" or "//" or "$x:"
- LINK_TEXT = start with "$link_text:"
- PARTIAL_LINK_TEXT = start with "$partial_link_text:"
- NAME = "@valid_name_attribute_value"
    CSS_SELECTOR = all others that start with *|.|#|[\w-]|\[|:
:type selector: str|tuple
:param selector:
:rtype: tuple[selenium.webdriver.common.by.By, str]
:return:
"""
if type(selector) is tuple:
return selector
if not isinstance(selector, six.string_types):
raise InvalidSelectorException("Invalid locator values passed in")
s = selector.strip()
for test, by, index in selectors:
if test(s):
return by, s[index:]
raise InvalidSelectorException("Invalid locator values passed in: {}".format(selector)) | - ID = "#valid_id"
- CLASS_NAME = ".valid_class_name"
- TAG_NAME = "valid_tag_name"
- XPATH = start with "./" or "//" or "$x:"
- LINK_TEXT = start with "$link_text:"
- PARTIAL_LINK_TEXT = start with "$partial_link_text:"
- NAME = "@valid_name_attribute_value"
    CSS_SELECTOR = all others that start with *|.|#|[\w-]|\[|:
:type selector: str|tuple
:param selector:
:rtype: tuple[selenium.webdriver.common.by.By, str]
    :return: | Below is the instruction that describes the task:
### Input:
- ID = "#valid_id"
- CLASS_NAME = ".valid_class_name"
- TAG_NAME = "valid_tag_name"
- XPATH = start with "./" or "//" or "$x:"
- LINK_TEXT = start with "$link_text:"
- PARTIAL_LINK_TEXT = start with "$partial_link_text:"
- NAME = "@valid_name_attribute_value"
    CSS_SELECTOR = all others that start with *|.|#|[\w-]|\[|:
:type selector: str|tuple
:param selector:
:rtype: tuple[selenium.webdriver.common.by.By, str]
:return:
### Response:
def build_locator(selector):
"""
- ID = "#valid_id"
- CLASS_NAME = ".valid_class_name"
- TAG_NAME = "valid_tag_name"
- XPATH = start with "./" or "//" or "$x:"
- LINK_TEXT = start with "$link_text:"
- PARTIAL_LINK_TEXT = start with "$partial_link_text:"
- NAME = "@valid_name_attribute_value"
    CSS_SELECTOR = all others that start with *|.|#|[\w-]|\[|:
:type selector: str|tuple
:param selector:
:rtype: tuple[selenium.webdriver.common.by.By, str]
:return:
"""
if type(selector) is tuple:
return selector
if not isinstance(selector, six.string_types):
raise InvalidSelectorException("Invalid locator values passed in")
s = selector.strip()
for test, by, index in selectors:
if test(s):
return by, s[index:]
raise InvalidSelectorException("Invalid locator values passed in: {}".format(selector)) |
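Illustrative expectations for build_locator(), assuming the module-level `selectors` table implements the rules listed in the docstring; the selenium By constants are real, the concrete inputs are examples only.

from selenium.webdriver.common.by import By

assert build_locator('#main') == (By.ID, 'main')
assert build_locator('.btn-primary') == (By.CLASS_NAME, 'btn-primary')
assert build_locator('//div[@id="main"]') == (By.XPATH, '//div[@id="main"]')
assert build_locator('@username') == (By.NAME, 'username')
# Tuples are passed through unchanged.
assert build_locator((By.CSS_SELECTOR, 'ul > li')) == (By.CSS_SELECTOR, 'ul > li')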
def parse_text_to_table(txt):
"""
takes a blob of text and finds delimiter OR guesses
the column positions to parse into a table.
input: txt = blob of text, lines separated by \n
output: res = table of text
"""
res = [] # resulting table
delim = identify_delim(txt)
print('txt to parse = ', txt, '\ndelim=',delim)
if delim == '' or delim == ' ':
fixed_split = identify_col_pos(txt)
if fixed_split == []:
res = []
else:
res = parse_text_by_col_pos(txt, fixed_split)
else:
res = parse_text_by_delim(txt, delim)
return res | takes a blob of text and finds delimiter OR guesses
the column positions to parse into a table.
input: txt = blob of text, lines separated by \n
    output: res = table of text | Below is the instruction that describes the task:
### Input:
takes a blob of text and finds delimiter OR guesses
the column positions to parse into a table.
input: txt = blob of text, lines separated by \n
output: res = table of text
### Response:
def parse_text_to_table(txt):
"""
takes a blob of text and finds delimiter OR guesses
the column positions to parse into a table.
input: txt = blob of text, lines separated by \n
output: res = table of text
"""
res = [] # resulting table
delim = identify_delim(txt)
print('txt to parse = ', txt, '\ndelim=',delim)
if delim == '' or delim == ' ':
fixed_split = identify_col_pos(txt)
if fixed_split == []:
res = []
else:
res = parse_text_by_col_pos(txt, fixed_split)
else:
res = parse_text_by_delim(txt, delim)
return res |
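A small usage sketch, assuming identify_delim() and the two parse_text_by_* helpers from the same module are available; the sample text and expected output are illustrative.

txt = "name,age\nalice,30\nbob,25"
table = parse_text_to_table(txt)
# With a comma delimiter detected, the expected result is:
# [['name', 'age'], ['alice', '30'], ['bob', '25']]
print(table)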
def close(self):
"""
Destructor for this audio interface. Waits the threads to finish their
streams, if desired.
"""
with self.halting: # Avoid simultaneous "close" threads
if not self.finished: # Ignore all "close" calls, but the first,
self.finished = True # and any call to play would raise ThreadError
# Closes all playing AudioThread instances
while True:
with self.lock: # Ensure there's no other thread messing around
try:
thread = self._threads[0] # Needless to say: pop = deadlock
except IndexError: # Empty list
break # No more threads
if not self.wait:
thread.stop()
thread.join()
# Closes all recording RecStream instances
while self._recordings:
recst = self._recordings[-1]
recst.stop()
recst.take(inf) # Ensure it'll be closed
# Finishes
assert not self._pa._streams # No stream should survive
self._pa.terminate() | Destructor for this audio interface. Waits the threads to finish their
    streams, if desired. | Below is the instruction that describes the task:
### Input:
Destructor for this audio interface. Waits the threads to finish their
streams, if desired.
### Response:
def close(self):
"""
Destructor for this audio interface. Waits the threads to finish their
streams, if desired.
"""
with self.halting: # Avoid simultaneous "close" threads
if not self.finished: # Ignore all "close" calls, but the first,
self.finished = True # and any call to play would raise ThreadError
# Closes all playing AudioThread instances
while True:
with self.lock: # Ensure there's no other thread messing around
try:
thread = self._threads[0] # Needless to say: pop = deadlock
except IndexError: # Empty list
break # No more threads
if not self.wait:
thread.stop()
thread.join()
# Closes all recording RecStream instances
while self._recordings:
recst = self._recordings[-1]
recst.stop()
recst.take(inf) # Ensure it'll be closed
# Finishes
assert not self._pa._streams # No stream should survive
self._pa.terminate() |
def allow_origins(self, *origins, methods=None, max_age=None, credentials=None, headers=None, **overrides):
"""Convenience method for quickly allowing other resources to access this one"""
response_headers = {}
if origins:
@hug.response_middleware()
def process_data(request, response, resource):
if 'ORIGIN' in request.headers:
origin = request.headers['ORIGIN']
if origin in origins:
response.set_header('Access-Control-Allow-Origin', origin)
else:
response_headers['Access-Control-Allow-Origin'] = '*'
if methods:
response_headers['Access-Control-Allow-Methods'] = ', '.join(methods)
if max_age:
response_headers['Access-Control-Max-Age'] = max_age
if credentials:
response_headers['Access-Control-Allow-Credentials'] = str(credentials).lower()
if headers:
response_headers['Access-Control-Allow-Headers'] = headers
        return self.add_response_headers(response_headers, **overrides) | Convenience method for quickly allowing other resources to access this one | Below is the instruction that describes the task:
### Input:
Convenience method for quickly allowing other resources to access this one
### Response:
def allow_origins(self, *origins, methods=None, max_age=None, credentials=None, headers=None, **overrides):
"""Convenience method for quickly allowing other resources to access this one"""
response_headers = {}
if origins:
@hug.response_middleware()
def process_data(request, response, resource):
if 'ORIGIN' in request.headers:
origin = request.headers['ORIGIN']
if origin in origins:
response.set_header('Access-Control-Allow-Origin', origin)
else:
response_headers['Access-Control-Allow-Origin'] = '*'
if methods:
response_headers['Access-Control-Allow-Methods'] = ', '.join(methods)
if max_age:
response_headers['Access-Control-Max-Age'] = max_age
if credentials:
response_headers['Access-Control-Allow-Credentials'] = str(credentials).lower()
if headers:
response_headers['Access-Control-Allow-Headers'] = headers
return self.add_response_headers(response_headers, **overrides) |
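A hedged sketch of calling allow_origins() on the HTTP interface object that defines it (hug-style); the `interface` name and the origin URLs are placeholders.

interface.allow_origins('https://app.example.com', 'https://admin.example.com',
                        methods=('GET', 'POST'),
                        max_age=3600,
                        credentials=True)
# Every response now carries the CORS headers above; requests whose ORIGIN header
# matches one of the two origins get it echoed back in Access-Control-Allow-Origin.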
def tag_del(self, item, tag):
"""
Remove tag from the tags of item.
:param item: item identifier
:type item: str
:param tag: tag name
:type tag: str
"""
tags = list(self.item(item, "tags"))
if tag in tags:
tags.remove(tag)
self.item(item, tags=tuple(tags)) | Remove tag from the tags of item.
:param item: item identifier
:type item: str
:param tag: tag name
        :type tag: str | Below is the instruction that describes the task:
### Input:
Remove tag from the tags of item.
:param item: item identifier
:type item: str
:param tag: tag name
:type tag: str
### Response:
def tag_del(self, item, tag):
"""
Remove tag from the tags of item.
:param item: item identifier
:type item: str
:param tag: tag name
:type tag: str
"""
tags = list(self.item(item, "tags"))
if tag in tags:
tags.remove(tag)
self.item(item, tags=tuple(tags)) |
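Hypothetical usage, assuming a ttk.Treeview subclass that exposes tag_del() as defined above; CheckedTreeview is a placeholder name, not a real class.

import tkinter as tk

root = tk.Tk()
tree = CheckedTreeview(root)                     # placeholder subclass providing tag_del()
item = tree.insert('', 'end', text='row 1', tags=('dirty', 'selected'))
tree.tag_del(item, 'dirty')                      # only 'selected' remains
print(tree.item(item, 'tags'))                   # ('selected',)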
def get_record(self):
"""Override the base."""
self.recid = self.get_recid()
self.remove_controlfields()
self.update_system_numbers()
self.add_systemnumber("Inspire", recid=self.recid)
self.add_control_number("003", "SzGeCERN")
self.update_collections()
self.update_languages()
self.update_reportnumbers()
self.update_authors()
self.update_journals()
self.update_subject_categories("INSPIRE", "SzGeCERN", "categories_cds")
self.update_pagenumber()
self.update_notes()
self.update_experiments()
self.update_isbn()
self.update_dois()
self.update_links_and_ffts()
self.update_date()
self.update_date_year()
self.update_hidden_notes()
self.update_oai_info()
self.update_cnum()
self.update_conference_info()
self.fields_list = [
"909", "541", "961",
"970", "690", "695",
"981",
]
self.strip_fields()
if "ANNOUNCEMENT" in self.collections:
self.update_conference_111()
self.update_conference_links()
record_add_field(self.record, "690", ind1="C", subfields=[("a", "CONFERENCE")])
if "THESIS" in self.collections:
self.update_thesis_information()
self.update_thesis_supervisors()
if "PROCEEDINGS" in self.collections:
# Special proceeding syntax
self.update_title_to_proceeding()
self.update_author_to_proceeding()
record_add_field(self.record, "690", ind1="C", subfields=[("a", "CONFERENCE")])
# 690 tags
if self.tag_as_cern:
record_add_field(self.record, "690", ind1="C", subfields=[("a", "CERN")])
        return self.record | Override the base. | Below is the instruction that describes the task:
### Input:
Override the base.
### Response:
def get_record(self):
"""Override the base."""
self.recid = self.get_recid()
self.remove_controlfields()
self.update_system_numbers()
self.add_systemnumber("Inspire", recid=self.recid)
self.add_control_number("003", "SzGeCERN")
self.update_collections()
self.update_languages()
self.update_reportnumbers()
self.update_authors()
self.update_journals()
self.update_subject_categories("INSPIRE", "SzGeCERN", "categories_cds")
self.update_pagenumber()
self.update_notes()
self.update_experiments()
self.update_isbn()
self.update_dois()
self.update_links_and_ffts()
self.update_date()
self.update_date_year()
self.update_hidden_notes()
self.update_oai_info()
self.update_cnum()
self.update_conference_info()
self.fields_list = [
"909", "541", "961",
"970", "690", "695",
"981",
]
self.strip_fields()
if "ANNOUNCEMENT" in self.collections:
self.update_conference_111()
self.update_conference_links()
record_add_field(self.record, "690", ind1="C", subfields=[("a", "CONFERENCE")])
if "THESIS" in self.collections:
self.update_thesis_information()
self.update_thesis_supervisors()
if "PROCEEDINGS" in self.collections:
# Special proceeding syntax
self.update_title_to_proceeding()
self.update_author_to_proceeding()
record_add_field(self.record, "690", ind1="C", subfields=[("a", "CONFERENCE")])
# 690 tags
if self.tag_as_cern:
record_add_field(self.record, "690", ind1="C", subfields=[("a", "CERN")])
return self.record |
def guard_activate(analysis_service):
"""Returns whether the transition activate can be performed for the
analysis service passed in
"""
calculation = analysis_service.getCalculation()
if not calculation:
return True
# If the calculation is inactive, we cannot activate the service
if not api.is_active(calculation):
return False
# All services that we depend on to calculate our result are active or we
# don't depend on other services.
dependencies = calculation.getDependentServices()
for dependency in dependencies:
if not api.is_active(dependency):
return False
return True | Returns whether the transition activate can be performed for the
    analysis service passed in | Below is the instruction that describes the task:
### Input:
Returns whether the transition activate can be performed for the
analysis service passed in
### Response:
def guard_activate(analysis_service):
"""Returns whether the transition activate can be performed for the
analysis service passed in
"""
calculation = analysis_service.getCalculation()
if not calculation:
return True
# If the calculation is inactive, we cannot activate the service
if not api.is_active(calculation):
return False
# All services that we depend on to calculate our result are active or we
# don't depend on other services.
dependencies = calculation.getDependentServices()
for dependency in dependencies:
if not api.is_active(dependency):
return False
return True |
def get_sections(self, s, base,
sections=['Parameters', 'Other Parameters']):
"""
Method that extracts the specified sections out of the given string if
(and only if) the docstring follows the numpy documentation guidelines
[1]_. Note that the section either must appear in the
:attr:`param_like_sections` or the :attr:`text_sections` attribute.
Parameters
----------
s: str
Docstring to split
base: str
base to use in the :attr:`sections` attribute
sections: list of str
sections to look for. Each section must be followed by a newline
character ('\\n') and a bar of '-' (following the numpy (napoleon)
docstring conventions).
Returns
-------
str
The replaced string
References
----------
.. [1] https://github.com/numpy/numpy/blob/master/doc/HOWTO_DOCUMENT.rst.txt
See Also
--------
delete_params, keep_params, delete_types, keep_types, delete_kwargs:
For manipulating the docstring sections
save_docstring:
for saving an entire docstring
"""
params = self.params
# Remove the summary and dedent the rest
s = self._remove_summary(s)
for section in sections:
key = '%s.%s' % (base, section.lower().replace(' ', '_'))
params[key] = self._get_section(s, section)
return s | Method that extracts the specified sections out of the given string if
(and only if) the docstring follows the numpy documentation guidelines
[1]_. Note that the section either must appear in the
:attr:`param_like_sections` or the :attr:`text_sections` attribute.
Parameters
----------
s: str
Docstring to split
base: str
base to use in the :attr:`sections` attribute
sections: list of str
sections to look for. Each section must be followed by a newline
character ('\\n') and a bar of '-' (following the numpy (napoleon)
docstring conventions).
Returns
-------
str
The replaced string
References
----------
.. [1] https://github.com/numpy/numpy/blob/master/doc/HOWTO_DOCUMENT.rst.txt
See Also
--------
delete_params, keep_params, delete_types, keep_types, delete_kwargs:
For manipulating the docstring sections
save_docstring:
            for saving an entire docstring | Below is the instruction that describes the task:
### Input:
Method that extracts the specified sections out of the given string if
(and only if) the docstring follows the numpy documentation guidelines
[1]_. Note that the section either must appear in the
:attr:`param_like_sections` or the :attr:`text_sections` attribute.
Parameters
----------
s: str
Docstring to split
base: str
base to use in the :attr:`sections` attribute
sections: list of str
sections to look for. Each section must be followed by a newline
character ('\\n') and a bar of '-' (following the numpy (napoleon)
docstring conventions).
Returns
-------
str
The replaced string
References
----------
.. [1] https://github.com/numpy/numpy/blob/master/doc/HOWTO_DOCUMENT.rst.txt
See Also
--------
delete_params, keep_params, delete_types, keep_types, delete_kwargs:
For manipulating the docstring sections
save_docstring:
for saving an entire docstring
### Response:
def get_sections(self, s, base,
sections=['Parameters', 'Other Parameters']):
"""
Method that extracts the specified sections out of the given string if
(and only if) the docstring follows the numpy documentation guidelines
[1]_. Note that the section either must appear in the
:attr:`param_like_sections` or the :attr:`text_sections` attribute.
Parameters
----------
s: str
Docstring to split
base: str
base to use in the :attr:`sections` attribute
sections: list of str
sections to look for. Each section must be followed by a newline
character ('\\n') and a bar of '-' (following the numpy (napoleon)
docstring conventions).
Returns
-------
str
The replaced string
References
----------
.. [1] https://github.com/numpy/numpy/blob/master/doc/HOWTO_DOCUMENT.rst.txt
See Also
--------
delete_params, keep_params, delete_types, keep_types, delete_kwargs:
For manipulating the docstring sections
save_docstring:
for saving an entire docstring
"""
params = self.params
# Remove the summary and dedent the rest
s = self._remove_summary(s)
for section in sections:
key = '%s.%s' % (base, section.lower().replace(' ', '_'))
params[key] = self._get_section(s, section)
return s |
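A usage sketch, assuming `processor` is an instance of the docstring-processing class that defines get_sections() above (a docrep-style processor); the example function and base name are illustrative.

def example(a, b=1):
    """Add two numbers.

    Parameters
    ----------
    a: int
        First operand.
    b: int
        Second operand, defaults to 1.
    """
    return a + b

processor.get_sections(example.__doc__, 'example')
# The extracted block is stored under '<base>.<section>' for reuse elsewhere:
print(processor.params['example.parameters'])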
def _sync(self, timeout_ms=30000):
""" Reimplements MatrixClient._sync, add 'account_data' support to /sync """
response = self.api.sync(self.sync_token, timeout_ms)
prev_sync_token = self.sync_token
self.sync_token = response["next_batch"]
if self._handle_thread is not None:
# if previous _handle_thread is still running, wait for it and re-raise if needed
self._handle_thread.get()
is_first_sync = (prev_sync_token is None)
self._handle_thread = gevent.Greenlet(self._handle_response, response, is_first_sync)
self._handle_thread.name = (
f'GMatrixClient._sync user_id:{self.user_id} sync_token:{prev_sync_token}'
)
self._handle_thread.link_exception(lambda g: self.sync_thread.kill(g.exception))
self._handle_thread.start()
if self._post_hook_func is not None:
            self._post_hook_func(self.sync_token) | Reimplements MatrixClient._sync, add 'account_data' support to /sync | Below is the instruction that describes the task:
### Input:
Reimplements MatrixClient._sync, add 'account_data' support to /sync
### Response:
def _sync(self, timeout_ms=30000):
""" Reimplements MatrixClient._sync, add 'account_data' support to /sync """
response = self.api.sync(self.sync_token, timeout_ms)
prev_sync_token = self.sync_token
self.sync_token = response["next_batch"]
if self._handle_thread is not None:
# if previous _handle_thread is still running, wait for it and re-raise if needed
self._handle_thread.get()
is_first_sync = (prev_sync_token is None)
self._handle_thread = gevent.Greenlet(self._handle_response, response, is_first_sync)
self._handle_thread.name = (
f'GMatrixClient._sync user_id:{self.user_id} sync_token:{prev_sync_token}'
)
self._handle_thread.link_exception(lambda g: self.sync_thread.kill(g.exception))
self._handle_thread.start()
if self._post_hook_func is not None:
self._post_hook_func(self.sync_token) |
def __purge():
"""Remove all dead signal receivers from the global receivers collection.
Note:
It is assumed that the caller holds the __lock.
"""
global __receivers
newreceivers = collections.defaultdict(list)
for signal, receivers in six.iteritems(__receivers):
alive = [x for x in receivers if not __is_dead(x)]
newreceivers[signal] = alive
__receivers = newreceivers | Remove all dead signal receivers from the global receivers collection.
Note:
        It is assumed that the caller holds the __lock. | Below is the instruction that describes the task:
### Input:
Remove all dead signal receivers from the global receivers collection.
Note:
It is assumed that the caller holds the __lock.
### Response:
def __purge():
"""Remove all dead signal receivers from the global receivers collection.
Note:
It is assumed that the caller holds the __lock.
"""
global __receivers
newreceivers = collections.defaultdict(list)
for signal, receivers in six.iteritems(__receivers):
alive = [x for x in receivers if not __is_dead(x)]
newreceivers[signal] = alive
__receivers = newreceivers |
def is_F_hypergraph(self):
"""Indicates whether the hypergraph is an F-hypergraph.
In an F-hypergraph, all hyperedges are F-hyperedges -- that is, every
hyperedge has exactly one node in the tail.
:returns: bool -- True iff the hypergraph is an F-hypergraph.
"""
for hyperedge_id in self._hyperedge_attributes:
tail = self.get_hyperedge_tail(hyperedge_id)
if len(tail) > 1:
return False
return True | Indicates whether the hypergraph is an F-hypergraph.
In an F-hypergraph, all hyperedges are F-hyperedges -- that is, every
hyperedge has exactly one node in the tail.
        :returns: bool -- True iff the hypergraph is an F-hypergraph. | Below is the instruction that describes the task:
### Input:
Indicates whether the hypergraph is an F-hypergraph.
In an F-hypergraph, all hyperedges are F-hyperedges -- that is, every
hyperedge has exactly one node in the tail.
:returns: bool -- True iff the hypergraph is an F-hypergraph.
### Response:
def is_F_hypergraph(self):
"""Indicates whether the hypergraph is an F-hypergraph.
In an F-hypergraph, all hyperedges are F-hyperedges -- that is, every
hyperedge has exactly one node in the tail.
:returns: bool -- True iff the hypergraph is an F-hypergraph.
"""
for hyperedge_id in self._hyperedge_attributes:
tail = self.get_hyperedge_tail(hyperedge_id)
if len(tail) > 1:
return False
return True |
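A sketch, assuming a directed-hypergraph class (halp-style) that provides add_hyperedge(tail, head) alongside the method above; the class name is an assumption.

H = DirectedHypergraph()                 # assumed class exposing is_F_hypergraph()
H.add_hyperedge({'a'}, {'b', 'c'})       # tail of size 1 -> F-hyperedge
H.add_hyperedge({'b'}, {'d'})
print(H.is_F_hypergraph())               # True
H.add_hyperedge({'b', 'c'}, {'e'})       # a tail with two nodes breaks the property
print(H.is_F_hypergraph())               # False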
def authenticate(
self, end_user_ip, personal_number=None, requirement=None, **kwargs
):
"""Request an authentication order. The :py:meth:`collect` method
is used to query the status of the order.
Note that personal number is not needed when authentication is to
be done on the same device, provided that the returned
``autoStartToken`` is used to open the BankID Client.
Example data returned:
.. code-block:: json
{
"orderRef":"131daac9-16c6-4618-beb0-365768f37288",
"autoStartToken":"7c40b5c9-fa74-49cf-b98c-bfe651f9a7c6"
}
:param end_user_ip: IP address of the user requesting
the authentication.
:type end_user_ip: str
:param personal_number: The Swedish personal number in
format YYYYMMDDXXXX.
:type personal_number: str
:param requirement: An optional dictionary stating how the signature
must be created and verified. See BankID Relying Party Guidelines,
section 13.5 for more details.
:type requirement: dict
:return: The order response.
:rtype: dict
:raises BankIDError: raises a subclass of this error
when error has been returned from server.
"""
data = {"endUserIp": end_user_ip}
if personal_number:
data["personalNumber"] = personal_number
if requirement and isinstance(requirement, dict):
data["requirement"] = requirement
# Handling potentially changed optional in-parameters.
data.update(kwargs)
response = self.client.post(self._auth_endpoint, json=data)
if response.status_code == 200:
return response.json()
else:
raise get_json_error_class(response) | Request an authentication order. The :py:meth:`collect` method
is used to query the status of the order.
Note that personal number is not needed when authentication is to
be done on the same device, provided that the returned
``autoStartToken`` is used to open the BankID Client.
Example data returned:
.. code-block:: json
{
"orderRef":"131daac9-16c6-4618-beb0-365768f37288",
"autoStartToken":"7c40b5c9-fa74-49cf-b98c-bfe651f9a7c6"
}
:param end_user_ip: IP address of the user requesting
the authentication.
:type end_user_ip: str
:param personal_number: The Swedish personal number in
format YYYYMMDDXXXX.
:type personal_number: str
:param requirement: An optional dictionary stating how the signature
must be created and verified. See BankID Relying Party Guidelines,
section 13.5 for more details.
:type requirement: dict
:return: The order response.
:rtype: dict
:raises BankIDError: raises a subclass of this error
            when error has been returned from server. | Below is the instruction that describes the task:
### Input:
Request an authentication order. The :py:meth:`collect` method
is used to query the status of the order.
Note that personal number is not needed when authentication is to
be done on the same device, provided that the returned
``autoStartToken`` is used to open the BankID Client.
Example data returned:
.. code-block:: json
{
"orderRef":"131daac9-16c6-4618-beb0-365768f37288",
"autoStartToken":"7c40b5c9-fa74-49cf-b98c-bfe651f9a7c6"
}
:param end_user_ip: IP address of the user requesting
the authentication.
:type end_user_ip: str
:param personal_number: The Swedish personal number in
format YYYYMMDDXXXX.
:type personal_number: str
:param requirement: An optional dictionary stating how the signature
must be created and verified. See BankID Relying Party Guidelines,
section 13.5 for more details.
:type requirement: dict
:return: The order response.
:rtype: dict
:raises BankIDError: raises a subclass of this error
when error has been returned from server.
### Response:
def authenticate(
self, end_user_ip, personal_number=None, requirement=None, **kwargs
):
"""Request an authentication order. The :py:meth:`collect` method
is used to query the status of the order.
Note that personal number is not needed when authentication is to
be done on the same device, provided that the returned
``autoStartToken`` is used to open the BankID Client.
Example data returned:
.. code-block:: json
{
"orderRef":"131daac9-16c6-4618-beb0-365768f37288",
"autoStartToken":"7c40b5c9-fa74-49cf-b98c-bfe651f9a7c6"
}
:param end_user_ip: IP address of the user requesting
the authentication.
:type end_user_ip: str
:param personal_number: The Swedish personal number in
format YYYYMMDDXXXX.
:type personal_number: str
:param requirement: An optional dictionary stating how the signature
must be created and verified. See BankID Relying Party Guidelines,
section 13.5 for more details.
:type requirement: dict
:return: The order response.
:rtype: dict
:raises BankIDError: raises a subclass of this error
when error has been returned from server.
"""
data = {"endUserIp": end_user_ip}
if personal_number:
data["personalNumber"] = personal_number
if requirement and isinstance(requirement, dict):
data["requirement"] = requirement
# Handling potentially changed optional in-parameters.
data.update(kwargs)
response = self.client.post(self._auth_endpoint, json=data)
if response.status_code == 200:
return response.json()
else:
raise get_json_error_class(response) |
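A usage sketch against the BankID test environment, assuming a JSON client class that wraps the method above (pybankid-style); the certificate paths, IP address and personal number are placeholders.

client = BankIDJSONClient(certificates=('cert.pem', 'key.pem'), test_server=True)
order = client.authenticate(end_user_ip='194.168.2.25',
                            personal_number='YYYYMMDDXXXX')
print(order['orderRef'], order['autoStartToken'])
# Poll collect() with the returned orderRef until the user completes the order
# in the BankID app, then inspect the completion data in the final response.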
def _DoCopyFile(source_filename, target_filename, copy_symlink=True):
'''
:param unicode source_filename:
The source filename.
Schemas: local, ftp, http
:param unicode target_filename:
Target filename.
Schemas: local, ftp
:param copy_symlink:
@see _CopyFileLocal
:raises FileNotFoundError:
If source_filename does not exist
'''
from six.moves.urllib.parse import urlparse
source_url = urlparse(source_filename)
target_url = urlparse(target_filename)
if _UrlIsLocal(source_url):
if not Exists(source_filename):
from ._exceptions import FileNotFoundError
raise FileNotFoundError(source_filename)
if _UrlIsLocal(target_url):
# local to local
_CopyFileLocal(source_filename, target_filename, copy_symlink=copy_symlink)
elif target_url.scheme in ['ftp']:
from ._exceptions import NotImplementedProtocol
raise NotImplementedProtocol(target_url.scheme)
else:
from ._exceptions import NotImplementedProtocol
raise NotImplementedProtocol(target_url.scheme)
elif source_url.scheme in ['http', 'https', 'ftp']:
if _UrlIsLocal(target_url):
# HTTP/FTP to local
from ._exceptions import NotImplementedProtocol
raise NotImplementedProtocol(target_url.scheme)
else:
# HTTP/FTP to other ==> NotImplemented
from ._exceptions import NotImplementedProtocol
raise NotImplementedProtocol(target_url.scheme)
else:
from ._exceptions import NotImplementedProtocol # @Reimport
raise NotImplementedProtocol(source_url.scheme) | :param unicode source_filename:
The source filename.
Schemas: local, ftp, http
:param unicode target_filename:
Target filename.
Schemas: local, ftp
:param copy_symlink:
@see _CopyFileLocal
:raises FileNotFoundError:
        If source_filename does not exist | Below is the instruction that describes the task:
### Input:
:param unicode source_filename:
The source filename.
Schemas: local, ftp, http
:param unicode target_filename:
Target filename.
Schemas: local, ftp
:param copy_symlink:
@see _CopyFileLocal
:raises FileNotFoundError:
If source_filename does not exist
### Response:
def _DoCopyFile(source_filename, target_filename, copy_symlink=True):
'''
:param unicode source_filename:
The source filename.
Schemas: local, ftp, http
:param unicode target_filename:
Target filename.
Schemas: local, ftp
:param copy_symlink:
@see _CopyFileLocal
:raises FileNotFoundError:
If source_filename does not exist
'''
from six.moves.urllib.parse import urlparse
source_url = urlparse(source_filename)
target_url = urlparse(target_filename)
if _UrlIsLocal(source_url):
if not Exists(source_filename):
from ._exceptions import FileNotFoundError
raise FileNotFoundError(source_filename)
if _UrlIsLocal(target_url):
# local to local
_CopyFileLocal(source_filename, target_filename, copy_symlink=copy_symlink)
elif target_url.scheme in ['ftp']:
from ._exceptions import NotImplementedProtocol
raise NotImplementedProtocol(target_url.scheme)
else:
from ._exceptions import NotImplementedProtocol
raise NotImplementedProtocol(target_url.scheme)
elif source_url.scheme in ['http', 'https', 'ftp']:
if _UrlIsLocal(target_url):
# HTTP/FTP to local
from ._exceptions import NotImplementedProtocol
raise NotImplementedProtocol(target_url.scheme)
else:
# HTTP/FTP to other ==> NotImplemented
from ._exceptions import NotImplementedProtocol
raise NotImplementedProtocol(target_url.scheme)
else:
from ._exceptions import NotImplementedProtocol # @Reimport
raise NotImplementedProtocol(source_url.scheme) |
def truncate(self, size):
"""
Change the size of this file. This usually extends
or shrinks the size of the file, just like the ``truncate()`` method on
Python file objects.
:param size: the new size of the file
"""
self.sftp._log(
DEBUG, "truncate({}, {!r})".format(hexlify(self.handle), size)
)
attr = SFTPAttributes()
attr.st_size = size
self.sftp._request(CMD_FSETSTAT, self.handle, attr) | Change the size of this file. This usually extends
or shrinks the size of the file, just like the ``truncate()`` method on
Python file objects.
        :param size: the new size of the file | Below is the instruction that describes the task:
### Input:
Change the size of this file. This usually extends
or shrinks the size of the file, just like the ``truncate()`` method on
Python file objects.
:param size: the new size of the file
### Response:
def truncate(self, size):
"""
Change the size of this file. This usually extends
or shrinks the size of the file, just like the ``truncate()`` method on
Python file objects.
:param size: the new size of the file
"""
self.sftp._log(
DEBUG, "truncate({}, {!r})".format(hexlify(self.handle), size)
)
attr = SFTPAttributes()
attr.st_size = size
self.sftp._request(CMD_FSETSTAT, self.handle, attr) |
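A usage sketch with paramiko, where the remote file object behaves like the class above; host name and credentials are placeholders.

import paramiko

ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh.connect('example.com', username='user', password='secret')
sftp = ssh.open_sftp()
remote_file = sftp.open('/tmp/app.log', 'r+')
remote_file.truncate(0)   # empty the remote file in place
remote_file.close()
sftp.close()
ssh.close()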
def apply_cats(df, trn):
"""Changes any columns of strings in df into categorical variables using trn as
a template for the category codes.
Parameters:
-----------
df: A pandas dataframe. Any columns of strings will be changed to
categorical values. The category codes are determined by trn.
    trn: A pandas dataframe. When creating a category for df, it looks up
        what the category codes were in trn and makes those the category codes
for df.
Examples:
---------
>>> df = pd.DataFrame({'col1' : [1, 2, 3], 'col2' : ['a', 'b', 'a']})
>>> df
col1 col2
0 1 a
1 2 b
2 3 a
note the type of col2 is string
>>> train_cats(df)
>>> df
col1 col2
0 1 a
1 2 b
2 3 a
now the type of col2 is category {a : 1, b : 2}
>>> df2 = pd.DataFrame({'col1' : [1, 2, 3], 'col2' : ['b', 'a', 'a']})
>>> apply_cats(df2, df)
col1 col2
0 1 b
1 2 a
2 3 a
now the type of col is category {a : 1, b : 2}
"""
for n,c in df.items():
if (n in trn.columns) and (trn[n].dtype.name=='category'):
df[n] = c.astype('category').cat.as_ordered()
df[n].cat.set_categories(trn[n].cat.categories, ordered=True, inplace=True) | Changes any columns of strings in df into categorical variables using trn as
a template for the category codes.
Parameters:
-----------
df: A pandas dataframe. Any columns of strings will be changed to
categorical values. The category codes are determined by trn.
    trn: A pandas dataframe. When creating a category for df, it looks up
        what the category codes were in trn and makes those the category codes
for df.
Examples:
---------
>>> df = pd.DataFrame({'col1' : [1, 2, 3], 'col2' : ['a', 'b', 'a']})
>>> df
col1 col2
0 1 a
1 2 b
2 3 a
note the type of col2 is string
>>> train_cats(df)
>>> df
col1 col2
0 1 a
1 2 b
2 3 a
now the type of col2 is category {a : 1, b : 2}
>>> df2 = pd.DataFrame({'col1' : [1, 2, 3], 'col2' : ['b', 'a', 'a']})
>>> apply_cats(df2, df)
col1 col2
0 1 b
1 2 a
2 3 a
    now the type of col is category {a : 1, b : 2} | Below is the instruction that describes the task:
### Input:
Changes any columns of strings in df into categorical variables using trn as
a template for the category codes.
Parameters:
-----------
df: A pandas dataframe. Any columns of strings will be changed to
categorical values. The category codes are determined by trn.
    trn: A pandas dataframe. When creating a category for df, it looks up
        what the category codes were in trn and makes those the category codes
for df.
Examples:
---------
>>> df = pd.DataFrame({'col1' : [1, 2, 3], 'col2' : ['a', 'b', 'a']})
>>> df
col1 col2
0 1 a
1 2 b
2 3 a
note the type of col2 is string
>>> train_cats(df)
>>> df
col1 col2
0 1 a
1 2 b
2 3 a
now the type of col2 is category {a : 1, b : 2}
>>> df2 = pd.DataFrame({'col1' : [1, 2, 3], 'col2' : ['b', 'a', 'a']})
>>> apply_cats(df2, df)
col1 col2
0 1 b
1 2 a
2 3 a
now the type of col is category {a : 1, b : 2}
### Response:
def apply_cats(df, trn):
"""Changes any columns of strings in df into categorical variables using trn as
a template for the category codes.
Parameters:
-----------
df: A pandas dataframe. Any columns of strings will be changed to
categorical values. The category codes are determined by trn.
    trn: A pandas dataframe. When creating a category for df, it looks up
        what the category codes were in trn and makes those the category codes
for df.
Examples:
---------
>>> df = pd.DataFrame({'col1' : [1, 2, 3], 'col2' : ['a', 'b', 'a']})
>>> df
col1 col2
0 1 a
1 2 b
2 3 a
note the type of col2 is string
>>> train_cats(df)
>>> df
col1 col2
0 1 a
1 2 b
2 3 a
now the type of col2 is category {a : 1, b : 2}
>>> df2 = pd.DataFrame({'col1' : [1, 2, 3], 'col2' : ['b', 'a', 'a']})
>>> apply_cats(df2, df)
col1 col2
0 1 b
1 2 a
2 3 a
now the type of col is category {a : 1, b : 2}
"""
for n,c in df.items():
if (n in trn.columns) and (trn[n].dtype.name=='category'):
df[n] = c.astype('category').cat.as_ordered()
df[n].cat.set_categories(trn[n].cat.categories, ordered=True, inplace=True) |
def mul_table(self, other):
"""
        Fast multiplication using the LWNAF precomputation table.
"""
# Get a BigInt
other = coerceBigInt(other)
if not other:
return NotImplemented
other %= orderG2()
# Building the precomputation table, if there is not one already.
if not self._table:
self._table = lwnafTable()
librelic.ep2_mul_pre_lwnaf(byref(self._table), byref(self))
result = G2Element()
librelic.ep2_mul_fix_lwnaf(byref(result), byref(self._table),
byref(other))
        return result | Fast multiplication using the LWNAF precomputation table. | Below is the instruction that describes the task:
### Input:
Fast multiplication using the LWNAF precomputation table.
### Response:
def mul_table(self, other):
"""
        Fast multiplication using the LWNAF precomputation table.
"""
# Get a BigInt
other = coerceBigInt(other)
if not other:
return NotImplemented
other %= orderG2()
# Building the precomputation table, if there is not one already.
if not self._table:
self._table = lwnafTable()
librelic.ep2_mul_pre_lwnaf(byref(self._table), byref(self))
result = G2Element()
librelic.ep2_mul_fix_lwnaf(byref(result), byref(self._table),
byref(other))
return result |
def to_dataframe(self, dtypes=None):
"""Create a :class:`pandas.DataFrame` of rows in the page.
        This method requires the pandas library to create a data frame and the
fastavro library to parse row blocks.
.. warning::
DATETIME columns are not supported. They are currently parsed as
strings in the fastavro library.
Args:
dtypes ( \
Map[str, Union[str, pandas.Series.dtype]] \
):
Optional. A dictionary of column names pandas ``dtype``s. The
provided ``dtype`` is used when constructing the series for
the column specified. Otherwise, the default pandas behavior
is used.
Returns:
pandas.DataFrame:
A data frame of all rows in the stream.
"""
if pandas is None:
raise ImportError(_PANDAS_REQUIRED)
if dtypes is None:
dtypes = {}
columns = collections.defaultdict(list)
for row in self:
for column in row:
columns[column].append(row[column])
for column in dtypes:
columns[column] = pandas.Series(columns[column], dtype=dtypes[column])
return pandas.DataFrame(columns, columns=self._column_names) | Create a :class:`pandas.DataFrame` of rows in the page.
        This method requires the pandas library to create a data frame and the
fastavro library to parse row blocks.
.. warning::
DATETIME columns are not supported. They are currently parsed as
strings in the fastavro library.
Args:
dtypes ( \
Map[str, Union[str, pandas.Series.dtype]] \
):
Optional. A dictionary of column names pandas ``dtype``s. The
provided ``dtype`` is used when constructing the series for
the column specified. Otherwise, the default pandas behavior
is used.
Returns:
pandas.DataFrame:
                A data frame of all rows in the stream. | Below is the instruction that describes the task:
### Input:
Create a :class:`pandas.DataFrame` of rows in the page.
        This method requires the pandas library to create a data frame and the
fastavro library to parse row blocks.
.. warning::
DATETIME columns are not supported. They are currently parsed as
strings in the fastavro library.
Args:
dtypes ( \
Map[str, Union[str, pandas.Series.dtype]] \
):
Optional. A dictionary of column names pandas ``dtype``s. The
provided ``dtype`` is used when constructing the series for
the column specified. Otherwise, the default pandas behavior
is used.
Returns:
pandas.DataFrame:
A data frame of all rows in the stream.
### Response:
def to_dataframe(self, dtypes=None):
"""Create a :class:`pandas.DataFrame` of rows in the page.
        This method requires the pandas library to create a data frame and the
fastavro library to parse row blocks.
.. warning::
DATETIME columns are not supported. They are currently parsed as
strings in the fastavro library.
Args:
dtypes ( \
Map[str, Union[str, pandas.Series.dtype]] \
):
Optional. A dictionary of column names pandas ``dtype``s. The
provided ``dtype`` is used when constructing the series for
the column specified. Otherwise, the default pandas behavior
is used.
Returns:
pandas.DataFrame:
A data frame of all rows in the stream.
"""
if pandas is None:
raise ImportError(_PANDAS_REQUIRED)
if dtypes is None:
dtypes = {}
columns = collections.defaultdict(list)
for row in self:
for column in row:
columns[column].append(row[column])
for column in dtypes:
columns[column] = pandas.Series(columns[column], dtype=dtypes[column])
return pandas.DataFrame(columns, columns=self._column_names) |
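A short sketch, assuming `page` is an instance of the page class defining to_dataframe() above; the column names and dtypes are illustrative.

df = page.to_dataframe(dtypes={'user_id': 'int64', 'score': 'float32'})
print(df.dtypes)   # user_id and score use the requested dtypes, other columns use pandas defaults
print(df.head())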
def wrap_hvac(msg):
"""Error catching Vault API wrapper
This decorator wraps API interactions with Vault. It will
catch and return appropriate error output on common
problems. Do we even need this now that we extend the
hvac class?"""
# pylint: disable=missing-docstring
def wrap_call(func):
# pylint: disable=missing-docstring
def func_wrapper(self, vault_client):
try:
return func(self, vault_client)
except (hvac.exceptions.InvalidRequest,
hvac.exceptions.Forbidden) as vault_exception:
if vault_exception.errors[0] == 'permission denied':
emsg = "Permission denied %s from %s" % (msg, self.path)
raise aomi.exceptions.AomiCredentials(emsg)
else:
raise
return func_wrapper
return wrap_call | Error catching Vault API wrapper
This decorator wraps API interactions with Vault. It will
catch and return appropriate error output on common
problems. Do we even need this now that we extend the
    hvac class? | Below is the instruction that describes the task:
### Input:
Error catching Vault API wrapper
This decorator wraps API interactions with Vault. It will
catch and return appropriate error output on common
problems. Do we even need this now that we extend the
hvac class?
### Response:
def wrap_hvac(msg):
"""Error catching Vault API wrapper
This decorator wraps API interactions with Vault. It will
catch and return appropriate error output on common
problems. Do we even need this now that we extend the
hvac class?"""
# pylint: disable=missing-docstring
def wrap_call(func):
# pylint: disable=missing-docstring
def func_wrapper(self, vault_client):
try:
return func(self, vault_client)
except (hvac.exceptions.InvalidRequest,
hvac.exceptions.Forbidden) as vault_exception:
if vault_exception.errors[0] == 'permission denied':
emsg = "Permission denied %s from %s" % (msg, self.path)
raise aomi.exceptions.AomiCredentials(emsg)
else:
raise
return func_wrapper
return wrap_call |
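A sketch of the decorator applied to a resource method; the Secret class and its path are made up for illustration.

class Secret(object):
    path = 'secret/data/app'

    @wrap_hvac('reading secret')
    def fetch(self, vault_client):
        return vault_client.read(self.path)

# Secret().fetch(client) now raises aomi.exceptions.AomiCredentials with a readable
# "Permission denied reading secret from secret/data/app" message instead of a bare
# hvac.exceptions.Forbidden when the token lacks access.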
def summarize_variable(self, variable = None, use_baseline = False, weighted = False, force_compute = False):
"""
Prints a summary of a variable including its memory usage.
:param string variable: the variable being summarized
:param bool use_baseline: the tax-benefit-system considered
        :param bool weighted: whether the produced statistics should be weighted or not
:param bool force_compute: whether the computation of the variable should be forced
Example:
>>> from openfisca_survey_manager.tests.test_scenario import create_randomly_initialized_survey_scenario
>>> survey_scenario = create_randomly_initialized_survey_scenario()
>>> survey_scenario.summarize_variable(variable = "housing_occupancy_status", force_compute = True)
<BLANKLINE>
housing_occupancy_status: 1 periods * 5 cells * item size 2 (<type 'numpy.int16'>, default = HousingOccupancyStatus.tenant) = 10B
Details:
2017-01: owner = 0.00e+00 (0.0%), tenant = 5.00e+00 (100.0%), free_lodger = 0.00e+00 (0.0%), homeless = 0.00e+00 (0.0%).
>>> survey_scenario.summarize_variable(variable = "rent", force_compute = True)
<BLANKLINE>
rent: 2 periods * 5 cells * item size 4 (<type 'numpy.float32'>, default = 0) = 40B
Details:
2017-01: mean = 562.385070801, min = 156.01864624, max = 950.714294434, mass = 2.81e+03, default = 0.0%, median = 598.658508301
2018-01: mean = 562.385070801, min = 156.01864624, max = 950.714294434, mass = 2.81e+03, default = 0.0%, median = 598.658508301
"""
if use_baseline:
simulation = self.baseline_simulation
else:
simulation = self.simulation
tax_benefit_system = simulation.tax_benefit_system
assert variable in tax_benefit_system.variables, "{} is not a valid variable".format(variable)
variable_instance = tax_benefit_system.variables[variable]
default_value = variable_instance.default_value
value_type = variable_instance.value_type
if weighted:
weight_variable = self.weight_column_name_by_entity[variable_instance.entity.key]
weights = simulation.calculate(weight_variable, simulation.period)
infos = simulation.get_memory_usage(variables = [variable])['by_variable'].get(variable)
if not infos:
if force_compute:
self.calculate_variable(variable = variable, period = simulation.period, use_baseline = use_baseline)
self.summarize_variable(variable = variable, use_baseline = use_baseline, weighted = weighted)
return
else:
print("{} is not computed yet. Use keyword argument force_compute = True".format(variable))
return
header_line = "{}: {} periods * {} cells * item size {} ({}, default = {}) = {}".format(
variable,
infos['nb_arrays'],
infos['nb_cells_by_array'],
infos['cell_size'],
infos['dtype'],
default_value,
humanize.naturalsize(infos['total_nb_bytes'], gnu = True),
)
print("")
print(header_line)
print("Details:")
holder = simulation.get_holder(variable)
if holder is not None:
if holder.variable.definition_period == ETERNITY:
array = holder.get_array(ETERNITY)
print("permanent: mean = {}, min = {}, max = {}, median = {}, default = {:.1%}".format(
array.mean() if not weighted else np.average(array, weights = weights),
array.min(),
array.max(),
np.median(array),
(
(array == default_value).sum() / len(array)
if not weighted
else ((array == default_value) * weights).sum() / weights.sum()
)
))
else:
for period in sorted(holder.get_known_periods()):
array = holder.get_array(period)
if array.shape == ():
print("{}: always = {}".format(period, array))
continue
if value_type == Enum:
possible_values = variable_instance.possible_values
categories_by_index = dict(zip(
range(len(possible_values._member_names_)),
possible_values._member_names_
))
categories_type = pd.api.types.CategoricalDtype(categories = possible_values._member_names_, ordered = True)
df = pd.DataFrame({variable: array}).replace(categories_by_index).astype(categories_type)
df['weights'] = weights if weighted else 1
groupby = df.groupby(variable)['weights'].sum()
total = groupby.sum()
expr = [" {} = {:.2e} ({:.1%})".format(index, row, row / total) for index, row in groupby.iteritems()]
print("{}:{}.".format(period, ",".join(expr)))
continue
print("{}: mean = {}, min = {}, max = {}, mass = {:.2e}, default = {:.1%}, median = {}".format(
period,
array.mean() if not weighted else np.average(array, weights = weights),
array.min(),
array.max(),
array.sum() if not weighted else np.sum(array * weights),
(
(array == default_value).sum() / len(array)
if not weighted
else ((array == default_value) * weights).sum() / weights.sum()
),
np.median(array),
)) | Prints a summary of a variable including its memory usage.
:param string variable: the variable being summarized
:param bool use_baseline: the tax-benefit-system considered
        :param bool weighted: whether the produced statistics should be weighted or not
:param bool force_compute: whether the computation of the variable should be forced
Example:
>>> from openfisca_survey_manager.tests.test_scenario import create_randomly_initialized_survey_scenario
>>> survey_scenario = create_randomly_initialized_survey_scenario()
>>> survey_scenario.summarize_variable(variable = "housing_occupancy_status", force_compute = True)
<BLANKLINE>
housing_occupancy_status: 1 periods * 5 cells * item size 2 (<type 'numpy.int16'>, default = HousingOccupancyStatus.tenant) = 10B
Details:
2017-01: owner = 0.00e+00 (0.0%), tenant = 5.00e+00 (100.0%), free_lodger = 0.00e+00 (0.0%), homeless = 0.00e+00 (0.0%).
>>> survey_scenario.summarize_variable(variable = "rent", force_compute = True)
<BLANKLINE>
rent: 2 periods * 5 cells * item size 4 (<type 'numpy.float32'>, default = 0) = 40B
Details:
2017-01: mean = 562.385070801, min = 156.01864624, max = 950.714294434, mass = 2.81e+03, default = 0.0%, median = 598.658508301
        2018-01: mean = 562.385070801, min = 156.01864624, max = 950.714294434, mass = 2.81e+03, default = 0.0%, median = 598.658508301 | Below is the instruction that describes the task:
### Input:
Prints a summary of a variable including its memory usage.
:param string variable: the variable being summarized
:param bool use_baseline: the tax-benefit-system considered
:param bool weighted: whether the produced statistics should be weighted or not
:param bool force_compute: whether the computation of the variable should be forced
Example:
>>> from openfisca_survey_manager.tests.test_scenario import create_randomly_initialized_survey_scenario
>>> survey_scenario = create_randomly_initialized_survey_scenario()
>>> survey_scenario.summarize_variable(variable = "housing_occupancy_status", force_compute = True)
<BLANKLINE>
housing_occupancy_status: 1 periods * 5 cells * item size 2 (<type 'numpy.int16'>, default = HousingOccupancyStatus.tenant) = 10B
Details:
2017-01: owner = 0.00e+00 (0.0%), tenant = 5.00e+00 (100.0%), free_lodger = 0.00e+00 (0.0%), homeless = 0.00e+00 (0.0%).
>>> survey_scenario.summarize_variable(variable = "rent", force_compute = True)
<BLANKLINE>
rent: 2 periods * 5 cells * item size 4 (<type 'numpy.float32'>, default = 0) = 40B
Details:
2017-01: mean = 562.385070801, min = 156.01864624, max = 950.714294434, mass = 2.81e+03, default = 0.0%, median = 598.658508301
2018-01: mean = 562.385070801, min = 156.01864624, max = 950.714294434, mass = 2.81e+03, default = 0.0%, median = 598.658508301
### Response:
def summarize_variable(self, variable = None, use_baseline = False, weighted = False, force_compute = False):
"""
Prints a summary of a variable including its memory usage.
:param string variable: the variable being summarized
:param bool use_baseline: the tax-benefit-system considered
:param bool weighted: whether the produced statistics should be weigthted or not
:param bool force_compute: whether the computation of the variable should be forced
Example:
>>> from openfisca_survey_manager.tests.test_scenario import create_randomly_initialized_survey_scenario
>>> survey_scenario = create_randomly_initialized_survey_scenario()
>>> survey_scenario.summarize_variable(variable = "housing_occupancy_status", force_compute = True)
<BLANKLINE>
housing_occupancy_status: 1 periods * 5 cells * item size 2 (<type 'numpy.int16'>, default = HousingOccupancyStatus.tenant) = 10B
Details:
2017-01: owner = 0.00e+00 (0.0%), tenant = 5.00e+00 (100.0%), free_lodger = 0.00e+00 (0.0%), homeless = 0.00e+00 (0.0%).
>>> survey_scenario.summarize_variable(variable = "rent", force_compute = True)
<BLANKLINE>
rent: 2 periods * 5 cells * item size 4 (<type 'numpy.float32'>, default = 0) = 40B
Details:
2017-01: mean = 562.385070801, min = 156.01864624, max = 950.714294434, mass = 2.81e+03, default = 0.0%, median = 598.658508301
2018-01: mean = 562.385070801, min = 156.01864624, max = 950.714294434, mass = 2.81e+03, default = 0.0%, median = 598.658508301
"""
if use_baseline:
simulation = self.baseline_simulation
else:
simulation = self.simulation
tax_benefit_system = simulation.tax_benefit_system
assert variable in tax_benefit_system.variables, "{} is not a valid variable".format(variable)
variable_instance = tax_benefit_system.variables[variable]
default_value = variable_instance.default_value
value_type = variable_instance.value_type
if weighted:
weight_variable = self.weight_column_name_by_entity[variable_instance.entity.key]
weights = simulation.calculate(weight_variable, simulation.period)
infos = simulation.get_memory_usage(variables = [variable])['by_variable'].get(variable)
if not infos:
if force_compute:
self.calculate_variable(variable = variable, period = simulation.period, use_baseline = use_baseline)
self.summarize_variable(variable = variable, use_baseline = use_baseline, weighted = weighted)
return
else:
print("{} is not computed yet. Use keyword argument force_compute = True".format(variable))
return
header_line = "{}: {} periods * {} cells * item size {} ({}, default = {}) = {}".format(
variable,
infos['nb_arrays'],
infos['nb_cells_by_array'],
infos['cell_size'],
infos['dtype'],
default_value,
humanize.naturalsize(infos['total_nb_bytes'], gnu = True),
)
print("")
print(header_line)
print("Details:")
holder = simulation.get_holder(variable)
if holder is not None:
if holder.variable.definition_period == ETERNITY:
array = holder.get_array(ETERNITY)
print("permanent: mean = {}, min = {}, max = {}, median = {}, default = {:.1%}".format(
array.mean() if not weighted else np.average(array, weights = weights),
array.min(),
array.max(),
np.median(array),
(
(array == default_value).sum() / len(array)
if not weighted
else ((array == default_value) * weights).sum() / weights.sum()
)
))
else:
for period in sorted(holder.get_known_periods()):
array = holder.get_array(period)
if array.shape == ():
print("{}: always = {}".format(period, array))
continue
if value_type == Enum:
possible_values = variable_instance.possible_values
categories_by_index = dict(zip(
range(len(possible_values._member_names_)),
possible_values._member_names_
))
categories_type = pd.api.types.CategoricalDtype(categories = possible_values._member_names_, ordered = True)
df = pd.DataFrame({variable: array}).replace(categories_by_index).astype(categories_type)
df['weights'] = weights if weighted else 1
groupby = df.groupby(variable)['weights'].sum()
total = groupby.sum()
expr = [" {} = {:.2e} ({:.1%})".format(index, row, row / total) for index, row in groupby.iteritems()]
print("{}:{}.".format(period, ",".join(expr)))
continue
print("{}: mean = {}, min = {}, max = {}, mass = {:.2e}, default = {:.1%}, median = {}".format(
period,
array.mean() if not weighted else np.average(array, weights = weights),
array.min(),
array.max(),
array.sum() if not weighted else np.sum(array * weights),
(
(array == default_value).sum() / len(array)
if not weighted
else ((array == default_value) * weights).sum() / weights.sum()
),
np.median(array),
)) |
def main(self):
"""
Run the necessary methods in the correct order
"""
if not os.path.isfile(self.gdcs_report):
logging.info('Starting {} analysis pipeline'.format(self.analysistype))
# Run the analyses
ShortKSippingMethods(self, self.cutoff)
# Create the reports
self.reporter()
else:
self.report_parse() | Run the necessary methods in the correct order | Below is the the instruction that describes the task:
### Input:
Run the necessary methods in the correct order
### Response:
def main(self):
"""
Run the necessary methods in the correct order
"""
if not os.path.isfile(self.gdcs_report):
logging.info('Starting {} analysis pipeline'.format(self.analysistype))
# Run the analyses
ShortKSippingMethods(self, self.cutoff)
# Create the reports
self.reporter()
else:
self.report_parse() |
def __GetAuthorizationTokenUsingMasterKey(verb,
resource_id_or_fullname,
resource_type,
headers,
master_key):
"""Gets the authorization token using `master_key.
:param str verb:
:param str resource_id_or_fullname:
:param str resource_type:
:param dict headers:
:param str master_key:
:return:
The authorization token.
:rtype: dict
"""
# decodes the master key which is encoded in base64
key = base64.b64decode(master_key)
# Skipping lower casing of resource_id_or_fullname since it may now contain "ID" of the resource as part of the fullname
text = '{verb}\n{resource_type}\n{resource_id_or_fullname}\n{x_date}\n{http_date}\n'.format(
verb=(verb.lower() or ''),
resource_type=(resource_type.lower() or ''),
resource_id_or_fullname=(resource_id_or_fullname or ''),
x_date=headers.get(http_constants.HttpHeaders.XDate, '').lower(),
http_date=headers.get(http_constants.HttpHeaders.HttpDate, '').lower())
if six.PY2:
body = text.decode('utf-8')
digest = hmac.new(key, body, sha256).digest()
signature = digest.encode('base64')
else:
# python 3 support
body = text.encode('utf-8')
digest = hmac.new(key, body, sha256).digest()
signature = base64.encodebytes(digest).decode('utf-8')
master_token = 'master'
token_version = '1.0'
return 'type={type}&ver={ver}&sig={sig}'.format(type=master_token,
ver=token_version,
sig=signature[:-1]) | Gets the authorization token using `master_key`.
:param str verb:
:param str resource_id_or_fullname:
:param str resource_type:
:param dict headers:
:param str master_key:
:return:
The authorization token.
:rtype: dict | Below is the the instruction that describes the task:
### Input:
Gets the authorization token using `master_key`.
:param str verb:
:param str resource_id_or_fullname:
:param str resource_type:
:param dict headers:
:param str master_key:
:return:
The authorization token.
:rtype: dict
### Response:
def __GetAuthorizationTokenUsingMasterKey(verb,
resource_id_or_fullname,
resource_type,
headers,
master_key):
"""Gets the authorization token using `master_key.
:param str verb:
:param str resource_id_or_fullname:
:param str resource_type:
:param dict headers:
:param str master_key:
:return:
The authorization token.
:rtype: dict
"""
# decodes the master key which is encoded in base64
key = base64.b64decode(master_key)
# Skipping lower casing of resource_id_or_fullname since it may now contain "ID" of the resource as part of the fullname
text = '{verb}\n{resource_type}\n{resource_id_or_fullname}\n{x_date}\n{http_date}\n'.format(
verb=(verb.lower() or ''),
resource_type=(resource_type.lower() or ''),
resource_id_or_fullname=(resource_id_or_fullname or ''),
x_date=headers.get(http_constants.HttpHeaders.XDate, '').lower(),
http_date=headers.get(http_constants.HttpHeaders.HttpDate, '').lower())
if six.PY2:
body = text.decode('utf-8')
digest = hmac.new(key, body, sha256).digest()
signature = digest.encode('base64')
else:
# python 3 support
body = text.encode('utf-8')
digest = hmac.new(key, body, sha256).digest()
signature = base64.encodebytes(digest).decode('utf-8')
master_token = 'master'
token_version = '1.0'
return 'type={type}&ver={ver}&sig={sig}'.format(type=master_token,
ver=token_version,
sig=signature[:-1]) |
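A minimal standalone sketch of the same HMAC-SHA256 signing step on Python 3, using only the standard library; the verb, resource values, date and key are made-up placeholders, and the canonical string is simplified rather than the exact five-field format above.

import base64
import hashlib
import hmac

def sign_payload(master_key_b64, verb, resource_type, resource_link, x_date):
    # Decode the base64 master key, build a lowercase canonical string,
    # HMAC-SHA256 it, and base64-encode the digest.
    key = base64.b64decode(master_key_b64)
    text = "{}\n{}\n{}\n{}\n\n".format(verb.lower(), resource_type.lower(),
                                       resource_link, x_date.lower())
    digest = hmac.new(key, text.encode("utf-8"), hashlib.sha256).digest()
    return base64.b64encode(digest).decode("utf-8")

# Placeholder values for illustration only.
print(sign_payload(base64.b64encode(b"placeholder-key").decode(), "GET", "dbs",
                   "dbs/ToDoList", "tue, 01 nov 1994 08:12:31 gmt"))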
def makedbthreads(self, folder):
"""
Setup and create threads for class
:param folder: folder with sequence files with which to create blast databases
"""
# Create and start threads for each fasta file in the list
for i in range(len(folder)):
# Send the threads to makeblastdb
threads = Thread(target=self.makeblastdb, args=())
# Set the daemon to true - something to do with thread management
threads.setDaemon(True)
# Start the threading
threads.start()
# Make blast databases for MLST files (if necessary)
for alleledir in folder:
# List comprehension to remove any previously created database files from list
allelefiles = glob('{}/*.fasta'.format(alleledir))
# For each allele file
for allelefile in allelefiles:
# Add the fasta file to the queue
self.dqueue.put(allelefile)
self.dqueue.join() | Setup and create threads for class
:param folder: folder with sequence files with which to create blast databases | Below is the the instruction that describes the task:
### Input:
Setup and create threads for class
:param folder: folder with sequence files with which to create blast databases
### Response:
def makedbthreads(self, folder):
"""
Setup and create threads for class
:param folder: folder with sequence files with which to create blast databases
"""
# Create and start threads for each fasta file in the list
for i in range(len(folder)):
# Send the threads to makeblastdb
threads = Thread(target=self.makeblastdb, args=())
# Set the daemon to true - something to do with thread management
threads.setDaemon(True)
# Start the threading
threads.start()
# Make blast databases for MLST files (if necessary)
for alleledir in folder:
# List comprehension to remove any previously created database files from list
allelefiles = glob('{}/*.fasta'.format(alleledir))
# For each allele file
for allelefile in allelefiles:
# Add the fasta file to the queue
self.dqueue.put(allelefile)
self.dqueue.join() |
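The same daemon-worker-plus-queue pattern in a self-contained form with the standard library; make_db below is a stand-in for the makeblastdb call, not the real method.

import queue
import threading

def make_db(q):
    # Stand-in worker for makeblastdb: pull fasta paths off the queue forever.
    while True:
        path = q.get()
        print("formatting database for", path)
        q.task_done()

q = queue.Queue()
for _ in range(4):
    t = threading.Thread(target=make_db, args=(q,))
    t.daemon = True              # daemon threads exit with the main program
    t.start()
for fasta in ["a.fasta", "b.fasta", "c.fasta"]:
    q.put(fasta)
q.join()                         # block until every queued file is processed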
def sampleLocationFromFeature(self, feature):
"""
Samples a location from one specific feature.
This is only supported with three dimensions.
"""
if feature == "face":
return self._sampleFromFaces()
elif feature == "edge":
return self._sampleFromEdges()
elif feature == "vertex":
return self._sampleFromVertices()
elif feature == "random":
return self.sampleLocation()
else:
raise NameError("No such feature in {}: {}".format(self, feature)) | Samples a location from one specific feature.
This is only supported with three dimensions. | Below is the the instruction that describes the task:
### Input:
Samples a location from one specific feature.
This is only supported with three dimensions.
### Response:
def sampleLocationFromFeature(self, feature):
"""
Samples a location from one specific feature.
This is only supported with three dimensions.
"""
if feature == "face":
return self._sampleFromFaces()
elif feature == "edge":
return self._sampleFromEdges()
elif feature == "vertex":
return self._sampleFromVertices()
elif feature == "random":
return self.sampleLocation()
else:
raise NameError("No such feature in {}: {}".format(self, feature)) |
def related_lua_args(self):
'''Generator of load_related arguments'''
related = self.queryelem.select_related
if related:
meta = self.meta
for rel in related:
field = meta.dfields[rel]
relmodel = field.relmodel
bk = self.backend.basekey(relmodel._meta) if relmodel else ''
fields = list(related[rel])
if meta.pkname() in fields:
fields.remove(meta.pkname())
if not fields:
fields.append('')
ftype = field.type if field in meta.multifields else ''
data = {'field': field.attname, 'type': ftype,
'bk': bk, 'fields': fields}
yield field.name, data | Generator of load_related arguments | Below is the the instruction that describes the task:
### Input:
Generator of load_related arguments
### Response:
def related_lua_args(self):
'''Generator of load_related arguments'''
related = self.queryelem.select_related
if related:
meta = self.meta
for rel in related:
field = meta.dfields[rel]
relmodel = field.relmodel
bk = self.backend.basekey(relmodel._meta) if relmodel else ''
fields = list(related[rel])
if meta.pkname() in fields:
fields.remove(meta.pkname())
if not fields:
fields.append('')
ftype = field.type if field in meta.multifields else ''
data = {'field': field.attname, 'type': ftype,
'bk': bk, 'fields': fields}
yield field.name, data |
def generate_output_network(self, json_data=None, hr=True, show_name=False,
colorize=True):
"""
The function for generating CLI output RDAP network results.
Args:
json_data (:obj:`dict`): The data to process. Defaults to None.
hr (:obj:`bool`): Enable human readable key translations. Defaults
to True.
show_name (:obj:`bool`): Show human readable name (default is to
only show short). Defaults to False.
colorize (:obj:`bool`): Colorize the console output with ANSI
colors. Defaults to True.
Returns:
str: The generated output.
"""
if json_data is None:
json_data = {}
output = generate_output(
line='0',
short=HR_RDAP['network']['_short'] if hr else 'network',
name=HR_RDAP['network']['_name'] if (hr and show_name) else None,
is_parent=True,
colorize=colorize
)
for key, val in json_data['network'].items():
if key in ['links', 'status']:
output += self.generate_output_list(
source='network',
key=key,
val=val,
line='1',
hr=hr,
show_name=show_name,
colorize=colorize
)
elif key in ['notices', 'remarks']:
output += self.generate_output_notices(
source='network',
key=key,
val=val,
line='1',
hr=hr,
show_name=show_name,
colorize=colorize
)
elif key == 'events':
output += self.generate_output_events(
source='network',
key=key,
val=val,
line='1',
hr=hr,
show_name=show_name,
colorize=colorize
)
elif key not in ['raw']:
output += generate_output(
line='1',
short=HR_RDAP['network'][key]['_short'] if hr else key,
name=HR_RDAP['network'][key]['_name'] if (
hr and show_name) else None,
value=val,
colorize=colorize
)
return output | The function for generating CLI output RDAP network results.
Args:
json_data (:obj:`dict`): The data to process. Defaults to None.
hr (:obj:`bool`): Enable human readable key translations. Defaults
to True.
show_name (:obj:`bool`): Show human readable name (default is to
only show short). Defaults to False.
colorize (:obj:`bool`): Colorize the console output with ANSI
colors. Defaults to True.
Returns:
str: The generated output. | Below is the the instruction that describes the task:
### Input:
The function for generating CLI output RDAP network results.
Args:
json_data (:obj:`dict`): The data to process. Defaults to None.
hr (:obj:`bool`): Enable human readable key translations. Defaults
to True.
show_name (:obj:`bool`): Show human readable name (default is to
only show short). Defaults to False.
colorize (:obj:`bool`): Colorize the console output with ANSI
colors. Defaults to True.
Returns:
str: The generated output.
### Response:
def generate_output_network(self, json_data=None, hr=True, show_name=False,
colorize=True):
"""
The function for generating CLI output RDAP network results.
Args:
json_data (:obj:`dict`): The data to process. Defaults to None.
hr (:obj:`bool`): Enable human readable key translations. Defaults
to True.
show_name (:obj:`bool`): Show human readable name (default is to
only show short). Defaults to False.
colorize (:obj:`bool`): Colorize the console output with ANSI
colors. Defaults to True.
Returns:
str: The generated output.
"""
if json_data is None:
json_data = {}
output = generate_output(
line='0',
short=HR_RDAP['network']['_short'] if hr else 'network',
name=HR_RDAP['network']['_name'] if (hr and show_name) else None,
is_parent=True,
colorize=colorize
)
for key, val in json_data['network'].items():
if key in ['links', 'status']:
output += self.generate_output_list(
source='network',
key=key,
val=val,
line='1',
hr=hr,
show_name=show_name,
colorize=colorize
)
elif key in ['notices', 'remarks']:
output += self.generate_output_notices(
source='network',
key=key,
val=val,
line='1',
hr=hr,
show_name=show_name,
colorize=colorize
)
elif key == 'events':
output += self.generate_output_events(
source='network',
key=key,
val=val,
line='1',
hr=hr,
show_name=show_name,
colorize=colorize
)
elif key not in ['raw']:
output += generate_output(
line='1',
short=HR_RDAP['network'][key]['_short'] if hr else key,
name=HR_RDAP['network'][key]['_name'] if (
hr and show_name) else None,
value=val,
colorize=colorize
)
return output |
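For context, a hedged sketch of producing the json_data this formatter walks, using the ipwhois package (assumes ipwhois is installed and the lookup address is reachable):

from ipwhois import IPWhois

obj = IPWhois("74.125.225.229")        # example public IPv4 address, placeholder
json_data = obj.lookup_rdap(depth=1)   # dict with a 'network' section like the one iterated above
print(sorted(json_data["network"].keys()))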
def reset(self, iface=None, client_mac=None, xid=None, scriptfile=None):
"""Reset object attributes when state is INIT."""
logger.debug('Resetting attributes.')
if iface is None:
iface = conf.iface
if client_mac is None:
# scapy for python 3 returns byte, not tuple
tempmac = get_if_raw_hwaddr(iface)
if isinstance(tempmac, tuple) and len(tempmac) == 2:
mac = tempmac[1]
else:
mac = tempmac
client_mac = str2mac(mac)
self.client = DHCPCAP(iface=iface, client_mac=client_mac, xid=xid)
if scriptfile is not None:
self.script = ClientScript(scriptfile)
else:
self.script = None
self.time_sent_request = None
self.discover_attempts = 0
self.request_attempts = 0
self.current_state = STATE_PREINIT
self.offers = list() | Reset object attributes when state is INIT. | Below is the the instruction that describes the task:
### Input:
Reset object attributes when state is INIT.
### Response:
def reset(self, iface=None, client_mac=None, xid=None, scriptfile=None):
"""Reset object attributes when state is INIT."""
logger.debug('Resetting attributes.')
if iface is None:
iface = conf.iface
if client_mac is None:
# scapy for python 3 returns byte, not tuple
tempmac = get_if_raw_hwaddr(iface)
if isinstance(tempmac, tuple) and len(tempmac) == 2:
mac = tempmac[1]
else:
mac = tempmac
client_mac = str2mac(mac)
self.client = DHCPCAP(iface=iface, client_mac=client_mac, xid=xid)
if scriptfile is not None:
self.script = ClientScript(scriptfile)
else:
self.script = None
self.time_sent_request = None
self.discover_attempts = 0
self.request_attempts = 0
self.current_state = STATE_PREINIT
self.offers = list() |
def remove_writer(self, address):
""" Remove a writer address from the routing table, if present.
"""
log_debug("[#0000] C: <ROUTING> Removing writer %r", address)
self.routing_table.writers.discard(address)
log_debug("[#0000] C: <ROUTING> table=%r", self.routing_table) | Remove a writer address from the routing table, if present. | Below is the the instruction that describes the task:
### Input:
Remove a writer address from the routing table, if present.
### Response:
def remove_writer(self, address):
""" Remove a writer address from the routing table, if present.
"""
log_debug("[#0000] C: <ROUTING> Removing writer %r", address)
self.routing_table.writers.discard(address)
log_debug("[#0000] C: <ROUTING> table=%r", self.routing_table) |
def get_rmse(self, data_x=None, data_y=None):
"""
Get Root Mean Square Error using
self.bestfit_func
args:
x_min: scalar, default=min(x)
minimum x value of the line
x_max: scalar, default=max(x)
maximum x value of the line
resolution: int, default=1000
how many steps between x_min and x_max
"""
if data_x is None:
data_x = np.array(self.args["x"])
if data_y is None:
data_y = np.array(self.args["y"])
if len(data_x) != len(data_y):
raise ValueError("Lengths of data_x and data_y are different")
rmse_y = self.bestfit_func(data_x)
return np.sqrt(np.mean((rmse_y - data_y) ** 2)) | Get Root Mean Square Error using
self.bestfit_func
args:
x_min: scalar, default=min(x)
minimum x value of the line
x_max: scalar, default=max(x)
maximum x value of the line
resolution: int, default=1000
how many steps between x_min and x_max | Below is the the instruction that describes the task:
### Input:
Get Root Mean Square Error using
self.bestfit_func
args:
x_min: scalar, default=min(x)
minimum x value of the line
x_max: scalar, default=max(x)
maximum x value of the line
resolution: int, default=1000
how many steps between x_min and x_max
### Response:
def get_rmse(self, data_x=None, data_y=None):
"""
Get Root Mean Square Error using
self.bestfit_func
args:
x_min: scalar, default=min(x)
minimum x value of the line
x_max: scalar, default=max(x)
maximum x value of the line
resolution: int, default=1000
how many steps between x_min and x_max
"""
if data_x is None:
data_x = np.array(self.args["x"])
if data_y is None:
data_y = np.array(self.args["y"])
if len(data_x) != len(data_y):
raise ValueError("Lengths of data_x and data_y are different")
rmse_y = self.bestfit_func(data_x)
return np.sqrt(np.mean((rmse_y - data_y) ** 2)) |
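The underlying computation shown standalone with NumPy, using a made-up fitted line y = 2x in place of self.bestfit_func:

import numpy as np

data_x = np.array([0.0, 1.0, 2.0, 3.0])
data_y = np.array([0.1, 2.1, 3.9, 6.2])
bestfit = lambda x: 2.0 * x                              # stand-in for self.bestfit_func
rmse = np.sqrt(np.mean((bestfit(data_x) - data_y) ** 2))
print(rmse)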
def PROFILE_SDRAUTIAN(sg0,GamD,Gam0,Gam2,Shift0,Shift2,anuVC,sg):
"""
# Speed dependent Rautian profile based on HTP.
# Input parameters:
# sg0 : Unperturbed line position in cm-1 (Input).
# GamD : Doppler HWHM in cm-1 (Input)
# Gam0 : Speed-averaged line-width in cm-1 (Input).
# Gam2 : Speed dependence of the line-width in cm-1 (Input).
# anuVC : Velocity-changing frequency in cm-1 (Input).
# Shift0 : Speed-averaged line-shift in cm-1 (Input).
# Shift2 : Speed dependence of the line-shift in cm-1 (Input)
# sg : Current WaveNumber of the Computation in cm-1 (Input).
"""
return pcqsdhc(sg0,GamD,Gam0,Gam2,Shift0,Shift2,anuVC,cZero,sg) | # Speed dependent Rautian profile based on HTP.
# Input parameters:
# sg0 : Unperturbed line position in cm-1 (Input).
# GamD : Doppler HWHM in cm-1 (Input)
# Gam0 : Speed-averaged line-width in cm-1 (Input).
# Gam2 : Speed dependence of the line-width in cm-1 (Input).
# anuVC : Velocity-changing frequency in cm-1 (Input).
# Shift0 : Speed-averaged line-shift in cm-1 (Input).
# Shift2 : Speed dependence of the line-shift in cm-1 (Input)
# sg : Current WaveNumber of the Computation in cm-1 (Input). | Below is the the instruction that describes the task:
### Input:
# Speed dependent Rautian profile based on HTP.
# Input parameters:
# sg0 : Unperturbed line position in cm-1 (Input).
# GamD : Doppler HWHM in cm-1 (Input)
# Gam0 : Speed-averaged line-width in cm-1 (Input).
# Gam2 : Speed dependence of the line-width in cm-1 (Input).
# anuVC : Velocity-changing frequency in cm-1 (Input).
# Shift0 : Speed-averaged line-shift in cm-1 (Input).
# Shift2 : Speed dependence of the line-shift in cm-1 (Input)
# sg : Current WaveNumber of the Computation in cm-1 (Input).
### Response:
def PROFILE_SDRAUTIAN(sg0,GamD,Gam0,Gam2,Shift0,Shift2,anuVC,sg):
"""
# Speed dependent Rautian profile based on HTP.
# Input parameters:
# sg0 : Unperturbed line position in cm-1 (Input).
# GamD : Doppler HWHM in cm-1 (Input)
# Gam0 : Speed-averaged line-width in cm-1 (Input).
# Gam2 : Speed dependence of the line-width in cm-1 (Input).
# anuVC : Velocity-changing frequency in cm-1 (Input).
# Shift0 : Speed-averaged line-shift in cm-1 (Input).
# Shift2 : Speed dependence of the line-shift in cm-1 (Input)
# sg : Current WaveNumber of the Computation in cm-1 (Input).
"""
return pcqsdhc(sg0,GamD,Gam0,Gam2,Shift0,Shift2,anuVC,cZero,sg) |
def find_by_user(self, user, params={}, **options):
"""Returns the compact records for all teams to which user is assigned.
Parameters
----------
user : {String} An identifier for the user. Can be one of an email address,
the globally unique identifier for the user, or the keyword `me`
to indicate the current user making the request.
[params] : {Object} Parameters for the request
- [organization] : {Id} The workspace or organization to filter teams on.
"""
path = "/users/%s/teams" % (user)
return self.client.get_collection(path, params, **options) | Returns the compact records for all teams to which user is assigned.
Parameters
----------
user : {String} An identifier for the user. Can be one of an email address,
the globally unique identifier for the user, or the keyword `me`
to indicate the current user making the request.
[params] : {Object} Parameters for the request
- [organization] : {Id} The workspace or organization to filter teams on. | Below is the the instruction that describes the task:
### Input:
Returns the compact records for all teams to which user is assigned.
Parameters
----------
user : {String} An identifier for the user. Can be one of an email address,
the globally unique identifier for the user, or the keyword `me`
to indicate the current user making the request.
[params] : {Object} Parameters for the request
- [organization] : {Id} The workspace or organization to filter teams on.
### Response:
def find_by_user(self, user, params={}, **options):
"""Returns the compact records for all teams to which user is assigned.
Parameters
----------
user : {String} An identifier for the user. Can be one of an email address,
the globally unique identifier for the user, or the keyword `me`
to indicate the current user making the request.
[params] : {Object} Parameters for the request
- [organization] : {Id} The workspace or organization to filter teams on.
"""
path = "/users/%s/teams" % (user)
return self.client.get_collection(path, params, **options) |
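A hedged call-site sketch with the python-asana client this resource class belongs to; the access token and organization id are placeholders, and the client construction assumes the pre-v5 asana API.

import asana

client = asana.Client.access_token("0/placeholder-token")   # placeholder personal access token
teams = client.teams.find_by_user("me", {"organization": 1234567890})  # placeholder organization id
for team in teams:
    print(team["name"])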
def start(self):
'''
Start the actual syndic.
If sub-classed, don't **ever** forget to run:
super(YourSubClass, self).start()
NOTE: Run any required code before calling `super()`.
'''
super(Syndic, self).start()
if check_user(self.config['user']):
self.action_log_info('Starting up')
self.verify_hash_type()
try:
self.syndic.tune_in()
except KeyboardInterrupt:
self.action_log_info('Stopping')
self.shutdown() | Start the actual syndic.
If sub-classed, don't **ever** forget to run:
super(YourSubClass, self).start()
NOTE: Run any required code before calling `super()`. | Below is the the instruction that describes the task:
### Input:
Start the actual syndic.
If sub-classed, don't **ever** forget to run:
super(YourSubClass, self).start()
NOTE: Run any required code before calling `super()`.
### Response:
def start(self):
'''
Start the actual syndic.
If sub-classed, don't **ever** forget to run:
super(YourSubClass, self).start()
NOTE: Run any required code before calling `super()`.
'''
super(Syndic, self).start()
if check_user(self.config['user']):
self.action_log_info('Starting up')
self.verify_hash_type()
try:
self.syndic.tune_in()
except KeyboardInterrupt:
self.action_log_info('Stopping')
self.shutdown() |
def assume(self, cond):
"""
Optimizer hint: assume *cond* is always true.
"""
fn = self.module.declare_intrinsic("llvm.assume")
return self.call(fn, [cond]) | Optimizer hint: assume *cond* is always true. | Below is the the instruction that describes the task:
### Input:
Optimizer hint: assume *cond* is always true.
### Response:
def assume(self, cond):
"""
Optimizer hint: assume *cond* is always true.
"""
fn = self.module.declare_intrinsic("llvm.assume")
return self.call(fn, [cond]) |
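A small llvmlite sketch of where such a hint is emitted; the function and comparison are illustrative only.

from llvmlite import ir

int32 = ir.IntType(32)
module = ir.Module(name="demo")
func = ir.Function(module, ir.FunctionType(int32, [int32]), name="clamp")   # illustrative function
builder = ir.IRBuilder(func.append_basic_block(name="entry"))
arg = func.args[0]
nonneg = builder.icmp_signed(">=", arg, ir.Constant(int32, 0))
builder.assume(nonneg)   # tell the optimizer the argument is never negative
builder.ret(arg)
print(module)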
def _forward_request(transaction, destination, path):
"""
Forward requests.
:type transaction: Transaction
:param transaction: the transaction that owns the request
:param destination: the destination of the request (IP, port)
:param path: the path of the request.
:rtype : Transaction
:return: the edited transaction
"""
client = HelperClient(destination)
request = Request()
request.options = copy.deepcopy(transaction.request.options)
del request.block2
del request.block1
del request.uri_path
del request.proxy_uri
del request.proxy_schema
# TODO handle observing
del request.observe
# request.observe = transaction.request.observe
request.uri_path = path
request.destination = destination
request.payload = transaction.request.payload
request.code = transaction.request.code
response = client.send_request(request)
client.stop()
if response is not None:
transaction.response.payload = response.payload
transaction.response.code = response.code
transaction.response.options = response.options
else:
transaction.response.code = defines.Codes.SERVICE_UNAVAILABLE.number
return transaction | Forward requests.
:type transaction: Transaction
:param transaction: the transaction that owns the request
:param destination: the destination of the request (IP, port)
:param path: the path of the request.
:rtype : Transaction
:return: the edited transaction | Below is the the instruction that describes the task:
### Input:
Forward requests.
:type transaction: Transaction
:param transaction: the transaction that owns the request
:param destination: the destination of the request (IP, port)
:param path: the path of the request.
:rtype : Transaction
:return: the edited transaction
### Response:
def _forward_request(transaction, destination, path):
"""
Forward requests.
:type transaction: Transaction
:param transaction: the transaction that owns the request
:param destination: the destination of the request (IP, port)
:param path: the path of the request.
:rtype : Transaction
:return: the edited transaction
"""
client = HelperClient(destination)
request = Request()
request.options = copy.deepcopy(transaction.request.options)
del request.block2
del request.block1
del request.uri_path
del request.proxy_uri
del request.proxy_schema
# TODO handle observing
del request.observe
# request.observe = transaction.request.observe
request.uri_path = path
request.destination = destination
request.payload = transaction.request.payload
request.code = transaction.request.code
response = client.send_request(request)
client.stop()
if response is not None:
transaction.response.payload = response.payload
transaction.response.code = response.code
transaction.response.options = response.options
else:
transaction.response.code = defines.Codes.SERVICE_UNAVAILABLE.number
return transaction |
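For orientation, a hedged client-side sketch of the CoAPthon HelperClient used above; host, port and path are placeholders.

from coapthon.client.helperclient import HelperClient

client = HelperClient(("127.0.0.1", 5683))   # placeholder CoAP server address
response = client.get("basic")               # blocking GET on coap://127.0.0.1:5683/basic
print(response.code, response.payload)
client.stop()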
def fromML(mat):
"""
Convert a matrix from the new mllib-local representation.
This does NOT copy the data; it copies references.
:param mat: a :py:class:`pyspark.ml.linalg.Matrix`
:return: a :py:class:`pyspark.mllib.linalg.Matrix`
.. versionadded:: 2.0.0
"""
if isinstance(mat, newlinalg.DenseMatrix):
return DenseMatrix(mat.numRows, mat.numCols, mat.values, mat.isTransposed)
elif isinstance(mat, newlinalg.SparseMatrix):
return SparseMatrix(mat.numRows, mat.numCols, mat.colPtrs, mat.rowIndices,
mat.values, mat.isTransposed)
else:
raise TypeError("Unsupported matrix type %s" % type(mat)) | Convert a matrix from the new mllib-local representation.
This does NOT copy the data; it copies references.
:param mat: a :py:class:`pyspark.ml.linalg.Matrix`
:return: a :py:class:`pyspark.mllib.linalg.Matrix`
.. versionadded:: 2.0.0 | Below is the the instruction that describes the task:
### Input:
Convert a matrix from the new mllib-local representation.
This does NOT copy the data; it copies references.
:param mat: a :py:class:`pyspark.ml.linalg.Matrix`
:return: a :py:class:`pyspark.mllib.linalg.Matrix`
.. versionadded:: 2.0.0
### Response:
def fromML(mat):
"""
Convert a matrix from the new mllib-local representation.
This does NOT copy the data; it copies references.
:param mat: a :py:class:`pyspark.ml.linalg.Matrix`
:return: a :py:class:`pyspark.mllib.linalg.Matrix`
.. versionadded:: 2.0.0
"""
if isinstance(mat, newlinalg.DenseMatrix):
return DenseMatrix(mat.numRows, mat.numCols, mat.values, mat.isTransposed)
elif isinstance(mat, newlinalg.SparseMatrix):
return SparseMatrix(mat.numRows, mat.numCols, mat.colPtrs, mat.rowIndices,
mat.values, mat.isTransposed)
else:
raise TypeError("Unsupported matrix type %s" % type(mat)) |
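A hedged round-trip sketch, assuming a local PySpark installation (no SparkContext is needed for local matrices):

from pyspark.ml.linalg import Matrices as ml_Matrices
from pyspark.mllib.linalg import Matrices as mllib_Matrices

ml_mat = ml_Matrices.dense(2, 2, [1.0, 2.0, 3.0, 4.0])    # new mllib-local matrix
old_mat = mllib_Matrices.fromML(ml_mat)                    # shares the underlying values
print(type(old_mat).__name__, old_mat.toArray())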
def _RunMethod(self, method_config, request, global_params=None,
upload=None, upload_config=None, download=None):
"""Call this method with request."""
if upload is not None and download is not None:
# TODO(craigcitro): This just involves refactoring the logic
# below into callbacks that we can pass around; in particular,
# the order should be that the upload gets the initial request,
# and then passes its reply to a download if one exists, and
# then that goes to ProcessResponse and is returned.
raise exceptions.NotYetImplementedError(
'Cannot yet use both upload and download at once')
http_request = self.PrepareHttpRequest(
method_config, request, global_params, upload, upload_config,
download)
# TODO(craigcitro): Make num_retries customizable on Transfer
# objects, and pass in self.__client.num_retries when initializing
# an upload or download.
if download is not None:
download.InitializeDownload(http_request, client=self.client)
return
http_response = None
if upload is not None:
http_response = upload.InitializeUpload(
http_request, client=self.client)
if http_response is None:
http = self.__client.http
if upload and upload.bytes_http:
http = upload.bytes_http
opts = {
'retries': self.__client.num_retries,
'max_retry_wait': self.__client.max_retry_wait,
}
if self.__client.check_response_func:
opts['check_response_func'] = self.__client.check_response_func
if self.__client.retry_func:
opts['retry_func'] = self.__client.retry_func
http_response = http_wrapper.MakeRequest(
http, http_request, **opts)
return self.ProcessHttpResponse(method_config, http_response, request) | Call this method with request. | Below is the the instruction that describes the task:
### Input:
Call this method with request.
### Response:
def _RunMethod(self, method_config, request, global_params=None,
upload=None, upload_config=None, download=None):
"""Call this method with request."""
if upload is not None and download is not None:
# TODO(craigcitro): This just involves refactoring the logic
# below into callbacks that we can pass around; in particular,
# the order should be that the upload gets the initial request,
# and then passes its reply to a download if one exists, and
# then that goes to ProcessResponse and is returned.
raise exceptions.NotYetImplementedError(
'Cannot yet use both upload and download at once')
http_request = self.PrepareHttpRequest(
method_config, request, global_params, upload, upload_config,
download)
# TODO(craigcitro): Make num_retries customizable on Transfer
# objects, and pass in self.__client.num_retries when initializing
# an upload or download.
if download is not None:
download.InitializeDownload(http_request, client=self.client)
return
http_response = None
if upload is not None:
http_response = upload.InitializeUpload(
http_request, client=self.client)
if http_response is None:
http = self.__client.http
if upload and upload.bytes_http:
http = upload.bytes_http
opts = {
'retries': self.__client.num_retries,
'max_retry_wait': self.__client.max_retry_wait,
}
if self.__client.check_response_func:
opts['check_response_func'] = self.__client.check_response_func
if self.__client.retry_func:
opts['retry_func'] = self.__client.retry_func
http_response = http_wrapper.MakeRequest(
http, http_request, **opts)
return self.ProcessHttpResponse(method_config, http_response, request) |
def make_datastore_api(client):
"""Create an instance of the GAPIC Datastore API.
:type client: :class:`~google.cloud.datastore.client.Client`
:param client: The client that holds configuration details.
:rtype: :class:`.datastore.v1.datastore_client.DatastoreClient`
:returns: A datastore API instance with the proper credentials.
"""
parse_result = six.moves.urllib_parse.urlparse(client._base_url)
host = parse_result.netloc
if parse_result.scheme == "https":
channel = make_secure_channel(client._credentials, DEFAULT_USER_AGENT, host)
else:
channel = insecure_channel(host)
return datastore_client.DatastoreClient(
channel=channel,
client_info=client_info.ClientInfo(
client_library_version=__version__, gapic_version=__version__
),
) | Create an instance of the GAPIC Datastore API.
:type client: :class:`~google.cloud.datastore.client.Client`
:param client: The client that holds configuration details.
:rtype: :class:`.datastore.v1.datastore_client.DatastoreClient`
:returns: A datastore API instance with the proper credentials. | Below is the the instruction that describes the task:
### Input:
Create an instance of the GAPIC Datastore API.
:type client: :class:`~google.cloud.datastore.client.Client`
:param client: The client that holds configuration details.
:rtype: :class:`.datastore.v1.datastore_client.DatastoreClient`
:returns: A datastore API instance with the proper credentials.
### Response:
def make_datastore_api(client):
"""Create an instance of the GAPIC Datastore API.
:type client: :class:`~google.cloud.datastore.client.Client`
:param client: The client that holds configuration details.
:rtype: :class:`.datastore.v1.datastore_client.DatastoreClient`
:returns: A datastore API instance with the proper credentials.
"""
parse_result = six.moves.urllib_parse.urlparse(client._base_url)
host = parse_result.netloc
if parse_result.scheme == "https":
channel = make_secure_channel(client._credentials, DEFAULT_USER_AGENT, host)
else:
channel = insecure_channel(host)
return datastore_client.DatastoreClient(
channel=channel,
client_info=client_info.ClientInfo(
client_library_version=__version__, gapic_version=__version__
),
) |
def set_learning_rate(self, lr):
"""Sets a new learning rate of the optimizer.
Parameters
----------
lr : float
The new learning rate of the optimizer.
"""
if not isinstance(self._optimizer, opt.Optimizer):
raise UserWarning("Optimizer has to be defined before its learning "
"rate is mutated.")
else:
self._optimizer.set_learning_rate(lr) | Sets a new learning rate of the optimizer.
Parameters
----------
lr : float
The new learning rate of the optimizer. | Below is the the instruction that describes the task:
### Input:
Sets a new learning rate of the optimizer.
Parameters
----------
lr : float
The new learning rate of the optimizer.
### Response:
def set_learning_rate(self, lr):
"""Sets a new learning rate of the optimizer.
Parameters
----------
lr : float
The new learning rate of the optimizer.
"""
if not isinstance(self._optimizer, opt.Optimizer):
raise UserWarning("Optimizer has to be defined before its learning "
"rate is mutated.")
else:
self._optimizer.set_learning_rate(lr) |
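A hedged MXNet Gluon sketch of the usual call site; the network and rates are arbitrary.

from mxnet import gluon

net = gluon.nn.Dense(1)                     # arbitrary toy network
net.initialize()
trainer = gluon.Trainer(net.collect_params(), "sgd", {"learning_rate": 0.1})
trainer.set_learning_rate(0.01)             # decay the rate mid-training
print(trainer.learning_rate)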
def write(self, data: Union[bytes, memoryview]) -> "Future[None]":
"""Asynchronously write the given data to this stream.
This method returns a `.Future` that resolves (with a result
of ``None``) when the write has been completed.
The ``data`` argument may be of type `bytes` or `memoryview`.
.. versionchanged:: 4.0
Now returns a `.Future` if no callback is given.
.. versionchanged:: 4.5
Added support for `memoryview` arguments.
.. versionchanged:: 6.0
The ``callback`` argument was removed. Use the returned
`.Future` instead.
"""
self._check_closed()
if data:
if (
self.max_write_buffer_size is not None
and len(self._write_buffer) + len(data) > self.max_write_buffer_size
):
raise StreamBufferFullError("Reached maximum write buffer size")
self._write_buffer.append(data)
self._total_write_index += len(data)
future = Future() # type: Future[None]
future.add_done_callback(lambda f: f.exception())
self._write_futures.append((self._total_write_index, future))
if not self._connecting:
self._handle_write()
if self._write_buffer:
self._add_io_state(self.io_loop.WRITE)
self._maybe_add_error_listener()
return future | Asynchronously write the given data to this stream.
This method returns a `.Future` that resolves (with a result
of ``None``) when the write has been completed.
The ``data`` argument may be of type `bytes` or `memoryview`.
.. versionchanged:: 4.0
Now returns a `.Future` if no callback is given.
.. versionchanged:: 4.5
Added support for `memoryview` arguments.
.. versionchanged:: 6.0
The ``callback`` argument was removed. Use the returned
`.Future` instead. | Below is the the instruction that describes the task:
### Input:
Asynchronously write the given data to this stream.
This method returns a `.Future` that resolves (with a result
of ``None``) when the write has been completed.
The ``data`` argument may be of type `bytes` or `memoryview`.
.. versionchanged:: 4.0
Now returns a `.Future` if no callback is given.
.. versionchanged:: 4.5
Added support for `memoryview` arguments.
.. versionchanged:: 6.0
The ``callback`` argument was removed. Use the returned
`.Future` instead.
### Response:
def write(self, data: Union[bytes, memoryview]) -> "Future[None]":
"""Asynchronously write the given data to this stream.
This method returns a `.Future` that resolves (with a result
of ``None``) when the write has been completed.
The ``data`` argument may be of type `bytes` or `memoryview`.
.. versionchanged:: 4.0
Now returns a `.Future` if no callback is given.
.. versionchanged:: 4.5
Added support for `memoryview` arguments.
.. versionchanged:: 6.0
The ``callback`` argument was removed. Use the returned
`.Future` instead.
"""
self._check_closed()
if data:
if (
self.max_write_buffer_size is not None
and len(self._write_buffer) + len(data) > self.max_write_buffer_size
):
raise StreamBufferFullError("Reached maximum write buffer size")
self._write_buffer.append(data)
self._total_write_index += len(data)
future = Future() # type: Future[None]
future.add_done_callback(lambda f: f.exception())
self._write_futures.append((self._total_write_index, future))
if not self._connecting:
self._handle_write()
if self._write_buffer:
self._add_io_state(self.io_loop.WRITE)
self._maybe_add_error_listener()
return future |
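A hedged Tornado sketch of awaiting the Future returned by write; example.com is a placeholder host and network access is assumed.

import socket
from tornado.ioloop import IOLoop
from tornado.iostream import IOStream

async def send_request():
    stream = IOStream(socket.socket())
    await stream.connect(("example.com", 80))                                # placeholder host
    await stream.write(b"HEAD / HTTP/1.0\r\nHost: example.com\r\n\r\n")      # resolves when the write completes
    data = await stream.read_until_close()
    print(len(data), "bytes received")

IOLoop.current().run_sync(send_request)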
def autoencoder_residual():
"""Residual autoencoder model."""
hparams = autoencoder_autoregressive()
hparams.optimizer = "Adafactor"
hparams.clip_grad_norm = 1.0
hparams.learning_rate_constant = 0.5
hparams.learning_rate_warmup_steps = 500
hparams.learning_rate_schedule = "constant * linear_warmup * rsqrt_decay"
hparams.num_hidden_layers = 5
hparams.hidden_size = 64
hparams.max_hidden_size = 1024
hparams.add_hparam("num_residual_layers", 2)
hparams.add_hparam("residual_kernel_height", 3)
hparams.add_hparam("residual_kernel_width", 3)
hparams.add_hparam("residual_filter_multiplier", 2.0)
hparams.add_hparam("residual_dropout", 0.2)
hparams.add_hparam("residual_use_separable_conv", int(True))
hparams.add_hparam("kl_beta", 1.0)
return hparams | Residual autoencoder model. | Below is the the instruction that describes the task:
### Input:
Residual autoencoder model.
### Response:
def autoencoder_residual():
"""Residual autoencoder model."""
hparams = autoencoder_autoregressive()
hparams.optimizer = "Adafactor"
hparams.clip_grad_norm = 1.0
hparams.learning_rate_constant = 0.5
hparams.learning_rate_warmup_steps = 500
hparams.learning_rate_schedule = "constant * linear_warmup * rsqrt_decay"
hparams.num_hidden_layers = 5
hparams.hidden_size = 64
hparams.max_hidden_size = 1024
hparams.add_hparam("num_residual_layers", 2)
hparams.add_hparam("residual_kernel_height", 3)
hparams.add_hparam("residual_kernel_width", 3)
hparams.add_hparam("residual_filter_multiplier", 2.0)
hparams.add_hparam("residual_dropout", 0.2)
hparams.add_hparam("residual_use_separable_conv", int(True))
hparams.add_hparam("kl_beta", 1.0)
return hparams |
def mapkeys(function, dict_):
"""Return a new dictionary where the keys come from applying ``function``
to the keys of given dictionary.
.. warning::
If ``function`` returns the same value for more than one key,
it is undefined which key will be chosen for the resulting dictionary.
:param function: Function taking a dictionary key,
or None (corresponding to identity function)
.. versionadded:: 0.0.2
"""
ensure_mapping(dict_)
function = identity() if function is None else ensure_callable(function)
return dict_.__class__((function(k), v) for k, v in iteritems(dict_)) | Return a new dictionary where the keys come from applying ``function``
to the keys of given dictionary.
.. warning::
If ``function`` returns the same value for more than one key,
it is undefined which key will be chosen for the resulting dictionary.
:param function: Function taking a dictionary key,
or None (corresponding to identity function)
.. versionadded:: 0.0.2 | Below is the the instruction that describes the task:
### Input:
Return a new dictionary where the keys come from applying ``function``
to the keys of given dictionary.
.. warning::
If ``function`` returns the same value for more than one key,
it is undefined which key will be chosen for the resulting dictionary.
:param function: Function taking a dictionary key,
or None (corresponding to identity function)
.. versionadded:: 0.0.2
### Response:
def mapkeys(function, dict_):
"""Return a new dictionary where the keys come from applying ``function``
to the keys of given dictionary.
.. warning::
If ``function`` returns the same value for more than one key,
it is undefined which key will be chosen for the resulting dictionary.
:param function: Function taking a dictionary key,
or None (corresponding to identity function)
.. versionadded:: 0.0.2
"""
ensure_mapping(dict_)
function = identity() if function is None else ensure_callable(function)
return dict_.__class__((function(k), v) for k, v in iteritems(dict_)) |
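The behaviour is equivalent to a keyed dict comprehension; a tiny standalone illustration using str.upper as the key function:

prices = {"apple": 3, "pear": 5}
print({k.upper(): v for k, v in prices.items()})   # {'APPLE': 3, 'PEAR': 5}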
def _ensure_patient_group_is_ok(patient_object, patient_name=None):
"""
Ensure that the provided entries for the patient groups is formatted properly.
:param set|dict patient_object: The values passed to the samples patient group
:param str patient_name: Optional name for the set
:raises ParameterError: If required entry doesn't exist
"""
from protect.addons.common import TCGAToGTEx
assert isinstance(patient_object, (set, dict)), '%s,%s' % (patient_object, patient_name)
# set(dict) = set of keys of the dict
test_set = set(patient_object)
if 'tumor_type' not in patient_object:
raise ParameterError(('The patient entry for sample %s ' % patient_name) +
'does not contain a Tumor type.')
elif patient_object['tumor_type'] not in TCGAToGTEx:
raise ParameterError(('The patient entry for sample %s ' % patient_name) +
'contains an invalid Tumor type. Please use one of the '
'valid TCGA tumor types.')
if {'tumor_dna_fastq_1', 'normal_dna_fastq_1', 'tumor_rna_fastq_1'}.issubset(test_set):
# Best case scenario, we get all fastqs
pass
else:
# We have less than 3 fastqs so we have to have a haplotype.
if 'hla_haplotype_files' not in test_set:
raise ParameterError(('The patient entry for sample %s ' % patient_name) +
'does not contain a hla_haplotype_files entry.\nCannot haplotype '
'patient if all the input sequence files are not fastqs.')
# Either we have a fastq and/or bam for the tumor and normal, or we need to be given a vcf
if (({re.search('tumor_dna_((bam)|(fastq_1)).*', x) for x in test_set} == {None} or
{re.search('normal_dna_((bam)|(fastq_1)).*', x) for x in test_set} == {None}) and
('mutation_vcf' not in test_set and 'fusion_bedpe' not in test_set)):
raise ParameterError(('The patient entry for sample %s ' % patient_name) +
'does not contain a mutation_vcf or fusion_bedpe entry. If both '
'tumor and normal DNA sequences (fastqs or bam) are not provided, '
'a pre-computed vcf and/or bedpe must be provided.')
# We have to be given a tumor rna fastq or bam unless we are processing ONLY fusions
if {re.search('tumor_rna_((bam)|(fastq_1)).*', x) for x in test_set} == {None}:
if 'mutation_vcf' not in test_set and 'fusion_bedpe' in test_set:
# The only case where it is ok to not have the genome mapped rna.
pass
else:
raise ParameterError(('The patient entry for sample %s ' % patient_name) +
'does not contain a tumor rna sequence data entry. We require '
'either tumor_rna_fastq_1 or tumor_rna_bam.')
# If we are given an RNA bam then it needs to have a corresponding transcriptome bam unless
# we have also been provided expression values.
if 'tumor_rna_bam' in test_set and 'tumor_rna_transcriptome_bam' not in test_set:
if 'expression_files' not in test_set:
raise ParameterError(('The patient entry for sample %s ' % patient_name +
'was provided a tumor rna bam with sequences mapped to the '
'genome but was not provided a matching rna bam for the '
'transcriptome or a tar containing expression values. '
'We require either a matching transcriptome bam to estimate '
'expression, or the precomputed expression values.')) | Ensure that the provided entries for the patient groups is formatted properly.
:param set|dict patient_object: The values passed to the samples patient group
:param str patient_name: Optional name for the set
:raises ParameterError: If required entry doesn't exist | Below is the the instruction that describes the task:
### Input:
Ensure that the provided entries for the patient groups is formatted properly.
:param set|dict patient_object: The values passed to the samples patient group
:param str patient_name: Optional name for the set
:raises ParameterError: If required entry doesn't exist
### Response:
def _ensure_patient_group_is_ok(patient_object, patient_name=None):
"""
Ensure that the provided entries for the patient groups is formatted properly.
:param set|dict patient_object: The values passed to the samples patient group
:param str patient_name: Optional name for the set
:raises ParameterError: If required entry doesnt exist
"""
from protect.addons.common import TCGAToGTEx
assert isinstance(patient_object, (set, dict)), '%s,%s' % (patient_object, patient_name)
# set(dict) = set of keys of the dict
test_set = set(patient_object)
if 'tumor_type' not in patient_object:
raise ParameterError(('The patient entry for sample %s ' % patient_name) +
'does not contain a Tumor type.')
elif patient_object['tumor_type'] not in TCGAToGTEx:
raise ParameterError(('The patient entry for sample %s ' % patient_name) +
'contains an invalid Tumor type. Please use one of the '
'valid TCGA tumor types.')
if {'tumor_dna_fastq_1', 'normal_dna_fastq_1', 'tumor_rna_fastq_1'}.issubset(test_set):
# Best case scenario, we get all fastqs
pass
else:
# We have less than 3 fastqs so we have to have a haplotype.
if 'hla_haplotype_files' not in test_set:
raise ParameterError(('The patient entry for sample %s ' % patient_name) +
'does not contain a hla_haplotype_files entry.\nCannot haplotype '
'patient if all the input sequence files are not fastqs.')
# Either we have a fastq and/or bam for the tumor and normal, or we need to be given a vcf
if (({re.search('tumor_dna_((bam)|(fastq_1)).*', x) for x in test_set} == {None} or
{re.search('normal_dna_((bam)|(fastq_1)).*', x) for x in test_set} == {None}) and
('mutation_vcf' not in test_set and 'fusion_bedpe' not in test_set)):
raise ParameterError(('The patient entry for sample %s ' % patient_name) +
'does not contain a mutation_vcf or fusion_bedpe entry. If both '
'tumor and normal DNA sequences (fastqs or bam) are not provided, '
'a pre-computed vcf and/or bedpe must be provided.')
# We have to be given a tumor rna fastq or bam unless we are processing ONLY fusions
if {re.search('tumor_rna_((bam)|(fastq_1)).*', x) for x in test_set} == {None}:
if 'mutation_vcf' not in test_set and 'fusion_bedpe' in test_set:
# The only case where it is ok to not have the genome mapped rna.
pass
else:
raise ParameterError(('The patient entry for sample %s ' % patient_name) +
'does not contain a tumor rna sequence data entry. We require '
'either tumor_rna_fastq_1 or tumor_rna_bam.')
# If we are given an RNA bam then it needs to have a corresponding transcriptome bam unless
# we have also been provided expression values.
if 'tumor_rna_bam' in test_set and 'tumor_rna_transcriptome_bam' not in test_set:
if 'expression_files' not in test_set:
raise ParameterError(('The patient entry for sample %s ' % patient_name +
'was provided a tumor rna bam with sequences mapped to the '
'genome but was not provided a matching rna bam for the '
'transcriptome or a tar containing expression values. '
'We require either a matching transcriptome bam to estimate '
'expression, or the precomputed expression values.')) |
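The least obvious check above is the regex-over-set idiom; a standalone sketch of what comparing the comprehension to {None} means:

import re

test_set = {"tumor_type", "hla_haplotype_files", "mutation_vcf"}
# re.search returns None for every element, so the set collapses to {None},
# i.e. no tumor DNA fastq or bam entry is present.
no_tumor_dna = {re.search(r"tumor_dna_((bam)|(fastq_1)).*", x) for x in test_set} == {None}
print(no_tumor_dna)   # True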
def calculate_lstm_output_shapes(operator):
'''
See LSTM's conversion function for its output shapes.
'''
check_input_and_output_numbers(operator, input_count_range=[1, 3], output_count_range=[1, 3])
check_input_and_output_types(operator, good_input_types=[FloatTensorType])
input_shape = operator.inputs[0].type.shape
if len(input_shape) not in [2, 4]:
raise RuntimeError('Input must be a 2-D tensor')
params = operator.raw_operator.uniDirectionalLSTM
# The following line is more accurate but it may break some tests
# output_shape = ['None', params.outputVectorSize] if params.params.sequenceOutput else [1, params.outputVectorSize]
output_shape = ['None', params.outputVectorSize]
state_shape = [1, params.outputVectorSize]
# TODO: Changing input shapes of an operator is dangerous, this should be moved to Topology's _fix_shapes function
if len(operator.inputs) > 1:
Y_h_in = operator.inputs[1] # The initial hidden state of a single sequence
Y_h_in.type.shape = state_shape
if len(operator.inputs) > 2:
Y_c_in = operator.inputs[2] # The initial cell state of a single sequence
Y_c_in.type.shape = state_shape
operator.outputs[0].type.shape = output_shape
if len(operator.outputs) > 1:
operator.outputs[1].type.shape = state_shape
if len(operator.outputs) > 2:
operator.outputs[2].type.shape = state_shape | See LSTM's conversion function for its output shapes. | Below is the the instruction that describes the task:
### Input:
See LSTM's conversion function for its output shapes.
### Response:
def calculate_lstm_output_shapes(operator):
'''
See LSTM's conversion function for its output shapes.
'''
check_input_and_output_numbers(operator, input_count_range=[1, 3], output_count_range=[1, 3])
check_input_and_output_types(operator, good_input_types=[FloatTensorType])
input_shape = operator.inputs[0].type.shape
if len(input_shape) not in [2, 4]:
raise RuntimeError('Input must be a 2-D tensor')
params = operator.raw_operator.uniDirectionalLSTM
# The following line is more accurate but it may break some tests
# output_shape = ['None', params.outputVectorSize] if params.params.sequenceOutput else [1, params.outputVectorSize]
output_shape = ['None', params.outputVectorSize]
state_shape = [1, params.outputVectorSize]
# TODO: Changing input shapes of an operator is dangerous, this should be moved to Topology's _fix_shapes function
if len(operator.inputs) > 1:
Y_h_in = operator.inputs[1] # The initial hidden state of a single sequence
Y_h_in.type.shape = state_shape
if len(operator.inputs) > 2:
Y_c_in = operator.inputs[2] # The initial cell state of a single sequence
Y_c_in.type.shape = state_shape
operator.outputs[0].type.shape = output_shape
if len(operator.outputs) > 1:
operator.outputs[1].type.shape = state_shape
if len(operator.outputs) > 2:
operator.outputs[2].type.shape = state_shape |
def construct_start_message(self):
"""Collect preliminary run info at the start of the DFK.
Returns :
- Message dict dumped as json string, ready for UDP
"""
uname = getpass.getuser().encode('latin1')
hashed_username = hashlib.sha256(uname).hexdigest()[0:10]
hname = socket.gethostname().encode('latin1')
hashed_hostname = hashlib.sha256(hname).hexdigest()[0:10]
message = {'uuid': self.uuid,
'uname': hashed_username,
'hname': hashed_hostname,
'test': self.test_mode,
'parsl_v': self.parsl_version,
'python_v': self.python_version,
'os': platform.system(),
'os_v': platform.release(),
'start': time.time()}
return json.dumps(message) | Collect preliminary run info at the start of the DFK.
Returns :
- Message dict dumped as json string, ready for UDP | Below is the the instruction that describes the task:
### Input:
Collect preliminary run info at the start of the DFK.
Returns :
- Message dict dumped as json string, ready for UDP
### Response:
def construct_start_message(self):
"""Collect preliminary run info at the start of the DFK.
Returns :
- Message dict dumped as json string, ready for UDP
"""
uname = getpass.getuser().encode('latin1')
hashed_username = hashlib.sha256(uname).hexdigest()[0:10]
hname = socket.gethostname().encode('latin1')
hashed_hostname = hashlib.sha256(hname).hexdigest()[0:10]
message = {'uuid': self.uuid,
'uname': hashed_username,
'hname': hashed_hostname,
'test': self.test_mode,
'parsl_v': self.parsl_version,
'python_v': self.python_version,
'os': platform.system(),
'os_v': platform.release(),
'start': time.time()}
return json.dumps(message) |
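A standalone sketch of the anonymised identifiers built above, using only the standard library; it illustrates the hashing step and is not part of the original tracking class.
import getpass
import hashlib
import socket

# Usernames and hostnames are never reported raw: only the first ten hex
# characters of their SHA-256 digests are kept.
hashed_username = hashlib.sha256(getpass.getuser().encode('latin1')).hexdigest()[0:10]
hashed_hostname = hashlib.sha256(socket.gethostname().encode('latin1')).hexdigest()[0:10]
print(hashed_username, hashed_hostname)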
def get_output_files(self):
"""
Return list of output files for this DAG node and its job.
"""
output_files = list(self.__output_files)
if isinstance(self.job(), CondorDAGJob):
output_files = output_files + self.job().get_output_files()
return output_files | Return list of output files for this DAG node and its job. | Below is the instruction that describes the task:
### Input:
Return list of output files for this DAG node and its job.
### Response:
def get_output_files(self):
"""
Return list of output files for this DAG node and its job.
"""
output_files = list(self.__output_files)
if isinstance(self.job(), CondorDAGJob):
output_files = output_files + self.job().get_output_files()
return output_files |
def matchTypes(accept_types, have_types):
"""Given the result of parsing an Accept: header, and the
available MIME types, return the acceptable types with their
quality markdowns.
For example:
>>> acceptable = parseAcceptHeader('text/html, text/plain; q=0.5')
>>> matchTypes(acceptable, ['text/plain', 'text/html', 'image/jpeg'])
[('text/html', 1.0), ('text/plain', 0.5)]
Type signature: ([(str, str, float)], [str]) -> [(str, float)]
"""
if not accept_types:
# Accept all of them
default = 1
else:
default = 0
match_main = {}
match_sub = {}
for (main, sub, q) in accept_types:
if main == '*':
default = max(default, q)
continue
elif sub == '*':
match_main[main] = max(match_main.get(main, 0), q)
else:
match_sub[(main, sub)] = max(match_sub.get((main, sub), 0), q)
accepted_list = []
order_maintainer = 0
for mtype in have_types:
main, sub = mtype.split('/')
if (main, sub) in match_sub:
q = match_sub[(main, sub)]
else:
q = match_main.get(main, default)
if q:
accepted_list.append((1 - q, order_maintainer, q, mtype))
order_maintainer += 1
accepted_list.sort()
return [(mtype, q) for (_, _, q, mtype) in accepted_list] | Given the result of parsing an Accept: header, and the
available MIME types, return the acceptable types with their
quality markdowns.
For example:
>>> acceptable = parseAcceptHeader('text/html, text/plain; q=0.5')
>>> matchTypes(acceptable, ['text/plain', 'text/html', 'image/jpeg'])
[('text/html', 1.0), ('text/plain', 0.5)]
Type signature: ([(str, str, float)], [str]) -> [(str, float)] | Below is the instruction that describes the task:
### Input:
Given the result of parsing an Accept: header, and the
available MIME types, return the acceptable types with their
quality markdowns.
For example:
>>> acceptable = parseAcceptHeader('text/html, text/plain; q=0.5')
>>> matchTypes(acceptable, ['text/plain', 'text/html', 'image/jpeg'])
[('text/html', 1.0), ('text/plain', 0.5)]
Type signature: ([(str, str, float)], [str]) -> [(str, float)]
### Response:
def matchTypes(accept_types, have_types):
"""Given the result of parsing an Accept: header, and the
available MIME types, return the acceptable types with their
quality markdowns.
For example:
>>> acceptable = parseAcceptHeader('text/html, text/plain; q=0.5')
>>> matchTypes(acceptable, ['text/plain', 'text/html', 'image/jpeg'])
[('text/html', 1.0), ('text/plain', 0.5)]
Type signature: ([(str, str, float)], [str]) -> [(str, float)]
"""
if not accept_types:
# Accept all of them
default = 1
else:
default = 0
match_main = {}
match_sub = {}
for (main, sub, q) in accept_types:
if main == '*':
default = max(default, q)
continue
elif sub == '*':
match_main[main] = max(match_main.get(main, 0), q)
else:
match_sub[(main, sub)] = max(match_sub.get((main, sub), 0), q)
accepted_list = []
order_maintainer = 0
for mtype in have_types:
main, sub = mtype.split('/')
if (main, sub) in match_sub:
q = match_sub[(main, sub)]
else:
q = match_main.get(main, default)
if q:
accepted_list.append((1 - q, order_maintainer, q, mtype))
order_maintainer += 1
accepted_list.sort()
return [(mtype, q) for (_, _, q, mtype) in accepted_list] |
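A minimal usage sketch of the function above; the pre-parsed accept list is written out by hand here instead of being produced by a header parser.
# Hand-written result of parsing "text/html, text/plain; q=0.5":
# each tuple is (main type, subtype, quality).
acceptable = [('text', 'html', 1.0), ('text', 'plain', 0.5)]

# Rank the server's available types against the client's preferences.
print(matchTypes(acceptable, ['text/plain', 'text/html', 'image/jpeg']))
# [('text/html', 1.0), ('text/plain', 0.5)]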
def _respond(self, channel, text):
"""Respond to a message on the current socket.
Args:
channel (:py:class:`str`): The channel to send to.
text (:py:class:`str`): The message text to send.
"""
result = self._format_message(channel, text)
if result is not None:
logger.info(
'Sending message: %r',
truncate(result, max_len=50),
)
self.socket.send_str(result) | Respond to a message on the current socket.
Args:
channel (:py:class:`str`): The channel to send to.
text (:py:class:`str`): The message text to send. | Below is the instruction that describes the task:
### Input:
Respond to a message on the current socket.
Args:
channel (:py:class:`str`): The channel to send to.
text (:py:class:`str`): The message text to send.
### Response:
def _respond(self, channel, text):
"""Respond to a message on the current socket.
Args:
channel (:py:class:`str`): The channel to send to.
text (:py:class:`str`): The message text to send.
"""
result = self._format_message(channel, text)
if result is not None:
logger.info(
'Sending message: %r',
truncate(result, max_len=50),
)
self.socket.send_str(result) |
def set_or_clear_breakpoint(self):
"""Set/Clear breakpoint"""
editorstack = self.get_current_editorstack()
if editorstack is not None:
self.switch_to_plugin()
editorstack.set_or_clear_breakpoint() | Set/Clear breakpoint | Below is the instruction that describes the task:
### Input:
Set/Clear breakpoint
### Response:
def set_or_clear_breakpoint(self):
"""Set/Clear breakpoint"""
editorstack = self.get_current_editorstack()
if editorstack is not None:
self.switch_to_plugin()
editorstack.set_or_clear_breakpoint() |
def start(client, container, interactive=True, stdout=None, stderr=None, stdin=None, logs=None):
"""
Present the PTY of the container inside the current process.
This is just a wrapper for PseudoTerminal(client, container).start()
"""
operation = RunOperation(client, container, interactive=interactive, stdout=stdout,
stderr=stderr, stdin=stdin, logs=logs)
PseudoTerminal(client, operation).start() | Present the PTY of the container inside the current process.
This is just a wrapper for PseudoTerminal(client, container).start() | Below is the instruction that describes the task:
### Input:
Present the PTY of the container inside the current process.
This is just a wrapper for PseudoTerminal(client, container).start()
### Response:
def start(client, container, interactive=True, stdout=None, stderr=None, stdin=None, logs=None):
"""
Present the PTY of the container inside the current process.
This is just a wrapper for PseudoTerminal(client, container).start()
"""
operation = RunOperation(client, container, interactive=interactive, stdout=stdout,
stderr=stderr, stdin=stdin, logs=logs)
PseudoTerminal(client, operation).start() |
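A hedged usage sketch with the docker-py low-level client; the image and command names are placeholders, a running Docker daemon is assumed, and docker.APIClient is the newer name of the client class.
import docker

client = docker.APIClient()
container = client.create_container(
    image='busybox:latest', command='/bin/sh', tty=True, stdin_open=True)
# Attach the calling terminal to the container's PTY.
start(client, container)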
def ensure_directory(path):
"""Ensure that the parent directory of `path` exists"""
dirname = os.path.dirname(path)
py31compat.makedirs(dirname, exist_ok=True) | Ensure that the parent directory of `path` exists | Below is the instruction that describes the task:
### Input:
Ensure that the parent directory of `path` exists
### Response:
def ensure_directory(path):
"""Ensure that the parent directory of `path` exists"""
dirname = os.path.dirname(path)
py31compat.makedirs(dirname, exist_ok=True) |
def CheckTrailingSemicolon(filename, clean_lines, linenum, error):
"""Looks for redundant trailing semicolon.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
# Block bodies should not be followed by a semicolon. Due to C++11
# brace initialization, there are more places where semicolons are
# required than not, so we use a whitelist approach to check these
# rather than a blacklist. These are the places where "};" should
# be replaced by just "}":
# 1. Some flavor of block following closing parenthesis:
# for (;;) {};
# while (...) {};
# switch (...) {};
# Function(...) {};
# if (...) {};
# if (...) else if (...) {};
#
# 2. else block:
# if (...) else {};
#
# 3. const member function:
# Function(...) const {};
#
# 4. Block following some statement:
# x = 42;
# {};
#
# 5. Block at the beginning of a function:
# Function(...) {
# {};
# }
#
# Note that naively checking for the preceding "{" will also match
# braces inside multi-dimensional arrays, but this is fine since
# that expression will not contain semicolons.
#
# 6. Block following another block:
# while (true) {}
# {};
#
# 7. End of namespaces:
# namespace {};
#
# These semicolons seems far more common than other kinds of
# redundant semicolons, possibly due to people converting classes
# to namespaces. For now we do not warn for this case.
#
# Try matching case 1 first.
match = Match(r'^(.*\)\s*)\{', line)
if match:
# Matched closing parenthesis (case 1). Check the token before the
# matching opening parenthesis, and don't warn if it looks like a
# macro. This avoids these false positives:
# - macro that defines a base class
# - multi-line macro that defines a base class
# - macro that defines the whole class-head
#
# But we still issue warnings for macros that we know are safe to
# warn, specifically:
# - TEST, TEST_F, TEST_P, MATCHER, MATCHER_P
# - TYPED_TEST
# - INTERFACE_DEF
# - EXCLUSIVE_LOCKS_REQUIRED, SHARED_LOCKS_REQUIRED, LOCKS_EXCLUDED:
#
# We implement a whitelist of safe macros instead of a blacklist of
# unsafe macros, even though the latter appears less frequently in
# google code and would have been easier to implement. This is because
# the downside for getting the whitelist wrong means some extra
# semicolons, while the downside for getting the blacklist wrong
# would result in compile errors.
#
# In addition to macros, we also don't want to warn on
# - Compound literals
# - Lambdas
# - alignas specifier with anonymous structs
# - decltype
closing_brace_pos = match.group(1).rfind(')')
opening_parenthesis = ReverseCloseExpression(
clean_lines, linenum, closing_brace_pos)
if opening_parenthesis[2] > -1:
line_prefix = opening_parenthesis[0][0:opening_parenthesis[2]]
macro = Search(r'\b([A-Z_][A-Z0-9_]*)\s*$', line_prefix)
func = Match(r'^(.*\])\s*$', line_prefix)
if ((macro and
macro.group(1) not in (
'TEST', 'TEST_F', 'MATCHER', 'MATCHER_P', 'TYPED_TEST',
'EXCLUSIVE_LOCKS_REQUIRED', 'SHARED_LOCKS_REQUIRED',
'LOCKS_EXCLUDED', 'INTERFACE_DEF')) or
(func and not Search(r'\boperator\s*\[\s*\]', func.group(1))) or
Search(r'\b(?:struct|union)\s+alignas\s*$', line_prefix) or
Search(r'\bdecltype$', line_prefix) or
Search(r'\s+=\s*$', line_prefix)):
match = None
if (match and
opening_parenthesis[1] > 1 and
Search(r'\]\s*$', clean_lines.elided[opening_parenthesis[1] - 1])):
# Multi-line lambda-expression
match = None
else:
# Try matching cases 2-3.
match = Match(r'^(.*(?:else|\)\s*const)\s*)\{', line)
if not match:
# Try matching cases 4-6. These are always matched on separate lines.
#
# Note that we can't simply concatenate the previous line to the
# current line and do a single match, otherwise we may output
# duplicate warnings for the blank line case:
# if (cond) {
# // blank line
# }
prevline = GetPreviousNonBlankLine(clean_lines, linenum)[0]
if prevline and Search(r'[;{}]\s*$', prevline):
match = Match(r'^(\s*)\{', line)
# Check matching closing brace
if match:
(endline, endlinenum, endpos) = CloseExpression(
clean_lines, linenum, len(match.group(1)))
if endpos > -1 and Match(r'^\s*;', endline[endpos:]):
# Current {} pair is eligible for semicolon check, and we have found
# the redundant semicolon, output warning here.
#
# Note: because we are scanning forward for opening braces, and
# outputting warnings for the matching closing brace, if there are
# nested blocks with trailing semicolons, we will get the error
# messages in reversed order.
# We need to check the line forward for NOLINT
raw_lines = clean_lines.raw_lines
ParseNolintSuppressions(filename, raw_lines[endlinenum-1], endlinenum-1,
error)
ParseNolintSuppressions(filename, raw_lines[endlinenum], endlinenum,
error)
error(filename, endlinenum, 'readability/braces', 4,
"You don't need a ; after a }") | Looks for redundant trailing semicolon.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found. | Below is the instruction that describes the task:
### Input:
Looks for redundant trailing semicolon.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
### Response:
def CheckTrailingSemicolon(filename, clean_lines, linenum, error):
"""Looks for redundant trailing semicolon.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
# Block bodies should not be followed by a semicolon. Due to C++11
# brace initialization, there are more places where semicolons are
# required than not, so we use a whitelist approach to check these
# rather than a blacklist. These are the places where "};" should
# be replaced by just "}":
# 1. Some flavor of block following closing parenthesis:
# for (;;) {};
# while (...) {};
# switch (...) {};
# Function(...) {};
# if (...) {};
# if (...) else if (...) {};
#
# 2. else block:
# if (...) else {};
#
# 3. const member function:
# Function(...) const {};
#
# 4. Block following some statement:
# x = 42;
# {};
#
# 5. Block at the beginning of a function:
# Function(...) {
# {};
# }
#
# Note that naively checking for the preceding "{" will also match
# braces inside multi-dimensional arrays, but this is fine since
# that expression will not contain semicolons.
#
# 6. Block following another block:
# while (true) {}
# {};
#
# 7. End of namespaces:
# namespace {};
#
# These semicolons seems far more common than other kinds of
# redundant semicolons, possibly due to people converting classes
# to namespaces. For now we do not warn for this case.
#
# Try matching case 1 first.
match = Match(r'^(.*\)\s*)\{', line)
if match:
# Matched closing parenthesis (case 1). Check the token before the
# matching opening parenthesis, and don't warn if it looks like a
# macro. This avoids these false positives:
# - macro that defines a base class
# - multi-line macro that defines a base class
# - macro that defines the whole class-head
#
# But we still issue warnings for macros that we know are safe to
# warn, specifically:
# - TEST, TEST_F, TEST_P, MATCHER, MATCHER_P
# - TYPED_TEST
# - INTERFACE_DEF
# - EXCLUSIVE_LOCKS_REQUIRED, SHARED_LOCKS_REQUIRED, LOCKS_EXCLUDED:
#
# We implement a whitelist of safe macros instead of a blacklist of
# unsafe macros, even though the latter appears less frequently in
# google code and would have been easier to implement. This is because
# the downside for getting the whitelist wrong means some extra
# semicolons, while the downside for getting the blacklist wrong
# would result in compile errors.
#
# In addition to macros, we also don't want to warn on
# - Compound literals
# - Lambdas
# - alignas specifier with anonymous structs
# - decltype
closing_brace_pos = match.group(1).rfind(')')
opening_parenthesis = ReverseCloseExpression(
clean_lines, linenum, closing_brace_pos)
if opening_parenthesis[2] > -1:
line_prefix = opening_parenthesis[0][0:opening_parenthesis[2]]
macro = Search(r'\b([A-Z_][A-Z0-9_]*)\s*$', line_prefix)
func = Match(r'^(.*\])\s*$', line_prefix)
if ((macro and
macro.group(1) not in (
'TEST', 'TEST_F', 'MATCHER', 'MATCHER_P', 'TYPED_TEST',
'EXCLUSIVE_LOCKS_REQUIRED', 'SHARED_LOCKS_REQUIRED',
'LOCKS_EXCLUDED', 'INTERFACE_DEF')) or
(func and not Search(r'\boperator\s*\[\s*\]', func.group(1))) or
Search(r'\b(?:struct|union)\s+alignas\s*$', line_prefix) or
Search(r'\bdecltype$', line_prefix) or
Search(r'\s+=\s*$', line_prefix)):
match = None
if (match and
opening_parenthesis[1] > 1 and
Search(r'\]\s*$', clean_lines.elided[opening_parenthesis[1] - 1])):
# Multi-line lambda-expression
match = None
else:
# Try matching cases 2-3.
match = Match(r'^(.*(?:else|\)\s*const)\s*)\{', line)
if not match:
# Try matching cases 4-6. These are always matched on separate lines.
#
# Note that we can't simply concatenate the previous line to the
# current line and do a single match, otherwise we may output
# duplicate warnings for the blank line case:
# if (cond) {
# // blank line
# }
prevline = GetPreviousNonBlankLine(clean_lines, linenum)[0]
if prevline and Search(r'[;{}]\s*$', prevline):
match = Match(r'^(\s*)\{', line)
# Check matching closing brace
if match:
(endline, endlinenum, endpos) = CloseExpression(
clean_lines, linenum, len(match.group(1)))
if endpos > -1 and Match(r'^\s*;', endline[endpos:]):
# Current {} pair is eligible for semicolon check, and we have found
# the redundant semicolon, output warning here.
#
# Note: because we are scanning forward for opening braces, and
# outputting warnings for the matching closing brace, if there are
# nested blocks with trailing semicolons, we will get the error
# messages in reversed order.
# We need to check the line forward for NOLINT
raw_lines = clean_lines.raw_lines
ParseNolintSuppressions(filename, raw_lines[endlinenum-1], endlinenum-1,
error)
ParseNolintSuppressions(filename, raw_lines[endlinenum], endlinenum,
error)
error(filename, endlinenum, 'readability/braces', 4,
"You don't need a ; after a }") |
def del_tag(
self,
tag):
"""*delete a tag this taskpaper object*
**Key Arguments:**
- ``tag`` -- the tag to delete to the object
**Usage:**
.. code-block:: python
aTask.del_tag("@due")
"""
if tag.replace("@", "") not in self.tags:
return
self.refresh
oldContent = self.to_string(indentLevel=1)
newTags = []
newTags[:] = [n for n in self.tags if tag.replace("@", "") not in n]  # keep every existing tag except the one being deleted
self.tags = newTags
newContent = self.to_string(indentLevel=1)
# ADD DIRECTLY TO CONTENT IF THE PROJECT IS BEING ADDED SPECIFICALLY TO
# THIS OBJECT
self.parent._update_document_tree(
oldContent=oldContent,
newContent=newContent
)
self.refresh
return None | *delete a tag this taskpaper object*
**Key Arguments:**
- ``tag`` -- the tag to delete to the object
**Usage:**
.. code-block:: python
aTask.del_tag("@due") | Below is the the instruction that describes the task:
### Input:
*delete a tag this taskpaper object*
**Key Arguments:**
- ``tag`` -- the tag to delete to the object
**Usage:**
.. code-block:: python
aTask.del_tag("@due")
### Response:
def del_tag(
self,
tag):
"""*delete a tag this taskpaper object*
**Key Arguments:**
- ``tag`` -- the tag to delete to the object
**Usage:**
.. code-block:: python
aTask.del_tag("@due")
"""
if tag.replace("@", "") not in self.tags:
return
self.refresh
oldContent = self.to_string(indentLevel=1)
newTags = []
newTags[:] = [n for n in self.tags if tag.replace("@", "") not in n]  # keep every existing tag except the one being deleted
self.tags = newTags
newContent = self.to_string(indentLevel=1)
# ADD DIRECTLY TO CONTENT IF THE PROJECT IS BEING ADDED SPECIFICALLY TO
# THIS OBJECT
self.parent._update_document_tree(
oldContent=oldContent,
newContent=newContent
)
self.refresh
return None |
def insert_pattern(self, pattern, index):
"""
Inserts given pattern into the Model.
:param pattern: Pattern.
:type pattern: unicode
:param index: Insertion index.
:type index: int
:return: Method success.
:rtype: bool
"""
LOGGER.debug("> Inserting '{0}' at '{1}' index.".format(pattern, index))
self.remove_pattern(pattern)
self.beginInsertRows(self.get_node_index(self.root_node), index, index)
pattern_node = PatternNode(name=pattern)
self.root_node.insert_child(pattern_node, index)
self.endInsertRows()
self.pattern_inserted.emit(pattern_node)
return True | Inserts given pattern into the Model.
:param pattern: Pattern.
:type pattern: unicode
:param index: Insertion index.
:type index: int
:return: Method success.
:rtype: bool | Below is the instruction that describes the task:
### Input:
Inserts given pattern into the Model.
:param pattern: Pattern.
:type pattern: unicode
:param index: Insertion index.
:type index: int
:return: Method success.
:rtype: bool
### Response:
def insert_pattern(self, pattern, index):
"""
Inserts given pattern into the Model.
:param pattern: Pattern.
:type pattern: unicode
:param index: Insertion index.
:type index: int
:return: Method success.
:rtype: bool
"""
LOGGER.debug("> Inserting '{0}' at '{1}' index.".format(pattern, index))
self.remove_pattern(pattern)
self.beginInsertRows(self.get_node_index(self.root_node), index, index)
pattern_node = PatternNode(name=pattern)
self.root_node.insert_child(pattern_node, index)
self.endInsertRows()
self.pattern_inserted.emit(pattern_node)
return True |
def temp_file_context(raw_dump_path, logger=None):
"""this contextmanager implements conditionally deleting a pathname
at the end of a context if the pathname indicates that it is a temp
file by having the word 'TEMPORARY' embedded in it."""
try:
yield raw_dump_path
finally:
if 'TEMPORARY' in raw_dump_path:
try:
os.unlink(raw_dump_path)
except OSError:
if logger is None:
logger = FakeLogger()
logger.warning(
'unable to delete %s. manual deletion is required.',
raw_dump_path,
exc_info=True
) | this contextmanager implements conditionally deleting a pathname
at the end of a context if the pathname indicates that it is a temp
file by having the word 'TEMPORARY' embedded in it. | Below is the instruction that describes the task:
### Input:
this contextmanager implements conditionally deleting a pathname
at the end of a context if the pathname indicates that it is a temp
file by having the word 'TEMPORARY' embedded in it.
### Response:
def temp_file_context(raw_dump_path, logger=None):
"""this contextmanager implements conditionally deleting a pathname
at the end of a context if the pathname indicates that it is a temp
file by having the word 'TEMPORARY' embedded in it."""
try:
yield raw_dump_path
finally:
if 'TEMPORARY' in raw_dump_path:
try:
os.unlink(raw_dump_path)
except OSError:
if logger is None:
logger = FakeLogger()
logger.warning(
'unable to delete %s. manual deletion is required.',
raw_dump_path,
exc_info=True
) |
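A short usage sketch, assuming the generator above is wrapped with contextlib.contextmanager as its yield-based body suggests; the path is an illustrative temp file.
import os
import tempfile

# The word TEMPORARY in the path is what marks the file for deletion on exit.
path = os.path.join(tempfile.gettempdir(), 'TEMPORARY.dump')
open(path, 'wb').close()
with temp_file_context(path) as dump_path:
    pass  # process dump_path here
print(os.path.exists(path))  # False: the finally block unlinked it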
def RobotFactory(path, parent=None):
'''Return an instance of SuiteFile, ResourceFile, SuiteFolder
Exactly which is returned depends on whether it's a file or
folder, and if a file, the contents of the file. If there is a
testcase table, this will return an instance of SuiteFile,
otherwise it will return an instance of ResourceFile.
'''
if os.path.isdir(path):
return SuiteFolder(path, parent)
else:
rf = RobotFile(path, parent)
for table in rf.tables:
if isinstance(table, TestcaseTable):
rf.__class__ = SuiteFile
return rf
rf.__class__ = ResourceFile
return rf | Return an instance of SuiteFile, ResourceFile, SuiteFolder
Exactly which is returned depends on whether it's a file or
folder, and if a file, the contents of the file. If there is a
testcase table, this will return an instance of SuiteFile,
otherwise it will return an instance of ResourceFile. | Below is the instruction that describes the task:
### Input:
Return an instance of SuiteFile, ResourceFile, SuiteFolder
Exactly which is returned depends on whether it's a file or
folder, and if a file, the contents of the file. If there is a
testcase table, this will return an instance of SuiteFile,
otherwise it will return an instance of ResourceFile.
### Response:
def RobotFactory(path, parent=None):
'''Return an instance of SuiteFile, ResourceFile, SuiteFolder
Exactly which is returned depends on whether it's a file or
folder, and if a file, the contents of the file. If there is a
testcase table, this will return an instance of SuiteFile,
otherwise it will return an instance of ResourceFile.
'''
if os.path.isdir(path):
return SuiteFolder(path, parent)
else:
rf = RobotFile(path, parent)
for table in rf.tables:
if isinstance(table, TestcaseTable):
rf.__class__ = SuiteFile
return rf
rf.__class__ = ResourceFile
return rf |
def get_singleton(cls, annotators=None, **options):
"""
Get or create a corenlp parser with the given annotator and options
Note: multiple parsers with the same annotator and different options
are not supported.
"""
if annotators is not None:
annotators = tuple(annotators)
if annotators not in cls._singletons:
cls._singletons[annotators] = cls(annotators, **options)
return cls._singletons[annotators] | Get or create a corenlp parser with the given annotator and options
Note: multiple parsers with the same annotator and different options
are not supported. | Below is the instruction that describes the task:
### Input:
Get or create a corenlp parser with the given annotator and options
Note: multiple parsers with the same annotator and different options
are not supported.
### Response:
def get_singleton(cls, annotators=None, **options):
"""
Get or create a corenlp parser with the given annotator and options
Note: multiple parsers with the same annotator and different options
are not supported.
"""
if annotators is not None:
annotators = tuple(annotators)
if annotators not in cls._singletons:
cls._singletons[annotators] = cls(annotators, **options)
return cls._singletons[annotators] |
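A hedged sketch of the caching behaviour; the parser class name is hypothetical and a reachable CoreNLP backend is assumed for construction.
# Identical annotator tuples share one parser; a different tuple builds a new one.
p1 = CoreNLPWrapper.get_singleton(('tokenize', 'ssplit'))
p2 = CoreNLPWrapper.get_singleton(('tokenize', 'ssplit'))
p3 = CoreNLPWrapper.get_singleton(('tokenize', 'ssplit', 'pos'))
assert p1 is p2
assert p1 is not p3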
def set_code_exprs(self, codes):
"""Convenience: sets all the code expressions at once."""
self.code_objs = dict()
self._codes = []
for code in codes:
self.append_code_expr(code) | Convenience: sets all the code expressions at once. | Below is the instruction that describes the task:
### Input:
Convenience: sets all the code expressions at once.
### Response:
def set_code_exprs(self, codes):
"""Convenience: sets all the code expressions at once."""
self.code_objs = dict()
self._codes = []
for code in codes:
self.append_code_expr(code) |
def _query_entities(self, table_name, filter=None, select=None, max_results=None,
marker=None, accept=TablePayloadFormat.JSON_MINIMAL_METADATA,
property_resolver=None, timeout=None, _context=None):
'''
Returns a list of entities under the specified table. Makes a single list
request to the service. Used internally by the query_entities method.
:param str table_name:
The name of the table to query.
:param str filter:
Returns only entities that satisfy the specified filter. Note that
no more than 15 discrete comparisons are permitted within a $filter
string. See http://msdn.microsoft.com/en-us/library/windowsazure/dd894031.aspx
for more information on constructing filters.
:param str select:
Returns only the desired properties of an entity from the set.
:param int max_results:
The maximum number of entities to return.
:param obj marker:
A dictionary which identifies the portion of the query to be
returned with the next query operation. The operation returns a
next_marker element within the response body if the list returned
was not complete. This value may then be used as a query parameter
in a subsequent call to request the next portion of the list of
table. The marker value is opaque to the client.
:param str accept:
Specifies the accepted content type of the response payload. See
:class:`~azure.storage.table.models.TablePayloadFormat` for possible
values.
:param property_resolver:
A function which given the partition key, row key, property name,
property value, and the property EdmType if returned by the service,
returns the EdmType of the property. Generally used if accept is set
to JSON_NO_METADATA.
:type property_resolver: func(pk, rk, prop_name, prop_value, service_edm_type)
:param int timeout:
The server timeout, expressed in seconds.
:return: A list of entities, potentially with a next_marker property.
:rtype: list(:class:`~azure.storage.table.models.Entity`)
'''
_validate_not_none('table_name', table_name)
_validate_not_none('accept', accept)
next_partition_key = None if marker is None else marker.get('nextpartitionkey')
next_row_key = None if marker is None else marker.get('nextrowkey')
request = HTTPRequest()
request.method = 'GET'
request.host_locations = self._get_host_locations(secondary=True)
request.path = '/' + _to_str(table_name) + '()'
request.headers = {'Accept': _to_str(accept)}
request.query = {
'$filter': _to_str(filter),
'$select': _to_str(select),
'$top': _int_to_str(max_results),
'NextPartitionKey': _to_str(next_partition_key),
'NextRowKey': _to_str(next_row_key),
'timeout': _int_to_str(timeout),
}
return self._perform_request(request, _convert_json_response_to_entities,
[property_resolver, self.require_encryption,
self.key_encryption_key, self.key_resolver_function],
operation_context=_context) | Returns a list of entities under the specified table. Makes a single list
request to the service. Used internally by the query_entities method.
:param str table_name:
The name of the table to query.
:param str filter:
Returns only entities that satisfy the specified filter. Note that
no more than 15 discrete comparisons are permitted within a $filter
string. See http://msdn.microsoft.com/en-us/library/windowsazure/dd894031.aspx
for more information on constructing filters.
:param str select:
Returns only the desired properties of an entity from the set.
:param int max_results:
The maximum number of entities to return.
:param obj marker:
A dictionary which identifies the portion of the query to be
returned with the next query operation. The operation returns a
next_marker element within the response body if the list returned
was not complete. This value may then be used as a query parameter
in a subsequent call to request the next portion of the list of
table. The marker value is opaque to the client.
:param str accept:
Specifies the accepted content type of the response payload. See
:class:`~azure.storage.table.models.TablePayloadFormat` for possible
values.
:param property_resolver:
A function which given the partition key, row key, property name,
property value, and the property EdmType if returned by the service,
returns the EdmType of the property. Generally used if accept is set
to JSON_NO_METADATA.
:type property_resolver: func(pk, rk, prop_name, prop_value, service_edm_type)
:param int timeout:
The server timeout, expressed in seconds.
:return: A list of entities, potentially with a next_marker property.
:rtype: list(:class:`~azure.storage.table.models.Entity`) | Below is the instruction that describes the task:
### Input:
Returns a list of entities under the specified table. Makes a single list
request to the service. Used internally by the query_entities method.
:param str table_name:
The name of the table to query.
:param str filter:
Returns only entities that satisfy the specified filter. Note that
no more than 15 discrete comparisons are permitted within a $filter
string. See http://msdn.microsoft.com/en-us/library/windowsazure/dd894031.aspx
for more information on constructing filters.
:param str select:
Returns only the desired properties of an entity from the set.
:param int max_results:
The maximum number of entities to return.
:param obj marker:
A dictionary which identifies the portion of the query to be
returned with the next query operation. The operation returns a
next_marker element within the response body if the list returned
was not complete. This value may then be used as a query parameter
in a subsequent call to request the next portion of the list of
table. The marker value is opaque to the client.
:param str accept:
Specifies the accepted content type of the response payload. See
:class:`~azure.storage.table.models.TablePayloadFormat` for possible
values.
:param property_resolver:
A function which given the partition key, row key, property name,
property value, and the property EdmType if returned by the service,
returns the EdmType of the property. Generally used if accept is set
to JSON_NO_METADATA.
:type property_resolver: func(pk, rk, prop_name, prop_value, service_edm_type)
:param int timeout:
The server timeout, expressed in seconds.
:return: A list of entities, potentially with a next_marker property.
:rtype: list(:class:`~azure.storage.table.models.Entity`)
### Response:
def _query_entities(self, table_name, filter=None, select=None, max_results=None,
marker=None, accept=TablePayloadFormat.JSON_MINIMAL_METADATA,
property_resolver=None, timeout=None, _context=None):
'''
Returns a list of entities under the specified table. Makes a single list
request to the service. Used internally by the query_entities method.
:param str table_name:
The name of the table to query.
:param str filter:
Returns only entities that satisfy the specified filter. Note that
no more than 15 discrete comparisons are permitted within a $filter
string. See http://msdn.microsoft.com/en-us/library/windowsazure/dd894031.aspx
for more information on constructing filters.
:param str select:
Returns only the desired properties of an entity from the set.
:param int max_results:
The maximum number of entities to return.
:param obj marker:
A dictionary which identifies the portion of the query to be
returned with the next query operation. The operation returns a
next_marker element within the response body if the list returned
was not complete. This value may then be used as a query parameter
in a subsequent call to request the next portion of the list of
table. The marker value is opaque to the client.
:param str accept:
Specifies the accepted content type of the response payload. See
:class:`~azure.storage.table.models.TablePayloadFormat` for possible
values.
:param property_resolver:
A function which given the partition key, row key, property name,
property value, and the property EdmType if returned by the service,
returns the EdmType of the property. Generally used if accept is set
to JSON_NO_METADATA.
:type property_resolver: func(pk, rk, prop_name, prop_value, service_edm_type)
:param int timeout:
The server timeout, expressed in seconds.
:return: A list of entities, potentially with a next_marker property.
:rtype: list(:class:`~azure.storage.table.models.Entity`)
'''
_validate_not_none('table_name', table_name)
_validate_not_none('accept', accept)
next_partition_key = None if marker is None else marker.get('nextpartitionkey')
next_row_key = None if marker is None else marker.get('nextrowkey')
request = HTTPRequest()
request.method = 'GET'
request.host_locations = self._get_host_locations(secondary=True)
request.path = '/' + _to_str(table_name) + '()'
request.headers = {'Accept': _to_str(accept)}
request.query = {
'$filter': _to_str(filter),
'$select': _to_str(select),
'$top': _int_to_str(max_results),
'NextPartitionKey': _to_str(next_partition_key),
'NextRowKey': _to_str(next_row_key),
'timeout': _int_to_str(timeout),
}
return self._perform_request(request, _convert_json_response_to_entities,
[property_resolver, self.require_encryption,
self.key_encryption_key, self.key_resolver_function],
operation_context=_context) |
def do_trace(self, arg):
"""
t - trace at the current assembly instruction
trace - trace at the current assembly instruction
"""
if arg: # XXX this check is to be removed
raise CmdError("too many arguments")
if self.lastEvent is None:
raise CmdError("no current thread set")
self.lastEvent.get_thread().set_tf()
return True | t - trace at the current assembly instruction
trace - trace at the current assembly instruction | Below is the instruction that describes the task:
### Input:
t - trace at the current assembly instruction
trace - trace at the current assembly instruction
### Response:
def do_trace(self, arg):
"""
t - trace at the current assembly instruction
trace - trace at the current assembly instruction
"""
if arg: # XXX this check is to be removed
raise CmdError("too many arguments")
if self.lastEvent is None:
raise CmdError("no current thread set")
self.lastEvent.get_thread().set_tf()
return True |
def parseExternalSubset(self, ExternalID, SystemID):
"""parse Markup declarations from an external subset [30]
extSubset ::= textDecl? extSubsetDecl [31] extSubsetDecl
::= (markupdecl | conditionalSect | PEReference | S) * """
libxml2mod.xmlParseExternalSubset(self._o, ExternalID, SystemID) | parse Markup declarations from an external subset [30]
extSubset ::= textDecl? extSubsetDecl [31] extSubsetDecl
::= (markupdecl | conditionalSect | PEReference | S) * | Below is the instruction that describes the task:
### Input:
parse Markup declarations from an external subset [30]
extSubset ::= textDecl? extSubsetDecl [31] extSubsetDecl
::= (markupdecl | conditionalSect | PEReference | S) *
### Response:
def parseExternalSubset(self, ExternalID, SystemID):
"""parse Markup declarations from an external subset [30]
extSubset ::= textDecl? extSubsetDecl [31] extSubsetDecl
::= (markupdecl | conditionalSect | PEReference | S) * """
libxml2mod.xmlParseExternalSubset(self._o, ExternalID, SystemID) |
def read(self, size=None):
""" Read `size` of bytes."""
if size is None:
return self.buf.read() + self.open_file.read()
contents = self.buf.read(size)
if len(contents) < size:
contents += self.open_file.read(size - len(contents))
return contents | Read `size` of bytes. | Below is the instruction that describes the task:
### Input:
Read `size` of bytes.
### Response:
def read(self, size=None):
""" Read `size` of bytes."""
if size is None:
return self.buf.read() + self.open_file.read()
contents = self.buf.read(size)
if len(contents) < size:
contents += self.open_file.read(size - len(contents))
return contents |
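A standalone illustration of the same drain-the-buffer-then-the-file logic, with two BytesIO objects standing in for self.buf and self.open_file.
from io import BytesIO

buf = BytesIO(b'abc')          # previously buffered bytes
open_file = BytesIO(b'defgh')  # the underlying open file
size = 5
contents = buf.read(size)
if len(contents) < size:
    contents += open_file.read(size - len(contents))
print(contents)  # b'abcde'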
def _preSynapticTRNCells(self, i, j):
"""
Given a relay cell at the given coordinate, return a list of the (x,y)
coordinates of all TRN cells that project to it. This assumes a 3X3 fan-in.
:param i, j: relay cell Coordinates
:return:
"""
xmin = max(i - 1, 0)
xmax = min(i + 2, self.trnWidth)
ymin = max(j - 1, 0)
ymax = min(j + 2, self.trnHeight)
trnCells = [
(x, y) for x in range(xmin, xmax) for y in range(ymin, ymax)
]
return trnCells | Given a relay cell at the given coordinate, return a list of the (x,y)
coordinates of all TRN cells that project to it. This assumes a 3X3 fan-in.
:param i, j: relay cell Coordinates
:return: | Below is the instruction that describes the task:
### Input:
Given a relay cell at the given coordinate, return a list of the (x,y)
coordinates of all TRN cells that project to it. This assumes a 3X3 fan-in.
:param i, j: relay cell Coordinates
:return:
### Response:
def _preSynapticTRNCells(self, i, j):
"""
Given a relay cell at the given coordinate, return a list of the (x,y)
coordinates of all TRN cells that project to it. This assumes a 3X3 fan-in.
:param i, j: relay cell Coordinates
:return:
"""
xmin = max(i - 1, 0)
xmax = min(i + 2, self.trnWidth)
ymin = max(j - 1, 0)
ymax = min(j + 2, self.trnHeight)
trnCells = [
(x, y) for x in range(xmin, xmax) for y in range(ymin, ymax)
]
return trnCells |
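A standalone sketch of the same clamped 3x3 fan-in, assuming an illustrative 4x4 TRN sheet; relay cells on an edge or corner simply receive a truncated neighbourhood.
trnWidth, trnHeight = 4, 4
i, j = 0, 3  # a relay cell in the corner of the sheet
trnCells = [(x, y)
            for x in range(max(i - 1, 0), min(i + 2, trnWidth))
            for y in range(max(j - 1, 0), min(j + 2, trnHeight))]
print(trnCells)  # [(0, 2), (0, 3), (1, 2), (1, 3)]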
def open(self, url):
"""
Open a document at the specified URL.
The document URL's needs not contain a protocol identifier, and if it
does, that protocol identifier is ignored when looking up the store
content.
Missing documents referenced using the internal 'suds' protocol are
reported by raising an exception. For other protocols, None is returned
instead.
@param url: A document URL.
@type url: str
@return: Document content or None if not found.
@rtype: bytes
"""
protocol, location = self.__split(url)
content = self.__find(location)
if protocol == 'suds' and content is None:
raise Exception, 'location "%s" not in document store' % location
return content | Open a document at the specified URL.
The document URL's needs not contain a protocol identifier, and if it
does, that protocol identifier is ignored when looking up the store
content.
Missing documents referenced using the internal 'suds' protocol are
reported by raising an exception. For other protocols, None is returned
instead.
@param url: A document URL.
@type url: str
@return: Document content or None if not found.
@rtype: bytes | Below is the instruction that describes the task:
### Input:
Open a document at the specified URL.
The document URL's needs not contain a protocol identifier, and if it
does, that protocol identifier is ignored when looking up the store
content.
Missing documents referenced using the internal 'suds' protocol are
reported by raising an exception. For other protocols, None is returned
instead.
@param url: A document URL.
@type url: str
@return: Document content or None if not found.
@rtype: bytes
### Response:
def open(self, url):
"""
Open a document at the specified URL.
The document URL's needs not contain a protocol identifier, and if it
does, that protocol identifier is ignored when looking up the store
content.
Missing documents referenced using the internal 'suds' protocol are
reported by raising an exception. For other protocols, None is returned
instead.
@param url: A document URL.
@type url: str
@return: Document content or None if not found.
@rtype: bytes
"""
protocol, location = self.__split(url)
content = self.__find(location)
if protocol == 'suds' and content is None:
raise Exception, 'location "%s" not in document store' % location
return content |
def _expand(dat, counts, start, end):
"""
expand the same counts from start to end
"""
for pos in range(start, end):
for s in counts:
dat[s][pos] += counts[s]
return dat | expand the same counts from start to end | Below is the instruction that describes the task:
### Input:
expand the same counts from start to end
### Response:
def _expand(dat, counts, start, end):
"""
expand the same counts from start to end
"""
for pos in range(start, end):
for s in counts:
dat[s][pos] += counts[s]
return dat |
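A tiny worked example of the function above: two samples whose per-sample counts are added over positions 1 through 3.
dat = {'s1': [0, 0, 0, 0, 0], 's2': [0, 0, 0, 0, 0]}
counts = {'s1': 2, 's2': 1}
print(_expand(dat, counts, 1, 4))
# {'s1': [0, 2, 2, 2, 0], 's2': [0, 1, 1, 1, 0]}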
def _abort_all_transfers(self, exception):
"""
Abort any ongoing transfers and clear all buffers
"""
pending_reads = len(self._commands_to_read)
# invalidate _transfer_list
for transfer in self._transfer_list:
transfer.add_error(exception)
# clear all deferred buffers
self._init_deferred_buffers()
# finish all pending reads and ignore the data
# Only do this if the error is a tranfer error.
# Otherwise this could cause another exception
if isinstance(exception, DAPAccessIntf.TransferError):
for _ in range(pending_reads):
self._interface.read() | Abort any ongoing transfers and clear all buffers | Below is the instruction that describes the task:
### Input:
Abort any ongoing transfers and clear all buffers
### Response:
def _abort_all_transfers(self, exception):
"""
Abort any ongoing transfers and clear all buffers
"""
pending_reads = len(self._commands_to_read)
# invalidate _transfer_list
for transfer in self._transfer_list:
transfer.add_error(exception)
# clear all deferred buffers
self._init_deferred_buffers()
# finish all pending reads and ignore the data
# Only do this if the error is a tranfer error.
# Otherwise this could cause another exception
if isinstance(exception, DAPAccessIntf.TransferError):
for _ in range(pending_reads):
self._interface.read() |
def build_ast(expression, debug = False):
"""build an AST from an Excel formula expression in reverse polish notation"""
#use a directed graph to store the tree
G = DiGraph()
stack = []
for n in expression:
# Since the graph does not maintain the order of adding nodes/edges
# add an extra attribute 'pos' so we can always sort to the correct order
if isinstance(n,OperatorNode):
if n.ttype == "operator-infix":
arg2 = stack.pop()
arg1 = stack.pop()
# Hack to write the name of sheet in 2argument address
if(n.tvalue == ':'):
if '!' in arg1.tvalue and arg2.ttype == 'operand' and '!' not in arg2.tvalue:
arg2.tvalue = arg1.tvalue.split('!')[0] + '!' + arg2.tvalue
G.add_node(arg1,pos = 1)
G.add_node(arg2,pos = 2)
G.add_edge(arg1, n)
G.add_edge(arg2, n)
else:
arg1 = stack.pop()
G.add_node(arg1,pos = 1)
G.add_edge(arg1, n)
elif isinstance(n,FunctionNode):
args = []
for _ in range(n.num_args):
try:
args.append(stack.pop())
except:
raise Exception()
#try:
# args = [stack.pop() for _ in range(n.num_args)]
#except:
# print 'STACK', stack, type(n)
# raise Exception('prut')
args.reverse()
for i,a in enumerate(args):
G.add_node(a,pos = i)
G.add_edge(a,n)
else:
G.add_node(n,pos=0)
stack.append(n)
return G,stack.pop() | build an AST from an Excel formula expression in reverse polish notation | Below is the instruction that describes the task:
### Input:
build an AST from an Excel formula expression in reverse polish notation
### Response:
def build_ast(expression, debug = False):
"""build an AST from an Excel formula expression in reverse polish notation"""
#use a directed graph to store the tree
G = DiGraph()
stack = []
for n in expression:
# Since the graph does not maintain the order of adding nodes/edges
# add an extra attribute 'pos' so we can always sort to the correct order
if isinstance(n,OperatorNode):
if n.ttype == "operator-infix":
arg2 = stack.pop()
arg1 = stack.pop()
# Hack to write the name of sheet in 2argument address
if(n.tvalue == ':'):
if '!' in arg1.tvalue and arg2.ttype == 'operand' and '!' not in arg2.tvalue:
arg2.tvalue = arg1.tvalue.split('!')[0] + '!' + arg2.tvalue
G.add_node(arg1,pos = 1)
G.add_node(arg2,pos = 2)
G.add_edge(arg1, n)
G.add_edge(arg2, n)
else:
arg1 = stack.pop()
G.add_node(arg1,pos = 1)
G.add_edge(arg1, n)
elif isinstance(n,FunctionNode):
args = []
for _ in range(n.num_args):
try:
args.append(stack.pop())
except:
raise Exception()
#try:
# args = [stack.pop() for _ in range(n.num_args)]
#except:
# print 'STACK', stack, type(n)
# raise Exception('prut')
args.reverse()
for i,a in enumerate(args):
G.add_node(a,pos = i)
G.add_edge(a,n)
else:
G.add_node(n,pos=0)
stack.append(n)
return G,stack.pop() |
def translate_url(context, language):
"""
Translates the current URL for the given language code, eg:
{% translate_url de %}
"""
try:
request = context["request"]
except KeyError:
return ""
view = resolve(request.path)
current_language = translation.get_language()
translation.activate(language)
try:
url = reverse(view.func, args=view.args, kwargs=view.kwargs)
except NoReverseMatch:
try:
url_name = (view.url_name if not view.namespace
else '%s:%s' % (view.namespace, view.url_name))
url = reverse(url_name, args=view.args, kwargs=view.kwargs)
except NoReverseMatch:
url_name = "admin:" + view.url_name
url = reverse(url_name, args=view.args, kwargs=view.kwargs)
translation.activate(current_language)
if context['request'].META["QUERY_STRING"]:
url += "?" + context['request'].META["QUERY_STRING"]
return url | Translates the current URL for the given language code, eg:
{% translate_url de %} | Below is the instruction that describes the task:
### Input:
Translates the current URL for the given language code, eg:
{% translate_url de %}
### Response:
def translate_url(context, language):
"""
Translates the current URL for the given language code, eg:
{% translate_url de %}
"""
try:
request = context["request"]
except KeyError:
return ""
view = resolve(request.path)
current_language = translation.get_language()
translation.activate(language)
try:
url = reverse(view.func, args=view.args, kwargs=view.kwargs)
except NoReverseMatch:
try:
url_name = (view.url_name if not view.namespace
else '%s:%s' % (view.namespace, view.url_name))
url = reverse(url_name, args=view.args, kwargs=view.kwargs)
except NoReverseMatch:
url_name = "admin:" + view.url_name
url = reverse(url_name, args=view.args, kwargs=view.kwargs)
translation.activate(current_language)
if context['request'].META["QUERY_STRING"]:
url += "?" + context['request'].META["QUERY_STRING"]
return url |
def commit(self):
"""Makes a ``ReadModifyWriteRow`` API request.
This commits modifications made by :meth:`append_cell_value` and
:meth:`increment_cell_value`. If no modifications were made, makes
no API request and just returns ``{}``.
Modifies a row atomically, reading the latest existing
timestamp / value from the specified columns and writing a new value by
appending / incrementing. The new cell created uses either the current
server time or the highest timestamp of a cell in that column (if it
exceeds the server time).
After committing the accumulated mutations, resets the local mutations.
For example:
.. literalinclude:: snippets_table.py
:start-after: [START bigtable_row_commit]
:end-before: [END bigtable_row_commit]
:rtype: dict
:returns: The new contents of all modified cells. Returned as a
dictionary of column families, each of which holds a
dictionary of columns. Each column contains a list of cells
modified. Each cell is represented with a two-tuple with the
value (in bytes) and the timestamp for the cell.
:raises: :class:`ValueError <exceptions.ValueError>` if the number of
mutations exceeds the :data:`MAX_MUTATIONS`.
"""
num_mutations = len(self._rule_pb_list)
if num_mutations == 0:
return {}
if num_mutations > MAX_MUTATIONS:
raise ValueError(
"%d total append mutations exceed the maximum "
"allowable %d." % (num_mutations, MAX_MUTATIONS)
)
data_client = self._table._instance._client.table_data_client
row_response = data_client.read_modify_write_row(
table_name=self._table.name, row_key=self._row_key, rules=self._rule_pb_list
)
# Reset modifications after commit-ing request.
self.clear()
# NOTE: We expect row_response.key == self._row_key but don't check.
return _parse_rmw_row_response(row_response) | Makes a ``ReadModifyWriteRow`` API request.
This commits modifications made by :meth:`append_cell_value` and
:meth:`increment_cell_value`. If no modifications were made, makes
no API request and just returns ``{}``.
Modifies a row atomically, reading the latest existing
timestamp / value from the specified columns and writing a new value by
appending / incrementing. The new cell created uses either the current
server time or the highest timestamp of a cell in that column (if it
exceeds the server time).
After committing the accumulated mutations, resets the local mutations.
For example:
.. literalinclude:: snippets_table.py
:start-after: [START bigtable_row_commit]
:end-before: [END bigtable_row_commit]
:rtype: dict
:returns: The new contents of all modified cells. Returned as a
dictionary of column families, each of which holds a
dictionary of columns. Each column contains a list of cells
modified. Each cell is represented with a two-tuple with the
value (in bytes) and the timestamp for the cell.
:raises: :class:`ValueError <exceptions.ValueError>` if the number of
mutations exceeds the :data:`MAX_MUTATIONS`. | Below is the instruction that describes the task:
### Input:
Makes a ``ReadModifyWriteRow`` API request.
This commits modifications made by :meth:`append_cell_value` and
:meth:`increment_cell_value`. If no modifications were made, makes
no API request and just returns ``{}``.
Modifies a row atomically, reading the latest existing
timestamp / value from the specified columns and writing a new value by
appending / incrementing. The new cell created uses either the current
server time or the highest timestamp of a cell in that column (if it
exceeds the server time).
After committing the accumulated mutations, resets the local mutations.
For example:
.. literalinclude:: snippets_table.py
:start-after: [START bigtable_row_commit]
:end-before: [END bigtable_row_commit]
:rtype: dict
:returns: The new contents of all modified cells. Returned as a
dictionary of column families, each of which holds a
dictionary of columns. Each column contains a list of cells
modified. Each cell is represented with a two-tuple with the
value (in bytes) and the timestamp for the cell.
:raises: :class:`ValueError <exceptions.ValueError>` if the number of
mutations exceeds the :data:`MAX_MUTATIONS`.
### Response:
def commit(self):
"""Makes a ``ReadModifyWriteRow`` API request.
This commits modifications made by :meth:`append_cell_value` and
:meth:`increment_cell_value`. If no modifications were made, makes
no API request and just returns ``{}``.
Modifies a row atomically, reading the latest existing
timestamp / value from the specified columns and writing a new value by
appending / incrementing. The new cell created uses either the current
server time or the highest timestamp of a cell in that column (if it
exceeds the server time).
After committing the accumulated mutations, resets the local mutations.
For example:
.. literalinclude:: snippets_table.py
:start-after: [START bigtable_row_commit]
:end-before: [END bigtable_row_commit]
:rtype: dict
:returns: The new contents of all modified cells. Returned as a
dictionary of column families, each of which holds a
dictionary of columns. Each column contains a list of cells
modified. Each cell is represented with a two-tuple with the
value (in bytes) and the timestamp for the cell.
:raises: :class:`ValueError <exceptions.ValueError>` if the number of
mutations exceeds the :data:`MAX_MUTATIONS`.
"""
num_mutations = len(self._rule_pb_list)
if num_mutations == 0:
return {}
if num_mutations > MAX_MUTATIONS:
raise ValueError(
"%d total append mutations exceed the maximum "
"allowable %d." % (num_mutations, MAX_MUTATIONS)
)
data_client = self._table._instance._client.table_data_client
row_response = data_client.read_modify_write_row(
table_name=self._table.name, row_key=self._row_key, rules=self._rule_pb_list
)
# Reset modifications after commit-ing request.
self.clear()
# NOTE: We expect row_response.key == self._row_key but don't check.
return _parse_rmw_row_response(row_response) |
def most_recent_submission(project, group):
"""Return the most recent submission for the user and project id."""
return (Submission.query_by(project=project, group=group)
.order_by(Submission.created_at.desc()).first()) | Return the most recent submission for the user and project id. | Below is the instruction that describes the task:
### Input:
Return the most recent submission for the user and project id.
### Response:
def most_recent_submission(project, group):
"""Return the most recent submission for the user and project id."""
return (Submission.query_by(project=project, group=group)
.order_by(Submission.created_at.desc()).first()) |
def bkg_subtract(self, analyte, bkg, ind=None, focus_stage='despiked'):
"""
Subtract provided background from signal (focus stage).
Results is saved in new 'bkgsub' focus stage
Returns
-------
None
"""
if 'bkgsub' not in self.data.keys():
self.data['bkgsub'] = Bunch()
self.data['bkgsub'][analyte] = self.data[focus_stage][analyte] - bkg
if ind is not None:
self.data['bkgsub'][analyte][ind] = np.nan
return | Subtract provided background from signal (focus stage).
Results is saved in new 'bkgsub' focus stage
Returns
-------
None | Below is the instruction that describes the task:
### Input:
Subtract provided background from signal (focus stage).
Results is saved in new 'bkgsub' focus stage
Returns
-------
None
### Response:
def bkg_subtract(self, analyte, bkg, ind=None, focus_stage='despiked'):
"""
Subtract provided background from signal (focus stage).
Results is saved in new 'bkgsub' focus stage
Returns
-------
None
"""
if 'bkgsub' not in self.data.keys():
self.data['bkgsub'] = Bunch()
self.data['bkgsub'][analyte] = self.data[focus_stage][analyte] - bkg
if ind is not None:
self.data['bkgsub'][analyte][ind] = np.nan
return |
def sink_update(
self, project, sink_name, filter_, destination, unique_writer_identity=False
):
"""API call: update a sink resource.
See
https://cloud.google.com/logging/docs/reference/v2/rest/v2/projects.sinks/update
:type project: str
:param project: ID of the project containing the sink.
:type sink_name: str
:param sink_name: the name of the sink
:type filter_: str
:param filter_: the advanced logs filter expression defining the
entries exported by the sink.
:type destination: str
:param destination: destination URI for the entries exported by
the sink.
:type unique_writer_identity: bool
:param unique_writer_identity: (Optional) determines the kind of
IAM identity returned as
writer_identity in the new sink.
:rtype: dict
:returns: The returned (updated) resource.
"""
target = "/projects/%s/sinks/%s" % (project, sink_name)
data = {"name": sink_name, "filter": filter_, "destination": destination}
query_params = {"uniqueWriterIdentity": unique_writer_identity}
return self.api_request(
method="PUT", path=target, query_params=query_params, data=data
) | API call: update a sink resource.
See
https://cloud.google.com/logging/docs/reference/v2/rest/v2/projects.sinks/update
:type project: str
:param project: ID of the project containing the sink.
:type sink_name: str
:param sink_name: the name of the sink
:type filter_: str
:param filter_: the advanced logs filter expression defining the
entries exported by the sink.
:type destination: str
:param destination: destination URI for the entries exported by
the sink.
:type unique_writer_identity: bool
:param unique_writer_identity: (Optional) determines the kind of
IAM identity returned as
writer_identity in the new sink.
:rtype: dict
:returns: The returned (updated) resource. | Below is the instruction that describes the task:
### Input:
API call: update a sink resource.
See
https://cloud.google.com/logging/docs/reference/v2/rest/v2/projects.sinks/update
:type project: str
:param project: ID of the project containing the sink.
:type sink_name: str
:param sink_name: the name of the sink
:type filter_: str
:param filter_: the advanced logs filter expression defining the
entries exported by the sink.
:type destination: str
:param destination: destination URI for the entries exported by
the sink.
:type unique_writer_identity: bool
:param unique_writer_identity: (Optional) determines the kind of
IAM identity returned as
writer_identity in the new sink.
:rtype: dict
:returns: The returned (updated) resource.
### Response:
def sink_update(
self, project, sink_name, filter_, destination, unique_writer_identity=False
):
"""API call: update a sink resource.
See
https://cloud.google.com/logging/docs/reference/v2/rest/v2/projects.sinks/update
:type project: str
:param project: ID of the project containing the sink.
:type sink_name: str
:param sink_name: the name of the sink
:type filter_: str
:param filter_: the advanced logs filter expression defining the
entries exported by the sink.
:type destination: str
:param destination: destination URI for the entries exported by
the sink.
:type unique_writer_identity: bool
:param unique_writer_identity: (Optional) determines the kind of
IAM identity returned as
writer_identity in the new sink.
:rtype: dict
:returns: The returned (updated) resource.
"""
target = "/projects/%s/sinks/%s" % (project, sink_name)
data = {"name": sink_name, "filter": filter_, "destination": destination}
query_params = {"uniqueWriterIdentity": unique_writer_identity}
return self.api_request(
method="PUT", path=target, query_params=query_params, data=data
) |
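A minimal usage sketch for `sink_update`, assuming `api` is an already-authenticated instance of the class this method belongs to; the project ID, sink name, and BigQuery destination below are placeholders.
# Illustrative only: identifiers below are placeholders, not real resources.
updated = api.sink_update(
    project='my-project',
    sink_name='error-sink',
    filter_='severity>=ERROR',
    destination='bigquery.googleapis.com/projects/my-project/datasets/error_logs',
    unique_writer_identity=True,
)
print(updated.get('writerIdentity'))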
def parse_authentication_request(self, request_body, http_headers=None):
# type: (str, Optional[Mapping[str, str]]) -> oic.oic.message.AuthorizationRequest
"""
Parses and verifies an authentication request.
:param request_body: urlencoded authentication request
:param http_headers: http headers
"""
auth_req = AuthorizationRequest().deserialize(request_body)
for validator in self.authentication_request_validators:
validator(auth_req)
logger.debug('parsed authentication_request: %s', auth_req)
return auth_req | Parses and verifies an authentication request.
:param request_body: urlencoded authentication request
:param http_headers: http headers | Below is the the instruction that describes the task:
### Input:
Parses and verifies an authentication request.
:param request_body: urlencoded authentication request
:param http_headers: http headers
### Response:
def parse_authentication_request(self, request_body, http_headers=None):
# type: (str, Optional[Mapping[str, str]]) -> oic.oic.message.AuthorizationRequest
"""
Parses and verifies an authentication request.
:param request_body: urlencoded authentication request
:param http_headers: http headers
"""
auth_req = AuthorizationRequest().deserialize(request_body)
for validator in self.authentication_request_validators:
validator(auth_req)
logger.debug('parsed authentication_request: %s', auth_req)
return auth_req |
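A minimal usage sketch for `parse_authentication_request`, assuming `provider` is an instance of the class that owns this method with its validators already registered; the client ID and redirect URI are placeholders.
# Illustrative only: the query string below is a fabricated example request.
request_body = ('response_type=code&client_id=client1&scope=openid'
                '&redirect_uri=https%3A%2F%2Fclient.example.com%2Fcb')
auth_req = provider.parse_authentication_request(request_body)
print(auth_req['client_id'], auth_req['scope'])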
def walk_dir(path, args, state):
"""
    Check all files in `path' to see if there are any requests that
we should send out on the bus.
"""
if args.debug:
sys.stderr.write("Walking %s\n" % path)
for root, _dirs, files in os.walk(path):
if not safe_process_files(root, files, args, state):
return False
if state.should_quit():
return False
    return True | Check all files in `path' to see if there are any requests that
we should send out on the bus. | Below is the the instruction that describes the task:
### Input:
Check all files in `path' to see if there are any requests that
we should send out on the bus.
### Response:
def walk_dir(path, args, state):
"""
    Check all files in `path' to see if there are any requests that
we should send out on the bus.
"""
if args.debug:
sys.stderr.write("Walking %s\n" % path)
for root, _dirs, files in os.walk(path):
if not safe_process_files(root, files, args, state):
return False
if state.should_quit():
return False
return True |
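A minimal usage sketch for `walk_dir`, assuming `args` (with a `debug` attribute) and `state` are the objects the surrounding daemon would normally build; the spool path is a placeholder.
# Illustrative only: `args` and `state` are hypothetical daemon objects.
ok = walk_dir('/var/spool/requests', args, state)
if not ok:
    print('walk aborted: a file failed to process or a quit was requested')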
def train_set_producer(socket, train_archive, patch_archive, wnid_map):
"""Load/send images from the training set TAR file or patch images.
Parameters
----------
socket : :class:`zmq.Socket`
PUSH socket on which to send loaded images.
train_archive : str or file-like object
Filename or file handle for the TAR archive of training images.
patch_archive : str or file-like object
Filename or file handle for the TAR archive of patch images.
wnid_map : dict
A dictionary that maps WordNet IDs to 0-based class indices.
Used to decode the filenames of the inner TAR files.
"""
patch_images = extract_patch_images(patch_archive, 'train')
num_patched = 0
with tar_open(train_archive) as tar:
for inner_tar_info in tar:
with tar_open(tar.extractfile(inner_tar_info.name)) as inner:
wnid = inner_tar_info.name.split('.')[0]
class_index = wnid_map[wnid]
filenames = sorted(info.name for info in inner
if info.isfile())
images_gen = (load_from_tar_or_patch(inner, filename,
patch_images)
for filename in filenames)
pathless_filenames = (os.path.split(fn)[-1]
for fn in filenames)
stream = equizip(pathless_filenames, images_gen)
for image_fn, (image_data, patched) in stream:
if patched:
num_patched += 1
socket.send_pyobj((image_fn, class_index), zmq.SNDMORE)
socket.send(image_data)
if num_patched != len(patch_images):
raise ValueError('not all patch images were used') | Load/send images from the training set TAR file or patch images.
Parameters
----------
socket : :class:`zmq.Socket`
PUSH socket on which to send loaded images.
train_archive : str or file-like object
Filename or file handle for the TAR archive of training images.
patch_archive : str or file-like object
Filename or file handle for the TAR archive of patch images.
wnid_map : dict
A dictionary that maps WordNet IDs to 0-based class indices.
Used to decode the filenames of the inner TAR files. | Below is the the instruction that describes the task:
### Input:
Load/send images from the training set TAR file or patch images.
Parameters
----------
socket : :class:`zmq.Socket`
PUSH socket on which to send loaded images.
train_archive : str or file-like object
Filename or file handle for the TAR archive of training images.
patch_archive : str or file-like object
Filename or file handle for the TAR archive of patch images.
wnid_map : dict
A dictionary that maps WordNet IDs to 0-based class indices.
Used to decode the filenames of the inner TAR files.
### Response:
def train_set_producer(socket, train_archive, patch_archive, wnid_map):
"""Load/send images from the training set TAR file or patch images.
Parameters
----------
socket : :class:`zmq.Socket`
PUSH socket on which to send loaded images.
train_archive : str or file-like object
Filename or file handle for the TAR archive of training images.
patch_archive : str or file-like object
Filename or file handle for the TAR archive of patch images.
wnid_map : dict
A dictionary that maps WordNet IDs to 0-based class indices.
Used to decode the filenames of the inner TAR files.
"""
patch_images = extract_patch_images(patch_archive, 'train')
num_patched = 0
with tar_open(train_archive) as tar:
for inner_tar_info in tar:
with tar_open(tar.extractfile(inner_tar_info.name)) as inner:
wnid = inner_tar_info.name.split('.')[0]
class_index = wnid_map[wnid]
filenames = sorted(info.name for info in inner
if info.isfile())
images_gen = (load_from_tar_or_patch(inner, filename,
patch_images)
for filename in filenames)
pathless_filenames = (os.path.split(fn)[-1]
for fn in filenames)
stream = equizip(pathless_filenames, images_gen)
for image_fn, (image_data, patched) in stream:
if patched:
num_patched += 1
socket.send_pyobj((image_fn, class_index), zmq.SNDMORE)
socket.send(image_data)
if num_patched != len(patch_images):
raise ValueError('not all patch images were used') |
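A minimal usage sketch for `train_set_producer`, assuming a PULL consumer is connected on the other end of the socket; the archive filenames and the two-entry WordNet-ID map are placeholders (the map must cover every class TAR inside the training archive).
import zmq

# Illustrative only: filenames and wnid_map are placeholders for a tiny archive.
context = zmq.Context()
socket = context.socket(zmq.PUSH)
socket.bind('tcp://127.0.0.1:5557')
wnid_map = {'n01440764': 0, 'n01443537': 1}
train_set_producer(socket,
                   'ILSVRC2012_img_train.tar',        # assumed training archive
                   'ILSVRC2012_img_train_patch.tar',  # assumed patch archive
                   wnid_map)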