code (string, lengths 75-104k) | docstring (string, lengths 1-46.9k) | text (string, lengths 164-112k) |
---|---|---|
def calculate_splits(sdf_file, split_size):
"""Retrieve
"""
counts = _sdfstats(sdf_file)["counts"]
splits = []
cur = 0
for i in range(counts // split_size + (0 if counts % split_size == 0 else 1)):
splits.append("%s-%s" % (cur, min(counts, cur + split_size)))
cur += split_size
return splits | Retrieve | Below is the instruction that describes the task:
### Input:
Retrieve
### Response:
def calculate_splits(sdf_file, split_size):
"""Retrieve
"""
counts = _sdfstats(sdf_file)["counts"]
splits = []
cur = 0
for i in range(counts // split_size + (0 if counts % split_size == 0 else 1)):
splits.append("%s-%s" % (cur, min(counts, cur + split_size)))
cur += split_size
return splits |
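The split arithmetic in `calculate_splits` can be exercised on its own. Below is a minimal, self-contained sketch with `_sdfstats` stubbed out (the real helper inspects the SDF file; the stub just returns a fixed record count), showing how a count of 10 with a split size of 4 yields three half-open ranges:

```python
# Standalone sketch of the splitting logic above; _sdfstats is a stub here.
def _sdfstats(sdf_file):
    return {"counts": 10}  # pretend the SDF file holds 10 records

def calculate_splits(sdf_file, split_size):
    counts = _sdfstats(sdf_file)["counts"]
    splits = []
    cur = 0
    # Add one extra chunk when counts is not an exact multiple of split_size.
    for _ in range(counts // split_size + (0 if counts % split_size == 0 else 1)):
        splits.append("%s-%s" % (cur, min(counts, cur + split_size)))
        cur += split_size
    return splits

print(calculate_splits("example.sdf", 4))  # ['0-4', '4-8', '8-10']
```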
def _rapply(input_layer, operation, *op_args, **op_kwargs):
"""Applies the given operation to this after expanding op_args.
Args:
input_layer: The input layer for this op.
operation: An operation that takes a tensor and the supplied args.
*op_args: Extra arguments for operation.
**op_kwargs: Keyword arguments for the operation.
Returns:
A new layer with operation applied.
"""
op_args = list(op_args)
op_args.append(input_layer.tensor)
return input_layer.with_tensor(operation(*op_args, **op_kwargs)) | Applies the given operation to this after expanding op_args.
Args:
input_layer: The input layer for this op.
operation: An operation that takes a tensor and the supplied args.
*op_args: Extra arguments for operation.
**op_kwargs: Keyword arguments for the operation.
Returns:
A new layer with operation applied. | Below is the instruction that describes the task:
### Input:
Applies the given operation to this after expanding op_args.
Args:
input_layer: The input layer for this op.
operation: An operation that takes a tensor and the supplied args.
*op_args: Extra arguments for operation.
**op_kwargs: Keyword arguments for the operation.
Returns:
A new layer with operation applied.
### Response:
def _rapply(input_layer, operation, *op_args, **op_kwargs):
"""Applies the given operation to this after expanding op_args.
Args:
input_layer: The input layer for this op.
operation: An operation that takes a tensor and the supplied args.
*op_args: Extra arguments for operation.
**op_kwargs: Keyword arguments for the operation.
Returns:
A new layer with operation applied.
"""
op_args = list(op_args)
op_args.append(input_layer.tensor)
return input_layer.with_tensor(operation(*op_args, **op_kwargs)) |
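The essential move in `_rapply` is that the layer's own tensor is appended as the last positional argument before the operation is called. A self-contained sketch of that pattern, using a hypothetical `FakeLayer` stand-in rather than PrettyTensor's real classes:

```python
# Sketch of the _rapply pattern: the wrapped tensor always becomes the final
# positional argument of the operation. FakeLayer is a hypothetical stand-in.
class FakeLayer(object):
    def __init__(self, tensor):
        self.tensor = tensor

    def with_tensor(self, tensor):
        return FakeLayer(tensor)

def rapply(input_layer, operation, *op_args, **op_kwargs):
    op_args = list(op_args)
    op_args.append(input_layer.tensor)
    return input_layer.with_tensor(operation(*op_args, **op_kwargs))

layer = FakeLayer(3)
result = rapply(layer, lambda a, b: a - b, 10)
print(result.tensor)  # 7: the layer's tensor (3) was passed last, so 10 - 3
```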
def _generic_convert_string(v, from_type, to_type, encoding):
"""
Generic method to convert any argument type (string type, list, set, tuple, dict) to an equivalent,
with string values converted to given 'to_type' (str or unicode).
This method must be used with Python 2 interpreter only.
:param v: The value to convert
:param from_type: The original string type to convert
:param to_type: The target string type to convert to
:param encoding: When
:return:
"""
assert six.PY2, "This function should be used with Python 2 only"
assert from_type != to_type
if from_type == six.binary_type and isinstance(v, six.binary_type):
return six.text_type(v, encoding)
elif from_type == six.text_type and isinstance(v, six.text_type):
return v.encode(encoding)
elif isinstance(v, (list, tuple, set)):
return type(v)([_generic_convert_string(element, from_type, to_type, encoding) for element in v])
elif isinstance(v, dict):
return {k: _generic_convert_string(v, from_type, to_type, encoding) for k, v in v.iteritems()}
return v | Generic method to convert any argument type (string type, list, set, tuple, dict) to an equivalent,
with string values converted to given 'to_type' (str or unicode).
This method must be used with Python 2 interpreter only.
:param v: The value to convert
:param from_type: The original string type to convert
:param to_type: The target string type to convert to
:param encoding: When
:return: | Below is the instruction that describes the task:
### Input:
Generic method to convert any argument type (string type, list, set, tuple, dict) to an equivalent,
with string values converted to given 'to_type' (str or unicode).
This method must be used with Python 2 interpreter only.
:param v: The value to convert
:param from_type: The original string type to convert
:param to_type: The target string type to convert to
:param encoding: When
:return:
### Response:
def _generic_convert_string(v, from_type, to_type, encoding):
"""
Generic method to convert any argument type (string type, list, set, tuple, dict) to an equivalent,
with string values converted to given 'to_type' (str or unicode).
This method must be used with Python 2 interpreter only.
:param v: The value to convert
:param from_type: The original string type to convert
:param to_type: The target string type to convert to
:param encoding: When
:return:
"""
assert six.PY2, "This function should be used with Python 2 only"
assert from_type != to_type
if from_type == six.binary_type and isinstance(v, six.binary_type):
return six.text_type(v, encoding)
elif from_type == six.text_type and isinstance(v, six.text_type):
return v.encode(encoding)
elif isinstance(v, (list, tuple, set)):
return type(v)([_generic_convert_string(element, from_type, to_type, encoding) for element in v])
elif isinstance(v, dict):
return {k: _generic_convert_string(v, from_type, to_type, encoding) for k, v in v.iteritems()}
return v |
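The recursion in `_generic_convert_string` carries over directly to Python 3 without `six`. A minimal sketch (hypothetical name, not part of the original module) that decodes `bytes` values while rebuilding lists, tuples, sets, and dicts of the same type; as in the original, dictionary keys are left untouched:

```python
# Python 3 sketch of the same recursive container walk: bytes -> str,
# containers rebuilt with the same type, dict keys left as-is.
def decode_nested(value, encoding="utf-8"):
    if isinstance(value, bytes):
        return value.decode(encoding)
    if isinstance(value, (list, tuple, set)):
        return type(value)(decode_nested(v, encoding) for v in value)
    if isinstance(value, dict):
        return {k: decode_nested(v, encoding) for k, v in value.items()}
    return value

print(decode_nested({"names": [b"alice", b"bob"], "count": 2}))
# {'names': ['alice', 'bob'], 'count': 2}
```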
def _generate_enumerated_subtypes_tag_mapping(self, ns, data_type):
"""
Generates attributes needed for serializing and deserializing structs
with enumerated subtypes. These assignments are made after all the
Python class definitions to ensure that all references exist.
"""
assert data_type.has_enumerated_subtypes()
# Generate _tag_to_subtype_ attribute: Map from string type tag to
# the validator of the referenced subtype. Used on deserialization
# to look up the subtype for a given tag.
tag_to_subtype_items = []
for tags, subtype in data_type.get_all_subtypes_with_tags():
tag_to_subtype_items.append("{}: {}".format(
tags,
generate_validator_constructor(ns, subtype)))
self.generate_multiline_list(
tag_to_subtype_items,
before='{}._tag_to_subtype_ = '.format(data_type.name),
delim=('{', '}'),
compact=False)
# Generate _pytype_to_tag_and_subtype_: Map from Python class to a
# tuple of (type tag, subtype). Used on serialization to lookup how a
# class should be encoded based on the root struct's enumerated
# subtypes.
items = []
for tag, subtype in data_type.get_all_subtypes_with_tags():
items.append("{0}: ({1}, {2})".format(
fmt_class(subtype.name),
tag,
generate_validator_constructor(ns, subtype)))
self.generate_multiline_list(
items,
before='{}._pytype_to_tag_and_subtype_ = '.format(data_type.name),
delim=('{', '}'),
compact=False)
# Generate _is_catch_all_ attribute:
self.emit('{}._is_catch_all_ = {!r}'.format(
data_type.name, data_type.is_catch_all()))
self.emit() | Generates attributes needed for serializing and deserializing structs
with enumerated subtypes. These assignments are made after all the
Python class definitions to ensure that all references exist. | Below is the instruction that describes the task:
### Input:
Generates attributes needed for serializing and deserializing structs
with enumerated subtypes. These assignments are made after all the
Python class definitions to ensure that all references exist.
### Response:
def _generate_enumerated_subtypes_tag_mapping(self, ns, data_type):
"""
Generates attributes needed for serializing and deserializing structs
with enumerated subtypes. These assignments are made after all the
Python class definitions to ensure that all references exist.
"""
assert data_type.has_enumerated_subtypes()
# Generate _tag_to_subtype_ attribute: Map from string type tag to
# the validator of the referenced subtype. Used on deserialization
# to look up the subtype for a given tag.
tag_to_subtype_items = []
for tags, subtype in data_type.get_all_subtypes_with_tags():
tag_to_subtype_items.append("{}: {}".format(
tags,
generate_validator_constructor(ns, subtype)))
self.generate_multiline_list(
tag_to_subtype_items,
before='{}._tag_to_subtype_ = '.format(data_type.name),
delim=('{', '}'),
compact=False)
# Generate _pytype_to_tag_and_subtype_: Map from Python class to a
# tuple of (type tag, subtype). Used on serialization to lookup how a
# class should be encoded based on the root struct's enumerated
# subtypes.
items = []
for tag, subtype in data_type.get_all_subtypes_with_tags():
items.append("{0}: ({1}, {2})".format(
fmt_class(subtype.name),
tag,
generate_validator_constructor(ns, subtype)))
self.generate_multiline_list(
items,
before='{}._pytype_to_tag_and_subtype_ = '.format(data_type.name),
delim=('{', '}'),
compact=False)
# Generate _is_catch_all_ attribute:
self.emit('{}._is_catch_all_ = {!r}'.format(
data_type.name, data_type.is_catch_all()))
self.emit() |
def row(self):
"""
Game Dataset(Row)
:return: {
'retro_game_id': Retrosheet Game id
'game_type': Game Type(S/R/F/D/L/W)
'game_type_des': Game Type Description
(Spring Training or Regular Season or Wild-card Game or Divisional Series or LCS or World Series)
'st_fl': Spring Training FLAG(T or F)
'regseason_fl': Regular Season FLAG(T or F)
'playoff_fl': Play Off Flag(T or F)
'local_game_time': Game Time(UTC -5)
'game_id': Game Id
'home_team_id': Home Team Id
'home_team_lg': Home Team league(AL or NL)
'away_team_id': Away Team Id
'away_team_lg': Away Team league(AL or NL)
'home_team_name': Home Team Name
'away_team_name': Away Team Name
'home_team_name_full': Home Team Name(Full Name)
'away_team_name_full': Away Team Name(Full Name)
'interleague_fl': Inter League Flag(T or F)
'park_id': Park Id
'park_name': Park Name
'park_loc': Park Location
}
"""
row = OrderedDict()
row['retro_game_id'] = self.retro_game_id
row['game_type'] = self.game_type
row['game_type_des'] = self.game_type_des
row['st_fl'] = self.st_fl
row['regseason_fl'] = self.regseason_fl
row['playoff_fl'] = self.playoff_fl
row['local_game_time'] = self.local_game_time
row['game_id'] = self.game_id
row['home_team_id'] = self.home_team_id
row['home_team_lg'] = self.home_team_lg
row['away_team_id'] = self.away_team_id
row['away_team_lg'] = self.away_team_lg
row['home_team_name'] = self.home_team_name
row['away_team_name'] = self.away_team_name
row['home_team_name_full'] = self.home_team_name_full
row['away_team_name_full'] = self.away_team_name_full
row['interleague_fl'] = self.interleague_fl
row['park_id'] = self.park_id
row['park_name'] = self.park_name
row['park_loc'] = self.park_loc
return row | Game Dataset(Row)
:return: {
'retro_game_id': Retrosheet Game id
'game_type': Game Type(S/R/F/D/L/W)
'game_type_des': Game Type Description
(Spring Training or Regular Season or Wild-card Game or Divisional Series or LCS or World Series)
'st_fl': Spring Training FLAG(T or F)
'regseason_fl': Regular Season FLAG(T or F)
'playoff_fl': Play Off Flag(T or F)
'local_game_time': Game Time(UTC -5)
'game_id': Game Id
'home_team_id': Home Team Id
'home_team_lg': Home Team league(AL or NL)
'away_team_id': Away Team Id
'away_team_lg': Away Team league(AL or NL)
'home_team_name': Home Team Name
'away_team_name': Away Team Name
'home_team_name_full': Home Team Name(Full Name)
'away_team_name_full': Away Team Name(Full Name)
'interleague_fl': Inter League Flag(T or F)
'park_id': Park Id
'park_name': Park Name
'park_loc': Park Location
} | Below is the instruction that describes the task:
### Input:
Game Dataset(Row)
:return: {
'retro_game_id': Retrosheet Game id
'game_type': Game Type(S/R/F/D/L/W)
'game_type_des': Game Type Description
(Spring Training or Regular Season or Wild-card Game or Divisional Series or LCS or World Series)
'st_fl': Spring Training FLAG(T or F)
'regseason_fl': Regular Season FLAG(T or F)
'playoff_fl': Play Off Flag(T or F)
'local_game_time': Game Time(UTC -5)
'game_id': Game Id
'home_team_id': Home Team Id
'home_team_lg': Home Team league(AL or NL)
'away_team_id': Away Team Id
'away_team_lg': Away Team league(AL or NL)
'home_team_name': Home Team Name
'away_team_name': Away Team Name
'home_team_name_full': Home Team Name(Full Name)
'away_team_name_full': Away Team Name(Full Name)
'interleague_fl': Inter League Flag(T or F)
'park_id': Park Id
'park_name': Park Name
'park_loc': Park Location
}
### Response:
def row(self):
"""
Game Dataset(Row)
:return: {
'retro_game_id': Retrosheet Game id
'game_type': Game Type(S/R/F/D/L/W)
'game_type_des': Game Type Description
(Spring Training or Regular Season or Wild-card Game or Divisional Series or LCS or World Series)
'st_fl': Spring Training FLAG(T or F)
'regseason_fl': Regular Season FLAG(T or F)
'playoff_fl': Play Off Flag(T or F)
'local_game_time': Game Time(UTC -5)
'game_id': Game Id
'home_team_id': Home Team Id
'home_team_lg': Home Team league(AL or NL)
'away_team_id': Away Team Id
'away_team_lg': Away Team league(AL or NL)
'home_team_name': Home Team Name
'away_team_name': Away Team Name
'home_team_name_full': Home Team Name(Full Name)
'away_team_name_full': Away Team Name(Full Name)
'interleague_fl': Inter League Flag(T or F)
'park_id': Park Id
'park_name': Park Name
'park_loc': Park Location
}
"""
row = OrderedDict()
row['retro_game_id'] = self.retro_game_id
row['game_type'] = self.game_type
row['game_type_des'] = self.game_type_des
row['st_fl'] = self.st_fl
row['regseason_fl'] = self.regseason_fl
row['playoff_fl'] = self.playoff_fl
row['local_game_time'] = self.local_game_time
row['game_id'] = self.game_id
row['home_team_id'] = self.home_team_id
row['home_team_lg'] = self.home_team_lg
row['away_team_id'] = self.away_team_id
row['away_team_lg'] = self.away_team_lg
row['home_team_name'] = self.home_team_name
row['away_team_name'] = self.away_team_name
row['home_team_name_full'] = self.home_team_name_full
row['away_team_name_full'] = self.away_team_name_full
row['interleague_fl'] = self.interleague_fl
row['park_id'] = self.park_id
row['park_name'] = self.park_name
row['park_loc'] = self.park_loc
return row |
def get(self, server):
"""
Returns a registered GPServer object with a matching GenePattern server url or index
Returns None if no matching result was found
:param server:
:return:
"""
# Handle indexes
if isinstance(server, int):
if server >= len(self.sessions):
return None
else:
return self.sessions[server]
# Handle server URLs
index = self._get_index(server)
if index == -1:
return None
else:
return self.sessions[index] | Returns a registered GPServer object with a matching GenePattern server url or index
Returns None if no matching result was found
:param server:
:return: | Below is the instruction that describes the task:
### Input:
Returns a registered GPServer object with a matching GenePattern server url or index
Returns None if no matching result was found
:param server:
:return:
### Response:
def get(self, server):
"""
Returns a registered GPServer object with a matching GenePattern server url or index
Returns None if no matching result was found
:param server:
:return:
"""
# Handle indexes
if isinstance(server, int):
if server >= len(self.sessions):
return None
else:
return self.sessions[server]
# Handle server URLs
index = self._get_index(server)
if index == -1:
return None
else:
return self.sessions[index] |
def media(self, uri):
"""Play a media file."""
try:
local_path, _ = urllib.request.urlretrieve(uri)
metadata = mutagen.File(local_path, easy=True)
if metadata.tags:
self._tags = metadata.tags
title = self._tags.get(TAG_TITLE, [])
self._manager[ATTR_TITLE] = title[0] if len(title) else ''
artist = self._tags.get(TAG_ARTIST, [])
self._manager[ATTR_ARTIST] = artist[0] if len(artist) else ''
album = self._tags.get(TAG_ALBUM, [])
self._manager[ATTR_ALBUM] = album[0] if len(album) else ''
local_uri = 'file://{}'.format(local_path)
# urllib.error.HTTPError
except Exception: # pylint: disable=broad-except
local_uri = uri
self._player.set_state(Gst.State.NULL)
self._player.set_property(PROP_URI, local_uri)
self._player.set_state(Gst.State.PLAYING)
self.state = STATE_PLAYING
self._manager[ATTR_URI] = uri
self._manager[ATTR_DURATION] = self._duration()
self._manager[ATTR_VOLUME] = self._player.get_property(PROP_VOLUME)
_LOGGER.info('playing %s (as %s)', uri, local_uri) | Play a media file. | Below is the instruction that describes the task:
### Input:
Play a media file.
### Response:
def media(self, uri):
"""Play a media file."""
try:
local_path, _ = urllib.request.urlretrieve(uri)
metadata = mutagen.File(local_path, easy=True)
if metadata.tags:
self._tags = metadata.tags
title = self._tags.get(TAG_TITLE, [])
self._manager[ATTR_TITLE] = title[0] if len(title) else ''
artist = self._tags.get(TAG_ARTIST, [])
self._manager[ATTR_ARTIST] = artist[0] if len(artist) else ''
album = self._tags.get(TAG_ALBUM, [])
self._manager[ATTR_ALBUM] = album[0] if len(album) else ''
local_uri = 'file://{}'.format(local_path)
# urllib.error.HTTPError
except Exception: # pylint: disable=broad-except
local_uri = uri
self._player.set_state(Gst.State.NULL)
self._player.set_property(PROP_URI, local_uri)
self._player.set_state(Gst.State.PLAYING)
self.state = STATE_PLAYING
self._manager[ATTR_URI] = uri
self._manager[ATTR_DURATION] = self._duration()
self._manager[ATTR_VOLUME] = self._player.get_property(PROP_VOLUME)
_LOGGER.info('playing %s (as %s)', uri, local_uri) |
def state_transition(history_id_key, table_name, always_set=[], may_spend_tokens=False):
"""
Decorator for the check() method on state-transition operations.
Make sure that:
* there is a __table__ field set, which names the table in which this record is stored.
* there is a __history_id_key__ field set, which identifies the table record's primary key.
Any fields named in @always_set will always be set when the transition is applied.
That is, fields set here *must* be set on transition, and *will* be set in the database, even if
they have prior values in the affected name record that might constrain which rows to update.
"""
def wrap( check ):
def wrapped_check( state_engine, nameop, block_id, checked_ops ):
rc = check( state_engine, nameop, block_id, checked_ops )
if rc:
# put fields in place
nameop['__table__'] = table_name
nameop['__history_id_key__'] = history_id_key
nameop['__state_transition__'] = True
nameop['__always_set__'] = always_set
if not may_spend_tokens:
state_transition_put_account_payment_info(nameop, None, None, None)
elif '__account_payment_info__' not in nameop:
raise Exception('Operation spends tokens, but no payment account information is set')
# sanity check
invariant_tags = state_transition_invariant_tags()
for tag in invariant_tags:
assert tag in nameop, "BUG: missing invariant tag '%s'" % tag
# sanity check---all required consensus fields must be present
for required_field in CONSENSUS_FIELDS_REQUIRED:
assert required_field in nameop, 'BUG: missing required consensus field {}'.format(required_field)
return rc
return wrapped_check
return wrap | Decorator for the check() method on state-transition operations.
Make sure that:
* there is a __table__ field set, which names the table in which this record is stored.
* there is a __history_id_key__ field set, which identifies the table record's primary key.
Any fields named in @always_set will always be set when the transition is applied.
That is, fields set here *must* be set on transition, and *will* be set in the database, even if
they have prior values in the affected name record that might constrain which rows to update. | Below is the instruction that describes the task:
### Input:
Decorator for the check() method on state-transition operations.
Make sure that:
* there is a __table__ field set, which names the table in which this record is stored.
* there is a __history_id_key__ field set, which identifies the table record's primary key.
Any fields named in @always_set will always be set when the transition is applied.
That is, fields set here *must* be set on transition, and *will* be set in the database, even if
they have prior values in the affected name record that might constrain which rows to update.
### Response:
def state_transition(history_id_key, table_name, always_set=[], may_spend_tokens=False):
"""
Decorator for the check() method on state-transition operations.
Make sure that:
* there is a __table__ field set, which names the table in which this record is stored.
* there is a __history_id_key__ field set, which identifies the table record's primary key.
Any fields named in @always_set will always be set when the transition is applied.
That is, fields set here *must* be set on transition, and *will* be set in the database, even if
they have prior values in the affected name record that might constrain which rows to update.
"""
def wrap( check ):
def wrapped_check( state_engine, nameop, block_id, checked_ops ):
rc = check( state_engine, nameop, block_id, checked_ops )
if rc:
# put fields in place
nameop['__table__'] = table_name
nameop['__history_id_key__'] = history_id_key
nameop['__state_transition__'] = True
nameop['__always_set__'] = always_set
if not may_spend_tokens:
state_transition_put_account_payment_info(nameop, None, None, None)
elif '__account_payment_info__' not in nameop:
raise Exception('Operation spends tokens, but no payment account information is set')
# sanity check
invariant_tags = state_transition_invariant_tags()
for tag in invariant_tags:
assert tag in nameop, "BUG: missing invariant tag '%s'" % tag
# sanity check---all required consensus fields must be present
for required_field in CONSENSUS_FIELDS_REQUIRED:
assert required_field in nameop, 'BUG: missing required consensus field {}'.format(required_field)
return rc
return wrapped_check
return wrap |
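Stripped of the consensus-field invariants, `state_transition` is a standard decorator-factory pattern: run the wrapped check and, only on success, stamp bookkeeping fields onto the operation dict. A simplified, self-contained sketch (the field names and the toy check are illustrative only):

```python
# Simplified sketch of the decorator-factory pattern used by state_transition.
def state_transition(history_id_key, table_name, always_set=()):
    def wrap(check):
        def wrapped_check(state_engine, nameop, block_id, checked_ops):
            rc = check(state_engine, nameop, block_id, checked_ops)
            if rc:
                # Only annotate the operation once the check has passed.
                nameop['__table__'] = table_name
                nameop['__history_id_key__'] = history_id_key
                nameop['__always_set__'] = list(always_set)
            return rc
        return wrapped_check
    return wrap

@state_transition('name', 'name_records', always_set=['value_hash'])
def check_update(state_engine, nameop, block_id, checked_ops):
    return 'name' in nameop  # toy acceptance rule

op = {'name': 'demo.id', 'value_hash': 'abcd'}
print(check_update(None, op, 0, {}), op['__table__'])  # True name_records
```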
def import_transcript_from_fs(edx_video_id, language_code, file_name, provider, resource_fs, static_dir):
"""
Imports transcript file from file system and creates transcript record in DS.
Arguments:
edx_video_id (str): Video id of the video.
language_code (unicode): Language code of the requested transcript.
file_name (unicode): File name of the transcript file.
provider (unicode): Transcript provider.
resource_fs (OSFS): Import file system.
static_dir (str): The Directory to retrieve transcript file.
"""
file_format = None
transcript_data = get_video_transcript_data(edx_video_id, language_code)
# First check if transcript record does not exist.
if not transcript_data:
# Read file from import file system and attach it to transcript record in DS.
try:
with resource_fs.open(combine(static_dir, file_name), 'rb') as f:
file_content = f.read()
file_content = file_content.decode('utf-8-sig')
except ResourceNotFound as exc:
# Don't raise exception in case transcript file is not found in course OLX.
logger.warn(
'[edx-val] "%s" transcript "%s" for video "%s" is not found.',
language_code,
file_name,
edx_video_id
)
return
except UnicodeDecodeError:
# Don't raise exception in case transcript contains non-utf8 content.
logger.warn(
'[edx-val] "%s" transcript "%s" for video "%s" contains a non-utf8 file content.',
language_code,
file_name,
edx_video_id
)
return
# Get file format from transcript content.
try:
file_format = get_transcript_format(file_content)
except Error as ex:
# Don't raise exception, just don't create transcript record.
logger.warn(
'[edx-val] Error while getting transcript format for video=%s -- language_code=%s --file_name=%s',
edx_video_id,
language_code,
file_name
)
return
# Create transcript record.
create_video_transcript(
video_id=edx_video_id,
language_code=language_code,
file_format=file_format,
content=ContentFile(file_content),
provider=provider
) | Imports transcript file from file system and creates transcript record in DS.
Arguments:
edx_video_id (str): Video id of the video.
language_code (unicode): Language code of the requested transcript.
file_name (unicode): File name of the transcript file.
provider (unicode): Transcript provider.
resource_fs (OSFS): Import file system.
static_dir (str): The Directory to retrieve transcript file. | Below is the instruction that describes the task:
### Input:
Imports transcript file from file system and creates transcript record in DS.
Arguments:
edx_video_id (str): Video id of the video.
language_code (unicode): Language code of the requested transcript.
file_name (unicode): File name of the transcript file.
provider (unicode): Transcript provider.
resource_fs (OSFS): Import file system.
static_dir (str): The Directory to retrieve transcript file.
### Response:
def import_transcript_from_fs(edx_video_id, language_code, file_name, provider, resource_fs, static_dir):
"""
Imports transcript file from file system and creates transcript record in DS.
Arguments:
edx_video_id (str): Video id of the video.
language_code (unicode): Language code of the requested transcript.
file_name (unicode): File name of the transcript file.
provider (unicode): Transcript provider.
resource_fs (OSFS): Import file system.
static_dir (str): The Directory to retrieve transcript file.
"""
file_format = None
transcript_data = get_video_transcript_data(edx_video_id, language_code)
# First check if transcript record does not exist.
if not transcript_data:
# Read file from import file system and attach it to transcript record in DS.
try:
with resource_fs.open(combine(static_dir, file_name), 'rb') as f:
file_content = f.read()
file_content = file_content.decode('utf-8-sig')
except ResourceNotFound as exc:
# Don't raise exception in case transcript file is not found in course OLX.
logger.warn(
'[edx-val] "%s" transcript "%s" for video "%s" is not found.',
language_code,
file_name,
edx_video_id
)
return
except UnicodeDecodeError:
# Don't raise exception in case transcript contains non-utf8 content.
logger.warn(
'[edx-val] "%s" transcript "%s" for video "%s" contains a non-utf8 file content.',
language_code,
file_name,
edx_video_id
)
return
# Get file format from transcript content.
try:
file_format = get_transcript_format(file_content)
except Error as ex:
# Don't raise exception, just don't create transcript record.
logger.warn(
'[edx-val] Error while getting transcript format for video=%s -- language_code=%s --file_name=%s',
edx_video_id,
language_code,
file_name
)
return
# Create transcript record.
create_video_transcript(
video_id=edx_video_id,
language_code=language_code,
file_format=file_format,
content=ContentFile(file_content),
provider=provider
) |
def CBO_L(self, **kwargs):
'''
Returns the strain-shifted L-valley conduction band offset (CBO),
assuming the strain affects all conduction band valleys equally.
'''
return (self.unstrained.CBO_L(**kwargs) +
self.CBO_strain_shift(**kwargs)) | Returns the strain-shifted L-valley conduction band offset (CBO),
assuming the strain affects all conduction band valleys equally. | Below is the instruction that describes the task:
### Input:
Returns the strain-shifted L-valley conduction band offset (CBO),
assuming the strain affects all conduction band valleys equally.
### Response:
def CBO_L(self, **kwargs):
'''
Returns the strain-shifted L-valley conduction band offset (CBO),
assuming the strain affects all conduction band valleys equally.
'''
return (self.unstrained.CBO_L(**kwargs) +
self.CBO_strain_shift(**kwargs)) |
def vocabulary(self, levels=None):
"""
:param levels: An optional argument of declaring a single or
comma-delimited list of levels is available, as seen in the example
as 1. An example of a comma-delimited list of levels is 1,2,5,9.
:type levels: str or None
http://www.wanikani.com/api/v1.2#vocabulary-list
"""
url = WANIKANI_BASE.format(self.api_key, 'vocabulary')
if levels:
url += '/{0}'.format(levels)
data = self.get(url)
if 'general' in data['requested_information']:
for item in data['requested_information']['general']:
yield Vocabulary(item)
else:
for item in data['requested_information']:
yield Vocabulary(item) | :param levels: An optional argument of declaring a single or
comma-delimited list of levels is available, as seen in the example
as 1. An example of a comma-delimited list of levels is 1,2,5,9.
:type levels: str or None
http://www.wanikani.com/api/v1.2#vocabulary-list | Below is the instruction that describes the task:
### Input:
:param levels: An optional argument of declaring a single or
comma-delimited list of levels is available, as seen in the example
as 1. An example of a comma-delimited list of levels is 1,2,5,9.
:type levels: str or None
http://www.wanikani.com/api/v1.2#vocabulary-list
### Response:
def vocabulary(self, levels=None):
"""
:param levels: An optional argument of declaring a single or
comma-delimited list of levels is available, as seen in the example
as 1. An example of a comma-delimited list of levels is 1,2,5,9.
:type levels: str or None
http://www.wanikani.com/api/v1.2#vocabulary-list
"""
url = WANIKANI_BASE.format(self.api_key, 'vocabulary')
if levels:
url += '/{0}'.format(levels)
data = self.get(url)
if 'general' in data['requested_information']:
for item in data['requested_information']['general']:
yield Vocabulary(item)
else:
for item in data['requested_information']:
yield Vocabulary(item) |
def urlfetch_async(self, url, method='GET', headers=None,
payload=None, deadline=None, callback=None,
follow_redirects=False):
"""Make an async urlfetch() call.
This is an async wrapper around urlfetch(). It adds an authentication
header.
Args:
url: the url to fetch.
method: the method in which to fetch.
headers: the http headers.
payload: the data to submit in the fetch.
deadline: the deadline in which to make the call.
callback: the call to make once completed.
follow_redirects: whether or not to follow redirects.
Yields:
This returns a Future despite not being decorated with @ndb.tasklet!
"""
headers = {} if headers is None else dict(headers)
headers.update(self.user_agent)
try:
self.token = yield self.get_token_async()
except app_identity.InternalError, e:
if os.environ.get('DATACENTER', '').endswith('sandman'):
self.token = None
logging.warning('Could not fetch an authentication token in sandman '
'based Appengine devel setup; proceeding without one.')
else:
raise e
if self.token:
headers['authorization'] = 'OAuth ' + self.token
deadline = deadline or self.retry_params.urlfetch_timeout
ctx = ndb.get_context()
resp = yield ctx.urlfetch(
url, payload=payload, method=method,
headers=headers, follow_redirects=follow_redirects,
deadline=deadline, callback=callback)
raise ndb.Return(resp) | Make an async urlfetch() call.
This is an async wrapper around urlfetch(). It adds an authentication
header.
Args:
url: the url to fetch.
method: the method in which to fetch.
headers: the http headers.
payload: the data to submit in the fetch.
deadline: the deadline in which to make the call.
callback: the call to make once completed.
follow_redirects: whether or not to follow redirects.
Yields:
This returns a Future despite not being decorated with @ndb.tasklet! | Below is the instruction that describes the task:
### Input:
Make an async urlfetch() call.
This is an async wrapper around urlfetch(). It adds an authentication
header.
Args:
url: the url to fetch.
method: the method in which to fetch.
headers: the http headers.
payload: the data to submit in the fetch.
deadline: the deadline in which to make the call.
callback: the call to make once completed.
follow_redirects: whether or not to follow redirects.
Yields:
This returns a Future despite not being decorated with @ndb.tasklet!
### Response:
def urlfetch_async(self, url, method='GET', headers=None,
payload=None, deadline=None, callback=None,
follow_redirects=False):
"""Make an async urlfetch() call.
This is an async wrapper around urlfetch(). It adds an authentication
header.
Args:
url: the url to fetch.
method: the method in which to fetch.
headers: the http headers.
payload: the data to submit in the fetch.
deadline: the deadline in which to make the call.
callback: the call to make once completed.
follow_redirects: whether or not to follow redirects.
Yields:
This returns a Future despite not being decorated with @ndb.tasklet!
"""
headers = {} if headers is None else dict(headers)
headers.update(self.user_agent)
try:
self.token = yield self.get_token_async()
except app_identity.InternalError, e:
if os.environ.get('DATACENTER', '').endswith('sandman'):
self.token = None
logging.warning('Could not fetch an authentication token in sandman '
'based Appengine devel setup; proceeding without one.')
else:
raise e
if self.token:
headers['authorization'] = 'OAuth ' + self.token
deadline = deadline or self.retry_params.urlfetch_timeout
ctx = ndb.get_context()
resp = yield ctx.urlfetch(
url, payload=payload, method=method,
headers=headers, follow_redirects=follow_redirects,
deadline=deadline, callback=callback)
raise ndb.Return(resp) |
def cli(context, mongodb, username, password, authdb, host, port, loglevel, config, demo):
"""scout: manage interactions with a scout instance."""
# log_format = "%(message)s" if sys.stdout.isatty() else None
log_format = None
coloredlogs.install(level=loglevel, fmt=log_format)
LOG.info("Running scout version %s", __version__)
LOG.debug("Debug logging enabled.")
mongo_config = {}
cli_config = {}
if config:
LOG.debug("Use config file %s", config)
with open(config, 'r') as in_handle:
cli_config = yaml.load(in_handle)
mongo_config['mongodb'] = (mongodb or cli_config.get('mongodb') or 'scout')
if demo:
mongo_config['mongodb'] = 'scout-demo'
mongo_config['host'] = (host or cli_config.get('host') or 'localhost')
mongo_config['port'] = (port or cli_config.get('port') or 27017)
mongo_config['username'] = username or cli_config.get('username')
mongo_config['password'] = password or cli_config.get('password')
mongo_config['authdb'] = authdb or cli_config.get('authdb') or mongo_config['mongodb']
mongo_config['omim_api_key'] = cli_config.get('omim_api_key')
if context.invoked_subcommand in ('setup', 'serve'):
mongo_config['adapter'] = None
else:
LOG.info("Setting database name to %s", mongo_config['mongodb'])
LOG.debug("Setting host to %s", mongo_config['host'])
LOG.debug("Setting port to %s", mongo_config['port'])
try:
client = get_connection(**mongo_config)
except ConnectionFailure:
context.abort()
database = client[mongo_config['mongodb']]
LOG.info("Setting up a mongo adapter")
mongo_config['client'] = client
adapter = MongoAdapter(database)
mongo_config['adapter'] = adapter
LOG.info("Check if authenticated...")
try:
for ins_obj in adapter.institutes():
pass
except OperationFailure as err:
LOG.info("User not authenticated")
context.abort()
context.obj = mongo_config | scout: manage interactions with a scout instance. | Below is the instruction that describes the task:
### Input:
scout: manage interactions with a scout instance.
### Response:
def cli(context, mongodb, username, password, authdb, host, port, loglevel, config, demo):
"""scout: manage interactions with a scout instance."""
# log_format = "%(message)s" if sys.stdout.isatty() else None
log_format = None
coloredlogs.install(level=loglevel, fmt=log_format)
LOG.info("Running scout version %s", __version__)
LOG.debug("Debug logging enabled.")
mongo_config = {}
cli_config = {}
if config:
LOG.debug("Use config file %s", config)
with open(config, 'r') as in_handle:
cli_config = yaml.load(in_handle)
mongo_config['mongodb'] = (mongodb or cli_config.get('mongodb') or 'scout')
if demo:
mongo_config['mongodb'] = 'scout-demo'
mongo_config['host'] = (host or cli_config.get('host') or 'localhost')
mongo_config['port'] = (port or cli_config.get('port') or 27017)
mongo_config['username'] = username or cli_config.get('username')
mongo_config['password'] = password or cli_config.get('password')
mongo_config['authdb'] = authdb or cli_config.get('authdb') or mongo_config['mongodb']
mongo_config['omim_api_key'] = cli_config.get('omim_api_key')
if context.invoked_subcommand in ('setup', 'serve'):
mongo_config['adapter'] = None
else:
LOG.info("Setting database name to %s", mongo_config['mongodb'])
LOG.debug("Setting host to %s", mongo_config['host'])
LOG.debug("Setting port to %s", mongo_config['port'])
try:
client = get_connection(**mongo_config)
except ConnectionFailure:
context.abort()
database = client[mongo_config['mongodb']]
LOG.info("Setting up a mongo adapter")
mongo_config['client'] = client
adapter = MongoAdapter(database)
mongo_config['adapter'] = adapter
LOG.info("Check if authenticated...")
try:
for ins_obj in adapter.institutes():
pass
except OperationFailure as err:
LOG.info("User not authenticated")
context.abort()
context.obj = mongo_config |
def delete_store_credit_payment_by_id(cls, store_credit_payment_id, **kwargs):
"""Delete StoreCreditPayment
Delete an instance of StoreCreditPayment by its ID.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.delete_store_credit_payment_by_id(store_credit_payment_id, async=True)
>>> result = thread.get()
:param async bool
:param str store_credit_payment_id: ID of storeCreditPayment to delete. (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return cls._delete_store_credit_payment_by_id_with_http_info(store_credit_payment_id, **kwargs)
else:
(data) = cls._delete_store_credit_payment_by_id_with_http_info(store_credit_payment_id, **kwargs)
return data | Delete StoreCreditPayment
Delete an instance of StoreCreditPayment by its ID.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.delete_store_credit_payment_by_id(store_credit_payment_id, async=True)
>>> result = thread.get()
:param async bool
:param str store_credit_payment_id: ID of storeCreditPayment to delete. (required)
:return: None
If the method is called asynchronously,
returns the request thread. | Below is the instruction that describes the task:
### Input:
Delete StoreCreditPayment
Delete an instance of StoreCreditPayment by its ID.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.delete_store_credit_payment_by_id(store_credit_payment_id, async=True)
>>> result = thread.get()
:param async bool
:param str store_credit_payment_id: ID of storeCreditPayment to delete. (required)
:return: None
If the method is called asynchronously,
returns the request thread.
### Response:
def delete_store_credit_payment_by_id(cls, store_credit_payment_id, **kwargs):
"""Delete StoreCreditPayment
Delete an instance of StoreCreditPayment by its ID.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.delete_store_credit_payment_by_id(store_credit_payment_id, async=True)
>>> result = thread.get()
:param async bool
:param str store_credit_payment_id: ID of storeCreditPayment to delete. (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return cls._delete_store_credit_payment_by_id_with_http_info(store_credit_payment_id, **kwargs)
else:
(data) = cls._delete_store_credit_payment_by_id_with_http_info(store_credit_payment_id, **kwargs)
return data |
def check_model(self):
"""
Check the model for various errors. This method checks for the following
errors.
* Checks if the sum of the probabilities for each state is equal to 1 (tol=0.01).
* Checks if the CPDs associated with nodes are consistent with their parents.
Returns
-------
check: boolean
True if all the checks are passed
"""
for node in self.nodes():
cpd = self.get_cpds(node=node)
if cpd is None:
raise ValueError('No CPD associated with {}'.format(node))
elif isinstance(cpd, (TabularCPD, ContinuousFactor)):
evidence = cpd.get_evidence()
parents = self.get_parents(node)
if set(evidence if evidence else []) != set(parents if parents else []):
raise ValueError("CPD associated with {node} doesn't have "
"proper parents associated with it.".format(node=node))
if not cpd.is_valid_cpd():
raise ValueError("Sum or integral of conditional probabilites for node {node}"
" is not equal to 1.".format(node=node))
return True | Check the model for various errors. This method checks for the following
errors.
* Checks if the sum of the probabilities for each state is equal to 1 (tol=0.01).
* Checks if the CPDs associated with nodes are consistent with their parents.
Returns
-------
check: boolean
True if all the checks are passed | Below is the instruction that describes the task:
### Input:
Check the model for various errors. This method checks for the following
errors.
* Checks if the sum of the probabilities for each state is equal to 1 (tol=0.01).
* Checks if the CPDs associated with nodes are consistent with their parents.
Returns
-------
check: boolean
True if all the checks are passed
### Response:
def check_model(self):
"""
Check the model for various errors. This method checks for the following
errors.
* Checks if the sum of the probabilities for each state is equal to 1 (tol=0.01).
* Checks if the CPDs associated with nodes are consistent with their parents.
Returns
-------
check: boolean
True if all the checks are passed
"""
for node in self.nodes():
cpd = self.get_cpds(node=node)
if cpd is None:
raise ValueError('No CPD associated with {}'.format(node))
elif isinstance(cpd, (TabularCPD, ContinuousFactor)):
evidence = cpd.get_evidence()
parents = self.get_parents(node)
if set(evidence if evidence else []) != set(parents if parents else []):
raise ValueError("CPD associated with {node} doesn't have "
"proper parents associated with it.".format(node=node))
if not cpd.is_valid_cpd():
raise ValueError("Sum or integral of conditional probabilites for node {node}"
" is not equal to 1.".format(node=node))
return True |
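A hedged usage sketch for `check_model`, assuming the pgmpy-style `BayesianModel`/`TabularCPD` API this method belongs to (import paths and class names may differ across pgmpy versions): two binary nodes, with each column of the conditional table summing to 1.

```python
# Usage sketch for check_model(), assuming pgmpy's BayesianModel/TabularCPD.
from pgmpy.models import BayesianModel
from pgmpy.factors.discrete import TabularCPD

model = BayesianModel([('Rain', 'WetGrass')])
cpd_rain = TabularCPD(variable='Rain', variable_card=2, values=[[0.8], [0.2]])
cpd_grass = TabularCPD(variable='WetGrass', variable_card=2,
                       values=[[0.9, 0.1],   # each column sums to 1
                               [0.1, 0.9]],
                       evidence=['Rain'], evidence_card=[2])
model.add_cpds(cpd_rain, cpd_grass)
print(model.check_model())  # True: CPDs match parents and normalise properly
```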
def _register_handler(event, fun, external=False):
"""Register a function to be an event handler"""
registry = core.HANDLER_REGISTRY
if external:
registry = core.EXTERNAL_HANDLER_REGISTRY
if not isinstance(event, basestring):
# If not basestring, it is a BaseEvent subclass.
# This occurs when class methods are registered as handlers
event = core.parse_event_to_name(event)
if event in registry:
registry[event].append(fun)
else:
registry[event] = [fun]
return fun | Register a function to be an event handler | Below is the instruction that describes the task:
### Input:
Register a function to be an event handler
### Response:
def _register_handler(event, fun, external=False):
"""Register a function to be an event handler"""
registry = core.HANDLER_REGISTRY
if external:
registry = core.EXTERNAL_HANDLER_REGISTRY
if not isinstance(event, basestring):
# If not basestring, it is a BaseEvent subclass.
# This occurs when class methods are registered as handlers
event = core.parse_event_to_name(event)
if event in registry:
registry[event].append(fun)
else:
registry[event] = [fun]
return fun |
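The registry behind `_register_handler` is just a mapping from event name to a list of handler functions. A self-contained sketch of the same append-or-create pattern (the module-level names here are illustrative, not the real `core` module):

```python
# Minimal sketch of the handler-registry pattern: append to the event's
# handler list if it exists, otherwise create it on first registration.
HANDLER_REGISTRY = {}

def register_handler(event, fun):
    if event in HANDLER_REGISTRY:
        HANDLER_REGISTRY[event].append(fun)
    else:
        HANDLER_REGISTRY[event] = [fun]
    return fun

register_handler('user.created', lambda payload: print('welcome', payload))
register_handler('user.created', lambda payload: print('audit', payload))
for handler in HANDLER_REGISTRY['user.created']:
    handler({'id': 42})  # both handlers fire for the same event
```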
def from_element(self, element, defaults={}):
"""Populate object variables from SVD element"""
if isinstance(defaults, SvdElement):
defaults = vars(defaults)
for key in self.props:
try:
value = element.find(key).text
except AttributeError: # Maybe it's attribute?
default = defaults[key] if key in defaults else None
value = element.get(key, default)
if value is not None:
if key in self.props_to_integer:
try:
value = int(value)
except ValueError: # It has to be hex
value = int(value, 16)
elif key in self.props_to_boolean:
value = value.lower() in ("yes", "true", "t", "1")
setattr(self, key, value) | Populate object variables from SVD element | Below is the instruction that describes the task:
### Input:
Populate object variables from SVD element
### Response:
def from_element(self, element, defaults={}):
"""Populate object variables from SVD element"""
if isinstance(defaults, SvdElement):
defaults = vars(defaults)
for key in self.props:
try:
value = element.find(key).text
except AttributeError: # Maybe it's attribute?
default = defaults[key] if key in defaults else None
value = element.get(key, default)
if value is not None:
if key in self.props_to_integer:
try:
value = int(value)
except ValueError: # It has to be hex
value = int(value, 16)
elif key in self.props_to_boolean:
value = value.lower() in ("yes", "true", "t", "1")
setattr(self, key, value) |
def list_storage_containers(kwargs=None, storage_conn=None, call=None):
'''
.. versionadded:: 2015.8.0
List containers associated with the storage account
CLI Example:
.. code-block:: bash
salt-cloud -f list_storage_containers my-azure
'''
if call != 'function':
raise SaltCloudSystemExit(
'The list_storage_containers function must be called with -f or --function.'
)
if not storage_conn:
storage_conn = get_storage_conn(conn_kwargs=kwargs)
data = storage_conn.list_containers()
ret = {}
for item in data.containers:
ret[item.name] = object_to_dict(item)
return ret | .. versionadded:: 2015.8.0
List containers associated with the storage account
CLI Example:
.. code-block:: bash
salt-cloud -f list_storage_containers my-azure | Below is the instruction that describes the task:
### Input:
.. versionadded:: 2015.8.0
List containers associated with the storage account
CLI Example:
.. code-block:: bash
salt-cloud -f list_storage_containers my-azure
### Response:
def list_storage_containers(kwargs=None, storage_conn=None, call=None):
'''
.. versionadded:: 2015.8.0
List containers associated with the storage account
CLI Example:
.. code-block:: bash
salt-cloud -f list_storage_containers my-azure
'''
if call != 'function':
raise SaltCloudSystemExit(
'The list_storage_containers function must be called with -f or --function.'
)
if not storage_conn:
storage_conn = get_storage_conn(conn_kwargs=kwargs)
data = storage_conn.list_containers()
ret = {}
for item in data.containers:
ret[item.name] = object_to_dict(item)
return ret |
def restart_service(service, log=False):
""" restarts a service """
with settings():
if log:
bookshelf2.logging_helpers.log_yellow(
'stopping service %s' % service)
sudo('service %s stop' % service)
if log:
bookshelf2.logging_helpers.log_yellow(
'starting service %s' % service)
sudo('service %s start' % service)
return True | restarts a service | Below is the instruction that describes the task:
### Input:
restarts a service
### Response:
def restart_service(service, log=False):
""" restarts a service """
with settings():
if log:
bookshelf2.logging_helpers.log_yellow(
'stopping service %s' % service)
sudo('service %s stop' % service)
if log:
bookshelf2.logging_helpers.log_yellow(
'starting service %s' % service)
sudo('service %s start' % service)
return True |
def get_default_property_values(self, classname):
"""Return a dict with default values for all properties declared on this class."""
schema_element = self.get_element_by_class_name(classname)
result = {
property_name: property_descriptor.default
for property_name, property_descriptor in six.iteritems(schema_element.properties)
}
if schema_element.is_edge:
# Remove the source/destination properties for edges, if they exist.
result.pop(EDGE_SOURCE_PROPERTY_NAME, None)
result.pop(EDGE_DESTINATION_PROPERTY_NAME, None)
return result | Return a dict with default values for all properties declared on this class. | Below is the instruction that describes the task:
### Input:
Return a dict with default values for all properties declared on this class.
### Response:
def get_default_property_values(self, classname):
"""Return a dict with default values for all properties declared on this class."""
schema_element = self.get_element_by_class_name(classname)
result = {
property_name: property_descriptor.default
for property_name, property_descriptor in six.iteritems(schema_element.properties)
}
if schema_element.is_edge:
# Remove the source/destination properties for edges, if they exist.
result.pop(EDGE_SOURCE_PROPERTY_NAME, None)
result.pop(EDGE_DESTINATION_PROPERTY_NAME, None)
return result |
def scan(self, scanner, node_list):
"""Scan a list of this Executor's files (targets or sources) for
implicit dependencies and update all of the targets with them.
This essentially short-circuits an N*M scan of the sources for
each individual target, which is a hell of a lot more efficient.
"""
env = self.get_build_env()
path = self.get_build_scanner_path
kw = self.get_kw()
# TODO(batch): scan by batches)
deps = []
for node in node_list:
node.disambiguate()
deps.extend(node.get_implicit_deps(env, scanner, path, kw))
deps.extend(self.get_implicit_deps())
for tgt in self.get_all_targets():
tgt.add_to_implicit(deps) | Scan a list of this Executor's files (targets or sources) for
implicit dependencies and update all of the targets with them.
This essentially short-circuits an N*M scan of the sources for
each individual target, which is a hell of a lot more efficient. | Below is the instruction that describes the task:
### Input:
Scan a list of this Executor's files (targets or sources) for
implicit dependencies and update all of the targets with them.
This essentially short-circuits an N*M scan of the sources for
each individual target, which is a hell of a lot more efficient.
### Response:
def scan(self, scanner, node_list):
"""Scan a list of this Executor's files (targets or sources) for
implicit dependencies and update all of the targets with them.
This essentially short-circuits an N*M scan of the sources for
each individual target, which is a hell of a lot more efficient.
"""
env = self.get_build_env()
path = self.get_build_scanner_path
kw = self.get_kw()
# TODO(batch): scan by batches)
deps = []
for node in node_list:
node.disambiguate()
deps.extend(node.get_implicit_deps(env, scanner, path, kw))
deps.extend(self.get_implicit_deps())
for tgt in self.get_all_targets():
tgt.add_to_implicit(deps) |
def file_rename(object_id, input_params={}, always_retry=True, **kwargs):
"""
Invokes the /file-xxxx/rename API method.
For more info, see: https://wiki.dnanexus.com/API-Specification-v1.0.0/Name#API-method%3A-%2Fclass-xxxx%2Frename
"""
return DXHTTPRequest('/%s/rename' % object_id, input_params, always_retry=always_retry, **kwargs) | Invokes the /file-xxxx/rename API method.
For more info, see: https://wiki.dnanexus.com/API-Specification-v1.0.0/Name#API-method%3A-%2Fclass-xxxx%2Frename | Below is the instruction that describes the task:
### Input:
Invokes the /file-xxxx/rename API method.
For more info, see: https://wiki.dnanexus.com/API-Specification-v1.0.0/Name#API-method%3A-%2Fclass-xxxx%2Frename
### Response:
def file_rename(object_id, input_params={}, always_retry=True, **kwargs):
"""
Invokes the /file-xxxx/rename API method.
For more info, see: https://wiki.dnanexus.com/API-Specification-v1.0.0/Name#API-method%3A-%2Fclass-xxxx%2Frename
"""
return DXHTTPRequest('/%s/rename' % object_id, input_params, always_retry=always_retry, **kwargs) |
def filter_records(self, records):
"""
Apply the filter to records
"""
for record in records:
try:
filtered = self.filter_record(record)
assert (filtered)
# Quick tracking whether the sequence was modified
if filtered.seq == record.seq:
self.passed_unchanged += 1
else:
self.passed_changed += 1
yield filtered
except FailedFilter as e:
self.failed += 1
v = e.value
if self.listener:
self.listener(
'failed_filter',
record,
filter_name=self.name,
value=v) | Apply the filter to records | Below is the instruction that describes the task:
### Input:
Apply the filter to records
### Response:
def filter_records(self, records):
"""
Apply the filter to records
"""
for record in records:
try:
filtered = self.filter_record(record)
assert (filtered)
# Quick tracking whether the sequence was modified
if filtered.seq == record.seq:
self.passed_unchanged += 1
else:
self.passed_changed += 1
yield filtered
except FailedFilter as e:
self.failed += 1
v = e.value
if self.listener:
self.listener(
'failed_filter',
record,
filter_name=self.name,
value=v) |
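The filter loop above follows a simple contract: `filter_record` either returns a (possibly modified) record or raises `FailedFilter`, and the generator counts failures while yielding everything else. A self-contained sketch with a toy minimum-length filter (both classes are hypothetical stand-ins for the real ones):

```python
# Sketch of the filter_records() contract: raise FailedFilter to drop a
# record, return it (possibly modified) to pass it through.
class FailedFilter(Exception):
    def __init__(self, value=None):
        super(FailedFilter, self).__init__(value)
        self.value = value

class MinLengthFilter(object):
    name = 'min_length'

    def __init__(self, min_length):
        self.min_length = min_length
        self.passed = 0
        self.failed = 0

    def filter_record(self, record):
        if len(record) < self.min_length:
            raise FailedFilter(len(record))
        return record

    def filter_records(self, records):
        for record in records:
            try:
                filtered = self.filter_record(record)
                self.passed += 1
                yield filtered
            except FailedFilter:
                self.failed += 1

f = MinLengthFilter(3)
print(list(f.filter_records(['ACGT', 'AC', 'GGGA'])), f.failed)
# ['ACGT', 'GGGA'] 1
```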
def close(self):
"""Close this metrics repository."""
for reporter in self._reporters:
reporter.close()
self._metrics.clear() | Close this metrics repository. | Below is the instruction that describes the task:
### Input:
Close this metrics repository.
### Response:
def close(self):
"""Close this metrics repository."""
for reporter in self._reporters:
reporter.close()
self._metrics.clear() |
def yn_prompt(text):
'''
Takes the text prompt, and presents it, takes only "y" or "n" for
answers, and returns True or False. Repeats itself on bad input.
'''
text = "\n"+ text + "\n('y' or 'n'): "
while True:
answer = input(text).strip()
if answer != 'y' and answer != 'n':
continue
elif answer == 'y':
return True
elif answer == 'n':
return False | Takes the text prompt, and presents it, takes only "y" or "n" for
answers, and returns True or False. Repeats itself on bad input. | Below is the instruction that describes the task:
### Input:
Takes the text prompt, and presents it, takes only "y" or "n" for
answers, and returns True or False. Repeats itself on bad input.
### Response:
def yn_prompt(text):
'''
Takes the text prompt, and presents it, takes only "y" or "n" for
answers, and returns True or False. Repeats itself on bad input.
'''
text = "\n"+ text + "\n('y' or 'n'): "
while True:
answer = input(text).strip()
if answer != 'y' and answer != 'n':
continue
elif answer == 'y':
return True
elif answer == 'n':
return False |
def transfer_bankcard(self, true_name, bank_card_no, bank_code, amount, desc=None, out_trade_no=None):
"""
Enterprise payment to bank card API
:param true_name: Account holder's name
:param bank_card_no: Bank card number
:param bank_code: Bank code
:param amount: Payment amount, in cents (fen)
:param desc: Payment description
:param out_trade_no: Optional. Merchant order number; must be unique, auto-generated by default
:return: Result information returned by the API
"""
if not out_trade_no:
now = datetime.now()
out_trade_no = '{0}{1}{2}'.format(
self.mch_id,
now.strftime('%Y%m%d%H%M%S'),
random.randint(1000, 10000)
)
data = {
'mch_id': self.mch_id,
'partner_trade_no': out_trade_no,
'amount': amount,
'desc': desc,
'enc_bank_no': self._rsa_encrypt(bank_card_no),
'enc_true_name': self._rsa_encrypt(true_name),
'bank_code': bank_code,
}
return self._post('mmpaysptrans/pay_bank', data=data) | Enterprise payment to bank card API
:param true_name: Account holder's name
:param bank_card_no: Bank card number
:param bank_code: Bank code
:param amount: Payment amount, in cents (fen)
:param desc: Payment description
:param out_trade_no: Optional. Merchant order number; must be unique, auto-generated by default
:return: Result information returned by the API | Below is the instruction that describes the task:
### Input:
Enterprise payment to bank card API
:param true_name: Account holder's name
:param bank_card_no: Bank card number
:param bank_code: Bank code
:param amount: Payment amount, in cents (fen)
:param desc: Payment description
:param out_trade_no: Optional. Merchant order number; must be unique, auto-generated by default
:return: Result information returned by the API
### Response:
def transfer_bankcard(self, true_name, bank_card_no, bank_code, amount, desc=None, out_trade_no=None):
"""
Enterprise payment to bank card API
:param true_name: Account holder's name
:param bank_card_no: Bank card number
:param bank_code: Bank code
:param amount: Payment amount, in cents (fen)
:param desc: Payment description
:param out_trade_no: Optional. Merchant order number; must be unique, auto-generated by default
:return: Result information returned by the API
"""
if not out_trade_no:
now = datetime.now()
out_trade_no = '{0}{1}{2}'.format(
self.mch_id,
now.strftime('%Y%m%d%H%M%S'),
random.randint(1000, 10000)
)
data = {
'mch_id': self.mch_id,
'partner_trade_no': out_trade_no,
'amount': amount,
'desc': desc,
'enc_bank_no': self._rsa_encrypt(bank_card_no),
'enc_true_name': self._rsa_encrypt(true_name),
'bank_code': bank_code,
}
return self._post('mmpaysptrans/pay_bank', data=data) |
def delta_crl_distribution_points(self):
"""
Returns delta CRL URLs - does not include complete CRLs
:return:
A list of zero or more DistributionPoint objects
"""
if self._delta_crl_distribution_points is None:
self._delta_crl_distribution_points = self._get_http_crl_distribution_points(self.freshest_crl_value)
return self._delta_crl_distribution_points | Returns delta CRL URLs - does not include complete CRLs
:return:
A list of zero or more DistributionPoint objects | Below is the instruction that describes the task:
### Input:
Returns delta CRL URLs - does not include complete CRLs
:return:
A list of zero or more DistributionPoint objects
### Response:
def delta_crl_distribution_points(self):
"""
Returns delta CRL URLs - does not include complete CRLs
:return:
A list of zero or more DistributionPoint objects
"""
if self._delta_crl_distribution_points is None:
self._delta_crl_distribution_points = self._get_http_crl_distribution_points(self.freshest_crl_value)
return self._delta_crl_distribution_points |
def on_map_clicked(self, pos):
""" Called when the map is clicked """
d = self.declaration
d.clicked({
'click': 'short',
'position': tuple(pos)
}) | Called when the map is clicked | Below is the instruction that describes the task:
### Input:
Called when the map is clicked
### Response:
def on_map_clicked(self, pos):
""" Called when the map is clicked """
d = self.declaration
d.clicked({
'click': 'short',
'position': tuple(pos)
}) |
def fastqfilter(self):
"""Filter the reads into separate files based on taxonomic assignment"""
printtime('Creating filtered .fastqfiles', self.start)
# Create and start threads
for i in range(self.cpus):
# Send the threads to the appropriate destination function
threads = Thread(target=self.filterfastq, args=())
# Set the daemon to true - something to do with thread management
threads.setDaemon(True)
# Start the threading
threads.start()
for sample in self.runmetadata.samples:
self.filterqueue.put(sample)
self.filterqueue.join()
# Print the metadata to file
metadataprinter.MetadataPrinter(self) | Filter the reads into separate files based on taxonomic assignment | Below is the the instruction that describes the task:
### Input:
Filter the reads into separate files based on taxonomic assignment
### Response:
def fastqfilter(self):
"""Filter the reads into separate files based on taxonomic assignment"""
printtime('Creating filtered .fastqfiles', self.start)
# Create and start threads
for i in range(self.cpus):
# Send the threads to the appropriate destination function
threads = Thread(target=self.filterfastq, args=())
# Set the daemon to true - something to do with thread management
threads.setDaemon(True)
# Start the threading
threads.start()
for sample in self.runmetadata.samples:
self.filterqueue.put(sample)
self.filterqueue.join()
# Print the metadata to file
metadataprinter.MetadataPrinter(self) |
def write(self, data):
'''
Write some bytes to the transport.
'''
if not hasattr(self, '_sock'):
return None
try:
self._sock.sendall(data)
if self.connection.debug > 1:
self.connection.logger.debug(
'sent %d bytes to %s' % (len(data), self._host))
return
except EnvironmentError:
# sockets raise this type of error, and since if sendall() fails
# we're left in an indeterminate state, assume that any error we
# catch means that the connection is dead. Note that this
# assumption requires this to be a blocking socket; if we ever
# support non-blocking in this class then this whole method has
# to change a lot.
self.connection.logger.exception(
'error writing to %s' % (self._host))
self.connection.transport_closed(
msg='error writing to %s' % (self._host)) | Write some bytes to the transport. | Below is the the instruction that describes the task:
### Input:
Write some bytes to the transport.
### Response:
def write(self, data):
'''
Write some bytes to the transport.
'''
if not hasattr(self, '_sock'):
return None
try:
self._sock.sendall(data)
if self.connection.debug > 1:
self.connection.logger.debug(
'sent %d bytes to %s' % (len(data), self._host))
return
except EnvironmentError:
# sockets raise this type of error, and since if sendall() fails
# we're left in an indeterminate state, assume that any error we
# catch means that the connection is dead. Note that this
# assumption requires this to be a blocking socket; if we ever
# support non-blocking in this class then this whole method has
# to change a lot.
self.connection.logger.exception(
'error writing to %s' % (self._host))
self.connection.transport_closed(
msg='error writing to %s' % (self._host)) |
def vae(x, z_size, name=None):
"""Simple variational autoencoder without discretization.
Args:
x: Input to the discretization bottleneck.
z_size: Number of bits, where discrete codes range from 1 to 2**z_size.
name: Name for the bottleneck scope.
Returns:
Embedding function, latent, loss, mu and log_sigma.
"""
with tf.variable_scope(name, default_name="vae"):
mu = tf.layers.dense(x, z_size, name="mu")
log_sigma = tf.layers.dense(x, z_size, name="log_sigma")
shape = common_layers.shape_list(x)
epsilon = tf.random_normal([shape[0], shape[1], 1, z_size])
z = mu + tf.exp(log_sigma / 2) * epsilon
kl = 0.5 * tf.reduce_mean(
tf.expm1(log_sigma) + tf.square(mu) - log_sigma, axis=-1)
free_bits = z_size // 4
kl_loss = tf.reduce_mean(tf.maximum(kl - free_bits, 0.0))
return z, kl_loss, mu, log_sigma | Simple variational autoencoder without discretization.
Args:
x: Input to the discretization bottleneck.
z_size: Number of bits, where discrete codes range from 1 to 2**z_size.
name: Name for the bottleneck scope.
Returns:
Embedding function, latent, loss, mu and log_sigma. | Below is the the instruction that describes the task:
### Input:
Simple variational autoencoder without discretization.
Args:
x: Input to the discretization bottleneck.
z_size: Number of bits, where discrete codes range from 1 to 2**z_size.
name: Name for the bottleneck scope.
Returns:
Embedding function, latent, loss, mu and log_sigma.
### Response:
def vae(x, z_size, name=None):
"""Simple variational autoencoder without discretization.
Args:
x: Input to the discretization bottleneck.
z_size: Number of bits, where discrete codes range from 1 to 2**z_size.
name: Name for the bottleneck scope.
Returns:
Embedding function, latent, loss, mu and log_sigma.
"""
with tf.variable_scope(name, default_name="vae"):
mu = tf.layers.dense(x, z_size, name="mu")
log_sigma = tf.layers.dense(x, z_size, name="log_sigma")
shape = common_layers.shape_list(x)
epsilon = tf.random_normal([shape[0], shape[1], 1, z_size])
z = mu + tf.exp(log_sigma / 2) * epsilon
kl = 0.5 * tf.reduce_mean(
tf.expm1(log_sigma) + tf.square(mu) - log_sigma, axis=-1)
free_bits = z_size // 4
kl_loss = tf.reduce_mean(tf.maximum(kl - free_bits, 0.0))
return z, kl_loss, mu, log_sigma |
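A minimal TF1-style sketch of calling the bottleneck above; it assumes TensorFlow 1.x and that the vae() function defined in this row is in scope. The 4-D input shape [batch, length, 1, hidden] is chosen so that it broadcasts against the epsilon tensor built inside the function.
import tensorflow as tf  # TensorFlow 1.x assumed

x = tf.placeholder(tf.float32, [8, 16, 1, 64])    # [batch, length, 1, hidden]
z, kl_loss, mu, log_sigma = vae(x, z_size=32, name="bottleneck")
print(z.shape)        # (8, 16, 1, 32): latent sample per position
print(kl_loss.shape)  # scalar KL penalty to add to a reconstruction loss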
def initialize(self):
"""
Overrides :meth:`~nupic.bindings.regions.PyRegion.initialize`.
"""
# Allocate appropriate temporal memory object
# Retrieve the necessary extra arguments that were handled automatically
autoArgs = dict((name, getattr(self, name))
for name in self._temporalArgNames)
if self._tfdr is None:
tpClass = _getTPClass(self.temporalImp)
if self.temporalImp in ['py', 'cpp', 'r',
'tm_py', 'tm_cpp',
'monitored_tm_py',]:
self._tfdr = tpClass(
numberOfCols=self.columnCount,
cellsPerColumn=self.cellsPerColumn,
**autoArgs)
else:
raise RuntimeError("Invalid temporalImp") | Overrides :meth:`~nupic.bindings.regions.PyRegion.initialize`. | Below is the the instruction that describes the task:
### Input:
Overrides :meth:`~nupic.bindings.regions.PyRegion.initialize`.
### Response:
def initialize(self):
"""
Overrides :meth:`~nupic.bindings.regions.PyRegion.initialize`.
"""
# Allocate appropriate temporal memory object
# Retrieve the necessary extra arguments that were handled automatically
autoArgs = dict((name, getattr(self, name))
for name in self._temporalArgNames)
if self._tfdr is None:
tpClass = _getTPClass(self.temporalImp)
if self.temporalImp in ['py', 'cpp', 'r',
'tm_py', 'tm_cpp',
'monitored_tm_py',]:
self._tfdr = tpClass(
numberOfCols=self.columnCount,
cellsPerColumn=self.cellsPerColumn,
**autoArgs)
else:
raise RuntimeError("Invalid temporalImp") |
def create_archive(
self,
archive_name,
authority_name,
archive_path,
versioned,
raise_on_err=True,
metadata=None,
user_config=None,
tags=None,
helper=False):
'''
Create a new data archive
Returns
-------
archive : object
new :py:class:`~datafs.core.data_archive.DataArchive` object
'''
archive_metadata = self._create_archive_metadata(
archive_name=archive_name,
authority_name=authority_name,
archive_path=archive_path,
versioned=versioned,
raise_on_err=raise_on_err,
metadata=metadata,
user_config=user_config,
tags=tags,
helper=helper)
if raise_on_err:
self._create_archive(
archive_name,
archive_metadata)
else:
self._create_if_not_exists(
archive_name,
archive_metadata)
return self.get_archive(archive_name) | Create a new data archive
Returns
-------
archive : object
new :py:class:`~datafs.core.data_archive.DataArchive` object | Below is the the instruction that describes the task:
### Input:
Create a new data archive
Returns
-------
archive : object
new :py:class:`~datafs.core.data_archive.DataArchive` object
### Response:
def create_archive(
self,
archive_name,
authority_name,
archive_path,
versioned,
raise_on_err=True,
metadata=None,
user_config=None,
tags=None,
helper=False):
'''
Create a new data archive
Returns
-------
archive : object
new :py:class:`~datafs.core.data_archive.DataArchive` object
'''
archive_metadata = self._create_archive_metadata(
archive_name=archive_name,
authority_name=authority_name,
archive_path=archive_path,
versioned=versioned,
raise_on_err=raise_on_err,
metadata=metadata,
user_config=user_config,
tags=tags,
helper=helper)
if raise_on_err:
self._create_archive(
archive_name,
archive_metadata)
else:
self._create_if_not_exists(
archive_name,
archive_metadata)
return self.get_archive(archive_name) |
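An illustrative call for the row above; `manager` is assumed to be an instance of the (unshown) class that defines create_archive(), and the archive and authority names are made up.
archive = manager.create_archive(
    archive_name='project/temperature.csv',
    authority_name='local',
    archive_path='project/temperature.csv',
    versioned=True,
    metadata={'description': 'gridded temperature data'},
    tags=['climate'],
)
print(archive)   # a DataArchive object, per the docstring above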
def agents_email_show(self, email_id, **kwargs):
"https://developer.zendesk.com/rest_api/docs/chat/agents#get-agent-by-email-id"
api_path = "/api/v2/agents/email/{email_id}"
api_path = api_path.format(email_id=email_id)
return self.call(api_path, **kwargs) | https://developer.zendesk.com/rest_api/docs/chat/agents#get-agent-by-email-id | Below is the the instruction that describes the task:
### Input:
https://developer.zendesk.com/rest_api/docs/chat/agents#get-agent-by-email-id
### Response:
def agents_email_show(self, email_id, **kwargs):
"https://developer.zendesk.com/rest_api/docs/chat/agents#get-agent-by-email-id"
api_path = "/api/v2/agents/email/{email_id}"
api_path = api_path.format(email_id=email_id)
return self.call(api_path, **kwargs) |
def update_matches(self, other):
"""
Update this instance's Matches to point to the given other's Matches.
Find Matches with the same TextLogError as our Matches, updating their
score if less than ours and mark our matches for deletion.
If there are no other matches, update ours to point to the other
ClassifiedFailure.
"""
for match in self.error_matches.all():
other_matches = TextLogErrorMatch.objects.filter(
classified_failure=other,
text_log_error=match.text_log_error,
)
if not other_matches:
match.classified_failure = other
match.save(update_fields=['classified_failure'])
continue
# if any of our matches have higher scores than other's matches,
# overwrite with our score.
other_matches.filter(score__lt=match.score).update(score=match.score)
yield match.id | Update this instance's Matches to point to the given other's Matches.
Find Matches with the same TextLogError as our Matches, updating their
score if less than ours and mark our matches for deletion.
If there are no other matches, update ours to point to the other
ClassifiedFailure. | Below is the the instruction that describes the task:
### Input:
Update this instance's Matches to point to the given other's Matches.
Find Matches with the same TextLogError as our Matches, updating their
score if less than ours and mark our matches for deletion.
If there are no other matches, update ours to point to the other
ClassifiedFailure.
### Response:
def update_matches(self, other):
"""
Update this instance's Matches to point to the given other's Matches.
Find Matches with the same TextLogError as our Matches, updating their
score if less than ours and mark our matches for deletion.
If there are no other matches, update ours to point to the other
ClassifiedFailure.
"""
for match in self.error_matches.all():
other_matches = TextLogErrorMatch.objects.filter(
classified_failure=other,
text_log_error=match.text_log_error,
)
if not other_matches:
match.classified_failure = other
match.save(update_fields=['classified_failure'])
continue
# if any of our matches have higher scores than other's matches,
# overwrite with our score.
other_matches.filter(score__lt=match.score).update(score=match.score)
yield match.id |
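A sketch of how the generator above might be consumed when collapsing one ClassifiedFailure into another; only TextLogErrorMatch comes from the code above, the two instances are assumed to exist already.
ids_to_delete = list(this_failure.update_matches(other_failure))   # instances assumed
TextLogErrorMatch.objects.filter(id__in=ids_to_delete).delete()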
def clear_measurements(self):
"""Remove all measurements from self.measurements. Reset the
measurement counter. All IDs are invalidated.
"""
keys = list(self.measurements.keys())
for key in keys:
del(self.measurements[key])
self.meas_counter = -1 | Remove all measurements from self.measurements. Reset the
measurement counter. All IDs are invalidated. | Below is the the instruction that describes the task:
### Input:
Remove all measurements from self.measurements. Reset the
measurement counter. All IDs are invalidated.
### Response:
def clear_measurements(self):
"""Remove all measurements from self.measurements. Reset the
measurement counter. All IDs are invalidated.
"""
keys = list(self.measurements.keys())
for key in keys:
del(self.measurements[key])
self.meas_counter = -1 |
def update_layers_geonode_wm(service, num_layers=None):
"""
Update layers for a WorldMap instance.
Sample endpoint: http://localhost:8000/
"""
wm_api_url = urlparse.urljoin(service.url, 'worldmap/api/2.8/layer/?format=json')
if num_layers:
total = num_layers
else:
response = requests.get(wm_api_url)
data = json.loads(response.content)
total = data['meta']['total_count']
# set srs
# WorldMap supports only 4326, 900913, 3857
for crs_code in ['EPSG:4326', 'EPSG:900913', 'EPSG:3857']:
srs, created = SpatialReferenceSystem.objects.get_or_create(code=crs_code)
service.srs.add(srs)
service.update_validity()
layer_n = 0
limit = 10
for i in range(0, total, limit):
try:
url = (
'%s&order_by=-date&offset=%s&limit=%s' % (wm_api_url, i, limit)
)
LOGGER.debug('Fetching %s' % url)
response = requests.get(url)
data = json.loads(response.content)
for row in data['objects']:
typename = row['typename']
# name = typename.split(':')[1]
name = typename
uuid = row['uuid']
LOGGER.debug('Updating layer %s' % name)
title = row['title']
abstract = row['abstract']
bbox = row['bbox']
page_url = urlparse.urljoin(service.url, 'data/%s' % name)
category = ''
if 'topic_category' in row:
category = row['topic_category']
username = ''
if 'owner_username' in row:
username = row['owner_username']
temporal_extent_start = ''
if 'temporal_extent_start' in row:
temporal_extent_start = row['temporal_extent_start']
temporal_extent_end = ''
if 'temporal_extent_end' in row:
temporal_extent_end = row['temporal_extent_end']
# we use the geoserver virtual layer getcapabilities for wm endpoint
# TODO we should port make geoserver port configurable some way...
# endpoint = urlparse.urljoin(service.url, 'geoserver/geonode/%s/wms?' % name)
endpoint = urlparse.urljoin(service.url, 'geoserver/wms?')
endpoint = endpoint.replace('8000', '8080')
print endpoint
if 'is_public' in row:
is_public = row['is_public']
layer, created = Layer.objects.get_or_create(
service=service, catalog=service.catalog, name=name, uuid=uuid)
if created:
LOGGER.debug('Added a new layer in registry: %s, %s' % (name, uuid))
if layer.active:
links = [['Hypermap:WorldMap', endpoint]]
# update fields
layer.type = 'Hypermap:WorldMap'
layer.title = title
layer.abstract = abstract
layer.is_public = is_public
layer.url = endpoint
layer.page_url = page_url
# category and owner username
layer_wm, created = LayerWM.objects.get_or_create(layer=layer)
layer_wm.category = category
layer_wm.username = username
layer_wm.temporal_extent_start = temporal_extent_start
layer_wm.temporal_extent_end = temporal_extent_end
layer_wm.save()
# bbox [x0, y0, x1, y1]
# check if it is a valid bbox (TODO improve this check)
# bbox = bbox.replace('-inf', 'None')
# bbox = bbox.replace('inf', 'None')
# if bbox.count(',') == 3:
# bbox_list = bbox[1:-1].split(',')
# else:
# bbox_list = [None, None, None, None]
x0 = format_float(bbox[0])
x1 = format_float(bbox[1])
y0 = format_float(bbox[2])
y1 = format_float(bbox[3])
# In many cases for some reason to be fixed GeoServer has x coordinates flipped in WM.
x0, x1 = flip_coordinates(x0, x1)
y0, y1 = flip_coordinates(y0, y1)
layer.bbox_x0 = x0
layer.bbox_y0 = y0
layer.bbox_x1 = x1
layer.bbox_y1 = y1
# keywords
keywords = []
for keyword in row['keywords']:
keywords.append(keyword['name'])
layer.keywords.all().delete()
for keyword in keywords:
layer.keywords.add(keyword)
layer.wkt_geometry = bbox2wktpolygon([x0, y0, x1, y1])
layer.xml = create_metadata_record(
identifier=str(layer.uuid),
source=endpoint,
links=links,
format='Hypermap:WorldMap',
type=layer.csw_type,
relation=service.id_string,
title=layer.title,
alternative=name,
abstract=layer.abstract,
keywords=keywords,
wkt_geometry=layer.wkt_geometry
)
layer.anytext = gen_anytext(layer.title, layer.abstract, keywords)
layer.save()
# dates
add_mined_dates(layer)
add_metadata_dates_to_layer([layer_wm.temporal_extent_start, layer_wm.temporal_extent_end], layer)
layer_n = layer_n + 1
# exits if DEBUG_SERVICES
LOGGER.debug("Updated layer n. %s/%s" % (layer_n, total))
if DEBUG_SERVICES and layer_n == DEBUG_LAYER_NUMBER:
return
except Exception as err:
LOGGER.error('Error! %s' % err)
# update deleted layers. For now we check the whole set of deleted layers
# we should optimize it if the list will grow
# TODO implement the actions application
url = urlparse.urljoin(service.url, 'worldmap/api/2.8/actionlayerdelete/?format=json')
LOGGER.debug('Fetching %s for detecting deleted layers' % url)
try:
response = requests.get(url)
data = json.loads(response.content)
for deleted_layer in data['objects']:
if Layer.objects.filter(uuid=deleted_layer['args']).count() > 0:
layer = Layer.objects.get(uuid=deleted_layer['args'])
layer.was_deleted = True
layer.save()
LOGGER.debug('Layer %s marked as deleted' % layer.uuid)
except Exception as err:
LOGGER.error('Error! %s' % err) | Update layers for a WorldMap instance.
Sample endpoint: http://localhost:8000/ | Below is the the instruction that describes the task:
### Input:
Update layers for a WorldMap instance.
Sample endpoint: http://localhost:8000/
### Response:
def update_layers_geonode_wm(service, num_layers=None):
"""
Update layers for a WorldMap instance.
Sample endpoint: http://localhost:8000/
"""
wm_api_url = urlparse.urljoin(service.url, 'worldmap/api/2.8/layer/?format=json')
if num_layers:
total = num_layers
else:
response = requests.get(wm_api_url)
data = json.loads(response.content)
total = data['meta']['total_count']
# set srs
# WorldMap supports only 4326, 900913, 3857
for crs_code in ['EPSG:4326', 'EPSG:900913', 'EPSG:3857']:
srs, created = SpatialReferenceSystem.objects.get_or_create(code=crs_code)
service.srs.add(srs)
service.update_validity()
layer_n = 0
limit = 10
for i in range(0, total, limit):
try:
url = (
'%s&order_by=-date&offset=%s&limit=%s' % (wm_api_url, i, limit)
)
LOGGER.debug('Fetching %s' % url)
response = requests.get(url)
data = json.loads(response.content)
for row in data['objects']:
typename = row['typename']
# name = typename.split(':')[1]
name = typename
uuid = row['uuid']
LOGGER.debug('Updating layer %s' % name)
title = row['title']
abstract = row['abstract']
bbox = row['bbox']
page_url = urlparse.urljoin(service.url, 'data/%s' % name)
category = ''
if 'topic_category' in row:
category = row['topic_category']
username = ''
if 'owner_username' in row:
username = row['owner_username']
temporal_extent_start = ''
if 'temporal_extent_start' in row:
temporal_extent_start = row['temporal_extent_start']
temporal_extent_end = ''
if 'temporal_extent_end' in row:
temporal_extent_end = row['temporal_extent_end']
# we use the geoserver virtual layer getcapabilities for wm endpoint
# TODO we should port make geoserver port configurable some way...
# endpoint = urlparse.urljoin(service.url, 'geoserver/geonode/%s/wms?' % name)
endpoint = urlparse.urljoin(service.url, 'geoserver/wms?')
endpoint = endpoint.replace('8000', '8080')
print endpoint
if 'is_public' in row:
is_public = row['is_public']
layer, created = Layer.objects.get_or_create(
service=service, catalog=service.catalog, name=name, uuid=uuid)
if created:
LOGGER.debug('Added a new layer in registry: %s, %s' % (name, uuid))
if layer.active:
links = [['Hypermap:WorldMap', endpoint]]
# update fields
layer.type = 'Hypermap:WorldMap'
layer.title = title
layer.abstract = abstract
layer.is_public = is_public
layer.url = endpoint
layer.page_url = page_url
# category and owner username
layer_wm, created = LayerWM.objects.get_or_create(layer=layer)
layer_wm.category = category
layer_wm.username = username
layer_wm.temporal_extent_start = temporal_extent_start
layer_wm.temporal_extent_end = temporal_extent_end
layer_wm.save()
# bbox [x0, y0, x1, y1]
# check if it is a valid bbox (TODO improve this check)
# bbox = bbox.replace('-inf', 'None')
# bbox = bbox.replace('inf', 'None')
# if bbox.count(',') == 3:
# bbox_list = bbox[1:-1].split(',')
# else:
# bbox_list = [None, None, None, None]
x0 = format_float(bbox[0])
x1 = format_float(bbox[1])
y0 = format_float(bbox[2])
y1 = format_float(bbox[3])
# In many cases for some reason to be fixed GeoServer has x coordinates flipped in WM.
x0, x1 = flip_coordinates(x0, x1)
y0, y1 = flip_coordinates(y0, y1)
layer.bbox_x0 = x0
layer.bbox_y0 = y0
layer.bbox_x1 = x1
layer.bbox_y1 = y1
# keywords
keywords = []
for keyword in row['keywords']:
keywords.append(keyword['name'])
layer.keywords.all().delete()
for keyword in keywords:
layer.keywords.add(keyword)
layer.wkt_geometry = bbox2wktpolygon([x0, y0, x1, y1])
layer.xml = create_metadata_record(
identifier=str(layer.uuid),
source=endpoint,
links=links,
format='Hypermap:WorldMap',
type=layer.csw_type,
relation=service.id_string,
title=layer.title,
alternative=name,
abstract=layer.abstract,
keywords=keywords,
wkt_geometry=layer.wkt_geometry
)
layer.anytext = gen_anytext(layer.title, layer.abstract, keywords)
layer.save()
# dates
add_mined_dates(layer)
add_metadata_dates_to_layer([layer_wm.temporal_extent_start, layer_wm.temporal_extent_end], layer)
layer_n = layer_n + 1
# exits if DEBUG_SERVICES
LOGGER.debug("Updated layer n. %s/%s" % (layer_n, total))
if DEBUG_SERVICES and layer_n == DEBUG_LAYER_NUMBER:
return
except Exception as err:
LOGGER.error('Error! %s' % err)
# update deleted layers. For now we check the whole set of deleted layers
# we should optimize it if the list will grow
# TODO implement the actions application
url = urlparse.urljoin(service.url, 'worldmap/api/2.8/actionlayerdelete/?format=json')
LOGGER.debug('Fetching %s for detecting deleted layers' % url)
try:
response = requests.get(url)
data = json.loads(response.content)
for deleted_layer in data['objects']:
if Layer.objects.filter(uuid=deleted_layer['args']).count() > 0:
layer = Layer.objects.get(uuid=deleted_layer['args'])
layer.was_deleted = True
layer.save()
LOGGER.debug('Layer %s marked as deleted' % layer.uuid)
except Exception as err:
LOGGER.error('Error! %s' % err) |
def balance(self, account_id=None):
"""
Returns balance information for a specific account.
Official docs:
https://monzo.com/docs/#read-balance
:param account_id: Monzo account ID
:type account_id: str
:raises: ValueError
:returns: Monzo balance instance
:rtype: MonzoBalance
"""
if not account_id:
if len(self.accounts()) == 1:
account_id = self.accounts()[0].id
else:
raise ValueError("You need to pass account ID")
endpoint = '/balance'
response = self._get_response(
method='get', endpoint=endpoint,
params={
'account_id': account_id,
},
)
return MonzoBalance(data=response.json()) | Returns balance information for a specific account.
Official docs:
https://monzo.com/docs/#read-balance
:param account_id: Monzo account ID
:type account_id: str
:raises: ValueError
:returns: Monzo balance instance
:rtype: MonzoBalance | Below is the the instruction that describes the task:
### Input:
Returns balance information for a specific account.
Official docs:
https://monzo.com/docs/#read-balance
:param account_id: Monzo account ID
:type account_id: str
:raises: ValueError
:returns: Monzo balance instance
:rtype: MonzoBalance
### Response:
def balance(self, account_id=None):
"""
Returns balance information for a specific account.
Official docs:
https://monzo.com/docs/#read-balance
:param account_id: Monzo account ID
:type account_id: str
:raises: ValueError
:returns: Monzo balance instance
:rtype: MonzoBalance
"""
if not account_id:
if len(self.accounts()) == 1:
account_id = self.accounts()[0].id
else:
raise ValueError("You need to pass account ID")
endpoint = '/balance'
response = self._get_response(
method='get', endpoint=endpoint,
params={
'account_id': account_id,
},
)
return MonzoBalance(data=response.json()) |
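A usage sketch for the row above; `client` is assumed to be an authenticated instance of the class that defines balance(), and the attribute names on the returned MonzoBalance object are assumptions rather than something stated in this row.
balance = client.balance()                  # uses the single account if exactly one exists
print(balance.balance, balance.currency)    # attribute names assumed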
def _to_p(self, mode):
"""Convert the image to P or PA mode.
"""
if self.mode.endswith("A"):
chans = self.channels[:-1]
alpha = self.channels[-1]
self._secondary_mode = self.mode[:-1]
else:
chans = self.channels
alpha = None
self._secondary_mode = self.mode
palette = []
selfmask = chans[0].mask
for chn in chans[1:]:
selfmask = np.ma.mask_or(selfmask, chn.mask)
new_chn = np.ma.zeros(self.shape, dtype=int)
color_nb = 0
for i in range(self.height):
for j in range(self.width):
current_col = tuple([chn[i, j] for chn in chans])
try:
next(idx
for idx in range(len(palette))
if palette[idx] == current_col)
except StopIteration:
idx = color_nb
palette.append(current_col)
color_nb = color_nb + 1
new_chn[i, j] = idx
if self.fill_value is not None:
if self.mode.endswith("A"):
current_col = tuple(self.fill_value[:-1])
fill_alpha = [self.fill_value[-1]]
else:
current_col = tuple(self.fill_value)
fill_alpha = []
try:
next(idx
for idx in range(len(palette))
if palette[idx] == current_col)
except StopIteration:
idx = color_nb
palette.append(current_col)
color_nb = color_nb + 1
self.fill_value = [idx] + fill_alpha
new_chn.mask = selfmask
self.palette = palette
if alpha is None:
self.channels = [new_chn]
else:
self.channels = [new_chn, alpha]
self.mode = mode | Convert the image to P or PA mode. | Below is the the instruction that describes the task:
### Input:
Convert the image to P or PA mode.
### Response:
def _to_p(self, mode):
"""Convert the image to P or PA mode.
"""
if self.mode.endswith("A"):
chans = self.channels[:-1]
alpha = self.channels[-1]
self._secondary_mode = self.mode[:-1]
else:
chans = self.channels
alpha = None
self._secondary_mode = self.mode
palette = []
selfmask = chans[0].mask
for chn in chans[1:]:
selfmask = np.ma.mask_or(selfmask, chn.mask)
new_chn = np.ma.zeros(self.shape, dtype=int)
color_nb = 0
for i in range(self.height):
for j in range(self.width):
current_col = tuple([chn[i, j] for chn in chans])
try:
next(idx
for idx in range(len(palette))
if palette[idx] == current_col)
except StopIteration:
idx = color_nb
palette.append(current_col)
color_nb = color_nb + 1
new_chn[i, j] = idx
if self.fill_value is not None:
if self.mode.endswith("A"):
current_col = tuple(self.fill_value[:-1])
fill_alpha = [self.fill_value[-1]]
else:
current_col = tuple(self.fill_value)
fill_alpha = []
try:
next(idx
for idx in range(len(palette))
if palette[idx] == current_col)
except StopIteration:
idx = color_nb
palette.append(current_col)
color_nb = color_nb + 1
self.fill_value = [idx] + fill_alpha
new_chn.mask = selfmask
self.palette = palette
if alpha is None:
self.channels = [new_chn]
else:
self.channels = [new_chn, alpha]
self.mode = mode |
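A self-contained, runnable illustration of the palette-building idea used by _to_p() above: each distinct colour tuple gets the next free index and the pixels are rewritten as indices into the palette (the real method does this per channel on masked arrays).
def build_palette(pixels):
    palette, indexed = [], []
    for colour in pixels:
        if colour not in palette:
            palette.append(colour)           # first time we see this colour
        indexed.append(palette.index(colour))
    return palette, indexed

palette, indexed = build_palette([(255, 0, 0), (0, 0, 255), (255, 0, 0)])
print(palette)   # [(255, 0, 0), (0, 0, 255)]
print(indexed)   # [0, 1, 0]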
def get_group(value):
""" group = display-name ":" [group-list] ";" [CFWS]
"""
group = Group()
token, value = get_display_name(value)
if not value or value[0] != ':':
raise errors.HeaderParseError("expected ':' at end of group "
"display name but found '{}'".format(value))
group.append(token)
group.append(ValueTerminal(':', 'group-display-name-terminator'))
value = value[1:]
if value and value[0] == ';':
group.append(ValueTerminal(';', 'group-terminator'))
return group, value[1:]
token, value = get_group_list(value)
group.append(token)
if not value:
group.defects.append(errors.InvalidHeaderDefect(
"end of header in group"))
if value[0] != ';':
raise errors.HeaderParseError(
"expected ';' at end of group but found {}".format(value))
group.append(ValueTerminal(';', 'group-terminator'))
value = value[1:]
if value and value[0] in CFWS_LEADER:
token, value = get_cfws(value)
group.append(token)
return group, value | group = display-name ":" [group-list] ";" [CFWS] | Below is the the instruction that describes the task:
### Input:
group = display-name ":" [group-list] ";" [CFWS]
### Response:
def get_group(value):
""" group = display-name ":" [group-list] ";" [CFWS]
"""
group = Group()
token, value = get_display_name(value)
if not value or value[0] != ':':
raise errors.HeaderParseError("expected ':' at end of group "
"display name but found '{}'".format(value))
group.append(token)
group.append(ValueTerminal(':', 'group-display-name-terminator'))
value = value[1:]
if value and value[0] == ';':
group.append(ValueTerminal(';', 'group-terminator'))
return group, value[1:]
token, value = get_group_list(value)
group.append(token)
if not value:
group.defects.append(errors.InvalidHeaderDefect(
"end of header in group"))
if value[0] != ';':
raise errors.HeaderParseError(
"expected ';' at end of group but found {}".format(value))
group.append(ValueTerminal(';', 'group-terminator'))
value = value[1:]
if value and value[0] in CFWS_LEADER:
token, value = get_cfws(value)
group.append(token)
return group, value |
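An illustrative parse for the row above; the import path assumes this is the parser from CPython's email._header_value_parser module, which is an assumption rather than something stated in the row.
from email._header_value_parser import get_group   # import path assumed

group, rest = get_group('undisclosed-recipients:;')
print(type(group).__name__, repr(rest))             # Group ''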
def extract_from_commoncrawl(self, warc_download_url, callback_on_article_extracted, valid_hosts=None,
start_date=None, end_date=None,
strict_date=True, reuse_previously_downloaded_files=True, local_download_dir_warc=None,
continue_after_error=True, show_download_progress=False,
log_level=logging.ERROR, delete_warc_after_extraction=True,
log_pathname_fully_extracted_warcs=None):
"""
Crawl and extract articles from the news crawl provided by commoncrawl.org. For each article that was extracted
successfully the callback function callback_on_article_extracted is invoked where the first parameter is the
article object.
:param log_pathname_fully_extracted_warcs:
:param delete_warc_after_extraction:
:param warc_download_url:
:param callback_on_article_extracted:
:param valid_hosts:
:param start_date:
:param end_date:
:param strict_date:
:param reuse_previously_downloaded_files:
:param local_download_dir_warc:
:param continue_after_error:
:param show_download_progress:
:param log_level:
:return:
"""
self.__warc_download_url = warc_download_url
self.__filter_valid_hosts = valid_hosts
self.__filter_start_date = start_date
self.__filter_end_date = end_date
self.__filter_strict_date = strict_date
if local_download_dir_warc:
self.__local_download_dir_warc = local_download_dir_warc
self.__reuse_previously_downloaded_files = reuse_previously_downloaded_files
self.__continue_after_error = continue_after_error
self.__callback_on_article_extracted = callback_on_article_extracted
self.__show_download_progress = show_download_progress
self.__log_level = log_level
self.__delete_warc_after_extraction = delete_warc_after_extraction
self.__log_pathname_fully_extracted_warcs = log_pathname_fully_extracted_warcs
self.__run() | Crawl and extract articles from the news crawl provided by commoncrawl.org. For each article that was extracted
successfully the callback function callback_on_article_extracted is invoked where the first parameter is the
article object.
:param log_pathname_fully_extracted_warcs:
:param delete_warc_after_extraction:
:param warc_download_url:
:param callback_on_article_extracted:
:param valid_hosts:
:param start_date:
:param end_date:
:param strict_date:
:param reuse_previously_downloaded_files:
:param local_download_dir_warc:
:param continue_after_error:
:param show_download_progress:
:param log_level:
:return: | Below is the the instruction that describes the task:
### Input:
Crawl and extract articles from the news crawl provided by commoncrawl.org. For each article that was extracted
successfully the callback function callback_on_article_extracted is invoked where the first parameter is the
article object.
:param log_pathname_fully_extracted_warcs:
:param delete_warc_after_extraction:
:param warc_download_url:
:param callback_on_article_extracted:
:param valid_hosts:
:param start_date:
:param end_date:
:param strict_date:
:param reuse_previously_downloaded_files:
:param local_download_dir_warc:
:param continue_after_error:
:param show_download_progress:
:param log_level:
:return:
### Response:
def extract_from_commoncrawl(self, warc_download_url, callback_on_article_extracted, valid_hosts=None,
start_date=None, end_date=None,
strict_date=True, reuse_previously_downloaded_files=True, local_download_dir_warc=None,
continue_after_error=True, show_download_progress=False,
log_level=logging.ERROR, delete_warc_after_extraction=True,
log_pathname_fully_extracted_warcs=None):
"""
Crawl and extract articles from the news crawl provided by commoncrawl.org. For each article that was extracted
successfully the callback function callback_on_article_extracted is invoked where the first parameter is the
article object.
:param log_pathname_fully_extracted_warcs:
:param delete_warc_after_extraction:
:param warc_download_url:
:param callback_on_article_extracted:
:param valid_hosts:
:param start_date:
:param end_date:
:param strict_date:
:param reuse_previously_downloaded_files:
:param local_download_dir_warc:
:param continue_after_error:
:param show_download_progress:
:param log_level:
:return:
"""
self.__warc_download_url = warc_download_url
self.__filter_valid_hosts = valid_hosts
self.__filter_start_date = start_date
self.__filter_end_date = end_date
self.__filter_strict_date = strict_date
if local_download_dir_warc:
self.__local_download_dir_warc = local_download_dir_warc
self.__reuse_previously_downloaded_files = reuse_previously_downloaded_files
self.__continue_after_error = continue_after_error
self.__callback_on_article_extracted = callback_on_article_extracted
self.__show_download_progress = show_download_progress
self.__log_level = log_level
self.__delete_warc_after_extraction = delete_warc_after_extraction
self.__log_pathname_fully_extracted_warcs = log_pathname_fully_extracted_warcs
self.__run() |
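A sketch of driving the extractor above; only the method signature comes from this row, while the extractor class name, its constructor, and the article attribute used in the callback are assumptions, and the WARC URL is a placeholder.
def on_article(article):
    print(article.title)                       # attribute name assumed

warc_url = '<download URL of one CC-NEWS WARC file>'   # placeholder
extractor = CommonCrawlExtractor()                     # class name assumed
extractor.extract_from_commoncrawl(
    warc_url,
    on_article,
    valid_hosts=['example-news-site.com'],
    show_download_progress=True,
)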
def search(self, field_name, field_value, record=None, **options):
"""
Returns all matching records found in :any:`get_all`
>>> airtable.search('Gender', 'Male')
[{'fields': {'Name': 'John', 'Gender': 'Male'}, ... ]
Args:
field_name (``str``): Name of field to match (column name).
field_value (``str``): Value of field to match.
Keyword Args:
max_records (``int``, optional): The maximum total number of
records that will be returned. See :any:`MaxRecordsParam`
view (``str``, optional): The name or ID of a view.
See :any:`ViewParam`.
fields (``str``, ``list``, optional): Name of field or fields to
be retrieved. Default is all fields. See :any:`FieldsParam`.
sort (``list``, optional): List of fields to sort by.
Default order is ascending. See :any:`SortParam`.
Returns:
records (``list``): All records that matched ``field_value``
"""
records = []
from_name_and_value = AirtableParams.FormulaParam.from_name_and_value
formula = from_name_and_value(field_name, field_value)
options['formula'] = formula
records = self.get_all(**options)
return records | Returns all matching records found in :any:`get_all`
>>> airtable.search('Gender', 'Male')
[{'fields': {'Name': 'John', 'Gender': 'Male'}, ... ]
Args:
field_name (``str``): Name of field to match (column name).
field_value (``str``): Value of field to match.
Keyword Args:
max_records (``int``, optional): The maximum total number of
records that will be returned. See :any:`MaxRecordsParam`
view (``str``, optional): The name or ID of a view.
See :any:`ViewParam`.
fields (``str``, ``list``, optional): Name of field or fields to
be retrieved. Default is all fields. See :any:`FieldsParam`.
sort (``list``, optional): List of fields to sort by.
Default order is ascending. See :any:`SortParam`.
Returns:
records (``list``): All records that matched ``field_value`` | Below is the the instruction that describes the task:
### Input:
Returns all matching records found in :any:`get_all`
>>> airtable.search('Gender', 'Male')
[{'fields': {'Name': 'John', 'Gender': 'Male'}, ... ]
Args:
field_name (``str``): Name of field to match (column name).
field_value (``str``): Value of field to match.
Keyword Args:
max_records (``int``, optional): The maximum total number of
records that will be returned. See :any:`MaxRecordsParam`
view (``str``, optional): The name or ID of a view.
See :any:`ViewParam`.
fields (``str``, ``list``, optional): Name of field or fields to
be retrieved. Default is all fields. See :any:`FieldsParam`.
sort (``list``, optional): List of fields to sort by.
Default order is ascending. See :any:`SortParam`.
Returns:
records (``list``): All records that matched ``field_value``
### Response:
def search(self, field_name, field_value, record=None, **options):
"""
Returns all matching records found in :any:`get_all`
>>> airtable.search('Gender', 'Male')
[{'fields': {'Name': 'John', 'Gender': 'Male'}, ... ]
Args:
field_name (``str``): Name of field to match (column name).
field_value (``str``): Value of field to match.
Keyword Args:
max_records (``int``, optional): The maximum total number of
records that will be returned. See :any:`MaxRecordsParam`
view (``str``, optional): The name or ID of a view.
See :any:`ViewParam`.
fields (``str``, ``list``, optional): Name of field or fields to
be retrieved. Default is all fields. See :any:`FieldsParam`.
sort (``list``, optional): List of fields to sort by.
Default order is ascending. See :any:`SortParam`.
Returns:
records (``list``): All records that matched ``field_value``
"""
records = []
from_name_and_value = AirtableParams.FormulaParam.from_name_and_value
formula = from_name_and_value(field_name, field_value)
options['formula'] = formula
records = self.get_all(**options)
return records |
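A usage sketch mirroring the docstring's own example above; the Airtable constructor arguments are assumptions about how the client is normally built and are not part of this row.
airtable = Airtable('appBaseKey', 'Contacts', api_key='keyXYZ')   # constructor assumed, values are placeholders
males = airtable.search('Gender', 'Male', view='Main View', max_records=10)
for record in males:
    print(record['fields']['Name'])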
def enforce_versioning(force=False):
"""Install versioning on the db."""
connect_str, repo_url = get_version_data()
LOG.warning("Your database uses an unversioned benchbuild schema.")
if not force and not ui.ask(
"Should I enforce version control on your schema?"):
LOG.error("User declined schema versioning.")
return None
repo_version = migrate.version(repo_url, url=connect_str)
migrate.version_control(connect_str, repo_url, version=repo_version)
return repo_version | Install versioning on the db. | Below is the the instruction that describes the task:
### Input:
Install versioning on the db.
### Response:
def enforce_versioning(force=False):
"""Install versioning on the db."""
connect_str, repo_url = get_version_data()
LOG.warning("Your database uses an unversioned benchbuild schema.")
if not force and not ui.ask(
"Should I enforce version control on your schema?"):
LOG.error("User declined schema versioning.")
return None
repo_version = migrate.version(repo_url, url=connect_str)
migrate.version_control(connect_str, repo_url, version=repo_version)
return repo_version |
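A brief usage sketch; it assumes the benchbuild configuration consulted by get_version_data() is already in place, which this row does not show.
version = enforce_versioning(force=True)   # force=True skips the interactive prompt
if version is not None:
    print('schema is now version-controlled at revision', version)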
def set(cls, obj, keys, value, fill_list_value=None):
"""
sets the value for the given keys on obj. if any of the given
keys does not exist, create the intermediate containers.
"""
current = obj
keys_list = keys.split(".")
for idx, key in enumerate(keys_list, 1):
if type(current) == list:
# Validate this key works with a list.
try:
key = int(key)
except ValueError:
raise cls.Missing(key)
try:
# This is the last key, so set the value.
if idx == len(keys_list):
if type(current) == list:
safe_list_set(
current,
key,
lambda: copy.copy(fill_list_value),
value
)
else:
current[key] = value
# done.
return
# More keys left, ensure we have a container for this key.
if type(key) == int:
try:
current[key]
except IndexError:
# Create a list for this key.
cnext = container_for_key(keys_list[idx])
if type(cnext) == list:
def fill_with():
return []
else:
def fill_with():
return {}
safe_list_set(
current,
key,
fill_with,
[] if type(cnext) == list else {}
)
else:
if key not in current:
# Create a list for this key.
current[key] = container_for_key(keys_list[idx])
# Move on to the next key.
current = current[key]
except (IndexError, KeyError, TypeError):
raise cls.Missing(key) | sets the value for the given keys on obj. if any of the given
keys does not exist, create the intermediate containers. | Below is the the instruction that describes the task:
### Input:
sets the value for the given keys on obj. if any of the given
keys does not exist, create the intermediate containers.
### Response:
def set(cls, obj, keys, value, fill_list_value=None):
"""
sets the value for the given keys on obj. if any of the given
keys does not exist, create the intermediate containers.
"""
current = obj
keys_list = keys.split(".")
for idx, key in enumerate(keys_list, 1):
if type(current) == list:
# Validate this key works with a list.
try:
key = int(key)
except ValueError:
raise cls.Missing(key)
try:
# This is the last key, so set the value.
if idx == len(keys_list):
if type(current) == list:
safe_list_set(
current,
key,
lambda: copy.copy(fill_list_value),
value
)
else:
current[key] = value
# done.
return
# More keys left, ensure we have a container for this key.
if type(key) == int:
try:
current[key]
except IndexError:
# Create a list for this key.
cnext = container_for_key(keys_list[idx])
if type(cnext) == list:
def fill_with():
return []
else:
def fill_with():
return {}
safe_list_set(
current,
key,
fill_with,
[] if type(cnext) == list else {}
)
else:
if key not in current:
# Create a list for this key.
current[key] = container_for_key(keys_list[idx])
# Move on to the next key.
current = current[key]
except (IndexError, KeyError, TypeError):
raise cls.Missing(key) |
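An illustrative call for the row above; `Accessor` stands in for whatever class defines this classmethod, and the expected result assumes the unshown helpers container_for_key() and safe_list_set() create lists for numeric keys and dicts otherwise, as their use above suggests.
obj = {}
Accessor.set(obj, 'servers.0.host', 'db1.internal')   # class name assumed
print(obj)   # expected: {'servers': [{'host': 'db1.internal'}]}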
def proximal(self, sigma):
"""Prox operator of TV. It allows the proximal step length to be a
vector of positive elements.
Examples
--------
Check that the proximal operator is the identity for sigma=0
>>> import odl.contrib.solvers.spdhg as spdhg, odl, numpy as np
>>> space = odl.uniform_discr([0, 0], [3, 3], [3, 3])
>>> tvnn = spdhg.TotalVariationNonNegative(space, alpha=2)
>>> x = -space.one()
>>> y = tvnn.proximal(0)(x)
>>> (y-x).norm() < 1e-10
Check that negative functions are mapped to 0
>>> import odl.contrib.solvers.spdhg as spdhg, odl, numpy as np
>>> space = odl.uniform_discr([0, 0], [3, 3], [3, 3])
>>> tvnn = spdhg.TotalVariationNonNegative(space, alpha=2)
>>> x = -space.one()
>>> y = tvnn.proximal(0.1)(x)
>>> y.norm() < 1e-10
"""
if sigma == 0:
return odl.IdentityOperator(self.domain)
else:
def tv_prox(z, out=None):
if out is None:
out = z.space.zero()
opts = self.prox_options
sigma_ = np.copy(sigma)
z_ = z.copy()
if self.strong_convexity > 0:
sigma_ /= (1 + sigma * self.strong_convexity)
z_ /= (1 + sigma * self.strong_convexity)
if opts['name'] == 'FGP':
if opts['warmstart']:
if opts['p'] is None:
opts['p'] = self.grad.range.zero()
p = opts['p']
else:
p = self.grad.range.zero()
sigma_sqrt = np.sqrt(sigma_)
z_ /= sigma_sqrt
grad = sigma_sqrt * self.grad
grad.norm = sigma_sqrt * self.grad.norm
niter = opts['niter']
alpha = self.alpha
out[:] = fgp_dual(p, z_, alpha, niter, grad, self.proj_C,
self.proj_P, tol=opts['tol'])
out *= sigma_sqrt
return out
else:
raise NotImplementedError('Not yet implemented')
return tv_prox | Prox operator of TV. It allows the proximal step length to be a
vector of positive elements.
Examples
--------
Check that the proximal operator is the identity for sigma=0
>>> import odl.contrib.solvers.spdhg as spdhg, odl, numpy as np
>>> space = odl.uniform_discr([0, 0], [3, 3], [3, 3])
>>> tvnn = spdhg.TotalVariationNonNegative(space, alpha=2)
>>> x = -space.one()
>>> y = tvnn.proximal(0)(x)
>>> (y-x).norm() < 1e-10
Check that negative functions are mapped to 0
>>> import odl.contrib.solvers.spdhg as spdhg, odl, numpy as np
>>> space = odl.uniform_discr([0, 0], [3, 3], [3, 3])
>>> tvnn = spdhg.TotalVariationNonNegative(space, alpha=2)
>>> x = -space.one()
>>> y = tvnn.proximal(0.1)(x)
>>> y.norm() < 1e-10 | Below is the the instruction that describes the task:
### Input:
Prox operator of TV. It allows the proximal step length to be a
vector of positive elements.
Examples
--------
Check that the proximal operator is the identity for sigma=0
>>> import odl.contrib.solvers.spdhg as spdhg, odl, numpy as np
>>> space = odl.uniform_discr([0, 0], [3, 3], [3, 3])
>>> tvnn = spdhg.TotalVariationNonNegative(space, alpha=2)
>>> x = -space.one()
>>> y = tvnn.proximal(0)(x)
>>> (y-x).norm() < 1e-10
Check that negative functions are mapped to 0
>>> import odl.contrib.solvers.spdhg as spdhg, odl, numpy as np
>>> space = odl.uniform_discr([0, 0], [3, 3], [3, 3])
>>> tvnn = spdhg.TotalVariationNonNegative(space, alpha=2)
>>> x = -space.one()
>>> y = tvnn.proximal(0.1)(x)
>>> y.norm() < 1e-10
### Response:
def proximal(self, sigma):
"""Prox operator of TV. It allows the proximal step length to be a
vector of positive elements.
Examples
--------
Check that the proximal operator is the identity for sigma=0
>>> import odl.contrib.solvers.spdhg as spdhg, odl, numpy as np
>>> space = odl.uniform_discr([0, 0], [3, 3], [3, 3])
>>> tvnn = spdhg.TotalVariationNonNegative(space, alpha=2)
>>> x = -space.one()
>>> y = tvnn.proximal(0)(x)
>>> (y-x).norm() < 1e-10
Check that negative functions are mapped to 0
>>> import odl.contrib.solvers.spdhg as spdhg, odl, numpy as np
>>> space = odl.uniform_discr([0, 0], [3, 3], [3, 3])
>>> tvnn = spdhg.TotalVariationNonNegative(space, alpha=2)
>>> x = -space.one()
>>> y = tvnn.proximal(0.1)(x)
>>> y.norm() < 1e-10
"""
if sigma == 0:
return odl.IdentityOperator(self.domain)
else:
def tv_prox(z, out=None):
if out is None:
out = z.space.zero()
opts = self.prox_options
sigma_ = np.copy(sigma)
z_ = z.copy()
if self.strong_convexity > 0:
sigma_ /= (1 + sigma * self.strong_convexity)
z_ /= (1 + sigma * self.strong_convexity)
if opts['name'] == 'FGP':
if opts['warmstart']:
if opts['p'] is None:
opts['p'] = self.grad.range.zero()
p = opts['p']
else:
p = self.grad.range.zero()
sigma_sqrt = np.sqrt(sigma_)
z_ /= sigma_sqrt
grad = sigma_sqrt * self.grad
grad.norm = sigma_sqrt * self.grad.norm
niter = opts['niter']
alpha = self.alpha
out[:] = fgp_dual(p, z_, alpha, niter, grad, self.proj_C,
self.proj_P, tol=opts['tol'])
out *= sigma_sqrt
return out
else:
raise NotImplementedError('Not yet implemented')
return tv_prox |
def depends_on(self, dependency):
"""
List of packages that depend on dependency
:param dependency: package name, e.g. 'vext' or 'Pillow'
"""
packages = self.package_info()
return [package for package in packages if dependency in package.get("requires", "")] | List of packages that depend on dependency
:param dependency: package name, e.g. 'vext' or 'Pillow' | Below is the the instruction that describes the task:
### Input:
List of packages that depend on dependency
:param dependency: package name, e.g. 'vext' or 'Pillow'
### Response:
def depends_on(self, dependency):
"""
List of packages that depend on dependency
:param dependency: package name, e.g. 'vext' or 'Pillow'
"""
packages = self.package_info()
return [package for package in packages if dependency in package.get("requires", "")] |
def next_retrieve_group_item(self, last_item=None, entry=None):
"""Return the item to start from in next reviews group."""
next_item = None
gerrit_version = self.version
if gerrit_version[0] == 2 and gerrit_version[1] > 9:
if last_item is None:
next_item = 0
else:
next_item = last_item
elif gerrit_version[0] == 2 and gerrit_version[1] == 9:
# https://groups.google.com/forum/#!topic/repo-discuss/yQgRR5hlS3E
cause = "Gerrit 2.9.0 does not support pagination"
raise BackendError(cause=cause)
else:
if entry is not None:
next_item = entry['sortKey']
return next_item | Return the item to start from in next reviews group. | Below is the the instruction that describes the task:
### Input:
Return the item to start from in next reviews group.
### Response:
def next_retrieve_group_item(self, last_item=None, entry=None):
"""Return the item to start from in next reviews group."""
next_item = None
gerrit_version = self.version
if gerrit_version[0] == 2 and gerrit_version[1] > 9:
if last_item is None:
next_item = 0
else:
next_item = last_item
elif gerrit_version[0] == 2 and gerrit_version[1] == 9:
# https://groups.google.com/forum/#!topic/repo-discuss/yQgRR5hlS3E
cause = "Gerrit 2.9.0 does not support pagination"
raise BackendError(cause=cause)
else:
if entry is not None:
next_item = entry['sortKey']
return next_item |
def qualified_name(self):
"""Get the qualified name of the variant.
Returns:
str: Name of the variant with version and index, eg "maya-2016.1[1]".
"""
idxstr = '' if self.index is None else str(self.index)
return "%s[%s]" % (self.qualified_package_name, idxstr) | Get the qualified name of the variant.
Returns:
str: Name of the variant with version and index, eg "maya-2016.1[1]". | Below is the the instruction that describes the task:
### Input:
Get the qualified name of the variant.
Returns:
str: Name of the variant with version and index, eg "maya-2016.1[1]".
### Response:
def qualified_name(self):
"""Get the qualified name of the variant.
Returns:
str: Name of the variant with version and index, eg "maya-2016.1[1]".
"""
idxstr = '' if self.index is None else str(self.index)
return "%s[%s]" % (self.qualified_package_name, idxstr) |
def diff(self, obj=None):
"""Determine if something has changed or not"""
if self.no_resource:
return NOOP
if not self.present:
if self.existing:
return DEL
return NOOP
if not obj:
obj = self.obj()
is_diff = NOOP
if self.present and self.existing:
if isinstance(self.existing, dict):
current = dict(self.existing)
if 'refresh_interval' in current:
del current['refresh_interval']
if diff_dict(current, obj):
is_diff = CHANGED
elif is_unicode(self.existing):
if self.existing != obj:
is_diff = CHANGED
elif self.present and not self.existing:
is_diff = ADD
return is_diff | Determine if something has changed or not | Below is the the instruction that describes the task:
### Input:
Determine if something has changed or not
### Response:
def diff(self, obj=None):
"""Determine if something has changed or not"""
if self.no_resource:
return NOOP
if not self.present:
if self.existing:
return DEL
return NOOP
if not obj:
obj = self.obj()
is_diff = NOOP
if self.present and self.existing:
if isinstance(self.existing, dict):
current = dict(self.existing)
if 'refresh_interval' in current:
del current['refresh_interval']
if diff_dict(current, obj):
is_diff = CHANGED
elif is_unicode(self.existing):
if self.existing != obj:
is_diff = CHANGED
elif self.present and not self.existing:
is_diff = ADD
return is_diff |
def __get_pricedb_session(self):
""" Provides initialization and access to module-level session """
from pricedb import dal
if not self.pricedb_session:
self.pricedb_session = dal.get_default_session()
return self.pricedb_session | Provides initialization and access to module-level session | Below is the the instruction that describes the task:
### Input:
Provides initialization and access to module-level session
### Response:
def __get_pricedb_session(self):
""" Provides initialization and access to module-level session """
from pricedb import dal
if not self.pricedb_session:
self.pricedb_session = dal.get_default_session()
return self.pricedb_session |
def do_dep(self, args):
"""Adds the name and attribute of a dependent variable to the list
for plotting/tabulating functions."""
vals = args.split()
twin = None
if len(vals) > 1:
if len(vals) == 2:
var, plot = vals
elif len(vals) == 3:
var, plot, twin = vals
else:
var = vals[0]
plot = None
if not self._validate_var(var):
msg.err("Variable {} is not a valid file name and property combination.".format(var))
else:
if var in self.curargs["dependents"]:
self.curargs["dependents"].remove(var)
self.curargs["dependents"].append(var)
if plot is not None:
self.curargs["plottypes"][var] = plot
if twin is not None:
self.curargs["twinplots"][var] = twin | Adds the name and attribute of a dependent variable to the list
for plotting/tabulating functions. | Below is the the instruction that describes the task:
### Input:
Adds the name and attribute of a dependent variable to the list
for plotting/tabulating functions.
### Response:
def do_dep(self, args):
"""Adds the name and attribute of a dependent variable to the list
for plotting/tabulating functions."""
vals = args.split()
twin = None
if len(vals) > 1:
if len(vals) == 2:
var, plot = vals
elif len(vals) == 3:
var, plot, twin = vals
else:
var = vals[0]
plot = None
if not self._validate_var(var):
msg.err("Variable {} is not a valid file name and property combination.".format(var))
else:
if var in self.curargs["dependents"]:
self.curargs["dependents"].remove(var)
self.curargs["dependents"].append(var)
if plot is not None:
self.curargs["plottypes"][var] = plot
if twin is not None:
self.curargs["twinplots"][var] = twin |
def is_blocked(self, ip):
"""Determine if an IP address should be considered blocked."""
blocked = True
if ip in self.allowed_admin_ips:
blocked = False
for allowed_range in self.allowed_admin_ip_ranges:
if ipaddress.ip_address(ip) in ipaddress.ip_network(allowed_range):
blocked = False
return blocked | Determine if an IP address should be considered blocked. | Below is the the instruction that describes the task:
### Input:
Determine if an IP address should be considered blocked.
### Response:
def is_blocked(self, ip):
"""Determine if an IP address should be considered blocked."""
blocked = True
if ip in self.allowed_admin_ips:
blocked = False
for allowed_range in self.allowed_admin_ip_ranges:
if ipaddress.ip_address(ip) in ipaddress.ip_network(allowed_range):
blocked = False
return blocked |
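A self-contained, runnable sketch of the check above (Python 3); the class name and the two configuration attributes are assumptions standing in for whatever object the real method lives on.
import ipaddress

class AdminFilter:
    allowed_admin_ips = ['127.0.0.1']            # assumed config attribute
    allowed_admin_ip_ranges = ['10.0.0.0/8']     # assumed config attribute

    def is_blocked(self, ip):
        blocked = True
        if ip in self.allowed_admin_ips:
            blocked = False
        for allowed_range in self.allowed_admin_ip_ranges:
            if ipaddress.ip_address(ip) in ipaddress.ip_network(allowed_range):
                blocked = False
        return blocked

print(AdminFilter().is_blocked('10.1.2.3'))    # False: inside the allowed range
print(AdminFilter().is_blocked('192.0.2.9'))   # True: not allowed anywhere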
def addAction(self, action, checked=None, autoBuild=True):
"""
Adds the inputed action to this widget's action group. This will auto-\
create a new group if no group is already defined.
:param action | <QAction> || <str>
:return <QAction>
"""
# clear the holder
actions = self._actionGroup.actions()
if actions and actions[0].objectName() == 'place_holder':
self._actionGroup.removeAction(actions[0])
actions[0].deleteLater()
# create an action from the name
if not isinstance(action, QAction):
action_name = nativestring(action)
action = QAction(action_name, self)
action.setObjectName(action_name)
action.setCheckable(self.isCheckable())
# auto-check the first option
if checked or (not self._actionGroup.actions() and checked is None):
action.setChecked(True)
elif self.isCheckable():
action.setCheckable(True)
if not self.currentAction():
action.setChecked(True)
self._actionGroup.addAction(action)
if autoBuild:
self.rebuild()
return action | Adds the inputed action to this widget's action group. This will auto-\
create a new group if no group is already defined.
:param action | <QAction> || <str>
:return <QAction> | Below is the the instruction that describes the task:
### Input:
Adds the inputed action to this widget's action group. This will auto-\
create a new group if no group is already defined.
:param action | <QAction> || <str>
:return <QAction>
### Response:
def addAction(self, action, checked=None, autoBuild=True):
"""
Adds the inputed action to this widget's action group. This will auto-\
create a new group if no group is already defined.
:param action | <QAction> || <str>
:return <QAction>
"""
# clear the holder
actions = self._actionGroup.actions()
if actions and actions[0].objectName() == 'place_holder':
self._actionGroup.removeAction(actions[0])
actions[0].deleteLater()
# create an action from the name
if not isinstance(action, QAction):
action_name = nativestring(action)
action = QAction(action_name, self)
action.setObjectName(action_name)
action.setCheckable(self.isCheckable())
# auto-check the first option
if checked or (not self._actionGroup.actions() and checked is None):
action.setChecked(True)
elif self.isCheckable():
action.setCheckable(True)
if not self.currentAction():
action.setChecked(True)
self._actionGroup.addAction(action)
if autoBuild:
self.rebuild()
return action |
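A usage sketch for the row above; `button` is assumed to be an instance of the (unshown) Qt widget class that defines addAction() and rebuild().
first = button.addAction('Latest Version', checked=True, autoBuild=False)
second = button.addAction('All Versions', autoBuild=False)
button.rebuild()   # rebuild once after adding several actions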
def paint(self, painter, option, index):
"""Paint checkbox and text
_
|_| My label
"""
body_rect = QtCore.QRectF(option.rect)
check_rect = QtCore.QRectF(body_rect)
check_rect.setWidth(check_rect.height())
check_rect.adjust(6, 6, -6, -6)
check_color = colors["idle"]
if index.data(model.IsProcessing) is True:
check_color = colors["active"]
elif index.data(model.HasFailed) is True:
check_color = colors["warning"]
elif index.data(model.HasSucceeded) is True:
check_color = colors["ok"]
elif index.data(model.HasProcessed) is True:
check_color = colors["ok"]
metrics = painter.fontMetrics()
label_rect = QtCore.QRectF(option.rect.adjusted(
check_rect.width() + 12, 2, 0, -2))
assert label_rect.width() > 0
label = index.data(model.Label)
label = metrics.elidedText(label,
QtCore.Qt.ElideRight,
label_rect.width() - 20)
font_color = colors["idle"]
if not index.data(model.IsChecked):
font_color = colors["inactive"]
# Maintain reference to state, so we can restore it once we're done
painter.save()
# Draw label
painter.setFont(fonts["h4"])
painter.setPen(QtGui.QPen(font_color))
painter.drawText(label_rect, label)
# Draw action icon
if index.data(model.ActionIconVisible):
painter.save()
if index.data(model.ActionIdle):
color = colors["idle"]
elif index.data(model.IsProcessing):
color = colors["active"]
elif index.data(model.ActionFailed):
color = colors["warning"]
else:
color = colors["ok"]
painter.setFont(fonts["smallAwesome"])
painter.setPen(QtGui.QPen(color))
icon_rect = QtCore.QRectF(option.rect.adjusted(
label_rect.width() + 1, label_rect.height() / 3, 0, 0))
painter.drawText(icon_rect, icons["action"])
painter.restore()
# Draw checkbox
pen = QtGui.QPen(check_color, 1)
painter.setPen(pen)
if index.data(model.IsOptional):
painter.drawRect(check_rect)
if index.data(model.IsChecked):
painter.fillRect(check_rect, check_color)
elif not index.data(model.IsIdle) and index.data(model.IsChecked):
painter.fillRect(check_rect, check_color)
if option.state & QtWidgets.QStyle.State_MouseOver:
painter.fillRect(body_rect, colors["hover"])
if option.state & QtWidgets.QStyle.State_Selected:
painter.fillRect(body_rect, colors["selected"])
# Ok, we're done, tidy up.
painter.restore() | Paint checkbox and text
_
|_| My label | Below is the the instruction that describes the task:
### Input:
Paint checkbox and text
_
|_| My label
### Response:
def paint(self, painter, option, index):
"""Paint checkbox and text
_
|_| My label
"""
body_rect = QtCore.QRectF(option.rect)
check_rect = QtCore.QRectF(body_rect)
check_rect.setWidth(check_rect.height())
check_rect.adjust(6, 6, -6, -6)
check_color = colors["idle"]
if index.data(model.IsProcessing) is True:
check_color = colors["active"]
elif index.data(model.HasFailed) is True:
check_color = colors["warning"]
elif index.data(model.HasSucceeded) is True:
check_color = colors["ok"]
elif index.data(model.HasProcessed) is True:
check_color = colors["ok"]
metrics = painter.fontMetrics()
label_rect = QtCore.QRectF(option.rect.adjusted(
check_rect.width() + 12, 2, 0, -2))
assert label_rect.width() > 0
label = index.data(model.Label)
label = metrics.elidedText(label,
QtCore.Qt.ElideRight,
label_rect.width() - 20)
font_color = colors["idle"]
if not index.data(model.IsChecked):
font_color = colors["inactive"]
# Maintain reference to state, so we can restore it once we're done
painter.save()
# Draw label
painter.setFont(fonts["h4"])
painter.setPen(QtGui.QPen(font_color))
painter.drawText(label_rect, label)
# Draw action icon
if index.data(model.ActionIconVisible):
painter.save()
if index.data(model.ActionIdle):
color = colors["idle"]
elif index.data(model.IsProcessing):
color = colors["active"]
elif index.data(model.ActionFailed):
color = colors["warning"]
else:
color = colors["ok"]
painter.setFont(fonts["smallAwesome"])
painter.setPen(QtGui.QPen(color))
icon_rect = QtCore.QRectF(option.rect.adjusted(
label_rect.width() + 1, label_rect.height() / 3, 0, 0))
painter.drawText(icon_rect, icons["action"])
painter.restore()
# Draw checkbox
pen = QtGui.QPen(check_color, 1)
painter.setPen(pen)
if index.data(model.IsOptional):
painter.drawRect(check_rect)
if index.data(model.IsChecked):
painter.fillRect(check_rect, check_color)
elif not index.data(model.IsIdle) and index.data(model.IsChecked):
painter.fillRect(check_rect, check_color)
if option.state & QtWidgets.QStyle.State_MouseOver:
painter.fillRect(body_rect, colors["hover"])
if option.state & QtWidgets.QStyle.State_Selected:
painter.fillRect(body_rect, colors["selected"])
# Ok, we're done, tidy up.
painter.restore() |
def copy(self, texture, source_rect=None, dest_rect=None, rotation=0, center=None, flip=lib.SDL_FLIP_NONE):
"""Copy a portion of the source texture to the current rendering target, rotating it by angle around the given center.
Args:
texture (Texture): The source texture.
source_rect (Rect): The source rectangle, or None for the entire texture.
dest_rect (Rect): The destination rectangle, or None for the entire rendering target.
rotation (float): An angle in degrees that indicates the rotation that will be applied to dest_rect.
center (Point): The point around which dest_rect will be rotated (if None, rotation will be done around
dest_rect.w/2, dest_rect.h/2).
flip (int): A value stating which flipping actions should be performed on the texture.
Raises:
SDLError: If an error is encountered.
"""
if source_rect == None:
source_rect_ptr = ffi.NULL
else:
source_rect_ptr = source_rect._ptr
if dest_rect == None:
dest_rect_ptr = ffi.NULL
else:
dest_rect_ptr = dest_rect._ptr
if center == None:
center_ptr = ffi.NULL
else:
center_ptr = center._ptr
check_int_err(lib.SDL_RenderCopyEx(self._ptr, texture._ptr, source_rect_ptr, dest_rect_ptr, rotation, center_ptr, flip)) | Copy a portion of the source texture to the current rendering target, rotating it by angle around the given center.
Args:
texture (Texture): The source texture.
source_rect (Rect): The source rectangle, or None for the entire texture.
dest_rect (Rect): The destination rectangle, or None for the entire rendering target.
rotation (float): An angle in degrees that indicates the rotation that will be applied to dest_rect.
center (Point): The point around which dest_rect will be rotated (if None, rotation will be done around
dest_rect.w/2, dest_rect.h/2).
flip (int): A value stating which flipping actions should be performed on the texture.
Raises:
SDLError: If an error is encountered. | Below is the the instruction that describes the task:
### Input:
Copy a portion of the source texture to the current rendering target, rotating it by angle around the given center.
Args:
texture (Texture): The source texture.
source_rect (Rect): The source rectangle, or None for the entire texture.
dest_rect (Rect): The destination rectangle, or None for the entire rendering target.
rotation (float): An angle in degrees that indicates the rotation that will be applied to dest_rect.
center (Point): The point around which dest_rect will be rotated (if None, rotation will be done around
dest_rect.w/2, dest_rect.h/2).
flip (int): A value stating which flipping actions should be performed on the texture.
Raises:
SDLError: If an error is encountered.
### Response:
def copy(self, texture, source_rect=None, dest_rect=None, rotation=0, center=None, flip=lib.SDL_FLIP_NONE):
"""Copy a portion of the source texture to the current rendering target, rotating it by angle around the given center.
Args:
texture (Texture): The source texture.
source_rect (Rect): The source rectangle, or None for the entire texture.
dest_rect (Rect): The destination rectangle, or None for the entire rendering target.
rotation (float): An angle in degrees that indicates the rotation that will be applied to dest_rect.
center (Point): The point around which dest_rect will be rotated (if None, rotation will be done around
dest_rect.w/2, dest_rect.h/2).
flip (int): A value stating which flipping actions should be performed on the texture.
Raises:
SDLError: If an error is encountered.
"""
if source_rect == None:
source_rect_ptr = ffi.NULL
else:
source_rect_ptr = source_rect._ptr
if dest_rect == None:
dest_rect_ptr = ffi.NULL
else:
dest_rect_ptr = dest_rect._ptr
if center == None:
center_ptr = ffi.NULL
else:
center_ptr = center._ptr
check_int_err(lib.SDL_RenderCopyEx(self._ptr, texture._ptr, source_rect_ptr, dest_rect_ptr, rotation, center_ptr, flip)) |
def get_author_label(self, urn):
"""Get the label corresponding to the author identified by the CTS URN.
try to get a lang=en label (if multiple labels in this lang exist pick the shortest)
try to get a lang=la label (if multiple labels in this lang exist pick the shortest)
try to get a lang=None label (if multiple labels in this lang exist pick the shortest)
returns None if no name is found
"""
author = self.get_resource_by_urn(urn)
names = author.get_names()
en_names = sorted([name[1] for name in names if name[0] == "en"], key=len)
try:
assert len(en_names) > 0
return en_names[0]
except Exception as e:
none_names = sorted([name[1] for name in names if name[0] == None], key=len)
try:
return none_names[0]
except Exception as e:
la_names = sorted([name[1] for name in names if name[0] == "la"], key=len)
try:
assert len(la_names) > 0
return la_names[0]
except Exception as e:
return None | Get the label corresponding to the author identified by the CTS URN.
try to get a lang=en label (if multiple labels in this lang exist pick the shortest)
try to get a lang=la label (if multiple labels in this lang exist pick the shortest)
try to get a lang=None label (if multiple labels in this lang exist pick the shortest)
returns None if no name is found | Below is the the instruction that describes the task:
### Input:
Get the label corresponding to the author identified by the CTS URN.
try to get a lang=en label (if multiple labels in this lang exist pick the shortest)
try to get a lang=la label (if multiple labels in this lang exist pick the shortest)
try to get a lang=None label (if multiple labels in this lang exist pick the shortest)
returns None if no name is found
### Response:
def get_author_label(self, urn):
"""Get the label corresponding to the author identified by the CTS URN.
try to get a lang=en label (if multiple labels in this lang exist pick the shortest)
try to get a lang=la label (if multiple labels in this lang exist pick the shortest)
try to get a lang=None label (if multiple labels in this lang exist pick the shortest)
returns None if no name is found
"""
author = self.get_resource_by_urn(urn)
names = author.get_names()
en_names = sorted([name[1] for name in names if name[0] == "en"], key=len)
try:
assert len(en_names) > 0
return en_names[0]
except Exception as e:
none_names = sorted([name[1] for name in names if name[0] == None], key=len)
try:
return none_names[0]
except Exception as e:
la_names = sorted([name[1] for name in names if name[0] == "la"], key=len)
try:
assert len(la_names) > 0
return la_names[0]
except Exception as e:
return None |
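The preference order above (shortest English name, then a language-less name, then Latin) is easy to check in isolation; the pick_label helper and the sample tuples below are illustrative stand-ins for the knowledge-base objects, not part of the library:
def pick_label(names):
    # names: list of (lang, name) tuples, mirroring what author.get_names() returns
    for lang in ("en", None, "la"):
        candidates = sorted([name for code, name in names if code == lang], key=len)
        if candidates:
            return candidates[0]
    return None
names = [("la", "Publius Vergilius Maro"), ("en", "Vergil the poet"), ("en", "Virgil"), (None, "Vergilius")]
print(pick_label(names))  # 'Virgil'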
def _make_value(self, field_name, field_spec, value_spec, field_params, value):
"""
Constructs an appropriate Asn1Value object for a field
:param field_name:
A unicode string of the field name
:param field_spec:
An Asn1Value class that is the field spec
:param value_spec:
An Asn1Value class that is the value spec
:param field_params:
None or a dict of params for the field spec
:param value:
The value to construct an Asn1Value object from
:return:
An instance of a child class of Asn1Value
"""
if value is None and 'optional' in field_params:
return VOID
specs_different = field_spec != value_spec
is_any = issubclass(field_spec, Any)
if issubclass(value_spec, Choice):
is_asn1value = isinstance(value, Asn1Value)
is_tuple = isinstance(value, tuple) and len(value) == 2
is_dict = isinstance(value, dict) and len(value) == 1
if not is_asn1value and not is_tuple and not is_dict:
raise ValueError(unwrap(
'''
Can not set a native python value to %s, which has the
choice type of %s - value must be an instance of Asn1Value
''',
field_name,
type_name(value_spec)
))
if is_tuple or is_dict:
value = value_spec(value)
if not isinstance(value, value_spec):
wrapper = value_spec()
wrapper.validate(value.class_, value.tag, value.contents)
wrapper._parsed = value
new_value = wrapper
else:
new_value = value
elif isinstance(value, field_spec):
new_value = value
if specs_different:
new_value.parse(value_spec)
elif (not specs_different or is_any) and not isinstance(value, value_spec):
if (not is_any or specs_different) and isinstance(value, Asn1Value):
raise TypeError(unwrap(
'''
%s value must be %s, not %s
''',
field_name,
type_name(value_spec),
type_name(value)
))
new_value = value_spec(value, **field_params)
else:
if isinstance(value, value_spec):
new_value = value
else:
if isinstance(value, Asn1Value):
raise TypeError(unwrap(
'''
%s value must be %s, not %s
''',
field_name,
type_name(value_spec),
type_name(value)
))
new_value = value_spec(value)
# For when the field is OctetString or OctetBitString with embedded
# values we need to wrap the value in the field spec to get the
# appropriate encoded value.
if specs_different and not is_any:
wrapper = field_spec(value=new_value.dump(), **field_params)
wrapper._parsed = (new_value, new_value.__class__, None)
new_value = wrapper
new_value = _fix_tagging(new_value, field_params)
return new_value | Constructs an appropriate Asn1Value object for a field
:param field_name:
A unicode string of the field name
:param field_spec:
An Asn1Value class that is the field spec
:param value_spec:
An Asn1Value class that is the value spec
:param field_params:
None or a dict of params for the field spec
:param value:
The value to construct an Asn1Value object from
:return:
An instance of a child class of Asn1Value | Below is the the instruction that describes the task:
### Input:
Constructs an appropriate Asn1Value object for a field
:param field_name:
A unicode string of the field name
:param field_spec:
An Asn1Value class that is the field spec
:param value_spec:
An Asn1Value class that is the value spec
:param field_params:
None or a dict of params for the field spec
:param value:
The value to construct an Asn1Value object from
:return:
An instance of a child class of Asn1Value
### Response:
def _make_value(self, field_name, field_spec, value_spec, field_params, value):
"""
Constructs an appropriate Asn1Value object for a field
:param field_name:
A unicode string of the field name
:param field_spec:
An Asn1Value class that is the field spec
:param value_spec:
An Asn1Value class that is the value spec
:param field_params:
None or a dict of params for the field spec
:param value:
The value to construct an Asn1Value object from
:return:
An instance of a child class of Asn1Value
"""
if value is None and 'optional' in field_params:
return VOID
specs_different = field_spec != value_spec
is_any = issubclass(field_spec, Any)
if issubclass(value_spec, Choice):
is_asn1value = isinstance(value, Asn1Value)
is_tuple = isinstance(value, tuple) and len(value) == 2
is_dict = isinstance(value, dict) and len(value) == 1
if not is_asn1value and not is_tuple and not is_dict:
raise ValueError(unwrap(
'''
Can not set a native python value to %s, which has the
choice type of %s - value must be an instance of Asn1Value
''',
field_name,
type_name(value_spec)
))
if is_tuple or is_dict:
value = value_spec(value)
if not isinstance(value, value_spec):
wrapper = value_spec()
wrapper.validate(value.class_, value.tag, value.contents)
wrapper._parsed = value
new_value = wrapper
else:
new_value = value
elif isinstance(value, field_spec):
new_value = value
if specs_different:
new_value.parse(value_spec)
elif (not specs_different or is_any) and not isinstance(value, value_spec):
if (not is_any or specs_different) and isinstance(value, Asn1Value):
raise TypeError(unwrap(
'''
%s value must be %s, not %s
''',
field_name,
type_name(value_spec),
type_name(value)
))
new_value = value_spec(value, **field_params)
else:
if isinstance(value, value_spec):
new_value = value
else:
if isinstance(value, Asn1Value):
raise TypeError(unwrap(
'''
%s value must be %s, not %s
''',
field_name,
type_name(value_spec),
type_name(value)
))
new_value = value_spec(value)
# For when the field is OctetString or OctetBitString with embedded
# values we need to wrap the value in the field spec to get the
# appropriate encoded value.
if specs_different and not is_any:
wrapper = field_spec(value=new_value.dump(), **field_params)
wrapper._parsed = (new_value, new_value.__class__, None)
new_value = wrapper
new_value = _fix_tagging(new_value, field_params)
return new_value |
def kmeans(*args, **kwargs):
"""
Find centers of multi-modal clusters of data using a kmeans approach
modified for spherical measurements.
Parameters
----------
*args : 2 or 3 sequences of measurements
By default, this will be expected to be ``strike`` & ``dip``, both
array-like sequences representing poles to planes. (Rake measurements
require three parameters, thus the variable number of arguments.) The
``measurement`` kwarg controls how these arguments are interpreted.
num : int
The number of clusters to find. Defaults to 2.
bidirectional : bool
Whether or not the measurements are bi-directional linear/planar
features or directed vectors. Defaults to True.
tolerance : float
Iteration will continue until the centers have not changed by more
than this amount. Defaults to 1e-5.
measurement : string, optional
Controls how the input arguments are interpreted. Defaults to
``"poles"``. May be one of the following:
``"poles"`` : strikes, dips
Arguments are assumed to be sequences of strikes and dips of
planes. Poles to these planes are used for analysis.
``"lines"`` : plunges, bearings
Arguments are assumed to be sequences of plunges and bearings
of linear features.
``"rakes"`` : strikes, dips, rakes
Arguments are assumed to be sequences of strikes, dips, and
rakes along the plane.
``"radians"`` : lon, lat
Arguments are assumed to be "raw" longitudes and latitudes in
the stereonet's underlying coordinate system.
Returns
-------
centers : An Nx2 array-like
Longitude and latitude in radians of the centers of each cluster.
"""
lon, lat = _convert_measurements(args, kwargs.get('measurement', 'poles'))
num = kwargs.get('num', 2)
bidirectional = kwargs.get('bidirectional', True)
tolerance = kwargs.get('tolerance', 1e-5)
points = lon, lat
dist = lambda x: stereonet_math.angular_distance(x, points, bidirectional)
center_lon = np.random.choice(lon, num)
center_lat = np.random.choice(lat, num)
centers = np.column_stack([center_lon, center_lat])
while True:
dists = np.array([dist(item) for item in centers]).T
closest = dists.argmin(axis=1)
new_centers = []
for i in range(num):
mask = closest == i
_, vecs = cov_eig(lon[mask], lat[mask], bidirectional)
new_centers.append(stereonet_math.cart2sph(*vecs[:,-1]))
if np.allclose(centers, new_centers, atol=tolerance):
break
else:
centers = new_centers
return centers | Find centers of multi-modal clusters of data using a kmeans approach
modified for spherical measurements.
Parameters
----------
*args : 2 or 3 sequences of measurements
By default, this will be expected to be ``strike`` & ``dip``, both
array-like sequences representing poles to planes. (Rake measurements
require three parameters, thus the variable number of arguments.) The
``measurement`` kwarg controls how these arguments are interpreted.
num : int
The number of clusters to find. Defaults to 2.
bidirectional : bool
Whether or not the measurements are bi-directional linear/planar
features or directed vectors. Defaults to True.
tolerance : float
Iteration will continue until the centers have not changed by more
than this amount. Defaults to 1e-5.
measurement : string, optional
Controls how the input arguments are interpreted. Defaults to
``"poles"``. May be one of the following:
``"poles"`` : strikes, dips
Arguments are assumed to be sequences of strikes and dips of
planes. Poles to these planes are used for analysis.
``"lines"`` : plunges, bearings
Arguments are assumed to be sequences of plunges and bearings
of linear features.
``"rakes"`` : strikes, dips, rakes
Arguments are assumed to be sequences of strikes, dips, and
rakes along the plane.
``"radians"`` : lon, lat
Arguments are assumed to be "raw" longitudes and latitudes in
the stereonet's underlying coordinate system.
Returns
-------
centers : An Nx2 array-like
Longitude and latitude in radians of the centers of each cluster. | Below is the the instruction that describes the task:
### Input:
Find centers of multi-modal clusters of data using a kmeans approach
modified for spherical measurements.
Parameters
----------
*args : 2 or 3 sequences of measurements
By default, this will be expected to be ``strike`` & ``dip``, both
array-like sequences representing poles to planes. (Rake measurements
require three parameters, thus the variable number of arguments.) The
``measurement`` kwarg controls how these arguments are interpreted.
num : int
The number of clusters to find. Defaults to 2.
bidirectional : bool
Whether or not the measurements are bi-directional linear/planar
features or directed vectors. Defaults to True.
tolerance : float
Iteration will continue until the centers have not changed by more
than this amount. Defaults to 1e-5.
measurement : string, optional
Controls how the input arguments are interpreted. Defaults to
``"poles"``. May be one of the following:
``"poles"`` : strikes, dips
Arguments are assumed to be sequences of strikes and dips of
planes. Poles to these planes are used for analysis.
``"lines"`` : plunges, bearings
Arguments are assumed to be sequences of plunges and bearings
of linear features.
``"rakes"`` : strikes, dips, rakes
Arguments are assumed to be sequences of strikes, dips, and
rakes along the plane.
``"radians"`` : lon, lat
Arguments are assumed to be "raw" longitudes and latitudes in
the stereonet's underlying coordinate system.
Returns
-------
centers : An Nx2 array-like
Longitude and latitude in radians of the centers of each cluster.
### Response:
def kmeans(*args, **kwargs):
"""
Find centers of multi-modal clusters of data using a kmeans approach
modified for spherical measurements.
Parameters
----------
*args : 2 or 3 sequences of measurements
By default, this will be expected to be ``strike`` & ``dip``, both
array-like sequences representing poles to planes. (Rake measurements
require three parameters, thus the variable number of arguments.) The
``measurement`` kwarg controls how these arguments are interpreted.
num : int
The number of clusters to find. Defaults to 2.
bidirectional : bool
Whether or not the measurements are bi-directional linear/planar
features or directed vectors. Defaults to True.
tolerance : float
Iteration will continue until the centers have not changed by more
than this amount. Defaults to 1e-5.
measurement : string, optional
Controls how the input arguments are interpreted. Defaults to
``"poles"``. May be one of the following:
``"poles"`` : strikes, dips
Arguments are assumed to be sequences of strikes and dips of
planes. Poles to these planes are used for analysis.
``"lines"`` : plunges, bearings
Arguments are assumed to be sequences of plunges and bearings
of linear features.
``"rakes"`` : strikes, dips, rakes
Arguments are assumed to be sequences of strikes, dips, and
rakes along the plane.
``"radians"`` : lon, lat
Arguments are assumed to be "raw" longitudes and latitudes in
the stereonet's underlying coordinate system.
Returns
-------
centers : An Nx2 array-like
Longitude and latitude in radians of the centers of each cluster.
"""
lon, lat = _convert_measurements(args, kwargs.get('measurement', 'poles'))
num = kwargs.get('num', 2)
bidirectional = kwargs.get('bidirectional', True)
tolerance = kwargs.get('tolerance', 1e-5)
points = lon, lat
dist = lambda x: stereonet_math.angular_distance(x, points, bidirectional)
center_lon = np.random.choice(lon, num)
center_lat = np.random.choice(lat, num)
centers = np.column_stack([center_lon, center_lat])
while True:
dists = np.array([dist(item) for item in centers]).T
closest = dists.argmin(axis=1)
new_centers = []
for i in range(num):
mask = closest == i
_, vecs = cov_eig(lon[mask], lat[mask], bidirectional)
new_centers.append(stereonet_math.cart2sph(*vecs[:,-1]))
if np.allclose(centers, new_centers, atol=tolerance):
break
else:
centers = new_centers
return centers |
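A quick usage sketch, assuming mplstereonet (the library this routine belongs to) is installed; the strike/dip values are synthetic and the cluster parameters arbitrary:
import numpy as np
import mplstereonet
np.random.seed(0)
# Two artificial clusters of planes, given as strike/dip in degrees
strikes = np.concatenate([np.random.normal(30, 5, 50), np.random.normal(210, 5, 50)])
dips = np.concatenate([np.random.normal(45, 3, 50), np.random.normal(60, 3, 50)])
# Centers come back as (lon, lat) pairs in radians in the stereonet frame
centers = mplstereonet.kmeans(strikes, dips, num=2, measurement='poles')
print(centers)
# e.g. mplstereonet.geographic2pole(*zip(*centers)) maps the centers back to strike/dip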
def triangle_center(tri, uv=False):
""" Computes the center of mass of the input triangle.
:param tri: triangle object
:type tri: elements.Triangle
:param uv: if True, then finds parametric position of the center of mass
:type uv: bool
:return: center of mass of the triangle
:rtype: tuple
"""
if uv:
data = [t.uv for t in tri]
mid = [0.0, 0.0]
else:
data = tri.vertices
mid = [0.0, 0.0, 0.0]
for vert in data:
mid = [m + v for m, v in zip(mid, vert)]
mid = [float(m) / 3.0 for m in mid]
return tuple(mid) | Computes the center of mass of the input triangle.
:param tri: triangle object
:type tri: elements.Triangle
:param uv: if True, then finds parametric position of the center of mass
:type uv: bool
:return: center of mass of the triangle
:rtype: tuple | Below is the the instruction that describes the task:
### Input:
Computes the center of mass of the input triangle.
:param tri: triangle object
:type tri: elements.Triangle
:param uv: if True, then finds parametric position of the center of mass
:type uv: bool
:return: center of mass of the triangle
:rtype: tuple
### Response:
def triangle_center(tri, uv=False):
""" Computes the center of mass of the input triangle.
:param tri: triangle object
:type tri: elements.Triangle
:param uv: if True, then finds parametric position of the center of mass
:type uv: bool
:return: center of mass of the triangle
:rtype: tuple
"""
if uv:
data = [t.uv for t in tri]
mid = [0.0, 0.0]
else:
data = tri.vertices
mid = [0.0, 0.0, 0.0]
for vert in data:
mid = [m + v for m, v in zip(mid, vert)]
mid = [float(m) / 3.0 for m in mid]
return tuple(mid) |
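Because the function only touches tri.vertices, and each vertex's uv attribute when iterated, a throwaway stand-in class is enough to exercise it; Vert and Tri below are hypothetical helpers, not the library's elements.Triangle:
class Vert:
    def __init__(self, xyz, uv):
        self.xyz = xyz
        self.uv = uv
class Tri:
    def __init__(self, verts):
        self.verts = verts
        self.vertices = [v.xyz for v in verts]   # used when uv=False
    def __iter__(self):
        return iter(self.verts)                  # used when uv=True
tri = Tri([Vert((0.0, 0.0, 0.0), (0.0, 0.0)),
           Vert((3.0, 0.0, 0.0), (1.0, 0.0)),
           Vert((0.0, 3.0, 0.0), (0.0, 1.0))])
print(triangle_center(tri))           # (1.0, 1.0, 0.0)
print(triangle_center(tri, uv=True))  # roughly (0.333, 0.333)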
def enable(self, size, block_size=None, store=None, store_sync_interval=None):
"""Enables shared queue of the given size.
:param int size: Queue size.
:param int block_size: Block size in bytes. Default: 8 KiB.
:param str|unicode store: Persist the queue into file.
:param int store_sync_interval: Store sync interval in master cycles (usually seconds).
"""
self._set('queue', size)
self._set('queue-blocksize', block_size)
self._set('queue-store', store)
self._set('queue-store-sync', store_sync_interval)
return self._section | Enables shared queue of the given size.
:param int size: Queue size.
:param int block_size: Block size in bytes. Default: 8 KiB.
:param str|unicode store: Persist the queue into file.
:param int store_sync_interval: Store sync interval in master cycles (usually seconds). | Below is the the instruction that describes the task:
### Input:
Enables shared queue of the given size.
:param int size: Queue size.
:param int block_size: Block size in bytes. Default: 8 KiB.
:param str|unicode store: Persist the queue into file.
:param int store_sync_interval: Store sync interval in master cycles (usually seconds).
### Response:
def enable(self, size, block_size=None, store=None, store_sync_interval=None):
"""Enables shared queue of the given size.
:param int size: Queue size.
:param int block_size: Block size in bytes. Default: 8 KiB.
:param str|unicode store: Persist the queue into file.
:param int store_sync_interval: Store sync interval in master cycles (usually seconds).
"""
self._set('queue', size)
self._set('queue-blocksize', block_size)
self._set('queue-store', store)
self._set('queue-store-sync', store_sync_interval)
return self._section |
def _pastore8(ins):
''' Stores 2nd operand content into address of 1st operand.
1st operand is an array element. Dimensions are pushed into the
stack.
Use '*' for indirect store on 1st operand (A pointer to an array)
'''
output = _paddr(ins.quad[1])
value = ins.quad[2]
if value[0] == '*':
value = value[1:]
indirect = True
else:
indirect = False
try:
value = int(ins.quad[2]) & 0xFFFF
if indirect:
output.append('ld a, (%i)' % value)
output.append('ld (hl), a')
else:
value &= 0xFF
output.append('ld (hl), %i' % value)
except ValueError:
output.append('pop af')
output.append('ld (hl), a')
return output | Stores 2nd operand content into address of 1st operand.
1st operand is an array element. Dimensions are pushed into the
stack.
Use '*' for indirect store on 1st operand (A pointer to an array) | Below is the the instruction that describes the task:
### Input:
Stores 2nd operand content into address of 1st operand.
1st operand is an array element. Dimensions are pushed into the
stack.
Use '*' for indirect store on 1st operand (A pointer to an array)
### Response:
def _pastore8(ins):
''' Stores 2nd operand content into address of 1st operand.
1st operand is an array element. Dimensions are pushed into the
stack.
Use '*' for indirect store on 1st operand (A pointer to an array)
'''
output = _paddr(ins.quad[1])
value = ins.quad[2]
if value[0] == '*':
value = value[1:]
indirect = True
else:
indirect = False
try:
value = int(ins.quad[2]) & 0xFFFF
if indirect:
output.append('ld a, (%i)' % value)
output.append('ld (hl), a')
else:
value &= 0xFF
output.append('ld (hl), %i' % value)
except ValueError:
output.append('pop af')
output.append('ld (hl), a')
return output |
def evaluate_binop_math(self, operation, left, right, **kwargs):
"""
Evaluate given mathematical binary operation with given operands.
"""
if not operation in self.binops_math:
raise ValueError("Invalid math binary operation '{}'".format(operation))
if left is None or right is None:
return None
if not isinstance(left, (list, ListIP)):
left = [left]
if not isinstance(right, (list, ListIP)):
right = [right]
if not left or not right:
return None
try:
vect = self._calculate_vector(operation, left, right)
if len(vect) > 1:
return vect
return vect[0]
except:
return None | Evaluate given mathematical binary operation with given operands. | Below is the the instruction that describes the task:
### Input:
Evaluate given mathematical binary operation with given operands.
### Response:
def evaluate_binop_math(self, operation, left, right, **kwargs):
"""
Evaluate given mathematical binary operation with given operands.
"""
if not operation in self.binops_math:
raise ValueError("Invalid math binary operation '{}'".format(operation))
if left is None or right is None:
return None
if not isinstance(left, (list, ListIP)):
left = [left]
if not isinstance(right, (list, ListIP)):
right = [right]
if not left or not right:
return None
try:
vect = self._calculate_vector(operation, left, right)
if len(vect) > 1:
return vect
return vect[0]
except:
return None |
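Stripped of the surrounding filtering library, the pattern here is: wrap scalars into lists, combine the two operand vectors, and collapse a single-element result back to a scalar. A self-contained sketch of that pattern (the operator table and the pairwise combination are illustrative, not necessarily how _calculate_vector works internally):
import itertools
import operator
OPS = {"OP_PLUS": operator.add, "OP_MINUS": operator.sub, "OP_TIMES": operator.mul}
def binop_math(op, left, right):
    if left is None or right is None:
        return None
    left = left if isinstance(left, list) else [left]
    right = right if isinstance(right, list) else [right]
    if not left or not right:
        return None
    vect = [OPS[op](a, b) for a, b in itertools.product(left, right)]
    return vect if len(vect) > 1 else vect[0]
print(binop_math("OP_PLUS", 1, 2))         # 3
print(binop_math("OP_TIMES", [2, 3], 10))  # [20, 30]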
def get_html_keywords(index_page):
"""
Return list of `keywords` parsed from HTML ``<meta>`` tags.
Args:
index_page (str): Content of the page as UTF-8 string
Returns:
list: List of :class:`.SourceString` objects.
"""
keyword_lists = (
keyword_list.split(",")
for keyword_list in parse_meta(index_page, "keywords", "HTML")
)
# create SourceStrings from the list of keywords
return [
SourceString(keyword.strip(), source="HTML")
for keyword in sum(keyword_lists, []) # flatten the list
] | Return list of `keywords` parsed from HTML ``<meta>`` tags.
Args:
index_page (str): Content of the page as UTF-8 string
Returns:
list: List of :class:`.SourceString` objects. | Below is the the instruction that describes the task:
### Input:
Return list of `keywords` parsed from HTML ``<meta>`` tags.
Args:
index_page (str): Content of the page as UTF-8 string
Returns:
list: List of :class:`.SourceString` objects.
### Response:
def get_html_keywords(index_page):
"""
Return list of `keywords` parsed from HTML ``<meta>`` tags.
Args:
index_page (str): Content of the page as UTF-8 string
Returns:
list: List of :class:`.SourceString` objects.
"""
keyword_lists = (
keyword_list.split(",")
for keyword_list in parse_meta(index_page, "keywords", "HTML")
)
# create SourceStrings from the list of keywords
return [
SourceString(keyword.strip(), source="HTML")
for keyword in sum(keyword_lists, []) # flatten the list
] |
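A standard-library approximation of the same idea; parse_meta and SourceString are replaced by html.parser and bare strings, so this is an illustration rather than the module's own helpers:
from html.parser import HTMLParser
class KeywordMeta(HTMLParser):
    def __init__(self):
        super().__init__()
        self.keywords = []
    def handle_starttag(self, tag, attrs):
        attrs = dict(attrs)
        if tag == "meta" and (attrs.get("name") or "").lower() == "keywords":
            content = attrs.get("content") or ""
            self.keywords.extend(k.strip() for k in content.split(",") if k.strip())
parser = KeywordMeta()
parser.feed('<html><head><meta name="keywords" content="python, parsing, html"></head></html>')
print(parser.keywords)  # ['python', 'parsing', 'html']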
async def _handle_response(self, response: aiohttp.client_reqrep.ClientResponse, await_final_result: bool) -> dict:
"""
Handles the response returned from the CloudStack API. Some CloudStack APIs are implemented asynchronously, which
means that the API call returns just a job id. The actual API response is postponed and a specific
asyncJobResults API has to be polled using the job id to get the final result once the API call has been
processed.
:param response: The response returned by the aiohttp call.
:type response: aiohttp.client_reqrep.ClientResponse
:param await_final_result: Specifier that indicates whether the function should poll the asyncJobResult API
until the asynchronous API call has been processed
:type await_final_result: bool
:return: Dictionary containing the JSON response of the API call
:rtype: dict
"""
try:
data = await response.json()
except aiohttp.client_exceptions.ContentTypeError:
text = await response.text()
logging.debug('Content returned by server not of type "application/json"\n Content: {}'.format(text))
raise CloudStackClientException(message="Could not decode content. Server did not return json content!")
else:
data = self._transform_data(data)
if response.status != 200:
raise CloudStackClientException(message="Async CloudStack call failed!",
error_code=data.get("errorcode", response.status),
error_text=data.get("errortext"),
response=data)
while await_final_result and ('jobid' in data):
await asyncio.sleep(self.async_poll_latency)
data = await self.queryAsyncJobResult(jobid=data['jobid'])
if data['jobstatus']: # jobstatus is 0 for pending async CloudStack calls
if not data['jobresultcode']: # exit code is zero
try:
return data['jobresult']
except KeyError:
pass
logging.debug("Async CloudStack call returned {}".format(str(data)))
raise CloudStackClientException(message="Async CloudStack call failed!",
error_code=data.get("errorcode"),
error_text=data.get("errortext"),
response=data)
return data | Handles the response returned from the CloudStack API. Some CloudStack APIs are implemented asynchronously, which
means that the API call returns just a job id. The actual API response is postponed and a specific
asyncJobResults API has to be polled using the job id to get the final result once the API call has been
processed.
:param response: The response returned by the aiohttp call.
:type response: aiohttp.client_reqrep.ClientResponse
:param await_final_result: Specifier that indicates whether the function should poll the asyncJobResult API
until the asynchronous API call has been processed
:type await_final_result: bool
:return: Dictionary containing the JSON response of the API call
:rtype: dict | Below is the the instruction that describes the task:
### Input:
Handles the response returned from the CloudStack API. Some CloudStack APIs are implemented asynchronously, which
means that the API call returns just a job id. The actual API response is postponed and a specific
asyncJobResults API has to be polled using the job id to get the final result once the API call has been
processed.
:param response: The response returned by the aiohttp call.
:type response: aiohttp.client_reqrep.ClientResponse
:param await_final_result: Specifier that indicates whether the function should poll the asyncJobResult API
until the asynchronous API call has been processed
:type await_final_result: bool
:return: Dictionary containing the JSON response of the API call
:rtype: dict
### Response:
async def _handle_response(self, response: aiohttp.client_reqrep.ClientResponse, await_final_result: bool) -> dict:
"""
Handles the response returned from the CloudStack API. Some CloudStack APIs are implemented asynchronously, which
means that the API call returns just a job id. The actual API response is postponed and a specific
asyncJobResults API has to be polled using the job id to get the final result once the API call has been
processed.
:param response: The response returned by the aiohttp call.
:type response: aiohttp.client_reqrep.ClientResponse
:param await_final_result: Specifier that indicates whether the function should poll the asyncJobResult API
until the asynchronous API call has been processed
:type await_final_result: bool
:return: Dictionary containing the JSON response of the API call
:rtype: dict
"""
try:
data = await response.json()
except aiohttp.client_exceptions.ContentTypeError:
text = await response.text()
logging.debug('Content returned by server not of type "application/json"\n Content: {}'.format(text))
raise CloudStackClientException(message="Could not decode content. Server did not return json content!")
else:
data = self._transform_data(data)
if response.status != 200:
raise CloudStackClientException(message="Async CloudStack call failed!",
error_code=data.get("errorcode", response.status),
error_text=data.get("errortext"),
response=data)
while await_final_result and ('jobid' in data):
await asyncio.sleep(self.async_poll_latency)
data = await self.queryAsyncJobResult(jobid=data['jobid'])
if data['jobstatus']: # jobstatus is 0 for pending async CloudStack calls
if not data['jobresultcode']: # exit code is zero
try:
return data['jobresult']
except KeyError:
pass
logging.debug("Async CloudStack call returned {}".format(str(data)))
raise CloudStackClientException(message="Async CloudStack call failed!",
error_code=data.get("errorcode"),
error_text=data.get("errortext"),
response=data)
return data |
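The interesting part is the polling loop: keep calling the job-result API until jobstatus turns non-zero, then unwrap jobresult or raise. A stripped-down asyncio sketch with a faked job endpoint (no aiohttp, no real CloudStack) shows the same control flow:
import asyncio
class FakeJobAPI:
    # Stand-in for queryAsyncJobResult: reports pending twice, then success
    def __init__(self):
        self.polls = 0
    async def query(self, jobid):
        self.polls += 1
        if self.polls < 3:
            return {"jobid": jobid, "jobstatus": 0}
        return {"jobstatus": 1, "jobresultcode": 0,
                "jobresult": {"virtualmachine": {"id": "vm-42"}}}
async def wait_for_job(api, data, poll_latency=0.1):
    while "jobid" in data:
        await asyncio.sleep(poll_latency)
        data = await api.query(data["jobid"])
        if data["jobstatus"]:              # 0 means the job is still pending
            if not data["jobresultcode"]:  # 0 means the job succeeded
                return data["jobresult"]
            raise RuntimeError("async job failed: %r" % data)
    return data
print(asyncio.run(wait_for_job(FakeJobAPI(), {"jobid": "1234-abcd"})))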
def edit_rrset(self, zone_name, rtype, owner_name, ttl, rdata, profile=None):
"""Updates an existing RRSet in the specified zone.
Arguments:
zone_name -- The zone that contains the RRSet. The trailing dot is optional.
rtype -- The type of the RRSet. This can be numeric (1) or
if a well-known name is defined for the type (A), you can use it instead.
owner_name -- The owner name for the RRSet.
If no trailing dot is supplied, the owner_name is assumed to be relative (foo).
If a trailing dot is supplied, the owner name is assumed to be absolute (foo.zonename.com.)
ttl -- The updated TTL value for the RRSet.
rdata -- The updated BIND data for the RRSet as a string.
If there is a single resource record in the RRSet, you can pass in the single string.
If there are multiple resource records in this RRSet, pass in a list of strings.
profile -- The profile info if this is updating a resource pool
"""
if type(rdata) is not list:
rdata = [rdata]
rrset = {"ttl": ttl, "rdata": rdata}
if profile:
rrset["profile"] = profile
uri = "/v1/zones/" + zone_name + "/rrsets/" + rtype + "/" + owner_name
return self.rest_api_connection.put(uri, json.dumps(rrset)) | Updates an existing RRSet in the specified zone.
Arguments:
zone_name -- The zone that contains the RRSet. The trailing dot is optional.
rtype -- The type of the RRSet. This can be numeric (1) or
if a well-known name is defined for the type (A), you can use it instead.
owner_name -- The owner name for the RRSet.
If no trailing dot is supplied, the owner_name is assumed to be relative (foo).
If a trailing dot is supplied, the owner name is assumed to be absolute (foo.zonename.com.)
ttl -- The updated TTL value for the RRSet.
rdata -- The updated BIND data for the RRSet as a string.
If there is a single resource record in the RRSet, you can pass in the single string.
If there are multiple resource records in this RRSet, pass in a list of strings.
profile -- The profile info if this is updating a resource pool | Below is the the instruction that describes the task:
### Input:
Updates an existing RRSet in the specified zone.
Arguments:
zone_name -- The zone that contains the RRSet. The trailing dot is optional.
rtype -- The type of the RRSet. This can be numeric (1) or
if a well-known name is defined for the type (A), you can use it instead.
owner_name -- The owner name for the RRSet.
If no trailing dot is supplied, the owner_name is assumed to be relative (foo).
If a trailing dot is supplied, the owner name is assumed to be absolute (foo.zonename.com.)
ttl -- The updated TTL value for the RRSet.
rdata -- The updated BIND data for the RRSet as a string.
If there is a single resource record in the RRSet, you can pass in the single string.
If there are multiple resource records in this RRSet, pass in a list of strings.
profile -- The profile info if this is updating a resource pool
### Response:
def edit_rrset(self, zone_name, rtype, owner_name, ttl, rdata, profile=None):
"""Updates an existing RRSet in the specified zone.
Arguments:
zone_name -- The zone that contains the RRSet. The trailing dot is optional.
rtype -- The type of the RRSet. This can be numeric (1) or
if a well-known name is defined for the type (A), you can use it instead.
owner_name -- The owner name for the RRSet.
If no trailing dot is supplied, the owner_name is assumed to be relative (foo).
If a trailing dot is supplied, the owner name is assumed to be absolute (foo.zonename.com.)
ttl -- The updated TTL value for the RRSet.
rdata -- The updated BIND data for the RRSet as a string.
If there is a single resource record in the RRSet, you can pass in the single string.
If there are multiple resource records in this RRSet, pass in a list of strings.
profile -- The profile info if this is updating a resource pool
"""
if type(rdata) is not list:
rdata = [rdata]
rrset = {"ttl": ttl, "rdata": rdata}
if profile:
rrset["profile"] = profile
uri = "/v1/zones/" + zone_name + "/rrsets/" + rtype + "/" + owner_name
return self.rest_api_connection.put(uri, json.dumps(rrset)) |
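What goes over the wire is plain data, so the body and URI can be previewed without a client; the zone, owner and addresses below are made up and the actual PUT call is omitted:
import json
zone_name, rtype, owner_name = "example.com.", "A", "www"
ttl, rdata, profile = 300, ["192.0.2.10", "192.0.2.11"], None
rrset = {"ttl": ttl, "rdata": rdata if isinstance(rdata, list) else [rdata]}
if profile:
    rrset["profile"] = profile
uri = "/v1/zones/" + zone_name + "/rrsets/" + rtype + "/" + owner_name
print(uri)                # /v1/zones/example.com./rrsets/A/www
print(json.dumps(rrset))  # {"ttl": 300, "rdata": ["192.0.2.10", "192.0.2.11"]}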
def stop(self, signum=None, frame=None):
"""
handles a termination signal
"""
BackgroundProcess.objects.filter(pk=self.process_id
).update(pid=0, last_update=now(), message='stopping..')
# run the cleanup
self.cleanup()
BackgroundProcess.objects.filter(pk=self.process_id).update(pid=0,
last_update=now(),
message='stopped') | handles a termination signal | Below is the the instruction that describes the task:
### Input:
handles a termination signal
### Response:
def stop(self, signum=None, frame=None):
"""
handles a termination signal
"""
BackgroundProcess.objects.filter(pk=self.process_id
).update(pid=0, last_update=now(), message='stopping..')
# run the cleanup
self.cleanup()
BackgroundProcess.objects.filter(pk=self.process_id).update(pid=0,
last_update=now(),
message='stopped') |
def relations_dict(rel_lst):
"""constructs a relation's dictionary from a list that describes amphidromus relations between objects
:param list rel_lst: a relationships list of the form [[a,b],[c, a, b]] # can include duplicates
:returns: a dictionary
:Example:
>>> rl = [('a', 'b', 'c'), ('a', 'x', 'y'), ('x', 'y', 'z')]
>>> relations_dict(rl)
{'a': ['x', 'c', 'b', 'y'], 'c': ['a', 'b'], 'b': ['a', 'c'], 'y': ['a', 'x', 'z'], 'x': ['a', 'z', 'y'],
'z': ['y', 'x']}
"""
dc = {}
for c in rel_lst:
for i in c:
for k in c:
dc.setdefault(i, [])
dc[i].append(k)
do = {}
for k in list(dc.keys()):
if dc[k]:
vl = list(set(dc[k])) # remove duplicates
vl.remove(k)
do[k] = vl
return do | constructs a relation's dictionary from a list that describes amphidromus relations between objects
:param list rel_lst: a relationships list of the form [[a,b],[c, a, b]] # can include duplicates
:returns: a dictionary
:Example:
>>> rl = [('a', 'b', 'c'), ('a', 'x', 'y'), ('x', 'y', 'z')]
>>> relations_dict(rl)
{'a': ['x', 'c', 'b', 'y'], 'c': ['a', 'b'], 'b': ['a', 'c'], 'y': ['a', 'x', 'z'], 'x': ['a', 'z', 'y'],
'z': ['y', 'x']} | Below is the the instruction that describes the task:
### Input:
constructs a relation's dictionary from a list that describes amphidromus relations between objects
:param list rel_lst: a relationships list of the form [[a,b],[c, a, b]] # can include duplicates
:returns: a dictionary
:Example:
>>> rl = [('a', 'b', 'c'), ('a', 'x', 'y'), ('x', 'y', 'z')]
>>> relations_dict(rl)
{'a': ['x', 'c', 'b', 'y'], 'c': ['a', 'b'], 'b': ['a', 'c'], 'y': ['a', 'x', 'z'], 'x': ['a', 'z', 'y'],
'z': ['y', 'x']}
### Response:
def relations_dict(rel_lst):
"""constructs a relation's dictionary from a list that describes amphidromus relations between objects
:param list rel_lst: a relationships list of the form [[a,b],[c, a, b]] # can include duplicates
:returns: a dictionary
:Example:
>>> rl = [('a', 'b', 'c'), ('a', 'x', 'y'), ('x', 'y', 'z')]
>>> relations_dict(rl)
{'a': ['x', 'c', 'b', 'y'], 'c': ['a', 'b'], 'b': ['a', 'c'], 'y': ['a', 'x', 'z'], 'x': ['a', 'z', 'y'],
'z': ['y', 'x']}
"""
dc = {}
for c in rel_lst:
for i in c:
for k in c:
dc.setdefault(i, [])
dc[i].append(k)
do = {}
for k in list(dc.keys()):
if dc[k]:
vl = list(set(dc[k])) # remove duplicates
vl.remove(k)
do[k] = vl
return do |
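The function is dependency-free, so it can be run as-is; a second small input shows the deduplication and self-removal (dict and list ordering may differ between runs because sets are involved):
rl = [("a", "b"), ("b", "c", "a"), ("c", "a")]
print(relations_dict(rl))
# e.g. {'a': ['b', 'c'], 'b': ['a', 'c'], 'c': ['a', 'b']} -- each key maps to every
# object it ever shared a tuple with, with itself removed and duplicates dropped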
def clear(self):
"""
Clears out all the items from this tab bar.
"""
self.blockSignals(True)
items = list(self.items())
for item in items:
item.close()
self.blockSignals(False)
self._currentIndex = -1
self.currentIndexChanged.emit(self._currentIndex) | Clears out all the items from this tab bar. | Below is the the instruction that describes the task:
### Input:
Clears out all the items from this tab bar.
### Response:
def clear(self):
"""
Clears out all the items from this tab bar.
"""
self.blockSignals(True)
items = list(self.items())
for item in items:
item.close()
self.blockSignals(False)
self._currentIndex = -1
self.currentIndexChanged.emit(self._currentIndex) |
def configure_properties(self, tc_config_prop_filenames=None, behave_properties=None):
"""Configure selenium instance properties
:param tc_config_prop_filenames: test case specific properties filenames
:param behave_properties: dict with behave user data properties
"""
prop_filenames = DriverWrappersPool.get_configured_value('Config_prop_filenames', tc_config_prop_filenames,
'properties.cfg;local-properties.cfg')
prop_filenames = [os.path.join(DriverWrappersPool.config_directory, filename) for filename in
prop_filenames.split(';')]
prop_filenames = ';'.join(prop_filenames)
# Configure config only if properties filename has changed
if self.config_properties_filenames != prop_filenames:
# Initialize the config object
self.config = ExtendedConfigParser.get_config_from_file(prop_filenames)
self.config_properties_filenames = prop_filenames
# Override properties with system properties
self.config.update_properties(os.environ)
# Override properties with behave userdata properties
if behave_properties:
self.config.update_properties(behave_properties) | Configure selenium instance properties
:param tc_config_prop_filenames: test case specific properties filenames
:param behave_properties: dict with behave user data properties | Below is the the instruction that describes the task:
### Input:
Configure selenium instance properties
:param tc_config_prop_filenames: test case specific properties filenames
:param behave_properties: dict with behave user data properties
### Response:
def configure_properties(self, tc_config_prop_filenames=None, behave_properties=None):
"""Configure selenium instance properties
:param tc_config_prop_filenames: test case specific properties filenames
:param behave_properties: dict with behave user data properties
"""
prop_filenames = DriverWrappersPool.get_configured_value('Config_prop_filenames', tc_config_prop_filenames,
'properties.cfg;local-properties.cfg')
prop_filenames = [os.path.join(DriverWrappersPool.config_directory, filename) for filename in
prop_filenames.split(';')]
prop_filenames = ';'.join(prop_filenames)
# Configure config only if properties filename has changed
if self.config_properties_filenames != prop_filenames:
# Initialize the config object
self.config = ExtendedConfigParser.get_config_from_file(prop_filenames)
self.config_properties_filenames = prop_filenames
# Override properties with system properties
self.config.update_properties(os.environ)
# Override properties with behave userdata properties
if behave_properties:
self.config.update_properties(behave_properties) |
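The layering it implements (a base properties file, then a local override file, then environment variables on top) can be mimicked with the standard library alone; the file names, section and variable below are illustrative, not necessarily the library's actual naming scheme:
import configparser
import os
config = configparser.ConfigParser()
# Later files win for keys defined in both, which is what gives local overrides
found = config.read(["conf/properties.cfg", "conf/local-properties.cfg"])
print(found)  # the files that were actually present and parsed
# A hypothetical environment override, applied last
if config.has_section("Driver") and "DRIVER_TYPE" in os.environ:
    config.set("Driver", "type", os.environ["DRIVER_TYPE"])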
def split_strings(subtree: dict) -> List[str]:
"""
Produce the list of strings from the dictionary with concatenated chars \
and lengths. Opposite to :func:`merge_strings()`.
:param subtree: The dict with "strings" and "lengths".
:return: :class:`list` of :class:`str`-s or :class:`bytes`.
"""
strings = subtree["strings"]
lengths = subtree["lengths"]
if lengths.shape[0] == 0 and strings.shape[0] == 0:
return []
strings = strings[0]
if subtree.get("str", True):
strings = strings.decode("utf-8")
result = [None] * lengths.shape[0]
offset = 0
for i, l in enumerate(lengths):
result[i] = strings[offset:offset + l]
offset += l
return result | Produce the list of strings from the dictionary with concatenated chars \
and lengths. Opposite to :func:`merge_strings()`.
:param subtree: The dict with "strings" and "lengths".
:return: :class:`list` of :class:`str`-s or :class:`bytes`. | Below is the the instruction that describes the task:
### Input:
Produce the list of strings from the dictionary with concatenated chars \
and lengths. Opposite to :func:`merge_strings()`.
:param subtree: The dict with "strings" and "lengths".
:return: :class:`list` of :class:`str`-s or :class:`bytes`.
### Response:
def split_strings(subtree: dict) -> List[str]:
"""
Produce the list of strings from the dictionary with concatenated chars \
and lengths. Opposite to :func:`merge_strings()`.
:param subtree: The dict with "strings" and "lengths".
:return: :class:`list` of :class:`str`-s or :class:`bytes`.
"""
strings = subtree["strings"]
lengths = subtree["lengths"]
if lengths.shape[0] == 0 and strings.shape[0] == 0:
return []
strings = strings[0]
if subtree.get("str", True):
strings = strings.decode("utf-8")
result = [None] * lengths.shape[0]
offset = 0
for i, l in enumerate(lengths):
result[i] = strings[offset:offset + l]
offset += l
return result |
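A round trip makes the layout concrete: the concatenated text sits in a one-element array and lengths holds one entry per original string. The subtree dict below is hand-built to mirror what the merge side would produce, at least as far as this function reads it, and assumes split_strings above is in scope (lengths are byte counts; ASCII keeps that simple):
import numpy as np
parts = ["foo", "barbaz", "q"]
subtree = {
    "strings": np.array(["".join(parts).encode("utf-8")]),
    "lengths": np.array([len(p) for p in parts]),
    "str": True,
}
print(split_strings(subtree))  # ['foo', 'barbaz', 'q']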
def search(cls, term, weights=None, with_score=False, score_alias='score',
explicit_ordering=False):
"""Full-text search using selected `term`."""
return cls._search(
term,
weights,
with_score,
score_alias,
cls.rank,
explicit_ordering) | Full-text search using selected `term`. | Below is the the instruction that describes the task:
### Input:
Full-text search using selected `term`.
### Response:
def search(cls, term, weights=None, with_score=False, score_alias='score',
explicit_ordering=False):
"""Full-text search using selected `term`."""
return cls._search(
term,
weights,
with_score,
score_alias,
cls.rank,
explicit_ordering) |
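This classmethod pattern matches peewee's SQLite full-text extension; a hedged usage sketch, assuming playhouse.sqlite_ext provides FTSModel and SearchField as in current peewee releases:
from playhouse.sqlite_ext import SqliteExtDatabase, FTSModel, SearchField
db = SqliteExtDatabase(":memory:")
class NoteIndex(FTSModel):
    content = SearchField()
    class Meta:
        database = db
db.create_tables([NoteIndex])
NoteIndex.insert({NoteIndex.content: "full text search in sqlite"}).execute()
NoteIndex.insert({NoteIndex.content: "an unrelated note"}).execute()
for row in NoteIndex.search("sqlite", with_score=True):
    print(row.content, row.score)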
def _get_neighbor_conf(neigh_ip_address):
"""Returns neighbor configuration for given neighbor ip address.
Raises exception if no neighbor with `neigh_ip_address` exists.
"""
neigh_conf = \
CORE_MANAGER.neighbors_conf.get_neighbor_conf(neigh_ip_address)
if not neigh_conf:
raise RuntimeConfigError(desc='No Neighbor configuration with IP'
' address %s' % neigh_ip_address)
assert isinstance(neigh_conf, NeighborConf)
return neigh_conf | Returns neighbor configuration for given neighbor ip address.
Raises exception if no neighbor with `neigh_ip_address` exists. | Below is the the instruction that describes the task:
### Input:
Returns neighbor configuration for given neighbor ip address.
Raises exception if no neighbor with `neigh_ip_address` exists.
### Response:
def _get_neighbor_conf(neigh_ip_address):
"""Returns neighbor configuration for given neighbor ip address.
Raises exception if no neighbor with `neigh_ip_address` exists.
"""
neigh_conf = \
CORE_MANAGER.neighbors_conf.get_neighbor_conf(neigh_ip_address)
if not neigh_conf:
raise RuntimeConfigError(desc='No Neighbor configuration with IP'
' address %s' % neigh_ip_address)
assert isinstance(neigh_conf, NeighborConf)
return neigh_conf |
def read_tmy3(filename=None, coerce_year=None, recolumn=True):
'''
Read a TMY3 file into a pandas dataframe.
Note that values contained in the metadata dictionary are unchanged
from the TMY3 file (i.e. units are retained). In the case of any
discrepencies between this documentation and the TMY3 User's Manual
[1], the TMY3 User's Manual takes precedence.
The TMY3 files were updated in Jan. 2015. This function requires the
use of the updated files.
Parameters
----------
filename : None or string, default None
If None, attempts to use a Tkinter file browser. A string can be
a relative file path, absolute file path, or url.
coerce_year : None or int, default None
If supplied, the year of the data will be set to this value.
recolumn : bool, default True
If True, apply standard names to TMY3 columns. Typically this
results in stripping the units from the column name.
Returns
-------
Tuple of the form (data, metadata).
data : DataFrame
A pandas dataframe with the columns described in the table
below. For more detailed descriptions of each component, please
consult the TMY3 User's Manual ([1]), especially tables 1-1
through 1-6.
metadata : dict
The site metadata available in the file.
Notes
-----
The returned structures have the following fields.
=============== ====== ===================
key format description
=============== ====== ===================
altitude Float site elevation
latitude Float site latitude
longitude Float site longitude
Name String site name
State String state
TZ Float UTC offset
USAF Int USAF identifier
=============== ====== ===================
============================= ======================================================================================================================================================
TMYData field description
============================= ======================================================================================================================================================
TMYData.Index A pandas datetime index. NOTE, the index is currently timezone unaware, and times are set to local standard time (daylight savings is not included)
TMYData.ETR Extraterrestrial horizontal radiation recv'd during 60 minutes prior to timestamp, Wh/m^2
TMYData.ETRN Extraterrestrial normal radiation recv'd during 60 minutes prior to timestamp, Wh/m^2
TMYData.GHI Direct and diffuse horizontal radiation recv'd during 60 minutes prior to timestamp, Wh/m^2
TMYData.GHISource See [1], Table 1-4
TMYData.GHIUncertainty Uncertainty based on random and bias error estimates see [2]
TMYData.DNI Amount of direct normal radiation (modeled) recv'd during 60 minutes prior to timestamp, Wh/m^2
TMYData.DNISource See [1], Table 1-4
TMYData.DNIUncertainty Uncertainty based on random and bias error estimates see [2]
TMYData.DHI Amount of diffuse horizontal radiation recv'd during 60 minutes prior to timestamp, Wh/m^2
TMYData.DHISource See [1], Table 1-4
TMYData.DHIUncertainty Uncertainty based on random and bias error estimates see [2]
TMYData.GHillum Avg. total horizontal illuminance recv'd during the 60 minutes prior to timestamp, lx
TMYData.GHillumSource See [1], Table 1-4
TMYData.GHillumUncertainty Uncertainty based on random and bias error estimates see [2]
TMYData.DNillum Avg. direct normal illuminance recv'd during the 60 minutes prior to timestamp, lx
TMYData.DNillumSource See [1], Table 1-4
TMYData.DNillumUncertainty Uncertainty based on random and bias error estimates see [2]
TMYData.DHillum Avg. horizontal diffuse illuminance recv'd during the 60 minutes prior to timestamp, lx
TMYData.DHillumSource See [1], Table 1-4
TMYData.DHillumUncertainty Uncertainty based on random and bias error estimates see [2]
TMYData.Zenithlum Avg. luminance at the sky's zenith during the 60 minutes prior to timestamp, cd/m^2
TMYData.ZenithlumSource See [1], Table 1-4
TMYData.ZenithlumUncertainty Uncertainty based on random and bias error estimates see [1] section 2.10
TMYData.TotCld Amount of sky dome covered by clouds or obscuring phenomena at time stamp, tenths of sky
TMYData.TotCldSource See [1], Table 1-5, 8760x1 cell array of strings
TMYData.TotCldUnertainty See [1], Table 1-6
TMYData.OpqCld Amount of sky dome covered by clouds or obscuring phenomena that prevent observing the sky at time stamp, tenths of sky
TMYData.OpqCldSource See [1], Table 1-5, 8760x1 cell array of strings
TMYData.OpqCldUncertainty See [1], Table 1-6
TMYData.DryBulb Dry bulb temperature at the time indicated, deg C
TMYData.DryBulbSource See [1], Table 1-5, 8760x1 cell array of strings
TMYData.DryBulbUncertainty See [1], Table 1-6
TMYData.DewPoint Dew-point temperature at the time indicated, deg C
TMYData.DewPointSource See [1], Table 1-5, 8760x1 cell array of strings
TMYData.DewPointUncertainty See [1], Table 1-6
TMYData.RHum Relative humidity at the time indicated, percent
TMYData.RHumSource See [1], Table 1-5, 8760x1 cell array of strings
TMYData.RHumUncertainty See [1], Table 1-6
TMYData.Pressure Station pressure at the time indicated, 1 mbar
TMYData.PressureSource See [1], Table 1-5, 8760x1 cell array of strings
TMYData.PressureUncertainty See [1], Table 1-6
TMYData.Wdir Wind direction at time indicated, degrees from north (360 = north; 0 = undefined,calm)
TMYData.WdirSource See [1], Table 1-5, 8760x1 cell array of strings
TMYData.WdirUncertainty See [1], Table 1-6
TMYData.Wspd Wind speed at the time indicated, meter/second
TMYData.WspdSource See [1], Table 1-5, 8760x1 cell array of strings
TMYData.WspdUncertainty See [1], Table 1-6
TMYData.Hvis Distance to discernable remote objects at time indicated (7777=unlimited), meter
TMYData.HvisSource See [1], Table 1-5, 8760x1 cell array of strings
TMYData.HvisUncertainty See [1], Table 1-6
TMYData.CeilHgt Height of cloud base above local terrain (7777=unlimited), meter
TMYData.CeilHgtSource See [1], Table 1-5, 8760x1 cell array of strings
TMYData.CeilHgtUncertainty See [1], Table 1-6
TMYData.Pwat Total precipitable water contained in a column of unit cross section from earth to top of atmosphere, cm
TMYData.PwatSource See [1], Table 1-5, 8760x1 cell array of strings
TMYData.PwatUncertainty See [1], Table 1-6
TMYData.AOD The broadband aerosol optical depth per unit of air mass due to extinction by aerosol component of atmosphere, unitless
TMYData.AODSource See [1], Table 1-5, 8760x1 cell array of strings
TMYData.AODUncertainty See [1], Table 1-6
TMYData.Alb The ratio of reflected solar irradiance to global horizontal irradiance, unitless
TMYData.AlbSource See [1], Table 1-5, 8760x1 cell array of strings
TMYData.AlbUncertainty See [1], Table 1-6
TMYData.Lprecipdepth The amount of liquid precipitation observed at indicated time for the period indicated in the liquid precipitation quantity field, millimeter
TMYData.Lprecipquantity The period of accumulation for the liquid precipitation depth field, hour
TMYData.LprecipSource See [1], Table 1-5, 8760x1 cell array of strings
TMYData.LprecipUncertainty See [1], Table 1-6
TMYData.PresWth Present weather code, see [2].
TMYData.PresWthSource Present weather code source, see [2].
TMYData.PresWthUncertainty Present weather code uncertainty, see [2].
============================= ======================================================================================================================================================
References
----------
[1] Wilcox, S and Marion, W. "Users Manual for TMY3 Data Sets".
NREL/TP-581-43156, Revised May 2008.
[2] Wilcox, S. (2007). National Solar Radiation Database 1991 2005
Update: Users Manual. 472 pp.; NREL Report No. TP-581-41364.
'''
if filename is None:
try:
filename = _interactive_load()
except ImportError:
raise ImportError('Interactive load failed. Tkinter not supported '
'on this system. Try installing X-Quartz and '
'reloading')
head = ['USAF', 'Name', 'State', 'TZ', 'latitude', 'longitude', 'altitude']
if filename.startswith('http'):
request = Request(filename, headers={'User-Agent': (
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_5) '
'AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.87 '
'Safari/537.36')})
response = urlopen(request)
csvdata = io.StringIO(response.read().decode(errors='ignore'))
else:
# assume it's accessible via the file system
csvdata = open(filename, 'r')
# read in file metadata, advance buffer to second line
firstline = csvdata.readline()
if 'Request Rejected' in firstline:
raise IOError('Remote server rejected TMY file request')
meta = dict(zip(head, firstline.rstrip('\n').split(",")))
# convert metadata strings to numeric types
meta['altitude'] = float(meta['altitude'])
meta['latitude'] = float(meta['latitude'])
meta['longitude'] = float(meta['longitude'])
meta['TZ'] = float(meta['TZ'])
meta['USAF'] = int(meta['USAF'])
# use pandas to read the csv file/stringio buffer
# header is actually the second line in file, but tell pandas to look for
# header information on the 1st line (0 indexing) because we've already
# advanced past the true first line with the readline call above.
data = pd.read_csv(
csvdata, header=0,
parse_dates={'datetime': ['Date (MM/DD/YYYY)', 'Time (HH:MM)']},
date_parser=lambda *x: _parsedate(*x, year=coerce_year),
index_col='datetime')
if recolumn:
data = _recolumn(data) # rename to standard column names
data = data.tz_localize(int(meta['TZ'] * 3600))
return data, meta | Read a TMY3 file into a pandas dataframe.
Note that values contained in the metadata dictionary are unchanged
from the TMY3 file (i.e. units are retained). In the case of any
discrepancies between this documentation and the TMY3 User's Manual
[1], the TMY3 User's Manual takes precedence.
The TMY3 files were updated in Jan. 2015. This function requires the
use of the updated files.
Parameters
----------
filename : None or string, default None
If None, attempts to use a Tkinter file browser. A string can be
a relative file path, absolute file path, or url.
coerce_year : None or int, default None
If supplied, the year of the data will be set to this value.
recolumn : bool, default True
If True, apply standard names to TMY3 columns. Typically this
results in stripping the units from the column name.
Returns
-------
Tuple of the form (data, metadata).
data : DataFrame
A pandas dataframe with the columns described in the table
below. For more detailed descriptions of each component, please
consult the TMY3 User's Manual ([1]), especially tables 1-1
through 1-6.
metadata : dict
The site metadata available in the file.
Notes
-----
The returned structures have the following fields.
=============== ====== ===================
key format description
=============== ====== ===================
altitude Float site elevation
latitude Float site latitude
longitude Float site longitude
Name String site name
State String state
TZ Float UTC offset
USAF Int USAF identifier
=============== ====== ===================
============================= ======================================================================================================================================================
TMYData field description
============================= ======================================================================================================================================================
TMYData.Index A pandas datetime index. NOTE, the index is currently timezone unaware, and times are set to local standard time (daylight savings is not included)
TMYData.ETR Extraterrestrial horizontal radiation recv'd during 60 minutes prior to timestamp, Wh/m^2
TMYData.ETRN Extraterrestrial normal radiation recv'd during 60 minutes prior to timestamp, Wh/m^2
TMYData.GHI Direct and diffuse horizontal radiation recv'd during 60 minutes prior to timestamp, Wh/m^2
TMYData.GHISource See [1], Table 1-4
TMYData.GHIUncertainty Uncertainty based on random and bias error estimates see [2]
TMYData.DNI Amount of direct normal radiation (modeled) recv'd during 60 minutes prior to timestamp, Wh/m^2
TMYData.DNISource See [1], Table 1-4
TMYData.DNIUncertainty Uncertainty based on random and bias error estimates see [2]
TMYData.DHI Amount of diffuse horizontal radiation recv'd during 60 minutes prior to timestamp, Wh/m^2
TMYData.DHISource See [1], Table 1-4
TMYData.DHIUncertainty Uncertainty based on random and bias error estimates see [2]
TMYData.GHillum Avg. total horizontal illuminance recv'd during the 60 minutes prior to timestamp, lx
TMYData.GHillumSource See [1], Table 1-4
TMYData.GHillumUncertainty Uncertainty based on random and bias error estimates see [2]
TMYData.DNillum Avg. direct normal illuminance recv'd during the 60 minutes prior to timestamp, lx
TMYData.DNillumSource See [1], Table 1-4
TMYData.DNillumUncertainty Uncertainty based on random and bias error estimates see [2]
TMYData.DHillum Avg. horizontal diffuse illuminance recv'd during the 60 minutes prior to timestamp, lx
TMYData.DHillumSource See [1], Table 1-4
TMYData.DHillumUncertainty Uncertainty based on random and bias error estimates see [2]
TMYData.Zenithlum Avg. luminance at the sky's zenith during the 60 minutes prior to timestamp, cd/m^2
TMYData.ZenithlumSource See [1], Table 1-4
TMYData.ZenithlumUncertainty Uncertainty based on random and bias error estimates see [1] section 2.10
TMYData.TotCld Amount of sky dome covered by clouds or obscuring phenomena at time stamp, tenths of sky
TMYData.TotCldSource See [1], Table 1-5, 8760x1 cell array of strings
TMYData.TotCldUnertainty See [1], Table 1-6
TMYData.OpqCld Amount of sky dome covered by clouds or obscuring phenomena that prevent observing the sky at time stamp, tenths of sky
TMYData.OpqCldSource See [1], Table 1-5, 8760x1 cell array of strings
TMYData.OpqCldUncertainty See [1], Table 1-6
TMYData.DryBulb Dry bulb temperature at the time indicated, deg C
TMYData.DryBulbSource See [1], Table 1-5, 8760x1 cell array of strings
TMYData.DryBulbUncertainty See [1], Table 1-6
TMYData.DewPoint Dew-point temperature at the time indicated, deg C
TMYData.DewPointSource See [1], Table 1-5, 8760x1 cell array of strings
TMYData.DewPointUncertainty See [1], Table 1-6
TMYData.RHum Relative humidity at the time indicated, percent
TMYData.RHumSource See [1], Table 1-5, 8760x1 cell array of strings
TMYData.RHumUncertainty See [1], Table 1-6
TMYData.Pressure Station pressure at the time indicated, 1 mbar
TMYData.PressureSource See [1], Table 1-5, 8760x1 cell array of strings
TMYData.PressureUncertainty See [1], Table 1-6
TMYData.Wdir Wind direction at time indicated, degrees from north (360 = north; 0 = undefined,calm)
TMYData.WdirSource See [1], Table 1-5, 8760x1 cell array of strings
TMYData.WdirUncertainty See [1], Table 1-6
TMYData.Wspd Wind speed at the time indicated, meter/second
TMYData.WspdSource See [1], Table 1-5, 8760x1 cell array of strings
TMYData.WspdUncertainty See [1], Table 1-6
TMYData.Hvis Distance to discernable remote objects at time indicated (7777=unlimited), meter
TMYData.HvisSource See [1], Table 1-5, 8760x1 cell array of strings
TMYData.HvisUncertainty See [1], Table 1-6
TMYData.CeilHgt Height of cloud base above local terrain (7777=unlimited), meter
TMYData.CeilHgtSource See [1], Table 1-5, 8760x1 cell array of strings
TMYData.CeilHgtUncertainty See [1], Table 1-6
TMYData.Pwat Total precipitable water contained in a column of unit cross section from earth to top of atmosphere, cm
TMYData.PwatSource See [1], Table 1-5, 8760x1 cell array of strings
TMYData.PwatUncertainty See [1], Table 1-6
TMYData.AOD The broadband aerosol optical depth per unit of air mass due to extinction by aerosol component of atmosphere, unitless
TMYData.AODSource See [1], Table 1-5, 8760x1 cell array of strings
TMYData.AODUncertainty See [1], Table 1-6
TMYData.Alb The ratio of reflected solar irradiance to global horizontal irradiance, unitless
TMYData.AlbSource See [1], Table 1-5, 8760x1 cell array of strings
TMYData.AlbUncertainty See [1], Table 1-6
TMYData.Lprecipdepth The amount of liquid precipitation observed at indicated time for the period indicated in the liquid precipitation quantity field, millimeter
TMYData.Lprecipquantity The period of accumulation for the liquid precipitation depth field, hour
TMYData.LprecipSource See [1], Table 1-5, 8760x1 cell array of strings
TMYData.LprecipUncertainty See [1], Table 1-6
TMYData.PresWth Present weather code, see [2].
TMYData.PresWthSource Present weather code source, see [2].
TMYData.PresWthUncertainty Present weather code uncertainty, see [2].
============================= ======================================================================================================================================================
References
----------
[1] Wilcox, S and Marion, W. "Users Manual for TMY3 Data Sets".
NREL/TP-581-43156, Revised May 2008.
[2] Wilcox, S. (2007). National Solar Radiation Database 1991 2005
Update: Users Manual. 472 pp.; NREL Report No. TP-581-41364. | Below is the the instruction that describes the task:
### Input:
Read a TMY3 file into a pandas dataframe.
Note that values contained in the metadata dictionary are unchanged
from the TMY3 file (i.e. units are retained). In the case of any
discrepancies between this documentation and the TMY3 User's Manual
[1], the TMY3 User's Manual takes precedence.
The TMY3 files were updated in Jan. 2015. This function requires the
use of the updated files.
Parameters
----------
filename : None or string, default None
If None, attempts to use a Tkinter file browser. A string can be
a relative file path, absolute file path, or url.
coerce_year : None or int, default None
If supplied, the year of the data will be set to this value.
recolumn : bool, default True
If True, apply standard names to TMY3 columns. Typically this
results in stripping the units from the column name.
Returns
-------
Tuple of the form (data, metadata).
data : DataFrame
A pandas dataframe with the columns described in the table
below. For more detailed descriptions of each component, please
consult the TMY3 User's Manual ([1]), especially tables 1-1
through 1-6.
metadata : dict
The site metadata available in the file.
Notes
-----
The returned structures have the following fields.
=============== ====== ===================
key format description
=============== ====== ===================
altitude Float site elevation
latitude Float site latitude
longitude Float site longitude
Name String site name
State String state
TZ Float UTC offset
USAF Int USAF identifier
=============== ====== ===================
============================= ======================================================================================================================================================
TMYData field description
============================= ======================================================================================================================================================
TMYData.Index A pandas datetime index. NOTE, the index is currently timezone unaware, and times are set to local standard time (daylight savings is not included)
TMYData.ETR Extraterrestrial horizontal radiation recv'd during 60 minutes prior to timestamp, Wh/m^2
TMYData.ETRN Extraterrestrial normal radiation recv'd during 60 minutes prior to timestamp, Wh/m^2
TMYData.GHI Direct and diffuse horizontal radiation recv'd during 60 minutes prior to timestamp, Wh/m^2
TMYData.GHISource See [1], Table 1-4
TMYData.GHIUncertainty Uncertainty based on random and bias error estimates see [2]
TMYData.DNI Amount of direct normal radiation (modeled) recv'd during 60 minutes prior to timestamp, Wh/m^2
TMYData.DNISource See [1], Table 1-4
TMYData.DNIUncertainty Uncertainty based on random and bias error estimates see [2]
TMYData.DHI Amount of diffuse horizontal radiation recv'd during 60 minutes prior to timestamp, Wh/m^2
TMYData.DHISource See [1], Table 1-4
TMYData.DHIUncertainty Uncertainty based on random and bias error estimates see [2]
TMYData.GHillum Avg. total horizontal illuminance recv'd during the 60 minutes prior to timestamp, lx
TMYData.GHillumSource See [1], Table 1-4
TMYData.GHillumUncertainty Uncertainty based on random and bias error estimates see [2]
TMYData.DNillum Avg. direct normal illuminance recv'd during the 60 minutes prior to timestamp, lx
TMYData.DNillumSource See [1], Table 1-4
TMYData.DNillumUncertainty Uncertainty based on random and bias error estimates see [2]
TMYData.DHillum Avg. horizontal diffuse illuminance recv'd during the 60 minutes prior to timestamp, lx
TMYData.DHillumSource See [1], Table 1-4
TMYData.DHillumUncertainty Uncertainty based on random and bias error estimates see [2]
TMYData.Zenithlum Avg. luminance at the sky's zenith during the 60 minutes prior to timestamp, cd/m^2
TMYData.ZenithlumSource See [1], Table 1-4
TMYData.ZenithlumUncertainty Uncertainty based on random and bias error estimates see [1] section 2.10
TMYData.TotCld Amount of sky dome covered by clouds or obscuring phenomena at time stamp, tenths of sky
TMYData.TotCldSource See [1], Table 1-5, 8760x1 cell array of strings
TMYData.TotCldUnertainty See [1], Table 1-6
TMYData.OpqCld Amount of sky dome covered by clouds or obscuring phenomena that prevent observing the sky at time stamp, tenths of sky
TMYData.OpqCldSource See [1], Table 1-5, 8760x1 cell array of strings
TMYData.OpqCldUncertainty See [1], Table 1-6
TMYData.DryBulb Dry bulb temperature at the time indicated, deg C
TMYData.DryBulbSource See [1], Table 1-5, 8760x1 cell array of strings
TMYData.DryBulbUncertainty See [1], Table 1-6
TMYData.DewPoint Dew-point temperature at the time indicated, deg C
TMYData.DewPointSource See [1], Table 1-5, 8760x1 cell array of strings
TMYData.DewPointUncertainty See [1], Table 1-6
TMYData.RHum Relative humidity at the time indicated, percent
TMYData.RHumSource See [1], Table 1-5, 8760x1 cell array of strings
TMYData.RHumUncertainty See [1], Table 1-6
TMYData.Pressure Station pressure at the time indicated, 1 mbar
TMYData.PressureSource See [1], Table 1-5, 8760x1 cell array of strings
TMYData.PressureUncertainty See [1], Table 1-6
TMYData.Wdir Wind direction at time indicated, degrees from north (360 = north; 0 = undefined,calm)
TMYData.WdirSource See [1], Table 1-5, 8760x1 cell array of strings
TMYData.WdirUncertainty See [1], Table 1-6
TMYData.Wspd Wind speed at the time indicated, meter/second
TMYData.WspdSource See [1], Table 1-5, 8760x1 cell array of strings
TMYData.WspdUncertainty See [1], Table 1-6
TMYData.Hvis Distance to discernable remote objects at time indicated (7777=unlimited), meter
TMYData.HvisSource See [1], Table 1-5, 8760x1 cell array of strings
TMYData.HvisUncertainty See [1], Table 1-6
TMYData.CeilHgt Height of cloud base above local terrain (7777=unlimited), meter
TMYData.CeilHgtSource See [1], Table 1-5, 8760x1 cell array of strings
TMYData.CeilHgtUncertainty See [1], Table 1-6
TMYData.Pwat Total precipitable water contained in a column of unit cross section from earth to top of atmosphere, cm
TMYData.PwatSource See [1], Table 1-5, 8760x1 cell array of strings
TMYData.PwatUncertainty See [1], Table 1-6
TMYData.AOD The broadband aerosol optical depth per unit of air mass due to extinction by aerosol component of atmosphere, unitless
TMYData.AODSource See [1], Table 1-5, 8760x1 cell array of strings
TMYData.AODUncertainty See [1], Table 1-6
TMYData.Alb The ratio of reflected solar irradiance to global horizontal irradiance, unitless
TMYData.AlbSource See [1], Table 1-5, 8760x1 cell array of strings
TMYData.AlbUncertainty See [1], Table 1-6
TMYData.Lprecipdepth The amount of liquid precipitation observed at indicated time for the period indicated in the liquid precipitation quantity field, millimeter
TMYData.Lprecipquantity The period of accumulation for the liquid precipitation depth field, hour
TMYData.LprecipSource See [1], Table 1-5, 8760x1 cell array of strings
TMYData.LprecipUncertainty See [1], Table 1-6
TMYData.PresWth Present weather code, see [2].
TMYData.PresWthSource Present weather code source, see [2].
TMYData.PresWthUncertainty Present weather code uncertainty, see [2].
============================= ======================================================================================================================================================
References
----------
[1] Wilcox, S and Marion, W. "Users Manual for TMY3 Data Sets".
NREL/TP-581-43156, Revised May 2008.
[2] Wilcox, S. (2007). National Solar Radiation Database 1991 2005
Update: Users Manual. 472 pp.; NREL Report No. TP-581-41364.
### Response:
def read_tmy3(filename=None, coerce_year=None, recolumn=True):
'''
Read a TMY3 file into a pandas dataframe.
Note that values contained in the metadata dictionary are unchanged
from the TMY3 file (i.e. units are retained). In the case of any
discrepancies between this documentation and the TMY3 User's Manual
[1], the TMY3 User's Manual takes precedence.
The TMY3 files were updated in Jan. 2015. This function requires the
use of the updated files.
Parameters
----------
filename : None or string, default None
If None, attempts to use a Tkinter file browser. A string can be
a relative file path, absolute file path, or url.
coerce_year : None or int, default None
If supplied, the year of the data will be set to this value.
recolumn : bool, default True
If True, apply standard names to TMY3 columns. Typically this
results in stripping the units from the column name.
Returns
-------
Tuple of the form (data, metadata).
data : DataFrame
A pandas dataframe with the columns described in the table
below. For more detailed descriptions of each component, please
consult the TMY3 User's Manual ([1]), especially tables 1-1
through 1-6.
metadata : dict
The site metadata available in the file.
Notes
-----
The returned structures have the following fields.
=============== ====== ===================
key format description
=============== ====== ===================
altitude Float site elevation
latitude Float site latitude
longitude Float site longitude
Name String site name
State String state
TZ Float UTC offset
USAF Int USAF identifier
=============== ====== ===================
============================= ======================================================================================================================================================
TMYData field description
============================= ======================================================================================================================================================
TMYData.Index A pandas datetime index. NOTE, the index is currently timezone unaware, and times are set to local standard time (daylight savings is not included)
TMYData.ETR Extraterrestrial horizontal radiation recv'd during 60 minutes prior to timestamp, Wh/m^2
TMYData.ETRN Extraterrestrial normal radiation recv'd during 60 minutes prior to timestamp, Wh/m^2
TMYData.GHI Direct and diffuse horizontal radiation recv'd during 60 minutes prior to timestamp, Wh/m^2
TMYData.GHISource See [1], Table 1-4
TMYData.GHIUncertainty Uncertainty based on random and bias error estimates see [2]
TMYData.DNI Amount of direct normal radiation (modeled) recv'd during 60 minutes prior to timestamp, Wh/m^2
TMYData.DNISource See [1], Table 1-4
TMYData.DNIUncertainty Uncertainty based on random and bias error estimates see [2]
TMYData.DHI Amount of diffuse horizontal radiation recv'd during 60 minutes prior to timestamp, Wh/m^2
TMYData.DHISource See [1], Table 1-4
TMYData.DHIUncertainty Uncertainty based on random and bias error estimates see [2]
TMYData.GHillum Avg. total horizontal illuminance recv'd during the 60 minutes prior to timestamp, lx
TMYData.GHillumSource See [1], Table 1-4
TMYData.GHillumUncertainty Uncertainty based on random and bias error estimates see [2]
TMYData.DNillum Avg. direct normal illuminance recv'd during the 60 minutes prior to timestamp, lx
TMYData.DNillumSource See [1], Table 1-4
TMYData.DNillumUncertainty Uncertainty based on random and bias error estimates see [2]
TMYData.DHillum Avg. horizontal diffuse illuminance recv'd during the 60 minutes prior to timestamp, lx
TMYData.DHillumSource See [1], Table 1-4
TMYData.DHillumUncertainty Uncertainty based on random and bias error estimates see [2]
TMYData.Zenithlum Avg. luminance at the sky's zenith during the 60 minutes prior to timestamp, cd/m^2
TMYData.ZenithlumSource See [1], Table 1-4
TMYData.ZenithlumUncertainty Uncertainty based on random and bias error estimates see [1] section 2.10
TMYData.TotCld Amount of sky dome covered by clouds or obscuring phenomena at time stamp, tenths of sky
TMYData.TotCldSource See [1], Table 1-5, 8760x1 cell array of strings
TMYData.TotCldUnertainty See [1], Table 1-6
TMYData.OpqCld Amount of sky dome covered by clouds or obscuring phenomena that prevent observing the sky at time stamp, tenths of sky
TMYData.OpqCldSource See [1], Table 1-5, 8760x1 cell array of strings
TMYData.OpqCldUncertainty See [1], Table 1-6
TMYData.DryBulb Dry bulb temperature at the time indicated, deg C
TMYData.DryBulbSource See [1], Table 1-5, 8760x1 cell array of strings
TMYData.DryBulbUncertainty See [1], Table 1-6
TMYData.DewPoint Dew-point temperature at the time indicated, deg C
TMYData.DewPointSource See [1], Table 1-5, 8760x1 cell array of strings
TMYData.DewPointUncertainty See [1], Table 1-6
TMYData.RHum Relative humidity at the time indicated, percent
TMYData.RHumSource See [1], Table 1-5, 8760x1 cell array of strings
TMYData.RHumUncertainty See [1], Table 1-6
TMYData.Pressure Station pressure at the time indicated, 1 mbar
TMYData.PressureSource See [1], Table 1-5, 8760x1 cell array of strings
TMYData.PressureUncertainty See [1], Table 1-6
TMYData.Wdir Wind direction at time indicated, degrees from north (360 = north; 0 = undefined,calm)
TMYData.WdirSource See [1], Table 1-5, 8760x1 cell array of strings
TMYData.WdirUncertainty See [1], Table 1-6
TMYData.Wspd Wind speed at the time indicated, meter/second
TMYData.WspdSource See [1], Table 1-5, 8760x1 cell array of strings
TMYData.WspdUncertainty See [1], Table 1-6
TMYData.Hvis Distance to discernable remote objects at time indicated (7777=unlimited), meter
TMYData.HvisSource See [1], Table 1-5, 8760x1 cell array of strings
TMYData.HvisUncertainty See [1], Table 1-6
TMYData.CeilHgt Height of cloud base above local terrain (7777=unlimited), meter
TMYData.CeilHgtSource See [1], Table 1-5, 8760x1 cell array of strings
TMYData.CeilHgtUncertainty See [1], Table 1-6
TMYData.Pwat Total precipitable water contained in a column of unit cross section from earth to top of atmosphere, cm
TMYData.PwatSource See [1], Table 1-5, 8760x1 cell array of strings
TMYData.PwatUncertainty See [1], Table 1-6
TMYData.AOD The broadband aerosol optical depth per unit of air mass due to extinction by aerosol component of atmosphere, unitless
TMYData.AODSource See [1], Table 1-5, 8760x1 cell array of strings
TMYData.AODUncertainty See [1], Table 1-6
TMYData.Alb The ratio of reflected solar irradiance to global horizontal irradiance, unitless
TMYData.AlbSource See [1], Table 1-5, 8760x1 cell array of strings
TMYData.AlbUncertainty See [1], Table 1-6
TMYData.Lprecipdepth The amount of liquid precipitation observed at indicated time for the period indicated in the liquid precipitation quantity field, millimeter
TMYData.Lprecipquantity The period of accumulatitudeion for the liquid precipitation depth field, hour
TMYData.LprecipSource See [1], Table 1-5, 8760x1 cell array of strings
TMYData.LprecipUncertainty See [1], Table 1-6
TMYData.PresWth Present weather code, see [2].
TMYData.PresWthSource Present weather code source, see [2].
TMYData.PresWthUncertainty Present weather code uncertainty, see [2].
============================= ======================================================================================================================================================
References
----------
[1] Wilcox, S and Marion, W. "Users Manual for TMY3 Data Sets".
NREL/TP-581-43156, Revised May 2008.
[2] Wilcox, S. (2007). National Solar Radiation Database 1991 2005
Update: Users Manual. 472 pp.; NREL Report No. TP-581-41364.
'''
if filename is None:
try:
filename = _interactive_load()
except ImportError:
raise ImportError('Interactive load failed. Tkinter not supported '
'on this system. Try installing X-Quartz and '
'reloading')
head = ['USAF', 'Name', 'State', 'TZ', 'latitude', 'longitude', 'altitude']
if filename.startswith('http'):
request = Request(filename, headers={'User-Agent': (
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_5) '
'AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.87 '
'Safari/537.36')})
response = urlopen(request)
csvdata = io.StringIO(response.read().decode(errors='ignore'))
else:
# assume it's accessible via the file system
csvdata = open(filename, 'r')
# read in file metadata, advance buffer to second line
firstline = csvdata.readline()
if 'Request Rejected' in firstline:
raise IOError('Remote server rejected TMY file request')
meta = dict(zip(head, firstline.rstrip('\n').split(",")))
# convert metadata strings to numeric types
meta['altitude'] = float(meta['altitude'])
meta['latitude'] = float(meta['latitude'])
meta['longitude'] = float(meta['longitude'])
meta['TZ'] = float(meta['TZ'])
meta['USAF'] = int(meta['USAF'])
# use pandas to read the csv file/stringio buffer
# header is actually the second line in file, but tell pandas to look for
# header information on the 1st line (0 indexing) because we've already
# advanced past the true first line with the readline call above.
data = pd.read_csv(
csvdata, header=0,
parse_dates={'datetime': ['Date (MM/DD/YYYY)', 'Time (HH:MM)']},
date_parser=lambda *x: _parsedate(*x, year=coerce_year),
index_col='datetime')
if recolumn:
data = _recolumn(data) # rename to standard column names
data = data.tz_localize(int(meta['TZ'] * 3600))
return data, meta |
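A minimal usage sketch for the read_tmy3 function above. The import path (pvlib.iotools), the example file name, and the coerce_year value are assumptions for illustration, not taken from the entry itself:

# Hypothetical usage; file name and import path are placeholders.
from pvlib.iotools import read_tmy3

data, metadata = read_tmy3('723170TYA.CSV', coerce_year=1990)
print(metadata['Name'], metadata['latitude'], metadata['longitude'])
# With recolumn=True (the default), standard column names are available:
print(data[['GHI', 'DNI', 'DHI', 'DryBulb']].head())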
def get_metric_fns(metric_names, labels, outputs):
"""Generate a dictionary of metric name to metric function.
Args:
metric_names: list of strings in the format "prefix/metric_function_name".
metric_function_name should refer to a function name in metrics.py. The
prefix will be included in the key in the returned dict.
labels: a tensor where batch is the first dimension.
outputs: a tensor of model predictions, same dimensionality as labels.
Returns:
metric_fns: dict of metric functions keyed by their name.
"""
metric_fns = {}
for metric_name in metric_names:
metric_fn_name = metric_name.split("/")[-1]
if hasattr(metrics, metric_fn_name):
metric_fn = getattr(metrics, metric_fn_name)
metric_fns[metric_name] = metric_fn(labels, outputs)
else:
raise ValueError("Metric {} is not implemented".format(metric_fn_name))
return metric_fns | Generate a dictionary of metric name to metric function.
Args:
metric_names: list of strings in the format "prefix/metric_function_name".
metric_function_name should refer to a function name in metrics.py. The
prefix will be included in the key in the returned dict.
labels: a tensor where batch is the first dimension.
outputs: a tensor of model predictions, same dimensionality as labels.
Returns:
metric_fns: dict of metric functions keyed by their name. | Below is the the instruction that describes the task:
### Input:
Generate a dictionary of metric name to metric function.
Args:
metric_names: list of strings in the format "prefix/metric_function_name".
metric_function_name should refer to a function name in metrics.py. The
prefix will be included in the key in the returned dict.
labels: a tensor where batch is the first dimension.
outputs: a tensor of model predictions, same dimensionality as labels.
Returns:
metric_fns: dict of metric functions keyed by their name.
### Response:
def get_metric_fns(metric_names, labels, outputs):
"""Generate a dictionary of metric name to metric function.
Args:
metric_names: list of strings in the format "prefix/metric_function_name".
metric_function_name should refer to a function name in metrics.py. The
prefix will be included in the key in the returned dict.
labels: a tensor where batch is the first dimension.
outputs: a tensor of model predictions, same dimensionality as labels.
Returns:
metric_fns: dict of metric functions keyed by their name.
"""
metric_fns = {}
for metric_name in metric_names:
metric_fn_name = metric_name.split("/")[-1]
if hasattr(metrics, metric_fn_name):
metric_fn = getattr(metrics, metric_fn_name)
metric_fns[metric_name] = metric_fn(labels, outputs)
else:
raise ValueError("Metric {} is not implemented".format(metric_fn_name))
return metric_fns |
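The core of the helper above is a name-to-function lookup with getattr on the metrics module. Below is a small self-contained illustration of that pattern; the accuracy function and the plain lists standing in for tensors are made-up stand-ins, not part of the original module:

import types

# Stand-in for the project's metrics.py module (assumption for the demo).
metrics = types.SimpleNamespace(
    accuracy=lambda labels, outputs: sum(l == o for l, o in zip(labels, outputs)) / len(labels)
)

def get_metric_fns_demo(metric_names, labels, outputs):
    metric_fns = {}
    for metric_name in metric_names:
        metric_fn_name = metric_name.split("/")[-1]  # keep only the function name after the prefix
        if hasattr(metrics, metric_fn_name):
            metric_fns[metric_name] = getattr(metrics, metric_fn_name)(labels, outputs)
        else:
            raise ValueError("Metric {} is not implemented".format(metric_fn_name))
    return metric_fns

print(get_metric_fns_demo(["eval/accuracy"], [1, 0, 1, 1], [1, 0, 0, 1]))  # {'eval/accuracy': 0.75}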
def parse(schema):
"""
Parse `schema`, either a string or a file-like object, and
return a :class:`MessageRegistry` with the loaded messages.
"""
if not isinstance(schema, basestring):
# assume it is file-like
schema = schema.read()
message = re.compile(r'^\(([^,]+),\s*(\d+)\):\s*$')
field = re.compile(r'^-\s*([^:]+):\s+(.+?)\s*$')
registry = MessageRegistry({})
messages = registry.messages
curr = None
names = None
for lineno, line in enumerate(schema.split('\n')):
line = line.strip()
if '#' in line:
line = line[:line.index('#')]
if line == '':
continue
f = field.match(line)
if f:
if curr is None:
raise ParseError(
'field definition outside of message at line %d' % lineno)
name = f.group(1)
type = f.group(2)
if name not in names:
f = Field(curr, name, type)
curr.fields.append(f)
names.add(name)
continue
else:
raise ParseError(
'duplicate field name "%s" at line %d' % (name, lineno))
m = message.match(line)
if m:
# new message definition
name, vers = m.group(1), int(m.group(2))
if (name, vers) in messages:
raise ParseError('Duplicate message (%s, %d)' % (name, vers))
curr = messages[(name, vers)] = Message(registry, name, vers, [])
names = set()
continue
for message in registry.messages.values():
message.fields = tuple(message.fields)
return registry | Parse `schema`, either a string or a file-like object, and
return a :class:`MessageRegistry` with the loaded messages. | Below is the the instruction that describes the task:
### Input:
Parse `schema`, either a string or a file-like object, and
return a :class:`MessageRegistry` with the loaded messages.
### Response:
def parse(schema):
"""
Parse `schema`, either a string or a file-like object, and
return a :class:`MessageRegistry` with the loaded messages.
"""
if not isinstance(schema, basestring):
# assume it is file-like
schema = schema.read()
message = re.compile(r'^\(([^,]+),\s*(\d+)\):\s*$')
field = re.compile(r'^-\s*([^:]+):\s+(.+?)\s*$')
registry = MessageRegistry({})
messages = registry.messages
curr = None
names = None
for lineno, line in enumerate(schema.split('\n')):
line = line.strip()
if '#' in line:
line = line[:line.index('#')]
if line == '':
continue
f = field.match(line)
if f:
if curr is None:
raise ParseError(
'field definition outside of message at line %d' % lineno)
name = f.group(1)
type = f.group(2)
if name not in names:
f = Field(curr, name, type)
curr.fields.append(f)
names.add(name)
continue
else:
raise ParseError(
'duplicate field name "%s" at line %d' % (name, lineno))
m = message.match(line)
if m:
# new message definition
name, vers = m.group(1), int(m.group(2))
if (name, vers) in messages:
raise ParseError('Duplicate message (%s, %d)' % (name, vers))
curr = messages[(name, vers)] = Message(registry, name, vers, [])
names = set()
continue
for message in registry.messages.values():
message.fields = tuple(message.fields)
return registry |
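A short, self-contained illustration of the schema format the parser above accepts; the message and field names are invented for the example:

schema_text = '''
# comments and blank lines are ignored
(Order, 1):
- id: int
- price: float
'''
registry = parse(schema_text)
print(sorted(registry.messages.keys()))  # [('Order', 1)] -- messages are keyed by (name, version)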
def fetch(self, zookeeper_path, settings=None):
"""
Download a partition from another server.
:param zookeeper_path: Path in zookeeper to fetch from
:param settings: Settings for executing request to ClickHouse over db.raw() method
:return: SQL Query
"""
return self._partition_operation_sql('FETCH', settings=settings, from_part=zookeeper_path) | Download a partition from another server.
:param zookeeper_path: Path in zookeeper to fetch from
:param settings: Settings for executing request to ClickHouse over db.raw() method
:return: SQL Query | Below is the the instruction that describes the task:
### Input:
Download a partition from another server.
:param zookeeper_path: Path in zookeeper to fetch from
:param settings: Settings for executing request to ClickHouse over db.raw() method
:return: SQL Query
### Response:
def fetch(self, zookeeper_path, settings=None):
"""
Download a partition from another server.
:param zookeeper_path: Path in zookeeper to fetch from
:param settings: Settings for executing request to ClickHouse over db.raw() method
:return: SQL Query
"""
return self._partition_operation_sql('FETCH', settings=settings, from_part=zookeeper_path) |
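A hedged usage sketch; the partition object and the ZooKeeper path below are assumptions about the calling context, not documented API:

# Hypothetical: 'partition' is an object exposing the fetch() method defined above.
sql = partition.fetch('/clickhouse/tables/shard-01/mytable')
# The returned SQL string can then be executed through the database connection,
# e.g. db.raw(sql, settings=settings), as the docstring suggests.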
def measure(function, xs, ys, popt, weights):
"""
measure the quality of a fit
"""
m = 0
n = 0
for x in xs:
try:
if len(popt) == 2:
m += (ys[n] - function(x, popt[0], popt[1]))**2 * weights[n]
elif len(popt) == 3:
m += (ys[n] - function(x, popt[0], popt[1], popt[2]))**2 * weights[n]
else:
raise NotImplementedError
n += 1
except IndexError:
raise RuntimeError('y does not exist for x = ', x, ' this should not happen')
return m | measure the quality of a fit | Below is the the instruction that describes the task:
### Input:
measure the quality of a fit
### Response:
def measure(function, xs, ys, popt, weights):
"""
measure the quality of a fit
"""
m = 0
n = 0
for x in xs:
try:
if len(popt) == 2:
m += (ys[n] - function(x, popt[0], popt[1]))**2 * weights[n]
elif len(popt) == 3:
m += (ys[n] - function(x, popt[0], popt[1], popt[2]))**2 * weights[n]
else:
raise NotImplementedError
n += 1
except IndexError:
raise RuntimeError('y does not exist for x = ', x, ' this should not happen')
return m |
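The function computes a weighted sum of squared residuals, m = sum_n weights[n] * (ys[n] - f(xs[n], *popt))**2. A tiny self-contained check using the measure function above, with a made-up linear model and data:

def linear(x, a, b):
    return a * x + b

xs = [0, 1, 2]
ys = [1.0, 3.1, 4.9]
popt = [2.0, 1.0]             # fitted slope and intercept
weights = [1.0, 1.0, 1.0]
print(measure(linear, xs, ys, popt, weights))  # ~0.02 = 0.0 + 0.1**2 + (-0.1)**2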
def get_file(self, attr_name):
'''Return absolute path to logging file for obj's attribute.'''
return os.path.abspath(os.path.join(self.folder, "{}.log"
.format(attr_name))) | Return absolute path to logging file for obj's attribute. | Below is the the instruction that describes the task:
### Input:
Return absolute path to logging file for obj's attribute.
### Response:
def get_file(self, attr_name):
'''Return absolute path to logging file for obj's attribute.'''
return os.path.abspath(os.path.join(self.folder, "{}.log"
.format(attr_name))) |
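A quick sketch of what the path construction above evaluates to; the folder and attribute names are invented:

import os
folder = "logs/run1"           # assumed value of self.folder
attr_name = "loss"
print(os.path.abspath(os.path.join(folder, "{}.log".format(attr_name))))
# -> <current working directory>/logs/run1/loss.log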
def _subspan(self, s, span, nextspan):
"""Recursively subdivide spans based on a series of rules."""
text = s[span[0]:span[1]]
lowertext = text.lower()
# Skip if only a single character or a split sequence
if span[1] - span[0] < 2 or text in self.SPLIT or text in self.SPLIT_END_WORD or text in self.SPLIT_START_WORD or lowertext in self.NO_SPLIT:
return [span]
# Skip if it looks like URL
if text.startswith('http://') or text.startswith('ftp://') or text.startswith('www.'):
return [span]
# Split full stop at end of final token (allow certain characters to follow) unless ellipsis
if self.split_last_stop and nextspan is None and text not in self.NO_SPLIT_STOP and not text[-3:] == '...':
if text[-1] == '.':
return self._split_span(span, -1)
ind = text.rfind('.')
if ind > -1 and all(t in '\'‘’"“”)]}' for t in text[ind + 1:]):
return self._split_span(span, ind, 1)
# Split off certain sequences at the end of a token
for spl in self.SPLIT_END:
if text.endswith(spl) and len(text) > len(spl):
return self._split_span(span, -len(spl), 0)
# Split off certain sequences at the end of a word
for spl in self.SPLIT_END_WORD:
if text.endswith(spl) and len(text) > len(spl) and text[-len(spl) - 1].isalpha():
return self._split_span(span, -len(spl), 0)
# Split off certain sequences at the end of a word
for spl in self.SPLIT_START_WORD:
if text.startswith(spl) and len(text) > len(spl) and text[-len(spl) - 1].isalpha():
return self._split_span(span, len(spl), 0)
# Split around certain sequences
for spl in self.SPLIT:
ind = text.find(spl)
if ind > -1:
return self._split_span(span, ind, len(spl))
# Split around certain sequences unless followed by a digit
# - We skip this because of difficulty with chemical names.
# for spl in self.SPLIT_NO_DIGIT:
# ind = text.rfind(spl)
# if ind > -1 and (len(text) <= ind + len(spl) or not text[ind + len(spl)].isdigit()):
# return self._split_span(span, ind, len(spl))
# Split off certain sequences at the end of a token unless preceded by a digit
for spl in self.SPLIT_END_NO_DIGIT:
if text.endswith(spl) and len(text) > len(spl) and not text[-len(spl) - 1].isdigit():
return self._split_span(span, -len(spl), 0)
# Regular Bracket at both start and end, break off both provided they correspond
if text.startswith('(') and text.endswith(')') and self._closing_bracket_index(text) == len(text) - 1:
return self._split_span(span, 1, len(text)-2)
# Split things like IR(KBr)
if text.startswith('IR(') and text.endswith(')'):
return self._split_span(span, 2, 1)
# Split things like \d+\.\d+([a-z]+) e.g. UV-vis/IR peaks with bracketed strength/shape
m = re.match('^(\d+\.\d+|\d{3,})(\([a-z]+\))$', text, re.I)
if m:
return self._split_span(span, m.start(2), 1)
# Split brackets off start and end if the corresponding bracket isn't within token
for bpair in [('(', ')'), ('{', '}'), ('[', ']')]:
#level = bracket_level(text, open=[bpair[0]], close=[bpair[1]])
# Bracket at start, bracketlevel > 0, break it off
if text.startswith(bpair[0]) and self._closing_bracket_index(text, bpair=bpair) is None:
return self._split_span(span, 1, 0)
# Bracket at end, bracketlevel < 0, break it off
if text.endswith(bpair[1]) and self._opening_bracket_index(text, bpair=bpair) is None:
return self._split_span(span, -1, 0)
# TODO: Consider splitting around comma in limited circumstances. Mainly to fix whitespace errors.
# Characters to split around, but with exceptions
for i, char in enumerate(text):
before = text[:i]
after = text[i+1:]
if char in {':', ';'}:
# Split around colon unless it looks like we're in a chemical name
if not (before and after and after[0].isdigit() and before.rstrip('′\'')[-1:].isdigit() and '-' in after) and not (self.NO_SPLIT_CHEM.search(before) and self.NO_SPLIT_CHEM.search(after)):
return self._split_span(span, i, 1)
elif char in {'x', '+', '−'}:
# Split around x, +, − (\u2212 minus) between two numbers or at start followed by numbers
if (i == 0 or self._is_number(before)) and self._is_number(after):
return self._split_span(span, i, 1)
# Also split around − (\u2212 minus) between two letters
if char == '−' and before and before[-1].isalpha() and after and after[0].isalpha():
return self._split_span(span, i, 1)
elif char == '±':
# Split around ± unless surrounded by brackets
if not (before and after and before[-1] == '(' and after[0] == ')'):
return self._split_span(span, i, 1)
elif char == '/':
# Split around / unless '+/-' or '-/-' etc.
if not (before and after and before[-1] in self.NO_SPLIT_SLASH and after[0] in self.NO_SPLIT_SLASH):
return self._split_span(span, i, 1)
elif char == '>':
if not (before and before[-1] == '-'):
# Split if preceding is not -
return self._split_span(span, i, 1)
if before and before[-1] == '-':
# If preceding is -, split around -> unless in chemical name
if not text == '->' and not self._is_saccharide_arrow(before[:-1], after):
return self._split_span(span, i-1, 2)
elif char == '→' and not self._is_saccharide_arrow(before, after):
# Split around → unless in chemical name
return self._split_span(span, i, 1)
elif char == '(' and self._is_number(before) and not '(' in after and not ')' in after:
# Split around open bracket after a number
return self._split_span(span, i, 1)
elif char == '-':
lowerbefore = lowertext[:i]
lowerafter = lowertext[i+1:]
# Always split on -of-the- -to- -in- -by- -of- -or- -and- -per- -the-
if lowerafter[:7] == 'of-the-':
return [(span[0], span[0] + i), (span[0] + i, span[0] + i + 1), (span[0] + i + 1, span[0] + i + 3), (span[0] + i + 3, span[0] + i + 4), (span[0] + i + 4, span[0] + i + 7), (span[0] + i + 7, span[0] + i + 8), (span[0] + i + 8, span[1])]
if lowerafter[:5] in {'on-a-', 'of-a-'}:
return [(span[0], span[0] + i), (span[0] + i, span[0] + i + 1), (span[0] + i + 1, span[0] + i + 3), (span[0] + i + 3, span[0] + i + 4), (span[0] + i + 4, span[0] + i + 5), (span[0] + i + 5, span[0] + i + 6), (span[0] + i + 6, span[1])]
if lowerafter[:3] in {'to-', 'in-', 'by-', 'of-', 'or-', 'on-'}:
return [(span[0], span[0] + i), (span[0] + i, span[0] + i + 1), (span[0] + i + 1, span[0] + i + 3), (span[0] + i + 3, span[0] + i + 4), (span[0] + i + 4, span[1])]
if lowerafter[:4] in {'and-', 'per-', 'the-'}:
return [(span[0], span[0] + i), (span[0] + i, span[0] + i + 1), (span[0] + i + 1, span[0] + i + 4), (span[0] + i + 4, span[0] + i + 5), (span[0] + i + 5, span[1])]
# By default we split on hyphens
split = True
if lowerafter == 'nmr':
split = True # Always split NMR off end
elif bracket_level(text) == 0 and (not bracket_level(after) == 0 or not bracket_level(before) == 0):
split = False # Don't split if within brackets
elif after and after[0] == '>':
split = False # Don't split if followed by >
elif lowerbefore in self.NO_SPLIT_PREFIX or lowerafter in self.NO_SPLIT_SUFFIX:
split = False # Don't split if prefix or suffix in list
elif self.NO_SPLIT_PREFIX_ENDING.search(lowerbefore):
split = False # Don't split if prefix ends with pattern
elif lowerafter in self.SPLIT_SUFFIX:
split = True # Do split if suffix in list
elif len(before) <= 1 or len(after) <= 2:
split = False # Don't split if not at least 2 char before and 3 after
elif self.NO_SPLIT_CHEM.search(lowerbefore) or self.NO_SPLIT_CHEM.search(lowerafter):
split = False # Don't split if prefix or suffix match chem regex
if split:
return self._split_span(span, i, 1)
# TODO: Errors:
# [³H]-choline
# S,S'-...
# 1,4-di-substituted
# 11-β - hydroxysteroid
# Spelt out greek: 11beta - hydroxysteroid
# ...-N-substituted like 2,5-dimethyl-N-substituted pyrroles
# 4-(2-Butyl-6,7-dichloro-2-cyclopentyl-indan-1-on-5-yl) oxobutyric acid
# Adenosine - monophosphate
# Consistency for amino acids: Arg-Arg and Arg-Arg-Asp... probably always split
# D,L-α-peptide?
# N'-formylkynurenine
# poly(D,L-lactic acid )?
# poly(methyl metha-acrylate )?
# Poly(N - alkyl Acrylamide )
# poly(N - isopropyl acrylamide )
# R,S - lorazepam
# S,S - dioxide
# Split units off the end of a numeric value
quantity = self.QUANTITY_RE.search(text)
if quantity:
return self._split_span(span, len(quantity.group(6) or quantity.group(3) or quantity.group(2)), 0)
# Split pH off the start of a numeric value
if text.startswith('pH') and self._is_number(text[2:]):
return self._split_span(span, 2, 0)
# Split contraction words
for contraction in self.CONTRACTIONS:
if lowertext == contraction[0]:
return self._split_span(span, contraction[1])
if nextspan:
nexttext = s[nextspan[0]:nextspan[1]]
# Split NMR isotope whitespace errors (joined with previous sentence full stop)
if nexttext == 'NMR':
ind = text.rfind('.')
if ind > -1 and text[ind + 1:] in {'1H', '13C', '15N', '31P', '19F', '11B', '29Si', '170', '73Ge', '195Pt', '33S', '13C{1H}'}:
return self._split_span(span, ind, 1)
return [span] | Recursively subdivide spans based on a series of rules. | Below is the the instruction that describes the task:
### Input:
Recursively subdivide spans based on a series of rules.
### Response:
def _subspan(self, s, span, nextspan):
"""Recursively subdivide spans based on a series of rules."""
text = s[span[0]:span[1]]
lowertext = text.lower()
# Skip if only a single character or a split sequence
if span[1] - span[0] < 2 or text in self.SPLIT or text in self.SPLIT_END_WORD or text in self.SPLIT_START_WORD or lowertext in self.NO_SPLIT:
return [span]
# Skip if it looks like URL
if text.startswith('http://') or text.startswith('ftp://') or text.startswith('www.'):
return [span]
# Split full stop at end of final token (allow certain characters to follow) unless ellipsis
if self.split_last_stop and nextspan is None and text not in self.NO_SPLIT_STOP and not text[-3:] == '...':
if text[-1] == '.':
return self._split_span(span, -1)
ind = text.rfind('.')
if ind > -1 and all(t in '\'‘’"“”)]}' for t in text[ind + 1:]):
return self._split_span(span, ind, 1)
# Split off certain sequences at the end of a token
for spl in self.SPLIT_END:
if text.endswith(spl) and len(text) > len(spl):
return self._split_span(span, -len(spl), 0)
# Split off certain sequences at the end of a word
for spl in self.SPLIT_END_WORD:
if text.endswith(spl) and len(text) > len(spl) and text[-len(spl) - 1].isalpha():
return self._split_span(span, -len(spl), 0)
# Split off certain sequences at the end of a word
for spl in self.SPLIT_START_WORD:
if text.startswith(spl) and len(text) > len(spl) and text[-len(spl) - 1].isalpha():
return self._split_span(span, len(spl), 0)
# Split around certain sequences
for spl in self.SPLIT:
ind = text.find(spl)
if ind > -1:
return self._split_span(span, ind, len(spl))
# Split around certain sequences unless followed by a digit
# - We skip this because of difficulty with chemical names.
# for spl in self.SPLIT_NO_DIGIT:
# ind = text.rfind(spl)
# if ind > -1 and (len(text) <= ind + len(spl) or not text[ind + len(spl)].isdigit()):
# return self._split_span(span, ind, len(spl))
# Split off certain sequences at the end of a token unless preceded by a digit
for spl in self.SPLIT_END_NO_DIGIT:
if text.endswith(spl) and len(text) > len(spl) and not text[-len(spl) - 1].isdigit():
return self._split_span(span, -len(spl), 0)
# Regular Bracket at both start and end, break off both provided they correspond
if text.startswith('(') and text.endswith(')') and self._closing_bracket_index(text) == len(text) - 1:
return self._split_span(span, 1, len(text)-2)
# Split things like IR(KBr)
if text.startswith('IR(') and text.endswith(')'):
return self._split_span(span, 2, 1)
# Split things like \d+\.\d+([a-z]+) e.g. UV-vis/IR peaks with bracketed strength/shape
m = re.match('^(\d+\.\d+|\d{3,})(\([a-z]+\))$', text, re.I)
if m:
return self._split_span(span, m.start(2), 1)
# Split brackets off start and end if the corresponding bracket isn't within token
for bpair in [('(', ')'), ('{', '}'), ('[', ']')]:
#level = bracket_level(text, open=[bpair[0]], close=[bpair[1]])
# Bracket at start, bracketlevel > 0, break it off
if text.startswith(bpair[0]) and self._closing_bracket_index(text, bpair=bpair) is None:
return self._split_span(span, 1, 0)
# Bracket at end, bracketlevel < 0, break it off
if text.endswith(bpair[1]) and self._opening_bracket_index(text, bpair=bpair) is None:
return self._split_span(span, -1, 0)
# TODO: Consider splitting around comma in limited circumstances. Mainly to fix whitespace errors.
# Characters to split around, but with exceptions
for i, char in enumerate(text):
before = text[:i]
after = text[i+1:]
if char in {':', ';'}:
# Split around colon unless it looks like we're in a chemical name
if not (before and after and after[0].isdigit() and before.rstrip('′\'')[-1:].isdigit() and '-' in after) and not (self.NO_SPLIT_CHEM.search(before) and self.NO_SPLIT_CHEM.search(after)):
return self._split_span(span, i, 1)
elif char in {'x', '+', '−'}:
# Split around x, +, − (\u2212 minus) between two numbers or at start followed by numbers
if (i == 0 or self._is_number(before)) and self._is_number(after):
return self._split_span(span, i, 1)
# Also split around − (\u2212 minus) between two letters
if char == '−' and before and before[-1].isalpha() and after and after[0].isalpha():
return self._split_span(span, i, 1)
elif char == '±':
# Split around ± unless surrounded by brackets
if not (before and after and before[-1] == '(' and after[0] == ')'):
return self._split_span(span, i, 1)
elif char == '/':
# Split around / unless '+/-' or '-/-' etc.
if not (before and after and before[-1] in self.NO_SPLIT_SLASH and after[0] in self.NO_SPLIT_SLASH):
return self._split_span(span, i, 1)
elif char == '>':
if not (before and before[-1] == '-'):
# Split if preceding is not -
return self._split_span(span, i, 1)
if before and before[-1] == '-':
# If preceding is -, split around -> unless in chemical name
if not text == '->' and not self._is_saccharide_arrow(before[:-1], after):
return self._split_span(span, i-1, 2)
elif char == '→' and not self._is_saccharide_arrow(before, after):
# Split around → unless in chemical name
return self._split_span(span, i, 1)
elif char == '(' and self._is_number(before) and not '(' in after and not ')' in after:
# Split around open bracket after a number
return self._split_span(span, i, 1)
elif char == '-':
lowerbefore = lowertext[:i]
lowerafter = lowertext[i+1:]
# Always split on -of-the- -to- -in- -by- -of- -or- -and- -per- -the-
if lowerafter[:7] == 'of-the-':
return [(span[0], span[0] + i), (span[0] + i, span[0] + i + 1), (span[0] + i + 1, span[0] + i + 3), (span[0] + i + 3, span[0] + i + 4), (span[0] + i + 4, span[0] + i + 7), (span[0] + i + 7, span[0] + i + 8), (span[0] + i + 8, span[1])]
if lowerafter[:5] in {'on-a-', 'of-a-'}:
return [(span[0], span[0] + i), (span[0] + i, span[0] + i + 1), (span[0] + i + 1, span[0] + i + 3), (span[0] + i + 3, span[0] + i + 4), (span[0] + i + 4, span[0] + i + 5), (span[0] + i + 5, span[0] + i + 6), (span[0] + i + 6, span[1])]
if lowerafter[:3] in {'to-', 'in-', 'by-', 'of-', 'or-', 'on-'}:
return [(span[0], span[0] + i), (span[0] + i, span[0] + i + 1), (span[0] + i + 1, span[0] + i + 3), (span[0] + i + 3, span[0] + i + 4), (span[0] + i + 4, span[1])]
if lowerafter[:4] in {'and-', 'per-', 'the-'}:
return [(span[0], span[0] + i), (span[0] + i, span[0] + i + 1), (span[0] + i + 1, span[0] + i + 4), (span[0] + i + 4, span[0] + i + 5), (span[0] + i + 5, span[1])]
# By default we split on hyphens
split = True
if lowerafter == 'nmr':
split = True # Always split NMR off end
elif bracket_level(text) == 0 and (not bracket_level(after) == 0 or not bracket_level(before) == 0):
split = False # Don't split if within brackets
elif after and after[0] == '>':
split = False # Don't split if followed by >
elif lowerbefore in self.NO_SPLIT_PREFIX or lowerafter in self.NO_SPLIT_SUFFIX:
split = False # Don't split if prefix or suffix in list
elif self.NO_SPLIT_PREFIX_ENDING.search(lowerbefore):
split = False # Don't split if prefix ends with pattern
elif lowerafter in self.SPLIT_SUFFIX:
split = True # Do split if suffix in list
elif len(before) <= 1 or len(after) <= 2:
split = False # Don't split if not at least 2 char before and 3 after
elif self.NO_SPLIT_CHEM.search(lowerbefore) or self.NO_SPLIT_CHEM.search(lowerafter):
split = False # Don't split if prefix or suffix match chem regex
if split:
return self._split_span(span, i, 1)
# TODO: Errors:
# [³H]-choline
# S,S'-...
# 1,4-di-substituted
# 11-β - hydroxysteroid
# Spelt out greek: 11beta - hydroxysteroid
# ...-N-substituted like 2,5-dimethyl-N-substituted pyrroles
# 4-(2-Butyl-6,7-dichloro-2-cyclopentyl-indan-1-on-5-yl) oxobutyric acid
# Adenosine - monophosphate
# Consistency for amino acids: Arg-Arg and Arg-Arg-Asp... probably always split
# D,L-α-peptide?
# N'-formylkynurenine
# poly(D,L-lactic acid )?
# poly(methyl metha-acrylate )?
# Poly(N - alkyl Acrylamide )
# poly(N - isopropyl acrylamide )
# R,S - lorazepam
# S,S - dioxide
# Split units off the end of a numeric value
quantity = self.QUANTITY_RE.search(text)
if quantity:
return self._split_span(span, len(quantity.group(6) or quantity.group(3) or quantity.group(2)), 0)
# Split pH off the start of a numeric value
if text.startswith('pH') and self._is_number(text[2:]):
return self._split_span(span, 2, 0)
# Split contraction words
for contraction in self.CONTRACTIONS:
if lowertext == contraction[0]:
return self._split_span(span, contraction[1])
if nextspan:
nexttext = s[nextspan[0]:nextspan[1]]
# Split NMR isotope whitespace errors (joined with previous sentence full stop)
if nexttext == 'NMR':
ind = text.rfind('.')
if ind > -1 and text[ind + 1:] in {'1H', '13C', '15N', '31P', '19F', '11B', '29Si', '170', '73Ge', '195Pt', '33S', '13C{1H}'}:
return self._split_span(span, ind, 1)
return [span] |
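The method above works on (start, end) index pairs into the original string and returns lists of such spans rather than substrings. A simplified, self-contained stand-in for the span-splitting step (the real helper `_split_span` is not shown in this entry, so its exact signature is unknown; this only illustrates the idea):

def split_span_around(span, index, sep_len=1):
    """Split (start, end) into before / separator / after spans at a local index, dropping empty pieces."""
    start, end = span
    parts = [(start, start + index),
             (start + index, start + index + sep_len),
             (start + index + sep_len, end)]
    return [(a, b) for a, b in parts if b > a]

s = "pH7.4"
span = (0, len(s))
# Mirrors the rule "Split pH off the start of a numeric value":
print([s[a:b] for a, b in split_span_around(span, 2, 0)])  # ['pH', '7.4']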
def _bsecurate_cli_component_file_refs(args):
'''Handles the component-file-refs subcommand'''
data = curate.component_file_refs(args.files)
s = ''
for cfile, cdata in data.items():
s += cfile + '\n'
rows = []
for el, refs in cdata:
rows.append((' ' + el, ' '.join(refs)))
s += '\n'.join(format_columns(rows)) + '\n\n'
return s | Handles the component-file-refs subcommand | Below is the the instruction that describes the task:
### Input:
Handles the component-file-refs subcommand
### Response:
def _bsecurate_cli_component_file_refs(args):
'''Handles the component-file-refs subcommand'''
data = curate.component_file_refs(args.files)
s = ''
for cfile, cdata in data.items():
s += cfile + '\n'
rows = []
for el, refs in cdata:
rows.append((' ' + el, ' '.join(refs)))
s += '\n'.join(format_columns(rows)) + '\n\n'
return s |
def available_backends():
"""Lists the currently available backend types"""
print 'The following LiveSync agents are available:'
for name, backend in current_plugin.backend_classes.iteritems():
print cformat(' - %{white!}{}%{reset}: {} ({})').format(name, backend.title, backend.description) | Lists the currently available backend types | Below is the the instruction that describes the task:
### Input:
Lists the currently available backend types
### Response:
def available_backends():
"""Lists the currently available backend types"""
print 'The following LiveSync agents are available:'
for name, backend in current_plugin.backend_classes.iteritems():
print cformat(' - %{white!}{}%{reset}: {} ({})').format(name, backend.title, backend.description) |
def init(plugin_manager, course_factory, client, config):
"""
Init the external grader plugin. This simple grader allows only anonymous requests, and submissions are not stored in database.
Available configuration:
::
plugins:
- plugin_module: inginious.frontend.plugins.simple_grader
courseid : "external"
page_pattern: "/external"
return_fields: "^(result|text|problems)$"
The grader will only return fields that are in the job return dict if their key match return_fields.
Different types of request are available : see documentation
"""
courseid = config.get('courseid', 'external')
course = course_factory.get_course(courseid)
page_pattern = config.get('page_pattern', '/external')
return_fields = re.compile(config.get('return_fields', '^(result|text|problems)$'))
client_buffer = ClientBuffer(client)
client_sync = ClientSync(client)
class ExternalGrader(INGIniousPage):
""" Manages job from outside, using the default input """
def GET(self):
""" GET request """
return """
<!DOCTYPE html>
<html>
<head>
<title>External grade POST test</title>
</head>
<body>
<form method="post">
<textarea style="width:100%; height:400px;" name="input">{"question1":"print 'Hello World!'"}</textarea><br/>
<input type="text" name="taskid" value="helloworld"/> (taskid)<br/>
<input type="checkbox" name="async"/> async?<br/>
<input type="submit"/>
</form>
</body>
</html>"""
def keep_only_config_return_values(self, job_return):
""" Keep only some useful return values """
return {key: value for key, value in job_return.items() if return_fields.match(key)}
def POST(self):
""" POST request """
web.header('Access-Control-Allow-Origin', '*')
web.header('Content-Type', 'application/json')
post_input = web.input()
if "input" in post_input and "taskid" in post_input:
# New job
try:
task_input = json.loads(post_input.input)
except:
return json.dumps({"status": "error", "status_message": "Cannot decode input"})
try:
task = course.get_task(post_input.taskid)
except:
return json.dumps({"status": "error", "status_message": "Cannot open task"})
if not task.input_is_consistent(task_input, self.default_allowed_file_extensions, self.default_max_file_size):
return json.dumps({"status": "error", "status_message": "Input is not consistent with the task"})
if post_input.get("async") is None:
# New sync job
try:
result, grade, problems, tests, custom, state, archive, stdout, stderr = client_sync.new_job(task, task_input, "Plugin - Simple Grader")
job_return = {"result":result, "grade": grade, "problems": problems, "tests": tests, "custom": custom, "state": state, "archive": archive, "stdout": stdout, "stderr": stderr}
except:
return json.dumps({"status": "error", "status_message": "An internal error occurred"})
return json.dumps(dict(list({"status": "done"}.items()) + list(self.keep_only_config_return_values(job_return).items())))
else:
# New async job
jobid = client_buffer.new_job(task, task_input, "Plugin - Simple Grader")
return json.dumps({"status": "done", "jobid": str(jobid)})
elif "jobid" in post_input:
# Get status of async job
if client_buffer.is_waiting(post_input["jobid"]):
return json.dumps({"status": "waiting"})
elif client_buffer.is_done(post_input["jobid"]):
result, grade, problems, tests, custom, state, archive, stdout, stderr = client_buffer.get_result(post_input["jobid"])
job_return = {"result": result, "grade": grade, "problems": problems, "tests": tests,
"custom": custom, "archive": archive, "stdout": stdout, "stderr": stderr}
return json.dumps(dict(list({"status": "done"}.items()) + list(self.keep_only_config_return_values(job_return).items())))
else:
return json.dumps({"status": "error", "status_message": "There is no job with jobid {}".format(post_input["jobid"])})
else:
return json.dumps({"status": "error", "status_message": "Unknown request type"})
plugin_manager.add_page(page_pattern, ExternalGrader) | Init the external grader plugin. This simple grader allows only anonymous requests, and submissions are not stored in database.
Available configuration:
::
plugins:
- plugin_module: inginious.frontend.plugins.simple_grader
courseid : "external"
page_pattern: "/external"
return_fields: "^(result|text|problems)$"
The grader will only return fields that are in the job return dict if their key match return_fields.
Different types of request are available : see documentation | Below is the the instruction that describes the task:
### Input:
Init the external grader plugin. This simple grader allows only anonymous requests, and submissions are not stored in database.
Available configuration:
::
plugins:
- plugin_module: inginious.frontend.plugins.simple_grader
courseid : "external"
page_pattern: "/external"
return_fields: "^(result|text|problems)$"
The grader will only return fields that are in the job return dict if their key match return_fields.
Different types of request are available : see documentation
### Response:
def init(plugin_manager, course_factory, client, config):
"""
Init the external grader plugin. This simple grader allows only anonymous requests, and submissions are not stored in database.
Available configuration:
::
plugins:
- plugin_module: inginious.frontend.plugins.simple_grader
courseid : "external"
page_pattern: "/external"
return_fields: "^(result|text|problems)$"
The grader will only return fields that are in the job return dict if their key match return_fields.
Different types of request are available : see documentation
"""
courseid = config.get('courseid', 'external')
course = course_factory.get_course(courseid)
page_pattern = config.get('page_pattern', '/external')
return_fields = re.compile(config.get('return_fields', '^(result|text|problems)$'))
client_buffer = ClientBuffer(client)
client_sync = ClientSync(client)
class ExternalGrader(INGIniousPage):
""" Manages job from outside, using the default input """
def GET(self):
""" GET request """
return """
<!DOCTYPE html>
<html>
<head>
<title>External grade POST test</title>
</head>
<body>
<form method="post">
<textarea style="width:100%; height:400px;" name="input">{"question1":"print 'Hello World!'"}</textarea><br/>
<input type="text" name="taskid" value="helloworld"/> (taskid)<br/>
<input type="checkbox" name="async"/> async?<br/>
<input type="submit"/>
</form>
</body>
</html>"""
def keep_only_config_return_values(self, job_return):
""" Keep only some useful return values """
return {key: value for key, value in job_return.items() if return_fields.match(key)}
def POST(self):
""" POST request """
web.header('Access-Control-Allow-Origin', '*')
web.header('Content-Type', 'application/json')
post_input = web.input()
if "input" in post_input and "taskid" in post_input:
# New job
try:
task_input = json.loads(post_input.input)
except:
return json.dumps({"status": "error", "status_message": "Cannot decode input"})
try:
task = course.get_task(post_input.taskid)
except:
return json.dumps({"status": "error", "status_message": "Cannot open task"})
if not task.input_is_consistent(task_input, self.default_allowed_file_extensions, self.default_max_file_size):
return json.dumps({"status": "error", "status_message": "Input is not consistent with the task"})
if post_input.get("async") is None:
# New sync job
try:
result, grade, problems, tests, custom, state, archive, stdout, stderr = client_sync.new_job(task, task_input, "Plugin - Simple Grader")
job_return = {"result":result, "grade": grade, "problems": problems, "tests": tests, "custom": custom, "state": state, "archive": archive, "stdout": stdout, "stderr": stderr}
except:
return json.dumps({"status": "error", "status_message": "An internal error occurred"})
return json.dumps(dict(list({"status": "done"}.items()) + list(self.keep_only_config_return_values(job_return).items())))
else:
# New async job
jobid = client_buffer.new_job(task, task_input, "Plugin - Simple Grader")
return json.dumps({"status": "done", "jobid": str(jobid)})
elif "jobid" in post_input:
# Get status of async job
if client_buffer.is_waiting(post_input["jobid"]):
return json.dumps({"status": "waiting"})
elif client_buffer.is_done(post_input["jobid"]):
result, grade, problems, tests, custom, state, archive, stdout, stderr = client_buffer.get_result(post_input["jobid"])
job_return = {"result": result, "grade": grade, "problems": problems, "tests": tests,
"custom": custom, "archive": archive, "stdout": stdout, "stderr": stderr}
return json.dumps(dict(list({"status": "done"}.items()) + list(self.keep_only_config_return_values(job_return).items())))
else:
return json.dumps({"status": "error", "status_message": "There is no job with jobid {}".format(post_input["jobid"])})
else:
return json.dumps({"status": "error", "status_message": "Unknown request type"})
plugin_manager.add_page(page_pattern, ExternalGrader) |
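A minimal client-side sketch of how the grader page registered above could be exercised; the base URL, task id and answer payload are illustrative assumptions (the task id and input mirror the defaults in the demo GET form), not values defined by the plugin:
import json
import requests
BASE_URL = 'http://localhost:8080/external'  # assumed INGInious frontend plus the default page_pattern
payload = {
    'taskid': 'helloworld',  # assumed task id, as in the demo form above
    'input': json.dumps({'question1': "print 'Hello World!'"}),
}
print(requests.post(BASE_URL, data=payload).json())  # synchronous grading: {"status": "done", ...}
payload['async'] = 'yes'  # any non-empty value switches to the asynchronous branch
jobid = requests.post(BASE_URL, data=payload).json()['jobid']
print(requests.post(BASE_URL, data={'jobid': jobid}).json())  # {"status": "waiting"} until the job finishes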
def update_product(
self,
product,
location=None,
product_id=None,
update_mask=None,
project_id=None,
retry=None,
timeout=None,
metadata=None,
):
"""
For the documentation see:
:class:`~airflow.contrib.operators.gcp_vision_operator.CloudVisionProductUpdateOperator`
"""
client = self.get_conn()
product = self.product_name_determiner.get_entity_with_name(product, product_id, location, project_id)
self.log.info('Updating ProductSet: %s', product.name)
response = client.update_product(
product=product, update_mask=update_mask, retry=retry, timeout=timeout, metadata=metadata
)
self.log.info('Product updated: %s', response.name if response else '')
self.log.debug('Product updated:\n%s', response)
return MessageToDict(response) | For the documentation see:
:class:`~airflow.contrib.operators.gcp_vision_operator.CloudVisionProductUpdateOperator` | Below is the the instruction that describes the task:
### Input:
For the documentation see:
:class:`~airflow.contrib.operators.gcp_vision_operator.CloudVisionProductUpdateOperator`
### Response:
def update_product(
self,
product,
location=None,
product_id=None,
update_mask=None,
project_id=None,
retry=None,
timeout=None,
metadata=None,
):
"""
For the documentation see:
:class:`~airflow.contrib.operators.gcp_vision_operator.CloudVisionProductUpdateOperator`
"""
client = self.get_conn()
product = self.product_name_determiner.get_entity_with_name(product, product_id, location, project_id)
self.log.info('Updating ProductSet: %s', product.name)
response = client.update_product(
product=product, update_mask=update_mask, retry=retry, timeout=timeout, metadata=metadata
)
self.log.info('Product updated: %s', response.name if response else '')
self.log.debug('Product updated:\n%s', response)
return MessageToDict(response) |
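A rough usage sketch, assuming this method sits on Airflow's contrib CloudVisionHook and wraps the google-cloud-vision Product API; the connection id, project, location, product id and the exact import paths are assumptions that depend on the installed library versions:
from airflow.contrib.hooks.gcp_vision_hook import CloudVisionHook
from google.cloud.vision_v1.types import Product  # older google-cloud-vision releases; newer ones expose Product elsewhere
from google.protobuf.field_mask_pb2 import FieldMask
hook = CloudVisionHook(gcp_conn_id='google_cloud_default')  # assumed default Airflow connection id
updated = hook.update_product(
    product=Product(display_name='updated-name'),
    location='europe-west1',  # placeholder region
    product_id='my-product',  # placeholder product id
    project_id='my-project',  # placeholder project
    update_mask=FieldMask(paths=['display_name']),
)
print(updated['name'])  # the dict produced by MessageToDict(response)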
def index_of_nearest(p, hot_points, distance_f=distance):
"""Given a point and a set of hot points it found the hot point
nearest to the given point. An arbitrary distance function can
be specified
    :return the index of the nearest hot point, or None if the list of hot
points is empty
"""
min_dist = None
nearest_hp_i = None
for i, hp in enumerate(hot_points):
dist = distance_f(p, hp)
if min_dist is None or dist < min_dist:
min_dist = dist
nearest_hp_i = i
    return nearest_hp_i | Given a point and a set of hot points, it finds the hot point
nearest to the given point. An arbitrary distance function can
be specified
    :return the index of the nearest hot point, or None if the list of hot
points is empty | Below is the the instruction that describes the task:
### Input:
Given a point and a set of hot points, it finds the hot point
nearest to the given point. An arbitrary distance function can
be specified
:return the index of the nearest hot point, or None if the list of hot
points is empty
### Response:
def index_of_nearest(p, hot_points, distance_f=distance):
"""Given a point and a set of hot points it found the hot point
nearest to the given point. An arbitrary distance function can
be specified
    :return the index of the nearest hot point, or None if the list of hot
points is empty
"""
min_dist = None
nearest_hp_i = None
for i, hp in enumerate(hot_points):
dist = distance_f(p, hp)
if min_dist is None or dist < min_dist:
min_dist = dist
nearest_hp_i = i
return nearest_hp_i |
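A small illustration with concrete points; the Euclidean helper below merely stands in for the module's default distance function, which is not shown here:
import math
def euclidean(a, b):
    return math.hypot(a[0] - b[0], a[1] - b[1])
hot_points = [(0, 0), (5, 5), (10, 0)]
i = index_of_nearest((6, 4), hot_points, distance_f=euclidean)
print(i, hot_points[i])  # -> 1 (5, 5), the hot point closest to (6, 4)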
def markdown(self, text, mode='', context='', raw=False):
"""Render an arbitrary markdown document.
:param str text: (required), the text of the document to render
:param str mode: (optional), 'markdown' or 'gfm'
:param str context: (optional), only important when using mode 'gfm',
this is the repository to use as the context for the rendering
:param bool raw: (optional), renders a document like a README.md, no
gfm, no context
:returns: str -- HTML formatted text
"""
data = None
json = False
headers = {}
if raw:
url = self._build_url('markdown', 'raw')
data = text
headers['content-type'] = 'text/plain'
else:
url = self._build_url('markdown')
data = {}
if text:
data['text'] = text
if mode in ('markdown', 'gfm'):
data['mode'] = mode
if context:
data['context'] = context
json = True
if data:
req = self._post(url, data=data, json=json, headers=headers)
if req.ok:
return req.content
return '' | Render an arbitrary markdown document.
:param str text: (required), the text of the document to render
:param str mode: (optional), 'markdown' or 'gfm'
:param str context: (optional), only important when using mode 'gfm',
this is the repository to use as the context for the rendering
:param bool raw: (optional), renders a document like a README.md, no
gfm, no context
:returns: str -- HTML formatted text | Below is the the instruction that describes the task:
### Input:
Render an arbitrary markdown document.
:param str text: (required), the text of the document to render
:param str mode: (optional), 'markdown' or 'gfm'
:param str context: (optional), only important when using mode 'gfm',
this is the repository to use as the context for the rendering
:param bool raw: (optional), renders a document like a README.md, no
gfm, no context
:returns: str -- HTML formatted text
### Response:
def markdown(self, text, mode='', context='', raw=False):
"""Render an arbitrary markdown document.
:param str text: (required), the text of the document to render
:param str mode: (optional), 'markdown' or 'gfm'
:param str context: (optional), only important when using mode 'gfm',
this is the repository to use as the context for the rendering
:param bool raw: (optional), renders a document like a README.md, no
gfm, no context
:returns: str -- HTML formatted text
"""
data = None
json = False
headers = {}
if raw:
url = self._build_url('markdown', 'raw')
data = text
headers['content-type'] = 'text/plain'
else:
url = self._build_url('markdown')
data = {}
if text:
data['text'] = text
if mode in ('markdown', 'gfm'):
data['mode'] = mode
if context:
data['context'] = context
json = True
if data:
req = self._post(url, data=data, json=json, headers=headers)
if req.ok:
return req.content
return '' |
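A brief usage sketch; the credentials and repository are placeholders, and the assumption is that this method is exposed on an authenticated client object as in github3.py's GitHub class:
import github3
gh = github3.login('octocat', password='secret')  # or token=...; credentials are placeholders
html = gh.markdown('Fixes #42 :tada:', mode='gfm', context='octocat/Hello-World')
print(html)  # HTML with the issue reference and emoji expanded against the given repository
print(gh.markdown('# Title\n*plain* markdown', raw=True))  # raw mode: plain Markdown, no GFM, no context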
def from_dict(data, ctx):
"""
Instantiate a new InstrumentCommission from a dict (generally from
loading a JSON response). The data used to instantiate the
InstrumentCommission is a shallow copy of the dict passed in, with any
complex child types instantiated appropriately.
"""
data = data.copy()
if data.get('commission') is not None:
data['commission'] = ctx.convert_decimal_number(
data.get('commission')
)
if data.get('unitsTraded') is not None:
data['unitsTraded'] = ctx.convert_decimal_number(
data.get('unitsTraded')
)
if data.get('minimumCommission') is not None:
data['minimumCommission'] = ctx.convert_decimal_number(
data.get('minimumCommission')
)
return InstrumentCommission(**data) | Instantiate a new InstrumentCommission from a dict (generally from
loading a JSON response). The data used to instantiate the
InstrumentCommission is a shallow copy of the dict passed in, with any
complex child types instantiated appropriately. | Below is the the instruction that describes the task:
### Input:
Instantiate a new InstrumentCommission from a dict (generally from
loading a JSON response). The data used to instantiate the
InstrumentCommission is a shallow copy of the dict passed in, with any
complex child types instantiated appropriately.
### Response:
def from_dict(data, ctx):
"""
Instantiate a new InstrumentCommission from a dict (generally from
loading a JSON response). The data used to instantiate the
InstrumentCommission is a shallow copy of the dict passed in, with any
complex child types instantiated appropriately.
"""
data = data.copy()
if data.get('commission') is not None:
data['commission'] = ctx.convert_decimal_number(
data.get('commission')
)
if data.get('unitsTraded') is not None:
data['unitsTraded'] = ctx.convert_decimal_number(
data.get('unitsTraded')
)
if data.get('minimumCommission') is not None:
data['minimumCommission'] = ctx.convert_decimal_number(
data.get('minimumCommission')
)
return InstrumentCommission(**data) |
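A minimal sketch of the expected call shape, assuming from_dict is exposed as a static factory on InstrumentCommission; the stub context below only supplies the convert_decimal_number hook the factory relies on and is an assumption about the real ctx object:
class _StubCtx(object):
    def convert_decimal_number(self, value):
        return float(value)  # the real context likely returns Decimal; float is enough for illustration
data = {'commission': '0.50', 'unitsTraded': '100000', 'minimumCommission': '1.25'}
commission = InstrumentCommission.from_dict(data, _StubCtx())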
def validate_types(self, definition):
"""This function performs some basic sanity checks on the scalar definition:
- Checks that all the required fields are available.
- Checks that all the fields have the expected types.
:param definition: the dictionary containing the scalar properties.
:raises ParserError: if a scalar definition field is of the wrong type.
:raises ParserError: if a required field is missing or unknown fields are present.
"""
if not self._strict_type_checks:
return
# The required and optional fields in a scalar type definition.
REQUIRED_FIELDS = {
'bug_numbers': list, # This contains ints. See LIST_FIELDS_CONTENT.
'description': string_types,
'expires': string_types,
'kind': string_types,
'notification_emails': list, # This contains strings. See LIST_FIELDS_CONTENT.
'record_in_processes': list,
}
OPTIONAL_FIELDS = {
'cpp_guard': string_types,
'release_channel_collection': string_types,
'keyed': bool,
}
# The types for the data within the fields that hold lists.
LIST_FIELDS_CONTENT = {
'bug_numbers': int,
'notification_emails': string_types,
'record_in_processes': string_types,
}
# Concatenate the required and optional field definitions.
ALL_FIELDS = REQUIRED_FIELDS.copy()
ALL_FIELDS.update(OPTIONAL_FIELDS)
# Checks that all the required fields are available.
missing_fields = [f for f in REQUIRED_FIELDS.keys() if f not in definition]
if len(missing_fields) > 0:
raise ParserError(self._name + ' - missing required fields: ' +
', '.join(missing_fields) +
'.\nSee: {}#required-fields'.format(BASE_DOC_URL))
# Do we have any unknown field?
unknown_fields = [f for f in definition.keys() if f not in ALL_FIELDS]
if len(unknown_fields) > 0:
raise ParserError(self._name + ' - unknown fields: ' + ', '.join(unknown_fields) +
'.\nSee: {}#required-fields'.format(BASE_DOC_URL))
# Checks the type for all the fields.
wrong_type_names = ['{} must be {}'.format(f, utils.nice_type_name(ALL_FIELDS[f]))
for f in definition.keys()
if not isinstance(definition[f], ALL_FIELDS[f])]
if len(wrong_type_names) > 0:
raise ParserError(self._name + ' - ' + ', '.join(wrong_type_names) +
'.\nSee: {}#required-fields'.format(BASE_DOC_URL))
# Check that the lists are not empty and that data in the lists
# have the correct types.
list_fields = [f for f in definition if isinstance(definition[f], list)]
for field in list_fields:
# Check for empty lists.
if len(definition[field]) == 0:
raise ParserError(("Field '{}' for probe '{}' must not be empty" +
".\nSee: {}#required-fields)")
.format(field, self._name, BASE_DOC_URL))
# Check the type of the list content.
broken_types =\
[not isinstance(v, LIST_FIELDS_CONTENT[field]) for v in definition[field]]
if any(broken_types):
raise ParserError(("Field '{}' for probe '{}' must only contain values of type {}"
".\nSee: {}#the-yaml-definition-file)")
.format(field, self._name, utils.nice_type_name(LIST_FIELDS_CONTENT[field]),
BASE_DOC_URL)) | This function performs some basic sanity checks on the scalar definition:
- Checks that all the required fields are available.
- Checks that all the fields have the expected types.
:param definition: the dictionary containing the scalar properties.
:raises ParserError: if a scalar definition field is of the wrong type.
:raises ParserError: if a required field is missing or unknown fields are present. | Below is the the instruction that describes the task:
### Input:
This function performs some basic sanity checks on the scalar definition:
- Checks that all the required fields are available.
- Checks that all the fields have the expected types.
:param definition: the dictionary containing the scalar properties.
:raises ParserError: if a scalar definition field is of the wrong type.
:raises ParserError: if a required field is missing or unknown fields are present.
### Response:
def validate_types(self, definition):
"""This function performs some basic sanity checks on the scalar definition:
- Checks that all the required fields are available.
- Checks that all the fields have the expected types.
:param definition: the dictionary containing the scalar properties.
:raises ParserError: if a scalar definition field is of the wrong type.
:raises ParserError: if a required field is missing or unknown fields are present.
"""
if not self._strict_type_checks:
return
# The required and optional fields in a scalar type definition.
REQUIRED_FIELDS = {
'bug_numbers': list, # This contains ints. See LIST_FIELDS_CONTENT.
'description': string_types,
'expires': string_types,
'kind': string_types,
'notification_emails': list, # This contains strings. See LIST_FIELDS_CONTENT.
'record_in_processes': list,
}
OPTIONAL_FIELDS = {
'cpp_guard': string_types,
'release_channel_collection': string_types,
'keyed': bool,
}
# The types for the data within the fields that hold lists.
LIST_FIELDS_CONTENT = {
'bug_numbers': int,
'notification_emails': string_types,
'record_in_processes': string_types,
}
# Concatenate the required and optional field definitions.
ALL_FIELDS = REQUIRED_FIELDS.copy()
ALL_FIELDS.update(OPTIONAL_FIELDS)
# Checks that all the required fields are available.
missing_fields = [f for f in REQUIRED_FIELDS.keys() if f not in definition]
if len(missing_fields) > 0:
raise ParserError(self._name + ' - missing required fields: ' +
', '.join(missing_fields) +
'.\nSee: {}#required-fields'.format(BASE_DOC_URL))
# Do we have any unknown field?
unknown_fields = [f for f in definition.keys() if f not in ALL_FIELDS]
if len(unknown_fields) > 0:
raise ParserError(self._name + ' - unknown fields: ' + ', '.join(unknown_fields) +
'.\nSee: {}#required-fields'.format(BASE_DOC_URL))
# Checks the type for all the fields.
wrong_type_names = ['{} must be {}'.format(f, utils.nice_type_name(ALL_FIELDS[f]))
for f in definition.keys()
if not isinstance(definition[f], ALL_FIELDS[f])]
if len(wrong_type_names) > 0:
raise ParserError(self._name + ' - ' + ', '.join(wrong_type_names) +
'.\nSee: {}#required-fields'.format(BASE_DOC_URL))
# Check that the lists are not empty and that data in the lists
# have the correct types.
list_fields = [f for f in definition if isinstance(definition[f], list)]
for field in list_fields:
# Check for empty lists.
if len(definition[field]) == 0:
raise ParserError(("Field '{}' for probe '{}' must not be empty" +
".\nSee: {}#required-fields)")
.format(field, self._name, BASE_DOC_URL))
# Check the type of the list content.
broken_types =\
[not isinstance(v, LIST_FIELDS_CONTENT[field]) for v in definition[field]]
if any(broken_types):
raise ParserError(("Field '{}' for probe '{}' must only contain values of type {}"
".\nSee: {}#the-yaml-definition-file)")
.format(field, self._name, utils.nice_type_name(LIST_FIELDS_CONTENT[field]),
BASE_DOC_URL)) |
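For reference, a scalar definition that satisfies the required and optional fields checked above might look like the following (all values are illustrative):
definition = {
    'bug_numbers': [1276190],
    'description': 'Number of times the widget was opened.',
    'expires': 'never',
    'kind': 'uint',
    'notification_emails': ['telemetry-client-dev@mozilla.org'],
    'record_in_processes': ['main', 'content'],
    'keyed': False,                            # optional
    'release_channel_collection': 'opt-out',   # optional
}
# some_scalar.validate_types(definition) would then pass without raising ParserError.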
def find_imports(self, pbds):
"""Find all missing imports in list of Pbd instances.
"""
# List of types used, but not defined
imports = list(set(self.uses).difference(set(self.defines)))
    # Clumsy, but enough for now
for imp in imports:
for p in pbds:
if imp in p.defines:
self.imports.append(p.name)
break
self.imports = list(set(self.imports))
for import_file in self.imports:
self.lines.insert(2, 'import "{}";'.format(import_file)) | Find all missing imports in list of Pbd instances. | Below is the the instruction that describes the task:
### Input:
Find all missing imports in list of Pbd instances.
### Response:
def find_imports(self, pbds):
"""Find all missing imports in list of Pbd instances.
"""
# List of types used, but not defined
imports = list(set(self.uses).difference(set(self.defines)))
    # Clumsy, but enough for now
for imp in imports:
for p in pbds:
if imp in p.defines:
self.imports.append(p.name)
break
self.imports = list(set(self.imports))
for import_file in self.imports:
self.lines.insert(2, 'import "{}";'.format(import_file)) |
def load_entrypoint_plugins(entry_points, airflow_plugins):
"""
Load AirflowPlugin subclasses from the entrypoints
provided. The entry_point group should be 'airflow.plugins'.
:param entry_points: A collection of entrypoints to search for plugins
:type entry_points: Generator[setuptools.EntryPoint, None, None]
:param airflow_plugins: A collection of existing airflow plugins to
ensure we don't load duplicates
:type airflow_plugins: list[type[airflow.plugins_manager.AirflowPlugin]]
:rtype: list[airflow.plugins_manager.AirflowPlugin]
"""
for entry_point in entry_points:
log.debug('Importing entry_point plugin %s', entry_point.name)
plugin_obj = entry_point.load()
if is_valid_plugin(plugin_obj, airflow_plugins):
if callable(getattr(plugin_obj, 'on_load', None)):
plugin_obj.on_load()
airflow_plugins.append(plugin_obj)
return airflow_plugins | Load AirflowPlugin subclasses from the entrypoints
provided. The entry_point group should be 'airflow.plugins'.
:param entry_points: A collection of entrypoints to search for plugins
:type entry_points: Generator[setuptools.EntryPoint, None, None]
:param airflow_plugins: A collection of existing airflow plugins to
ensure we don't load duplicates
:type airflow_plugins: list[type[airflow.plugins_manager.AirflowPlugin]]
:rtype: list[airflow.plugins_manager.AirflowPlugin] | Below is the the instruction that describes the task:
### Input:
Load AirflowPlugin subclasses from the entrypoints
provided. The entry_point group should be 'airflow.plugins'.
:param entry_points: A collection of entrypoints to search for plugins
:type entry_points: Generator[setuptools.EntryPoint, None, None]
:param airflow_plugins: A collection of existing airflow plugins to
ensure we don't load duplicates
:type airflow_plugins: list[type[airflow.plugins_manager.AirflowPlugin]]
:rtype: list[airflow.plugins_manager.AirflowPlugin]
### Response:
def load_entrypoint_plugins(entry_points, airflow_plugins):
"""
Load AirflowPlugin subclasses from the entrypoints
provided. The entry_point group should be 'airflow.plugins'.
:param entry_points: A collection of entrypoints to search for plugins
:type entry_points: Generator[setuptools.EntryPoint, None, None]
:param airflow_plugins: A collection of existing airflow plugins to
ensure we don't load duplicates
:type airflow_plugins: list[type[airflow.plugins_manager.AirflowPlugin]]
:rtype: list[airflow.plugins_manager.AirflowPlugin]
"""
for entry_point in entry_points:
log.debug('Importing entry_point plugin %s', entry_point.name)
plugin_obj = entry_point.load()
if is_valid_plugin(plugin_obj, airflow_plugins):
if callable(getattr(plugin_obj, 'on_load', None)):
plugin_obj.on_load()
airflow_plugins.append(plugin_obj)
return airflow_plugins |
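A usage sketch assuming setuptools' pkg_resources supplies the entry points; the 'airflow.plugins' group name comes from the docstring above:
import pkg_resources
entry_points = pkg_resources.iter_entry_points('airflow.plugins')
plugins = load_entrypoint_plugins(entry_points, airflow_plugins=[])
for plugin in plugins:
    print(plugin.name)  # AirflowPlugin subclasses expose a name attribute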
def show_fabric_trunk_info_output_show_trunk_list_trunk_list_groups_trunk_list_member_trunk_list_nbr_interface_type(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
show_fabric_trunk_info = ET.Element("show_fabric_trunk_info")
config = show_fabric_trunk_info
output = ET.SubElement(show_fabric_trunk_info, "output")
show_trunk_list = ET.SubElement(output, "show-trunk-list")
trunk_list_groups = ET.SubElement(show_trunk_list, "trunk-list-groups")
trunk_list_member = ET.SubElement(trunk_list_groups, "trunk-list-member")
trunk_list_nbr_interface_type = ET.SubElement(trunk_list_member, "trunk-list-nbr-interface-type")
trunk_list_nbr_interface_type.text = kwargs.pop('trunk_list_nbr_interface_type')
callback = kwargs.pop('callback', self._callback)
return callback(config) | Auto Generated Code | Below is the the instruction that describes the task:
### Input:
Auto Generated Code
### Response:
def show_fabric_trunk_info_output_show_trunk_list_trunk_list_groups_trunk_list_member_trunk_list_nbr_interface_type(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
show_fabric_trunk_info = ET.Element("show_fabric_trunk_info")
config = show_fabric_trunk_info
output = ET.SubElement(show_fabric_trunk_info, "output")
show_trunk_list = ET.SubElement(output, "show-trunk-list")
trunk_list_groups = ET.SubElement(show_trunk_list, "trunk-list-groups")
trunk_list_member = ET.SubElement(trunk_list_groups, "trunk-list-member")
trunk_list_nbr_interface_type = ET.SubElement(trunk_list_member, "trunk-list-nbr-interface-type")
trunk_list_nbr_interface_type.text = kwargs.pop('trunk_list_nbr_interface_type')
callback = kwargs.pop('callback', self._callback)
return callback(config) |
def get_pickled_sizes(obj):
"""
Return the pickled sizes of an object and its direct attributes,
ordered by decreasing size. Here is an example:
>> total_size, partial_sizes = get_pickled_sizes(Monitor(''))
>> total_size
345
>> partial_sizes
[('_procs', 214), ('exc', 4), ('mem', 4), ('start_time', 4),
('_start_time', 4), ('duration', 4)]
Notice that the sizes depend on the operating system and the machine.
"""
sizes = []
attrs = getattr(obj, '__dict__', {})
for name, value in attrs.items():
sizes.append((name, len(Pickled(value))))
return len(Pickled(obj)), sorted(
sizes, key=lambda pair: pair[1], reverse=True) | Return the pickled sizes of an object and its direct attributes,
ordered by decreasing size. Here is an example:
>> total_size, partial_sizes = get_pickled_sizes(Monitor(''))
>> total_size
345
>> partial_sizes
[('_procs', 214), ('exc', 4), ('mem', 4), ('start_time', 4),
('_start_time', 4), ('duration', 4)]
Notice that the sizes depend on the operating system and the machine. | Below is the the instruction that describes the task:
### Input:
Return the pickled sizes of an object and its direct attributes,
ordered by decreasing size. Here is an example:
>> total_size, partial_sizes = get_pickled_sizes(Monitor(''))
>> total_size
345
>> partial_sizes
[('_procs', 214), ('exc', 4), ('mem', 4), ('start_time', 4),
('_start_time', 4), ('duration', 4)]
Notice that the sizes depend on the operating system and the machine.
### Response:
def get_pickled_sizes(obj):
"""
Return the pickled sizes of an object and its direct attributes,
ordered by decreasing size. Here is an example:
>> total_size, partial_sizes = get_pickled_sizes(Monitor(''))
>> total_size
345
>> partial_sizes
[('_procs', 214), ('exc', 4), ('mem', 4), ('start_time', 4),
('_start_time', 4), ('duration', 4)]
Notice that the sizes depend on the operating system and the machine.
"""
sizes = []
attrs = getattr(obj, '__dict__', {})
for name, value in attrs.items():
sizes.append((name, len(Pickled(value))))
return len(Pickled(obj)), sorted(
sizes, key=lambda pair: pair[1], reverse=True) |
def attributes(self):
"""
Get or set this element's attributes as name/value pairs.
.. note::
Setting element attributes via this accessor will **remove**
any existing attributes, as opposed to the :meth:`set_attributes`
method which only updates and replaces them.
"""
attr_impl_nodes = self.adapter.get_node_attributes(self.impl_node)
return AttributeDict(attr_impl_nodes, self.impl_node, self.adapter) | Get or set this element's attributes as name/value pairs.
.. note::
Setting element attributes via this accessor will **remove**
any existing attributes, as opposed to the :meth:`set_attributes`
method which only updates and replaces them. | Below is the the instruction that describes the task:
### Input:
Get or set this element's attributes as name/value pairs.
.. note::
Setting element attributes via this accessor will **remove**
any existing attributes, as opposed to the :meth:`set_attributes`
method which only updates and replaces them.
### Response:
def attributes(self):
"""
Get or set this element's attributes as name/value pairs.
.. note::
Setting element attributes via this accessor will **remove**
any existing attributes, as opposed to the :meth:`set_attributes`
method which only updates and replaces them.
"""
attr_impl_nodes = self.adapter.get_node_attributes(self.impl_node)
return AttributeDict(attr_impl_nodes, self.impl_node, self.adapter) |
def _from_binary_acl(cls, binary_stream):
"""See base class."""
''' Revision number - 1
Padding - 1
Size - 2
ACE Count - 2
Padding - 2
'''
rev_number, size, ace_len = cls._REPR.unpack(binary_stream[:cls._REPR.size])
#content = cls._REPR.unpack(binary_stream[:cls._REPR.size])
aces = []
offset = cls._REPR.size
for i in range(ace_len):
ace = ACE.create_from_binary(binary_stream[offset:])
offset += len(ace)
aces.append(ace)
_MOD_LOGGER.debug("Next ACE offset = %d", offset)
nw_obj = cls((rev_number, size, aces))
_MOD_LOGGER.debug("Attempted to unpack SID from \"%s\"\nResult: %s", binary_stream.tobytes(), nw_obj)
return nw_obj | See base class. | Below is the the instruction that describes the task:
### Input:
See base class.
### Response:
def _from_binary_acl(cls, binary_stream):
"""See base class."""
''' Revision number - 1
Padding - 1
Size - 2
ACE Count - 2
Padding - 2
'''
rev_number, size, ace_len = cls._REPR.unpack(binary_stream[:cls._REPR.size])
#content = cls._REPR.unpack(binary_stream[:cls._REPR.size])
aces = []
offset = cls._REPR.size
for i in range(ace_len):
ace = ACE.create_from_binary(binary_stream[offset:])
offset += len(ace)
aces.append(ace)
_MOD_LOGGER.debug("Next ACE offset = %d", offset)
nw_obj = cls((rev_number, size, aces))
_MOD_LOGGER.debug("Attempted to unpack SID from \"%s\"\nResult: %s", binary_stream.tobytes(), nw_obj)
return nw_obj |
def handleUpgradeTxn(self, txn) -> None:
"""
Handles transaction of type POOL_UPGRADE
Can schedule or cancel upgrade to a newer
version at specified time
:param txn:
"""
FINALIZING_EVENT_TYPES = [UpgradeLog.Events.succeeded, UpgradeLog.Events.failed]
if get_type(txn) != POOL_UPGRADE:
return
logger.info("Node '{}' handles upgrade txn {}".format(self.nodeName, txn))
txn_data = get_payload_data(txn)
action = txn_data[ACTION]
version = txn_data[VERSION]
justification = txn_data.get(JUSTIFICATION)
pkg_name = txn_data.get(PACKAGE, self.config.UPGRADE_ENTRY)
upgrade_id = self.get_action_id(txn)
# TODO test
try:
version = src_version_cls(pkg_name)(version)
except InvalidVersionError as exc:
logger.warning(
"{} can't handle upgrade txn with version {} for package {}: {}"
.format(self, version, pkg_name, exc)
)
return
if action == START:
# forced txn could have partial schedule list
if self.nodeId not in txn_data[SCHEDULE]:
logger.info("Node '{}' disregards upgrade txn {}".format(
self.nodeName, txn))
return
last_event = self.lastActionEventInfo
if last_event:
if (last_event.data.upgrade_id == upgrade_id and
last_event.ev_type in FINALIZING_EVENT_TYPES):
logger.info(
"Node '{}' has already performed an upgrade with upgrade_id {}. "
"Last recorded event is {}"
.format(self.nodeName, upgrade_id, last_event.data))
return
when = txn_data[SCHEDULE][self.nodeId]
failTimeout = txn_data.get(TIMEOUT, self.defaultActionTimeout)
if isinstance(when, str):
when = dateutil.parser.parse(when)
new_ev_data = UpgradeLogData(when, version, upgrade_id, pkg_name)
if self.scheduledAction:
if self.scheduledAction == new_ev_data:
logger.debug(
"Node {} already scheduled upgrade to version '{}' "
.format(self.nodeName, version))
return
else:
logger.info(
"Node '{}' cancels previous upgrade and schedules a new one to {}"
.format(self.nodeName, version))
self._cancelScheduledUpgrade(justification)
logger.info("Node '{}' schedules upgrade to {}".format(self.nodeName, version))
self._scheduleUpgrade(new_ev_data, failTimeout)
return
if action == CANCEL:
if (self.scheduledAction and
self.scheduledAction.version == version):
self._cancelScheduledUpgrade(justification)
logger.info("Node '{}' cancels upgrade to {}".format(
self.nodeName, version))
return
logger.error(
"Got {} transaction with unsupported action {}".format(
POOL_UPGRADE, action)) | Handles transaction of type POOL_UPGRADE
Can schedule or cancel upgrade to a newer
version at specified time
:param txn: | Below is the the instruction that describes the task:
### Input:
Handles transaction of type POOL_UPGRADE
Can schedule or cancel upgrade to a newer
version at specified time
:param txn:
### Response:
def handleUpgradeTxn(self, txn) -> None:
"""
Handles transaction of type POOL_UPGRADE
Can schedule or cancel upgrade to a newer
version at specified time
:param txn:
"""
FINALIZING_EVENT_TYPES = [UpgradeLog.Events.succeeded, UpgradeLog.Events.failed]
if get_type(txn) != POOL_UPGRADE:
return
logger.info("Node '{}' handles upgrade txn {}".format(self.nodeName, txn))
txn_data = get_payload_data(txn)
action = txn_data[ACTION]
version = txn_data[VERSION]
justification = txn_data.get(JUSTIFICATION)
pkg_name = txn_data.get(PACKAGE, self.config.UPGRADE_ENTRY)
upgrade_id = self.get_action_id(txn)
# TODO test
try:
version = src_version_cls(pkg_name)(version)
except InvalidVersionError as exc:
logger.warning(
"{} can't handle upgrade txn with version {} for package {}: {}"
.format(self, version, pkg_name, exc)
)
return
if action == START:
# forced txn could have partial schedule list
if self.nodeId not in txn_data[SCHEDULE]:
logger.info("Node '{}' disregards upgrade txn {}".format(
self.nodeName, txn))
return
last_event = self.lastActionEventInfo
if last_event:
if (last_event.data.upgrade_id == upgrade_id and
last_event.ev_type in FINALIZING_EVENT_TYPES):
logger.info(
"Node '{}' has already performed an upgrade with upgrade_id {}. "
"Last recorded event is {}"
.format(self.nodeName, upgrade_id, last_event.data))
return
when = txn_data[SCHEDULE][self.nodeId]
failTimeout = txn_data.get(TIMEOUT, self.defaultActionTimeout)
if isinstance(when, str):
when = dateutil.parser.parse(when)
new_ev_data = UpgradeLogData(when, version, upgrade_id, pkg_name)
if self.scheduledAction:
if self.scheduledAction == new_ev_data:
logger.debug(
"Node {} already scheduled upgrade to version '{}' "
.format(self.nodeName, version))
return
else:
logger.info(
"Node '{}' cancels previous upgrade and schedules a new one to {}"
.format(self.nodeName, version))
self._cancelScheduledUpgrade(justification)
logger.info("Node '{}' schedules upgrade to {}".format(self.nodeName, version))
self._scheduleUpgrade(new_ev_data, failTimeout)
return
if action == CANCEL:
if (self.scheduledAction and
self.scheduledAction.version == version):
self._cancelScheduledUpgrade(justification)
logger.info("Node '{}' cancels upgrade to {}".format(
self.nodeName, version))
return
logger.error(
"Got {} transaction with unsupported action {}".format(
POOL_UPGRADE, action)) |
def generate(env):
"""Add Builders and construction variables for lib to an Environment."""
SCons.Tool.createStaticLibBuilder(env)
SCons.Tool.createSharedLibBuilder(env)
SCons.Tool.createProgBuilder(env)
env['AR'] = 'mwld'
env['ARCOM'] = '$AR $ARFLAGS -library -o $TARGET $SOURCES'
env['LIBDIRPREFIX'] = '-L'
env['LIBDIRSUFFIX'] = ''
env['LIBLINKPREFIX'] = '-l'
env['LIBLINKSUFFIX'] = '.lib'
env['LINK'] = 'mwld'
env['LINKCOM'] = '$LINK $LINKFLAGS -o $TARGET $SOURCES $_LIBDIRFLAGS $_LIBFLAGS'
env['SHLINK'] = '$LINK'
env['SHLINKFLAGS'] = '$LINKFLAGS'
env['SHLINKCOM'] = shlib_action
env['SHLIBEMITTER']= shlib_emitter
env['LDMODULEEMITTER']= shlib_emitter | Add Builders and construction variables for lib to an Environment. | Below is the the instruction that describes the task:
### Input:
Add Builders and construction variables for lib to an Environment.
### Response:
def generate(env):
"""Add Builders and construction variables for lib to an Environment."""
SCons.Tool.createStaticLibBuilder(env)
SCons.Tool.createSharedLibBuilder(env)
SCons.Tool.createProgBuilder(env)
env['AR'] = 'mwld'
env['ARCOM'] = '$AR $ARFLAGS -library -o $TARGET $SOURCES'
env['LIBDIRPREFIX'] = '-L'
env['LIBDIRSUFFIX'] = ''
env['LIBLINKPREFIX'] = '-l'
env['LIBLINKSUFFIX'] = '.lib'
env['LINK'] = 'mwld'
env['LINKCOM'] = '$LINK $LINKFLAGS -o $TARGET $SOURCES $_LIBDIRFLAGS $_LIBFLAGS'
env['SHLINK'] = '$LINK'
env['SHLINKFLAGS'] = '$LINKFLAGS'
env['SHLINKCOM'] = shlib_action
env['SHLIBEMITTER']= shlib_emitter
env['LDMODULEEMITTER']= shlib_emitter |
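This module follows SCons' tool protocol, so a consuming build script would load it by tool name; a sketch assuming the module is installed as a tool called 'mwld' (for example under site_scons/site_tools/):
# SConstruct
env = Environment(tools=['default', 'mwld'])
env.Program('app', ['main.c'])           # linked through the mwld LINKCOM defined above
env.StaticLibrary('util', ['util.c'])    # archived with 'mwld -library' via ARCOM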
def show_storage_container_metadata(kwargs=None, storage_conn=None, call=None):
'''
.. versionadded:: 2015.8.0
Show a storage container's metadata
CLI Example:
.. code-block:: bash
salt-cloud -f show_storage_container_metadata my-azure name=myservice
name:
Name of container to show.
lease_id:
If specified, show_storage_container_metadata only succeeds if the
container's lease is active and matches this ID.
'''
if call != 'function':
raise SaltCloudSystemExit(
'The show_storage_container function must be called with -f or --function.'
)
if kwargs is None:
kwargs = {}
if 'name' not in kwargs:
raise SaltCloudSystemExit('An storage container name must be specified as "name"')
if not storage_conn:
storage_conn = get_storage_conn(conn_kwargs=kwargs)
data = storage_conn.get_container_metadata(
container_name=kwargs['name'],
x_ms_lease_id=kwargs.get('lease_id', None),
)
return data | .. versionadded:: 2015.8.0
Show a storage container's metadata
CLI Example:
.. code-block:: bash
salt-cloud -f show_storage_container_metadata my-azure name=myservice
name:
Name of container to show.
lease_id:
If specified, show_storage_container_metadata only succeeds if the
container's lease is active and matches this ID. | Below is the the instruction that describes the task:
### Input:
.. versionadded:: 2015.8.0
Show a storage container's metadata
CLI Example:
.. code-block:: bash
salt-cloud -f show_storage_container_metadata my-azure name=myservice
name:
Name of container to show.
lease_id:
If specified, show_storage_container_metadata only succeeds if the
container's lease is active and matches this ID.
### Response:
def show_storage_container_metadata(kwargs=None, storage_conn=None, call=None):
'''
.. versionadded:: 2015.8.0
Show a storage container's metadata
CLI Example:
.. code-block:: bash
salt-cloud -f show_storage_container_metadata my-azure name=myservice
name:
Name of container to show.
lease_id:
If specified, show_storage_container_metadata only succeeds if the
container's lease is active and matches this ID.
'''
if call != 'function':
raise SaltCloudSystemExit(
'The show_storage_container function must be called with -f or --function.'
)
if kwargs is None:
kwargs = {}
if 'name' not in kwargs:
raise SaltCloudSystemExit('An storage container name must be specified as "name"')
if not storage_conn:
storage_conn = get_storage_conn(conn_kwargs=kwargs)
data = storage_conn.get_container_metadata(
container_name=kwargs['name'],
x_ms_lease_id=kwargs.get('lease_id', None),
)
return data |
def contains_vasp_input(dir_name):
"""
Checks if a directory contains valid VASP input.
Args:
dir_name:
Directory name to check.
Returns:
True if directory contains all four VASP input files (INCAR, POSCAR,
KPOINTS and POTCAR).
"""
for f in ["INCAR", "POSCAR", "POTCAR", "KPOINTS"]:
if not os.path.exists(os.path.join(dir_name, f)) and \
not os.path.exists(os.path.join(dir_name, f + ".orig")):
return False
return True | Checks if a directory contains valid VASP input.
Args:
dir_name:
Directory name to check.
Returns:
True if directory contains all four VASP input files (INCAR, POSCAR,
KPOINTS and POTCAR). | Below is the the instruction that describes the task:
### Input:
Checks if a directory contains valid VASP input.
Args:
dir_name:
Directory name to check.
Returns:
True if directory contains all four VASP input files (INCAR, POSCAR,
KPOINTS and POTCAR).
### Response:
def contains_vasp_input(dir_name):
"""
Checks if a directory contains valid VASP input.
Args:
dir_name:
Directory name to check.
Returns:
True if directory contains all four VASP input files (INCAR, POSCAR,
KPOINTS and POTCAR).
"""
for f in ["INCAR", "POSCAR", "POTCAR", "KPOINTS"]:
if not os.path.exists(os.path.join(dir_name, f)) and \
not os.path.exists(os.path.join(dir_name, f + ".orig")):
return False
return True |
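A small usage sketch that scans a directory tree for complete VASP inputs (the root path is a placeholder):
import os
root = '/path/to/calculations'
vasp_dirs = [d for d, _, _ in os.walk(root) if contains_vasp_input(d)]
print(vasp_dirs)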
def update(self, deltat=1.0):
'''fly a square circuit'''
DNFZ.update(self, deltat)
self.dist_flown += self.speed * deltat
if self.dist_flown > self.circuit_width:
self.desired_heading = self.heading + 90
self.dist_flown = 0
if self.getalt() < self.ground_height() or self.getalt() > self.ground_height() + 2000:
self.randpos()
self.randalt() | fly a square circuit | Below is the the instruction that describes the task:
### Input:
fly a square circuit
### Response:
def update(self, deltat=1.0):
'''fly a square circuit'''
DNFZ.update(self, deltat)
self.dist_flown += self.speed * deltat
if self.dist_flown > self.circuit_width:
self.desired_heading = self.heading + 90
self.dist_flown = 0
if self.getalt() < self.ground_height() or self.getalt() > self.ground_height() + 2000:
self.randpos()
self.randalt() |
def set_initial(self, C_in, scale_in, scale_high):
r"""Set the initial values for parameters and Wilson coefficients at
the scale `scale_in`, setting the new physics scale $\Lambda$ to
`scale_high`."""
self.C_in = C_in
self.scale_in = scale_in
self.scale_high = scale_high | r"""Set the initial values for parameters and Wilson coefficients at
the scale `scale_in`, setting the new physics scale $\Lambda$ to
`scale_high`. | Below is the the instruction that describes the task:
### Input:
r"""Set the initial values for parameters and Wilson coefficients at
the scale `scale_in`, setting the new physics scale $\Lambda$ to
`scale_high`.
### Response:
def set_initial(self, C_in, scale_in, scale_high):
r"""Set the initial values for parameters and Wilson coefficients at
the scale `scale_in`, setting the new physics scale $\Lambda$ to
`scale_high`."""
self.C_in = C_in
self.scale_in = scale_in
self.scale_high = scale_high |
def _parse_plan(self, match):
"""Parse a matching plan line."""
expected_tests = int(match.group("expected"))
directive = Directive(match.group("directive"))
# Only SKIP directives are allowed in the plan.
if directive.text and not directive.skip:
return Unknown()
return Plan(expected_tests, directive) | Parse a matching plan line. | Below is the the instruction that describes the task:
### Input:
Parse a matching plan line.
### Response:
def _parse_plan(self, match):
"""Parse a matching plan line."""
expected_tests = int(match.group("expected"))
directive = Directive(match.group("directive"))
# Only SKIP directives are allowed in the plan.
if directive.text and not directive.skip:
return Unknown()
return Plan(expected_tests, directive) |