Unnamed: 0 (int64, 0-10k) | repository_name (string, lengths 7-54) | func_path_in_repository (string, lengths 5-223) | func_name (string, lengths 1-134) | whole_func_string (string, lengths 100-30.3k) | language (string, 1 distinct value) | func_code_string (string, lengths 100-30.3k) | func_code_tokens (string, lengths 138-33.2k) | func_documentation_string (string, lengths 1-15k) | func_documentation_tokens (string, lengths 5-5.14k) | split_name (string, 1 distinct value) | func_code_url (string, lengths 91-315) |
---|---|---|---|---|---|---|---|---|---|---|---|
5,900 | ryanmcgrath/twython | twython/api.py | Twython.request | def request(self, endpoint, method='GET', params=None, version='1.1', json_encoded=False):
"""Return dict of response received from Twitter's API
:param endpoint: (required) Full url or Twitter API endpoint
(e.g. search/tweets)
:type endpoint: string
:param method: (optional) Method of accessing data, either
GET, POST or DELETE. (default GET)
:type method: string
:param params: (optional) Dict of parameters (if any) accepted
the by Twitter API endpoint you are trying to
access (default None)
:type params: dict or None
:param version: (optional) Twitter API version to access
(default 1.1)
:type version: string
:param json_encoded: (optional) Flag to indicate if this method should send data encoded as json
(default False)
:type json_encoded: bool
:rtype: dict
"""
if endpoint.startswith('http://'):
raise TwythonError('api.twitter.com is restricted to SSL/TLS traffic.')
# In case they want to pass a full Twitter URL
# i.e. https://api.twitter.com/1.1/search/tweets.json
if endpoint.startswith('https://'):
url = endpoint
else:
url = '%s/%s.json' % (self.api_url % version, endpoint)
content = self._request(url, method=method, params=params,
api_call=url, json_encoded=json_encoded)
return content | python | def request(self, endpoint, method='GET', params=None, version='1.1', json_encoded=False):
"""Return dict of response received from Twitter's API
:param endpoint: (required) Full url or Twitter API endpoint
(e.g. search/tweets)
:type endpoint: string
:param method: (optional) Method of accessing data, either
GET, POST or DELETE. (default GET)
:type method: string
:param params: (optional) Dict of parameters (if any) accepted
the by Twitter API endpoint you are trying to
access (default None)
:type params: dict or None
:param version: (optional) Twitter API version to access
(default 1.1)
:type version: string
:param json_encoded: (optional) Flag to indicate if this method should send data encoded as json
(default False)
:type json_encoded: bool
:rtype: dict
"""
if endpoint.startswith('http://'):
raise TwythonError('api.twitter.com is restricted to SSL/TLS traffic.')
# In case they want to pass a full Twitter URL
# i.e. https://api.twitter.com/1.1/search/tweets.json
if endpoint.startswith('https://'):
url = endpoint
else:
url = '%s/%s.json' % (self.api_url % version, endpoint)
content = self._request(url, method=method, params=params,
api_call=url, json_encoded=json_encoded)
return content | ['def', 'request', '(', 'self', ',', 'endpoint', ',', 'method', '=', "'GET'", ',', 'params', '=', 'None', ',', 'version', '=', "'1.1'", ',', 'json_encoded', '=', 'False', ')', ':', 'if', 'endpoint', '.', 'startswith', '(', "'http://'", ')', ':', 'raise', 'TwythonError', '(', "'api.twitter.com is restricted to SSL/TLS traffic.'", ')', '# In case they want to pass a full Twitter URL', '# i.e. https://api.twitter.com/1.1/search/tweets.json', 'if', 'endpoint', '.', 'startswith', '(', "'https://'", ')', ':', 'url', '=', 'endpoint', 'else', ':', 'url', '=', "'%s/%s.json'", '%', '(', 'self', '.', 'api_url', '%', 'version', ',', 'endpoint', ')', 'content', '=', 'self', '.', '_request', '(', 'url', ',', 'method', '=', 'method', ',', 'params', '=', 'params', ',', 'api_call', '=', 'url', ',', 'json_encoded', '=', 'json_encoded', ')', 'return', 'content'] | Return dict of response received from Twitter's API
:param endpoint: (required) Full url or Twitter API endpoint
(e.g. search/tweets)
:type endpoint: string
:param method: (optional) Method of accessing data, either
GET, POST or DELETE. (default GET)
:type method: string
:param params: (optional) Dict of parameters (if any) accepted
the by Twitter API endpoint you are trying to
access (default None)
:type params: dict or None
:param version: (optional) Twitter API version to access
(default 1.1)
:type version: string
:param json_encoded: (optional) Flag to indicate if this method should send data encoded as json
(default False)
:type json_encoded: bool
:rtype: dict | ['Return', 'dict', 'of', 'response', 'received', 'from', 'Twitter', 's', 'API'] | train | https://github.com/ryanmcgrath/twython/blob/7366de80efcbbdfaf615d3f1fea72546196916fc/twython/api.py#L238-L274 |
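A brief usage sketch for the `request` method in this row. The credentials below are placeholders, and the sketch assumes the `twython` package is installed.

```python
from twython import Twython

# Placeholder OAuth credentials: substitute real keys before running.
twitter = Twython("APP_KEY", "APP_SECRET", "OAUTH_TOKEN", "OAUTH_TOKEN_SECRET")

# Short endpoint form, expanded to https://api.twitter.com/1.1/search/tweets.json
tweets = twitter.request("search/tweets", method="GET", params={"q": "python"})

# A full HTTPS URL is accepted as-is; a plain http:// URL raises TwythonError.
tweets = twitter.request("https://api.twitter.com/1.1/search/tweets.json",
                         params={"q": "python"})
```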
5,901 | inasafe/inasafe | safe/report/impact_report.py | ImpactReport._check_layer_count | def _check_layer_count(self, layer):
"""Check for the validity of the layer.
:param layer: QGIS layer
:type layer: qgis.core.QgsVectorLayer
:return:
"""
if layer:
if not layer.isValid():
raise ImpactReport.LayerException('Layer is not valid')
if isinstance(layer, QgsRasterLayer):
# can't check feature count of raster layer
return
feature_count = len([f for f in layer.getFeatures()])
if feature_count == 0:
raise ImpactReport.LayerException(
'Layer contains no features') | python | def _check_layer_count(self, layer):
"""Check for the validity of the layer.
:param layer: QGIS layer
:type layer: qgis.core.QgsVectorLayer
:return:
"""
if layer:
if not layer.isValid():
raise ImpactReport.LayerException('Layer is not valid')
if isinstance(layer, QgsRasterLayer):
# can't check feature count of raster layer
return
feature_count = len([f for f in layer.getFeatures()])
if feature_count == 0:
raise ImpactReport.LayerException(
'Layer contains no features') | ['def', '_check_layer_count', '(', 'self', ',', 'layer', ')', ':', 'if', 'layer', ':', 'if', 'not', 'layer', '.', 'isValid', '(', ')', ':', 'raise', 'ImpactReport', '.', 'LayerException', '(', "'Layer is not valid'", ')', 'if', 'isinstance', '(', 'layer', ',', 'QgsRasterLayer', ')', ':', "# can't check feature count of raster layer", 'return', 'feature_count', '=', 'len', '(', '[', 'f', 'for', 'f', 'in', 'layer', '.', 'getFeatures', '(', ')', ']', ')', 'if', 'feature_count', '==', '0', ':', 'raise', 'ImpactReport', '.', 'LayerException', '(', "'Layer contains no features'", ')'] | Check for the validity of the layer.
:param layer: QGIS layer
:type layer: qgis.core.QgsVectorLayer
:return: | ['Check', 'for', 'the', 'validity', 'of', 'the', 'layer', '.'] | train | https://github.com/inasafe/inasafe/blob/831d60abba919f6d481dc94a8d988cc205130724/safe/report/impact_report.py#L455-L471 |
5,902 | apache/incubator-superset | superset/connectors/connector_registry.py | ConnectorRegistry.get_eager_datasource | def get_eager_datasource(cls, session, datasource_type, datasource_id):
"""Returns datasource with columns and metrics."""
datasource_class = ConnectorRegistry.sources[datasource_type]
return (
session.query(datasource_class)
.options(
subqueryload(datasource_class.columns),
subqueryload(datasource_class.metrics),
)
.filter_by(id=datasource_id)
.one()
) | python | def get_eager_datasource(cls, session, datasource_type, datasource_id):
"""Returns datasource with columns and metrics."""
datasource_class = ConnectorRegistry.sources[datasource_type]
return (
session.query(datasource_class)
.options(
subqueryload(datasource_class.columns),
subqueryload(datasource_class.metrics),
)
.filter_by(id=datasource_id)
.one()
) | ['def', 'get_eager_datasource', '(', 'cls', ',', 'session', ',', 'datasource_type', ',', 'datasource_id', ')', ':', 'datasource_class', '=', 'ConnectorRegistry', '.', 'sources', '[', 'datasource_type', ']', 'return', '(', 'session', '.', 'query', '(', 'datasource_class', ')', '.', 'options', '(', 'subqueryload', '(', 'datasource_class', '.', 'columns', ')', ',', 'subqueryload', '(', 'datasource_class', '.', 'metrics', ')', ',', ')', '.', 'filter_by', '(', 'id', '=', 'datasource_id', ')', '.', 'one', '(', ')', ')'] | Returns datasource with columns and metrics. | ['Returns', 'datasource', 'with', 'columns', 'and', 'metrics', '.'] | train | https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/connectors/connector_registry.py#L76-L87 |
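The query above relies on SQLAlchemy's `subqueryload` to fetch a datasource together with its columns and metrics in one pass. Below is a self-contained sketch of that eager-loading pattern, using a throwaway model instead of Superset's and assuming SQLAlchemy 1.4 or newer.

```python
from sqlalchemy import Column, ForeignKey, Integer, String, create_engine
from sqlalchemy.orm import Session, declarative_base, relationship, subqueryload

Base = declarative_base()

class Datasource(Base):
    __tablename__ = "datasource"
    id = Column(Integer, primary_key=True)
    columns = relationship("TableColumn")

class TableColumn(Base):
    __tablename__ = "table_column"
    id = Column(Integer, primary_key=True)
    datasource_id = Column(Integer, ForeignKey("datasource.id"))
    name = Column(String)

engine = create_engine("sqlite://")
Base.metadata.create_all(engine)

with Session(engine) as session:
    session.add(Datasource(id=1, columns=[TableColumn(name="num_rows")]))
    session.commit()
    # The related columns load in the same round trip instead of lazily on access.
    ds = (
        session.query(Datasource)
        .options(subqueryload(Datasource.columns))
        .filter_by(id=1)
        .one()
    )
    print([c.name for c in ds.columns])  # ['num_rows']
```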
5,903 | jasonbot/arcrest | arcrest/utils.py | pythonvaluetotime | def pythonvaluetotime(time_val):
"Convert a time or time range from Python datetime to ArcGIS REST server"
if time_val is None:
return None
elif isinstance(time_val, numeric):
return str(long(time_val * 1000.0))
elif isinstance(time_val, date):
dtlist = [time_val.year, time_val.month, time_val.day]
if isinstance(time_val, datetime.datetime):
dtlist += [time_val.hour, time_val.minute, time_val.second]
else:
dtlist += [0, 0, 0]
return long(calendar.timegm(dtlist) * 1000.0)
elif (isinstance(time_val, sequence)
and len(time_val) == 2):
if all(isinstance(x, numeric)
for x in time_val):
return ",".join(pythonvaluetotime(x)
for x in time_val)
elif all(isinstance(x, date)
for x in time_val):
return ",".join(pythonvaluetotime(x)
for x in time_val)
raise ValueError(repr(time_val)) | python | def pythonvaluetotime(time_val):
"Convert a time or time range from Python datetime to ArcGIS REST server"
if time_val is None:
return None
elif isinstance(time_val, numeric):
return str(long(time_val * 1000.0))
elif isinstance(time_val, date):
dtlist = [time_val.year, time_val.month, time_val.day]
if isinstance(time_val, datetime.datetime):
dtlist += [time_val.hour, time_val.minute, time_val.second]
else:
dtlist += [0, 0, 0]
return long(calendar.timegm(dtlist) * 1000.0)
elif (isinstance(time_val, sequence)
and len(time_val) == 2):
if all(isinstance(x, numeric)
for x in time_val):
return ",".join(pythonvaluetotime(x)
for x in time_val)
elif all(isinstance(x, date)
for x in time_val):
return ",".join(pythonvaluetotime(x)
for x in time_val)
raise ValueError(repr(time_val)) | ['def', 'pythonvaluetotime', '(', 'time_val', ')', ':', 'if', 'time_val', 'is', 'None', ':', 'return', 'None', 'elif', 'isinstance', '(', 'time_val', ',', 'numeric', ')', ':', 'return', 'str', '(', 'long', '(', 'time_val', '*', '1000.0', ')', ')', 'elif', 'isinstance', '(', 'time_val', ',', 'date', ')', ':', 'dtlist', '=', '[', 'time_val', '.', 'year', ',', 'time_val', '.', 'month', ',', 'time_val', '.', 'day', ']', 'if', 'isinstance', '(', 'time_val', ',', 'datetime', '.', 'datetime', ')', ':', 'dtlist', '+=', '[', 'time_val', '.', 'hour', ',', 'time_val', '.', 'minute', ',', 'time_val', '.', 'second', ']', 'else', ':', 'dtlist', '+=', '[', '0', ',', '0', ',', '0', ']', 'return', 'long', '(', 'calendar', '.', 'timegm', '(', 'dtlist', ')', '*', '1000.0', ')', 'elif', '(', 'isinstance', '(', 'time_val', ',', 'sequence', ')', 'and', 'len', '(', 'time_val', ')', '==', '2', ')', ':', 'if', 'all', '(', 'isinstance', '(', 'x', ',', 'numeric', ')', 'for', 'x', 'in', 'time_val', ')', ':', 'return', '","', '.', 'join', '(', 'pythonvaluetotime', '(', 'x', ')', 'for', 'x', 'in', 'time_val', ')', 'elif', 'all', '(', 'isinstance', '(', 'x', ',', 'date', ')', 'for', 'x', 'in', 'time_val', ')', ':', 'return', '","', '.', 'join', '(', 'pythonvaluetotime', '(', 'x', ')', 'for', 'x', 'in', 'time_val', ')', 'raise', 'ValueError', '(', 'repr', '(', 'time_val', ')', ')'] | Convert a time or time range from Python datetime to ArcGIS REST server | ['Convert', 'a', 'time', 'or', 'time', 'range', 'from', 'Python', 'datetime', 'to', 'ArcGIS', 'REST', 'server'] | train | https://github.com/jasonbot/arcrest/blob/b1ba71fd59bb6349415e7879d753d307dbc0da26/arcrest/utils.py#L35-L58 |
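A worked example of the conversion `pythonvaluetotime` performs: a UTC datetime becomes epoch milliseconds (the unit ArcGIS REST time filters expect), and a start/end pair is serialized as two comma-separated values.

```python
import calendar
import datetime

dt = datetime.datetime(2020, 1, 2, 3, 4, 5)
millis = calendar.timegm([dt.year, dt.month, dt.day,
                          dt.hour, dt.minute, dt.second]) * 1000
print(millis)  # 1577934245000

# A (start, end) range is joined with a comma, matching the sequence branch above.
print("{},{}".format(1577934245000, 1577934246000))  # "1577934245000,1577934246000"
```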
5,904 | ramazanpolat/prodict | prodict/__init__.py | Prodict.attr_names | def attr_names(cls) -> List[str]:
"""
Returns annotated attribute names
:return: List[str]
"""
return [k for k, v in cls.attr_types().items()] | python | def attr_names(cls) -> List[str]:
"""
Returns annotated attribute names
:return: List[str]
"""
return [k for k, v in cls.attr_types().items()] | ['def', 'attr_names', '(', 'cls', ')', '->', 'List', '[', 'str', ']', ':', 'return', '[', 'k', 'for', 'k', ',', 'v', 'in', 'cls', '.', 'attr_types', '(', ')', '.', 'items', '(', ')', ']'] | Returns annotated attribute names
:return: List[str] | ['Returns', 'annotated', 'attribute', 'names', ':', 'return', ':', 'List', '[', 'str', ']'] | train | https://github.com/ramazanpolat/prodict/blob/e67e34738af1542f3b6c91c0e838f5be9a84aad4/prodict/__init__.py#L49-L54 |
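A minimal sketch of how `attr_names` is used, assuming the `prodict` package: the method reports the annotated attributes of a `Prodict` subclass.

```python
from prodict import Prodict

class Profile(Prodict):
    name: str
    age: int

print(Profile.attr_names())  # expected: ['name', 'age']
```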
5,905 | tensorpack/tensorpack | examples/FasterRCNN/utils/generate_anchors.py | generate_anchors | def generate_anchors(base_size=16, ratios=[0.5, 1, 2],
scales=2**np.arange(3, 6)):
"""
Generate anchor (reference) windows by enumerating aspect ratios X
scales wrt a reference (0, 0, 15, 15) window.
"""
base_anchor = np.array([1, 1, base_size, base_size], dtype='float32') - 1
ratio_anchors = _ratio_enum(base_anchor, ratios)
anchors = np.vstack([_scale_enum(ratio_anchors[i, :], scales)
for i in range(ratio_anchors.shape[0])])
return anchors | python | def generate_anchors(base_size=16, ratios=[0.5, 1, 2],
scales=2**np.arange(3, 6)):
"""
Generate anchor (reference) windows by enumerating aspect ratios X
scales wrt a reference (0, 0, 15, 15) window.
"""
base_anchor = np.array([1, 1, base_size, base_size], dtype='float32') - 1
ratio_anchors = _ratio_enum(base_anchor, ratios)
anchors = np.vstack([_scale_enum(ratio_anchors[i, :], scales)
for i in range(ratio_anchors.shape[0])])
return anchors | ['def', 'generate_anchors', '(', 'base_size', '=', '16', ',', 'ratios', '=', '[', '0.5', ',', '1', ',', '2', ']', ',', 'scales', '=', '2', '**', 'np', '.', 'arange', '(', '3', ',', '6', ')', ')', ':', 'base_anchor', '=', 'np', '.', 'array', '(', '[', '1', ',', '1', ',', 'base_size', ',', 'base_size', ']', ',', 'dtype', '=', "'float32'", ')', '-', '1', 'ratio_anchors', '=', '_ratio_enum', '(', 'base_anchor', ',', 'ratios', ')', 'anchors', '=', 'np', '.', 'vstack', '(', '[', '_scale_enum', '(', 'ratio_anchors', '[', 'i', ',', ':', ']', ',', 'scales', ')', 'for', 'i', 'in', 'range', '(', 'ratio_anchors', '.', 'shape', '[', '0', ']', ')', ']', ')', 'return', 'anchors'] | Generate anchor (reference) windows by enumerating aspect ratios X
scales wrt a reference (0, 0, 15, 15) window. | ['Generate', 'anchor', '(', 'reference', ')', 'windows', 'by', 'enumerating', 'aspect', 'ratios', 'X', 'scales', 'wrt', 'a', 'reference', '(', '0', '0', '15', '15', ')', 'window', '.'] | train | https://github.com/tensorpack/tensorpack/blob/d7a13cb74c9066bc791d7aafc3b744b60ee79a9f/examples/FasterRCNN/utils/generate_anchors.py#L41-L52 |
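A usage sketch for the anchor generator above. It assumes the surrounding module, including its private `_ratio_enum` and `_scale_enum` helpers, is importable as written.

```python
import numpy as np

# With the defaults, 3 aspect ratios x 3 scales yield 9 anchors, each an
# (x1, y1, x2, y2) box laid out around a 16x16 reference window.
anchors = generate_anchors(base_size=16, ratios=[0.5, 1, 2],
                           scales=2 ** np.arange(3, 6))
print(anchors.shape)  # (9, 4)
```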
5,906 | Nachtfeuer/pipeline | spline/matrix.py | Matrix.run_matrix_ordered | def run_matrix_ordered(self, process_data):
"""
Running pipelines one after the other.
Returns
dict: with two fields: success True/False and captured output (list of str).
"""
output = []
for entry in self.matrix:
env = entry['env'].copy()
env.update({'PIPELINE_MATRIX': entry['name']})
if Matrix.can_process_matrix(entry, process_data.options.matrix_tags):
self.logger.info("Processing pipeline for matrix entry '%s'", entry['name'])
pipeline = Pipeline(model=process_data.model, env=env,
options=process_data.options)
pipeline.hooks = process_data.hooks
result = pipeline.process(process_data.pipeline)
output += result['output']
if not result['success']:
return {'success': False, 'output': output}
return {'success': True, 'output': output} | python | def run_matrix_ordered(self, process_data):
"""
Running pipelines one after the other.
Returns
dict: with two fields: success True/False and captured output (list of str).
"""
output = []
for entry in self.matrix:
env = entry['env'].copy()
env.update({'PIPELINE_MATRIX': entry['name']})
if Matrix.can_process_matrix(entry, process_data.options.matrix_tags):
self.logger.info("Processing pipeline for matrix entry '%s'", entry['name'])
pipeline = Pipeline(model=process_data.model, env=env,
options=process_data.options)
pipeline.hooks = process_data.hooks
result = pipeline.process(process_data.pipeline)
output += result['output']
if not result['success']:
return {'success': False, 'output': output}
return {'success': True, 'output': output} | ['def', 'run_matrix_ordered', '(', 'self', ',', 'process_data', ')', ':', 'output', '=', '[', ']', 'for', 'entry', 'in', 'self', '.', 'matrix', ':', 'env', '=', 'entry', '[', "'env'", ']', '.', 'copy', '(', ')', 'env', '.', 'update', '(', '{', "'PIPELINE_MATRIX'", ':', 'entry', '[', "'name'", ']', '}', ')', 'if', 'Matrix', '.', 'can_process_matrix', '(', 'entry', ',', 'process_data', '.', 'options', '.', 'matrix_tags', ')', ':', 'self', '.', 'logger', '.', 'info', '(', '"Processing pipeline for matrix entry \'%s\'"', ',', 'entry', '[', "'name'", ']', ')', 'pipeline', '=', 'Pipeline', '(', 'model', '=', 'process_data', '.', 'model', ',', 'env', '=', 'env', ',', 'options', '=', 'process_data', '.', 'options', ')', 'pipeline', '.', 'hooks', '=', 'process_data', '.', 'hooks', 'result', '=', 'pipeline', '.', 'process', '(', 'process_data', '.', 'pipeline', ')', 'output', '+=', 'result', '[', "'output'", ']', 'if', 'not', 'result', '[', "'success'", ']', ':', 'return', '{', "'success'", ':', 'False', ',', "'output'", ':', 'output', '}', 'return', '{', "'success'", ':', 'True', ',', "'output'", ':', 'output', '}'] | Running pipelines one after the other.
Returns
dict: with two fields: success True/False and captured output (list of str). | ['Running', 'pipelines', 'one', 'after', 'the', 'other', '.'] | train | https://github.com/Nachtfeuer/pipeline/blob/04ca18c4e95e4349532bb45b768206393e1f2c13/spline/matrix.py#L136-L157 |
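A self-contained sketch of the ordered-matrix pattern used above: run each entry in sequence, accumulate output, and stop at the first failure. The `run_entry` callable here is a stand-in for spline's `Pipeline.process`, not the real implementation.

```python
def run_matrix_ordered(matrix, run_entry):
    output = []
    for entry in matrix:
        result = run_entry(entry)
        output += result["output"]
        if not result["success"]:
            # Short-circuit: later matrix entries are not run after a failure.
            return {"success": False, "output": output}
    return {"success": True, "output": output}

matrix = [{"name": "python3.8"}, {"name": "python3.9"}]
result = run_matrix_ordered(matrix, lambda e: {"success": True, "output": [e["name"]]})
print(result)  # {'success': True, 'output': ['python3.8', 'python3.9']}
```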
5,907 | graphql-python/graphql-core-next | graphql/utilities/coerce_value.py | coerce_value | def coerce_value(
value: Any, type_: GraphQLInputType, blame_node: Node = None, path: Path = None
) -> CoercedValue:
"""Coerce a Python value given a GraphQL Type.
Returns either a value which is valid for the provided type or a list of encountered
coercion errors.
"""
# A value must be provided if the type is non-null.
if is_non_null_type(type_):
if value is None or value is INVALID:
return of_errors(
[
coercion_error(
f"Expected non-nullable type {type_} not to be null",
blame_node,
path,
)
]
)
type_ = cast(GraphQLNonNull, type_)
return coerce_value(value, type_.of_type, blame_node, path)
if value is None or value is INVALID:
# Explicitly return the value null.
return of_value(None)
if is_scalar_type(type_):
# Scalars determine if a value is valid via `parse_value()`, which can throw to
# indicate failure. If it throws, maintain a reference to the original error.
type_ = cast(GraphQLScalarType, type_)
try:
parse_result = type_.parse_value(value)
if is_invalid(parse_result):
return of_errors(
[coercion_error(f"Expected type {type_.name}", blame_node, path)]
)
return of_value(parse_result)
except (TypeError, ValueError) as error:
return of_errors(
[
coercion_error(
f"Expected type {type_.name}",
blame_node,
path,
str(error),
error,
)
]
)
if is_enum_type(type_):
type_ = cast(GraphQLEnumType, type_)
values = type_.values
if isinstance(value, str):
enum_value = values.get(value)
if enum_value:
return of_value(value if enum_value.value is None else enum_value.value)
suggestions = suggestion_list(str(value), values)
did_you_mean = f"did you mean {or_list(suggestions)}?" if suggestions else None
return of_errors(
[
coercion_error(
f"Expected type {type_.name}", blame_node, path, did_you_mean
)
]
)
if is_list_type(type_):
type_ = cast(GraphQLList, type_)
item_type = type_.of_type
if isinstance(value, Iterable) and not isinstance(value, str):
errors = None
coerced_value_list: List[Any] = []
append_item = coerced_value_list.append
for index, item_value in enumerate(value):
coerced_item = coerce_value(
item_value, item_type, blame_node, at_path(path, index)
)
if coerced_item.errors:
errors = add(errors, *coerced_item.errors)
elif not errors:
append_item(coerced_item.value)
return of_errors(errors) if errors else of_value(coerced_value_list)
# Lists accept a non-list value as a list of one.
coerced_item = coerce_value(value, item_type, blame_node)
return coerced_item if coerced_item.errors else of_value([coerced_item.value])
if is_input_object_type(type_):
type_ = cast(GraphQLInputObjectType, type_)
if not isinstance(value, dict):
return of_errors(
[
coercion_error(
f"Expected type {type_.name} to be a dict", blame_node, path
)
]
)
errors = None
coerced_value_dict: Dict[str, Any] = {}
fields = type_.fields
# Ensure every defined field is valid.
for field_name, field in fields.items():
field_value = value.get(field_name, INVALID)
if is_invalid(field_value):
if not is_invalid(field.default_value):
coerced_value_dict[field_name] = field.default_value
elif is_non_null_type(field.type):
errors = add(
errors,
coercion_error(
f"Field {print_path(at_path(path, field_name))}"
f" of required type {field.type} was not provided",
blame_node,
),
)
else:
coerced_field = coerce_value(
field_value, field.type, blame_node, at_path(path, field_name)
)
if coerced_field.errors:
errors = add(errors, *coerced_field.errors)
else:
coerced_value_dict[field_name] = coerced_field.value
# Ensure every provided field is defined.
for field_name in value:
if field_name not in fields:
suggestions = suggestion_list(field_name, fields)
did_you_mean = (
f"did you mean {or_list(suggestions)}?" if suggestions else None
)
errors = add(
errors,
coercion_error(
f"Field '{field_name}' is not defined by type {type_.name}",
blame_node,
path,
did_you_mean,
),
)
return of_errors(errors) if errors else of_value(coerced_value_dict)
# Not reachable. All possible input types have been considered.
raise TypeError(f"Unexpected input type: '{inspect(type_)}'.") | python | def coerce_value(
value: Any, type_: GraphQLInputType, blame_node: Node = None, path: Path = None
) -> CoercedValue:
"""Coerce a Python value given a GraphQL Type.
Returns either a value which is valid for the provided type or a list of encountered
coercion errors.
"""
# A value must be provided if the type is non-null.
if is_non_null_type(type_):
if value is None or value is INVALID:
return of_errors(
[
coercion_error(
f"Expected non-nullable type {type_} not to be null",
blame_node,
path,
)
]
)
type_ = cast(GraphQLNonNull, type_)
return coerce_value(value, type_.of_type, blame_node, path)
if value is None or value is INVALID:
# Explicitly return the value null.
return of_value(None)
if is_scalar_type(type_):
# Scalars determine if a value is valid via `parse_value()`, which can throw to
# indicate failure. If it throws, maintain a reference to the original error.
type_ = cast(GraphQLScalarType, type_)
try:
parse_result = type_.parse_value(value)
if is_invalid(parse_result):
return of_errors(
[coercion_error(f"Expected type {type_.name}", blame_node, path)]
)
return of_value(parse_result)
except (TypeError, ValueError) as error:
return of_errors(
[
coercion_error(
f"Expected type {type_.name}",
blame_node,
path,
str(error),
error,
)
]
)
if is_enum_type(type_):
type_ = cast(GraphQLEnumType, type_)
values = type_.values
if isinstance(value, str):
enum_value = values.get(value)
if enum_value:
return of_value(value if enum_value.value is None else enum_value.value)
suggestions = suggestion_list(str(value), values)
did_you_mean = f"did you mean {or_list(suggestions)}?" if suggestions else None
return of_errors(
[
coercion_error(
f"Expected type {type_.name}", blame_node, path, did_you_mean
)
]
)
if is_list_type(type_):
type_ = cast(GraphQLList, type_)
item_type = type_.of_type
if isinstance(value, Iterable) and not isinstance(value, str):
errors = None
coerced_value_list: List[Any] = []
append_item = coerced_value_list.append
for index, item_value in enumerate(value):
coerced_item = coerce_value(
item_value, item_type, blame_node, at_path(path, index)
)
if coerced_item.errors:
errors = add(errors, *coerced_item.errors)
elif not errors:
append_item(coerced_item.value)
return of_errors(errors) if errors else of_value(coerced_value_list)
# Lists accept a non-list value as a list of one.
coerced_item = coerce_value(value, item_type, blame_node)
return coerced_item if coerced_item.errors else of_value([coerced_item.value])
if is_input_object_type(type_):
type_ = cast(GraphQLInputObjectType, type_)
if not isinstance(value, dict):
return of_errors(
[
coercion_error(
f"Expected type {type_.name} to be a dict", blame_node, path
)
]
)
errors = None
coerced_value_dict: Dict[str, Any] = {}
fields = type_.fields
# Ensure every defined field is valid.
for field_name, field in fields.items():
field_value = value.get(field_name, INVALID)
if is_invalid(field_value):
if not is_invalid(field.default_value):
coerced_value_dict[field_name] = field.default_value
elif is_non_null_type(field.type):
errors = add(
errors,
coercion_error(
f"Field {print_path(at_path(path, field_name))}"
f" of required type {field.type} was not provided",
blame_node,
),
)
else:
coerced_field = coerce_value(
field_value, field.type, blame_node, at_path(path, field_name)
)
if coerced_field.errors:
errors = add(errors, *coerced_field.errors)
else:
coerced_value_dict[field_name] = coerced_field.value
# Ensure every provided field is defined.
for field_name in value:
if field_name not in fields:
suggestions = suggestion_list(field_name, fields)
did_you_mean = (
f"did you mean {or_list(suggestions)}?" if suggestions else None
)
errors = add(
errors,
coercion_error(
f"Field '{field_name}' is not defined by type {type_.name}",
blame_node,
path,
did_you_mean,
),
)
return of_errors(errors) if errors else of_value(coerced_value_dict)
# Not reachable. All possible input types have been considered.
raise TypeError(f"Unexpected input type: '{inspect(type_)}'.") | ['def', 'coerce_value', '(', 'value', ':', 'Any', ',', 'type_', ':', 'GraphQLInputType', ',', 'blame_node', ':', 'Node', '=', 'None', ',', 'path', ':', 'Path', '=', 'None', ')', '->', 'CoercedValue', ':', '# A value must be provided if the type is non-null.', 'if', 'is_non_null_type', '(', 'type_', ')', ':', 'if', 'value', 'is', 'None', 'or', 'value', 'is', 'INVALID', ':', 'return', 'of_errors', '(', '[', 'coercion_error', '(', 'f"Expected non-nullable type {type_} not to be null"', ',', 'blame_node', ',', 'path', ',', ')', ']', ')', 'type_', '=', 'cast', '(', 'GraphQLNonNull', ',', 'type_', ')', 'return', 'coerce_value', '(', 'value', ',', 'type_', '.', 'of_type', ',', 'blame_node', ',', 'path', ')', 'if', 'value', 'is', 'None', 'or', 'value', 'is', 'INVALID', ':', '# Explicitly return the value null.', 'return', 'of_value', '(', 'None', ')', 'if', 'is_scalar_type', '(', 'type_', ')', ':', '# Scalars determine if a value is valid via `parse_value()`, which can throw to', '# indicate failure. If it throws, maintain a reference to the original error.', 'type_', '=', 'cast', '(', 'GraphQLScalarType', ',', 'type_', ')', 'try', ':', 'parse_result', '=', 'type_', '.', 'parse_value', '(', 'value', ')', 'if', 'is_invalid', '(', 'parse_result', ')', ':', 'return', 'of_errors', '(', '[', 'coercion_error', '(', 'f"Expected type {type_.name}"', ',', 'blame_node', ',', 'path', ')', ']', ')', 'return', 'of_value', '(', 'parse_result', ')', 'except', '(', 'TypeError', ',', 'ValueError', ')', 'as', 'error', ':', 'return', 'of_errors', '(', '[', 'coercion_error', '(', 'f"Expected type {type_.name}"', ',', 'blame_node', ',', 'path', ',', 'str', '(', 'error', ')', ',', 'error', ',', ')', ']', ')', 'if', 'is_enum_type', '(', 'type_', ')', ':', 'type_', '=', 'cast', '(', 'GraphQLEnumType', ',', 'type_', ')', 'values', '=', 'type_', '.', 'values', 'if', 'isinstance', '(', 'value', ',', 'str', ')', ':', 'enum_value', '=', 'values', '.', 'get', '(', 'value', ')', 'if', 'enum_value', ':', 'return', 'of_value', '(', 'value', 'if', 'enum_value', '.', 'value', 'is', 'None', 'else', 'enum_value', '.', 'value', ')', 'suggestions', '=', 'suggestion_list', '(', 'str', '(', 'value', ')', ',', 'values', ')', 'did_you_mean', '=', 'f"did you mean {or_list(suggestions)}?"', 'if', 'suggestions', 'else', 'None', 'return', 'of_errors', '(', '[', 'coercion_error', '(', 'f"Expected type {type_.name}"', ',', 'blame_node', ',', 'path', ',', 'did_you_mean', ')', ']', ')', 'if', 'is_list_type', '(', 'type_', ')', ':', 'type_', '=', 'cast', '(', 'GraphQLList', ',', 'type_', ')', 'item_type', '=', 'type_', '.', 'of_type', 'if', 'isinstance', '(', 'value', ',', 'Iterable', ')', 'and', 'not', 'isinstance', '(', 'value', ',', 'str', ')', ':', 'errors', '=', 'None', 'coerced_value_list', ':', 'List', '[', 'Any', ']', '=', '[', ']', 'append_item', '=', 'coerced_value_list', '.', 'append', 'for', 'index', ',', 'item_value', 'in', 'enumerate', '(', 'value', ')', ':', 'coerced_item', '=', 'coerce_value', '(', 'item_value', ',', 'item_type', ',', 'blame_node', ',', 'at_path', '(', 'path', ',', 'index', ')', ')', 'if', 'coerced_item', '.', 'errors', ':', 'errors', '=', 'add', '(', 'errors', ',', '*', 'coerced_item', '.', 'errors', ')', 'elif', 'not', 'errors', ':', 'append_item', '(', 'coerced_item', '.', 'value', ')', 'return', 'of_errors', '(', 'errors', ')', 'if', 'errors', 'else', 'of_value', '(', 'coerced_value_list', ')', '# Lists accept a non-list value as a list of one.', 
'coerced_item', '=', 'coerce_value', '(', 'value', ',', 'item_type', ',', 'blame_node', ')', 'return', 'coerced_item', 'if', 'coerced_item', '.', 'errors', 'else', 'of_value', '(', '[', 'coerced_item', '.', 'value', ']', ')', 'if', 'is_input_object_type', '(', 'type_', ')', ':', 'type_', '=', 'cast', '(', 'GraphQLInputObjectType', ',', 'type_', ')', 'if', 'not', 'isinstance', '(', 'value', ',', 'dict', ')', ':', 'return', 'of_errors', '(', '[', 'coercion_error', '(', 'f"Expected type {type_.name} to be a dict"', ',', 'blame_node', ',', 'path', ')', ']', ')', 'errors', '=', 'None', 'coerced_value_dict', ':', 'Dict', '[', 'str', ',', 'Any', ']', '=', '{', '}', 'fields', '=', 'type_', '.', 'fields', '# Ensure every defined field is valid.', 'for', 'field_name', ',', 'field', 'in', 'fields', '.', 'items', '(', ')', ':', 'field_value', '=', 'value', '.', 'get', '(', 'field_name', ',', 'INVALID', ')', 'if', 'is_invalid', '(', 'field_value', ')', ':', 'if', 'not', 'is_invalid', '(', 'field', '.', 'default_value', ')', ':', 'coerced_value_dict', '[', 'field_name', ']', '=', 'field', '.', 'default_value', 'elif', 'is_non_null_type', '(', 'field', '.', 'type', ')', ':', 'errors', '=', 'add', '(', 'errors', ',', 'coercion_error', '(', 'f"Field {print_path(at_path(path, field_name))}"', 'f" of required type {field.type} was not provided"', ',', 'blame_node', ',', ')', ',', ')', 'else', ':', 'coerced_field', '=', 'coerce_value', '(', 'field_value', ',', 'field', '.', 'type', ',', 'blame_node', ',', 'at_path', '(', 'path', ',', 'field_name', ')', ')', 'if', 'coerced_field', '.', 'errors', ':', 'errors', '=', 'add', '(', 'errors', ',', '*', 'coerced_field', '.', 'errors', ')', 'else', ':', 'coerced_value_dict', '[', 'field_name', ']', '=', 'coerced_field', '.', 'value', '# Ensure every provided field is defined.', 'for', 'field_name', 'in', 'value', ':', 'if', 'field_name', 'not', 'in', 'fields', ':', 'suggestions', '=', 'suggestion_list', '(', 'field_name', ',', 'fields', ')', 'did_you_mean', '=', '(', 'f"did you mean {or_list(suggestions)}?"', 'if', 'suggestions', 'else', 'None', ')', 'errors', '=', 'add', '(', 'errors', ',', 'coercion_error', '(', 'f"Field \'{field_name}\' is not defined by type {type_.name}"', ',', 'blame_node', ',', 'path', ',', 'did_you_mean', ',', ')', ',', ')', 'return', 'of_errors', '(', 'errors', ')', 'if', 'errors', 'else', 'of_value', '(', 'coerced_value_dict', ')', '# Not reachable. All possible input types have been considered.', 'raise', 'TypeError', '(', 'f"Unexpected input type: \'{inspect(type_)}\'."', ')'] | Coerce a Python value given a GraphQL Type.
Returns either a value which is valid for the provided type or a list of encountered
coercion errors. | ['Coerce', 'a', 'Python', 'value', 'given', 'a', 'GraphQL', 'Type', '.'] | train | https://github.com/graphql-python/graphql-core-next/blob/073dce3f002f897d40f9348ffd8f107815160540/graphql/utilities/coerce_value.py#L33-L179 |
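A usage sketch for the coercion helper above, assuming the graphql-core-next package (where `coerce_value` lives at the path shown); the result exposes `.value` and `.errors` as in the source.

```python
from graphql import GraphQLInt, GraphQLNonNull
from graphql.utilities.coerce_value import coerce_value

ok = coerce_value(4, GraphQLNonNull(GraphQLInt))
print(ok.value, ok.errors)  # 4 None

bad = coerce_value(None, GraphQLNonNull(GraphQLInt))
# Roughly: "Expected non-nullable type Int! not to be null"
print(bad.errors[0])
```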
5,908 | log2timeline/plaso | plaso/parsers/pe.py | PEParser._GetLoadConfigTimestamp | def _GetLoadConfigTimestamp(self, pefile_object):
"""Retrieves the timestamp from the Load Configuration directory.
Args:
pefile_object (pefile.PE): pefile object.
Returns:
int: load configuration timestamps or None if there are none present.
"""
if not hasattr(pefile_object, 'DIRECTORY_ENTRY_LOAD_CONFIG'):
return None
timestamp = getattr(
pefile_object.DIRECTORY_ENTRY_LOAD_CONFIG.struct, 'TimeDateStamp', 0)
return timestamp | python | def _GetLoadConfigTimestamp(self, pefile_object):
"""Retrieves the timestamp from the Load Configuration directory.
Args:
pefile_object (pefile.PE): pefile object.
Returns:
int: load configuration timestamps or None if there are none present.
"""
if not hasattr(pefile_object, 'DIRECTORY_ENTRY_LOAD_CONFIG'):
return None
timestamp = getattr(
pefile_object.DIRECTORY_ENTRY_LOAD_CONFIG.struct, 'TimeDateStamp', 0)
return timestamp | ['def', '_GetLoadConfigTimestamp', '(', 'self', ',', 'pefile_object', ')', ':', 'if', 'not', 'hasattr', '(', 'pefile_object', ',', "'DIRECTORY_ENTRY_LOAD_CONFIG'", ')', ':', 'return', 'None', 'timestamp', '=', 'getattr', '(', 'pefile_object', '.', 'DIRECTORY_ENTRY_LOAD_CONFIG', '.', 'struct', ',', "'TimeDateStamp'", ',', '0', ')', 'return', 'timestamp'] | Retrieves the timestamp from the Load Configuration directory.
Args:
pefile_object (pefile.PE): pefile object.
Returns:
int: load configuration timestamps or None if there are none present. | ['Retrieves', 'the', 'timestamp', 'from', 'the', 'Load', 'Configuration', 'directory', '.'] | train | https://github.com/log2timeline/plaso/blob/9c564698d2da3ffbe23607a3c54c0582ea18a6cc/plaso/parsers/pe.py#L120-L133 |
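A standalone sketch of the same lookup done directly with `pefile`; the binary path is a placeholder for any PE file on disk.

```python
import pefile

pe = pefile.PE("C:/Windows/System32/notepad.exe")  # placeholder path
if hasattr(pe, "DIRECTORY_ENTRY_LOAD_CONFIG"):
    # Fall back to 0 when the struct lacks a TimeDateStamp, as the parser does.
    print(getattr(pe.DIRECTORY_ENTRY_LOAD_CONFIG.struct, "TimeDateStamp", 0))
else:
    print("no load configuration directory")
```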
5,909 | collectiveacuity/labPack | labpack/storage/google/drive.py | driveClient._import | def _import(self, record_key, record_data, overwrite=True, last_modified=0.0, **kwargs):
'''
a helper method for other storage clients to import into appdata
:param record_key: string with key for record
:param record_data: byte data for body of record
:param overwrite: [optional] boolean to overwrite existing records
:param last_modified: [optional] float to record last modified date
:param kwargs: [optional] keyword arguments from other import methods
:return: boolean indicating whether record was imported
'''
title = '%s._import' % self.__class__.__name__
# verify permissions
if not self.permissions_write:
raise Exception('%s requires an access_token with write permissions.' % title)
# retrieve file id
file_id, parent_id = self._get_id(record_key)
# check overwrite condition
if file_id:
if overwrite:
try:
self.drive.delete(fileId=file_id).execute()
except:
raise DriveConnectionError(title)
else:
return False
# # check size of file
# import sys
# record_optimal = self.fields.metadata['record_optimal_bytes']
# record_size = sys.getsizeof(record_data)
# error_prefix = '%s(record_key="%s", record_data=b"...")' % (title, record_key)
# if record_size > record_optimal:
# print('[WARNING] %s exceeds optimal record data size of %s bytes.' % (error_prefix, record_optimal))
# prepare file body
from googleapiclient.http import MediaInMemoryUpload
media_body = MediaInMemoryUpload(body=record_data, resumable=True)
# determine path segments
path_segments = record_key.split(os.sep)
# construct upload kwargs
create_kwargs = {
'body': {
'name': path_segments.pop()
},
'media_body': media_body,
'fields': 'id'
}
# walk through parent directories
parent_id = ''
if path_segments:
# construct query and creation arguments
walk_folders = True
folder_kwargs = {
'body': {
'name': '',
'mimeType' : 'application/vnd.google-apps.folder'
},
'fields': 'id'
}
query_kwargs = {
'spaces': self.drive_space,
'fields': 'files(id, parents)'
}
while path_segments:
folder_name = path_segments.pop(0)
folder_kwargs['body']['name'] = folder_name
# search for folder id in existing hierarchy
if walk_folders:
walk_query = "name = '%s'" % folder_name
if parent_id:
walk_query += "and '%s' in parents" % parent_id
query_kwargs['q'] = walk_query
try:
response = self.drive.list(**query_kwargs).execute()
except:
raise DriveConnectionError(title)
file_list = response.get('files', [])
else:
file_list = []
if file_list:
parent_id = file_list[0].get('id')
# or create folder
# https://developers.google.com/drive/v3/web/folder
else:
try:
if not parent_id:
if self.drive_space == 'appDataFolder':
folder_kwargs['body']['parents'] = [ self.drive_space ]
else:
del folder_kwargs['body']['parents']
else:
folder_kwargs['body']['parents'] = [parent_id]
response = self.drive.create(**folder_kwargs).execute()
parent_id = response.get('id')
walk_folders = False
except:
raise DriveConnectionError(title)
# add parent id to file creation kwargs
if parent_id:
create_kwargs['body']['parents'] = [parent_id]
elif self.drive_space == 'appDataFolder':
create_kwargs['body']['parents'] = [self.drive_space]
# modify file time
import re
if re.search('\\.drep$', create_kwargs['body']['name']):
from labpack.records.time import labDT
drep_time = labDT.fromEpoch(1).isoformat()
create_kwargs['body']['modifiedTime'] = drep_time
elif last_modified:
from labpack.records.time import labDT
mod_time = labDT.fromEpoch(last_modified).isoformat()
create_kwargs['body']['modifiedTime'] = mod_time
# send create request
try:
self.drive.create(**create_kwargs).execute()
except:
raise DriveConnectionError(title)
return True | python | def _import(self, record_key, record_data, overwrite=True, last_modified=0.0, **kwargs):
'''
a helper method for other storage clients to import into appdata
:param record_key: string with key for record
:param record_data: byte data for body of record
:param overwrite: [optional] boolean to overwrite existing records
:param last_modified: [optional] float to record last modified date
:param kwargs: [optional] keyword arguments from other import methods
:return: boolean indicating whether record was imported
'''
title = '%s._import' % self.__class__.__name__
# verify permissions
if not self.permissions_write:
raise Exception('%s requires an access_token with write permissions.' % title)
# retrieve file id
file_id, parent_id = self._get_id(record_key)
# check overwrite condition
if file_id:
if overwrite:
try:
self.drive.delete(fileId=file_id).execute()
except:
raise DriveConnectionError(title)
else:
return False
# # check size of file
# import sys
# record_optimal = self.fields.metadata['record_optimal_bytes']
# record_size = sys.getsizeof(record_data)
# error_prefix = '%s(record_key="%s", record_data=b"...")' % (title, record_key)
# if record_size > record_optimal:
# print('[WARNING] %s exceeds optimal record data size of %s bytes.' % (error_prefix, record_optimal))
# prepare file body
from googleapiclient.http import MediaInMemoryUpload
media_body = MediaInMemoryUpload(body=record_data, resumable=True)
# determine path segments
path_segments = record_key.split(os.sep)
# construct upload kwargs
create_kwargs = {
'body': {
'name': path_segments.pop()
},
'media_body': media_body,
'fields': 'id'
}
# walk through parent directories
parent_id = ''
if path_segments:
# construct query and creation arguments
walk_folders = True
folder_kwargs = {
'body': {
'name': '',
'mimeType' : 'application/vnd.google-apps.folder'
},
'fields': 'id'
}
query_kwargs = {
'spaces': self.drive_space,
'fields': 'files(id, parents)'
}
while path_segments:
folder_name = path_segments.pop(0)
folder_kwargs['body']['name'] = folder_name
# search for folder id in existing hierarchy
if walk_folders:
walk_query = "name = '%s'" % folder_name
if parent_id:
walk_query += "and '%s' in parents" % parent_id
query_kwargs['q'] = walk_query
try:
response = self.drive.list(**query_kwargs).execute()
except:
raise DriveConnectionError(title)
file_list = response.get('files', [])
else:
file_list = []
if file_list:
parent_id = file_list[0].get('id')
# or create folder
# https://developers.google.com/drive/v3/web/folder
else:
try:
if not parent_id:
if self.drive_space == 'appDataFolder':
folder_kwargs['body']['parents'] = [ self.drive_space ]
else:
del folder_kwargs['body']['parents']
else:
folder_kwargs['body']['parents'] = [parent_id]
response = self.drive.create(**folder_kwargs).execute()
parent_id = response.get('id')
walk_folders = False
except:
raise DriveConnectionError(title)
# add parent id to file creation kwargs
if parent_id:
create_kwargs['body']['parents'] = [parent_id]
elif self.drive_space == 'appDataFolder':
create_kwargs['body']['parents'] = [self.drive_space]
# modify file time
import re
if re.search('\\.drep$', create_kwargs['body']['name']):
from labpack.records.time import labDT
drep_time = labDT.fromEpoch(1).isoformat()
create_kwargs['body']['modifiedTime'] = drep_time
elif last_modified:
from labpack.records.time import labDT
mod_time = labDT.fromEpoch(last_modified).isoformat()
create_kwargs['body']['modifiedTime'] = mod_time
# send create request
try:
self.drive.create(**create_kwargs).execute()
except:
raise DriveConnectionError(title)
return True | ['def', '_import', '(', 'self', ',', 'record_key', ',', 'record_data', ',', 'overwrite', '=', 'True', ',', 'last_modified', '=', '0.0', ',', '*', '*', 'kwargs', ')', ':', 'title', '=', "'%s._import'", '%', 'self', '.', '__class__', '.', '__name__', '# verify permissions', 'if', 'not', 'self', '.', 'permissions_write', ':', 'raise', 'Exception', '(', "'%s requires an access_token with write permissions.'", '%', 'title', ')', '# retrieve file id', 'file_id', ',', 'parent_id', '=', 'self', '.', '_get_id', '(', 'record_key', ')', '# check overwrite condition', 'if', 'file_id', ':', 'if', 'overwrite', ':', 'try', ':', 'self', '.', 'drive', '.', 'delete', '(', 'fileId', '=', 'file_id', ')', '.', 'execute', '(', ')', 'except', ':', 'raise', 'DriveConnectionError', '(', 'title', ')', 'else', ':', 'return', 'False', '# # check size of file', '# import sys', "# record_optimal = self.fields.metadata['record_optimal_bytes']", '# record_size = sys.getsizeof(record_data)', '# error_prefix = \'%s(record_key="%s", record_data=b"...")\' % (title, record_key)', '# if record_size > record_optimal:', "# print('[WARNING] %s exceeds optimal record data size of %s bytes.' % (error_prefix, record_optimal))", '# prepare file body', 'from', 'googleapiclient', '.', 'http', 'import', 'MediaInMemoryUpload', 'media_body', '=', 'MediaInMemoryUpload', '(', 'body', '=', 'record_data', ',', 'resumable', '=', 'True', ')', '# determine path segments', 'path_segments', '=', 'record_key', '.', 'split', '(', 'os', '.', 'sep', ')', '# construct upload kwargs', 'create_kwargs', '=', '{', "'body'", ':', '{', "'name'", ':', 'path_segments', '.', 'pop', '(', ')', '}', ',', "'media_body'", ':', 'media_body', ',', "'fields'", ':', "'id'", '}', '# walk through parent directories', 'parent_id', '=', "''", 'if', 'path_segments', ':', '# construct query and creation arguments', 'walk_folders', '=', 'True', 'folder_kwargs', '=', '{', "'body'", ':', '{', "'name'", ':', "''", ',', "'mimeType'", ':', "'application/vnd.google-apps.folder'", '}', ',', "'fields'", ':', "'id'", '}', 'query_kwargs', '=', '{', "'spaces'", ':', 'self', '.', 'drive_space', ',', "'fields'", ':', "'files(id, parents)'", '}', 'while', 'path_segments', ':', 'folder_name', '=', 'path_segments', '.', 'pop', '(', '0', ')', 'folder_kwargs', '[', "'body'", ']', '[', "'name'", ']', '=', 'folder_name', '# search for folder id in existing hierarchy', 'if', 'walk_folders', ':', 'walk_query', '=', '"name = \'%s\'"', '%', 'folder_name', 'if', 'parent_id', ':', 'walk_query', '+=', '"and \'%s\' in parents"', '%', 'parent_id', 'query_kwargs', '[', "'q'", ']', '=', 'walk_query', 'try', ':', 'response', '=', 'self', '.', 'drive', '.', 'list', '(', '*', '*', 'query_kwargs', ')', '.', 'execute', '(', ')', 'except', ':', 'raise', 'DriveConnectionError', '(', 'title', ')', 'file_list', '=', 'response', '.', 'get', '(', "'files'", ',', '[', ']', ')', 'else', ':', 'file_list', '=', '[', ']', 'if', 'file_list', ':', 'parent_id', '=', 'file_list', '[', '0', ']', '.', 'get', '(', "'id'", ')', '# or create folder', '# https://developers.google.com/drive/v3/web/folder', 'else', ':', 'try', ':', 'if', 'not', 'parent_id', ':', 'if', 'self', '.', 'drive_space', '==', "'appDataFolder'", ':', 'folder_kwargs', '[', "'body'", ']', '[', "'parents'", ']', '=', '[', 'self', '.', 'drive_space', ']', 'else', ':', 'del', 'folder_kwargs', '[', "'body'", ']', '[', "'parents'", ']', 'else', ':', 'folder_kwargs', '[', "'body'", ']', '[', "'parents'", ']', '=', '[', 'parent_id', ']', 'response', '=', 
'self', '.', 'drive', '.', 'create', '(', '*', '*', 'folder_kwargs', ')', '.', 'execute', '(', ')', 'parent_id', '=', 'response', '.', 'get', '(', "'id'", ')', 'walk_folders', '=', 'False', 'except', ':', 'raise', 'DriveConnectionError', '(', 'title', ')', '# add parent id to file creation kwargs', 'if', 'parent_id', ':', 'create_kwargs', '[', "'body'", ']', '[', "'parents'", ']', '=', '[', 'parent_id', ']', 'elif', 'self', '.', 'drive_space', '==', "'appDataFolder'", ':', 'create_kwargs', '[', "'body'", ']', '[', "'parents'", ']', '=', '[', 'self', '.', 'drive_space', ']', '# modify file time', 'import', 're', 'if', 're', '.', 'search', '(', "'\\\\.drep$'", ',', 'create_kwargs', '[', "'body'", ']', '[', "'name'", ']', ')', ':', 'from', 'labpack', '.', 'records', '.', 'time', 'import', 'labDT', 'drep_time', '=', 'labDT', '.', 'fromEpoch', '(', '1', ')', '.', 'isoformat', '(', ')', 'create_kwargs', '[', "'body'", ']', '[', "'modifiedTime'", ']', '=', 'drep_time', 'elif', 'last_modified', ':', 'from', 'labpack', '.', 'records', '.', 'time', 'import', 'labDT', 'mod_time', '=', 'labDT', '.', 'fromEpoch', '(', 'last_modified', ')', '.', 'isoformat', '(', ')', 'create_kwargs', '[', "'body'", ']', '[', "'modifiedTime'", ']', '=', 'mod_time', '# send create request', 'try', ':', 'self', '.', 'drive', '.', 'create', '(', '*', '*', 'create_kwargs', ')', '.', 'execute', '(', ')', 'except', ':', 'raise', 'DriveConnectionError', '(', 'title', ')', 'return', 'True'] | a helper method for other storage clients to import into appdata
:param record_key: string with key for record
:param record_data: byte data for body of record
:param overwrite: [optional] boolean to overwrite existing records
:param last_modified: [optional] float to record last modified date
:param kwargs: [optional] keyword arguments from other import methods
:return: boolean indicating whether record was imported | ['a', 'helper', 'method', 'for', 'other', 'storage', 'clients', 'to', 'import', 'into', 'appdata', ':', 'param', 'record_key', ':', 'string', 'with', 'key', 'for', 'record', ':', 'param', 'record_data', ':', 'byte', 'data', 'for', 'body', 'of', 'record', ':', 'param', 'overwrite', ':', '[', 'optional', ']', 'boolean', 'to', 'overwrite', 'existing', 'records', ':', 'param', 'last_modified', ':', '[', 'optional', ']', 'float', 'to', 'record', 'last', 'modified', 'date', ':', 'param', 'kwargs', ':', '[', 'optional', ']', 'keyword', 'arguments', 'from', 'other', 'import', 'methods', ':', 'return', ':', 'boolean', 'indicating', 'whether', 'record', 'was', 'imported'] | train | https://github.com/collectiveacuity/labPack/blob/52949ece35e72e3cc308f54d9ffa6bfbd96805b8/labpack/storage/google/drive.py#L394-L527 |
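A reduced sketch of the core upload step inside `_import`, assuming `google-api-python-client` is installed; `creds` is a hypothetical, already-obtained credentials object and is not defined here.

```python
from googleapiclient.discovery import build
from googleapiclient.http import MediaInMemoryUpload

service = build("drive", "v3", credentials=creds)  # creds: hypothetical credentials
media_body = MediaInMemoryUpload(body=b"record contents", resumable=True)
created = service.files().create(
    body={"name": "record.json", "parents": ["appDataFolder"]},
    media_body=media_body,
    fields="id",
).execute()
print(created["id"])
```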
5,910 | numenta/htmresearch | htmresearch/algorithms/multiconnections.py | Multiconnections.setPermanences | def setPermanences(self, segments, presynapticCellsBySource, permanence):
"""
Set the permanence of a specific set of synapses. Any synapses that don't
exist will be initialized. Any existing synapses will be overwritten.
Conceptually, this method takes a list of [segment, presynapticCell] pairs
and initializes their permanence. For each segment, one synapse is added
(although one might be added for each "source"). To add multiple synapses to
a segment, include it in the list multiple times.
The total number of affected synapses is len(segments)*number_of_sources*1.
@param segments (numpy array)
One segment for each synapse that should be added
@param presynapticCellsBySource (dict of numpy arrays)
One presynaptic cell for each segment.
Example:
{"customInputName1": np.array([42, 69])}
@param permanence (float)
The permanence to assign the synapse
"""
permanences = np.repeat(np.float32(permanence), len(segments))
for source, connections in self.connectionsBySource.iteritems():
if source in presynapticCellsBySource:
connections.matrix.setElements(segments, presynapticCellsBySource[source],
permanences) | python | def setPermanences(self, segments, presynapticCellsBySource, permanence):
"""
Set the permanence of a specific set of synapses. Any synapses that don't
exist will be initialized. Any existing synapses will be overwritten.
Conceptually, this method takes a list of [segment, presynapticCell] pairs
and initializes their permanence. For each segment, one synapse is added
(although one might be added for each "source"). To add multiple synapses to
a segment, include it in the list multiple times.
The total number of affected synapses is len(segments)*number_of_sources*1.
@param segments (numpy array)
One segment for each synapse that should be added
@param presynapticCellsBySource (dict of numpy arrays)
One presynaptic cell for each segment.
Example:
{"customInputName1": np.array([42, 69])}
@param permanence (float)
The permanence to assign the synapse
"""
permanences = np.repeat(np.float32(permanence), len(segments))
for source, connections in self.connectionsBySource.iteritems():
if source in presynapticCellsBySource:
connections.matrix.setElements(segments, presynapticCellsBySource[source],
permanences) | ['def', 'setPermanences', '(', 'self', ',', 'segments', ',', 'presynapticCellsBySource', ',', 'permanence', ')', ':', 'permanences', '=', 'np', '.', 'repeat', '(', 'np', '.', 'float32', '(', 'permanence', ')', ',', 'len', '(', 'segments', ')', ')', 'for', 'source', ',', 'connections', 'in', 'self', '.', 'connectionsBySource', '.', 'iteritems', '(', ')', ':', 'if', 'source', 'in', 'presynapticCellsBySource', ':', 'connections', '.', 'matrix', '.', 'setElements', '(', 'segments', ',', 'presynapticCellsBySource', '[', 'source', ']', ',', 'permanences', ')'] | Set the permanence of a specific set of synapses. Any synapses that don't
exist will be initialized. Any existing synapses will be overwritten.
Conceptually, this method takes a list of [segment, presynapticCell] pairs
and initializes their permanence. For each segment, one synapse is added
(although one might be added for each "source"). To add multiple synapses to
a segment, include it in the list multiple times.
The total number of affected synapses is len(segments)*number_of_sources*1.
@param segments (numpy array)
One segment for each synapse that should be added
@param presynapticCellsBySource (dict of numpy arrays)
One presynaptic cell for each segment.
Example:
{"customInputName1": np.array([42, 69])}
@param permanence (float)
The permanence to assign the synapse | ['Set', 'the', 'permanence', 'of', 'a', 'specific', 'set', 'of', 'synapses', '.', 'Any', 'synapses', 'that', 'don', 't', 'exist', 'will', 'be', 'initialized', '.', 'Any', 'existing', 'synapses', 'will', 'be', 'overwritten', '.'] | train | https://github.com/numenta/htmresearch/blob/70c096b09a577ea0432c3f3bfff4442d4871b7aa/htmresearch/algorithms/multiconnections.py#L109-L137 |
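A self-contained sketch of the batch-update pattern above, with a dense NumPy array standing in for nupic's sparse `matrix.setElements`.

```python
import numpy as np

segments = np.array([0, 2, 2])            # one row index per synapse to set
presynaptic_cells = np.array([5, 1, 7])   # one column index per synapse to set
permanences = np.repeat(np.float32(0.3), len(segments))

matrix = np.zeros((4, 10), dtype=np.float32)
matrix[segments, presynaptic_cells] = permanences  # initialize/overwrite in one shot
print(matrix[2, 7])  # 0.3
```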
5,911 | DLR-RM/RAFCON | source/rafcon/gui/controllers/utils/tree_view_controller.py | TreeViewController.update_selection_sm_prior_condition | def update_selection_sm_prior_condition(self, state_row_iter, selected_model_list, sm_selected_model_list):
"""State machine prior update of tree selection for one tree model row"""
selected_path = self.tree_store.get_path(state_row_iter)
tree_model_row = self.tree_store[selected_path]
model = tree_model_row[self.MODEL_STORAGE_ID]
if model not in sm_selected_model_list and model in selected_model_list:
self._tree_selection.unselect_iter(state_row_iter)
elif model in sm_selected_model_list and model not in selected_model_list:
self.tree_view.expand_to_path(selected_path)
self._tree_selection.select_iter(state_row_iter) | python | def update_selection_sm_prior_condition(self, state_row_iter, selected_model_list, sm_selected_model_list):
"""State machine prior update of tree selection for one tree model row"""
selected_path = self.tree_store.get_path(state_row_iter)
tree_model_row = self.tree_store[selected_path]
model = tree_model_row[self.MODEL_STORAGE_ID]
if model not in sm_selected_model_list and model in selected_model_list:
self._tree_selection.unselect_iter(state_row_iter)
elif model in sm_selected_model_list and model not in selected_model_list:
self.tree_view.expand_to_path(selected_path)
self._tree_selection.select_iter(state_row_iter) | ['def', 'update_selection_sm_prior_condition', '(', 'self', ',', 'state_row_iter', ',', 'selected_model_list', ',', 'sm_selected_model_list', ')', ':', 'selected_path', '=', 'self', '.', 'tree_store', '.', 'get_path', '(', 'state_row_iter', ')', 'tree_model_row', '=', 'self', '.', 'tree_store', '[', 'selected_path', ']', 'model', '=', 'tree_model_row', '[', 'self', '.', 'MODEL_STORAGE_ID', ']', 'if', 'model', 'not', 'in', 'sm_selected_model_list', 'and', 'model', 'in', 'selected_model_list', ':', 'self', '.', '_tree_selection', '.', 'unselect_iter', '(', 'state_row_iter', ')', 'elif', 'model', 'in', 'sm_selected_model_list', 'and', 'model', 'not', 'in', 'selected_model_list', ':', 'self', '.', 'tree_view', '.', 'expand_to_path', '(', 'selected_path', ')', 'self', '.', '_tree_selection', '.', 'select_iter', '(', 'state_row_iter', ')'] | State machine prior update of tree selection for one tree model row | ['State', 'machine', 'prior', 'update', 'of', 'tree', 'selection', 'for', 'one', 'tree', 'model', 'row'] | train | https://github.com/DLR-RM/RAFCON/blob/24942ef1a904531f49ab8830a1dbb604441be498/source/rafcon/gui/controllers/utils/tree_view_controller.py#L775-L785 |
5,912 | grycap/cpyutils | iputils.py | ip2hex | def ip2hex(ip):
'''
Converts an ip to a hex value that can be used with a hex bit mask
'''
parts = ip.split(".")
if len(parts) != 4: return None
ipv = 0
for part in parts:
try:
p = int(part)
if p < 0 or p > 255: return None
ipv = (ipv << 8) + p
except:
return None
return ipv | python | def ip2hex(ip):
'''
Converts an ip to a hex value that can be used with a hex bit mask
'''
parts = ip.split(".")
if len(parts) != 4: return None
ipv = 0
for part in parts:
try:
p = int(part)
if p < 0 or p > 255: return None
ipv = (ipv << 8) + p
except:
return None
return ipv | ['def', 'ip2hex', '(', 'ip', ')', ':', 'parts', '=', 'ip', '.', 'split', '(', '"."', ')', 'if', 'len', '(', 'parts', ')', '!=', '4', ':', 'return', 'None', 'ipv', '=', '0', 'for', 'part', 'in', 'parts', ':', 'try', ':', 'p', '=', 'int', '(', 'part', ')', 'if', 'p', '<', '0', 'or', 'p', '>', '255', ':', 'return', 'None', 'ipv', '=', '(', 'ipv', '<<', '8', ')', '+', 'p', 'except', ':', 'return', 'None', 'return', 'ipv'] | Converts an ip to a hex value that can be used with a hex bit mask | ['Converts', 'an', 'ip', 'to', 'a', 'hex', 'value', 'that', 'can', 'be', 'used', 'with', 'a', 'hex', 'bit', 'mask'] | train | https://github.com/grycap/cpyutils/blob/fa966fc6d2ae1e1e799e19941561aa79b617f1b1/iputils.py#L19-L33 |
5,913 | pygridtools/gridmap | examples/manual.py | compute_factorial | def compute_factorial(n):
"""
computes factorial of n
"""
sleep_walk(10)
ret = 1
for i in range(n):
ret = ret * (i + 1)
return ret | python | def compute_factorial(n):
"""
computes factorial of n
"""
sleep_walk(10)
ret = 1
for i in range(n):
ret = ret * (i + 1)
return ret | ['def', 'compute_factorial', '(', 'n', ')', ':', 'sleep_walk', '(', '10', ')', 'ret', '=', '1', 'for', 'i', 'in', 'range', '(', 'n', ')', ':', 'ret', '=', 'ret', '*', '(', 'i', '+', '1', ')', 'return', 'ret'] | computes factorial of n | ['computes', 'factorial', 'of', 'n'] | train | https://github.com/pygridtools/gridmap/blob/be4fb1478ab8d19fa3acddecdf1a5d8bd3789127/examples/manual.py#L49-L57 |
5,914 | geertj/gruvi | lib/gruvi/logging.py | ContextLogger.thread_info | def thread_info(self):
"""Return a string identifying the current thread and fiber."""
tid = threading.current_thread().name
if tid == 'MainThread':
tid = 'Main'
current = fibers.current()
fid = getattr(current, 'name') if current.parent else 'Root'
return '{}/{}'.format(tid, fid) | python | def thread_info(self):
"""Return a string identifying the current thread and fiber."""
tid = threading.current_thread().name
if tid == 'MainThread':
tid = 'Main'
current = fibers.current()
fid = getattr(current, 'name') if current.parent else 'Root'
return '{}/{}'.format(tid, fid) | ['def', 'thread_info', '(', 'self', ')', ':', 'tid', '=', 'threading', '.', 'current_thread', '(', ')', '.', 'name', 'if', 'tid', '==', "'MainThread'", ':', 'tid', '=', "'Main'", 'current', '=', 'fibers', '.', 'current', '(', ')', 'fid', '=', 'getattr', '(', 'current', ',', "'name'", ')', 'if', 'current', '.', 'parent', 'else', "'Root'", 'return', "'{}/{}'", '.', 'format', '(', 'tid', ',', 'fid', ')'] | Return a string identifying the current thread and fiber. | ['Return', 'a', 'string', 'identifying', 'the', 'current', 'thread', 'and', 'fiber', '.'] | train | https://github.com/geertj/gruvi/blob/1d77ca439600b6ea7a19aa1ee85dca0f3be3f3f8/lib/gruvi/logging.py#L79-L86 |
5,915 | lsbardel/python-stdnet | stdnet/odm/struct.py | Set.difference_update | def difference_update(self, values):
'''Remove an iterable of *values* from the set.'''
d = self.value_pickler.dumps
return self.cache.remove(tuple((d(v) for v in values))) | python | def difference_update(self, values):
'''Remove an iterable of *values* from the set.'''
d = self.value_pickler.dumps
return self.cache.remove(tuple((d(v) for v in values))) | ['def', 'difference_update', '(', 'self', ',', 'values', ')', ':', 'd', '=', 'self', '.', 'value_pickler', '.', 'dumps', 'return', 'self', '.', 'cache', '.', 'remove', '(', 'tuple', '(', '(', 'd', '(', 'v', ')', 'for', 'v', 'in', 'values', ')', ')', ')'] | Remove an iterable of *values* from the set. | ['Remove', 'an', 'iterable', 'of', '*', 'values', '*', 'from', 'the', 'set', '.'] | train | https://github.com/lsbardel/python-stdnet/blob/78db5320bdedc3f28c5e4f38cda13a4469e35db7/stdnet/odm/struct.py#L630-L633 |
5,916 | tradenity/python-sdk | tradenity/resources/country.py | Country.create_country | def create_country(cls, country, **kwargs):
"""Create Country
Create a new Country
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.create_country(country, async=True)
>>> result = thread.get()
:param async bool
:param Country country: Attributes of country to create (required)
:return: Country
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return cls._create_country_with_http_info(country, **kwargs)
else:
(data) = cls._create_country_with_http_info(country, **kwargs)
return data | python | def create_country(cls, country, **kwargs):
"""Create Country
Create a new Country
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.create_country(country, async=True)
>>> result = thread.get()
:param async bool
:param Country country: Attributes of country to create (required)
:return: Country
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return cls._create_country_with_http_info(country, **kwargs)
else:
(data) = cls._create_country_with_http_info(country, **kwargs)
return data | ['def', 'create_country', '(', 'cls', ',', 'country', ',', '*', '*', 'kwargs', ')', ':', 'kwargs', '[', "'_return_http_data_only'", ']', '=', 'True', 'if', 'kwargs', '.', 'get', '(', "'async'", ')', ':', 'return', 'cls', '.', '_create_country_with_http_info', '(', 'country', ',', '*', '*', 'kwargs', ')', 'else', ':', '(', 'data', ')', '=', 'cls', '.', '_create_country_with_http_info', '(', 'country', ',', '*', '*', 'kwargs', ')', 'return', 'data'] | Create Country
Create a new Country
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.create_country(country, async=True)
>>> result = thread.get()
:param async bool
:param Country country: Attributes of country to create (required)
:return: Country
If the method is called asynchronously,
returns the request thread. | ['Create', 'Country'] | train | https://github.com/tradenity/python-sdk/blob/d13fbe23f4d6ff22554c6d8d2deaf209371adaf1/tradenity/resources/country.py#L401-L421 |
5,917 | mitsei/dlkit | dlkit/handcar/learning/sessions.py | ObjectiveAdminSession.get_objective_form_for_create | def get_objective_form_for_create(self, objective_record_types=None):
"""Gets the objective form for creating new objectives.
A new form should be requested for each create transaction.
arg: objectiveRecordTypes (osid.type.Type): array of
objective record types
return: (osid.learning.ObjectiveForm) - the objective form
raise: NullArgument - objectiveRecordTypes is null
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
raise: Unsupported - unable to get form for requested record
types
compliance: mandatory - This method must be implemented.
"""
if objective_record_types is None:
pass # Still need to deal with the record_types argument
objective_form = objects.ObjectiveForm()
self._forms[objective_form.get_id().get_identifier()] = not CREATED
return objective_form | python | def get_objective_form_for_create(self, objective_record_types=None):
"""Gets the objective form for creating new objectives.
A new form should be requested for each create transaction.
arg: objectiveRecordTypes (osid.type.Type): array of
objective record types
return: (osid.learning.ObjectiveForm) - the objective form
raise: NullArgument - objectiveRecordTypes is null
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
raise: Unsupported - unable to get form for requested record
types
compliance: mandatory - This method must be implemented.
"""
if objective_record_types is None:
pass # Still need to deal with the record_types argument
objective_form = objects.ObjectiveForm()
self._forms[objective_form.get_id().get_identifier()] = not CREATED
return objective_form | ['def', 'get_objective_form_for_create', '(', 'self', ',', 'objective_record_types', '=', 'None', ')', ':', 'if', 'objective_record_types', 'is', 'None', ':', 'pass', '# Still need to deal with the record_types argument', 'objective_form', '=', 'objects', '.', 'ObjectiveForm', '(', ')', 'self', '.', '_forms', '[', 'objective_form', '.', 'get_id', '(', ')', '.', 'get_identifier', '(', ')', ']', '=', 'not', 'CREATED', 'return', 'objective_form'] | Gets the objective form for creating new objectives.
A new form should be requested for each create transaction.
arg: objectiveRecordTypes (osid.type.Type): array of
objective record types
return: (osid.learning.ObjectiveForm) - the objective form
raise: NullArgument - objectiveRecordTypes is null
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
raise: Unsupported - unable to get form for requested record
types
compliance: mandatory - This method must be implemented. | ['Gets', 'the', 'objective', 'form', 'for', 'creating', 'new', 'objectives', '.', 'A', 'new', 'form', 'should', 'be', 'requested', 'for', 'each', 'create', 'transaction', '.', 'arg', ':', 'objectiveRecordTypes', '(', 'osid', '.', 'type', '.', 'Type', ')', ':', 'array', 'of', 'objective', 'record', 'types', 'return', ':', '(', 'osid', '.', 'learning', '.', 'ObjectiveForm', ')', '-', 'the', 'objective', 'form', 'raise', ':', 'NullArgument', '-', 'objectiveRecordTypes', 'is', 'null', 'raise', ':', 'OperationFailed', '-', 'unable', 'to', 'complete', 'request', 'raise', ':', 'PermissionDenied', '-', 'authorization', 'failure', 'raise', ':', 'Unsupported', '-', 'unable', 'to', 'get', 'form', 'for', 'requested', 'record', 'types', 'compliance', ':', 'mandatory', '-', 'This', 'method', 'must', 'be', 'implemented', '.'] | train | https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/handcar/learning/sessions.py#L753-L771 |
5,918 | astrocatalogs/astrocats | astrocats/catalog/analysis.py | Analysis._file_nums_str | def _file_nums_str(self, n_all, n_type, n_ign):
"""Construct a string showing the number of different file types.
Returns
-------
f_str : str
"""
# 'other' is the difference between all and named
n_oth = n_all - np.sum(n_type)
f_str = "{} Files".format(n_all) + " ("
if len(n_type):
f_str += ", ".join("{} {}".format(name, num) for name, num in
zip(self._COUNT_FILE_TYPES, n_type))
f_str += ", "
f_str += "other {}; {} ignored)".format(n_oth, n_ign)
return f_str | python | def _file_nums_str(self, n_all, n_type, n_ign):
"""Construct a string showing the number of different file types.
Returns
-------
f_str : str
"""
# 'other' is the difference between all and named
n_oth = n_all - np.sum(n_type)
f_str = "{} Files".format(n_all) + " ("
if len(n_type):
f_str += ", ".join("{} {}".format(name, num) for name, num in
zip(self._COUNT_FILE_TYPES, n_type))
f_str += ", "
f_str += "other {}; {} ignored)".format(n_oth, n_ign)
return f_str | ['def', '_file_nums_str', '(', 'self', ',', 'n_all', ',', 'n_type', ',', 'n_ign', ')', ':', "# 'other' is the difference between all and named", 'n_oth', '=', 'n_all', '-', 'np', '.', 'sum', '(', 'n_type', ')', 'f_str', '=', '"{} Files"', '.', 'format', '(', 'n_all', ')', '+', '" ("', 'if', 'len', '(', 'n_type', ')', ':', 'f_str', '+=', '", "', '.', 'join', '(', '"{} {}"', '.', 'format', '(', 'name', ',', 'num', ')', 'for', 'name', ',', 'num', 'in', 'zip', '(', 'self', '.', '_COUNT_FILE_TYPES', ',', 'n_type', ')', ')', 'f_str', '+=', '", "', 'f_str', '+=', '"other {}; {} ignored)"', '.', 'format', '(', 'n_oth', ',', 'n_ign', ')', 'return', 'f_str'] | Construct a string showing the number of different file types.
Returns
-------
f_str : str | ['Construct', 'a', 'string', 'showing', 'the', 'number', 'of', 'different', 'file', 'types', '.'] | train | https://github.com/astrocatalogs/astrocats/blob/11abc3131c6366ecd23964369e55ff264add7805/astrocats/catalog/analysis.py#L131-L147 |
5,919 | mathandy/svgpathtools | svgpathtools/smoothing.py | kinks | def kinks(path, tol=1e-8):
"""returns indices of segments that start on a non-differentiable joint."""
kink_list = []
for idx in range(len(path)):
if idx == 0 and not path.isclosed():
continue
try:
u = path[(idx - 1) % len(path)].unit_tangent(1)
v = path[idx].unit_tangent(0)
u_dot_v = u.real*v.real + u.imag*v.imag
flag = False
except ValueError:
flag = True
if flag or abs(u_dot_v - 1) > tol:
kink_list.append(idx)
return kink_list | python | def kinks(path, tol=1e-8):
"""returns indices of segments that start on a non-differentiable joint."""
kink_list = []
for idx in range(len(path)):
if idx == 0 and not path.isclosed():
continue
try:
u = path[(idx - 1) % len(path)].unit_tangent(1)
v = path[idx].unit_tangent(0)
u_dot_v = u.real*v.real + u.imag*v.imag
flag = False
except ValueError:
flag = True
if flag or abs(u_dot_v - 1) > tol:
kink_list.append(idx)
return kink_list | ['def', 'kinks', '(', 'path', ',', 'tol', '=', '1e-8', ')', ':', 'kink_list', '=', '[', ']', 'for', 'idx', 'in', 'range', '(', 'len', '(', 'path', ')', ')', ':', 'if', 'idx', '==', '0', 'and', 'not', 'path', '.', 'isclosed', '(', ')', ':', 'continue', 'try', ':', 'u', '=', 'path', '[', '(', 'idx', '-', '1', ')', '%', 'len', '(', 'path', ')', ']', '.', 'unit_tangent', '(', '1', ')', 'v', '=', 'path', '[', 'idx', ']', '.', 'unit_tangent', '(', '0', ')', 'u_dot_v', '=', 'u', '.', 'real', '*', 'v', '.', 'real', '+', 'u', '.', 'imag', '*', 'v', '.', 'imag', 'flag', '=', 'False', 'except', 'ValueError', ':', 'flag', '=', 'True', 'if', 'flag', 'or', 'abs', '(', 'u_dot_v', '-', '1', ')', '>', 'tol', ':', 'kink_list', '.', 'append', '(', 'idx', ')', 'return', 'kink_list'] | returns indices of segments that start on a non-differentiable joint. | ['returns', 'indices', 'of', 'segments', 'that', 'start', 'on', 'a', 'non', '-', 'differentiable', 'joint', '.'] | train | https://github.com/mathandy/svgpathtools/blob/fd7348a1dfd88b65ea61da02325c6605aedf8c4f/svgpathtools/smoothing.py#L23-L39 |
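A small sketch of how `kinks` flags a non-smooth joint, assuming `svgpathtools` is installed: two line segments meeting at a right angle are reported by segment index.

```python
from svgpathtools import Path, Line
from svgpathtools.smoothing import kinks

# Two segments meeting at a 90-degree corner at the point 1+0j.
path = Path(Line(0 + 0j, 1 + 0j),   # heads in the +x direction
            Line(1 + 0j, 1 + 1j))   # heads in the +y direction
print(kinks(path))  # [1] -> segment 1 starts on a non-differentiable joint
```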
5,920 | etingof/pysnmp | pysnmp/smi/rfc1902.py | ObjectType.resolveWithMib | def resolveWithMib(self, mibViewController):
"""Perform MIB variable ID and associated value conversion.
Parameters
----------
mibViewController : :py:class:`~pysnmp.smi.view.MibViewController`
class instance representing MIB browsing functionality.
Returns
-------
: :py:class:`~pysnmp.smi.rfc1902.ObjectType`
reference to itself
Raises
------
SmiError
In case of fatal MIB hanling errora
Notes
-----
Calling this method involves
:py:meth:`~pysnmp.smi.rfc1902.ObjectIdentity.resolveWithMib`
method invocation.
Examples
--------
>>> from pysmi.hlapi import varbinds
>>> mibViewController = varbinds.AbstractVarBinds.getMibViewController( engine )
>>> objectType = ObjectType(ObjectIdentity('SNMPv2-MIB', 'sysDescr'), 'Linux i386')
>>> objectType.resolveWithMib(mibViewController)
ObjectType(ObjectIdentity('SNMPv2-MIB', 'sysDescr'), DisplayString('Linux i386'))
>>> str(objectType)
'SNMPv2-MIB::sysDescr."0" = Linux i386'
>>>
"""
if self._state & self.ST_CLEAM:
return self
self._args[0].resolveWithMib(mibViewController)
MibScalar, MibTableColumn = mibViewController.mibBuilder.importSymbols(
'SNMPv2-SMI', 'MibScalar', 'MibTableColumn')
if not isinstance(self._args[0].getMibNode(),
(MibScalar, MibTableColumn)):
if not isinstance(self._args[1], AbstractSimpleAsn1Item):
raise SmiError('MIB object %r is not OBJECT-TYPE '
'(MIB not loaded?)' % (self._args[0],))
self._state |= self.ST_CLEAM
return self
if isinstance(self._args[1], (rfc1905.UnSpecified,
rfc1905.NoSuchObject,
rfc1905.NoSuchInstance,
rfc1905.EndOfMibView)):
self._state |= self.ST_CLEAM
return self
syntax = self._args[0].getMibNode().getSyntax()
try:
self._args[1] = syntax.clone(self._args[1])
except PyAsn1Error as exc:
raise SmiError(
'MIB object %r having type %r failed to cast value '
'%r: %s' % (self._args[0].prettyPrint(),
syntax.__class__.__name__, self._args[1], exc))
if rfc1902.ObjectIdentifier().isSuperTypeOf(
self._args[1], matchConstraints=False):
self._args[1] = ObjectIdentity(
self._args[1]).resolveWithMib(mibViewController)
self._state |= self.ST_CLEAM
debug.logger & debug.FLAG_MIB and debug.logger(
'resolved %r syntax is %r' % (self._args[0], self._args[1]))
return self | python | def resolveWithMib(self, mibViewController):
"""Perform MIB variable ID and associated value conversion.
Parameters
----------
mibViewController : :py:class:`~pysnmp.smi.view.MibViewController`
class instance representing MIB browsing functionality.
Returns
-------
: :py:class:`~pysnmp.smi.rfc1902.ObjectType`
reference to itself
Raises
------
SmiError
In case of fatal MIB hanling errora
Notes
-----
Calling this method involves
:py:meth:`~pysnmp.smi.rfc1902.ObjectIdentity.resolveWithMib`
method invocation.
Examples
--------
>>> from pysmi.hlapi import varbinds
>>> mibViewController = varbinds.AbstractVarBinds.getMibViewController( engine )
>>> objectType = ObjectType(ObjectIdentity('SNMPv2-MIB', 'sysDescr'), 'Linux i386')
>>> objectType.resolveWithMib(mibViewController)
ObjectType(ObjectIdentity('SNMPv2-MIB', 'sysDescr'), DisplayString('Linux i386'))
>>> str(objectType)
'SNMPv2-MIB::sysDescr."0" = Linux i386'
>>>
"""
if self._state & self.ST_CLEAM:
return self
self._args[0].resolveWithMib(mibViewController)
MibScalar, MibTableColumn = mibViewController.mibBuilder.importSymbols(
'SNMPv2-SMI', 'MibScalar', 'MibTableColumn')
if not isinstance(self._args[0].getMibNode(),
(MibScalar, MibTableColumn)):
if not isinstance(self._args[1], AbstractSimpleAsn1Item):
raise SmiError('MIB object %r is not OBJECT-TYPE '
'(MIB not loaded?)' % (self._args[0],))
self._state |= self.ST_CLEAM
return self
if isinstance(self._args[1], (rfc1905.UnSpecified,
rfc1905.NoSuchObject,
rfc1905.NoSuchInstance,
rfc1905.EndOfMibView)):
self._state |= self.ST_CLEAM
return self
syntax = self._args[0].getMibNode().getSyntax()
try:
self._args[1] = syntax.clone(self._args[1])
except PyAsn1Error as exc:
raise SmiError(
'MIB object %r having type %r failed to cast value '
'%r: %s' % (self._args[0].prettyPrint(),
syntax.__class__.__name__, self._args[1], exc))
if rfc1902.ObjectIdentifier().isSuperTypeOf(
self._args[1], matchConstraints=False):
self._args[1] = ObjectIdentity(
self._args[1]).resolveWithMib(mibViewController)
self._state |= self.ST_CLEAM
debug.logger & debug.FLAG_MIB and debug.logger(
'resolved %r syntax is %r' % (self._args[0], self._args[1]))
return self | ['def', 'resolveWithMib', '(', 'self', ',', 'mibViewController', ')', ':', 'if', 'self', '.', '_state', '&', 'self', '.', 'ST_CLEAM', ':', 'return', 'self', 'self', '.', '_args', '[', '0', ']', '.', 'resolveWithMib', '(', 'mibViewController', ')', 'MibScalar', ',', 'MibTableColumn', '=', 'mibViewController', '.', 'mibBuilder', '.', 'importSymbols', '(', "'SNMPv2-SMI'", ',', "'MibScalar'", ',', "'MibTableColumn'", ')', 'if', 'not', 'isinstance', '(', 'self', '.', '_args', '[', '0', ']', '.', 'getMibNode', '(', ')', ',', '(', 'MibScalar', ',', 'MibTableColumn', ')', ')', ':', 'if', 'not', 'isinstance', '(', 'self', '.', '_args', '[', '1', ']', ',', 'AbstractSimpleAsn1Item', ')', ':', 'raise', 'SmiError', '(', "'MIB object %r is not OBJECT-TYPE '", "'(MIB not loaded?)'", '%', '(', 'self', '.', '_args', '[', '0', ']', ',', ')', ')', 'self', '.', '_state', '|=', 'self', '.', 'ST_CLEAM', 'return', 'self', 'if', 'isinstance', '(', 'self', '.', '_args', '[', '1', ']', ',', '(', 'rfc1905', '.', 'UnSpecified', ',', 'rfc1905', '.', 'NoSuchObject', ',', 'rfc1905', '.', 'NoSuchInstance', ',', 'rfc1905', '.', 'EndOfMibView', ')', ')', ':', 'self', '.', '_state', '|=', 'self', '.', 'ST_CLEAM', 'return', 'self', 'syntax', '=', 'self', '.', '_args', '[', '0', ']', '.', 'getMibNode', '(', ')', '.', 'getSyntax', '(', ')', 'try', ':', 'self', '.', '_args', '[', '1', ']', '=', 'syntax', '.', 'clone', '(', 'self', '.', '_args', '[', '1', ']', ')', 'except', 'PyAsn1Error', 'as', 'exc', ':', 'raise', 'SmiError', '(', "'MIB object %r having type %r failed to cast value '", "'%r: %s'", '%', '(', 'self', '.', '_args', '[', '0', ']', '.', 'prettyPrint', '(', ')', ',', 'syntax', '.', '__class__', '.', '__name__', ',', 'self', '.', '_args', '[', '1', ']', ',', 'exc', ')', ')', 'if', 'rfc1902', '.', 'ObjectIdentifier', '(', ')', '.', 'isSuperTypeOf', '(', 'self', '.', '_args', '[', '1', ']', ',', 'matchConstraints', '=', 'False', ')', ':', 'self', '.', '_args', '[', '1', ']', '=', 'ObjectIdentity', '(', 'self', '.', '_args', '[', '1', ']', ')', '.', 'resolveWithMib', '(', 'mibViewController', ')', 'self', '.', '_state', '|=', 'self', '.', 'ST_CLEAM', 'debug', '.', 'logger', '&', 'debug', '.', 'FLAG_MIB', 'and', 'debug', '.', 'logger', '(', "'resolved %r syntax is %r'", '%', '(', 'self', '.', '_args', '[', '0', ']', ',', 'self', '.', '_args', '[', '1', ']', ')', ')', 'return', 'self'] | Perform MIB variable ID and associated value conversion.
Parameters
----------
mibViewController : :py:class:`~pysnmp.smi.view.MibViewController`
class instance representing MIB browsing functionality.
Returns
-------
: :py:class:`~pysnmp.smi.rfc1902.ObjectType`
reference to itself
Raises
------
SmiError
In case of fatal MIB hanling errora
Notes
-----
Calling this method involves
:py:meth:`~pysnmp.smi.rfc1902.ObjectIdentity.resolveWithMib`
method invocation.
Examples
--------
>>> from pysmi.hlapi import varbinds
>>> mibViewController = varbinds.AbstractVarBinds.getMibViewController( engine )
>>> objectType = ObjectType(ObjectIdentity('SNMPv2-MIB', 'sysDescr'), 'Linux i386')
>>> objectType.resolveWithMib(mibViewController)
ObjectType(ObjectIdentity('SNMPv2-MIB', 'sysDescr'), DisplayString('Linux i386'))
>>> str(objectType)
'SNMPv2-MIB::sysDescr."0" = Linux i386'
>>> | ['Perform', 'MIB', 'variable', 'ID', 'and', 'associated', 'value', 'conversion', '.'] | train | https://github.com/etingof/pysnmp/blob/cde062dd42f67dfd2d7686286a322d40e9c3a4b7/pysnmp/smi/rfc1902.py#L911-L993 |
5,921 | SBRG/ssbio | ssbio/protein/sequence/seqprop.py | SeqProp.get_biopython_pepstats | def get_biopython_pepstats(self, clean_seq=False):
"""Run Biopython's built in ProteinAnalysis module and store statistics in the ``annotations`` attribute."""
if self.seq:
if clean_seq: # TODO: can make this a property of the SeqProp class
seq = self.seq_str.replace('X', '').replace('U', '')
else:
seq = self.seq_str
try:
pepstats = ssbio.protein.sequence.properties.residues.biopython_protein_analysis(seq)
except KeyError as e:
log.error('{}: unable to run ProteinAnalysis module, unknown amino acid {}'.format(self.id, e))
return
except ValueError as e:
log.error('{}: unable to run ProteinAnalysis module, {}'.format(self.id, e))
return
self.annotations.update(pepstats)
else:
raise ValueError('{}: no sequence available, unable to run ProteinAnalysis'.format(self.id)) | python | def get_biopython_pepstats(self, clean_seq=False):
"""Run Biopython's built in ProteinAnalysis module and store statistics in the ``annotations`` attribute."""
if self.seq:
if clean_seq: # TODO: can make this a property of the SeqProp class
seq = self.seq_str.replace('X', '').replace('U', '')
else:
seq = self.seq_str
try:
pepstats = ssbio.protein.sequence.properties.residues.biopython_protein_analysis(seq)
except KeyError as e:
log.error('{}: unable to run ProteinAnalysis module, unknown amino acid {}'.format(self.id, e))
return
except ValueError as e:
log.error('{}: unable to run ProteinAnalysis module, {}'.format(self.id, e))
return
self.annotations.update(pepstats)
else:
raise ValueError('{}: no sequence available, unable to run ProteinAnalysis'.format(self.id)) | ['def', 'get_biopython_pepstats', '(', 'self', ',', 'clean_seq', '=', 'False', ')', ':', 'if', 'self', '.', 'seq', ':', 'if', 'clean_seq', ':', '# TODO: can make this a property of the SeqProp class', 'seq', '=', 'self', '.', 'seq_str', '.', 'replace', '(', "'X'", ',', "''", ')', '.', 'replace', '(', "'U'", ',', "''", ')', 'else', ':', 'seq', '=', 'self', '.', 'seq_str', 'try', ':', 'pepstats', '=', 'ssbio', '.', 'protein', '.', 'sequence', '.', 'properties', '.', 'residues', '.', 'biopython_protein_analysis', '(', 'seq', ')', 'except', 'KeyError', 'as', 'e', ':', 'log', '.', 'error', '(', "'{}: unable to run ProteinAnalysis module, unknown amino acid {}'", '.', 'format', '(', 'self', '.', 'id', ',', 'e', ')', ')', 'return', 'except', 'ValueError', 'as', 'e', ':', 'log', '.', 'error', '(', "'{}: unable to run ProteinAnalysis module, {}'", '.', 'format', '(', 'self', '.', 'id', ',', 'e', ')', ')', 'return', 'self', '.', 'annotations', '.', 'update', '(', 'pepstats', ')', 'else', ':', 'raise', 'ValueError', '(', "'{}: no sequence available, unable to run ProteinAnalysis'", '.', 'format', '(', 'self', '.', 'id', ')', ')'] | Run Biopython's built in ProteinAnalysis module and store statistics in the ``annotations`` attribute. | ['Run', 'Biopython', 's', 'built', 'in', 'ProteinAnalysis', 'module', 'and', 'store', 'statistics', 'in', 'the', 'annotations', 'attribute', '.'] | train | https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/protein/sequence/seqprop.py#L660-L679 |
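Per its docstring, this wrapper feeds the (optionally cleaned) sequence to Biopython's `ProteinAnalysis`; a direct illustration of the kind of statistics that end up in `annotations` (the exact keys ssbio stores may differ, and the sequence here is an arbitrary example):

```python
from Bio.SeqUtils.ProtParam import ProteinAnalysis

seq = "MKTAYIAKQRQISFVKSHFSRQLEERLGLIEVQ"   # arbitrary example sequence
pa = ProteinAnalysis(seq)
print(round(pa.molecular_weight(), 1))      # molecular weight in Da
print(round(pa.isoelectric_point(), 2))     # theoretical pI
print(pa.count_amino_acids()["K"])          # lysine count
```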
5,922 | pgmpy/pgmpy | pgmpy/inference/ExactInference.py | VariableElimination.query | def query(self, variables, evidence=None, elimination_order=None, joint=True):
"""
Parameters
----------
variables: list
list of variables for which you want to compute the probability
evidence: dict
a dict key, value pair as {var: state_of_var_observed}
None if no evidence
elimination_order: list
order of variable eliminations (if nothing is provided) order is
computed automatically
joint: boolean (default: True)
If True, returns a Joint Distribution over `variables`.
If False, returns a dict of distributions over each of the `variables`.
Examples
--------
>>> from pgmpy.inference import VariableElimination
>>> from pgmpy.models import BayesianModel
>>> import numpy as np
>>> import pandas as pd
>>> values = pd.DataFrame(np.random.randint(low=0, high=2, size=(1000, 5)),
... columns=['A', 'B', 'C', 'D', 'E'])
>>> model = BayesianModel([('A', 'B'), ('C', 'B'), ('C', 'D'), ('B', 'E')])
>>> model.fit(values)
>>> inference = VariableElimination(model)
>>> phi_query = inference.query(['A', 'B'])
"""
return self._variable_elimination(variables, 'marginalize',
evidence=evidence, elimination_order=elimination_order,
joint=joint) | python | def query(self, variables, evidence=None, elimination_order=None, joint=True):
"""
Parameters
----------
variables: list
list of variables for which you want to compute the probability
evidence: dict
a dict key, value pair as {var: state_of_var_observed}
None if no evidence
elimination_order: list
order of variable eliminations (if nothing is provided) order is
computed automatically
joint: boolean (default: True)
If True, returns a Joint Distribution over `variables`.
If False, returns a dict of distributions over each of the `variables`.
Examples
--------
>>> from pgmpy.inference import VariableElimination
>>> from pgmpy.models import BayesianModel
>>> import numpy as np
>>> import pandas as pd
>>> values = pd.DataFrame(np.random.randint(low=0, high=2, size=(1000, 5)),
... columns=['A', 'B', 'C', 'D', 'E'])
>>> model = BayesianModel([('A', 'B'), ('C', 'B'), ('C', 'D'), ('B', 'E')])
>>> model.fit(values)
>>> inference = VariableElimination(model)
>>> phi_query = inference.query(['A', 'B'])
"""
return self._variable_elimination(variables, 'marginalize',
evidence=evidence, elimination_order=elimination_order,
joint=joint) | ['def', 'query', '(', 'self', ',', 'variables', ',', 'evidence', '=', 'None', ',', 'elimination_order', '=', 'None', ',', 'joint', '=', 'True', ')', ':', 'return', 'self', '.', '_variable_elimination', '(', 'variables', ',', "'marginalize'", ',', 'evidence', '=', 'evidence', ',', 'elimination_order', '=', 'elimination_order', ',', 'joint', '=', 'joint', ')'] | Parameters
----------
variables: list
list of variables for which you want to compute the probability
evidence: dict
a dict key, value pair as {var: state_of_var_observed}
None if no evidence
elimination_order: list
order of variable eliminations (if nothing is provided) order is
computed automatically
joint: boolean (default: True)
If True, returns a Joint Distribution over `variables`.
If False, returns a dict of distributions over each of the `variables`.
Examples
--------
>>> from pgmpy.inference import VariableElimination
>>> from pgmpy.models import BayesianModel
>>> import numpy as np
>>> import pandas as pd
>>> values = pd.DataFrame(np.random.randint(low=0, high=2, size=(1000, 5)),
... columns=['A', 'B', 'C', 'D', 'E'])
>>> model = BayesianModel([('A', 'B'), ('C', 'B'), ('C', 'D'), ('B', 'E')])
>>> model.fit(values)
>>> inference = VariableElimination(model)
>>> phi_query = inference.query(['A', 'B']) | ['Parameters', '----------', 'variables', ':', 'list', 'list', 'of', 'variables', 'for', 'which', 'you', 'want', 'to', 'compute', 'the', 'probability'] | train | https://github.com/pgmpy/pgmpy/blob/9381a66aba3c3871d3ccd00672b148d17d63239e/pgmpy/inference/ExactInference.py#L109-L143 |
5,923 | probcomp/crosscat | src/utils/unionfind.py | union | def union(a, b):
"""Assert equality of two nodes a and b so find(a) is find(b)."""
a = find(a)
b = find(b)
if a is not b:
if a.rank < b.rank:
a.parent = b
elif b.rank < a.rank:
b.parent = a
else:
b.parent = a
a.rank += 1 | python | def union(a, b):
"""Assert equality of two nodes a and b so find(a) is find(b)."""
a = find(a)
b = find(b)
if a is not b:
if a.rank < b.rank:
a.parent = b
elif b.rank < a.rank:
b.parent = a
else:
b.parent = a
a.rank += 1 | ['def', 'union', '(', 'a', ',', 'b', ')', ':', 'a', '=', 'find', '(', 'a', ')', 'b', '=', 'find', '(', 'b', ')', 'if', 'a', 'is', 'not', 'b', ':', 'if', 'a', '.', 'rank', '<', 'b', '.', 'rank', ':', 'a', '.', 'parent', '=', 'b', 'elif', 'b', '.', 'rank', '<', 'a', '.', 'rank', ':', 'b', '.', 'parent', '=', 'a', 'else', ':', 'b', '.', 'parent', '=', 'a', 'a', '.', 'rank', '+=', '1'] | Assert equality of two nodes a and b so find(a) is find(b). | ['Assert', 'equality', 'of', 'two', 'nodes', 'a', 'and', 'b', 'so', 'find', '(', 'a', ')', 'is', 'find', '(', 'b', ')', '.'] | train | https://github.com/probcomp/crosscat/blob/4a05bddb06a45f3b7b3e05e095720f16257d1535/src/utils/unionfind.py#L45-L56 |
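A minimal self-contained sketch of the same union-by-rank idea (this is not the crosscat `Node`/`find` implementation, just an illustration of how `find` and `union` cooperate):

```python
class Node(object):
    def __init__(self, value):
        self.value = value
        self.parent = self       # each node starts as its own root
        self.rank = 0

def find(node):
    # Path halving: point visited nodes closer to the root as we walk up.
    while node.parent is not node:
        node.parent = node.parent.parent
        node = node.parent
    return node

def union(a, b):
    a, b = find(a), find(b)
    if a is b:
        return
    if a.rank < b.rank:
        a.parent = b
    elif b.rank < a.rank:
        b.parent = a
    else:
        b.parent = a
        a.rank += 1

x, y, z = Node("x"), Node("y"), Node("z")
union(x, y)
union(y, z)
assert find(x) is find(z)    # all three now share one representative
```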
5,924 | jbloomlab/phydms | phydmslib/models.py | GammaDistributedModel.branchScale | def branchScale(self):
"""See docs for `Model` abstract base class."""
bscales = [m.branchScale for m in self._models]
return (self.catweights * bscales).sum() | python | def branchScale(self):
"""See docs for `Model` abstract base class."""
bscales = [m.branchScale for m in self._models]
return (self.catweights * bscales).sum() | ['def', 'branchScale', '(', 'self', ')', ':', 'bscales', '=', '[', 'm', '.', 'branchScale', 'for', 'm', 'in', 'self', '.', '_models', ']', 'return', '(', 'self', '.', 'catweights', '*', 'bscales', ')', '.', 'sum', '(', ')'] | See docs for `Model` abstract base class. | ['See', 'docs', 'for', 'Model', 'abstract', 'base', 'class', '.'] | train | https://github.com/jbloomlab/phydms/blob/9cdebc10bafbe543c552d79486c7f950780ed3c0/phydmslib/models.py#L2226-L2229 |
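The return value is simply a category-weight-weighted average of the per-category branch scales; a tiny numpy illustration with made-up numbers (the weights and scales below are hypothetical, not phydms output):

```python
import numpy as np

catweights = np.array([0.25, 0.25, 0.25, 0.25])   # hypothetical gamma-category weights
bscales = np.array([0.8, 1.0, 1.2, 1.4])          # hypothetical per-category branch scales
print((catweights * bscales).sum())               # 1.1
```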
5,925 | secdev/scapy | scapy/arch/bpf/supersocket.py | bpf_select | def bpf_select(fds_list, timeout=None):
"""A call to recv() can return several frames. This functions hides the fact
that some frames are read from the internal buffer."""
# Check file descriptors types
bpf_scks_buffered = list()
select_fds = list()
for tmp_fd in fds_list:
# Specific BPF sockets: get buffers status
if isBPFSocket(tmp_fd) and tmp_fd.buffered_frames():
bpf_scks_buffered.append(tmp_fd)
continue
# Regular file descriptors or empty BPF buffer
select_fds.append(tmp_fd)
if select_fds:
# Call select for sockets with empty buffers
if timeout is None:
timeout = 0.05
ready_list, _, _ = select(select_fds, [], [], timeout)
return bpf_scks_buffered + ready_list
else:
return bpf_scks_buffered | python | def bpf_select(fds_list, timeout=None):
"""A call to recv() can return several frames. This functions hides the fact
that some frames are read from the internal buffer."""
# Check file descriptors types
bpf_scks_buffered = list()
select_fds = list()
for tmp_fd in fds_list:
# Specific BPF sockets: get buffers status
if isBPFSocket(tmp_fd) and tmp_fd.buffered_frames():
bpf_scks_buffered.append(tmp_fd)
continue
# Regular file descriptors or empty BPF buffer
select_fds.append(tmp_fd)
if select_fds:
# Call select for sockets with empty buffers
if timeout is None:
timeout = 0.05
ready_list, _, _ = select(select_fds, [], [], timeout)
return bpf_scks_buffered + ready_list
else:
return bpf_scks_buffered | ['def', 'bpf_select', '(', 'fds_list', ',', 'timeout', '=', 'None', ')', ':', '# Check file descriptors types', 'bpf_scks_buffered', '=', 'list', '(', ')', 'select_fds', '=', 'list', '(', ')', 'for', 'tmp_fd', 'in', 'fds_list', ':', '# Specific BPF sockets: get buffers status', 'if', 'isBPFSocket', '(', 'tmp_fd', ')', 'and', 'tmp_fd', '.', 'buffered_frames', '(', ')', ':', 'bpf_scks_buffered', '.', 'append', '(', 'tmp_fd', ')', 'continue', '# Regular file descriptors or empty BPF buffer', 'select_fds', '.', 'append', '(', 'tmp_fd', ')', 'if', 'select_fds', ':', '# Call select for sockets with empty buffers', 'if', 'timeout', 'is', 'None', ':', 'timeout', '=', '0.05', 'ready_list', ',', '_', ',', '_', '=', 'select', '(', 'select_fds', ',', '[', ']', ',', '[', ']', ',', 'timeout', ')', 'return', 'bpf_scks_buffered', '+', 'ready_list', 'else', ':', 'return', 'bpf_scks_buffered'] | A call to recv() can return several frames. This functions hides the fact
that some frames are read from the internal buffer. | ['A', 'call', 'to', 'recv', '()', 'can', 'return', 'several', 'frames', '.', 'This', 'functions', 'hides', 'the', 'fact', 'that', 'some', 'frames', 'are', 'read', 'from', 'the', 'internal', 'buffer', '.'] | train | https://github.com/secdev/scapy/blob/3ffe757c184017dd46464593a8f80f85abc1e79a/scapy/arch/bpf/supersocket.py#L369-L394 |
5,926 | spulec/moto | scripts/scaffold.py | initialize_service | def initialize_service(service, operation, api_protocol):
"""create lib and test dirs if not exist
"""
lib_dir = get_lib_dir(service)
test_dir = get_test_dir(service)
print_progress('Initializing service', service, 'green')
client = boto3.client(service)
service_class = client.__class__.__name__
endpoint_prefix = client._service_model.endpoint_prefix
tmpl_context = {
'service': service,
'service_class': service_class,
'endpoint_prefix': endpoint_prefix,
'api_protocol': api_protocol,
'escaped_service': get_escaped_service(service)
}
# initialize service directory
if os.path.exists(lib_dir):
print_progress('skip creating', lib_dir, 'yellow')
else:
print_progress('creating', lib_dir, 'green')
os.makedirs(lib_dir)
tmpl_dir = os.path.join(TEMPLATE_DIR, 'lib')
for tmpl_filename in os.listdir(tmpl_dir):
render_template(
tmpl_dir, tmpl_filename, tmpl_context, service
)
# initialize test directory
if os.path.exists(test_dir):
print_progress('skip creating', test_dir, 'yellow')
else:
print_progress('creating', test_dir, 'green')
os.makedirs(test_dir)
tmpl_dir = os.path.join(TEMPLATE_DIR, 'test')
for tmpl_filename in os.listdir(tmpl_dir):
alt_filename = 'test_{}.py'.format(get_escaped_service(service)) if tmpl_filename == 'test_service.py.j2' else None
render_template(
tmpl_dir, tmpl_filename, tmpl_context, service, alt_filename
)
# append mock to init files
append_mock_to_init_py(service)
append_mock_import_to_backends_py(service)
append_mock_dict_to_backends_py(service) | python | def initialize_service(service, operation, api_protocol):
"""create lib and test dirs if not exist
"""
lib_dir = get_lib_dir(service)
test_dir = get_test_dir(service)
print_progress('Initializing service', service, 'green')
client = boto3.client(service)
service_class = client.__class__.__name__
endpoint_prefix = client._service_model.endpoint_prefix
tmpl_context = {
'service': service,
'service_class': service_class,
'endpoint_prefix': endpoint_prefix,
'api_protocol': api_protocol,
'escaped_service': get_escaped_service(service)
}
# initialize service directory
if os.path.exists(lib_dir):
print_progress('skip creating', lib_dir, 'yellow')
else:
print_progress('creating', lib_dir, 'green')
os.makedirs(lib_dir)
tmpl_dir = os.path.join(TEMPLATE_DIR, 'lib')
for tmpl_filename in os.listdir(tmpl_dir):
render_template(
tmpl_dir, tmpl_filename, tmpl_context, service
)
# initialize test directory
if os.path.exists(test_dir):
print_progress('skip creating', test_dir, 'yellow')
else:
print_progress('creating', test_dir, 'green')
os.makedirs(test_dir)
tmpl_dir = os.path.join(TEMPLATE_DIR, 'test')
for tmpl_filename in os.listdir(tmpl_dir):
alt_filename = 'test_{}.py'.format(get_escaped_service(service)) if tmpl_filename == 'test_service.py.j2' else None
render_template(
tmpl_dir, tmpl_filename, tmpl_context, service, alt_filename
)
# append mock to init files
append_mock_to_init_py(service)
append_mock_import_to_backends_py(service)
append_mock_dict_to_backends_py(service) | ['def', 'initialize_service', '(', 'service', ',', 'operation', ',', 'api_protocol', ')', ':', 'lib_dir', '=', 'get_lib_dir', '(', 'service', ')', 'test_dir', '=', 'get_test_dir', '(', 'service', ')', 'print_progress', '(', "'Initializing service'", ',', 'service', ',', "'green'", ')', 'client', '=', 'boto3', '.', 'client', '(', 'service', ')', 'service_class', '=', 'client', '.', '__class__', '.', '__name__', 'endpoint_prefix', '=', 'client', '.', '_service_model', '.', 'endpoint_prefix', 'tmpl_context', '=', '{', "'service'", ':', 'service', ',', "'service_class'", ':', 'service_class', ',', "'endpoint_prefix'", ':', 'endpoint_prefix', ',', "'api_protocol'", ':', 'api_protocol', ',', "'escaped_service'", ':', 'get_escaped_service', '(', 'service', ')', '}', '# initialize service directory', 'if', 'os', '.', 'path', '.', 'exists', '(', 'lib_dir', ')', ':', 'print_progress', '(', "'skip creating'", ',', 'lib_dir', ',', "'yellow'", ')', 'else', ':', 'print_progress', '(', "'creating'", ',', 'lib_dir', ',', "'green'", ')', 'os', '.', 'makedirs', '(', 'lib_dir', ')', 'tmpl_dir', '=', 'os', '.', 'path', '.', 'join', '(', 'TEMPLATE_DIR', ',', "'lib'", ')', 'for', 'tmpl_filename', 'in', 'os', '.', 'listdir', '(', 'tmpl_dir', ')', ':', 'render_template', '(', 'tmpl_dir', ',', 'tmpl_filename', ',', 'tmpl_context', ',', 'service', ')', '# initialize test directory', 'if', 'os', '.', 'path', '.', 'exists', '(', 'test_dir', ')', ':', 'print_progress', '(', "'skip creating'", ',', 'test_dir', ',', "'yellow'", ')', 'else', ':', 'print_progress', '(', "'creating'", ',', 'test_dir', ',', "'green'", ')', 'os', '.', 'makedirs', '(', 'test_dir', ')', 'tmpl_dir', '=', 'os', '.', 'path', '.', 'join', '(', 'TEMPLATE_DIR', ',', "'test'", ')', 'for', 'tmpl_filename', 'in', 'os', '.', 'listdir', '(', 'tmpl_dir', ')', ':', 'alt_filename', '=', "'test_{}.py'", '.', 'format', '(', 'get_escaped_service', '(', 'service', ')', ')', 'if', 'tmpl_filename', '==', "'test_service.py.j2'", 'else', 'None', 'render_template', '(', 'tmpl_dir', ',', 'tmpl_filename', ',', 'tmpl_context', ',', 'service', ',', 'alt_filename', ')', '# append mock to init files', 'append_mock_to_init_py', '(', 'service', ')', 'append_mock_import_to_backends_py', '(', 'service', ')', 'append_mock_dict_to_backends_py', '(', 'service', ')'] | create lib and test dirs if not exist | ['create', 'lib', 'and', 'test', 'dirs', 'if', 'not', 'exist'] | train | https://github.com/spulec/moto/blob/4a286c4bc288933bb023396e2784a6fdbb966bc9/scripts/scaffold.py#L167-L216 |
5,927 | Fantomas42/django-blog-zinnia | zinnia/templatetags/zinnia.py | get_recent_linkbacks | def get_recent_linkbacks(number=5,
template='zinnia/tags/linkbacks_recent.html'):
"""
Return the most recent linkbacks.
"""
entry_published_pks = map(smart_text,
Entry.published.values_list('id', flat=True))
content_type = ContentType.objects.get_for_model(Entry)
linkbacks = get_comment_model().objects.filter(
content_type=content_type,
object_pk__in=entry_published_pks,
flags__flag__in=[PINGBACK, TRACKBACK],
is_public=True).order_by('-pk')[:number]
linkbacks = linkbacks.prefetch_related('content_object')
return {'template': template,
'linkbacks': linkbacks} | python | def get_recent_linkbacks(number=5,
template='zinnia/tags/linkbacks_recent.html'):
"""
Return the most recent linkbacks.
"""
entry_published_pks = map(smart_text,
Entry.published.values_list('id', flat=True))
content_type = ContentType.objects.get_for_model(Entry)
linkbacks = get_comment_model().objects.filter(
content_type=content_type,
object_pk__in=entry_published_pks,
flags__flag__in=[PINGBACK, TRACKBACK],
is_public=True).order_by('-pk')[:number]
linkbacks = linkbacks.prefetch_related('content_object')
return {'template': template,
'linkbacks': linkbacks} | ['def', 'get_recent_linkbacks', '(', 'number', '=', '5', ',', 'template', '=', "'zinnia/tags/linkbacks_recent.html'", ')', ':', 'entry_published_pks', '=', 'map', '(', 'smart_text', ',', 'Entry', '.', 'published', '.', 'values_list', '(', "'id'", ',', 'flat', '=', 'True', ')', ')', 'content_type', '=', 'ContentType', '.', 'objects', '.', 'get_for_model', '(', 'Entry', ')', 'linkbacks', '=', 'get_comment_model', '(', ')', '.', 'objects', '.', 'filter', '(', 'content_type', '=', 'content_type', ',', 'object_pk__in', '=', 'entry_published_pks', ',', 'flags__flag__in', '=', '[', 'PINGBACK', ',', 'TRACKBACK', ']', ',', 'is_public', '=', 'True', ')', '.', 'order_by', '(', "'-pk'", ')', '[', ':', 'number', ']', 'linkbacks', '=', 'linkbacks', '.', 'prefetch_related', '(', "'content_object'", ')', 'return', '{', "'template'", ':', 'template', ',', "'linkbacks'", ':', 'linkbacks', '}'] | Return the most recent linkbacks. | ['Return', 'the', 'most', 'recent', 'linkbacks', '.'] | train | https://github.com/Fantomas42/django-blog-zinnia/blob/b4949304b104a8e1a7a7a0773cbfd024313c3a15/zinnia/templatetags/zinnia.py#L245-L263 |
5,928 | gautammishra/lyft-rides-python-sdk | lyft_rides/request.py | Request._build_headers | def _build_headers(self, method, auth_session):
"""Create headers for the request.
Parameters
method (str)
HTTP method (e.g. 'POST').
auth_session (Session)
The Session object containing OAuth 2.0 credentials.
Returns
headers (dict)
Dictionary of access headers to attach to request.
Raises
LyftIllegalState (ApiError)
Raised if headers are invalid.
"""
token_type = auth_session.token_type
token = auth_session.oauth2credential.access_token
if not self._authorization_headers_valid(token_type, token):
message = 'Invalid token_type or token.'
raise LyftIllegalState(message)
headers = {
'Authorization': ' '.join([token_type, token]),
}
if method in http.BODY_METHODS:
headers.update(http.DEFAULT_CONTENT_HEADERS)
return headers | python | def _build_headers(self, method, auth_session):
"""Create headers for the request.
Parameters
method (str)
HTTP method (e.g. 'POST').
auth_session (Session)
The Session object containing OAuth 2.0 credentials.
Returns
headers (dict)
Dictionary of access headers to attach to request.
Raises
LyftIllegalState (ApiError)
Raised if headers are invalid.
"""
token_type = auth_session.token_type
token = auth_session.oauth2credential.access_token
if not self._authorization_headers_valid(token_type, token):
message = 'Invalid token_type or token.'
raise LyftIllegalState(message)
headers = {
'Authorization': ' '.join([token_type, token]),
}
if method in http.BODY_METHODS:
headers.update(http.DEFAULT_CONTENT_HEADERS)
return headers | ['def', '_build_headers', '(', 'self', ',', 'method', ',', 'auth_session', ')', ':', 'token_type', '=', 'auth_session', '.', 'token_type', 'token', '=', 'auth_session', '.', 'oauth2credential', '.', 'access_token', 'if', 'not', 'self', '.', '_authorization_headers_valid', '(', 'token_type', ',', 'token', ')', ':', 'message', '=', "'Invalid token_type or token.'", 'raise', 'LyftIllegalState', '(', 'message', ')', 'headers', '=', '{', "'Authorization'", ':', "' '", '.', 'join', '(', '[', 'token_type', ',', 'token', ']', ')', ',', '}', 'if', 'method', 'in', 'http', '.', 'BODY_METHODS', ':', 'headers', '.', 'update', '(', 'http', '.', 'DEFAULT_CONTENT_HEADERS', ')', 'return', 'headers'] | Create headers for the request.
Parameters
method (str)
HTTP method (e.g. 'POST').
auth_session (Session)
The Session object containing OAuth 2.0 credentials.
Returns
headers (dict)
Dictionary of access headers to attach to request.
Raises
LyftIllegalState (ApiError)
Raised if headers are invalid. | ['Create', 'headers', 'for', 'the', 'request', '.', 'Parameters', 'method', '(', 'str', ')', 'HTTP', 'method', '(', 'e', '.', 'g', '.', 'POST', ')', '.', 'auth_session', '(', 'Session', ')', 'The', 'Session', 'object', 'containing', 'OAuth', '2', '.', '0', 'credentials', '.', 'Returns', 'headers', '(', 'dict', ')', 'Dictionary', 'of', 'access', 'headers', 'to', 'attach', 'to', 'request', '.', 'Raises', 'LyftIllegalState', '(', 'ApiError', ')', 'Raised', 'if', 'headers', 'are', 'invalid', '.'] | train | https://github.com/gautammishra/lyft-rides-python-sdk/blob/b6d96a0fceaf7dc3425153c418a8e25c57803431/lyft_rides/request.py#L131-L160 |
5,929 | llllllllll/codetransformer | codetransformer/decompiler/_343.py | make_if_statement | def make_if_statement(instr, queue, stack, context):
"""
Make an ast.If block from a POP_JUMP_IF_TRUE or POP_JUMP_IF_FALSE.
"""
test_expr = make_expr(stack)
if isinstance(instr, instrs.POP_JUMP_IF_TRUE):
test_expr = ast.UnaryOp(op=ast.Not(), operand=test_expr)
first_block = popwhile(op.is_not(instr.arg), queue, side='left')
if isinstance(first_block[-1], instrs.RETURN_VALUE):
body = instrs_to_body(first_block, context)
return ast.If(test=test_expr, body=body, orelse=[])
jump_to_end = expect(
first_block.pop(), instrs.JUMP_FORWARD, "at end of if-block"
)
body = instrs_to_body(first_block, context)
# First instruction after the whole if-block.
end = jump_to_end.arg
if instr.arg is jump_to_end.arg:
orelse = []
else:
orelse = instrs_to_body(
popwhile(op.is_not(end), queue, side='left'),
context,
)
return ast.If(test=test_expr, body=body, orelse=orelse) | python | def make_if_statement(instr, queue, stack, context):
"""
Make an ast.If block from a POP_JUMP_IF_TRUE or POP_JUMP_IF_FALSE.
"""
test_expr = make_expr(stack)
if isinstance(instr, instrs.POP_JUMP_IF_TRUE):
test_expr = ast.UnaryOp(op=ast.Not(), operand=test_expr)
first_block = popwhile(op.is_not(instr.arg), queue, side='left')
if isinstance(first_block[-1], instrs.RETURN_VALUE):
body = instrs_to_body(first_block, context)
return ast.If(test=test_expr, body=body, orelse=[])
jump_to_end = expect(
first_block.pop(), instrs.JUMP_FORWARD, "at end of if-block"
)
body = instrs_to_body(first_block, context)
# First instruction after the whole if-block.
end = jump_to_end.arg
if instr.arg is jump_to_end.arg:
orelse = []
else:
orelse = instrs_to_body(
popwhile(op.is_not(end), queue, side='left'),
context,
)
return ast.If(test=test_expr, body=body, orelse=orelse) | ['def', 'make_if_statement', '(', 'instr', ',', 'queue', ',', 'stack', ',', 'context', ')', ':', 'test_expr', '=', 'make_expr', '(', 'stack', ')', 'if', 'isinstance', '(', 'instr', ',', 'instrs', '.', 'POP_JUMP_IF_TRUE', ')', ':', 'test_expr', '=', 'ast', '.', 'UnaryOp', '(', 'op', '=', 'ast', '.', 'Not', '(', ')', ',', 'operand', '=', 'test_expr', ')', 'first_block', '=', 'popwhile', '(', 'op', '.', 'is_not', '(', 'instr', '.', 'arg', ')', ',', 'queue', ',', 'side', '=', "'left'", ')', 'if', 'isinstance', '(', 'first_block', '[', '-', '1', ']', ',', 'instrs', '.', 'RETURN_VALUE', ')', ':', 'body', '=', 'instrs_to_body', '(', 'first_block', ',', 'context', ')', 'return', 'ast', '.', 'If', '(', 'test', '=', 'test_expr', ',', 'body', '=', 'body', ',', 'orelse', '=', '[', ']', ')', 'jump_to_end', '=', 'expect', '(', 'first_block', '.', 'pop', '(', ')', ',', 'instrs', '.', 'JUMP_FORWARD', ',', '"at end of if-block"', ')', 'body', '=', 'instrs_to_body', '(', 'first_block', ',', 'context', ')', '# First instruction after the whole if-block.', 'end', '=', 'jump_to_end', '.', 'arg', 'if', 'instr', '.', 'arg', 'is', 'jump_to_end', '.', 'arg', ':', 'orelse', '=', '[', ']', 'else', ':', 'orelse', '=', 'instrs_to_body', '(', 'popwhile', '(', 'op', '.', 'is_not', '(', 'end', ')', ',', 'queue', ',', 'side', '=', "'left'", ')', ',', 'context', ',', ')', 'return', 'ast', '.', 'If', '(', 'test', '=', 'test_expr', ',', 'body', '=', 'body', ',', 'orelse', '=', 'orelse', ')'] | Make an ast.If block from a POP_JUMP_IF_TRUE or POP_JUMP_IF_FALSE. | ['Make', 'an', 'ast', '.', 'If', 'block', 'from', 'a', 'POP_JUMP_IF_TRUE', 'or', 'POP_JUMP_IF_FALSE', '.'] | train | https://github.com/llllllllll/codetransformer/blob/c5f551e915df45adc7da7e0b1b635f0cc6a1bb27/codetransformer/decompiler/_343.py#L182-L211 |
5,930 | wdecoster/nanomath | nanomath/nanomath.py | get_N50 | def get_N50(readlengths):
"""Calculate read length N50.
Based on https://github.com/PapenfussLab/Mungo/blob/master/bin/fasta_stats.py
"""
return readlengths[np.where(np.cumsum(readlengths) >= 0.5 * np.sum(readlengths))[0][0]] | python | def get_N50(readlengths):
"""Calculate read length N50.
Based on https://github.com/PapenfussLab/Mungo/blob/master/bin/fasta_stats.py
"""
return readlengths[np.where(np.cumsum(readlengths) >= 0.5 * np.sum(readlengths))[0][0]] | ['def', 'get_N50', '(', 'readlengths', ')', ':', 'return', 'readlengths', '[', 'np', '.', 'where', '(', 'np', '.', 'cumsum', '(', 'readlengths', ')', '>=', '0.5', '*', 'np', '.', 'sum', '(', 'readlengths', ')', ')', '[', '0', ']', '[', '0', ']', ']'] | Calculate read length N50.
Based on https://github.com/PapenfussLab/Mungo/blob/master/bin/fasta_stats.py | ['Calculate', 'read', 'length', 'N50', '.'] | train | https://github.com/wdecoster/nanomath/blob/38ede9f957d5c53e2ba3648641e4f23e93b49132/nanomath/nanomath.py#L54-L59 |
5,931 | networks-lab/metaknowledge | metaknowledge/graphHelpers.py | getNodeDegrees | def getNodeDegrees(grph, weightString = "weight", strictMode = False, returnType = int, edgeType = 'bi'):
"""
Retunrs a dictionary of nodes to their degrees, the degree is determined by adding the weight of edge with the weight being the string weightString that gives the name of the attribute of each edge containng thier weight. The Weights are then converted to the type returnType. If weightString is give as False instead each edge is counted as 1.
edgeType, takes in one of three strings: 'bi', 'in', 'out'. 'bi' means both nodes on the edge count it, 'out' mans only the one the edge comes form counts it and 'in' means only the node the edge goes to counts it. 'bi' is the default. Use only on directional graphs as otherwise the selected nodes is random.
"""
ndsDict = {}
for nd in grph.nodes():
ndsDict[nd] = returnType(0)
for e in grph.edges(data = True):
if weightString:
try:
edgVal = returnType(e[2][weightString])
except KeyError:
if strictMode:
raise KeyError("The edge from " + str(e[0]) + " to " + str(e[1]) + " does not have the attribute: '" + str(weightString) + "'")
else:
edgVal = returnType(1)
else:
edgVal = returnType(1)
if edgeType == 'bi':
ndsDict[e[0]] += edgVal
ndsDict[e[1]] += edgVal
elif edgeType == 'in':
ndsDict[e[1]] += edgVal
elif edgeType == 'out':
ndsDict[e[0]] += edgVal
else:
raise ValueError("edgeType must be 'bi', 'in', or 'out'")
return ndsDict | python | def getNodeDegrees(grph, weightString = "weight", strictMode = False, returnType = int, edgeType = 'bi'):
"""
Retunrs a dictionary of nodes to their degrees, the degree is determined by adding the weight of edge with the weight being the string weightString that gives the name of the attribute of each edge containng thier weight. The Weights are then converted to the type returnType. If weightString is give as False instead each edge is counted as 1.
edgeType, takes in one of three strings: 'bi', 'in', 'out'. 'bi' means both nodes on the edge count it, 'out' mans only the one the edge comes form counts it and 'in' means only the node the edge goes to counts it. 'bi' is the default. Use only on directional graphs as otherwise the selected nodes is random.
"""
ndsDict = {}
for nd in grph.nodes():
ndsDict[nd] = returnType(0)
for e in grph.edges(data = True):
if weightString:
try:
edgVal = returnType(e[2][weightString])
except KeyError:
if strictMode:
raise KeyError("The edge from " + str(e[0]) + " to " + str(e[1]) + " does not have the attribute: '" + str(weightString) + "'")
else:
edgVal = returnType(1)
else:
edgVal = returnType(1)
if edgeType == 'bi':
ndsDict[e[0]] += edgVal
ndsDict[e[1]] += edgVal
elif edgeType == 'in':
ndsDict[e[1]] += edgVal
elif edgeType == 'out':
ndsDict[e[0]] += edgVal
else:
raise ValueError("edgeType must be 'bi', 'in', or 'out'")
return ndsDict | ['def', 'getNodeDegrees', '(', 'grph', ',', 'weightString', '=', '"weight"', ',', 'strictMode', '=', 'False', ',', 'returnType', '=', 'int', ',', 'edgeType', '=', "'bi'", ')', ':', 'ndsDict', '=', '{', '}', 'for', 'nd', 'in', 'grph', '.', 'nodes', '(', ')', ':', 'ndsDict', '[', 'nd', ']', '=', 'returnType', '(', '0', ')', 'for', 'e', 'in', 'grph', '.', 'edges', '(', 'data', '=', 'True', ')', ':', 'if', 'weightString', ':', 'try', ':', 'edgVal', '=', 'returnType', '(', 'e', '[', '2', ']', '[', 'weightString', ']', ')', 'except', 'KeyError', ':', 'if', 'strictMode', ':', 'raise', 'KeyError', '(', '"The edge from "', '+', 'str', '(', 'e', '[', '0', ']', ')', '+', '" to "', '+', 'str', '(', 'e', '[', '1', ']', ')', '+', '" does not have the attribute: \'"', '+', 'str', '(', 'weightString', ')', '+', '"\'"', ')', 'else', ':', 'edgVal', '=', 'returnType', '(', '1', ')', 'else', ':', 'edgVal', '=', 'returnType', '(', '1', ')', 'if', 'edgeType', '==', "'bi'", ':', 'ndsDict', '[', 'e', '[', '0', ']', ']', '+=', 'edgVal', 'ndsDict', '[', 'e', '[', '1', ']', ']', '+=', 'edgVal', 'elif', 'edgeType', '==', "'in'", ':', 'ndsDict', '[', 'e', '[', '1', ']', ']', '+=', 'edgVal', 'elif', 'edgeType', '==', "'out'", ':', 'ndsDict', '[', 'e', '[', '0', ']', ']', '+=', 'edgVal', 'else', ':', 'raise', 'ValueError', '(', '"edgeType must be \'bi\', \'in\', or \'out\'"', ')', 'return', 'ndsDict'] | Retunrs a dictionary of nodes to their degrees, the degree is determined by adding the weight of edge with the weight being the string weightString that gives the name of the attribute of each edge containng thier weight. The Weights are then converted to the type returnType. If weightString is give as False instead each edge is counted as 1.
edgeType, takes in one of three strings: 'bi', 'in', 'out'. 'bi' means both nodes on the edge count it, 'out' mans only the one the edge comes form counts it and 'in' means only the node the edge goes to counts it. 'bi' is the default. Use only on directional graphs as otherwise the selected nodes is random. | ['Retunrs', 'a', 'dictionary', 'of', 'nodes', 'to', 'their', 'degrees', 'the', 'degree', 'is', 'determined', 'by', 'adding', 'the', 'weight', 'of', 'edge', 'with', 'the', 'weight', 'being', 'the', 'string', 'weightString', 'that', 'gives', 'the', 'name', 'of', 'the', 'attribute', 'of', 'each', 'edge', 'containng', 'thier', 'weight', '.', 'The', 'Weights', 'are', 'then', 'converted', 'to', 'the', 'type', 'returnType', '.', 'If', 'weightString', 'is', 'give', 'as', 'False', 'instead', 'each', 'edge', 'is', 'counted', 'as', '1', '.'] | train | https://github.com/networks-lab/metaknowledge/blob/8162bf95e66bb6f9916081338e6e2a6132faff75/metaknowledge/graphHelpers.py#L457-L486 |
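Assuming the `getNodeDegrees` function shown above is in scope, a short networkx example showing how the `edgeType` argument changes the weighted degree counts:

```python
import networkx as nx

# Assumes getNodeDegrees() from the record above is importable/in scope.
g = nx.DiGraph()
g.add_edge("A", "B", weight=2)
g.add_edge("B", "C", weight=3)

print(getNodeDegrees(g, edgeType="bi"))    # {'A': 2, 'B': 5, 'C': 3}
print(getNodeDegrees(g, edgeType="in"))    # {'A': 0, 'B': 2, 'C': 3}
print(getNodeDegrees(g, edgeType="out"))   # {'A': 2, 'B': 3, 'C': 0}
```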
5,932 | kevinpt/hdlparse | hdlparse/verilog_parser.py | VerilogExtractor.extract_objects_from_source | def extract_objects_from_source(self, text, type_filter=None):
'''Extract object declarations from a text buffer
Args:
text (str): Source code to parse
type_filter (class, optional): Object class to filter results
Returns:
List of parsed objects.
'''
objects = parse_verilog(text)
if type_filter:
objects = [o for o in objects if isinstance(o, type_filter)]
return objects | python | def extract_objects_from_source(self, text, type_filter=None):
'''Extract object declarations from a text buffer
Args:
text (str): Source code to parse
type_filter (class, optional): Object class to filter results
Returns:
List of parsed objects.
'''
objects = parse_verilog(text)
if type_filter:
objects = [o for o in objects if isinstance(o, type_filter)]
return objects | ['def', 'extract_objects_from_source', '(', 'self', ',', 'text', ',', 'type_filter', '=', 'None', ')', ':', 'objects', '=', 'parse_verilog', '(', 'text', ')', 'if', 'type_filter', ':', 'objects', '=', '[', 'o', 'for', 'o', 'in', 'objects', 'if', 'isinstance', '(', 'o', ',', 'type_filter', ')', ']', 'return', 'objects'] | Extract object declarations from a text buffer
Args:
text (str): Source code to parse
type_filter (class, optional): Object class to filter results
Returns:
List of parsed objects. | ['Extract', 'object', 'declarations', 'from', 'a', 'text', 'buffer'] | train | https://github.com/kevinpt/hdlparse/blob/be7cdab08a8c18815cc4504003ce9ca7fff41022/hdlparse/verilog_parser.py#L249-L263 |
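A hedged usage sketch, assuming hdlparse is installed and that parsed module objects expose a `name` attribute as described in the hdlparse documentation (the Verilog snippet is an arbitrary example):

```python
import hdlparse.verilog_parser as vlog

source = '''
module adder(input wire a, input wire b, output wire sum);
  assign sum = a ^ b;
endmodule
'''

extractor = vlog.VerilogExtractor()
for m in extractor.extract_objects_from_source(source):
    # 'name' is the attribute the hdlparse docs describe for parsed modules
    # (assumed here, not verified against every hdlparse version).
    print(m.name)
```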
5,933 | thespacedoctor/rockAtlas | rockAtlas/bookkeeping/bookkeeper.py | bookkeeper.import_new_atlas_pointings | def import_new_atlas_pointings(
self,
recent=False):
"""
*Import any new ATLAS pointings from the atlas3/atlas4 databases into the ``atlas_exposures`` table of the Atlas Movers database*
**Key Arguments:**
- ``recent`` -- only sync the most recent 2 weeks of data (speeds things up)
**Return:**
- None
**Usage:**
.. code-block:: python
from rockAtlas.bookkeeping import bookkeeper
bk = bookkeeper(
log=log,
settings=settings
)
bk.import_new_atlas_pointings()
"""
self.log.info('starting the ``import_new_atlas_pointings`` method')
if recent:
mjd = mjdnow(
log=self.log
).get_mjd()
recent = mjd - 14
recent = " mjd_obs > %(recent)s " % locals()
else:
recent = "1=1"
# SELECT ALL OF THE POINTING INFO REQUIRED FROM THE ATLAS3 DATABASE
sqlQuery = u"""
SELECT
`expname`,
`dec` as `decDeg`,
`exptime` as `exp_time`,
`filter`,
`mjd_obs` as `mjd`,
`ra` as `raDeg`,
if(mjd_obs<57855.0,mag5sig-0.75,mag5sig) as `limiting_magnitude`,
`object` as `atlas_object_id` from atlas_metadata where %(recent)s and object like "TA%%" order by mjd_obs desc;
""" % locals()
rows = readquery(
log=self.log,
sqlQuery=sqlQuery,
dbConn=self.atlas3DbConn,
quiet=False
)
dbSettings = self.settings["database settings"]["atlasMovers"]
# TIDY RESULTS BEFORE IMPORT
entries = list(rows)
if len(rows) > 0:
# ADD THE NEW RESULTS TO THE atlas_exposures TABLE
insert_list_of_dictionaries_into_database_tables(
dbConn=self.atlasMoversDBConn,
log=self.log,
dictList=entries,
dbTableName="atlas_exposures",
uniqueKeyList=["expname"],
dateModified=False,
batchSize=10000,
replace=True,
dbSettings=dbSettings
)
recent = recent.replace("mjd_obs", "mjd")
# SELECT ALL OF THE POINTING INFO REQUIRED FROM THE ATLAS4 DATABASE
sqlQuery = u"""
SELECT
`obs` as `expname`,
`dec` as `decDeg`,
`texp` as `exp_time`,
`filt` as `filter`,
`mjd`,
`ra` as `raDeg`,
`mag5sig` as `limiting_magnitude`,
`obj` as `atlas_object_id` from atlas_metadataddc where %(recent)s and obj like "TA%%" order by mjd desc;
""" % locals()
rows = readquery(
log=self.log,
sqlQuery=sqlQuery,
dbConn=self.atlas4DbConn,
quiet=False
)
# TIDY RESULTS BEFORE IMPORT
entries = list(rows)
if len(rows) > 0:
# ADD THE NEW RESULTS TO THE atlas_exposures TABLE
insert_list_of_dictionaries_into_database_tables(
dbConn=self.atlasMoversDBConn,
log=self.log,
dictList=entries,
dbTableName="atlas_exposures",
uniqueKeyList=["expname"],
dateModified=False,
batchSize=10000,
replace=True,
dbSettings=dbSettings
)
# APPEND HTMIDs TO THE atlas_exposures TABLE
add_htm_ids_to_mysql_database_table(
raColName="raDeg",
declColName="decDeg",
tableName="atlas_exposures",
dbConn=self.atlasMoversDBConn,
log=self.log,
primaryIdColumnName="primaryId"
)
print "ATLAS pointings synced between ATLAS3/ATLAS4 databases and the ATLAS Movers `atlas_exposures` database table"
self.log.info('completed the ``import_new_atlas_pointings`` method')
return None | python | def import_new_atlas_pointings(
self,
recent=False):
"""
*Import any new ATLAS pointings from the atlas3/atlas4 databases into the ``atlas_exposures`` table of the Atlas Movers database*
**Key Arguments:**
- ``recent`` -- only sync the most recent 2 weeks of data (speeds things up)
**Return:**
- None
**Usage:**
.. code-block:: python
from rockAtlas.bookkeeping import bookkeeper
bk = bookkeeper(
log=log,
settings=settings
)
bk.import_new_atlas_pointings()
"""
self.log.info('starting the ``import_new_atlas_pointings`` method')
if recent:
mjd = mjdnow(
log=self.log
).get_mjd()
recent = mjd - 14
recent = " mjd_obs > %(recent)s " % locals()
else:
recent = "1=1"
# SELECT ALL OF THE POINTING INFO REQUIRED FROM THE ATLAS3 DATABASE
sqlQuery = u"""
SELECT
`expname`,
`dec` as `decDeg`,
`exptime` as `exp_time`,
`filter`,
`mjd_obs` as `mjd`,
`ra` as `raDeg`,
if(mjd_obs<57855.0,mag5sig-0.75,mag5sig) as `limiting_magnitude`,
`object` as `atlas_object_id` from atlas_metadata where %(recent)s and object like "TA%%" order by mjd_obs desc;
""" % locals()
rows = readquery(
log=self.log,
sqlQuery=sqlQuery,
dbConn=self.atlas3DbConn,
quiet=False
)
dbSettings = self.settings["database settings"]["atlasMovers"]
# TIDY RESULTS BEFORE IMPORT
entries = list(rows)
if len(rows) > 0:
# ADD THE NEW RESULTS TO THE atlas_exposures TABLE
insert_list_of_dictionaries_into_database_tables(
dbConn=self.atlasMoversDBConn,
log=self.log,
dictList=entries,
dbTableName="atlas_exposures",
uniqueKeyList=["expname"],
dateModified=False,
batchSize=10000,
replace=True,
dbSettings=dbSettings
)
recent = recent.replace("mjd_obs", "mjd")
# SELECT ALL OF THE POINTING INFO REQUIRED FROM THE ATLAS4 DATABASE
sqlQuery = u"""
SELECT
`obs` as `expname`,
`dec` as `decDeg`,
`texp` as `exp_time`,
`filt` as `filter`,
`mjd`,
`ra` as `raDeg`,
`mag5sig` as `limiting_magnitude`,
`obj` as `atlas_object_id` from atlas_metadataddc where %(recent)s and obj like "TA%%" order by mjd desc;
""" % locals()
rows = readquery(
log=self.log,
sqlQuery=sqlQuery,
dbConn=self.atlas4DbConn,
quiet=False
)
# TIDY RESULTS BEFORE IMPORT
entries = list(rows)
if len(rows) > 0:
# ADD THE NEW RESULTS TO THE atlas_exposures TABLE
insert_list_of_dictionaries_into_database_tables(
dbConn=self.atlasMoversDBConn,
log=self.log,
dictList=entries,
dbTableName="atlas_exposures",
uniqueKeyList=["expname"],
dateModified=False,
batchSize=10000,
replace=True,
dbSettings=dbSettings
)
# APPEND HTMIDs TO THE atlas_exposures TABLE
add_htm_ids_to_mysql_database_table(
raColName="raDeg",
declColName="decDeg",
tableName="atlas_exposures",
dbConn=self.atlasMoversDBConn,
log=self.log,
primaryIdColumnName="primaryId"
)
print "ATLAS pointings synced between ATLAS3/ATLAS4 databases and the ATLAS Movers `atlas_exposures` database table"
self.log.info('completed the ``import_new_atlas_pointings`` method')
return None | ['def', 'import_new_atlas_pointings', '(', 'self', ',', 'recent', '=', 'False', ')', ':', 'self', '.', 'log', '.', 'info', '(', "'starting the ``import_new_atlas_pointings`` method'", ')', 'if', 'recent', ':', 'mjd', '=', 'mjdnow', '(', 'log', '=', 'self', '.', 'log', ')', '.', 'get_mjd', '(', ')', 'recent', '=', 'mjd', '-', '14', 'recent', '=', '" mjd_obs > %(recent)s "', '%', 'locals', '(', ')', 'else', ':', 'recent', '=', '"1=1"', '# SELECT ALL OF THE POINTING INFO REQUIRED FROM THE ATLAS3 DATABASE', 'sqlQuery', '=', 'u"""\n SELECT\n `expname`,\n `dec` as `decDeg`,\n `exptime` as `exp_time`,\n `filter`,\n `mjd_obs` as `mjd`,\n `ra` as `raDeg`,\n if(mjd_obs<57855.0,mag5sig-0.75,mag5sig) as `limiting_magnitude`,\n `object` as `atlas_object_id` from atlas_metadata where %(recent)s and object like "TA%%" order by mjd_obs desc;\n """', '%', 'locals', '(', ')', 'rows', '=', 'readquery', '(', 'log', '=', 'self', '.', 'log', ',', 'sqlQuery', '=', 'sqlQuery', ',', 'dbConn', '=', 'self', '.', 'atlas3DbConn', ',', 'quiet', '=', 'False', ')', 'dbSettings', '=', 'self', '.', 'settings', '[', '"database settings"', ']', '[', '"atlasMovers"', ']', '# TIDY RESULTS BEFORE IMPORT', 'entries', '=', 'list', '(', 'rows', ')', 'if', 'len', '(', 'rows', ')', '>', '0', ':', '# ADD THE NEW RESULTS TO THE atlas_exposures TABLE', 'insert_list_of_dictionaries_into_database_tables', '(', 'dbConn', '=', 'self', '.', 'atlasMoversDBConn', ',', 'log', '=', 'self', '.', 'log', ',', 'dictList', '=', 'entries', ',', 'dbTableName', '=', '"atlas_exposures"', ',', 'uniqueKeyList', '=', '[', '"expname"', ']', ',', 'dateModified', '=', 'False', ',', 'batchSize', '=', '10000', ',', 'replace', '=', 'True', ',', 'dbSettings', '=', 'dbSettings', ')', 'recent', '=', 'recent', '.', 'replace', '(', '"mjd_obs"', ',', '"mjd"', ')', '# SELECT ALL OF THE POINTING INFO REQUIRED FROM THE ATLAS4 DATABASE', 'sqlQuery', '=', 'u"""\n SELECT\n `obs` as `expname`,\n `dec` as `decDeg`,\n `texp` as `exp_time`,\n `filt` as `filter`,\n `mjd`,\n `ra` as `raDeg`,\n `mag5sig` as `limiting_magnitude`,\n `obj` as `atlas_object_id` from atlas_metadataddc where %(recent)s and obj like "TA%%" order by mjd desc;\n """', '%', 'locals', '(', ')', 'rows', '=', 'readquery', '(', 'log', '=', 'self', '.', 'log', ',', 'sqlQuery', '=', 'sqlQuery', ',', 'dbConn', '=', 'self', '.', 'atlas4DbConn', ',', 'quiet', '=', 'False', ')', '# TIDY RESULTS BEFORE IMPORT', 'entries', '=', 'list', '(', 'rows', ')', 'if', 'len', '(', 'rows', ')', '>', '0', ':', '# ADD THE NEW RESULTS TO THE atlas_exposures TABLE', 'insert_list_of_dictionaries_into_database_tables', '(', 'dbConn', '=', 'self', '.', 'atlasMoversDBConn', ',', 'log', '=', 'self', '.', 'log', ',', 'dictList', '=', 'entries', ',', 'dbTableName', '=', '"atlas_exposures"', ',', 'uniqueKeyList', '=', '[', '"expname"', ']', ',', 'dateModified', '=', 'False', ',', 'batchSize', '=', '10000', ',', 'replace', '=', 'True', ',', 'dbSettings', '=', 'dbSettings', ')', '# APPEND HTMIDs TO THE atlas_exposures TABLE', 'add_htm_ids_to_mysql_database_table', '(', 'raColName', '=', '"raDeg"', ',', 'declColName', '=', '"decDeg"', ',', 'tableName', '=', '"atlas_exposures"', ',', 'dbConn', '=', 'self', '.', 'atlasMoversDBConn', ',', 'log', '=', 'self', '.', 'log', ',', 'primaryIdColumnName', '=', '"primaryId"', ')', 'print', '"ATLAS pointings synced between ATLAS3/ATLAS4 databases and the ATLAS Movers `atlas_exposures` database table"', 'self', '.', 'log', '.', 'info', '(', "'completed the ``import_new_atlas_pointings`` 
method'", ')', 'return', 'None'] | *Import any new ATLAS pointings from the atlas3/atlas4 databases into the ``atlas_exposures`` table of the Atlas Movers database*
**Key Arguments:**
- ``recent`` -- only sync the most recent 2 weeks of data (speeds things up)
**Return:**
- None
**Usage:**
.. code-block:: python
from rockAtlas.bookkeeping import bookkeeper
bk = bookkeeper(
log=log,
settings=settings
)
bk.import_new_atlas_pointings() | ['*', 'Import', 'any', 'new', 'ATLAS', 'pointings', 'from', 'the', 'atlas3', '/', 'atlas4', 'databases', 'into', 'the', 'atlas_exposures', 'table', 'of', 'the', 'Atlas', 'Movers', 'database', '*'] | train | https://github.com/thespacedoctor/rockAtlas/blob/062ecaa95ab547efda535aa33165944f13c621de/rockAtlas/bookkeeping/bookkeeper.py#L108-L231 |
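The `recent` branch in the record above turns "now minus 14 days" into an `mjd_obs > …` SQL fragment. A minimal standalone sketch of that cutoff calculation, using only the standard library; the project's own `mjdnow` helper is not reproduced here, so the constant-offset Unix-to-MJD conversion is an assumption about what it returns:

```python
from datetime import datetime, timezone

def mjd_now():
    # MJD = Unix time / 86400 + 40587 (the Unix epoch falls on MJD 40587.0)
    return datetime.now(timezone.utc).timestamp() / 86400.0 + 40587.0

recent = mjd_now() - 14
recent_clause = " mjd_obs > %(recent)s " % locals()
print(recent_clause)  # e.g. " mjd_obs > 60705.123... "
```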
5,934 | manns/pyspread | pyspread/src/gui/_grid.py | GridEventHandlers.OnInsertCols | def OnInsertCols(self, event):
"""Inserts the maximum of 1 and the number of selected columns"""
bbox = self.grid.selection.get_bbox()
if bbox is None or bbox[1][1] is None:
# Insert columns at cursor
ins_point = self.grid.actions.cursor[1] - 1
no_cols = 1
else:
# Insert at right edge of bounding box
ins_point = bbox[0][1] - 1
no_cols = self._get_no_rowscols(bbox)[1]
with undo.group(_("Insert columns")):
self.grid.actions.insert_cols(ins_point, no_cols)
self.grid.GetTable().ResetView()
# Update the default sized cell sizes
self.grid.actions.zoom()
event.Skip() | python | def OnInsertCols(self, event):
"""Inserts the maximum of 1 and the number of selected columns"""
bbox = self.grid.selection.get_bbox()
if bbox is None or bbox[1][1] is None:
# Insert columns at cursor
ins_point = self.grid.actions.cursor[1] - 1
no_cols = 1
else:
# Insert at right edge of bounding box
ins_point = bbox[0][1] - 1
no_cols = self._get_no_rowscols(bbox)[1]
with undo.group(_("Insert columns")):
self.grid.actions.insert_cols(ins_point, no_cols)
self.grid.GetTable().ResetView()
# Update the default sized cell sizes
self.grid.actions.zoom()
event.Skip() | ['def', 'OnInsertCols', '(', 'self', ',', 'event', ')', ':', 'bbox', '=', 'self', '.', 'grid', '.', 'selection', '.', 'get_bbox', '(', ')', 'if', 'bbox', 'is', 'None', 'or', 'bbox', '[', '1', ']', '[', '1', ']', 'is', 'None', ':', '# Insert rows at cursor', 'ins_point', '=', 'self', '.', 'grid', '.', 'actions', '.', 'cursor', '[', '1', ']', '-', '1', 'no_cols', '=', '1', 'else', ':', '# Insert at right edge of bounding box', 'ins_point', '=', 'bbox', '[', '0', ']', '[', '1', ']', '-', '1', 'no_cols', '=', 'self', '.', '_get_no_rowscols', '(', 'bbox', ')', '[', '1', ']', 'with', 'undo', '.', 'group', '(', '_', '(', '"Insert columns"', ')', ')', ':', 'self', '.', 'grid', '.', 'actions', '.', 'insert_cols', '(', 'ins_point', ',', 'no_cols', ')', 'self', '.', 'grid', '.', 'GetTable', '(', ')', '.', 'ResetView', '(', ')', '# Update the default sized cell sizes', 'self', '.', 'grid', '.', 'actions', '.', 'zoom', '(', ')', 'event', '.', 'Skip', '(', ')'] | Inserts the maximum of 1 and the number of selected columns | ['Inserts', 'the', 'maximum', 'of', '1', 'and', 'the', 'number', 'of', 'selected', 'columns'] | train | https://github.com/manns/pyspread/blob/0e2fd44c2e0f06605efc3058c20a43a8c1f9e7e0/pyspread/src/gui/_grid.py#L1346-L1368 |
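A detached illustration of the bounding-box arithmetic in `OnInsertCols` above, with no wx or pyspread dependency. `_get_no_rowscols` is not shown in this record, so the inclusive column count below is an assumption about what its second element returns:

```python
bbox = ((2, 3), (5, 7))                # assumed ((top, left), (bottom, right)) of a selection
ins_point = bbox[0][1] - 1             # one less than the selection's left column, as above
no_cols = bbox[1][1] - bbox[0][1] + 1  # hypothetical stand-in for _get_no_rowscols(bbox)[1]
print(ins_point, no_cols)              # 2 5
```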
5,935 | timdiels/pytil | pytil/path.py | hash | def hash(path, hash_function=hashlib.sha512): # @ReservedAssignment
'''
Hash file or directory.
Parameters
----------
path : ~pathlib.Path
File or directory to hash.
hash_function : ~typing.Callable[[], hash object]
Function which creates a hashlib hash object when called. Defaults to
``hashlib.sha512``.
Returns
-------
hash object
hashlib hash object of file/directory contents. File/directory stat data
is ignored. The directory digest covers file/directory contents and
their location relative to the directory being digested. The directory
name itself is ignored.
'''
hash_ = hash_function()
if path.is_dir():
for directory, directories, files in os.walk(str(path), topdown=True):
# Note:
# - directory: path to current directory in walk relative to current working directory
# - directories/files: dir/file names
# Note: file names can contain nearly any character (even newlines).
# hash like (ignore the whitespace):
#
# h(relative-dir-path)
# h(dir_name)
# h(dir_name2)
# ,
# h(file_name) h(file_content)
# h(file_name2) h(file_content2)
# ;
# h(relative-dir-path2)
# ...
hash_.update(hash_function(str(Path(directory).relative_to(path)).encode()).digest())
for name in sorted(directories):
hash_.update(hash_function(name.encode()).digest())
hash_.update(b',')
for name in sorted(files):
hash_.update(hash_function(name.encode()).digest())
hash_.update(hash(Path(directory) / name).digest())
hash_.update(b';')
else:
with path.open('rb') as f:
while True:
buffer = f.read(65536)
if not buffer:
break
hash_.update(buffer)
return hash_ | python | def hash(path, hash_function=hashlib.sha512): # @ReservedAssignment
'''
Hash file or directory.
Parameters
----------
path : ~pathlib.Path
File or directory to hash.
hash_function : ~typing.Callable[[], hash object]
Function which creates a hashlib hash object when called. Defaults to
``hashlib.sha512``.
Returns
-------
hash object
hashlib hash object of file/directory contents. File/directory stat data
is ignored. The directory digest covers file/directory contents and
their location relative to the directory being digested. The directory
name itself is ignored.
'''
hash_ = hash_function()
if path.is_dir():
for directory, directories, files in os.walk(str(path), topdown=True):
# Note:
# - directory: path to current directory in walk relative to current working directory
# - directories/files: dir/file names
# Note: file names can contain nearly any character (even newlines).
# hash like (ignore the whitespace):
#
# h(relative-dir-path)
# h(dir_name)
# h(dir_name2)
# ,
# h(file_name) h(file_content)
# h(file_name2) h(file_content2)
# ;
# h(relative-dir-path2)
# ...
hash_.update(hash_function(str(Path(directory).relative_to(path)).encode()).digest())
for name in sorted(directories):
hash_.update(hash_function(name.encode()).digest())
hash_.update(b',')
for name in sorted(files):
hash_.update(hash_function(name.encode()).digest())
hash_.update(hash(Path(directory) / name).digest())
hash_.update(b';')
else:
with path.open('rb') as f:
while True:
buffer = f.read(65536)
if not buffer:
break
hash_.update(buffer)
return hash_ | ['def', 'hash', '(', 'path', ',', 'hash_function', '=', 'hashlib', '.', 'sha512', ')', ':', '# @ReservedAssignment', 'hash_', '=', 'hash_function', '(', ')', 'if', 'path', '.', 'is_dir', '(', ')', ':', 'for', 'directory', ',', 'directories', ',', 'files', 'in', 'os', '.', 'walk', '(', 'str', '(', 'path', ')', ',', 'topdown', '=', 'True', ')', ':', '# Note:', '# - directory: path to current directory in walk relative to current working direcotry', '# - directories/files: dir/file names', '# Note: file names can contain nearly any character (even newlines).', '# hash like (ignore the whitespace):', '#', '# h(relative-dir-path)', '# h(dir_name)', '# h(dir_name2)', '# ,', '# h(file_name) h(file_content)', '# h(file_name2) h(file_content2)', '# ;', '# h(relative-dir-path2)', '# ...', 'hash_', '.', 'update', '(', 'hash_function', '(', 'str', '(', 'Path', '(', 'directory', ')', '.', 'relative_to', '(', 'path', ')', ')', '.', 'encode', '(', ')', ')', '.', 'digest', '(', ')', ')', 'for', 'name', 'in', 'sorted', '(', 'directories', ')', ':', 'hash_', '.', 'update', '(', 'hash_function', '(', 'name', '.', 'encode', '(', ')', ')', '.', 'digest', '(', ')', ')', 'hash_', '.', 'update', '(', "b','", ')', 'for', 'name', 'in', 'sorted', '(', 'files', ')', ':', 'hash_', '.', 'update', '(', 'hash_function', '(', 'name', '.', 'encode', '(', ')', ')', '.', 'digest', '(', ')', ')', 'hash_', '.', 'update', '(', 'hash', '(', 'Path', '(', 'directory', ')', '/', 'name', ')', '.', 'digest', '(', ')', ')', 'hash_', '.', 'update', '(', "b';'", ')', 'else', ':', 'with', 'path', '.', 'open', '(', "'rb'", ')', 'as', 'f', ':', 'while', 'True', ':', 'buffer', '=', 'f', '.', 'read', '(', '65536', ')', 'if', 'not', 'buffer', ':', 'break', 'hash_', '.', 'update', '(', 'buffer', ')', 'return', 'hash_'] | Hash file or directory.
Parameters
----------
path : ~pathlib.Path
File or directory to hash.
hash_function : ~typing.Callable[[], hash object]
Function which creates a hashlib hash object when called. Defaults to
``hashlib.sha512``.
Returns
-------
hash object
hashlib hash object of file/directory contents. File/directory stat data
is ignored. The directory digest covers file/directory contents and
their location relative to the directory being digested. The directory
name itself is ignored. | ['Hash', 'file', 'or', 'directory', '.'] | train | https://github.com/timdiels/pytil/blob/086a3f8d52caecdd9d1c9f66c8d8a6d38667b00b/pytil/path.py#L188-L243 |
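The file branch of `hash` above streams the file through the digest in 64 KiB chunks. A self-contained sketch of just that branch (directory hashing via `os.walk` is not reproduced; the sample file name is made up):

```python
import hashlib
from pathlib import Path

def file_digest(path, hash_function=hashlib.sha512):
    # Stream the file through the hash object in 64 KiB chunks.
    hash_ = hash_function()
    with Path(path).open('rb') as f:
        for buffer in iter(lambda: f.read(65536), b''):
            hash_.update(buffer)
    return hash_.hexdigest()

sample = Path('sample.txt')            # hypothetical file
sample.write_text('hello world\n')
print(file_digest(sample))
```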
5,936 | SUNCAT-Center/CatHub | cathub/ase_tools/__init__.py | update_ase | def update_ase(db_file, identity, stdout, **key_value_pairs):
"""Connect to ASE db"""
db_ase = ase.db.connect(db_file)
_normalize_key_value_pairs_inplace(key_value_pairs)
count = db_ase.update(identity, **key_value_pairs)
stdout.write(' Updating {0} key value pairs in ASE db row id = {1}\n'
.format(count, identity))
return | python | def update_ase(db_file, identity, stdout, **key_value_pairs):
"""Connect to ASE db"""
db_ase = ase.db.connect(db_file)
_normalize_key_value_pairs_inplace(key_value_pairs)
count = db_ase.update(identity, **key_value_pairs)
stdout.write(' Updating {0} key value pairs in ASE db row id = {1}\n'
.format(count, identity))
return | ['def', 'update_ase', '(', 'db_file', ',', 'identity', ',', 'stdout', ',', '*', '*', 'key_value_pairs', ')', ':', 'db_ase', '=', 'ase', '.', 'db', '.', 'connect', '(', 'db_file', ')', '_normalize_key_value_pairs_inplace', '(', 'key_value_pairs', ')', 'count', '=', 'db_ase', '.', 'update', '(', 'identity', ',', '*', '*', 'key_value_pairs', ')', 'stdout', '.', 'write', '(', "' Updating {0} key value pairs in ASE db row id = {1}\\n'", '.', 'format', '(', 'count', ',', 'identity', ')', ')', 'return'] | Connect to ASE db | ['Connect', 'to', 'ASE', 'db'] | train | https://github.com/SUNCAT-Center/CatHub/blob/324625d1d8e740673f139658b2de4c9e1059739e/cathub/ase_tools/__init__.py#L360-L368 |
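`update_ase` above is a thin wrapper around `ase.db`: connect to the database file, call `update` with the row identity and the key-value pairs, and log the count. A hedged sketch of the underlying ASE calls (requires `ase` installed; the database file name and key-value pairs are made up):

```python
from ase import Atoms
from ase.db import connect

db = connect('example.db')                      # hypothetical database file
row_id = db.write(Atoms('H2O'), relaxed=False)  # store a row with a key-value pair
db.update(row_id, relaxed=True, note='re-run')  # analogous to update_ase(db_file, row_id, ...) above
```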
5,937 | senaite/senaite.core | bika/lims/api/__init__.py | get_parent | def get_parent(brain_or_object, catalog_search=False):
"""Locate the parent object of the content/catalog brain
The `catalog_search` switch uses the `portal_catalog` to do a search and return
a brain instead of the full parent object. However, if the search returned
no results, it falls back to return the full parent object.
:param brain_or_object: A single catalog brain or content object
:type brain_or_object: ATContentType/DexterityContentType/CatalogBrain
:param catalog_search: Use a catalog query to find the parent object
:type catalog_search: bool
:returns: parent object
:rtype: ATContentType/DexterityContentType/PloneSite/CatalogBrain
"""
if is_portal(brain_or_object):
return get_portal()
# Do a catalog search and return the brain
if catalog_search:
parent_path = get_parent_path(brain_or_object)
# parent is the portal object
if parent_path == get_path(get_portal()):
return get_portal()
# get the catalog tool
pc = get_portal_catalog()
# query for the parent path
results = pc(path={
"query": parent_path,
"depth": 0})
# No results fallback: return the parent object
if not results:
return get_object(brain_or_object).aq_parent
# return the brain
return results[0]
return get_object(brain_or_object).aq_parent | python | def get_parent(brain_or_object, catalog_search=False):
"""Locate the parent object of the content/catalog brain
The `catalog_search` switch uses the `portal_catalog` to do a search and return
a brain instead of the full parent object. However, if the search returned
no results, it falls back to return the full parent object.
:param brain_or_object: A single catalog brain or content object
:type brain_or_object: ATContentType/DexterityContentType/CatalogBrain
:param catalog_search: Use a catalog query to find the parent object
:type catalog_search: bool
:returns: parent object
:rtype: ATContentType/DexterityContentType/PloneSite/CatalogBrain
"""
if is_portal(brain_or_object):
return get_portal()
# Do a catalog search and return the brain
if catalog_search:
parent_path = get_parent_path(brain_or_object)
# parent is the portal object
if parent_path == get_path(get_portal()):
return get_portal()
# get the catalog tool
pc = get_portal_catalog()
# query for the parent path
results = pc(path={
"query": parent_path,
"depth": 0})
# No results fallback: return the parent object
if not results:
return get_object(brain_or_object).aq_parent
# return the brain
return results[0]
return get_object(brain_or_object).aq_parent | ['def', 'get_parent', '(', 'brain_or_object', ',', 'catalog_search', '=', 'False', ')', ':', 'if', 'is_portal', '(', 'brain_or_object', ')', ':', 'return', 'get_portal', '(', ')', '# Do a catalog search and return the brain', 'if', 'catalog_search', ':', 'parent_path', '=', 'get_parent_path', '(', 'brain_or_object', ')', '# parent is the portal object', 'if', 'parent_path', '==', 'get_path', '(', 'get_portal', '(', ')', ')', ':', 'return', 'get_portal', '(', ')', '# get the catalog tool', 'pc', '=', 'get_portal_catalog', '(', ')', '# query for the parent path', 'results', '=', 'pc', '(', 'path', '=', '{', '"query"', ':', 'parent_path', ',', '"depth"', ':', '0', '}', ')', '# No results fallback: return the parent object', 'if', 'not', 'results', ':', 'return', 'get_object', '(', 'brain_or_object', ')', '.', 'aq_parent', '# return the brain', 'return', 'results', '[', '0', ']', 'return', 'get_object', '(', 'brain_or_object', ')', '.', 'aq_parent'] | Locate the parent object of the content/catalog brain
The `catalog_search` switch uses the `portal_catalog` to do a search and return
a brain instead of the full parent object. However, if the search returned
no results, it falls back to return the full parent object.
:param brain_or_object: A single catalog brain or content object
:type brain_or_object: ATContentType/DexterityContentType/CatalogBrain
:param catalog_search: Use a catalog query to find the parent object
:type catalog_search: bool
:returns: parent object
:rtype: ATContentType/DexterityContentType/PloneSite/CatalogBrain | ['Locate', 'the', 'parent', 'object', 'of', 'the', 'content', '/', 'catalog', 'brain'] | train | https://github.com/senaite/senaite.core/blob/7602ce2ea2f9e81eb34e20ce17b98a3e70713f85/bika/lims/api/__init__.py#L561-L602 |
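When `catalog_search` is enabled above, the parent is found by querying the catalog for the object's parent path with `depth` 0. A standalone illustration of that query shape (paths are hypothetical; `get_parent_path` itself is not reproduced):

```python
import posixpath

obj_path = "/plone/clients/client-1/W-0001"     # hypothetical object path
parent_path = posixpath.dirname(obj_path)
catalog_query = {"path": {"query": parent_path, "depth": 0}}
print(catalog_query)  # {'path': {'query': '/plone/clients/client-1', 'depth': 0}}
```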
5,938 | napalm-automation/napalm | napalm/ios/ios.py | IOSDriver._discard_config | def _discard_config(self):
"""Set candidate_cfg to current running-config. Erase the merge_cfg file."""
discard_candidate = "copy running-config {}".format(
self._gen_full_path(self.candidate_cfg)
)
discard_merge = "copy null: {}".format(self._gen_full_path(self.merge_cfg))
self.device.send_command_expect(discard_candidate)
self.device.send_command_expect(discard_merge) | python | def _discard_config(self):
"""Set candidate_cfg to current running-config. Erase the merge_cfg file."""
discard_candidate = "copy running-config {}".format(
self._gen_full_path(self.candidate_cfg)
)
discard_merge = "copy null: {}".format(self._gen_full_path(self.merge_cfg))
self.device.send_command_expect(discard_candidate)
self.device.send_command_expect(discard_merge) | ['def', '_discard_config', '(', 'self', ')', ':', 'discard_candidate', '=', '"copy running-config {}"', '.', 'format', '(', 'self', '.', '_gen_full_path', '(', 'self', '.', 'candidate_cfg', ')', ')', 'discard_merge', '=', '"copy null: {}"', '.', 'format', '(', 'self', '.', '_gen_full_path', '(', 'self', '.', 'merge_cfg', ')', ')', 'self', '.', 'device', '.', 'send_command_expect', '(', 'discard_candidate', ')', 'self', '.', 'device', '.', 'send_command_expect', '(', 'discard_merge', ')'] | Set candidate_cfg to current running-config. Erase the merge_cfg file. | ['Set', 'candidate_cfg', 'to', 'current', 'running', '-', 'config', '.', 'Erase', 'the', 'merge_cfg', 'file', '.'] | train | https://github.com/napalm-automation/napalm/blob/c11ae8bb5ce395698704a0051cdf8d144fbb150d/napalm/ios/ios.py#L544-L551 |
5,939 | programa-stic/barf-project | barf/arch/x86/parser.py | parse_instruction | def parse_instruction(string, location, tokens):
"""Parse an x86 instruction.
"""
prefix_str = tokens.get("prefix", None)
mnemonic_str = tokens.get("mnemonic")
operands = [op for op in tokens.get("operands", [])]
infer_operands_size(operands)
# Quick hack: Capstone returns rep instead of repe for cmps and scas
# instructions.
if prefix_str == "rep" and (mnemonic_str.startswith("cmps") or mnemonic_str.startswith("scas")):
prefix_str = "repe"
instr = X86Instruction(
prefix_str,
mnemonic_str,
operands,
arch_info.architecture_mode
)
return instr | python | def parse_instruction(string, location, tokens):
"""Parse an x86 instruction.
"""
prefix_str = tokens.get("prefix", None)
mnemonic_str = tokens.get("mnemonic")
operands = [op for op in tokens.get("operands", [])]
infer_operands_size(operands)
# Quick hack: Capstone returns rep instead of repe for cmps and scas
# instructions.
if prefix_str == "rep" and (mnemonic_str.startswith("cmps") or mnemonic_str.startswith("scas")):
prefix_str = "repe"
instr = X86Instruction(
prefix_str,
mnemonic_str,
operands,
arch_info.architecture_mode
)
return instr | ['def', 'parse_instruction', '(', 'string', ',', 'location', ',', 'tokens', ')', ':', 'prefix_str', '=', 'tokens', '.', 'get', '(', '"prefix"', ',', 'None', ')', 'mnemonic_str', '=', 'tokens', '.', 'get', '(', '"mnemonic"', ')', 'operands', '=', '[', 'op', 'for', 'op', 'in', 'tokens', '.', 'get', '(', '"operands"', ',', '[', ']', ')', ']', 'infer_operands_size', '(', 'operands', ')', '# Quick hack: Capstone returns rep instead of repe for cmps and scas', '# instructions.', 'if', 'prefix_str', '==', '"rep"', 'and', '(', 'mnemonic_str', '.', 'startswith', '(', '"cmps"', ')', 'or', 'mnemonic_str', '.', 'startswith', '(', '"scas"', ')', ')', ':', 'prefix_str', '=', '"repe"', 'instr', '=', 'X86Instruction', '(', 'prefix_str', ',', 'mnemonic_str', ',', 'operands', ',', 'arch_info', '.', 'architecture_mode', ')', 'return', 'instr'] | Parse an x86 instruction. | ['Parse', 'an', 'x86', 'instruction', '.'] | train | https://github.com/programa-stic/barf-project/blob/18ed9e5eace55f7bf6015ec57f037c364099021c/barf/arch/x86/parser.py#L134-L155 |
5,940 | datamachine/twx | twx/twx.py | Peer.send_message | def send_message(self, text: str, reply: int=None, link_preview: bool=None,
on_success: callable=None, reply_markup: botapi.ReplyMarkup=None):
"""
Send message to this peer.
:param text: Text to send.
:param reply: Message object or message_id to reply to.
:param link_preview: Whether or not to show the link preview for this message
:param on_success: Callback to call when call is complete.
:type reply: int or Message
"""
self.twx.send_message(self, text=text, reply=reply, link_preview=link_preview, on_success=on_success,
reply_markup=reply_markup) | python | def send_message(self, text: str, reply: int=None, link_preview: bool=None,
on_success: callable=None, reply_markup: botapi.ReplyMarkup=None):
"""
Send message to this peer.
:param text: Text to send.
:param reply: Message object or message_id to reply to.
:param link_preview: Whether or not to show the link preview for this message
:param on_success: Callback to call when call is complete.
:type reply: int or Message
"""
self.twx.send_message(self, text=text, reply=reply, link_preview=link_preview, on_success=on_success,
reply_markup=reply_markup) | ['def', 'send_message', '(', 'self', ',', 'text', ':', 'str', ',', 'reply', ':', 'int', '=', 'None', ',', 'link_preview', ':', 'bool', '=', 'None', ',', 'on_success', ':', 'callable', '=', 'None', ',', 'reply_markup', ':', 'botapi', '.', 'ReplyMarkup', '=', 'None', ')', ':', 'self', '.', 'twx', '.', 'send_message', '(', 'self', ',', 'text', '=', 'text', ',', 'reply', '=', 'reply', ',', 'link_preview', '=', 'link_preview', ',', 'on_success', '=', 'on_success', ',', 'reply_markup', '=', 'reply_markup', ')'] | Send message to this peer.
:param text: Text to send.
:param reply: Message object or message_id to reply to.
:param link_preview: Whether or not to show the link preview for this message
:param on_success: Callback to call when call is complete.
:type reply: int or Message | ['Send', 'message', 'to', 'this', 'peer', '.', ':', 'param', 'text', ':', 'Text', 'to', 'send', '.', ':', 'param', 'reply', ':', 'Message', 'object', 'or', 'message_id', 'to', 'reply', 'to', '.', ':', 'param', 'link_preview', ':', 'Whether', 'or', 'not', 'to', 'show', 'the', 'link', 'preview', 'for', 'this', 'message', ':', 'param', 'on_success', ':', 'Callback', 'to', 'call', 'when', 'call', 'is', 'complete', '.'] | train | https://github.com/datamachine/twx/blob/d9633f12f3647b1e54ba87b70b39df3b7e02b4eb/twx/twx.py#L78-L90 |
5,941 | googleapis/google-cloud-python | logging/google/cloud/logging/_gapic.py | _parse_log_entry | def _parse_log_entry(entry_pb):
"""Special helper to parse ``LogEntry`` protobuf into a dictionary.
The ``proto_payload`` field in ``LogEntry`` is of type ``Any``. This
can be problematic if the type URL in the payload isn't in the
``google.protobuf`` registry. To help with parsing unregistered types,
this function will remove ``proto_payload`` before parsing.
:type entry_pb: :class:`.log_entry_pb2.LogEntry`
:param entry_pb: Log entry protobuf.
:rtype: dict
:returns: The parsed log entry. The ``protoPayload`` key may contain
the raw ``Any`` protobuf from ``entry_pb.proto_payload`` if
it could not be parsed.
"""
try:
return MessageToDict(entry_pb)
except TypeError:
if entry_pb.HasField("proto_payload"):
proto_payload = entry_pb.proto_payload
entry_pb.ClearField("proto_payload")
entry_mapping = MessageToDict(entry_pb)
entry_mapping["protoPayload"] = proto_payload
return entry_mapping
else:
raise | python | def _parse_log_entry(entry_pb):
"""Special helper to parse ``LogEntry`` protobuf into a dictionary.
The ``proto_payload`` field in ``LogEntry`` is of type ``Any``. This
can be problematic if the type URL in the payload isn't in the
``google.protobuf`` registry. To help with parsing unregistered types,
this function will remove ``proto_payload`` before parsing.
:type entry_pb: :class:`.log_entry_pb2.LogEntry`
:param entry_pb: Log entry protobuf.
:rtype: dict
:returns: The parsed log entry. The ``protoPayload`` key may contain
the raw ``Any`` protobuf from ``entry_pb.proto_payload`` if
it could not be parsed.
"""
try:
return MessageToDict(entry_pb)
except TypeError:
if entry_pb.HasField("proto_payload"):
proto_payload = entry_pb.proto_payload
entry_pb.ClearField("proto_payload")
entry_mapping = MessageToDict(entry_pb)
entry_mapping["protoPayload"] = proto_payload
return entry_mapping
else:
raise | ['def', '_parse_log_entry', '(', 'entry_pb', ')', ':', 'try', ':', 'return', 'MessageToDict', '(', 'entry_pb', ')', 'except', 'TypeError', ':', 'if', 'entry_pb', '.', 'HasField', '(', '"proto_payload"', ')', ':', 'proto_payload', '=', 'entry_pb', '.', 'proto_payload', 'entry_pb', '.', 'ClearField', '(', '"proto_payload"', ')', 'entry_mapping', '=', 'MessageToDict', '(', 'entry_pb', ')', 'entry_mapping', '[', '"protoPayload"', ']', '=', 'proto_payload', 'return', 'entry_mapping', 'else', ':', 'raise'] | Special helper to parse ``LogEntry`` protobuf into a dictionary.
The ``proto_payload`` field in ``LogEntry`` is of type ``Any``. This
can be problematic if the type URL in the payload isn't in the
``google.protobuf`` registry. To help with parsing unregistered types,
this function will remove ``proto_payload`` before parsing.
:type entry_pb: :class:`.log_entry_pb2.LogEntry`
:param entry_pb: Log entry protobuf.
:rtype: dict
:returns: The parsed log entry. The ``protoPayload`` key may contain
the raw ``Any`` protobuf from ``entry_pb.proto_payload`` if
it could not be parsed. | ['Special', 'helper', 'to', 'parse', 'LogEntry', 'protobuf', 'into', 'a', 'dictionary', '.'] | train | https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/logging/google/cloud/logging/_gapic.py#L421-L447 |
5,942 | couchbase/couchbase-python-client | couchbase/bucket.py | Bucket.append_items | def append_items(self, items, **kwargs):
"""
Method to append data to multiple :class:`~.Item` objects.
This method differs from the normal :meth:`append_multi` in that
each `Item`'s `value` field is updated with the appended data
upon successful completion of the operation.
:param items: The item dictionary. The value for each key should
contain a ``fragment`` field containing the object to append
to the value on the server.
:type items: :class:`~couchbase.items.ItemOptionDict`.
The rest of the options are passed verbatim to
:meth:`append_multi`
.. seealso:: :meth:`append_multi`, :meth:`append`
"""
rv = self.append_multi(items, **kwargs)
# Assume this is an 'ItemOptionDict'
for k, v in items.dict.items():
if k.success:
k.value += v['fragment']
return rv | python | def append_items(self, items, **kwargs):
"""
Method to append data to multiple :class:`~.Item` objects.
This method differs from the normal :meth:`append_multi` in that
each `Item`'s `value` field is updated with the appended data
upon successful completion of the operation.
:param items: The item dictionary. The value for each key should
contain a ``fragment`` field containing the object to append
to the value on the server.
:type items: :class:`~couchbase.items.ItemOptionDict`.
The rest of the options are passed verbatim to
:meth:`append_multi`
.. seealso:: :meth:`append_multi`, :meth:`append`
"""
rv = self.append_multi(items, **kwargs)
# Assume this is an 'ItemOptionDict'
for k, v in items.dict.items():
if k.success:
k.value += v['fragment']
return rv | ['def', 'append_items', '(', 'self', ',', 'items', ',', '*', '*', 'kwargs', ')', ':', 'rv', '=', 'self', '.', 'append_multi', '(', 'items', ',', '*', '*', 'kwargs', ')', "# Assume this is an 'ItemOptionDict'", 'for', 'k', ',', 'v', 'in', 'items', '.', 'dict', '.', 'items', '(', ')', ':', 'if', 'k', '.', 'success', ':', 'k', '.', 'value', '+=', 'v', '[', "'fragment'", ']', 'return', 'rv'] | Method to append data to multiple :class:`~.Item` objects.
This method differs from the normal :meth:`append_multi` in that
each `Item`'s `value` field is updated with the appended data
upon successful completion of the operation.
:param items: The item dictionary. The value for each key should
contain a ``fragment`` field containing the object to append
to the value on the server.
:type items: :class:`~couchbase.items.ItemOptionDict`.
The rest of the options are passed verbatim to
:meth:`append_multi`
.. seealso:: :meth:`append_multi`, :meth:`append` | ['Method', 'to', 'append', 'data', 'to', 'multiple', ':', 'class', ':', '~', '.', 'Item', 'objects', '.'] | train | https://github.com/couchbase/couchbase-python-client/blob/a7bada167785bf79a29c39f820d932a433a6a535/couchbase/bucket.py#L1597-L1622 |
5,943 | odlgroup/odl | odl/discr/discretization.py | DiscretizedSpace._lincomb | def _lincomb(self, a, x1, b, x2, out):
"""Raw linear combination."""
self.tspace._lincomb(a, x1.tensor, b, x2.tensor, out.tensor) | python | def _lincomb(self, a, x1, b, x2, out):
"""Raw linear combination."""
self.tspace._lincomb(a, x1.tensor, b, x2.tensor, out.tensor) | ['def', '_lincomb', '(', 'self', ',', 'a', ',', 'x1', ',', 'b', ',', 'x2', ',', 'out', ')', ':', 'self', '.', 'tspace', '.', '_lincomb', '(', 'a', ',', 'x1', '.', 'tensor', ',', 'b', ',', 'x2', '.', 'tensor', ',', 'out', '.', 'tensor', ')'] | Raw linear combination. | ['Raw', 'linear', 'combination', '.'] | train | https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/discr/discretization.py#L253-L255 |
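The "raw linear combination" above delegates `out <- a * x1 + b * x2` to the underlying tensor space. The same computation written out with plain NumPy, for reference:

```python
import numpy as np

a, b = 2.0, -1.0
x1 = np.array([1.0, 2.0])
x2 = np.array([0.5, 0.5])
out = a * x1 + b * x2     # the operation _lincomb performs on the wrapped tensors
print(out)                # [1.5 3.5]
```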
5,944 | merll/docker-map | dockermap/map/input.py | SharedHostVolumesList.get_type_item | def get_type_item(self, value):
"""
Converts the input to a ``SharedVolume`` or ``HostVolume`` tuple for a host bind. Input can be a single string, a
list or tuple, or a single-entry dictionary.
Single values are assumed to be volume aliases for read-write access. Tuples or lists with two elements, can be
``(alias, read-only indicator)``, or ``(container path, host path)``. The latter is assumed, unless the second
element is boolean or a string of either ``ro`` or ``rw``, indicating read-only or read-write access for a volume
alias. Three elements are always used as ``(container path, host path, read-only indicator)``.
Nested values are unpacked, so that valid input formats are also ``{container path: (host path, read-only)}`` or
``(container_path: [host path, read-only])``.
:param value: Input value for conversion.
:return: A SharedVolume tuple
:rtype: SharedVolume
"""
if isinstance(value, (HostVolume, SharedVolume)):
return value
elif isinstance(value, six.string_types):
return SharedVolume(value, False)
elif isinstance(value, (list, tuple)):
return _shared_host_volume_from_tuple(*value)
elif isinstance(value, dict):
v_len = len(value)
if v_len == 1:
k, v = list(value.items())[0]
if k == 'name':
return SharedVolume(v)
elif isinstance(v, (list, tuple)):
return _shared_host_volume_from_tuple(k, *v)
return _shared_host_volume_from_tuple(k, v)
elif 'path' in value:
return HostVolume(**value)
return SharedVolume(**value)
raise ValueError(
"Invalid type; expected a list, tuple, dict, or string type, found {0}.".format(type(value).__name__)) | python | def get_type_item(self, value):
"""
Converts the input to a ``SharedVolume`` or ``HostVolume`` tuple for a host bind. Input can be a single string, a
list or tuple, or a single-entry dictionary.
Single values are assumed to be volume aliases for read-write access. Tuples or lists with two elements, can be
``(alias, read-only indicator)``, or ``(container path, host path)``. The latter is assumed, unless the second
element is boolean or a string of either ``ro`` or ``rw``, indicating read-only or read-write access for a volume
alias. Three elements are always used as ``(container path, host path, read-only indicator)``.
Nested values are unpacked, so that valid input formats are also ``{container path: (host path, read-only)}`` or
``(container_path: [host path, read-only])``.
:param value: Input value for conversion.
:return: A SharedVolume tuple
:rtype: SharedVolume
"""
if isinstance(value, (HostVolume, SharedVolume)):
return value
elif isinstance(value, six.string_types):
return SharedVolume(value, False)
elif isinstance(value, (list, tuple)):
return _shared_host_volume_from_tuple(*value)
elif isinstance(value, dict):
v_len = len(value)
if v_len == 1:
k, v = list(value.items())[0]
if k == 'name':
return SharedVolume(v)
elif isinstance(v, (list, tuple)):
return _shared_host_volume_from_tuple(k, *v)
return _shared_host_volume_from_tuple(k, v)
elif 'path' in value:
return HostVolume(**value)
return SharedVolume(**value)
raise ValueError(
"Invalid type; expected a list, tuple, dict, or string type, found {0}.".format(type(value).__name__)) | ['def', 'get_type_item', '(', 'self', ',', 'value', ')', ':', 'if', 'isinstance', '(', 'value', ',', '(', 'HostVolume', ',', 'SharedVolume', ')', ')', ':', 'return', 'value', 'elif', 'isinstance', '(', 'value', ',', 'six', '.', 'string_types', ')', ':', 'return', 'SharedVolume', '(', 'value', ',', 'False', ')', 'elif', 'isinstance', '(', 'value', ',', '(', 'list', ',', 'tuple', ')', ')', ':', 'return', '_shared_host_volume_from_tuple', '(', '*', 'value', ')', 'elif', 'isinstance', '(', 'value', ',', 'dict', ')', ':', 'v_len', '=', 'len', '(', 'value', ')', 'if', 'v_len', '==', '1', ':', 'k', ',', 'v', '=', 'list', '(', 'value', '.', 'items', '(', ')', ')', '[', '0', ']', 'if', 'k', '==', "'name'", ':', 'return', 'SharedVolume', '(', 'v', ')', 'elif', 'isinstance', '(', 'v', ',', '(', 'list', ',', 'tuple', ')', ')', ':', 'return', '_shared_host_volume_from_tuple', '(', 'k', ',', '*', 'v', ')', 'return', '_shared_host_volume_from_tuple', '(', 'k', ',', 'v', ')', 'elif', "'path'", 'in', 'value', ':', 'return', 'HostVolume', '(', '*', '*', 'value', ')', 'return', 'SharedVolume', '(', '*', '*', 'value', ')', 'raise', 'ValueError', '(', '"Invalid type; expected a list, tuple, dict, or string type, found {0}."', '.', 'format', '(', 'type', '(', 'value', ')', '.', '__name__', ')', ')'] | Converts the input to a ``SharedVolume`` or ``HostVolume`` tuple for a host bind. Input can be a single string, a
list or tuple, or a single-entry dictionary.
Single values are assumed to be volume aliases for read-write access. Tuples or lists with two elements, can be
``(alias, read-only indicator)``, or ``(container path, host path)``. The latter is assumed, unless the second
element is boolean or a string of either ``ro`` or ``rw``, indicating read-only or read-write access for a volume
alias. Three elements are always used as ``(container path, host path, read-only indicator)``.
Nested values are unpacked, so that valid input formats are also ``{container path: (host path, read-only)}`` or
``(container_path: [host path, read-only])``.
:param value: Input value for conversion.
:return: A SharedVolume tuple
:rtype: SharedVolume | ['Converts', 'the', 'input', 'to', 'a', 'SharedVolume', 'or', 'HostVolume', 'tuple', 'for', 'a', 'host', 'bind', '.', 'Input', 'can', 'be', 'a', 'single', 'string', 'a', 'list', 'or', 'tuple', 'or', 'a', 'single', '-', 'entry', 'dictionary', '.', 'Single', 'values', 'are', 'assumed', 'to', 'be', 'volume', 'aliases', 'for', 'read', '-', 'write', 'access', '.', 'Tuples', 'or', 'lists', 'with', 'two', 'elements', 'can', 'be', '(', 'alias', 'read', '-', 'only', 'indicator', ')', 'or', '(', 'container', 'path', 'host', 'path', ')', '.', 'The', 'latter', 'is', 'assumed', 'unless', 'the', 'second', 'element', 'is', 'boolean', 'or', 'a', 'string', 'of', 'either', 'ro', 'or', 'rw', 'indicating', 'read', '-', 'only', 'or', 'read', '-', 'write', 'access', 'for', 'a', 'volume', 'alias', '.', 'Three', 'elements', 'are', 'always', 'used', 'as', '(', 'container', 'path', 'host', 'path', 'read', '-', 'only', 'indicator', ')', '.', 'Nested', 'values', 'are', 'unpacked', 'so', 'that', 'valid', 'input', 'formats', 'are', 'also', '{', 'container', 'path', ':', '(', 'host', 'path', 'read', '-', 'only', ')', '}', 'or', '(', 'container_path', ':', '[', 'host', 'path', 'read', '-', 'only', ']', ')', '.'] | train | https://github.com/merll/docker-map/blob/e14fe86a6ff5c33d121eb2f9157e9359cb80dd02/dockermap/map/input.py#L394-L428 |
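The docstring above enumerates several accepted input shapes. A hedged illustration of the expected mappings using stand-in namedtuples; the real `SharedVolume`/`HostVolume` field names in dockermap are not shown in this record, so the ones below are placeholders only:

```python
from collections import namedtuple

SharedVolume = namedtuple('SharedVolume', 'name readonly')   # placeholder fields
HostVolume = namedtuple('HostVolume', 'path host readonly')  # placeholder fields

examples = [
    ('app_data', SharedVolume('app_data', False)),                                 # bare alias -> read-write
    (('app_data', 'ro'), SharedVolume('app_data', True)),                          # alias + access mode
    (('/var/lib/app', '/srv/app'), HostVolume('/var/lib/app', '/srv/app', False)), # container path, host path
    ({'/var/lib/app': ('/srv/app', True)}, HostVolume('/var/lib/app', '/srv/app', True)),
]
for value, expected in examples:
    print(value, '->', expected)
```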
5,945 | i3visio/osrframework | osrframework/thirdparties/pipl_com/lib/fields.py | DateRange.from_dict | def from_dict(d):
"""Transform the dict to a DateRange object."""
start = d.get('start')
end = d.get('end')
if not (start and end):
raise ValueError('DateRange must have both start and end')
start = str_to_date(start)
end = str_to_date(end)
return DateRange(start, end) | python | def from_dict(d):
"""Transform the dict to a DateRange object."""
start = d.get('start')
end = d.get('end')
if not (start and end):
raise ValueError('DateRange must have both start and end')
start = str_to_date(start)
end = str_to_date(end)
return DateRange(start, end) | ['def', 'from_dict', '(', 'd', ')', ':', 'start', '=', 'd', '.', 'get', '(', "'start'", ')', 'end', '=', 'd', '.', 'get', '(', "'end'", ')', 'if', 'not', '(', 'start', 'and', 'end', ')', ':', 'raise', 'ValueError', '(', "'DateRange must have both start and end'", ')', 'start', '=', 'str_to_date', '(', 'start', ')', 'end', '=', 'str_to_date', '(', 'end', ')', 'return', 'DateRange', '(', 'start', ',', 'end', ')'] | Transform the dict to a DateRange object. | ['Transform', 'the', 'dict', 'to', 'a', 'DateRange', 'object', '.'] | train | https://github.com/i3visio/osrframework/blob/83437f4c14c9c08cb80a896bd9834c77f6567871/osrframework/thirdparties/pipl_com/lib/fields.py#L854-L862 |
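The dict shape `from_dict` above expects, parsed with the standard library for illustration; the library's own `str_to_date` format is not shown in this record, so the ISO date strings are an assumption:

```python
from datetime import datetime

d = {'start': '1981-05-14', 'end': '1987-12-28'}   # hypothetical input
start = datetime.strptime(d['start'], '%Y-%m-%d').date()
end = datetime.strptime(d['end'], '%Y-%m-%d').date()
print(start, end, start <= end)
```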
5,946 | tensorflow/tensor2tensor | tensor2tensor/data_generators/video_utils.py | display_video_hooks | def display_video_hooks(hook_args):
"""Hooks to display videos at decode time."""
predictions = hook_args.predictions
max_outputs = hook_args.decode_hparams.max_display_outputs
max_decodes = hook_args.decode_hparams.max_display_decodes
with tf.Graph().as_default():
_, best_decodes = video_metrics.compute_video_metrics_from_predictions(
predictions, decode_hparams=hook_args.decode_hparams)
all_summaries = []
# Displays decodes corresponding to the best/worst metric,
for metric, metric_decode_inds in best_decodes.items():
curr_metric_inds = metric_decode_inds[:max_outputs]
best_inputs, best_outputs, best_targets = [], [], []
for sample_ind, decode_ind in enumerate(curr_metric_inds):
curr_decode = predictions[decode_ind][sample_ind]
best_inputs.append(curr_decode["inputs"])
best_outputs.append(curr_decode["outputs"])
best_targets.append(curr_decode["targets"])
best_inputs = np.array(best_inputs, dtype=np.uint8)
best_outputs = np.array(best_outputs, dtype=np.uint8)
best_targets = np.array(best_targets, dtype=np.uint8)
summaries = convert_videos_to_summaries(
best_inputs, best_outputs, best_targets,
tag=metric, decode_hparams=hook_args.decode_hparams)
all_summaries.extend(summaries)
# Display random decodes for ten conditioning frames.
for decode_ind, decode in enumerate(predictions[: max_decodes]):
target_videos = video_metrics.stack_data_given_key(decode, "targets")
output_videos = video_metrics.stack_data_given_key(decode, "outputs")
input_videos = video_metrics.stack_data_given_key(decode, "inputs")
target_videos = np.asarray(target_videos, dtype=np.uint8)
output_videos = np.asarray(output_videos, dtype=np.uint8)
input_videos = np.asarray(input_videos, dtype=np.uint8)
summaries = convert_videos_to_summaries(
input_videos, output_videos, target_videos,
tag="decode_%d" % decode_ind, decode_hparams=hook_args.decode_hparams,
display_ground_truth=decode_ind == 0)
all_summaries.extend(summaries)
return all_summaries | python | def display_video_hooks(hook_args):
"""Hooks to display videos at decode time."""
predictions = hook_args.predictions
max_outputs = hook_args.decode_hparams.max_display_outputs
max_decodes = hook_args.decode_hparams.max_display_decodes
with tf.Graph().as_default():
_, best_decodes = video_metrics.compute_video_metrics_from_predictions(
predictions, decode_hparams=hook_args.decode_hparams)
all_summaries = []
# Displays decodes corresponding to the best/worst metric,
for metric, metric_decode_inds in best_decodes.items():
curr_metric_inds = metric_decode_inds[:max_outputs]
best_inputs, best_outputs, best_targets = [], [], []
for sample_ind, decode_ind in enumerate(curr_metric_inds):
curr_decode = predictions[decode_ind][sample_ind]
best_inputs.append(curr_decode["inputs"])
best_outputs.append(curr_decode["outputs"])
best_targets.append(curr_decode["targets"])
best_inputs = np.array(best_inputs, dtype=np.uint8)
best_outputs = np.array(best_outputs, dtype=np.uint8)
best_targets = np.array(best_targets, dtype=np.uint8)
summaries = convert_videos_to_summaries(
best_inputs, best_outputs, best_targets,
tag=metric, decode_hparams=hook_args.decode_hparams)
all_summaries.extend(summaries)
# Display random decodes for ten conditioning frames.
for decode_ind, decode in enumerate(predictions[: max_decodes]):
target_videos = video_metrics.stack_data_given_key(decode, "targets")
output_videos = video_metrics.stack_data_given_key(decode, "outputs")
input_videos = video_metrics.stack_data_given_key(decode, "inputs")
target_videos = np.asarray(target_videos, dtype=np.uint8)
output_videos = np.asarray(output_videos, dtype=np.uint8)
input_videos = np.asarray(input_videos, dtype=np.uint8)
summaries = convert_videos_to_summaries(
input_videos, output_videos, target_videos,
tag="decode_%d" % decode_ind, decode_hparams=hook_args.decode_hparams,
display_ground_truth=decode_ind == 0)
all_summaries.extend(summaries)
return all_summaries | ['def', 'display_video_hooks', '(', 'hook_args', ')', ':', 'predictions', '=', 'hook_args', '.', 'predictions', 'max_outputs', '=', 'hook_args', '.', 'decode_hparams', '.', 'max_display_outputs', 'max_decodes', '=', 'hook_args', '.', 'decode_hparams', '.', 'max_display_decodes', 'with', 'tf', '.', 'Graph', '(', ')', '.', 'as_default', '(', ')', ':', '_', ',', 'best_decodes', '=', 'video_metrics', '.', 'compute_video_metrics_from_predictions', '(', 'predictions', ',', 'decode_hparams', '=', 'hook_args', '.', 'decode_hparams', ')', 'all_summaries', '=', '[', ']', '# Displays decodes corresponding to the best/worst metric,', 'for', 'metric', ',', 'metric_decode_inds', 'in', 'best_decodes', '.', 'items', '(', ')', ':', 'curr_metric_inds', '=', 'metric_decode_inds', '[', ':', 'max_outputs', ']', 'best_inputs', ',', 'best_outputs', ',', 'best_targets', '=', '[', ']', ',', '[', ']', ',', '[', ']', 'for', 'sample_ind', ',', 'decode_ind', 'in', 'enumerate', '(', 'curr_metric_inds', ')', ':', 'curr_decode', '=', 'predictions', '[', 'decode_ind', ']', '[', 'sample_ind', ']', 'best_inputs', '.', 'append', '(', 'curr_decode', '[', '"inputs"', ']', ')', 'best_outputs', '.', 'append', '(', 'curr_decode', '[', '"outputs"', ']', ')', 'best_targets', '.', 'append', '(', 'curr_decode', '[', '"targets"', ']', ')', 'best_inputs', '=', 'np', '.', 'array', '(', 'best_inputs', ',', 'dtype', '=', 'np', '.', 'uint8', ')', 'best_outputs', '=', 'np', '.', 'array', '(', 'best_outputs', ',', 'dtype', '=', 'np', '.', 'uint8', ')', 'best_targets', '=', 'np', '.', 'array', '(', 'best_targets', ',', 'dtype', '=', 'np', '.', 'uint8', ')', 'summaries', '=', 'convert_videos_to_summaries', '(', 'best_inputs', ',', 'best_outputs', ',', 'best_targets', ',', 'tag', '=', 'metric', ',', 'decode_hparams', '=', 'hook_args', '.', 'decode_hparams', ')', 'all_summaries', '.', 'extend', '(', 'summaries', ')', '# Display random decodes for ten conditioning frames.', 'for', 'decode_ind', ',', 'decode', 'in', 'enumerate', '(', 'predictions', '[', ':', 'max_decodes', ']', ')', ':', 'target_videos', '=', 'video_metrics', '.', 'stack_data_given_key', '(', 'decode', ',', '"targets"', ')', 'output_videos', '=', 'video_metrics', '.', 'stack_data_given_key', '(', 'decode', ',', '"outputs"', ')', 'input_videos', '=', 'video_metrics', '.', 'stack_data_given_key', '(', 'decode', ',', '"inputs"', ')', 'target_videos', '=', 'np', '.', 'asarray', '(', 'target_videos', ',', 'dtype', '=', 'np', '.', 'uint8', ')', 'output_videos', '=', 'np', '.', 'asarray', '(', 'output_videos', ',', 'dtype', '=', 'np', '.', 'uint8', ')', 'input_videos', '=', 'np', '.', 'asarray', '(', 'input_videos', ',', 'dtype', '=', 'np', '.', 'uint8', ')', 'summaries', '=', 'convert_videos_to_summaries', '(', 'input_videos', ',', 'output_videos', ',', 'target_videos', ',', 'tag', '=', '"decode_%d"', '%', 'decode_ind', ',', 'decode_hparams', '=', 'hook_args', '.', 'decode_hparams', ',', 'display_ground_truth', '=', 'decode_ind', '==', '0', ')', 'all_summaries', '.', 'extend', '(', 'summaries', ')', 'return', 'all_summaries'] | Hooks to display videos at decode time. | ['Hooks', 'to', 'display', 'videos', 'at', 'decode', 'time', '.'] | train | https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/video_utils.py#L165-L206 |
5,947 | bspaans/python-mingus | mingus/midi/midi_file_in.py | MidiFile.parse_track_header | def parse_track_header(self, fp):
"""Return the size of the track chunk."""
# Check the header
try:
h = fp.read(4)
self.bytes_read += 4
except:
raise IOError("Couldn't read track header from file. Byte %d."
% self.bytes_read)
if h != 'MTrk':
raise HeaderError('Not a valid Track header. Byte %d.'
% self.bytes_read)
# Parse the size of the header
try:
chunk_size = fp.read(4)
self.bytes_read += 4
except:
raise IOError("Couldn't read track chunk size from file.")
chunk_size = self.bytes_to_int(chunk_size)
return chunk_size | python | def parse_track_header(self, fp):
"""Return the size of the track chunk."""
# Check the header
try:
h = fp.read(4)
self.bytes_read += 4
except:
raise IOError("Couldn't read track header from file. Byte %d."
% self.bytes_read)
if h != 'MTrk':
raise HeaderError('Not a valid Track header. Byte %d.'
% self.bytes_read)
# Parse the size of the header
try:
chunk_size = fp.read(4)
self.bytes_read += 4
except:
raise IOError("Couldn't read track chunk size from file.")
chunk_size = self.bytes_to_int(chunk_size)
return chunk_size | ['def', 'parse_track_header', '(', 'self', ',', 'fp', ')', ':', '# Check the header', 'try', ':', 'h', '=', 'fp', '.', 'read', '(', '4', ')', 'self', '.', 'bytes_read', '+=', '4', 'except', ':', 'raise', 'IOError', '(', '"Couldn\'t read track header from file. Byte %d."', '%', 'self', '.', 'bytes_read', ')', 'if', 'h', '!=', "'MTrk'", ':', 'raise', 'HeaderError', '(', "'Not a valid Track header. Byte %d.'", '%', 'self', '.', 'bytes_read', ')', '# Parse the size of the header', 'try', ':', 'chunk_size', '=', 'fp', '.', 'read', '(', '4', ')', 'self', '.', 'bytes_read', '+=', '4', 'except', ':', 'raise', 'IOError', '(', '"Couldn\'t read track chunk size from file."', ')', 'chunk_size', '=', 'self', '.', 'bytes_to_int', '(', 'chunk_size', ')', 'return', 'chunk_size'] | Return the size of the track chunk. | ['Return', 'the', 'size', 'of', 'the', 'track', 'chunk', '.'] | train | https://github.com/bspaans/python-mingus/blob/aa5a5d992d45ada61be0f9f86261380731bd7749/mingus/midi/midi_file_in.py#L322-L342 |
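A standalone sketch of the header check above: a MIDI track chunk is the ASCII tag `MTrk` followed by a 4-byte big-endian length. The payload here is a minimal end-of-track meta event, so the whole thing can be exercised in memory:

```python
import io
import struct

payload = b'\x00\xff\x2f\x00'  # delta-time 0 + end-of-track meta event
fp = io.BytesIO(b'MTrk' + struct.pack('>I', len(payload)) + payload)

assert fp.read(4) == b'MTrk'
chunk_size = struct.unpack('>I', fp.read(4))[0]
print(chunk_size)  # 4
```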
5,948 | datacamp/protowhat | protowhat/checks/check_funcs.py | has_code | def has_code(
state,
text,
incorrect_msg="Check the {ast_path}. The checker expected to find {text}.",
fixed=False,
):
"""Test whether the student code contains text.
Args:
state: State instance describing student and solution code. Can be omitted if used with Ex().
text : text that student code must contain. Can be a regex pattern or a simple string.
incorrect_msg: feedback message if text is not in student code.
fixed: whether to match text exactly, rather than using regular expressions.
Note:
Functions like ``check_node`` focus on certain parts of code.
Using these functions followed by ``has_code`` will only look
in the code being focused on.
:Example:
If the student code is.. ::
SELECT a FROM b WHERE id < 100
Then the first test below would (unfortunately) pass, but the second would fail..::
# contained in student code
Ex().has_code(text="id < 10")
# the $ means that you are matching the end of a line
Ex().has_code(text="id < 10$")
By setting ``fixed = True``, you can search for fixed strings::
# without fixed = True, '*' matches any character
Ex().has_code(text="SELECT * FROM b") # passes
Ex().has_code(text="SELECT \\\\* FROM b") # fails
Ex().has_code(text="SELECT * FROM b", fixed=True) # fails
You can check only the code corresponding to the WHERE clause, using ::
where = Ex().check_node('SelectStmt', 0).check_edge('where_clause')
where.has_code(text = "id < 10)
"""
stu_ast = state.student_ast
stu_code = state.student_code
# fallback on using complete student code if no ast
ParseError = state.ast_dispatcher.ParseError
def get_text(ast, code):
if isinstance(ast, ParseError):
return code
try:
return ast.get_text(code)
except:
return code
stu_text = get_text(stu_ast, stu_code)
_msg = incorrect_msg.format(
ast_path=state.get_ast_path() or "highlighted code", text=text
)
# either simple text matching or regex test
res = text in stu_text if fixed else re.search(text, stu_text)
if not res:
state.report(Feedback(_msg))
return state | python | def has_code(
state,
text,
incorrect_msg="Check the {ast_path}. The checker expected to find {text}.",
fixed=False,
):
"""Test whether the student code contains text.
Args:
state: State instance describing student and solution code. Can be omitted if used with Ex().
text : text that student code must contain. Can be a regex pattern or a simple string.
incorrect_msg: feedback message if text is not in student code.
fixed: whether to match text exactly, rather than using regular expressions.
Note:
Functions like ``check_node`` focus on certain parts of code.
Using these functions followed by ``has_code`` will only look
in the code being focused on.
:Example:
If the student code is.. ::
SELECT a FROM b WHERE id < 100
Then the first test below would (unfortunately) pass, but the second would fail..::
# contained in student code
Ex().has_code(text="id < 10")
# the $ means that you are matching the end of a line
Ex().has_code(text="id < 10$")
By setting ``fixed = True``, you can search for fixed strings::
# without fixed = True, '*' matches any character
Ex().has_code(text="SELECT * FROM b") # passes
Ex().has_code(text="SELECT \\\\* FROM b") # fails
Ex().has_code(text="SELECT * FROM b", fixed=True) # fails
You can check only the code corresponding to the WHERE clause, using ::
where = Ex().check_node('SelectStmt', 0).check_edge('where_clause')
where.has_code(text = "id < 10")
"""
stu_ast = state.student_ast
stu_code = state.student_code
# fallback on using complete student code if no ast
ParseError = state.ast_dispatcher.ParseError
def get_text(ast, code):
if isinstance(ast, ParseError):
return code
try:
return ast.get_text(code)
except:
return code
stu_text = get_text(stu_ast, stu_code)
_msg = incorrect_msg.format(
ast_path=state.get_ast_path() or "highlighted code", text=text
)
# either simple text matching or regex test
res = text in stu_text if fixed else re.search(text, stu_text)
if not res:
state.report(Feedback(_msg))
return state | ['def', 'has_code', '(', 'state', ',', 'text', ',', 'incorrect_msg', '=', '"Check the {ast_path}. The checker expected to find {text}."', ',', 'fixed', '=', 'False', ',', ')', ':', 'stu_ast', '=', 'state', '.', 'student_ast', 'stu_code', '=', 'state', '.', 'student_code', '# fallback on using complete student code if no ast', 'ParseError', '=', 'state', '.', 'ast_dispatcher', '.', 'ParseError', 'def', 'get_text', '(', 'ast', ',', 'code', ')', ':', 'if', 'isinstance', '(', 'ast', ',', 'ParseError', ')', ':', 'return', 'code', 'try', ':', 'return', 'ast', '.', 'get_text', '(', 'code', ')', 'except', ':', 'return', 'code', 'stu_text', '=', 'get_text', '(', 'stu_ast', ',', 'stu_code', ')', '_msg', '=', 'incorrect_msg', '.', 'format', '(', 'ast_path', '=', 'state', '.', 'get_ast_path', '(', ')', 'or', '"highlighted code"', ',', 'text', '=', 'text', ')', '# either simple text matching or regex test', 'res', '=', 'text', 'in', 'stu_text', 'if', 'fixed', 'else', 're', '.', 'search', '(', 'text', ',', 'stu_text', ')', 'if', 'not', 'res', ':', 'state', '.', 'report', '(', 'Feedback', '(', '_msg', ')', ')', 'return', 'state'] | Test whether the student code contains text.
Args:
state: State instance describing student and solution code. Can be omitted if used with Ex().
text : text that student code must contain. Can be a regex pattern or a simple string.
incorrect_msg: feedback message if text is not in student code.
fixed: whether to match text exactly, rather than using regular expressions.
Note:
Functions like ``check_node`` focus on certain parts of code.
Using these functions followed by ``has_code`` will only look
in the code being focused on.
:Example:
If the student code is.. ::
SELECT a FROM b WHERE id < 100
Then the first test below would (unfortunately) pass, but the second would fail..::
# contained in student code
Ex().has_code(text="id < 10")
# the $ means that you are matching the end of a line
Ex().has_code(text="id < 10$")
By setting ``fixed = True``, you can search for fixed strings::
# without fixed = True, '*' matches any character
Ex().has_code(text="SELECT * FROM b") # passes
Ex().has_code(text="SELECT \\\\* FROM b") # fails
Ex().has_code(text="SELECT * FROM b", fixed=True) # fails
You can check only the code corresponding to the WHERE clause, using ::
where = Ex().check_node('SelectStmt', 0).check_edge('where_clause')
where.has_code(text = "id < 10) | ['Test', 'whether', 'the', 'student', 'code', 'contains', 'text', '.'] | train | https://github.com/datacamp/protowhat/blob/a392b4e51e07a2e50e7b7f6ad918b3f5cbb63edc/protowhat/checks/check_funcs.py#L172-L243 |
5,949 | pletzer/pnumpy | src/pnGhostedDistArray.py | gmdaArray | def gmdaArray(arry, dtype, mask=None, numGhosts=1):
"""
ghosted distributed array constructor
@param arry numpy-like array
@param numGhosts the number of ghosts (>= 0)
"""
a = numpy.array(arry, dtype)
res = GhostedMaskedDistArray(a.shape, a.dtype)
res.mask = mask
res.setNumberOfGhosts(numGhosts)
res[:] = a
return res | python | def gmdaArray(arry, dtype, mask=None, numGhosts=1):
"""
ghosted distributed array constructor
@param arry numpy-like array
@param numGhosts the number of ghosts (>= 0)
"""
a = numpy.array(arry, dtype)
res = GhostedMaskedDistArray(a.shape, a.dtype)
res.mask = mask
res.setNumberOfGhosts(numGhosts)
res[:] = a
return res | ['def', 'gmdaArray', '(', 'arry', ',', 'dtype', ',', 'mask', '=', 'None', ',', 'numGhosts', '=', '1', ')', ':', 'a', '=', 'numpy', '.', 'array', '(', 'arry', ',', 'dtype', ')', 'res', '=', 'GhostedMaskedDistArray', '(', 'a', '.', 'shape', ',', 'a', '.', 'dtype', ')', 'res', '.', 'mask', '=', 'mask', 'res', '.', 'setNumberOfGhosts', '(', 'numGhosts', ')', 'res', '[', ':', ']', '=', 'a', 'return', 'res'] | ghosted distributed array constructor
@param arry numpy-like array
@param numGhosts the number of ghosts (>= 0) | ['ghosted', 'distributed', 'array', 'constructor'] | train | https://github.com/pletzer/pnumpy/blob/9e6d308be94a42637466b91ab1a7b4d64b4c29ae/src/pnGhostedDistArray.py#L147-L158 |
5,950 | bxlab/bx-python | lib/bx_extras/pstat.py | abut | def abut (source,*args):
"""
Like the |Stat abut command. It concatenates two lists side-by-side
and returns the result. '2D' lists are also accommodated for either argument
(source or addon). CAUTION: If one list is shorter, it will be repeated
until it is as long as the longest list. If this behavior is not desired,
use pstat.simpleabut().
Usage: abut(source, args) where args=any # of lists
Returns: a list of lists as long as the LONGEST list passed, source on the
'left', lists in <args> attached consecutively on the 'right'
"""
if type(source) not in [ListType,TupleType]:
source = [source]
for addon in args:
if type(addon) not in [ListType,TupleType]:
addon = [addon]
if len(addon) < len(source): # is source list longer?
if len(source) % len(addon) == 0: # are they integer multiples?
repeats = len(source)/len(addon) # repeat addon n times
origadd = copy.deepcopy(addon)
for i in range(repeats-1):
addon = addon + origadd
else:
repeats = len(source)/len(addon)+1 # repeat addon x times,
origadd = copy.deepcopy(addon) # x is NOT an integer
for i in range(repeats-1):
addon = addon + origadd
addon = addon[0:len(source)]
elif len(source) < len(addon): # is addon list longer?
if len(addon) % len(source) == 0: # are they integer multiples?
repeats = len(addon)/len(source) # repeat source n times
origsour = copy.deepcopy(source)
for i in range(repeats-1):
source = source + origsour
else:
repeats = len(addon)/len(source)+1 # repeat source x times,
origsour = copy.deepcopy(source) # x is NOT an integer
for i in range(repeats-1):
source = source + origsour
source = source[0:len(addon)]
source = simpleabut(source,addon)
return source | python | def abut (source,*args):
"""
Like the |Stat abut command. It concatenates two lists side-by-side
and returns the result. '2D' lists are also accommodated for either argument
(source or addon). CAUTION: If one list is shorter, it will be repeated
until it is as long as the longest list. If this behavior is not desired,
use pstat.simpleabut().
Usage: abut(source, args) where args=any # of lists
Returns: a list of lists as long as the LONGEST list passed, source on the
'left', lists in <args> attached consecutively on the 'right'
"""
if type(source) not in [ListType,TupleType]:
source = [source]
for addon in args:
if type(addon) not in [ListType,TupleType]:
addon = [addon]
if len(addon) < len(source): # is source list longer?
if len(source) % len(addon) == 0: # are they integer multiples?
repeats = len(source)/len(addon) # repeat addon n times
origadd = copy.deepcopy(addon)
for i in range(repeats-1):
addon = addon + origadd
else:
repeats = len(source)/len(addon)+1 # repeat addon x times,
origadd = copy.deepcopy(addon) # x is NOT an integer
for i in range(repeats-1):
addon = addon + origadd
addon = addon[0:len(source)]
elif len(source) < len(addon): # is addon list longer?
if len(addon) % len(source) == 0: # are they integer multiples?
repeats = len(addon)/len(source) # repeat source n times
origsour = copy.deepcopy(source)
for i in range(repeats-1):
source = source + origsour
else:
repeats = len(addon)/len(source)+1 # repeat source x times,
origsour = copy.deepcopy(source) # x is NOT an integer
for i in range(repeats-1):
source = source + origsour
source = source[0:len(addon)]
source = simpleabut(source,addon)
return source | ['def', 'abut', '(', 'source', ',', '*', 'args', ')', ':', 'if', 'type', '(', 'source', ')', 'not', 'in', '[', 'ListType', ',', 'TupleType', ']', ':', 'source', '=', '[', 'source', ']', 'for', 'addon', 'in', 'args', ':', 'if', 'type', '(', 'addon', ')', 'not', 'in', '[', 'ListType', ',', 'TupleType', ']', ':', 'addon', '=', '[', 'addon', ']', 'if', 'len', '(', 'addon', ')', '<', 'len', '(', 'source', ')', ':', '# is source list longer?', 'if', 'len', '(', 'source', ')', '%', 'len', '(', 'addon', ')', '==', '0', ':', '# are they integer multiples?', 'repeats', '=', 'len', '(', 'source', ')', '/', 'len', '(', 'addon', ')', '# repeat addon n times', 'origadd', '=', 'copy', '.', 'deepcopy', '(', 'addon', ')', 'for', 'i', 'in', 'range', '(', 'repeats', '-', '1', ')', ':', 'addon', '=', 'addon', '+', 'origadd', 'else', ':', 'repeats', '=', 'len', '(', 'source', ')', '/', 'len', '(', 'addon', ')', '+', '1', '# repeat addon x times,', 'origadd', '=', 'copy', '.', 'deepcopy', '(', 'addon', ')', '# x is NOT an integer', 'for', 'i', 'in', 'range', '(', 'repeats', '-', '1', ')', ':', 'addon', '=', 'addon', '+', 'origadd', 'addon', '=', 'addon', '[', '0', ':', 'len', '(', 'source', ')', ']', 'elif', 'len', '(', 'source', ')', '<', 'len', '(', 'addon', ')', ':', '# is addon list longer?', 'if', 'len', '(', 'addon', ')', '%', 'len', '(', 'source', ')', '==', '0', ':', '# are they integer multiples?', 'repeats', '=', 'len', '(', 'addon', ')', '/', 'len', '(', 'source', ')', '# repeat source n times', 'origsour', '=', 'copy', '.', 'deepcopy', '(', 'source', ')', 'for', 'i', 'in', 'range', '(', 'repeats', '-', '1', ')', ':', 'source', '=', 'source', '+', 'origsour', 'else', ':', 'repeats', '=', 'len', '(', 'addon', ')', '/', 'len', '(', 'source', ')', '+', '1', '# repeat source x times,', 'origsour', '=', 'copy', '.', 'deepcopy', '(', 'source', ')', '# x is NOT an integer', 'for', 'i', 'in', 'range', '(', 'repeats', '-', '1', ')', ':', 'source', '=', 'source', '+', 'origsour', 'source', '=', 'source', '[', '0', ':', 'len', '(', 'addon', ')', ']', 'source', '=', 'simpleabut', '(', 'source', ',', 'addon', ')', 'return', 'source'] | Like the |Stat abut command. It concatenates two lists side-by-side
and returns the result. '2D' lists are also accommodated for either argument
(source or addon). CAUTION: If one list is shorter, it will be repeated
until it is as long as the longest list. If this behavior is not desired,
use pstat.simpleabut().
Usage: abut(source, args) where args=any # of lists
Returns: a list of lists as long as the LONGEST list passed, source on the
'left', lists in <args> attached consecutively on the 'right' | ['Like', 'the', '|Stat', 'abut', 'command', '.', 'It', 'concatenates', 'two', 'lists', 'side', '-', 'by', '-', 'side', 'and', 'returns', 'the', 'result', '.', '2D', 'lists', 'are', 'also', 'accomodated', 'for', 'either', 'argument', '(', 'source', 'or', 'addon', ')', '.', 'CAUTION', ':', 'If', 'one', 'list', 'is', 'shorter', 'it', 'will', 'be', 'repeated', 'until', 'it', 'is', 'as', 'long', 'as', 'the', 'longest', 'list', '.', 'If', 'this', 'behavior', 'is', 'not', 'desired', 'use', 'pstat', '.', 'simpleabut', '()', '.'] | train | https://github.com/bxlab/bx-python/blob/09cb725284803df90a468d910f2274628d8647de/lib/bx_extras/pstat.py#L122-L166 |
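abut pads the shorter list by cyclic repetition and then pairs elements side-by-side. A compact sketch of that repeat-then-pair idea with itertools; this is not the pstat implementation (which also handles 2-D inputs and multiple addon lists), just the core behaviour:

from itertools import cycle, islice

def abut_sketch(left, right):
    # repeat the shorter list until both reach the length of the longer one,
    # then pair elements side-by-side
    n = max(len(left), len(right))
    left = list(islice(cycle(left), n))
    right = list(islice(cycle(right), n))
    return [[a, b] for a, b in zip(left, right)]

print(abut_sketch([1, 2, 3, 4], ['x', 'y']))
# [[1, 'x'], [2, 'y'], [3, 'x'], [4, 'y']]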
5,951 | steenzout/python-serialization-json | steenzout/serialization/json/encoders.py | as_date | def as_date(dat):
"""Return the RFC3339 UTC string representation of the given date and time.
Args:
dat (:py:class:`datetime.date`): the object/type to be serialized.
Raises:
TypeError:
when ``dat`` is not an instance of ``datetime.date``.
Returns:
(str) JSON serializable type for the given object.
"""
LOGGER.debug('as_date(%s)', dat)
return strict_rfc3339.timestamp_to_rfc3339_utcoffset(
calendar.timegm(dat.timetuple())) | python | def as_date(dat):
"""Return the RFC3339 UTC string representation of the given date and time.
Args:
dat (:py:class:`datetime.date`): the object/type to be serialized.
Raises:
TypeError:
when ``dat`` is not an instance of ``datetime.date``.
Returns:
(str) JSON serializable type for the given object.
"""
LOGGER.debug('as_date(%s)', dat)
return strict_rfc3339.timestamp_to_rfc3339_utcoffset(
calendar.timegm(dat.timetuple())) | ['def', 'as_date', '(', 'dat', ')', ':', 'LOGGER', '.', 'debug', '(', "'as_date(%s)'", ',', 'dat', ')', 'return', 'strict_rfc3339', '.', 'timestamp_to_rfc3339_utcoffset', '(', 'calendar', '.', 'timegm', '(', 'dat', '.', 'timetuple', '(', ')', ')', ')'] | Return the RFC3339 UTC string representation of the given date and time.
Args:
dat (:py:class:`datetime.date`): the object/type to be serialized.
Raises:
TypeError:
when ``dat`` is not an instance of ``datetime.date``.
Returns:
(str) JSON serializable type for the given object. | ['Return', 'the', 'RFC3339', 'UTC', 'string', 'representation', 'of', 'the', 'given', 'date', 'and', 'time', '.'] | train | https://github.com/steenzout/python-serialization-json/blob/583568e14cc02ba0bf711f56b8a0a3ad142c696d/steenzout/serialization/json/encoders.py#L64-L80 |
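as_date converts the date to a POSIX timestamp with calendar.timegm and hands it to strict_rfc3339 for formatting. The same conversion can be sketched with only the standard library; the trailing-Z UTC output form is an assumption here, not taken from the strict_rfc3339 package:

import calendar
import datetime

def as_date_sketch(dat):
    # midnight UTC of the given date, rendered as an RFC3339 timestamp
    ts = calendar.timegm(dat.timetuple())
    return datetime.datetime.utcfromtimestamp(ts).strftime('%Y-%m-%dT%H:%M:%SZ')

print(as_date_sketch(datetime.date(2019, 4, 1)))  # 2019-04-01T00:00:00Z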
5,952 | manns/pyspread | pyspread/src/actions/_grid_actions.py | FindActions.find | def find(self, gridpos, find_string, flags, search_result=True):
"""Return next position of event_find_string in MainGrid
Parameters:
-----------
gridpos: 3-tuple of Integer
\tPosition at which the search starts
find_string: String
\tString to find in grid
flags: List of strings
\tSearch flag out of
\t["UP" xor "DOWN", "WHOLE_WORD", "MATCH_CASE", "REG_EXP"]
search_result: Bool, defaults to True
\tIf True then the search includes the result string (slower)
"""
findfunc = self.grid.code_array.findnextmatch
if "DOWN" in flags:
if gridpos[0] < self.grid.code_array.shape[0]:
gridpos[0] += 1
elif gridpos[1] < self.grid.code_array.shape[1]:
gridpos[1] += 1
elif gridpos[2] < self.grid.code_array.shape[2]:
gridpos[2] += 1
else:
gridpos = (0, 0, 0)
elif "UP" in flags:
if gridpos[0] > 0:
gridpos[0] -= 1
elif gridpos[1] > 0:
gridpos[1] -= 1
elif gridpos[2] > 0:
gridpos[2] -= 1
else:
gridpos = [dim - 1 for dim in self.grid.code_array.shape]
return findfunc(tuple(gridpos), find_string, flags, search_result) | python | def find(self, gridpos, find_string, flags, search_result=True):
"""Return next position of event_find_string in MainGrid
Parameters:
-----------
gridpos: 3-tuple of Integer
\tPosition at which the search starts
find_string: String
\tString to find in grid
flags: List of strings
\tSearch flag out of
\t["UP" xor "DOWN", "WHOLE_WORD", "MATCH_CASE", "REG_EXP"]
search_result: Bool, defaults to True
\tIf True then the search includes the result string (slower)
"""
findfunc = self.grid.code_array.findnextmatch
if "DOWN" in flags:
if gridpos[0] < self.grid.code_array.shape[0]:
gridpos[0] += 1
elif gridpos[1] < self.grid.code_array.shape[1]:
gridpos[1] += 1
elif gridpos[2] < self.grid.code_array.shape[2]:
gridpos[2] += 1
else:
gridpos = (0, 0, 0)
elif "UP" in flags:
if gridpos[0] > 0:
gridpos[0] -= 1
elif gridpos[1] > 0:
gridpos[1] -= 1
elif gridpos[2] > 0:
gridpos[2] -= 1
else:
gridpos = [dim - 1 for dim in self.grid.code_array.shape]
return findfunc(tuple(gridpos), find_string, flags, search_result) | ['def', 'find', '(', 'self', ',', 'gridpos', ',', 'find_string', ',', 'flags', ',', 'search_result', '=', 'True', ')', ':', 'findfunc', '=', 'self', '.', 'grid', '.', 'code_array', '.', 'findnextmatch', 'if', '"DOWN"', 'in', 'flags', ':', 'if', 'gridpos', '[', '0', ']', '<', 'self', '.', 'grid', '.', 'code_array', '.', 'shape', '[', '0', ']', ':', 'gridpos', '[', '0', ']', '+=', '1', 'elif', 'gridpos', '[', '1', ']', '<', 'self', '.', 'grid', '.', 'code_array', '.', 'shape', '[', '1', ']', ':', 'gridpos', '[', '1', ']', '+=', '1', 'elif', 'gridpos', '[', '2', ']', '<', 'self', '.', 'grid', '.', 'code_array', '.', 'shape', '[', '2', ']', ':', 'gridpos', '[', '2', ']', '+=', '1', 'else', ':', 'gridpos', '=', '(', '0', ',', '0', ',', '0', ')', 'elif', '"UP"', 'in', 'flags', ':', 'if', 'gridpos', '[', '0', ']', '>', '0', ':', 'gridpos', '[', '0', ']', '-=', '1', 'elif', 'gridpos', '[', '1', ']', '>', '0', ':', 'gridpos', '[', '1', ']', '-=', '1', 'elif', 'gridpos', '[', '2', ']', '>', '0', ':', 'gridpos', '[', '2', ']', '-=', '1', 'else', ':', 'gridpos', '=', '[', 'dim', '-', '1', 'for', 'dim', 'in', 'self', '.', 'grid', '.', 'code_array', '.', 'shape', ']', 'return', 'findfunc', '(', 'tuple', '(', 'gridpos', ')', ',', 'find_string', ',', 'flags', ',', 'search_result', ')'] | Return next position of event_find_string in MainGrid
Parameters:
-----------
gridpos: 3-tuple of Integer
\tPosition at which the search starts
find_string: String
\tString to find in grid
flags: List of strings
\tSearch flag out of
\t["UP" xor "DOWN", "WHOLE_WORD", "MATCH_CASE", "REG_EXP"]
search_result: Bool, defaults to True
\tIf True then the search includes the result string (slower) | ['Return', 'next', 'position', 'of', 'event_find_string', 'in', 'MainGrid'] | train | https://github.com/manns/pyspread/blob/0e2fd44c2e0f06605efc3058c20a43a8c1f9e7e0/pyspread/src/actions/_grid_actions.py#L1750-L1788 |
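Most of find() is the wrap-around stepping of the 3-D grid position before findnextmatch is called. The stepping rule on its own, restated as a small pure function; the grid shape and flag values below are made up for the demonstration:

def step_position(gridpos, shape, flags):
    # advance one step in the requested direction, wrapping to the
    # opposite corner once every axis is exhausted
    pos = list(gridpos)
    if "DOWN" in flags:
        for axis in range(3):
            if pos[axis] < shape[axis]:
                pos[axis] += 1
                return pos
        return [0, 0, 0]
    if "UP" in flags:
        for axis in range(3):
            if pos[axis] > 0:
                pos[axis] -= 1
                return pos
        return [dim - 1 for dim in shape]
    return pos

print(step_position([5, 0, 0], (1000, 100, 3), ["DOWN"]))  # [6, 0, 0]
print(step_position([0, 0, 0], (1000, 100, 3), ["UP"]))    # wraps to [999, 99, 2]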
5,953 | mitsei/dlkit | dlkit/records/assessment/mecqbank/mecqbank_base_records.py | MecQBankBaseMixin._init_map | def _init_map(self):
"""stub"""
SimpleDifficultyItemFormRecord._init_map(self)
SourceItemFormRecord._init_map(self)
PDFPreviewFormRecord._init_map(self)
PublishedFormRecord._init_map(self)
ProvenanceFormRecord._init_map(self)
super(MecQBankBaseMixin, self)._init_map() | python | def _init_map(self):
"""stub"""
SimpleDifficultyItemFormRecord._init_map(self)
SourceItemFormRecord._init_map(self)
PDFPreviewFormRecord._init_map(self)
PublishedFormRecord._init_map(self)
ProvenanceFormRecord._init_map(self)
super(MecQBankBaseMixin, self)._init_map() | ['def', '_init_map', '(', 'self', ')', ':', 'SimpleDifficultyItemFormRecord', '.', '_init_map', '(', 'self', ')', 'SourceItemFormRecord', '.', '_init_map', '(', 'self', ')', 'PDFPreviewFormRecord', '.', '_init_map', '(', 'self', ')', 'PublishedFormRecord', '.', '_init_map', '(', 'self', ')', 'ProvenanceFormRecord', '.', '_init_map', '(', 'self', ')', 'super', '(', 'MecQBankBaseMixin', ',', 'self', ')', '.', '_init_map', '(', ')'] | stub | ['stub'] | train | https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/records/assessment/mecqbank/mecqbank_base_records.py#L440-L447 |
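The record above is a cooperative-mixin pattern: several record classes each write their own fields into one shared map. A toy sketch of that pattern with invented classes; it is not the dlkit code, and the real mixin reaches its base class through super() rather than an explicit call:

class DifficultyRecord:
    def _init_map(self):
        self.my_map['difficulty'] = 'medium'

class SourceRecord:
    def _init_map(self):
        self.my_map['source'] = ''

class BaseForm:
    def _init_map(self):
        self.my_map.setdefault('displayName', '')

class CombinedForm(DifficultyRecord, SourceRecord, BaseForm):
    def _init_map(self):
        self.my_map = {}
        # each record mixin contributes its own fields to the shared map,
        # then the base form adds the common ones
        DifficultyRecord._init_map(self)
        SourceRecord._init_map(self)
        BaseForm._init_map(self)

form = CombinedForm()
form._init_map()
print(form.my_map)  # {'difficulty': 'medium', 'source': '', 'displayName': ''}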
5,954 | OLC-Bioinformatics/sipprverse | genesippr_validation.py | ReadPrep.link_reads | def link_reads(self, analysistype):
"""
Create folders with relative symlinks to the desired simulated/sampled reads. These folders will contain all
the reads created for each sample, and will be processed with GeneSippr and COWBAT pipelines
:param analysistype: Current analysis type. Will either be 'simulated' or 'sampled'
"""
logging.info('Linking {at} reads'.format(at=analysistype))
for sample in self.metadata:
# Create the output directories
genesippr_dir = os.path.join(self.path, 'genesippr', sample.name)
sample.genesippr_dir = genesippr_dir
make_path(genesippr_dir)
cowbat_dir = os.path.join(self.path, 'cowbat', sample.name)
sample.cowbat_dir = cowbat_dir
make_path(cowbat_dir)
# Iterate through all the desired depths of coverage
for depth in self.read_depths:
for read_pair in self.read_lengths:
# Create variables using the analysis type. These will be used in setting GenObject attributes
read_type = '{at}_reads'.format(at=analysistype)
fastq_type = 'trimmed_{at}_fastq'.format(at=analysistype)
# Link reads to both output directories
for output_dir in [genesippr_dir, cowbat_dir]:
# If the original reads are shorter than the specified read length, the FASTQ files will exist,
# but will be empty. Do not create links for these files
size = os.path.getsize(sample[read_type][depth][read_pair].forward_reads[fastq_type])
if size > 20:
# Create relative symlinks to the FASTQ files - use the relative path from the desired
# output directory to the read storage path e.g.
# ../../2013-SEQ-0072/simulated/40/50_150/simulated_trimmed/2013-SEQ-0072_simulated_40_50_150_R1.fastq.gz
# is the relative path to the output_dir. The link name is the base name of the reads
# joined to the desired output directory e.g.
# output_dir/2013-SEQ-0072/2013-SEQ-0072_simulated_40_50_150_R1.fastq.gz
relative_symlink(sample[read_type][depth][read_pair].forward_reads[fastq_type],
output_dir)
# Original FASTQ files
relative_symlink(sample.forward_fastq,
output_dir)
relative_symlink(sample.reverse_fastq,
output_dir)
# Reverse reads
try:
size = os.path.getsize(sample[read_type][depth][read_pair].reverse_reads[fastq_type])
if size > 20:
relative_symlink(sample[read_type][depth][read_pair].reverse_reads[fastq_type],
output_dir)
except FileNotFoundError:
pass | python | def link_reads(self, analysistype):
"""
Create folders with relative symlinks to the desired simulated/sampled reads. These folders will contain all
the reads created for each sample, and will be processed with GeneSippr and COWBAT pipelines
:param analysistype: Current analysis type. Will either be 'simulated' or 'sampled'
"""
logging.info('Linking {at} reads'.format(at=analysistype))
for sample in self.metadata:
# Create the output directories
genesippr_dir = os.path.join(self.path, 'genesippr', sample.name)
sample.genesippr_dir = genesippr_dir
make_path(genesippr_dir)
cowbat_dir = os.path.join(self.path, 'cowbat', sample.name)
sample.cowbat_dir = cowbat_dir
make_path(cowbat_dir)
# Iterate through all the desired depths of coverage
for depth in self.read_depths:
for read_pair in self.read_lengths:
# Create variables using the analysis type. These will be used in setting GenObject attributes
read_type = '{at}_reads'.format(at=analysistype)
fastq_type = 'trimmed_{at}_fastq'.format(at=analysistype)
# Link reads to both output directories
for output_dir in [genesippr_dir, cowbat_dir]:
# If the original reads are shorter than the specified read length, the FASTQ files will exist,
# but will be empty. Do not create links for these files
size = os.path.getsize(sample[read_type][depth][read_pair].forward_reads[fastq_type])
if size > 20:
# Create relative symlinks to the FASTQ files - use the relative path from the desired
# output directory to the read storage path e.g.
# ../../2013-SEQ-0072/simulated/40/50_150/simulated_trimmed/2013-SEQ-0072_simulated_40_50_150_R1.fastq.gz
# is the relative path to the output_dir. The link name is the base name of the reads
# joined to the desired output directory e.g.
# output_dir/2013-SEQ-0072/2013-SEQ-0072_simulated_40_50_150_R1.fastq.gz
relative_symlink(sample[read_type][depth][read_pair].forward_reads[fastq_type],
output_dir)
# Original FASTQ files
relative_symlink(sample.forward_fastq,
output_dir)
relative_symlink(sample.reverse_fastq,
output_dir)
# Reverse reads
try:
size = os.path.getsize(sample[read_type][depth][read_pair].reverse_reads[fastq_type])
if size > 20:
relative_symlink(sample[read_type][depth][read_pair].reverse_reads[fastq_type],
output_dir)
except FileNotFoundError:
pass | ['def', 'link_reads', '(', 'self', ',', 'analysistype', ')', ':', 'logging', '.', 'info', '(', "'Linking {at} reads'", '.', 'format', '(', 'at', '=', 'analysistype', ')', ')', 'for', 'sample', 'in', 'self', '.', 'metadata', ':', '# Create the output directories', 'genesippr_dir', '=', 'os', '.', 'path', '.', 'join', '(', 'self', '.', 'path', ',', "'genesippr'", ',', 'sample', '.', 'name', ')', 'sample', '.', 'genesippr_dir', '=', 'genesippr_dir', 'make_path', '(', 'genesippr_dir', ')', 'cowbat_dir', '=', 'os', '.', 'path', '.', 'join', '(', 'self', '.', 'path', ',', "'cowbat'", ',', 'sample', '.', 'name', ')', 'sample', '.', 'cowbat_dir', '=', 'cowbat_dir', 'make_path', '(', 'cowbat_dir', ')', '# Iterate through all the desired depths of coverage', 'for', 'depth', 'in', 'self', '.', 'read_depths', ':', 'for', 'read_pair', 'in', 'self', '.', 'read_lengths', ':', '# Create variables using the analysis type. These will be used in setting GenObject attributes', 'read_type', '=', "'{at}_reads'", '.', 'format', '(', 'at', '=', 'analysistype', ')', 'fastq_type', '=', "'trimmed_{at}_fastq'", '.', 'format', '(', 'at', '=', 'analysistype', ')', '# Link reads to both output directories', 'for', 'output_dir', 'in', '[', 'genesippr_dir', ',', 'cowbat_dir', ']', ':', '# If the original reads are shorter than the specified read length, the FASTQ files will exist,', '# but will be empty. Do not create links for these files', 'size', '=', 'os', '.', 'path', '.', 'getsize', '(', 'sample', '[', 'read_type', ']', '[', 'depth', ']', '[', 'read_pair', ']', '.', 'forward_reads', '[', 'fastq_type', ']', ')', 'if', 'size', '>', '20', ':', '# Create relative symlinks to the FASTQ files - use the relative path from the desired', '# output directory to the read storage path e.g.', '# ../../2013-SEQ-0072/simulated/40/50_150/simulated_trimmed/2013-SEQ-0072_simulated_40_50_150_R1.fastq.gz', '# is the relative path to the output_dir. The link name is the base name of the reads', '# joined to the desired output directory e.g.', '# output_dir/2013-SEQ-0072/2013-SEQ-0072_simulated_40_50_150_R1.fastq.gz', 'relative_symlink', '(', 'sample', '[', 'read_type', ']', '[', 'depth', ']', '[', 'read_pair', ']', '.', 'forward_reads', '[', 'fastq_type', ']', ',', 'output_dir', ')', '# Original FASTQ files', 'relative_symlink', '(', 'sample', '.', 'forward_fastq', ',', 'output_dir', ')', 'relative_symlink', '(', 'sample', '.', 'reverse_fastq', ',', 'output_dir', ')', '# Reverse reads', 'try', ':', 'size', '=', 'os', '.', 'path', '.', 'getsize', '(', 'sample', '[', 'read_type', ']', '[', 'depth', ']', '[', 'read_pair', ']', '.', 'reverse_reads', '[', 'fastq_type', ']', ')', 'if', 'size', '>', '20', ':', 'relative_symlink', '(', 'sample', '[', 'read_type', ']', '[', 'depth', ']', '[', 'read_pair', ']', '.', 'reverse_reads', '[', 'fastq_type', ']', ',', 'output_dir', ')', 'except', 'FileNotFoundError', ':', 'pass'] | Create folders with relative symlinks to the desired simulated/sampled reads. These folders will contain all
the reads created for each sample, and will be processed with GeneSippr and COWBAT pipelines
:param analysistype: Current analysis type. Will either be 'simulated' or 'sampled' | ['Create', 'folders', 'with', 'relative', 'symlinks', 'to', 'the', 'desired', 'simulated', '/', 'sampled', 'reads', '.', 'These', 'folders', 'will', 'contain', 'all', 'the', 'reads', 'created', 'for', 'each', 'sample', 'and', 'will', 'be', 'processed', 'with', 'GeneSippr', 'and', 'COWBAT', 'pipelines', ':', 'param', 'analysistype', ':', 'Current', 'analysis', 'type', '.', 'Will', 'either', 'be', 'simulated', 'or', 'sampled'] | train | https://github.com/OLC-Bioinformatics/sipprverse/blob/d4f10cdf8e1a39dac0953db61c21c97efc6006de/genesippr_validation.py#L537-L584 |
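The core of link_reads is creating relative symlinks from an output folder back to stored FASTQ files while skipping near-empty placeholders. relative_symlink is an OLC helper, so the sketch below rebuilds the idea with os.path.relpath and os.symlink only; the temp-directory layout, file name and demo content are invented:

import os
import tempfile

def relative_symlink_sketch(src, dest_dir, min_size=20):
    # skip placeholder FASTQ files that are effectively empty (the pipeline checks > 20 bytes)
    if os.path.getsize(src) <= min_size:
        return None
    link_name = os.path.join(dest_dir, os.path.basename(src))
    # the link target is expressed relative to the directory holding the link
    target = os.path.relpath(src, dest_dir)
    if not os.path.islink(link_name):
        os.symlink(target, link_name)
    return link_name

work = tempfile.mkdtemp()
reads = os.path.join(work, 'reads')
out = os.path.join(work, 'genesippr')
os.makedirs(reads)
os.makedirs(out)
fastq = os.path.join(reads, 'sample_R1.fastq.gz')
with open(fastq, 'wb') as fh:
    fh.write(b'@read1\nACGTACGTACGTACGT\n+\nIIIIIIIIIIIIIIII\n')
print(relative_symlink_sketch(fastq, out))  # .../genesippr/sample_R1.fastq.gz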
5,955 | KrishnaswamyLab/graphtools | graphtools/graphs.py | LandmarkGraph.extend_to_data | def extend_to_data(self, data, **kwargs):
"""Build transition matrix from new data to the graph
Creates a transition matrix such that `Y` can be approximated by
a linear combination of landmarks. Any
transformation of the landmarks can be trivially applied to `Y` by
performing
`transform_Y = transitions.dot(transform)`
Parameters
----------
Y: array-like, [n_samples_y, n_features]
new data for which an affinity matrix is calculated
to the existing data. `n_features` must match
either the ambient or PCA dimensions
Returns
-------
transitions : array-like, [n_samples_y, self.data.shape[0]]
Transition matrix from `Y` to `self.data`
"""
kernel = self.build_kernel_to_data(data, **kwargs)
if sparse.issparse(kernel):
pnm = sparse.hstack(
[sparse.csr_matrix(kernel[:, self.clusters == i].sum(
axis=1)) for i in np.unique(self.clusters)])
else:
pnm = np.array([np.sum(
kernel[:, self.clusters == i],
axis=1).T for i in np.unique(self.clusters)]).transpose()
pnm = normalize(pnm, norm='l1', axis=1)
return pnm | python | def extend_to_data(self, data, **kwargs):
"""Build transition matrix from new data to the graph
Creates a transition matrix such that `Y` can be approximated by
a linear combination of landmarks. Any
transformation of the landmarks can be trivially applied to `Y` by
performing
`transform_Y = transitions.dot(transform)`
Parameters
----------
Y: array-like, [n_samples_y, n_features]
new data for which an affinity matrix is calculated
to the existing data. `n_features` must match
either the ambient or PCA dimensions
Returns
-------
transitions : array-like, [n_samples_y, self.data.shape[0]]
Transition matrix from `Y` to `self.data`
"""
kernel = self.build_kernel_to_data(data, **kwargs)
if sparse.issparse(kernel):
pnm = sparse.hstack(
[sparse.csr_matrix(kernel[:, self.clusters == i].sum(
axis=1)) for i in np.unique(self.clusters)])
else:
pnm = np.array([np.sum(
kernel[:, self.clusters == i],
axis=1).T for i in np.unique(self.clusters)]).transpose()
pnm = normalize(pnm, norm='l1', axis=1)
return pnm | ['def', 'extend_to_data', '(', 'self', ',', 'data', ',', '*', '*', 'kwargs', ')', ':', 'kernel', '=', 'self', '.', 'build_kernel_to_data', '(', 'data', ',', '*', '*', 'kwargs', ')', 'if', 'sparse', '.', 'issparse', '(', 'kernel', ')', ':', 'pnm', '=', 'sparse', '.', 'hstack', '(', '[', 'sparse', '.', 'csr_matrix', '(', 'kernel', '[', ':', ',', 'self', '.', 'clusters', '==', 'i', ']', '.', 'sum', '(', 'axis', '=', '1', ')', ')', 'for', 'i', 'in', 'np', '.', 'unique', '(', 'self', '.', 'clusters', ')', ']', ')', 'else', ':', 'pnm', '=', 'np', '.', 'array', '(', '[', 'np', '.', 'sum', '(', 'kernel', '[', ':', ',', 'self', '.', 'clusters', '==', 'i', ']', ',', 'axis', '=', '1', ')', '.', 'T', 'for', 'i', 'in', 'np', '.', 'unique', '(', 'self', '.', 'clusters', ')', ']', ')', '.', 'transpose', '(', ')', 'pnm', '=', 'normalize', '(', 'pnm', ',', 'norm', '=', "'l1'", ',', 'axis', '=', '1', ')', 'return', 'pnm'] | Build transition matrix from new data to the graph
Creates a transition matrix such that `Y` can be approximated by
a linear combination of landmarks. Any
transformation of the landmarks can be trivially applied to `Y` by
performing
`transform_Y = transitions.dot(transform)`
Parameters
----------
Y: array-like, [n_samples_y, n_features]
new data for which an affinity matrix is calculated
to the existing data. `n_features` must match
either the ambient or PCA dimensions
Returns
-------
transitions : array-like, [n_samples_y, self.data.shape[0]]
Transition matrix from `Y` to `self.data` | ['Build', 'transition', 'matrix', 'from', 'new', 'data', 'to', 'the', 'graph'] | train | https://github.com/KrishnaswamyLab/graphtools/blob/44685352be7df2005d44722903092207967457f2/graphtools/graphs.py#L637-L671 |
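extend_to_data sums the kernel columns that belong to each landmark cluster and L1-normalises the rows. With a dense kernel that is just a grouped column sum plus a row normalisation, sketched here on random numpy data (shapes and cluster labels are invented):

import numpy as np

rng = np.random.default_rng(0)
kernel = rng.random((5, 8))                     # affinities from 5 new points to 8 graph points
clusters = np.array([0, 0, 1, 1, 1, 2, 2, 2])   # landmark assignment of the 8 graph points

# total affinity each new point has to every landmark cluster
pnm = np.column_stack([kernel[:, clusters == c].sum(axis=1)
                       for c in np.unique(clusters)])
# L1-normalise each row so it becomes a transition probability vector
pnm = pnm / pnm.sum(axis=1, keepdims=True)
print(pnm.shape, pnm.sum(axis=1))               # (5, 3) with every row summing to 1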
5,956 | Numigi/gitoo | src/core.py | parse_url | def parse_url(url):
""" Parse the given url and update it with environment value if required.
:param basestring url:
:rtype: basestring
:raise: KeyError if environment variable is needed but not found.
"""
# the url has to be a unicode by pystache's design, but the unicode concept has been revamped in py3
# we use a try except to make the code compatible with py2 and py3
try:
url = unicode(url)
except NameError:
url = url
parsed = pystache.parse(url)
# pylint: disable=protected-access
variables = (element.key for element in parsed._parse_tree if isinstance(element, _EscapeNode))
return pystache.render(url, {variable: os.environ[variable] for variable in variables}) | python | def parse_url(url):
""" Parse the given url and update it with environment value if required.
:param basestring url:
:rtype: basestring
:raise: KeyError if environment variable is needed but not found.
"""
# the url has to be a unicode by pystache's design, but the unicode concept has been revamped in py3
# we use a try except to make the code compatible with py2 and py3
try:
url = unicode(url)
except NameError:
url = url
parsed = pystache.parse(url)
# pylint: disable=protected-access
variables = (element.key for element in parsed._parse_tree if isinstance(element, _EscapeNode))
return pystache.render(url, {variable: os.environ[variable] for variable in variables}) | ['def', 'parse_url', '(', 'url', ')', ':', "# the url has to be a unicode by pystache's design, but the unicode concept has been rewamped in py3", '# we use a try except to make the code compatible with py2 and py3', 'try', ':', 'url', '=', 'unicode', '(', 'url', ')', 'except', 'NameError', ':', 'url', '=', 'url', 'parsed', '=', 'pystache', '.', 'parse', '(', 'url', ')', '# pylint: disable=protected-access', 'variables', '=', '(', 'element', '.', 'key', 'for', 'element', 'in', 'parsed', '.', '_parse_tree', 'if', 'isinstance', '(', 'element', ',', '_EscapeNode', ')', ')', 'return', 'pystache', '.', 'render', '(', 'url', ',', '{', 'variable', ':', 'os', '.', 'environ', '[', 'variable', ']', 'for', 'variable', 'in', 'variables', '}', ')'] | Parse the given url and update it with environment value if required.
:param basestring url:
:rtype: basestring
:raise: KeyError if environment variable is needed but not found. | ['Parse', 'the', 'given', 'url', 'and', 'update', 'it', 'with', 'environment', 'value', 'if', 'required', '.'] | train | https://github.com/Numigi/gitoo/blob/0921f5fb8a948021760bb0373a40f9fbe8a4a2e5/src/core.py#L242-L259 |
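parse_url inspects the pystache parse tree to find {{ VAR }} placeholders and fills them from os.environ. A regex-based stand-in for that templating step (the GIT_TOKEN variable and the URL are invented); like the original, a missing variable raises KeyError:

import os
import re

def parse_url_sketch(url):
    # substitute {{ VAR }} placeholders with values from the environment;
    # a missing variable raises KeyError, matching the documented behaviour
    def env_value(match):
        return os.environ[match.group(1)]
    return re.sub(r'\{\{\s*(\w+)\s*\}\}', env_value, url)

os.environ.setdefault('GIT_TOKEN', 'secret')
print(parse_url_sketch('https://{{ GIT_TOKEN }}@github.com/Numigi/gitoo.git'))
# https://secret@github.com/Numigi/gitoo.git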
5,957 | pycontribs/pyrax | pyrax/cloudblockstorage.py | CloudBlockStorageVolume.detach | def detach(self):
"""
Detaches this volume from any device it may be attached to. If it
is not attached, nothing happens.
"""
attachments = self.attachments
if not attachments:
# Not attached; no error needed, just return
return
# A volume can only be attached to one device at a time, but for some
# reason this is a list instead of a singular value
att = attachments[0]
instance_id = att["server_id"]
attachment_id = att["id"]
try:
self._nova_volumes.delete_server_volume(instance_id, attachment_id)
except Exception as e:
raise exc.VolumeDetachmentFailed("%s" % e) | python | def detach(self):
"""
Detaches this volume from any device it may be attached to. If it
is not attached, nothing happens.
"""
attachments = self.attachments
if not attachments:
# Not attached; no error needed, just return
return
# A volume can only be attached to one device at a time, but for some
# reason this is a list instead of a singular value
att = attachments[0]
instance_id = att["server_id"]
attachment_id = att["id"]
try:
self._nova_volumes.delete_server_volume(instance_id, attachment_id)
except Exception as e:
raise exc.VolumeDetachmentFailed("%s" % e) | ['def', 'detach', '(', 'self', ')', ':', 'attachments', '=', 'self', '.', 'attachments', 'if', 'not', 'attachments', ':', '# Not attached; no error needed, just return', 'return', '# A volume can only be attached to one device at a time, but for some', '# reason this is a list instead of a singular value', 'att', '=', 'attachments', '[', '0', ']', 'instance_id', '=', 'att', '[', '"server_id"', ']', 'attachment_id', '=', 'att', '[', '"id"', ']', 'try', ':', 'self', '.', '_nova_volumes', '.', 'delete_server_volume', '(', 'instance_id', ',', 'attachment_id', ')', 'except', 'Exception', 'as', 'e', ':', 'raise', 'exc', '.', 'VolumeDetachmentFailed', '(', '"%s"', '%', 'e', ')'] | Detaches this volume from any device it may be attached to. If it
is not attached, nothing happens. | ['Detaches', 'this', 'volume', 'from', 'any', 'device', 'it', 'may', 'be', 'attached', 'to', '.', 'If', 'it', 'is', 'not', 'attached', 'nothing', 'happens', '.'] | train | https://github.com/pycontribs/pyrax/blob/9ddfd5064b3a292d7337906f3b2d5dce95b50b99/pyrax/cloudblockstorage.py#L167-L184 |
5,958 | gem/oq-engine | openquake/server/db/actions.py | get_log_slice | def get_log_slice(db, job_id, start, stop):
"""
Get a slice of the calculation log as a JSON list of rows
:param db:
a :class:`openquake.server.dbapi.Db` instance
:param job_id:
a job ID
:param start:
start of the slice
:param stop:
end of the slice (the last element is excluded)
"""
start = int(start)
stop = int(stop)
limit = -1 if stop == 0 else stop - start
logs = db('SELECT * FROM log WHERE job_id=?x '
'ORDER BY id LIMIT ?s OFFSET ?s',
job_id, limit, start)
# NB: .isoformat() returns a string like '2016-08-29T15:42:34.984756'
# we consider only the first 22 characters, i.e. '2016-08-29T15:42:34.98'
return [[log.timestamp.isoformat()[:22], log.level,
log.process, log.message] for log in logs] | python | def get_log_slice(db, job_id, start, stop):
"""
Get a slice of the calculation log as a JSON list of rows
:param db:
a :class:`openquake.server.dbapi.Db` instance
:param job_id:
a job ID
:param start:
start of the slice
:param stop:
end of the slice (the last element is excluded)
"""
start = int(start)
stop = int(stop)
limit = -1 if stop == 0 else stop - start
logs = db('SELECT * FROM log WHERE job_id=?x '
'ORDER BY id LIMIT ?s OFFSET ?s',
job_id, limit, start)
# NB: .isoformat() returns a string like '2016-08-29T15:42:34.984756'
# we consider only the first 22 characters, i.e. '2016-08-29T15:42:34.98'
return [[log.timestamp.isoformat()[:22], log.level,
log.process, log.message] for log in logs] | ['def', 'get_log_slice', '(', 'db', ',', 'job_id', ',', 'start', ',', 'stop', ')', ':', 'start', '=', 'int', '(', 'start', ')', 'stop', '=', 'int', '(', 'stop', ')', 'limit', '=', '-', '1', 'if', 'stop', '==', '0', 'else', 'stop', '-', 'start', 'logs', '=', 'db', '(', "'SELECT * FROM log WHERE job_id=?x '", "'ORDER BY id LIMIT ?s OFFSET ?s'", ',', 'job_id', ',', 'limit', ',', 'start', ')', "# NB: .isoformat() returns a string like '2016-08-29T15:42:34.984756'", "# we consider only the first 22 characters, i.e. '2016-08-29T15:42:34.98'", 'return', '[', '[', 'log', '.', 'timestamp', '.', 'isoformat', '(', ')', '[', ':', '22', ']', ',', 'log', '.', 'level', ',', 'log', '.', 'process', ',', 'log', '.', 'message', ']', 'for', 'log', 'in', 'logs', ']'] | Get a slice of the calculation log as a JSON list of rows
:param db:
a :class:`openquake.server.dbapi.Db` instance
:param job_id:
a job ID
:param start:
start of the slice
:param stop:
end of the slice (the last element is excluded) | ['Get', 'a', 'slice', 'of', 'the', 'calculation', 'log', 'as', 'a', 'JSON', 'list', 'of', 'rows'] | train | https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/server/db/actions.py#L588-L610 |
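The only subtle step in get_log_slice is mapping the Python-style [start:stop) slice onto SQL LIMIT/OFFSET, with stop == 0 meaning no upper bound. The mapping, sanity-checked against a plain list standing in for the ordered log rows:

def slice_to_limit_offset(start, stop):
    # stop == 0 means "to the end", which SQLite expresses as LIMIT -1
    limit = -1 if stop == 0 else stop - start
    return limit, start

rows = list(range(10))                 # stand-in for log rows ordered by id
for start, stop in [(2, 5), (7, 0)]:
    limit, offset = slice_to_limit_offset(start, stop)
    selected = rows[offset:] if limit == -1 else rows[offset:offset + limit]
    print(start, stop, '->', selected)
# 2 5 -> [2, 3, 4]
# 7 0 -> [7, 8, 9]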
5,959 | hellosign/hellosign-python-sdk | hellosign_sdk/resource/signature_request.py | SignatureRequest.find_response_component | def find_response_component(self, api_id=None, signature_id=None):
''' Find one or many response components.
Args:
api_id (str): Api id associated with the component(s) to be retrieved.
signature_id (str): Signature id associated with the component(s) to be retrieved.
Returns:
A list of dictionaries containing component data
'''
if not api_id and not signature_id:
raise ValueError('At least one of api_id and signature_id is required')
components = list()
if self.response_data:
for component in self.response_data:
if (api_id and component['api_id']) == api_id or (signature_id and component['signature_id'] == signature_id):
components.append(component)
return components | python | def find_response_component(self, api_id=None, signature_id=None):
''' Find one or many response components.
Args:
api_id (str): Api id associated with the component(s) to be retrieved.
signature_id (str): Signature id associated with the component(s) to be retrieved.
Returns:
A list of dictionaries containing component data
'''
if not api_id and not signature_id:
raise ValueError('At least one of api_id and signature_id is required')
components = list()
if self.response_data:
for component in self.response_data:
if (api_id and component['api_id']) == api_id or (signature_id and component['signature_id'] == signature_id):
components.append(component)
return components | ['def', 'find_response_component', '(', 'self', ',', 'api_id', '=', 'None', ',', 'signature_id', '=', 'None', ')', ':', 'if', 'not', 'api_id', 'and', 'not', 'signature_id', ':', 'raise', 'ValueError', '(', "'At least one of api_id and signature_id is required'", ')', 'components', '=', 'list', '(', ')', 'if', 'self', '.', 'response_data', ':', 'for', 'component', 'in', 'self', '.', 'response_data', ':', 'if', '(', 'api_id', 'and', 'component', '[', "'api_id'", ']', ')', '==', 'api_id', 'or', '(', 'signature_id', 'and', 'component', '[', "'signature_id'", ']', '==', 'signature_id', ')', ':', 'components', '.', 'append', '(', 'component', ')', 'return', 'components'] | Find one or many repsonse components.
Args:
api_id (str): Api id associated with the component(s) to be retrieved.
signature_id (str): Signature id associated with the component(s) to be retrieved.
Returns:
A list of dictionaries containing component data | ['Find', 'one', 'or', 'many', 'repsonse', 'components', '.'] | train | https://github.com/hellosign/hellosign-python-sdk/blob/4325a29ad5766380a214eac3914511f62f7ecba4/hellosign_sdk/resource/signature_request.py#L114-L137 |
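find_response_component filters self.response_data on api_id or signature_id. A standalone restatement of the documented filter with explicit None checks (the sample dictionaries are invented, and note that the original expression groups its comparison slightly differently):

def find_components(response_data, api_id=None, signature_id=None):
    # same precondition as the method: at least one selector is required
    if not api_id and not signature_id:
        raise ValueError('At least one of api_id and signature_id is required')
    return [c for c in (response_data or [])
            if (api_id is not None and c.get('api_id') == api_id)
            or (signature_id is not None and c.get('signature_id') == signature_id)]

data = [{'api_id': 'a1', 'signature_id': 's1', 'value': 'x'},
        {'api_id': 'a2', 'signature_id': 's2', 'value': 'y'}]
print(find_components(data, signature_id='s2'))  # only the 'a2'/'s2' entry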
5,960 | SFDO-Tooling/CumulusCI | cumulusci/core/keychain/BaseProjectKeychain.py | BaseProjectKeychain.set_service | def set_service(self, name, service_config, project=False):
""" Store a ServiceConfig in the keychain """
if not self.project_config.services or name not in self.project_config.services:
self._raise_service_not_valid(name)
self._validate_service(name, service_config)
self._set_service(name, service_config, project)
self._load_services() | python | def set_service(self, name, service_config, project=False):
""" Store a ServiceConfig in the keychain """
if not self.project_config.services or name not in self.project_config.services:
self._raise_service_not_valid(name)
self._validate_service(name, service_config)
self._set_service(name, service_config, project)
self._load_services() | ['def', 'set_service', '(', 'self', ',', 'name', ',', 'service_config', ',', 'project', '=', 'False', ')', ':', 'if', 'not', 'self', '.', 'project_config', '.', 'services', 'or', 'name', 'not', 'in', 'self', '.', 'project_config', '.', 'services', ':', 'self', '.', '_raise_service_not_valid', '(', 'name', ')', 'self', '.', '_validate_service', '(', 'name', ',', 'service_config', ')', 'self', '.', '_set_service', '(', 'name', ',', 'service_config', ',', 'project', ')', 'self', '.', '_load_services', '(', ')'] | Store a ServiceConfig in the keychain | ['Store', 'a', 'ServiceConfig', 'in', 'the', 'keychain'] | train | https://github.com/SFDO-Tooling/CumulusCI/blob/e19047921ca771a297e045f22f0bb201651bb6f7/cumulusci/core/keychain/BaseProjectKeychain.py#L184-L190 |
5,961 | senaite/senaite.core | bika/lims/exportimport/instruments/thermoscientific/multiskan/__init__.py | ThermoScientificMultiskanCSVParser.parse_data | def parse_data(self, sline):
"""This function builds the addRawResults dictionary using the header values of the labels section
as sample Ids.
"""
if sline[0] == '':
return 0
for idx, label in enumerate(self._labels_values[sline[0]]):
if label != '':
self._addRawResult(label.split(' ')[0], {self.analysiskey: sline[1:][idx]}, False)
return 0 | python | def parse_data(self, sline):
"""This function builds the addRawResults dictionary using the header values of the labels section
as sample Ids.
"""
if sline[0] == '':
return 0
for idx, label in enumerate(self._labels_values[sline[0]]):
if label != '':
self._addRawResult(label.split(' ')[0], {self.analysiskey: sline[1:][idx]}, False)
return 0 | ['def', 'parse_data', '(', 'self', ',', 'sline', ')', ':', 'if', 'sline', '[', '0', ']', '==', "''", ':', 'return', '0', 'for', 'idx', ',', 'label', 'in', 'enumerate', '(', 'self', '.', '_labels_values', '[', 'sline', '[', '0', ']', ']', ')', ':', 'if', 'label', '!=', "''", ':', 'self', '.', '_addRawResult', '(', 'label', '.', 'split', '(', "' '", ')', '[', '0', ']', ',', '{', 'self', '.', 'analysiskey', ':', 'sline', '[', '1', ':', ']', '[', 'idx', ']', '}', ',', 'False', ')', 'return', '0'] | This function builds the addRawResults dictionary using the header values of the labels section
as sample Ids. | ['This', 'function', 'builds', 'the', 'addRawResults', 'dictionary', 'using', 'the', 'header', 'values', 'of', 'the', 'labels', 'section', 'as', 'sample', 'Ids', '.'] | train | https://github.com/senaite/senaite.core/blob/7602ce2ea2f9e81eb34e20ce17b98a3e70713f85/bika/lims/exportimport/instruments/thermoscientific/multiskan/__init__.py#L63-L72 |
5,962 | sdss/tree | python/tree/tree.py | Tree.branch_out | def branch_out(self, limb=None):
''' Set the individual section branches
This adds the various sections of the config file into the
tree environment for access later. Optionally, you can specify a specific
branch. This does not yet load them into the os environment.
Parameters:
limb (str/list):
The name of the section of the config to add into the environ
or a list of strings
'''
# Filter on sections
if not limb:
limbs = self._cfg.sections()
else:
# we must have the general always + section
limb = limb if isinstance(limb, list) else [limb]
limbs = ['general']
limbs.extend(limb)
# add all limbs into the tree environ
for leaf in limbs:
leaf = leaf if leaf in self._cfg.sections() else leaf.upper()
self.environ[leaf] = OrderedDict()
options = self._cfg.options(leaf)
for opt in options:
if opt in self.environ['default']:
continue
val = self._cfg.get(leaf, opt)
if val.find(self._file_replace) == 0:
val = val.replace(self._file_replace, self.sasbasedir)
self.environ[leaf][opt] = val | python | def branch_out(self, limb=None):
''' Set the individual section branches
This adds the various sections of the config file into the
tree environment for access later. Optionally, you can specify a specific
branch. This does not yet load them into the os environment.
Parameters:
limb (str/list):
The name of the section of the config to add into the environ
or a list of strings
'''
# Filter on sections
if not limb:
limbs = self._cfg.sections()
else:
# we must have the general always + section
limb = limb if isinstance(limb, list) else [limb]
limbs = ['general']
limbs.extend(limb)
# add all limbs into the tree environ
for leaf in limbs:
leaf = leaf if leaf in self._cfg.sections() else leaf.upper()
self.environ[leaf] = OrderedDict()
options = self._cfg.options(leaf)
for opt in options:
if opt in self.environ['default']:
continue
val = self._cfg.get(leaf, opt)
if val.find(self._file_replace) == 0:
val = val.replace(self._file_replace, self.sasbasedir)
self.environ[leaf][opt] = val | ['def', 'branch_out', '(', 'self', ',', 'limb', '=', 'None', ')', ':', '# Filter on sections', 'if', 'not', 'limb', ':', 'limbs', '=', 'self', '.', '_cfg', '.', 'sections', '(', ')', 'else', ':', '# we must have the general always + secton', 'limb', '=', 'limb', 'if', 'isinstance', '(', 'limb', ',', 'list', ')', 'else', '[', 'limb', ']', 'limbs', '=', '[', "'general'", ']', 'limbs', '.', 'extend', '(', 'limb', ')', '# add all limbs into the tree environ', 'for', 'leaf', 'in', 'limbs', ':', 'leaf', '=', 'leaf', 'if', 'leaf', 'in', 'self', '.', '_cfg', '.', 'sections', '(', ')', 'else', 'leaf', '.', 'upper', '(', ')', 'self', '.', 'environ', '[', 'leaf', ']', '=', 'OrderedDict', '(', ')', 'options', '=', 'self', '.', '_cfg', '.', 'options', '(', 'leaf', ')', 'for', 'opt', 'in', 'options', ':', 'if', 'opt', 'in', 'self', '.', 'environ', '[', "'default'", ']', ':', 'continue', 'val', '=', 'self', '.', '_cfg', '.', 'get', '(', 'leaf', ',', 'opt', ')', 'if', 'val', '.', 'find', '(', 'self', '.', '_file_replace', ')', '==', '0', ':', 'val', '=', 'val', '.', 'replace', '(', 'self', '.', '_file_replace', ',', 'self', '.', 'sasbasedir', ')', 'self', '.', 'environ', '[', 'leaf', ']', '[', 'opt', ']', '=', 'val'] | Set the individual section branches
This adds the various sections of the config file into the
tree environment for access later. Optionally, you can specify a specific
branch. This does not yet load them into the os environment.
Parameters:
limb (str/list):
The name of the section of the config to add into the environ
or a list of strings | ['Set', 'the', 'individual', 'section', 'branches'] | train | https://github.com/sdss/tree/blob/f61fe0876c138ccb61874912d4b8590dadfa835c/python/tree/tree.py#L137-L172 |
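branch_out copies selected config sections into an ordered environ mapping and expands a filesystem placeholder against sasbasedir. A reduced sketch of that walk with configparser; the section names, the placeholder token and the /sas path are invented, not the tree package's real values:

import configparser
from collections import OrderedDict

cfg = configparser.ConfigParser()
cfg.read_string("""
[general]
sas_base_dir = SAS_BASE_DIR/

[MANGA]
manga_root = SAS_BASE_DIR/manga
manga_spectro = SAS_BASE_DIR/manga/spectro
""")

sasbasedir = '/sas'
environ = OrderedDict()
for section in ('general', 'MANGA'):           # 'general' is always included
    environ[section] = OrderedDict()
    for opt in cfg.options(section):
        val = cfg.get(section, opt)
        if val.startswith('SAS_BASE_DIR/'):    # stand-in for the file-replace token
            val = val.replace('SAS_BASE_DIR/', sasbasedir + '/', 1)
        environ[section][opt] = val
print(environ['MANGA']['manga_root'])          # /sas/manga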
5,963 | lreis2415/PyGeoC | pygeoc/postTauDEM.py | StreamnetUtil.serialize_streamnet | def serialize_streamnet(streamnet_file, output_reach_file):
"""Eliminate reach with zero length and return the reach ID map.
Args:
streamnet_file: original stream net ESRI shapefile
output_reach_file: serialized stream net, ESRI shapefile
Returns:
id pairs {origin: newly assigned}
"""
FileClass.copy_files(streamnet_file, output_reach_file)
ds_reach = ogr_Open(output_reach_file, update=True)
layer_reach = ds_reach.GetLayer(0)
layer_def = layer_reach.GetLayerDefn()
i_link = layer_def.GetFieldIndex(FLD_LINKNO)
i_link_downslope = layer_def.GetFieldIndex(FLD_DSLINKNO)
i_len = layer_def.GetFieldIndex(REACH_LENGTH)
old_id_list = []
# there are some reaches with zero length.
# this program will remove these zero-length reaches
# output_dic is used to store the downstream reaches of these zero-length
# reaches
output_dic = {}
ft = layer_reach.GetNextFeature()
while ft is not None:
link_id = ft.GetFieldAsInteger(i_link)
reach_len = ft.GetFieldAsDouble(i_len)
if link_id not in old_id_list:
if reach_len < DELTA:
downstream_id = ft.GetFieldAsInteger(i_link_downslope)
output_dic[link_id] = downstream_id
else:
old_id_list.append(link_id)
ft = layer_reach.GetNextFeature()
old_id_list.sort()
id_map = {}
for i, old_id in enumerate(old_id_list):
id_map[old_id] = i + 1
# print(id_map)
# change old ID to new ID
layer_reach.ResetReading()
ft = layer_reach.GetNextFeature()
while ft is not None:
link_id = ft.GetFieldAsInteger(i_link)
if link_id not in id_map:
layer_reach.DeleteFeature(ft.GetFID())
ft = layer_reach.GetNextFeature()
continue
ds_id = ft.GetFieldAsInteger(i_link_downslope)
ds_id = output_dic.get(ds_id, ds_id)
ds_id = output_dic.get(ds_id, ds_id)
ft.SetField(FLD_LINKNO, id_map[link_id])
if ds_id in id_map:
ft.SetField(FLD_DSLINKNO, id_map[ds_id])
else:
# print(ds_id)
ft.SetField(FLD_DSLINKNO, -1)
layer_reach.SetFeature(ft)
ft = layer_reach.GetNextFeature()
ds_reach.ExecuteSQL(str('REPACK reach'))
layer_reach.SyncToDisk()
ds_reach.Destroy()
del ds_reach
return id_map | python | def serialize_streamnet(streamnet_file, output_reach_file):
"""Eliminate reach with zero length and return the reach ID map.
Args:
streamnet_file: original stream net ESRI shapefile
output_reach_file: serialized stream net, ESRI shapefile
Returns:
id pairs {origin: newly assigned}
"""
FileClass.copy_files(streamnet_file, output_reach_file)
ds_reach = ogr_Open(output_reach_file, update=True)
layer_reach = ds_reach.GetLayer(0)
layer_def = layer_reach.GetLayerDefn()
i_link = layer_def.GetFieldIndex(FLD_LINKNO)
i_link_downslope = layer_def.GetFieldIndex(FLD_DSLINKNO)
i_len = layer_def.GetFieldIndex(REACH_LENGTH)
old_id_list = []
# there are some reaches with zero length.
# this program will remove these zero-length reaches
# output_dic is used to store the downstream reaches of these zero-length
# reaches
output_dic = {}
ft = layer_reach.GetNextFeature()
while ft is not None:
link_id = ft.GetFieldAsInteger(i_link)
reach_len = ft.GetFieldAsDouble(i_len)
if link_id not in old_id_list:
if reach_len < DELTA:
downstream_id = ft.GetFieldAsInteger(i_link_downslope)
output_dic[link_id] = downstream_id
else:
old_id_list.append(link_id)
ft = layer_reach.GetNextFeature()
old_id_list.sort()
id_map = {}
for i, old_id in enumerate(old_id_list):
id_map[old_id] = i + 1
# print(id_map)
# change old ID to new ID
layer_reach.ResetReading()
ft = layer_reach.GetNextFeature()
while ft is not None:
link_id = ft.GetFieldAsInteger(i_link)
if link_id not in id_map:
layer_reach.DeleteFeature(ft.GetFID())
ft = layer_reach.GetNextFeature()
continue
ds_id = ft.GetFieldAsInteger(i_link_downslope)
ds_id = output_dic.get(ds_id, ds_id)
ds_id = output_dic.get(ds_id, ds_id)
ft.SetField(FLD_LINKNO, id_map[link_id])
if ds_id in id_map:
ft.SetField(FLD_DSLINKNO, id_map[ds_id])
else:
# print(ds_id)
ft.SetField(FLD_DSLINKNO, -1)
layer_reach.SetFeature(ft)
ft = layer_reach.GetNextFeature()
ds_reach.ExecuteSQL(str('REPACK reach'))
layer_reach.SyncToDisk()
ds_reach.Destroy()
del ds_reach
return id_map | ['def', 'serialize_streamnet', '(', 'streamnet_file', ',', 'output_reach_file', ')', ':', 'FileClass', '.', 'copy_files', '(', 'streamnet_file', ',', 'output_reach_file', ')', 'ds_reach', '=', 'ogr_Open', '(', 'output_reach_file', ',', 'update', '=', 'True', ')', 'layer_reach', '=', 'ds_reach', '.', 'GetLayer', '(', '0', ')', 'layer_def', '=', 'layer_reach', '.', 'GetLayerDefn', '(', ')', 'i_link', '=', 'layer_def', '.', 'GetFieldIndex', '(', 'FLD_LINKNO', ')', 'i_link_downslope', '=', 'layer_def', '.', 'GetFieldIndex', '(', 'FLD_DSLINKNO', ')', 'i_len', '=', 'layer_def', '.', 'GetFieldIndex', '(', 'REACH_LENGTH', ')', 'old_id_list', '=', '[', ']', '# there are some reaches with zero length.', '# this program will remove these zero-length reaches', '# output_dic is used to store the downstream reaches of these zero-length', '# reaches', 'output_dic', '=', '{', '}', 'ft', '=', 'layer_reach', '.', 'GetNextFeature', '(', ')', 'while', 'ft', 'is', 'not', 'None', ':', 'link_id', '=', 'ft', '.', 'GetFieldAsInteger', '(', 'i_link', ')', 'reach_len', '=', 'ft', '.', 'GetFieldAsDouble', '(', 'i_len', ')', 'if', 'link_id', 'not', 'in', 'old_id_list', ':', 'if', 'reach_len', '<', 'DELTA', ':', 'downstream_id', '=', 'ft', '.', 'GetFieldAsInteger', '(', 'i_link_downslope', ')', 'output_dic', '[', 'link_id', ']', '=', 'downstream_id', 'else', ':', 'old_id_list', '.', 'append', '(', 'link_id', ')', 'ft', '=', 'layer_reach', '.', 'GetNextFeature', '(', ')', 'old_id_list', '.', 'sort', '(', ')', 'id_map', '=', '{', '}', 'for', 'i', ',', 'old_id', 'in', 'enumerate', '(', 'old_id_list', ')', ':', 'id_map', '[', 'old_id', ']', '=', 'i', '+', '1', '# print(id_map)', '# change old ID to new ID', 'layer_reach', '.', 'ResetReading', '(', ')', 'ft', '=', 'layer_reach', '.', 'GetNextFeature', '(', ')', 'while', 'ft', 'is', 'not', 'None', ':', 'link_id', '=', 'ft', '.', 'GetFieldAsInteger', '(', 'i_link', ')', 'if', 'link_id', 'not', 'in', 'id_map', ':', 'layer_reach', '.', 'DeleteFeature', '(', 'ft', '.', 'GetFID', '(', ')', ')', 'ft', '=', 'layer_reach', '.', 'GetNextFeature', '(', ')', 'continue', 'ds_id', '=', 'ft', '.', 'GetFieldAsInteger', '(', 'i_link_downslope', ')', 'ds_id', '=', 'output_dic', '.', 'get', '(', 'ds_id', ',', 'ds_id', ')', 'ds_id', '=', 'output_dic', '.', 'get', '(', 'ds_id', ',', 'ds_id', ')', 'ft', '.', 'SetField', '(', 'FLD_LINKNO', ',', 'id_map', '[', 'link_id', ']', ')', 'if', 'ds_id', 'in', 'id_map', ':', 'ft', '.', 'SetField', '(', 'FLD_DSLINKNO', ',', 'id_map', '[', 'ds_id', ']', ')', 'else', ':', '# print(ds_id)', 'ft', '.', 'SetField', '(', 'FLD_DSLINKNO', ',', '-', '1', ')', 'layer_reach', '.', 'SetFeature', '(', 'ft', ')', 'ft', '=', 'layer_reach', '.', 'GetNextFeature', '(', ')', 'ds_reach', '.', 'ExecuteSQL', '(', 'str', '(', "'REPACK reach'", ')', ')', 'layer_reach', '.', 'SyncToDisk', '(', ')', 'ds_reach', '.', 'Destroy', '(', ')', 'del', 'ds_reach', 'return', 'id_map'] | Eliminate reach with zero length and return the reach ID map.
Args:
streamnet_file: original stream net ESRI shapefile
output_reach_file: serialized stream net, ESRI shapefile
Returns:
id pairs {origin: newly assigned} | ['Eliminate', 'reach', 'with', 'zero', 'length', 'and', 'return', 'the', 'reach', 'ID', 'map', '.', 'Args', ':', 'streamnet_file', ':', 'original', 'stream', 'net', 'ESRI', 'shapefile', 'output_reach_file', ':', 'serialized', 'stream', 'net', 'ESRI', 'shapefile'] | train | https://github.com/lreis2415/PyGeoC/blob/9a92d1a229bb74298e3c57f27c97079980b5f729/pygeoc/postTauDEM.py#L199-L266 |
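Stripped of the OGR bookkeeping, serialize_streamnet drops zero-length reaches, renumbers the survivors consecutively, and reroutes downstream links through the removed reaches. The renumbering core on plain tuples (the reach data below is invented):

DELTA = 1e-6
# (link_id, downstream_id, length) - reach 3 has zero length and must disappear
reaches = [(1, 3, 120.0), (2, 3, 85.0), (3, 4, 0.0), (4, -1, 200.0)]

# downstream link of every zero-length reach, used to bypass it
bypass = {lid: ds for lid, ds, length in reaches if length < DELTA}
kept = sorted(lid for lid, ds, length in reaches if length >= DELTA)
id_map = {old: new for new, old in enumerate(kept, start=1)}

serialized = []
for lid, ds, length in reaches:
    if lid not in id_map:
        continue                       # zero-length reach: dropped entirely
    ds = bypass.get(ds, ds)            # follow the bypass (applied twice, as in the original)
    ds = bypass.get(ds, ds)
    serialized.append((id_map[lid], id_map.get(ds, -1), length))
print(id_map)        # {1: 1, 2: 2, 4: 3}
print(serialized)    # [(1, 3, 120.0), (2, 3, 85.0), (3, -1, 200.0)]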
5,964 | turicas/rows | rows/plugins/txt.py | import_from_txt | def import_from_txt(
filename_or_fobj, encoding="utf-8", frame_style=FRAME_SENTINEL, *args, **kwargs
):
"""Return a rows.Table created from imported TXT file."""
# TODO: (maybe)
# enable parsing of non-fixed-width-columns
# with old algorithm - that would just split columns
# at the vertical separator character for the frame.
# (if doing so, include an optional parameter)
# Also, this fixes an outstanding unreported issue:
# trying to parse tables which fields values
# included a Pipe char - "|" - would silently
# yield bad results.
source = Source.from_file(filename_or_fobj, mode="rb", plugin_name="txt", encoding=encoding)
raw_contents = source.fobj.read().decode(encoding).rstrip("\n")
if frame_style is FRAME_SENTINEL:
frame_style = _guess_frame_style(raw_contents)
else:
frame_style = _parse_frame_style(frame_style)
contents = raw_contents.splitlines()
del raw_contents
if frame_style != "None":
contents = contents[1:-1]
del contents[1]
else:
# the table is possibly generated from another source.
# check if the line we reserve as a separator is really empty.
if not contents[1].strip():
del contents[1]
col_positions = _parse_col_positions(frame_style, contents[0])
table_rows = [
[
row[start + 1 : end].strip()
for start, end in zip(col_positions, col_positions[1:])
]
for row in contents
]
meta = {
"imported_from": "txt",
"source": source,
"frame_style": frame_style,
}
return create_table(table_rows, meta=meta, *args, **kwargs) | python | def import_from_txt(
filename_or_fobj, encoding="utf-8", frame_style=FRAME_SENTINEL, *args, **kwargs
):
"""Return a rows.Table created from imported TXT file."""
# TODO: (maybe)
# enable parsing of non-fixed-width-columns
# with old algorithm - that would just split columns
# at the vertical separator character for the frame.
# (if doing so, include an optional parameter)
# Also, this fixes an outstanding unreported issue:
# trying to parse tables which fields values
# included a Pipe char - "|" - would silently
# yield bad results.
source = Source.from_file(filename_or_fobj, mode="rb", plugin_name="txt", encoding=encoding)
raw_contents = source.fobj.read().decode(encoding).rstrip("\n")
if frame_style is FRAME_SENTINEL:
frame_style = _guess_frame_style(raw_contents)
else:
frame_style = _parse_frame_style(frame_style)
contents = raw_contents.splitlines()
del raw_contents
if frame_style != "None":
contents = contents[1:-1]
del contents[1]
else:
# the table is possibly generated from other source.
# check if the line we reserve as a separator is realy empty.
if not contents[1].strip():
del contents[1]
col_positions = _parse_col_positions(frame_style, contents[0])
table_rows = [
[
row[start + 1 : end].strip()
for start, end in zip(col_positions, col_positions[1:])
]
for row in contents
]
meta = {
"imported_from": "txt",
"source": source,
"frame_style": frame_style,
}
return create_table(table_rows, meta=meta, *args, **kwargs) | ['def', 'import_from_txt', '(', 'filename_or_fobj', ',', 'encoding', '=', '"utf-8"', ',', 'frame_style', '=', 'FRAME_SENTINEL', ',', '*', 'args', ',', '*', '*', 'kwargs', ')', ':', '# TODO: (maybe)', '# enable parsing of non-fixed-width-columns', '# with old algorithm - that would just split columns', '# at the vertical separator character for the frame.', '# (if doing so, include an optional parameter)', '# Also, this fixes an outstanding unreported issue:', '# trying to parse tables which fields values', '# included a Pipe char - "|" - would silently', '# yield bad results.', 'source', '=', 'Source', '.', 'from_file', '(', 'filename_or_fobj', ',', 'mode', '=', '"rb"', ',', 'plugin_name', '=', '"txt"', ',', 'encoding', '=', 'encoding', ')', 'raw_contents', '=', 'source', '.', 'fobj', '.', 'read', '(', ')', '.', 'decode', '(', 'encoding', ')', '.', 'rstrip', '(', '"\\n"', ')', 'if', 'frame_style', 'is', 'FRAME_SENTINEL', ':', 'frame_style', '=', '_guess_frame_style', '(', 'raw_contents', ')', 'else', ':', 'frame_style', '=', '_parse_frame_style', '(', 'frame_style', ')', 'contents', '=', 'raw_contents', '.', 'splitlines', '(', ')', 'del', 'raw_contents', 'if', 'frame_style', '!=', '"None"', ':', 'contents', '=', 'contents', '[', '1', ':', '-', '1', ']', 'del', 'contents', '[', '1', ']', 'else', ':', '# the table is possibly generated from other source.', '# check if the line we reserve as a separator is realy empty.', 'if', 'not', 'contents', '[', '1', ']', '.', 'strip', '(', ')', ':', 'del', 'contents', '[', '1', ']', 'col_positions', '=', '_parse_col_positions', '(', 'frame_style', ',', 'contents', '[', '0', ']', ')', 'table_rows', '=', '[', '[', 'row', '[', 'start', '+', '1', ':', 'end', ']', '.', 'strip', '(', ')', 'for', 'start', ',', 'end', 'in', 'zip', '(', 'col_positions', ',', 'col_positions', '[', '1', ':', ']', ')', ']', 'for', 'row', 'in', 'contents', ']', 'meta', '=', '{', '"imported_from"', ':', '"txt"', ',', '"source"', ':', 'source', ',', '"frame_style"', ':', 'frame_style', ',', '}', 'return', 'create_table', '(', 'table_rows', ',', 'meta', '=', 'meta', ',', '*', 'args', ',', '*', '*', 'kwargs', ')'] | Return a rows.Table created from imported TXT file. | ['Return', 'a', 'rows', '.', 'Table', 'created', 'from', 'imported', 'TXT', 'file', '.'] | train | https://github.com/turicas/rows/blob/c74da41ae9ed091356b803a64f8a30c641c5fc45/rows/plugins/txt.py#L130-L179 |
5,965 | OLC-Bioinformatics/sipprverse | pointsippr/pointsippr.py | PointSippr.run_pointfinder | def run_pointfinder(self):
"""
Run PointFinder on the FASTA sequences extracted from the raw reads
"""
logging.info('Running PointFinder on FASTA files')
for i in range(len(self.runmetadata.samples)):
# Start threads
threads = Thread(target=self.pointfinder_threads, args=())
            # Set the daemon to True so the worker thread does not block interpreter exit
threads.setDaemon(True)
# Start the threading
threads.start()
# PointFinder requires the path to the blastn executable
blast_path = shutil.which('blastn')
for sample in self.runmetadata.samples:
# Ensure that the attribute storing the name of the FASTA file has been created
if GenObject.isattr(sample[self.analysistype], 'pointfinderfasta'):
sample[self.analysistype].pointfinder_outputs = os.path.join(sample[self.analysistype].outputdir,
'pointfinder_outputs')
# Don't run the analyses if the outputs have already been created
if not os.path.isfile(os.path.join(sample[self.analysistype].pointfinder_outputs,
'{samplename}_blastn_results.tsv'.format(samplename=sample.name))):
make_path(sample[self.analysistype].pointfinder_outputs)
# Create and run the PointFinder system call
pointfinder_cmd = \
'python -m pointfinder.PointFinder -i {input} -s {species} -p {db_path} -m blastn ' \
'-o {output_dir} -m_p {blast_path}'\
.format(input=sample[self.analysistype].pointfinderfasta,
species=sample[self.analysistype].pointfindergenus,
db_path=self.targetpath,
output_dir=sample[self.analysistype].pointfinder_outputs,
blast_path=blast_path)
self.queue.put(pointfinder_cmd)
self.queue.join() | python | def run_pointfinder(self):
"""
Run PointFinder on the FASTA sequences extracted from the raw reads
"""
logging.info('Running PointFinder on FASTA files')
for i in range(len(self.runmetadata.samples)):
# Start threads
threads = Thread(target=self.pointfinder_threads, args=())
            # Set the daemon to True so the worker thread does not block interpreter exit
threads.setDaemon(True)
# Start the threading
threads.start()
# PointFinder requires the path to the blastn executable
blast_path = shutil.which('blastn')
for sample in self.runmetadata.samples:
# Ensure that the attribute storing the name of the FASTA file has been created
if GenObject.isattr(sample[self.analysistype], 'pointfinderfasta'):
sample[self.analysistype].pointfinder_outputs = os.path.join(sample[self.analysistype].outputdir,
'pointfinder_outputs')
# Don't run the analyses if the outputs have already been created
if not os.path.isfile(os.path.join(sample[self.analysistype].pointfinder_outputs,
'{samplename}_blastn_results.tsv'.format(samplename=sample.name))):
make_path(sample[self.analysistype].pointfinder_outputs)
# Create and run the PointFinder system call
pointfinder_cmd = \
'python -m pointfinder.PointFinder -i {input} -s {species} -p {db_path} -m blastn ' \
'-o {output_dir} -m_p {blast_path}'\
.format(input=sample[self.analysistype].pointfinderfasta,
species=sample[self.analysistype].pointfindergenus,
db_path=self.targetpath,
output_dir=sample[self.analysistype].pointfinder_outputs,
blast_path=blast_path)
self.queue.put(pointfinder_cmd)
self.queue.join() | ['def', 'run_pointfinder', '(', 'self', ')', ':', 'logging', '.', 'info', '(', "'Running PointFinder on FASTA files'", ')', 'for', 'i', 'in', 'range', '(', 'len', '(', 'self', '.', 'runmetadata', '.', 'samples', ')', ')', ':', '# Start threads', 'threads', '=', 'Thread', '(', 'target', '=', 'self', '.', 'pointfinder_threads', ',', 'args', '=', '(', ')', ')', '# Set the daemon to True - something to do with thread management', 'threads', '.', 'setDaemon', '(', 'True', ')', '# Start the threading', 'threads', '.', 'start', '(', ')', '# PointFinder requires the path to the blastn executable', 'blast_path', '=', 'shutil', '.', 'which', '(', "'blastn'", ')', 'for', 'sample', 'in', 'self', '.', 'runmetadata', '.', 'samples', ':', '# Ensure that the attribute storing the name of the FASTA file has been created', 'if', 'GenObject', '.', 'isattr', '(', 'sample', '[', 'self', '.', 'analysistype', ']', ',', "'pointfinderfasta'", ')', ':', 'sample', '[', 'self', '.', 'analysistype', ']', '.', 'pointfinder_outputs', '=', 'os', '.', 'path', '.', 'join', '(', 'sample', '[', 'self', '.', 'analysistype', ']', '.', 'outputdir', ',', "'pointfinder_outputs'", ')', "# Don't run the analyses if the outputs have already been created", 'if', 'not', 'os', '.', 'path', '.', 'isfile', '(', 'os', '.', 'path', '.', 'join', '(', 'sample', '[', 'self', '.', 'analysistype', ']', '.', 'pointfinder_outputs', ',', "'{samplename}_blastn_results.tsv'", '.', 'format', '(', 'samplename', '=', 'sample', '.', 'name', ')', ')', ')', ':', 'make_path', '(', 'sample', '[', 'self', '.', 'analysistype', ']', '.', 'pointfinder_outputs', ')', '# Create and run the PointFinder system call', 'pointfinder_cmd', '=', "'python -m pointfinder.PointFinder -i {input} -s {species} -p {db_path} -m blastn '", "'-o {output_dir} -m_p {blast_path}'", '.', 'format', '(', 'input', '=', 'sample', '[', 'self', '.', 'analysistype', ']', '.', 'pointfinderfasta', ',', 'species', '=', 'sample', '[', 'self', '.', 'analysistype', ']', '.', 'pointfindergenus', ',', 'db_path', '=', 'self', '.', 'targetpath', ',', 'output_dir', '=', 'sample', '[', 'self', '.', 'analysistype', ']', '.', 'pointfinder_outputs', ',', 'blast_path', '=', 'blast_path', ')', 'self', '.', 'queue', '.', 'put', '(', 'pointfinder_cmd', ')', 'self', '.', 'queue', '.', 'join', '(', ')'] | Run PointFinder on the FASTA sequences extracted from the raw reads | ['Run', 'PointFinder', 'on', 'the', 'FASTA', 'sequences', 'extracted', 'from', 'the', 'raw', 'reads'] | train | https://github.com/OLC-Bioinformatics/sipprverse/blob/d4f10cdf8e1a39dac0953db61c21c97efc6006de/pointsippr/pointsippr.py#L66-L99 |
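A standalone, stdlib-only sketch of the producer/consumer pattern run_pointfinder relies on: daemon worker threads consume shell commands from a queue while the main thread blocks on queue.join(). The echoed commands below are placeholders, not real PointFinder invocations.

import subprocess
from queue import Queue
from threading import Thread

queue = Queue()

def worker():
    # consume commands until the program exits (daemon threads do not block exit)
    while True:
        cmd = queue.get()
        try:
            subprocess.call(cmd, shell=True)
        finally:
            queue.task_done()

for _ in range(4):
    Thread(target=worker, daemon=True).start()

for sample in ('sample_a', 'sample_b'):
    queue.put('echo PointFinder run for {}'.format(sample))
queue.join()  # wait until every queued command has been processed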
5,966 | hydpy-dev/hydpy | hydpy/core/timetools.py | Date.from_cfunits | def from_cfunits(cls, units) -> 'Date':
"""Return a |Date| object representing the reference date of the
given `units` string agreeing with the NetCDF-CF conventions.
The following example string is taken from the `Time Coordinate`_
chapter of the NetCDF-CF conventions documentation (modified).
Note that the first entry (the unit) is ignored:
>>> from hydpy import Date
>>> Date.from_cfunits('seconds since 1992-10-8 15:15:42 -6:00')
Date('1992-10-08 22:15:42')
>>> Date.from_cfunits(' day since 1992-10-8 15:15:00')
Date('1992-10-08 15:15:00')
>>> Date.from_cfunits('seconds since 1992-10-8 -6:00')
Date('1992-10-08 07:00:00')
>>> Date.from_cfunits('m since 1992-10-8')
Date('1992-10-08 00:00:00')
Without modification, when "0" is included as the decimal fractions
of a second, the example string from `Time Coordinate`_ can also
be passed. However, fractions different from "0" result in
an error:
>>> Date.from_cfunits('seconds since 1992-10-8 15:15:42.')
Date('1992-10-08 15:15:42')
>>> Date.from_cfunits('seconds since 1992-10-8 15:15:42.00')
Date('1992-10-08 15:15:42')
>>> Date.from_cfunits('seconds since 1992-10-8 15:15:42. -6:00')
Date('1992-10-08 22:15:42')
>>> Date.from_cfunits('seconds since 1992-10-8 15:15:42.0 -6:00')
Date('1992-10-08 22:15:42')
>>> Date.from_cfunits('seconds since 1992-10-8 15:15:42.005 -6:00')
Traceback (most recent call last):
...
ValueError: While trying to parse the date of the NetCDF-CF "units" \
string `seconds since 1992-10-8 15:15:42.005 -6:00`, the following error \
occurred: No other decimal fraction of a second than "0" allowed.
"""
try:
string = units[units.find('since')+6:]
idx = string.find('.')
if idx != -1:
jdx = None
for jdx, char in enumerate(string[idx+1:]):
if not char.isnumeric():
break
if char != '0':
raise ValueError(
'No other decimal fraction of a second '
'than "0" allowed.')
else:
if jdx is None:
jdx = idx+1
else:
jdx += 1
string = f'{string[:idx]}{string[idx+jdx+1:]}'
return cls(string)
except BaseException:
objecttools.augment_excmessage(
f'While trying to parse the date of the NetCDF-CF "units" '
f'string `{units}`') | python | def from_cfunits(cls, units) -> 'Date':
"""Return a |Date| object representing the reference date of the
given `units` string agreeing with the NetCDF-CF conventions.
The following example string is taken from the `Time Coordinate`_
chapter of the NetCDF-CF conventions documentation (modified).
Note that the first entry (the unit) is ignored:
>>> from hydpy import Date
>>> Date.from_cfunits('seconds since 1992-10-8 15:15:42 -6:00')
Date('1992-10-08 22:15:42')
>>> Date.from_cfunits(' day since 1992-10-8 15:15:00')
Date('1992-10-08 15:15:00')
>>> Date.from_cfunits('seconds since 1992-10-8 -6:00')
Date('1992-10-08 07:00:00')
>>> Date.from_cfunits('m since 1992-10-8')
Date('1992-10-08 00:00:00')
Without modification, when "0" is included as the decimal fractions
of a second, the example string from `Time Coordinate`_ can also
be passed. However, fractions different from "0" result in
an error:
>>> Date.from_cfunits('seconds since 1992-10-8 15:15:42.')
Date('1992-10-08 15:15:42')
>>> Date.from_cfunits('seconds since 1992-10-8 15:15:42.00')
Date('1992-10-08 15:15:42')
>>> Date.from_cfunits('seconds since 1992-10-8 15:15:42. -6:00')
Date('1992-10-08 22:15:42')
>>> Date.from_cfunits('seconds since 1992-10-8 15:15:42.0 -6:00')
Date('1992-10-08 22:15:42')
>>> Date.from_cfunits('seconds since 1992-10-8 15:15:42.005 -6:00')
Traceback (most recent call last):
...
ValueError: While trying to parse the date of the NetCDF-CF "units" \
string `seconds since 1992-10-8 15:15:42.005 -6:00`, the following error \
occurred: No other decimal fraction of a second than "0" allowed.
"""
try:
string = units[units.find('since')+6:]
idx = string.find('.')
if idx != -1:
jdx = None
for jdx, char in enumerate(string[idx+1:]):
if not char.isnumeric():
break
if char != '0':
raise ValueError(
'No other decimal fraction of a second '
'than "0" allowed.')
else:
if jdx is None:
jdx = idx+1
else:
jdx += 1
string = f'{string[:idx]}{string[idx+jdx+1:]}'
return cls(string)
except BaseException:
objecttools.augment_excmessage(
f'While trying to parse the date of the NetCDF-CF "units" '
f'string `{units}`') | ['def', 'from_cfunits', '(', 'cls', ',', 'units', ')', '->', "'Date'", ':', 'try', ':', 'string', '=', 'units', '[', 'units', '.', 'find', '(', "'since'", ')', '+', '6', ':', ']', 'idx', '=', 'string', '.', 'find', '(', "'.'", ')', 'if', 'idx', '!=', '-', '1', ':', 'jdx', '=', 'None', 'for', 'jdx', ',', 'char', 'in', 'enumerate', '(', 'string', '[', 'idx', '+', '1', ':', ']', ')', ':', 'if', 'not', 'char', '.', 'isnumeric', '(', ')', ':', 'break', 'if', 'char', '!=', "'0'", ':', 'raise', 'ValueError', '(', "'No other decimal fraction of a second '", '\'than "0" allowed.\'', ')', 'else', ':', 'if', 'jdx', 'is', 'None', ':', 'jdx', '=', 'idx', '+', '1', 'else', ':', 'jdx', '+=', '1', 'string', '=', "f'{string[:idx]}{string[idx+jdx+1:]}'", 'return', 'cls', '(', 'string', ')', 'except', 'BaseException', ':', 'objecttools', '.', 'augment_excmessage', '(', 'f\'While trying to parse the date of the NetCDF-CF "units" \'', "f'string `{units}`'", ')'] | Return a |Date| object representing the reference date of the
given `units` string agreeing with the NetCDF-CF conventions.
The following example string is taken from the `Time Coordinate`_
chapter of the NetCDF-CF conventions documentation (modified).
Note that the first entry (the unit) is ignored:
>>> from hydpy import Date
>>> Date.from_cfunits('seconds since 1992-10-8 15:15:42 -6:00')
Date('1992-10-08 22:15:42')
>>> Date.from_cfunits(' day since 1992-10-8 15:15:00')
Date('1992-10-08 15:15:00')
>>> Date.from_cfunits('seconds since 1992-10-8 -6:00')
Date('1992-10-08 07:00:00')
>>> Date.from_cfunits('m since 1992-10-8')
Date('1992-10-08 00:00:00')
Without modification, when "0" is included as the decimal fractions
of a second, the example string from `Time Coordinate`_ can also
be passed. However, fractions different from "0" result in
an error:
>>> Date.from_cfunits('seconds since 1992-10-8 15:15:42.')
Date('1992-10-08 15:15:42')
>>> Date.from_cfunits('seconds since 1992-10-8 15:15:42.00')
Date('1992-10-08 15:15:42')
>>> Date.from_cfunits('seconds since 1992-10-8 15:15:42. -6:00')
Date('1992-10-08 22:15:42')
>>> Date.from_cfunits('seconds since 1992-10-8 15:15:42.0 -6:00')
Date('1992-10-08 22:15:42')
>>> Date.from_cfunits('seconds since 1992-10-8 15:15:42.005 -6:00')
Traceback (most recent call last):
...
ValueError: While trying to parse the date of the NetCDF-CF "units" \
string `seconds since 1992-10-8 15:15:42.005 -6:00`, the following error \
occurred: No other decimal fraction of a second than "0" allowed. | ['Return', 'a', '|Date|', 'object', 'representing', 'the', 'reference', 'date', 'of', 'the', 'given', 'units', 'string', 'agreeing', 'with', 'the', 'NetCDF', '-', 'CF', 'conventions', '.'] | train | https://github.com/hydpy-dev/hydpy/blob/1bc6a82cf30786521d86b36e27900c6717d3348d/hydpy/core/timetools.py#L301-L361 |
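A standalone illustration of the parsing idea in Date.from_cfunits: keep the text after 'since' and strip a trailing '.0'-style fraction of a second before handing the string to a date parser. The regular expression is an assumption for illustration, not hydpy's implementation.

import re

def reference_date_string(units):
    string = units[units.find('since') + 6:]                # text after 'since '
    return re.sub(r'(:\d{2})\.0*(?=\s|$)', r'\1', string)   # drop '.', '.0', '.00', ...

print(reference_date_string('seconds since 1992-10-8 15:15:42.0 -6:00'))
# -> '1992-10-8 15:15:42 -6:00'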
5,967 | junaruga/rpm-py-installer | install.py | Downloader.download_and_expand | def download_and_expand(self):
"""Download and expand RPM Python binding."""
top_dir_name = None
if self.git_branch:
# Download a source by git clone.
top_dir_name = self._download_and_expand_by_git()
else:
            # Download a source from the archive URL.
# Downloading the compressed archive is better than "git clone",
# because it is faster.
# If download failed due to URL not found, try "git clone".
try:
top_dir_name = self._download_and_expand_from_archive_url()
except RemoteFileNotFoundError:
Log.info('Try to download by git clone.')
top_dir_name = self._download_and_expand_by_git()
return top_dir_name | python | def download_and_expand(self):
"""Download and expand RPM Python binding."""
top_dir_name = None
if self.git_branch:
# Download a source by git clone.
top_dir_name = self._download_and_expand_by_git()
else:
            # Download a source from the archive URL.
# Downloading the compressed archive is better than "git clone",
# because it is faster.
# If download failed due to URL not found, try "git clone".
try:
top_dir_name = self._download_and_expand_from_archive_url()
except RemoteFileNotFoundError:
Log.info('Try to download by git clone.')
top_dir_name = self._download_and_expand_by_git()
return top_dir_name | ['def', 'download_and_expand', '(', 'self', ')', ':', 'top_dir_name', '=', 'None', 'if', 'self', '.', 'git_branch', ':', '# Download a source by git clone.', 'top_dir_name', '=', 'self', '.', '_download_and_expand_by_git', '(', ')', 'else', ':', '# Download a source from the arcihve URL.', '# Downloading the compressed archive is better than "git clone",', '# because it is faster.', '# If download failed due to URL not found, try "git clone".', 'try', ':', 'top_dir_name', '=', 'self', '.', '_download_and_expand_from_archive_url', '(', ')', 'except', 'RemoteFileNotFoundError', ':', 'Log', '.', 'info', '(', "'Try to download by git clone.'", ')', 'top_dir_name', '=', 'self', '.', '_download_and_expand_by_git', '(', ')', 'return', 'top_dir_name'] | Download and expand RPM Python binding. | ['Download', 'and', 'expand', 'RPM', 'Python', 'binding', '.'] | train | https://github.com/junaruga/rpm-py-installer/blob/12f45feb0ba533dec8d0d16ef1e9b7fb8cfbd4ed/install.py#L412-L428 |
5,968 | ausaki/subfinder | subfinder/utils.py | rm_subtitles | def rm_subtitles(path):
""" delete all subtitles in path recursively
"""
sub_exts = ['ass', 'srt', 'sub']
count = 0
for root, dirs, files in os.walk(path):
for f in files:
_, ext = os.path.splitext(f)
ext = ext[1:]
if ext in sub_exts:
p = os.path.join(root, f)
count += 1
print('Delete {}'.format(p))
os.remove(p)
return count | python | def rm_subtitles(path):
""" delete all subtitles in path recursively
"""
sub_exts = ['ass', 'srt', 'sub']
count = 0
for root, dirs, files in os.walk(path):
for f in files:
_, ext = os.path.splitext(f)
ext = ext[1:]
if ext in sub_exts:
p = os.path.join(root, f)
count += 1
print('Delete {}'.format(p))
os.remove(p)
return count | ['def', 'rm_subtitles', '(', 'path', ')', ':', 'sub_exts', '=', '[', "'ass'", ',', "'srt'", ',', "'sub'", ']', 'count', '=', '0', 'for', 'root', ',', 'dirs', ',', 'files', 'in', 'os', '.', 'walk', '(', 'path', ')', ':', 'for', 'f', 'in', 'files', ':', '_', ',', 'ext', '=', 'os', '.', 'path', '.', 'splitext', '(', 'f', ')', 'ext', '=', 'ext', '[', '1', ':', ']', 'if', 'ext', 'in', 'sub_exts', ':', 'p', '=', 'os', '.', 'path', '.', 'join', '(', 'root', ',', 'f', ')', 'count', '+=', '1', 'print', '(', "'Delete {}'", '.', 'format', '(', 'p', ')', ')', 'os', '.', 'remove', '(', 'p', ')', 'return', 'count'] | delete all subtitles in path recursively | ['delete', 'all', 'subtitles', 'in', 'path', 'recursively'] | train | https://github.com/ausaki/subfinder/blob/b74b79214f618c603a551b9334ebb110ccf9684c/subfinder/utils.py#L11-L25 |
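A hedged usage sketch for rm_subtitles; the directory path is a placeholder and the snippet assumes the subfinder package is importable.

from subfinder.utils import rm_subtitles

deleted = rm_subtitles('/tmp/videos')  # placeholder directory
print('{} subtitle files deleted'.format(deleted))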
5,969 | SheffieldML/GPyOpt | GPyOpt/optimization/optimizer.py | OptimizationWithContext.f_nc | def f_nc(self,x):
'''
Wrapper of *f*: takes an input x with size of the noncontext dimensions
expands it and evaluates the entire function.
'''
x = np.atleast_2d(x)
xx = self.context_manager._expand_vector(x)
if x.shape[0] == 1:
return self.f(xx)[0]
else:
return self.f(xx) | python | def f_nc(self,x):
'''
Wrapper of *f*: takes an input x with size of the noncontext dimensions
expands it and evaluates the entire function.
'''
x = np.atleast_2d(x)
xx = self.context_manager._expand_vector(x)
if x.shape[0] == 1:
return self.f(xx)[0]
else:
return self.f(xx) | ['def', 'f_nc', '(', 'self', ',', 'x', ')', ':', 'x', '=', 'np', '.', 'atleast_2d', '(', 'x', ')', 'xx', '=', 'self', '.', 'context_manager', '.', '_expand_vector', '(', 'x', ')', 'if', 'x', '.', 'shape', '[', '0', ']', '==', '1', ':', 'return', 'self', '.', 'f', '(', 'xx', ')', '[', '0', ']', 'else', ':', 'return', 'self', '.', 'f', '(', 'xx', ')'] | Wrapper of *f*: takes an input x with size of the noncontext dimensions
expands it and evaluates the entire function. | ['Wrapper', 'of', '*', 'f', '*', ':', 'takes', 'an', 'input', 'x', 'with', 'size', 'of', 'the', 'noncontext', 'dimensions', 'expands', 'it', 'and', 'evaluates', 'the', 'entire', 'function', '.'] | train | https://github.com/SheffieldML/GPyOpt/blob/255539dc5927819ca701e44fe3d76cd4864222fa/GPyOpt/optimization/optimizer.py#L203-L213 |
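A minimal, self-contained illustration of the "expand the non-context dimensions" idea behind f_nc: fixed (context) values are re-inserted into the input vector before the full objective is evaluated. The indices and the stand-in objective are made up for the example.

import numpy as np

context = {2: 5.0}           # dimension 2 is fixed ('context') at 5.0
noncontext_index = [0, 1]    # free dimensions

def expand_vector(x):
    x = np.atleast_2d(x)
    xx = np.zeros((x.shape[0], x.shape[1] + len(context)))
    xx[:, noncontext_index] = x
    for idx, value in context.items():
        xx[:, idx] = value
    return xx

f = lambda xx: (xx ** 2).sum(axis=1)   # stand-in objective
print(f(expand_vector([1.0, 2.0])))    # -> [30.]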
5,970 | google-research/batch-ppo | agents/tools/wrappers.py | ConvertTo32Bit._convert_observ | def _convert_observ(self, observ):
"""Convert the observation to 32 bits.
Args:
observ: Numpy observation.
Raises:
ValueError: Observation contains infinite values.
Returns:
Numpy observation with 32-bit data type.
"""
if not np.isfinite(observ).all():
raise ValueError('Infinite observation encountered.')
if observ.dtype == np.float64:
return observ.astype(np.float32)
if observ.dtype == np.int64:
return observ.astype(np.int32)
return observ | python | def _convert_observ(self, observ):
"""Convert the observation to 32 bits.
Args:
observ: Numpy observation.
Raises:
ValueError: Observation contains infinite values.
Returns:
Numpy observation with 32-bit data type.
"""
if not np.isfinite(observ).all():
raise ValueError('Infinite observation encountered.')
if observ.dtype == np.float64:
return observ.astype(np.float32)
if observ.dtype == np.int64:
return observ.astype(np.int32)
return observ | ['def', '_convert_observ', '(', 'self', ',', 'observ', ')', ':', 'if', 'not', 'np', '.', 'isfinite', '(', 'observ', ')', '.', 'all', '(', ')', ':', 'raise', 'ValueError', '(', "'Infinite observation encountered.'", ')', 'if', 'observ', '.', 'dtype', '==', 'np', '.', 'float64', ':', 'return', 'observ', '.', 'astype', '(', 'np', '.', 'float32', ')', 'if', 'observ', '.', 'dtype', '==', 'np', '.', 'int64', ':', 'return', 'observ', '.', 'astype', '(', 'np', '.', 'int32', ')', 'return', 'observ'] | Convert the observation to 32 bits.
Args:
observ: Numpy observation.
Raises:
ValueError: Observation contains infinite values.
Returns:
Numpy observation with 32-bit data type. | ['Convert', 'the', 'observation', 'to', '32', 'bits', '.'] | train | https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/tools/wrappers.py#L530-L548 |
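A standalone sketch of the same 64-bit to 32-bit conversion rule, outside the wrapper class:

import numpy as np

def to_32_bit(array):
    if not np.isfinite(array).all():
        raise ValueError('Infinite value encountered.')
    if array.dtype == np.float64:
        return array.astype(np.float32)
    if array.dtype == np.int64:
        return array.astype(np.int32)
    return array

print(to_32_bit(np.array([1.0, 2.0])).dtype)  # float32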
5,971 | apple/turicreate | src/unity/python/turicreate/data_structures/sframe.py | SFrame.add_column | def add_column(self, data, column_name="", inplace=False):
"""
Returns an SFrame with a new column. The number of elements in the data
given must match the length of every other column of the SFrame.
If no name is given, a default name is chosen.
If inplace == False (default) this operation does not modify the
current SFrame, returning a new SFrame.
If inplace == True, this operation modifies the current
SFrame, returning self.
Parameters
----------
data : SArray
The 'column' of data to add.
column_name : string, optional
The name of the column. If no name is given, a default name is
chosen.
inplace : bool, optional. Defaults to False.
Whether the SFrame is modified in place.
Returns
-------
out : SFrame
The current SFrame.
See Also
--------
add_columns
Examples
--------
>>> sf = turicreate.SFrame({'id': [1, 2, 3], 'val': ['A', 'B', 'C']})
>>> sa = turicreate.SArray(['cat', 'dog', 'fossa'])
>>> # This line is equivalent to `sf['species'] = sa`
>>> res = sf.add_column(sa, 'species')
>>> res
+----+-----+---------+
| id | val | species |
+----+-----+---------+
| 1 | A | cat |
| 2 | B | dog |
| 3 | C | fossa |
+----+-----+---------+
[3 rows x 3 columns]
"""
# Check type for pandas dataframe or SArray?
if not isinstance(data, SArray):
if isinstance(data, _Iterable):
data = SArray(data)
else:
if self.num_columns() == 0:
data = SArray([data])
else:
data = SArray.from_const(data, self.num_rows())
if not isinstance(column_name, str):
raise TypeError("Invalid column name: must be str")
if inplace:
ret = self
else:
ret = self.copy()
with cython_context():
ret.__proxy__.add_column(data.__proxy__, column_name)
ret._cache = None
return ret | python | def add_column(self, data, column_name="", inplace=False):
"""
Returns an SFrame with a new column. The number of elements in the data
given must match the length of every other column of the SFrame.
If no name is given, a default name is chosen.
If inplace == False (default) this operation does not modify the
current SFrame, returning a new SFrame.
If inplace == True, this operation modifies the current
SFrame, returning self.
Parameters
----------
data : SArray
The 'column' of data to add.
column_name : string, optional
The name of the column. If no name is given, a default name is
chosen.
inplace : bool, optional. Defaults to False.
Whether the SFrame is modified in place.
Returns
-------
out : SFrame
The current SFrame.
See Also
--------
add_columns
Examples
--------
>>> sf = turicreate.SFrame({'id': [1, 2, 3], 'val': ['A', 'B', 'C']})
>>> sa = turicreate.SArray(['cat', 'dog', 'fossa'])
>>> # This line is equivalent to `sf['species'] = sa`
>>> res = sf.add_column(sa, 'species')
>>> res
+----+-----+---------+
| id | val | species |
+----+-----+---------+
| 1 | A | cat |
| 2 | B | dog |
| 3 | C | fossa |
+----+-----+---------+
[3 rows x 3 columns]
"""
# Check type for pandas dataframe or SArray?
if not isinstance(data, SArray):
if isinstance(data, _Iterable):
data = SArray(data)
else:
if self.num_columns() == 0:
data = SArray([data])
else:
data = SArray.from_const(data, self.num_rows())
if not isinstance(column_name, str):
raise TypeError("Invalid column name: must be str")
if inplace:
ret = self
else:
ret = self.copy()
with cython_context():
ret.__proxy__.add_column(data.__proxy__, column_name)
ret._cache = None
return ret | ['def', 'add_column', '(', 'self', ',', 'data', ',', 'column_name', '=', '""', ',', 'inplace', '=', 'False', ')', ':', '# Check type for pandas dataframe or SArray?', 'if', 'not', 'isinstance', '(', 'data', ',', 'SArray', ')', ':', 'if', 'isinstance', '(', 'data', ',', '_Iterable', ')', ':', 'data', '=', 'SArray', '(', 'data', ')', 'else', ':', 'if', 'self', '.', 'num_columns', '(', ')', '==', '0', ':', 'data', '=', 'SArray', '(', '[', 'data', ']', ')', 'else', ':', 'data', '=', 'SArray', '.', 'from_const', '(', 'data', ',', 'self', '.', 'num_rows', '(', ')', ')', 'if', 'not', 'isinstance', '(', 'column_name', ',', 'str', ')', ':', 'raise', 'TypeError', '(', '"Invalid column name: must be str"', ')', 'if', 'inplace', ':', 'ret', '=', 'self', 'else', ':', 'ret', '=', 'self', '.', 'copy', '(', ')', 'with', 'cython_context', '(', ')', ':', 'ret', '.', '__proxy__', '.', 'add_column', '(', 'data', '.', '__proxy__', ',', 'column_name', ')', 'ret', '.', '_cache', '=', 'None', 'return', 'ret'] | Returns an SFrame with a new column. The number of elements in the data
given must match the length of every other column of the SFrame.
If no name is given, a default name is chosen.
If inplace == False (default) this operation does not modify the
current SFrame, returning a new SFrame.
If inplace == True, this operation modifies the current
SFrame, returning self.
Parameters
----------
data : SArray
The 'column' of data to add.
column_name : string, optional
The name of the column. If no name is given, a default name is
chosen.
inplace : bool, optional. Defaults to False.
Whether the SFrame is modified in place.
Returns
-------
out : SFrame
The current SFrame.
See Also
--------
add_columns
Examples
--------
>>> sf = turicreate.SFrame({'id': [1, 2, 3], 'val': ['A', 'B', 'C']})
>>> sa = turicreate.SArray(['cat', 'dog', 'fossa'])
>>> # This line is equivalent to `sf['species'] = sa`
>>> res = sf.add_column(sa, 'species')
>>> res
+----+-----+---------+
| id | val | species |
+----+-----+---------+
| 1 | A | cat |
| 2 | B | dog |
| 3 | C | fossa |
+----+-----+---------+
[3 rows x 3 columns] | ['Returns', 'an', 'SFrame', 'with', 'a', 'new', 'column', '.', 'The', 'number', 'of', 'elements', 'in', 'the', 'data', 'given', 'must', 'match', 'the', 'length', 'of', 'every', 'other', 'column', 'of', 'the', 'SFrame', '.', 'If', 'no', 'name', 'is', 'given', 'a', 'default', 'name', 'is', 'chosen', '.'] | train | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/data_structures/sframe.py#L3139-L3212 |
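A hedged usage sketch contrasting the default copy behaviour with inplace=True (requires the turicreate package):

import turicreate as tc

sf = tc.SFrame({'id': [1, 2, 3]})
sf2 = sf.add_column(tc.SArray(['a', 'b', 'c']), 'letter')           # returns a new SFrame
sf.add_column(tc.SArray([0.1, 0.2, 0.3]), 'score', inplace=True)    # modifies sf itself
print(sf2.column_names(), sf.column_names())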
5,972 | google/transitfeed | merge.py | DataSetMerger._MergeOptional | def _MergeOptional(self, a, b):
"""Tries to merge two values which may be None.
If both values are not None, they are required to be the same and the
merge is trivial. If one of the values is None and the other is not None,
the merge results in the one which is not None. If both are None, the merge
results in None.
Args:
a: The first value.
b: The second value.
Returns:
The merged value.
Raises:
MergeError: If both values are not None and are not the same.
"""
if a and b:
if a != b:
raise MergeError("values must be identical if both specified "
"('%s' vs '%s')" % (transitfeed.EncodeUnicode(a),
transitfeed.EncodeUnicode(b)))
return a or b | python | def _MergeOptional(self, a, b):
"""Tries to merge two values which may be None.
If both values are not None, they are required to be the same and the
merge is trivial. If one of the values is None and the other is not None,
the merge results in the one which is not None. If both are None, the merge
results in None.
Args:
a: The first value.
b: The second value.
Returns:
The merged value.
Raises:
MergeError: If both values are not None and are not the same.
"""
if a and b:
if a != b:
raise MergeError("values must be identical if both specified "
"('%s' vs '%s')" % (transitfeed.EncodeUnicode(a),
transitfeed.EncodeUnicode(b)))
return a or b | ['def', '_MergeOptional', '(', 'self', ',', 'a', ',', 'b', ')', ':', 'if', 'a', 'and', 'b', ':', 'if', 'a', '!=', 'b', ':', 'raise', 'MergeError', '(', '"values must be identical if both specified "', '"(\'%s\' vs \'%s\')"', '%', '(', 'transitfeed', '.', 'EncodeUnicode', '(', 'a', ')', ',', 'transitfeed', '.', 'EncodeUnicode', '(', 'b', ')', ')', ')', 'return', 'a', 'or', 'b'] | Tries to merge two values which may be None.
If both values are not None, they are required to be the same and the
merge is trivial. If one of the values is None and the other is not None,
the merge results in the one which is not None. If both are None, the merge
results in None.
Args:
a: The first value.
b: The second value.
Returns:
The merged value.
Raises:
MergeError: If both values are not None and are not the same. | ['Tries', 'to', 'merge', 'two', 'values', 'which', 'may', 'be', 'None', '.'] | train | https://github.com/google/transitfeed/blob/eb2991a3747ba541b2cb66502b305b6304a1f85f/merge.py#L435-L458 |
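A standalone sketch of the merge rule above: equal or one-sided values merge, conflicting values raise.

def merge_optional(a, b):
    if a and b and a != b:
        raise ValueError("values must be identical if both specified "
                         "('%s' vs '%s')" % (a, b))
    return a or b

print(merge_optional(None, 'x'))  # 'x'
print(merge_optional('x', 'x'))   # 'x'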
5,973 | KelSolaar/Umbra | umbra/components/factory/script_editor/editor_status.py | EditorStatus.__Languages_comboBox_set_default_view_state | def __Languages_comboBox_set_default_view_state(self):
"""
Sets the **Languages_comboBox** Widget default View state.
"""
if not self.__container.has_editor_tab():
return
editor = self.__container.get_current_editor()
index = self.Languages_comboBox.findText(editor.language.name)
self.Languages_comboBox.setCurrentIndex(index) | python | def __Languages_comboBox_set_default_view_state(self):
"""
Sets the **Languages_comboBox** Widget default View state.
"""
if not self.__container.has_editor_tab():
return
editor = self.__container.get_current_editor()
index = self.Languages_comboBox.findText(editor.language.name)
self.Languages_comboBox.setCurrentIndex(index) | ['def', '__Languages_comboBox_set_default_view_state', '(', 'self', ')', ':', 'if', 'not', 'self', '.', '__container', '.', 'has_editor_tab', '(', ')', ':', 'return', 'editor', '=', 'self', '.', '__container', '.', 'get_current_editor', '(', ')', 'index', '=', 'self', '.', 'Languages_comboBox', '.', 'findText', '(', 'editor', '.', 'language', '.', 'name', ')', 'self', '.', 'Languages_comboBox', '.', 'setCurrentIndex', '(', 'index', ')'] | Sets the **Languages_comboBox** Widget default View state. | ['Sets', 'the', '**', 'Languages_comboBox', '**', 'Widget', 'default', 'View', 'state', '.'] | train | https://github.com/KelSolaar/Umbra/blob/66f45f08d9d723787f1191989f8b0dda84b412ce/umbra/components/factory/script_editor/editor_status.py#L152-L163 |
5,974 | pkgw/pwkit | pwkit/sherpa.py | make_fixed_temp_multi_apec | def make_fixed_temp_multi_apec(kTs, name_template='apec%d', norm=None):
"""Create a model summing multiple APEC components at fixed temperatures.
*kTs*
An iterable of temperatures for the components, in keV.
*name_template* = 'apec%d'
A template to use for the names of each component; it is string-formatted
with the 0-based component number as an argument.
*norm* = None
An initial normalization to be used for every component, or None to use
the Sherpa default.
Returns:
A tuple ``(total_model, sub_models)``, where *total_model* is a Sherpa
model representing the sum of the APEC components and *sub_models* is
a list of the individual models.
This function creates a vector of APEC model components and sums them.
Their *kT* parameters are set and then frozen (using
:func:`sherpa.astro.ui.freeze`), so that upon exit from this function, the
amplitude of each component is the only free parameter.
"""
total_model = None
sub_models = []
for i, kT in enumerate(kTs):
component = ui.xsapec(name_template % i)
component.kT = kT
ui.freeze(component.kT)
if norm is not None:
component.norm = norm
sub_models.append(component)
if total_model is None:
total_model = component
else:
total_model = total_model + component
return total_model, sub_models | python | def make_fixed_temp_multi_apec(kTs, name_template='apec%d', norm=None):
"""Create a model summing multiple APEC components at fixed temperatures.
*kTs*
An iterable of temperatures for the components, in keV.
*name_template* = 'apec%d'
A template to use for the names of each component; it is string-formatted
with the 0-based component number as an argument.
*norm* = None
An initial normalization to be used for every component, or None to use
the Sherpa default.
Returns:
A tuple ``(total_model, sub_models)``, where *total_model* is a Sherpa
model representing the sum of the APEC components and *sub_models* is
a list of the individual models.
This function creates a vector of APEC model components and sums them.
Their *kT* parameters are set and then frozen (using
:func:`sherpa.astro.ui.freeze`), so that upon exit from this function, the
amplitude of each component is the only free parameter.
"""
total_model = None
sub_models = []
for i, kT in enumerate(kTs):
component = ui.xsapec(name_template % i)
component.kT = kT
ui.freeze(component.kT)
if norm is not None:
component.norm = norm
sub_models.append(component)
if total_model is None:
total_model = component
else:
total_model = total_model + component
return total_model, sub_models | ['def', 'make_fixed_temp_multi_apec', '(', 'kTs', ',', 'name_template', '=', "'apec%d'", ',', 'norm', '=', 'None', ')', ':', 'total_model', '=', 'None', 'sub_models', '=', '[', ']', 'for', 'i', ',', 'kT', 'in', 'enumerate', '(', 'kTs', ')', ':', 'component', '=', 'ui', '.', 'xsapec', '(', 'name_template', '%', 'i', ')', 'component', '.', 'kT', '=', 'kT', 'ui', '.', 'freeze', '(', 'component', '.', 'kT', ')', 'if', 'norm', 'is', 'not', 'None', ':', 'component', '.', 'norm', '=', 'norm', 'sub_models', '.', 'append', '(', 'component', ')', 'if', 'total_model', 'is', 'None', ':', 'total_model', '=', 'component', 'else', ':', 'total_model', '=', 'total_model', '+', 'component', 'return', 'total_model', ',', 'sub_models'] | Create a model summing multiple APEC components at fixed temperatures.
*kTs*
An iterable of temperatures for the components, in keV.
*name_template* = 'apec%d'
A template to use for the names of each component; it is string-formatted
with the 0-based component number as an argument.
*norm* = None
An initial normalization to be used for every component, or None to use
the Sherpa default.
Returns:
A tuple ``(total_model, sub_models)``, where *total_model* is a Sherpa
model representing the sum of the APEC components and *sub_models* is
a list of the individual models.
This function creates a vector of APEC model components and sums them.
Their *kT* parameters are set and then frozen (using
:func:`sherpa.astro.ui.freeze`), so that upon exit from this function, the
amplitude of each component is the only free parameter. | ['Create', 'a', 'model', 'summing', 'multiple', 'APEC', 'components', 'at', 'fixed', 'temperatures', '.'] | train | https://github.com/pkgw/pwkit/blob/d40957a1c3d2ea34e7ceac2267ee9635135f2793/pwkit/sherpa.py#L102-L140 |
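A hedged usage sketch for the helper above (assumes Sherpa with the XSPEC models is available, which pwkit.sherpa wraps):

from pwkit.sherpa import make_fixed_temp_multi_apec

total_model, components = make_fixed_temp_multi_apec([0.5, 2.0], norm=1e-5)
print(len(components))  # 2; each kT is frozen, only the normalizations stay free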
5,975 | instaloader/instaloader | instaloader/instaloader.py | Instaloader.save_location | def save_location(self, filename: str, location: PostLocation, mtime: datetime) -> None:
"""Save post location name and Google Maps link."""
filename += '_location.txt'
location_string = (location.name + "\n" +
"https://maps.google.com/maps?q={0},{1}&ll={0},{1}\n".format(location.lat,
location.lng))
with open(filename, 'wb') as text_file:
shutil.copyfileobj(BytesIO(location_string.encode()), text_file)
os.utime(filename, (datetime.now().timestamp(), mtime.timestamp()))
self.context.log('geo', end=' ', flush=True) | python | def save_location(self, filename: str, location: PostLocation, mtime: datetime) -> None:
"""Save post location name and Google Maps link."""
filename += '_location.txt'
location_string = (location.name + "\n" +
"https://maps.google.com/maps?q={0},{1}&ll={0},{1}\n".format(location.lat,
location.lng))
with open(filename, 'wb') as text_file:
shutil.copyfileobj(BytesIO(location_string.encode()), text_file)
os.utime(filename, (datetime.now().timestamp(), mtime.timestamp()))
self.context.log('geo', end=' ', flush=True) | ['def', 'save_location', '(', 'self', ',', 'filename', ':', 'str', ',', 'location', ':', 'PostLocation', ',', 'mtime', ':', 'datetime', ')', '->', 'None', ':', 'filename', '+=', "'_location.txt'", 'location_string', '=', '(', 'location', '.', 'name', '+', '"\\n"', '+', '"https://maps.google.com/maps?q={0},{1}&ll={0},{1}\\n"', '.', 'format', '(', 'location', '.', 'lat', ',', 'location', '.', 'lng', ')', ')', 'with', 'open', '(', 'filename', ',', "'wb'", ')', 'as', 'text_file', ':', 'shutil', '.', 'copyfileobj', '(', 'BytesIO', '(', 'location_string', '.', 'encode', '(', ')', ')', ',', 'text_file', ')', 'os', '.', 'utime', '(', 'filename', ',', '(', 'datetime', '.', 'now', '(', ')', '.', 'timestamp', '(', ')', ',', 'mtime', '.', 'timestamp', '(', ')', ')', ')', 'self', '.', 'context', '.', 'log', '(', "'geo'", ',', 'end', '=', "' '", ',', 'flush', '=', 'True', ')'] | Save post location name and Google Maps link. | ['Save', 'post', 'location', 'name', 'and', 'Google', 'Maps', 'link', '.'] | train | https://github.com/instaloader/instaloader/blob/87d877e650cd8020b04b8b51be120599a441fd5b/instaloader/instaloader.py#L323-L332 |
5,976 | lincolnloop/salmon | salmon/metrics/models.py | Metric.whisper_filename | def whisper_filename(self):
"""Build a file path to the Whisper database"""
source_name = self.source_id and self.source.name or ''
return get_valid_filename("{0}__{1}.wsp".format(source_name,
self.name)) | python | def whisper_filename(self):
"""Build a file path to the Whisper database"""
source_name = self.source_id and self.source.name or ''
return get_valid_filename("{0}__{1}.wsp".format(source_name,
self.name)) | ['def', 'whisper_filename', '(', 'self', ')', ':', 'source_name', '=', 'self', '.', 'source_id', 'and', 'self', '.', 'source', '.', 'name', 'or', "''", 'return', 'get_valid_filename', '(', '"{0}__{1}.wsp"', '.', 'format', '(', 'source_name', ',', 'self', '.', 'name', ')', ')'] | Build a file path to the Whisper database | ['Build', 'a', 'file', 'path', 'to', 'the', 'Whisper', 'database'] | train | https://github.com/lincolnloop/salmon/blob/62a965ad9716707ea1db4afb5d9646766f29b64b/salmon/metrics/models.py#L69-L73 |
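A standalone sketch of the filename rule: "<source>__<metric>.wsp" passed through Django's get_valid_filename (requires Django; the names are placeholders):

from django.utils.text import get_valid_filename

print(get_valid_filename('{0}__{1}.wsp'.format('web server', 'load avg')))
# -> 'web_server__load_avg.wsp'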
5,977 | acsone/bobtemplates.odoo | bobtemplates/odoo/hooks.py | _insert_manifest_item | def _insert_manifest_item(configurator, key, item):
""" Insert an item in the list of an existing manifest key """
with _open_manifest(configurator) as f:
manifest = f.read()
if item in ast.literal_eval(manifest).get(key, []):
return
pattern = """(["']{}["']:\\s*\\[)""".format(key)
repl = """\\1\n '{}',""".format(item)
manifest = re.sub(pattern, repl, manifest, re.MULTILINE)
with _open_manifest(configurator, "w") as f:
f.write(manifest) | python | def _insert_manifest_item(configurator, key, item):
""" Insert an item in the list of an existing manifest key """
with _open_manifest(configurator) as f:
manifest = f.read()
if item in ast.literal_eval(manifest).get(key, []):
return
pattern = """(["']{}["']:\\s*\\[)""".format(key)
repl = """\\1\n '{}',""".format(item)
manifest = re.sub(pattern, repl, manifest, re.MULTILINE)
with _open_manifest(configurator, "w") as f:
f.write(manifest) | ['def', '_insert_manifest_item', '(', 'configurator', ',', 'key', ',', 'item', ')', ':', 'with', '_open_manifest', '(', 'configurator', ')', 'as', 'f', ':', 'manifest', '=', 'f', '.', 'read', '(', ')', 'if', 'item', 'in', 'ast', '.', 'literal_eval', '(', 'manifest', ')', '.', 'get', '(', 'key', ',', '[', ']', ')', ':', 'return', 'pattern', '=', '"""(["\']{}["\']:\\\\s*\\\\[)"""', '.', 'format', '(', 'key', ')', 'repl', '=', '"""\\\\1\\n \'{}\',"""', '.', 'format', '(', 'item', ')', 'manifest', '=', 're', '.', 'sub', '(', 'pattern', ',', 'repl', ',', 'manifest', ',', 're', '.', 'MULTILINE', ')', 'with', '_open_manifest', '(', 'configurator', ',', '"w"', ')', 'as', 'f', ':', 'f', '.', 'write', '(', 'manifest', ')'] | Insert an item in the list of an existing manifest key | ['Insert', 'an', 'item', 'in', 'the', 'list', 'of', 'an', 'existing', 'manifest', 'key'] | train | https://github.com/acsone/bobtemplates.odoo/blob/6e8c3cb12747d8b5af5a9821f995f285251e4d4d/bobtemplates/odoo/hooks.py#L58-L68 |
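A standalone sketch of the regex insertion used above, applied to a small Odoo-style manifest snippet; note that re.MULTILINE is passed via flags= here, since the positional argument after the replacement string would be interpreted as count.

import re

manifest = """{
    'depends': [
        'base',
    ],
}"""

key, item = 'depends', 'web'
pattern = """(["']{}["']:\\s*\\[)""".format(key)
repl = """\\1\n        '{}',""".format(item)
print(re.sub(pattern, repl, manifest, flags=re.MULTILINE))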
5,978 | DataKitchen/DKCloudCommand | DKCloudCommand/cli/__main__.py | recipe_create | def recipe_create(backend, kitchen, name):
"""
Create a new Recipe
"""
err_str, use_kitchen = Backend.get_kitchen_from_user(kitchen)
if use_kitchen is None:
raise click.ClickException(err_str)
click.secho("%s - Creating Recipe %s for Kitchen '%s'" % (get_datetime(), name, use_kitchen), fg='green')
check_and_print(DKCloudCommandRunner.recipe_create(backend.dki, use_kitchen,name)) | python | def recipe_create(backend, kitchen, name):
"""
Create a new Recipe
"""
err_str, use_kitchen = Backend.get_kitchen_from_user(kitchen)
if use_kitchen is None:
raise click.ClickException(err_str)
click.secho("%s - Creating Recipe %s for Kitchen '%s'" % (get_datetime(), name, use_kitchen), fg='green')
check_and_print(DKCloudCommandRunner.recipe_create(backend.dki, use_kitchen,name)) | ['def', 'recipe_create', '(', 'backend', ',', 'kitchen', ',', 'name', ')', ':', 'err_str', ',', 'use_kitchen', '=', 'Backend', '.', 'get_kitchen_from_user', '(', 'kitchen', ')', 'if', 'use_kitchen', 'is', 'None', ':', 'raise', 'click', '.', 'ClickException', '(', 'err_str', ')', 'click', '.', 'secho', '(', '"%s - Creating Recipe %s for Kitchen \'%s\'"', '%', '(', 'get_datetime', '(', ')', ',', 'name', ',', 'use_kitchen', ')', ',', 'fg', '=', "'green'", ')', 'check_and_print', '(', 'DKCloudCommandRunner', '.', 'recipe_create', '(', 'backend', '.', 'dki', ',', 'use_kitchen', ',', 'name', ')', ')'] | Create a new Recipe | ['Create', 'a', 'new', 'Recipe'] | train | https://github.com/DataKitchen/DKCloudCommand/blob/1cf9cb08ab02f063eef6b5c4b327af142991daa3/DKCloudCommand/cli/__main__.py#L432-L440 |
5,979 | mete0r/hypua2jamo | src/hypua2jamo/decoder.py | _uptrace | def _uptrace(nodelist, node):
'''
    Trace the node upward.
    Starting from the current node, iterate over its ancestor nodes in order and yield them.
    The root node is excluded.
'''
if node.parent_index is None:
return
parent = nodelist[node.parent_index]
for x in _uptrace(nodelist, parent):
yield x
yield node | python | def _uptrace(nodelist, node):
'''
    Trace the node upward.
    Starting from the current node, iterate over its ancestor nodes in order and yield them.
    The root node is excluded.
'''
if node.parent_index is None:
return
parent = nodelist[node.parent_index]
for x in _uptrace(nodelist, parent):
yield x
    yield node | ['def', '_uptrace', '(', 'nodelist', ',', 'node', ')', ':', 'if', 'node', '.', 'parent_index', 'is', 'None', ':', 'return', 'parent', '=', 'nodelist', '[', 'node', '.', 'parent_index', ']', 'for', 'x', 'in', '_uptrace', '(', 'nodelist', ',', 'parent', ')', ':', 'yield', 'x', 'yield', 'node'] | Trace the node upward.
    Starting from the current node, iterate over its ancestor nodes in order and yield them.
    The root node is excluded. | ['노드를', '상향', '추적한다', '.'] | train | https://github.com/mete0r/hypua2jamo/blob/caceb33a26c27645703d659a82bb1152deef1469/src/hypua2jamo/decoder.py#L262-L275 |
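A minimal, self-contained illustration of the upward trace: a tiny node list in which each node stores its parent's index; ancestors are yielded from the top down with the root excluded, followed by the node itself.

from collections import namedtuple

Node = namedtuple('Node', 'name parent_index')
nodes = [Node('root', None), Node('child', 0), Node('grandchild', 1)]

def uptrace(nodelist, node):
    if node.parent_index is None:
        return
    parent = nodelist[node.parent_index]
    yield from uptrace(nodelist, parent)
    yield node

print([n.name for n in uptrace(nodes, nodes[2])])  # ['child', 'grandchild']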
5,980 | DallasMorningNews/django-datafreezer | datafreezer/views.py | parse_csv_headers | def parse_csv_headers(dataset_id):
"""Return the first row of a CSV as a list of headers."""
data = Dataset.objects.get(pk=dataset_id)
with open(data.dataset_file.path, 'r') as datasetFile:
csvReader = reader(datasetFile, delimiter=',', quotechar='"')
headers = next(csvReader)
# print headers
return headers | python | def parse_csv_headers(dataset_id):
"""Return the first row of a CSV as a list of headers."""
data = Dataset.objects.get(pk=dataset_id)
with open(data.dataset_file.path, 'r') as datasetFile:
csvReader = reader(datasetFile, delimiter=',', quotechar='"')
headers = next(csvReader)
# print headers
return headers | ['def', 'parse_csv_headers', '(', 'dataset_id', ')', ':', 'data', '=', 'Dataset', '.', 'objects', '.', 'get', '(', 'pk', '=', 'dataset_id', ')', 'with', 'open', '(', 'data', '.', 'dataset_file', '.', 'path', ',', "'r'", ')', 'as', 'datasetFile', ':', 'csvReader', '=', 'reader', '(', 'datasetFile', ',', 'delimiter', '=', "','", ',', 'quotechar', '=', '\'"\'', ')', 'headers', '=', 'next', '(', 'csvReader', ')', '# print headers', 'return', 'headers'] | Return the first row of a CSV as a list of headers. | ['Return', 'the', 'first', 'row', 'of', 'a', 'CSV', 'as', 'a', 'list', 'of', 'headers', '.'] | train | https://github.com/DallasMorningNews/django-datafreezer/blob/982dcf2015c80a280f1a093e32977cb71d4ea7aa/datafreezer/views.py#L253-L260 |
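A standalone sketch of reading just the header row of a CSV file; the path is a placeholder.

from csv import reader

with open('data.csv', 'r') as fh:                        # placeholder path
    headers = next(reader(fh, delimiter=',', quotechar='"'))
print(headers)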
5,981 | gitpython-developers/GitPython | git/repo/base.py | Repo.config_reader | def config_reader(self, config_level=None):
"""
:return:
GitConfigParser allowing to read the full git configuration, but not to write it
The configuration will include values from the system, user and repository
configuration files.
:param config_level:
For possible values, see config_writer method
If None, all applicable levels will be used. Specify a level in case
you know which file you wish to read to prevent reading multiple files.
:note: On windows, system configuration cannot currently be read as the path is
unknown, instead the global path will be used."""
files = None
if config_level is None:
files = [self._get_config_path(f) for f in self.config_level]
else:
files = [self._get_config_path(config_level)]
return GitConfigParser(files, read_only=True) | python | def config_reader(self, config_level=None):
"""
:return:
GitConfigParser allowing to read the full git configuration, but not to write it
The configuration will include values from the system, user and repository
configuration files.
:param config_level:
For possible values, see config_writer method
If None, all applicable levels will be used. Specify a level in case
you know which file you wish to read to prevent reading multiple files.
:note: On windows, system configuration cannot currently be read as the path is
unknown, instead the global path will be used."""
files = None
if config_level is None:
files = [self._get_config_path(f) for f in self.config_level]
else:
files = [self._get_config_path(config_level)]
return GitConfigParser(files, read_only=True) | ['def', 'config_reader', '(', 'self', ',', 'config_level', '=', 'None', ')', ':', 'files', '=', 'None', 'if', 'config_level', 'is', 'None', ':', 'files', '=', '[', 'self', '.', '_get_config_path', '(', 'f', ')', 'for', 'f', 'in', 'self', '.', 'config_level', ']', 'else', ':', 'files', '=', '[', 'self', '.', '_get_config_path', '(', 'config_level', ')', ']', 'return', 'GitConfigParser', '(', 'files', ',', 'read_only', '=', 'True', ')'] | :return:
GitConfigParser allowing to read the full git configuration, but not to write it
The configuration will include values from the system, user and repository
configuration files.
:param config_level:
For possible values, see config_writer method
If None, all applicable levels will be used. Specify a level in case
you know which file you wish to read to prevent reading multiple files.
:note: On windows, system configuration cannot currently be read as the path is
unknown, instead the global path will be used. | [':', 'return', ':', 'GitConfigParser', 'allowing', 'to', 'read', 'the', 'full', 'git', 'configuration', 'but', 'not', 'to', 'write', 'it'] | train | https://github.com/gitpython-developers/GitPython/blob/1f66e25c25cde2423917ee18c4704fff83b837d1/git/repo/base.py#L438-L457 |
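A hedged usage sketch (requires GitPython and an existing repository; the path is a placeholder):

from git import Repo

repo = Repo('/path/to/repo')      # placeholder path
reader = repo.config_reader()     # system, user and repository levels merged, read-only
print(reader.get_value('user', 'email', default='unset'))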
5,982 | ymyzk/python-gyazo | gyazo/image.py | Image.to_dict | def to_dict(self):
"""Return a dict representation of this instance"""
data = {}
if self.created_at:
data['created_at'] = self.created_at.strftime(
'%Y-%m-%dT%H:%M:%S%z')
if self.image_id:
data['image_id'] = self.image_id
if self.permalink_url:
data['permalink_url'] = self.permalink_url
if self.thumb_url:
data['thumb_url'] = self.thumb_url
if self.type:
data['type'] = self.type
if self.url:
data['url'] = self.url
return data | python | def to_dict(self):
"""Return a dict representation of this instance"""
data = {}
if self.created_at:
data['created_at'] = self.created_at.strftime(
'%Y-%m-%dT%H:%M:%S%z')
if self.image_id:
data['image_id'] = self.image_id
if self.permalink_url:
data['permalink_url'] = self.permalink_url
if self.thumb_url:
data['thumb_url'] = self.thumb_url
if self.type:
data['type'] = self.type
if self.url:
data['url'] = self.url
return data | ['def', 'to_dict', '(', 'self', ')', ':', 'data', '=', '{', '}', 'if', 'self', '.', 'created_at', ':', 'data', '[', "'created_at'", ']', '=', 'self', '.', 'created_at', '.', 'strftime', '(', "'%Y-%m-%dT%H:%M:%S%z'", ')', 'if', 'self', '.', 'image_id', ':', 'data', '[', "'image_id'", ']', '=', 'self', '.', 'image_id', 'if', 'self', '.', 'permalink_url', ':', 'data', '[', "'permalink_url'", ']', '=', 'self', '.', 'permalink_url', 'if', 'self', '.', 'thumb_url', ':', 'data', '[', "'thumb_url'", ']', '=', 'self', '.', 'thumb_url', 'if', 'self', '.', 'type', ':', 'data', '[', "'type'", ']', '=', 'self', '.', 'type', 'if', 'self', '.', 'url', ':', 'data', '[', "'url'", ']', '=', 'self', '.', 'url', 'return', 'data'] | Return a dict representation of this instance | ['Return', 'a', 'dict', 'representation', 'of', 'this', 'instance'] | train | https://github.com/ymyzk/python-gyazo/blob/52893118899ed308ff75245b55f73d745c98ed1d/gyazo/image.py#L111-L129 |
5,983 | wmayner/pyphi | pyphi/distance.py | klm | def klm(p, q):
"""Compute the KLM divergence."""
p, q = flatten(p), flatten(q)
return max(abs(p * np.nan_to_num(np.log(p / q)))) | python | def klm(p, q):
"""Compute the KLM divergence."""
p, q = flatten(p), flatten(q)
return max(abs(p * np.nan_to_num(np.log(p / q)))) | ['def', 'klm', '(', 'p', ',', 'q', ')', ':', 'p', ',', 'q', '=', 'flatten', '(', 'p', ')', ',', 'flatten', '(', 'q', ')', 'return', 'max', '(', 'abs', '(', 'p', '*', 'np', '.', 'nan_to_num', '(', 'np', '.', 'log', '(', 'p', '/', 'q', ')', ')', ')', ')'] | Compute the KLM divergence. | ['Compute', 'the', 'KLM', 'divergence', '.'] | train | https://github.com/wmayner/pyphi/blob/deeca69a084d782a6fde7bf26f59e93b593c5d77/pyphi/distance.py#L236-L239 |
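A worked numeric sketch of the measure above: the largest elementwise term |p * log(p / q)| over two flattened distributions.

import numpy as np

p = np.array([0.5, 0.5])
q = np.array([0.9, 0.1])
print(max(abs(p * np.nan_to_num(np.log(p / q)))))  # ~0.805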
5,984 | meowklaski/custom_inherit | custom_inherit/_doc_parse_tools/numpy_parse_tools.py | parse_numpy_doc | def parse_numpy_doc(doc):
""" Extract the text from the various sections of a numpy-formatted docstring.
Parameters
----------
doc: Union[str, None]
Returns
-------
OrderedDict[str, Union[None,str]]
The extracted numpy-styled docstring sections."""
doc_sections = OrderedDict([("Short Summary", None),
("Deprecation Warning", None),
("Attributes", None),
("Extended Summary", None),
("Parameters", None),
("Returns", None),
("Yields", None),
("Other Parameters", None),
("Raises", None),
("See Also", None),
("Notes", None),
("References", None),
("Examples", None)])
if not doc:
return doc_sections
doc = cleandoc(doc)
lines = iter(doc.splitlines())
key = "Short Summary"
body = []
while True:
try:
line = next(lines).rstrip()
if line in doc_sections:
doc_sections[key] = "\n".join(body).rstrip() if body else None
body = []
key = line
next(lines) # skip section delimiter
else:
body.append(line)
except StopIteration:
doc_sections[key] = "\n".join(body)
break
return doc_sections | python | def parse_numpy_doc(doc):
""" Extract the text from the various sections of a numpy-formatted docstring.
Parameters
----------
doc: Union[str, None]
Returns
-------
OrderedDict[str, Union[None,str]]
The extracted numpy-styled docstring sections."""
doc_sections = OrderedDict([("Short Summary", None),
("Deprecation Warning", None),
("Attributes", None),
("Extended Summary", None),
("Parameters", None),
("Returns", None),
("Yields", None),
("Other Parameters", None),
("Raises", None),
("See Also", None),
("Notes", None),
("References", None),
("Examples", None)])
if not doc:
return doc_sections
doc = cleandoc(doc)
lines = iter(doc.splitlines())
key = "Short Summary"
body = []
while True:
try:
line = next(lines).rstrip()
if line in doc_sections:
doc_sections[key] = "\n".join(body).rstrip() if body else None
body = []
key = line
next(lines) # skip section delimiter
else:
body.append(line)
except StopIteration:
doc_sections[key] = "\n".join(body)
break
return doc_sections | ['def', 'parse_numpy_doc', '(', 'doc', ')', ':', 'doc_sections', '=', 'OrderedDict', '(', '[', '(', '"Short Summary"', ',', 'None', ')', ',', '(', '"Deprecation Warning"', ',', 'None', ')', ',', '(', '"Attributes"', ',', 'None', ')', ',', '(', '"Extended Summary"', ',', 'None', ')', ',', '(', '"Parameters"', ',', 'None', ')', ',', '(', '"Returns"', ',', 'None', ')', ',', '(', '"Yields"', ',', 'None', ')', ',', '(', '"Other Parameters"', ',', 'None', ')', ',', '(', '"Raises"', ',', 'None', ')', ',', '(', '"See Also"', ',', 'None', ')', ',', '(', '"Notes"', ',', 'None', ')', ',', '(', '"References"', ',', 'None', ')', ',', '(', '"Examples"', ',', 'None', ')', ']', ')', 'if', 'not', 'doc', ':', 'return', 'doc_sections', 'doc', '=', 'cleandoc', '(', 'doc', ')', 'lines', '=', 'iter', '(', 'doc', '.', 'splitlines', '(', ')', ')', 'key', '=', '"Short Summary"', 'body', '=', '[', ']', 'while', 'True', ':', 'try', ':', 'line', '=', 'next', '(', 'lines', ')', '.', 'rstrip', '(', ')', 'if', 'line', 'in', 'doc_sections', ':', 'doc_sections', '[', 'key', ']', '=', '"\\n"', '.', 'join', '(', 'body', ')', '.', 'rstrip', '(', ')', 'if', 'body', 'else', 'None', 'body', '=', '[', ']', 'key', '=', 'line', 'next', '(', 'lines', ')', '# skip section delimiter', 'else', ':', 'body', '.', 'append', '(', 'line', ')', 'except', 'StopIteration', ':', 'doc_sections', '[', 'key', ']', '=', '"\\n"', '.', 'join', '(', 'body', ')', 'break', 'return', 'doc_sections'] | Extract the text from the various sections of a numpy-formatted docstring.
Parameters
----------
doc: Union[str, None]
Returns
-------
OrderedDict[str, Union[None,str]]
The extracted numpy-styled docstring sections. | ['Extract', 'the', 'text', 'from', 'the', 'various', 'sections', 'of', 'a', 'numpy', '-', 'formatted', 'docstring', '.'] | train | https://github.com/meowklaski/custom_inherit/blob/13bce675e246d84e21bcd7658e0a4fbf25db4adc/custom_inherit/_doc_parse_tools/numpy_parse_tools.py#L8-L56 |
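A hedged usage sketch; the import path is taken from the file path in the record above, and the docstring is a made-up example.

from custom_inherit._doc_parse_tools.numpy_parse_tools import parse_numpy_doc

doc = """Add two numbers.

Parameters
----------
a: int
b: int

Returns
-------
int
"""
sections = parse_numpy_doc(doc)
print(sections['Short Summary'])  # 'Add two numbers.'
print(sections['Parameters'])     # 'a: int\nb: int'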
5,985 | codeforamerica/epa_python | epa/pcs/pcs.py | PCS.permit_event | def permit_event(self, column=None, value=None, **kwargs):
"""
A permit event tracks the lifecycle of a permit from issuance to
expiration. Examples include 'Application Received' and 'Permit
Issued', etc.
>>> PCS().permit_event('event_actual_date', '16-MAR-04')
"""
return self._resolve_call('PCS_PERMIT_EVENT', column, value, **kwargs) | python | def permit_event(self, column=None, value=None, **kwargs):
"""
A permit event tracks the lifecycle of a permit from issuance to
expiration. Examples include 'Application Received' and 'Permit
Issued', etc.
>>> PCS().permit_event('event_actual_date', '16-MAR-04')
"""
return self._resolve_call('PCS_PERMIT_EVENT', column, value, **kwargs) | ['def', 'permit_event', '(', 'self', ',', 'column', '=', 'None', ',', 'value', '=', 'None', ',', '*', '*', 'kwargs', ')', ':', 'return', 'self', '.', '_resolve_call', '(', "'PCS_PERMIT_EVENT'", ',', 'column', ',', 'value', ',', '*', '*', 'kwargs', ')'] | A permit event tracks the lifecycle of a permit from issuance to
expiration. Examples include 'Application Received' and 'Permit
Issued', etc.
>>> PCS().permit_event('event_actual_date', '16-MAR-04') | ['A', 'permit', 'event', 'tracks', 'the', 'lifecycle', 'of', 'a', 'permit', 'from', 'issuance', 'to', 'expiration', '.', 'Examples', 'include', 'Application', 'Received', 'and', 'Permit', 'Issued', 'etc', '.'] | train | https://github.com/codeforamerica/epa_python/blob/62a53da62936bea8daa487a01a52b973e9062b2c/epa/pcs/pcs.py#L144-L152 |
5,986 | robotools/fontParts | Lib/fontParts/base/bPoint.py | BaseBPoint._set_anchor | def _set_anchor(self, value):
"""
Subclasses may override this method.
"""
pX, pY = self.anchor
x, y = value
dX = x - pX
dY = y - pY
self.moveBy((dX, dY)) | python | def _set_anchor(self, value):
"""
Subclasses may override this method.
"""
pX, pY = self.anchor
x, y = value
dX = x - pX
dY = y - pY
self.moveBy((dX, dY)) | ['def', '_set_anchor', '(', 'self', ',', 'value', ')', ':', 'pX', ',', 'pY', '=', 'self', '.', 'anchor', 'x', ',', 'y', '=', 'value', 'dX', '=', 'x', '-', 'pX', 'dY', '=', 'y', '-', 'pY', 'self', '.', 'moveBy', '(', '(', 'dX', ',', 'dY', ')', ')'] | Subclasses may override this method. | ['Subclasses', 'may', 'override', 'this', 'method', '.'] | train | https://github.com/robotools/fontParts/blob/d2ff106fe95f9d566161d936a645157626568712/Lib/fontParts/base/bPoint.py#L157-L165 |
5,987 | marten-de-vries/Flask-WebSub | flask_websub/hub/tasks.py | send_change_notification | def send_change_notification(hub, topic_url, updated_content=None):
"""7. Content Distribution"""
if updated_content:
body = base64.b64decode(updated_content['content'])
else:
body, updated_content = get_new_content(hub.config, topic_url)
b64_body = updated_content['content']
headers = updated_content['headers']
link_header = headers.get('Link', '')
if 'rel="hub"' not in link_header or 'rel="self"' not in link_header:
raise NotificationError(INVALID_LINK)
for callback_url, secret in hub.storage.get_callbacks(topic_url):
schedule_request(hub, topic_url, callback_url, secret, body, b64_body,
headers) | python | def send_change_notification(hub, topic_url, updated_content=None):
"""7. Content Distribution"""
if updated_content:
body = base64.b64decode(updated_content['content'])
else:
body, updated_content = get_new_content(hub.config, topic_url)
b64_body = updated_content['content']
headers = updated_content['headers']
link_header = headers.get('Link', '')
if 'rel="hub"' not in link_header or 'rel="self"' not in link_header:
raise NotificationError(INVALID_LINK)
for callback_url, secret in hub.storage.get_callbacks(topic_url):
schedule_request(hub, topic_url, callback_url, secret, body, b64_body,
headers) | ['def', 'send_change_notification', '(', 'hub', ',', 'topic_url', ',', 'updated_content', '=', 'None', ')', ':', 'if', 'updated_content', ':', 'body', '=', 'base64', '.', 'b64decode', '(', 'updated_content', '[', "'content'", ']', ')', 'else', ':', 'body', ',', 'updated_content', '=', 'get_new_content', '(', 'hub', '.', 'config', ',', 'topic_url', ')', 'b64_body', '=', 'updated_content', '[', "'content'", ']', 'headers', '=', 'updated_content', '[', "'headers'", ']', 'link_header', '=', 'headers', '.', 'get', '(', "'Link'", ',', "''", ')', 'if', '\'rel="hub"\'', 'not', 'in', 'link_header', 'or', '\'rel="self"\'', 'not', 'in', 'link_header', ':', 'raise', 'NotificationError', '(', 'INVALID_LINK', ')', 'for', 'callback_url', ',', 'secret', 'in', 'hub', '.', 'storage', '.', 'get_callbacks', '(', 'topic_url', ')', ':', 'schedule_request', '(', 'hub', ',', 'topic_url', ',', 'callback_url', ',', 'secret', ',', 'body', ',', 'b64_body', ',', 'headers', ')'] | 7. Content Distribution | ['7', '.', 'Content', 'Distribution'] | train | https://github.com/marten-de-vries/Flask-WebSub/blob/422d5b597245554c47e881483f99cae7c57a81ba/flask_websub/hub/tasks.py#L18-L34 |
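A small, self-contained sketch of the Link-header requirement enforced above: the topic's response must advertise both rel="hub" and rel="self", otherwise NotificationError is raised. The URLs are hypothetical and only the standard dict/string operations from the function are used.

headers = {
    "Link": '<https://hub.example.com/>; rel="hub", '
            '<https://example.com/feed>; rel="self"',
}
link_header = headers.get('Link', '')
# Mirrors the check in send_change_notification(); both relations must be present.
assert 'rel="hub"' in link_header and 'rel="self"' in link_header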
5,988 | locationlabs/mockredis | mockredis/client.py | MockRedis._get_zset | def _get_zset(self, name, operation, create=False):
"""
Get (and maybe create) a sorted set by name.
"""
return self._get_by_type(name, operation, create, b'zset', SortedSet(), return_default=False) | python | def _get_zset(self, name, operation, create=False):
"""
Get (and maybe create) a sorted set by name.
"""
return self._get_by_type(name, operation, create, b'zset', SortedSet(), return_default=False) | ['def', '_get_zset', '(', 'self', ',', 'name', ',', 'operation', ',', 'create', '=', 'False', ')', ':', 'return', 'self', '.', '_get_by_type', '(', 'name', ',', 'operation', ',', 'create', ',', "b'zset'", ',', 'SortedSet', '(', ')', ',', 'return_default', '=', 'False', ')'] | Get (and maybe create) a sorted set by name. | ['Get', '(', 'and', 'maybe', 'create', ')', 'a', 'sorted', 'set', 'by', 'name', '.'] | train | https://github.com/locationlabs/mockredis/blob/fd4e3117066ff0c24e86ebca007853a8092e3254/mockredis/client.py#L1459-L1463 |
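Illustrative sketch: _get_zset() is an internal helper, so user code exercises it indirectly through MockRedis sorted-set commands. The zadd keyword form follows the redis-py 2.x style that mockredis mirrors; treat the exact signature as an assumption.

from mockredis import MockRedis

r = MockRedis()
r.zadd('scores', alice=1.0, bob=2.5)     # kwargs map member -> score (assumed 2.x-style API)
print(r.zrange('scores', 0, -1, withscores=True))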
5,989 | AlexMathew/scrapple | scrapple/commands/web.py | WebCommand.execute_command | def execute_command(self):
"""
The web command runs the Scrapple web interface through a simple \
`Flask <http://flask.pocoo.org>`_ app.
When the execute_command() method is called from the \
:ref:`runCLI() <implementation-cli>` function, it starts off two simultaneous \

processes :
- Calls the run_flask() method to start the Flask app on port 5000 of localhost
- Opens the web interface on a web browser
The '/' view of the Flask app opens up the Scrapple web interface. This \
provides a basic form to fill in the required configuration file. On submitting \
the form, it makes a POST request, passing in the form in the request header. \
This form is passed to the form_to_json() \
:ref:`utility function <implementation-utils>`, where the form is converted into \
the resultant JSON configuration file.
Currently, closing the web command execution requires making a keyboard interrupt \
on the command line after the web interface has been closed.
"""
print(Back.GREEN + Fore.BLACK + "Scrapple Web Interface")
print(Back.RESET + Fore.RESET)
p1 = Process(target = self.run_flask)
p2 = Process(target = lambda : webbrowser.open('http://127.0.0.1:5000'))
p1.start()
p2.start() | python | def execute_command(self):
"""
The web command runs the Scrapple web interface through a simple \
`Flask <http://flask.pocoo.org>`_ app.
When the execute_command() method is called from the \
:ref:`runCLI() <implementation-cli>` function, it starts off two simultaneous \
processes :
- Calls the run_flask() method to start the Flask app on port 5000 of localhost
- Opens the web interface on a web browser
The '/' view of the Flask app opens up the Scrapple web interface. This \
provides a basic form to fill in the required configuration file. On submitting \
the form, it makes a POST request, passing in the form in the request header. \
This form is passed to the form_to_json() \
:ref:`utility function <implementation-utils>`, where the form is converted into \
the resultant JSON configuration file.
Currently, closing the web command execution requires making a keyboard interrupt \
on the command line after the web interface has been closed.
"""
print(Back.GREEN + Fore.BLACK + "Scrapple Web Interface")
print(Back.RESET + Fore.RESET)
p1 = Process(target = self.run_flask)
p2 = Process(target = lambda : webbrowser.open('http://127.0.0.1:5000'))
p1.start()
p2.start() | ['def', 'execute_command', '(', 'self', ')', ':', 'print', '(', 'Back', '.', 'GREEN', '+', 'Fore', '.', 'BLACK', '+', '"Scrapple Web Interface"', ')', 'print', '(', 'Back', '.', 'RESET', '+', 'Fore', '.', 'RESET', ')', 'p1', '=', 'Process', '(', 'target', '=', 'self', '.', 'run_flask', ')', 'p2', '=', 'Process', '(', 'target', '=', 'lambda', ':', 'webbrowser', '.', 'open', '(', "'http://127.0.0.1:5000'", ')', ')', 'p1', '.', 'start', '(', ')', 'p2', '.', 'start', '(', ')'] | The web command runs the Scrapple web interface through a simple \
`Flask <http://flask.pocoo.org>`_ app.
When the execute_command() method is called from the \
:ref:`runCLI() <implementation-cli>` function, it starts off two simultaneous \
processes :
- Calls the run_flask() method to start the Flask app on port 5000 of localhost
- Opens the web interface on a web browser
The '/' view of the Flask app opens up the Scrapple web interface. This \
provides a basic form to fill in the required configuration file. On submitting \
the form, it makes a POST request, passing in the form in the request header. \
This form is passed to the form_to_json() \
:ref:`utility function <implementation-utils>`, where the form is converted into \
the resultant JSON configuration file.
Currently, closing the web command execution requires making a keyboard interrupt \
on the command line after the web interface has been closed. | ['The', 'web', 'command', 'runs', 'the', 'Scrapple', 'web', 'interface', 'through', 'a', 'simple', '\\', 'Flask', '<http', ':', '//', 'flask', '.', 'pocoo', '.', 'org', '>', '_', 'app', '.'] | train | https://github.com/AlexMathew/scrapple/blob/eeb604601b155d6cc7e035855ff4d3f48f8bed74/scrapple/commands/web.py#L39-L67 |
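A minimal sketch of the two-process pattern described in the docstring above (one process serving Flask, one opening the browser); the Flask app here is a stand-in, not Scrapple's actual interface.

import webbrowser
from multiprocessing import Process
from flask import Flask

app = Flask(__name__)

def run_flask():
    app.run(port=5000)

if __name__ == '__main__':
    p1 = Process(target=run_flask)
    # Mirrors the original pattern; note lambdas cannot be pickled under the 'spawn' start method.
    p2 = Process(target=lambda: webbrowser.open('http://127.0.0.1:5000'))
    p1.start()
    p2.start()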
5,990 | hydroshare/hs_restclient | hs_restclient/__init__.py | HydroShare.getUserInfo | def getUserInfo(self):
"""
Query the GET /hsapi/userInfo/ REST end point of the HydroShare server.
:raises: HydroShareHTTPException to signal an HTTP error
:return: A JSON object representing user info, for example:
{
"username": "username",
"first_name": "First",
"last_name": "Last",
"email": "[email protected]"
}
"""
url = "{url_base}/userInfo/".format(url_base=self.url_base)
r = self._request('GET', url)
if r.status_code != 200:
raise HydroShareHTTPException((url, 'GET', r.status_code))
return r.json() | python | def getUserInfo(self):
"""
Query the GET /hsapi/userInfo/ REST end point of the HydroShare server.
:raises: HydroShareHTTPException to signal an HTTP error
:return: A JSON object representing user info, for example:
{
"username": "username",
"first_name": "First",
"last_name": "Last",
"email": "[email protected]"
}
"""
url = "{url_base}/userInfo/".format(url_base=self.url_base)
r = self._request('GET', url)
if r.status_code != 200:
raise HydroShareHTTPException((url, 'GET', r.status_code))
return r.json() | ['def', 'getUserInfo', '(', 'self', ')', ':', 'url', '=', '"{url_base}/userInfo/"', '.', 'format', '(', 'url_base', '=', 'self', '.', 'url_base', ')', 'r', '=', 'self', '.', '_request', '(', "'GET'", ',', 'url', ')', 'if', 'r', '.', 'status_code', '!=', '200', ':', 'raise', 'HydroShareHTTPException', '(', '(', 'url', ',', "'GET'", ',', 'r', '.', 'status_code', ')', ')', 'return', 'r', '.', 'json', '(', ')'] | Query the GET /hsapi/userInfo/ REST end point of the HydroShare server.
:raises: HydroShareHTTPException to signal an HTTP error
:return: A JSON object representing user info, for example:
{
"username": "username",
"first_name": "First",
"last_name": "Last",
"email": "[email protected]"
} | ['Query', 'the', 'GET', '/', 'hsapi', '/', 'userInfo', '/', 'REST', 'end', 'point', 'of', 'the', 'HydroShare', 'server', '.'] | train | https://github.com/hydroshare/hs_restclient/blob/9cd106238b512e01ecd3e33425fe48c13b7f63d5/hs_restclient/__init__.py#L1180-L1201 |
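Illustrative usage sketch for getUserInfo(); the credentials are placeholders, and basic authentication is shown with the HydroShareAuthBasic helper that hs_restclient provides.

from hs_restclient import HydroShare, HydroShareAuthBasic

auth = HydroShareAuthBasic(username='myuser', password='mypassword')  # placeholder credentials
hs = HydroShare(auth=auth)
info = hs.getUserInfo()
print(info['username'], info['email'])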
5,991 | Syndace/python-x3dh | x3dh/state.py | State.getSharedSecretPassive | def getSharedSecretPassive(
self,
passive_exchange_data,
allow_no_otpk = False,
keep_otpk = False
):
"""
Do the key exchange, as the passive party. This involves retrieving data about the
key exchange from the active party.
:param passive_exchange_data: A structure generated by the active party, which
contains data required to complete the key exchange. See the "to_other" part
of the structure returned by "getSharedSecretActive".
:param allow_no_otpk: A boolean indicating whether to allow key exchange, even if
the active party did not use a one-time pre key. The recommended default is
False.
:param keep_otpk: Keep the one-time pre key after using it, instead of deleting
it. See the notes below.
:returns: A dictionary containing the shared secret and the shared associated
data.
The returned structure looks like this::
{
"ad": bytes, # The shared associated data
"sk": bytes # The shared secret
}
The specification of X3DH dictates to delete one-time pre keys as soon as they are
used.
This behaviour provides security but may lead to considerable usability downsides
in some environments.
For that reason the keep_otpk flag exists.
If set to True, the one-time pre key is not automatically deleted.
USE WITH CARE, THIS MAY INTRODUCE SECURITY LEAKS IF USED INCORRECTLY.
If you decide to set the flag and to keep the otpks, you have to manage deleting
them yourself, e.g. by subclassing this class and overriding this method.
:raises KeyExchangeException: If an error occurs during the key exchange. The
exception message will contain (human-readable) details.
"""
self.__checkSPKTimestamp()
other_ik = self.__KeyPair(pub = passive_exchange_data["ik"])
other_ek = self.__KeyPair(pub = passive_exchange_data["ek"])
if self.__spk["key"].pub != passive_exchange_data["spk"]:
raise KeyExchangeException(
"The SPK used for this key exchange has been rotated, the key exchange " +
"can not be completed."
)
my_otpk = None
if "otpk" in passive_exchange_data:
for otpk in self.__otpks:
if otpk.pub == passive_exchange_data["otpk"]:
my_otpk = otpk
break
for otpk in self.__hidden_otpks:
if otpk.pub == passive_exchange_data["otpk"]:
my_otpk = otpk
break
if not my_otpk:
raise KeyExchangeException(
"The OTPK used for this key exchange has been deleted, the key " +
"exchange can not be completed."
)
elif not allow_no_otpk:
raise KeyExchangeException(
"This key exchange data does not contain an OTPK, which is not allowed."
)
dh1 = self.__spk["key"].getSharedSecret(other_ik)
dh2 = self.__ik.getSharedSecret(other_ek)
dh3 = self.__spk["key"].getSharedSecret(other_ek)
dh4 = b""
if my_otpk:
dh4 = my_otpk.getSharedSecret(other_ek)
sk = self.__kdf(dh1 + dh2 + dh3 + dh4)
other_ik_pub_serialized = self.__PublicKeyEncoder.encodePublicKey(
other_ik.pub,
self.__curve
)
ik_pub_serialized = self.__PublicKeyEncoder.encodePublicKey(
self.__ik.pub,
self.__curve
)
ad = other_ik_pub_serialized + ik_pub_serialized
if my_otpk and not keep_otpk:
self.deleteOTPK(my_otpk.pub)
return {
"ad": ad,
"sk": sk
} | python | def getSharedSecretPassive(
self,
passive_exchange_data,
allow_no_otpk = False,
keep_otpk = False
):
"""
Do the key exchange, as the passive party. This involves retrieving data about the
key exchange from the active party.
:param passive_exchange_data: A structure generated by the active party, which
contains data required to complete the key exchange. See the "to_other" part
of the structure returned by "getSharedSecretActive".
:param allow_no_otpk: A boolean indicating whether to allow key exchange, even if
the active party did not use a one-time pre key. The recommended default is
False.
:param keep_otpk: Keep the one-time pre key after using it, instead of deleting
it. See the notes below.
:returns: A dictionary containing the shared secret and the shared associated
data.
The returned structure looks like this::
{
"ad": bytes, # The shared associated data
"sk": bytes # The shared secret
}
The specification of X3DH dictates to delete one-time pre keys as soon as they are
used.
This behaviour provides security but may lead to considerable usability downsides
in some environments.
For that reason the keep_otpk flag exists.
If set to True, the one-time pre key is not automatically deleted.
USE WITH CARE, THIS MAY INTRODUCE SECURITY LEAKS IF USED INCORRECTLY.
If you decide to set the flag and to keep the otpks, you have to manage deleting
them yourself, e.g. by subclassing this class and overriding this method.
:raises KeyExchangeException: If an error occurs during the key exchange. The
exception message will contain (human-readable) details.
"""
self.__checkSPKTimestamp()
other_ik = self.__KeyPair(pub = passive_exchange_data["ik"])
other_ek = self.__KeyPair(pub = passive_exchange_data["ek"])
if self.__spk["key"].pub != passive_exchange_data["spk"]:
raise KeyExchangeException(
"The SPK used for this key exchange has been rotated, the key exchange " +
"can not be completed."
)
my_otpk = None
if "otpk" in passive_exchange_data:
for otpk in self.__otpks:
if otpk.pub == passive_exchange_data["otpk"]:
my_otpk = otpk
break
for otpk in self.__hidden_otpks:
if otpk.pub == passive_exchange_data["otpk"]:
my_otpk = otpk
break
if not my_otpk:
raise KeyExchangeException(
"The OTPK used for this key exchange has been deleted, the key " +
"exchange can not be completed."
)
elif not allow_no_otpk:
raise KeyExchangeException(
"This key exchange data does not contain an OTPK, which is not allowed."
)
dh1 = self.__spk["key"].getSharedSecret(other_ik)
dh2 = self.__ik.getSharedSecret(other_ek)
dh3 = self.__spk["key"].getSharedSecret(other_ek)
dh4 = b""
if my_otpk:
dh4 = my_otpk.getSharedSecret(other_ek)
sk = self.__kdf(dh1 + dh2 + dh3 + dh4)
other_ik_pub_serialized = self.__PublicKeyEncoder.encodePublicKey(
other_ik.pub,
self.__curve
)
ik_pub_serialized = self.__PublicKeyEncoder.encodePublicKey(
self.__ik.pub,
self.__curve
)
ad = other_ik_pub_serialized + ik_pub_serialized
if my_otpk and not keep_otpk:
self.deleteOTPK(my_otpk.pub)
return {
"ad": ad,
"sk": sk
} | ['def', 'getSharedSecretPassive', '(', 'self', ',', 'passive_exchange_data', ',', 'allow_no_otpk', '=', 'False', ',', 'keep_otpk', '=', 'False', ')', ':', 'self', '.', '__checkSPKTimestamp', '(', ')', 'other_ik', '=', 'self', '.', '__KeyPair', '(', 'pub', '=', 'passive_exchange_data', '[', '"ik"', ']', ')', 'other_ek', '=', 'self', '.', '__KeyPair', '(', 'pub', '=', 'passive_exchange_data', '[', '"ek"', ']', ')', 'if', 'self', '.', '__spk', '[', '"key"', ']', '.', 'pub', '!=', 'passive_exchange_data', '[', '"spk"', ']', ':', 'raise', 'KeyExchangeException', '(', '"The SPK used for this key exchange has been rotated, the key exchange "', '+', '"can not be completed."', ')', 'my_otpk', '=', 'None', 'if', '"otpk"', 'in', 'passive_exchange_data', ':', 'for', 'otpk', 'in', 'self', '.', '__otpks', ':', 'if', 'otpk', '.', 'pub', '==', 'passive_exchange_data', '[', '"otpk"', ']', ':', 'my_otpk', '=', 'otpk', 'break', 'for', 'otpk', 'in', 'self', '.', '__hidden_otpks', ':', 'if', 'otpk', '.', 'pub', '==', 'passive_exchange_data', '[', '"otpk"', ']', ':', 'my_otpk', '=', 'otpk', 'break', 'if', 'not', 'my_otpk', ':', 'raise', 'KeyExchangeException', '(', '"The OTPK used for this key exchange has been deleted, the key "', '+', '"exchange can not be completed."', ')', 'elif', 'not', 'allow_no_otpk', ':', 'raise', 'KeyExchangeException', '(', '"This key exchange data does not contain an OTPK, which is not allowed."', ')', 'dh1', '=', 'self', '.', '__spk', '[', '"key"', ']', '.', 'getSharedSecret', '(', 'other_ik', ')', 'dh2', '=', 'self', '.', '__ik', '.', 'getSharedSecret', '(', 'other_ek', ')', 'dh3', '=', 'self', '.', '__spk', '[', '"key"', ']', '.', 'getSharedSecret', '(', 'other_ek', ')', 'dh4', '=', 'b""', 'if', 'my_otpk', ':', 'dh4', '=', 'my_otpk', '.', 'getSharedSecret', '(', 'other_ek', ')', 'sk', '=', 'self', '.', '__kdf', '(', 'dh1', '+', 'dh2', '+', 'dh3', '+', 'dh4', ')', 'other_ik_pub_serialized', '=', 'self', '.', '__PublicKeyEncoder', '.', 'encodePublicKey', '(', 'other_ik', '.', 'pub', ',', 'self', '.', '__curve', ')', 'ik_pub_serialized', '=', 'self', '.', '__PublicKeyEncoder', '.', 'encodePublicKey', '(', 'self', '.', '__ik', '.', 'pub', ',', 'self', '.', '__curve', ')', 'ad', '=', 'other_ik_pub_serialized', '+', 'ik_pub_serialized', 'if', 'my_otpk', 'and', 'not', 'keep_otpk', ':', 'self', '.', 'deleteOTPK', '(', 'my_otpk', '.', 'pub', ')', 'return', '{', '"ad"', ':', 'ad', ',', '"sk"', ':', 'sk', '}'] | Do the key exchange, as the passive party. This involves retrieving data about the
key exchange from the active party.
:param passive_exchange_data: A structure generated by the active party, which
contains data required to complete the key exchange. See the "to_other" part
of the structure returned by "getSharedSecretActive".
:param allow_no_otpk: A boolean indicating whether to allow key exchange, even if
the active party did not use a one-time pre key. The recommended default is
False.
:param keep_otpk: Keep the one-time pre key after using it, instead of deleting
it. See the notes below.
:returns: A dictionary containing the shared secret and the shared associated
data.
The returned structure looks like this::
{
"ad": bytes, # The shared associated data
"sk": bytes # The shared secret
}
The specification of X3DH dictates to delete one-time pre keys as soon as they are
used.
This behaviour provides security but may lead to considerable usability downsides
in some environments.
For that reason the keep_otpk flag exists.
If set to True, the one-time pre key is not automatically deleted.
USE WITH CARE, THIS MAY INTRODUCE SECURITY LEAKS IF USED INCORRECTLY.
If you decide to set the flag and to keep the otpks, you have to manage deleting
them yourself, e.g. by subclassing this class and overriding this method.
:raises KeyExchangeException: If an error occurs during the key exchange. The
exception message will contain (human-readable) details. | ['Do', 'the', 'key', 'exchange', 'as', 'the', 'passive', 'party', '.', 'This', 'involves', 'retrieving', 'data', 'about', 'the', 'key', 'exchange', 'from', 'the', 'active', 'party', '.'] | train | https://github.com/Syndace/python-x3dh/blob/a6cec1ae858121b88bef1b178f5cda5e43d5c391/x3dh/state.py#L440-L545 |
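A minimal sketch of calling getSharedSecretPassive(), assuming `state` is an instance of a concrete State subclass (already configured with curve, KDF, key-pair and encoder classes) and `passive_exchange_data` is the "to_other" structure produced by the active party's getSharedSecretActive().

session = state.getSharedSecretPassive(
    passive_exchange_data,
    allow_no_otpk=False,   # reject exchanges that skipped the one-time pre key
    keep_otpk=False,       # delete the used OTPK, as the X3DH spec recommends
)
shared_secret = session['sk']      # feed into session setup, e.g. a double ratchet
associated_data = session['ad']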
5,992 | PyGithub/PyGithub | github/AuthenticatedUser.py | AuthenticatedUser.create_repo | def create_repo(self, name, description=github.GithubObject.NotSet, homepage=github.GithubObject.NotSet,
private=github.GithubObject.NotSet, has_issues=github.GithubObject.NotSet,
has_wiki=github.GithubObject.NotSet, has_downloads=github.GithubObject.NotSet,
has_projects=github.GithubObject.NotSet, auto_init=github.GithubObject.NotSet, license_template=github.GithubObject.NotSet,
gitignore_template=github.GithubObject.NotSet, allow_squash_merge=github.GithubObject.NotSet,
allow_merge_commit=github.GithubObject.NotSet, allow_rebase_merge=github.GithubObject.NotSet):
"""
:calls: `POST /user/repos <http://developer.github.com/v3/repos>`_
:param name: string
:param description: string
:param homepage: string
:param private: bool
:param has_issues: bool
:param has_wiki: bool
:param has_downloads: bool
:param has_projects: bool
:param auto_init: bool
:param license_template: string
:param gitignore_template: string
:param allow_squash_merge: bool
:param allow_merge_commit: bool
:param allow_rebase_merge: bool
:rtype: :class:`github.Repository.Repository`
"""
assert isinstance(name, (str, unicode)), name
assert description is github.GithubObject.NotSet or isinstance(description, (str, unicode)), description
assert homepage is github.GithubObject.NotSet or isinstance(homepage, (str, unicode)), homepage
assert private is github.GithubObject.NotSet or isinstance(private, bool), private
assert has_issues is github.GithubObject.NotSet or isinstance(has_issues, bool), has_issues
assert has_wiki is github.GithubObject.NotSet or isinstance(has_wiki, bool), has_wiki
assert has_downloads is github.GithubObject.NotSet or isinstance(has_downloads, bool), has_downloads
assert has_projects is github.GithubObject.NotSet or isinstance(has_projects, bool), has_projects
assert auto_init is github.GithubObject.NotSet or isinstance(auto_init, bool), auto_init
assert license_template is github.GithubObject.NotSet or isinstance(license_template, (str, unicode)), license_template
assert gitignore_template is github.GithubObject.NotSet or isinstance(gitignore_template, (str, unicode)), gitignore_template
assert allow_squash_merge is github.GithubObject.NotSet or isinstance(allow_squash_merge, bool), allow_squash_merge
assert allow_merge_commit is github.GithubObject.NotSet or isinstance(allow_merge_commit, bool), allow_merge_commit
assert allow_rebase_merge is github.GithubObject.NotSet or isinstance(allow_rebase_merge, bool), allow_rebase_merge
post_parameters = {
"name": name,
}
if description is not github.GithubObject.NotSet:
post_parameters["description"] = description
if homepage is not github.GithubObject.NotSet:
post_parameters["homepage"] = homepage
if private is not github.GithubObject.NotSet:
post_parameters["private"] = private
if has_issues is not github.GithubObject.NotSet:
post_parameters["has_issues"] = has_issues
if has_wiki is not github.GithubObject.NotSet:
post_parameters["has_wiki"] = has_wiki
if has_downloads is not github.GithubObject.NotSet:
post_parameters["has_downloads"] = has_downloads
if has_projects is not github.GithubObject.NotSet:
post_parameters["has_projects"] = has_projects
if auto_init is not github.GithubObject.NotSet:
post_parameters["auto_init"] = auto_init
if license_template is not github.GithubObject.NotSet:
post_parameters["license_template"] = license_template
if gitignore_template is not github.GithubObject.NotSet:
post_parameters["gitignore_template"] = gitignore_template
if allow_squash_merge is not github.GithubObject.NotSet:
post_parameters["allow_squash_merge"] = allow_squash_merge
if allow_merge_commit is not github.GithubObject.NotSet:
post_parameters["allow_merge_commit"] = allow_merge_commit
if allow_rebase_merge is not github.GithubObject.NotSet:
post_parameters["allow_rebase_merge"] = allow_rebase_merge
headers, data = self._requester.requestJsonAndCheck(
"POST",
"/user/repos",
input=post_parameters
)
return github.Repository.Repository(self._requester, headers, data, completed=True) | python | def create_repo(self, name, description=github.GithubObject.NotSet, homepage=github.GithubObject.NotSet,
private=github.GithubObject.NotSet, has_issues=github.GithubObject.NotSet,
has_wiki=github.GithubObject.NotSet, has_downloads=github.GithubObject.NotSet,
has_projects=github.GithubObject.NotSet, auto_init=github.GithubObject.NotSet, license_template=github.GithubObject.NotSet,
gitignore_template=github.GithubObject.NotSet, allow_squash_merge=github.GithubObject.NotSet,
allow_merge_commit=github.GithubObject.NotSet, allow_rebase_merge=github.GithubObject.NotSet):
"""
:calls: `POST /user/repos <http://developer.github.com/v3/repos>`_
:param name: string
:param description: string
:param homepage: string
:param private: bool
:param has_issues: bool
:param has_wiki: bool
:param has_downloads: bool
:param has_projects: bool
:param auto_init: bool
:param license_template: string
:param gitignore_template: string
:param allow_squash_merge: bool
:param allow_merge_commit: bool
:param allow_rebase_merge: bool
:rtype: :class:`github.Repository.Repository`
"""
assert isinstance(name, (str, unicode)), name
assert description is github.GithubObject.NotSet or isinstance(description, (str, unicode)), description
assert homepage is github.GithubObject.NotSet or isinstance(homepage, (str, unicode)), homepage
assert private is github.GithubObject.NotSet or isinstance(private, bool), private
assert has_issues is github.GithubObject.NotSet or isinstance(has_issues, bool), has_issues
assert has_wiki is github.GithubObject.NotSet or isinstance(has_wiki, bool), has_wiki
assert has_downloads is github.GithubObject.NotSet or isinstance(has_downloads, bool), has_downloads
assert has_projects is github.GithubObject.NotSet or isinstance(has_projects, bool), has_projects
assert auto_init is github.GithubObject.NotSet or isinstance(auto_init, bool), auto_init
assert license_template is github.GithubObject.NotSet or isinstance(license_template, (str, unicode)), license_template
assert gitignore_template is github.GithubObject.NotSet or isinstance(gitignore_template, (str, unicode)), gitignore_template
assert allow_squash_merge is github.GithubObject.NotSet or isinstance(allow_squash_merge, bool), allow_squash_merge
assert allow_merge_commit is github.GithubObject.NotSet or isinstance(allow_merge_commit, bool), allow_merge_commit
assert allow_rebase_merge is github.GithubObject.NotSet or isinstance(allow_rebase_merge, bool), allow_rebase_merge
post_parameters = {
"name": name,
}
if description is not github.GithubObject.NotSet:
post_parameters["description"] = description
if homepage is not github.GithubObject.NotSet:
post_parameters["homepage"] = homepage
if private is not github.GithubObject.NotSet:
post_parameters["private"] = private
if has_issues is not github.GithubObject.NotSet:
post_parameters["has_issues"] = has_issues
if has_wiki is not github.GithubObject.NotSet:
post_parameters["has_wiki"] = has_wiki
if has_downloads is not github.GithubObject.NotSet:
post_parameters["has_downloads"] = has_downloads
if has_projects is not github.GithubObject.NotSet:
post_parameters["has_projects"] = has_projects
if auto_init is not github.GithubObject.NotSet:
post_parameters["auto_init"] = auto_init
if license_template is not github.GithubObject.NotSet:
post_parameters["license_template"] = license_template
if gitignore_template is not github.GithubObject.NotSet:
post_parameters["gitignore_template"] = gitignore_template
if allow_squash_merge is not github.GithubObject.NotSet:
post_parameters["allow_squash_merge"] = allow_squash_merge
if allow_merge_commit is not github.GithubObject.NotSet:
post_parameters["allow_merge_commit"] = allow_merge_commit
if allow_rebase_merge is not github.GithubObject.NotSet:
post_parameters["allow_rebase_merge"] = allow_rebase_merge
headers, data = self._requester.requestJsonAndCheck(
"POST",
"/user/repos",
input=post_parameters
)
return github.Repository.Repository(self._requester, headers, data, completed=True) | ['def', 'create_repo', '(', 'self', ',', 'name', ',', 'description', '=', 'github', '.', 'GithubObject', '.', 'NotSet', ',', 'homepage', '=', 'github', '.', 'GithubObject', '.', 'NotSet', ',', 'private', '=', 'github', '.', 'GithubObject', '.', 'NotSet', ',', 'has_issues', '=', 'github', '.', 'GithubObject', '.', 'NotSet', ',', 'has_wiki', '=', 'github', '.', 'GithubObject', '.', 'NotSet', ',', 'has_downloads', '=', 'github', '.', 'GithubObject', '.', 'NotSet', ',', 'has_projects', '=', 'github', '.', 'GithubObject', '.', 'NotSet', ',', 'auto_init', '=', 'github', '.', 'GithubObject', '.', 'NotSet', ',', 'license_template', '=', 'github', '.', 'GithubObject', '.', 'NotSet', ',', 'gitignore_template', '=', 'github', '.', 'GithubObject', '.', 'NotSet', ',', 'allow_squash_merge', '=', 'github', '.', 'GithubObject', '.', 'NotSet', ',', 'allow_merge_commit', '=', 'github', '.', 'GithubObject', '.', 'NotSet', ',', 'allow_rebase_merge', '=', 'github', '.', 'GithubObject', '.', 'NotSet', ')', ':', 'assert', 'isinstance', '(', 'name', ',', '(', 'str', ',', 'unicode', ')', ')', ',', 'name', 'assert', 'description', 'is', 'github', '.', 'GithubObject', '.', 'NotSet', 'or', 'isinstance', '(', 'description', ',', '(', 'str', ',', 'unicode', ')', ')', ',', 'description', 'assert', 'homepage', 'is', 'github', '.', 'GithubObject', '.', 'NotSet', 'or', 'isinstance', '(', 'homepage', ',', '(', 'str', ',', 'unicode', ')', ')', ',', 'homepage', 'assert', 'private', 'is', 'github', '.', 'GithubObject', '.', 'NotSet', 'or', 'isinstance', '(', 'private', ',', 'bool', ')', ',', 'private', 'assert', 'has_issues', 'is', 'github', '.', 'GithubObject', '.', 'NotSet', 'or', 'isinstance', '(', 'has_issues', ',', 'bool', ')', ',', 'has_issues', 'assert', 'has_wiki', 'is', 'github', '.', 'GithubObject', '.', 'NotSet', 'or', 'isinstance', '(', 'has_wiki', ',', 'bool', ')', ',', 'has_wiki', 'assert', 'has_downloads', 'is', 'github', '.', 'GithubObject', '.', 'NotSet', 'or', 'isinstance', '(', 'has_downloads', ',', 'bool', ')', ',', 'has_downloads', 'assert', 'has_projects', 'is', 'github', '.', 'GithubObject', '.', 'NotSet', 'or', 'isinstance', '(', 'has_projects', ',', 'bool', ')', ',', 'has_projects', 'assert', 'auto_init', 'is', 'github', '.', 'GithubObject', '.', 'NotSet', 'or', 'isinstance', '(', 'auto_init', ',', 'bool', ')', ',', 'auto_init', 'assert', 'license_template', 'is', 'github', '.', 'GithubObject', '.', 'NotSet', 'or', 'isinstance', '(', 'license_template', ',', '(', 'str', ',', 'unicode', ')', ')', ',', 'license_template', 'assert', 'gitignore_template', 'is', 'github', '.', 'GithubObject', '.', 'NotSet', 'or', 'isinstance', '(', 'gitignore_template', ',', '(', 'str', ',', 'unicode', ')', ')', ',', 'gitignore_template', 'assert', 'allow_squash_merge', 'is', 'github', '.', 'GithubObject', '.', 'NotSet', 'or', 'isinstance', '(', 'allow_squash_merge', ',', 'bool', ')', ',', 'allow_squash_merge', 'assert', 'allow_merge_commit', 'is', 'github', '.', 'GithubObject', '.', 'NotSet', 'or', 'isinstance', '(', 'allow_merge_commit', ',', 'bool', ')', ',', 'allow_merge_commit', 'assert', 'allow_rebase_merge', 'is', 'github', '.', 'GithubObject', '.', 'NotSet', 'or', 'isinstance', '(', 'allow_rebase_merge', ',', 'bool', ')', ',', 'allow_rebase_merge', 'post_parameters', '=', '{', '"name"', ':', 'name', ',', '}', 'if', 'description', 'is', 'not', 'github', '.', 'GithubObject', '.', 'NotSet', ':', 'post_parameters', '[', 
'"description"', ']', '=', 'description', 'if', 'homepage', 'is', 'not', 'github', '.', 'GithubObject', '.', 'NotSet', ':', 'post_parameters', '[', '"homepage"', ']', '=', 'homepage', 'if', 'private', 'is', 'not', 'github', '.', 'GithubObject', '.', 'NotSet', ':', 'post_parameters', '[', '"private"', ']', '=', 'private', 'if', 'has_issues', 'is', 'not', 'github', '.', 'GithubObject', '.', 'NotSet', ':', 'post_parameters', '[', '"has_issues"', ']', '=', 'has_issues', 'if', 'has_wiki', 'is', 'not', 'github', '.', 'GithubObject', '.', 'NotSet', ':', 'post_parameters', '[', '"has_wiki"', ']', '=', 'has_wiki', 'if', 'has_downloads', 'is', 'not', 'github', '.', 'GithubObject', '.', 'NotSet', ':', 'post_parameters', '[', '"has_downloads"', ']', '=', 'has_downloads', 'if', 'has_projects', 'is', 'not', 'github', '.', 'GithubObject', '.', 'NotSet', ':', 'post_parameters', '[', '"has_projects"', ']', '=', 'has_projects', 'if', 'auto_init', 'is', 'not', 'github', '.', 'GithubObject', '.', 'NotSet', ':', 'post_parameters', '[', '"auto_init"', ']', '=', 'auto_init', 'if', 'license_template', 'is', 'not', 'github', '.', 'GithubObject', '.', 'NotSet', ':', 'post_parameters', '[', '"license_template"', ']', '=', 'license_template', 'if', 'gitignore_template', 'is', 'not', 'github', '.', 'GithubObject', '.', 'NotSet', ':', 'post_parameters', '[', '"gitignore_template"', ']', '=', 'gitignore_template', 'if', 'allow_squash_merge', 'is', 'not', 'github', '.', 'GithubObject', '.', 'NotSet', ':', 'post_parameters', '[', '"allow_squash_merge"', ']', '=', 'allow_squash_merge', 'if', 'allow_merge_commit', 'is', 'not', 'github', '.', 'GithubObject', '.', 'NotSet', ':', 'post_parameters', '[', '"allow_merge_commit"', ']', '=', 'allow_merge_commit', 'if', 'allow_rebase_merge', 'is', 'not', 'github', '.', 'GithubObject', '.', 'NotSet', ':', 'post_parameters', '[', '"allow_rebase_merge"', ']', '=', 'allow_rebase_merge', 'headers', ',', 'data', '=', 'self', '.', '_requester', '.', 'requestJsonAndCheck', '(', '"POST"', ',', '"/user/repos"', ',', 'input', '=', 'post_parameters', ')', 'return', 'github', '.', 'Repository', '.', 'Repository', '(', 'self', '.', '_requester', ',', 'headers', ',', 'data', ',', 'completed', '=', 'True', ')'] | :calls: `POST /user/repos <http://developer.github.com/v3/repos>`_
:param name: string
:param description: string
:param homepage: string
:param private: bool
:param has_issues: bool
:param has_wiki: bool
:param has_downloads: bool
:param has_projects: bool
:param auto_init: bool
:param license_template: string
:param gitignore_template: string
:param allow_squash_merge: bool
:param allow_merge_commit: bool
:param allow_rebase_merge: bool
:rtype: :class:`github.Repository.Repository` | [':', 'calls', ':', 'POST', '/', 'user', '/', 'repos', '<http', ':', '//', 'developer', '.', 'github', '.', 'com', '/', 'v3', '/', 'repos', '>', '_', ':', 'param', 'name', ':', 'string', ':', 'param', 'description', ':', 'string', ':', 'param', 'homepage', ':', 'string', ':', 'param', 'private', ':', 'bool', ':', 'param', 'has_issues', ':', 'bool', ':', 'param', 'has_wiki', ':', 'bool', ':', 'param', 'has_downloads', ':', 'bool', ':', 'param', 'has_projects', ':', 'bool', ':', 'param', 'auto_init', ':', 'bool', ':', 'param', 'license_template', ':', 'string', ':', 'param', 'gitignore_template', ':', 'string', ':', 'param', 'allow_squash_merge', ':', 'bool', ':', 'param', 'allow_merge_commit', ':', 'bool', ':', 'param', 'allow_rebase_merge', ':', 'bool', ':', 'rtype', ':', ':', 'class', ':', 'github', '.', 'Repository', '.', 'Repository'] | train | https://github.com/PyGithub/PyGithub/blob/f716df86bbe7dc276c6596699fa9712b61ef974c/github/AuthenticatedUser.py#L529-L601 |
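Illustrative usage sketch for create_repo(); the access token and repository settings are placeholders.

from github import Github

g = Github('ACCESS_TOKEN')            # placeholder personal access token
user = g.get_user()                   # AuthenticatedUser
repo = user.create_repo(
    'demo-repo',
    description='Created via PyGithub',
    private=True,
    auto_init=True,
)
print(repo.full_name)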
5,993 | pyamg/pyamg | pyamg/krylov/_gmres_mgs.py | gmres_mgs | def gmres_mgs(A, b, x0=None, tol=1e-5, restrt=None, maxiter=None, xtype=None,
M=None, callback=None, residuals=None, reorth=False):
"""Generalized Minimum Residual Method (GMRES) based on MGS.
GMRES iteratively refines the initial solution guess to the system
Ax = b
Modified Gram-Schmidt version
Parameters
----------
A : array, matrix, sparse matrix, LinearOperator
n x n, linear system to solve
b : array, matrix
right hand side, shape is (n,) or (n,1)
x0 : array, matrix
initial guess, default is a vector of zeros
tol : float
relative convergence tolerance, i.e. tol is scaled by the norm
of the initial preconditioned residual
restrt : None, int
- if int, restrt is max number of inner iterations
and maxiter is the max number of outer iterations
- if None, do not restart GMRES, and max number of inner iterations
is maxiter
maxiter : None, int
- if restrt is None, maxiter is the max number of inner iterations
and GMRES does not restart
- if restrt is int, maxiter is the max number of outer iterations,
and restrt is the max number of inner iterations
xtype : type
dtype for the solution, default is automatic type detection
M : array, matrix, sparse matrix, LinearOperator
n x n, inverted preconditioner, i.e. solve M A x = M b.
callback : function
User-supplied function is called after each iteration as
callback(xk), where xk is the current solution vector
residuals : list
residuals contains the preconditioned residual norm history,
including the initial residual.
reorth : boolean
If True, then a check is made whether to re-orthogonalize the Krylov
space each GMRES iteration
Returns
-------
(xNew, info)
xNew : an updated guess to the solution of Ax = b
info : halting status of gmres
== =============================================
0 successful exit
>0 convergence to tolerance not achieved,
return iteration count instead. This value
is precisely the order of the Krylov space.
<0 numerical breakdown, or illegal input
== =============================================
Notes
-----
- The LinearOperator class is in scipy.sparse.linalg.interface.
Use this class if you prefer to define A or M as a mat-vec routine
as opposed to explicitly constructing the matrix. A.psolve(..) is
still supported as a legacy.
- For robustness, modified Gram-Schmidt is used to orthogonalize the
Krylov Space. Givens Rotations are used to provide the residual norm
each iteration
Examples
--------
>>> from pyamg.krylov import gmres
>>> from pyamg.util.linalg import norm
>>> import numpy as np
>>> from pyamg.gallery import poisson
>>> A = poisson((10,10))
>>> b = np.ones((A.shape[0],))
>>> (x,flag) = gmres(A,b, maxiter=2, tol=1e-8, orthog='mgs')
>>> print(norm(b - A*x))
6.5428213057
References
----------
.. [1] Yousef Saad, "Iterative Methods for Sparse Linear Systems,
Second Edition", SIAM, pp. 151-172, pp. 272-275, 2003
http://www-users.cs.umn.edu/~saad/books.html
.. [2] C. T. Kelley, http://www4.ncsu.edu/~ctk/matlab_roots.html
"""
# Convert inputs to linear system, with error checking
A, M, x, b, postprocess = make_system(A, M, x0, b)
dimen = A.shape[0]
# Ensure that warnings are always reissued from this function
import warnings
warnings.filterwarnings('always', module=r'pyamg\.krylov\._gmres_mgs')
# Choose type
if not hasattr(A, 'dtype'):
Atype = upcast(x.dtype, b.dtype)
else:
Atype = A.dtype
if not hasattr(M, 'dtype'):
Mtype = upcast(x.dtype, b.dtype)
else:
Mtype = M.dtype
xtype = upcast(Atype, x.dtype, b.dtype, Mtype)
if restrt is not None:
restrt = int(restrt)
if maxiter is not None:
maxiter = int(maxiter)
# Get fast access to underlying BLAS routines
# dotc is the conjugate dot, dotu does no conjugation
[lartg] = get_lapack_funcs(['lartg'], [x])
if np.iscomplexobj(np.zeros((1,), dtype=xtype)):
[axpy, dotu, dotc, scal] =\
get_blas_funcs(['axpy', 'dotu', 'dotc', 'scal'], [x])
else:
# real type
[axpy, dotu, dotc, scal] =\
get_blas_funcs(['axpy', 'dot', 'dot', 'scal'], [x])
# Make full use of direct access to BLAS by defining own norm
def norm(z):
return np.sqrt(np.real(dotc(z, z)))
# Should norm(r) be kept
if residuals == []:
keep_r = True
else:
keep_r = False
# Set number of outer and inner iterations
if restrt:
if maxiter:
max_outer = maxiter
else:
max_outer = 1
if restrt > dimen:
warn('Setting number of inner iterations (restrt) to maximum\
allowed, which is A.shape[0] ')
restrt = dimen
max_inner = restrt
else:
max_outer = 1
# Check for None before comparing, so the default maxiter does not raise a TypeError on Python 3
if maxiter is None:
maxiter = min(dimen, 40)
elif maxiter > dimen:
warn('Setting number of inner iterations (maxiter) to maximum\
allowed, which is A.shape[0] ')
maxiter = dimen
max_inner = maxiter
# Is this a one dimensional matrix?
if dimen == 1:
entry = np.ravel(A*np.array([1.0], dtype=xtype))
return (postprocess(b/entry), 0)
# Prep for method
r = b - np.ravel(A*x)
# Apply preconditioner
r = np.ravel(M*r)
normr = norm(r)
if keep_r:
residuals.append(normr)
# Check for nan, inf
# if isnan(r).any() or isinf(r).any():
# warn('inf or nan after application of preconditioner')
# return(postprocess(x), -1)
# Check initial guess ( scaling by b, if b != 0,
# must account for case when norm(b) is very small)
normb = norm(b)
if normb == 0.0:
normb = 1.0
if normr < tol*normb:
return (postprocess(x), 0)
# Scale tol by ||r_0||_2, we use the preconditioned residual
# because this is left preconditioned GMRES.
if normr != 0.0:
tol = tol*normr
# Use separate variable to track iterations. If convergence fails, we
# cannot simply report niter = (outer-1)*max_outer + inner. Numerical
# error could cause the inner loop to halt while the actual ||r|| > tol.
niter = 0
# Begin GMRES
for outer in range(max_outer):
# Preallocate for Givens Rotations, Hessenberg matrix and Krylov Space
# Space required is O(dimen*max_inner).
# NOTE: We are dealing with row-major matrices, so we traverse in a
# row-major fashion,
# i.e., H and V's transpose is what we store.
Q = [] # Givens Rotations
# Upper Hessenberg matrix, which is then
# converted to upper tri with Givens Rots
H = np.zeros((max_inner+1, max_inner+1), dtype=xtype)
V = np.zeros((max_inner+1, dimen), dtype=xtype) # Krylov Space
# vs store the pointers to each column of V.
# This saves a considerable amount of time.
vs = []
# v = r/normr
V[0, :] = scal(1.0/normr, r)
vs.append(V[0, :])
# This is the RHS vector for the problem in the Krylov Space
g = np.zeros((dimen,), dtype=xtype)
g[0] = normr
for inner in range(max_inner):
# New Search Direction
v = V[inner+1, :]
v[:] = np.ravel(M*(A*vs[-1]))
vs.append(v)
normv_old = norm(v)
# Check for nan, inf
# if isnan(V[inner+1, :]).any() or isinf(V[inner+1, :]).any():
# warn('inf or nan after application of preconditioner')
# return(postprocess(x), -1)
# Modified Gram Schmidt
for k in range(inner+1):
vk = vs[k]
alpha = dotc(vk, v)
H[inner, k] = alpha
v[:] = axpy(vk, v, dimen, -alpha)
normv = norm(v)
H[inner, inner+1] = normv
# Re-orthogonalize
if (reorth is True) and (normv_old == normv_old + 0.001*normv):
for k in range(inner+1):
vk = vs[k]
alpha = dotc(vk, v)
H[inner, k] = H[inner, k] + alpha
v[:] = axpy(vk, v, dimen, -alpha)
# Check for breakdown
if H[inner, inner+1] != 0.0:
v[:] = scal(1.0/H[inner, inner+1], v)
# Apply previous Givens rotations to H
if inner > 0:
apply_givens(Q, H[inner, :], inner)
# Calculate and apply next complex-valued Givens Rotation
# ==> Note that if max_inner = dimen, then this is unnecessary
# for the last inner
# iteration, when inner = dimen-1.
if inner != dimen-1:
if H[inner, inner+1] != 0:
[c, s, r] = lartg(H[inner, inner], H[inner, inner+1])
Qblock = np.array([[c, s], [-np.conjugate(s), c]],
dtype=xtype)
Q.append(Qblock)
# Apply Givens Rotation to g,
# the RHS for the linear system in the Krylov Subspace.
g[inner:inner+2] = np.dot(Qblock, g[inner:inner+2])
# Apply effect of Givens Rotation to H
H[inner, inner] = dotu(Qblock[0, :],
H[inner, inner:inner+2])
H[inner, inner+1] = 0.0
niter += 1
# Don't update normr if last inner iteration, because
# normr is calculated directly after this loop ends.
if inner < max_inner-1:
normr = np.abs(g[inner+1])
if normr < tol:
break
# Allow user access to the iterates
if callback is not None:
callback(x)
if keep_r:
residuals.append(normr)
# end inner loop, back to outer loop
# Find best update to x in Krylov Space V. Solve inner x inner system.
y = sp.linalg.solve(H[0:inner+1, 0:inner+1].T, g[0:inner+1])
update = np.ravel(np.mat(V[:inner+1, :]).T*y.reshape(-1, 1))
x = x + update
r = b - np.ravel(A*x)
# Apply preconditioner
r = np.ravel(M*r)
normr = norm(r)
# Check for nan, inf
# if isnan(r).any() or isinf(r).any():
# warn('inf or nan after application of preconditioner')
# return(postprocess(x), -1)
# Allow user access to the iterates
if callback is not None:
callback(x)
if keep_r:
residuals.append(normr)
# Has GMRES stagnated?
indices = (x != 0)
if indices.any():
change = np.max(np.abs(update[indices] / x[indices]))
if change < 1e-12:
# No change, halt
return (postprocess(x), -1)
# test for convergence
if normr < tol:
return (postprocess(x), 0)
# end outer loop
return (postprocess(x), niter) | python | def gmres_mgs(A, b, x0=None, tol=1e-5, restrt=None, maxiter=None, xtype=None,
M=None, callback=None, residuals=None, reorth=False):
"""Generalized Minimum Residual Method (GMRES) based on MGS.
GMRES iteratively refines the initial solution guess to the system
Ax = b
Modified Gram-Schmidt version
Parameters
----------
A : array, matrix, sparse matrix, LinearOperator
n x n, linear system to solve
b : array, matrix
right hand side, shape is (n,) or (n,1)
x0 : array, matrix
initial guess, default is a vector of zeros
tol : float
relative convergence tolerance, i.e. tol is scaled by the norm
of the initial preconditioned residual
restrt : None, int
- if int, restrt is max number of inner iterations
and maxiter is the max number of outer iterations
- if None, do not restart GMRES, and max number of inner iterations
is maxiter
maxiter : None, int
- if restrt is None, maxiter is the max number of inner iterations
and GMRES does not restart
- if restrt is int, maxiter is the max number of outer iterations,
and restrt is the max number of inner iterations
xtype : type
dtype for the solution, default is automatic type detection
M : array, matrix, sparse matrix, LinearOperator
n x n, inverted preconditioner, i.e. solve M A x = M b.
callback : function
User-supplied function is called after each iteration as
callback(xk), where xk is the current solution vector
residuals : list
residuals contains the preconditioned residual norm history,
including the initial residual.
reorth : boolean
If True, then a check is made whether to re-orthogonalize the Krylov
space each GMRES iteration
Returns
-------
(xNew, info)
xNew : an updated guess to the solution of Ax = b
info : halting status of gmres
== =============================================
0 successful exit
>0 convergence to tolerance not achieved,
return iteration count instead. This value
is precisely the order of the Krylov space.
<0 numerical breakdown, or illegal input
== =============================================
Notes
-----
- The LinearOperator class is in scipy.sparse.linalg.interface.
Use this class if you prefer to define A or M as a mat-vec routine
as opposed to explicitly constructing the matrix. A.psolve(..) is
still supported as a legacy.
- For robustness, modified Gram-Schmidt is used to orthogonalize the
Krylov Space. Givens Rotations are used to provide the residual norm
each iteration
Examples
--------
>>> from pyamg.krylov import gmres
>>> from pyamg.util.linalg import norm
>>> import numpy as np
>>> from pyamg.gallery import poisson
>>> A = poisson((10,10))
>>> b = np.ones((A.shape[0],))
>>> (x,flag) = gmres(A,b, maxiter=2, tol=1e-8, orthog='mgs')
>>> print(norm(b - A*x))
6.5428213057
References
----------
.. [1] Yousef Saad, "Iterative Methods for Sparse Linear Systems,
Second Edition", SIAM, pp. 151-172, pp. 272-275, 2003
http://www-users.cs.umn.edu/~saad/books.html
.. [2] C. T. Kelley, http://www4.ncsu.edu/~ctk/matlab_roots.html
"""
# Convert inputs to linear system, with error checking
A, M, x, b, postprocess = make_system(A, M, x0, b)
dimen = A.shape[0]
# Ensure that warnings are always reissued from this function
import warnings
warnings.filterwarnings('always', module=r'pyamg\.krylov\._gmres_mgs')
# Choose type
if not hasattr(A, 'dtype'):
Atype = upcast(x.dtype, b.dtype)
else:
Atype = A.dtype
if not hasattr(M, 'dtype'):
Mtype = upcast(x.dtype, b.dtype)
else:
Mtype = M.dtype
xtype = upcast(Atype, x.dtype, b.dtype, Mtype)
if restrt is not None:
restrt = int(restrt)
if maxiter is not None:
maxiter = int(maxiter)
# Get fast access to underlying BLAS routines
# dotc is the conjugate dot, dotu does no conjugation
[lartg] = get_lapack_funcs(['lartg'], [x])
if np.iscomplexobj(np.zeros((1,), dtype=xtype)):
[axpy, dotu, dotc, scal] =\
get_blas_funcs(['axpy', 'dotu', 'dotc', 'scal'], [x])
else:
# real type
[axpy, dotu, dotc, scal] =\
get_blas_funcs(['axpy', 'dot', 'dot', 'scal'], [x])
# Make full use of direct access to BLAS by defining own norm
def norm(z):
return np.sqrt(np.real(dotc(z, z)))
# Should norm(r) be kept
if residuals == []:
keep_r = True
else:
keep_r = False
# Set number of outer and inner iterations
if restrt:
if maxiter:
max_outer = maxiter
else:
max_outer = 1
if restrt > dimen:
warn('Setting number of inner iterations (restrt) to maximum\
allowed, which is A.shape[0] ')
restrt = dimen
max_inner = restrt
else:
max_outer = 1
# Check for None before comparing, so the default maxiter does not raise a TypeError on Python 3
if maxiter is None:
maxiter = min(dimen, 40)
elif maxiter > dimen:
warn('Setting number of inner iterations (maxiter) to maximum\
allowed, which is A.shape[0] ')
maxiter = dimen
max_inner = maxiter
# Is this a one dimensional matrix?
if dimen == 1:
entry = np.ravel(A*np.array([1.0], dtype=xtype))
return (postprocess(b/entry), 0)
# Prep for method
r = b - np.ravel(A*x)
# Apply preconditioner
r = np.ravel(M*r)
normr = norm(r)
if keep_r:
residuals.append(normr)
# Check for nan, inf
# if isnan(r).any() or isinf(r).any():
# warn('inf or nan after application of preconditioner')
# return(postprocess(x), -1)
# Check initial guess ( scaling by b, if b != 0,
# must account for case when norm(b) is very small)
normb = norm(b)
if normb == 0.0:
normb = 1.0
if normr < tol*normb:
return (postprocess(x), 0)
# Scale tol by ||r_0||_2, we use the preconditioned residual
# because this is left preconditioned GMRES.
if normr != 0.0:
tol = tol*normr
# Use separate variable to track iterations. If convergence fails, we
# cannot simply report niter = (outer-1)*max_outer + inner. Numerical
# error could cause the inner loop to halt while the actual ||r|| > tol.
niter = 0
# Begin GMRES
for outer in range(max_outer):
# Preallocate for Givens Rotations, Hessenberg matrix and Krylov Space
# Space required is O(dimen*max_inner).
# NOTE: We are dealing with row-major matrices, so we traverse in a
# row-major fashion,
# i.e., H and V's transpose is what we store.
Q = [] # Givens Rotations
# Upper Hessenberg matrix, which is then
# converted to upper tri with Givens Rots
H = np.zeros((max_inner+1, max_inner+1), dtype=xtype)
V = np.zeros((max_inner+1, dimen), dtype=xtype) # Krylov Space
# vs store the pointers to each column of V.
# This saves a considerable amount of time.
vs = []
# v = r/normr
V[0, :] = scal(1.0/normr, r)
vs.append(V[0, :])
# This is the RHS vector for the problem in the Krylov Space
g = np.zeros((dimen,), dtype=xtype)
g[0] = normr
for inner in range(max_inner):
# New Search Direction
v = V[inner+1, :]
v[:] = np.ravel(M*(A*vs[-1]))
vs.append(v)
normv_old = norm(v)
# Check for nan, inf
# if isnan(V[inner+1, :]).any() or isinf(V[inner+1, :]).any():
# warn('inf or nan after application of preconditioner')
# return(postprocess(x), -1)
# Modified Gram Schmidt
for k in range(inner+1):
vk = vs[k]
alpha = dotc(vk, v)
H[inner, k] = alpha
v[:] = axpy(vk, v, dimen, -alpha)
normv = norm(v)
H[inner, inner+1] = normv
# Re-orthogonalize
if (reorth is True) and (normv_old == normv_old + 0.001*normv):
for k in range(inner+1):
vk = vs[k]
alpha = dotc(vk, v)
H[inner, k] = H[inner, k] + alpha
v[:] = axpy(vk, v, dimen, -alpha)
# Check for breakdown
if H[inner, inner+1] != 0.0:
v[:] = scal(1.0/H[inner, inner+1], v)
# Apply previous Givens rotations to H
if inner > 0:
apply_givens(Q, H[inner, :], inner)
# Calculate and apply next complex-valued Givens Rotation
# ==> Note that if max_inner = dimen, then this is unnecessary
# for the last inner
# iteration, when inner = dimen-1.
if inner != dimen-1:
if H[inner, inner+1] != 0:
[c, s, r] = lartg(H[inner, inner], H[inner, inner+1])
Qblock = np.array([[c, s], [-np.conjugate(s), c]],
dtype=xtype)
Q.append(Qblock)
# Apply Givens Rotation to g,
# the RHS for the linear system in the Krylov Subspace.
g[inner:inner+2] = np.dot(Qblock, g[inner:inner+2])
# Apply effect of Givens Rotation to H
H[inner, inner] = dotu(Qblock[0, :],
H[inner, inner:inner+2])
H[inner, inner+1] = 0.0
niter += 1
# Don't update normr if last inner iteration, because
# normr is calculated directly after this loop ends.
if inner < max_inner-1:
normr = np.abs(g[inner+1])
if normr < tol:
break
# Allow user access to the iterates
if callback is not None:
callback(x)
if keep_r:
residuals.append(normr)
# end inner loop, back to outer loop
# Find best update to x in Krylov Space V. Solve inner x inner system.
y = sp.linalg.solve(H[0:inner+1, 0:inner+1].T, g[0:inner+1])
update = np.ravel(np.mat(V[:inner+1, :]).T*y.reshape(-1, 1))
x = x + update
r = b - np.ravel(A*x)
# Apply preconditioner
r = np.ravel(M*r)
normr = norm(r)
# Check for nan, inf
# if isnan(r).any() or isinf(r).any():
# warn('inf or nan after application of preconditioner')
# return(postprocess(x), -1)
# Allow user access to the iterates
if callback is not None:
callback(x)
if keep_r:
residuals.append(normr)
# Has GMRES stagnated?
indices = (x != 0)
if indices.any():
change = np.max(np.abs(update[indices] / x[indices]))
if change < 1e-12:
# No change, halt
return (postprocess(x), -1)
# test for convergence
if normr < tol:
return (postprocess(x), 0)
# end outer loop
return (postprocess(x), niter) | ['def', 'gmres_mgs', '(', 'A', ',', 'b', ',', 'x0', '=', 'None', ',', 'tol', '=', '1e-5', ',', 'restrt', '=', 'None', ',', 'maxiter', '=', 'None', ',', 'xtype', '=', 'None', ',', 'M', '=', 'None', ',', 'callback', '=', 'None', ',', 'residuals', '=', 'None', ',', 'reorth', '=', 'False', ')', ':', '# Convert inputs to linear system, with error checking', 'A', ',', 'M', ',', 'x', ',', 'b', ',', 'postprocess', '=', 'make_system', '(', 'A', ',', 'M', ',', 'x0', ',', 'b', ')', 'dimen', '=', 'A', '.', 'shape', '[', '0', ']', '# Ensure that warnings are always reissued from this function', 'import', 'warnings', 'warnings', '.', 'filterwarnings', '(', "'always'", ',', 'module', '=', "'pyamg\\.krylov\\._gmres_mgs'", ')', '# Choose type', 'if', 'not', 'hasattr', '(', 'A', ',', "'dtype'", ')', ':', 'Atype', '=', 'upcast', '(', 'x', '.', 'dtype', ',', 'b', '.', 'dtype', ')', 'else', ':', 'Atype', '=', 'A', '.', 'dtype', 'if', 'not', 'hasattr', '(', 'M', ',', "'dtype'", ')', ':', 'Mtype', '=', 'upcast', '(', 'x', '.', 'dtype', ',', 'b', '.', 'dtype', ')', 'else', ':', 'Mtype', '=', 'M', '.', 'dtype', 'xtype', '=', 'upcast', '(', 'Atype', ',', 'x', '.', 'dtype', ',', 'b', '.', 'dtype', ',', 'Mtype', ')', 'if', 'restrt', 'is', 'not', 'None', ':', 'restrt', '=', 'int', '(', 'restrt', ')', 'if', 'maxiter', 'is', 'not', 'None', ':', 'maxiter', '=', 'int', '(', 'maxiter', ')', '# Get fast access to underlying BLAS routines', '# dotc is the conjugate dot, dotu does no conjugation', '[', 'lartg', ']', '=', 'get_lapack_funcs', '(', '[', "'lartg'", ']', ',', '[', 'x', ']', ')', 'if', 'np', '.', 'iscomplexobj', '(', 'np', '.', 'zeros', '(', '(', '1', ',', ')', ',', 'dtype', '=', 'xtype', ')', ')', ':', '[', 'axpy', ',', 'dotu', ',', 'dotc', ',', 'scal', ']', '=', 'get_blas_funcs', '(', '[', "'axpy'", ',', "'dotu'", ',', "'dotc'", ',', "'scal'", ']', ',', '[', 'x', ']', ')', 'else', ':', '# real type', '[', 'axpy', ',', 'dotu', ',', 'dotc', ',', 'scal', ']', '=', 'get_blas_funcs', '(', '[', "'axpy'", ',', "'dot'", ',', "'dot'", ',', "'scal'", ']', ',', '[', 'x', ']', ')', '# Make full use of direct access to BLAS by defining own norm', 'def', 'norm', '(', 'z', ')', ':', 'return', 'np', '.', 'sqrt', '(', 'np', '.', 'real', '(', 'dotc', '(', 'z', ',', 'z', ')', ')', ')', '# Should norm(r) be kept', 'if', 'residuals', '==', '[', ']', ':', 'keep_r', '=', 'True', 'else', ':', 'keep_r', '=', 'False', '# Set number of outer and inner iterations', 'if', 'restrt', ':', 'if', 'maxiter', ':', 'max_outer', '=', 'maxiter', 'else', ':', 'max_outer', '=', '1', 'if', 'restrt', '>', 'dimen', ':', 'warn', '(', "'Setting number of inner iterations (restrt) to maximum\\\n allowed, which is A.shape[0] '", ')', 'restrt', '=', 'dimen', 'max_inner', '=', 'restrt', 'else', ':', 'max_outer', '=', '1', 'if', 'maxiter', '>', 'dimen', ':', 'warn', '(', "'Setting number of inner iterations (maxiter) to maximum\\\n allowed, which is A.shape[0] '", ')', 'maxiter', '=', 'dimen', 'elif', 'maxiter', 'is', 'None', ':', 'maxiter', '=', 'min', '(', 'dimen', ',', '40', ')', 'max_inner', '=', 'maxiter', '# Is this a one dimensional matrix?', 'if', 'dimen', '==', '1', ':', 'entry', '=', 'np', '.', 'ravel', '(', 'A', '*', 'np', '.', 'array', '(', '[', '1.0', ']', ',', 'dtype', '=', 'xtype', ')', ')', 'return', '(', 'postprocess', '(', 'b', '/', 'entry', ')', ',', '0', ')', '# Prep for method', 'r', '=', 'b', '-', 'np', '.', 'ravel', '(', 'A', '*', 'x', ')', '# Apply preconditioner', 'r', '=', 'np', '.', 'ravel', '(', 'M', 
'*', 'r', ')', 'normr', '=', 'norm', '(', 'r', ')', 'if', 'keep_r', ':', 'residuals', '.', 'append', '(', 'normr', ')', '# Check for nan, inf', '# if isnan(r).any() or isinf(r).any():', "# warn('inf or nan after application of preconditioner')", '# return(postprocess(x), -1)', '# Check initial guess ( scaling by b, if b != 0,', '# must account for case when norm(b) is very small)', 'normb', '=', 'norm', '(', 'b', ')', 'if', 'normb', '==', '0.0', ':', 'normb', '=', '1.0', 'if', 'normr', '<', 'tol', '*', 'normb', ':', 'return', '(', 'postprocess', '(', 'x', ')', ',', '0', ')', '# Scale tol by ||r_0||_2, we use the preconditioned residual', '# because this is left preconditioned GMRES.', 'if', 'normr', '!=', '0.0', ':', 'tol', '=', 'tol', '*', 'normr', '# Use separate variable to track iterations. If convergence fails, we', '# cannot simply report niter = (outer-1)*max_outer + inner. Numerical', '# error could cause the inner loop to halt while the actual ||r|| > tol.', 'niter', '=', '0', '# Begin GMRES', 'for', 'outer', 'in', 'range', '(', 'max_outer', ')', ':', '# Preallocate for Givens Rotations, Hessenberg matrix and Krylov Space', '# Space required is O(dimen*max_inner).', '# NOTE: We are dealing with row-major matrices, so we traverse in a', '# row-major fashion,', "# i.e., H and V's transpose is what we store.", 'Q', '=', '[', ']', '# Givens Rotations', '# Upper Hessenberg matrix, which is then', '# converted to upper tri with Givens Rots', 'H', '=', 'np', '.', 'zeros', '(', '(', 'max_inner', '+', '1', ',', 'max_inner', '+', '1', ')', ',', 'dtype', '=', 'xtype', ')', 'V', '=', 'np', '.', 'zeros', '(', '(', 'max_inner', '+', '1', ',', 'dimen', ')', ',', 'dtype', '=', 'xtype', ')', '# Krylov Space', '# vs store the pointers to each column of V.', '# This saves a considerable amount of time.', 'vs', '=', '[', ']', '# v = r/normr', 'V', '[', '0', ',', ':', ']', '=', 'scal', '(', '1.0', '/', 'normr', ',', 'r', ')', 'vs', '.', 'append', '(', 'V', '[', '0', ',', ':', ']', ')', '# This is the RHS vector for the problem in the Krylov Space', 'g', '=', 'np', '.', 'zeros', '(', '(', 'dimen', ',', ')', ',', 'dtype', '=', 'xtype', ')', 'g', '[', '0', ']', '=', 'normr', 'for', 'inner', 'in', 'range', '(', 'max_inner', ')', ':', '# New Search Direction', 'v', '=', 'V', '[', 'inner', '+', '1', ',', ':', ']', 'v', '[', ':', ']', '=', 'np', '.', 'ravel', '(', 'M', '*', '(', 'A', '*', 'vs', '[', '-', '1', ']', ')', ')', 'vs', '.', 'append', '(', 'v', ')', 'normv_old', '=', 'norm', '(', 'v', ')', '# Check for nan, inf', '# if isnan(V[inner+1, :]).any() or isinf(V[inner+1, :]).any():', "# warn('inf or nan after application of preconditioner')", '# return(postprocess(x), -1)', '# Modified Gram Schmidt', 'for', 'k', 'in', 'range', '(', 'inner', '+', '1', ')', ':', 'vk', '=', 'vs', '[', 'k', ']', 'alpha', '=', 'dotc', '(', 'vk', ',', 'v', ')', 'H', '[', 'inner', ',', 'k', ']', '=', 'alpha', 'v', '[', ':', ']', '=', 'axpy', '(', 'vk', ',', 'v', ',', 'dimen', ',', '-', 'alpha', ')', 'normv', '=', 'norm', '(', 'v', ')', 'H', '[', 'inner', ',', 'inner', '+', '1', ']', '=', 'normv', '# Re-orthogonalize', 'if', '(', 'reorth', 'is', 'True', ')', 'and', '(', 'normv_old', '==', 'normv_old', '+', '0.001', '*', 'normv', ')', ':', 'for', 'k', 'in', 'range', '(', 'inner', '+', '1', ')', ':', 'vk', '=', 'vs', '[', 'k', ']', 'alpha', '=', 'dotc', '(', 'vk', ',', 'v', ')', 'H', '[', 'inner', ',', 'k', ']', '=', 'H', '[', 'inner', ',', 'k', ']', '+', 'alpha', 'v', '[', ':', ']', '=', 'axpy', '(', 'vk', ',', 'v', ',', 'dimen', 
',', '-', 'alpha', ')', '# Check for breakdown', 'if', 'H', '[', 'inner', ',', 'inner', '+', '1', ']', '!=', '0.0', ':', 'v', '[', ':', ']', '=', 'scal', '(', '1.0', '/', 'H', '[', 'inner', ',', 'inner', '+', '1', ']', ',', 'v', ')', '# Apply previous Givens rotations to H', 'if', 'inner', '>', '0', ':', 'apply_givens', '(', 'Q', ',', 'H', '[', 'inner', ',', ':', ']', ',', 'inner', ')', '# Calculate and apply next complex-valued Givens Rotation', '# ==> Note that if max_inner = dimen, then this is unnecessary', '# for the last inner', '# iteration, when inner = dimen-1.', 'if', 'inner', '!=', 'dimen', '-', '1', ':', 'if', 'H', '[', 'inner', ',', 'inner', '+', '1', ']', '!=', '0', ':', '[', 'c', ',', 's', ',', 'r', ']', '=', 'lartg', '(', 'H', '[', 'inner', ',', 'inner', ']', ',', 'H', '[', 'inner', ',', 'inner', '+', '1', ']', ')', 'Qblock', '=', 'np', '.', 'array', '(', '[', '[', 'c', ',', 's', ']', ',', '[', '-', 'np', '.', 'conjugate', '(', 's', ')', ',', 'c', ']', ']', ',', 'dtype', '=', 'xtype', ')', 'Q', '.', 'append', '(', 'Qblock', ')', '# Apply Givens Rotation to g,', '# the RHS for the linear system in the Krylov Subspace.', 'g', '[', 'inner', ':', 'inner', '+', '2', ']', '=', 'np', '.', 'dot', '(', 'Qblock', ',', 'g', '[', 'inner', ':', 'inner', '+', '2', ']', ')', '# Apply effect of Givens Rotation to H', 'H', '[', 'inner', ',', 'inner', ']', '=', 'dotu', '(', 'Qblock', '[', '0', ',', ':', ']', ',', 'H', '[', 'inner', ',', 'inner', ':', 'inner', '+', '2', ']', ')', 'H', '[', 'inner', ',', 'inner', '+', '1', ']', '=', '0.0', 'niter', '+=', '1', "# Don't update normr if last inner iteration, because", '# normr is calculated directly after this loop ends.', 'if', 'inner', '<', 'max_inner', '-', '1', ':', 'normr', '=', 'np', '.', 'abs', '(', 'g', '[', 'inner', '+', '1', ']', ')', 'if', 'normr', '<', 'tol', ':', 'break', '# Allow user access to the iterates', 'if', 'callback', 'is', 'not', 'None', ':', 'callback', '(', 'x', ')', 'if', 'keep_r', ':', 'residuals', '.', 'append', '(', 'normr', ')', '# end inner loop, back to outer loop', '# Find best update to x in Krylov Space V. 
Solve inner x inner system.', 'y', '=', 'sp', '.', 'linalg', '.', 'solve', '(', 'H', '[', '0', ':', 'inner', '+', '1', ',', '0', ':', 'inner', '+', '1', ']', '.', 'T', ',', 'g', '[', '0', ':', 'inner', '+', '1', ']', ')', 'update', '=', 'np', '.', 'ravel', '(', 'np', '.', 'mat', '(', 'V', '[', ':', 'inner', '+', '1', ',', ':', ']', ')', '.', 'T', '*', 'y', '.', 'reshape', '(', '-', '1', ',', '1', ')', ')', 'x', '=', 'x', '+', 'update', 'r', '=', 'b', '-', 'np', '.', 'ravel', '(', 'A', '*', 'x', ')', '# Apply preconditioner', 'r', '=', 'np', '.', 'ravel', '(', 'M', '*', 'r', ')', 'normr', '=', 'norm', '(', 'r', ')', '# Check for nan, inf', '# if isnan(r).any() or isinf(r).any():', "# warn('inf or nan after application of preconditioner')", '# return(postprocess(x), -1)', '# Allow user access to the iterates', 'if', 'callback', 'is', 'not', 'None', ':', 'callback', '(', 'x', ')', 'if', 'keep_r', ':', 'residuals', '.', 'append', '(', 'normr', ')', '# Has GMRES stagnated?', 'indices', '=', '(', 'x', '!=', '0', ')', 'if', 'indices', '.', 'any', '(', ')', ':', 'change', '=', 'np', '.', 'max', '(', 'np', '.', 'abs', '(', 'update', '[', 'indices', ']', '/', 'x', '[', 'indices', ']', ')', ')', 'if', 'change', '<', '1e-12', ':', '# No change, halt', 'return', '(', 'postprocess', '(', 'x', ')', ',', '-', '1', ')', '# test for convergence', 'if', 'normr', '<', 'tol', ':', 'return', '(', 'postprocess', '(', 'x', ')', ',', '0', ')', '# end outer loop', 'return', '(', 'postprocess', '(', 'x', ')', ',', 'niter', ')'] | Generalized Minimum Residual Method (GMRES) based on MGS.
GMRES iteratively refines the initial solution guess to the system
Ax = b
Modified Gram-Schmidt version
Parameters
----------
A : array, matrix, sparse matrix, LinearOperator
n x n, linear system to solve
b : array, matrix
right hand side, shape is (n,) or (n,1)
x0 : array, matrix
initial guess, default is a vector of zeros
tol : float
relative convergence tolerance, i.e. tol is scaled by the norm
of the initial preconditioned residual
restrt : None, int
- if int, restrt is max number of inner iterations
and maxiter is the max number of outer iterations
- if None, do not restart GMRES, and max number of inner iterations
is maxiter
maxiter : None, int
- if restrt is None, maxiter is the max number of inner iterations
and GMRES does not restart
- if restrt is int, maxiter is the max number of outer iterations,
and restrt is the max number of inner iterations
xtype : type
dtype for the solution, default is automatic type detection
M : array, matrix, sparse matrix, LinearOperator
n x n, inverted preconditioner, i.e. solve M A x = M b.
callback : function
User-supplied function is called after each iteration as
callback(xk), where xk is the current solution vector
residuals : list
residuals contains the preconditioned residual norm history,
including the initial residual.
reorth : boolean
If True, then a check is made whether to re-orthogonalize the Krylov
space each GMRES iteration
Returns
-------
(xNew, info)
xNew : an updated guess to the solution of Ax = b
info : halting status of gmres
== =============================================
0 successful exit
>0 convergence to tolerance not achieved,
return iteration count instead. This value
is precisely the order of the Krylov space.
<0 numerical breakdown, or illegal input
== =============================================
Notes
-----
- The LinearOperator class is in scipy.sparse.linalg.interface.
Use this class if you prefer to define A or M as a mat-vec routine
as opposed to explicitly constructing the matrix. A.psolve(..) is
still supported as a legacy.
- For robustness, modified Gram-Schmidt is used to orthogonalize the
Krylov Space. Givens Rotations are used to provide the residual norm
each iteration.
Examples
--------
>>> from pyamg.krylov import gmres
>>> from pyamg.util.linalg import norm
>>> import numpy as np
>>> from pyamg.gallery import poisson
>>> A = poisson((10,10))
>>> b = np.ones((A.shape[0],))
>>> (x,flag) = gmres(A,b, maxiter=2, tol=1e-8, orthog='mgs')
>>> print norm(b - A*x)
>>> 6.5428213057
References
----------
.. [1] Yousef Saad, "Iterative Methods for Sparse Linear Systems,
Second Edition", SIAM, pp. 151-172, pp. 272-275, 2003
http://www-users.cs.umn.edu/~saad/books.html
.. [2] C. T. Kelley, http://www4.ncsu.edu/~ctk/matlab_roots.html | ['Generalized', 'Minimum', 'Residual', 'Method', '(', 'GMRES', ')', 'based', 'on', 'MGS', '.'] | train | https://github.com/pyamg/pyamg/blob/89dc54aa27e278f65d2f54bdaf16ab97d7768fa6/pyamg/krylov/_gmres_mgs.py#L41-L365 |
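The Notes for this row hinge on two building blocks, modified Gram-Schmidt orthogonalization and Givens rotations, which are hard to see inside the tokenized source. Below is a minimal numpy sketch of the MGS step only; it is an illustration of the technique described above, not pyamg code, and the helper name mgs_orthonormalize is invented for this example.

import numpy as np

def mgs_orthonormalize(V, v):
    # Orthogonalize v against the columns of V one at a time (modified
    # Gram-Schmidt), returning the new Hessenberg column h and the
    # normalized basis vector, with the same breakdown check as the loop above.
    h = np.zeros(V.shape[1] + 1)
    for k in range(V.shape[1]):
        h[k] = np.dot(V[:, k].conj(), v)
        v = v - h[k] * V[:, k]
    h[-1] = np.linalg.norm(v)
    if h[-1] != 0.0:
        v = v / h[-1]
    return h, v

rng = np.random.default_rng(0)
V = rng.standard_normal((5, 1))
V[:, 0] /= np.linalg.norm(V[:, 0])
h, v_new = mgs_orthonormalize(V, rng.standard_normal(5))
print(abs(np.dot(V[:, 0], v_new)))  # ~0.0: the new vector is orthogonal to V[:, 0]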
5,994 | quantopian/zipline | zipline/data/history_loader.py | HistoryLoader.history | def history(self, assets, dts, field, is_perspective_after):
"""
A window of pricing data with adjustments applied assuming that the
end of the window is the day before the current simulation time.
Parameters
----------
assets : iterable of Assets
The assets in the window.
dts : iterable of datetime64-like
The datetimes for which to fetch data.
Makes an assumption that all dts are present and contiguous,
in the calendar.
field : str
The OHLCV field for which to retrieve data.
is_perspective_after : bool
True, if the window is being viewed immediately after the last dt
in the sliding window.
False, if the window is viewed on the last dt.
This flag is used for handling the case where the last dt in the
requested window immediately precedes a corporate action, e.g.:
- is_perspective_after is True
When the viewpoint is after the last dt in the window, as when a
daily history window is accessed from a simulation that uses a
minute data frequency, the history call to this loader will not
include the current simulation dt. At that point in time, the raw
data for the last day in the window will require adjustment, so the
most recent adjustment with respect to the simulation time is
applied to the last dt in the requested window.
An example equity which has a 0.5 split ratio dated for 05-27,
with the dts for a history call of 5 bars with a '1d' frequency at
05-27 9:31. Simulation frequency is 'minute'.
(In this case this function is called with 4 daily dts, and the
calling function is responsible for stitching back on the
'current' dt)
| | | | | last dt | <-- viewer is here |
| | 05-23 | 05-24 | 05-25 | 05-26 | 05-27 9:31 |
| raw | 10.10 | 10.20 | 10.30 | 10.40 | |
| adj | 5.05 | 5.10 | 5.15 | 5.20 | |
The adjustment is applied to the last dt, 05-26, and all previous
dts.
- is_perspective_after is False, daily
When the viewpoint is the same point in time as the last dt in the
window, as when a daily history window is accessed from a
simulation that uses a daily data frequency, the history call will
include the current dt. At that point in time, the raw data for the
last day in the window will be post-adjustment, so no adjustment
is applied to the last dt.
An example equity which has a 0.5 split ratio dated for 05-27,
with the dts for a history call of 5 bars with a '1d' frequency at
05-27 0:00. Simulation frequency is 'daily'.
| | | | | | <-- viewer is here |
| | | | | | last dt |
| | 05-23 | 05-24 | 05-25 | 05-26 | 05-27 |
| raw | 10.10 | 10.20 | 10.30 | 10.40 | 5.25 |
| adj | 5.05 | 5.10 | 5.15 | 5.20 | 5.25 |
Adjustments are applied 05-23 through 05-26 but not to the last dt,
05-27
Returns
-------
out : np.ndarray with shape(len(days between start, end), len(assets))
"""
block = self._ensure_sliding_windows(assets,
dts,
field,
is_perspective_after)
end_ix = self._calendar.searchsorted(dts[-1])
return concatenate(
[window.get(end_ix) for window in block],
axis=1,
) | python | def history(self, assets, dts, field, is_perspective_after):
"""
A window of pricing data with adjustments applied assuming that the
end of the window is the day before the current simulation time.
Parameters
----------
assets : iterable of Assets
The assets in the window.
dts : iterable of datetime64-like
The datetimes for which to fetch data.
Makes an assumption that all dts are present and contiguous,
in the calendar.
field : str
The OHLCV field for which to retrieve data.
is_perspective_after : bool
True, if the window is being viewed immediately after the last dt
in the sliding window.
False, if the window is viewed on the last dt.
This flag is used for handling the case where the last dt in the
requested window immediately precedes a corporate action, e.g.:
- is_perspective_after is True
When the viewpoint is after the last dt in the window, as when a
daily history window is accessed from a simulation that uses a
minute data frequency, the history call to this loader will not
include the current simulation dt. At that point in time, the raw
data for the last day in the window will require adjustment, so the
most recent adjustment with respect to the simulation time is
applied to the last dt in the requested window.
An example equity which has a 0.5 split ratio dated for 05-27,
with the dts for a history call of 5 bars with a '1d' frequency at
05-27 9:31. Simulation frequency is 'minute'.
(In this case this function is called with 4 daily dts, and the
calling function is responsible for stitching back on the
'current' dt)
| | | | | last dt | <-- viewer is here |
| | 05-23 | 05-24 | 05-25 | 05-26 | 05-27 9:31 |
| raw | 10.10 | 10.20 | 10.30 | 10.40 | |
| adj | 5.05 | 5.10 | 5.15 | 5.20 | |
The adjustment is applied to the last dt, 05-26, and all previous
dts.
- is_perspective_after is False, daily
When the viewpoint is the same point in time as the last dt in the
window, as when a daily history window is accessed from a
simulation that uses a daily data frequency, the history call will
include the current dt. At that point in time, the raw data for the
last day in the window will be post-adjustment, so no adjustment
is applied to the last dt.
An example equity which has a 0.5 split ratio dated for 05-27,
with the dts for a history call of 5 bars with a '1d' frequency at
05-27 0:00. Simulation frequency is 'daily'.
| | | | | | <-- viewer is here |
| | | | | | last dt |
| | 05-23 | 05-24 | 05-25 | 05-26 | 05-27 |
| raw | 10.10 | 10.20 | 10.30 | 10.40 | 5.25 |
| adj | 5.05 | 5.10 | 5.15 | 5.20 | 5.25 |
Adjustments are applied 05-23 through 05-26 but not to the last dt,
05-27
Returns
-------
out : np.ndarray with shape(len(days between start, end), len(assets))
"""
block = self._ensure_sliding_windows(assets,
dts,
field,
is_perspective_after)
end_ix = self._calendar.searchsorted(dts[-1])
return concatenate(
[window.get(end_ix) for window in block],
axis=1,
) | ['def', 'history', '(', 'self', ',', 'assets', ',', 'dts', ',', 'field', ',', 'is_perspective_after', ')', ':', 'block', '=', 'self', '.', '_ensure_sliding_windows', '(', 'assets', ',', 'dts', ',', 'field', ',', 'is_perspective_after', ')', 'end_ix', '=', 'self', '.', '_calendar', '.', 'searchsorted', '(', 'dts', '[', '-', '1', ']', ')', 'return', 'concatenate', '(', '[', 'window', '.', 'get', '(', 'end_ix', ')', 'for', 'window', 'in', 'block', ']', ',', 'axis', '=', '1', ',', ')'] | A window of pricing data with adjustments applied assuming that the
end of the window is the day before the current simulation time.
Parameters
----------
assets : iterable of Assets
The assets in the window.
dts : iterable of datetime64-like
The datetimes for which to fetch data.
Makes an assumption that all dts are present and contiguous,
in the calendar.
field : str
The OHLCV field for which to retrieve data.
is_perspective_after : bool
True, if the window is being viewed immediately after the last dt
in the sliding window.
False, if the window is viewed on the last dt.
This flag is used for handling the case where the last dt in the
requested window immediately precedes a corporate action, e.g.:
- is_perspective_after is True
When the viewpoint is after the last dt in the window, as when a
daily history window is accessed from a simulation that uses a
minute data frequency, the history call to this loader will not
include the current simulation dt. At that point in time, the raw
data for the last day in the window will require adjustment, so the
most recent adjustment with respect to the simulation time is
applied to the last dt in the requested window.
An example equity which has a 0.5 split ratio dated for 05-27,
with the dts for a history call of 5 bars with a '1d' frequency at
05-27 9:31. Simulation frequency is 'minute'.
(In this case this function is called with 4 daily dts, and the
calling function is responsible for stitching back on the
'current' dt)
| | | | | last dt | <-- viewer is here |
| | 05-23 | 05-24 | 05-25 | 05-26 | 05-27 9:31 |
| raw | 10.10 | 10.20 | 10.30 | 10.40 | |
| adj | 5.05 | 5.10 | 5.15 | 5.20 | |
The adjustment is applied to the last dt, 05-26, and all previous
dts.
- is_perspective_after is False, daily
When the viewpoint is the same point in time as the last dt in the
window, as when a daily history window is accessed from a
simulation that uses a daily data frequency, the history call will
include the current dt. At that point in time, the raw data for the
last day in the window will be post-adjustment, so no adjustment
is applied to the last dt.
An example equity which has a 0.5 split ratio dated for 05-27,
with the dts for a history call of 5 bars with a '1d' frequency at
05-27 0:00. Simulation frequency is 'daily'.
| | | | | | <-- viewer is here |
| | | | | | last dt |
| | 05-23 | 05-24 | 05-25 | 05-26 | 05-27 |
| raw | 10.10 | 10.20 | 10.30 | 10.40 | 5.25 |
| adj | 5.05 | 5.10 | 5.15 | 5.20 | 5.25 |
Adjustments are applied 05-23 through 05-26 but not to the last dt,
05-27
Returns
-------
out : np.ndarray with shape(len(days between start, end), len(assets)) | ['A', 'window', 'of', 'pricing', 'data', 'with', 'adjustments', 'applied', 'assuming', 'that', 'the', 'end', 'of', 'the', 'window', 'is', 'the', 'day', 'before', 'the', 'current', 'simulation', 'time', '.'] | train | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/data/history_loader.py#L471-L555 |
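The two split tables in the docstring above reduce to one rule: scale every bar that predates the corporate action by the split ratio, and leave the final bar untouched only when the viewpoint coincides with it (the daily case). A rough standalone sketch of that arithmetic follows; it uses plain numpy rather than zipline's adjustment machinery, and apply_split is a made-up helper name.

import numpy as np

def apply_split(raw, ratio, is_perspective_after):
    # raw: window of daily closes; ratio: split ratio effective the day
    # after the last pre-split bar (0.5 in the docstring example).
    adj = raw.astype(float).copy()
    if is_perspective_after:
        adj *= ratio            # every bar in the window predates the split
    else:
        adj[:-1] *= ratio       # the last bar is already post-split
    return adj

minute_view = np.array([10.10, 10.20, 10.30, 10.40])        # 05-23 .. 05-26
daily_view = np.array([10.10, 10.20, 10.30, 10.40, 5.25])   # 05-23 .. 05-27
print(apply_split(minute_view, 0.5, True))    # [5.05 5.1  5.15 5.2 ]
print(apply_split(daily_view, 0.5, False))    # [5.05 5.1  5.15 5.2  5.25]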
5,995 | pybel/pybel | src/pybel/manager/cache_manager.py | NamespaceManager.get_namespace_by_url | def get_namespace_by_url(self, url: str) -> Optional[Namespace]:
"""Look up a namespace by url."""
return self.session.query(Namespace).filter(Namespace.url == url).one_or_none() | python | def get_namespace_by_url(self, url: str) -> Optional[Namespace]:
"""Look up a namespace by url."""
return self.session.query(Namespace).filter(Namespace.url == url).one_or_none() | ['def', 'get_namespace_by_url', '(', 'self', ',', 'url', ':', 'str', ')', '->', 'Optional', '[', 'Namespace', ']', ':', 'return', 'self', '.', 'session', '.', 'query', '(', 'Namespace', ')', '.', 'filter', '(', 'Namespace', '.', 'url', '==', 'url', ')', '.', 'one_or_none', '(', ')'] | Look up a namespace by url. | ['Look', 'up', 'a', 'namespace', 'by', 'url', '.'] | train | https://github.com/pybel/pybel/blob/c8a7a1bdae4c475fa2a8c77f3a9a5f6d79556ca0/src/pybel/manager/cache_manager.py#L149-L151 |
5,996 | amaas-fintech/amaas-core-sdk-python | amaascore/config.py | ConfigFactory.lookup | def lookup(self, section, name):
"""Lookup config value."""
value = os.environ.get('AMAAS_{}'.format(name.upper()))
if value:
return value
try:
value = self.file_config.get(section, name)
except ConfigParserError:
pass
else:
if value:
return value
raise MissingConfigurationError(section, name) | python | def lookup(self, section, name):
"""Lookup config value."""
value = os.environ.get('AMAAS_{}'.format(name.upper()))
if value:
return value
try:
value = self.file_config.get(section, name)
except ConfigParserError:
pass
else:
if value:
return value
raise MissingConfigurationError(section, name) | ['def', 'lookup', '(', 'self', ',', 'section', ',', 'name', ')', ':', 'value', '=', 'os', '.', 'environ', '.', 'get', '(', "'AMAAS_{}'", '.', 'format', '(', 'name', '.', 'upper', '(', ')', ')', ')', 'if', 'value', ':', 'return', 'value', 'try', ':', 'value', '=', 'self', '.', 'file_config', '.', 'get', '(', 'section', ',', 'name', ')', 'except', 'ConfigParserError', ':', 'pass', 'else', ':', 'if', 'value', ':', 'return', 'value', 'raise', 'MissingConfigurationError', '(', 'section', ',', 'name', ')'] | Lookup config value. | ['Lookup', 'config', 'value', '.'] | train | https://github.com/amaas-fintech/amaas-core-sdk-python/blob/347b71f8e776b2dde582b015e31b4802d91e8040/amaascore/config.py#L86-L100 |
5,997 | mushkevych/scheduler | synergy/scheduler/state_machine_recomputing.py | StateMachineRecomputing._process_state_in_progress | def _process_state_in_progress(self, job_record):
""" method that takes care of processing job records in STATE_IN_PROGRESS state"""
start_timeperiod = self.compute_start_timeperiod(job_record.process_name, job_record.timeperiod)
end_timeperiod = self.compute_end_timeperiod(job_record.process_name, job_record.timeperiod)
time_qualifier = context.process_context[job_record.process_name].time_qualifier
actual_timeperiod = time_helper.actual_timeperiod(time_qualifier)
is_job_finalizable = self.timetable.is_job_record_finalizable(job_record)
uow = self.uow_dao.get_one(job_record.related_unit_of_work)
if job_record.timeperiod == actual_timeperiod or is_job_finalizable is False:
if uow.is_invalid or uow.is_requested:
# current uow has not been processed yet. update it
self.update_scope_of_processing(job_record.process_name, uow, start_timeperiod, end_timeperiod)
else:
# STATE_IN_PROGRESS, STATE_PROCESSED, STATE_CANCELED, STATE_NOOP
# create new uow to cover new inserts
self._compute_and_transfer_to_progress(job_record.process_name, start_timeperiod,
end_timeperiod, job_record)
elif job_record.timeperiod < actual_timeperiod and is_job_finalizable is True:
# create new uow for FINAL RUN
self._compute_and_transfer_to_final_run(job_record.process_name, start_timeperiod,
end_timeperiod, job_record)
else:
msg = 'Job {0} has timeperiod {1} from the future vs current timeperiod {2}' \
.format(job_record.db_id, job_record.timeperiod, actual_timeperiod)
self._log_message(ERROR, job_record.process_name, job_record.timeperiod, msg) | python | def _process_state_in_progress(self, job_record):
""" method that takes care of processing job records in STATE_IN_PROGRESS state"""
start_timeperiod = self.compute_start_timeperiod(job_record.process_name, job_record.timeperiod)
end_timeperiod = self.compute_end_timeperiod(job_record.process_name, job_record.timeperiod)
time_qualifier = context.process_context[job_record.process_name].time_qualifier
actual_timeperiod = time_helper.actual_timeperiod(time_qualifier)
is_job_finalizable = self.timetable.is_job_record_finalizable(job_record)
uow = self.uow_dao.get_one(job_record.related_unit_of_work)
if job_record.timeperiod == actual_timeperiod or is_job_finalizable is False:
if uow.is_invalid or uow.is_requested:
# current uow has not been processed yet. update it
self.update_scope_of_processing(job_record.process_name, uow, start_timeperiod, end_timeperiod)
else:
# STATE_IN_PROGRESS, STATE_PROCESSED, STATE_CANCELED, STATE_NOOP
# create new uow to cover new inserts
self._compute_and_transfer_to_progress(job_record.process_name, start_timeperiod,
end_timeperiod, job_record)
elif job_record.timeperiod < actual_timeperiod and is_job_finalizable is True:
# create new uow for FINAL RUN
self._compute_and_transfer_to_final_run(job_record.process_name, start_timeperiod,
end_timeperiod, job_record)
else:
msg = 'Job {0} has timeperiod {1} from the future vs current timeperiod {2}' \
.format(job_record.db_id, job_record.timeperiod, actual_timeperiod)
self._log_message(ERROR, job_record.process_name, job_record.timeperiod, msg) | ['def', '_process_state_in_progress', '(', 'self', ',', 'job_record', ')', ':', 'start_timeperiod', '=', 'self', '.', 'compute_start_timeperiod', '(', 'job_record', '.', 'process_name', ',', 'job_record', '.', 'timeperiod', ')', 'end_timeperiod', '=', 'self', '.', 'compute_end_timeperiod', '(', 'job_record', '.', 'process_name', ',', 'job_record', '.', 'timeperiod', ')', 'time_qualifier', '=', 'context', '.', 'process_context', '[', 'job_record', '.', 'process_name', ']', '.', 'time_qualifier', 'actual_timeperiod', '=', 'time_helper', '.', 'actual_timeperiod', '(', 'time_qualifier', ')', 'is_job_finalizable', '=', 'self', '.', 'timetable', '.', 'is_job_record_finalizable', '(', 'job_record', ')', 'uow', '=', 'self', '.', 'uow_dao', '.', 'get_one', '(', 'job_record', '.', 'related_unit_of_work', ')', 'if', 'job_record', '.', 'timeperiod', '==', 'actual_timeperiod', 'or', 'is_job_finalizable', 'is', 'False', ':', 'if', 'uow', '.', 'is_invalid', 'or', 'uow', '.', 'is_requested', ':', '# current uow has not been processed yet. update it', 'self', '.', 'update_scope_of_processing', '(', 'job_record', '.', 'process_name', ',', 'uow', ',', 'start_timeperiod', ',', 'end_timeperiod', ')', 'else', ':', '# STATE_IN_PROGRESS, STATE_PROCESSED, STATE_CANCELED, STATE_NOOP', '# create new uow to cover new inserts', 'self', '.', '_compute_and_transfer_to_progress', '(', 'job_record', '.', 'process_name', ',', 'start_timeperiod', ',', 'end_timeperiod', ',', 'job_record', ')', 'elif', 'job_record', '.', 'timeperiod', '<', 'actual_timeperiod', 'and', 'is_job_finalizable', 'is', 'True', ':', '# create new uow for FINAL RUN', 'self', '.', '_compute_and_transfer_to_final_run', '(', 'job_record', '.', 'process_name', ',', 'start_timeperiod', ',', 'end_timeperiod', ',', 'job_record', ')', 'else', ':', 'msg', '=', "'Job {0} has timeperiod {1} from the future vs current timeperiod {2}'", '.', 'format', '(', 'job_record', '.', 'db_id', ',', 'job_record', '.', 'timeperiod', ',', 'actual_timeperiod', ')', 'self', '.', '_log_message', '(', 'ERROR', ',', 'job_record', '.', 'process_name', ',', 'job_record', '.', 'timeperiod', ',', 'msg', ')'] | method that takes care of processing job records in STATE_IN_PROGRESS state | ['method', 'that', 'takes', 'care', 'of', 'processing', 'job', 'records', 'in', 'STATE_IN_PROGRESS', 'state'] | train | https://github.com/mushkevych/scheduler/blob/6740331360f49083c208085fb5a60ce80ebf418b/synergy/scheduler/state_machine_recomputing.py#L84-L113 |
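The branching in _process_state_in_progress can be summarised as a small decision table. The sketch below mirrors those branches with the unit-of-work and timetable side effects replaced by labels; the function and its arguments are invented for illustration, and uow_is_pending stands in for uow.is_invalid or uow.is_requested.

def next_action(job_timeperiod, actual_timeperiod, is_job_finalizable, uow_is_pending):
    # Same branch order as the method above, side effects replaced by strings.
    if job_timeperiod == actual_timeperiod or not is_job_finalizable:
        if uow_is_pending:
            return 'update scope of the existing unit_of_work'
        return 'create a new unit_of_work to cover new inserts'
    if job_timeperiod < actual_timeperiod and is_job_finalizable:
        return 'create a unit_of_work for the FINAL RUN'
    return 'error: job timeperiod lies in the future'

print(next_action('2017060100', '2017060100', False, True))    # update scope of the existing unit_of_work
print(next_action('2017053100', '2017060100', True, False))    # create a unit_of_work for the FINAL RUN
print(next_action('2017070100', '2017060100', True, False))    # error: job timeperiod lies in the future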
5,998 | buckmaxwell/neoapi | neoapi/serializable_structured_node.py | SerializableStructuredNode.create_resource | def create_resource(cls, request_json):
r"""
Used to create a node in the database of type 'cls' in response to a POST request. create_resource should only \
be invoked on a resource when the client specifies a POST request.
:param request_json: a dictionary formatted according to the specification at \
http://jsonapi.org/format/#crud-creating
:return: An HTTP response object in accordance with the same specification
"""
response = dict()
new_resource, location = None, None
try:
data = request_json['data']
if data['type'] != cls.__type__:
raise WrongTypeError('type must match the type of the resource being created.')
attributes = data.get('attributes')
if attributes:
for x in attributes.keys():
if x in cls.dates:
dt = datetime.strptime(attributes[x], '%Y-%m-%d')
attributes[x] = dt
new_resource = cls(**attributes)
new_resource.save()
enum_keys = new_resource.enums.keys()
for key in attributes.keys():
if key in enum_keys:
if attributes[key] in new_resource.enums[key]:
setattr(new_resource, key, attributes[key])
else:
raise EnumeratedTypeError
else:
setattr(new_resource, key, attributes[key])
new_resource.save()
for r in new_resource.hashed:
unhashed = getattr(new_resource, r)
if unhashed:
setattr(new_resource, r, hashlib.sha256(unhashed).hexdigest())
new_resource.save()
relationships = data.get('relationships')
if relationships:
for relation_name in relationships.keys():
relations = relationships.get(relation_name)
if relations:
relations = relations['data']
if isinstance(relations, list):
for relation in relations:
the_type = relation['type'] # must translate type to cls
the_id = relation['id']
the_class = cls.get_class_from_type(the_type)
new_resources_relation = the_class.nodes.get(id=the_id, active=True)
meta = relation.get('meta')
eval('new_resource.{relation_name}.connect(new_resources_relation, meta)'.format(
relation_name=relation_name)
)
new_resource.save()
else:
relation = relations
the_type = relation['type']
the_id = relation['id']
the_class = cls.get_class_from_type(the_type)
new_resources_relation = the_class.nodes.get(id=the_id, active=True)
meta = relation.get('meta')
eval('new_resource.{relation_name}.connect(new_resources_relation, meta)'.format(
relation_name=relation_name)
)
new_resource.save()
response['data'] = new_resource.get_resource_object()
response['links'] = {'self': new_resource.get_self_link()}
status_code = http_error_codes.CREATED
location = new_resource.get_self_link()
r = make_response(jsonify(response))
r.headers['Content-Type'] = "application/vnd.api+json; charset=utf-8"
if location and new_resource:
r.headers['Location'] = location
r.status_code = status_code
except UniqueProperty:
r = application_codes.error_response([application_codes.UNIQUE_KEY_VIOLATION])
try:
new_resource.delete()
except:
pass
except DoesNotExist:
r = application_codes.error_response([application_codes.RESOURCE_NOT_FOUND])
try:
new_resource.delete()
except:
pass
except WrongTypeError as e:
r = application_codes.error_response([application_codes.WRONG_TYPE_VIOLATION])
try:
new_resource.delete()
except:
pass
except KeyError as e:
r = application_codes.error_response([application_codes.BAD_FORMAT_VIOLATION])
print e
try:
new_resource.delete()
except:
pass
except EnumeratedTypeError:
r = application_codes.error_response([application_codes.ENUMERATED_TYPE_VIOLATION])
try:
new_resource.delete()
except:
pass
except ParameterMissing:
r = application_codes.error_response([application_codes.BAD_PARAMETER_VIOLATION])
try:
new_resource.delete()
except:
pass
return r | python | def create_resource(cls, request_json):
r"""
Used to create a node in the database of type 'cls' in response to a POST request. create_resource should only \
be invoked on a resource when the client specifies a POST request.
:param request_json: a dictionary formatted according to the specification at \
http://jsonapi.org/format/#crud-creating
:return: An HTTP response object in accordance with the same specification
"""
response = dict()
new_resource, location = None, None
try:
data = request_json['data']
if data['type'] != cls.__type__:
raise WrongTypeError('type must match the type of the resource being created.')
attributes = data.get('attributes')
if attributes:
for x in attributes.keys():
if x in cls.dates:
dt = datetime.strptime(attributes[x], '%Y-%m-%d')
attributes[x] = dt
new_resource = cls(**attributes)
new_resource.save()
enum_keys = new_resource.enums.keys()
for key in attributes.keys():
if key in enum_keys:
if attributes[key] in new_resource.enums[key]:
setattr(new_resource, key, attributes[key])
else:
raise EnumeratedTypeError
else:
setattr(new_resource, key, attributes[key])
new_resource.save()
for r in new_resource.hashed:
unhashed = getattr(new_resource, r)
if unhashed:
setattr(new_resource, r, hashlib.sha256(unhashed).hexdigest())
new_resource.save()
relationships = data.get('relationships')
if relationships:
for relation_name in relationships.keys():
relations = relationships.get(relation_name)
if relations:
relations = relations['data']
if isinstance(relations, list):
for relation in relations:
the_type = relation['type'] # must translate type to cls
the_id = relation['id']
the_class = cls.get_class_from_type(the_type)
new_resources_relation = the_class.nodes.get(id=the_id, active=True)
meta = relation.get('meta')
eval('new_resource.{relation_name}.connect(new_resources_relation, meta)'.format(
relation_name=relation_name)
)
new_resource.save()
else:
relation = relations
the_type = relation['type']
the_id = relation['id']
the_class = cls.get_class_from_type(the_type)
new_resources_relation = the_class.nodes.get(id=the_id, active=True)
meta = relation.get('meta')
eval('new_resource.{relation_name}.connect(new_resources_relation, meta)'.format(
relation_name=relation_name)
)
new_resource.save()
response['data'] = new_resource.get_resource_object()
response['links'] = {'self': new_resource.get_self_link()}
status_code = http_error_codes.CREATED
location = new_resource.get_self_link()
r = make_response(jsonify(response))
r.headers['Content-Type'] = "application/vnd.api+json; charset=utf-8"
if location and new_resource:
r.headers['Location'] = location
r.status_code = status_code
except UniqueProperty:
r = application_codes.error_response([application_codes.UNIQUE_KEY_VIOLATION])
try:
new_resource.delete()
except:
pass
except DoesNotExist:
r = application_codes.error_response([application_codes.RESOURCE_NOT_FOUND])
try:
new_resource.delete()
except:
pass
except WrongTypeError as e:
r = application_codes.error_response([application_codes.WRONG_TYPE_VIOLATION])
try:
new_resource.delete()
except:
pass
except KeyError as e:
r = application_codes.error_response([application_codes.BAD_FORMAT_VIOLATION])
print e
try:
new_resource.delete()
except:
pass
except EnumeratedTypeError:
r = application_codes.error_response([application_codes.ENUMERATED_TYPE_VIOLATION])
try:
new_resource.delete()
except:
pass
except ParameterMissing:
r = application_codes.error_response([application_codes.BAD_PARAMETER_VIOLATION])
try:
new_resource.delete()
except:
pass
return r | ['def', 'create_resource', '(', 'cls', ',', 'request_json', ')', ':', 'response', '=', 'dict', '(', ')', 'new_resource', ',', 'location', '=', 'None', ',', 'None', 'try', ':', 'data', '=', 'request_json', '[', "'data'", ']', 'if', 'data', '[', "'type'", ']', '!=', 'cls', '.', '__type__', ':', 'raise', 'WrongTypeError', '(', "'type must match the type of the resource being created.'", ')', 'attributes', '=', 'data', '.', 'get', '(', "'attributes'", ')', 'if', 'attributes', ':', 'for', 'x', 'in', 'attributes', '.', 'keys', '(', ')', ':', 'if', 'x', 'in', 'cls', '.', 'dates', ':', 'dt', '=', 'datetime', '.', 'strptime', '(', 'attributes', '[', 'x', ']', ',', "'%Y-%m-%d'", ')', 'attributes', '[', 'x', ']', '=', 'dt', 'new_resource', '=', 'cls', '(', '*', '*', 'attributes', ')', 'new_resource', '.', 'save', '(', ')', 'enum_keys', '=', 'new_resource', '.', 'enums', '.', 'keys', '(', ')', 'for', 'key', 'in', 'attributes', '.', 'keys', '(', ')', ':', 'if', 'key', 'in', 'enum_keys', ':', 'if', 'attributes', '[', 'key', ']', 'in', 'new_resource', '.', 'enums', '[', 'key', ']', ':', 'setattr', '(', 'new_resource', ',', 'key', ',', 'attributes', '[', 'key', ']', ')', 'else', ':', 'raise', 'EnumeratedTypeError', 'else', ':', 'setattr', '(', 'new_resource', ',', 'key', ',', 'attributes', '[', 'key', ']', ')', 'new_resource', '.', 'save', '(', ')', 'for', 'r', 'in', 'new_resource', '.', 'hashed', ':', 'unhashed', '=', 'getattr', '(', 'new_resource', ',', 'r', ')', 'if', 'unhashed', ':', 'setattr', '(', 'new_resource', ',', 'r', ',', 'hashlib', '.', 'sha256', '(', 'unhashed', ')', '.', 'hexdigest', '(', ')', ')', 'new_resource', '.', 'save', '(', ')', 'relationships', '=', 'data', '.', 'get', '(', "'relationships'", ')', 'if', 'relationships', ':', 'for', 'relation_name', 'in', 'relationships', '.', 'keys', '(', ')', ':', 'relations', '=', 'relationships', '.', 'get', '(', 'relation_name', ')', 'if', 'relations', ':', 'relations', '=', 'relations', '[', "'data'", ']', 'if', 'isinstance', '(', 'relations', ',', 'list', ')', ':', 'for', 'relation', 'in', 'relations', ':', 'the_type', '=', 'relation', '[', "'type'", ']', '# must translate type to cls', 'the_id', '=', 'relation', '[', "'id'", ']', 'the_class', '=', 'cls', '.', 'get_class_from_type', '(', 'the_type', ')', 'new_resources_relation', '=', 'the_class', '.', 'nodes', '.', 'get', '(', 'id', '=', 'the_id', ',', 'active', '=', 'True', ')', 'meta', '=', 'relation', '.', 'get', '(', "'meta'", ')', 'eval', '(', "'new_resource.{relation_name}.connect(new_resources_relation, meta)'", '.', 'format', '(', 'relation_name', '=', 'relation_name', ')', ')', 'new_resource', '.', 'save', '(', ')', 'else', ':', 'relation', '=', 'relations', 'the_type', '=', 'relation', '[', "'type'", ']', 'the_id', '=', 'relation', '[', "'id'", ']', 'the_class', '=', 'cls', '.', 'get_class_from_type', '(', 'the_type', ')', 'new_resources_relation', '=', 'the_class', '.', 'nodes', '.', 'get', '(', 'id', '=', 'the_id', ',', 'active', '=', 'True', ')', 'meta', '=', 'relation', '.', 'get', '(', "'meta'", ')', 'eval', '(', "'new_resource.{relation_name}.connect(new_resources_relation, meta)'", '.', 'format', '(', 'relation_name', '=', 'relation_name', ')', ')', 'new_resource', '.', 'save', '(', ')', 'response', '[', "'data'", ']', '=', 'new_resource', '.', 'get_resource_object', '(', ')', 'response', '[', "'links'", ']', '=', '{', "'self'", ':', 'new_resource', '.', 'get_self_link', '(', ')', '}', 'status_code', '=', 'http_error_codes', '.', 'CREATED', 'location', '=', 
'new_resource', '.', 'get_self_link', '(', ')', 'r', '=', 'make_response', '(', 'jsonify', '(', 'response', ')', ')', 'r', '.', 'headers', '[', "'Content-Type'", ']', '=', '"application/vnd.api+json; charset=utf-8"', 'if', 'location', 'and', 'new_resource', ':', 'r', '.', 'headers', '[', "'Location'", ']', '=', 'location', 'r', '.', 'status_code', '=', 'status_code', 'except', 'UniqueProperty', ':', 'r', '=', 'application_codes', '.', 'error_response', '(', '[', 'application_codes', '.', 'UNIQUE_KEY_VIOLATION', ']', ')', 'try', ':', 'new_resource', '.', 'delete', '(', ')', 'except', ':', 'pass', 'except', 'DoesNotExist', ':', 'r', '=', 'application_codes', '.', 'error_response', '(', '[', 'application_codes', '.', 'RESOURCE_NOT_FOUND', ']', ')', 'try', ':', 'new_resource', '.', 'delete', '(', ')', 'except', ':', 'pass', 'except', 'WrongTypeError', 'as', 'e', ':', 'r', '=', 'application_codes', '.', 'error_response', '(', '[', 'application_codes', '.', 'WRONG_TYPE_VIOLATION', ']', ')', 'try', ':', 'new_resource', '.', 'delete', '(', ')', 'except', ':', 'pass', 'except', 'KeyError', 'as', 'e', ':', 'r', '=', 'application_codes', '.', 'error_response', '(', '[', 'application_codes', '.', 'BAD_FORMAT_VIOLATION', ']', ')', 'print', 'e', 'try', ':', 'new_resource', '.', 'delete', '(', ')', 'except', ':', 'pass', 'except', 'EnumeratedTypeError', ':', 'r', '=', 'application_codes', '.', 'error_response', '(', '[', 'application_codes', '.', 'ENUMERATED_TYPE_VIOLATION', ']', ')', 'try', ':', 'new_resource', '.', 'delete', '(', ')', 'except', ':', 'pass', 'except', 'ParameterMissing', ':', 'r', '=', 'application_codes', '.', 'error_response', '(', '[', 'application_codes', '.', 'BAD_PARAMETER_VIOLATION', ']', ')', 'try', ':', 'new_resource', '.', 'delete', '(', ')', 'except', ':', 'pass', 'return', 'r'] | r"""
Used to create a node in the database of type 'cls' in response to a POST request. create_resource should only \
be invoked on a resource when the client specifies a POST request.
:param request_json: a dictionary formatted according to the specification at \
http://jsonapi.org/format/#crud-creating
:return: An HTTP response object in accordance with the same specification | ['r', 'Used', 'to', 'create', 'a', 'node', 'in', 'the', 'database', 'of', 'type', 'cls', 'in', 'response', 'to', 'a', 'POST', 'request', '.', 'create_resource', 'should', 'only', '\\', 'be', 'invoked', 'on', 'a', 'resource', 'when', 'the', 'client', 'specifies', 'a', 'POST', 'request', '.'] | train | https://github.com/buckmaxwell/neoapi/blob/96c5d83c847d7a12d3d1f17931d85776f5280877/neoapi/serializable_structured_node.py#L589-L716 |
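create_resource walks a JSON:API 'data' object: the type must match cls.__type__, attributes are applied (with cls.dates parsed, cls.enums validated and cls.hashed fields digested), and each entry under relationships is connected by type and id. A hedged example of a request body the code above would accept; the resource types, ids and field names are invented for illustration.

request_json = {
    "data": {
        "type": "people",                        # must equal cls.__type__
        "attributes": {
            "name": "Ada",
            "birthday": "1815-12-10",            # parsed with '%Y-%m-%d' if listed in cls.dates
            "password": "hunter2"                # sha256-hashed if listed in cls.hashed
        },
        "relationships": {
            "employer": {                        # to-one: a single resource identifier
                "data": {"type": "companies", "id": "42"}
            },
            "friends": {                         # to-many: a list of resource identifiers
                "data": [{"type": "people", "id": "7", "meta": {"since": "2016"}}]
            }
        }
    }
}
# response = Person.create_resource(request_json)   # hypothetical model class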
5,999 | fracpete/python-weka-wrapper3 | python/weka/flow/transformer.py | Train.do_execute | def do_execute(self):
"""
The actual execution of the actor.
:return: None if successful, otherwise error message
:rtype: str
"""
if isinstance(self.input.payload, Instances):
inst = None
data = self.input.payload
else:
inst = self.input.payload
data = inst.dataset
retrain = False
if (self._header is None) or (self._header.equal_headers(data) is not None) or (inst is None):
retrain = True
self._header = Instances.template_instances(data, 0)
if retrain or (self._model is None):
cls = self.resolve_option("setup")
if isinstance(cls, Classifier):
self._model = Classifier.make_copy(cls)
elif isinstance(cls, Clusterer):
self._model = Clusterer.make_copy(cls)
elif isinstance(cls, Associator):
self._model = Associator.make_copy(cls)
else:
return "Unhandled class: " + classes.get_classname(cls)
if retrain:
if inst is not None:
data = Instances.template_instances(data, 1)
data.add_instance(inst)
if isinstance(self._model, Classifier):
self._model.build_classifier(data)
elif isinstance(self._model, Clusterer):
self._model.build_clusterer(data)
elif isinstance(self._model, Associator):
self._model.build_associations(data)
else:
if isinstance(self._model, Classifier):
self._model.update_classifier(inst)
elif isinstance(self._model, Clusterer):
self._model.update_clusterer(inst)
else:
return "Cannot train incrementally: " + classes.get_classname(self._model)
cont = ModelContainer(model=self._model, header=self._header)
self._output.append(Token(cont))
return None | python | def do_execute(self):
"""
The actual execution of the actor.
:return: None if successful, otherwise error message
:rtype: str
"""
if isinstance(self.input.payload, Instances):
inst = None
data = self.input.payload
else:
inst = self.input.payload
data = inst.dataset
retrain = False
if (self._header is None) or (self._header.equal_headers(data) is not None) or (inst is None):
retrain = True
self._header = Instances.template_instances(data, 0)
if retrain or (self._model is None):
cls = self.resolve_option("setup")
if isinstance(cls, Classifier):
self._model = Classifier.make_copy(cls)
elif isinstance(cls, Clusterer):
self._model = Clusterer.make_copy(cls)
elif isinstance(cls, Associator):
self._model = Associator.make_copy(cls)
else:
return "Unhandled class: " + classes.get_classname(cls)
if retrain:
if inst is not None:
data = Instances.template_instances(data, 1)
data.add_instance(inst)
if isinstance(self._model, Classifier):
self._model.build_classifier(data)
elif isinstance(self._model, Clusterer):
self._model.build_clusterer(data)
elif isinstance(self._model, Associator):
self._model.build_associations(data)
else:
if isinstance(self._model, Classifier):
self._model.update_classifier(inst)
elif isinstance(self._model, Clusterer):
self._model.update_clusterer(inst)
else:
return "Cannot train incrementally: " + classes.get_classname(self._model)
cont = ModelContainer(model=self._model, header=self._header)
self._output.append(Token(cont))
return None | ['def', 'do_execute', '(', 'self', ')', ':', 'if', 'isinstance', '(', 'self', '.', 'input', '.', 'payload', ',', 'Instances', ')', ':', 'inst', '=', 'None', 'data', '=', 'self', '.', 'input', '.', 'payload', 'else', ':', 'inst', '=', 'self', '.', 'input', '.', 'payload', 'data', '=', 'inst', '.', 'dataset', 'retrain', '=', 'False', 'if', '(', 'self', '.', '_header', 'is', 'None', ')', 'or', '(', 'self', '.', '_header', '.', 'equal_headers', '(', 'data', ')', 'is', 'not', 'None', ')', 'or', '(', 'inst', 'is', 'None', ')', ':', 'retrain', '=', 'True', 'self', '.', '_header', '=', 'Instances', '.', 'template_instances', '(', 'data', ',', '0', ')', 'if', 'retrain', 'or', '(', 'self', '.', '_model', 'is', 'None', ')', ':', 'cls', '=', 'self', '.', 'resolve_option', '(', '"setup"', ')', 'if', 'isinstance', '(', 'cls', ',', 'Classifier', ')', ':', 'self', '.', '_model', '=', 'Classifier', '.', 'make_copy', '(', 'cls', ')', 'elif', 'isinstance', '(', 'cls', ',', 'Clusterer', ')', ':', 'self', '.', '_model', '=', 'Clusterer', '.', 'make_copy', '(', 'cls', ')', 'elif', 'isinstance', '(', 'cls', ',', 'Associator', ')', ':', 'self', '.', '_model', '=', 'Associator', '.', 'make_copy', '(', 'cls', ')', 'else', ':', 'return', '"Unhandled class: "', '+', 'classes', '.', 'get_classname', '(', 'cls', ')', 'if', 'retrain', ':', 'if', 'inst', 'is', 'not', 'None', ':', 'data', '=', 'Instances', '.', 'template_instances', '(', 'data', ',', '1', ')', 'data', '.', 'add_instance', '(', 'inst', ')', 'if', 'isinstance', '(', 'self', '.', '_model', ',', 'Classifier', ')', ':', 'self', '.', '_model', '.', 'build_classifier', '(', 'data', ')', 'elif', 'isinstance', '(', 'self', '.', '_model', ',', 'Clusterer', ')', ':', 'self', '.', '_model', '.', 'build_clusterer', '(', 'data', ')', 'elif', 'isinstance', '(', 'self', '.', '_model', ',', 'Associator', ')', ':', 'self', '.', '_model', '.', 'build_associations', '(', 'data', ')', 'else', ':', 'if', 'isinstance', '(', 'self', '.', '_model', ',', 'Classifier', ')', ':', 'self', '.', '_model', '.', 'update_classifier', '(', 'inst', ')', 'elif', 'isinstance', '(', 'self', '.', '_model', ',', 'Clusterer', ')', ':', 'self', '.', '_model', '.', 'update_clusterer', '(', 'inst', ')', 'else', ':', 'return', '"Cannot train incrementally: "', '+', 'classes', '.', 'get_classname', '(', 'self', '.', '_model', ')', 'cont', '=', 'ModelContainer', '(', 'model', '=', 'self', '.', '_model', ',', 'header', '=', 'self', '.', '_header', ')', 'self', '.', '_output', '.', 'append', '(', 'Token', '(', 'cont', ')', ')', 'return', 'None'] | The actual execution of the actor.
:return: None if successful, otherwise error message
:rtype: str | ['The', 'actual', 'execution', 'of', 'the', 'actor', '.'] | train | https://github.com/fracpete/python-weka-wrapper3/blob/d850ab1bdb25fbd5a8d86e99f34a397975425838/python/weka/flow/transformer.py#L793-L843 |
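The Train actor rebuilds its model whenever the incoming header changes (or a whole Instances batch arrives) and otherwise feeds single rows to an updateable model. The sketch below shows the same batch-then-incremental pattern with python-weka-wrapper3 used directly, outside the flow framework; the ARFF path and the choice of NaiveBayesUpdateable are assumptions for the example.

import weka.core.jvm as jvm
from weka.core.converters import Loader
from weka.core.dataset import Instances
from weka.classifiers import Classifier

jvm.start()
try:
    loader = Loader(classname="weka.core.converters.ArffLoader")
    data = loader.load_file("/tmp/iris.arff")      # assumed dataset location
    data.class_is_last()

    # initial build on a one-row template, as the retrain branch above does
    cls = Classifier(classname="weka.classifiers.bayes.NaiveBayesUpdateable")
    first = Instances.template_instances(data, 1)
    first.add_instance(data.get_instance(0))
    cls.build_classifier(first)

    # remaining rows are folded in incrementally, as the else branch does
    for i in range(1, data.num_instances):
        cls.update_classifier(data.get_instance(i))
    print(cls)
finally:
    jvm.stop()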