content | sha1 | id |
---|---|---|
import time
def retry(func_name, max_retry, *args):
    """Retry a function while its output is falsy.
    :param func_name: the function to retry
    :type func_name: callable
    :param max_retry: maximum number of attempts
    :type max_retry: int
    :param args: arguments passed to the function
    :type args: tuple
    :return: the function's output once it is truthy, otherwise the last (falsy) output
    :rtype: depends on the function; may be None
    """
output = None
for _ in range(max_retry):
output = func_name(*args)
if output and output != 'False':
return output
else:
time.sleep(5)
else:
return output | 29051605dbad65823c1ca99afb3237679a37a08c | 21,018 |
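A brief usage sketch (added for illustration; flaky_lookup is a hypothetical helper): the callable returns a falsy value on its first attempt, so retry sleeps five seconds once and then returns the second attempt's result.

attempts = {"count": 0}

def flaky_lookup(key):
    # returns None (falsy) on the first call, a real value afterwards
    attempts["count"] += 1
    return None if attempts["count"] < 2 else "value-for-{}".format(key)

result = retry(flaky_lookup, 3, "alpha")
print(result)  # -> value-for-alpha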
def encode_randomness(randomness: hints.Buffer) -> str:
"""
Encode the given buffer to a :class:`~str` using Base32 encoding.
The given :class:`~bytes` are expected to represent the last 10 bytes of a ULID, which
are cryptographically secure random values.
.. note:: This uses an optimized strategy from the `NUlid` project for encoding ULID
bytes specifically and is not meant for arbitrary encoding.
:param randomness: Bytes to encode
:type randomness: :class:`~bytes`, :class:`~bytearray`, or :class:`~memoryview`
:return: Value encoded as a Base32 string
:rtype: :class:`~str`
:raises ValueError: when the randomness is not 10 bytes
"""
length = len(randomness)
if length != 10:
raise ValueError('Expects 10 bytes for randomness; got {}'.format(length))
encoding = ENCODING
return \
encoding[(randomness[0] & 248) >> 3] + \
encoding[((randomness[0] & 7) << 2) | ((randomness[1] & 192) >> 6)] + \
encoding[(randomness[1] & 62) >> 1] + \
encoding[((randomness[1] & 1) << 4) | ((randomness[2] & 240) >> 4)] + \
encoding[((randomness[2] & 15) << 1) | ((randomness[3] & 128) >> 7)] + \
encoding[(randomness[3] & 124) >> 2] + \
encoding[((randomness[3] & 3) << 3) | ((randomness[4] & 224) >> 5)] + \
encoding[randomness[4] & 31] + \
encoding[(randomness[5] & 248) >> 3] + \
encoding[((randomness[5] & 7) << 2) | ((randomness[6] & 192) >> 6)] + \
encoding[(randomness[6] & 62) >> 1] + \
encoding[((randomness[6] & 1) << 4) | ((randomness[7] & 240) >> 4)] + \
encoding[((randomness[7] & 15) << 1) | ((randomness[8] & 128) >> 7)] + \
encoding[(randomness[8] & 124) >> 2] + \
encoding[((randomness[8] & 3) << 3) | ((randomness[9] & 224) >> 5)] + \
encoding[randomness[9] & 31] | 5d1ba06d4d16f724a86c2c47c180c12fe0b16602 | 21,019 |
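A usage sketch, assuming ENCODING is the 32-character Crockford Base32 alphabet used by ULID implementations and that the hints type alias from the surrounding module is in scope (both live elsewhere in the original codec module):

import os

ENCODING = "0123456789ABCDEFGHJKMNPQRSTVWXYZ"  # assumed Crockford Base32 alphabet

randomness = os.urandom(10)                 # 10 cryptographically random bytes
encoded = encode_randomness(randomness)     # 16 Base32 characters
print(len(encoded), encoded)                # e.g. 16 3ZJ0Q8M5T9C4X7RB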
from collections import OrderedDict
import six
import json
import openml
def obtain_parameter_values(flow):
"""
Extracts all parameter settings from the model inside a flow in OpenML
format.
Parameters
----------
flow : OpenMLFlow
openml flow object (containing flow ids, i.e., it has to be downloaded
from the server)
Returns
-------
list
A list of dicts, where each dict has the following names:
- oml:name (str): The OpenML parameter name
- oml:value (mixed): A representation of the parameter value
- oml:component (int): flow id to which the parameter belongs
"""
openml.flows.functions._check_flow_for_server_id(flow)
def get_flow_dict(_flow):
flow_map = {_flow.name: _flow.flow_id}
for subflow in _flow.components:
flow_map.update(get_flow_dict(_flow.components[subflow]))
return flow_map
def extract_parameters(_flow, _flow_dict, component_model,
_main_call=False, main_id=None):
def is_subcomponent_specification(values):
# checks whether the current value can be a specification of
# subcomponents, as for example the value for steps parameter
# (in Pipeline) or transformers parameter (in
# ColumnTransformer). These are always lists/tuples of lists/
# tuples, size bigger than 2 and an OpenMLFlow item involved.
if not isinstance(values, (tuple, list)):
return False
for item in values:
if not isinstance(item, (tuple, list)):
return False
if len(item) < 2:
return False
if not isinstance(item[1], openml.flows.OpenMLFlow):
return False
return True
# _flow is openml flow object, _param dict maps from flow name to flow
# id for the main call, the param dict can be overridden (useful for
# unit tests / sentinels) this way, for flows without subflows we do
# not have to rely on _flow_dict
exp_parameters = set(_flow.parameters)
exp_components = set(_flow.components)
model_parameters = set([mp for mp in component_model.get_params()
if '__' not in mp])
if len((exp_parameters | exp_components) ^ model_parameters) != 0:
flow_params = sorted(exp_parameters | exp_components)
model_params = sorted(model_parameters)
raise ValueError('Parameters of the model do not match the '
'parameters expected by the '
'flow:\nexpected flow parameters: '
'%s\nmodel parameters: %s' % (flow_params,
model_params))
_params = []
for _param_name in _flow.parameters:
_current = OrderedDict()
_current['oml:name'] = _param_name
current_param_values = openml.flows.sklearn_to_flow(
component_model.get_params()[_param_name])
# Try to filter out components (a.k.a. subflows) which are
# handled further down in the code (by recursively calling
# this function)!
if isinstance(current_param_values, openml.flows.OpenMLFlow):
continue
if is_subcomponent_specification(current_param_values):
# complex parameter value, with subcomponents
parsed_values = list()
for subcomponent in current_param_values:
# scikit-learn stores usually tuples in the form
# (name (str), subcomponent (mixed), argument
# (mixed)). OpenML replaces the subcomponent by an
# OpenMLFlow object.
if len(subcomponent) < 2 or len(subcomponent) > 3:
raise ValueError('Component reference should be '
'size {2,3}. ')
subcomponent_identifier = subcomponent[0]
subcomponent_flow = subcomponent[1]
if not isinstance(subcomponent_identifier, six.string_types):
raise TypeError('Subcomponent identifier should be '
'string')
                    if not isinstance(subcomponent_flow,
                                      openml.flows.OpenMLFlow):
                        raise TypeError('Subcomponent flow should be an '
                                        'OpenMLFlow')
current = {
"oml-python:serialized_object": "component_reference",
"value": {
"key": subcomponent_identifier,
"step_name": subcomponent_identifier
}
}
if len(subcomponent) == 3:
if not isinstance(subcomponent[2], list):
raise TypeError('Subcomponent argument should be'
'list')
current['value']['argument_1'] = subcomponent[2]
parsed_values.append(current)
parsed_values = json.dumps(parsed_values)
else:
# vanilla parameter value
parsed_values = json.dumps(current_param_values)
_current['oml:value'] = parsed_values
if _main_call:
_current['oml:component'] = main_id
else:
_current['oml:component'] = _flow_dict[_flow.name]
_params.append(_current)
for _identifier in _flow.components:
subcomponent_model = component_model.get_params()[_identifier]
_params.extend(extract_parameters(_flow.components[_identifier],
_flow_dict, subcomponent_model))
return _params
flow_dict = get_flow_dict(flow)
parameters = extract_parameters(flow, flow_dict, flow.model,
True, flow.flow_id)
return parameters | 25374b844eb3172927e74fe20b26483e547a1583 | 21,020 |
def logging_sync_ocns(cookie, in_from_or_zero, in_to_or_zero):
""" Auto-generated UCSC XML API Method. """
method = ExternalMethod("LoggingSyncOcns")
method.cookie = cookie
method.in_from_or_zero = str(in_from_or_zero)
method.in_to_or_zero = str(in_to_or_zero)
xml_request = method.to_xml(option=WriteXmlOption.DIRTY)
return xml_request | 178e8207305f419a8f7d182b10b23ab8548ad624 | 21,021 |
def story_role(name, rawtext, text, lineno, inliner, options=None, content=None):
"""Link to a JIRA issue.
Returns 2 part tuple containing list of nodes to insert into the
document and a list of system messages.
Both are allowed to be empty.
:param name: The role name used in the document.
:param rawtext: The entire markup snippet, with role.
:param text: The text marked with the role.
:param lineno: The line number where rawtext appears in the input.
:param inliner: The inliner instance that called us.
:param options: Directive options for customization.
:param content: The directive content for customization.
"""
return role_base(name, rawtext, text, lineno, inliner,
options=options, content=content, role_type='story') | 0f347d7c5a7a802b9f3b23ee70996e86155d2ca9 | 21,022 |
def benedict_bornder_constants(g, critical=False):
""" Computes the g,h constants for a Benedict-Bordner filter, which
minimizes transient errors for a g-h filter.
Returns the values g,h for a specified g. Strictly speaking, only h
is computed, g is returned unchanged.
The default formula for the Benedict-Bordner allows ringing. We can
"nearly" critically damp it; ringing will be reduced, but not entirely
eliminated at the cost of reduced performance.
Parameters
----------
g : float
scaling factor g for the filter
critical : boolean, default False
Attempts to critically damp the filter.
Returns
-------
g : float
scaling factor g (same as the g that was passed in)
h : float
scaling factor h that minimizes the transient errors
Examples
--------
.. code-block:: Python
from filterpy.gh import GHFilter, benedict_bornder_constants
g, h = benedict_bornder_constants(.855)
f = GHFilter(0, 0, 1, g, h)
References
----------
Brookner, "Tracking and Kalman Filters Made Easy". John Wiley and
Sons, 1998.
"""
g_sqr = g**2
if critical:
return (g, 0.8 * (2. - g_sqr - 2*(1-g_sqr)**.5) / g_sqr)
return (g, g_sqr / (2.-g)) | ca40941b4843b3d71030549da2810c9241ebdf72 | 21,023 |
import ispyb.model.datacollection
import ispyb.model.processingprogram
import ispyb.model.screening
import ispyb.model.image_quality_indicators
import ispyb.model.detector
import ispyb.model.sample
import ispyb.model.samplegroup
import logging
import configparser
import mysql.connector
def enable(configuration_file, section="ispyb"):
"""Enable access to features that are currently under development."""
global _db, _db_cc, _db_config
if _db_config:
if _db_config == configuration_file:
# This database connection is already set up.
return
logging.getLogger("ispyb").warn(
"__future__ configuration file change requested"
)
disable()
logging.getLogger("ispyb").info(
"NOTICE: This code uses __future__ functionality in the ISPyB API. "
"This enables unsupported and potentially unstable code, which may "
"change from version to version without warnings. Here be dragons."
)
cfgparser = configparser.RawConfigParser()
if not cfgparser.read(configuration_file):
raise RuntimeError(
"Could not read from configuration file %s" % configuration_file
)
cfgsection = dict(cfgparser.items(section))
host = cfgsection.get("host")
port = cfgsection.get("port", 3306)
database = cfgsection.get("database", cfgsection.get("db"))
username = cfgsection.get("username", cfgsection.get("user"))
password = cfgsection.get("password", cfgsection.get("pw"))
# Open a direct MySQL connection
_db = mysql.connector.connect(
host=host,
port=port,
user=username,
password=password,
database=database,
use_pure=True,
)
_db_config = configuration_file
_db.autocommit = True
class DictionaryCursorContextManager:
"""This class creates dictionary cursors for mysql.connector connections.
By using a context manager it is ensured that cursors are closed
immediately after use.
Cursors created with this context manager return results as a dictionary
and offer a .run() function, which is an alias to .execute that accepts
query parameters as function parameters rather than a list.
"""
def __enter__(cm):
"""Enter context. Ensure the database is alive and return a cursor
with an extra .run() function."""
_db.ping(reconnect=True)
cm.cursor = _db.cursor(dictionary=True)
def flat_execute(stmt, *parameters):
"""Pass all given function parameters as a list to the existing
.execute() function."""
return cm.cursor.execute(stmt, parameters)
setattr(cm.cursor, "run", flat_execute)
return cm.cursor
def __exit__(cm, *args):
"""Leave context. Close cursor. Destroy reference."""
cm.cursor.close()
cm.cursor = None
_db_cc = DictionaryCursorContextManager
ispyb.model.datacollection.DataCollection.integrations = (
_get_linked_autoprocintegration_for_dc
)
ispyb.model.datacollection.DataCollection.screenings = _get_linked_screenings_for_dc
ispyb.model.datacollection.DataCollection.pdb = _get_linked_pdb_for_dc
ispyb.model.processingprogram.ProcessingProgram.reload = _get_autoprocprogram
ispyb.model.screening.Screening.outputs = _get_linked_outputs_for_screening
ispyb.model.screening.Screening.reload = _get_screening
ispyb.model.screening.ScreeningOutput.lattices = (
_get_linked_lattices_for_screening_output
)
ispyb.model.screening.ScreeningOutput.strategies = (
_get_linked_strategies_for_screening_output
)
ispyb.model.screening.ScreeningOutput.reload = _get_screening_output
ispyb.model.screening.ScreeningOutputLattice.reload = _get_screening_output_lattice
ispyb.model.screening.ScreeningStrategy.wedges = (
_get_linked_wedges_for_screening_strategy
)
ispyb.model.screening.ScreeningStrategy.reload = _get_screening_strategy
ispyb.model.screening.ScreeningStrategyWedge.sub_wedges = (
_get_linked_sub_wedges_for_screening_strategy_wedge
)
ispyb.model.screening.ScreeningStrategyWedge.reload = _get_screening_strategy_wedge
ispyb.model.screening.ScreeningStrategySubWedge.reload = (
_get_screening_strategy_sub_wedge
)
ispyb.model.image_quality_indicators.ImageQualityIndicators.reload = (
_get_image_quality_indicators
)
ispyb.model.image_quality_indicators.ImageQualityIndicatorsList.reload = (
_get_image_quality_indicators_for_dcid
)
ispyb.model.datacollection.DataCollection.image_quality = (
_get_linked_image_quality_indicators_for_data_collection
)
ispyb.model.detector.Detector.reload = _get_detector
ispyb.model.sample.Sample.reload = _get_sample
ispyb.model.datacollection.DataCollection.sample = _get_linked_sample_for_dcid
ispyb.model.samplegroup.SampleGroup.reload = _get_sample_group
ispyb.model.datacollection.DataCollection.sample_groups = (
_get_linked_sample_groups_for_dcid
) | a48ce8d2157f151a4f3e7146e7d8c8881a4dfc23 | 21,024 |
import numpy as np
def median(f, x, y, a, b):
    """
    Return the median of the (2a+1) x (2b+1) neighborhood of the point (x, y) in f.
    """
# Create the sub 2d array
sub_f = f[x - a:x + a + 1, y - b:y + b + 1]
# Return the median
arr = np.sort(np.asarray(sub_f).reshape(-1))
return np.median(arr) | 7cdb625ad4906efac92cd94b1dfce91df7854daf | 21,025 |
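For example, on a small grid the call below takes the median of the 3x3 neighborhood centred on (2, 2):

import numpy as np

f = np.arange(25).reshape(5, 5)
print(median(f, 2, 2, 1, 1))   # 3x3 block around (2, 2) -> 12.0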
from typing import Set
from pathlib import Path
def build_relevant_api_reference_files(
docstring: str, api_doc_id: str, api_doc_path: str
) -> Set[str]:
"""Builds importable link snippets according to the contents of a docstring's `# Documentation` block.
This method will create files if they do not exist, and will append links to the files that already do exist.
Args:
docstring: the docstring that contains the `# Documentation` block listing urls to be cross-linked.
api_doc_id: A string representation of the API doc that will have the link applied to it.
api_doc_path: a Docusaurus compliant path to the API document.
Returns:
A set containing the file paths that were created or appended to.
"""
output_paths = set()
document_paths = get_document_paths(docstring)
for relevant_path in document_paths:
links_path = Path(f"..{relevant_path}__api_links.mdx")
output_paths.add(links_path)
if links_path.exists():
with open(links_path, "a") as f:
f.write(f"- [{api_doc_id}]({api_doc_path})\n")
else:
with open(links_path, "w") as f:
f.write(f"- [{api_doc_id}]({api_doc_path})\n")
return output_paths | e83aaed8cfc0ec7ee8fffb3f95eb2c5aa948d212 | 21,026 |
def find_zip_entry(zFile, override_file):
"""
Implement ZipFile.getinfo() as case insensitive for systems with a case
insensitive file system so that looking up overrides will work the same
as it does in the Sublime core.
"""
try:
return zFile.getinfo(override_file)
except KeyError:
if _wrap("ABC") == _wrap("abc"):
override_file = _wrap(override_file)
entry_list = zFile.infolist()
for entry in entry_list:
if _wrap(entry.filename) == override_file:
return entry
raise | 33b1b868378a789ebc014615b1bc93b34b3f1e67 | 21,027 |
def get_mode(elements):
    """Return the most frequent element; when several elements tie, the smallest one is returned."""
dictionary = {}
elements.sort()
for element in elements:
if element in dictionary:
dictionary[element] += 1
else:
dictionary[element] = 1
# Get the max value
max_value = max(dictionary.values())
highest_elements = [key for key, value in dictionary.items() if value == max_value]
modes = sorted(highest_elements)
return modes[0] | bc792ffe58ffb3b9368559fe45ec623fe8accff6 | 21,028 |
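For example (when several values tie for the highest count, the smallest of them is returned):

print(get_mode([3, 1, 2, 2, 3]))   # 2 and 3 both appear twice; returns 2
print(get_mode([5, 5, 7]))         # returns 5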
def holtWintersAberration(requestContext, seriesList, delta=3):
"""
Performs a Holt-Winters forecast using the series as input data and plots the
positive or negative deviation of the series data from the forecast.
"""
results = []
for series in seriesList:
confidenceBands = holtWintersConfidenceBands(requestContext, [series], delta)
lowerBand = confidenceBands[0]
upperBand = confidenceBands[1]
aberration = list()
for i, actual in enumerate(series):
if series[i] is None:
aberration.append(0)
elif upperBand[i] is not None and series[i] > upperBand[i]:
aberration.append(series[i] - upperBand[i])
elif lowerBand[i] is not None and series[i] < lowerBand[i]:
aberration.append(series[i] - lowerBand[i])
else:
aberration.append(0)
newName = "holtWintersAberration(%s)" % series.name
results.append(TimeSeries(newName, series.start, series.end
, series.step, aberration))
return results | 05040695e7d6f6e5d8e117d32f66ebbfb0cb7392 | 21,029 |
def get_in_addition_from_start_to_end_item(li, start, end):
    """
    Return the elements from the start position to the end position (inclusive).
    :param li: list of elements
    :param start: start position
    :param end: end position
    :return: the elements between the start and end positions
    """
return li[start:end + 1] | 7106a9d409d9d77ab20e7e85d85c2ddb7a2a431c | 21,030 |
import re
def remove_special_message(section_content):
"""
Remove special message - "medicinal product no longer authorised"
e.g.
'me di cin al p ro du ct n o lo ng er a ut ho ris ed'
'me dic ina l p rod uc t n o l on ge r a uth ori se d'
:param section_content: content of a section
:return: content of a section without special message
"""
# string as it is present in the section content
SPECIAL_MESSAGE1 = 'me di cin al p ro du ct n o lo ng er a ut ho ris ed'
SPECIAL_MESSAGE2 = 'me dic ina l p ro du ct no lo ng er au th or ise d'
SPECIAL_MESSAGE3 = 'me dic ina l p rod uc t n o l on ge r a uth ori se d'
SPECIAL_MESSAGE4 = 'me dic ina l p ro du ct no lo ng er au tho ris ed'
SPECIAL_MESSAGE5 = 'me dic ina l p ro du ct no lo ng er a ut ho ris ed'
SPECIAL_MESSAGE6 = 'me dic ina l p rod uc t n o l on ge r a uth ori sed'
SPECIAL_MESSAGE7 = 'm ed ici na l p ro du ct no lo ng er a ut ho ris ed'
SPECIAL_MESSAGE8 = 'm ed ici na l p ro du ct no lo ng er au th or ise d'
SPECIAL_MESSAGE9 = 'med icin al pro du ct no lo ng er au tho ris ed'
SPECIAL_MESSAGE_ARRAY = [SPECIAL_MESSAGE1, SPECIAL_MESSAGE2, SPECIAL_MESSAGE3, SPECIAL_MESSAGE4,
SPECIAL_MESSAGE5, SPECIAL_MESSAGE6, SPECIAL_MESSAGE7, SPECIAL_MESSAGE8,
SPECIAL_MESSAGE9]
# in case message present in section content
for SPECIAL_MESSAGE in SPECIAL_MESSAGE_ARRAY:
section_content = section_content.replace(SPECIAL_MESSAGE, '')
# remove multiple consecutive spaces
section_content = re.sub(' +', ' ', section_content)
return section_content | 37d9cbd697a98891b3f19848c90cb17dafcd6345 | 21,031 |
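A quick illustration of the clean-up on a string containing one of the garbled watermark variants:

text = 'adverse reactions me di cin al p ro du ct n o lo ng er a ut ho ris ed were reported'
print(remove_special_message(text))   # -> 'adverse reactions were reported'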
def simulate_cash_flow_values(cash_flow_data, number_of_simulations=1):
"""Simulate cash flow values from their mean and standard deviation.
The function returns a list of numpy arrays with cash flow values.
Example:
Input:
cash_flow_data: [[100, 20], [-500, 10]]
number_of_simulations: 3
Output: [array([113.36222158, 77.39297513, 77.15350701]),
array([-506.58408186, -503.27855081, -500.37690891])]"""
if cash_flow_data and number_of_simulations > 0:
simulated = [get_random_numbers(mean, standard_deviation,
number_of_simulations)
for mean, standard_deviation in cash_flow_data]
else:
simulated = []
return simulated | 691122945f811e20b40032cb49920d3b2c7f5c13 | 21,032 |
import time
import numpy as np
from scipy.spatial.distance import cdist
def sim_v1(sim_params, prep_result, progress=None, pipeline=None):
"""
Map the simulation over the peptides in prep_result.
This is actually performed twice in order to get a train and (different!) test set
The "train" set includes decoys, the test set does not; furthermore
    the error modes and radiometry noise are different in each set.
"""
if sim_params.random_seed is None:
sim_params.random_seed = int(time.time())
np.random.seed(sim_params.random_seed)
# CREATE a *training-set* for all peptides (real and decoy)
if pipeline:
pipeline.set_phase(0, 2)
# Sanity check that all the peps are accounted for
pep_seqs_with_decoys = prep_result.pepseqs__with_decoys()
n_peps = pep_seqs_with_decoys.pep_i.nunique()
assert n_peps == prep_result.n_peps
(
train_dytmat,
train_radmat,
train_pep_recalls,
train_flus,
train_flu_remainders,
train_true_pep_iz,
) = _run_sim(
sim_params,
pep_seqs_with_decoys,
name="train",
n_peps=n_peps,
n_samples=sim_params.n_samples_train,
progress=progress,
)
if sim_params.is_survey:
test_dyemat = None
test_radmat = None
test_recalls = None
test_flus = None
test_flu_remainders = None
test_true_pep_iz = None
else:
# CREATE a *test-set* for real-only peptides
if pipeline:
pipeline.set_phase(1, 2)
(
test_dyemat,
test_radmat,
test_recalls,
test_flus,
test_flu_remainders,
test_true_pep_iz,
) = _run_sim(
sim_params,
prep_result.pepseqs__no_decoys(),
name="test",
n_peps=n_peps,
n_samples=sim_params.n_samples_test,
progress=progress,
)
# CHECK that the train and test are not identical in SOME non_zero_row
# If they are, there was some sort of RNG seed errors which might happen
# for example if sub-processes failed to re-init their RNG seeds.
# Test this by looking at pep_i==1
non_zero_rows = np.any(train_radmat[1] > 0, axis=(1, 2))
non_zero_row_args = np.argwhere(non_zero_rows)[0:100]
train_rows = train_radmat[1, non_zero_row_args].reshape(
(
non_zero_row_args.shape[0],
non_zero_row_args.shape[1]
* train_radmat.shape[2]
* train_radmat.shape[3],
)
)
test_rows = test_radmat[1, non_zero_row_args].reshape(
(
non_zero_row_args.shape[0],
non_zero_row_args.shape[1]
* test_radmat.shape[2]
* test_radmat.shape[3],
)
)
if train_rows.shape[0] > 0 and not sim_params.allow_train_test_to_be_identical:
any_differences = np.any(np.diagonal(cdist(train_rows, test_rows)) != 0.0)
check.affirm(any_differences, "Train and test sets are identical")
if train_dytmat is not None:
train_dytmat.reshape(
(train_dytmat.shape[0] * train_dytmat.shape[1], *train_dytmat.shape[2:])
)
if train_radmat is not None:
train_radmat.reshape(
(train_radmat.shape[0] * train_radmat.shape[1], *train_radmat.shape[2:])
)
if test_dyemat is not None:
test_dyemat.reshape(
(test_dyemat.shape[0] * test_dyemat.shape[1], *test_dyemat.shape[2:])
)
if test_radmat is not None:
test_radmat.reshape(
(test_radmat.shape[0] * test_radmat.shape[1], *test_radmat.shape[2:])
)
# REMOVE all-zero rows (EXCEPT THE FIRST which is the nul row)
assert np.all(train_dytmat[0, :, :] == 0)
some_non_zero_row_args = np.argwhere(
~np.all(train_dytmat[:, :, :] == 0, axis=(1, 2))
).flatten()
some_non_zero_row_args = np.concatenate(([0], some_non_zero_row_args))
# TASK: Plucking out the non-zero rows doesn't work well
    # with Array results -- I need to rethink that.
# For now, I'm converting this back to np.ndarray
train_dytmat = train_dytmat[some_non_zero_row_args]
train_radmat = train_radmat[some_non_zero_row_args]
train_true_pep_iz = train_true_pep_iz[some_non_zero_row_args]
if test_dyemat is not None:
assert np.all(test_dyemat[0, :, :] == 0)
some_non_zero_row_args = np.argwhere(
~np.all(test_dyemat[:, :, :] == 0, axis=(1, 2))
).flatten()
# DO not add a nul row into the test data
# some_non_zero_row_args = np.concatenate(([0], some_non_zero_row_args))
test_dyemat = test_dyemat[some_non_zero_row_args]
test_radmat = test_radmat[some_non_zero_row_args]
test_true_pep_iz = test_true_pep_iz[some_non_zero_row_args]
return SimV1Result(
params=sim_params,
train_dytmat=train_dytmat,
train_radmat=train_radmat,
train_pep_recalls=train_pep_recalls,
train_flus=train_flus,
train_flu_remainders=train_flu_remainders,
train_true_pep_iz=train_true_pep_iz,
test_dyemat=test_dyemat,
test_radmat=test_radmat,
test_recalls=test_recalls,
test_flus=test_flus,
test_true_pep_iz=test_true_pep_iz,
test_flu_remainders=test_flu_remainders,
) | 243fca643749a5d346013f0547cefea1c1df7767 | 21,033 |
def apply_function_elementwise_series(ser, func):
"""Apply a function on a row/column basis of a DataFrame.
Args:
ser (pd.Series): Series.
func (function): The function to apply.
Returns:
pd.Series: Series with the applied function.
Examples:
>>> df = pd.DataFrame(np.array(range(12)).reshape(4, 3), columns=list('abc'))
>>> ser = df['b']
>>> f = lambda x: '%.1f' % x
>>> apply_function_elementwise_series(ser, f)
0 1.0
1 4.0
2 7.0
3 10.0
Name: b, dtype: object
"""
return ser.map(func) | d2af0a9c7817c602b4621603a8f06283f34ae81a | 21,034 |
from bs4 import BeautifulSoup
def is_the_bbc_html(raw_html, is_lists_enabled):
"""
    Creates a concatenated string of the article, with or without <li> elements included, from bbc.co.uk.
:param raw_html: resp.content from response.get().
:param is_lists_enabled: Boolean to include <Li> elements.
:return: List where List[0] is a concatenated String of the article.
"""
article = [""]
parsed_html = BeautifulSoup(raw_html.decode('utf-8', 'ignore'), 'html.parser')
text_body = parsed_html.find("div", {"class": "story-body__inner"}).findAll('p')
for text in text_body:
article[0] += text.text
if is_lists_enabled:
        text_lists = parsed_html.find("div", {"class": "story-body__inner"}).findAll('li')
if len(text_lists) > 0:
for text in text_lists:
article[0] += text.text
return article | fb6bca09e1ebb78d7afd6d2afaa52feab9843d21 | 21,035 |
from importlib.util import spec_from_loader, module_from_spec
def create_empty_module(module_name, origin=None):
"""Creates a blank module.
Args:
module_name: The name to be given to the module.
origin: The origin of the module. Defaults to None.
Returns:
A blank module.
"""
spec = spec_from_loader(module_name, loader=None, origin=origin)
module = module_from_spec(spec)
return module | f65e1fbbbba13fc25e84ea89c57329ba48d22ac7 | 21,036 |
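A usage sketch: create a blank module and populate it dynamically.

mod = create_empty_module("scratchpad")
exec("def greet(name):\n    return 'hello ' + name", mod.__dict__)
print(mod.greet("world"))   # -> hello world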
def BitWidth(n: int):
""" compute the minimum bitwidth needed to represent and integer """
if n == 0:
return 0
if n > 0:
return n.bit_length()
if n < 0:
# two's-complement WITHOUT sign
return (n + 1).bit_length() | 46dcdfb0987268133d606e609d39c641b9e6faab | 21,038 |
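A few worked values (negatives follow the two's-complement-without-sign convention noted in the code):

for n in (0, 1, 5, 255, -1, -128):
    print(n, BitWidth(n))
# 0 -> 0, 1 -> 1, 5 -> 3, 255 -> 8, -1 -> 0, -128 -> 7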
import copy
import numpy
def read_many_nam_cube(netcdf_file_names, PREDICTOR_NAMES):
"""Reads storm-centered images from many NetCDF files.
    :param netcdf_file_names: 1-D list of paths to input files.
    :param PREDICTOR_NAMES: list of predictor variable names to read from each file.
    :return: image_dict: See doc for `read_image_file`.
    """
image_dict = None
keys_to_concat = [PREDICTOR_MATRIX_KEY]
for this_file_name in netcdf_file_names:
#print('Reading data from: "{0:s}"...'.format(this_file_name))
this_image_dict = read_nam_maps(this_file_name, PREDICTOR_NAMES)
if image_dict is None:
image_dict = copy.deepcopy(this_image_dict)
continue
for this_key in keys_to_concat:
image_dict[this_key] = numpy.concatenate(
(image_dict[this_key], this_image_dict[this_key]), axis=0
)
return image_dict | 100e6dfcd998ae6d2d2f673251c6110ccec90b00 | 21,039 |
def rouge_l_summary_level(evaluated_sentences, reference_sentences):
"""
Computes ROUGE-L (summary level) of two text collections of sentences.
http://research.microsoft.com/en-us/um/people/cyl/download/papers/
rouge-working-note-v1.3.1.pdf
Calculated according to:
R_lcs = SUM(1, u)[LCS<union>(r_i,C)]/m
P_lcs = SUM(1, u)[LCS<union>(r_i,C)]/n
F_lcs = ((1 + beta^2)*R_lcs*P_lcs) / (R_lcs + (beta^2) * P_lcs)
where:
SUM(i,u) = SUM from i through u
u = number of sentences in reference summary
C = Candidate summary made up of v sentences
m = number of words in reference summary
n = number of words in candidate summary
:param evaluated_sentences:
The sentences that have been picked by the summarizer
    :param reference_sentences:
        The sentences from the reference set
:returns float: F_lcs
:raises ValueError: raises exception if a param has len <= 0
"""
if len(evaluated_sentences) <= 0 or len(reference_sentences) <= 0:
raise (ValueError("Collections must contain at least 1 sentence."))
# total number of words in reference sentences
m = len(_split_into_words(reference_sentences))
# total number of words in evaluated sentences
n = len(_split_into_words(evaluated_sentences))
union_lcs_sum_across_all_references = 0
for ref_s in reference_sentences:
union_lcs_sum_across_all_references += _union_lcs(evaluated_sentences, ref_s)
return _f_lcs(union_lcs_sum_across_all_references, m, n) | 9022cc4cc90d9b57f48716839b5e97315a7b78c6 | 21,040 |
from torch import nn
def construct_classifier(cfg,
module_names,
in_features,
slot_machine=False,
k=8,
greedy_selection=True
):
"""
Constructs a sequential model of fully-connected layers
:param cfg:(List) The configuration of the model
:param module_names: (List) The names of the layers
:param in_features: (int) The number of input features to first fully-connected layer
:param slot_machine: (bool) constructs a module for weight updates or slot_machines
:param k:(int), the number of options per weight if model is a slot machine
:param greedy_selection: (bool), use greedy selection if model is slot machine
:return: model: a sequential module of fully-connected layers
"""
model = nn.Sequential()
for i, v in enumerate(cfg):
if v == 'D':
model.add_module(module_names[i], nn.Dropout(p=0.5))
elif v == "relu":
model.add_module(module_names[i], nn.ReLU(inplace=True))
else:
if slot_machine:
model.add_module(module_names[i],Linear(in_features, v, k, greedy_selection))
else:
model.add_module(module_names[i], nn.Linear(in_features, v, bias=False))
in_features = v
return model | 84091ce1a74a5baae8cde8b32c2ab28e0ccc7175 | 21,041 |
import numpy as np
def size_adjustment(imgs, shape):
"""
Args:
imgs: Numpy array with shape (data, width, height, channel)
= (*, 240, 320, 3).
shape: 256 or None.
256: imgs_adj.shape = (*, 256, 256, 3)
None: No modification of imgs.
Returns:
imgs_adj: Numpy array with shape (data, modified width, modified height, channel)
"""
if shape is None:
imgs_adj = imgs
elif shape == 256:
# Reshape from 240x320 to 256x256
imgs_adj = np.delete(imgs, obj=[i for i in range(32)] + [i for i in range(287, 319)], axis=2)
_tmp = imgs_adj.shape
mask = np.zeros(shape=(_tmp[0], 8, _tmp[2], _tmp[3]), dtype=np.uint8)
imgs_adj = np.concatenate([imgs_adj, mask], axis=1)
imgs_adj = np.concatenate([mask, imgs_adj], axis=1)
return imgs_adj | 5143a34b3ad2085596a682811b6f35dca040c3e0 | 21,042 |
def to_full_model_name(root_key: str) -> str:
"""
Find model name from the root_key in the file.
Args:
root_key: root key such as 'system-security-plan' from a top level OSCAL model.
"""
if root_key not in const.MODEL_TYPE_LIST:
raise TrestleError(f'{root_key} is not a top level model name.')
module = const.MODEL_TYPE_TO_MODEL_MODULE[root_key]
class_name = utils.alias_to_classname(root_key, utils.AliasMode.JSON)
return f'{module}.{class_name}' | 8c73a54cb03c8cc52d24ec4bc284326289ff04f1 | 21,043 |
from typing import Dict
def is_unique(s: str) -> bool:
"""
Time: O(n)
Space: O(n)
"""
chars: Dict[str, int] = {}
for char in s:
if char in chars:
return False
else:
chars[char] = 1
return True | 4f77691be1192202b57b20bdc5676a31bc8b175e | 21,044 |
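For example:

print(is_unique("abcdef"))   # True
print(is_unique("abcdea"))   # False -- 'a' repeats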
def is_available() -> bool:
"""Return ``True`` if the handler has its dependencies met."""
return HAVE_RLE | b4e035dc62ef79211cb038a8b567985679c500aa | 21,046 |
def model_with_buckets(encoder_inputs,
decoder_inputs,
targets,
weights,
buckets,
seq2seq,
softmax_loss_function=None,
per_example_loss=False,
name=None):
"""Create a sequence-to-sequence model with support for bucketing.
The seq2seq argument is a function that defines a sequence-to-sequence model,
e.g., seq2seq = lambda x, y: basic_rnn_seq2seq(
x, y, core_rnn_cell.GRUCell(24))
Args:
encoder_inputs: A list of Tensors to feed the encoder; first seq2seq input.
decoder_inputs: A list of Tensors to feed the decoder; second seq2seq input.
targets: A list of 1D batch-sized int32 Tensors (desired output sequence).
weights: List of 1D batch-sized float-Tensors to weight the targets.
buckets: A list of pairs of (input size, output size) for each bucket.
seq2seq: A sequence-to-sequence model function; it takes 2 input that
agree with encoder_inputs and decoder_inputs, and returns a pair
consisting of outputs and states (as, e.g., basic_rnn_seq2seq).
softmax_loss_function: Function (labels-batch, inputs-batch) -> loss-batch
to be used instead of the standard softmax (the default if this is None).
per_example_loss: Boolean. If set, the returned loss will be a batch-sized
tensor of losses for each sequence in the batch. If unset, it will be
a scalar with the averaged loss from all examples.
name: Optional name for this operation, defaults to "model_with_buckets".
Returns:
A tuple of the form (outputs, losses), where:
outputs: The outputs for each bucket. Its j'th element consists of a list
of 2D Tensors. The shape of output tensors can be either
[batch_size x output_size] or [batch_size x num_decoder_symbols]
depending on the seq2seq model used.
losses: List of scalar Tensors, representing losses for each bucket, or,
if per_example_loss is set, a list of 1D batch-sized float Tensors.
Raises:
ValueError: If length of encoder_inputs, targets, or weights is smaller
than the largest (last) bucket.
"""
if len(encoder_inputs) < buckets[-1][0]:
raise ValueError("Length of encoder_inputs (%d) must be at least that of la"
"st bucket (%d)." % (len(encoder_inputs), buckets[-1][0]))
if len(targets) < buckets[-1][1]:
raise ValueError("Length of targets (%d) must be at least that of last"
"bucket (%d)." % (len(targets), buckets[-1][1]))
if len(weights) < buckets[-1][1]:
raise ValueError("Length of weights (%d) must be at least that of last"
"bucket (%d)." % (len(weights), buckets[-1][1]))
all_inputs = encoder_inputs + decoder_inputs + targets + weights
losses = []
outputs = []
with ops.name_scope(name, "model_with_buckets", all_inputs):
for j, bucket in enumerate(buckets):
with variable_scope.variable_scope(
variable_scope.get_variable_scope(), reuse=True if j > 0 else None):
bucket_outputs, _ = seq2seq(encoder_inputs[:bucket[0]],
decoder_inputs[:bucket[1]])
outputs.append(bucket_outputs)
if per_example_loss:
losses.append(
sequence_loss_by_example(
outputs[-1],
targets[:bucket[1]],
weights[:bucket[1]],
softmax_loss_function=softmax_loss_function))
else:
losses.append(
sequence_loss(
outputs[-1],
targets[:bucket[1]],
weights[:bucket[1]],
softmax_loss_function=softmax_loss_function))
return outputs, losses | 795c7445bdf608db85148656179ccc0467af6dee | 21,047 |
def sqlite_cast(vtype, v):
"""
Returns the casted version of v, for use in
database.
SQLite does not perform any type check or conversion
    so this function should be used whenever data coming from outside
    is to be stored in the database.
This function also handles CoiotDatetime objects and
accepts "now" as an argument for them (the date will
then be the calling date of this function).
"""
if vtype is type(v) or v is None:
return v
if vtype is bool:
if type(v) is int:
return bool(v)
elif type(v) is str and v.lower() in ('true', 'false'):
return v.lower() == 'true'
elif vtype is int:
if type(v) in (bool, str):
return int(v)
elif vtype is str:
return str(v)
elif vtype is CoiotDatetime:
if type(v) in (float, int):
return CoiotDatetime.fromepoch(v)
elif v.lower() == 'now':
return CoiotDatetime.now()
raise TypeError("argument of type {} cannot be " +
"casted to {}".format(type(v), vtype)) | 2ecf79b5aec2d5516cc624b9aa279be9f1b9d1b2 | 21,048 |
def read_table(name):
"""
Mock of IkatsApi.table.read method
"""
return TABLES[name] | 261ab82a5389155997924c1468087a139b50f9e8 | 21,050 |
def cosh(x, out=None):
"""
Raises a ValueError if input cannot be rescaled to a dimensionless
quantity.
"""
if not isinstance(x, Quantity):
return np.cosh(x, out)
return Quantity(
np.cosh(x.rescale(dimensionless).magnitude, out),
dimensionless,
copy=False
) | d50891be37de3c9729c3a15e1315f74ff55baedc | 21,051 |
import datetime
import pandas as pd
def dates_from_360cal(time):
    """Convert cftime Datetime360Day values to a pandas DatetimeIndex.
This is because 360 calendar cftime objects are problematic, so we
will use datetime module to re-create all dates using the
available data.
Parameters
----------
time: single or numpy.ndarray of cftime._cftime.Datetime360Day
Returns
-------
DatetimeIndex object.
""" # noqa
# get all dates as strings
dates = []
for d in time:
dstr = '%0.4i-%0.2i-%0.2i' % (d.year, d.month, d.day)
date = datetime.datetime.strptime(dstr, '%Y-%m-%d')
dates.append(date)
return pd.to_datetime(dates) | d13e2146414a4dbd25cab0015348281503134331 | 21,052 |
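A usage sketch, assuming the cftime package is installed (note that dates such as the 30th of February, which exist in a 360-day calendar, would fail the Gregorian conversion):

import cftime

times = [cftime.Datetime360Day(2000, 1, 15), cftime.Datetime360Day(2000, 1, 16)]
print(dates_from_360cal(times))
# DatetimeIndex(['2000-01-15', '2000-01-16'], dtype='datetime64[ns]', freq=None)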
def db_queue(**data):
"""Add a record to queue table.
Arguments:
**data: The queue record data.
Returns:
(dict): The inserted queue record.
"""
fields = data.keys()
assert 'request' in fields
queue = Queue(**data)
db.session.add(queue)
db.session.commit()
return dict(queue) | ca5dda54fecf37be9eae682c2b04325b55caf931 | 21,053 |
import numpy as np
def loadMnistData(trainOrTestData='test'):
"""Loads MNIST data from sklearn or web.
:param str trainOrTestData: Must be 'train' or 'test' and specifies which \
part of the MNIST dataset to load.
:return: images, targets
"""
mnist = loadMNIST()
if trainOrTestData == 'train':
X = mnist.data[:60000, :].astype(np.uint8)
y = mnist.target[:60000].astype(np.uint8)
elif trainOrTestData == 'test':
X = mnist.data[60000:, :].astype(np.uint8)
y = mnist.target[60000:].astype(np.uint8)
else:
raise ValueError("trainOrTestData must be 'train' or 'test'.")
return X, y | 3fb06616a784ac863f4df093e981982be077f5a7 | 21,054 |
def times_once() -> _Timing:
"""
Expect the request a single time
:return: Timing object
"""
return _Timing(1) | dd4d97344613676668cf7e07fad6e5f696861924 | 21,055 |
import numpy as np
def linear_growth(mesh, pos, coefficient):
    """Applies a homothety to a dictionary of coordinates.
Parameters
----------
mesh : Topomesh
Not used in this algorithm
pos : dict(int -> iterable)
Dictionary (pid -> ndarray) of the tissue vertices
coefficient : float or ndarray
Scaling coefficient for the homothety
Returns
-------
dict(int -> ndarray)
dictionary (pid -> new position) of the vertices
"""
utilities.check_pos(pos)
scaling = np.array(coefficient)
    res = dict((pid, scaling * vec) for pid, vec in pos.items())
    assert all(v is not None for v in res.values())
return res | bed27bc4a75d1628bf3331062817d1bf1b21e9c8 | 21,056 |
def einstein_t(tini, tfin, npoint, HT_lim=3000,dul=False,model=1):
"""
Computes the *Einstein temperature*
Args:
tini: minimum temperature (K) of the fitting interval
tfin: maximum temperature
npoint: number of points in the T range
HT_lim: high temperature limit where Cv approaches the Dulong-Petit value
model: if model=1 a single Einstein oscillator is considered (default),
if model > 1, 2 Einstein oscillators are considered
"""
flag_int=False
if f_fix.flag:
kp_original=f_fix.value
flag_int=True
reset_fix()
v0, k_gpa, kp=eos_temp(298.15,prt=False, update=True)
set_fix(kp)
print("Kp fixed to %4.2f" % kp)
vol=new_volume(298.15,0.0001)
ent, cve=entropy_v(298.15,vol[0])
dp_limit=apfu*3*avo*kb # Dulong Petit limit
emp=10636/(ent/apfu+6.44) # Empirical Einstein T
t_range=np.linspace(tini, tfin, npoint)
cv_list=np.array([])
for ti in t_range:
enti, cvi=entropy_v(ti, vol, plot=False, prt=False)
cv_list=np.append(cv_list, cvi)
reset_fix()
if flag_int:
set_fix(kp_original)
t_range=np.append(t_range,HT_lim)
cv_list=np.append(cv_list, dp_limit)
sigma=np.ones(len(t_range))
sigma[len(sigma)-1]=0.1
if model==1:
ein_fit, ein_cov=curve_fit(einstein_fun, t_range, cv_list, p0=emp, \
sigma=sigma, xtol=1e-15, ftol=1e-15)
else:
ein_fit, ein_cov=curve_fit(einstein_2_fun, t_range, cv_list, \
sigma=sigma,p0=[emp,emp], xtol=1e-15, ftol=1e-15)
t_range_new=np.linspace(tini,HT_lim,50)
plt.figure()
if model==1:
plt.plot(t_range_new, einstein_fun(t_range_new, ein_fit[0]), "k-", \
t_range, cv_list, "k*")
else:
plt.plot(t_range_new, einstein_2_fun(t_range_new, ein_fit[0],ein_fit[1]), "k-", \
t_range, cv_list, "k*")
plt.xlabel("Temperature (K)")
plt.ylabel("Cv (J/mol K)")
plt.show()
print("\nEinstein temperature")
print("empirical estimation (from molar entropy): %6.2f K" % emp)
if model==1:
print("result from fit: %6.2f K" % ein_fit[0])
else:
print("result from fit: %6.2f, %6.2f K" % (ein_fit[0],ein_fit[1]))
print("Dulong-Petit limit for Cv (T = %5.2f K): %6.2f J/mol K" % \
(HT_lim, dp_limit))
t_table=np.linspace(tini,tfin,10)
cv_real=np.array([])
cv_ein=np.array([])
for ti in t_table:
enti, cri=entropy_v(ti, vol, plot=False, prt=False)
if model==1:
ce=einstein_fun(ti,ein_fit[0])
else:
ce=einstein_2_fun(ti,ein_fit[0],ein_fit[1])
cv_real=np.append(cv_real, cri)
cv_ein=np.append(cv_ein, ce)
serie=(t_table,cv_real,cv_ein)
pd.set_option('colheader_justify', 'center')
df=pd.DataFrame(serie, index=['T (K)','Cv "real"','Cv "fit"'])
df=df.T
df2=df.round(2)
print("")
print(df2.to_string(index=False))
if model==1:
print("\nFit at T = %6.2f K: Cv = %6.2f J/mol K" % \
(HT_lim, einstein_fun(HT_lim, ein_fit[0])))
else:
print("\nFit at T = %6.2f K: Cv = %6.2f J/mol K" % \
(HT_lim, einstein_2_fun(HT_lim, ein_fit[0], ein_fit[1])))
if dul:
return ein_fit | bc914dcd600f9f5b3327a0e954356f4dd5d87493 | 21,057 |
import pathlib
def normalize_uri(path_uri: str) -> str:
"""Convert any path to URI. If not a path, return the URI."""
if not isinstance(path_uri, pathlib.Path) and is_url(path_uri):
return path_uri
return pathlib.Path(path_uri).resolve().as_uri() | b0682d1b2b1dea07195865db4be534a18e6b965e | 21,058 |
import logging
from multiprocessing import Process, Queue
from numpy import logspace
from sklearn.model_selection import KFold
def RETune(ont: Ontology, training: [Annotation]):
""" Tune the relation extraction class over a range of various values and return the correct
parameters
Params:
ont (RelationExtractor/Ontology) - The ontology of information needed to form the base
training ([Datapoint]) - A collection of data points to be able to perform cross
validation
Returns:
scores - A data structure that holds all of the metric scores for the extractor against
the structures then against the alphas
structures - The network sizes and shapes
alphas - The neural network
"""
logging.getLogger().setLevel(logging.ERROR) # Ensure that logging output is captured
# The structures to validate
structures = [(3,1), (4,2), (6,3), (8,4), (12,6), (20,10), (50,20)]
alphas = logspace(-16,1,20)
scores = []
for layers in structures:
layer_scores = []
for alpha in alphas:
def run(queue, tr, val):
tr, val = [training[i] for i in tr], [training[i] for i in val]
# Create a new extractor model
ext = RelationExtractor(ontology=ont, hidden_layers=layers, alpha=alpha)
# Generate the training and validation documents
Xtr, Xtv = Document(), Document()
Xtr.datapoints(tr)
Xtv.datapoints(val)
# Fit, predict and score
ext.fit(Xtr)
ext.predict(Xtv)
results = score(ont, [Xtv])
queue.put(results[0])
queue = Queue()
processors = [Process(target=run, args=(queue, tr, val))
for tr, val in KFold(n_splits=5, shuffle=True).split(training)]
[p.start() for p in processors]
[p.join() for p in processors]
alpha_scores = [queue.get() for _ in range(5)]
compressed = {"precision":[],"recall":[],"f1":[]}
for r in alpha_scores:
for k, v in r.items():
compressed[k].append(v)
for k, v in compressed.items():
compressed[k] = sum(v)/len(v)
layer_scores.append(compressed)
scores.append(layer_scores)
return scores, structures, alphas | d53831f08fd1855537b3bb7cb5a5f27625fa8b31 | 21,059 |
def create_instance(test_id, config, args):
"""
Invoked by TestExecutor class to create a test instance
@test_id - test index number
@config - test parameters from, config
@args - command line args
"""
return TestNodeConnectivity(test_id, config, args) | a3defb1f0f72fc0788fa2120829334f9a9670042 | 21,060 |
def to_me() -> Rule:
"""
    :Description:
      Use ``event.is_tome()`` to check whether the event is related to the bot.
    :Parameters:
      * None
"""
return Rule(ToMeRule()) | 92b6a04bbeac6e0b3eb3f53641efd2552b19f620 | 21,061 |
def unsaturated_atom_keys(xgr):
""" keys of unsaturated (radical or pi-bonded) atoms
"""
atm_unsat_vlc_dct = atom_unsaturated_valences(xgr, bond_order=False)
unsat_atm_keys = frozenset(dict_.keys_by_value(atm_unsat_vlc_dct, bool))
return unsat_atm_keys | 0af0469b3370a0c015238cad5b2717fbb977e6c5 | 21,062 |
def clip_data(input_file, latlim, lonlim):
"""
Clip the data to the defined extend of the user (latlim, lonlim)
    Keyword Arguments:
    input_file -- input dataset to be clipped (GeoTIFF path or an open gdal dataset)
latlim -- [ymin, ymax]
lonlim -- [xmin, xmax]
"""
try:
if input_file.split('.')[-1] == 'tif':
dest_in = gdal.Open(input_file)
else:
dest_in = input_file
except:
dest_in = input_file
# Open Array
data_in = dest_in.GetRasterBand(1).ReadAsArray()
# Define the array that must remain
Geo_in = dest_in.GetGeoTransform()
Geo_in = list(Geo_in)
Start_x = np.max([int(np.floor(((lonlim[0]) - Geo_in[0])/ Geo_in[1])),0])
End_x = np.min([int(np.ceil(((lonlim[1]) - Geo_in[0])/ Geo_in[1])),int(dest_in.RasterXSize)])
Start_y = np.max([int(np.floor((Geo_in[3] - latlim[1])/ -Geo_in[5])),0])
End_y = np.min([int(np.ceil(((latlim[0]) - Geo_in[3])/Geo_in[5])), int(dest_in.RasterYSize)])
#Create new GeoTransform
Geo_in[0] = Geo_in[0] + Start_x * Geo_in[1]
Geo_in[3] = Geo_in[3] + Start_y * Geo_in[5]
Geo_out = tuple(Geo_in)
data = np.zeros([End_y - Start_y, End_x - Start_x])
data = data_in[Start_y:End_y,Start_x:End_x]
dest_in = None
return(data, Geo_out) | bf691d4021cf0bbeade47b6d389e5daa3261f22a | 21,063 |
def fetch_last_posts(conn) -> list:
"""Fetch tooted posts from db"""
cur = conn.cursor()
cur.execute("select postid from posts")
last_posts = cur.fetchall()
return [e[0] for e in last_posts] | dd5addd1ba19ec2663a84617904f6754fe7fc1fc | 21,064 |
def update_click_map(selectedData, date, hoverData, inputData):
"""
    click to select an airport and show its detailed information
:param selectedData:
:param date:
:param hoverData:
:return:
"""
timestamp = pd.to_datetime(date) if date else 0
fig = px.scatter_geo(
airports_info,
scope="usa",
lat=airports_info["LATITUDE"],
lon=airports_info["LONGITUDE"],
hover_name=airports_info["IATA_CODE"],
color="COLOR_MAP",
color_discrete_map="identity"
)
fig.update_layout(hovermode="closest",
margin=dict(l=5, r=0, t=20, b=20),
clickmode="event+select",
template='ggplot2')
if inputData:
origin_lon = location_dic[inputData]['lon']
origin_lat = location_dic[inputData]['lat']
airport = inputData
infos = airports[(airports["ORIGIN_AIRPORT"] == airport) & (airports["DATE"] == timestamp)] if timestamp != 0 \
else overview_destination[overview_destination["ORIGIN_AIRPORT"] == airport]
destinations = infos["DESTINATION_AIRPORT"].tolist()[0] if infos["DESTINATION_AIRPORT"].tolist() else []
points = airports_info[airports_info["IATA_CODE"].isin(destinations) | (airports_info["IATA_CODE"] == airport)]
points["COLOR_MAP"] = "#525252"
fig = px.scatter_geo(
airports_info,
scope="usa",
lat=points["LATITUDE"],
lon=points["LONGITUDE"],
hover_name=points["IATA_CODE"],
hover_data=None,
color=points["COLOR_MAP"],
color_discrete_map="identity"
)
fig.update_layout(clickmode="event+select",
margin=dict(l=0, r=0, t=20, b=20),
template="ggplot2")
for des in destinations:
fig.add_trace(
go.Scattergeo(
lon=[origin_lon, location_dic[des]["lon"]],
lat=[origin_lat, location_dic[des]["lat"]],
mode="lines",
line=dict(width=1, color='#cb181d'),
marker=dict(color='#cb181d'),
hoverinfo="skip",
showlegend=False
)
)
return fig
if selectedData and inputData:
point_dict = selectedData["points"][0]
origin_lon = point_dict['lon']
origin_lat = point_dict['lat']
airport = point_dict['hovertext']
infos = airports[(airports["ORIGIN_AIRPORT"] == airport) & (airports["DATE"] == timestamp)] if timestamp != 0 \
else overview_destination[overview_destination["ORIGIN_AIRPORT"] == airport]
destinations = infos["DESTINATION_AIRPORT"].tolist()[0] if infos["DESTINATION_AIRPORT"].tolist() else []
points = airports_info[airports_info["IATA_CODE"].isin(destinations) | (airports_info["IATA_CODE"] == airport)]
points["COLOR_MAP"] = "#525252"
fig = px.scatter_geo(
airports_info,
scope="usa",
lat=points["LATITUDE"],
lon=points["LONGITUDE"],
hover_name=points["IATA_CODE"],
hover_data=None,
color=points["COLOR_MAP"],
color_discrete_map="identity"
)
fig.update_layout(clickmode="event+select")
fig.update_layout(
margin=dict(l=0, r=0, t=20, b=20),
template="ggplot2"
)
for des in destinations:
fig.add_trace(
go.Scattergeo(
lon=[origin_lon, location_dic[des]["lon"]],
lat=[origin_lat, location_dic[des]["lat"]],
mode="lines",
line=dict(width=1, color='#cb181d'),
marker=dict(color='#cb181d'),
hoverinfo="skip",
showlegend=False
)
)
return fig
    # on hover, show the airports reachable from the hovered airport
elif hoverData:
point_dict = hoverData["points"][0]
origin_lon = point_dict['lon']
origin_lat = point_dict['lat']
airport = point_dict['hovertext']
infos = airports[(airports["ORIGIN_AIRPORT"] == airport) & (airports["DATE"] == timestamp)] if timestamp != 0 \
else overview_destination[overview_destination["ORIGIN_AIRPORT"] == airport]
# infos = airports[(airports["ORIGIN_AIRPORT"]==airport) & (airports["DATE"]==timestamp)]
destinations = infos["DESTINATION_AIRPORT"].tolist()[0] if infos["DESTINATION_AIRPORT"].tolist() else []
for des in destinations:
fig.add_trace(
go.Scattergeo(
lon=[origin_lon, location_dic[des]["lon"]],
lat=[origin_lat, location_dic[des]["lat"]],
mode="lines",
line=dict(width=1, color='#cb181d'),
hoverinfo="skip",
showlegend=False
)
)
# fig.update_layout(clear_on_unhover=True)
return fig
else:
return fig | 1baaba25254eede65c2dff9b95c9ac40a0777dac | 21,065 |
def EncoderText(model_name, vocab_size, word_dim, embed_size, num_layers, use_bi_gru=False, text_norm=True, dropout=0.0):
"""A wrapper to text encoders. Chooses between an different encoders
that uses precomputed image features.
"""
model_name = model_name.lower()
EncoderMap = {
'scan': EncoderTextRegion,
'vsepp': EncoderTextGlobal,
'sgraf': EncoderTextRegion,
'imram': EncoderTextRegion
}
if model_name in EncoderMap:
txt_enc = EncoderMap[model_name](vocab_size, word_dim, embed_size, num_layers, use_bi_gru, text_norm, dropout)
else:
raise ValueError("Unknown model: {}".format(model_name))
return txt_enc | bf3657e2c5def238e9ec84cd674c21c079169b9e | 21,066 |
def feat_extract(pretrained=False, **kwargs):
"""Constructs a ResNet-Mini-Imagenet model"""
model_urls = {
'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
'resnet52': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
}
logger = kwargs['opts'].logger
# resnet"x", x = 1 + sum(layers)x3
if kwargs['structure'] == 'resnet40':
model = ResNet(Bottleneck, [3, 4, 6], kwargs['in_c'])
elif kwargs['structure'] == 'resnet19':
model = ResNet(Bottleneck, [2, 2, 2], kwargs['in_c'])
elif kwargs['structure'] == 'resnet12':
dropblock_size = 5 if 'imagenet' in kwargs['opts'].dataset.name else 2
model = resnet12(avg_pool=False, drop_rate=0.1, dropblock_size=dropblock_size)
elif kwargs['structure'] == 'resnet52':
model = ResNet(Bottleneck, [4, 8, 5], kwargs['in_c'])
elif kwargs['structure'] == 'resnet34':
model = ResNet(Bottleneck, [3, 4, 4], kwargs['in_c'])
elif kwargs['structure'] == 'shallow':
model = CNNEncoder(kwargs['in_c'])
else:
raise NameError('structure not known {} ...'.format(kwargs['structure']))
if pretrained:
        logger('Using pre-trained model from the official PyTorch website, {:s}'.format(kwargs['structure']))
model.load_state_dict(model_zoo.load_url(model_urls[kwargs['structure']]), strict=False)
return model | 9e628b4905e696aa55c9e4313888f406bf1fb413 | 21,067 |
from typing import Union
from pathlib import Path
from typing import Optional
import fnmatch
import tempfile
def compose_all(
mirror: Union[str, Path],
branch_pattern: str = "android-*",
work_dir: Optional[Path] = None,
force: bool = False,
) -> Path:
"""Iterates through all the branches in AOSP and create the source maps.
This methods:
- list all the existing branches and filter those matching the pattern
- does a partial checkout of each of them
- parses the Soong File and store them
:param mirror: Path/Link to a mirror directory or an URL.
:param branch_pattern: Optional. Pattern to filter branches
:param work_dir: Optional. Work directory
:param force: Optional. Overrides results.
:return: The path to the work directory
"""
# List branches
all_branches = get_all_branches(mirror)
branches = fnmatch.filter(all_branches, branch_pattern)
if work_dir is None:
work_dir = Path(tempfile.mkdtemp(prefix="bgraph_"))
logger.info("Found %d branches", len(branches))
for branch_name in branches:
compose_manifest_branch(branch_name, mirror, work_dir, force)
logger.info("Finished")
return work_dir | 4293df4708633574ccab70fe597ca390b04aa12c | 21,068 |
def rearrange_digits(input_list):
"""
Rearrange Array Elements so as to form two number such that their sum is maximum.
Args:
input_list(list): Input List
Returns:
(int),(int): Two maximum sums
"""
n = len(input_list)
heap_sort(input_list)
decimal_value = 1
n1 = 0
for i in range(0, n, 2):
n1 += input_list[i] * decimal_value
decimal_value *= 10
decimal_value = 1
n2 = 0
for i in range(1, n, 2):
n2 += input_list[i] * decimal_value
decimal_value *= 10
return n1, n2 | 3d0d4964ce5faca8aeb27bef56de1840e5cb5f51 | 21,069 |
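A quick check of the pairing logic; heap_sort is defined elsewhere in the project, so for a standalone run the built-in in-place sort is substituted here (it gives the same ascending order):

heap_sort = list.sort   # stand-in for the project's heap_sort helper
print(rearrange_digits([1, 2, 3, 4, 5]))   # -> (531, 42); 531 + 42 = 573 is the maximum sum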
def _partial_ema_scov_update(s:dict, x:[float], r:float=None, target=None):
""" Update recency weighted estimate of scov-like matrix by treating quadrants individually """
assert len(x)==s['n_dim']
# If target is not supplied we maintain a mean that switches from emp to ema
if target is None:
target = s['target']
if target is None:
target = s['sma']['mean']
# Update running partial scatter estimates
for q,(w,sgn1,sgn2) in QUADRANTS.items():
# Morally:
# x1 = max(0, (x-target)*sgn1) * sgn1
# x2 = (np.max(0, (x-target)*sgn2) * sgn2) if sgn1!=sgn2 else x1
x1 = (x-target)*sgn1
x2 = (x-target)*sgn2
x1[x1<0]=0
x2[x2<0]=0
x1 = sgn1*x1
x2 = sgn2*x2
s[q] = _ema_scov_update(s[q],x=x1,r=r,target=0, y=x2)
s['mean'] = np.copy( s['sma']['mean'] )
s['n_samples'] = s['sma']['n_samples']
if s['n_samples']>=2:
s['scov'] = np.zeros(shape=((s['n_dim'],s['n_dim'])))
for q in QUADRANTS:
try:
s['scov'] += s[q]['scov']
except:
pass
else:
s['scov'] = np.eye(s['n_dim'])
s['sma'] = sma(s=s['sma'], x=x, r=r)
return s | b54f2897abe45eec85cb843a23e8d6f0f4f2642d | 21,070 |
from selenium.webdriver.chrome.options import Options
def _get_chrome_options():
"""
Returns the chrome options for the following arguments
"""
chrome_options = Options()
# Standard options
chrome_options.add_argument("--disable-infobars")
chrome_options.add_argument('--ignore-certificate-errors')
# chrome_options.add_argument('--no-sandbox')
chrome_options.add_argument('--disable-dev-shm-usage')
chrome_options.add_argument("--start-maximized")
chrome_options.add_argument("--auto-select-desktop-capture-source=Entire screen")
return chrome_options | 0db0799c53487e35b4d2de977fa07fb260d7e930 | 21,072 |
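A usage sketch (assumes Selenium and a matching chromedriver are available locally):

from selenium import webdriver

driver = webdriver.Chrome(options=_get_chrome_options())
driver.get("https://example.com")
print(driver.title)
driver.quit()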
def legendre(n, monic=0):
"""Returns the nth order Legendre polynomial, P_n(x), orthogonal over
[-1,1] with weight function 1.
"""
if n < 0:
raise ValueError("n must be nonnegative.")
if n==0: n1 = n+1
else: n1 = n
x,w,mu0 = p_roots(n1,mu=1)
if n==0: x,w = [],[]
hn = 2.0/(2*n+1)
kn = _gam(2*n+1)/_gam(n+1)**2 / 2.0**n
p = orthopoly1d(x,w,hn,kn,wfunc=lambda x: 1.0,limits=(-1,1),monic=monic,
eval_func=lambda x: eval_legendre(n,x))
return p | bfd2bb0603e320e9ea330c8e51b17ab53a03382f | 21,074 |
def cal_sort_key(cal):
"""
Sort key for the list of calendars: primary calendar first,
then other selected calendars, then unselected calendars.
(" " sorts before "X", and tuples are compared piecewise)
"""
if cal["selected"]:
selected_key = " "
else:
selected_key = "X"
if cal["primary"]:
primary_key = " "
else:
primary_key = "X"
return (primary_key, selected_key, cal["summary"]) | 4235700b003689fed304b88085ba9fa9880f3839 | 21,075 |
import pandas as pd
def preview_game_num():
    """Return the number of the last game played."""
x = sorted(df["Partida"],reverse=True)[0]
return x | 7af698416fd60be4e7be74e7a104cd6fa956f649 | 21,077 |
def XCO(
directed = False, preprocess = "auto", load_nodes = True, load_node_types = True,
load_edge_weights = True, auto_enable_tradeoffs = True,
sort_tmp_dir = None, verbose = 2, cache = True, cache_path = None,
cache_sys_var = "GRAPH_CACHE_DIR", version = "4.46", **kwargs
) -> Graph:
"""Return XCO graph
Parameters
----------
directed = False
preprocess = "auto"
Preprocess for optimal load time & memory peak.
Will preprocess in Linux/macOS but not Windows.
load_nodes = True
Load node names or use numeric range
auto_enable_tradeoffs = True
Enable when graph has < 50M edges
cache_path = None
Path to store graphs
Defaults either to `GRAPH_CACHE_DIR` sys var or `graphs`
cache_sys_var = "GRAPH_CACHE_DIR"
version = "4.46"
Version to retrieve
The available versions are:
- 4.46
"""
return AutomaticallyRetrievedGraph(
"XCO", version, "kgobo", directed, preprocess, load_nodes,
load_node_types, load_edge_weights, auto_enable_tradeoffs, sort_tmp_dir, verbose, cache,
cache_path, cache_sys_var, kwargs
)() | 34c77f3074031b41fba8da0523a263a511734bff | 21,078 |
def wraplatex(text, width=WIDTH):
""" Wrap the text, for LaTeX, using ``textwrap`` module, and ``width``."""
return "$\n$".join(wrap(text, width=width)) | b558f2524917ec73160f4bea48029dedb9b6a12e | 21,080 |
def register(request):
"""
Render and process a basic registration form.
"""
ctx = {}
if request.user.is_authenticated():
if "next" in request.GET:
return redirect(request.GET.get("next", 'control:index'))
return redirect('control:index')
if request.method == 'POST':
form = GlobalRegistrationForm(data=request.POST)
if form.is_valid():
user = User.objects.create_global_user(
form.cleaned_data['email'], form.cleaned_data['password'],
locale=request.LANGUAGE_CODE,
timezone=request.timezone if hasattr(request, 'timezone') else settings.TIME_ZONE
)
user = authenticate(identifier=user.identifier, password=form.cleaned_data['password'])
auth_login(request, user)
return redirect('control:index')
else:
form = GlobalRegistrationForm()
ctx['form'] = form
return render(request, 'pretixcontrol/auth/register.html', ctx) | f8d81d16903d0d5fe2e3224a535fd8f1795f9ad0 | 21,081 |
from typing import List
import numpy as np
def green_agg(robots: List[gs.Robot]) -> np.ndarray:
"""
This is a dummy aggregator function (for demonstration) that just saves
the value of each robot's green color channel
"""
out_arr = np.zeros([len(robots)])
for i, r in enumerate(robots):
out_arr[i] = r._color[1]
return out_arr | 8e86200bf7ed51cea3bdce06be2fb3300ac20a5a | 21,082 |
import socket
def tcp_port_open_locally(port):
"""
Returns True if the given TCP port is open on the local machine
"""
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        result = sock.connect_ex(("127.0.0.1", port))
    finally:
        sock.close()
    return result == 0 | f5c801a5016085eedbed953089742e184f514db5 | 21,083
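# Example usage (illustrative sketch): True only if something is listening locally.
if tcp_port_open_locally(22):
    print("something is listening on 127.0.0.1:22")
else:
    print("port 22 is closed locally")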
def wrap(text, width=80):
"""
Wraps a string at a fixed width.
Arguments
---------
text : str
Text to be wrapped
width : int
Line width
Returns
-------
str
Wrapped string
"""
return "\n".join(
[text[i:i + width] for i in range(0, len(text), width)]
) | 793840a1cae51397de15dd16051c5dfffc211768 | 21,084 |
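# Example usage (illustrative sketch): this wrapper splits at a fixed character
# count, so unlike textwrap.wrap it may break in the middle of a word.
print(wrap("abcdefghij" * 3, width=10))
# abcdefghij
# abcdefghij
# abcdefghij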
def parallel_vector(R, alt, max_alt=1e5):
"""
Generates a viewing and tangent vectors
parallel to the surface of a sphere
"""
if not hasattr(alt, '__len__'):
alt = np.array([alt])
viewer = np.zeros(shape=(3, len(alt)))
tangent = np.zeros_like(viewer)
viewer[0] = -(R+max_alt*2)
viewer[1] = R+alt
tangent[1] = R+alt
return viewer, tangent | 49f4a1c4fe7267078cfac05af78c2fc850c1edfb | 21,085 |
from pathlib import Path
def load_datasets(parser, args):
"""Loads the specified dataset from commandline arguments
Returns:
train_dataset, validation_dataset
"""
args = parser.parse_args()
dataset_kwargs = {
"root": Path(args.train_dir),
}
source_augmentations = Compose(
[globals()["_augment_" + aug] for aug in args.source_augmentations]
)
train_dataset = MIMIIDataset(
split="0dB",
subset=train_tracks,
sources=args.sources,
targets=args.sources,
source_augmentations=source_augmentations,
random_track_mix=True,
segment=args.seq_dur,
random_segments=True,
sample_rate=args.sample_rate,
samples_per_track=args.samples_per_track,
**dataset_kwargs,
)
train_dataset = filtering_out_valid(train_dataset)
valid_dataset = MIMIIDataset(
split="0dB",
subset=validation_tracks,
sources=args.sources,
targets=args.sources,
segment=None,
**dataset_kwargs,
)
return train_dataset, valid_dataset | 17f25443b34b9b6bc87c259c65d4af13b76b5303 | 21,086 |
def stock_total_deal_money():
"""
    Total turnover (deal money) of the Shenzhen and Shanghai benchmark indexes, in units of 100 million.
:return:
"""
df = stock_zh_index_spot()
    # Shenzhen Component Index: sz399001, Shanghai Composite Index: sh000001
ds = df[(df['代码'] == 'sz399001') | (df['代码'] == 'sh000001')]
return ds['成交额'].sum() / 100000000 | 241c0080ed64acc21c1d8072befd168415184130 | 21,087 |
def _ls(dir=None, project=None, all=False, appendType=False, dereference=False, directoryOnly=False):
"""
Lists file(s) in specified MDSS directory.
:type dir: :obj:`str`
:param dir: MDSS directory path for which files are listed.
:type project: :obj:`str`
:param project: NCI project identifier string, if :samp:`None`, uses default
project (as returned from the :func:`getDefaultProject` function).
:type all: :obj:`bool` or :obj:`str`
:param all: If :samp:`True` or :samp:`"all"` lists files/directories whose names begin with '.'.
If :samp:`almost-all` lists files/directories whose names begin with '.' but not
the :samp:`"."` and :samp:`".."` entries.
:type appendType: :obj:`bool`
:param appendType: If :samp:`True` each name in the listing will have a character appended
which indicates the type of *file*.
:type dereference: :obj:`bool`
:param dereference: If :samp:`True` symbolic links are dereferenced in the listing.
:type directoryOnly: :obj:`bool`
:param directoryOnly: If :samp:`True` only list directory name and not directory contents.
:rtype: :obj:`list` of :obj:`str`
:return: MDSS directory listing.
"""
args = ["-1"] # Separate listed entries with newline, one entry per line.
args += _getListDirAllArg(all)
args += _getListDirDirectoryOnlyArg(directoryOnly)
args += _getListDirAppendTypeArg(appendType)
args += _getListDirDereferenceArg(dereference)
    if dir is not None:
        args += [dir, ]
    # when dir is None, list the default directory but keep the option flags built above
p = MdssCommand(commandStr="ls", project=project, args=args).execute()
return p.communicate()[0].split("\n")[0:-1] | 7a26c9459381364ad145bab2b6230fd2037e5433 | 21,088 |
def uploadMetadata(doi, current, delta, forceUpload=False, datacenter=None):
"""
Uploads citation metadata for the resource identified by an existing
scheme-less DOI identifier (e.g., "10.5060/FOO") to DataCite. This
same function can be used to overwrite previously-uploaded metadata.
'current' and 'delta' should be dictionaries mapping metadata
element names (e.g., "Title") to values. 'current+delta' is
uploaded, but only if there is at least one DataCite-relevant
difference between it and 'current' alone (unless 'forceUpload' is
true). 'datacenter', if specified, should be the identifier's
datacenter, e.g., "CDL.BUL". There are three possible returns: None
on success; a string error message if the uploaded DataCite Metadata
Scheme record was not accepted by DataCite (due to an XML-related
problem); or a thrown exception on other error. No error checking
is done on the inputs.
"""
try:
oldRecord = formRecord("doi:" + doi, current)
except AssertionError:
oldRecord = None
m = current.copy()
m.update(delta)
try:
newRecord = formRecord("doi:" + doi, m)
except AssertionError, e:
return "DOI metadata requirements not satisfied: " + str(e)
if newRecord == oldRecord and not forceUpload:
return None
if not _enabled:
return None
# To hide transient network errors, we make multiple attempts.
for i in range(_numAttempts):
o = urllib2.build_opener(_HTTPErrorProcessor)
r = urllib2.Request(_metadataUrl)
# We manually supply the HTTP Basic authorization header to avoid
# the doubling of the number of HTTP transactions caused by the
# challenge/response model.
r.add_header("Authorization", _authorization(doi, datacenter))
r.add_header("Content-Type", "application/xml; charset=UTF-8")
r.add_data(newRecord.encode("UTF-8"))
c = None
try:
_modifyActiveCount(1)
c = o.open(r, timeout=_timeout)
s = c.read()
assert s.startswith("OK"), (
"unexpected return from DataCite store metadata operation: " + s
)
except urllib2.HTTPError, e:
message = e.fp.read()
if e.code in [400, 422]:
return "element 'datacite': " + message
if e.code != 500 or i == _numAttempts - 1:
raise e
except:
if i == _numAttempts - 1:
raise
else:
return None
finally:
_modifyActiveCount(-1)
if c:
c.close()
time.sleep(_reattemptDelay) | 22902f2649f20d638ba61b8db7ff6a32821bf965 | 21,089 |
from functools import lru_cache
def one_away(string_1: str, string_2: str) -> bool:
    """DP, classic edit-distance flavour:
    funny move, we calculate the LCS and then subtract it from the len() of the longer string
    """
    if string_1 == string_2: return False
    @lru_cache(maxsize=1024)
    def dp(s_1, s_2, distance=0):
        """standard longest common subsequence
"""
if not s_1 or not s_2: return distance
if s_1[0] == s_2[0]:
return dp(s_1[1:], s_2[1:], distance+1)
return max(dp(s_1[1:], s_2, distance), dp(s_1, s_2[1:], distance))
return max(len(string_1), len(string_2)) - dp(string_1, string_2) == 1 | 754cd1b383d21935992ba95bde65bde5340a8ef8 | 21,090 |
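# Example usage (illustrative sketch):
print(one_away("pale", "ple"))    # True  (one deletion away)
print(one_away("pales", "pale"))  # True  (one insertion away)
print(one_away("pale", "bake"))   # False (two edits needed)
print(one_away("pale", "pale"))   # False (identical strings are rejected up front)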
def test(net, loss_normalizer):
"""
Tests the Neural Network using IdProbNet on the test set.
Args:
net -- (IdProbNet instance)
loss_normalizer -- (Torch.Tensor) value to be divided from the loss
Returns:
3-tuple -- (Execution Time, End loss value,
Model's prediction after feed forward [Px])
"""
return run_model_data_t(net, loss_normalizer, NUM_TEST, 'test') | 4abdd1426545af6d093be2f549f6e2b8e86b3659 | 21,091 |
def scale_from_matrix(matrix):
"""Return scaling factor, origin and direction from scaling matrix.
"""
M = jnp.array(matrix, dtype=jnp.float64, copy=False)
M33 = M[:3, :3]
factor = jnp.trace(M33) - 2.0
try:
# direction: unit eigenvector corresponding to eigenvalue factor
w, V = jnp.linalg.eig(M33)
i = jnp.where(abs(jnp.real(w) - factor) < 1e-8)[0][0]
direction = jnp.real(V[:, i]).squeeze()
direction /= vector_norm(direction)
#WARNING(@cpgoodri): I'm not sure if this error-handling approach works with JAX, but it seems to pass tests...
except IndexError:
# uniform scaling
factor = (factor + 2.0) / 3.0
direction = None
# origin: any eigenvector corresponding to eigenvalue 1
w, V = jnp.linalg.eig(M)
i = jnp.where(abs(jnp.real(w) - 1.0) < 1e-8)[0]
if not len(i):
raise ValueError('no eigenvector corresponding to eigenvalue 1')
origin = jnp.real(V[:, i[-1]]).squeeze()
origin /= origin[3]
return factor, origin, direction | 1e6ef044b35ec4eff86764d9a222764c74977fb1 | 21,092 |
def get_fort44_info(NDX, NDY, NATM, NMOL, NION, NSTRA, NCL, NPLS, NSTS, NLIM):
"""Collection of labels and dimensions for all fort.44 variables, as collected in the
SOLPS-ITER 2020 manual.
"""
fort44_info = {
"dab2": [r"Atom density ($m^{-3}$)", (NDX, NDY, NATM)],
"tab2": [r"Atom temperature (eV )", (NDX, NDY, NATM)],
"dmb2": [r"Molecule density ($m^{-3}$)", (NDX, NDY, NMOL)],
"tmb2": [r"Molecule temperature (eV )", (NDX, NDY, NMOL)],
"dib2": [r"Test ion density ($m^{-3}$)", (NDX, NDY, NION)],
"tib2": [r" Test ion temperature (eV)", (NDX, NDY, NION)],
"rfluxa": [r"Radial flux density of atoms ($m^{-2} s^{-1}$)", (NDX, NDY, NATM)],
"rfluxm": [
r"Radial flux density of molecules ($m^{-2} s^{-1}$)",
(NDX, NDY, NMOL),
],
"pfluxa": [
r"Poloidal flux density of atoms ($m^{-2} s^{-1}$)",
(NDX, NDY, NATM),
],
"pfluxm": [
r"Poloidal flux density of molecules ($m^{-2} s^{-1}$)",
(NDX, NDY, NMOL),
],
"refluxa": [
r"Radial energy flux density carried by atoms ($W m^{-2}$)",
(NDX, NDY, NATM),
],
"refluxm": [
r"Radial energy flux density carried by molecules ($W m^{-2}$)",
(NDX, NDY, NMOL),
],
"pefluxa": [
r"Poloidal energy flux density carried by atoms ($W m^{-2}$)",
(NDX, NDY, NATM),
],
"pefluxm": [
r"Poloidal energy flux density carried by molecules ($W m^{-2}$)",
(NDX, NDY, NMOL),
],
#
"emiss": [
r"$H_\alpha$ emissivity due to atoms ($photons m^{-2} s^{-1}$)",
(NDX, NDY),
],
"emissmol": [
r"$H_\alpha$ emissivity due to molecules and molecular ions ($photons m^{-2} s^{-1}$)",
(NDX, NDY),
],
"srcml": [r"Molecule particle source (A)", (NDX, NDY, NMOL)],
"edissml": [
r"Energy spent for dissociating hydrogenic molecules (W)",
(NDX, NDY, NMOL),
],
"wldnek": [
r"Heat transferred by neutrals (W), total over strata",
(NLIM + NSTS,),
],
"wldnep": [
r"Potential energy released by neutrals (W), total over strata",
(NLIM + NSTS,),
],
"wldna": [
r"Flux of atoms impinging on surface (A), total over strata",
(NLIM + NSTS, NATM),
],
"ewlda": [
r"Average energy of impinging atoms on surface (eV), total over strata",
(NLIM + NSTS, NATM),
],
"wldnm": [
r"Flux of molecules impinging on surface (A), total over strata",
(NLIM + NSTS, NMOL),
],
"ewldm": [
r"Average energy of impinging molecules on surface (eV), total over strata",
(NLIM + NSTS, NMOL),
],
"p1,p2": [
r"Endpoints of surface (X and Y coordinates, in m), total over strata",
(NLIM,),
],
"wldra": [
r"Flux of reflected atoms from surface (A), total over strata",
(NLIM + NSTS, NATM),
],
"wldrm": [
r"Flux of reflected molecules from surface (A), total over strata",
(NLIM + NSTS, NMOL),
],
}
for i in np.arange(NSTRA + 1): # from 0 to NSTRA, unlike in manual
fort44_info.update(
{
f"wldnek({i})": [r"Heat transferred by neutrals (W)", (NLIM + NSTS,)],
f"wldnep({i})": [
r"Potential energy released by neutrals (W)",
(NLIM + NSTS,),
],
f"wldna({i})": [
r"Flux of atoms impinging on surface (A)",
(NLIM + NSTS, NATM),
],
f"ewlda({i})": [
r"Average energy of impinging atoms on surface (eV)",
(NLIM + NSTS, NATM),
],
f"wldnm({i})": [
r"Flux of molecules impinging on surface (A)",
(NLIM + NSTS, NMOL),
],
f"ewldm({i})": [
r"Average energy of impinging molecules on surface (eV)",
(NLIM + NSTS, NMOL),
],
f"wldra({i})": [
r"Flux of reflected atoms from surface (A)",
(NLIM + NSTS, NATM),
],
f"wldrm({i})": [
r"Flux of reflected molecules from surface (A)",
(NLIM + NSTS, NMOL),
],
}
)
fort44_info.update(
{
"wldpp": [
r"Flux of plasma ions impinging on surface (A), total over strata",
(NLIM + NSTS, NPLS),
],
"wldpa": [
r"Net flux of atoms emitted from surface (A), total over strata",
(NLIM + NSTS, NATM),
],
"wldpm": [
r"Net flux of molecules emitted from surface (A), total over strata",
(NLIM + NSTS, NMOL),
],
"wldpeb": [
r"Power carried by particles emitted from surface (W), total over strata",
(NLIM + NSTS,),
],
"wldspt": [
r"Flux of sputtered wall material (A), total over strata",
(NLIM + NSTS,),
],
"wldspta": [
r"Flux of sputtered wall material per atom (A), total over strata",
(NLIM + NSTS, NATM),
],
"wldsptm": [
r"Flux of sputtered wall material per molecule (A), total over strata",
(NLIM + NSTS, NMOL),
],
}
)
for i in np.arange(NSTRA + 1): # from 0 to NSTRA, unlike in manual
fort44_info.update(
{
f"wldpp({i})": [
r"Flux of plasma ions impinging on surface (A)",
(NLIM + NSTS, NPLS),
],
f"wldpa({i})": [
r"Net flux of atoms emitted from surface (A)",
(NLIM + NSTS, NATM),
],
f"wldpm({i})": [
r"Net flux of molecules emitted from surface (A)",
(NLIM + NSTS, NMOL),
],
f"wldpeb({i})": [
r"Power carried by particles emitted from surface (W)",
(NLIM + NSTS,),
],
f"wldspt({i})": [
r"Flux of sputtered wall material (A)",
(NLIM + NSTS,),
],
f"wldspta({i})": [
r"Flux of sputtered wall material per atom (A)",
(NLIM + NSTS, NATM),
],
f"wldsptm({i})": [
r"Flux of sputtered wall material per molecule (A)",
(NLIM + NSTS, NMOL),
],
}
)
fort44_info.update(
{
"isrftype": [r"ILIIN surface type variable in Eirene", (NLIM + NSTS,)],
"wlarea": [r"Surface area (m2)", (NLIM + NSTS,)],
"wlabsrp(A)": [r"Absorption rate for atoms", (NATM, NLIM + NSTS)],
"wlabsrp(M)": [r"Absorption rate for molecules", (NMOL, NLIM + NSTS)],
"wlabsrp(I)": [r"Absorption rate for test ions", (NION, NLIM + NSTS)],
"wlabsrp(P)": [r"Absorption rate for plasma ions", (NPLS, NLIM + NSTS)],
"wlpump(A)": [r"Pumped flux per atom (A)", (NATM, NLIM + NSTS)],
"wlpump(M)": [r"Pumped flux per molecule (A)", (NMOL, NLIM + NSTS)],
"wlpump(I)": [r"Pumped flux per test ion (A)", (NION, NLIM + NSTS)],
"wlpump(P)": [r"Pumped flux per plasma ion (A)", (NPLS, NLIM + NSTS)],
"eneutrad": [r"Radiation rate due to atoms (W)", (NDX, NDY, NATM)],
"emolrad": [r"Radiation rate due to molecules (W)", (NDX, NDY, NMOL)],
"eionrad": [r"Radiation rate due to test ions (W)", (NDX, NDY, NION)],
# eirdiag rather than eirdiag_nds, as in manual...
"eirdiag": [
r"Indices for segments on resolved non-standard surfaces",
(5 * NSTS + 1,),
],
"sarea_res": [r"Surface area of surface segment (m2)", (NCL,)],
"wldna_res": [
r"Flux of atoms impinging on surface segment (A)",
(NATM, NCL),
],
"wldnm_res": [
r"Flux of molecules impinging on surface segment (A)",
(NMOL, NCL),
],
"ewlda_res": [
r"Average energy of impinging atoms on surface segment (eV)",
(NATM, NCL),
],
"ewldm_res": [
r"Average energy of impinging molecules on surface segment (eV)",
(NMOL, NCL),
],
"ewldea_res": [
r"Energy flux carried by emitted atoms from surface segment (W)",
(NATM, NCL),
],
"ewldem_res": [
r"Energy flux carried by emitted molecules from surface segment (W)",
(NMOL, NCL),
],
"ewldrp_res": [
r"Total energy flux carried by emitted particles from surface segment (W)",
(NCL,),
],
"ewldmr_res": [
r"Flux of emitted molecules from recycling atoms (A)",
(NMOL, NCL),
],
"wldspt_res": [r"Flux of sputtered wall material (A)", (NCL,)],
"wldspta_res": [
r"Flux of sputtered wall material per atom (A)",
(NCL, NATM),
],
"wldsptm_res": [
r"Flux of sputtered wall material per molecule (A)",
(NCL, NMOL),
],
"wlpump_res(A)": [r"Pumped flux per atom (A)", (NCL, NATM)],
"wlpump_res(M)": [r"Pumped flux per molecule (A)", (NCL, NMOL)],
"wlpump_res(I)": [r"Pumped flux per test ion (A)", (NCL, NION)],
"wlpump_res(P)": [r"Pumped flux per plasma ion (A)", (NCL, NPLS)],
"ewldt_res": [r"Total wall power loading from Eirene particles", (NCL,)],
"pdena_int": [
r"Integral number of atoms over the entire Eirene computational grid",
(NATM, NSTRA + 1),
],
"pdenm_int": [
r"Integral number of molecules over the entire Eirene computational grid",
(NMOL, NSTRA + 1),
],
"pdeni_int": [
r"Integral number of test ions over the entire Eirene computational grid",
(NION, NSTRA + 1),
],
"pdena_int_b2": [
r"Integral number of atoms over the B2.5 computational grid",
(NATM, NSTRA + 1),
],
"pdenm_int_b2": [
r"Integral number of molecules over the B2.5 computational grid",
(NMOL, NSTRA + 1),
],
"pdeni_int_b2": [
r"Integral number of test ions over the B2.5 computational grid",
(NION, NSTRA + 1),
],
"edena_int": [
r"Integral energy carried by atoms over the entire Eirene computational grid (J)",
(NATM, NSTRA + 1),
],
"edenm_int": [
r"Integral energy carried by molecules over the entire Eirene computational grid (J)",
(NMOL, NSTRA + 1),
],
"edeni_int": [
r"Integral energy carried by test ions over the entire Eirene computational grid (J)",
(NION, NSTRA + 1),
],
"edena_int_b2": [
r"Integral energy carried by atoms over the B2.5 computational grid (J)",
(NATM, NSTRA + 1),
],
"edenm_int_b2": [
r"Integral energy carried by molecules over the B2.5 computational grid (J)",
(NMOL, NSTRA + 1),
],
"edeni_int_b2": [
r"Integral energy carried by test ions over the B2.5 computational grid (J)",
(NION, NSTRA + 1),
],
}
)
# extra, undocumented
fort44_info.update({"wall_geometry": [r"Wall geometry points", (4 * NLIM,)]})
return fort44_info | 0eca35ae512d3fd690124c45d5cde303d860ae0b | 21,093 |
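# Example usage (illustrative sketch): the grid/species dimensions below are made up.
info = get_fort44_info(NDX=98, NDY=38, NATM=2, NMOL=2, NION=2,
                       NSTRA=4, NCL=100, NPLS=4, NSTS=10, NLIM=200)
label, shape = info["dab2"]
print(label, shape)  # Atom density ($m^{-3}$) (98, 38, 2)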
def lens2memnamegen_first50(nmems):
"""Generate the member names for LENS2 simulations
Input:
nmems = number of members
Output:
memstr(nmems) = an array containing nmems strings corresponding to the member names
"""
memstr=[]
for imem in range(0,nmems,1):
if (imem < 10):
memstr1=str(1000+imem*20+1)
memstr2=str(imem+1).zfill(3)
memstr.append(memstr1+'.'+memstr2)
if ((imem >= 10) and (imem < 20)):
memstr1=str(1231)
memstr2=str(imem-10+1).zfill(3)
memstr.append(memstr1+'.'+memstr2)
if ((imem >= 20) and (imem < 30)):
memstr1=str(1251)
memstr2=str(imem-20+1).zfill(3)
memstr.append(memstr1+'.'+memstr2)
if ((imem >= 30) and (imem < 40)):
memstr1=str(1281)
memstr2=str(imem-30+1).zfill(3)
memstr.append(memstr1+'.'+memstr2)
if ((imem >= 40) and (imem < 50)):
memstr1=str(1301)
memstr2=str(imem-40+1).zfill(3)
memstr.append(memstr1+'.'+memstr2)
return memstr | 81ebbf1b17c56d604d8c6c9bc7bacd4a3093ec82 | 21,094 |
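# Example usage (illustrative sketch):
print(lens2memnamegen_first50(3))  # ['1001.001', '1021.002', '1041.003']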
def initialize_settings(tool_name, source_path, dest_file_name=None):
""" Creates settings directory and copies or merges the source to there.
In case source already exists, merge is done.
Destination file name is the source_path's file name unless dest_file_name
is given.
"""
settings_dir = os.path.join(SETTINGS_DIRECTORY, tool_name)
if not os.path.exists(settings_dir):
os.mkdir(settings_dir)
if not dest_file_name:
dest_file_name = os.path.basename(source_path)
settings_path = os.path.join(settings_dir, dest_file_name)
if not os.path.exists(settings_path):
shutil.copy(source_path, settings_path)
else:
try:
SettingsMigrator(source_path, settings_path).migrate()
except ConfigObjError, parsing_error:
print 'WARNING! corrupted configuration file replaced with defaults'
print parsing_error
shutil.copy(source_path, settings_path)
return os.path.abspath(settings_path) | c32e35f6323e2ae87c5d53a8b2e2c0d69a30c6e4 | 21,095 |
def get_stopword_list(filename=stopword_filepath):
""" Get a list of stopword from a file """
with open(filename, 'r', encoding=encoding) as f:
stoplist = [line for line in f.read().splitlines()]
return stoplist | 8578428ec387309907f428f3eec91a526f11167a | 21,096 |
def to_text(value):
"""Convert an opcode to text.
*value*, an ``int`` the opcode value,
Raises ``dns.opcode.UnknownOpcode`` if the opcode is unknown.
Returns a ``str``.
"""
return Opcode.to_text(value) | 85395ecdaa2fae4fc121072747401c114d7b4ed3 | 21,098 |
import numpy as np
import torch
def _demo_mm_inputs(input_shape, num_classes):
"""Create a superset of inputs needed to run test or train batches.
Args:
input_shape (tuple):
input batch dimensions
num_classes (int):
number of semantic classes
"""
(N, C, H, W) = input_shape
rng = np.random.RandomState(0)
imgs = rng.rand(*input_shape)
segs = rng.randint(
low=0, high=num_classes - 1, size=(N, 1, H, W)).astype(np.uint8)
img_metas = [{
'img_shape': (H, W, C),
'ori_shape': (H, W, C),
'pad_shape': (H, W, C),
'filename': '<demo>.png',
'scale_factor': 1.0,
'flip': False,
} for _ in range(N)]
mm_inputs = {
'imgs': torch.FloatTensor(imgs).requires_grad_(True),
'img_metas': img_metas,
'gt_semantic_seg': torch.LongTensor(segs)
}
return mm_inputs | 9d8de5d5bd337720f386a45ad40f9e901a999b52 | 21,100 |
import socket
def get_ephemeral_port(sock_family=socket.AF_INET, sock_type=socket.SOCK_STREAM):
"""Return an ostensibly available ephemeral port number."""
# We expect that the operating system is polite enough to not hand out the
# same ephemeral port before we can explicitly bind it a second time.
s = socket.socket(sock_family, sock_type)
s.bind(('', 0))
port = s.getsockname()[1]
s.close()
return port | 37287b70e35b8aa7fbdb01ced1882fb3bbf38543 | 21,101 |
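# Example usage (illustrative sketch, reusing the `socket` import above): the port is
# only "ostensibly" free, so another process could still grab it before the bind below.
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.bind(("127.0.0.1", get_ephemeral_port()))
server.listen(1)
print("listening on", server.getsockname()[1])
server.close()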
from typing import Optional
def IR_guess_model(spectrum: ConvSpectrum, peak_args: Optional[dict] = None) -> tuple[Model, dict]:
"""
Guess a fit for the IR spectrum based on its peaks.
:param spectrum: the ConvSpectrum to be fit
:param peak_args: arguments for finding peaks
:return: Model, parameters
"""
min_intensity, max_intensity = spectrum.range
range_intensities = max_intensity - min_intensity
IR_peak_defaults = {
"prominence": 0.1 * range_intensities,
}
peak_args = IR_peak_defaults if peak_args is None else {**IR_peak_defaults, **peak_args}
peak_indices, peak_properties = spectrum.peaks(**peak_args, indices=True)
params = Parameters()
composite_model = None
# Fit the peaks
for i, peak_idx in enumerate(peak_indices):
prefix = f"a{i}_"
model = models.GaussianModel(prefix=prefix)
center = spectrum.energies[peak_idx]
height = spectrum.intensities[peak_idx]
model.set_param_hint("amplitude", min=0.05 * height)
model.set_param_hint("center", min=center - 10, max=center + 10)
model.set_param_hint("sigma", min=0.1, max=100)
peak_params = {
f"{prefix}amplitude": height * 0.8,
f"{prefix}center": center,
f"{prefix}sigma": 10,
}
        # Parameters.update mutates the collection in place; avoid rebinding the name
        # to its return value, which would lose the accumulated parameters.
        params.update(model.make_params(**peak_params))
composite_model = model if composite_model is None else composite_model + model
return composite_model, params | fa56e3c183ef08b35f177df1d727ff134c964eaf | 21,102 |
def virus_monte_carlo(initial_infected, population, k):
""" Generates a list of points to which some is infected
at a given value k starting with initial_infected infected.
There is no mechanism to stop the infection from reaching
the entire population.
:param initial_infected: The amount of people whom are infected at the
start.
:type initial_infected: int
:param population: The total population sample.
:type population: int
:param k: The rate of infection.
:type k: float
:return: An array of the amount of people per time infected.
:rtype: tuple(time, infected)
"""
people_array = np.arange(1, population+1, dtype=int)
current_infected = initial_infected
people_infected = np.array([current_infected])
time_array = np.array([0])
# Array math.
counter = 0
for _ in people_array:
probability = (k)*current_infected/population
random_array = np.random.uniform(0, 1, size=people_array.size)
random_bool = np.where(random_array <= probability, True, False)
people_array = people_array[random_bool != True]
if people_array.size != population:
current_infected = (population-people_array.size)
people_infected = np.append(people_infected, current_infected)
counter+=1
time_array = np.append(time_array, counter)
if people_infected.size == population:
break
return (time_array, people_infected) | 856af13a8a7fdbb931ba32b97ff7bd5207e9ca49 | 21,103 |
def threadsafe_generator(f):
"""
A decorator that takes a generator function and makes it thread-safe.
"""
def g(*a, **kw):
return threadsafe_iter(f(*a, **kw))
return g | 013e0df91f70da8c8f4f501bc31d8bddcf378787 | 21,104 |
def lastmsg(self):
"""
    Return last logged message if **_last_message** attribute is available.
    Returns:
        last message or empty str
"""
return getattr(self, '_last_message', '') | ad080c05caadbb644914344145460db0164f017c | 21,105 |
def _callback_on_all_dict_keys(dt, callback_fn):
"""
Callback callback_fn on all dictionary keys recursively
"""
result = {}
for (key, val) in dt.items():
if type(val) == dict:
val = _callback_on_all_dict_keys(val, callback_fn)
result[callback_fn(key)] = val
return result | 3cab018413a7ba8a0e5bbae8574025253a2ea885 | 21,106 |
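# Example usage (illustrative sketch): upper-case every key, recursively.
nested = {"name": "ada", "address": {"city": "london", "zip": "NW1"}}
print(_callback_on_all_dict_keys(nested, str.upper))
# {'NAME': 'ada', 'ADDRESS': {'CITY': 'london', 'ZIP': 'NW1'}}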
def top_ngrams(df, n=2, ngrams=10):
"""
* Not generalizable in this form *
* This works well, but is very inefficient and should be optimized or rewritten *
    Takes a preprocessed, tokenized column and creates a large list.
Returns most frequent ngrams
Arguments:
    df = name of DataFrame with a 'lemma' column (this will be generalizable in a future commit)
n = number of words per grouping eg. 1, 2 or 3
ngrams = Number of ngrams to return
"""
word_list = preprocess(''.join(str(df['lemma'].tolist())))
return (pd.Series(nltk.ngrams(word_list, n)).value_counts())[:ngrams] | a6c540a30a288a8d26bf6f966b44b9f080db0026 | 21,108 |
def install_openvpn(instance, arg, verbose=True):
""" """
install(instance, {"module":"openvpn"}, verbose=True)
generate_dh_key(instance, {"dh_name":"openvpn", "key_size":"2048"})
server_conf = open("simulation/workstations/"+instance.name+"/server_openvpn.conf", "w")
server_conf.write("port 1197\n")
server_conf.write("proto udp\n")
server_conf.write("dev tun\n")
server_conf.write("ca /certs/"+arg["private_key_certificate_name"]+"/"+arg["private_key_middle_certificate_name"]+".cert\n")
server_conf.write("cert /certs/"+arg["private_key_certificate_name"]+"/"+arg["private_key_certificate_name"]+".cert\n")
server_conf.write("key /certs/"+arg["private_key_certificate_name"]+"/"+arg["private_key_certificate_name"]+".key\n")
server_conf.write("dh /certs/dh/openvpn-2048.key\n")
server_conf.write("server 10.122.0.0 255.255.255.0 \n")
server_conf.write("push \"10.122.1.0 255.255.255.0\"\n")
server_conf.write("keepalive \n")
server_conf.write("cipher AES-128-CBC \n")
server_conf.write("comp-lzo \n")
server_conf.write("max-clients "+arg["max_client"]+"\n")
if arg["user"] == "":
server_conf.write("user nobody\n")
else:
server_conf.write("user "+arg["user"]+"\n")
if arg["group"] == "":
server_conf.write("group nobody\n")
else:
server_conf.write("group "+arg["group"]+"\n")
server_conf.write("persist-key\n")
server_conf.write("persist-tun\n")
server_conf.write("status openvpn-status.log\n")
server_conf.write("log openvpn.log\n")
server_conf.write("verb 9\n")
server_conf.close()
if upload_file(instance, {"instance_path":"/etc/openvpn/server.conf", "host_manager_path": "simulation/workstations/"+instance.name+"/server_openvpn.conf"}, verbose=False) == 1:
return 1
if restart_service(instance, {"service":"openvpn"}) == 1:
return 1 | d95d99e7847dd08c43f54fc3dde769f69888da77 | 21,109 |
def rossoporn_parse(driver: webdriver.Firefox) -> tuple[list[str], int, str]:
"""Read the html for rossoporn.com"""
#Parses the html of the site
soup = soupify(driver)
dir_name = soup.find("div", class_="content_right").find("h1").text
dir_name = clean_dir_name(dir_name)
images = soup.find_all("div", class_="wrapper_g")
images = ["".join([PROTOCOL, img.get("src").replace("tn_", "")]) for tag_list in images for img in tag_list.find_all("img")]
num_files = len(images)
driver.quit()
return (images, num_files, dir_name) | 21aad0798bc3e13badb1076ec40c36c56f47ebf7 | 21,110 |
def pid_from_context(_, context, **kwargs):
"""Get PID from marshmallow context."""
pid = (context or {}).get('pid')
return pid.pid_value if pid else missing | 350fd4c915e186dd41575c5842e47beb7d055fb5 | 21,111 |
def score_text(text, tokenizer, preset_model, finetuned_model):
""" Uses rule-based rankings. Higher is better, but different features have different scales.
Args:
text (str/ List[str]): one story to rank.
        tokenizer (PyTorch tokenizer): GPT2 byte tokenizer.
        preset_model (PyTorch model): preset GPT2 model of the same or a different size than the finetuned model.
        finetuned_model (PyTorch model): fine-tuned GPT2 model.
    Returns a scores np.array corresponding to text.
"""
assert isinstance(
text, (str, list)), f"score_text accepts type(text) = str/list, but got {type(text)}"
if isinstance(text, list):
text = ' '.join(text)
# Keep same order as in constants.FEATURES
scores = [0 for _ in range(len(constants.FEATURES))]
texts_sentences = split_to_sentences(text)
# scores[0] = _coherency(texts_sentences, lsa_embedder)
scores[1] = _readabilty(text, texts_sentences)
# Set of text words without punctuation and stop words.
filtered_words = list(filter(
lambda word: word not in constants.STOP_WORDS, split_words(text.lower().strip())))
filtered_words_set = set(filtered_words)
# Sentiment.
scores[2] = _sentiment_polarity(filtered_words)
# Set based measures.
scores[3], scores[4] = _simplicity(filtered_words_set), _diversity(
filtered_words, filtered_words_set)
# The bigger differene, the more tale-like, similar to the fine-tuned model, the text is.
scores[5] = KLDIV_error_per_text(
tokenizer, preset_model, finetuned_model, text)
# print(" | ".join(f'{key}: {score:.2f}' for key,
# score in zip(constants.FEATURES, scores)))
return np.array(scores) | e304975b55c44e78f6ce92f4df9d1ba563389b8b | 21,112 |
def parse_cards(account_page_content):
"""
Parse card metadata and product balances from /ClipperCard/dashboard.jsf
"""
begin = account_page_content.index(b'<!--YOUR CLIPPER CARDS-->')
end = account_page_content.index(b'<!--END YOUR CLIPPER CARDS-->')
card_soup = bs4.BeautifulSoup(account_page_content[begin:end], "html.parser")
serial_numbers = find_values(card_soup, 'Serial Number:', get_next_sibling_text)
nicknames = find_values(card_soup, 'Card Nickname:', get_inner_display_text)
types = find_values(card_soup, 'Type:', get_next_sibling_text)
statuses = find_values(card_soup, 'Status:', get_next_sibling_text)
products = parse_card_products(card_soup)
cards = []
for sn, nn, tp, st, pd in zip(serial_numbers, nicknames, types, statuses, products):
cards.append(Card(serial_number=sn, nickname=nn, type=tp, status=st, products=pd))
return cards | 6ec10941aebe88af27a75c407e6805698d5cf31c | 21,116 |
def interaction_time_data_string(logs, title):
"""
times = utils.valid_values_for_enum((models.LogEntry.TIME_CHOICES))
contexts_map = dict(models.LogEntry.TIME_CHOICES)
counts = {contexts_map[k]: v
for k, v in _counts_by_getter(logs, lambda l: l.time_of_day).items()
}
plt.clf()
xs = list(range(len(times)))
ys = [counts.get(cont, 0) for cont in times]
plt.bar(xs, ys)
plt.xticks(xs, times)
plt.title(title)
plt.xlabel("Social Context")
plt.ylabel('Num Interactions')
plt.gca().xaxis.grid(False)
plt.tight_layout()
return pyplot_to_base64()
"""
contexts = utils.valid_values_for_enum((models.LogEntry.SOCIAL_CHOICES))
contexts_map = dict(models.LogEntry.SOCIAL_CHOICES)
reacc_map = dict(models.LogEntry.REACTION_CHOICES)
interaction_map = dict(models.LogEntry.MEDIUM_CHOICES)
time_map = dict(models.LogEntry.TIME_CHOICES)
first_agg = recommender.group_list_by_sel(logs, lambda l: interaction_map[l.interaction_medium])
plt.clf()
keys = sorted(first_agg.keys())
sub_keys = sorted(list(time_map.keys()))
xs = np.arange(len(sub_keys)) * 2
width = .35
colors = np.array([
[205,224,241],
[190,26,9],
[0,105,253],
[255,114,0],
]) / 255.0
for i, reacc in enumerate( keys ):
sub_logs = first_agg[reacc]
counts = _counts_by_getter(sub_logs, lambda l: l.time_of_day)
ys = [counts.get(cont, 0) for cont in sub_keys]
plt.bar(xs + i * width, ys, width, label=reacc, color=colors[i])
ax = plt.gca()
ax.set_xticks(xs + width * (len(keys) // 2))
ax.set_xticklabels([time_map[k] for k in sub_keys])
plt.title(title)
plt.xlabel("Social Context")
plt.ylabel('Num Interactions')
plt.legend()
ax.xaxis.grid(False)
plt.tight_layout()
return pyplot_to_base64() | fc6f6a32d39f3bd87c3b7b816e333aef462fb0f3 | 21,117 |
import math
def _label_boost(boost_form, label):
"""Returns the label boost.
Args:
boost_form: Either NDCG or PRECISION.
label: The example label.
Returns:
A list of per list weight.
"""
boost = {
'NDCG': math.pow(2.0, label) - 1.0,
'PRECISION': 1.0 if label >= 1.0 else 0.0,
}
return boost[boost_form] | 811e87949b0bbe7dc98f63814b343ffd90fe129a | 21,118 |
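# Example usage (illustrative sketch): NDCG gain grows exponentially with the label,
# while PRECISION only distinguishes relevant (label >= 1) from irrelevant.
print(_label_boost('NDCG', 3.0))       # 2**3 - 1 = 7.0
print(_label_boost('PRECISION', 0.0))  # 0.0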
def has_matching_ts_templates(reactant, bond_rearr):
"""
See if there are any templates suitable to get a TS guess from a template
Arguments:
reactant (autode.complex.ReactantComplex):
bond_rearr (autode.bond_rearrangement.BondRearrangement):
Returns:
bool:
"""
mol_graph = get_truncated_active_mol_graph(graph=reactant.graph,
active_bonds=bond_rearr.all)
ts_guess_templates = get_ts_templates()
for ts_template in ts_guess_templates:
if template_matches(reactant=reactant, ts_template=ts_template,
truncated_graph=mol_graph):
return True
return False | 10061734d2831668099f3e85d99366dda9f51157 | 21,119 |
def get_commands(xml: objectify.ObjectifiedElement):
"""
    Returns the action from the xml body.
    :param xml: parsed XML element
    :return: the value of the body's "action" attribute
"""
return xml.body.attrib["action"] | 3724e00c626814e792911ae094a5b200d8593f4c | 21,120 |
def compression_point(w_db, slope = 1, compression = 1,
extrapolation_point = None, axis = -1):
"""Return input referred compression point"""
interpol_line = calc_extrapolation_line(w_db, slope, extrapolation_point,
axis)
return cross(interpol_line - w_db, compression) | 4c8793c5796d1359aa1fc00f226ecafda98c3f61 | 21,121 |
from typing import List
import logging
def pattern_remove_incomplete_region_or_spatial_path(
perception_graph: PerceptionGraphPattern
) -> PerceptionGraphPattern:
"""
Helper function to return a `PerceptionGraphPattern` verifying
that region and spatial path perceptions contain a reference object.
"""
graph = perception_graph.copy_as_digraph()
region_and_path_nodes: ImmutableSet[NodePredicate] = immutableset(
node
for node in graph.nodes
if isinstance(node, IsPathPredicate) or isinstance(node, RegionPredicate)
)
nodes_without_reference: List[NodePredicate] = []
for node in region_and_path_nodes:
has_reference_edge: bool = False
for successor in graph.successors(node):
predicate = graph.edges[node, successor]["predicate"]
if isinstance(predicate, RelationTypeIsPredicate):
if predicate.relation_type in [
REFERENCE_OBJECT_LABEL,
REFERENCE_OBJECT_DESTINATION_LABEL,
REFERENCE_OBJECT_SOURCE_LABEL,
]:
has_reference_edge = True
break
if not has_reference_edge:
nodes_without_reference.append(node)
logging.info(
f"Removing incomplete regions and paths. "
f"Removing nodes: {nodes_without_reference}"
)
graph.remove_nodes_from(nodes_without_reference)
def sort_by_num_nodes(g: DiGraph) -> int:
return len(g.nodes)
# We should maybe consider doing this a different way
# As this approach just brute force solves the problem rather than being methodical about it
if number_weakly_connected_components(graph) > 1:
        components = [
            subgraph(graph, comp) for comp in weakly_connected_components(graph)
        ]
components.sort(key=sort_by_num_nodes, reverse=True)
computed_graph = subgraph(graph, components[0].nodes)
removed_nodes: List[NodePredicate] = []
for i in range(1, len(components)):
removed_nodes.extend(components[i].nodes)
logging.info(f"Cleanup disconnected elements. Removing: {removed_nodes}")
else:
computed_graph = graph
return PerceptionGraphPattern(computed_graph, dynamic=perception_graph.dynamic) | cbcc79602bf87e1ea88f8a0027d6cd19b74fb81c | 21,122 |
def other_shifted_bottleneck_distance(A, B, fudge=default_fudge, analysis=False):
"""Compute the shifted bottleneck distance between two diagrams, A and B (multisets)"""
A = pu.SaneCounter(A)
B = pu.SaneCounter(B)
if not A and not B:
return 0
radius = fudge(upper_bound_on_radius(A, B))
events = event_queue.EventQueue(A, B)
matching = GeometricBipartiteMatching(A, B)
# these counters are for performance monitoring only - they don't affect the logic
ctr, R_ctr, L_ctr, fail_ctr, win_ctr = 0, 0, 0, 0, 0
while events and radius > epsilon:
ctr += 1
event = events.next_event(radius)
if isinstance(event, event_queue.ExitEvent):
R_ctr += 1
matching.remove_all(event.edge)
else:
L_ctr += 1
if birth(event.edge, radius) >= death(event.edge, radius):
win_ctr += 1
continue # relies on ties being broken with the highest-radius edge
# assert not matching.diagonal_perfect()
if matching.diagonal_perfect():
fail_ctr += 1
radius = fudge(max(
events.next_diagonal_height(),
radius - (events.next_exit_shift(radius)
- birth(event.edge, radius)) / 2))
events.push(event)
continue
matching.maximize_matching(
shift=event.shift_to_check,
radius=radius)
if matching.diagonal_perfect():
# radius = fudge(matching.value())
events.push(event)
if analysis:
print("other:", len(A) + len(B), "total", ctr, "R", R_ctr, "L", L_ctr, "fail", fail_ctr, "win", win_ctr)
return radius | 51455945743bfc5f262711e826d1097122309f83 | 21,123 |
def getCountdown(c):
"""
    Split a timedelta into days, hours, minutes and seconds for human-readable display.
"""
days = c.days
c = c.total_seconds()
    hours = round(c//3600)  # note: computed from total_seconds, so whole days are included here too
minutes = round(c // 60 - hours * 60)
seconds = round(c - hours * 3600 - minutes * 60)
return days, hours, minutes, seconds | f49225ae2680192340720c8958aa19b9e9369f5f | 21,124 |
def fromPSK(valstr):
"""A special version of fromStr that assumes the user is trying to set a PSK.
In that case we also allow "none", "default" or "random" (to have python generate one), or simpleN
"""
if valstr == "random":
return genPSK256()
elif valstr == "none":
return bytes([0]) # Use the 'no encryption' PSK
elif valstr == "default":
return bytes([1]) # Use default channel psk
elif valstr.startswith("simple"):
# Use one of the single byte encodings
return bytes([int(valstr[6:]) + 1])
else:
return fromStr(valstr) | 73fa661458601ec33d2b839aeea060f7a26b530f | 21,125 |
def list_hierarchy(class_name, bases):
"""
Creates a list of the class hierarchy
Args:
-----
class_name: name of the current class
bases: list/tuple of bases for the current class
"""
class_list = [Uri(class_name)]
for base in bases:
if base.__name__ not in IGNORE_CLASSES:
class_list.append(Uri(base.__name__))
    return list(set(class_list)) | 1b82dfe6576a472c04bb7cb53f8eed94a83a1ac1 | 21,127
def rss():
"""Return ps -o rss (resident) memory in kB."""
return float(mem("rss")) / 1024 | 92580a4873f2afca3f419a7f661e5cd39ec28b96 | 21,129 |
from operator import itemgetter
def compare_words(
word1_features,
word2_features,
count=10,
exclude=set(),
similarity_degree=0.5,
separate=False,
min_feature_value=0.3
):
"""
    Compare two words based on their lists of similar words (or any word features in general).
    Returns 3 lists: features characteristic of the first word, of the second, and common ones.
    :param dict[int, float] word1_features: features of the first word: dict {feature: value}
    :param dict[int, float] word2_features: features of the second word: dict {feature: value}
    :param int count: number of words in the results
    :param float similarity_degree: number in 0..1; 1 means full separation of the words, 0 means maximal search for similarity
    :param bool separate: "strict separation": forbid one word from landing in several columns
    :param float min_feature_value: minimum feature value
"""
    diff1, diff2, common = {}, {}, {}  # Characteristic of the first word, of the second, and common
features = set(word1_features.keys()).union(word2_features.keys())
for feature in features:
if feature in exclude:
continue
feature1 = word1_features.get(feature, 0)
feature2 = word2_features.get(feature, 0)
if feature1 < min_feature_value and feature2 < min_feature_value:
continue
diff1_value = feature1 * (1 - feature2)
diff2_value = feature2 * (1 - feature1)
common_value = (feature1 * feature2) ** similarity_degree
max_value = max(diff1_value, diff2_value, common_value)
if diff1_value == max_value or not separate:
diff1[feature] = diff1_value
if diff2_value == max_value or not separate:
diff2[feature] = diff2_value
if common_value == max_value or not separate:
common[feature] = common_value
return (
sorted(diff1.items(), key=itemgetter(1), reverse=True)[:count],
sorted(diff2.items(), key=itemgetter(1), reverse=True)[:count],
sorted(common.items(), key=itemgetter(1), reverse=True)[:count],
) | 4a04292e48911e6a4152cb03c19cda8de51802fb | 21,130 |
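# Example usage (illustrative sketch): feature ids map to weights in 0..1; the made-up
# values below just show which bucket each feature tends to land in.
w1 = {1: 0.9, 2: 0.8, 3: 0.4}
w2 = {2: 0.7, 3: 0.5, 4: 0.9}
distinct1, distinct2, common = compare_words(w1, w2, count=3)
print(distinct1)  # dominated by feature 1 (strong in w1 only)
print(distinct2)  # dominated by feature 4 (strong in w2 only)
print(common)     # feature 2 scores highest here (strong in both)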
def dispatch_for_binary_elementwise_apis(x_type, y_type):
"""Decorator to override default implementation for binary elementwise APIs.
The decorated function (known as the "elementwise api handler") overrides
the default implementation for any binary elementwise API whenever the value
for the first two arguments (typically named `x` and `y`) match the specified
type annotations. The elementwise api handler is called with two arguments:
`elementwise_api_handler(api_func, x, y)`
Where `x` and `y` are the first two arguments to the elementwise api, and
`api_func` is a TensorFlow function that takes two parameters and performs the
elementwise operation (e.g., `tf.add`).
The following example shows how this decorator can be used to update all
binary elementwise operations to handle a `MaskedTensor` type:
>>> class MaskedTensor(tf.experimental.ExtensionType):
... values: tf.Tensor
... mask: tf.Tensor
>>> @dispatch_for_binary_elementwise_apis(MaskedTensor, MaskedTensor)
... def binary_elementwise_api_handler(api_func, x, y):
... return MaskedTensor(api_func(x.values, y.values), x.mask & y.mask)
>>> a = MaskedTensor([1, 2, 3, 4, 5], [True, True, True, True, False])
>>> b = MaskedTensor([2, 4, 6, 8, 0], [True, True, True, False, True])
>>> c = tf.add(a, b)
>>> print(f"values={c.values.numpy()}, mask={c.mask.numpy()}")
values=[ 3 6 9 12 5], mask=[ True True True False False]
Args:
x_type: A type annotation indicating when the api handler should be called.
y_type: A type annotation indicating when the api handler should be called.
Returns:
A decorator.
#### Registered APIs
The binary elementwise APIs are:
<<API_LIST>>
"""
def decorator(handler):
if (x_type, y_type) in _ELEMENTWISE_API_HANDLERS:
raise ValueError("A binary elementwise dispatch handler "
f"({_ELEMENTWISE_API_HANDLERS[x_type, y_type]}) "
f"has already been registered for ({x_type}, {y_type}).")
_ELEMENTWISE_API_HANDLERS[x_type, y_type] = handler
for api in _BINARY_ELEMENTWISE_APIS:
_add_dispatch_for_binary_elementwise_api(api, x_type, y_type, handler)
return handler
return decorator | 743d6f85b843f6200cf8b6c6361fc81154c37936 | 21,131 |
def grid(mat, i, j, k):
"""Returns true if the specified grid contains k"""
return lookup(k, [ mat[i + p][j + q] for p in range(3) for q in range(3) ]) | b2df3a905ada922011fc344f555a908aa03d5f64 | 21,132 |