content (string, length 35 to 762k) | sha1 (string, length 40) | id (int64, 0 to 3.66M) |
---|---|---|
def sanitize_option(option):
"""
Format the given string by stripping the trailing parentheses
eg. Auckland City (123) -> Auckland City
:param option: String to be formatted
:return: Substring without the trailing parentheses
"""
return ' '.join(option.split(' ')[:-1]).strip() | ece0a78599e428ae8826b82d7d00ffc39495d27f | 21,577 |
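# Hedged usage sketch for sanitize_option above (the input strings are made-up examples):
# it simply drops the last whitespace-separated token, assumed to be the "(123)" part.
assert sanitize_option("Auckland City (123)") == "Auckland City"
assert sanitize_option("Wellington (7)") == "Wellington"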
def node_values_for_tests():
"""Creates a list of possible node values for parameters
Returns:
List[Any]: possible node values
"""
return [1, 3, 5, 7, "hello"] | b919efc5e59a5827b3b27e4f0a4cd070ceb9a5a4 | 21,578 |
import torch
def computeGramMatrix(A, B):
"""
Constructs a linear kernel matrix between A and B.
We assume that each row in A and B represents a d-dimensional feature vector.
Parameters:
A: a (n_batch, n, d) Tensor.
B: a (n_batch, m, d) Tensor.
Returns: a (n_batch, n, m) Tensor.
"""
assert(A.dim() == 3)
assert(B.dim() == 3)
assert(A.size(0) == B.size(0) and A.size(2) == B.size(2))
return torch.bmm(A, B.transpose(1,2)) | c9b221b3d6a8c7a16337178a1f148873b27ec04a | 21,579 |
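# Hedged usage sketch for computeGramMatrix above: a batch of linear-kernel (Gram) matrices.
# All shapes below are illustrative only.
import torch
A = torch.randn(4, 5, 3)   # (n_batch=4, n=5, d=3)
B = torch.randn(4, 7, 3)   # (n_batch=4, m=7, d=3)
G = computeGramMatrix(A, B)
assert G.shape == (4, 5, 7)
# Each entry is the dot product of a row of A with a row of B within the same batch.
assert torch.allclose(G[0, 0, 0], A[0, 0] @ B[0, 0])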
def parse_config(config):
"""Backwards compatible parsing.
    :param config: ConfigParser object initialized with nvp.ini.
:returns: A tuple consisting of a control cluster object and a
plugin_config variable.
raises: In general, system exceptions are not caught but are propagated
up to the user. Config parsing is still very lightweight.
At some point, error handling needs to be significantly
        enhanced to provide user friendly error messages, clean program
        exits, rather than exceptions propagated to the user.
"""
# Extract plugin config parameters.
try:
failover_time = config.get('NVP', 'failover_time')
    except ConfigParser.NoOptionError:
failover_time = str(DEFAULT_FAILOVER_TIME)
try:
concurrent_connections = config.get('NVP', 'concurrent_connections')
    except ConfigParser.NoOptionError:
concurrent_connections = str(DEFAULT_CONCURRENT_CONNECTIONS)
plugin_config = {
'failover_time': failover_time,
'concurrent_connections': concurrent_connections,
}
LOG.info('parse_config(): plugin_config == "%s"' % plugin_config)
cluster = NVPCluster('cluster1')
# Extract connection information.
try:
defined_connections = config.get('NVP', 'NVP_CONTROLLER_CONNECTIONS')
for conn_key in defined_connections.split():
args = [config.get('NVP', 'DEFAULT_TZ_UUID')]
args.extend(config.get('NVP', conn_key).split(':'))
try:
cluster.add_controller(*args)
            except Exception as e:
LOG.fatal('Invalid connection parameters: %s' % str(e))
sys.exit(1)
return cluster, plugin_config
    except Exception as e:
LOG.info('No new style connections defined: %s' % e)
# Old style controller specification.
args = [config.get('NVP', k) for k in CONFIG_KEYS]
try:
cluster.add_controller(*args)
    except Exception:
LOG.fatal('Invalid connection parameters.')
sys.exit(1)
return cluster, plugin_config | 74689d11c1d610a9211dc5895ff42a8b8e2389ae | 21,580 |
def deletable_proxy_user(request, onefs_client):
"""Get the name of an existing proxy user that it is ok to delete."""
return _deletable_proxy_user(request, onefs_client) | c7440099fe4435cf9b5b557253f7fb9563dc600c | 21,581 |
import six
def get_from_module(identifier, module_params, module_name,
instantiate=False, kwargs=None):
"""The function is stolen from keras.utils.generic_utils.
"""
if isinstance(identifier, six.string_types):
res = module_params.get(identifier)
if not res:
raise Exception('Invalid ' + str(module_name) + ': ' +
str(identifier))
if instantiate and not kwargs:
return res()
elif instantiate and kwargs:
return res(**kwargs)
else:
return res
elif type(identifier) is dict:
name = identifier.pop('name')
res = module_params.get(name)
if res:
return res(**identifier)
else:
raise Exception('Invalid ' + str(module_name) + ': ' +
str(identifier))
return identifier | 406a1da5843feb8556bbd1802426b57e7a33b20d | 21,582 |
def color_lerp(c1, c2, a):
"""Return the linear interpolation between two colors.
    ``a`` is the interpolation value, with 0 returning ``c1``,
    1 returning ``c2``, and 0.5 returning a color halfway between both.
Args:
c1 (Union[Tuple[int, int, int], Sequence[int]]):
The first color. At a=0.
c2 (Union[Tuple[int, int, int], Sequence[int]]):
The second color. At a=1.
a (float): The interpolation value,
Returns:
Color: The interpolated Color.
"""
return Color._new_from_cdata(lib.TCOD_color_lerp(c1, c2, a)) | 96c950c447994a729c9eb4c18bdcc60976dbb675 | 21,583 |
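# The tcod binding above does the interpolation in C; below is a pure-Python equivalent of
# the same per-channel math (hedged sketch for intuition, not the library call itself).
def color_lerp_py(c1, c2, a):
    """Per-channel linear interpolation: c1 + (c2 - c1) * a."""
    return tuple(int(round(x + (y - x) * a)) for x, y in zip(c1, c2))

assert color_lerp_py((0, 0, 0), (255, 255, 255), 0.5) == (128, 128, 128)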
def get_equations(points):
""" Calculate affine equations of inputted points
Input : 1
points : list of list
        ex : [[[x1, y1], [x2, y2], [x3, y3], [x4, y4]], ...] with four points per
        identified element
        Contains coordinates of separation lines i.e.
        [[[start line start x, y], [start line end x, y], [end line start x, y], [end line end x, y]], ...]
Output : 2
columns_a : list of list
Contains all the a coefficients of an affine equation (y = ax + b)
of all the calculated lines, in the same order as the input
columns_b : list of list
Contains all the b coefficients of an affine equation (y = ax + b)
of the all the calculated lines, in the same order as the input"""
columns_a, columns_b = [], []
    # iterate through points
for k in points:
# calculate the a coefficients of start and end separation lines of this element
a1 = (k[0][1] - k[1][1])/(k[0][0] - k[1][0])
a2 = (k[2][1] - k[3][1])/(k[2][0] - k[3][0])
columns_a.append([a1, a2])
# then calculate the b coefficients of start and end separation lines
# using the a coeff calculated before
b1 = k[0][1] - a1*k[0][0]
b2 = k[2][1] - a2*k[2][0]
columns_b.append([b1, b2])
return (columns_a, columns_b) | 4eea43aee8b5f9c63793daae0b28e3c8b4ce0929 | 21,584 |
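# Hedged usage sketch for get_equations above: each element carries four points, two per
# separation line (the coordinates below are made up).
element = [[0, 0], [2, 2],   # start separation line: y = 1*x + 0
           [1, 0], [3, 4]]   # end separation line:   y = 2*x - 2
columns_a, columns_b = get_equations([element])
assert columns_a == [[1.0, 2.0]]
assert columns_b == [[0.0, -2.0]]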
def Temple_Loc(player, num):
"""temple location function"""
player.coins -= num
player.score += num
player.donation += num
# player = temple_bonus_check(player) for acheivements
return (player) | dced7b9f23f63c0c51787291ab12701bd7021152 | 21,585 |
def indexGenomeFile(input, output):
"""Index STAR genome index file
`input`: Input probes fasta file
`output`: SAindex file to check the completion of STAR genome index
"""
#print input
#print output
base = splitext(input)[0]
base = base + ".gtf"
#print base
gtfFile = base
outputDir = proDir + "/result/Genome"
    print(colored("Stage 4: Creating genome index file from the probe fasta file ....", "green"))
    print(input)
#print cpuNum
result = tasks.index_db_file(input, outputDir, cpuNum, gtfFile)
return result | 2ebd981ebad97f68adb1043e9c06fd01dc270c10 | 21,586 |
import math
from sklearn.metrics import roc_auc_score
def performance(origin_labels, predict_labels, deci_value, bi_or_multi=False, res=False):
"""evaluations used to evaluate the performance of the model.
:param deci_value: decision values used for ROC and AUC.
:param bi_or_multi: binary or multiple classification
:param origin_labels: true values of the data set.
:param predict_labels: predicted values of the data set.
:param res: residue or not.
"""
if len(origin_labels) != len(predict_labels):
raise ValueError("The number of the original labels must equal to that of the predicted labels.")
if bi_or_multi is False:
tp = 0.0
tn = 0.0
fp = 0.0
fn = 0.0
for i in range(len(origin_labels)):
if res is True:
if origin_labels[i] == 1 and predict_labels[i] == 1:
tp += 1.0
elif origin_labels[i] == 1 and predict_labels[i] == 0:
fn += 1.0
elif origin_labels[i] == 0 and predict_labels[i] == 1:
fp += 1.0
elif origin_labels[i] == 0 and predict_labels[i] == 0:
tn += 1.0
else:
if origin_labels[i] == 1 and predict_labels[i] == 1:
tp += 1.0
elif origin_labels[i] == 1 and predict_labels[i] == -1:
fn += 1.0
elif origin_labels[i] == -1 and predict_labels[i] == 1:
fp += 1.0
elif origin_labels[i] == -1 and predict_labels[i] == -1:
tn += 1.0
try:
sn = tp / (tp + fn)
r = sn
except ZeroDivisionError:
sn, r = 0.0, 0.0
try:
sp = tn / (fp + tn)
except ZeroDivisionError:
sp = 0.0
try:
acc = (tp + tn) / (tp + tn + fp + fn)
except ZeroDivisionError:
acc = 0.0
try:
mcc = (tp * tn - fp * fn) / math.sqrt((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn))
except ZeroDivisionError:
mcc = 0.0
try:
auc = roc_auc_score(origin_labels, deci_value)
except ValueError: # modify in 2020/9/13
auc = 0.0
try:
p = tp / (tp + fp)
except ZeroDivisionError:
p = 0.0
try:
f1 = 2 * p * r / (p + r)
except ZeroDivisionError:
f1 = 0.0
balance_acc = (sn + sp) / 2
return acc, mcc, auc, balance_acc, sn, sp, p, r, f1
else:
correct_labels = 0.0
for elem in zip(origin_labels, predict_labels):
if elem[0] == elem[1]:
correct_labels += 1.0
acc = correct_labels / len(origin_labels)
return acc | aac87e0bdc02b61ccb5136e04e1ac8b09e01ce65 | 21,587 |
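# Hedged usage sketch for performance above (binary, non-residue case); the labels,
# predictions and decision values below are made up, and roc_auc_score is assumed to come
# from sklearn.metrics as imported with the function.
origin = [1, 1, -1, -1]
predicted = [1, -1, -1, 1]
decision = [0.9, 0.3, 0.2, 0.6]
acc, mcc, auc, bal_acc, sn, sp, p, r, f1 = performance(origin, predicted, decision)
# tp=1, fn=1, tn=1, fp=1 -> acc=0.5, sn=sp=0.5, mcc=0.0, auc=0.75
assert acc == 0.5 and sn == 0.5 and sp == 0.5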
def rotkehlchen_instance(
uninitialized_rotkehlchen,
database,
blockchain,
accountant,
start_with_logged_in_user,
start_with_valid_premium,
function_scope_messages_aggregator,
db_password,
rotki_premium_credentials,
accounting_data_dir,
username,
etherscan,
):
"""A partially mocked rotkehlchen instance"""
initialize_mock_rotkehlchen_instance(
rotki=uninitialized_rotkehlchen,
start_with_logged_in_user=start_with_logged_in_user,
start_with_valid_premium=start_with_valid_premium,
msg_aggregator=function_scope_messages_aggregator,
accountant=accountant,
blockchain=blockchain,
db_password=db_password,
rotki_premium_credentials=rotki_premium_credentials,
data_dir=accounting_data_dir,
database=database,
username=username,
etherscan=etherscan,
)
return uninitialized_rotkehlchen | 144585d62c04f97aa7bcb7a355bd90f8ff001022 | 21,588 |
def store_inspection_outputs_df(backend, annotation_iterators, code_reference, return_value, operator_context):
"""
Stores the inspection annotations for the rows in the dataframe and the
inspection annotations for the DAG operators in a map
"""
dag_node_identifier = DagNodeIdentifier(operator_context.operator, code_reference,
backend.code_reference_to_description.get(code_reference))
annotations_df = build_annotation_df_from_iters(backend.inspections, annotation_iterators)
annotations_df['mlinspect_index'] = range(1, len(annotations_df) + 1)
inspection_outputs = {}
for inspection in backend.inspections:
inspection_outputs[inspection] = inspection.get_operator_annotation_after_visit()
backend.dag_node_identifier_to_inspection_output[dag_node_identifier] = inspection_outputs
return_value = MlinspectDataFrame(return_value)
return_value.annotations = annotations_df
return_value.backend = backend
if "mlinspect_index" in return_value.columns:
return_value = return_value.drop("mlinspect_index", axis=1)
elif "mlinspect_index_x" in return_value.columns:
return_value = return_value.drop(["mlinspect_index_x", "mlinspect_index_y"], axis=1)
assert "mlinspect_index" not in return_value.columns
assert isinstance(return_value, MlinspectDataFrame)
return return_value | 228a24a4d59162382b5a3ae7d8204e396b8c76dd | 21,589 |
def switched (decorator):
"""decorator transform for switched decorations.
adds start_fun and stop_fun methods to class to control fun"""
@simple_decorator
def new_decorator (fun):
event = new_event()
def inner_fun (self, *args):
if args:
event.wait()
if threads_alive():
return fun(self, *args)
def new_fun (self, *args):
setattr(self, 'start_%s' % fun.__name__, event.set)
setattr(self, 'stop_%s' % fun.__name__, event.clear)
decorator(inner_fun)(self, *args)
return new_fun
return new_decorator | 1996274fcaba2095b43f7d0da134abb59b2f7a56 | 21,590 |
import numpy as np
def logistic_embedding0(k=1, dataset='epinions'):
"""using random embedding to train logistic
Keyword Arguments:
k {int} -- [folder] (default: {1})
dataset {str} -- [dataset] (default: {'epinions'})
Returns:
[type] -- [pos_ratio, accuracy, f1_score0, f1_score1, f1_score2, auc_score]
"""
print('random embeddings')
embeddings = np.random.rand(DATASET_NUM_DIC[dataset], EMBEDDING_SIZE)
pos_ratio, accuracy, f1_score0, f1_score1, f1_score2, auc_score = common_logistic(dataset, k, embeddings, 'random')
return pos_ratio, accuracy, f1_score0, f1_score1, f1_score2, auc_score | 69b198c6a5f8a44681ccfee67b532b3d38d2ee44 | 21,591 |
def process_plus_glosses(word):
"""
Find all glosses with a plus inside. They correspond
to one-phoneme affix sequences that are expressed by
the same letter due to orthographic requirements.
Replace the glosses and the morphemes.
"""
return rxPartsGloss.sub(process_plus_glosses_ana, word) | 0678efc61d1af0ec75b8d0566866b305b6312448 | 21,592 |
from datetime import datetime, timezone
def check_in_the_past(value: datetime) -> datetime:
"""
Validate that a timestamp is in the past.
"""
assert value.tzinfo == timezone.utc, "date must be an explicit UTC timestamp"
assert value < datetime.now(timezone.utc), "date must be in the past"
return value | a439295190bfa2b6d2d6de79c7dc074df562e9ed | 21,593 |
from typing import Dict
def character_count_helper(results: Dict) -> int:
"""
Helper Function that computes
character count for ocr results on a single image
Parameters
----------
results: Dict
(OCR results from a clapperboard instance)
Returns
-------
Int
        Number of characters computed from
        OCR results
"""
count = 0
for element in results:
words_list = element["text"].split(" ")
for word in words_list:
count += len(word)
return count | b5bcba9d39b7b09a1a123fec034ab1f27b31d1eb | 21,595 |
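# Hedged usage sketch for character_count_helper above: it counts characters excluding
# spaces (the OCR result dicts below are made up).
results = [{"text": "take one"}, {"text": "scene 2"}]
assert character_count_helper(results) == 13   # "takeone" (7) + "scene2" (6)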
import pickle
def from_pickle(input_path):
"""Read from pickle file."""
with open(input_path, 'rb') as f:
unpickler = pickle.Unpickler(f)
return unpickler.load() | 4e537fcde38e612e22004007122130c545246afb | 21,596 |
def PromptForRegion(available_regions=constants.SUPPORTED_REGION):
"""Prompt for region from list of available regions.
    This method is referenced by the declarative iam commands as a fallthrough
for getting the region.
Args:
available_regions: list of the available regions to choose from
Returns:
The region specified by the user, str
"""
if console_io.CanPrompt():
all_regions = list(available_regions)
idx = console_io.PromptChoice(
all_regions, message='Please specify a region:\n', cancel_option=True)
region = all_regions[idx]
log.status.Print('To make this the default region, run '
'`gcloud config set ai/region {}`.\n'.format(region))
return region | 2298fde743219f59b5a36844d85e14929d1e2a1e | 21,597 |
def update(x, new_x):
"""Update the value of `x` to `new_x`.
# Arguments
x: A `Variable`.
new_x: A tensor of same shape as `x`.
# Returns
The variable `x` updated.
"""
return tf.assign(x, new_x) | 363cd3232a57d4c2c946813874a5a3c613f9a8c9 | 21,598 |
import numpy as np
def r1r2_to_bp(r1, r2, pl=0.01, pu=0.25):
"""
    Convert uniform sampling of r1 and r2 to impact parameter b and radius ratio p
following Espinoza 2018, https://iopscience.iop.org/article/10.3847/2515-5172/aaef38/meta
    Parameters:
    -----------
    r1, r2: float;
        uniform parameters drawn from u(0,1)
pl, pu: float;
lower and upper limits of the radius ratio
Return:
-------
b, p: tuple;
impact parameter and radius ratio
"""
assert np.all(0<r1) and np.all(r1<=1) and np.all(0<r2) and np.all(r2<=1), f"r1 and r2 needs to be u(0,1) but r1={r1}, r2={r2}"
Ar = (pu-pl)/(2+pu+pl)
if np.all(r1 > Ar):
b = (1+pl) * (1 + (r1-1)/(1-Ar) )
p = (1-r2)*pl + r2*pu
elif np.all(r1 <= Ar):
q1 = r1/Ar
b = (1+pl) + q1**0.5 * r2*(pu-pl)
p = pu + (pl-pu)* q1**0.5*(1-r2)
return b, p | 0c7f69f3f7960792e8d0ecd75fde028eda9feefa | 21,599 |
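# Hedged usage sketch for r1r2_to_bp above: mid-range r1, r2 with the default radius-ratio
# limits (the numbers are only an illustration of the mapping).
import numpy as np
b, p = r1r2_to_bp(0.5, 0.5)
assert np.isclose(p, 0.13)    # p = 0.5*pl + 0.5*pu = 0.13
assert 0 < b < 1 + p          # impact parameter stays within the transiting range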
import datetime
def is_utc_today(utc):
"""
Returns true if the UTC is today
:param utc:
:return:
"""
current_time = datetime.datetime.utcnow()
day_start = current_time - datetime.timedelta(hours=current_time.hour, minutes=current_time.minute,
seconds=current_time.second)
day_start_utc = unix_time(day_start)
return (utc - day_start_utc) >= 0 | f707b44ce9a741a4d126e1e55a33c7e78cb1159e | 21,600 |
def test_run_completed(mock_job, mock_queue, mock_driver):
"""Test run function for a successful run."""
# Setup
def mock_render(*args, **kwargs):
return
class MockStorage:
def __init__(self):
pass
def load(self, *args, **kwargs):
return 'blah'
def save(self, *args, **kwargs):
return True
# Execute
render.run(
sleep=5,
job_queue=mock_queue,
job=mock_job,
render=mock_render,
storage=MockStorage(),
driver=mock_driver,
)
# Verify
assert mock_job.status is StatusEnum.complete | bd4660738e952cf5da24dbd2bc22cd0876716b66 | 21,601 |
async def get_telegram_id(phone_number, user_mode=False):
"""
Tries to get a telegram ID for the passed in phone number.
"""
async with start_bot_client() as bot:
if user_mode:
# just leaving this code here in case it proves useful.
# It only works if you use a user, not a bot.
# more details: https://stackoverflow.com/a/51196276/8207
# https://tl.telethon.dev/methods/contacts/import_contacts.html#examples
contact = InputPhoneContact(client_id=0, phone=phone_number, first_name="a", last_name="")
result = await bot(ImportContactsRequest([contact]))
print(result)
else:
# this only works if you have already messaged the contact, so only will allow looking
# up "known" users.
# more details: https://stackoverflow.com/a/41696457/8207
room_id = settings.MPACT_CONTACT_LOOKUP_ROOM_ID or GroupChat.objects.all()[0].id
print('room id', room_id)
receiver = await bot.get_entity(PeerChat(room_id))
msg_inst = await bot.send_file(
receiver,
InputMediaContact(
phone_number=phone_number,
first_name='Jane',
last_name='Doe',
vcard='',
))
# "unknown" users return "0" instead of the actual ID
return msg_inst.media.user_id if msg_inst.media.user_id != 0 else None | e0a0c8d4bcd305b23f831297880b0ead30eeb94a | 21,602 |
def QuadRemesh(thisMesh, parameters, multiple=False):
"""
Quad remesh this mesh.
"""
url = "rhino/geometry/mesh/quadremesh-mesh_quadremeshparameters"
if multiple: url += "?multiple=true"
args = [thisMesh, parameters]
if multiple: args = list(zip(thisMesh, parameters))
response = Util.ComputeFetch(url, args)
response = Util.DecodeToCommonObject(response)
return response | f3068768ea62b8960fe11b736a6fcaea722d105d | 21,603 |
def part_2_helper():
"""PART TWO
This simply runs the script multiple times and multiplies the results together
"""
slope_1 = sled_down_hill(1, 1)
slope_2 = sled_down_hill(1, 3)
slope_3 = sled_down_hill(1, 5)
slope_4 = sled_down_hill(1, 7)
slope_5 = sled_down_hill(2, 1)
return slope_1 * slope_2 * slope_3 * slope_4 * slope_5 | 2a62fa6acde73a8a5c1e59a0403cf6c067baf57a | 21,604 |
def read_and_download_profile_information(id):
"""
    link: https://developer.apple.com/documentation/appstoreconnectapi/read_and_download_profile_information
:param id: bundle_id
:return: 请求结果
"""
data = {
"fields[certificates]": "certificateType",
"fields[devices]": "platform",
"fields[profiles]": "profileType",
"include": "bundleId, certificates, devices",
"fields[bundleIds]": "app, bundleIdCapabilities, identifier, name, platform, profiles, seedId",
"limit[devices]": 50,
"limit[certificates]": 50
}
result = request_core.GET(api.Profiles_API + '/' + id, data)
print(result.text)
return result | 01ba704a352df28d20d944baecf108fc0fb44ad1 | 21,605 |
def get_config(node):
"""Get the BIOS configuration.
The BIOS settings look like::
{'EnumAttrib': {'name': 'EnumAttrib',
'current_value': 'Value',
'pending_value': 'New Value', # could also be None
'read_only': False,
'possible_values': ['Value', 'New Value', 'None']},
'StringAttrib': {'name': 'StringAttrib',
'current_value': 'Information',
'pending_value': None,
'read_only': False,
'min_length': 0,
'max_length': 255,
'pcre_regex': '^[0-9A-Za-z]{0,255}$'},
'IntegerAttrib': {'name': 'IntegerAttrib',
'current_value': 0,
'pending_value': None,
'read_only': True,
'lower_bound': 0,
'upper_bound': 65535}}
:param node: an ironic node object.
:raises: DracOperationError on an error from python-dracclient.
:returns: a dictionary containing BIOS settings
The above values are only examples, of course. BIOS attributes exposed via
this API will always be either an enumerated attribute, a string attribute,
or an integer attribute. All attributes have the following parameters:
:param name: is the name of the BIOS attribute.
:param current_value: is the current value of the attribute.
It will always be either an integer or a string.
:param pending_value: is the new value that we want the attribute to have.
None means that there is no pending value.
:param read_only: indicates whether this attribute can be changed.
Trying to change a read-only value will result in
an error. The read-only flag can change depending
on other attributes.
A future version of this call may expose the
dependencies that indicate when that may happen.
Enumerable attributes also have the following parameters:
:param possible_values: is an array of values it is permissible to set
the attribute to.
String attributes also have the following parameters:
:param min_length: is the minimum length of the string.
:param max_length: is the maximum length of the string.
:param pcre_regex: is a PCRE compatible regular expression that the string
must match. It may be None if the string is read only
or if the string does not have to match any particular
regular expression.
Integer attributes also have the following parameters:
:param lower_bound: is the minimum value the attribute can have.
:param upper_bound: is the maximum value the attribute can have.
"""
client = drac_common.get_drac_client(node)
try:
return client.list_bios_settings()
except drac_exceptions.BaseClientException as exc:
LOG.error('DRAC driver failed to get the BIOS settings for node '
'%(node_uuid)s. Reason: %(error)s.',
{'node_uuid': node.uuid,
'error': exc})
raise exception.DracOperationError(error=exc) | ca9467800fae3939f83e964c29d564cc306d4b1e | 21,606 |
def get_only_metrics(results):
"""Turn dictionary of results into a list of metrics"""
metrics_names = ["test/f1", "test/precision", "test/recall", "test/loss"]
metrics = [results[name] for name in metrics_names]
return metrics | 1b0e5bb8771fdc44dcd22ff9cdb174f77205eadd | 21,608 |
def max_sub_array(nums):
""" Returns the max subarray of the given list of numbers.
Returns 0 if nums is None or an empty list.
Time Complexity: ?
Space Complexity: ?
"""
    if nums is None:
return 0
if len(nums) == 0:
return 0
pass | 0575524e2e38a215867ce51c08828fd47a230e97 | 21,609 |
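# The stub above leaves the body as `pass`; a possible completion (hedged sketch, not the
# original author's solution) is Kadane's algorithm, which gives the maximum contiguous
# subarray sum in O(n) time and O(1) space.
def kadane_max_sub_array(nums):
    """Sketch: maximum contiguous subarray sum; 0 for None or empty input."""
    if not nums:
        return 0
    current_sum = max_sum = nums[0]
    for num in nums[1:]:
        # Either extend the running subarray or start a new one at `num`
        current_sum = max(num, current_sum + num)
        max_sum = max(max_sum, current_sum)
    return max_sum

# Quick check: the classic example has best subarray [4, -1, 2, 1] -> 6
assert kadane_max_sub_array([-2, 1, -3, 4, -1, 2, 1, -5, 4]) == 6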
import re
def find_tags_containing(project, commit):
"""Find all tags containing the given commit. Returns the full list and a condensed list (excluding tags 'after' other tags in the list)."""
tags = run_list_command(['git', 'tag', '--contains', commit], project)
# The packaging projects had a different format for older tags.
if project in ['acs-packaging', 'acs-community-packaging']:
# Remove the prefix 'acs-packaging-' if it's present.
tags = list(map(lambda tag: tag.replace('{}-'.format(project), ''), tags))
# Exclude tags that aren't just chains of numbers with an optional suffix.
tags = list(filter(lambda tag: re.match(version_filter, tag), tags))
# Filter out tags that are before other tags.
reduced_tags = reduce_tags(tags)
return tags, reduced_tags | 792bfd8c6972e818d36343723345ee1152cb66ec | 21,610 |
def setup_nupack_input(**kargs):
""" Returns the list of tokens specifying the command to be run in the pipe, and
the command-line input to be given to NUPACK.
Note that individual functions below may modify args or cmd_input depending on their
specific usage specification. """
# Set up terms of command-line executable call
args = setup_args(**kargs)
# Set up command-line input to NUPACK
cmd_input = setup_cmd_input(kargs['multi'], kargs['sequences'], kargs['ordering'],
kargs.get('structure', ''))
return (args, cmd_input) | 6dc26413bb2eab4b94c343770e6110ba2d012a41 | 21,611 |
from typing import List
from typing import Union
from typing import Literal
import numpy as np
def rebuild_current_distribution(
fields: np.ndarray,
ics: np.ndarray,
jj_size: float,
current_pattern: List[Union[Literal["f"], str]],
sweep_invariants: List[Union[Literal["offset"], Literal["field_to_k"]]] = [
"offset",
"field_to_k",
],
precision: float = 100,
n_points: int = 2 ** 10 + 1,
) -> dict:
"""Rebuild a current distribution from a Fraunhofer pattern.
This assumes a uniform field focusing since allowing a non uniform focusing
would lead to a much larger space to explore.
Parameters
----------
fields : np.ndarray
Out of plane field for which the critical current was measured.
ics : np.ndarray
Critical current of the junction.
jj_size : float
Size of the junction.
current_pattern : List[Union[Literal["f"], str]]
Describe in how many pieces to use to represent the junction. If the
input arrays are more than 1D, "f" means that value is the same across
all outer dimension, "v" means that the slice takes different value
for all outer dimension (ie. one value per sweep).
sweep_invariants : Tuple[Union[Literal["offset", "field_to_k"]]]
Indicate what quantities are invariants across sweep for more the 1D
inputs.
precision : float, optional
pass
n_points : int, optional
Returns
-------
dict
"""
# Get the offset and estimated amplitude used in the prior
# We do not use the estimated current and phase distribution to give the
# more space to the algorithm.
offsets, first_node_locs, _, _, _ = guess_current_distribution(
field, fraunhofer, site_number, jj_size
)
# Gives a Fraunhofer pattern at the first node for v[1] = 1
field_to_ks = 2 * np.pi / jj_size / np.abs(first_node_locs - offsets)
# Determine the dimensionality of the problem based on the invariants and
# the shape of the inputs.
if len(sweep_invariants) > 2:
raise ValueError("There are at most 2 invariants.")
if any(k for k in sweep_invariants if k not in ("offset", "field_to_k")):
raise ValueError(
f"Invalid invariant specified {sweep_invariants}, "
"valid values are 'offset', 'field_to_k'."
)
shape = fields.shape[:-1]
shape_product = prod(shape) if shape else 0
if shape_product == 0 and any(p.startswith("v") for p in current_pattern):
raise ValueError(
"Found variable current in the distribution but the measurements are 1D."
)
dim = len(sweep_invariants) + current_pattern.count("f")
dim += shape_product * (current_pattern.count("v") + 2 - len(sweep_invariants))
# Pre-compute slices to access elements in the prior and log-like
offset_access = slice(
0, 1 if "offset" in sweep_invariants else (shape_product or 1)
)
field_to_k_access = slice(
offset_access.stop,
offset_access.stop + 1
if "field_to_k" in sweep_invariants
else (shape_product or 1),
)
stop = field_to_k_access.stop
current_density_accesses = []
for p in current_pattern:
if p == "f":
current_density_accesses.append(slice(stop, stop + 1))
stop += 1
elif p == "v":
current_density_accesses.append(slice(stop, stop + (shape_product or 1)))
stop += current_density_accesses[-1].stop
else:
raise ValueError(
f"Valid values in current_pattern are 'f' and 'v', found '{p}'"
)
def prior(u):
"""Map the sampled in 0-1 to the relevant values range.
For all values we consider the values in the prior to be the log of the
values we are looking for.
"""
v = np.empty_like(u)
v[offset_access] = 4 * u[offset_access] - 2
        v[field_to_k_access] = 4 * u[field_to_k_access] - 2
# For all the amplitude we map the value between 0 and -X since the
# amplitude of a single segment cannot be larger than the total current
# X is determined based on the number of segments
ampl = -np.log10(len(current_pattern))
for sl in current_density_accesses:
v[sl] = u[sl] * ampl
return v
def loglike(v):
"""Compute the distance to the data"""
# We turn invariant input into their variant form (from 1 occurence in v
# to n repetition in w) to ease a systematic writing of the loglike.
        w = np.empty((2 + len(current_pattern)) * (shape_product or 1))
        stop = step = shape_product or 1
        w[0:stop] = w_off = v[offset_access]
w[stop : stop + step] = w_f2k = v[field_to_k_access]
stop += step
for sl in current_density_accesses:
w[stop : stop + step] = v[sl]
# Pack the current distribution so that each line corresponds to different
# conditions
c_density = w[stop + step :].reshape((len(current_pattern), -1)).T
err = np.empty_like(ics)
it = np.nditer((offsets, first_node_locs, field_to_ks), ["multi_index"])
for i, (off, fnloc, f2k) in enumerate(it):
# Compute the offset
f_off = off + np.sign(w_off[i]) * 10 ** -abs(w_off[i]) * fnloc
# Compute the Fraunhofer pattern
f = produce_fraunhofer_fast(
(fields[it.multi_index] - f_off[i]),
f2k * 10 ** w_f2k[i],
jj_size,
c_density[i],
2 ** 10 + 1,
)
# Compute and store the error
err[it.multi_index] = np.sum(
(100 * (ics[it.multi_index] - f) / amplitude) ** 2
)
return -np.ravel(err)
# XXX do that nasty part later
sampler = NestedSampler(loglike, prior, dim)
sampler.run_nested(dlogz=precision)
res = sampler.results
weights = np.exp(res.logwt - res.logz[-1])
mu, cov = utils.mean_and_cov(res["samples"], weights)
res["fraunhofer_params"] = {
"offset": offset + np.sign(mu[0]) * 10 ** -abs(mu[0]) * first_node_loc,
"field_to_k": 2 * np.pi / jj_size / abs(first_node_loc - offset) * 10 ** mu[1],
"amplitude": amplitude * 10 ** mu[2],
"current_distribution": np.array(
[1 - np.sum(mu[3 : 3 + site_number - 1])]
+ list(mu[3 : 3 + site_number - 1])
),
"phase_distribution": np.array(
[0] + list(mu[3 + site_number - 1 : 3 + 2 * site_number - 2])
),
}
return res | 66e81639e3ff3995c4a6b6bbe2f361071a6b51b1 | 21,612 |
def get_LCA(index, item1, item2):
"""Get lowest commmon ancestor (including themselves)"""
# get parent list from
if item1 == item2:
return item1
try:
return LCA_CACHE[index][item1 + item2]
except KeyError:
pass
parent1 = ATT_TREES[index][item1].parent[:]
parent2 = ATT_TREES[index][item2].parent[:]
parent1.insert(0, ATT_TREES[index][item1])
parent2.insert(0, ATT_TREES[index][item2])
min_len = min(len(parent1), len(parent2))
last_LCA = parent1[-1]
# note here: when trying to access list reversely, take care of -0
for i in range(1, min_len + 1):
if parent1[-i].value == parent2[-i].value:
last_LCA = parent1[-i]
else:
break
LCA_CACHE[index][item1 + item2] = last_LCA.value
return last_LCA.value | 22e65163cd1c32ac8bd20dadc3aa69c208adb540 | 21,613 |
def select_workspace_access(cursor, workspace_id):
"""ワークスペースアクセス情報取得
Args:
cursor (mysql.connector.cursor): カーソル
workspace_id (int): ワークスペースID
Returns:
dict: select結果
"""
# select実行
cursor.execute('SELECT * FROM workspace_access WHERE workspace_id = %(workspace_id)s',
{
'workspace_id' : workspace_id,
}
)
rows = cursor.fetchall()
return rows | 4cf1e0dd4a1232bd5a00d49d952e3bfb98e189c5 | 21,614 |
def pkcs7_unpad(data):
"""
Remove the padding bytes that were added at point of encryption.
Implementation copied from pyaspora:
https://github.com/mjnovice/pyaspora/blob/master/pyaspora/diaspora/protocol.py#L209
"""
if isinstance(data, str):
return data[0:-ord(data[-1])]
else:
return data[0:-data[-1]] | 4b43b80220e195aa51c129b6cbe1f216a94360cd | 21,615 |
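# Hedged usage sketch for pkcs7_unpad above: strip four bytes of PKCS#7 padding.
padded = b"YELLOW SUBMARINE" + b"\x04" * 4
assert pkcs7_unpad(padded) == b"YELLOW SUBMARINE"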
def leveinshtein_distance(source,target):
"""
Implement leveintein distance algorithm as described in the reference
"""
#Step 1
s_len=len(source)
t_len=len(target)
cost=0
if(s_len==0):
return t_len
if(t_len==0):
return s_len
print("Dimensions:\n\tN:%d\n\tM:%d"%(s_len,t_len))
#Step 2
matrix=[[0 for _ in range(0,t_len+1)] for _ in range(0, s_len+1)]
#Initialize first row 0..s_len
for idx in range(0,s_len+1):
matrix[idx][0]=idx
#Initialize the first column 0..t_len
for idx in range(0, t_len+1):
matrix[0][idx]=idx
print("===Original===")
print_matrix(matrix,source,target)
#Step 3
for i in range(1,s_len+1):
ch=source[i-1]
#print(ch)
#Step 4
for j in range(1,t_len+1):
#print(">%s"%target[j-1])
#Step 5
if ch==target[j-1]:
cost=0
else:
cost=1
#Step 6
#print("(i,j)=>(%d,%d)"%(i,j))
#print(matrix[i][j])
matrix[i][j]=minimum(
matrix[i-1][j]+1,
matrix[i][j-1]+1,
matrix[i-1][j-1]+cost
)
print("===Final Matrix===")
print_matrix(matrix,source,target)
    return matrix[s_len][t_len]
def minus (s):
""" заменить последний минус на равенство """
q = s.rsplit ('-', 1)
return q[0] + '=' + q[1] | 8d4ae538d866a930603b71ccdba0b18145af9988 | 21,617 |
def _chk_y_path(tile):
"""
Check to make sure tile is among left most possible tiles
"""
if tile[0] == 0:
return True
return False | cf733c778b647654652ae5c651c7586c8c3567b8 | 21,618 |
def json_project_activities(request):
"""docstring for json_project_activities"""
timestamp = int(request.GET['dt'])
pid = int(request.GET['id'])
project = get_object_or_404(Project, id=pid)
items = project.items(timestamp)
objs = []
for item in items:
# p.items()[0].tags.all().values()
objs.append({
"username": item.username,
"tags": [tag['name'] for tag in item.tags.values()],
"type": item.type,
"source": item.source,
"title":item.title,
"subtitle": item.subtitle,
"dt": "just now",
})
return HttpResponse(simplejson.dumps(objs), mimetype='application/javascript') | cde499b9279194cb42df66989849d8beb2de1376 | 21,619 |
from typing import List
import numpy as np
def to_complex_matrix(matrix: np.ndarray) -> List:
"""
Convert regular matrix to matrix of ComplexVals.
:param matrix: any matrix.
:return: Complex matrix.
"""
output: List[List] = matrix.tolist()
for i in range(len(matrix)):
for j in range(len(matrix[i])):
if type(matrix[i, j]) == complex or type(matrix[i, j]) == np.complex128:
output[i][j] = ComplexVal(matrix[i, j].real, matrix[i, j].imag)
else:
output[i][j] = ComplexVal(matrix[i, j])
return output | ea21f0993452b4d2b540de4b92f64579e9aeee00 | 21,620 |
def skipIfDarwin(func):
"""Decorate the item to skip tests that should be skipped on Darwin."""
return skipIfPlatform(
lldbplatform.translate(
lldbplatform.darwin_all))(func) | ddca10a60e9d12f7e556192f922bbeaee0bc3a85 | 21,621 |
from typing import Tuple
from pathlib import Path
from os import PathLike
import pandas as pd
def load_dataframe(csv_path: PathLike) -> Tuple[str, pd.DataFrame]:
"""Returns a tuple (name, data frame). Used to construct a data set by `load_dataframes_from_directory`.
See:
load_dataframes_from_directory
Dataset
"""
return Path(csv_path).stem, pd.read_csv(csv_path) | dbbb5f36cb767d39f01bc9c57ea27e7edfb2fb35 | 21,622 |
def VerifyReleaseChannel(options):
"""Verify that release image channel is correct.
ChromeOS has four channels: canary, dev, beta and stable.
The last three channels support image auto-updates, checks
that release image channel is one of them.
"""
return GetGooftool(options).VerifyReleaseChannel(
options.enforced_release_channels) | 2be7073c9aceb2eaef111b73204d4ef4c71cc6df | 21,623 |
def make_start_script(cmd, repo, anaconda_path, env,
install_pip=(), add_swap_file=False):
""" My basic startup template formatter
Parameters
----------
cmd : str
The actual command to run.
repo : str
The repository
anaconda_path : str
The anaconda path on my AMI.
env : str
The anaconda environment.
install_pip : list of str
Some last-minute packages that are missing on my AMI.
add_swap_file : bool, int
Need a swapfile? No problem. Tell me your size.
"""
swapfile_cmd = ''
if add_swap_file:
swapfile_cmd = _base_swap_tmp.format(add_swap_file=add_swap_file)
if len(install_pip) == 0:
install_pip = ''
else:
install_pip = '\n'.join(
['{anaconda_path}/bin/pip install {package}'.format(
anaconda_path=anaconda_path, package=package)
for package in install_pip])
script = _base_cmd_tmp.format(
anaconda_path=anaconda_path,
install_pip=install_pip,
swapfile_cmd=swapfile_cmd,
repo=repo,
env=env,
cmd=cmd)
return script | 0749f257a511526bb220c3fc4c9f34bc00a0fa32 | 21,624 |
import numpy as np
import healpy
def radius_hpmap(glon, glat,
R_truncation, Rmin,
Npt_per_decade_integ,
nside=2048, maplonlat=None):
"""
Compute a radius map in healpix
Parameters
----------
- glon/glat (deg): galactic longitude and latitude in degrees
- R_truncation (quantity): the truncation radius
- Rmin (quantity): the minimum radius
- nside (int): healpix Nside
- Npt_per_decade_integ (int): the number of point per decade
- maplonlat (2d tuple of np.array): healpix maps of galactic longitude and latitude
which can be provided to save time in case of repeated computation
Returns
-------
- radius (array): the radius array from Rmin to R_truncation
- dist_map (array): distance map from center
- maplon/lat (array): longitude and latidute maps
"""
    try:
        import healpy
    except ImportError:
        print("Healpy is not installed while it is required by get_*_hpmap")
# Get a coord map
if maplonlat is None:
npix = healpy.nside2npix(nside)
ipix = np.linspace(0, npix, npix, dtype=int)
angle = healpy.pix2ang(nside, ipix, lonlat=False)
maplon = angle[1] * 180.0/np.pi
maplat = 90.0 - angle[0] * 180.0/np.pi
else:
maplon = maplonlat[0]
maplat = maplonlat[1]
# Get a cluster distance map (in deg)
dist_map = map_tools.greatcircle(maplon, maplat, glon, glat)
dist_map[np.isnan(dist_map)] = 180.0 # some pixels are NaN for dist = 180
# Define the radius used fo computing the profile
radius = sampling_array(Rmin, R_truncation, NptPd=Npt_per_decade_integ, unit=True)
return radius, dist_map, maplon, maplat | 1daafe536ba673213a1ff3b05d0b38f8f6c48abb | 21,625 |
import numpy as np
from PIL import Image
def convert_grayscale_image_to_pil(image):
"""Converts a 2D grayscale image into a PIL image.
Args:
image (numpy.ndarray[uint8]): The image to convert.
Returns:
PIL.Image: The converted image.
"""
image = np.repeat(image[:, :, None], 3, 2)
image_pil = Image.fromarray(image).convert('RGBA')
return image_pil | 01382037d7f08de0e66a47c2be7fbaf158443a00 | 21,626 |
def delete_group(group_id, tasks=False, cached=Conf.CACHED):
"""
Delete a group.
:param str group_id: the group id
:param bool tasks: If set to True this will also delete the group tasks.
Otherwise just the group label is removed.
:param bool cached: run this against the cache backend
:return:
"""
if cached:
return delete_group_cached(group_id)
return Task.objects.delete_group(group_id, tasks) | 6bad912ecb265b512fecb131d9a6f567f90edc52 | 21,627 |
import re
def alphanum_key(string):
"""Return a comparable tuple with extracted number segments.
Adapted from: http://stackoverflow.com/a/2669120/176978
"""
convert = lambda text: int(text) if text.isdigit() else text
return [convert(segment) for segment in re.split('([0-9]+)', string)] | 0e5e3f1d6aa43d393e1fb970f64e5910e7dc53fc | 21,628 |
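# Hedged usage sketch for alphanum_key above: natural ("human") sort order.
assert sorted(["item10", "item2", "item1"], key=alphanum_key) == ["item1", "item2", "item10"]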
def merge_data(attribute_column, geography, chloropleth, pickle_dir):
"""
Merges geometry geodataframe with chloropleth attribute data.
Inputs: dataframe or csv file name for data desired to be choropleth
Outputs: dataframe
"""
gdf = load_pickle(pickle_dir, geography)
chloropleth = load_pickle(pickle_dir, chloropleth)
chloropleth.columns = ['key', attribute_column]
return gdf.merge(chloropleth, on='key', how='left') | 70ce82667a974311bb3cda8785e0f6211a86dfd1 | 21,629 |
def get_ls8_image_collection(begin_date, end_date, aoi=None):
"""
    Calls the GEE API to collect scenes from the Landsat 8 Tier 1 Surface Reflectance Libraries
:param begin_date: Begin date for time period for scene selection
:param end_date: End date for time period for scene selection
:param aoi: Optional, only select scenes that cover this aoi
:return: cloud masked GEE image collection
"""
if aoi is None:
return (ee.ImageCollection('LANDSAT/LC08/C01/T1_SR')
.filterDate(begin_date, end_date)
.select('B2', 'B3', 'B4', 'B5', 'B6', 'B10', 'B7', 'pixel_qa')
.map(rename_ls_bands)
.map(cloud_mask_ls8))
else:
return (ee.ImageCollection('LANDSAT/LC08/C01/T1_SR')
.select('B2', 'B3', 'B4', 'B5', 'B6', 'B10', 'B7', 'pixel_qa')
.filterDate(begin_date, end_date).filterBounds(aoi)
.map(rename_ls_bands)
.map(cloud_mask_ls8)) | d74370a85dedd102ebe99fa41da1a69fdbb313d2 | 21,630 |
def multi_halo(n_halo):
"""
This routine will repeat the halo generator as many times
as the input number to get equivalent amount of haloes.
"""
r_halo = []
phi_halo = []
theta_halo = []
for i in range(n_halo):
r, theta,phi = one_halo(100)
r_halo.append(r)
theta_halo.append(theta)
phi_halo.append(phi)
return r_halo, theta_halo, phi_halo | 3b46e21f983dfaa44192b7977ca2f4858818ffc1 | 21,631 |
def allocation_proportion_of_shimenwpp():
"""
Real Name: Allocation Proportion of ShiMenWPP
Original Eqn: Allocation ShiMen WPP/Total WPP Allocation
Units: m3/m3
Limits: (None, None)
Type: component
Subs: None
"""
return allocation_shimen_wpp() / total_wpp_allocation() | e476e9472132bb81feccceef7f56c7e2eaa4b6f2 | 21,632 |
import copy
def update_cfg(base_cfg, update_cfg):
"""used for mmcv.Config or other dict-like configs."""
res_cfg = copy.deepcopy(base_cfg)
res_cfg.update(update_cfg)
return res_cfg | c03dcfa7ac6d2f5c6745f69028f7cdb2ebe35eec | 21,633 |
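# Hedged usage sketch for update_cfg above with a plain dict (an mmcv.Config behaves the
# same way for this purpose); the base config is left untouched.
base = {"lr": 0.01, "epochs": 10}
new = update_cfg(base, {"lr": 0.001})
assert new == {"lr": 0.001, "epochs": 10}
assert base["lr"] == 0.01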
import traceback
def check(conn, command, exit=False, timeout=None, **kw):
"""
Execute a remote command with ``subprocess.Popen`` but report back the
results in a tuple with three items: stdout, stderr, and exit status.
This helper function *does not* provide any logging as it is the caller's
responsibility to do so.
"""
command = conn.cmd(command)
stop_on_error = kw.pop('stop_on_error', True)
timeout = timeout or conn.global_timeout
if not kw.get('env'):
# get the remote environment's env so we can explicitly add
# the path without wiping out everything
kw = extend_env(conn, kw)
conn.logger.info('Running command: %s' % ' '.join(admin_command(conn.sudo, command)))
result = conn.execute(_remote_check, cmd=command, **kw)
response = None
try:
response = result.receive(timeout)
except Exception as err:
# the things we need to do here :(
# because execnet magic, we cannot catch this as
# `except TimeoutError`
if err.__class__.__name__ == 'TimeoutError':
msg = 'No data was received after %s seconds, disconnecting...' % timeout
conn.logger.warning(msg)
# there is no stdout, stderr, or exit code but make the exit code
# an error condition (non-zero) regardless
return [], [], -1
else:
remote_trace = traceback.format_exc()
remote_error = RemoteError(remote_trace)
if remote_error.exception_name == 'RuntimeError':
conn.logger.error(remote_error.exception_line)
else:
for tb_line in remote_trace.split('\n'):
conn.logger.error(tb_line)
if stop_on_error:
raise RuntimeError(
'Failed to execute command: %s' % ' '.join(command)
)
if exit:
conn.exit()
return response | f3ada320c245d0660f5f820e08131fed010a9fd4 | 21,634 |
import functools
def domains_configured(f):
"""Wraps API calls to lazy load domain configs after init.
This is required since the assignment manager needs to be initialized
before this manager, and yet this manager's init wants to be
able to make assignment calls (to build the domain configs). So
instead, we check if the domains have been initialized on entry
    to each call, and if required, load them.
"""
@functools.wraps(f)
def wrapper(self, *args, **kwargs):
if (not self.domain_configs.configured and
CONF.identity.domain_specific_drivers_enabled):
LOG.warning(_(
'Running an experimental and unsupported configuration '
'(domain_specific_drivers_enabled = True); '
'this will result in known issues.'))
self.domain_configs.setup_domain_drivers(
self.driver, self.assignment_api)
return f(self, *args, **kwargs)
return wrapper | b336912d9abc80b2f3f5899be7fbf6ae384ae248 | 21,635 |
def add_modified_tags(original_db, scenarios):
"""
Add a `modified` label to any activity that is new
Also add a `modified` label to any exchange that has been added
or that has a different value than the source database.
:return:
"""
# Class `Export` to which the original database is passed
exp = Export(original_db)
# Collect a dictionary of activities {row/col index in A matrix: code}
rev_ind_A = rev_index(create_codes_index_of_A_matrix(original_db))
# Retrieve list of coordinates [activity, activity, value]
coords_A = exp.create_A_matrix_coordinates()
# Turn it into a dictionary {(code of receiving activity, code of supplying activity): value}
original = {(rev_ind_A[x[0]], rev_ind_A[x[1]]): x[2] for x in coords_A}
    # Collect a dictionary with activities' names and corresponding codes
codes_names = create_codes_and_names_of_A_matrix(original_db)
# Collect list of substances
rev_ind_B = rev_index(create_codes_index_of_B_matrix())
# Retrieve list of coordinates of the B matrix [activity index, substance index, value]
coords_B = exp.create_B_matrix_coordinates()
# Turn it into a dictionary {(activity code, substance code): value}
original.update({(rev_ind_A[x[0]], rev_ind_B[x[1]]): x[2] for x in coords_B})
for s, scenario in enumerate(scenarios):
print(f"Looking for differences in database {s + 1} ...")
rev_ind_A = rev_index(create_codes_index_of_A_matrix(scenario["database"]))
exp = Export(
scenario["database"],
scenario["model"],
scenario["pathway"],
scenario["year"],
"",
)
coords_A = exp.create_A_matrix_coordinates()
new = {(rev_ind_A[x[0]], rev_ind_A[x[1]]): x[2] for x in coords_A}
rev_ind_B = rev_index(create_codes_index_of_B_matrix())
coords_B = exp.create_B_matrix_coordinates()
new.update({(rev_ind_A[x[0]], rev_ind_B[x[1]]): x[2] for x in coords_B})
list_new = set(i[0] for i in original.keys()) ^ set(i[0] for i in new.keys())
ds = (d for d in scenario["database"] if d["code"] in list_new)
# Tag new activities
for d in ds:
d["modified"] = True
# List codes that belong to activities that contain modified exchanges
list_modified = (i[0] for i in new if i in original and new[i] != original[i])
#
# Filter for activities that have modified exchanges
for ds in ws.get_many(
scenario["database"],
ws.either(*[ws.equals("code", c) for c in set(list_modified)]),
):
# Loop through biosphere exchanges and check if
# the exchange also exists in the original database
# and if it has the same value
# if any of these two conditions is False, we tag the exchange
excs = (exc for exc in ds["exchanges"] if exc["type"] == "biosphere")
for exc in excs:
if (ds["code"], exc["input"][0]) not in original or new[
(ds["code"], exc["input"][0])
] != original[(ds["code"], exc["input"][0])]:
exc["modified"] = True
# Same thing for technosphere exchanges,
# except that we first need to look up the provider's code first
excs = (exc for exc in ds["exchanges"] if exc["type"] == "technosphere")
for exc in excs:
if (
exc["name"],
exc["product"],
exc["unit"],
exc["location"],
) in codes_names:
exc_code = codes_names[
(exc["name"], exc["product"], exc["unit"], exc["location"])
]
if new[(ds["code"], exc_code)] != original[(ds["code"], exc_code)]:
exc["modified"] = True
else:
exc["modified"] = True
return scenarios | b1e18f8871e7d9430e5c4eaff41128c72359ce1c | 21,636 |
import numpy as np
def import_tep_sets(lagged_samples: int = 2) -> tuple:
"""
Imports the normal operation training set and 4 of the commonly used test
sets [IDV(0), IDV(4), IDV(5), and IDV(10)] with only the first 22 measured
variables and first 11 manipulated variables
"""
normal_operation = import_sets(0)
testing_sets = import_sets([4, 5, 10], skip_training=True)
X = normal_operation[0][1]
T0 = normal_operation[0][2]
T4 = testing_sets[0][1]
T5 = testing_sets[1][1]
T10 = testing_sets[2][1]
ignored_var = list(range(22, 41))
X = np.delete(X, ignored_var, axis=0)
T0 = np.delete(T0, ignored_var, axis=0)
T4 = np.delete(T4, ignored_var, axis=0)
T5 = np.delete(T5, ignored_var, axis=0)
T10 = np.delete(T10, ignored_var, axis=0)
# Add lagged samples
X = add_lagged_samples(X, lagged_samples)
T0 = add_lagged_samples(T0, lagged_samples)
T4 = add_lagged_samples(T4, lagged_samples)
T5 = add_lagged_samples(T5, lagged_samples)
T10 = add_lagged_samples(T10, lagged_samples)
return(X, T0, T4, T5, T10) | d9affb48e182f4bb79cdb86e70ca90dadd461d51 | 21,638 |
def to_fgdc(obj):
"""
    This is the primary function to call in the module. This function takes a UnifiedMetadata object
and creates a serialized FGDC metadata record.
Parameters
----------
obj : obj
A amg.UnifiedMetadata class instance
Returns
-------
: str
A string encoded FGDC compliant XML metadata file
"""
template = None
for s in obj.sources:
if isinstance(s, FGDCMetadata):
template = s.data
populate_projection_information(template, obj)
populate_bounding_box(template, obj)
populate_raster_info(template, obj)
populate_digital_forms(template, obj)
populate_accuracies(template, obj)
populate_geodetic(template, obj)
template.planar_distance_units = 'meters'
template.online_linkages = obj.doi
if hasattr(obj, 'title'):
template.title = obj.title
if hasattr(obj, 'processing_environment'):
template.processing_environment = obj.processing_environment
# Add the point of contact section to the template.
template.validate()
return template.serialize(use_template=False).decode() | 762f77c45f2e40fd9135b3821e91b9c1c7a30bd9 | 21,639 |
def compute_iqms(settings, name='ComputeIQMs'):
"""
Workflow that actually computes the IQMs
.. workflow::
from mriqc.workflows.functional import compute_iqms
wf = compute_iqms(settings={'output_dir': 'out'})
"""
workflow = pe.Workflow(name=name)
inputnode = pe.Node(niu.IdentityInterface(fields=[
'subject_id', 'session_id', 'task_id', 'acq_id', 'rec_id', 'run_id', 'orig',
'epi_mean', 'brainmask', 'hmc_epi', 'hmc_fd', 'in_tsnr', 'metadata']), name='inputnode')
outputnode = pe.Node(niu.IdentityInterface(
fields=['out_file', 'out_dvars', 'outliers', 'out_spikes', 'out_fft']),
name='outputnode')
deriv_dir = check_folder(op.abspath(op.join(settings['output_dir'], 'derivatives')))
# Compute DVARS
dvnode = pe.Node(nac.ComputeDVARS(save_plot=False, save_all=True), name='ComputeDVARS')
dvnode.interface.estimated_memory_gb = settings[
"biggest_file_size_gb"] * 3
# AFNI quality measures
fwhm = pe.Node(afni.FWHMx(combine=True, detrend=True), name='smoothness')
# fwhm.inputs.acf = True # add when AFNI >= 16
outliers = pe.Node(afni.OutlierCount(fraction=True, out_file='ouliers.out'),
name='outliers')
outliers.interface.estimated_memory_gb = settings[
"biggest_file_size_gb"] * 2.5
quality = pe.Node(afni.QualityIndex(automask=True), out_file='quality.out',
name='quality')
quality.interface.estimated_memory_gb = settings[
"biggest_file_size_gb"] * 3
measures = pe.Node(FunctionalQC(), name='measures')
measures.interface.estimated_memory_gb = settings[
"biggest_file_size_gb"] * 3
workflow.connect([
(inputnode, dvnode, [('hmc_epi', 'in_file'),
('brainmask', 'in_mask')]),
(inputnode, measures, [('epi_mean', 'in_epi'),
('brainmask', 'in_mask'),
('hmc_epi', 'in_hmc'),
('hmc_fd', 'in_fd'),
('in_tsnr', 'in_tsnr')]),
(inputnode, fwhm, [('epi_mean', 'in_file'),
('brainmask', 'mask')]),
(inputnode, quality, [('hmc_epi', 'in_file')]),
(inputnode, outliers, [('hmc_epi', 'in_file'),
('brainmask', 'mask')]),
(dvnode, measures, [('out_all', 'in_dvars')]),
(dvnode, outputnode, [('out_all', 'out_dvars')]),
(outliers, outputnode, [('out_file', 'outliers')])
])
# Save to JSON file
datasink = pe.Node(IQMFileSink(
modality='bold', out_dir=deriv_dir), name='datasink')
workflow.connect([
(inputnode, datasink, [('subject_id', 'subject_id'),
('session_id', 'session_id'),
('task_id', 'task_id'),
('acq_id', 'acq_id'),
('rec_id', 'rec_id'),
('run_id', 'run_id'),
('metadata', 'metadata')]),
(outliers, datasink, [(('out_file', _parse_tout), 'aor')]),
(quality, datasink, [(('out_file', _parse_tqual), 'aqi')]),
(measures, datasink, [('out_qc', 'root')]),
(fwhm, datasink, [(('fwhm', fwhm_dict), 'root0')]),
(datasink, outputnode, [('out_file', 'out_file')])
])
if settings.get('fft_spikes_detector', False):
# FFT spikes finder
spikes_fft = pe.Node(niu.Function(
input_names=['in_file'],
output_names=['n_spikes', 'out_spikes', 'out_fft'],
function=slice_wise_fft), name='SpikesFinderFFT')
workflow.connect([
(inputnode, spikes_fft, [('orig', 'in_file')]),
(spikes_fft, outputnode, [('out_spikes', 'out_spikes'),
('out_fft', 'out_fft')]),
(spikes_fft, datasink, [('n_spikes', 'spikes_num')])
])
return workflow | 90ce62cd50ed2818e01c55eabde437b84a10dc70 | 21,640 |
def GetDepthFromIndicesMapping(list_indices):
"""
GetDepthFromIndicesMapping
==========================
Gives the depth of the nested list from the index mapping
@param list_indices: a nested list representing the indexes of the nested lists by depth
@return: depth
"""
return max([len(x[0]) for x in list_indices])+1 | c2318b3c6a398289c2cbf012af4c562d3d8bc2da | 21,642 |
from scipy import signal
import numpy as np
def lowpassIter(wp, ws, fs, f, atten=90, n_max=400):
"""Design a lowpass filter using f by iterating to minimize the number
of taps needed.
Args:
wp: Passband frequency
ws: Stopband frequency
fs: Sample rate
f: Function to design filter
atten: desired attenuation (dB)
n_max: Maximum semi-length of filter
Returns:
Filter taps.
"""
n = bellangerord(0.01, 0.01, fs, (ws-wp))//2
n_prev = 1
n_lo = 1
n_hi = None
if n > n_max:
n = n_max
while n != n_prev:
N = 2*n + 1
taps = f(N, wp, ws, fs)
w, h = signal.freqz(taps, worN=8000)
w = 0.5*fs*w/np.pi
hdb = 20*np.log10(np.abs(h))
db = np.max(hdb[w >= ws])
n_prev = n
if db > -atten:
if n == n_max:
break
n_lo = n
if n_hi:
n = (n_lo + n_hi) // 2
else:
n = 2*n
else:
n_hi = n
n = (n_lo + n_hi) // 2
if n > n_max:
n = n_max
return taps | c9733a8d7c225dfbf76a4a11441ce75bd8f9042f | 21,643 |
from typing import Union
from typing import Dict
import numpy
def evaluate_themes(
ref_measurement: Measurement,
test_measurement: Measurement,
themes: Union[FmaskThemes, ContiguityThemes, TerrainShadowThemes],
) -> Dict[str, float]:
"""
A generic tool for evaluating thematic datasets.
"""
values = [v.value for v in themes]
n_values = len(values)
minv = min(values)
maxv = max(values)
# read data and reshape to 1D
ref_data = ref_measurement.read().ravel()
test_data = test_measurement.read().ravel()
ref_h = histogram(ref_data, minv=minv, maxv=maxv, reverse_indices="ri")
ref_hist = ref_h["histogram"]
ref_ri = ref_h["ri"]
theme_changes = dict()
for theme in themes:
i = theme.value
# check we have data for this category
if ref_hist[i] == 0:
# no changes as nothing exists in the reference data
theme_changes[theme] = numpy.full((n_values,), numpy.nan)
continue
idx = ref_ri[ref_ri[i] : ref_ri[i + 1]]
values = test_data[idx]
h = histogram(values, minv=minv, maxv=maxv)
hist = h["histogram"]
pdf = hist / numpy.sum(hist)
theme_changes[theme] = pdf * 100
# split outputs into separate records
result = dict()
for theme in themes:
for theme2 in themes:
key = f"{theme.name.lower()}_2_{theme2.name.lower()}"
result[key] = theme_changes[theme][theme2.value]
return result | d6f56d54e19b35d6ab3d716d7a231fb27bc3db9a | 21,644 |
def test_global_averaging():
"""Test that `T==N` and `F==pow2(N_frs_max)` doesn't error, and outputs
close to `T==N-1` and `F==pow2(N_frs_max)-1`
"""
if skip_all:
return None if run_without_pytest else pytest.skip()
np.random.seed(0)
N = 512
params = dict(shape=N, J=9, Q=4, J_fr=5, Q_fr=2, average=True,
average_fr=True, out_type='dict:array', pad_mode='reflect',
pad_mode_fr='conj-reflect-zero', max_pad_factor=None,
max_pad_factor_fr=None, frontend=default_backend,
sampling_filters_fr=('resample', 'resample'))
x = echirp(N)
x += np.random.randn(N)
outs = {}
metas = {}
Ts, Fs = (N - 1, N), (2**6 - 1, 2**6)
for T in Ts:
# N_frs_max ~= Q*max(p2['j'] for p2 in psi2_f); found 29 at runtime
for F in Fs:
jtfs = TimeFrequencyScattering1D(**params, T=T, F=F)
assert (jtfs.average_fr_global if F == Fs[-1] else
not jtfs.average_fr_global)
assert (jtfs.average_global if T == Ts[-1] else
not jtfs.average_global)
out = jtfs(x)
out = jtfs_to_numpy(out)
outs[ (T, F)] = out
metas[(T, F)] = jtfs.meta()
T0F0 = coeff_energy(outs[(Ts[0], Fs[0])], metas[(Ts[0], Fs[0])])
T0F1 = coeff_energy(outs[(Ts[0], Fs[1])], metas[(Ts[0], Fs[1])])
T1F0 = coeff_energy(outs[(Ts[1], Fs[0])], metas[(Ts[1], Fs[0])])
T1F1 = coeff_energy(outs[(Ts[1], Fs[1])], metas[(Ts[1], Fs[1])])
if metric_verbose:
print("\nGlobal averaging reldiffs:")
th = .15
for pair in T0F0:
ref = T0F0[pair]
reldiff01 = abs(T0F1[pair] - ref) / ref
reldiff10 = abs(T1F0[pair] - ref) / ref
reldiff11 = abs(T1F1[pair] - ref) / ref
assert reldiff01 < th, "%s > %s | %s" % (reldiff01, th, pair)
assert reldiff10 < th, "%s > %s | %s" % (reldiff10, th, pair)
assert reldiff11 < th, "%s > %s | %s" % (reldiff11, th, pair)
if metric_verbose:
print("(01, 10, 11) = ({:.2e}, {:.2e}, {:.2e}) | {}".format(
reldiff01, reldiff10, reldiff11, pair)) | d8a7c08754c403a2a3450be6ee5856a57f686003 | 21,645 |
import numpy as np
def poly_coefficients(df: np.ndarray, z: np.ndarray, cov: np.ndarray) -> np.ndarray:
"""
Calculate the coefficients in the free energy polynomial
Parameters
----------
df : [2,iphase]
Difference between next and current integration points
z: np.ndarray [2,iphase]
        Conjugate variables (z1,z2) of current point (f1,f2) for both I and II phases
cov: np.ndarray [3,iphase]
Covariances [cov(z1,Z1),cov(z2,Z2),cov(z1,Z2)] of current point for both I and II phases
Returns
-------
    coef : [6,2]
Coefficients in the free energy polynomial
"""
coef = np.zeros((6,2))
coef[0,:] = z[0,:]*df[0,:]
coef[1,:] = z[1,:]*df[1,:]
coef[2,:] = cov[0,:]*df[0,:]**2
coef[3,:] = cov[1,:]*df[1,:]**2
coef[4,:] = cov[2,:]*df[0,:]*df[1,:]
coef[5,:] = cov[2,:]*df[0,:]
return coef | 67270bbb03a8f9d006e4d22d22f114ec6deab057 | 21,646 |
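# Hedged usage sketch for poly_coefficients above with two phases (all numbers made up).
import numpy as np
df = np.array([[0.1, 0.2], [0.3, 0.4]])   # (2, iphase)
z = np.ones((2, 2))                        # (2, iphase)
cov = np.full((3, 2), 0.5)                 # (3, iphase)
coef = poly_coefficients(df, z, cov)
assert coef.shape == (6, 2)
assert np.allclose(coef[0], [0.1, 0.2])     # z1 * df1
assert np.allclose(coef[2], [0.005, 0.02])  # cov(z1,z1) * df1**2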
def NoneInSet(s):
"""Inverse of CharSet (parse as long as character is not in set). Result is string."""
return ConcatenateResults(Repeat(NoneOf(s), -1)) | 00bb27c434b630926eb8d2012ebed6bb1bf368d4 | 21,647 |
import re
def _read_part(f, verbose):
"""Reads the part name and creates a mesh with that name.
:param f: The file from where to read the nodes from.
:type f: file object at the nodes
:param verbose: Determines what level of print out to the console.
:type verbose: 0, 1 or 2
:return: Nothing, but has the side effect of setting the pointer
in the file object f to the line with the next keyword.
"""
re_part = re.compile("\*Part, name=(.*)")
line = f.readline()
match = re_part.match(line)
if not match:
raise ReadInpFileError("Error parsing file. Expected '*Part, "
"name=XXX', read '" + line + "'.")
part_name = match.group(1)
if verbose == 1 or verbose == 2:
print("Read part with name " + str(part_name))
# Initiate a mesh class with the same name as the part
return Mesh(part_name) | d6546e39dc0998e979c3cb03b60f149e2a474518 | 21,648 |
async def get_prefix(bot, message):
"""Checks if the bot has a configuration tag for the prefix. Otherwise, gets the default."""
default_prefix = await get_default_prefix(bot)
if isinstance(message.channel, discord.DMChannel):
return default_prefix
my_roles = [role.name for role in message.guild.me.roles]
for role_name in my_roles:
if role_name[:11] == "fox_prefix:":
return role_name[11:]
return default_prefix | 15d44e0a976858b5217b68740485dddd4b4bf0ef | 21,649 |
def HA19(request):
"""
    Returns the render for the SDG graph.
"""
data = dataFrameHA()
figure = px.bar(data, x = "Faculty", y = "HA 19", labels = {"Faculty":"Faculties",
"HA19":"Number of Modules Corresponding to HA 19"})
figure.write_image("core/static/HA19.png")
return render(request, 'HA19.html') | 6cdeb6589fffd910b981c583213e268e2ae0f1e2 | 21,651 |
def beta(data, market, periods, normalize = False):
"""
.. Beta
Parameters
----------
data : `ndarray`
An array containing values.
market : `ndarray`
An array containing market values to be used as the comparison
when calculating beta.
periods : `int`
Number of periods to be used.
normalize : `bool`, optional
Specify whether to normalize the standard deviation calculation
within the beta calculation with n - 1 instead of n.
Defaults to False.
Returns
-------
`ndarray`
An array containing beta values.
Examples
--------
>>> import qufilab as ql
>>> import numpy as np
...
>>> # Load sample dataframe.
>>> df = ql.load_sample('MSFT')
>>> df_market = ql.load_sample('DJI')
>>> beta = ql.beta(df['close'], df_market['close'], periods = 10)
>>> print(beta)
[nan nan nan ... 0.67027616 0.45641977 0.3169785]
"""
return beta_calc(data, market, periods, normalize) | 23071387d52d9a715dcb286cc74779d83438d453 | 21,653 |
import math
def heading_from_to(p1: Vector, p2: Vector) -> float:
"""
Returns the heading in degrees from point 1 to point 2
"""
x1 = p1[0]
y1 = p1[1]
x2 = p2[0]
y2 = p2[1]
angle = math.atan2(y2 - y1, x2 - x1) * (180 / math.pi)
angle = (-angle) % 360
return abs(angle) | a9fe4fe79fdff0c390e5870fb05d4e9f0c371185 | 21,654 |
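A quick usage sketch; plain (x, y) tuples are assumed to stand in for Vector here, since only indexing is used. It illustrates the convention: the atan2 angle in degrees is negated and wrapped to [0, 360).
print(heading_from_to((0.0, 0.0), (1.0, 0.0)))   # 0.0
print(heading_from_to((0.0, 0.0), (0.0, 1.0)))   # 270.0
print(heading_from_to((0.0, 0.0), (1.0, 1.0)))   # 315.0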
import math
def selSPEA2Diverse(individuals, k):
"""Apply SPEA-II selection operator on the *individuals*. Usually, the
size of *individuals* will be larger than *n* because any individual
present in *individuals* will appear in the returned list at most once.
    Having the size of *individuals* equal to *n* will have no effect other
than sorting the population according to a strength Pareto scheme. The
list returned contains references to the input *individuals*. For more
details on the SPEA-II operator see [Zitzler2001]_.
:param individuals: A list of individuals to select from.
:param k: The number of individuals to select.
:returns: A list of selected individuals.
.. [Zitzler2001] Zitzler, Laumanns and Thiele, "SPEA 2: Improving the
strength Pareto evolutionary algorithm", 2001.
"""
N = len(individuals)
nGenes= len(individuals[0])
L = len(individuals[0].fitness.values)
K = math.sqrt(N)
strength_fits = [0] * N
fits = [0] * N
dominating_inds = [list() for i in range(N)]
for i, ind_i in enumerate(individuals):
for j, ind_j in enumerate(individuals[i+1:], i+1):
if ind_i.fitness.dominates(ind_j.fitness):
strength_fits[i] += 1
dominating_inds[j].append(i)
elif ind_j.fitness.dominates(ind_i.fitness):
strength_fits[j] += 1
dominating_inds[i].append(j)
for i in range(N):
for j in dominating_inds[i]:
fits[i] += strength_fits[j]
# Choose all non-dominated individuals
chosen_indices = [i for i in range(N) if fits[i] < 1]
if len(chosen_indices) < k: # The archive is too small
print('>>>>>> TOO SMALL', len(chosen_indices),k)
distances = populationChromosomeDistances(individuals)
distances=distances/np.max(distances)
#[print('Chosen',chosen_indices)
#[print('Ind',i)
for i in range(N):
print(distances[i,:])
kth_dist = _randomizedSelect(distances[i,:], 0, N - 1, K)
density = 1.0 / (kth_dist + 2.0)
fits[i] += density
next_indices = [(fits[i], i) for i in range(N) if not i in chosen_indices]
next_indices.sort()
#print next_indices
chosen_indices += [i for _, i in next_indices[:k - len(chosen_indices)]]
elif len(chosen_indices) > k: # The archive is too large
print('>>>>>> TOO BIG')
N = len(chosen_indices)
distances = [[0.0] * N for i in range(N)]
sorted_indices = [[0] * N for i in range(N)]
for i in range(N):
for j in range(i + 1, N):
dist = 0.0
for l in range(L):
val = individuals[chosen_indices[i]].fitness.values[l] - \
individuals[chosen_indices[j]].fitness.values[l]
dist += val * val
distances[i][j] = dist
distances[j][i] = dist
distances[i][i] = -1
# Insert sort is faster than quick sort for short arrays
for i in range(N):
for j in range(1, N):
l = j
while l > 0 and distances[i][j] < distances[i][sorted_indices[i][l - 1]]:
sorted_indices[i][l] = sorted_indices[i][l - 1]
l -= 1
sorted_indices[i][l] = j
size = N
to_remove = []
while size > k:
# Search for minimal distance
min_pos = 0
for i in range(1, N):
for j in range(1, size):
dist_i_sorted_j = distances[i][sorted_indices[i][j]]
dist_min_sorted_j = distances[min_pos][sorted_indices[min_pos][j]]
if dist_i_sorted_j < dist_min_sorted_j:
min_pos = i
break
elif dist_i_sorted_j > dist_min_sorted_j:
break
# Remove minimal distance from sorted_indices
for i in range(N):
distances[i][min_pos] = float("inf")
distances[min_pos][i] = float("inf")
for j in range(1, size - 1):
if sorted_indices[i][j] == min_pos:
sorted_indices[i][j] = sorted_indices[i][j + 1]
sorted_indices[i][j + 1] = min_pos
# Remove corresponding individual from chosen_indices
to_remove.append(min_pos)
size -= 1
for index in reversed(sorted(to_remove)):
del chosen_indices[index]
print(chosen_indices)
Sel=[individuals[i] for i in chosen_indices]
print(len(chosen_indices),k)
SelU=[]
for i in chosen_indices:
if individuals[i] not in SelU:
SelU.append(individuals[i])
print('Selected')
print(len(Sel),k)
#jjprint(Sel)
print('Unique ones')
print(len(SelU),k)
#print(SelU)
if len(SelU)<k:
print('>>>>>> NEED FOR MORE')
#import pdb
#pdb.set_trace()
return Sel | d5b651844c3dc8ad96e04f9a93e264d368b066a3 | 21,655 |
def utilization_to_states(state_config, utilization):
""" Get the state history corresponding to the utilization history.
Adds the 0 state to the beginning to simulate the first transition.
(map (partial utilization-to-state state-config) utilization))
:param state_config: The state configuration.
:type state_config: list(float)
:param utilization: The history of the host's CPU utilization.
:type utilization: list(float)
:return: The state history.
:rtype: list(int)
"""
return [utilization_to_state(state_config, x) for x in utilization] | 1eaaff399dfd6981feb8de56eb5dc4b4baa8fd3f | 21,656 |
from typing import Dict
def generate_person(results: Dict):
"""
    Create a person dictionary from a SQL query result row.
    :param results: a fetched row whose text columns are bytes
    :return: a dict describing the person, or None if the row is empty
"""
person = None
if len(results) > 0:
person = {
"id": results[0],
"name": results[1].decode("utf-8"),
"img_url": results[2].decode("utf-8"),
"location": results[3].decode("utf-8"),
"colors": (results[4].decode("utf-8")).split(",")
}
return person | 21c2f2c8fa43c43eabf06785203556ccae708d45 | 21,658 |
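A hedged usage sketch; a plain tuple stands in for the fetched row, whose text columns are assumed to arrive as bytes (as the `.decode` calls imply).
row = (7, b"Ada Lovelace", b"http://example.com/ada.png", b"London", b"green,blue")
print(generate_person(row))
# {'id': 7, 'name': 'Ada Lovelace', 'img_url': 'http://example.com/ada.png',
#  'location': 'London', 'colors': ['green', 'blue']}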
def paliindrome_sentence(sentence: str) -> bool:
"""
    Return True if the sentence reads the same backwards, ignoring case and non-alphanumeric characters.
"""
string = ''
for char in sentence:
if char.isalnum():
string += char
return string[::-1].casefold() == string.casefold() | 4559f9f823f748f137bbe1eb96070dba8e7d867d | 21,659 |
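Two quick checks of the expected behaviour:
print(paliindrome_sentence("A man, a plan, a canal: Panama"))   # True
print(paliindrome_sentence("Hello, world"))                     # False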
def get_default_pool_set():
"""Return the names of supported pooling operators
Returns:
a tuple of pooling operator names
"""
output = ['sum', 'correlation1', 'correlation2', 'maximum']
return output | 32d28fdb80ecdacab8494251edd87b566128fd79 | 21,660 |
def virtual_networks_list_all(**kwargs):
"""
.. versionadded:: 2019.2.0
List all virtual networks within a subscription.
CLI Example:
.. code-block:: bash
salt-call azurearm_network.virtual_networks_list_all
"""
result = {}
netconn = __utils__["azurearm.get_client"]("network", **kwargs)
try:
vnets = __utils__["azurearm.paged_object_to_list"](
netconn.virtual_networks.list_all()
)
for vnet in vnets:
result[vnet["name"]] = vnet
except CloudError as exc:
__utils__["azurearm.log_cloud_error"]("network", str(exc), **kwargs)
result = {"error": str(exc)}
return result | 52b9d655a2459d1b2f2904fc216b5ccc8ad12b04 | 21,661 |
from itertools import combinations
import numpy as np
def generate_state_matrix(Hprime, gamma):
"""Full combinatorics of Hprime-dim binary vectors with at most gamma ones.
:param Hprime: Vector length
:type Hprime: int
:param gamma: Maximum number of ones
    :type gamma: int
"""
sl = []
for g in range(2,gamma+1):
for s in combinations(list(range(Hprime)), g):
sl.append( np.array(s, dtype=np.int8) )
state_list = sl
no_states = len(sl)
sm = np.zeros((no_states, Hprime), dtype=np.uint8)
for i in range(no_states):
s = sl[i]
sm[i, s] = 1
state_matrix = sm
state_abs = sm.sum(axis=1)
#print("state matrix updated")
return state_list, no_states, state_matrix, state_abs | 165a6bfe742569780939783dc7f1b086245c747e | 21,662 |
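A small usage sketch: with Hprime=4 and gamma=2 the enumeration yields the C(4,2)=6 binary vectors containing exactly two ones (the loop starts at g=2, so sparser states are handled elsewhere in the original model).
state_list, no_states, state_matrix, state_abs = generate_state_matrix(4, 2)
print(no_states)      # 6
print(state_matrix)   # 6 x 4 matrix, each row with exactly two ones
print(state_abs)      # [2 2 2 2 2 2]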
def playfair_decipher(message, keyword, padding_letter='x',
padding_replaces_repeat=False, letters_to_merge=None,
wrap_alphabet=KeywordWrapAlphabet.from_a):
"""Decipher a message using the Playfair cipher."""
column_order = list(range(5))
row_order = list(range(5))
if letters_to_merge is None:
letters_to_merge = {'j': 'i'}
grid = polybius_grid(keyword, column_order, row_order,
letters_to_merge=letters_to_merge,
wrap_alphabet=wrap_alphabet)
message_bigrams = playfair_bigrams(
sanitise(message), padding_letter=padding_letter,
padding_replaces_repeat=padding_replaces_repeat)
plaintext_bigrams = [playfair_decipher_bigram(b, grid, padding_letter=padding_letter) for b in message_bigrams]
return cat(plaintext_bigrams) | d66afe1447be6b5453a5271f80e678d5e1664c95 | 21,664 |
import json
def create_role(role_name):
"""Create a role."""
role_dict = {
"Version" : "2012-10-17",
"Statement" : [
{
"Effect" : "Allow",
"Principal" : {
"Service" : "lambda.amazonaws.com"
},
"Action" : "sts:AssumeRole"
}
]
}
cli_input = json.dumps(role_dict)
cmd = [
"aws",
"iam",
"create-role",
"--role-name",
role_name,
"--assume-role-policy-document",
cli_input
]
output = execute_command(cmd)
output_json = json.loads(output.decode("utf-8"))
return output_json["Role"]["Arn"] | 743a88c5e071d1ed2e968f7ec3a7421eaab4d69e | 21,665 |
def evaluate_field(record, field_spec):
"""
Evaluate a field of a record using the type of the field_spec as a guide.
"""
if type(field_spec) is int:
return str(record[field_spec])
elif type(field_spec) is str:
return str(getattr(record, field_spec))
else:
return str(field_spec(record)) | 66fdc96aa774a27c225fb273040acdbbf4ff9979 | 21,666 |
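A usage sketch showing the three kinds of field_spec the function accepts: an integer index, an attribute name, or a callable.
from collections import namedtuple

Row = namedtuple("Row", ["name", "age"])
row = Row("Ada", 36)
print(evaluate_field(row, 0))                     # 'Ada'  (int -> positional index)
print(evaluate_field(row, "age"))                 # '36'   (str -> attribute lookup)
print(evaluate_field(row, lambda r: r.age + 1))   # '37'   (anything else -> called)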
import numpy as np
def project_points(X, K, R, T, distortion_params=None):
"""
Project points from 3d world coordinates to 2d image coordinates
"""
x_2d = np.dot(K, (np.dot(R, X) + T))
x_2d = x_2d[:-1, :] / x_2d[-1, :]
if distortion_params is not None:
x_2d_norm = np.concatenate((x_2d, np.ones((1, x_2d.shape[1]))), 0)
x_3d_norm = np.dot(np.linalg.pinv(K), x_2d_norm)
x_2d_post = x_3d_norm[:-1, :] / x_3d_norm[-1, :]
r = np.sqrt(x_2d_post[0, :]**2 + x_2d_post[1, :]**2)
correction = (1 + distortion_params[0] * r**2 +
distortion_params[1] * r**4 +
distortion_params[4] * r**6)
x_2d_corr = x_2d_post * correction
x_3d_corr = np.concatenate((
x_2d_corr, np.ones((1, x_2d_corr.shape[1]))), 0)
x_2d = np.dot(K, x_3d_corr)
x_2d = x_2d[:-1, :] / x_2d[-1, :]
return x_2d | e7f2c3f864e7beb798194eca3425cd9342b2c881 | 21,667 |
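A toy sketch with an assumed pinhole camera (focal length 800 px, principal point (320, 240)); a point on the optical axis should project to the principal point.
import numpy as np

K = np.array([[800.0,   0.0, 320.0],
              [  0.0, 800.0, 240.0],
              [  0.0,   0.0,   1.0]])
R = np.eye(3)
T = np.zeros((3, 1))
X = np.array([[0.0], [0.0], [4.0]])   # one 3D point, 4 units straight ahead
print(project_points(X, K, R, T))     # [[320.], [240.]]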
import numpy as np
def rate_multipressure(qD, delta_p, B, mu, perm, h):
"""Calculate Rate as Sum of Constant Flowing Pressures"""
return ((.007082 * perm * h) / (B * mu)) * (np.sum(qD * delta_p)) | a8621613abb63bb6f15c71ab3ba02d65ab160e6b | 21,669 |
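A worked example with made-up reservoir numbers (not field data), just to show the call signature and the magnitude of the result:
qD = np.array([0.5, 0.3])            # dimensionless rates
delta_p = np.array([200.0, 150.0])   # pressure drops, psi
q = rate_multipressure(qD, delta_p, B=1.2, mu=0.8, perm=50.0, h=30.0)
print(round(q, 2))                   # 1604.52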
def osculating_elements_of(position, reference_frame=None, gm_km3_s2=None):
"""Produce the osculating orbital elements for a position.
`position` is an instance of :class:`~skyfield.positionlib.ICRF`.
These are commonly returned by the ``at()`` method of any
Solar System body. ``reference_frame`` is an optional argument
and is a 3x3 numpy array. The reference frame by default
is the ICRF. Commonly used reference frames are found in
skyfield.data.spice.inertial_frames. ``gm_km3_s2`` is an optional
float argument representing the gravitational parameter (G*M) in
units of km^3/s^2, which is the sum of the masses of the orbiting
object and the object being orbited. If not specified, this is
calculated for you.
This function returns an instance of :class:`~skyfield.elementslib.OsculatingElements`
"""
if gm_km3_s2 is None:
if not isinstance(position.center, int):
raise ValueError('Skyfield is unable to calculate a value for GM. You'
' should specify one using the `gm_km3_s2` keyword argument')
gm_km3_s2 = GM_dict.get(position.center, 0.0)
orbits_barycenter = 0 <= position.center <= 9
if not orbits_barycenter:
gm_km3_s2 += GM_dict.get(position.target, 0.0)
if gm_km3_s2 == 0:
raise ValueError('Skyfield is unable to calculate a value for GM. You'
' should specify one using the `gm_km3_s2` keyword argument')
if reference_frame is not None:
position_vec = Distance(reference_frame.dot(position.position.au))
velocity_vec = Velocity(reference_frame.dot(position.velocity.au_per_d))
else:
position_vec = position.position
velocity_vec = position.velocity
return OsculatingElements(position_vec,
velocity_vec,
position.t,
gm_km3_s2) | cf6096ed0eeaeccaa013e052ad1a2fe7e63940f5 | 21,670 |
import copy
def rename_actions(P: NestedDicts, policy: DetPolicy) -> NestedDicts:
""" Renames actions in P so that the policy action is always 0."""
out: NestedDicts = {}
for start_state, actions in P.items():
new_actions = copy.copy(actions)
policy_action = policy(start_state)
new_actions[0], new_actions[policy_action] = actions[policy_action], actions[0]
out[start_state] = new_actions
return out | 433433ded44f118ec05ddbf9fde17825b0fd4e62 | 21,671 |
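A hedged sketch: `NestedDicts` is read as Dict[state, Dict[action, ...]] and `DetPolicy` as a callable mapping state -> action; plain dicts and a lambda stand in for them here.
P = {"s0": {0: "stay", 1: "go"}, "s1": {0: "stay", 1: "go"}}
policy = lambda s: 1 if s == "s0" else 0
print(rename_actions(P, policy))
# {'s0': {0: 'go', 1: 'stay'}, 's1': {0: 'stay', 1: 'go'}}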
import csv
from datetime import datetime
async def get_category(category):
"""
Retrieves the data for the provided category. The data is cached for 1 hour.
:returns: The data for category.
:rtype: dict
"""
# Adhere to category naming standard.
category = category.lower()
# URL to request data from.
url = BASE_URL + "time_series_covid19_%s_global.csv" % category
# Request the data
async with httputils.CLIENT_SESSION.get(url) as response:
text = await response.text()
# Parse the CSV.
data = list(csv.DictReader(text.splitlines()))
# The normalized locations.
locations = []
for item in data:
# Filter out all the dates.
dates = dict(filter(lambda element: date_util.is_date(element[0]), item.items()))
# Make location history from dates.
history = {date: int(amount or 0) for date, amount in dates.items()}
# Country for this location.
country = item["Country/Region"]
# Latest data insert value.
latest = list(history.values())[-1]
# Normalize the item and append to locations.
locations.append(
{
# General info.
"country": country,
"country_code": countries.country_code(country),
"province": item["Province/State"],
# Coordinates.
"coordinates": {"lat": item["Lat"], "long": item["Long"],},
# History.
"history": history,
# Latest statistic.
"latest": int(latest or 0),
}
)
# Latest total.
latest = sum(map(lambda location: location["latest"], locations))
# Return the final data.
return {
"locations": locations,
"latest": latest,
"last_updated": datetime.utcnow().isoformat() + "Z",
"source": "https://github.com/ExpDev07/coronavirus-tracker-api",
} | a40ba9ec753b8d7a0b2a464675a1bb541be193f0 | 21,672 |
def plot_ellipses_area(
params, depth="None", imin=0, imax=398, jmin=0, jmax=898, figsize=(10, 10)
):
"""Plot ellipses on a map in the Salish Sea.
    :arg params: an array containing the parameters (possibly at different
        depths and/or locations).
    :type params: np.ndarray
    :arg depth: The depth at which you want to see the ellipse. If the params
        array has no depth dimension, put 'None'. Default 'None'.
    :type depth: int
:arg imin: Minimum horizontal index that will be plotted.
:type imin: int
:arg imax: Maximum horizontal index that will be plotted.
:type imax: int
:arg jmin: Minimum vertical index that will be plotted.
:type jmin: int
:arg jmax: Maximum vertical index that will be plotted.
:type jmax: int
"""
phi = 0
fig, ax = plt.subplots(1, 1, figsize=figsize)
k = np.zeros((898, 398))
m = np.zeros((898, 398))
scale = 10
for q in np.arange(jmin, jmax):
for l in np.arange(imin, imax):
k[q, l] = q * np.cos(phi * np.pi / 180.0) + l * np.sin(phi * np.pi / 180.0)
m[q, l] = -q * np.sin(phi * np.pi / 180.0) + l * np.cos(phi * np.pi / 180.0)
if depth == "None":
for x in np.arange(imin, imax):
for y in np.arange(jmin, jmax):
if params[y, x, 1] > 0:
thec = "b"
else:
thec = "r"
ellsc = Ellipse(
xy=(m[y, x], k[y, x]),
width=scale * params[y, x, 0],
height=scale * params[y, x, 1],
angle=params[y, x, 2] - 29,
color=thec,
)
ax.add_artist(ellsc)
else:
for x in np.arange(imin, imax):
for y in np.arange(jmin, jmax):
if params[y, x, depth, 2] > 0:
thec = "b"
else:
thec = "r"
ellsc = Ellipse(
xy=(m[y, x], k[y, x]),
width=scale * params[y, x, depth, 1],
height=scale * params[y, x, depth, 2],
angle=params[y, x, depth, 3] - 29,
color=thec,
)
ax.add_artist(ellsc)
grid_B = nc.Dataset(
"/data/dlatorne/MEOPAR/NEMO-forcing/grid/bathy_meter_SalishSea2.nc"
)
bathy = grid_B.variables["Bathymetry"][:, :]
contour_interval = [-0.01, 0.01]
ax.contourf(
m[jmin:jmax, imin:imax],
k[jmin:jmax, imin:imax],
bathy.data[jmin:jmax, imin:imax],
contour_interval,
colors="black",
)
ax.contour(
m[jmin:jmax, imin:imax],
k[jmin:jmax, imin:imax],
bathy.data[jmin:jmax, imin:imax],
[5],
colors="black",
)
ax.set_title("Tidal ellipse", fontsize=20)
ax.set_xlabel("x index", fontsize=16)
ax.set_ylabel("y index", fontsize=16)
print("red is clockwise")
return fig | b1f96ae602b6e4b1db725ac4efd2be193c23c29c | 21,673 |
from typing import Dict
def populate_workflow_request_body(manifest_data: Dict):
"""
    Populate the workflow request body with the passed data according to the API specification.
    :param manifest_data: item data from manifest files
:return: populated request
:rtype: dict
"""
request = {
"runId": "",
"executionContext": {
"acl": {
"owners": [],
"viewers": []
},
"legal": {
"legaltags": [],
"otherRelevantDataCountries": [],
"compliant": "compliant"
},
"Payload": {
"AppKey": "test-app",
"data-partition-id": "opendes"
},
"manifest": ""
}
}
request["runId"] = generate_id()
request["executionContext"]["acl"]["owners"].append(config.get("REQUEST", "acl_owner"))
request["executionContext"]["acl"]["viewers"].append(config.get("REQUEST", "acl_viewer"))
request["executionContext"]["legal"]["legaltags"].append(config.get("REQUEST", "legal_tag"))
request["executionContext"]["legal"]["otherRelevantDataCountries"].append(
config.get("REQUEST", "other_relevant_data_countries"))
request["executionContext"]["manifest"] = manifest_data
return request | f0ec863d835907402a9a5fa3ce3c61ea1e9a4b69 | 21,676 |
def is_leap_year(year):
"""
Is the current year a leap year?
Args:
y (int): The year you wish to check.
Returns:
bool: Whether the year is a leap year (True) or not (False).
"""
if year % 4 == 0 and (year % 100 > 0 or year % 400 == 0): return True
return False | 16e4c83adc9d42dae2396186f980755b33af9188 | 21,678 |
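A couple of quick checks of the rule (divisible by 4, except century years not divisible by 400):
assert is_leap_year(2000) and is_leap_year(2024)
assert not is_leap_year(1900) and not is_leap_year(2023)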
def blank_dog():
"""Set up (16, 3) array of dog with initial joint positions"""
length = 0.5
width = 0.2
ankle_length = 0.1
ankle_to_knee = 0.2
knee_to_shoulder = 0.05
O = Vector(0,0,0) # origin
out = []
for lengthwise in [-1, +1]:
for widthwise in [+1, -1]:
foot = O + length * Vector(lengthwise/2,0,0) + width * Vector(0, widthwise/2, 0)
ankle = foot + ankle_length * Vector(-0.3, 0, 1).unit()
knee = ankle + ankle_to_knee * Vector(-0.1, 0, 1).unit()
shoulder = knee + knee_to_shoulder * Vector(0.05,0,1).unit()
if n_joints == 16: out += [foot, ankle, knee, shoulder]
elif n_joints == 8: out += [foot, shoulder]
return np.array(out) | af53bd94e273b79b44ff25fff32245ea0dcae280 | 21,680 |
def random_majority_link_clf():
"""
for link classification we do not select labels from a fixed distribution
but instead we set labels to the number of possible segments in a sample.
I.e. we only predict a random link out of all the possible link paths in a sample.
"""
def clf(labels, k:int):
##only to self
#return np.arange(k)
# only one forward
#return [min(i+1, k-1) for i in range(k)]
# only one back
return [max(0, i-1) for i in range(k)]
## link to the segment behind or the one ahead.
## If i == k-1, we take i or i-1. if i == 0, we take i or i+1
#return [random.choice([max(0, i-1), i, min(i+1, k-1)]) for i in range(k)]
return clf | f53f2fee85914e25d5e407808dcfbee623b0782a | 21,682 |
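A usage sketch: with the currently active branch, every segment links to the segment directly behind it (segment 0 links to itself).
clf = random_majority_link_clf()
print(clf(labels=None, k=5))   # [0, 0, 1, 2, 3]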
def db_to_df(db_table):
"""Reads in a table from the board games database as pandas DataFrame"""
query = f"SELECT * FROM {db_table};"
pd_table = pd.read_sql(query, DB)
return pd_table | d20f876c7a7e8da891dc949e85eb35932c6ae5fa | 21,683 |
def player_input_choice() -> int:
"""Function that takes the player input as the position for his|her marker"""
marker_position = 0
while marker_position not in range(1, 10) or not tic_tac_toe.check_the_cell(marker_position):
marker_position = int(input("Choose the position for your marker from 1 to 9: "))
return marker_position | b717068f4e1aa36251b1ee1503034a3f60b193c7 | 21,685 |
def increment_datetime_by_string(mydate, increment, mult=1):
"""Return a new datetime object incremented with the provided
relative dates specified as string.
Additional a multiplier can be specified to multiply the increment
before adding to the provided datetime object.
Usage:
.. code-block:: python
>>> dt = datetime(2001, 9, 1, 0, 0, 0)
>>> string = "60 seconds, 4 minutes, 12 hours, 10 days, 1 weeks, 5 months, 1 years"
>>> increment_datetime_by_string(dt, string)
datetime.datetime(2003, 2, 18, 12, 5)
>>> dt = datetime(2001, 11, 1, 0, 0, 0)
>>> string = "1 months"
>>> increment_datetime_by_string(dt, string)
datetime.datetime(2001, 12, 1, 0, 0)
>>> dt = datetime(2001, 11, 1, 0, 0, 0)
>>> string = "13 months"
>>> increment_datetime_by_string(dt, string)
datetime.datetime(2002, 12, 1, 0, 0)
>>> dt = datetime(2001, 1, 1, 0, 0, 0)
>>> string = "72 months"
>>> increment_datetime_by_string(dt, string)
datetime.datetime(2007, 1, 1, 0, 0)
>>> dt = datetime(2001, 1, 1, 0, 0, 0)
>>> string = "72 months"
>>> increment_datetime_by_string(dt, string)
datetime.datetime(2007, 1, 1, 0, 0)
>>> dt = datetime(2001, 1, 1, 0, 0, 0)
>>> string = "5 minutes"
>>> increment_datetime_by_string(dt, string)
datetime.datetime(2001, 1, 1, 0, 5)
>>> dt = datetime(2001, 1, 1, 0, 0, 0)
>>> string = "49 hours"
>>> increment_datetime_by_string(dt, string)
datetime.datetime(2001, 1, 3, 1, 0)
>>> dt = datetime(2001, 1, 1, 0, 0, 0)
>>> string = "3600 seconds"
>>> increment_datetime_by_string(dt, string)
datetime.datetime(2001, 1, 1, 1, 0)
>>> dt = datetime(2001, 1, 1, 0, 0, 0)
>>> string = "30 days"
>>> increment_datetime_by_string(dt, string)
datetime.datetime(2001, 1, 31, 0, 0)
:param mydate: A datetime object to incremented
:param increment: A string providing increment information:
The string may include comma separated values of type
seconds, minutes, hours, days, weeks, months and years
Example: Increment the datetime 2001-01-01 00:00:00
with "60 seconds, 4 minutes, 12 hours, 10 days,
1 weeks, 5 months, 1 years" will result in the
datetime 2003-02-18 12:05:00
:param mult: A multiplier, default is 1
:return: The new datetime object or none in case of an error
"""
return modify_datetime_by_string(mydate, increment, mult, sign=int(1)) | 7651ccb9c4b5b881c5d1389dcdfd015dfabb0a3b | 21,686 |
def get_regularization_loss(scope=None, name="total_regularization_loss"):
"""Gets the total regularization loss.
Args:
scope: An optional scope name for filtering the losses to return.
name: The name of the returned tensor.
Returns:
A scalar regularization loss.
"""
losses = get_regularization_losses(scope)
if losses:
return math_ops.add_n(losses, name=name)
else:
return constant_op.constant(0.0) | dd65e13111b0927c197b38e4500d5194a7bd5453 | 21,687 |
def storage_backend_get_all(context, inactive=False, filters=None):
"""Get all storage backends"""
return IMPL.storage_backend_get_all(context, inactive, filters) | 9f67dab8e6576ee30185a36e59cfb8e0d45569cb | 21,689 |
from datetime import datetime
import requests
def get_date_opm_status_response(intent, session):
""" Gets the current status of opm for the day
"""
card_title = "OPM Status Result"
session_attributes = {}
speech_output = "I'm not sure which o. p. m. status you requested. " \
"Please try again."
reprompt_text = "I'm not sure which o. p. m. status you requested. " \
"Try asking if the government is open today."
should_end_session = True
if "date" in intent["slots"]:
dt_value = intent["slots"]["date"]["value"]
try:
fmt_dt_value = datetime.datetime.strptime(dt_value, "%Y-%m-%d").strftime("%m/%d/%Y")
# call the operating status endpoint and convert the response to json
r = requests.get(API_BASE + "?date=" + fmt_dt_value)
if r.status_code == 200:
data = r.json()
status = data['StatusType'].lower()
if status != 'undefined':
speech_output = "Federal agencies in the Washington, DC, area were " \
+ status + " on " + dt_value + "."
reprompt_text = ""
else:
speech_output = "I seem to be having trouble answering your question. " \
"Please ask me for the o. p. m. status by saying, " \
"Is the government open today?"
reprompt_text = "Please ask me for bus times by saying, " \
"Is the government open today?"
should_end_session = False
except ValueError:
speech_output = "Sorry, I did not understand that date. Please ask your question " \
"again with a valid date."
reprompt_text = "Sorry, I did not understand that date. Please ask your question " \
"again with a valid date."
should_end_session = False
return build_response(session_attributes, build_speechlet_response(
card_title, speech_output, reprompt_text, should_end_session)) | 29d1517f87495751a604c209768d6750a0ab960e | 21,690 |
def oauth_api_request(method, url, **kwargs):
"""
    On network error, fall back to using the RSS proxy.
"""
options = _proxy_helper.get_proxy_options()
client = RSSProxyClient(**options, proxy_strategy=_proxy_strategy)
return client.request(method, url, **kwargs) | 07438100ea75ae144567001832986f03cd72c3a8 | 21,691 |
def validate_cached(cached_calcs):
"""
Check that the calculations with created with caching are indeed cached.
"""
valid = True
for calc in cached_calcs:
if not calc.is_finished_ok:
print('Cached calculation<{}> not finished ok: process_state<{}> exit_status<{}>'
.format(calc.pk, calc.process_state, calc.exit_status))
print_report(calc.pk)
valid = False
if '_aiida_cached_from' not in calc.extras or calc.get_hash() != calc.get_extra('_aiida_hash'):
print('Cached calculation<{}> has invalid hash'.format(calc.pk))
print_report(calc.pk)
valid = False
if isinstance(calc, CalcJobNode):
original_calc = load_node(calc.get_extra('_aiida_cached_from'))
files_original = original_calc.list_object_names()
files_cached = calc.list_object_names()
if not files_cached:
print("Cached calculation <{}> does not have any raw inputs files".format(calc.pk))
print_report(calc.pk)
valid = False
if not files_original:
print("Original calculation <{}> does not have any raw inputs files after being cached from."
.format(original_calc.pk))
valid = False
if set(files_original) != set(files_cached):
print("different raw input files [{}] vs [{}] for original<{}> and cached<{}> calculation".format(
set(files_original), set(files_cached), original_calc.pk, calc.pk))
valid = False
return valid | 3406c584dada7d24a46cbb565d0cdbab98a1d303 | 21,692 |