content (string, 35–762k chars) | sha1 (string, 40 chars) | id (int64, 0–3.66M) |
---|---|---|
def open_sciobj_file_by_path(abs_path, write=False):
"""Open a SciObj file for read or write. If opened for write, create any missing
directories. For a SciObj stored in the default SciObj store, the path includes the
PID hash based directory levels.
This is the only method in GMN that opens SciObj files, so it can be modified to
customize the SciObj storage locations and can be mocked for testing.
Note that when a SciObj is created by a client via MNStorage.create(), Django
streams the SciObj bytes to a temporary file or memory location as set by
``FILE_UPLOAD_TEMP_DIR`` and related settings.
"""
if write:
d1_common.utils.filesystem.create_missing_directories_for_file(abs_path)
return open(abs_path, "wb" if write else "rb")
|
8c5852de544be21c61636df03ddb681a6c084310
| 27,853 |
def mps_to_kmh(speed_in_mps):
"""Convert from kilometers per hour to meters per second
Aguments:
speed_in_mps: a speed to convert
Returns:
speed_in_kmh: a speed in m/s
"""
return speed_in_mps / 1000.0 * 3600.0
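# Illustrative usage (not part of the original snippet): 10 m/s should come
# out as 36 km/h, i.e. 10 / 1000 * 3600.
assert abs(mps_to_kmh(10.0) - 36.0) < 1e-9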
|
5a37cbca17f8262043b7e1cb2b193b4c9d146766
| 27,854 |
import logging
from nltk.tokenize import word_tokenize
def tokenize_and_remove_stopwords(txt, additional_stopwords):
"""
Runs tokenization and removes stop words on the specified text
Parameters
-----------
txt: text to process
additional_stopwords: path to file containing possible additional stopwords on each line
Returns
-------
Processed list of lowercased words with stop words removed
"""
logging.info("Removing stopwords")
words_filter = load_stopwords(additional_stopwords)
tokens = word_tokenize(txt)
# make everything lowercase
tokens = lowercase_words(tokens)
tokens_without_sw = [word for word in tokens if word not in words_filter]
return tokens_without_sw
|
a118747bbd030e37ee0ed5f421f56390e1bd5b38
| 27,855 |
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Add the Wiser System Switch entities."""
data = hass.data[DOMAIN][config_entry.entry_id][DATA] # Get Handler
# Add Defined Switches
wiser_switches = []
for switch in WISER_SWITCHES:
if switch["type"] == "room":
for room in [room for room in data.wiserhub.rooms.all if len(room.devices) > 0]:
wiser_switches.append(
WiserRoomSwitch(data, switch["name"], switch["key"], switch["icon"], room.id )
)
elif switch["type"] == "system":
wiser_switches.append(
WiserSystemSwitch(data, switch["name"], switch["key"], switch["icon"])
)
elif switch["type"] == "device":
for device in [device for device in data.wiserhub.devices.all if hasattr(device, switch["key"])]:
wiser_switches.append(
WiserDeviceSwitch(data, switch["name"], switch["key"], switch["icon"], device.id )
)
async_add_entities(wiser_switches)
# Add SmartPlugs (if any)
wiser_smart_plugs = [
WiserSmartPlug(data, plug.id, "Wiser {}".format(plug.name))
for plug in data.wiserhub.devices.smartplugs.all
]
async_add_entities(wiser_smart_plugs)
return True
|
5a83f0888fadab08c573378dce4167f2d01478c1
| 27,856 |
import pyinputplus as pyip
def get_keep_dice_check(input_prompt):
"""
Enables returning a yes or no response to an input prompt.
:param input_prompt: String yes no question.
"""
return pyip.inputYesNo(prompt=input_prompt)
|
c7b8a1392c3e17a1acba615079848245a1b6e167
| 27,857 |
import sh
def get_pending_jobs(sort=True):
"""Obtains the list of currently pending (queued) jobs for the user."""
username = getusername()
# see squeue man page for status code (%t specifier)
listjob = sh.pipe_out(("squeue", "-u", username, "--noheader", "--format=%i %t"), split=True)
rslt = []
# treat any of these statuses as "pending" (queued)
qstatus = (
'PD', # pending
'S', # suspended
)
for job1 in listjob:
R = job1.split()
if R[1] in qstatus:
rslt.append(R[0])
if sort:
rslt.sort()
return rslt
|
5b03917885f8a09463c65a456c251cf753abdae2
| 27,858 |
def getOverlapRange(rangeA, rangeB):
"""
Calculate the overlapping range between rangeA and rangeB.
Args:
rangeA (list, tuple):
List or tuple containing start and end value in float.
rangeB (list, tuple):
List or tuple containing start and end value in float.
Returns:
(list):
List containing the overlapping range between rangeA and rangeB.
"""
assert isOverlap(rangeA, rangeB), f"There is no overlap between rangeA:{rangeA} and rangeB:{rangeB}"
return [max(rangeA[0], rangeB[0]), min(rangeA[1], rangeB[1])]
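# Illustrative usage (not part of the original snippet; assumes the isOverlap
# helper referenced above is defined): overlapping [0, 5] with [3, 8]
# would yield [3.0, 5.0], i.e. [max(0, 3), min(5, 8)].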
|
5f3bd22f5ec317d2bde87c92b027f658a80431fb
| 27,859 |
def multiply_values(dictionary: dict, num: int) -> dict:
"""Multiplies each value in `dictionary` by `num`
Args:
dictionary (dict): subject dictionary
num (int): multiplier
Returns:
dict: mapping of keys to values multiplied by multiplier
"""
return (
{key: value * num for key, value in dictionary.items()}
if dictionary is not None
else {}
)
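# Illustrative usage (not part of the original snippet):
assert multiply_values({"a": 1, "b": 2}, 3) == {"a": 3, "b": 6}
assert multiply_values(None, 3) == {}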
|
16eb87d60da64d648113858ba5cb4308137e0a14
| 27,860 |
def send_alarm(address, email_type, template_data={}):
"""
Send an email message to the given email address immediately, bypassing any queues or database system.
:param address: The email address to send this message to.
:param email_type: str defining this email template e.g EMAIL_WELCOME. Defined in email_types.json
:param template_data: dict, values that will be merged into the template.
"""
email_message = EmailMessage.from_email_type(address, email_type, template_data)
_DISPATCHER.send_email_alarm(email_message)
return email_message
|
2564a3d5c27f092e3e940d907b2cc2bc986257c2
| 27,861 |
def serialize_curve_point(p: Point) -> bytes:
"""
Serialize an elliptic curve point ``p`` in compressed form as described in
SEC1v2 (https://secg.org/sec1-v2.pdf) section 2.3.3.
Corresponds directly to the "ser_P(P)" function in BIP32
(https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki#conventions).
:param p: The elliptic curve point to be serialized.
:return: A byte sequence containing the serialization of ``p``.
"""
x, y = p.x(), p.y()
if y & 1:
return b'\x03' + serialize_uint256(x)
else:
return b'\x02' + serialize_uint256(x)
|
9e002df4b18245cb4ce54f1aede5687279aae5bb
| 27,862 |
def gcom_so_config(revision=None):
"""
Create a shared object for linking.
"""
config = BuildConfig(
project_label=f'gcom shared library {revision}',
source_root=gcom_grab_config(revision=revision).source_root,
steps=[
*common_build_steps(fpic=True),
LinkSharedObject(linker='mpifort', output_fpath='$output/libgcom.so'),
]
)
return config
|
ddcd625cd1393b38e1871f46a2f1b8738a904f1f
| 27,863 |
def func(command, description, link, params_string, returns="On success, the sent Message is returned.", return_type="Message"):
"""
Live template for pycharm:
y = func(command="$cmd$", description="$desc$", link="$lnk$", params_string="$first_param$", returns="$returns$", return_type="$returntype$")
"""
description_with_tabs = "\t\t" + description.strip().replace("\n", "\n\t\t")
param_list_args = []
param_list_kwargs = []
args = []
args2 = []
kwargs = []
kwargs2 = []
asserts = []
str_args = ""
str_kwargs = ""
param_strings = params_string.split("\n")
for param in param_strings:
assert_commands, assert_comments, param_name, param_type, table, non_buildin_type, param_name_input = parse_param_types(param)
param_required = table[2].strip()
param_needed = None
if param_required == "Yes":
param_needed = True
elif param_required == "Optional":
param_needed = False
param_description = table[3].strip()
if param_needed:
param_list_args.append(Param(param_name, param_type, param_needed, param_description))
args.append(param_name)
args2.append("{param_name}={param_name}".format(param_name=param_name))
str_args += '\t\t:param {key}: {descr}\n\t\t:type {key}: {type}\n\n'.format(key=param_name, descr=param_description, type=param_type)
if assert_commands:
asserts.append("assert({var} is not None)".format(var=param_name))
asserts.append("assert({ass})".format(ass=" or ".join(assert_commands)) + ((" # {comment}".format(comment=", ".join(assert_comments))) if assert_comments else ""))
else:
param_list_kwargs.append(Param(param_name, param_type, param_needed, param_description))
kwargs.append("{param_name}=None".format(param_name=param_name))
kwargs2.append("{param_name}={param_name}".format(param_name=param_name))
str_kwargs += '\t\t:keyword {key}: {descr}\n\t\t:type {key}: {type}\n\n'.format(key=param_name, descr=param_description, type=param_type)
if assert_commands:
asserts.append("assert({var} is None or {ass})".format(var=param_name, ass=" or ".join(assert_commands)) + ((" # {comment}".format(comment=", ".join(assert_comments))) if assert_comments else ""))
args.extend(kwargs)
args2.extend(kwargs2)
asserts_string = "\n\t\t" + "\n\t\t".join(asserts)
text = ""
if len(str_args)>0:
text += '\n\t\tParameters:\n\n'
text += str_args
if len(str_kwargs)>0:
text += '\n\t\tOptional keyword parameters:\n\n'
text += str_kwargs
do_args = ['"%s"' % command]
do_args.extend(args2)
result = '\tdef {funcname}(self, {params}):\n\t\t"""\n{description_with_tabs}\n\n\t\t{link}\n\n' \
'{paramshit}\n' \
'\t\tReturns:\n\n\t\t:return: {returns}\n\t\t:rtype: {return_type}\n\t\t"""{asserts_with_tabs}\n\t\treturn self.do({do_args})\n\t# end def {funcname}'.format(
funcname=convert_to_underscore(command),
params=", ".join(args), description_with_tabs=description_with_tabs, link=link,
returns=returns, return_type=return_type, command=command, do_args=", ".join(do_args),
asserts_with_tabs=asserts_string,
paramshit = text
)
result = result.replace("\t", " ")
return result
|
4c058afdb03b9d85a32e654f83beec95b72785ee
| 27,864 |
def get_basic_details(args, item):
"""
:param args: {
"item_code": "",
"warehouse": None,
"doctype": "",
"name": "",
"project": "",
warehouse: "",
update_stock: "",
project: "",
qty: "",
stock_qty: ""
}
:param item: `item_code` of Item object
:return: frappe._dict
"""
if not item:
item = frappe.get_doc("Item", args.get("item_code"))
warehouse = item.default_warehouse or args.warehouse
# material_request_type = ''
# if args.get('doctype') == "Material Request":
# material_request_type = frappe.db.get_value('Material Request',
# args.get('name'), 'material_request_type')
out = frappe._dict({
"item_code": item.name,
"item_name": item.item_name,
"description": cstr(item.description).strip(),
"image": cstr(item.image).strip(),
"warehouse": warehouse,
# "min_order_qty": flt(item.min_order_qty) if args.doctype == "Material Request" else "",
"qty": args.qty or 1.0,
"stock_qty": args.qty or 1.0
})
for fieldname in ("item_name", "item_group"):
out[fieldname] = item.get(fieldname)
return out
|
6ddd7f3249d55a073d57c466e8994c1ccf8b1aa7
| 27,865 |
def validate_standard_json(json_to_test: dict) -> bool:
""" validate fixed json against schema """
schema_to_use = get_standard_json_schema()
return validate_json(json_to_test, schema_to_use, True)
|
97004d7f5e4758dedeaa4ecaf0ee4a67955336c9
| 27,866 |
def desg_to_prefix(desg: str) -> str:
"""Convert small body designation to file prefix."""
return (desg.replace('/', '').replace(' ', '')
.replace('(', '_').replace(')', '_'))
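# Illustrative usage (not part of the original snippet): a comet designation
# becomes a filesystem-friendly prefix.
assert desg_to_prefix('C/2019 Q4 (Borisov)') == 'C2019Q4_Borisov_'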
|
badde1e3ec9c3f669c7cce8aa55646b15cc5f4c8
| 27,868 |
def get_logger(name=None, log=False, level=INFO, path=None):
"""
Returns the appropriate logger depending on the passed-in arguments.
This is particularly useful in conjunction with command-line arguments when
you won't know for sure what kind of logger the program will need.
:param name: The name of the file to log into.
:param log: Whether to actually commit information to a file.
:param level: The verbosity level. Only events logged at or above this level
will be displayed.
:param path: The folder to put the log file into.
"""
# Are we writing the output to disk? Pick the type of logger based on that.
if log:
return FileLogger(name=name, level=level, path=path)
else:
return StreamLogger(name=name, level=level)
|
be321f4704e98db7a8f4d6033004194104bbee64
| 27,869 |
def stop_job(job_name: Text,
execution_id: Text) -> JobInfo:
"""
Stop a job defined in the AI Flow workflow.
:param job_name: The name of the job defined in the workflow.
:param execution_id: The AI Flow workflow execution identifier.
:return: The result of the action.
"""
return proto_to_job(get_ai_flow_client().stop_job(job_name, execution_id))
|
0e67a061cbb730ffb6ebe57b11d32a3c110566dc
| 27,871 |
def find_user():
"""
Determines current user using the username value of the current session
user and returns the current user as a dict.
"""
current_user = mongo.db.users.find_one({"username": session["user"]})
return current_user
|
249836f8f1a23ff34bc55f112db2f4670672a7a1
| 27,872 |
def field2nullable(field, **kwargs):
"""Return the dictionary of swagger field attributes for a nullable field.
:param Field field: A marshmallow field.
:rtype: dict
"""
attributes = {}
if field.allow_none:
omv = kwargs['openapi_major_version']
attributes['x-nullable' if omv < 3 else 'nullable'] = True
return attributes
|
dd5d4cd63aeede4ef9356baa9fe9a48bd5f87841
| 27,873 |
def zero_expand3d(inputs, stride=1):
"""Expand the inputs by zeros
explain the expand operation:
given stride = 1
[[[1, 2] --> [[[1, 0, 2]
[3, 4]] [0, 0, 0]
[3, 0, 4]]
[[5, 6]
[7, 8]]] [[0, 0, 0]
[0, 0, 0]
[0, 0, 0]]
[[5, 0, 6]
[0, 0, 0]
[7, 0, 8]]]
Args:
-----------------------------
inputs : tvm.te.tensor.Tensor
shape [batch, channel, depth, height, width]
stride: (optional:1) int or tuple
expected: (d_stride, h_stride, w_stride)
-----------------------------
Returns:
-----------------------------
tvm.te.tensor.Tensor
shape [batch, channel, (depth - 1) * d_stride + 1, (height - 1) * h_stride + 1, (width - 1) * w_stride + 1]
-----------------------------
"""
stride = (stride, stride, stride) if isinstance(stride, (int, tvm.tir.IntImm)) else stride
assert_print(isinstance(stride, tuple), "type(stride)={}".format(type(stride)))
assert_print(len(stride) == 3)
expand_zero = tvm.tir.expr.const(0, inputs.dtype)
batch_size, in_channel, depth, height, width = inputs.shape
out_depth = (depth - 1) * stride[0] + 1
out_height = (height - 1) * stride[1] + 1
out_width = (width - 1) * stride[2] + 1
return tvm.te.compute(
(batch_size, in_channel, out_depth, out_height, out_width),
lambda b, c, d ,h, w: tvm.te.if_then_else(
tvm.te.all(
d % stride[0] == 0,
h % stride[1] == 0,
w % stride[2] == 0
),
inputs[b, c, d // stride[0], h // stride[1], w // stride[2]],
expand_zero
)
)
|
4944b3f5f42811955b76fa46082dc5617fb648b7
| 27,874 |
import json
def _load_setup_cfg():
"""Load the setup configuration from the 'setup.json' file."""
try:
with open(ROOT / 'setup.json') as setup_json_file:
return json.load(setup_json_file)
except json.decoder.JSONDecodeError as error: # pylint: disable=no-member
raise DependencySpecificationError("Error while parsing 'setup.json' file: {}".format(error))
except FileNotFoundError:
raise DependencySpecificationError("The 'setup.json' file is missing!")
|
b3e26e25f18098a51210221299f3a1066c92e5db
| 27,875 |
import json
def toJSON(obj, opt_pretty=False, for_cloud_api=True):
"""Serialize an object to a JSON string appropriate for API calls.
Args:
obj: The object to serialize.
opt_pretty: True to pretty-print the object.
for_cloud_api: Whether the encoding should be done for the Cloud API or the
legacy API.
Returns:
A JSON string representing the input.
"""
serializer = Serializer(not opt_pretty, for_cloud_api=for_cloud_api)
encoded = serializer._encode(obj) # pylint: disable=protected-access
return json.dumps(encoded, indent=2 if opt_pretty else None)
|
3f3d79d0b3b200ed3a05b55ea671eccae99543ce
| 27,876 |
import urllib.request
def load_config_file_koe(filename):
""" Loads in a config file for KOE to run
Args:
filename: Filename (can be absolute or relative path, or a URL) to read config file from.
Returns:
dict: Configuration file as a dict object.
"""
config_values = {}
# First try to open things locally. If that doesn't work try it as a URL
try:
config_lines = open(filename, "r").readlines()
except FileNotFoundError:
try:
r = urllib.request.urlopen(filename)
config_lines = []
for line in r:
config_lines.append(line.decode("utf-8"))
except:
print("Failed to get any file")
return None
for line in config_lines:
line = line.strip()
if len(line) == 0 or line[0] == "#":
continue
splits = line.split("=")
config_values[splits[0].strip()] = splits[1].strip()
for n in config_values:
value = config_values[n]
if value.lower() == "true":
config_values[n] = True
continue
elif value.lower() == "false":
config_values[n] = False
continue
config_values["SIZE"] = int(config_values["SIZE"])
config_values["CORES"] = int(config_values["CORES"])
config_values["ZEROPOINT"] = int(config_values["ZEROPOINT"])
config_values["ARC_CONV"] = float(config_values["ARC_CONV"])
config_values["LINEAR_STEP"] = float(config_values["LINEAR_STEP"])
config_values["ALARM_TIME"] = int(config_values["ALARM_TIME"])
config_values["BOX_SIZE"] = int(config_values["BOX_SIZE"])
config_values["FILTER_SIZE"] = int(config_values["FILTER_SIZE"])
value_string = config_values["MASK_PARAMS"].split(",")
config_values["MASK_PARAMS"] = [float(value_string[0]), float(value_string[1]), int(value_string[2])]
return config_values
|
e04b162a396f5e3e4747855f7c69b9cad017bb39
| 27,877 |
def select_by_type(transcripts, log):
"""Filter transcripts depending on different type"""
# Difference types: UTR5_number and UTR5_boundary
candidates, dtype, dcrit = analyse_difference_type_utr5_number_or_boundary(transcripts, log)
if candidates is not None:
return candidates, dtype, dcrit
# Difference type: UTR_ends
candidates, dtype, dcrit = analyse_difference_type_UTR_ends(transcripts, log)
if candidates is not None:
return candidates, dtype, dcrit
# Difference type: UTR3
return analyse_difference_type_UTR3(transcripts, log)
|
2b1b7311459e7a305a2cbc64d295114f5bca3fc3
| 27,878 |
import numpy as np
def lorentzian_distance(x, y):
"""Calculates the Lorentzian Distance.
Args:
x (np.array): N-dimensional array.
y (np.array): N-dimensional array.
Returns:
The Lorentzian Distance between x and y.
"""
dist = np.log(1 + np.fabs(x - y))
return np.sum(dist)
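# Illustrative usage (not part of the original snippet):
# log(1 + |1-2|) + log(1 + |2-4|) = log(2) + log(3)
d = lorentzian_distance(np.array([1.0, 2.0]), np.array([2.0, 4.0]))
assert abs(d - (np.log(2.0) + np.log(3.0))) < 1e-9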
|
d11cc411aa22aab14b1b3ee2dd606d5a8efb6fe7
| 27,879 |
def check_database_status(database_name, env):
"""This function looks for a DatabaseCreate task and returns a http
response or the Database itself depeding on the context. If the
DatabaseCreate task is still running of failed, a http response is
returned, otherwise this functions tries to retrieve the Database with
the get_database function.
Parameters:
database_name (str): Name of the database
env (str): It represents the database environment (prod or dev)
Returns:
Database or Response: Database or Rest Framework Response object
"""
database_create = last_database_create(database_name, env)
LOG.info(
"Task {}".format(getattr(database_create, 'task', 'No tasks found'))
)
if database_create:
if database_create.is_running:
msg = "Database {} in env {} is beeing created.".format(
database_name, env)
return log_and_response(
msg=msg, http_status=status.HTTP_412_PRECONDITION_FAILED)
elif database_create.is_status_error:
msg = ("A error ocurred creating database {} in env {}. Check "
"error on task history in https://dbaas.globoi.com").format(
database_name, env)
return log_and_response(
msg=msg, http_status=status.HTTP_500_INTERNAL_SERVER_ERROR)
try:
database = get_database(database_name, env)
except IndexError as e:
msg = "Database {} does not exist in env {}.".format(
database_name, env)
return log_and_response(
msg=msg, e=e, http_status=status.HTTP_500_INTERNAL_SERVER_ERROR)
except MultipleObjectsReturned as e:
msg = "There are multiple databases called {} in {}.".format(
database_name, env)
return log_and_response(
msg=msg, e=e, http_status=status.HTTP_500_INTERNAL_SERVER_ERROR)
except Exception as e:
msg = "Something ocurred on dbaas, please get in touch with your DBA."
return log_and_response(
msg=msg, e=e, http_status=status.HTTP_500_INTERNAL_SERVER_ERROR)
if not(database and database.status):
msg = "Database {} is not Alive.".format(database_name)
return log_and_response(
msg=msg, http_status=status.HTTP_500_INTERNAL_SERVER_ERROR)
return database
|
17d9f616d20638c4624e5b35a042d9265ccf625f
| 27,880 |
def get_flat_schema(schema_name=None):
"""Flatten the specified data model schema, defaulting to the core schema,
useful for retrieving FITS keywords or valid value lists.
"""
return _schema_to_flat(_load_schema(schema_name))
|
6f43a095015c25bdace05cf473f252ac699b33f9
| 27,881 |
import numpy as np
def repeat3(img):
"""
Repeat an array 3 times along its last axis
:param img: A numpy.ndarray
:return: A numpy.ndarray with a shape of: img.shape + (3,)
"""
return np.repeat(img[..., np.newaxis], 3, axis=-1)
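# Illustrative usage (not part of the original snippet): a 4x5 grayscale
# image becomes a 4x5x3 array with the value copied into each channel.
assert repeat3(np.zeros((4, 5))).shape == (4, 5, 3)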
|
eddd3469d8d02457b87ef00c13ef7213d3a5568b
| 27,882 |
import numpy as np
from time import time
from tqdm import tqdm
from joblib import Parallel, delayed
def encode_strategies(strategies, batch_size=stg.JOBLIB_BATCH_SIZE,
parallel=True):
"""
Encode strategies
Parameters
----------
strategies : Strategies array
Array of strategies to be encoded.
Returns
-------
numpy array
Encodings for each strategy in strategies.
Strategies array
Array of unique strategies.
"""
stg.logger.info("Encoding strategies")
N = len(strategies)
stg.logger.info("Getting unique set of strategies")
start_time = time()
unique = unique_strategies(strategies)
end_time = time()
stg.logger.info("Extraction time %.3f sec" % (end_time - start_time))
n_unique_strategies = len(unique)
stg.logger.info("Found %d unique strategies" % n_unique_strategies)
# Map strategies to number
n_jobs = u.get_n_processes() if parallel else 1
stg.logger.info("Assign samples to unique strategies (n_jobs = %d)"
% n_jobs)
results = Parallel(n_jobs=n_jobs, batch_size=batch_size)(delayed(assign_to_unique_strategy)(s, unique) for s in tqdm(strategies))
y = np.array(results)
return y, unique
|
c77fcd28c69b447e43fc9eef359b32426771d6bd
| 27,883 |
def generate_authenticator(data, authenticator_key):
"""
This function will generate an authenticator for the data (provides authentication and integrity).
:param data: The data over which to generate the authenticator.
:type data: :class:`str`
:param authenticator_key: The secret key to be used by the function, in byte string.
You can use :func:`~securitylib.crypto.generate_authenticator_key` to generate it.
:type authenticator_key: :class:`str`
:returns: :class:`str` -- The generated authenticator in byte string.
"""
validate_authenticator_key(authenticator_key)
return advanced_crypto.generate_authenticator(data, authenticator_key)
|
8203c9f487d2acf6a8a0bbd907bc1f8cc9dc026c
| 27,884 |
def multi_recall(pred_y, true_y, labels):
"""
Calculate the macro-averaged recall for multi-class classification
:param pred_y: predict result
:param true_y: true result
:param labels: label list
:return:
"""
if isinstance(pred_y[0], list):
pred_y = [item[0] for item in pred_y]
recalls = [binary_recall(pred_y, true_y, label) for label in labels]
rec = mean(recalls)
return rec
|
a11984b6c509b9b95d65ad148ca712099ff91a66
| 27,886 |
def is_safe_range(expression):
"""
Return true if an expression is safe range.
This function receives an expression in safe range
normal form and returns true if all its free variables
are range restricted.
"""
try:
return extract_logic_free_variables(
expression
) == range_restricted_variables(expression)
except NeuroLangException:
return False
|
e6a23b250f936cc78918ad15eb1122a419a8b872
| 27,887 |
def star_marker_level(prev, curr):
"""Allow markers to be on the same level as a preceding star"""
return (prev.is_stars() and not curr.is_stars() and
prev.depth == curr.depth)
|
3311c452c8f138cd8fa75b67109e75a9bf30902c
| 27,888 |
from typing import Optional
from typing import Sequence
def get_mail_addresses(ids: Optional[Sequence[str]] = None,
key_word: Optional[str] = None,
output_file: Optional[str] = None,
sendtype: Optional[str] = None,
status: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetMailAddressesResult:
"""
This data source provides the Direct Mail Mail Addresses of the current Alibaba Cloud user.
> **NOTE:** Available in v1.134.0+.
## Example Usage
Basic Usage
```python
import pulumi
import pulumi_alicloud as alicloud
ids = alicloud.directmail.get_mail_addresses(ids=["example_id"])
pulumi.export("directMailMailAddressId1", ids.addresses[0].id)
```
:param Sequence[str] ids: A list of Mail Address IDs.
:param str key_word: The key word about account email address.
:param str sendtype: Account type.
:param str status: Account Status. Valid values: `0`, `1`. Freeze: 1, normal: 0.
"""
__args__ = dict()
__args__['ids'] = ids
__args__['keyWord'] = key_word
__args__['outputFile'] = output_file
__args__['sendtype'] = sendtype
__args__['status'] = status
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('alicloud:directmail/getMailAddresses:getMailAddresses', __args__, opts=opts, typ=GetMailAddressesResult).value
return AwaitableGetMailAddressesResult(
addresses=__ret__.addresses,
id=__ret__.id,
ids=__ret__.ids,
key_word=__ret__.key_word,
output_file=__ret__.output_file,
sendtype=__ret__.sendtype,
status=__ret__.status)
|
52590cc1c12788e47aa81adc1e9f51bbd9092f31
| 27,889 |
import torch
def load_checkpoint(
file,
model: torch.nn.Module,
optimizer: torch.optim.Optimizer = None,
lr_scheduler: torch.optim.lr_scheduler._LRScheduler = None,
strict: bool = True,
):
"""Loads training states from a checkpoint file.
Args:
file: a file-like object (has to implement read(), readline(), tell(), and seek()), or a string or os.PathLike
object containing a file name.
model (:class:`torch.nn.Module`): Model to load saved weights and buffers.
optimizer (Union[:class:`torch.optim.Optimizer`, :class:`colossalai.nn.optimizer`]): Optimizer to recuperate.
lr_scheduler (:class:`torch.optim.lr_scheduler._LRScheduler`, optional):
lr_scheduler to recuperate, defaults to None.
strict (bool, optional): Whether to strictly enforce that the keys in :attr:`state_dict`
of the checkpoint match the names of parameters and buffers in model, defaults to True.
Returns:
int: The saved epoch number.
Raises:
RuntimeError: Raise error if the model/optimizer cannot successfully be recuperated
"""
state_dict = (
torch.load(file, map_location=torch.device("cpu")) if gpc.get_local_rank(ParallelMode.MODEL) == 0 else None
)
# model states
model_state = state_dict.pop("model") if state_dict is not None else dict()
# pipeline
if is_using_pp():
model_state = partition_pipeline_parallel_state_dict(model, model_state)
try:
model.load_state_dict(model_state, strict=strict)
except RuntimeError as e:
error_msgs = str(e)
if error_msgs.startswith("Error(s) in loading state_dict for "):
error_msgs = error_msgs.split("\n\t")[1:]
dst_rank = gpc.get_ranks_in_group(ParallelMode.MODEL)[0]
all_error_msgs = [None for _ in range(gpc.get_world_size(ParallelMode.MODEL))]
dist.gather_object(error_msgs, all_error_msgs, dst=dst_rank, group=gpc.get_cpu_group(ParallelMode.MODEL))
if gpc.get_global_rank() == 0:
all_error_msgs = list(chain.from_iterable(all_error_msgs))
raise RuntimeError(
"Error(s) in loading state_dict for {}:\n\t{}".format(
model.__class__.__name__, "\n\t".join(all_error_msgs)
)
)
else:
raise e
# broadcast the rest states
state_dict = broadcast_state_dict(state_dict, ParallelMode.MODEL)
# # optimizer states
# if optimizer is not None and 'optimizer' in state_dict:
# optimizer.load_state_dict(state_dict['optimizer'])
# # lr scheduler states
# if lr_scheduler is not None and 'lr_scheduler' in state_dict:
# lr_scheduler.load_state_dict(state_dict['lr_scheduler'])
# last epoch
last_epoch = state_dict.pop("epoch", -1)
return last_epoch
|
f4eb59a303a5bf13ff1bdb9f37ca577a4d9e0419
| 27,890 |
def num_or_str(x):
"""The argument is a string; convert to a number if possible, or strip it.
Ex: num_or_str('42') ==> 42; num_or_str(' 42x ') ==> '42x' """
try:
return int(x)
except ValueError:
try:
return float(x)
except ValueError:
return str(x).strip()
|
6709cfc772ecc79993563f43c2d8ea4526f222c6
| 27,891 |
def mac_timezone():
"""Determine system timezone"""
output = cmdmod['cmd.run']("/usr/sbin/systemsetup -gettimezone")
return {'mac_timezone': output[11:]}
|
e5e8e45fdbd54d1741dd80a76a47f26f43640293
| 27,892 |
def lookup_loc_carriers(model_run):
"""
loc_carriers, used in system_wide balance, are linked to loc_tech_carriers
e.g. `X1::power` will be linked to `X1::chp::power` and `X1::battery::power`
in a comma delimited string, e.g. `X1::chp::power,X1::battery::power`
"""
# get the technologies associated with a certain loc_carrier
lookup_loc_carriers_dict = dict(dims=["loc_carriers"])
data = []
for loc_carrier in model_run.sets["loc_carriers"]:
loc_tech_carrier = list(
set(
i
for i in model_run.sets["loc_tech_carriers_prod"]
+ model_run.sets["loc_tech_carriers_con"]
if loc_carrier == "{0}::{2}".format(*i.split("::"))
)
)
data.append(",".join(loc_tech_carrier))
lookup_loc_carriers_dict["data"] = data
return lookup_loc_carriers_dict
|
85c20bd789e0250405dded9e0e4a56777047ef5a
| 27,895 |
def filldown(table, *fields, **kwargs):
"""
Replace missing values with non-missing values from the row above.
E.g.::
>>> from petl import filldown, look
>>> look(table1)
+-------+-------+-------+
| 'foo' | 'bar' | 'baz' |
+=======+=======+=======+
| 1 | 'a' | None |
+-------+-------+-------+
| 1 | None | 0.23 |
+-------+-------+-------+
| 1 | 'b' | None |
+-------+-------+-------+
| 2 | None | None |
+-------+-------+-------+
| 2 | None | 0.56 |
+-------+-------+-------+
| 2 | 'c' | None |
+-------+-------+-------+
| None | 'c' | 0.72 |
+-------+-------+-------+
>>> table2 = filldown(table1)
>>> look(table2)
+-------+-------+-------+
| 'foo' | 'bar' | 'baz' |
+=======+=======+=======+
| 1 | 'a' | None |
+-------+-------+-------+
| 1 | 'a' | 0.23 |
+-------+-------+-------+
| 1 | 'b' | 0.23 |
+-------+-------+-------+
| 2 | 'b' | 0.23 |
+-------+-------+-------+
| 2 | 'b' | 0.56 |
+-------+-------+-------+
| 2 | 'c' | 0.56 |
+-------+-------+-------+
| 2 | 'c' | 0.72 |
+-------+-------+-------+
>>> table3 = filldown(table1, 'bar')
>>> look(table3)
+-------+-------+-------+
| 'foo' | 'bar' | 'baz' |
+=======+=======+=======+
| 1 | 'a' | None |
+-------+-------+-------+
| 1 | 'a' | 0.23 |
+-------+-------+-------+
| 1 | 'b' | None |
+-------+-------+-------+
| 2 | 'b' | None |
+-------+-------+-------+
| 2 | 'b' | 0.56 |
+-------+-------+-------+
| 2 | 'c' | None |
+-------+-------+-------+
| None | 'c' | 0.72 |
+-------+-------+-------+
>>> table4 = filldown(table1, 'bar', 'baz')
>>> look(table4)
+-------+-------+-------+
| 'foo' | 'bar' | 'baz' |
+=======+=======+=======+
| 1 | 'a' | None |
+-------+-------+-------+
| 1 | 'a' | 0.23 |
+-------+-------+-------+
| 1 | 'b' | 0.23 |
+-------+-------+-------+
| 2 | 'b' | 0.23 |
+-------+-------+-------+
| 2 | 'b' | 0.56 |
+-------+-------+-------+
| 2 | 'c' | 0.56 |
+-------+-------+-------+
| None | 'c' | 0.72 |
+-------+-------+-------+
.. versionadded:: 0.11
"""
return FillDownView(table, fields, **kwargs)
|
1f14d9e3aba6791ab9d512c647053ad41fcfab59
| 27,896 |
def realworld_bring_peg(fully_observable=True,
time_limit=_TIME_LIMIT,
random=None,
log_output=None,
environment_kwargs=None,
safety_spec=None,
delay_spec=None,
noise_spec=None,
perturb_spec=None,
dimensionality_spec=None,
multiobj_spec=None,
combined_challenge=None):
"""Returns manipulator bring task with the peg prop."""
use_peg = True
insert = False
return gen_task(use_peg, insert, fully_observable, time_limit, random,
log_output, environment_kwargs, safety_spec, delay_spec,
noise_spec, perturb_spec, dimensionality_spec, multiobj_spec,
combined_challenge)
|
496c7e31fee93d87d10cbee7bb6369c417956b23
| 27,897 |
def schedule_gemm(cfg, s, A, B, C, batched=False, schedule_transforms=True):
"""Schedule GEMM, single and batched
Parameters
----------
cfg : Config
Schedule configuration
s : tvm.te.schedule.Schedule
Operator schedule
A : tvm.te.Tensor
2D/3D Tensor, shape [n, k]/[b, n, k]
B : tvm.te.Tensor
2D/3D Tensor, shape [k, m]/[b, k, m]
C : tvm.te.Tensor
2D/3D Tensor, shape [n, m]/[b, n, m]
batched : bool
Whether the GEMM is batched
Returns
-------
"""
block_size_x = 4
block_size_y = 4
warp_size_x = 2
warp_size_y = 2
work_group_x = cfg["work_group_x"].val
work_group_y = cfg["work_group_y"].val
k_unroll = cfg["unroll_k_factor"].val
if not batched:
y_index, x_index = (0, 1)
else:
y_index, x_index = (1, 2)
trans_inter, A_transposed_interleaved = transpose_interleave(
s, A, cfg["A_interleave"].val, y_index, x_index, [C], batched=batched
)
inter_trans, B_interleaved_transposed = interleave_transpose(
s, B, cfg["B_interleave"].val, y_index, x_index, [C], batched=batched
)
if schedule_transforms:
# Schedule A
y, x = s[trans_inter].op.axis
y, x, yi, xi = s[trans_inter].tile(y, x, 1, 8)
s[trans_inter].unroll(yi)
s[trans_inter].unroll(xi)
tile_and_bind(s, trans_inter, y, x, 1, 4)
# Schedule B
y, x = s[inter_trans].op.axis
xo, xi = s[inter_trans].split(x, 4)
s[inter_trans].vectorize(xi)
tile_and_bind(s, inter_trans, y, xo, 4, 4)
# Schedule C
CR_A = s.cache_read(A_transposed_interleaved, "local", [C])
CR_B = s.cache_read(B_interleaved_transposed, "local", [C])
CW_C = s.cache_write(C, "local")
if not batched:
y, x = s[C].op.axis
else:
z, y, x = s[C].op.axis
y, x, yt, xt = s[C].tile(y, x, block_size_y, block_size_x)
s[C].unroll(yt)
s[C].vectorize(xt)
# Tile the global work space to generate 'square' warps -> 2x2 for warp size of 4
y, x, wy, wx = s[C].tile(y, x, warp_size_y, warp_size_x)
x = s[C].fuse(x, wy, wx)
if not batched:
yo, xo, yi, xi = tile_and_bind(s, C, y, x, work_group_y, work_group_x)
else:
# For batched GEMM bind batch to z axis
zo, yo, xo, zi, yi, xi = tile_and_bind3d(s, C, z, y, x, 1, work_group_y, work_group_x)
s[CW_C].compute_at(s[C], xi)
if not batched:
y, x = s[CW_C].op.axis
else:
_, y, x = s[CW_C].op.axis
y, x, yt, xt = s[CW_C].tile(y, x, block_size_y, block_size_x)
k = s[CW_C].op.reduce_axis[0]
s[CW_C].reorder(k, yt, xt)
ko, ki = s[CW_C].split(k, k_unroll)
s[CW_C].unroll(ki)
s[CW_C].unroll(yt)
s[CW_C].unroll(xt)
if not batched:
i, j = s[CR_A].op.axis
else:
_, i, j = s[CR_A].op.axis
s[CR_A].reorder(j, i)
s[CR_A].compute_at(s[CW_C], ki)
s[CR_A].unroll(j)
s[CR_A].vectorize(i)
if not batched:
i, j = s[CR_B].op.axis
else:
_, i, j = s[CR_B].op.axis
s[CR_B].compute_at(s[CW_C], ki)
s[CR_B].unroll(i)
s[CR_B].vectorize(j)
return trans_inter, inter_trans
|
2a99a20f4e9634bdaa06d114a9eafb7406736bc3
| 27,898 |
import math
def distance(x1: float, y1: float, x2: float, y2: float) -> float:
"""Возвращает расстояние между двумя точками на плоскости"""
return math.sqrt((x2 - x1) ** 2 + (y2 - y1) ** 2)
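# Illustrative usage (not part of the original snippet): the 3-4-5 right triangle.
assert distance(0.0, 0.0, 3.0, 4.0) == 5.0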
|
2113cb5926492ba89820ebb7f42de6993e46e3cb
| 27,899 |
import logging
def GitPush(git_repo, refspec, push_to, force=False, dry_run=False,
capture_output=True, skip=False, **kwargs):
"""Wrapper for pushing to a branch.
Args:
git_repo: Git repository to act on.
refspec: The local ref to push to the remote.
push_to: A RemoteRef object representing the remote ref to push to.
force: Whether to bypass non-fastforward checks.
dry_run: If True, do everything except actually push the remote ref.
capture_output: Whether to capture output for this command.
skip: Log the git command that would have been run, but don't run it; this
avoids e.g. remote access checks that still apply to |dry_run|.
"""
cmd = ['push', push_to.remote, '%s:%s' % (refspec, push_to.ref)]
if force:
cmd.append('--force')
if dry_run:
cmd.append('--dry-run')
if skip:
logging.info('Would have run "%s"', cmd)
return
return RunGit(git_repo, cmd, capture_output=capture_output,
**kwargs)
|
3af43d0a819c297735995a9d8c7e39b49937b7a3
| 27,900 |
import numpy as np
def boxes_to_array(bound_boxes):
"""
# Args
bound_boxes : list of BoundBox instances
# Returns
array of shape (N, 3); each row is [class index, centroid box (x, y, w, h), max class probability]
"""
temp_list = []
for box in bound_boxes:
temp_list.append([np.argmax(box.classes), np.asarray([box.x, box.y, box.w, box.h]), np.max(box.classes)])
return np.array(temp_list)
|
e01b908e675b84928d1134d8eec4627f36b8af4a
| 27,901 |
from typing import Tuple
def _scope_prepare(scope: str) -> Tuple[object, str]:
"""
Parse a scope string and return a tuple consisting of a context manager for entering the tf name scope
and a string representing the summary name. The scope is of the form "<ident1>.<ident2>. ... .<identN>"; the
rightmost identifier is used as the summary name whereas the prefix is used as the scope name.
:param scope: A string containing a qualified name.
:return:
"""
splits = scope.rsplit('.', 1)
if any(map(lambda v: len(v) == 0, splits)):
raise ValueError(f'Invalid scope name: {scope}')
if len(splits) == 1:
return nullcontext(), splits[0]
return tf.name_scope(splits[0]), splits[1]
|
01bcd08d87e23621f3476055379d9c7403fd4b75
| 27,903 |
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Unload a config entry."""
if unload_ok := await hass.config_entries.async_unload_platforms(entry, PLATFORMS):
aftv = hass.data[DOMAIN][entry.entry_id][ANDROID_DEV]
await aftv.adb_close()
hass.data[DOMAIN].pop(entry.entry_id)
return unload_ok
|
77376bcdf98c9b4c2ac6020e44d704fbe59d9143
| 27,904 |
from typing import Tuple
def render_wrapped_text(text: str, font: pygame.freetype.Font,
color: Color, centered: bool, offset_y: int,
max_width: int) -> Tuple[pygame.Surface, pygame.Rect]:
"""Return a surface & rectangle with text rendered over several lines.
Parameter offset_y defines the distance between lines."""
words = text.split()
lines = []
lines_h = 0
line_w, line_h = 0, 0
# Separate text into lines, storing each line size
while words:
line_words = []
while words:
_, _, l_w, l_h = font.get_rect(
' '.join(line_words + words[:1]))
if l_w > max_width:
break
line_w, line_h = l_w, l_h
line_words.append(words.pop(0))
if line_words:
lines_h += line_h
lines.append((' '.join(line_words), (line_w, line_h)))
else:
# Split word in half if it is too long
long_word = words.pop(0)
words.insert(0, long_word[:len(long_word)//2])
words.insert(1, long_word[len(long_word)//2:])
# Create transparent surface and rectangle to be returned
final_height = lines_h + (len(lines) - 1) * offset_y if lines else lines_h
final_surf = pygame.Surface((max_width, final_height), pygame.SRCALPHA, 32)
final_surf.convert()
final_rect = final_surf.get_rect()
# Render lines on the surface
pos_y = 0
for line in lines:
if centered:
pos_x = int(max_width/2 - line[1][0]/2)
else:
pos_x = 0
font.render_to(final_surf, (pos_x, pos_y), line[0], color)
pos_y += line[1][1] + offset_y
return final_surf, final_rect
|
73be30318fd3afe5bf5138b8c21c46caf05022bc
| 27,905 |
def comp4(a1,a2,b1,b2):
"""两个区间交集,a1<a2; b1<b2"""
if a2<b1 or b2<a1:#'空集'
gtii = []
else:
lst1 = sorted([a1,a2,b1,b2])
gtii = [lst1[1], lst1[2]]
return gtii
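# Illustrative usage (not part of the original snippet):
assert comp4(0, 5, 3, 8) == [3, 5]  # overlapping intervals
assert comp4(0, 2, 3, 8) == []      # disjoint intervals give an empty result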
|
ba4357b16ee09f78b6c09f422d27a42cd91e298e
| 27,906 |
from typing import Callable
from typing import Optional
from typing import Union
from typing import Tuple
from typing import List
def fixed_step_solver_template(
take_step: Callable,
rhs_func: Callable,
t_span: Array,
y0: Array,
max_dt: float,
t_eval: Optional[Union[Tuple, List, Array]] = None,
):
"""Helper function for implementing fixed-step solvers supporting both
``t_span`` and ``max_dt`` arguments. ``take_step`` is assumed to be a
function implementing a single step of size h of a fixed-step method.
The signature of ``take_step`` is assumed to be:
- rhs_func: Either a generator :math:`G(t)` or RHS function :math:`f(t,y)`.
- t0: The current time.
- y0: The current state.
- h: The size of the step to take.
It returns:
- y: The state of the DE at time t0 + h.
``take_step`` is used to integrate the DE specified by ``rhs_func``
through all points in ``t_eval``, taking steps no larger than ``max_dt``.
Each interval in ``t_eval`` is divided into the least number of sub-intervals
of equal length so that the sub-intervals are smaller than ``max_dt``.
Args:
take_step: Callable for fixed step integration.
rhs_func: Callable, either a generator or rhs function.
t_span: Interval to solve over.
y0: Initial state.
max_dt: Maximum step size.
t_eval: Optional list of time points at which to return the solution.
Returns:
OdeResult: Results object.
"""
# ensure the output of rhs_func is a raw array
def wrapped_rhs_func(*args):
return Array(rhs_func(*args)).data
y0 = Array(y0).data
t_list, h_list, n_steps_list = get_fixed_step_sizes(t_span, t_eval, max_dt)
ys = [y0]
for current_t, h, n_steps in zip(t_list, h_list, n_steps_list):
y = ys[-1]
inner_t = current_t
for _ in range(n_steps):
y = take_step(wrapped_rhs_func, inner_t, y, h)
inner_t = inner_t + h
ys.append(y)
ys = Array(ys)
results = OdeResult(t=t_list, y=ys)
return trim_t_results(results, t_span, t_eval)
|
6e989b1f6d92ddeb4d5f18e9eb110667f28b6a33
| 27,907 |
def set_reference_ene(rxn_lst, spc_dct,
pes_model_dct_i, spc_model_dct_i,
run_prefix, save_prefix, ref_idx=0):
""" Sets the reference species for the PES for which all energies
are scaled relative to.
"""
# Set the index for the reference species, right now default to 1st spc
ref_rxn = rxn_lst[ref_idx]
_, (ref_rgts, _) = ref_rxn
ioprinter.info_message(
'Determining the reference energy for PES...', newline=1)
ioprinter.info_message(
' - Reference species assumed to be the',
' first set of reactants on PES: {}'.format('+'.join(ref_rgts)))
# Get the model for the first reference species
ref_scheme = pes_model_dct_i['therm_fit']['ref_scheme']
ref_enes = pes_model_dct_i['therm_fit']['ref_enes']
ref_ene_level = spc_model_dct_i['ene']['lvl1'][0]
ioprinter.info_message(
' - Energy Level for Reference Species: {}'.format(ref_ene_level))
# Get the elec+zpe energy for the reference species
ioprinter.info_message('')
hf0k = 0.0
for rgt in ref_rgts:
ioprinter.info_message(' - Calculating energy for {}...'.format(rgt))
basis_dct, uniref_dct = thermfit.prepare_refs(
ref_scheme, spc_dct, (rgt,))
spc_basis, coeff_basis = basis_dct[rgt]
# Build filesystem
ene_spc, ene_basis = thmroutines.basis.basis_energy(
rgt, spc_basis, uniref_dct, spc_dct,
spc_model_dct_i, run_prefix, save_prefix)
# Calculate the total energy
hf0k += thermfit.heatform.calc_hform_0k(
ene_spc, ene_basis, spc_basis, coeff_basis, ref_set=ref_enes)
return hf0k
|
52c915060a869f41ee5262190dd7ffac79b1684b
| 27,908 |
import torch
def get_laf_center(LAF: torch.Tensor) -> torch.Tensor:
"""Returns a center (keypoint) of the LAFs.
Args:
LAF: tensor [BxNx2x3].
Returns:
tensor BxNx2.
Shape:
- Input: :math: `(B, N, 2, 3)`
- Output: :math: `(B, N, 2)`
Example:
>>> input = torch.ones(1, 5, 2, 3) # BxNx2x3
>>> output = get_laf_center(input) # BxNx2
"""
raise_error_if_laf_is_not_valid(LAF)
out: torch.Tensor = LAF[..., 2]
return out
|
c172defe938c35e7f41616b48d9d6d3da21eb9d1
| 27,909 |
def get_all_vlan_bindings_by_logical_switch(context, record_dict):
"""Get Vlan bindings that match the supplied logical switch."""
query = context.session.query(models.VlanBindings)
return query.filter_by(
logical_switch_uuid=record_dict['logical_switch_id'],
ovsdb_identifier=record_dict['ovsdb_identifier']).all()
|
df88a52325e1bee59fae3b489a29ce8ee343d1fb
| 27,910 |
def conv_input_length(output_length, filter_size, padding, stride):
"""Determines input length of a convolution given output length.
Args:
output_length: integer.
filter_size: integer.
padding: one of "same", "valid", "full".
stride: integer.
Returns:
The input length (integer).
"""
if output_length is None:
return None
assert padding in {'same', 'valid', 'full'}
if padding == 'same':
pad = filter_size // 2
elif padding == 'valid':
pad = 0
elif padding == 'full':
pad = filter_size - 1
return (output_length - 1) * stride - 2 * pad + filter_size
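# Illustrative usage (not part of the original snippet): a 'same' convolution
# with filter size 3 and stride 2 that produced 10 outputs consumed
# (10 - 1) * 2 - 2 * 1 + 3 = 19 input elements.
assert conv_input_length(10, 3, 'same', 2) == 19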
|
88c80a77d3aee4050625aa080db5d9b246f9e920
| 27,911 |
import numpy as np
def convert_data_to_int(x, y):
"""
Convert the provided data to integers, given a set of data with fully
populated values.
"""
# Create the new version of X
x_classes = []
for i in range(x.shape[1]):
x_classes.append({item:j for j, item in enumerate(set(x[:,i]))})
new_x = np.zeros(x.shape, dtype='i')
for i, xi in enumerate(x):
for j, xii in enumerate(xi):
new_x[i,j] = x_classes[j][xii]
# Create the new version of y
y_classes = {item:i for i, item in enumerate(set(y))}
new_y = np.zeros(y.shape, dtype='i')
for i, yi in enumerate(y):
new_y[i] = y_classes[yi]
return new_x, new_y
|
c8b3f017a34b68edf4f1740f8a3dd2130664ddae
| 27,912 |
def send_report(report_text, svc_info, now_str):
"""
Publish report to AWS SNS endpoint
Note: publish takes a max of 256KB.
"""
overage = len(report_text) - MAX_SNS_MESSAGE
if overage > 0:
report_text = report_text[:-overage - 20] + '\n<message truncated/>'
resp = SNS_C.publish(TopicArn=svc_info['CowReportARN'],
Message=report_text,
Subject='CowCatcher Report for ' + now_str)
return resp
|
9c48c3d7ba12e11cf3df944942803f36f6c23f52
| 27,913 |
def credential():
"""Return credential."""
return Credential('[email protected]', 'test_password')
|
1da4e56abb87c9c5a0d0996d3a2911a23349321b
| 27,914 |
def get_ez_from_contacts(xlsx_file, contacts_file, label_volume_file):
"""Return list of indices of EZ regions given by the EZ contacts in the patient spreadsheet"""
CONTACTS_IND = 6
EZ_IND = 7
df = pd.read_excel(xlsx_file, sheet_name="EZ hypothesis and EI", header=1)
ez_contacts = []
contacts_col = df.iloc[:, CONTACTS_IND]
mask = contacts_col.notnull()
contacts_names = contacts_col[mask]
ez_mask = df.iloc[:, EZ_IND][mask] == 'YES'
ez_contacts.extend(contacts_names[ez_mask])
contacts = Contacts(contacts_file)
label_vol = nib.load(label_volume_file)
ez_inds = []
for contact in ez_contacts:
coords = contacts.get_coords(contact)
region_ind = nifti.point_to_brain_region(
coords, label_vol, tol=3.0) - 1 # Minus one to account for the shift
if region_ind != -1:
ez_inds.append(region_ind)
return ez_inds
|
b3e4bfda0d0e9830b34012b7995082e90c9932a8
| 27,915 |
def mapfmt_str(fmt: str, size: int) -> str:
"""Same as mapfmt, but works on strings instead of bytes."""
if size == 4:
return fmt
return fmt.replace('i', 'q').replace('f', 'd')
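# Illustrative usage (not part of the original snippet): widen a 32-bit struct
# format string to its 64-bit equivalent.
assert mapfmt_str('iif', 4) == 'iif'
assert mapfmt_str('iif', 8) == 'qqd'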
|
af51b6ac65c80eef1721b64dcd8ee6a8bb5cbc97
| 27,916 |
def RandomImageDetection(rows=None, cols=None):
"""Return a uniform random color `vipy.image.ImageDetection` of size (rows, cols) with a random bounding box"""
rows = np.random.randint(128, 1024) if rows is None else rows
cols = np.random.randint(128, 1024) if cols is None else cols
return ImageDetection(array=np.uint8(255 * np.random.rand(rows, cols, 3)), colorspace='rgb', category='RandomImageDetection',
xmin=np.random.randint(0,cols - 16), ymin=np.random.randint(0,rows - 16),
bbwidth=np.random.randint(16,cols), bbheight=np.random.randint(16,rows))
|
e7f06b32b771f3eb3c10d09e39bc4be4577d4233
| 27,918 |
def upsample(x, stride, target_len, separate_cls=True, truncate_seq=False):
"""
Upsample tensor `x` to match `target_len` by repeating the tokens `stride` time on the sequence length dimension.
"""
if stride == 1:
return x
if separate_cls:
cls = x[:, :1]
x = x[:, 1:]
output = tf.repeat(x, repeats=stride, axis=1)
if separate_cls:
if truncate_seq:
output = tf.pad(output, [[0, 0], [0, stride - 1], [0, 0]])
output = output[:, : target_len - 1]
output = tf.concat([cls, output], axis=1)
else:
output = output[:, :target_len]
return output
|
716c94cb365144e65a6182e58c39284375c8f700
| 27,919 |
import numpy
def torsional_scan_linspaces(zma, tors_names, increment=0.5,
frm_bnd_key=None, brk_bnd_key=None):
""" scan grids for torsional dihedrals
"""
sym_nums = torsional_symmetry_numbers(
zma, tors_names, frm_bnd_key=frm_bnd_key, brk_bnd_key=brk_bnd_key)
intervals = tuple(2*numpy.pi/sym_num - increment for sym_num in sym_nums)
npoints_lst = tuple(
(int(interval / increment)+1) for interval in intervals)
return tuple((0, interval, npoints)
for interval, npoints in zip(intervals, npoints_lst))
|
a2dcd4ec57ae598a42c25102db89af331e6f8a40
| 27,920 |
import ctypes
def get_output_to_console(p_state):
"""Returns a bool indicating whether the Log is output to the console."""
return bool(_Get_Output_To_Console(ctypes.c_void_p(p_state)))
|
71599b5a2e4b2708d6e8d5fa003acc89cd0d030c
| 27,921 |
def check_valid_column(observation):
"""
Validates that our observation only has valid columns
Returns:
- assertion value: True if all provided columns are valid, False otherwise
- error message: empty if all provided columns are valid, describes the problem otherwise
"""
valid_columns = {
"observation_id",
"Type",
"Date",
"Part of a policing operation",
"Latitude",
"Longitude",
"Gender",
"Age range",
"Officer-defined ethnicity",
"Legislation",
"Object of search",
"station"
}
keys = set(observation.keys())
if len(valid_columns - keys) > 0:
missing = valid_columns - keys
error = "Missing columns: {}".format(missing)
return False, error
if len(keys - valid_columns) > 0:
extra = keys - valid_columns
error = "Unrecognized columns provided: {}".format(extra)
return False, error
return True, ""
|
104fc6646a5e4d978b2a0cec4322c6f275b82f42
| 27,922 |
def w_getopt(args, options):
"""A getopt for Windows.
Options may start with either '-' or '/', the option names may
have more than one letter (/tlb or -RegServer), and option names
are case insensitive.
Returns two elements, just as getopt.getopt. The first is a list
of (option, value) pairs in the same way getopt.getopt does, but
there is no '-' or '/' prefix to the option name, and the option
name is always lower case. The second is the list of arguments
which do not belong to an option.
Different from getopt.getopt, a single argument not belonging to an option
does not terminate parsing.
"""
opts = []
arguments = []
while args:
if args[0][:1] in "/-":
arg = args[0][1:] # strip the '-' or '/'
arg = arg.lower()
if arg + ':' in options:
try:
opts.append((arg, args[1]))
except IndexError:
raise GetoptError("option '%s' requires an argument" % args[0])
args = args[1:]
elif arg in options:
opts.append((arg, ''))
else:
raise GetoptError("invalid option '%s'" % args[0])
args = args[1:]
else:
arguments.append(args[0])
args = args[1:]
return opts, arguments
|
34095675fa95cbc1c8474a7253b4d49a2e947dc0
| 27,924 |
def get_alt_for_density(density: float, density_units: str='slug/ft^3',
alt_units: str='ft', nmax: int=20, tol: float=5.) -> float:
"""
Gets the altitude associated with a given air density.
Parameters
----------
density : float
the air density in slug/ft^3
density_units : str; default='slug/ft^3'
the density units; slug/ft^3, slinch/in^3, kg/m^3
alt_units : str; default='ft'
sets the units for the output altitude; ft, m, kft
nmax : int; default=20
max number of iterations for convergence
tol : float; default=5.
tolerance in alt_units
Returns
-------
alt : float
the altitude in feet
"""
tol = convert_altitude(tol, alt_units, 'ft')
dalt = 500. # ft
alt_old = 0.
alt_final = 5000.
n = 0
#density_scale = _density_factor(density_units, "slug/ft^3")
# Newton's method
while abs(alt_final - alt_old) > tol and n < nmax:
alt_old = alt_final
alt1 = alt_old
alt2 = alt_old + dalt
rho1 = atm_density(alt1, density_units=density_units)
rho2 = atm_density(alt2, density_units=density_units)
m = dalt / (rho2 - rho1)
alt_final = m * (density - rho1) + alt1
n += 1
if abs(alt_final - alt_old) > tol:
raise RuntimeError('Did not converge; Check your units; n=nmax=%s\n'
'target alt=%s alt_current=%s' % (nmax, alt_final, alt1))
alt_out = convert_altitude(alt_final, 'ft', alt_units)
return alt_out
|
68243ec75bbe8989e7a9fd63fe6a1635da222cae
| 27,925 |
from datetime import datetime, timezone
def parse_date_string(date: str) -> datetime:
"""Converts date as string (e.g. "2004-05-25T02:19:28Z") to UNIX timestamp (uses UTC, always)
"""
# https://docs.python.org/3.6/library/datetime.html#strftime-strptime-behavior
# http://strftime.org/
parsed = datetime.strptime(date, '%Y-%m-%dT%H:%M:%SZ') # string parse time
# now apply UTC timezone
return parsed.replace(tzinfo=timezone.utc)
|
624e92ceab996d7cfded7c7989e716fbba7abd5e
| 27,926 |
import numpy as np
def levenshtein_distance(s, t, ratio_calc = False):
""" levenshtein_distance:
Calculates levenshtein distance between two strings.
If ratio_calc = True, the function computes the
levenshtein distance ratio of similarity between two strings
For all i and j, distance[i,j] will contain the Levenshtein
distance between the first i characters of s and the
first j characters of t
"""
# Initialize matrix of zeros
rows = len(s) + 1
cols = len(t) + 1
distance = np.zeros((rows,cols), dtype = int)
# Populate matrix of zeros with the indices of each character of both strings
for i in range(1, rows):
for k in range(1,cols):
distance[i][0] = i
distance[0][k] = k
# Iterate over the matrix to compute the cost of deletions,insertions and/or substitutions
for col in range(1, cols):
for row in range(1, rows):
if s[row - 1] == t[col - 1]:
cost = 0 # If the characters are the same in the two strings in a given position [i,j] then the cost is 0
else:
# In order to align the results with those of the Python Levenshtein package, if we choose to calculate the ratio
# the cost of a substitution is 2. If we calculate just distance, then the cost of a substitution is 1.
if ratio_calc:
cost = 2
else:
cost = 1
distance[row][col] = min(distance[row-1][col] + 1, # Cost of deletions
distance[row][col-1] + 1, # Cost of insertions
distance[row-1][col-1] + cost) # Cost of substitutions
if ratio_calc:
# Computation of the Levenshtein Distance Ratio
ratio = ((len(s)+len(t)) - distance[row][col]) / (len(s)+len(t))
return ratio
else:
# print(distance) # Uncomment if you want to see the matrix showing how the algorithm computes the cost of deletions,
# insertions and/or substitutions
# This is the minimum number of edits needed to convert string a to string b
return distance[row][col]
|
670196344e33bd4c474c0b24b306c9fe3d7e093b
| 27,927 |
def no_warnings(func):
""" Decorator to run R functions without warning. """
def run_withoutwarnings(*args, **kwargs):
warn_i = _options().do_slot('names').index('warn')
oldwarn = _options()[warn_i][0]
_options(warn=-1)
try:
res = func(*args, **kwargs)
except Exception as e:
# restore the old warn setting before propagating
# the exception up
_options(warn=oldwarn)
raise e
_options(warn=oldwarn)
return res
return run_withoutwarnings
|
52831940551c324b6e9624af0df28cc2442bac2b
| 27,928 |
def ParseKindsAndSizes(kinds):
"""Parses kind|size list and returns template parameters.
Args:
kinds: list of kinds to process.
Returns:
sizes_known: whether or not all kind objects have known sizes.
size_total: total size of objects with known sizes.
len(kinds) - 2: for template rendering of greater than 3 kinds.
"""
sizes_known = True
size_total = 0
kinds_and_sizes = RetrieveCachedStats()
if kinds_and_sizes:
for kind in kinds:
if kind in kinds_and_sizes:
size_total += kinds_and_sizes[kind]
else:
sizes_known = False
else:
sizes_known = False
if size_total:
size_total = GetPrettyBytes(size_total)
return sizes_known, size_total, len(kinds) - 2
|
7f94fd099ea2f28070fe499288f62d1c0b57cce9
| 27,929 |
def load_3D(path,
n_sampling=10000,
voxelize=True,
voxel_mode="binary",
target_size=(30, 30, 30)):
"""Load 3D data into numpy array, optionally voxelizing it.
Parameters
----------
path : str
Path to 3D file.
n_sampling : int
Number of points to be sampled in case the read 3D data contains a mesh.
voxelize : bool, optional (Default True)
Indicates whether the 3D data will be converted into voxelgrid or not.
voxel_mode : {"binary", "density", "TDF"}, optional (Default "binary")
The type of feature vector that will be generated from the voxelgrid.
binary
0 for empty voxels, 1 for occupied.
density
number of points inside voxel / total number of points.
TDF
Truncated Distance Function. Value between 0 and 1 indicating the distance
between the voxel's center and the closest point. 1 on the surface,
0 on voxels further than 2 * voxel side.
target_size : [int, int, int], optional (Default [30, 30, 30])
Dimensions of voxelgrid in case voxelize is True.
Returns
-------
feature_vector : ndarray
(target_size[0], target_size[1], target_size[2])
Raises
------
ValueError: if 3D format is not valid.
"""
point_cloud = PyntCloud.from_file(path)
if point_cloud.mesh is not None:
point_cloud = PyntCloud(point_cloud.get_sample(
"mesh_random", n=n_sampling))
if voxelize:
vgrid_id = point_cloud.add_structure("voxelgrid", x_y_z=target_size)
voxelgrid = point_cloud.structures[vgrid_id]
if voxel_mode == "binary":
feature_vector = voxelgrid.get_feature_vector(mode="binary")
elif voxel_mode == "density":
feature_vector = voxelgrid.get_feature_vector(mode="density")
elif voxel_mode == "TDF":
feature_vector = voxelgrid.get_feature_vector(mode="TDF")
else:
raise ValueError("Invalid mode; available modes are: {}".format(
{"binary", "density", "TDF"}))
# add fake channel
return feature_vector[None, ...]
else:
return point_cloud
|
eec6614b2675faa61d9a09b8ff3a491580302a91
| 27,930 |
from numpy import array
def create_vector2d(vec):
"""Returns a vector as a numpy array."""
return array([vec[0],vec[1]])
|
0b3cdc81f3744c54dea8aab0ee28743134ff1d42
| 27,931 |
def get_chrom_start_end_from_string(s):
"""Get chrom name, int(start), int(end) from a string '{chrom}__substr__{start}_{end}'
...doctest:
>>> get_chrom_start_end_from_string('chr01__substr__11838_13838')
('chr01', 11838, 13838)
"""
try:
chrom, s_e = s.split('__substr__')
start, end = s_e.split('_')
return chrom, int(start), int(end)
except Exception:
raise ValueError("String %s must be of format '{chrom}__substr__{start}_{end}'" % s)
|
5dbce8eb33188c7f06665cf92de455e1c705f38b
| 27,932 |
def Remove_Invalid_Tokens(tokenized_sentence, invalidating_symbols):
"""
Returns a tokenized sentence without tokens that include
invalidating_symbols
"""
valid_tokens_sentence = [] + tokenized_sentence # forcing a copy, avoid pass by reference
for token in tokenized_sentence:
for invalid_symbol in invalidating_symbols.split():
if invalid_symbol in token:
                valid_tokens_sentence.remove(token)
                break  # remove each offending token once; avoids a ValueError when a token holds several invalid symbols
return valid_tokens_sentence
|
931858685c6c405de5e0b4755ec0a26a672be3b0
| 27,933 |
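A quick illustration of Remove_Invalid_Tokens above: tokens containing '@' or '#' are dropped.
tokens = ['hello', '@user', 'world', '#tag']
print(Remove_Invalid_Tokens(tokens, '@ #'))  # ['hello', 'world']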
def _get_protocol(url):
"""
    Get the protocol of a url.
    Returns 'https' only when the url explicitly starts with 'https://';
    otherwise 'http' is assumed.
"""
if url.find('http://') == 0:
return 'http'
elif url.find('https://') == 0:
return 'https'
else:
return 'http'
|
42b2750148829154f17e34a2cebccf4387f07f25
| 27,934 |
def row_to_str(row):
"""Convert a df row to a string for insert into SQL database."""
return str(list(row)).replace("[", "(").replace("]", ")")
|
fb2b0d598604a124b948f884a6839a40af1203fc
| 27,936 |
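A small sketch of row_to_str above, using a plain list as a stand-in for a DataFrame row's values.
row = [1, 'widget', 9.99]
print(row_to_str(row))  # "(1, 'widget', 9.99)" -- ready for an SQL VALUES clause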
def interpolate_affines(affines):
"""
"""
# get block grid
block_grid = affines.shape[:3]
# construct an all identities matrix for comparison
all_identities = np.empty_like(affines)
for i in range(np.prod(block_grid)):
idx = np.unravel_index(i, block_grid)
all_identities[idx] = np.eye(4)
# if affines are all identity, just return
if np.all(affines == all_identities):
return affines
# process continues until there are no identity matrices left
new_affines = np.copy(affines)
identities = True
while identities:
identities = False
# loop over all affine matrices
for i in range(np.prod(block_grid)):
idx = np.unravel_index(i, block_grid)
# if an identity matrix is found
if np.all(new_affines[idx] == np.eye(4)):
identities = True
trans, denom = np.array([0, 0, 0]), 0
# average translations from 6 connected neighborhood
for ax in range(3):
if idx[ax] > 0:
neighbor = tuple(
x-1 if j == ax else x for j, x in enumerate(idx)
)
neighbor_trans = new_affines[neighbor][:3, -1]
if not np.all(neighbor_trans == 0):
trans = trans + neighbor_trans
denom += 1
if idx[ax] < block_grid[ax]-1:
neighbor = tuple(
x+1 if j == ax else x for j, x in enumerate(idx)
)
neighbor_trans = new_affines[neighbor][:3, -1]
if not np.all(neighbor_trans == 0):
trans = trans + neighbor_trans
denom += 1
# normalize then update matrix
if denom > 0: trans /= denom
new_affines[idx][:3, -1] = trans
return new_affines
|
880ea993634a6c4725d02365d75e79705175c2e5
| 27,937 |
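A hedged sketch of interpolate_affines on a 2x1x1 block grid: the second block's affine is left as identity and inherits the translation of its only neighbor.
import numpy as np
affines = np.zeros((2, 1, 1, 4, 4))
affines[0, 0, 0] = np.eye(4)
affines[0, 0, 0, :3, -1] = [5.0, 0.0, 0.0]  # block 0 carries a translation
affines[1, 0, 0] = np.eye(4)                # block 1 is "missing" (identity)
filled = interpolate_affines(affines)
print(filled[1, 0, 0, :3, -1])  # [5. 0. 0.], averaged from the single neighbor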
import numpy as np
import pandas as pd
from tqdm import tqdm
def getLineMeasures(file_list, orders, names, err_cut=0):
"""
Find line center (in pixels) to match order/mode lines
"""
# Load in x values to match order/mode lines
x_values = np.empty((len(file_list),len(orders)))
x_values[:] = np.nan # want default empty to be nan
x_errors = np.empty((len(file_list),len(orders)))
x_errors[:] = np.nan
pd_keys = pd.DataFrame({'orders':orders.copy().astype(int),
'names':names.copy().astype(str)})
for file_num in tqdm(range(len(file_list))):
# Load in line fit information
file_name = file_list[file_num]
try:
x,m,w,e = readFile(file_name)
m = m.astype(int)
if err_cut > 0:
mask = e < err_cut
x = x[mask]
m = m[mask]
w = w[mask]
e = e[mask]
except ValueError:
continue
# Identify which lines this exposure has
for nord in np.unique(m):
I = m==nord # Mask for an order
# Get identifying names: "(nord, wavelength string)"
n = ["{0:09.3f}".format(wave) for wave in w[I]]
xvl_dict = dict(zip(n,x[I]))
err_dict = dict(zip(n,e[I]))
ord_xval = np.array(pd_keys[pd_keys.orders==nord].names.map(xvl_dict))
ord_errs = np.array(pd_keys[pd_keys.orders==nord].names.map(err_dict))
x_values[file_num,pd_keys.orders==nord] = ord_xval
x_errors[file_num,pd_keys.orders==nord] = ord_errs
return x_values, x_errors
|
b13fe9f46457f7d289d09ffba3b769fe4e1c700e
| 27,938 |
from functools import wraps
def signature_exempt(view_func):
"""Mark a view function as being exempt from signature and apikey check."""
def wrapped_view(*args, **kwargs):
return view_func(*args, **kwargs)
wrapped_view.signature_exempt = True
return wraps(view_func)(wrapped_view)
|
f564ad0ce20e6e2b7ae760c5f50a297f587006d4
| 27,940 |
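A hedged sketch of the decorator in use; the middleware that reads the flag is assumed, not shown.
@signature_exempt
def health_check(request):
    return 'ok'
print(health_check.signature_exempt)  # True -- checked by the (assumed) signature middleware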
def setup(args):
"""
Create configs and perform basic setups.
"""
cfg = get_cfg()
add_tridentnet_config(cfg)
cfg.merge_from_file(args.config_file)
cfg.merge_from_list(args.opts)
# if args.eval_only:
# cfg.MODEL.WEIGHTS = "/root/detectron2/projects/TridentNet/log_80_20/model_0029999.pth"
cfg.freeze()
default_setup(cfg, args)
return cfg
|
13d30557537c7e7d18811e016c0eaf43602f1ef2
| 27,941 |
def get_intersections(line, potential_lines, nodes, precision):
"""
Get the intersection points between the lines defined by two planes
and the lines defined by the cost area (x=0, y=0, y=-x+1)
and the lines defined by the possible combinations of predictors
"""
slope, intercept = line
# Intersection with y axis when x=0
point = get_intersection_point_yaxis(intercept)
if point:
nodes[point].update({line, 'y_axis'})
# Intersection with x axis when y=0
point = get_intersection_point_xaxis(slope, intercept, precision)
if point:
nodes[point].update({line, 'x_axis'})
# Intersection with y=-x+1 line
point = get_intersection_point(slope, intercept, -1, 1, precision)
if point:
nodes[point].update({line, 'hypotenuse'})
# Intersection with other lines
nodes = get_intersection_with_lines(slope, intercept, potential_lines, nodes, precision)
return nodes
|
3e9811bee159f6c550dea5784754e08bc65624d7
| 27,942 |
def subtract_loss_from_gain(gain_load, loss_load):
"""Create a single DataCollection from gains and losses."""
total_loads = []
for gain, loss in zip(gain_load, loss_load):
total_load = gain - loss
total_load.header.metadata['type'] = \
total_load.header.metadata['type'].replace('Gain ', '')
total_loads.append(total_load)
return total_loads
|
b53044b802a8ea13befdde850a478c435b0370ef
| 27,943 |
def zero_out_noisy_epochs(psg, sample_rate, period_length_sec,
max_times_global_iqr=20):
"""
    Sets all values in an epoch of 'period_length_sec' seconds of signal to zero
    (channel-wise) if any absolute value within that period exceeds
    'max_times_global_iqr' times the IQR of all data in the channel across time.
Args:
psg: A ndarray of shape [N, C] of PSG data
sample_rate: The sample rate of data in the PSG
period_length_sec: The length of one epoch/period/segment in seconds
max_times_global_iqr: Extreme value threshold; number of times a value
in a channel must exceed the global IQR for that
channel for it to be termed an outlier.
Returns:
PSG, ndarray of shape [N, C]
A list of lists, one sub-list for each channel, each storing indices
of all epochs that were set to zero.
"""
n_channels = psg.shape[-1]
chan_inds = []
for chan in range(n_channels):
chan_psg = psg[..., chan]
# Compute global IQR
iqr = np.subtract(*np.percentile(chan_psg, [75, 25]))
threshold = iqr * max_times_global_iqr
# Reshape PSG to periods on 0th axis
n_periods = int(chan_psg.shape[0]/(sample_rate*period_length_sec))
chan_psg = chan_psg.reshape(n_periods, -1)
        # Find the indices of epochs containing values that exceed the threshold
inds = np.unique(np.where(np.abs(chan_psg) > threshold)[0])
# Zero out noisy epochs in the particular channel
chan_psg[inds] = 0.
psg[:, chan] = np.reshape(chan_psg, [-1])
chan_inds.append(inds)
return psg, chan_inds
|
427e2652a2e595bd0b25c3a30d35e088a9b0562b
| 27,944 |
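A hedged sketch of zero_out_noisy_epochs: 10 seconds of 2-channel data at 100 Hz with a large artifact injected into the third 1-second epoch of channel 0.
import numpy as np
rng = np.random.default_rng(0)
psg = rng.normal(0.0, 1.0, size=(1000, 2))
psg[250:260, 0] = 500.0  # far above 20x the channel IQR
cleaned, zeroed = zero_out_noisy_epochs(psg, sample_rate=100,
                                        period_length_sec=1,
                                        max_times_global_iqr=20)
print(zeroed[0])  # [2] -- epoch index 2 of channel 0 was zeroed
print(zeroed[1])  # []  -- channel 1 untouched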
from typing import Tuple
from typing import List
def start_training(channel: Channel) -> Tuple[List[ndarray], int, int]:
"""Start a training initiation exchange with a coordinator.
The decoded contents of the response from the coordinator are returned.
Args:
channel (~grpc.Channel): A gRPC channel to the coordinator.
Returns:
~typing.List[~numpy.ndarray]: The weights of a global model to train on.
int: The number of epochs to train.
int: The epoch base of the global model.
"""
coordinator: CoordinatorStub = CoordinatorStub(channel=channel)
# send request to start training
reply: StartTrainingReply = coordinator.StartTraining(
request=StartTrainingRequest()
)
logger.info("Participant received reply", reply=type(reply))
weights: List[ndarray] = [proto_to_ndarray(weight) for weight in reply.weights]
epochs: int = reply.epochs
epoch_base: int = reply.epoch_base
return weights, epochs, epoch_base
|
70ac5b32b58df84cd386cc820f18a8fe2667d620
| 27,946 |
import re
def has_forbidden(mylist) -> bool:
""" Does the string contain one of the forbidden substrings "ab" "cd" "pq"
"xy"? """
return bool(re.search(FORBIDDEN, mylist))
|
848fb1270ba99f40ef1ff0e23296f76895a5484d
| 27,947 |
from main import PAGLuxembourg
def classFactory(iface): # pylint: disable=invalid-name
"""Load PagLuxembourg class from file PagLuxembourg.
:param iface: A QGIS interface instance.
:type iface: QgsInterface
"""
#
return PAGLuxembourg(iface)
|
9ff71fbc9f915435da660861ee9023026dfb2e48
| 27,948 |
def is_official_target(target_name, version):
""" Returns True, None if a target is part of the official release for the
given version. Return False, 'reason' if a target is not part of the
official release for the given version.
target_name: Name if the target (ex. 'K64F')
version: The release version string. Should be a string contained within RELEASE_VERSIONS
"""
result = True
reason = None
target = TARGET_MAP[target_name]
if hasattr(target, 'release_versions') and version in target.release_versions:
if version == '2':
# For version 2, either ARM or uARM toolchain support is required
required_toolchains = set(['ARM', 'uARM'])
if not len(required_toolchains.intersection(set(target.supported_toolchains))) > 0:
result = False
reason = ("Target '%s' must support " % target.name) + \
("one of the folowing toolchains to be included in the mbed 2.0 ") + \
(("official release: %s" + linesep) % ", ".join(required_toolchains)) + \
("Currently it is only configured to support the ") + \
("following toolchains: %s" % ", ".join(target.supported_toolchains))
elif version == '5':
# For version 5, ARM, GCC_ARM, and IAR toolchain support is required
required_toolchains = set(['ARM', 'GCC_ARM', 'IAR'])
required_toolchains_sorted = list(required_toolchains)
required_toolchains_sorted.sort()
supported_toolchains = set(target.supported_toolchains)
supported_toolchains_sorted = list(supported_toolchains)
supported_toolchains_sorted.sort()
if not required_toolchains.issubset(supported_toolchains):
result = False
reason = ("Target '%s' must support " % target.name) + \
("ALL of the folowing toolchains to be included in the mbed OS 5.0 ") + \
(("official release: %s" + linesep) % ", ".join(required_toolchains_sorted)) + \
("Currently it is only configured to support the ") + \
("following toolchains: %s" % ", ".join(supported_toolchains_sorted))
elif not target.default_build == 'standard':
result = False
reason = ("Target '%s' must set the 'default_build' " % target.name) + \
("to 'standard' to be included in the mbed OS 5.0 ") + \
("official release." + linesep) + \
("Currently it is set to '%s'" % target.default_build)
else:
result = False
reason = ("Target '%s' has set an invalid release version of '%s'" % version) + \
("Please choose from the following release versions: %s" + ', '.join(RELEASE_VERSIONS))
else:
result = False
if not hasattr(target, 'release_versions'):
reason = "Target '%s' does not have the 'release_versions' key set" % target.name
elif not version in target.release_versions:
reason = "Target '%s' does not contain the version '%s' in its 'release_versions' key" % (target.name, version)
return result, reason
|
4cd8a2e3735aa91cd66204568c70e645e6f2f8ed
| 27,949 |
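A hedged usage sketch; TARGET_MAP, RELEASE_VERSIONS and linesep are assumed to come from the surrounding mbed tools module.
result, reason = is_official_target('K64F', '5')
if not result:
    print(reason)  # explains which toolchains or settings are missing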
from typing import Dict
from typing import Union
from typing import Any
def _convert_to_dict_or_str(elements_map: Dict[str, Element]) -> Dict[str, Union[str, Dict[Any, Any]]]:
"""Combines a dictionary of xml elements into a dictionary of dicts or str"""
return {
key: XmlDictElement(value) if value or value.items() else value.text
for key, value in elements_map.items()
}
|
a68d1043abb995a632209528a52416a8a4661b58
| 27,950 |
def vsi_tecaji():
"""
Funkcija vrne vse tečaje PGD Hrušica.
"""
poizvedba = """
SELECT id, naziv
FROM tecaji
"""
tecaji = conn.execute(poizvedba).fetchall()
return tecaji
|
c4e9e9a9422920b38d4dce51a27d4db142c50f90
| 27,951 |
def cosine(u, v, dim=-1):
"""cosine similarity"""
return (u * v).sum(dim=dim) / (u.norm(dim=dim, p=2) * v.norm(dim=dim, p=2))
|
2d2a5a02ce20f6ae37dbefa3c8f9399aef2da8ad
| 27,952 |
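A quick check of the cosine helper above with PyTorch tensors.
import torch
u = torch.tensor([[1.0, 0.0], [1.0, 1.0]])
v = torch.tensor([[1.0, 0.0], [-1.0, 1.0]])
print(cosine(u, v))  # tensor([1., 0.]): parallel rows -> 1, orthogonal rows -> 0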
from datetime import datetime, timedelta
def retrive_cache_data(format: str, query: str) -> dict[int, Book]:
    """
    Retrieve the cached data for the query, refreshing it when the cached entry has expired.
    """
date_now = datetime.now()
# save the search results in the cache if not already there or if the cache is expired
if format not in book_cache:
book_cache[format] = {}
if query not in book_cache[format] or book_cache[format][query]["expires"] < date_now:
book_cache[format][query] = {}
book_cache[format][query]["expires"] = date_now + timedelta(hours=1)
book_cache[format][query]["data"] = {
k + 1: {"book": v, "download_url": None, "cover_url": None}
for k, v in enumerate(
Libgen.search_title(query, {"extension": format} if format != "all" else {})
)
}
return book_cache[format][query].get("data")
|
885532f94f1e4b28350d3854ada867f42d386d5f
| 27,953 |
def get_orders_loadings_palette_number(client_orders, value):
"""
    Searches OrdersLoadingPlaces objects linked with the client orders that contain the given value
    :param client_orders: orders of the profile
    :param value: pallet count for which OrdersLoadingPlaces objects are searched
    :return: List with OrdersLoadingPlaces objects or empty list
"""
orders_loadings = list()
for client_order in client_orders:
orders_loading_temp = OrdersLoadingPlaces.objects.filter(order=client_order)
for order_loading in orders_loading_temp:
if order_loading.loading_place.palette_number and order_loading.loading_place.palette_number.pallets_number == value:
orders_loadings.append(order_loading)
return orders_loadings
|
c145997418722e84da5d7420fbba6a8167737f96
| 27,954 |
import math
def rotmat(x=0,y=0,z=0):
"""Rotation Matrix function
This function creates and returns a rotation matrix.
Parameters
----------
x,y,z : float, optional
Angle, which will be converted to radians, in
each respective axis to describe the rotations.
The default is 0 for each unspecified angle.
Returns
-------
Rxyz : list
The product of the matrix multiplication.
Examples
--------
>>> import numpy as np
>>> from .pyCGM import rotmat
>>> x = 0.5
>>> y = 0.3
>>> z = 0.8
>>> np.around(rotmat(x,y,z),8)
array([[ 0.99988882, -0.01396199, 0.00523596],
[ 0.01400734, 0.99986381, -0.00872642],
[-0.00511341, 0.00879879, 0.99994822]])
>>> x = 0.5
>>> np.around(rotmat(x),8)
array([[ 1. , 0. , 0. ],
[ 0. , 0.99996192, -0.00872654],
[ 0. , 0.00872654, 0.99996192]])
>>> x = 1
>>> y = 1
>>> np.around(rotmat(x,y),8)
array([[ 9.9984770e-01, 0.0000000e+00, 1.7452410e-02],
[ 3.0459000e-04, 9.9984770e-01, -1.7449750e-02],
[-1.7449750e-02, 1.7452410e-02, 9.9969541e-01]])
"""
x = math.radians(x)
y = math.radians(y)
z = math.radians(z)
Rx = [ [1,0,0],[0,math.cos(x),math.sin(x)*-1],[0,math.sin(x),math.cos(x)] ]
Ry = [ [math.cos(y),0,math.sin(y)],[0,1,0],[math.sin(y)*-1,0,math.cos(y)] ]
Rz = [ [math.cos(z),math.sin(z)*-1,0],[math.sin(z),math.cos(z),0],[0,0,1] ]
Rxy = matrixmult(Rx,Ry)
Rxyz = matrixmult(Rxy,Rz)
Ryx = matrixmult(Ry,Rx)
Ryxz = matrixmult(Ryx,Rz)
return Rxyz
|
f1b31916abb78f3f47c324c3341d06338f6e4787
| 27,955 |
def add_model_components(m, d, scenario_directory, subproblem, stage):
"""
:param m:
:param d:
:return:
"""
m.Horizon_Energy_Target_Shortage_MWh = Var(
m.ENERGY_TARGET_ZONE_BLN_TYPE_HRZS_WITH_ENERGY_TARGET,
within=NonNegativeReals
)
def violation_expression_rule(mod, z, bt, h):
return mod.Horizon_Energy_Target_Shortage_MWh[z, bt, h] \
* mod.energy_target_allow_violation[z]
m.Horizon_Energy_Target_Shortage_MWh_Expression = Expression(
m.ENERGY_TARGET_ZONE_BLN_TYPE_HRZS_WITH_ENERGY_TARGET,
rule=violation_expression_rule
)
def energy_target_rule(mod, z, bt, h):
"""
Total delivered energy-target-eligible energy must exceed target
:param mod:
:param z:
:param bt:
:param h:
:return:
"""
return mod.Total_Delivered_Horizon_Energy_Target_Energy_MWh[z, bt, h] \
+ mod.Horizon_Energy_Target_Shortage_MWh_Expression[z, bt, h] \
>= mod.Horizon_Energy_Target[z, bt, h]
m.Horizon_Energy_Target_Constraint = Constraint(
m.ENERGY_TARGET_ZONE_BLN_TYPE_HRZS_WITH_ENERGY_TARGET,
rule=energy_target_rule
)
|
e6fe56e72fb7cd5906f0f30da2e7166a09eeb3f1
| 27,956 |
def grow_rate(n, k, nu_c, nu_d, sigma, g, dp, rho_c, rho_d, K):
"""
Compute the instability growth rate on a gas bubble
Write instability growth rate equation in Grace et al. as a root
problem for n = f(k)
Returns
-------
res : float
The residual of the growth-rate equation expressed as a root-finding
problem.
Notes
-----
This function is used by the `grace()` function for maximum stable
particle size. It should not be called directly.
"""
# Compute more derived variables
m_c = np.sqrt(k**2 + n / nu_c)
m_d = np.sqrt(k**2 + n / nu_d)
mu_c = nu_c / rho_c
# Compute the residual of the root function
res = (sigma * k**3 - g * k * dp + n**2 * (rho_c + rho_d)) * \
(k + m_c + K * (k + m_d)) + 4 * n * k * mu_c * (k + K * m_d) * \
(K * k + m_c)
# Return the residual
return res
|
79e277ca7941b80fd52667b4157ffbfd97f9a87e
| 27,957 |
def cg_semirelaxed_fused_gromov_wasserstein(C1:th.Tensor,
A1:th.Tensor,
p:th.Tensor,
C2:th.Tensor,
A2:th.Tensor,
alpha:float,
symmetry:bool=True,
init_mode:str='product',
T_init:th.Tensor=None,
use_log:bool=False,
eps:float=10**(-5),
max_iter:int=1000,
seed:int=0,
verbose:bool=False,
device:str='cpu',
dtype:type=th.float32):
"""
Conditional gradient algorithm for semi-relaxed fused gromov-wasserstein:
\min_{T} \alpha * <L(C_1, C_2) \otimes T, T> + (1-\alpha) * <D, T>
The implementation uses the generalization of the Frank-Wolfe algorithm detailed
in Algorithm 1. Section 3.2 of the main paper.
This general form is discussed in Algorithm 3. of section 7.3.1 in the supplementary material.
It comes down to consider:
- linear_cost = (1-\alpha) * D
- alpha = \alpha
"""
N1 = A1.shape[0]
N2 = A2.shape[0]
d = A1.shape[1]
# Compute matrix of euclidean distances between features
FS2 = (A1**2) @ th.ones((d, N2), dtype=dtype, device=device)
FT2 = th.ones((N1, d), dtype=dtype, device=device) @ (A2**2).T
D = FS2 + FT2 - 2 * A1 @ A2.T
return cg_semirelaxed(C1, p, C2, alpha, (1 - alpha) * D, init_mode, T_init,
symmetry, use_log, eps, max_iter, seed, verbose, device, dtype)
|
5a21edf35423b46826a9b88af62ddd27678538c2
| 27,958 |
def Vinv_terminal_time_series(m_t,Vdc_t):
"""Function to generate time series inverter terminal voltage."""
try:
assert len(m_t) == len(Vdc_t) != None
return m_t*(Vdc_t/2)
except:
LogUtil.exception_handler()
|
cbbcc7475c30694f3960e1b78b4be64bde283a2b
| 27,959 |
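A small numeric sketch of Vinv_terminal_time_series above (values are illustrative).
import numpy as np
m_t = np.array([0.80, 0.85, 0.90])       # modulation index time series
Vdc_t = np.array([550.0, 550.0, 548.0])  # DC-link voltage time series
print(Vinv_terminal_time_series(m_t, Vdc_t))  # [220. 233.75 246.6], i.e. m * Vdc / 2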
def render(ob, ns):
"""Calls the object, possibly a document template, or just returns
it if not callable. (From DT_Util.py)
"""
if hasattr(ob, '__render_with_namespace__'):
ob = ZRPythonExpr.call_with_ns(ob.__render_with_namespace__, ns)
else:
# items might be acquisition wrapped
base = aq_base(ob)
# item might be proxied (e.g. modules might have a deprecation
# proxy)
base = removeAllProxies(base)
if callable(base):
try:
if getattr(base, 'isDocTemp', 0):
ob = ZRPythonExpr.call_with_ns(ob, ns, 2)
else:
ob = ob()
except NotImplementedError:
pass
return ob
|
fba552131df5fe760c124e90e58f845f06fbbf44
| 27,960 |
def GetFlakeInformation(flake, max_occurrence_count, with_occurrences=True):
"""Gets information for a detected flakes.
Gets occurrences of the flake and the attached monorail issue.
Args:
flake(Flake): Flake object for a flaky test.
max_occurrence_count(int): Maximum number of occurrences to fetch.
with_occurrences(bool): If the flake must be with occurrences or not.
For flakes reported by Flake detection, there should always be
occurrences, but it's not always true for flakes reported by
Flake Analyzer, ignore those flakes for now.
Returns:
flake_dict(dict): A dict of information for the test. Including data from
its Flake entity, its flake issue information and information of all its
flake occurrences.
"""
occurrences = []
for flake_type in [
FlakeType.CQ_FALSE_REJECTION, FlakeType.RETRY_WITH_PATCH,
FlakeType.CI_FAILED_STEP, FlakeType.CQ_HIDDEN_FLAKE
]:
typed_occurrences = _FetchFlakeOccurrences(flake, flake_type,
max_occurrence_count)
occurrences.extend(typed_occurrences)
if max_occurrence_count:
max_occurrence_count = max_occurrence_count - len(typed_occurrences)
if max_occurrence_count == 0:
# Bails out if the number of occurrences with higher impact has hit the
# cap.
break
if not occurrences and with_occurrences:
# Flake must be with occurrences, but there is no occurrence, bail out.
return None
# Makes sure occurrences are sorted by time_happened in descending order,
# regardless of types.
occurrences.sort(key=lambda x: x.time_happened, reverse=True)
flake_dict = flake.to_dict()
flake_dict['occurrences'] = _GetGroupedOccurrencesByBuilder(occurrences)
flake_dict['flake_counts_last_week'] = _GetFlakeCountsList(
flake.flake_counts_last_week)
flake_issue = GetFlakeIssue(flake)
if flake_issue and flake_issue.status and flake_issue.status in OPEN_STATUSES:
flake_dict['flake_issue'] = flake_issue.to_dict()
flake_dict['flake_issue']['issue_link'] = FlakeIssue.GetLinkForIssue(
flake_issue.monorail_project, flake_issue.issue_id)
flake_dict['flake_issue'][
'last_updated_time_in_monorail'] = _GetLastUpdatedTimeDelta(flake_issue)
flake_dict['culprits'], flake_dict['sample_analysis'] = (
_GetFlakeAnalysesResults(flake_issue.issue_id))
return flake_dict
|
596573749599e7a1b49e8047b68d67a09e4e00e9
| 27,961 |
def get_prop_cycle():
"""Get the prop cycle."""
prop_cycler = rcParams['axes.prop_cycle']
if prop_cycler is None and 'axes.color_cycle' in rcParams:
clist = rcParams['axes.color_cycle']
prop_cycler = cycler('color', clist)
return prop_cycler
|
9b571b16cddf187e9bbfdacd598f280910de130a
| 27,962 |
def binary_crossentropy(target, output, from_logits=False):
"""Binary crossentropy between an output tensor and a target tensor.
Arguments:
target: A tensor with the same shape as `output`.
output: A tensor.
from_logits: Whether `output` is expected to be a logits tensor.
By default, we consider that `output`
encodes a probability distribution.
Returns:
A tensor.
"""
# Note: nn.sigmoid_cross_entropy_with_logits
# expects logits, Keras expects probabilities.
if not from_logits:
# transform back to logits
epsilon_ = _to_tensor(epsilon(), output.dtype.base_dtype)
output = clip_ops.clip_by_value(output, epsilon_, 1 - epsilon_)
output = math_ops.log(output / (1 - output))
return sigmoid_cross_entropy_with_logits(labels=target, logits=output)
|
eb388c3bb3454eec6e26797313534cf089d06a6d
| 27,963 |
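A hedged sketch contrasting the two input conventions of binary_crossentropy; it assumes the function's Keras backend dependencies resolve in the defining module.
import tensorflow as tf
target = tf.constant([[1.0], [0.0]])
probs = tf.constant([[0.9], [0.2]])
logits = tf.math.log(probs / (1.0 - probs))  # invert the sigmoid by hand
# Passing probabilities (the default) and passing logits agree up to the epsilon clipping.
loss_from_probs = binary_crossentropy(target, probs)
loss_from_logits = binary_crossentropy(target, logits, from_logits=True)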