content (string, 35-762k chars) | sha1 (string, 40 chars) | id (int64, 0-3.66M)
---|---|---|
def check_vacancy_at_cell(house_map, cell):
"""
Return True if the given cell is vacant.
Vacancy is defined as a '0' in the house map at the given coordinates.
(i.e. there is no wall at that location)
"""
x = cell[0]
y = cell[1]
if not 0 <= x < MAP_WIDTH:
return False
if not 0 <= y < MAP_HEIGHT:
return False
return house_map[y][x] == '0' | 78a24b25a6954b6411aa686512066b25c6f4e1d5 | 10,100 |
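A minimal usage sketch for the vacancy check above; the MAP_WIDTH/MAP_HEIGHT constants and the sample house_map are illustrative assumptions, since the original module-level globals are not shown here.
MAP_WIDTH = 3   # assumed for this example
MAP_HEIGHT = 2  # assumed for this example
house_map = [
    ['0', '1', '0'],  # row y = 0
    ['0', '0', '1'],  # row y = 1
]
assert check_vacancy_at_cell(house_map, (0, 0)) is True   # open cell
assert check_vacancy_at_cell(house_map, (1, 0)) is False  # wall ('1')
assert check_vacancy_at_cell(house_map, (5, 0)) is False  # out of bounds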
from typing import Dict
def extract_text_and_vertices(x: Dict[str, str]):
"""Extracts all annotations and bounding box vertices from a single OCR
output from Google Cloud Vision API.
The first element is the full OCR. It's equivalent to the output of
`extract_full_text_annotation` for the same OCR output.
Args:
x (Dict[str, str]): whole OCR output.
Returns:
list where each item is a tuple where the first element is the text and
the second are the 4 vertices of the corresponding bounding box.
"""
blocks = []
for annotation in x["textAnnotations"]:
text = annotation['description']
        vertices = [
            tuple(v.values()) for v in annotation['boundingPoly']['vertices']
        ]
blocks.append((text, vertices))
return blocks | 6fde2bc71ceccfd580a6f5f0da7fa0b76e045bad | 10,101 |
import numpy as np
def cspace3(obs, bot, theta_steps):
"""
    Compute the 3D (x, y, yaw) configuration space obstacle for a list of convex 2D obstacles given by [obs] and a convex 2D robot given by vertices in [bot] at a variety of theta values.
obs should be a 3D array of size (2, vertices_per_obstacle, num_obstacles)
bot should be a 2d array of size (2, num_bot_vertices)
theta_steps can either be a scalar, in which case it specifies the number of theta values, evenly spaced between -pi and +pi; or it can be a vector of theta values.
"""
bot = -np.array(bot)
if np.isscalar(theta_steps):
thetas = np.linspace(-np.pi, np.pi, num=theta_steps)
else:
thetas = theta_steps
c_obs = []
for k in range(obs.shape[2]):
for j in range(len(thetas)-1):
th0 = thetas[j]
th1 = thetas[j+1]
bot_rot0 = rotmat(th0).dot(bot)
c_obs0 = minkowski_sum(bot_rot0, obs[:,:,k])
bot_rot1 = rotmat(th1).dot(bot)
c_obs1 = minkowski_sum(bot_rot1, obs[:,:,k])
c_pts = np.vstack((np.hstack((c_obs0, c_obs1)),
np.hstack((th0 + np.zeros(c_obs0.shape[1]),
th1 + np.zeros(c_obs1.shape[1])))))
c_obs.append(c_pts)
if len(c_obs) == 0:
return np.zeros((3, bot.shape[1] * 2, 0))
max_n_vert = max((x.shape[1] for x in c_obs))
    return np.dstack([np.pad(c, pad_width=((0,0), (0,max_n_vert-c.shape[1])), mode='edge') for c in c_obs]) | 723e1b885a19ae0416226856f7b03aecb045e139 | 10,102 |
from typing import Optional
def Graph(backend:Optional[str]=None) -> BaseGraph:
"""Returns an instance of an implementation of :class:`~pyzx.graph.base.BaseGraph`.
By default :class:`~pyzx.graph.graph_s.GraphS` is used.
    Currently ``backend`` is allowed to be ``'simple'`` (the default),
    ``'graph_tool'``, ``'igraph'``, or ``'quizx-vec'``.
This method is the preferred way to instantiate a ZX-diagram in PyZX.
Example:
To construct an empty ZX-diagram, just write::
g = zx.Graph()
"""
if backend is None: backend = 'simple'
if backend not in backends:
raise KeyError("Unavailable backend '{}'".format(backend))
if backend == 'simple': return GraphS()
if backend == 'graph_tool':
return GraphGT()
if backend == 'igraph': return GraphIG()
if backend == 'quizx-vec': return quizx.VecGraph() # type: ignore
return GraphS() | 9d2d759096016e0df770863448305b627df0ce73 | 10,103 |
from datetime import datetime, timedelta
def get_carb_data(data, offset=0):
""" Load carb information from an issue report cached_carbs dictionary
Arguments:
data -- dictionary containing cached carb information
offset -- the offset from UTC in seconds
Output:
    3 lists in (carb_start_dates, carb_values, carb_absorption_times)
        format
"""
carb_values = [float(dict_.get("quantity")) for dict_ in data]
start_dates = [
datetime.strptime(
dict_.get("startDate"),
" %Y-%m-%d %H:%M:%S %z"
) + timedelta(seconds=offset)
for dict_ in data
]
absorption_times = [
float(dict_.get("absorptionTime")) / 60
if dict_.get("absorptionTime") is not None
else None for dict_ in data
]
assert len(start_dates) == len(carb_values) == len(absorption_times),\
"expected input shapes to match"
return (start_dates, carb_values, absorption_times) | cc2e54859f3f4635e9724260f277dd3c191c32ac | 10,104 |
from scipy import stats
def _discover_bounds(cdf, tol=1e-7):
"""
Uses scipy's general continuous distribution methods
which compute the ppf from the cdf, then use the ppf
to find the lower and upper limits of the distribution.
"""
class DistFromCDF(stats.distributions.rv_continuous):
def cdf(self, x):
return cdf(x)
dist = DistFromCDF()
# the ppf is the inverse cdf
lower = dist.ppf(tol)
upper = dist.ppf(1. - tol)
return lower, upper | bb882065ed74a34c61c60aa48481b2737a2496da | 10,105 |
def ml_app_instances_ml_app_instance_id_get(ml_app_instance_id): # noqa: E501
"""ml_app_instances_ml_app_instance_id_get
# noqa: E501
:param ml_app_instance_id: MLApp instance identifier
:type ml_app_instance_id: str
:rtype: None
"""
return 'do some magic!' | e702d106b6dd4999ed536f77347ca84675be3716 | 10,106 |
import random
from typing import Optional
def generate_name(style: str = 'underscore', seed: Optional[int] = None) -> str:
"""Generate a random name."""
if seed is not None:
random.seed(seed)
return format_names(random_names(), style=style) | 2f74460f5492c3b4788800d6e33a44b856df91aa | 10,107 |
def argunique(a, b):
"""
    Find the unique correspondences in the a--b pairing, i.e. guarantee that the final output aa--bb contains no repeated elements and no one-to-many mappings.
    :param a:
    :param b:
    :return: aaa, bbb such that aaa--bbb are unique pairs
    """
    # Check the elements of a one by one: on the first occurrence of i, record the pair in seta;
    # on later occurrences check consistency and set conflicting entries to -1.
    # A value of -1 means the current element i of a had a one-to-many mapping and is discarded; -1 will never be matched again.
seta = {}
for i, j in zip(a, b):
if i not in seta:
seta[i] = j
elif seta[i] != j:
seta[i] = -1
aa = [i for i in seta if seta[i] != -1]
bb = [seta[i] for i in seta if seta[i] != -1]
    # Then do the same in reverse, indexed by b, to remove the remaining duplicates
setb = {}
for i, j in zip(aa, bb):
if j not in setb:
setb[j] = i
elif setb[j] != i:
setb[j] = -1
aaa = [setb[j] for j in setb if setb[j] != -1]
bbb = [j for j in setb if setb[j] != -1]
return aaa, bbb | e804436203496d5f3109511967a0d75eaca330da | 10,108 |
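A small illustration of argunique on hypothetical inputs: repeated but consistent pairs are kept once, while any element with a one-to-many mapping is dropped.
print(argunique([1, 2, 3, 3], [4, 5, 6, 6]))  # ([1, 2, 3], [4, 5, 6]) -- the duplicate 3-6 pair collapses to one
print(argunique([1, 1, 2], [7, 8, 9]))        # ([2], [9]) -- 1 maps to both 7 and 8, so it is removed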
def move(obj, direction):
"""
Moves object by (dx, dy).
Returns true if move succeeded.
"""
goal = obj.pos + direction
if (goal.x < 0 or goal.y < 0 or
goal.x >= obj.current_map.width or
goal.y >= obj.current_map.height):
# try_ catches this for the player, but need to
# check here for NPCs
return False
if not obj.current_map.is_blocked_from(obj.pos, goal):
obj.pos = goal
if obj.fighter:
obj.fighter.exhaustion += MOVE_EXHAUSTION
return True
return False | 23917e448ed953acb2bb864d7a14d01c72f07a85 | 10,109 |
def addMedicine(medicine: object):
"""Data required are "name", "description", "price", "quantity", "medicalId" """
return mr.makePostRequest(mr.API + "/medicine/", medicine) | f488ecd16d6e2986944ae26e5776ca9f9be7e170 | 10,110 |
from benchbuild.utils.db import create_run
from benchbuild.utils import schema as s
from benchbuild.settings import CFG
from datetime import datetime
def begin(command, project, ename, group):
"""
Begin a run in the database log.
Args:
command: The command that will be executed.
        project: The project we belong to.
ename: The experiment name we belong to.
group: The run group we belong to.
Returns:
(run, session), where run is the generated run instance and session the
associated transaction for later use.
"""
db_run, session = create_run(command, project, ename, group)
db_run.begin = datetime.now()
db_run.status = 'running'
log = s.RunLog()
log.run_id = db_run.id
log.begin = datetime.now()
log.config = repr(CFG)
session.add(log)
session.commit()
return db_run, session | a33a5e809b20b6d1f92545bd0df5de9fbc230f91 | 10,111 |
def fortran_library_item(lib_name,
sources,
**attrs
): #obsolete feature
""" Helper function for creating fortran_libraries items. """
build_info = {'sources':sources}
known_attrs = ['module_files','module_dirs',
'libraries','library_dirs']
for key,value in attrs.items():
if key not in known_attrs:
            raise TypeError(
                "fortran_library_item() got an unexpected keyword "
                "argument '%s'" % key)
build_info[key] = value
return (lib_name,build_info) | 720802933b9ebcaab566f3deeb063341b85dba7e | 10,112 |
import numpy as np
def copy_generator(generator):
"""Copy an existing numpy (random number) generator.
Parameters
----------
generator : numpy.random.Generator or numpy.random.RandomState
The generator to copy.
Returns
-------
numpy.random.Generator or numpy.random.RandomState
In numpy <=1.16 a ``RandomState``, in 1.17+ a ``Generator``.
Both are copies of the input argument.
"""
if isinstance(generator, np.random.RandomState):
return _copy_generator_np116(generator)
return _copy_generator_np117(generator) | 57f5c3b9ad934330b1eedb6460943204f97b9436 | 10,113 |
import os, sys
import subprocess
import datetime
def uptime():
"""Returns a datetime.timedelta instance representing the uptime in a Windows 2000/NT/XP machine"""
if not sys.platform.startswith('win'):
        raise RuntimeError("This function is to be used in windows only")
cmd = "net statistics server"
p = subprocess.Popen(cmd, shell=True,
stdin=subprocess.PIPE, stdout=subprocess.PIPE)
(child_stdin, child_stdout) = (p.stdin, p.stdout)
lines = child_stdout.readlines()
child_stdin.close()
child_stdout.close()
lines = [line.strip() for line in lines if line.strip()]
date, time, ampm = lines[1].split()[2:5]
#print date, time, ampm
m, d, y = [int(v) for v in date.split('/')]
H, M = [int(v) for v in time.split(':')]
if ampm.lower() == 'pm':
H += 12
now = datetime.datetime.now()
then = datetime.datetime(y, m, d, H, M)
diff = now - then
return diff | a5712fa6acc068174b09f942102da6cda48660f9 | 10,114 |
from os import path, walk

import pandas as pd
def sort_from_avro(df: 'pd.DataFrame', cur_filename: str, order_folder: str) -> 'pd.DataFrame':
    """Order a dataframe to match the row order of the corresponding avro index file
:param df: the input dataframe
:type df: pandas.DataFrame
:param cur_filename: the initial file name
:type cur_filename: str
:param order_folder: the order_folder path
:type order_folder: str
    :return: the reordered dataframe
:rtype: pandas.DataFrame
"""
real_filename = cur_filename.split(".", 1)[0].replace("results_", "")
ord_df = None
for root, _, files in walk(order_folder):
for file_ in files:
if file_.find(real_filename) != -1:
ord_df = pd.read_csv(path.join(root, file_))
ord_df.rename(columns={'FileName': "Filename"}, inplace=True)
if ord_df is None:
return None
print(
f"{STATUS_ARROW}[File:{STATUS_WARNING(cur_filename)}][Order dataframe with avro indexes]")
df_mask = df.Filename.duplicated(keep=False)
ord_df_mask = ord_df.Filename.duplicated(keep=False)
# Add counter number for unique indexes
df.loc[df_mask, 'Filename'] += "_#" + \
df.groupby('Filename').cumcount().add(1).astype(str)
ord_df.loc[ord_df_mask, 'Filename'] += "_#" + \
ord_df.groupby('Filename').cumcount().add(1).astype(str)
# Change indexes
df = df.set_index("Filename")
ord_df = ord_df.set_index("Filename")
# Reindex
new_index = df.reindex_like(ord_df, method=None).dropna()
df.set_index(new_index.index, inplace=True)
df.reset_index(inplace=True)
ord_df.reset_index(inplace=True)
# Remove duplicate counters
df.Filename = df.Filename.apply(lambda elm: elm.rsplit("_#", 1)[
0] if elm.find("_#") else elm)
ord_df.Filename = ord_df.Filename.apply(lambda elm: elm.rsplit("_#", 1)[
0] if elm.find("_#") else elm)
if not all(ord_df.Filename.eq(df.Filename)):
print("File name not equal...")
exit(-1)
return df | be2d0ec8a69c9df5605e21b9a6643983fd1fa86e | 10,115 |
def test_pages_kingdom_successful(args, protein_gen_success, cazy_home_url, monkeypatch):
"""Test parse_family_by_kingdom() when all is successful."""
test_fam = Family("famName", "CAZyClass", "http://www.cazy.org/GH14.html")
def mock_get_pag(*args, **kwargs):
return ["http://www.cazy.org/GH14_all.html"]
def mock_get_pages(*args, **kwargs):
return protein_gen_success
monkeypatch.setattr(get_cazy_pages, "get_pagination_pages_kingdom", mock_get_pag)
monkeypatch.setattr(get_cazy_pages, "get_html_page", mock_get_pages)
get_cazy_pages.parse_family_by_kingdom(
family=test_fam,
cazy_home=cazy_home_url,
args=args["args"],
kingdoms=["Bacteria"],
) | b5fdbeac6a4a54170c17a98689ae5cc6bbca9542 | 10,116 |
def _truncate_and_pad_token_ids(token_ids, max_length):
"""Truncates or pads the token id list to max length."""
token_ids = token_ids[:max_length]
padding_size = max_length - len(token_ids)
if padding_size > 0:
token_ids += [0] * padding_size
return token_ids | a8f29fdbc99c3dcac42b9275037d3a3c39c22e12 | 10,117 |
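Two quick, self-contained checks of the truncation/padding behaviour (illustrative only):
assert _truncate_and_pad_token_ids([1, 2, 3], max_length=5) == [1, 2, 3, 0, 0]       # padded with zeros
assert _truncate_and_pad_token_ids([1, 2, 3, 4, 5, 6], max_length=4) == [1, 2, 3, 4]  # truncated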
import otbApplication
def build_bundletoperfectsensor_pipeline(pan_img, ms_img):
"""
    This function builds a pipeline that performs P+XS pansharpening
:param pan_img: Path to the panchromatic image
:type pan_img: string
:param ms_img: Path to the multispectral image
:type ms_img: string
    :returns: pansharpening_app
:rtype: otb application
"""
pansharpening_app = otbApplication.Registry.CreateApplication(
"BundleToPerfectSensor"
)
pansharpening_app.SetParameterString("inp", pan_img)
pansharpening_app.SetParameterString("inxs", ms_img)
pansharpening_app.Execute()
return pansharpening_app | f40aa0828ef50ef8f81f901f93dbaf8690a14d4f | 10,118 |
def get_argparser_ctor_args():
"""
This method returns a dict containing the kwargs for constructing an
argparse.ArgumentParser (either directly or as a subparser).
"""
return {
'prog': 'CodeChecker store',
'formatter_class': arg.RawDescriptionDefaultHelpFormatter,
# Description is shown when the command's help is queried directly
'description': """
Store the results from one or more 'codechecker-analyze' result files in a
database.""",
# Epilogue is shown after the arguments when the help is queried
# directly.
'epilog': """
Environment variables
------------------------------------------------
CC_PASS_FILE The location of the password file for auto login. By default
CodeChecker will use '~/.codechecker.passwords.json' file.
It can also be used to setup different credential files to
login to the same server with a different user.
CC_SESSION_FILE The location of the session file where valid sessions are
stored. This file will be automatically created by
CodeChecker. By default CodeChecker will use
'~/.codechecker.session.json'. This can be used if
restrictive permissions forbid CodeChecker from creating
files in the users home directory (e.g. in a CI
environment).
The results can be viewed by connecting to such a server in a Web browser or
via 'CodeChecker cmd'.""",
# Help is shown when the "parent" CodeChecker command lists the
# individual subcommands.
'help': "Save analysis results to a database."
} | 9debf6233652052782295aeb5b630ee2b4b3b19e | 10,119 |
def castep_geom_count(dot_castep):
"""Count the number of geom cycles"""
count = 0
with open(dot_castep) as fhandle:
for line in fhandle:
if 'starting iteration' in line:
count += 1
return count | 6a619b5853a02a8c118af1fc19da0d803941c84f | 10,120 |
def nav_login(request, text="Login", button=False):
"""Navigation login button
Args:
request (Request): Request object submitted by template
text (str, optional): Text to be shown in button. Defaults to "Login".
button (bool, optional): Is this to be styled as a button or as a link. Defaults to False.
Returns:
SafeText: HTML form
"""
url = reverse("login")
return nav_next(request, url, text, button) | ddbc3de38c47425ec9f095577d178c068cce74c2 | 10,121 |
def parse_adapter(name: str, raw: dict) -> dict:
"""Parse a single adapter."""
parsed = {
"name": strip_right(obj=name, fix="_adapter"),
"name_raw": name,
"name_plugin": raw["unique_plugin_name"],
"node_name": raw["node_name"],
"node_id": raw["node_id"],
"status": raw["status"],
"features": raw["supported_features"],
}
generic_name = GENERIC_NAME
discovery_name = DISCOVERY_NAME
specific_name = get_specific_name(raw=raw)
config = raw["config"]
specific_schema = config.get(specific_name, {}).get("schema", {})
specific_schema = parse_schema(raw=specific_schema)
generic_schema = config[generic_name]["schema"]
generic_schema = parse_schema(raw=generic_schema)
discovery_schema = config[discovery_name]["schema"]
discovery_schema = parse_schema(raw=discovery_schema)
cnx_schema = parse_schema(raw=raw["schema"])
cnx_schema["connection_label"] = {
"name": "connection_label",
"title": "Connection Label",
"type": "string",
"required": False,
}
parsed["schemas"] = {
"cnx": cnx_schema,
"specific": specific_schema,
"generic": generic_schema,
"discovery": discovery_schema,
"generic_name": generic_name,
"specific_name": specific_name,
"discovery_name": discovery_name,
}
parsed["config"] = {
"specific": raw["config"].get(specific_name, {}).get("config", {}),
"generic": raw["config"].get(generic_name, {}).get("config", {}),
"discovery": raw["config"].get(discovery_name, {}).get("config", {}),
}
parsed["cnx"] = parse_cnx(raw=raw, parsed=parsed)
parsed["cnx_count_total"] = len(parsed["cnx"])
parsed["cnx_count_broken"] = len([x for x in parsed["cnx"] if not x["working"]])
parsed["cnx_count_working"] = len([x for x in parsed["cnx"] if x["working"]])
return parsed | 085b8a38561d6ffda12ca27d2f2089759b34e1ed | 10,122 |
def export_phones(ucm_axl):
"""
Export Phones
"""
try:
phone_list = ucm_axl.get_phones(
tagfilter={
"name": "",
"description": "",
"product": "",
"model": "",
"class": "",
"protocol": "",
"protocolSide": "",
"callingSearchSpaceName": "",
"devicePoolName": "",
"commonDeviceConfigName": "",
"commonPhoneConfigName": "",
"networkLocation": "",
"locationName": "",
"mediaResourceListName": "",
"networkHoldMohAudioSourceId": "",
"userHoldMohAudioSourceId": "",
"loadInformation": "",
"securityProfileName": "",
"sipProfileName": "",
"cgpnTransformationCssName": "",
"useDevicePoolCgpnTransformCss": "",
"numberOfButtons": "",
"phoneTemplateName": "",
"primaryPhoneName": "",
"loginUserId": "",
"defaultProfileName": "",
"enableExtensionMobility": "",
"currentProfileName": "",
"loginTime": "",
"loginDuration": "",
# "currentConfig": "",
"ownerUserName": "",
"subscribeCallingSearchSpaceName": "",
"rerouteCallingSearchSpaceName": "",
"allowCtiControlFlag": "",
"alwaysUsePrimeLine": "",
"alwaysUsePrimeLineForVoiceMessage": "",
}
)
all_phones = []
for phone in phone_list:
# print(phone)
            phone_details = {
                "name": phone.name,
                "description": phone.description,
                "product": phone.product,
                "model": phone.model,
                "protocol": phone.protocol,
                "protocolSide": phone.protocolSide,
                "callingSearchSpaceName": phone.callingSearchSpaceName._value_1,
                "devicePoolName": phone.devicePoolName._value_1,
"commonDeviceConfigName": phone.commonDeviceConfigName._value_1,
"commonPhoneConfigName": phone.commonPhoneConfigName._value_1,
"networkLocation": phone.networkLocation,
"locationName": phone.locationName._value_1,
"mediaResourceListName": phone.mediaResourceListName._value_1,
"networkHoldMohAudioSourceId": phone.networkHoldMohAudioSourceId,
"userHoldMohAudioSourceId": phone.userHoldMohAudioSourceId,
"loadInformation": phone.loadInformation,
"securityProfileName": phone.securityProfileName._value_1,
"sipProfileName": phone.sipProfileName._value_1,
"cgpnTransformationCssName": phone.cgpnTransformationCssName._value_1,
"useDevicePoolCgpnTransformCss": phone.useDevicePoolCgpnTransformCss,
"numberOfButtons": phone.numberOfButtons,
"phoneTemplateName": phone.phoneTemplateName._value_1,
"primaryPhoneName": phone.primaryPhoneName._value_1,
"loginUserId": phone.loginUserId,
"defaultProfileName": phone.defaultProfileName._value_1,
"enableExtensionMobility": phone.enableExtensionMobility,
"currentProfileName": phone.currentProfileName._value_1,
"loginTime": phone.loginTime,
"loginDuration": phone.loginDuration,
# "currentConfig": phone.currentConfig,
"ownerUserName": phone.ownerUserName._value_1,
"subscribeCallingSearchSpaceName": phone.subscribeCallingSearchSpaceName._value_1,
"rerouteCallingSearchSpaceName": phone.rerouteCallingSearchSpaceName._value_1,
"allowCtiControlFlag": phone.allowCtiControlFlag,
"alwaysUsePrimeLine": phone.alwaysUsePrimeLine,
"alwaysUsePrimeLineForVoiceMessage": phone.alwaysUsePrimeLineForVoiceMessage,
}
line_details = ucm_axl.get_phone(name=phone.name)
# print(line_details.lines.line)
try:
for line in line_details.lines.line:
# print(line)
phone_details[f"line_{line.index}_dirn"] = line.dirn.pattern
phone_details[f"line_{line.index}_routePartitionName"] = line.dirn.routePartitionName._value_1
phone_details[f"line_{line.index}_display"] = line.display
phone_details[f"line_{line.index}_e164Mask"] = line.e164Mask
except Exception as e:
print(e)
all_phones.append(phone_details)
print(
f"exporting: {phone.name}: {phone.model} - {phone.description}")
print("-" * 35)
print(f"number of phones: {len(all_phones)}")
return all_phones
except Exception as e:
print(e)
return [] | 1487cef48c5666224da57173b968e9988f587a57 | 10,123 |
def is_various_artists(name, mbid):
"""Check if given name or mbid represents 'Various Artists'."""
return name and VA_PAT.match(name) or mbid == VA_MBID | 084f1d88b99ec7f5b6eac0774a05e901bd701603 | 10,124 |
def validate_ruletype(t):
"""Validate *bounds rule types."""
if t not in ["typebounds"]:
raise exception.InvalidBoundsType("{0} is not a valid *bounds rule type.".format(t))
return t | a8ae173f768837cdc35d1a8f6429614b58a74988 | 10,125 |
from pathlib import Path
import io
import tarfile
import sys
def download_and_unpack_database(db: str, sha256: str) -> Path:
"""Download the given database, unpack it to the local filesystem, and
return the path.
"""
local_dir = cache_path(f"state_transition_dataset/{sha256}")
with _DB_DOWNLOAD_LOCK, InterProcessLock(
transient_cache_path(".state_transition_database_download.LOCK")
):
if not (local_dir / ".installed").is_file():
tar_data = io.BytesIO(download(db, sha256))
local_dir.mkdir(parents=True, exist_ok=True)
logger.info("Unpacking database to %s ...", local_dir)
with tarfile.open(fileobj=tar_data, mode="r:bz2") as arc:
arc.extractall(str(local_dir))
(local_dir / ".installed").touch()
unpacked = [f for f in local_dir.iterdir() if f.name != ".installed"]
if len(unpacked) != 1:
print(
f"fatal: Archive {db} expected to contain one file, contains: {len(unpacked)}",
file=sys.stderr,
)
return unpacked[0] | e67081a82999ddf524825199acf7338394bb129f | 10,126 |
import elftools.elf.constants
def decode_section_flags(sflags: str) -> int:
"""Map readelf's representation of section flags to ELF flag values."""
d = {
'W': elftools.elf.constants.SH_FLAGS.SHF_WRITE,
'A': elftools.elf.constants.SH_FLAGS.SHF_ALLOC,
'X': elftools.elf.constants.SH_FLAGS.SHF_EXECINSTR,
'M': elftools.elf.constants.SH_FLAGS.SHF_MERGE,
'S': elftools.elf.constants.SH_FLAGS.SHF_STRINGS,
'I': elftools.elf.constants.SH_FLAGS.SHF_INFO_LINK,
'L': elftools.elf.constants.SH_FLAGS.SHF_LINK_ORDER,
'O': elftools.elf.constants.SH_FLAGS.SHF_OS_NONCONFORMING,
'G': elftools.elf.constants.SH_FLAGS.SHF_GROUP,
'T': elftools.elf.constants.SH_FLAGS.SHF_TLS,
'C': 0x800, # SHF_COMPRESSED
'E': elftools.elf.constants.SH_FLAGS.SHF_EXCLUDE,
'y': 0x20000000, # SHF_ARM_PURECODE
}
flags = 0
for k, v in d.items():
if k in sflags:
flags |= v
return flags | e007f1f370f6203bafe92a1a6422100f2d9626ae | 10,127 |
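A small sanity check, assuming pyelftools is installed; with the standard ELF values SHF_WRITE=0x1, SHF_ALLOC=0x2 and SHF_EXECINSTR=0x4, a writable, allocated, executable section decodes to 0x7.
assert decode_section_flags('WAX') == 0x7  # SHF_WRITE | SHF_ALLOC | SHF_EXECINSTR
assert decode_section_flags('') == 0       # no flags set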
def nCr(n,r):
"""
Implements multiplicative formula:
https://en.wikipedia.org/wiki/Binomial_coefficient#Multiplicative_formula
"""
if r < 0 or r > n:
return 0
if r == 0 or r == n:
return 1
c = 1
    for i in range(min(r, n - r)):
c = c * (n - i) // (i + 1)
return c | 8c0dc30b4cdab47c99bf98459e435147ac0b92fd | 10,128 |
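A worked instance of the multiplicative formula: C(5, 2) = (5 * 4) / (1 * 2) = 10; r = 0 yields 1 and out-of-range r yields 0.
assert nCr(5, 2) == 10
assert nCr(5, 0) == 1
assert nCr(5, 6) == 0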
import os
def get_test_vesselfile():
"""
return the necessary paths for the testfile tests
Returns
-------
str
absolute file path to the test file
"""
testfile = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'test_data', 'vessel_file.kfc')
return testfile | 4a08f986d06c66766b7a92023c60bebfe7452dc7 | 10,129 |
import inspect
def _get_init_arguments(cls, *args, **kwargs):
"""Returns an OrderedDict of args passed to cls.__init__ given [kw]args."""
init_args = inspect.signature(cls.__init__)
bound_args = init_args.bind(None, *args, **kwargs)
bound_args.apply_defaults()
arg_dict = bound_args.arguments
del arg_dict['self']
return arg_dict | 116c01f9edb838e4b392fa624a454fdf4c455f1a | 10,130 |
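A usage sketch with a throwaway class (purely illustrative); positional and keyword arguments are bound, defaults are filled in, and 'self' is stripped from the result.
class _Point:
    def __init__(self, x, y=0):
        self.x, self.y = x, y

print(dict(_get_init_arguments(_Point, 3)))        # {'x': 3, 'y': 0}
print(dict(_get_init_arguments(_Point, 3, y=7)))   # {'x': 3, 'y': 7}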
def MatchCapture(nfa: NFA, id: CaptureGroup) -> NFA:
"""Handles: (?<id>A)"""
captures = {(s, i): {id} for (s, i) in nfa.transitions if i != Move.EMPTY}
return NFA(nfa.start, nfa.end, nfa.transitions, merge_trans(nfa.captures, captures)) | 08805d01be73480cfea4d627c6a67969290c1d11 | 10,131 |
import os
def save_private_file_share(request, token):
"""
Save private share file to someone's library.
"""
username = request.user.username
try:
pfs = PrivateFileDirShare.objects.get_priv_file_dir_share_by_token(token)
except PrivateFileDirShare.DoesNotExist:
raise Http404
from_user = pfs.from_user
to_user = pfs.to_user
repo_id = pfs.repo_id
path = pfs.path
src_path = os.path.dirname(path)
obj_name = os.path.basename(path.rstrip('/'))
if username == from_user or username == to_user:
dst_repo_id = request.POST.get('dst_repo')
dst_path = request.POST.get('dst_path')
new_obj_name = check_filename_with_rename(dst_repo_id, dst_path, obj_name)
wingufile_api.copy_file(repo_id, src_path, obj_name,
dst_repo_id, dst_path, new_obj_name, username)
messages.success(request, _(u'Successfully saved.'))
else:
messages.error(request, _("You don't have permission to save %s.") % obj_name)
next = request.META.get('HTTP_REFERER', None)
if not next:
next = SITE_ROOT
return HttpResponseRedirect(next) | 5c0e633070750f0f7e920530e809d55b21535fe0 | 10,132 |
def get_all_state_events(log):
""" Returns a list of tuples of event id, state_change_id, block_number and events"""
return [
(InternalEvent(res[0], res[1], res[2], log.serializer.deserialize(res[3])))
for res in get_db_state_changes(log.storage, 'state_events')
] | c75307c930add3e142996e19c441c84fd663e36a | 10,133 |
def iff(a: NNF, b: NNF) -> Or[And[NNF]]:
"""``a`` is true if and only if ``b`` is true."""
return (a & b) | (a.negate() & b.negate()) | 82ea5bfe9c4e1f79361319b2d8455cba898e77ec | 10,134 |
def redact_access_token(e: Exception) -> Exception:
"""Remove access token from exception message."""
if not isinstance(e, FacebookError):
return e
e.args = (redact_access_token_from_str(str(e.args[0])),)
return e | 63d7a7422cb7315866e9c25552fa96b403673261 | 10,135 |
def _get_ext_comm_subtype(type_high):
"""
Returns a ByteEnumField with the right sub-types dict for a given community.
http://www.iana.org/assignments/bgp-extended-communities/bgp-extended-communities.xhtml
"""
return _ext_comm_subtypes_classes.get(type_high, {}) | 5b5782659f1d261162d8f9d5becfe65b852f3bdc | 10,136 |
import click
def _filter_classes(classes, filters, names_only, iq):
"""
    Filter a list of classes for the qualifiers defined by the
    qualifier_filter parameter, where this parameter is a list of tuples.
    Each tuple contains the qualifier name and a dictionary with the qualifier
    name as key and a tuple containing the option_value (True or False) and
    a list of booleans, where each boolean represents one of the scope types
    (class, property, method, parameter) and indicates whether to display the
    class if the qualifier exists in that scope.
This method only works for boolean qualifiers
Parameters:
classes (list of :class:`~pywbem.CIMClass`):
list of classes to be filtered
      filters (dict):
Dictionary defining the filtering to be performed. It contains an entry
for each qualifier filter that is defined. See _build_qualifier_filters
for a definition of this list.
names_only (:class:`py:bool`):
If True, return only the classnames. Otherwise returns the filtered
classes. This is because we must get the classes from the server to
perform the filtering
iq (:class:`py:bool`):
If not True, remove any qualifiers from the classes. This is because
we must get the classes from the server with qualifiers to
perform the filtering.
"""
def class_has_qualifier(cls, qname, scopes):
"""
Determine if the qualifier defined by qname exists in the elements
of the class where the elements are defined by the scopes parameter
for this filter.
Parameters:
cls (:class:`~pywbem.CIMClass`):
The class to be inspected for the qualifier defined by qname
qname (:term:`string`):
The qualifier for which we are searching
scopes (tuple of booleans):
A tuple containing a boolean value for each of the possible scopes
(class, property, method, parameter)
Returns:
True if the qualifier with name quname is found in the elements where
the scope is True. Otherwise, False is returned
"""
# Test class scope
if scopes[0] and qname in cls.qualifiers:
return True
# if property scope, test properties
if scopes[1]:
for prop in cls.properties.values():
if qname in prop.qualifiers:
return True
# If method scope, test methods and if parameter scope, test parameters
if scopes[2]:
for method in cls.methods.values():
if qname in method.qualifiers:
return True
if scopes[3]:
params = method.parameters
for param in params.values():
if qname in param.qualifiers:
return True
return False
# Test all classes in the input property for the defined filters.
filtered_classes = []
subclass_names = []
# Build list of subclass names that will be used later as a filter on the
# classes to be returned
if 'subclass_of' in filters:
try:
subclass_names = get_subclass_names(
classes,
classname=filters['subclass_of'].optionvalue,
deep_inheritance=True)
except ValueError:
raise click.ClickException(
'Classname {} for "subclass-of" not found in returned classes.'
.format(filters['subclass_of'].optionvalue))
# Build a list of leaf class names that will be used later as a filter on
# the classes to be returned.
if 'leaf_classes' in filters:
try:
if subclass_names:
clsx = [cls for cls in classes if cls.classname in
subclass_names]
leafclass_names = get_leafclass_names(clsx)
else:
leafclass_names = get_leafclass_names(classes)
except ValueError:
raise click.ClickException(
'Classname {} for "leaf_classes-of" not found in returned '
'classes.'.format(filters['leaf_classes'].optionvalue))
for cls in classes:
show_class_list = []
for filter_name, filter_ in filters.items():
if filter_name == 'qualifier':
option_value = filter_.optionvalue
if class_has_qualifier(cls, filter_.qualifiername,
filter_.scopes):
if filter_.qualifiername == 'version':
if filter_.qualifiername in cls.qualifiers:
cls_version = \
cls.qualifiers[filter_.qualifiername].value
val = parse_version_value(cls_version,
cls.classname)
option_value = bool(val >= filter_.optionvalue)
show_class_list.append(option_value)
else:
show_class_list.append(not option_value)
elif filter_name == 'schema':
show_class_list.append(
cls.classname.lower().startswith(filter_.optionvalue))
elif filter_name == 'subclass_of':
show_class_list.append(cls.classname in subclass_names)
elif filter_name == 'leaf_classes':
show_class_list.append(cls.classname in leafclass_names)
else:
assert False # Future for other test_types
# Show if all options are True for this class
show_this_class = all(show_class_list)
if show_this_class:
# If returning instances, honor the names_only option
if not names_only and not iq:
cls.qualifiers = []
for p in cls.properties.values():
p.qualifiers = []
for m in cls.methods.values():
m.qualifiers = []
for p in m.parameters.values():
p.qualifiers = []
filtered_classes.append(cls)
# If names_only parameter create list of classnames
if names_only:
filtered_classes = [cls.classname for cls in filtered_classes]
return filtered_classes | eecee9f5a1ccf6c793000faf11cd0f666a0c0f7b | 10,137 |
def template2():
    """load_organic_density"""
script = """
## (Store,figure)
<< host = chemml
<< function = SavePlot
<< kwargs = {'normed':True}
<< output_directory = plots
<< filename = amwVSdensity
>> 0 fig
## (Visualize,artist)
<< host = chemml
<< function = decorator
<< title = AMW vs. Density
<< grid_color = g
<< xlabel = density (Kg/m3)
<< ylabel = atomic molecular weight
<< grid = True
<< size = 18
>> fig 0
>> 4 fig
## (Enter,python script)
<< host = chemml
<< function = PyScript
<< line01 = print (iv1.head())
>> 1 iv1
## (Enter,datasets)
<< host = chemml
<< function = load_organic_density
>> smiles 1
>> density 2
>> features 3
## (Visualize,plot)
<< host = chemml
<< function = scatter2D
<< y = 0
<< marker = o
<< x = 'AMW'
>> 2 dfy
>> 3 dfx
>> fig 4
"""
return script.strip().split('\n') | 2d6dfbab0ef3093645b67da756491fd1b1639649 | 10,138 |
from typing import Any, Tuple

import cudf
import cupy as cp
def get_target_and_encoder_gpu(train: GpuDataset) -> Tuple[Any, type]:
"""Get target encoder and target based on dataset.
Args:
train: Dataset.
Returns:
(Target values, Target encoder).
"""
target = train.target
if isinstance(target, cudf.Series):
target = target.values
target_name = train.target.name
if train.task.name == 'multiclass':
n_out = cp.max(target)+1
target = (target[:, cp.newaxis] == cp.arange(n_out)[cp.newaxis, :])
encoder = MultiClassTargetEncoder_gpu
else:
encoder = TargetEncoder_gpu
return target, encoder | 64cfee3ec9c58bf07d9eb28977b4e5cb7ebadc80 | 10,139 |
import copy
import os
import _locale
import gettext
import locale
def get_available_languages(domain):
"""Lists the available languages for the given translation domain.
:param domain: the domain to get languages for
"""
if domain in _AVAILABLE_LANGUAGES:
return copy.copy(_AVAILABLE_LANGUAGES[domain])
localedir = os.environ.get(_locale.get_locale_dir_variable_name(domain))
def find(x):
return gettext.find(domain, localedir=localedir, languages=[x])
# NOTE(mrodden): en_US should always be available (and first in case
# order matters) since our in-line message strings are en_US
language_list = ['en_US']
locale_identifiers = set(locale.windows_locale.values())
language_list.extend(
language for language in locale_identifiers if find(language)
)
language_list.extend(
alias for alias, _ in _BABEL_ALIASES.items() if find(alias)
)
_AVAILABLE_LANGUAGES[domain] = language_list
return copy.copy(language_list) | f28cc90a24d28224e0ad9f10532674151f070848 | 10,140 |
import functools
def once(f):
"""Cache result of a function first call"""
@functools.wraps(f)
def wrapper(*args, **kwargs):
rv = getattr(f, 'rv', MISSING)
if rv is MISSING:
f.rv = f(*args, **kwargs)
return f.rv
return wrapper | 25d096f76d156c7a8a26f8f159b65b9b31c8d927 | 10,141 |
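A usage sketch for the once decorator; MISSING is assumed to be a module-level sentinel (e.g. MISSING = object()), which the snippet above references but does not define.
MISSING = object()  # assumed sentinel

@once
def expensive_setup():
    print("computing...")
    return 42

expensive_setup()  # prints "computing..." and returns 42
expensive_setup()  # returns the cached 42 without recomputing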
import torch
def setup(args):
"""
Create configs and perform basic setups.
"""
cfg = get_cfg()
add_config(args, cfg)
cfg.merge_from_file(args.config_file)
cfg.merge_from_list(args.opts)
cfg.MODEL.BUA.EXTRACTOR.MODE = 1
default_setup(cfg, args)
cfg.MODEL.DEVICE = 'cuda:0' if torch.cuda.is_available() else 'cpu'
cfg.freeze()
return cfg | 76a8a21714a1fed96fe7b6f33cb948aee30bffcf | 10,142 |
def build_audit_stub(obj):
"""Returns a stub of audit model to which assessment is related to."""
audit_id = obj.audit_id
if audit_id is None:
return None
return {
'type': 'Audit',
'id': audit_id,
'context_id': obj.context_id,
'href': '/api/audits/%d' % audit_id,
'issue_tracker': obj.audit.issue_tracker,
} | 705f066975bf9dae8704944c71eeb3e313cf445f | 10,143 |
import numpy as np
def calculate_widths(threshold_img, landmarks):
    """
    Compute the width of the blood vessels at the potential bifurcation points.
    The width is computed by taking the shortest distance travelled from the
    point along each of 8 directions, which is equivalent to the vessel
    diameter at each point.
    :param threshold_img: (binary) image used to compute the blood vessel widths
    :param landmarks: points at which to compute the widths
    :return: list with the width of each point (vessel diameters)
    """
N, M = threshold_img.shape
widths = []
for x, y, mark_type in landmarks:
# down
i = x
j = y
vert_dist = 0
while(j < M and threshold_img[i, j] != 0):
vert_dist += 1
j += 1
# up
i = x
j = y
while(j >= 0 and threshold_img[i, j] != 0):
vert_dist += 1
j -= 1
# right
horiz_dist = 0
i = x
j = y
while(i < N and threshold_img[i, j] != 0):
horiz_dist += 1
i += 1
# left
i = x
j = y
while(i >= 0 and threshold_img[i, j] != 0):
horiz_dist += 1
i -= 1
# down right
i = x
j = y
s_diag_dist = 0
while(i < N and j < M and threshold_img[i, j] != 0):
i += 1
j += 1
s_diag_dist += 1
# up left
i = x
j = y
while(i >= 0 and j >= 0 and threshold_img[i, j] != 0):
i -= 1
j -= 1
s_diag_dist += 1
# down left
i = x
j = y
p_diag_dist = 0
while(i >= 0 and j < M and threshold_img[i, j] != 0):
i -= 1
j += 1
p_diag_dist += 1
# up right
i = x
j = y
while(i < N and j >= 0 and threshold_img[i, j] != 0):
i += 1
j -= 1
p_diag_dist += 1
min_width = np.min([vert_dist, horiz_dist, p_diag_dist, s_diag_dist])
widths.append([(x, y), np.ceil(min_width).astype(int), mark_type])
return widths | 304ce6bec19faba0a0520b63435fcbf66f8989f0 | 10,144 |
from typing import Optional

import numpy as np
from rdkit import Chem, DataStructs
def smi_to_fp(smi: str, fingerprint: str,
radius: int = 2, length: int = 2048) -> Optional[np.ndarray]:
"""fingerprint functions must be wrapped in a static function
so that they may be pickled for parallel processing
Parameters
----------
smi : str
the SMILES string of the molecule to encode
fingerprint : str
the the type of fingerprint to generate
radius : int
the radius of the fingerprint
length : int
the length of the fingerprint
Returns
-------
T_comp
the compressed feature representation of the molecule
"""
mol = Chem.MolFromSmiles(smi)
if mol is None:
return None
if fingerprint == 'morgan':
fp = rdmd.GetMorganFingerprintAsBitVect(
mol, radius=radius, nBits=length, useChirality=True)
elif fingerprint == 'pair':
fp = rdmd.GetHashedAtomPairFingerprintAsBitVect(
mol, minLength=1, maxLength=1+radius, nBits=length)
elif fingerprint == 'rdkit':
fp = rdmd.RDKFingerprint(
mol, minPath=1, maxPath=1+radius, fpSize=length)
elif fingerprint == 'maccs':
fp = rdmd.GetMACCSKeysFingerprint(mol)
else:
raise NotImplementedError(
f'Unrecognized fingerprint: "{fingerprint}"')
x = np.empty(len(fp))
DataStructs.ConvertToNumpyArray(fp, x)
return x | fa768c5b53a4a1b637b1928127ef85506d375fd7 | 10,145 |
import tensorflow as tf
def f(x, t):
"""function to learn."""
return tf.square(tf.cast(t, tf.float32) / FLAGS.tm) * (tf.math.sin(5 * x) + 1) | 9138b7a2acf43a1c62d5da8157725ff10e6f7f78 | 10,146 |
def render_cells(cells, width=80, col_spacing=2):
"""Given a list of short (~10 char) strings, display these aligned in
columns.
Example output::
Something like this can be
used to neatly arrange long
sequences of values in a
compact format.
Parameters
----------
cells : [(strlen, str), ...]
Gives the cells to print as tuples giving the strings length in visible
characters and the string to display.
width : int
The width of the terminal.
col_spacing : int
Size of the gap to leave between columns.
"""
# Special case (since max below will fail)
if len(cells) == 0:
return ""
# Columns should be at least as large as the largest cell with padding
# between columns
col_width = max(strlen for strlen, s in cells) + col_spacing
lines = [""]
cur_length = 0
for strlen, s in cells:
# Once line is full, move to the next
if cur_length + strlen > width:
lines.append("")
cur_length = 0
# Add the current cell (with spacing)
lines[-1] += s + (" "*(col_width - strlen))
cur_length += col_width
return "\n".join(map(str.rstrip, lines)) | 714b915430be84980c3a9b74f3c5b2cb89b6acba | 10,147 |
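A quick, self-contained example of the column layout (cell widths and terminal width chosen arbitrarily):
cells = [(5, "alpha"), (4, "beta"), (5, "gamma"), (5, "delta")]
print(render_cells(cells, width=20))
# alpha  beta   gamma
# delta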
def separate_types(data):
"""Separate out the points from the linestrings."""
if data['type'] != 'FeatureCollection':
raise TypeError('expected a FeatureCollection, not ' + data['type'])
points = []
linestrings = []
for thing in data['features']:
if thing['type'] != 'Feature':
raise TypeError('expected Feature, not ' + thing['type'])
geometry_type = thing['geometry']['type']
if geometry_type == 'Point':
points.append(thing)
elif geometry_type == 'LineString':
linestrings.append(thing)
else:
raise TypeError('expected Point or LineString, not ' + geometry_type)
return points, linestrings | 28ab8eb7e2cdf1206f4908a15506a9b9af1aa428 | 10,148 |
def state(state_vec):
""" Qiskit wrapper of qobj
"""
return gen_operator.state(state_vec) | 74094c7c6e3c33cff28777f54d8852245c31f276 | 10,149 |
import os
def get_index_path(bam_path: str):
"""
Obtain path to bam index
Returns:
path_to_index(str) : path to the index file, None if not available
"""
for p in [bam_path+'.bai', bam_path.replace('.bam','.bai')]:
if os.path.exists(p):
return p
return None | b8ccf66a89d865f49fdb311f2fcf0c371fe5c488 | 10,150 |
def get_games_for_platform(platform_id):
"""Return the list of all the games for a given platform"""
controller = GameController
return controller.get_list_by_platform(MySQLFactory.get(), platform_id) | 83855b6cdb4d39442e255d14d2f94d76a702a0ea | 10,151 |
def validate_dataset(elem: object) -> Dataset:
"""Check that `elem` is a :class:`~pydicom.dataset.Dataset` instance."""
if not isinstance(elem, Dataset):
raise TypeError('Sequence contents must be Dataset instances.')
return elem | d4744b06f0ccdc8dca0deab57585706c1ee91db9 | 10,152 |
from typing import Any, Dict
import copy

import numpy as np
import xarray as xr
def build_array(name: str, variables: Dict[str, Dict[str, Any]],
data: np.ndarray):
"""Builds the array from the data and the variables"""
properties = variables[name]
attrs = copy.deepcopy(properties["attrs"])
# Reading the storage properties of the variable
encoding: Dict[str, Any] = dict(dtype=properties["dtype"])
# If the variable defines a fill value.
if "_FillValue" in attrs:
encoding["_FillValue"] = encode_fill_value(properties)
del attrs["_FillValue"]
# Some values read from the XML files must be decoded
# TODO(fbriol): The type of these attributes should be determined
# from their type, but at the moment this is not possible.
for item in ["add_offset", "scale_factor"]:
if item in attrs:
attrs[item] = float(attrs[item])
for item in ["valid_range", "valid_min", "valid_max"]:
if item in attrs:
attrs[item] = cast_to_dtype(attrs[item], properties)
if "flag_values" in attrs:
items = attrs["flag_values"].split()
attrs["flag_values"] = np.array(
[cast_to_dtype(item, properties) for item in items],
properties["dtype"]) if len(items) != 1 else cast_to_dtype(
float(attrs["flag_values"]), properties)
# if "scale_factor" in attrs and "add_offset" not in attrs:
# attrs["add_offset"] = 0.0
# if "add_offset" in attrs and "scale_factor" not in attrs:
# attrs["scale_factor"] = 1.0
return {
name: encoding
}, xr.DataArray(data=data,
dims=properties["shape"],
name=name,
attrs=attrs) | f0021bdce68b90b9f88bb715c302a376083e1dce | 10,153 |
def conv3x3(in_channels, out_channels, stride=1):
"""3x3 convolution """
weight_shape = (out_channels, in_channels, 3, 3)
weight = Tensor(np.ones(weight_shape).astype(np.float32))
conv = Conv2d(in_channels, out_channels,
kernel_size=3, stride=stride, padding=0, weight_init=weight, has_bias=False,
pad_mode="same")
conv.conv2d.shard(strategy_weight)
return conv | 011a3f74e8665669f9ecf5d4b9e8abf14f52e053 | 10,154 |
def _workflow_complete(workflow_stage_dict: dict):
"""Check if the workflow is complete.
This function checks if the entire workflow is complete.
This function is used by `execute_processing_block`.
Args:
workflow_stage_dict (dict): Workflow metadata dictionary.
Returns:
bool, True if the workflow is complete, otherwise False.
"""
# Check if all stages are complete, if so end the PBC by breaking
# out of the while loop
complete_stages = []
for _, stage_config in workflow_stage_dict.items():
complete_stages.append((stage_config['status'] == 'complete'))
if all(complete_stages):
LOG.info('PB workflow complete!')
return True
return False | 4e5be4c4768d82e8b1e76d1964c3effb2e604dd2 | 10,155 |
import os
def create_output_folder(ProjectDir):
"""Create the output folders starting from the project directory.
Parameters
----------
ProjectDir : str
Name of the project directory.
Returns
-------
type
PicturePath, ResultsPath
"""
npath = os.path.normpath(ProjectDir)
# set pathname for the Output
OutputPath = os.path.join(npath, os.path.basename(npath))
# set pathname for the images
PicturePath = os.path.join(npath, os.path.basename(npath), "Pictures")
# set pathname for the files
ResultsPath = os.path.join(npath, os.path.basename(npath), "Results")
    # Add folders for outputs
if not os.path.exists(OutputPath):
os.mkdir(OutputPath)
if not os.path.exists(PicturePath):
os.mkdir(PicturePath)
if not os.path.exists(ResultsPath):
os.mkdir(ResultsPath)
return PicturePath, ResultsPath | d1f9abf35bf5342707e7928aaa23c699063a3b70 | 10,156 |
import os.path as osp

import xarray as xr
def get_name(f, opera_format=True):
"""Load dataset and extract radar name from it"""
ds = xr.open_dataset(f)
if hasattr(ds, 'source'):
radar = ds.source
else:
filename = osp.splitext(osp.basename(f))[0]
radar = filename.split('_')[-1]
if opera_format:
if '/' in radar:
radar = (radar[:2]+radar[-3:]).lower()
else:
if radar.islower():
radar = radar[:2] + '/' + radar[-3:]
return radar | 8c50bebfde1300aa6de55981537cbc23171e6ee8 | 10,157 |
import six
def range_join(numbers, to_str=False, sep=",", range_sep=":"):
"""
Takes a sequence of positive integer numbers given either as integer or string types, and
returns a sequence 1- and 2-tuples, denoting either single numbers or inclusive start and stop
values of possible ranges. When *to_str* is *True*, a string is returned in a format consistent
to :py:func:`range_expand` with ranges constructed by *range_sep* and merged with *sep*.
Example:
.. code-block:: python
range_join([1, 2, 3, 5])
# -> [(1, 3), (5,)]
range_join([1, 2, 3, 5, 7, 8, 9])
# -> [(1, 3), (5,), (7, 9)]
range_join([1, 2, 3, 5, 7, 8, 9], to_str=True)
# -> "1:3,5,7:9"
"""
if not numbers:
return "" if to_str else []
# check type, convert, make unique and sort
_numbers = []
for n in numbers:
if isinstance(n, six.string_types):
try:
n = int(n)
except ValueError:
raise ValueError("invalid number format '{}'".format(n))
if isinstance(n, six.integer_types):
_numbers.append(n)
else:
raise TypeError("cannot handle non-integer value '{}' in numbers to join".format(n))
numbers = sorted(set(_numbers))
# iterate through numbers, keep track of last starts and stops and fill a list of range tuples
ranges = []
start = stop = numbers[0]
for n in numbers[1:]:
if n == stop + 1:
stop += 1
else:
ranges.append((start,) if start == stop else (start, stop))
start = stop = n
ranges.append((start,) if start == stop else (start, stop))
# convert to string representation
if to_str:
ranges = sep.join(
(str(r[0]) if len(r) == 1 else "{1}{0}{2}".format(range_sep, *r))
for r in ranges
)
return ranges | c1b2d10ec1b47fa5c917fccead2ef8d5fc506370 | 10,158 |
def power_spectrum(x, fs, N=None):
"""
Power spectrum of instantaneous signal :math:`x(t)`.
:param x: Instantaneous signal :math:`x(t)`.
:param fs: Sample frequency :math:`f_s`.
:param N: Amount of FFT bins.
The power spectrum, or single-sided autospectrum, contains the squared RMS amplitudes of the signal.
A power spectrum is a spectrum with squared RMS values. The power spectrum is
calculated from the autospectrum of the signal.
.. warning:: Does not include scaling to reference value!
.. seealso:: :func:`auto_spectrum`
"""
N = N if N else x.shape[-1]
f, a = auto_spectrum(x, fs, N=N)
a = a[..., N//2:]
f = f[..., N//2:]
a *= 2.0
a[..., 0] /= 2.0 # DC component should not be doubled.
if not N%2: # if not uneven
a[..., -1] /= 2.0 # And neither should fs/2 be.
return f, a | f665c529541420ada0ae4819e53de1e73035d83f | 10,159 |
def CreateInstanceTemplate(task, task_dir):
"""Create the Compute Engine instance template that will be used to create the
instances.
"""
backend_params = task.BackendParams()
instance_count = backend_params.get('instance_count', 0)
if instance_count <= 0:
clovis_logger.info('No template required.')
return True
bucket = backend_params.get('storage_bucket')
if not bucket:
clovis_logger.error('Missing bucket in backend_params.')
return False
return instance_helper.CreateTemplate(task.BackendParams()['tag'], bucket,
task_dir) | 558e4ed3152bb87a51bd2bb7dd107af5dd76bcd1 | 10,160 |
def get_config_string(info_type, board_num, dev_num, config_item, max_config_len):
"""Returns configuration or device information as a null-terminated string.
Parameters
----------
info_type : InfoType
The configuration information for each board is grouped into different categories. This
parameter specifies which category you want. Always set this parameter to
InfoType.BOARDINFO.
board_num : int
The number associated with the board when it was installed with InstaCal or created
with :func:`.create_daq_device`.
dev_num : int
The purpose of the dev_num parameter depends on the value of the config_item parameter. It
can serve as a channel number, an index into the config_item, or it can be ignored.
Unless otherwise noted in the "config_item parameter values" section below, this value is
ignored.
config_item : BoardInfo
The type of information to read from the device. Set it to one of the constants listed in
the "config_item parameter values" section below.
max_config_len : int
The maximum number of bytes to be read from the device into config_val.
Returns
-------
string
The specified configuration item
.. table:: **config_item parameter values**
============ =============================================================================
config_item Description
============ =============================================================================
DEVMACADDR MAC address of an Ethernet device.
------------ -----------------------------------------------------------------------------
DEVSERIALNUM Factory serial number of a USB or Bluetooth device.
dev_num specifies either a base board (0) or an expansion board (1).
------------ -----------------------------------------------------------------------------
DEVUNIQUEID Unique identifier of a discoverable device, such as the serial number of a
USB device or MAC address of an Ethernet device.
------------ -----------------------------------------------------------------------------
DEVVERSION Firmware version and FPGA version installed on a device.
Use this setting in conjunction with one of these dev_num settings:
- MAIN (main firmware version)
- MEASUREMENT (measurement firmware version)
- MEASUREMENT_EXP (expansion board measurement firmware version)
- RADIO (radio firmware version)
- FPGA (FPGA version)
------------ -----------------------------------------------------------------------------
USERDEVID User-configured string identifier of up to maxConfigLen character/bytes from
an Ethernet, Bluetooth, or USB device.
============ =============================================================================
"""
config_val = create_string_buffer(max_config_len)
_check_err(_cbw.cbGetConfigString(
info_type, board_num, dev_num, config_item,
config_val, byref(c_int(max_config_len))))
return config_val.value.decode('utf-8') | 72a35d984cb35e38a5e0742c7d790dc72ccbc928 | 10,161 |
import inspect
def _get_kwargs(func, locals_dict, default=None):
"""
Convert a function's args to a kwargs dict containing entries that are not identically default.
Parameters
----------
func : function
The function whose args we want to convert to kwargs.
locals_dict : dict
The locals dict for the function.
default : object
Don't include arguments whose values are this object.
Returns
-------
dict
The non-default keyword args dict.
"""
return {n: locals_dict[n] for n in inspect.signature(func).parameters
if locals_dict[n] is not default} | ae0a06cb4e17b5512a03e89d7ca2119c58ea762b | 10,162 |
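An illustrative use of _get_kwargs with a hypothetical wrapper function: arguments left at the None default sentinel are dropped from the returned dict.
def resize(width, height=100, scale=None):
    return _get_kwargs(resize, locals())

print(resize(640))              # {'width': 640, 'height': 100} -- scale=None is omitted
print(resize(640, scale=2.0))   # {'width': 640, 'height': 100, 'scale': 2.0}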
import datetime
def iso_to_date(iso_str: str):
"""Convert a date string with iso formating to a datetime date object"""
if not iso_str:
return None
return datetime.date(*map(int, iso_str.split('-'))) | a0d0541298ed538d7df9940ceef7b2bac121af27 | 10,163 |
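Example usage, relying on the plain `import datetime` above:
assert iso_to_date("2021-03-15") == datetime.date(2021, 3, 15)
assert iso_to_date("") is None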
from functools import wraps
def call_with_error(error_type):
"""Collects a bunch of errors and returns them all once.
Decorator that collects the errors in the decorated function so that the
user can see everything they need to fix at once. All errors are thrown
with the same error type.
The decorated must have an `error` keyword parameter. The `error` parameter
is then ignored if the end user passes in that argument.
Parameters
----------
error_type: type
The type of error to throw. For example, `ValueError`.
Returns
-------
Callable[Callable[[Any], Any], Callable[[Any], Any]]
Returns a decorator
Example
-------
>>> @call_with_error(ValueError)
>>> def func(a: int, b: int, error: Callable[[str], None]) -> int:
... if a < 0:
... error("a must be zero or greater")
... if b < 0:
... error("b must be zero or greater")
... return a + b
>>> func(-1, 0)
ValueError("a must be zero or greater")
>>> func(0, -1)
ValueError("b must be zero or greater")
>>> func(-1, -1)
ValueError("a must be zero or greater\nb must be zero or greater")
"""
def _call_with_error(f):
@curry
def error(log, msg):
log.append(msg)
@wraps(f)
def wrapped(*args, **kwargs):
log = []
result = f(*args, error=error(log), **kwargs)
if len(log) > 0:
raise error_type("\n".join(log))
return result
return wrapped
return _call_with_error | 9a64fb630b0a491bc9e01d77ebe35199df47ab55 | 10,164 |
def _get_metadata_and_fingerprint(instance_name, project, zone):
"""Return the metadata values and fingerprint for the given instance."""
instance_info = _get_instance_info(instance_name, project, zone)
if not instance_info:
logs.log_error('Failed to fetch instance metadata')
return None, None
fingerprint = instance_info['metadata']['fingerprint']
metadata_items = instance_info['metadata']['items']
return metadata_items, fingerprint | 7049805d538c7942dc8249e91c27faa2c1867936 | 10,165 |
def solve_token_pair_and_fee_token_economic_viable(
token_pair, accounts, b_orders, s_orders, f_orders, fee,
xrate=None
):
"""Match orders between token pair and the fee token, taking into
account all side constraints, including economic viability.
If xrate is given, then it will be used instead of trying to find
optimal xrate.
Sets b_orders/s_orders/f_orders (integral) buy_amounts for the best execution.
Also returns the (integral) prices found.
"""
b_buy_token, s_buy_token = token_pair
orders, prices = TRIVIAL_SOLUTION
# Search for an economically viable solution.
while len(b_orders) > 0 or len(s_orders) > 0:
# Solve current problem.
orders, prices = solve_token_pair_and_fee_token(
token_pair, accounts, b_orders, s_orders, f_orders, fee, xrate
)
# If solution is economically viable, exit.
# Hopefully, in large majority of cases this will occur in the first iteration.
if is_economic_viable(orders, prices, fee, IntegerTraits) or is_trivial(orders):
break
# If solution cannot be made economically viable (assuming prices wouldn't change)
if len(compute_approx_economic_viable_subset(
orders, prices, fee, IntegerTraits
)) == 0:
orders, prices = TRIVIAL_SOLUTION
break
# Note: to increase performance, we could consider removing all orders that do not
# satisfy min_abs_fee_per_order here at once, instead of removing one at a time as
# it is currently. The advantage of removing one by one is that it will not remove
# more than needed (note that prices, and hence order fees, keep changing).
# Find and remove the order paying the least fee.
b_order_with_min_buy_amount = min(
[o for o in b_orders if o.buy_amount > 0],
key=lambda o: o.buy_amount
)
s_order_with_min_buy_amount = min(
[o for o in s_orders if o.buy_amount > 0],
key=lambda o: o.buy_amount
)
if b_order_with_min_buy_amount.buy_amount * prices[b_buy_token]\
< s_order_with_min_buy_amount.buy_amount * prices[s_buy_token]:
b_orders = [
o for o in b_orders if o.id != b_order_with_min_buy_amount.id
]
else:
s_orders = [
o for o in s_orders if o.id != s_order_with_min_buy_amount.id
]
# Make sure the solution is correct.
validate(accounts, orders, prices, fee)
return orders, prices | f6fcf5bccdf498f29852614751cf120a8e2addd4 | 10,166 |
import pandas as pd
import plotly.graph_objects as go
def two_categorical(df, x, y, plot_type="Cross tab"):
"""
['Cross tab', "Stacked bone_numeric_one_categorical"]
"""
if plot_type is None:
plot_type = 'Cross tab'
if plot_type == 'Stacked bar': # 20
df_cross = pd.crosstab(df[x], df[y])
data = []
        for col in df_cross.columns:
            data.append(go.Bar(name=str(col), x=df_cross.index, y=df_cross[col]))
fig = go.Figure(data)
fig.update_layout(barmode = 'stack')
#For you to take a look at the result use
if plot_type == "Cross tab": # 21
df_cross = pd.crosstab(df[x], df[y])
return df_cross
return fig | 5c0908055848d9de02920f8e2718de0918f4b460 | 10,167 |
def yices_bvconst_one(n):
"""Set low-order bit to 1, all the other bits to 0.
Error report:
if n = 0
code = POS_INT_REQUIRED
badval = n
if n > YICES_MAX_BVSIZE
code = MAX_BVSIZE_EXCEEDED
badval = n.
"""
# let yices deal with int32_t excesses
if n > MAX_INT32_SIZE:
n = MAX_INT32_SIZE
return libyices.yices_bvconst_one(n) | 6a70c4773a6558e068d2bbafb908657d5b5b4d1d | 10,168 |
def showItem(category_id):
"""Show all Items"""
category = session.query(Category).filter_by(id=category_id).one()
items = session.query(Item).filter_by(
category_id=category_id).all()
return render_template('item.html', items=items, category=category) | 230cc9b7e8043b0bb3e78866b2a27a0aec287828 | 10,169 |
def get_standard_t_d(l, b, d):
"""
Use NE2001 to estimate scintillation time at 1 GHz and 1 km/s transverse velocity.
Parameters
----------
l : float
Galactic longitude
b : float
Galactic latitude
d : float
Distance in kpc
Returns
-------
t_d : float
Scintillation timescale in s
"""
return query_ne2001(l, b, d, field='SCINTIME') | e1743ae75a6893376c5e13126deda6f0eb41d38f | 10,170 |
import os
import json
def get_json_test_data(project, test_name):
"""Get data from json file.
If json data is not of type dict or list of dicts it is ignored.
"""
json_data = None
json_path = json_file_path(project, test_name)
if os.path.isfile(json_path):
try:
with open(json_path, encoding='utf-8') as f:
json_data = json.load(f)
except json.JSONDecodeError:
pass
if type(json_data) is dict:
return [json_data]
if type(json_data) is list:
if all(type(x) is dict for x in json_data):
return json_data
return [] | 1934128e059eb3a521b7e8ee016c87bc0951453e | 10,171 |
def match(pattern: str, text: str) -> bool:
"""
匹配同样长度的字符串
"""
if pattern:
return True
elif pattern == "$" and text == "":
return True
elif pattern[1] == "?":
return _match_question(pattern, text)
elif pattern[1] == "*":
return _match_star(pattern, text)
else:
return match_one(pattern[0], text[0]) and match(pattern[1:], text[1:]) | 0dc71f00323502de7c1a2e00c502c99d75f56fc1 | 10,172 |
def config_func(tools, index, device_id, config_old: {}, config_new: {}):
"""
CANedge configuration update function
:param tools: A collection of tools used for device configuration
:param index: Consecutive device index (from 0)
:param device_id: Device ID
:param config_old: The current device configuration
:param config_new: Default new device configuration
:return: Update configuration
"""
# This is an example of how to upgrade existing access point and S3 credentials from plain to encrypted form. Note
# that below assumes that the existing configuration holds the information in unencrypted form.
# Devices already using encrypted credentials are skipped (no configuration returned)
# New configuration uses same structure. The old configuration can safely be copied to the new.
config_new = config_old
# Only update configurations unencrypted credentials
if config_new["connect"]["wifi"]["keyformat"] == 0 and config_new["connect"]["s3"]["server"]["keyformat"] == 0:
# Set the server kpub
config_new["general"]["security"] = {"kpub": tools.security.user_public_key_base64}
# Set the access point key format to 1 (encrypted)
config_new["connect"]["wifi"]["keyformat"] = 1
# Loop each accesspoint in list
for ap in config_new["connect"]["wifi"]["accesspoint"]:
# Encrypt the wifi password
unencrypted_wifi_pwd = ap["pwd"]
ap["pwd"] = tools.security.encrypt_encode(unencrypted_wifi_pwd)
# Encrypt the S3 secret key
unencrypted_s3_secretkey = config_new["connect"]["s3"]["server"]["secretkey"]
config_new["connect"]["s3"]["server"]["keyformat"] = 1
config_new["connect"]["s3"]["server"]["secretkey"] = tools.security.encrypt_encode(unencrypted_s3_secretkey)
return config_new | c0585c3a268fb40e3e00a2613e03001bc561566a | 10,173 |
import pickle as pk
import matplotlib.figure
def load_figure(file_path: str) -> matplotlib.figure.Figure:
    """Fully loads the saved figure so that it can be modified.
    It can easily be shown with:
    fig_object.show()
Args:
file_path: String file path without file extension.
Returns:
Figure object.
Raises:
None.
"""
with open(file_path + '.pkl', 'rb') as handle:
fig_object = pk.load(handle)
return fig_object | 76dcc0a27a3ae04e574a3d69fb431eedbc0c618a | 10,174 |
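# Usage sketch for load_figure above (illustrative, not from the original source):
# the companion save step is assumed to pickle the Figure to '<path>.pkl'.
import pickle as pk
import matplotlib.pyplot as plt

fig, ax = plt.subplots()
ax.plot([0, 1, 2], [0, 1, 4])
with open('example_figure.pkl', 'wb') as handle:
    pk.dump(fig, handle)
restored = load_figure('example_figure')  # the '.pkl' extension is added inside
# restored.show()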
from sklearn.cluster import KMeans
def cluster_vectors(vectors, k=500, n_init=100, **kwargs):
    """Cluster the vectors into k groups with k-means and return the fitted model."""
kwargs.pop('n_clusters', None)
kwargs.pop('init', None)
kwargs.pop('n_init', None)
return KMeans(n_clusters=k, init='k-means++', n_init=n_init,
**kwargs).fit(vectors) | 28984811ea58a2a2c123d36cdb5e56c1d5b8d0db | 10,175 |
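# Usage sketch for cluster_vectors above (illustrative, not from the original source):
# a small k and n_init keep the toy example fast.
import numpy as np

vectors = np.random.RandomState(0).rand(100, 8)
model = cluster_vectors(vectors, k=5, n_init=10)
print(model.labels_[:10])             # cluster assignment of the first 10 vectors
print(model.cluster_centers_.shape)   # (5, 8)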
import torch
import math
def positional_encoding(d_model, length):
"""
:param d_model: dimension of the model
:param length: length of positions
:return: length*d_model position matrix
"""
if d_model % 2 != 0:
raise ValueError("Cannot use sin/cos positional encoding with "
"odd dim (got dim={:d})".format(d_model))
pe = torch.zeros(length, d_model)
position = torch.arange(0, length).unsqueeze(1)
div_term = torch.exp((torch.arange(0, d_model, 2, dtype=torch.float) * -(math.log(10000.0) / d_model)))
pe[:, 0::2] = torch.sin(position.float() * div_term)
pe[:, 1::2] = torch.cos(position.float() * div_term)
return pe | de41f0c99b46f16dbe300d59527e11b98a0b1f14 | 10,176 |
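# Usage sketch for positional_encoding above (illustrative, not from the original source):
pe = positional_encoding(d_model=16, length=10)
print(pe.shape)   # torch.Size([10, 16])
print(pe[0, :4])  # position 0 gives sin(0)/cos(0) pairs: 0, 1, 0, 1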
import logging
from pyVim.connect import SmartConnect, SmartConnectNoSSL
# Module-level logger (assumed; not part of the original snippet).
_LOGGER = logging.getLogger(__name__)
def esx_connect(host, user, pwd, port, ssl):
"""Establish connection with host/vcenter."""
si = None
# connect depending on SSL_VERIFY setting
if ssl is False:
si = SmartConnectNoSSL(host=host, user=user, pwd=pwd, port=port)
current_session = si.content.sessionManager.currentSession.key
_LOGGER.debug("Logged in - session %s", current_session)
else:
si = SmartConnect(host=host, user=user, pwd=pwd, port=port)
current_session = si.content.sessionManager.currentSession.key
_LOGGER.debug("Logged in - session %s", current_session)
return si | 2a8d3214f41bc284ef899c6292fb63284954849b | 10,177 |
import os
import numpy as np
import imageio
from PIL import Image
# The next two imports are assumed to come from the facenet repo layout
# (davidsandberg/facenet), which this snippet's calls follow.
import facenet
import align.detect_face
def get_cropped_face_img(image_path, margin=44, image_size=160, folders=None):
"""return cropped face img if face is detected,
otherwise remove the img
"""
minsize = 20 # minimum size of face
threshold = [0.6, 0.7, 0.7] # three steps's threshold
factor = 0.709 # scale factor
pnet, rnet, onet = init_mtcnn()
img_file_dict = {}
cropped_face_img_dict = {}
if isinstance(image_path, list):
img_file_dict["img_list"] = image_path
img_list = []
for image in image_path:
img = imageio.imread(os.path.expanduser(image), pilmode="RGB",)
img_size = np.asarray(img.shape)[0:2]
bounding_boxes, points = align.detect_face.detect_face(
img, minsize, pnet, rnet, onet, threshold, factor
)
if len(bounding_boxes) < 1:
img_file_dict["img_list"].remove(image)
print("can't detect face, remove ", image)
continue
# print(f"bound_boxes: {bounding_boxes}")
# print(f'points: {points}')
det = np.squeeze(bounding_boxes[0, 0:4])
bb = np.zeros(4, dtype=np.int32)
bb[0] = np.maximum(det[0] - margin / 2, 0)
bb[1] = np.maximum(det[1] - margin / 2, 0)
bb[2] = np.minimum(det[2] + margin / 2, img_size[1])
bb[3] = np.minimum(det[3] + margin / 2, img_size[0])
cropped = img[bb[1] : bb[3], bb[0] : bb[2], :]
aligned = np.array(
Image.fromarray(cropped).resize(
(image_size, image_size), Image.BILINEAR
)
).astype(np.double)
prewhitened = facenet.prewhiten(aligned)
img_list.append(prewhitened)
# Only add to dict when list is not empty
if img_list:
cropped_face_img_dict["img_list"] = np.stack(img_list)
return cropped_face_img_dict, img_file_dict
else:
if not folders:
for folder in os.listdir(image_path):
for _, _, files in os.walk(os.path.join(image_path, folder)):
img_file_dict[folder] = files
else:
for folder in folders:
for _, _, files in os.walk(os.path.join(image_path, folder)):
img_file_dict[folder] = files
for folder in img_file_dict:
img_list = []
for image in img_file_dict[folder]:
img = imageio.imread(
os.path.expanduser(os.path.join(image_path, folder, image)),
pilmode="RGB",
)
img_size = np.asarray(img.shape)[0:2]
bounding_boxes, points = align.detect_face.detect_face(
img, minsize, pnet, rnet, onet, threshold, factor
)
if len(bounding_boxes) < 1:
img_file_dict[folder].remove(image)
print("can't detect face, remove ", image)
continue
# print(f"bound_boxes: {bounding_boxes}")
# print(f'points: {points}')
det = np.squeeze(bounding_boxes[0, 0:4])
bb = np.zeros(4, dtype=np.int32)
bb[0] = np.maximum(det[0] - margin / 2, 0)
bb[1] = np.maximum(det[1] - margin / 2, 0)
bb[2] = np.minimum(det[2] + margin / 2, img_size[1])
bb[3] = np.minimum(det[3] + margin / 2, img_size[0])
cropped = img[bb[1] : bb[3], bb[0] : bb[2], :]
aligned = np.array(
Image.fromarray(cropped).resize(
(image_size, image_size), Image.BILINEAR
)
).astype(np.double)
prewhitened = facenet.prewhiten(aligned)
img_list.append(prewhitened)
# Only add to dict when list is not empty
if img_list:
cropped_face_img_dict[folder] = np.stack(img_list)
return cropped_face_img_dict, img_file_dict | 82230dce29bd24967152c6e74c1db3ed7529528d | 10,178 |
def invert_hilbert_QQ(n=40, system='sage'):
"""
Runs the benchmark for calculating the inverse of the hilbert
matrix over rationals of dimension n.
INPUT:
    - ``n`` - matrix dimension (default: ``40``)
- ``system`` - either 'sage' or 'magma' (default: 'sage')
EXAMPLES::
sage: import sage.matrix.benchmark as b
sage: ts = b.invert_hilbert_QQ(30)
sage: tm = b.invert_hilbert_QQ(30, system='magma') # optional - magma
"""
if system == 'sage':
A = hilbert_matrix(n)
t = cputime()
d = A**(-1)
return cputime(t)
elif system == 'magma':
code = """
h := HilbertMatrix(%s);
tinit := Cputime();
d := h^(-1);
s := Cputime(tinit);
delete h;
"""%n
if verbose: print(code)
magma.eval(code)
return float(magma.eval('s')) | 153e7400467a57cf07f839042d31d4800fe161bd | 10,179 |
def getModelListForEnumProperty(self, context):
"""Returns a list of (str, str, str) elements which contains the models
contained in the currently selected model category.
If there are no model categories (i.e. '-') return ('-', '-', '-').
Args:
context:
Returns:
"""
category = context.window_manager.category
if category == '-' or category == '':
return [('-',) * 3]
return sorted(model_previews[category].enum_items) | 46f642933dd220b0f71431ff4a9cb7410858fbf0 | 10,180 |
import os
import shutil
def moveFiles(subsystemDict, dirPrefix):
""" For each subsystem, moves ROOT files that need to be moved from directory that receives HLT histograms into appropriate file structure for processing.
Creates run directory and subsystem directories as needed. Renames files to convention that is later used for extracting timestamps.
Args:
subsystemDict (dict): Dictionary of subsystems (keys) and lists of files that need to be moved (values) for each subsystem.
dirPrefix (str): Directory prefix used to get to all of the folders.
Returns:
None.
"""
runsDict = {}
# For each subsystem, loop over all files to move, and put them in subsystem directory
for key in subsystemDict.keys():
filesToMove = subsystemDict[key]
if len(filesToMove) == 0:
logger.info("No files to move in %s" % key)
for filename in filesToMove:
# Extract time stamp and run number
tempFilename = filename
splitFilename = tempFilename.replace(".root","").split("_")
#logger.debug("tempFilename: %s" % tempFilename)
#logger.debug("splitFilename: ", splitFilename)
if len(splitFilename) < 3:
continue
timeString = "_".join(splitFilename[3:])
#logger.debug("timeString: ", timeString)
# How to parse the timeString if desired
#timeStamp = time.strptime(timeString, "%Y_%m_%d_%H_%M_%S")
runString = splitFilename[1]
runNumber = int(runString)
hltMode = splitFilename[2]
# Determine the directory structure for each run
runDirectoryPath = "Run" + str(runNumber)
# Move replays of the data to a different directory
if hltMode == "E":
runDirectoryPath = os.path.join("ReplayData", runDirectoryPath)
# Create Run directory and subsystem directories as needed
if not os.path.exists(os.path.join(dirPrefix, runDirectoryPath)):
os.makedirs( os.path.join(dirPrefix, runDirectoryPath) )
if len(filesToMove) != 0 and not os.path.exists(os.path.join(dirPrefix, runDirectoryPath, key)):
os.makedirs(os.path.join(dirPrefix, runDirectoryPath, key))
newFilename = key + "hists." + timeString + ".root"
oldPath = os.path.join(dirPrefix, tempFilename)
newPath = os.path.join(dirPrefix, runDirectoryPath, key, newFilename)
logger.info("Moving %s to %s" % (oldPath, newPath))
# DON"T IMPORT MOVE. BAD CONSEQUENCES!!
shutil.move(oldPath, newPath)
# Create dict for subsystem if it doesn't exist, and then create a list for the run if it doesn't exist
# See: https://stackoverflow.com/a/12906014
runsDict.setdefault(runString, {}).setdefault(key, []).append(newFilename)
# Save the HLT mode
# Must be the same for each file in the run
if "hltMode" not in runsDict[runString]:
runsDict[runString]["hltMode"] = hltMode
return runsDict | 4755f3d42b5540f8a7422a51c2f0048e49897ea6 | 10,181 |
import logging
from subprocess import Popen, PIPE
def run_wcs(*args, **kwargs):
"""
Set up the environment and run the bundled wcs.exe (from the Talon distribution)
using the supplied command line arguments
"""
# Pull out keyword args that we are interested in
write_stdout_to_console = kwargs.get("write_stdout_to_console", False)
# Override the TELHOME environment variable so that we can use relative
# paths when specifying the location of the GSC directory and ip.cfg
environment = dict(TELHOME=paths.talon_wcs_path())
stdout_destination = PIPE
if write_stdout_to_console:
stdout_destination = None
# Make sure all passed-in arguments are strings
args = [str(x) for x in args]
args = [
WCS_EXE,
# wcs.exe will use the last-specified values for -i and -c, so
# we'll provide defaults below but they can be overridden by values
# coming in via the *args array
"-i", "ip.cfg", # Specify the path to ip.cfg (relative to TELHOME)
"-c", "gsc" # Specify the path to the GSC catalog (relative to TELHOME)
] + list(args) # Include additional args specified by the user
process = Popen(
args,
env=environment,
stdout=stdout_destination,
stderr=PIPE
)
(stdout, stderr) = process.communicate() # Obtain stdout and stderr output from the wcs tool
exit_code = process.wait() # Wait for process to complete and obtain the exit code
if not write_stdout_to_console:
logging.info(stdout.decode("utf-8"))
if exit_code != 0:
logging.info("Error finding WCS solution.\n" +
"Exit code: " + str(exit_code) + "\n" +
"Error output: " + stderr.decode("utf-8"))
return False
return True | 51461fcbc4ed5a08063d630aea926d312b9da825 | 10,182 |
import os
from configparser import ConfigParser
def read_setup_cfg():
"""
Build an absolute path from *parts* and and return the contents of the
resulting file. Assume UTF-8 encoding.
"""
config_file = os.path.join(HERE, "setup.cfg")
cp = ConfigParser()
cp.read([config_file])
return cp | 86979a63d162df682468bbc5163c751d6a75fbca | 10,183 |
def store_exposure_fp(fp, exposure_type):
"""
    Preserve the original exposure file extension if it is in a pandas-supported
    compressed format.
compression : {‘infer’, ‘gzip’, ‘bz2’, ‘zip’, ‘xz’, None}, default ‘infer’
For on-the-fly decompression of on-disk data. If ‘infer’ and
filepath_or_buffer is path-like, then detect compression from
the following extensions: ‘.gz’, ‘.bz2’, ‘.zip’, or ‘.xz’
(otherwise no decompression).
If using ‘zip’, the ZIP file must contain only one data file
to be read in. Set to None for no decompression.
New in version 0.18.1: support for ‘zip’ and ‘xz’ compression.
"""
compressed_ext = ('.gz', '.bz2', '.zip', '.xz')
filename = SOURCE_FILENAMES[exposure_type]
if fp.endswith(compressed_ext):
return '.'.join([filename, fp.rsplit('.')[-1]])
else:
return filename | c187e79d4cce7ea79b66671a2e7378a35de4841f | 10,184 |
def velocity_genes(data, vkey='velocity', min_r2=0.01, highly_variable=None, copy=False):
"""Estimates velocities in a gene-specific manner
Arguments
---------
data: :class:`~anndata.AnnData`
Annotated data matrix.
vkey: `str` (default: `'velocity'`)
Name under which to refer to the computed velocities for `velocity_graph` and `velocity_embedding`.
min_r2: `float` (default: 0.01)
Minimum threshold for coefficient of determination
highly_variable: `bool` (default: `None`)
Whether to include highly variable genes only.
copy: `bool` (default: `False`)
Return a copy instead of writing to `adata`.
Returns
-------
Updates `adata` attributes
velocity_genes: `.var`
genes to be used for further velocity analysis (velocity graph and embedding)
"""
adata = data.copy() if copy else data
if vkey + '_genes' not in adata.var.keys(): velocity(data, vkey)
adata.var[vkey + '_genes'] = np.array(adata.var[vkey + '_genes'], dtype=bool) & (adata.var[vkey + '_r2'] > min_r2)
if highly_variable and 'highly_variable' in adata.var.keys():
adata.var[vkey + '_genes'] &= adata.var['highly_variable']
logg.info('Number of obtained velocity_genes:', np.sum(adata.var[vkey + '_genes']))
return adata if copy else None | 7d2a0b86d2fb4402cdef9ab56fe2638f89d09fac | 10,185 |
def update_graphics_labels_from_node_data(node, n_id_map, add_new_props):
"""Updates the graphics labels so they match the node-data"""
try:
gfx = select_child(node, n_id_map, 'nodegraphics').getchildren()[0].getchildren()
except:
return None
node_label = select_child(node, n_id_map, 'labelcount').text
node_props = select_child(node, n_id_map, 'node_prop_text').text
# Nodes have either 0, 1, or 2 node labels. If 1, its just title and count
# If 2, the first one is title count, second is properties and counts
i = 0
for elem in gfx:
if elem.tag.endswith('NodeLabel'):
if i == 0:
elem.text = node_label
i += 1
# not all nodes have a props-label
elif i == 1 and node_props:
# Add all properties to the label text, even if new
elem.text = node_props | c2d3104dbc3a20ff6c34de754ff681b176091787 | 10,186 |
import sys
def deploy_binary_if_master(args):
"""if the active branch is 'master', deploy binaries for the primary suite to remote maven repository."""
master_branch = 'master'
active_branch = mx.VC.get_vc(SUITE.dir).active_branch(SUITE.dir)
if active_branch == master_branch:
if sys.platform == "darwin":
args.insert(0, "--platform-dependent")
return mx.command_function('deploy-binary')(args)
else:
mx.log('The active branch is "%s". Binaries are deployed only if the active branch is "%s".' % (
active_branch, master_branch))
return 0 | 582e16e9c688b9ec05dfced51e82f48c3e472e7d | 10,187 |
import pandas
import numpy
def fast_spearman(x, y=None, destination=None):
"""calculate the spearman correlation matrix for the columns of x (with dimensions MxN), or optionally, the spearman correlaton
matrix between the columns of x and the columns of y (with dimensions OxP). If destination is provided, put the results there.
In the language of statistics the columns are the variables and the rows are the observations.
Args:
x (numpy array-like) MxN in shape
y (optional, numpy array-like) OxP in shape. M (# rows in x) must equal O (# rows in y)
destination (numpy array-like) optional location where to store the results as they are calculated (e.g. a numpy
memmap of a file)
returns:
        (numpy array-like) array of the correlation values
            for defaults (y=None), shape is NxN
            if y is provided, shape is NxP
"""
logger.debug("x.shape: {}".format(x.shape))
if hasattr(y, "shape"):
logger.debug("y.shape: {}".format(y.shape))
x_ranks = pandas.DataFrame(x).rank(method="average").values
logger.debug("some min and max ranks of x_ranks:\n{}\n{}".format(numpy.min(x_ranks[:10], axis=0), numpy.max(x_ranks[:10], axis=0)))
y_ranks = pandas.DataFrame(y).rank(method="average").values if y is not None else None
return fast_corr(x_ranks, y_ranks, destination) | e2386d6da26a12c87ca1471a5afde8bcd108c30b | 10,188 |
def ProcuraPalavra(dicionário, palavra):
"""
Procura as possíveis palavras para substituir
a palavra passada, e as devolve numa lista
"""
#Antes de mais nada tornamos a palavra maiuscula
#para realizar as comparações
palavra = palavra.upper()
#Primeiro olhamos para o caso de haver uma
#primeira letra selecionada, o que facilitaria
#a nossa busca
if palavra[0] != '*':
#Primeiro nós encontramos o ponto do dionário
#onde começa nossa letra
for i in range(len(dicionário)):
if i % 100 == 0:
print('Procurando Letra no dicionário...')
if dicionário[i][0] == palavra[0]:
break
        # And also the point of the dictionary
        # where our letter ends
for j in range(i, len(dicionário)):
if j % 100 == 0:
print('Procurando Letra no dicionário...')
if dicionário[j][0] != palavra[0]:
break
return SeparaPorTamanho(dicionário[i:j], palavra)
else:
return SeparaPorTamanho(dicionário, palavra) | 1a283aec6670c0e2fe6ca6f4366ef43c6ba97e9f | 10,189 |
import os
def color_parser(color: str, color_dicts: list = None) -> tuple:
"""
convert a string with RGB/matplotlib named colors to matplotlib HSV tuples.
supports RGB colors with ranges between 0-1 or 0-255.
supported matplotlib colors can be found here:
https://matplotlib.org/3.3.1/gallery/color/named_colors.html
"""
# input: RGB
if color.count(",") == 2:
value = [float(c) for c in color.split(",")]
return rgb_to_hsv(value)
# input: matplotlib colors
cdicts = color_dicts if color_dicts else DEFAULT_COLOR_DICTS
for cdict in cdicts:
if color in cdict:
value = cdict[color]
# tableau, css4 and xkcd return hex colors.
if str(value).startswith("#"):
value = hex_to_rgb(value)
return rgb_to_hsv(value)
logger.error(f"Color not recognized: {color}")
os._exit(1) | e43fc03add16368d84371dded7f34370b7c8bb9e | 10,190 |
import json
import six
def _build_auth_record(response):
"""Build an AuthenticationRecord from the result of an MSAL ClientApplication token request"""
try:
id_token = response["id_token_claims"]
if "client_info" in response:
client_info = json.loads(_decode_client_info(response["client_info"]))
home_account_id = "{uid}.{utid}".format(**client_info)
else:
# MSAL uses the subject claim as home_account_id when the STS doesn't provide client_info
home_account_id = id_token["sub"]
# "iss" is the URL of the issuing tenant e.g. https://authority/tenant
issuer = six.moves.urllib_parse.urlparse(id_token["iss"])
# tenant which issued the token, not necessarily user's home tenant
tenant_id = id_token.get("tid") or issuer.path.strip("/")
# AAD returns "preferred_username", ADFS returns "upn"
username = id_token.get("preferred_username") or id_token["upn"]
return AuthenticationRecord(
authority=issuer.netloc,
client_id=id_token["aud"],
home_account_id=home_account_id,
tenant_id=tenant_id,
username=username,
)
except (KeyError, ValueError) as ex:
auth_error = ClientAuthenticationError(
message="Failed to build AuthenticationRecord from unexpected identity token"
)
six.raise_from(auth_error, ex) | 96aed71945c354e41cafd89bf5d7a7d62c31a40a | 10,191 |
def loc_data_idx(loc_idx):
"""
Return tuple of slices containing the unflipped idx corresponding to loc_idx.
By 'unflipped' we mean that if a slice has a negative step, we wish to retrieve
the corresponding indices but not in reverse order.
Examples
--------
>>> loc_data_idx(slice(11, None, -3))
(slice(2, 12, 3),)
"""
retval = []
for i in as_tuple(loc_idx):
if isinstance(i, slice) and i.step is not None and i.step == -1:
if i.stop is None:
retval.append(slice(0, i.start+1, -i.step))
else:
retval.append(slice(i.stop+1, i.start+1, -i.step))
elif isinstance(i, slice) and i.step is not None and i.step < -1:
if i.stop is None:
lmin = i.start
while lmin >= 0:
lmin += i.step
retval.append(slice(lmin-i.step, i.start+1, -i.step))
else:
retval.append(slice(i.stop+1, i.start+1, -i.step))
elif is_integer(i):
retval.append(slice(i, i+1, 1))
else:
retval.append(i)
return as_tuple(retval) | d30b1b27957e24ff30caf19588487645ac198dc8 | 10,192 |
def eat_descriptor(descr):
"""
Read head of a field/method descriptor. Returns a pair of strings, where
the first one is a human-readable string representation of the first found
type, and the second one is the tail of the parameter.
"""
array_dim = 0
while descr[0] == '[':
array_dim += 1
descr = descr[1:]
if (descr[0] == 'L'):
try:
end = descr.find(';')
except Exception:
raise ParserError("Not a valid descriptor string: " + descr)
type = descr[1:end]
descr = descr[end:]
else:
global code_to_type_name
try:
type = code_to_type_name[descr[0]]
except KeyError:
raise ParserError("Not a valid descriptor string: %s" % descr)
return (type.replace("/", ".") + array_dim * "[]", descr[1:]) | 411ce48ce250fe15438cd89f43b91ee9b87908a6 | 10,193 |
def legendre(a, p):
"""Legendre symbol"""
tmp = pow(a, (p-1)//2, p)
return -1 if tmp == p-1 else tmp | 66b86dce23ae10ba226ffb19942b98550bb7c218 | 10,194 |
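# Usage sketch for legendre above (illustrative, not from the original source):
# 2 is a quadratic residue mod 7 (3*3 = 2 mod 7), 3 is not.
print(legendre(2, 7))  # 1
print(legendre(3, 7))  # -1
print(legendre(7, 7))  # 0, since 7 divides 7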
import argparse
def get_parser():
"""
Create a parser with some arguments used to configure the app.
Returns:
argparse.ArgumentParser:
"""
parser = argparse.ArgumentParser(description="configuration")
parser.add_argument(
"--upload-folder",
required=True,
metavar="path",
help="Target path where the images will be uploaded for inference",
)
parser.add_argument(
"--config-file",
default="/content/computer-vision-REST-API/MaskRCNN_finetune/configs/ResNet-101-FPN/balloon.yaml",
metavar="path",
help="Path to the model config file. Possible improvement : let the user instead choose the desired model thru the app then load the ad-hoc config file.",
)
parser.add_argument(
"--weights",
default="https://www.dropbox.com/s/otp52ccygc2t3or/ResNet101_FPN_model_final.pth?dl=1",
metavar="path",
help="Path to the model file weights. Possible improvement : let the user instead choose the desired model thru the app then load the ad-hoc pretrained weights.",
)
parser.add_argument(
"--remove-colors",
default=False,
action="store_true",
help="One can remove colors of unsegmented pixels for better clarity as the mask and balloons colors can be hard to distinguish.",
)
parser.add_argument(
"--use-ngrok",
default=False,
action="store_true",
help="Need to set this arg to True to be able to run it on google collab",
)
parser.add_argument(
"--infer-with-cpu",
default=False,
action="store_true",
help="Use cpu for forward pass (slower)",
)
return parser | 0f7a375948d3e45157637647908f5c0e6948083a | 10,195 |
def precheck_arguments(args):
""" Make sure the argument choices are valid """
any_filelist = (len(args.filelist_name[0]) > 0 or len(args.output_dir[0]) > 0 or args.num_genomes[0] > 0)
if len(args.filelist_name[0]) > 0 and len(args.output_dir[0]) == 0:
print("Error: Need to specify output directory with -O if using -F")
exit(1)
if len(args.filelist_name[0]) == 0 and len(args.output_dir[0]) > 0:
print("Error: Need to specify a filelist with -F if using -O")
exit(1)
if len(args.input_fasta[0]) > 0 and any_filelist:
print("Error: When using -i flag, cannot use any of other options that imply multiple files")
exit(1)
if len(args.input_fasta[0]) > 0 and not any_filelist:
return "single"
elif any_filelist and len(args.input_fasta[0]) == 0:
return "multi"
else:
print("Error: Need to specify either -i or the combination of -F and -O")
exit(1) | 984865d214cca63eae8bacf5bc7be238e7209ddb | 10,196 |
def get_image_blob(im):
"""Converts an image into a network input.
Arguments:
im (ndarray): a color image
Returns:
blob (ndarray): a data blob holding an image pyramid
im_scale_factors (list): list of image scales (relative to im) used
in the image pyramid
"""
im_orig = im.astype(np.float32, copy=True)
im_orig -= cfg.PIXEL_MEANS
im_shape = im_orig.shape
im_size_min = np.min(im_shape[0:2])
im_size_max = np.max(im_shape[0:2])
processed_ims = []
im_scale_factors = []
for target_size in cfg.TEST.SCALES:
im_scale = float(target_size) / float(im_size_min)
# Prevent the biggest axis from being more than MAX_SIZE
if np.round(im_scale * im_size_max) > cfg.TEST.MAX_SIZE:
im_scale = float(cfg.TEST.MAX_SIZE) / float(im_size_max)
im = cv2.resize(im_orig, None, None, fx=im_scale, fy=im_scale,
interpolation=cv2.INTER_LINEAR)
im_scale_factors.append(im_scale)
processed_ims.append(im)
# Create a blob to hold the input images
blob = im_list_to_blob(processed_ims)
return blob, np.array(im_scale_factors) | 6d72ea1ffcdf20bbf05f75f6084a9027e771196a | 10,197 |
def get_expr_fields(self):
"""
get the Fields referenced by switch or list expression
"""
def get_expr_field_names(expr):
if expr.op is None:
if expr.lenfield_name is not None:
return [expr.lenfield_name]
else:
# constant value expr
return []
else:
if expr.op == '~':
return get_expr_field_names(expr.rhs)
elif expr.op == 'popcount':
return get_expr_field_names(expr.rhs)
elif expr.op == 'sumof':
# sumof expr references another list,
# we need that list's length field here
field = None
for f in expr.lenfield_parent.fields:
if f.field_name == expr.lenfield_name:
field = f
break
if field is None:
raise Exception("list field '%s' referenced by sumof not found" % expr.lenfield_name)
# referenced list + its length field
return [expr.lenfield_name] + get_expr_field_names(field.type.expr)
elif expr.op == 'enumref':
return []
else:
return get_expr_field_names(expr.lhs) + get_expr_field_names(expr.rhs)
# get_expr_field_names()
# resolve the field names with the parent structure(s)
unresolved_fields_names = get_expr_field_names(self.expr)
# construct prefix from self
prefix = [('', '', p) for p in self.parents]
if self.is_container:
prefix.append(('', '', self))
all_fields = _c_helper_resolve_field_names (prefix)
resolved_fields_names = list(filter(lambda x: x in all_fields.keys(), unresolved_fields_names))
if len(unresolved_fields_names) != len(resolved_fields_names):
raise Exception("could not resolve all fields for %s" % self.name)
resolved_fields = [all_fields[n][1] for n in resolved_fields_names]
return resolved_fields | 103aa0ac54be37b23d9695dddfda9972a9f0d7f0 | 10,198 |
import math
import tensorflow as tf
def add_bias_towards_void(transformer_class_logits, void_prior_prob=0.9):
"""Adds init bias towards the void (no object) class to the class logits.
We initialize the void class with a large probability, similar to Section 3.3
of the Focal Loss paper.
Reference:
Focal Loss for Dense Object Detection, ICCV 2017.
https://arxiv.org/abs/1708.02002
Tsung-Yi Lin, Priya Goyal, Ross Girshick, Kaiming He, Piotr Dollár.
Args:
transformer_class_logits: A [batch, num_mask_slots, num_classes] tensor, the
class logits predicted by the transformer. It concats (num_classes - 1)
non-void classes, including both thing classes and stuff classes, and the
void class (the last channel). If the dataset class IDs do not follow this
order, MaX-DeepLab loss functions will handle the mapping and thus the
architecture still supports any dataset.
void_prior_prob: A float, the desired probability (after softmax) of the
void class at initialization. Defaults to 0.9 as in MaX-DeepLab.
Returns:
updated_transformer_class_logits: A [batch, num_mask_slots, num_classes]
Raises:
ValueError: If the rank of transformer_class_logits is not 3.
"""
class_logits_shape = transformer_class_logits.get_shape().as_list()
if len(class_logits_shape) != 3:
raise ValueError('Input transformer_class_logits should have rank 3.')
init_bias = [0.0] * class_logits_shape[-1]
init_bias[-1] = math.log(
(class_logits_shape[-1] - 1) * void_prior_prob / (1 - void_prior_prob))
# Broadcasting the 1D init_bias to the 3D transformer_class_logits.
return transformer_class_logits + tf.constant(init_bias, dtype=tf.float32) | f5b3439fc7fbc987bbcbc3b64fd689208db7c5e6 | 10,199 |
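# Usage sketch for add_bias_towards_void above (illustrative, not from the original
# source): with 3 classes and void_prior_prob=0.9, the void logit starts at log(2*0.9/0.1).
import tensorflow as tf

logits = tf.zeros([2, 4, 3])  # [batch, num_mask_slots, num_classes]
biased = add_bias_towards_void(logits)
print(tf.nn.softmax(biased[0, 0]).numpy())  # ~[0.05, 0.05, 0.9]; void class starts near 0.9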