content (string, 35 to 762k chars) | sha1 (string, 40 chars) | id (int64, 0 to 3.66M) |
---|---|---|
def deprecated() -> None:
"""Run the command and print a deprecated notice."""
LOG.warning("c2cwsgiutils_coverage_report.py is deprecated; use c2cwsgiutils-coverage-report instead")
return main() | ea3309fc308dd969872f7a4630c137e76a3659b0 | 7,400 |
def build_syscall_Linux(syscall, arg_list, arch_bits, constraint=None, assertion = None, clmax=SYSCALL_LMAX, optimizeLen=False):
"""
arch_bits = 32 or 64 :)
"""
# Check args
if( syscall.nb_args() != len(arg_list)):
error("Error. Expected {} arguments, got {}".format(len(syscall.arg_types), len(arg_list)))
return None
# Check args length
for i in range(0,len(arg_list)):
if( not verifyArgType(arg_list[i], syscall.arg_types[i])):
error("Argument error for '{}': expected '{}', got '{}'".format(arg_list[i], syscall.arg_types[i], type(arg_list[i])))
return None
# Check constraint and assertion
if( constraint is None ):
constraint = Constraint()
if( assertion is None ):
assertion = getBaseAssertion()
# Check if we have the function !
verbose("Trying to call {}() function directly".format(syscall.def_name))
func_call = build_call(syscall.function(), arg_list, constraint, assertion, clmax=clmax, optimizeLen=optimizeLen)
if( not isinstance(func_call, str) ):
verbose("Success")
return func_call
else:
if( not constraint.chainable.ret ):
verbose("Coudn't call {}(), try direct syscall".format(syscall.def_name))
else:
verbose("Couldn't call {}() and return to ROPChain".format(syscall.def_name))
return None
# Otherwise do syscall directly
# Set the registers
args = [(Arch.n2r(x[0]), x[1]) for x in list(zip(syscall.arg_regs, arg_list)) + syscall.syscall_arg_regs]
chain = popMultiple(args, constraint, assertion, clmax-1, optimizeLen=optimizeLen)
if( not chain ):
verbose("Failed to set registers for the mprotect syscall")
return None
# Int 0x80
if( arch_bits == 32 ):
syscall_gadgets = search(QueryType.INT80, None, None, constraint, assertion)
# syscall
elif( arch_bits == 64):
syscall_gadgets = search(QueryType.SYSCALL, None, None, constraint, assertion)
if( not syscall_gadgets ):
verbose("Failed to find an 'int 0x80' OR 'syscall' gadget")
return None
else:
chain.addChain(syscall_gadgets[0])
verbose("Success")
return chain | 1fc9e5eadb688e58f2e6ac3de4d678e3040a1086 | 7,401 |
def gamma(x):
"""Diffusion error (normalized)"""
CFL = x[0]
kh = x[1]
return (
1.
/ (-2)
* (
4. * CFL ** 2 / 3
- 7. * CFL / 3
+ (-23. * CFL ** 2 / 12 + 35 * CFL / 12) * np.cos(kh)
+ (2. * CFL ** 2 / 3 - 2 * CFL / 3) * np.cos(2 * kh)
+ (-CFL ** 2 / 12 + CFL / 12) * np.cos(3 * kh)
)
) | c8689e1388338cc4d6b1b135f09db90f0e866346 | 7,402 |
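A minimal check of gamma() above, assuming numpy is available as np (the function body already requires it); the CFL and kh values are arbitrary.

import numpy as np

# Evaluate the normalized diffusion error at CFL = 0.5 for a few wavenumbers kh.
for kh in (0.1, np.pi / 4, np.pi / 2):
    print(f"kh={kh:.3f} -> gamma={gamma([0.5, kh]):.6f}")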
import re
def copylabel(original_name):
"""create names/labels with the sequence (Copy), (Copy 2), (Copy 3), etc."""
copylabel = pgettext_lazy("this is a copy", "Copy")
copy_re = f"\\({copylabel}( [0-9]*)?\\)"
match = re.search(copy_re, original_name)
if match is None:
label = f"{original_name} ({copylabel})"
elif match.groups()[0] is None:
label = re.sub(copy_re, f"({copylabel} 2)", original_name)
else:
n = int(match.groups()[0].strip()) + 1
label = re.sub(copy_re, f"({copylabel} {n})", original_name)
return label | 1f838c33faf347b4219ca23083b664bda01cb9ef | 7,403 |
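A hedged usage sketch for copylabel() above. It assumes pgettext_lazy comes from Django and that the active locale renders it as "Copy"; inside a configured Django project the sequence looks like this.

from django.utils.translation import pgettext_lazy  # assumed source of pgettext_lazy

name = "Quarterly report"
name = copylabel(name)   # 'Quarterly report (Copy)'
name = copylabel(name)   # 'Quarterly report (Copy 2)'
name = copylabel(name)   # 'Quarterly report (Copy 3)'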
def load_opts_from_mrjob_confs(runner_alias, conf_paths=None):
"""Load a list of dictionaries representing the options in a given
list of mrjob config files for a specific runner. Returns
``[(path, values), ...]``. If a path is not found, use ``(None, {})`` as
its value.
If *conf_paths* is ``None``, look for a config file in the default
locations (see :py:func:`find_mrjob_conf`).
:type runner_alias: str
:param runner_alias: String identifier of the runner type, e.g. ``emr``,
``local``, etc.
:type conf_paths: list or ``None``
:param conf_paths: locations of the files to load
This will only load each config file once, even if it's referenced
from multiple paths due to symlinks.
"""
if conf_paths is None:
results = load_opts_from_mrjob_conf(runner_alias)
else:
# don't include conf files that were loaded earlier in conf_paths
already_loaded = []
# load configs in reversed order so that order of conf paths takes
# precedence over inheritance
results = []
for path in reversed(conf_paths):
results = load_opts_from_mrjob_conf(
runner_alias, path, already_loaded=already_loaded) + results
if runner_alias and not any(conf for path, conf in results):
log.warning('No configs specified for %s runner' % runner_alias)
return results | 6ef2acc7dce0de5e467456d376a52c8078336c55 | 7,404 |
def clut8_rgb888(i):
"""Reference CLUT for wasp-os.
Technically speaking this is not a CLUT because the we lookup the colours
algorithmically to avoid the cost of a genuine CLUT. The palette is
designed to be fairly easy to generate algorithmically.
The palette includes all 216 web-safe colours together with 4 greys and
36 additional colours that target "gaps" at the brighter end of the web
safe set. There are 11 greys (plus black and white) although two are
fairly close together.
:param int i: Index (from 0..255 inclusive) into the CLUT
:return: 24-bit colour in RGB888 format
"""
if i < 216:
rgb888 = ( i % 6) * 0x33
rg = i // 6
rgb888 += (rg % 6) * 0x3300
rgb888 += (rg // 6) * 0x330000
elif i < 252:
i -= 216
rgb888 = 0x7f + (( i % 3) * 0x33)
rg = i // 3
rgb888 += 0x4c00 + ((rg % 4) * 0x3300)
rgb888 += 0x7f0000 + ((rg // 4) * 0x330000)
else:
i -= 252
rgb888 = 0x2c2c2c + (0x101010 * i)
return rgb888 | ca95c95306f7f4762add01f2ffc113f348e29d3b | 7,405 |
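A few spot checks of clut8_rgb888() at the palette boundaries described in its docstring.

assert clut8_rgb888(0) == 0x000000      # first web-safe entry: black
assert clut8_rgb888(215) == 0xFFFFFF    # last web-safe entry: white
print(hex(clut8_rgb888(216)))           # first of the 36 "gap" colours
print(hex(clut8_rgb888(252)))           # 0x2c2c2c, first of the four extra greys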
def get_file_from_rcsb(pdb_id,data_type='pdb'):
""" (file_name) -> file_path
fetch pdb or structure factor file for pdb_id from the RCSB website
Args:
file_name: a pdb file name
data_type (str):
'pdb' -> pdb
'xray' -> structure factor
Returns:
a file path for the pdb file_name
"""
try:
file_name = fetch.get_pdb(pdb_id,data_type,mirror='rcsb',log=null_out())
except Sorry:
file_name = ''
return file_name | 19a557a0bf4f69ba132d6d4520a124aef931f816 | 7,406 |
def parse_events(fobj):
"""Parse a trace-events file into {event_num: (name, arg1, ...)}."""
def get_argnames(args):
"""Extract argument names from a parameter list."""
return tuple(arg.split()[-1].lstrip('*') for arg in args.split(','))
events = {dropped_event_id: ('dropped', 'count')}
event_num = 0
for line in fobj:
m = event_re.match(line.strip())
if m is None:
continue
disable, name, args = m.groups()
events[event_num] = (name,) + get_argnames(args)
event_num += 1
return events | af35deff9c5b76d4d46700a738186822032d4190 | 7,407 |
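A hedged sketch of calling parse_events() above. The module globals event_re and dropped_event_id are not shown in the snippet, so the definitions below are illustrative stand-ins; QEMU's real simpletrace definitions may differ.

import io
import re

# Hypothetical stand-ins for the module globals parse_events() relies on.
event_re = re.compile(r'(disable\s+)?(\w+)\(([^)]*)\)')
dropped_event_id = 0xFFFFFFFFFFFFFFFE

trace_events = io.StringIO('qemu_vmalloc(size_t size, void *ptr) "size %zu ptr %p"\n')
print(parse_events(trace_events))
# -> {dropped_event_id: ('dropped', 'count'), 0: ('qemu_vmalloc', 'size', 'ptr')}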
def enu2ECEF(phi, lam, x, y, z, t=0.0):
""" Convert ENU local coordinates (East, North, Up) to Earth centered - Earth fixed (ECEF) Cartesian,
correcting for Earth rotation if needed.
ENU coordinates can be transformed to ECEF by two rotations:
1. A clockwise rotation over east-axis by an angle (90 - phi) to align the up-axis with the z-axis.
2. A clockwise rotation over the z-axis by and angle (90 + lam) to align the east-axis with the x-axis.
Source: http://www.navipedia.net/index.php/Transformations_between_ECEF_and_ENU_coordinates
Arguments:
phi: [float] east-axis rotation angle
lam: [float] z-axis rotation angle
x: [float] ENU x coordinate
y: [float] ENU y coordinate
z: [float] ENU z coordinate
Keyword arguments:
t: [float] time in seconds, 0 by default
Return:
(x_ecef, y_ecef, z_ecef): [tuple of floats] ECEF coordinates
"""
# Calculate ECEF coordinate from given local coordinates
x_ecef = -np.sin(lam)*x - np.sin(phi)*np.cos(lam)*y + np.cos(phi)*np.cos(lam)*z
y_ecef = np.cos(lam)*x - np.sin(phi)*np.sin(lam)*y + np.cos(phi)*np.sin(lam)*z
z_ecef = np.cos(phi) *y + np.sin(phi) *z
# Calculate time correction (in radians)
tau = 2*np.pi/(23.0*3600.0 + 56.0*60.0 + 4.09054) # Earth rotation in rad/s
yaw = -tau*t
x_temp = x_ecef
y_temp = y_ecef
# Apply time correction
x_ecef = np.cos(yaw)*x_temp + np.sin(yaw)*y_temp
y_ecef = -np.sin(yaw)*x_temp + np.cos(yaw)*y_temp
return x_ecef, y_ecef, z_ecef | 494078d7c3bf9933fcc5a1b8ac62e105233722b8 | 7,408 |
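A short usage sketch for enu2ECEF(); note that, as written, the function only rotates the local offsets into the ECEF frame and does not translate them to the site's geocentric position.

import numpy as np

# Rotate a local ENU offset at latitude 45 deg, longitude 15 deg into the ECEF frame.
phi, lam = np.radians(45.0), np.radians(15.0)
x_ecef, y_ecef, z_ecef = enu2ECEF(phi, lam, 100.0, 200.0, 50.0, t=0.0)
print(x_ecef, y_ecef, z_ecef)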
def _load_container_by_name(container_name, version=None):
""" Try and find a container in a variety of methods.
Returns the container or raises a KeyError if it could not be found
"""
for meth in (database.load_container, # From the labware database
_load_weird_container): # honestly don't know
log.debug(
f"Trying to load container {container_name} via {meth.__name__}")
try:
container = meth(container_name)
if meth == _load_weird_container:
container.properties['type'] = container_name
log.info(f"Loaded {container_name} from {meth.__name__}")
break
except (ValueError, KeyError) as e:
log.debug(f"{container_name} not in {meth.__name__} ({repr(e)})")
else:
log.debug(
f"Trying to load container {container_name} version {version}"
f"from v2 labware store")
container = load_new_labware(container_name, version=version)
return container | 924675bc174fb8664bb2dbeb7cf21af223c73545 | 7,409 |
def get_login(discord_id):
"""Get login info for a specific user."""
discord_id_str = str(discord_id)
logins = get_all_logins()
if discord_id_str in logins:
return logins[discord_id_str]
return None | 16b7690dd4f95df1647c7060200f3938b80993c0 | 7,410 |
import json
from collections import OrderedDict
def to_json_dict(json_data):
"""Given a dictionary or JSON string; return a dictionary.
:param json_data: json_data(dict, str): Input JSON object.
:return: A Python dictionary/OrderedDict with the contents of the JSON object.
:raises TypeError: If the input object is not a dictionary or string.
"""
if isinstance(json_data, dict):
return json_data
elif isinstance(json_data, str):
return json.loads(json_data, object_hook=OrderedDict)
else:
raise TypeError(f"'json_data' must be a dict or valid JSON string; received: {json_data!r}") | e1264d88a4424630f7348cbe7794ca072c057bdf | 7,411 |
def get_keypoints():
"""Get the COCO keypoints and their left/right flip coorespondence map."""
# Keypoints are not available in the COCO json for the test split, so we
# provide them here.
keypoints = [
'nose',
'neck',
'right_shoulder',
'right_elbow',
'right_wrist',
'left_shoulder',
'left_elbow',
'left_wrist',
'right_hip',
'right_knee',
'right_ankle',
'left_hip',
'left_knee',
'left_ankle',
'right_eye',
'left_eye',
'right_ear',
'left_ear']
return keypoints | 1bedcee8c5f38bdefcd00251dd95530966a41353 | 7,412 |
def has_mtu_mismatch(iface: CoreInterface) -> bool:
"""
Helper to detect MTU mismatch and add the appropriate OSPF
mtu-ignore command. This is needed when e.g. a node is linked via a
GreTap device.
"""
if iface.mtu != DEFAULT_MTU:
return True
if not iface.net:
return False
for net_iface in iface.net.get_ifaces():
if net_iface.mtu != iface.mtu:
return True
return False | a9415ed9fbcb276a53df8dac159f48aaac831744 | 7,413 |
def phrase_boxes_alignment(flatten_boxes, ori_phrases_boxes):
""" align the bounding boxes with corresponding phrases. """
phrases_boxes = list()
ori_pb_boxes_count = list()
for ph_boxes in ori_phrases_boxes:
ori_pb_boxes_count.append(len(ph_boxes))
start_point = 0
for pb_boxes_num in ori_pb_boxes_count:
sub_boxes = list()
for i in range(start_point, start_point + pb_boxes_num):
sub_boxes.append(flatten_boxes[i])
start_point += pb_boxes_num
phrases_boxes.append(sub_boxes)
pb_boxes_count = list()
for ph_boxes in phrases_boxes:
pb_boxes_count.append(len(ph_boxes))
assert pb_boxes_count == ori_pb_boxes_count
return phrases_boxes | e961a90f61917f217ac6908263f5b6c74bc42b26 | 7,414 |
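A small worked example of phrase_boxes_alignment(); only the lengths of the inner lists in ori_phrases_boxes matter.

flatten_boxes = [[0, 0, 10, 10], [5, 5, 15, 15], [20, 20, 30, 30], [40, 40, 50, 50]]
ori_phrases_boxes = [["a", "b"], ["c"], ["d"]]   # phrases with 2, 1 and 1 boxes
print(phrase_boxes_alignment(flatten_boxes, ori_phrases_boxes))
# [[[0, 0, 10, 10], [5, 5, 15, 15]], [[20, 20, 30, 30]], [[40, 40, 50, 50]]]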
import tempfile
import subprocess
from pathlib import Path
def editor(initial_contents=None, filename=None, editor=None):
"""
Open a text editor, user edits, return results
ARGUMENTS
initial_contents
If not None, this string is written to the file before the editor
is opened.
filename
If not None, the name of the file to edit. If None, a temporary file
is used.
editor
The path to an editor to call. If None, use editor.default_editor()
"""
editor = editor or default_editor()
if not filename:
with tempfile.NamedTemporaryFile(mode='r+', suffix='.txt') as fp:
if initial_contents is not None:
fp.write(initial_contents)
fp.flush()
subprocess.call([editor, fp.name])
fp.seek(0)
return fp.read()
path = Path(filename)
if initial_contents is not None:
path.write_text(initial_contents)
subprocess.call([editor, filename])
return path.read_text() | a488d20901a512e582c4e52d771bd092467d61c9 | 7,415 |
def dismiss_notification(request):
""" Dismisses a notification
### Response
* Status code 200 (When the notification is successfully dismissed)
{
"success": <boolean: true>
}
* `success` - Whether the dismissal request succeeded or not
* Status code 400 (When the notification ID cannot be found)
{
"success": <boolean: false>,
"message": <string: "notification_not_found">
}
* `message` - Error message, when success is false
"""
response = {'success': False}
data = request.data
try:
notif = Notification.objects.get(id=data['notificationId'])
notif.dismissed_by.add(request.user)
response['success'] = True
resp_status = status.HTTP_200_OK
except Notification.DoesNotExist:
resp_status = status.HTTP_400_BAD_REQUEST
response['message'] = 'notification_not_found'
return Response(response, status=resp_status) | 97cbd560fd16da8ba0d081616e3e2504a2dbf8a0 | 7,416 |
def log_at_level(logger, message_level, verbose_level, msg):
"""
writes to log if message_level > verbose level
Returns anything written in case we might want to drop down and output at a
lower log level
"""
if message_level <= verbose_level:
logger.info(msg)
return True
return False | 4b88ee137f7c2cb638b8a058b2dceb534329c0d9 | 7,417 |
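A quick demonstration of the threshold behaviour of log_at_level().

import logging

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger("pipeline")

log_at_level(logger, message_level=1, verbose_level=3, msg="written: 1 <= 3")  # returns True
log_at_level(logger, message_level=5, verbose_level=3, msg="suppressed")       # returns False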
def datafile(tmp_path_factory):
"""Make a temp HDF5 Ocat details file within 60 arcmin of 3c273 for obsids
before 2021-Nov that persists for the testing session."""
datafile = str(tmp_path_factory.mktemp('ocat') / 'target_table.h5')
update_ocat_local(datafile, target_name='3c273', resolve_name=True, radius=60,
startDate=DATE_RANGE)
return datafile | 16448a80385ab29ebbaef8e593f96ff0167c1fdb | 7,418 |
def _collect_scalars(values):
"""Given a list containing scalars (float or int) collect scalars
into a single prefactor. Input list is modified."""
prefactor = 1.0
for i in range(len(values)-1, -1, -1):
if isinstance(values[i], (int, float)):
prefactor *= values.pop(i)
return prefactor | bea7e54eec16a9b29552439cd12ce29b9e82d40b | 7,419 |
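A small example showing both the returned prefactor and the in-place modification performed by _collect_scalars().

values = [2, "x", 3.0, "y"]
prefactor = _collect_scalars(values)
print(prefactor)   # 6.0
print(values)      # ['x', 'y']  (the scalars were popped in place)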
from pathlib import Path
def create_output_directory(validated_cfg: ValidatedConfig) -> Path:
"""
Creates a top level download directory if it does not already exist, and returns
the Path to the download directory.
"""
download_path = validated_cfg.output_directory / f"{validated_cfg.version}"
download_path.mkdir(parents=True, exist_ok=True)
return download_path | 720f45885e177b55ddbdf492655b17275c4097f8 | 7,420 |
def presentation_logistique(regression,sig=False):
"""
Mise en forme des résultats de régression logistique
Paramètres
----------
regression: modèle de régression de statsmodel
sig: optionnel, booléen
Retours
-------
DataFrame : tableau de la régression logistique
"""
# Convert the coefficients to odds ratios
df = np.exp(regression.conf_int())
df['odd ratio'] = round(np.exp(regression.params), 2)
df["p-value"] = round(regression.pvalues, 3)
df["IC"] = df.apply(lambda x : "%.2f [%.2f-%.2f]" \
% (x["odd ratio"],x[0],x[1]),axis=1)
# Add significance markers
if sig:
df["p-value"] = df["p-value"].apply(significativite)
df = df.drop([0,1], axis=1)
return df | bef9e08f463c9bc0fbb1d737a412472ab792051e | 7,421 |
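A hedged sketch of presentation_logistique() on synthetic data. It assumes statsmodels is installed and that numpy/pandas are importable in the function's module; sig is left at False so the undefined significativite helper is not needed.

import numpy as np
import pandas as pd
import statsmodels.api as sm

# Fit a small logistic regression on random data, then format the results.
rng = np.random.default_rng(0)
X = sm.add_constant(rng.normal(size=(200, 2)))
y = (rng.random(200) > 0.5).astype(int)
model = sm.Logit(y, X).fit(disp=0)

table = presentation_logistique(model)   # columns: 'odd ratio', 'p-value', 'IC'
print(table)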
def handle_colname_collisions(df: pd.DataFrame, mapper: dict, protected_cols: list) -> (pd.DataFrame, dict, dict):
"""
Description
-----------
Identify mapper columns that match protected column names. When found,
update the mapper and dataframe, and keep a dict of these changes
to return to the caller e.g. SpaceTag.
Parameters
----------
df: pd.DataFrame
submitted data
mapper: dict
a dictionary for the schema mapping (JSON) for the dataframe.
protected_cols: list
protected column names i.e. timestamp, country, admin1, feature, etc.
Output
------
pd.DataFame:
The modified dataframe.
dict:
The modified mapper.
dict:
key: new column name e.g. "day1month1year1" or "country_non_primary"
value: list of old column names e.g. ['day1','month1','year1'] or ['country']
"""
# Get names of geo fields that collide and are not primary_geo = True
non_primary_geo_cols = [d["name"] for d in mapper["geo"] if d["name"] in protected_cols and ("primary_geo" not in d or d["primary_geo"] == False)]
# Get names of date fields that collide and are not primary_date = True
non_primary_time_cols = [d['name'] for d in mapper['date'] if d["name"] in protected_cols and ('primary_date' not in d or d['primary_date'] == False)]
# Only need to change a feature column name if it qualifies another field,
# and therefore will be appended as a column to the output.
feature_cols = [d["name"] for d in mapper['feature'] if d["name"] in protected_cols and "qualifies" in d and d["qualifies"]]
# Verbose build of the collision_list, could have combined above.
collision_list = non_primary_geo_cols + non_primary_time_cols + feature_cols
# Bail if no column name collisions.
if not collision_list:
return df, mapper, {}
# Append any collision columns with the following suffix.
suffix = "_non_primary"
# Build output dictionary and update df.
renamed_col_dict = {}
for col in collision_list:
df.rename(columns={col: col + suffix}, inplace=True)
renamed_col_dict[col + suffix] = [col]
# Update mapper
for k, vlist in mapper.items():
for dct in vlist:
if dct["name"] in collision_list:
dct["name"] = dct["name"] + suffix
elif "qualifies" in dct and dct["qualifies"]:
# change any instances of this column name qualified by another field
dct["qualifies"] = [w.replace(w, w + suffix) if w in collision_list else w for w in dct["qualifies"] ]
elif "associated_columns" in dct and dct["associated_columns"]:
# change any instances of this column name in an associated_columns dict
dct["associated_columns"] = {k: v.replace(v, v + suffix) if v in collision_list else v for k, v in dct["associated_columns"].items() }
return df, mapper, renamed_col_dict | 56819ff256cc3c1bcd2062fab0cac29bce7a0c15 | 7,422 |
import codecs
import json
def process_file(filename):
"""Read a file from disk and parse it into a structured dict."""
try:
with codecs.open(filename, encoding='utf-8', mode='r') as f:
file_contents = f.read()
except IOError as e:
log.info('Unable to index file: %s, error :%s', filename, e)
return
data = json.loads(file_contents)
sections = []
title = ''
body_content = ''
if 'current_page_name' in data:
path = data['current_page_name']
else:
log.info('Unable to index file due to no name %s', filename)
return None
if 'body' in data and data['body']:
body = PyQuery(data['body'])
body_content = body.text().replace(u'¶', '')
sections.extend(generate_sections_from_pyquery(body))
else:
log.info('Unable to index content for: %s', filename)
if 'title' in data:
title = data['title']
if title.startswith('<'):
title = PyQuery(data['title']).text()
else:
log.info('Unable to index title for: %s', filename)
return {'headers': process_headers(data, filename),
'content': body_content, 'path': path,
'title': title, 'sections': sections} | 864c04449cbd998394c07790858ccbdc2d4eea6d | 7,423 |
def revive(grid: Grid, coord: Point) -> Grid:
"""Generates a set of all cells which can be revived near coord"""
revives = set()
for offset in NEIGHBOR_OFFSETS:
possible_revive = addpos(coord, offset)
if possible_revive in grid: continue
active_count = live_around(grid, possible_revive)
if active_count == 3:
revives.add(possible_revive)
return revives | 94e928ce9dff7015f2785e5a0186f06c4f754cda | 7,424 |
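A hedged illustration of revive() on a Game of Life blinker. Grid, Point, NEIGHBOR_OFFSETS, addpos and live_around are defined elsewhere in the original module (Grid and Point are assumed to alias a set of coordinate tuples and a tuple); the stand-ins below assume a standard Moore neighbourhood and are shown only to make the behaviour concrete.

from itertools import product

# Hypothetical stand-ins for the module-level helpers revive() depends on.
NEIGHBOR_OFFSETS = [(dx, dy) for dx, dy in product((-1, 0, 1), repeat=2) if (dx, dy) != (0, 0)]

def addpos(a, b):
    return (a[0] + b[0], a[1] + b[1])

def live_around(grid, coord):
    return sum(addpos(coord, off) in grid for off in NEIGHBOR_OFFSETS)

blinker = {(0, 0), (1, 0), (2, 0)}
print(revive(blinker, (1, 0)))   # {(1, -1), (1, 1)}: the blinker's next vertical arm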
def process_table_creation_surplus(region, exchanges_list):
"""Add docstring."""
ar = dict()
ar["@type"] = "Process"
ar["allocationFactors"] = ""
ar["defaultAllocationMethod"] = ""
ar["exchanges"] = exchanges_list
ar["location"] = location(region)
ar["parameters"] = ""
ar["processDocumentation"] = process_doc_creation()
ar["processType"] = "UNIT_PROCESS"
ar["name"] = surplus_pool_name + " - " + region
ar[
"category"
] = "22: Utilities/2211: Electric Power Generation, Transmission and Distribution"
ar["description"] = "Electricity surplus in the " + str(region) + " region."
ar["description"]=(ar["description"]
+ " This process was created with ElectricityLCI "
+ "(https://github.com/USEPA/ElectricityLCI) version " + elci_version
+ " using the " + model_specs.model_name + " configuration."
)
ar["version"] = make_valid_version_num(elci_version)
return ar | 0669fce0363d807ee018b59125a13c95417294a7 | 7,425 |
import requests
import json
import os
def upload_event():
"""
Expect a well formatted event data packet (list of events)
Verify the access token in the request
Verify the packet
if verified then send to event hub
else return a failure message
"""
# authenticate the access token with okta api call
# get user id from okta and make sure the ids match
auth_token = request.headers['Authorization']
auth_headers = {"Authorization": "{}".format(auth_token)}
print(auth_headers)
print("sending request to: {}".format(IDENTITY_SERVER_SETTINGS['PERSONICLE_AUTH_API_ENDPOINT']))
auth_response = requests.get(IDENTITY_SERVER_SETTINGS['PERSONICLE_AUTH_API_ENDPOINT'], headers=auth_headers)
print(auth_response.text, auth_response.status_code)
if auth_response.status_code != requests.codes.ok or json.loads(auth_response.text).get("message", False)== False:
return Response("Unauthorised access token", 401)
try:
user_id = json.loads(auth_response.text)['user_id']
except KeyError as e:
return Response("Incorrect response from auth server", 401)
content_type = request.headers.get('Content-Type')
if (content_type == 'application/json'):
event_data_packet = request.json
else:
return Response('Content-Type not supported!', 415)
if type(event_data_packet) != type([]):
return Response("Array of events expected", 422)
# verify the event packet by making the data dictionary api call
send_records = []
send_summary = {}
for event in event_data_packet:
data_dict_params = {"data_type": "event"}
data_dict_response = requests.post(DATA_DICTIONARY_SERVER_SETTINGS['HOST_URL']+"/validate-data-packet",
json=event, params=data_dict_params)
print(data_dict_response.text)
if data_dict_response.status_code == requests.codes.ok and json.loads(data_dict_response.text).get("schema_check", False):
if user_id == event.get("individual_id", ""):
send_summary[event['event_name']] = send_summary.get(event['event_name'], 0) + 1
send_records.append(event)
else:
send_summary['incorrect_user_id'] = send_summary.get('incorrect_user_id', 0) + 1
else:
send_summary['incorrectly_formatted_events'] = send_summary.get('incorrectly_formatted_events', 0) + 1
# send the data to azure event hub
schema_file = os.path.join(AVRO_SCHEMA_LOC, "event_schema.avsc")
if len(send_records)> 0:
send_records_azure.send_records_to_eventhub(schema_file, send_records, EVENTHUB_CONFIG['EVENTHUB_NAME'])
return jsonify({"message": "Sent {} records to database".format(len(send_records)),
"summary": send_summary
}) | 9cb77e0319df209659877970f5b395910ad285e8 | 7,426 |
import copy
def makepath_coupled(model_hybrid,T,h,ode_method,sample_rate):
""" Compute paths of coupled exact-hybrid model using CHV ode_method. """
voxel = 0
# make copy of model with exact dynamics
model_exact = copy.deepcopy(model_hybrid)
for e in model_exact.events:
e.hybridType = SLOW
# setup integrator
path = np.zeros((Nt,2*model_hybrid.dimension))
path[0][0:model_hybrid.dimension] = model_hybrid.getstate(0)
path[0][model_hybrid.dimension:2*model_hybrid.dimension] = model_exact.getstate(0)
clock = np.zeros(Nt)
k = 0
tj = ode(chvrhs_coupled).set_integrator(ode_method,atol = h,rtol = h)
tj.set_f_params(model_hybrid,model_exact,sample_rate)
y0 = np.zeros(2*model_hybrid.dimension+1)
while (k+1<Nt) and (clock[k]<T):
k = k+1
s1 = tryexponential(1)
# solve
y0[0:model_hybrid.dimension] = model_hybrid.getstate(0)
y0[model_hybrid.dimension:2*model_hybrid.dimension] = model_exact.getstate(0)
y0[2*model_hybrid.dimension] = 0.
tj.set_initial_value(y0,0)
tj.integrate(s1)
ys1 = tj.y
for i in range(model_hybrid.dimension):
model_hybrid.systemState[i].value[0] = ys1[i]
for i in range(model_hybrid.dimension):
model_exact.systemState[i].value[0] = ys1[i+model_hybrid.dimension]
t_next = tj.y[2*model_hybrid.dimension]
for e in model_hybrid.events:
e.updaterate()
for e in model_exact.events:
e.updaterate()
# update slow species
r = np.random.rand()
agg_rate = 0.
for i in range(len(model_hybrid.events)):
if model_hybrid.events[i].hybridType == SLOW:
hybrid_rate = model_hybrid.events[i].rate
exact_rate = model_exact.events[i].rate
agg_rate = agg_rate + res(hybrid_rate,exact_rate )
agg_rate = agg_rate + res(exact_rate,hybrid_rate )
agg_rate = agg_rate + min(hybrid_rate,exact_rate )
else:
agg_rate = agg_rate + model_exact.events[i].rate
#agg_rate = agg_rate + model_hybrid.events[i].rate
#else:
# print("PROBLEM")
# find reaction
if r>sample_rate/(agg_rate+sample_rate):
firing_event_hybrid,firing_event_exact = findreaction_coupled(model_hybrid.events,model_exact.events,agg_rate,r)
if isinstance(firing_event_hybrid,Reaction):
firing_event_hybrid.react()
if isinstance(firing_event_exact,Reaction):
firing_event_exact.react()
clock[k] = clock[k-1] + t_next
path[k][0:model_hybrid.dimension] = model_hybrid.getstate(0)
path[k][model_hybrid.dimension:2*model_hybrid.dimension] = model_exact.getstate(0)
return path[0:k+1],clock[0:k+1] | 95e26ec633b5c10797a040583cbc6ad6d6ad9127 | 7,427 |
import torch
def process_image_keypoints(img, keypoints, input_res=224):
"""Read image, do preprocessing and possibly crop it according to the bounding box.
If there are bounding box annotations, use them to crop the image.
If no bounding box is specified but openpose detections are available, use them to get the bounding box.
"""
normalize_img = Normalize(mean=constants.IMG_NORM_MEAN, std=constants.IMG_NORM_STD)
img = img[:,:,::-1].copy() # PyTorch does not support negative stride at the moment
center, scale, bbox = bbox_from_keypoints(keypoints, imageHeight = img.shape[0])
if center is None:
return None, None, None, None, None
img, boxScale_o2n, bboxTopLeft = crop_bboxInfo(img, center, scale, (input_res, input_res))
# viewer2D.ImShow(img, name='cropped', waitTime=1) #224,224,3
if img is None:
return None, None, None, None, None
# unCropped = uncrop(img, center, scale, (input_res, input_res))
# if True:
# viewer2D.ImShow(img)
img = img.astype(np.float32) / 255.
img = torch.from_numpy(img).permute(2,0,1)
norm_img = normalize_img(img.clone())[None]
# return img, norm_img, img_original, boxScale_o2n, bboxTopLeft, bbox
bboxInfo ={"center": center, "scale": scale, "bboxXYWH":bbox}
return img, norm_img, boxScale_o2n, bboxTopLeft, bboxInfo | e30e9c9b5de106c968d538a56062fefab7c1b3ee | 7,428 |
import html
import logging
import os
import shutil
def update_output(
list_of_contents,
list_of_names,
list_of_dates,
initiate_pipeline_n_clicks,
clear_pipeline_n_clicks,
append_uploads_n_clicks,
refresh_uploads_n_clicks,
clear_uploads_n_clicks,
memory,
user_login_n_clicks,
session_data,
workflow,
initiate_pipeline_timestamp,
clear_pipeline_timestamp,
user_login_timestamp,
refresh_uploads_timestamp,
clear_uploads_timestamp,
):
"""Primary APP Pipeline function, as triggered by 'Initiate
[APP] Pipeline' UI button (located in the "Step 2 (2/2)"
section).
Parameters
----------
list_of_contents
<list of str>
Array containing user-uploaded ABI raw contents as
binary strings (thus requiring decoding)
list_of_names
<list of str>
Array containing user-uploaded ABI filenames
(does not include the full path for security reasons)
list_of_dates
<list of int>
Array containing user-uploaded ABI last modified timestamps
(integers as seconds since 1970)
initiate_pipeline_n_clicks
<int>
Total count of UI button clicks
clear_pipeline_n_clicks
<int>
Total count of UI button clicks
append_uploads_n_clicks
<int>
Total count of UI button clicks
refresh_uploads_n_clicks
<int>
Total count of UI button clicks
clear_uploads_n_clicks
<int>
Total count of UI button clicks
memory
Dash.dcc.Store(type='session')
user_login_n_clicks
<int>
Total count of UI button clicks
session_data
Dash.dcc.Store(type='session')
workflow
<type>
initiate_pipeline_timestamp
<type>
clear_pipeline_timestamp
<type>
user_login_timestamp
<type>
refresh_uploads_timestamp
<type>
clear_uploads_timestamp
<type>
"""
def show_list_of_names(USER, list_of_names):
"""Display the filenames for all successfully received
USER-uploaded ABI files.
Args:
USER: <str>
Active user
list_of_names: <list>
List of user-uploaded ABI filenames
Returns:
<html.Div([...])>
Reactive response to display after processing upload
"""
if not all([fn.endswith(tuple([".csv",".xlsx"])) for fn in list_of_names]):
return html.Div(
[
html.Br(),
html.Code(
f"⚠ UPLOAD ERROR: Not all of the {len(list_of_names)} files are CSV or Excel files !",
style={"color": "red"},
),
html.Br(),
html.Code(
f"⛔ | Please reset this upload & then perform a fresh upload of either .csv or .xlsx files."
),
]
)
return html.Div(
[
html.Br(),
html.Code(
f"✔ UPLOAD SUCCESSFUL (N={len(list_of_names)})", style={"color": "green"}
),
html.Br(),
html.Br(),
html.Details(
[
html.Summary(
html.H3(
f"File(s) received (click to expand)",
style={"textAlign": "left", "fontSize": "120%"},
)
),
html.Div(
[
html.Li(f"{'{:02d}'.format(i+1)})\t{abi}")
for (i, abi) in enumerate(sorted(list_of_names))
],
id="files-received",
style={
"textAlign": "left",
"fontSize": "60%",
"columnCount": "3",
"paddingBottom": "2%",
"fontFamily": "'Roboto Mono', monospace",
},
),
html.Hr(
style={
"borderTop": "1px solid",
"animation": "pact-gradient-text-flow 3s infinite linear",
"borderRadius": "5px",
"opacity": "0.67",
"width": "50%",
"marginLeft": "25%",
}
),
]
),
html.Br(),
html.Span(className="fader-line-short", style={"marginBottom": "20px"}),
],
style={"width": "80%", "marginLeft": "10%"},
)
not_signed_in_msg = html.Div(
[html.H6("Please log in to release the pipeline as ready for activation.")]
)
try:
if session_data: # ["user_logged_in"] == "True":
RUN_ID = session_data["RUN_ID"]
SESSION_OUTPUT_DIR = session_data["PATH_TO_SESSION_OUTPUT"]
LOG_FILE = session_data["session_log_file"]
USER = session_data["user_proper"]
UUID = session_data["UUID"]
if len(app.logger.handlers) < 1:
app.logger.info(
f"Number logger handlers = {len(app.logger.handlers)}->{logger.handlers}"
)
app.logger.info("Adding log FileHandler...")
fh = logging.FileHandler(LOG_FILE)
fh.setLevel(logging.INFO)
app.logger.addHandler(fh)
app.logger.info(
f"Number logger handlers = {len(app.logger.handlers)}->{logger.handlers}"
)
else:
return not_signed_in_msg
except KeyError as e:
app.logger.error(f"No user appears to be logged in (KeyError: {e})")
return not_signed_in_msg
### UPON USER FILE UPLOAD(S):
if list_of_contents is not None:
if initiate_pipeline_n_clicks >= 1:
init_t_elapse = tns() / 1e9 - initiate_pipeline_timestamp / 1e3
app.logger.info(f"init_t_elapse = {init_t_elapse}; ")
if init_t_elapse < 30:
if (
clear_pipeline_n_clicks > 0
and refresh_uploads_n_clicks <= clear_pipeline_n_clicks
):
if all(
clear_pipeline_timestamp > ts
for ts in [initiate_pipeline_timestamp, user_login_timestamp]
):
return [
html.H3(
f"Thanks, {USER}; the previous pipeline results have been cleared."
),
html.H4(f"Current analysis output folder: {RUN_ID}"),
html.H5(
html.Div(
[
html.Span(f"Launch a new analysis."),
html.Br(),
]
)
),
]
elif clear_pipeline_n_clicks > 0:
if clear_pipeline_timestamp > initiate_pipeline_timestamp:
if refresh_uploads_n_clicks > 0:
if refresh_uploads_timestamp > clear_pipeline_timestamp:
return show_list_of_names(USER, list_of_names)
return html.Div(
html.H5(
f"(Pipeline results [{RUN_ID}] CLEARED)", style={"color": "red"}
)
)
app.logger.info(
f"📟📶⌁⌁⌁📠Using the following as pipeline data input. \n{len(list_of_names)} USER UPLOADED FILE(S) : \n"
+ "\n 📊⇢🧬 ".join(
[
"{:>03d})\t{:>50s}".format(i + 1, abi)
for i, abi in enumerate(sorted(list_of_names))
]
)
)
app.logger.info(
f"INITIALIZING NEW PIPELINE LAUNCH:\n\n\t\t{SESSION_OUTPUT_DIR}"
)
start_time = tns()
children = []
parsed_upload_children = [
html.Details(
[
parse_contents(c, n, d, SESSION_OUTPUT_DIR, session_log_file=LOG_FILE)
for c, n, d in zip(list_of_contents, list_of_names, list_of_dates)
]
)
]
# Generate (single!) TCR alpha/beta chain pair combinations
# base pipeline reference files (e.g., agg'd fq, designated master
# reference 'genome', DataFrames, log, etc.)
try:
pipeline_output = ljoin(
[
r
for r in pipeline.run_pipeline(
RUN_ID,
SESSION_OUTPUT_DIR,
workflow=workflow,
session_log_file=LOG_FILE,
)
]
)
args = [(*(x), i + 1) for i, x in enumerate(pipeline_output)]
except Exception as e:
logs = []
report = None
with open(LOG_FILE, "r+") as log_file:
for line in log_file.readlines():
logs.append(line)
stderr = [
dcc.Textarea(
placeholder="(Main Sequence -- logger placeholder)",
value="\n".join(logs),
style={
"height": "400px",
"width": "50%",
"fontSize": "0.7rem",
"lineHeight": "0.9rem",
"fontFamily": "'Roboto Mono', monospace",
},
className="logger-text",
name="organization",
readOnly=True,
)
]
fatal_crash = "⚠ ALERT: ERROR IN MAIN PIPELINE SEQUENCE"
app.logger.error(f"{fatal_crash}: \n\n{e}")
log_exc(app.logger)
return html.Div(
[
html.H2(fatal_crash, style={"color": "red"}),
html.P(f"App runtime was: {gtt(start_time)}"),
html.Code(f"Primary error message for crash:\n{e}"),
html.H4("See [end of] AUDIT LOG (below) for failure reason."),
html.H5(f"WEB SERVER SYSTEM LOG:", style={"color": "red"}),
html.Div(stderr),
]
)
### # # # # # # # # # #### # # # # # # # # # ###
children.append(
html.Div(
[
html.Hr(),
html.Br(),
html.H4("All files analyzed in most recent upload:"),
]
)
)
""" ~ ◮ ~
S U M M A R Y
a n a l y s i s
~ ~ ~
~ ◮ ~
"""
if report:
summary_report = [
html.Div(
[
html.Br(),
html.H2(
"Pipeline Output Summary",
style={
"fontSize": "80%",
"letterSpacing": "1.33rem",
"fontFamily": "Cinzel",
"animation": "anim-text-flow-keys 120s infinite linear",
},
),
html.Hr(),
],
style={"width": "90%", "marginLeft": "5%"},
)
]
else:
summary_report = [html.Div([html.H4(f"No final output found.")])]
html_out = f"{SESSION_OUTPUT_DIR}{RUN_ID}_HTMLprops.tsv"
pd.DataFrame(
[str(c.to_plotly_json()) for c in children], columns=["DashHTMLDivComponents"]
).to_csv(html_out, encoding="utf-8", sep="\t")
app.logger.info("Processed & analzyed input files were:")
app.logger.debug(parsed_upload_children)
app.logger.info(",".join([str(type(x)) for x in parsed_upload_children]))
total_exec_time = gtt(start_time)
app.logger.info(
f"———COMPLETE——-\n\n \t ☆☆☆ Total EXECUTION TIME Required ☆☆☆\n\n \t\t = {total_exec_time} s \n\n"
)
show_exec_time = [
html.Div(
[
html.Hr(),
html.H3(
f"* ゚(>͂ ͡͡︒ ͜ ʖ ͡︒)>-。゚☆* :・゚.☆ * ・ "
),
html.H4(f"Total Execution Time Required = {total_exec_time} s"),
html.Hr(),
html.Br(),
]
)
]
if len(children) > 50:
full_report = [
html.Div(
[
html.H2(
f"NOTICE: Due to an unusually large number of results in this analysis (N={len(children)}), full report display has been automatically disabled."
)
]
)
]
else:
full_report = children
children = (
show_exec_time
+ TOC
+ summary_report
+ full_report
+ parsed_upload_children
+ [html.Div(html.Hr())]
)
app.logger.debug(",".join([str(type(x)) for x in children]))
app.logger.debug(f"Number of html.Div elements in final layout: {len(children)}")
return children
elif initiate_pipeline_n_clicks > 15:
return html.Div(
[
html.H4(
"⚠ | ALERT ! : Un𝒇ortunately, you have over-activated the pipeline submissions check system. Please re𝒇resh the page, re-log in, and re-upload the set o𝒇 ABI 𝒇iles you would like analyzed. 🛠⎆ "
),
html.H6("↺ Please Re𝒇resh the page. ↺"),
]
)
if clear_uploads_n_clicks > 0:
t_elapsed = tns() / 1e9 - clear_uploads_timestamp / 1e3
if t_elapsed < 2:
for tcr_dir in os.listdir(SESSION_OUTPUT_DIR):
grouped_clone_fqs = f"{SESSION_OUTPUT_DIR}{tcr_dir}"
if os.path.isdir(grouped_clone_fqs):
shutil.rmtree(grouped_clone_fqs)
return html.Div(
[
html.Code(f"UPLOADS CLEARED", style={"color": "red"}),
html.H5(
f'To continue, submit at least one new upload & click "✥ Append".'
),
]
)
if append_uploads_n_clicks > 0 or clear_uploads_n_clicks > 0:
if len(list_of_names) > 0 and len(memory.items()) > 0:
all_uploads = (
memory[f"{RUN_ID}-list_of_names"]
if len(memory[f"{RUN_ID}-list_of_names"]) > 0
else list_of_names
)
return show_list_of_names(USER, all_uploads)
elif len(memory.items()) == 0:
return html.Div(html.Code("NONE"))
else:
app.logger.info(
f"{USER} uploaded the following {len(list_of_names)} file(s):"
+ "\n\t ◇ 📄 "
+ "\n\t ◇ 📄 ".join(sorted(list_of_names))
+ ".\n"
)
return show_list_of_names(USER, list_of_names)
else:
return html.Div(
[html.Br(), html.H5(f"Logged in as: {USER}", style={"color": "rgb(32,92,188)"})]
) | db5f61c4850368c3f939cc35034cad9c6b9255a5 | 7,429 |
from typing import Dict
from typing import Iterable
from typing import Union
def _load_outputs(dict_: Dict) -> Iterable[Union[HtmlOutput, EbookConvertOutput]]:
"""Translates a dictionary into a list of output objects.
The dictionary is assumed to have the following structure::
{
'outputs': [{ 'path': 'output.html', 'stylesheet': '...' },
{ 'path': 'output.epub', 'ebookconvert_params': ['...'] }]
}
If the key 'outputs' is not present in the dictionary or if there are no output
sub-dictionaries, an empty list is returned instead.
The type of the output is inferred from the file name provided as a value of the 'path' key
of the output sub-dictionary.
A file name ending in the file type '.html' will produce an HtmlOutput. '.epub', '.mobi' or
any other file type excluding '.html' will produce an EbookConvertOutput.
Note that a local stylesheet *replaces* the global stylesheet, but local ebookconvert_params
are *added* to the global ebookconvert_params if present.
Args:
dict_: The dictionary.
Returns:
The list of output objects, or an empty list if no output sub-dictionaries are
present in the encapsulating dictionary or if the 'outputs' key itself is missing.
"""
outputs = []
global_stylesheet = None
global_ec_params = []
if 'stylesheet' in dict_:
global_stylesheet = dict_['stylesheet']
if 'ebookconvert_params' in dict_:
global_ec_params = _load_ebookconvert_params(dict_)
for output in dict_.get('outputs', []):
path = output['path']
file_type = path.split('.')[-1]
if 'stylesheet' not in output and global_stylesheet:
output['stylesheet'] = global_stylesheet
if file_type == 'html':
outputs.append(HtmlOutput(**output))
else:
if 'ebookconvert_params' in output:
local_ec_params = _load_ebookconvert_params(output)
output['ebookconvert_params'] = global_ec_params + local_ec_params
else:
output['ebookconvert_params'] = global_ec_params
outputs.append(EbookConvertOutput(**output))
return outputs | 229eeb33ca34266a397dca56b13f004a8647e8e5 | 7,430 |
from functools import wraps
def _async_friendly_contextmanager(func):
"""
Equivalent to @contextmanager, except the resulting (non-async) context
manager works correctly as a decorator on async functions.
"""
@wraps(func)
def helper(*args, **kwargs):
return _AsyncFriendlyGeneratorContextManager(func, args, kwargs)
return helper | 453fb89ca52101e178e0bd2c5895804ca2cc54e6 | 7,431 |
import itertools
def all_inputs(n):
"""
returns an iterator for all {-1,1}-vectors of length `n`.
"""
return itertools.product((-1, +1), repeat=n) | 526dff9332cf606f56dcb0c31b5c16a0124478ed | 7,432 |
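Example output of all_inputs() for n = 2.

print(list(all_inputs(2)))   # [(-1, -1), (-1, 1), (1, -1), (1, 1)]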
import logging
import pandas as pd
from PIL import Image, ImageEnhance
def brightness(df: pd.DataFrame, gain: float = 1.5) -> pd.DataFrame:
"""
Enhance image brightness.
Parameters
----------
df
The dataset as a dataframe.
gain
Brightness enhancement factor; defaults to 1.5.
Returns
-------
df
A new dataframe with the following changes:
* 'filename', overwrited with new brightened image filenames.
"""
logging.info('Brightening images ...')
df_out = df.copy()
new_filename_list = []
for index, row in df.iterrows():
filename = row['filename']
logging.debug(f'Brightening image {filename}')
img = Image.open(filename)
img = ImageEnhance.Brightness(img)
img = img.enhance(gain)
new_filename = make_filename(row, step='brightness')
new_filename_list.append(new_filename)
save_image(new_filename, img, dpi=(300, 300), engine='pil')
df_out['filename'] = new_filename_list
return df_out | 1bc7778a6843f31448ebd218a1a5d2b42582ef79 | 7,433 |
def generate_winner_list(winners):
""" Takes a list of winners, and combines them into a string. """
return ", ".join(winner.name for winner in winners) | 2586292d4a96f63bf40c0d043111f5087c46f7a9 | 7,434 |
def stampify_url():
"""The stampified version of the URL passed in args."""
url = request.args.get('url')
max_pages = request.args.get('max_pages')
enable_animations = bool(request.args.get('animations') == 'on')
if not max_pages:
max_pages = DEFAULT_MAX_PAGES
_stampifier = Stampifier(url, int(max_pages), enable_animations)
try:
return _stampifier.stampify().stamp_html
except StampifierError as err:
return render_template('error_screen.html',
message=err.message) | 136d95adedeeddcdc4166a9bce20414e909fa21f | 7,435 |
import os
def pleasant_lgr_stand_alone_parent(pleasant_lgr_test_cfg_path, tmpdir):
"""Stand-alone version of lgr parent model for comparing with LGR results.
"""
# Edit the configuration file before the file paths within it are converted to absolute
# (model.load_cfg converts the file paths)
cfg = load(pleasant_lgr_test_cfg_path)
del cfg['setup_grid']['lgr']
cfg['simulation']['sim_ws'] = os.path.join(tmpdir, 'pleasant_lgr_just_parent')
# save out the edited configuration file
path, fname = os.path.split(pleasant_lgr_test_cfg_path)
new_file = os.path.join(path, 'pleasant_lgr_just_parent.yml')
dump(new_file, cfg)
# load in the edited configuration file, converting the paths to absolute
cfg = MF6model.load_cfg(new_file)
# add some stuff just for the tests
cfg['gisdir'] = os.path.join(cfg['simulation']['sim_ws'], 'gis')
m = MF6model.setup_from_cfg(cfg)
m.write_input()
return m | a097f3e4a884bcc4c350955e8b24f886fdf9e009 | 7,436 |
from re import T
def twitter_channel():
"""
RESTful CRUD controller for Twitter channels
- appears in the administration menu
Only 1 of these normally in existence
@ToDo: Don't enforce
"""
#try:
# import tweepy
#except:
# session.error = T("tweepy module not available within the running Python - this needs installing for non-Tropo Twitter support!")
# redirect(URL(c="admin", f="index"))
tablename = "%s_%s" % (c, f)
table = s3db[tablename]
# CRUD Strings
s3.crud_strings[tablename] = Storage(
title_display = T("Twitter account Details"),
title_list = T("Twitter accounts"),
label_create = T("Add Twitter account"),
title_update = T("Edit Twitter account"),
label_list_button = T("View Twitter accounts"),
msg_record_created = T("Twitter account added"),
msg_record_deleted = T("Twitter account deleted"),
msg_record_modified = T("Twitter account updated"),
msg_list_empty = T("No Twitter accounts currently defined"),
)
def prep(r):
oauth_consumer_key = settings.msg.twitter_oauth_consumer_key
oauth_consumer_secret = settings.msg.twitter_oauth_consumer_secret
if not (oauth_consumer_key and oauth_consumer_secret):
session.error = T("You should edit Twitter settings in models/000_config.py")
return True
oauth = tweepy.OAuthHandler(oauth_consumer_key,
oauth_consumer_secret)
if r.http == "GET" and r.method in ("create", "update"):
# We're showing the form
_s3 = session.s3
try:
_s3.twitter_oauth_url = oauth.get_authorization_url()
_s3.twitter_request_key = oauth.request_token.key
_s3.twitter_request_secret = oauth.request_token.secret
except tweepy.TweepError:
session.error = T("Problem connecting to twitter.com - please refresh")
return True
#table.pin.readable = True
#table.pin.label = T("PIN number from Twitter (leave empty to detach account)")
#table.pin.value = ""
table.twitter_account.label = T("Current Twitter account")
return True
else:
# Not showing form, no need for pin
#table.pin.readable = False
#table.pin.label = T("PIN") # won't be seen
#table.pin.value = "" # but let's be on the safe side
pass
return True
#s3.prep = prep
# Post-process
def postp(r, output):
if r.interactive:
# Normal Action Buttons
s3_action_buttons(r)
# Custom Action Buttons for Enable/Disable
table = r.table
query = (table.deleted == False)
rows = db(query).select(table.id,
table.enabled,
)
restrict_e = [str(row.id) for row in rows if not row.enabled]
restrict_d = [str(row.id) for row in rows if row.enabled]
s3.actions += [{"label": s3_str(T("Enable")),
"restrict": restrict_e,
"url": URL(args=["[id]", "enable"]),
"_class": "action-btn",
},
{"label": s3_str(T("Disable")),
"restrict": restrict_d,
"url": URL(args = ["[id]", "disable"]),
"_class": "action-btn",
},
]
if not s3task._is_alive():
# No Scheduler Running
s3.actions += [{"label": s3_str(T("Poll")),
"restrict": restrict_d,
"url": URL(args = ["[id]", "poll"]),
"_class": "action-btn",
},
]
#if isinstance(output, dict):
# if r.http == "GET" and r.method in ("create", "update"):
# rheader = A(T("Collect PIN from Twitter"),
# _href = session.s3.twitter_oauth_url,
# _target = "_blank")
# output["rheader"] = rheader
return output
s3.postp = postp
return s3_rest_controller() | 05627d4445a7c99d05c61bfef923b9d52d774512 | 7,437 |
def init_time(p, **kwargs):
"""Initialize time data."""
time_data = {
'times': [p['parse']],
'slots': p['slots'],
}
time_data.update(**kwargs)
return time_data | 2aff3819d561f0dc9e0c9b49702b8f3fbb6e9252 | 7,438 |
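A short example of init_time(); the field values are hypothetical and extra keyword arguments simply end up in the returned dict.

p = {"parse": "2024-05-01T09:00", "slots": ["morning"], "other": "ignored"}
print(init_time(p, source="user"))
# {'times': ['2024-05-01T09:00'], 'slots': ['morning'], 'source': 'user'}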
import os
def welcome():
"""
The code that executes at launch. It guides the user through menus and starts other functions from the other folders
based on the users choices.
"""
clear()
print(msg)
print(Style.RESET_ALL)
print("\n")
print("Welcome!")
input("Press ENTER key to begin!")
clear()
print(feesNotice)
input()
clear()
# Checks to see if the account information was stored on the local machine previously
if not os.path.isfile(os.path.join(CURRENT_DIR, "sav", "credentials.pickle")):
# If the user hasn't successfully logged in before, it takes them to a menu sequence to log in, and saves the info locally
# for the next time the script is run.
login.login_interface()
user = login.Credentials()
print(f"You are logged in as: {user.username}")
instructions = """
Instructions:
This program takes in a csv file full of docket numbers and will automatically
populate 2 folders with the raw JSON data and all of the PDF documents associated
with that docket.
You will now select the path where your input csv is located.
Press ENTER to open the file browser.
"""
print(instructions)
input()
clear()
# Opens a graphical file browser and returns the path to the csv file that the user selected.
csvChoice = file_browser.browseCSVFiles()
# Assigns the choice to a global variable, so other modules can find the path that the user specified.
global_variables.CSV_INPUT_PATH = csvChoice
options = """
Type in one of the following numbers and press ENTER to specify your choice:
[1] Get all JSON files and PDF files.
[2] Get JSON files only.
[3] Get PDF files only.
( Only select 3 if you already have a directory full of JSON files. )
( The JSON files are needed to extract the download links from. )
[4] More options.
Enter your response below.[1/2/3/4]
"""
print(options)
def handle_input():
"""
Prompts the user for a choice and calls the function from the 'modules' folder that corresponds
with that choice.
"""
userChoice = input()
# Choice 1 is downloading all json and pdf files.
if userChoice == "1":
clear()
menus.select_paths_menu()
clear()
menus.specify_client_matter_menu()
print(msg)
get_json_and_pdfs()
# Choice 2 is downloading only JSON files.
elif userChoice == "2":
clear()
menus.select_paths_menu(pdfOption=False)
menus.specify_client_matter_menu()
print(msg)
get_json.thread_download_json()
# Choice 3 is downloading only PDF files.
elif userChoice == "3":
clear()
menus.select_paths_menu()
menus.specify_client_matter_menu()
print(msg)
link_list = get_pdfs.get_urls("json-output")
# get_pdfs.multiprocess_download_pdfs(link_list)
get_pdfs.thread_download_pdfs(link_list)
elif userChoice == "4":
clear()
menus.other_options_menu()
# If the user enters anything other than a valid choice, then it tells them their choice is invalid and
# restarts this function, prompting them to make a choice again.
else:
print("Please Enter Valid input (1, 2 or 3)")
return handle_input()
handle_input()
try:
os.startfile(os.path.join(CURRENT_DIR, "log"))
except:
pass
print("\nDone.")
input() | c9821f4133e7d52f3a59a0d4f810d9120429772a | 7,439 |
def bsplslib_D0(*args):
"""
:param U:
:type U: float
:param V:
:type V: float
:param UIndex:
:type UIndex: int
:param VIndex:
:type VIndex: int
:param Poles:
:type Poles: TColgp_Array2OfPnt
:param Weights:
:type Weights: TColStd_Array2OfReal &
:param UKnots:
:type UKnots: TColStd_Array1OfReal &
:param VKnots:
:type VKnots: TColStd_Array1OfReal &
:param UMults:
:type UMults: TColStd_Array1OfInteger &
:param VMults:
:type VMults: TColStd_Array1OfInteger &
:param UDegree:
:type UDegree: int
:param VDegree:
:type VDegree: int
:param URat:
:type URat: bool
:param VRat:
:type VRat: bool
:param UPer:
:type UPer: bool
:param VPer:
:type VPer: bool
:param P:
:type P: gp_Pnt
:rtype: void
"""
return _BSplSLib.bsplslib_D0(*args) | 4c7a95448c116ef04fac36168c05a22597bc0684 | 7,440 |
def b_cross(self) -> tuple:
"""
Solve cross one piece at a time.
Returns
-------
tuple of (list of str, dict of {'CROSS': int})
Moves to solve cross, statistics (move count in ETM).
Notes
-----
The cube is rotated so that the white centre is facing down.
The four white cross pieces are moved to the yellow side (on top),
starting with the edge which is the fewest moves away from solved.
The edges are then moved down to the white centre in the fewest
number of moves.
"""
cube = self.cube
solve = []
edges = (1,0), (-1,1), (1,-1), (0,1)
cross = {
'L': (4,1,-1),
"L'": (2,1,0),
'F': (1,1,-1),
"F'": (3,1,0),
'R': (2,1,-1),
"R'": (4,1,0),
'B': (3,1,-1),
"B'": (1,1,0),
'L2': (5,1,0),
'F2': (5,0,1),
'R2': (5,1,-1),
'B2': (5,-1,1),
"L U' F": (1,0,1),
"L' U' F": (1,-1,1),
"F U' R": (2,0,1),
"F' U' R": (2,-1,1),
"R' U F'": (3,0,1),
"R U F'": (3,-1,1),
"B' U R'": (4,0,1),
"B U R'": (4,-1,1)
}
for s, side in enumerate(cube):
if side[1][1] == 'U':
break
if s != 5:
move = ('z2', "z'", "x'", 'z', 'x')[s]
self.move(move)
solve.append(move)
while not(all(cube[0][y][x] == 'U' for y, x in edges) or
all(cube[5][y][x] == 'U' for y, x in edges) and
all(side[-1][1] == side[1][1] for side in cube[1:5])):
for edge in cross:
if cube[cross[edge][0]][cross[edge][1]][cross[edge][-1]] == 'U':
break
slot = 'LFRB'.index(edge[0])
if cube[0][edges[slot][0]][edges[slot][1]] != 'U':
moves = edge.split()
elif cube[0][edges[slot-3][0]][edges[slot-3][1]] != 'U':
moves = ['U'] + edge.split()
elif cube[0][edges[slot-1][0]][edges[slot-1][1]] != 'U':
moves = ["U'"] + edge.split()
else:
moves = ['U2'] + edge.split()
self.move(moves)
solve.extend(moves)
while any(cube[5][y][x] != 'U' for y, x in edges):
if cube[1][0][1] == cube[1][1][1] and cube[0][1][0] == 'U':
self.move('L2')
solve.append('L2')
if cube[2][0][1] == cube[2][1][1] and cube[0][-1][1] == 'U':
self.move('F2')
solve.append('F2')
if cube[3][0][1] == cube[3][1][1] and cube[0][1][-1] == 'U':
self.move('R2')
solve.append('R2')
if cube[4][0][1] == cube[4][1][1] and cube[0][0][1] == 'U':
self.move('B2')
solve.append('B2')
if any(cube[s][0][1] == cube[(s + 2) % 4 + 1][1][1] and
cube[0][edges[s-1][0]][edges[s-1][1]] == 'U'
for s in range(1, 5)):
self.move('U')
solve.append('U')
elif any(cube[s][0][1] == cube[s % 4 + 1][1][1] and
cube[0][edges[s-1][0]][edges[s-1][1]] == 'U'
for s in range(1, 5)):
self.move("U'")
solve.append("U'")
elif any(cube[s][0][1] == cube[(s + 1) % 4 + 1][1][1] and
cube[0][edges[s-1][0]][edges[s-1][1]] == 'U'
for s in range(1, 5)):
self.move('U2')
solve.append('U2')
return solve, {'CROSS': len(solve)} | f0a82ea6b6634b78e4252ac264a537af87be0fc1 | 7,441 |
def get_db():
"""Creates a 'SQLAlchemy' instance.
Creates a 'SQLAlchemy' instance and store it to 'flask.g.db'.
Before this function is called, Flask's application context must be exist.
Returns:
a 'SQLAlchemy' instance.
"""
if 'db' not in g:
current_app.logger.debug('construct SQLAlchemy instance.')
db = SQLAlchemy(current_app)
g.db = db
return g.db | 903e2e94a81112603d159f5d1186ecbc2e954afa | 7,442 |
def retain_groundtruth(tensor_dict, valid_indices):
"""Retains groundtruth by valid indices.
Args:
tensor_dict: a dictionary of following groundtruth tensors -
fields.InputDataFields.groundtruth_boxes
fields.InputDataFields.groundtruth_classes
fields.InputDataFields.groundtruth_confidences
fields.InputDataFields.groundtruth_keypoints
fields.InputDataFields.groundtruth_instance_masks
fields.InputDataFields.groundtruth_is_crowd
fields.InputDataFields.groundtruth_area
fields.InputDataFields.groundtruth_label_types
fields.InputDataFields.groundtruth_difficult
valid_indices: a tensor with valid indices for the box-level groundtruth.
Returns:
a dictionary of tensors containing only the groundtruth for valid_indices.
Raises:
ValueError: If the shape of valid_indices is invalid.
ValueError: field fields.InputDataFields.groundtruth_boxes is
not present in tensor_dict.
"""
input_shape = valid_indices.get_shape().as_list()
if not (len(input_shape) == 1 or
(len(input_shape) == 2 and input_shape[1] == 1)):
raise ValueError('The shape of valid_indices is invalid.')
valid_indices = tf.reshape(valid_indices, [-1])
valid_dict = {}
if fields.InputDataFields.groundtruth_boxes in tensor_dict:
# Prevents reshape failure when num_boxes is 0.
num_boxes = tf.maximum(tf.shape(
tensor_dict[fields.InputDataFields.groundtruth_boxes])[0], 1)
for key in tensor_dict:
if key in [fields.InputDataFields.groundtruth_boxes,
fields.InputDataFields.groundtruth_classes,
fields.InputDataFields.groundtruth_confidences,
fields.InputDataFields.groundtruth_keypoints,
fields.InputDataFields.groundtruth_keypoint_visibilities,
fields.InputDataFields.groundtruth_instance_masks]:
valid_dict[key] = tf.gather(tensor_dict[key], valid_indices)
# Input decoder returns empty tensor when these fields are not provided.
# Needs to reshape into [num_boxes, -1] for tf.gather() to work.
elif key in [fields.InputDataFields.groundtruth_is_crowd,
fields.InputDataFields.groundtruth_area,
fields.InputDataFields.groundtruth_difficult,
fields.InputDataFields.groundtruth_label_types]:
valid_dict[key] = tf.reshape(
tf.gather(tf.reshape(tensor_dict[key], [num_boxes, -1]),
valid_indices), [-1])
# Fields that are not associated with boxes.
else:
valid_dict[key] = tensor_dict[key]
else:
raise ValueError('%s not present in input tensor dict.' % (
fields.InputDataFields.groundtruth_boxes))
return valid_dict | a6681d8e6b3c8c44fa4fee9143ab57538eac2661 | 7,443 |
import json
def cluster_list_node(realm, id):
""" this function add a cluster node """
cluster = Cluster(ES)
account = Account(ES)
account_email = json.loads(request.cookies.get('account'))["email"]
if account.is_active_realm_member(account_email, realm):
return Response(json.dumps(cluster.list_nodes(realm, id)))
else:
return Response({"failure": "account identifier and realm is not an active match"}) | eebccc3c7c3c710fc2c26ee0dfba5481e2e2043a | 7,444 |
import logging
import os
def compute_features_for_audio_file(audio_file):
"""
Parameters
----------
audio_file: str
Path to the audio file.
Returns
-------
features: dict
Dictionary of audio features.
"""
# Load Audio
logging.info("Loading audio file %s" % os.path.basename(audio_file))
audio, sr = librosa.load(audio_file, sr=msaf.Anal.sample_rate)
# Compute harmonic-percussive source separation
logging.info("Computing Harmonic Percussive source separation...")
y_harmonic, y_percussive = librosa.effects.hpss(audio)
# Output features dict
features = {}
# Compute framesync features
features["mfcc"], features["hpcp"], features["tonnetz"], \
features["cqt"], features["gmt"] = compute_features(audio, y_harmonic)
# Estimate Beats
features["beats_idx"], features["beats"] = compute_beats(
y_percussive, sr=msaf.Anal.sample_rate)
# Compute Beat-sync features
features["bs_mfcc"], features["bs_hpcp"], features["bs_tonnetz"], \
features["bs_cqt"], features["bs_gmt"] = compute_beat_sync_features(features,
features["beats_idx"])
# Analysis parameters
features["anal"] = {}
features["anal"]["frame_rate"] = msaf.Anal.frame_size
features["anal"]["hop_size"] = msaf.Anal.hop_size
features["anal"]["mfcc_coeff"] = msaf.Anal.mfcc_coeff
features["anal"]["sample_rate"] = msaf.Anal.sample_rate
features["anal"]["window_type"] = msaf.Anal.window_type
features["anal"]["n_mels"] = msaf.Anal.n_mels
features["anal"]["dur"] = audio.shape[0] / float(msaf.Anal.sample_rate)
return features | 49f9a42b505d5847d5f9dae09a893b9faebc0396 | 7,445 |
def complete_json(input_data, ref_keys='minimal', input_root=None,
output_fname=None, output_root=None):
"""
Parameters
----------
input_data : str or os.PathLike or list-of-dict
Filepath to JSON with data or list of dictionaries with information
about annotations
ref_keys : {'minimal', 'info'}, optional
Which reference keys to check in `input_data`. Default: 'minimal'
input_root : str, optional
If `input_data` is a filename the key in the file containing data about
annotations. If not specified will be based on provided `ref_keys`.
Default: None
output_fname : str or os.PathLike, optional
Filepath where complete JSON should be saved. If not specified the
data are not saved to disk. Default: None
output_root : str, optional
If `output_fname` is not None, the key in the saved JSON where
completed information should be stored. If not specified will be based
on `input_root`. Default: None
Returns
-------
output : list-of-dict
Information about annotations from `input_data`
"""
valid_keys = ['minimal', 'info']
if ref_keys not in valid_keys:
raise ValueError(f'Invalid ref_keys: {ref_keys}. Must be one of '
f'{valid_keys}')
# this is to add missing fields to existing data
# could accept data dict list or filename as input
# set minimal vs info
if ref_keys == 'minimal':
ref_keys = MINIMAL_KEYS
if input_root is None:
input_root = 'annotations'
elif ref_keys == 'info':
ref_keys = INFO_KEYS
if input_root is None:
input_root = 'info'
# check input
if not isinstance(input_data, list):
input_data = parse_json(input_data, root=input_root)
# make output
output = []
for item in input_data:
output.append({
key: (item[key] if key in item else None)
for key in ref_keys
})
# write output
if output_fname is not None:
if output_root is None:
output_root = input_root
write_json(output, output_fname, root=output_root)
return output | 1a732c87670c890b406e935494ca2c51a0f0dc83 | 7,446 |
def BuildSystem(input_dir, info_dict, block_list=None):
"""Build the (sparse) system image and return the name of a temp
file containing it."""
return CreateImage(input_dir, info_dict, "system", block_list=block_list) | 4537da68b322e7d7714faa2c365ece1c67b255f2 | 7,447 |
def add_upgrades(ws, cols, lnth):
"""
"""
for col in cols:
cell = "{}1".format(col)
ws[cell] = "='New_4G_Sites'!{}".format(cell)
for col in cols[:2]:
for i in range(2, lnth):
cell = "{}{}".format(col, i)
ws[cell] = "='New_4G_Sites'!{}".format(cell)
for col in cols[2:]:
for i in range(2, lnth):
cell = "{}{}".format(col, i)
part1 = "=IFERROR(IF(Towers_non_4G_MNO!{}>0,IF(Towers_non_4G_MNO!{}>".format(cell,cell)
part2 = "New_4G_Sites!{},New_4G_Sites!{},New_4G_Sites!{}-Towers_non_4G_MNO!{}),0),0)".format(cell,cell,cell,cell)
ws[cell] = part1 + part2 #+ part3 + part4
columns = ['C','D','E','F','G','H','I','J','K','L']
ws = format_numbers(ws, columns, (1, 200), 'Comma [0]', 0)
set_border(ws, 'A1:L{}'.format(lnth-1), "thin", "000000")
return ws | a5c33a59992976dfbdd775ce55c6017cef7d7f1d | 7,448 |
import torch
import torch.nn.functional as F
def vae_loss(recon_x, x, mu, logvar, reduction="mean"):
"""
Effects
-------
Reconstruction + KL divergence losses summed over all elements and batch
See Appendix B from VAE paper:
Kingma and Welling. Auto-Encoding Variational Bayes. ICLR, 2014
https://arxiv.org/abs/1312.6114
"""
BCE = F.binary_cross_entropy(recon_x, x, reduction=reduction)
# 0.5 * mean(1 + log(sigma^2) - mu^2 - sigma^2)
KLD = -0.5 * torch.mean(1 + logvar - mu.pow(2) - logvar.exp())
return BCE, KLD | ecdabfd62d7e7c7aa858b36669a73e6081891f83 | 7,449 |
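A minimal usage sketch for vae_loss above; the batch size of 8, the flattened
28x28 inputs and the latent size of 20 are illustrative assumptions.
import torch

recon_x = torch.rand(8, 784)              # reconstructions in [0, 1]
x = torch.rand(8, 784)                    # targets in [0, 1], as required by BCE
mu, logvar = torch.zeros(8, 20), torch.zeros(8, 20)
bce, kld = vae_loss(recon_x, x, mu, logvar)
loss = bce + kld                          # typical total VAE objective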
from functools import reduce
def Or(*args):
"""Defines the three valued ``Or`` behaviour for a 2-tuple of
three valued logic values"""
def reduce_or(cmp_intervala, cmp_intervalb):
if cmp_intervala[0] is True or cmp_intervalb[0] is True:
first = True
elif cmp_intervala[0] is None or cmp_intervalb[0] is None:
first = None
else:
first = False
if cmp_intervala[1] is True or cmp_intervalb[1] is True:
second = True
elif cmp_intervala[1] is None or cmp_intervalb[1] is None:
second = None
else:
second = False
return (first, second)
return reduce(reduce_or, args) | fc252c0129904d7ad58c18adad0b08c638b2bd11 | 7,450 |
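A short illustration of the three-valued Or above; each tuple pairs two
three-valued comparison results (True, False, or None for "unknown").
print(Or((True, None), (False, False)))   # -> (True, None)
print(Or((False, None), (False, False)))  # -> (False, None)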
from flask import request
def create_task():
"""Create a new task"""
data = request.get_json()
    # In a more advanced solution, a generic validation should be done
if (TaskValidator._validate_title(data)):
TaskPersistence.create(title=data['title'])
return {'success': True, 'message': 'Task has been saved'}
# Simple error response
return {'error': 'bad request', 'message': 'not valid data', 'status': 400} | e42b06ed297b589cacedae522b81c898a01d6b72 | 7,451 |
import socket
def get_socket_with_reuseaddr() -> socket.socket:
"""Returns a new socket with `SO_REUSEADDR` option on, so an address
can be reused immediately, without waiting for TIME_WAIT socket
state to finish.
On Windows, `SO_EXCLUSIVEADDRUSE` is used instead.
This is because `SO_REUSEADDR` on this platform allows the socket
to be bound to an address that is already bound by another socket,
without requiring the other socket to have this option on as well.
"""
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
if 'SO_EXCLUSIVEADDRUSE' in dir(socket):
sock.setsockopt(socket.SOL_SOCKET,
getattr(socket, 'SO_EXCLUSIVEADDRUSE'), 1)
else:
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
return sock | 6edbc0f0aaaeaebd9c6d0f31257de0b4dfe7df1c | 7,452 |
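Typical use of the helper above when binding a listening socket; the loopback
address and port 8080 are placeholders.
sock = get_socket_with_reuseaddr()
sock.bind(("127.0.0.1", 8080))
sock.listen(1)
sock.close()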
def get_systemd_services(service_names):
"""
:param service_names: {'service_unit_id': 'service_display_name'}
e.g., {'cloudify-rabbitmq.service': 'RabbitMQ'}
"""
systemd_services = get_services(service_names)
statuses = []
services = {}
for service in systemd_services:
is_service_running = service['instances'] and (
service['instances'][0]['state'] == 'running')
status = NodeServiceStatus.ACTIVE if is_service_running \
else NodeServiceStatus.INACTIVE
services[service['display_name']] = {
'status': status,
'extra_info': {
'systemd': service
}
}
statuses.append(status)
return services, statuses | 523efae5fb536c9326978d84ea9055aecf47da05 | 7,453 |
import datetime
from werkzeug.local import LocalProxy
def json_handler(obj):
"""serialize non-serializable data for json"""
# serialize date
if isinstance(obj, (datetime.date, datetime.timedelta, datetime.datetime)):
        return str(obj)
    elif isinstance(obj, LocalProxy):
        return str(obj)
    else:
        raise TypeError("Object of type %s with value of %s is not JSON serializable" %
                        (type(obj), repr(obj))) | ea44a5d77e608f16458a1c3405665011f2d9c70c | 7,454 |
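A usage sketch for json_handler above as the default hook of json.dumps.
import datetime
import json

payload = {"created": datetime.datetime(2021, 5, 1, 12, 30)}
print(json.dumps(payload, default=json_handler))
# {"created": "2021-05-01 12:30:00"}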
from random import choice
def _random_prefix(sentences):
"""
prefix random generator
input: list of input sentences
output: random word
"""
words = _word_dict(sentences)
return choice(words) | 7a81b5825bc0dc2ac4b75bff40a9b76af77486a3 | 7,455 |
def user_logout(*args, **kwargs):
# pylint: disable=unused-argument
"""
This endpoint is the landing page for the logged-in user
"""
# Delete the Oauth2 token for this session
log.info('Logging out User: %r' % (current_user,))
delete_session_oauth2_token()
logout_user()
flash('You were successfully logged out.', 'warning')
return flask.redirect(_url_for('backend.home')) | baaeb1f1b353eaa75bc1d81cb72a9bb931398047 | 7,456 |
import numpy as np
from PIL import Image, ImageDraw, ImageFont
def redraw_frame(image, names, aligned):
"""
Adds names and bounding boxes to the frame
"""
i = 0
unicode_font = ImageFont.truetype("DejaVuSansMono.ttf", size=17)
img_pil = Image.fromarray(image)
draw = ImageDraw.Draw(img_pil)
for face in aligned:
draw.rectangle((face[0], face[1], face[2], face[3]), outline=(0, 255, 0), width=2)
if names is not None and len(names) > i:
if names[i] == 'unknown':
draw.text((face[0], face[1] - 30), "unknown", fill=(0, 0, 255), font=unicode_font)
draw.rectangle((face[0], face[1], face[2], face[3]), outline=(0, 0, 255), width=2)
else:
draw.text((face[0], face[1] - 30), names[i], fill=(0, 255, 0), font=unicode_font)
if names is None or len(names) <= i:
draw.text((face[0], face[1] - 30), 'refreshing...', fill=(255, 0, 0), font=unicode_font)
i += 1
return np.array(img_pil) | 66adbbc42c4108855e1eea6494391957c3e91b4f | 7,457 |
import sys
import typing
def _tp_relfq_name(tp, tp_name=None, assumed_globals=None, update_assumed_globals=None,
implicit_globals=None):
# _type: (type, Optional[Union[Set[Union[type, types.ModuleType]], Mapping[Union[type, types.ModuleType], str]]], Optional[bool]) -> str
"""Provides the fully qualified name of a type relative to a set of
modules and types that is assumed as globally available.
If assumed_globals is None this always returns the fully qualified name.
If update_assumed_globals is True, this will return the plain type name,
but will add the type to assumed_globals (expected to be a set).
This way a caller can query how to generate an appropriate import section.
If update_assumed_globals is False, assumed_globals can alternatively be
a mapping rather than a set. In that case the mapping is expected to be
an alias table, mapping modules or types to their alias names desired for
displaying.
update_assumed_globals can be None (default). In that case this will return the
plain type name if assumed_globals is None as well (default).
This mode is there to have a less involved default behavior.
"""
if tp_name is None:
tp_name = util.get_class_qualname(tp)
if implicit_globals is None:
implicit_globals = _implicit_globals
else:
implicit_globals = implicit_globals.copy()
implicit_globals.update(_implicit_globals)
if assumed_globals is None:
if update_assumed_globals is None:
return tp_name
md = sys.modules[tp.__module__]
if md in implicit_globals:
return tp_name
name = tp.__module__+'.'+tp_name
pck = None
if not (md.__package__ is None or md.__package__ == ''
or name.startswith(md.__package__)):
pck = md.__package__
return name if pck is None else pck+'.'+name
if tp in assumed_globals:
try:
return assumed_globals[tp]
except:
return tp_name
elif hasattr(tp, '__origin__') and tp.__origin__ in assumed_globals:
try:
return assumed_globals[tp.__origin__]
except:
return tp_name
# For some reason Callable does not have __origin__, so we special-case
# it here. Todo: Find a cleaner solution.
elif is_Callable(tp) and typing.Callable in assumed_globals:
try:
return assumed_globals[typing.Callable]
except:
return tp_name
elif update_assumed_globals == True:
if not assumed_globals is None:
if hasattr(tp, '__origin__') and not tp.__origin__ is None:
toadd = tp.__origin__
elif is_Callable(tp):
toadd = typing.Callable
else:
toadd = tp
if not sys.modules[toadd.__module__] in implicit_globals:
assumed_globals.add(toadd)
return tp_name
else:
md = sys.modules[tp.__module__]
if md in implicit_globals:
return tp_name
md_name = tp.__module__
if md in assumed_globals:
try:
md_name = assumed_globals[md]
except:
pass
else:
if not (md.__package__ is None or md.__package__ == ''
or md_name.startswith(md.__package__)):
md_name = md.__package__+'.'+tp.__module__
return md_name+'.'+tp_name | b50b8a38e4e776d4a0a0da149ef45cfb90bcdf2f | 7,458 |
from typing import Type
def is_dapr_actor(cls: Type[Actor]) -> bool:
"""Checks if class inherits :class:`Actor`.
Args:
cls (type): The Actor implementation.
Returns:
bool: True if cls inherits :class:`Actor`. Otherwise, False
"""
return issubclass(cls, Actor) | 1c3f5b4744cf9db91c869247ab297ffc10dcfc68 | 7,459 |
import pickle as cPickle
def unpickle(file):
""" unpickle the data """
fo = open(file, 'rb')
dict = cPickle.load(fo)
fo.close()
return dict | dbab180e31e7bff6ba965f48ee7a3018e2665763 | 7,460 |
def _calc_norm_gen_prob(sent_1, sent_2, mle_lambda, topic):
"""
Calculates and returns the length-normalized generative probability of sent_1 given sent_2.
"""
sent_1_len = sum([count for count in sent_1.raw_counts.values()])
return _calc_gen_prob(sent_1, sent_2, mle_lambda, topic) ** (1.0 / sent_1_len) | 7f84f1b0de67f9d6f631ad29aa1c614d6d3f13d6 | 7,461 |
def isomorphic(l_op, r_op):
""" Subject of definition, here it is equal operation.
See limintations (vectorization.rst).
"""
if l_op.getopnum() == r_op.getopnum():
l_vecinfo = forwarded_vecinfo(l_op)
r_vecinfo = forwarded_vecinfo(r_op)
return l_vecinfo.bytesize == r_vecinfo.bytesize
return False | e34c6928c4fdf10fed55bb20588cf3183172cab1 | 7,462 |
import numpy
import math
def partition5(l, left, right):
"""
Insertion Sort of list of at most 5 elements and return the position of the median.
"""
j = left
    for i in range(left, right + 1):
        t = numpy.copy(l[i])
        for j in range(i, left - 1, -1):
if l[j - 1][0] < t[0]:
break
l[j] = l[j - 1]
l[j] = t
return int(math.floor((left + right) / 2)) | 75c5c893c978e81a2b19b79c6000a2151ff6b088 | 7,463 |
def max_validator(max_value):
"""Return validator function that ensures upper bound of a number.
Result validation function will validate the internal value of resource
instance field with the ``value >= min_value`` check.
Args:
max_value: maximum value for new validator
"""
def validator(value):
if value > max_value:
raise ValidationError("{} is not <= {}".format(value, max_value))
return validator | 6957f507f7140aa58a9c969a04b9bde65da54319 | 7,464 |
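How the validator factory above is typically used; ValidationError is assumed
to come from the same resources framework as the original snippet.
validate_age = max_validator(130)
validate_age(42)          # passes silently
try:
    validate_age(200)
except ValidationError:
    print("200 is not <= 130")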
from kfp import dsl
def standalone_job_op(name, image, command, gpus=0, cpu_limit=0, memory_limit=0, env=[],
tensorboard=False, tensorboard_image=None,
data=[], sync_source=None, annotations=[],
metrics=['Train-accuracy:PERCENTAGE'],
arena_image='cheyang/arena_launcher:v0.5',
timeout_hours=240):
"""This function submits a standalone training Job
Args:
name: the name of standalone_job_op
image: the docker image name of training job
        data: specify the datasource to mount to the job, like <name_of_datasource>:<mount_point_on_job>
command: the command to run
"""
if not name:
raise ValueError("name must be specified")
if not image:
raise ValueError("image must be specified")
if not command:
raise ValueError("command must be specified")
options = []
if sync_source:
if not sync_source.startswith("http"):
raise ValueError("sync_source must be an http git url")
options.append('--sync-source')
options.append(str(sync_source))
for e in env:
options.append('--env')
options.append(str(e))
for d in data:
options.append('--data')
options.append(str(d))
for m in metrics:
options.append('--metric')
options.append(str(m))
if tensorboard_image:
options.append('--tensorboard-image')
options.append(str(tensorboard_image))
op = dsl.ContainerOp(
name=name,
image=arena_image,
command=['python','arena_launcher.py'],
arguments=[ "--name", name,
"--tensorboard", str(tensorboard),
"--image", str(image),
"--gpus", str(gpus),
"--cpu", str(cpu_limit),
"--step-name", '{{pod.name}}',
"--workflow-name", '{{workflow.name}}',
"--memory", str(memory_limit),
"--timeout-hours", str(timeout_hours),
] + options +
[
"job",
"--", str(command)],
file_outputs={'train': '/output.txt',
'workflow':'/workflow-name.txt',
'step':'/step-name.txt',
'name':'/name.txt'}
)
op.set_image_pull_policy('Always')
return op | 2c2c6c014fe841b6929153cd5590fa43210964ed | 7,465 |
import pickle as pkl
import random
import sys
import networkx as nx
import numpy as np
import scipy.sparse as sp
def load_randompdata(dataset_str, iter):
"""Load data."""
names = ['x', 'y', 'tx', 'ty', 'allx', 'ally', 'graph']
objects = []
for i in range(len(names)):
with open("data/ind.{}.{}".format(dataset_str, names[i]), 'rb') as f:
if sys.version_info > (3, 0):
objects.append(pkl.load(f, encoding='latin1'))
else:
objects.append(pkl.load(f))
x, y, tx, ty, allx, ally, graph = tuple(objects)
test_idx_reorder = parse_index_file("data/ind.{}.test.index".format(dataset_str))
test_idx_range = np.sort(test_idx_reorder)
if dataset_str == 'citeseer':
# Fix citeseer dataset (there are some isolated nodes in the graph)
# Find isolated nodes, add them as zero-vecs into the right position
test_idx_range_full = range(min(test_idx_reorder), max(test_idx_reorder)+1)
tx_extended = sp.lil_matrix((len(test_idx_range_full), x.shape[1]))
tx_extended[test_idx_range-min(test_idx_range), :] = tx
tx = tx_extended
ty_extended = np.zeros((len(test_idx_range_full), y.shape[1]))
ty_extended[test_idx_range-min(test_idx_range), :] = ty
ty = ty_extended
NL = 2312
NC = 6
elif dataset_str == 'cora':
NL = 1708
NC = 7
else:
NL = 18717
NC = 3
features = sp.vstack((allx, tx)).tolil()
features[test_idx_reorder, :] = features[test_idx_range, :]
adj = nx.adjacency_matrix(nx.from_dict_of_lists(graph))
labels = np.vstack((ally, ty))
labels[test_idx_reorder, :] = labels[test_idx_range, :]
#fixed 500 for validation read from file, choose random 20 per class from the others for train
'''
idx_test = test_idx_range.tolist()
idx_train = range(len(y))
idx_val = range(len(y), len(y)+500)
'''
idx_val=[int(item) for item in open("source/"+dataset_str+"/val_idx"+str(iter)+".txt").readlines()]
idx_test = test_idx_range.tolist()
idx_traincand = list(set(range(0,NL))-set(idx_val)) #train candiate, not test not valid
nontestlabels = labels[idx_traincand]
gtlabels = np.argmax(nontestlabels,axis=1)
idx_train = []
for i in range(NC):
nodeidx = np.where(gtlabels==i)
ridx = random.sample(range(0,nodeidx[0].shape[0]),20)
idx_train+=list(np.asarray(idx_traincand)[list(nodeidx[0][ridx])])
train_mask = sample_mask(idx_train, labels.shape[0])
val_mask = sample_mask(idx_val, labels.shape[0])
test_mask = sample_mask(idx_test, labels.shape[0])
y_train = np.zeros(labels.shape)
y_val = np.zeros(labels.shape)
y_test = np.zeros(labels.shape)
y_train[train_mask, :] = labels[train_mask, :]
y_val[val_mask, :] = labels[val_mask, :]
y_test[test_mask, :] = labels[test_mask, :]
return adj, features, y_train, y_val, y_test, train_mask, val_mask, test_mask | 353160ffd8b0474fc4c58532d1bcae80f4d6cbad | 7,466 |
def page_to_reload():
""" Returns page that is refreshed every
argument of content attribute in
meta http-equiv="refresh".
"""
val = knob_thread.val
year = int(val * 138./256 + 1880)
return (
"""<!DOCTYPE html>
<html>
<head><meta http-equiv="refresh" content=".2">
<style>
h1 {{color:white; font-family: Arial; font-size: 9em}}
</style>
</head>
<body bgcolor="{color}0000">
<h1>YEAR {year}</h1><br />
<h1>ANOMALY {anomaly}° </h1>
</body>
</html>
"""
).format(color=('%x' % val),
year=year,
anomaly=year_to_anomaly[year]) | 75c9a409a6dd936f23c9a54dae058fb7e8fd9e97 | 7,467 |
def svn_log_changed_path2_create(*args):
"""svn_log_changed_path2_create(apr_pool_t pool) -> svn_log_changed_path2_t"""
return _core.svn_log_changed_path2_create(*args) | f40cf409bfb458d35cb38ba76fa93c319803a992 | 7,468 |
import os
import astropy.units as u
import matplotlib.pyplot as plt
import numpy as np
def plot_timeseries(model, radius, lon, save=False, tag=''):
"""
Plot the solar wind model timeseries at model radius and longitude closest to those specified.
:param model: An instance of the HUXt class with a completed solution.
:param radius: Radius to find the closest model radius to.
:param lon: Longitude to find the closest model longitude to.
:param save: Boolean to determine if the figure is saved.
:param tag: String to append to the filename if saving the figure.
:return: fig: Figure handle
:return: ax: Axes handle
"""
if (radius < model.r.min()) | (radius > (model.r.max())):
print("Error, specified radius outside of model radial grid")
if model.lon.size != 1:
if (lon < model.lon.min()) | (lon > (model.lon.max())):
print("Error, input lon outside range of model longitudes. Defaulting to closest longitude")
id_lon = np.argmin(np.abs(model.lon - lon))
lon = model.lon[id_lon]
fig, ax = plt.subplots(figsize=(14, 7))
# Get plotting data
id_r = np.argmin(np.abs(model.r - radius))
r_out = model.r[id_r].value
if model.lon.size == 1:
id_lon = 0
lon_out = model.lon.value
else:
id_lon = np.argmin(np.abs(model.lon - lon))
lon_out = model.lon[id_lon].value
t_day = model.time_out.to(u.day)
ax.plot(t_day, model.v_grid[:, id_r, id_lon], 'k-')
ylab = 'Solar Wind Speed (km/s)'
ymin = 200
ymax = 1000
ax.set_ylim(ymin, ymax)
ax.set_ylabel(ylab)
ax.set_xlim(t_day.value.min(), t_day.value.max())
ax.set_xlabel('Time (days)')
fig.subplots_adjust(left=0.1, bottom=0.1, right=0.95, top=0.95)
# Add label
radius_label = " Radius: {:3.2f}".format(r_out) + "$R_{sun}$ "
lon_label = " Longitude: {:3.2f}".format(lon_out) + "$^\circ$"
label = "HUXt" + radius_label + lon_label
ax.set_title(label, fontsize=20)
#ax.legend(loc=1)
if save:
cr_num = np.int32(model.cr_num.value)
r_tag = np.int32(r_out)
lon_tag = np.int32(lon_out)
template_string = "HUXt1D_CR{:03d}_{}_time_series_radius_{:03d}_lon_{:03d}.png"
filename = template_string.format(cr_num, tag, r_tag, lon_tag)
filepath = os.path.join(model._figure_dir_, filename)
fig.savefig(filepath)
return fig, ax | 5538da1ee1cb62c60ba146a9709922b894a3cfca | 7,469 |
import os
import logging
import csv
import openpyxl as xl
def convert_csv_to_excel(csv_path):
"""
This function converts a csv file, given by its file path, to an excel file in the same directory with the same
name.
:param csv_path:string file path of CSV file to convert
:return: string file path of converted Excel file.
"""
(file_path, file_extension) = os.path.splitext(csv_path) # split the csv pathname to remove the extension
wb = xl.Workbook() # create the excel workbook
ws = wb.active # use the active sheet by default
logging.info("converting file to xlsx: '{}'".format(csv_path))
with open(csv_path, newline='') as csv_file: # append each row of the csv to the excel worksheet
rd = csv.reader(csv_file, delimiter=",", quotechar='"')
for row in rd:
ws.append(row)
output_path = os.path.join(file_path + '.xlsx') # output file path should be the same as the csv file
logging.info("saving to file: '{}'".format(output_path))
wb.save(output_path) # save the converted file
return output_path | 590505cdd3c53f0e860bb131d575956bae5c4031 | 7,470 |
from functools import wraps
def check_from_dict(method):
"""A wrapper that wrap a parameter checker to the original function(crop operation)."""
@wraps(method)
def new_method(self, *args, **kwargs):
word_dict, = (list(args) + [None])[:1]
if "word_dict" in kwargs:
word_dict = kwargs.get("word_dict")
        assert isinstance(word_dict, dict), "word_dict needs to be a dict of word: id pairs"
for word, word_id in word_dict.items():
assert isinstance(word, str), "each word in word_dict needs to be type str"
assert isinstance(word_id, int) and word_id >= 0, "each word id needs to be positive integer"
kwargs["word_dict"] = word_dict
return method(self, **kwargs)
return new_method | d45b68ccccbd4f97e585c7386f7da6547fdd86d6 | 7,471 |
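A hypothetical method wrapped with the checker above; the Vocab class exists
only for illustration.
class Vocab:
    @check_from_dict
    def from_dict(self, word_dict=None):
        return word_dict

Vocab().from_dict(word_dict={"hello": 0, "world": 1})  # passes validation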
def env_observation_space_info(instance_id):
"""
Get information (name and dimensions/bounds) of the env's
observation_space
Parameters:
- instance_id: a short identifier (such as '3c657dbc')
for the environment instance
Returns:
- info: a dict containing 'name' (such as 'Discrete'),
and additional dimensional info (such as 'n') which
varies from space to space
"""
info = envs.get_observation_space_info(instance_id)
return jsonify(info = info) | 2ee4b17a73ad49c6c63dc07f2822b0f2d1ece770 | 7,472 |
from typing import Set
from typing import Dict
def build_target_from_transitions(
dynamics_function: TargetDynamics,
initial_state: State,
final_states: Set[State],
) -> Target:
"""
Initialize a service from transitions, initial state and final states.
The set of states and the set of actions are parsed from the transition function.
This will guarantee that all the states are reachable.
:param dynamics_function: the transition function
:param initial_state: the initial state
:param final_states: the final states
:return: the service
"""
states = set()
actions = set()
transition_function: TransitionFunction = {}
policy: Dict[State, Dict[Action, Prob]] = {}
reward: Dict[State, Dict[Action, Reward]] = {}
for start_state, transitions_by_action in dynamics_function.items():
states.add(start_state)
transition_function[start_state] = {}
policy[start_state] = {}
reward[start_state] = {}
for action, (next_state, prob, reward_value) in transitions_by_action.items():
actions.add(action)
states.add(next_state)
transition_function[start_state][action] = next_state
policy[start_state][action] = prob
reward[start_state][action] = reward_value
unreachable_final_states = final_states.difference(states)
assert (
len(unreachable_final_states) == 0
), f"the following final states are not in the transition function: {unreachable_final_states}"
assert initial_state in states, "initial state not in the set of states"
return Target(
states,
actions,
final_states,
initial_state,
transition_function,
policy,
reward,
) | d1014560c05e6f3169c65725d94af20494d97f0a | 7,473 |
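A sketch of building a tiny two-state target with the helper above, assuming
plain strings are acceptable State/Action values for the Target constructor.
dynamics = {
    "s0": {"go": ("s1", 1.0, 0.0)},    # state -> action -> (next_state, prob, reward)
    "s1": {"stay": ("s1", 1.0, 1.0)},
}
target = build_target_from_transitions(dynamics, initial_state="s0", final_states={"s1"})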
from typing import Optional
from datetime import datetime
def citation(dll_version: Optional[str] = None) -> dict:
"""
Return a citation for the software.
"""
executed = datetime.now().strftime("%B %d, %Y")
bmds_version = __version__
url = "https://pypi.org/project/bmds/"
if not dll_version:
# assume we're using the latest version
dll_version = get_latest_dll_version()
return dict(
paper=(
"Pham LL, Watford S, Friedman KP, Wignall J, Shapiro AJ. Python BMDS: A Python "
"interface library and web application for the canonical EPA dose-response modeling "
"software. Reprod Toxicol. 2019;90:102-108. doi:10.1016/j.reprotox.2019.07.013."
),
software=(
f"Python BMDS. (Version {bmds_version}; Model Library Version {dll_version}) "
f"[Python package]. Available from {url}. Executed on {executed}."
),
) | 1196e1de2c2431120467eac83701022f1b4d9840 | 7,474 |
def comment_pr_(ci_data, github_token):
"""Write either a staticman comment or non-staticman comment to
github.
"""
return sequence(
(comment_staticman(github_token) if is_staticman(ci_data) else comment_general),
post(github_token, ci_data),
lambda x: dict(status_code=x.status_code, json=x.json()),
)(ci_data) | 548f854a37fe95b83660bc2ec4012cda72317976 | 7,475 |
from tensorflow.keras import backend as K
from tensorflow.keras import layers as L
from tensorflow.keras.models import Model
def response_loss_model(h, p, d_z, d_x, d_y, samples=1, use_upper_bound=False, gradient_samples=0):
"""
Create a Keras model that computes the loss of a response model on data.
Parameters
----------
h : (tensor, tensor) -> Layer
Method for building a model of y given p and x
p : (tensor, tensor) -> Layer
Method for building a model of p given z and x
d_z : int
The number of dimensions in z
d_x : int
Tbe number of dimensions in x
d_y : int
The number of dimensions in y
samples: int
The number of samples to use
use_upper_bound : bool
Whether to use an upper bound to the true loss
(equivalent to adding a regularization penalty on the variance of h)
gradient_samples : int
The number of separate additional samples to use when calculating the gradient.
This can only be nonzero if user_upper_bound is False, in which case the gradient of
the returned loss will be an unbiased estimate of the gradient of the true loss.
Returns
-------
A Keras model that takes as inputs z, x, and y and generates a single output containing the loss.
"""
assert not(use_upper_bound and gradient_samples)
# sample: (() -> Layer, int) -> Layer
def sample(f, n):
assert n > 0
if n == 1:
return f()
else:
return L.average([f() for _ in range(n)])
z, x, y = [L.Input((d,)) for d in [d_z, d_x, d_y]]
if gradient_samples:
# we want to separately sample the gradient; we use stop_gradient to treat the sampled model as constant
# the overall computation ensures that we have an interpretable loss (y-h̅(p,x))²,
# but also that the gradient is -2(y-h̅(p,x))∇h̅(p,x) with *different* samples used for each average
diff = L.subtract([y, sample(lambda: h(p(z, x), x), samples)])
grad = sample(lambda: h(p(z, x), x), gradient_samples)
def make_expr(grad, diff):
return K.stop_gradient(diff) * (K.stop_gradient(diff + 2 * grad) - 2 * grad)
expr = L.Lambda(lambda args: make_expr(*args))([grad, diff])
elif use_upper_bound:
expr = sample(lambda: L.Lambda(K.square)(L.subtract([y, h(p(z, x), x)])), samples)
else:
expr = L.Lambda(K.square)(L.subtract([y, sample(lambda: h(p(z, x), x), samples)]))
return Model([z, x, y], [expr]) | 898e72f29a9c531206d0243b8503761844468665 | 7,476 |
import numpy
import datetime
def get_hourly_load(session, endpoint_id, start_date, end_date):
"""
:param session: session for the database
:param endpoint_id: id for the endpoint
:param start_date: datetime object
:param end_date: datetime object and: end_date >= start_date
:return:
"""
numdays = (end_date - start_date).days + 1
# list of hours: 0:00 - 23:00
hours = ['0{}:00'.format(h) for h in range(0, 10)] + ['{}:00'.format(h) for h in range(10, 24)]
heatmap_data = numpy.zeros((len(hours), numdays))
start_datetime = to_utc_datetime(
datetime.datetime.combine(start_date, datetime.time(0, 0, 0, 0))
)
end_datetime = to_utc_datetime(datetime.datetime.combine(end_date, datetime.time(23, 59, 59)))
for time, count in get_num_requests(session, endpoint_id, start_datetime, end_datetime):
parsed_time = datetime.datetime.strptime(time, '%Y-%m-%d %H:%M:%S')
day_index = (parsed_time - start_datetime).days
hour_index = int(to_local_datetime(parsed_time).strftime('%H'))
heatmap_data[hour_index][day_index] = count
return {
'days': [
(start_date + datetime.timedelta(days=i)).strftime('%Y-%m-%d') for i in range(numdays)
],
"data": heatmap_data.tolist(),
} | cf619b12778edfaf27d89c43226079aafc650ac4 | 7,477 |
def startend(start=None, end=None):
"""Return TMIN, TAVG, TMAX."""
# Select statement
sel = [func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs)]
if not end:
# Calculate TMIN, TAVG, TMAX for dates greater than start
results = session.query(*sel).\
filter(Measurement.date >= start).all()
print(results) # This is a list of tuples
# Convert list of tuples into normal list
temps = list(np.ravel(results))
return jsonify(temps)
# Calculate TMIN, TAVG, TMAX with start and stop
results = session.query(*sel).\
filter(Measurement.date >= start).\
filter(Measurement.date <= end).all()
print(results) # This is a list of tuples
# Convert list of tuples into normal list
temps = list(np.ravel(results))
print(temps) # This is a normal list
return jsonify(temps) | 7b8f395fd177d5352b14c12902acea1a641c5df8 | 7,478 |
import numpy as np
def configure_camera(config):
"""
Configures the camera.
:param config: dictionary containing BARD configuration
parameters optional parameters in camera.
source (default 0), window size (default delegates to
cv2.CAP_PROP_FRAME_WIDTH), calibration directory and
roi (region of interest)
"""
# Specify some reasonable defaults. Webcams are typically 640x480.
video_source = 0
dims = None
mtx33d = np.array([[1000.0, 0.0, 320.0],
[0.0, 1000.0, 240.0],
[0.0, 0.0, 1.0]])
dist5d = np.array([0.0, 0.0, 0.0, 0.0, 0.0])
roi = None
if config is None:
return video_source, mtx33d, dist5d, dims, roi
camera_config = config.get('camera', None)
if camera_config is not None:
video_source = camera_config.get('source', 0)
calib_dir = camera_config.get('calibration directory', None)
calib_prefix = camera_config.get('calibration prefix', 'calib')
if calib_dir is not None:
calib_param = MonoCalibrationParams()
calib_param.load_data(calib_dir, calib_prefix,
halt_on_ioerror = False)
mtx33d = calib_param.camera_matrix
dist5d = calib_param.dist_coeffs
dims = camera_config.get('window size', None)
if dims is None:
print("WARNING: window size was not specified! "
"This probably breaks the calibrated overlay!")
else:
# JSON file contains list, OpenCV requires tuple.
if len(dims) != 2:
raise ValueError("Invalid window size given, window size",
" should be list of length 2")
dims = (dims[0], dims[1])
roi = camera_config.get('roi', None)
if roi is not None:
if len(roi) != 4:
raise ValueError("Invalid roi set. Region of interest should",
" be a list of length 4. [x_start, y_start, x_end, y_end]")
return video_source, mtx33d, dist5d, dims, roi | 8accdaf9d710ff2ccff6d4ad5216611593e06ff0 | 7,479 |
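A configuration sketch for the function above; the source index and window
size are illustrative, and no calibration directory is given, so the default
camera matrix and distortion coefficients are returned.
config = {"camera": {"source": 0, "window size": [640, 480]}}
source, mtx33d, dist5d, dims, roi = configure_camera(config)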
def munsell_value_Moon1943(Y: FloatingOrArrayLike) -> FloatingOrNDArray:
"""
Return the *Munsell* value :math:`V` of given *luminance* :math:`Y` using
*Moon and Spencer (1943)* method.
Parameters
----------
Y
*luminance* :math:`Y`.
Returns
-------
:class:`np.floating` or :class:`numpy.ndarray`
*Munsell* value :math:`V`.
Notes
-----
+------------+-----------------------+---------------+
| **Domain** | **Scale - Reference** | **Scale - 1** |
+============+=======================+===============+
| ``Y`` | [0, 100] | [0, 1] |
+------------+-----------------------+---------------+
+------------+-----------------------+---------------+
| **Range** | **Scale - Reference** | **Scale - 1** |
+============+=======================+===============+
| ``V`` | [0, 10] | [0, 1] |
+------------+-----------------------+---------------+
References
----------
:cite:`Wikipedia2007c`
Examples
--------
>>> munsell_value_Moon1943(12.23634268) # doctest: +ELLIPSIS
4.0688120...
"""
Y = to_domain_100(Y)
V = 1.4 * spow(Y, 0.426)
return as_float(from_range_10(V)) | 7e419c8936fa49f35a5838aa7d3a5d99c93808f2 | 7,480 |
import functools
import traceback
def log_errors(func):
"""
A wrapper to print exceptions raised from functions that are called by callers
that silently swallow exceptions, like render callbacks.
"""
@functools.wraps(func)
def wrapper(*args, **kwargs):
try:
return func(*args, **kwargs)
except Exception as e:
# Exceptions from calls like this aren't well-defined, so just log the
# error and don't reraise it.
traceback.print_exc()
return wrapper | a15c26de36a8c784da0333382f27fc06b0ed78a0 | 7,481 |
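Decorating a callback with log_errors above: the traceback is printed and the
exception is swallowed, so the caller receives None.
@log_errors
def render_callback(frame):
    return 1 / 0  # raises ZeroDivisionError

render_callback(None)  # traceback printed, returns None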
def count_total_words(sentence_list):
"""
    Counts the words in a list of sentences.
    :param sentence_list: a list of sentences, each given as a list of words
    :return: the total number of words in the sentences
"""
return sum(
[count_words_per_sentence(sentence) for sentence in sentence_list]
) | 0abc550c26b40fd36d0b9540fc1cd001e40a7552 | 7,482 |
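A small example of the counter above, assuming count_words_per_sentence simply
returns the number of words in one sentence.
sentences = [["the", "cat", "sat"], ["on", "the", "mat"]]
print(count_total_words(sentences))  # 6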
def translate_dbpedia_url(url):
"""
Convert an object that's defined by a DBPedia URL to a ConceptNet
URI. We do this by finding the part of the URL that names the object,
and using that as surface text for ConceptNet.
This is, in some ways, abusing a naming convention in the Semantic Web.
The URL of an object doesn't have to mean anything at all. The
human-readable name is supposed to be a string, specified by the "name"
relation.
The problem here is that the "name" relation is not unique in either
direction. A URL can have many names, and the same name can refer to
many URLs, and some of these names are rarely used or are the result of
parsing glitches. The URL itself is a stable thing that we can build a
ConceptNet URI from, on the other hand.
"""
if '__' in url or 'dbpedia.org' not in url:
return None
parsed = parse_url(url)
domain = parsed.netloc
if '.' not in domain:
return None
if domain == 'dbpedia.org':
# Handle old DBPedia URLs that had no language code
domain = 'en.dbpedia.org'
domain_parts = domain.split('.', 1)
if domain_parts[1] == 'dbpedia.org':
lang = domain_parts[0]
if lang in LCODE_ALIASES:
lang = LCODE_ALIASES[lang]
if lang not in ALL_LANGUAGES:
return None
text = resource_name(url).replace('_', ' ')
uri = topic_to_concept(lang, text)
if uri in CONCEPT_BLACKLIST:
return None
else:
return uri
else:
return None | 2a6b99ca59216c97dc1cfd90f1d8f4c01ad5f9f2 | 7,483 |
def setSecurityPolicy(aSecurityPolicy):
"""Set the system default security policy.
This method should only be caused by system startup code. It should
never, for example, be called during a web request.
"""
last = _ImplPython._defaultPolicy
_ImplPython._defaultPolicy = aSecurityPolicy
return last | 7063b83f1c2492b8684a43f64c9d2a49ae2ca61b | 7,484 |
import re
import numpy as np
def map_sentence2ints(sentence):
    """Map a sentence to an array of vocabulary indices."""
word_list = re.findall(r"[\w']+|[.,!?;]", sentence)
int_list = [const.INPUTVOCABULARY.index(word) for word in word_list]
return np.array(int_list).astype(np.int32) | 6dcb2917c817aa2e394c313fb273d466b6fb1ea9 | 7,485 |
from fastapi import HTTPException, Security
from fastapi.security.api_key import APIKeyHeader
def get_api_key(
api_key_header: str = Security(
APIKeyHeader(name=settings.API_KEY_HEADER, auto_error=False)
)
) -> str:
"""
    This function checks the header and its value for correct authentication;
    if the key does not match, a 403 error is raised:
- api_key_header = Security api header
https://github.com/tiangolo/fastapi/issues/142
"""
    if api_key_header != settings.API_KEY:
        raise HTTPException(status_code=403, detail="Invalid or missing API key")
    return api_key_header | 50121c0d16455862552c58e7478ef383b68e71c7 | 7,486 |
def add_pred_to_test(test_df, pred_np, demo_col_list, days):
"""
    INPUT:
        - test_df (pandas DataFrame): test set to annotate
        - pred_np (numpy array): predicted scores, one per row of test_df
        - demo_col_list (list): demographic columns to cast to string
        - days (int): threshold on time_in_hospital for the binary label
    OUTPUT:
        - test_df (pandas DataFrame): copy with added 'score' and 'label_value' columns
"""
test_df = test_df.copy()
for c in demo_col_list:
test_df[c] = test_df[c].astype(str)
test_df['score'] = pred_np
test_df['label_value'] = test_df['time_in_hospital'].apply(lambda x: 1 if x >=days else 0)
return test_df | aec48bd6201e1a9a1ebd6f96c4c8b7cfd9304607 | 7,487 |
def getCriticality(cvss):
""" color convention fot the cells of the PDF """
if cvss == 0.0:
return ("none", "#00ff00", (0, 255, 0))
if cvss < 3.1:
return ("low", "#ffff00", (255, 255, 0))
if cvss < 6.1:
return ("medium", "#ffc800", (255, 200, 0))
if cvss < 9.1:
return ("high", "#ff6400", (255, 100, 0))
return ("critical", "#cc0000", (200, 0, 0)) | a4167b2f576dcb361641f7fe0280c212673f0157 | 7,488 |
from typing import List
import numpy as np
def combine_groups(groups: List[np.ndarray], num_features: int) -> np.ndarray:
"""
Combines the given groups back into a 2d measurement matrix.
Args:
groups: A list of 1d, flattened groups
num_features: The number of features in each measurement (D)
Returns:
A [K, D] array containing the recovered measurements.
"""
flattened = np.concatenate(groups) # [K * D]
return flattened.reshape(num_features, -1).T | 906c69fabcb62a60f12fd3c4bafc711aa971ad19 | 7,489 |
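A worked example of the reshaping above: three measurements (K=3) with two
features (D=2), given one flattened group per feature.
import numpy as np

groups = [np.array([1, 2, 3]), np.array([4, 5, 6])]
print(combine_groups(groups, num_features=2))
# [[1 4]
#  [2 5]
#  [3 6]]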
import functools
def skippable(*prompts, argument=None):
"""
Decorator to allow a method on the :obj:`CustomCommand` to be
skipped.
Parameters:
----------
prompts: :obj:iter
A series of prompts to display to the user when the method is being
skipped.
argument: :obj:`str`
By default, the management command argument to indicate that the method
should be skipped will be `skip_<func_name>`. If the argument should
be different, it can be explicitly provided here.
"""
def decorator(func):
@functools.wraps(func)
def inner(instance, *args, **kwargs):
parameter = argument or "skip_%s" % func.__name__
if parameter in kwargs and kwargs[parameter] is True:
instance.prompt(*prompts,
style_func=instance.style.HTTP_NOT_MODIFIED)
return False
else:
return func(instance, *args, **kwargs)
return inner
return decorator | 879106f4cc0524660fb6639e56d688d40b115ac4 | 7,490 |
import torch
def collate(
samples,
pad_idx,
eos_idx,
left_pad_source=True,
left_pad_target=False,
input_feeding=True,
):
"""
    Differences from fairseq.data.language_pair_dataset.collate:
    1. The key for prev_output_tokens is no longer 'target' but 'prev_output_tokens'
       (because prev_output_tokens is customized here).
    2. positions were added (by default positions start from 1; MASS keeps the positions from the original sentence).
    TODO:
    1. Ordering problem for the new keys:
        Strategy 0: rewrite everything: https://coding.jd.com/alphaw/fairseq_ext/blob/a336c4529822271417fff86a06dcd9f2b0945592/src/data/mask_language_pair_dataset.py
        Strategy 1: also sort once in the subclass, assuming the sort result is deterministic. (Currently adopted; the code still looks redundant.)
        Strategy 2: add a more_keys argument to collate, or reorder everything under net_input (TODO: use strategy 1 for now).
"""
if len(samples) == 0:
return {}
def merge(key, left_pad, move_eos_to_beginning=False, pad_to_length=None):
return data_utils.collate_tokens(
[s[key] for s in samples],
pad_idx, eos_idx, left_pad, move_eos_to_beginning,
pad_to_length=pad_to_length,
)
batch = _collate(samples, pad_idx, eos_idx, left_pad_source=left_pad_source, left_pad_target=left_pad_target,
input_feeding=input_feeding)
# patch
src_lengths = torch.LongTensor([
s['source'].ne(pad_idx).long().sum() for s in samples
])
src_lengths, sort_order = src_lengths.sort(descending=True)
    prev_output_positions = merge('prev_output_positions', left_pad=left_pad_target).index_select(0, sort_order) # changed
    batch['net_input']['prev_output_positions'] = prev_output_positions # changed
return batch | 0343a5adb70c163599b91e9742b5302c1482cca8 | 7,491 |
import hashlib
def _cache_name(address):
"""Generates the key name of an object's cache entry"""
    addr_hash = hashlib.md5(address.encode("utf-8")).hexdigest()
return "unsub-{hash}".format(hash=addr_hash) | 6933b1170933df5e3e57af03c81322d68a46d91f | 7,492 |
def format_currency(
value: Decimal,
currency: str | None = None,
show_if_zero: bool = False,
invert: bool = False,
) -> str:
"""Format a value using the derived precision for a specified currency."""
if not value and not show_if_zero:
return ""
if value == ZERO:
return g.ledger.format_decimal(ZERO, currency)
if invert:
value = -value
return g.ledger.format_decimal(value, currency) | 197dc15c799e1866526a944e0f1f8217e97cf785 | 7,493 |
import os
from pathlib import Path
from typing import Union
from packaging import version
def guess_ghostscript() -> str:
"""Guess the path to ghostscript. Only guesses well on Windows.
Should prevent people from needing to add ghostscript to PATH.
"""
if os.name != 'nt':
return 'gs' # I'm not sure where to look on non-Windows OSes so just guess 'gs'.
def sort_by_version(v: Path) -> Union[version.Version, version.LegacyVersion]:
return version.parse(v.name[2:]) # When this is an inline lambda mypy and pylint fuss.
locations = 'C:\\Program Files\\gs', 'C:\\Program Files (x86)\\gs'
files = 'gswin64c.exe', 'gswin32c.exe', 'gs.exe'
for location in locations:
path = Path(location)
if path.exists():
versions = [v for v in path.iterdir() if v.is_dir() and v.name.startswith('gs')]
versions.sort(key=sort_by_version, reverse=True)
for v in versions:
for file in files:
exe = v / 'bin' / file
if exe.exists():
return str(exe)
return 'gswin64c' | 09e8761185f6029025d8d6cc6861672870e781b2 | 7,494 |
def supplemental_div(content):
"""
Standardize supplemental content listings
Might not be possible if genus and tree content diverge
"""
return {'c': content} | b42e868ef32f387347cd4a97328794e6628fe634 | 7,495 |
def viewTypes():
"""View types of item when sent through slash command"""
user_id, user_name, channel_id = getUserData(request.form)
checkUser(user_id)
itemType = request.form.get('text')
try:
text = viewTypesItems(itemType)
except ItemNotInPantry:
reply = "Sorry! But either the spelling is wrong or the item is currently unavailable.\nPlease view items in the pantry to check."
client.chat_postMessage(channel=channel_id, blocks=itemExceptionBlock(reply))
return Response(), 200
client.chat_postMessage(channel=channel_id, blocks=viewTypesItemBlock(text))
return Response(), 200 | aea7633a1092c68a5ccf3a5619eee9d74dafdca2 | 7,496 |
import numpy as np
import pandas as pd
from scipy.stats import skew
def load_and_preprocess():
"""
Load the data (either train.csv or test.csv) and pre-process it with some simple
transformations. Return in the correct form for usage in scikit-learn.
    Arguments
    ---------
    None; the function reads data/train.csv and data/test.csv directly.
Returns
-------
X_train: numpy.array
array containing features of training set
X_test: numpy.array
array containing features of test set
y: numpy.array
array containing labels for training set
test_ID: numpy.array
IDs for test set, for submission
"""
train = pd.read_csv("data/train.csv")
test = pd.read_csv("data/test.csv")
data = pd.concat((train.loc[:,'MSSubClass':'SaleCondition'],\
test.loc[:,'MSSubClass':'SaleCondition']))
#first extract the target variable, and log-transform because the prices are very skewed
y_train = np.log1p(train['SalePrice'].values)
#one hot encoding for categorical variables
data = pd.get_dummies(data)
#first find which numerical features are significantly skewed and transform them to log(1 + x)
numerical = data.dtypes[data.dtypes!='object'].index
skewed = data[numerical].apply(lambda u: skew(u.dropna()))
skewed = skewed[skewed > 0.75].index
data[skewed] = np.log1p(data[skewed])
#if numerical values are missing, replace with median from that column
data = data.fillna(data.mean())
    X_train = data[:train.shape[0]].values
    X_test = data[train.shape[0]:].values
return X_train,X_test,y_train,test.Id | 3202c4aaf76af0695594c39641dd4892b1215d97 | 7,497 |
from datetime import datetime
def _dates2absolute(dates, units):
"""
Absolute dates from datetime object
Parameters
----------
dates : datetime instance or array_like of datetime instances
Instances of pyjams.datetime class
units : str
'day as %Y%m%d.%f', 'month as %Y%m.%f', or 'year as %Y.%f'
Returns
-------
longdouble or array_like of longdouble
absolute dates
Examples
--------
>>> dt = [datetime(1990, 1, 1), datetime(1991, 1, 1)]
>>> dec = _dates2absolute(dt, 'day as %Y%m%d.%f')
>>> print(np.around(dec, 1))
[19900101.0, 19910101.0]
"""
mdates = input2array(dates, default=datetime(1990, 1, 1))
# wrapper might be slow
out = [ _date2absolute(dd, units) for dd in mdates ]
out = array2input(out, dates)
return out | ef823887ec410d7f7d0c5c54d12005ab35744c0c | 7,498 |
def Mix_GetNumMusicDecoders():
"""Retrieves the number of available music decoders.
The returned value can differ between runs of a program due to changes in
the availability of the shared libraries required for supporting different
formats.
Returns:
int: The number of available music decoders.
"""
return _funcs["Mix_GetNumMusicDecoders"]() | a91b84c42701cdaeb7f400a3091bb869e477ff06 | 7,499 |