content (stringlengths 35–762k) | sha1 (stringlengths 40–40) | id (int64 0–3.66M)
---|---|---|
def inq_affine(inp, n_outmaps, base_axis=1, num_bits=4,
inq_iterations=(), selection_algorithm='random',
seed=-1, w_init=None, i_init=None, b_init=None,
fix_parameters=False, rng=None, with_bias=True):
"""Incremental Network Quantization Affine Layer
During training, the weights are sequentially quantized to power-of-two
values, which allows the training of a multiplierless network.
Using `inq_iterations`, one can specify after how many forward passes
half of the learnable weights are fixed and quantized to powers-of-two.
After reaching the last value in `inq_iterations`, all weights are fixed.
For more details, please refer to the reference.
Reference:
Zhou A, Yao A, Guo Y, Xu L, Chen Y. Incremental network quantization:
Towards lossless CNNs with low-precision weights.
<https://arxiv.org/abs/1702.03044>
Args:
inp (~nnabla.Variable): Input N-D array with shape (:math:`M_0 \\times \ldots \\times M_{B-1} \\times D_B \\times \ldots \\times D_N`). Dimensions before and after base_axis are flattened as if it was a matrix.
n_outmaps (int or :obj:`tuple` of :obj:`int`): Number of output neurons per data.
base_axis (int): Dimensions up to `base_axis` are treated as the sample dimensions.
num_bits (int): Number of bits per weight. Value has to be larger than 1 as one bit is already used to code the value "0"
inq_iterations (tuple of int): Tuple of iteration numbers at which we fix half of the weights.
selection_algorithm (str): Chooses algorithm that is used to decide which weights are fixed. ("largest_abs" ... fix weights with largest absolute value, "random" ... fix weights randomly)
seed (int): Random seed for INQ algorithm
w_init (~nnabla.initializer.BaseInitializer): Initializer for the weight.
i_init (~nnabla.initializer.BaseInitializer): Initializer for the indicators (0 ... learnable, 1 ... fixed).
b_init (~nnabla.initializer.BaseInitializer): Initializer for the bias.
fix_parameters (bool): When set to `True`, the weight and bias will not be updated.
rng (numpy.random.RandomState): Random generator for Initializer.
with_bias (bool): Specify whether to include the bias term.
Returns:
:class:`~nnabla.Variable`
"""
if not hasattr(n_outmaps, '__iter__'):
n_outmaps = [n_outmaps]
n_outmaps = list(n_outmaps)
n_outmap = int(np.prod(n_outmaps))
if w_init is None:
fan_in = np.prod(inp.shape[base_axis:])
w_init = UniformInitializer(
calc_uniform_lim_glorot(fan_in, n_outmap), rng=rng)
if i_init is None:
fan_in = np.prod(inp.shape[base_axis:])
i_init = ConstantInitializer()
if b_init is None:
b_init = ConstantInitializer()
w = get_parameter_or_create(
"W", [int(np.prod(inp.shape[base_axis:]))] + n_outmaps,
w_init, not fix_parameters)
i = get_parameter_or_create(
"I", [int(np.prod(inp.shape[base_axis:]))] + n_outmaps,
i_init, False)
b = None
if with_bias:
b = get_parameter_or_create(
"b", n_outmaps, b_init, not fix_parameters)
return F.inq_affine(inp, w, i, b, base_axis, num_bits, inq_iterations, selection_algorithm, seed) | 7654d925a13276002419c9d08342e4e7974bdc31 | 19,623 |
def is_greater_equal(min_value):
"""Check if the attribute value is greater than or equal to a minimum value.
This validator can handle both lists and single element attributes. If it
is a list, it checks if the element with the smallest value is greater than
or equal to the specified minimum value.
"""
def compare(self, attribute, value):
if type(value) is not list:
value = [value]
if np.min(value) < min_value:
_logger.error(
f"{attribute.name} cannot be smaller than {min_value}!",
_logger.ExceptionTypes.ValueError,
)
return compare | a95cc24afcae6d11689b872f2178f6b38b864ca7 | 19,624 |
from typing import List, Text
def save_lyrics(list_: List[Text], location: Text) -> None:
"""Writes 'list_' to 'location' as txt file. Returns None."""
with open(location, "w+") as f:
for element in list_:
f.write(element)
f.write("\n")
return None | 1fff7cf838fdaea32f6875beab90b172a84f379c | 19,625 |
def translate_provider_for_icon(sync_server, project, site):
"""
Get provider for 'site'
This is used for getting icon, 'studio' should have different icon
then local sites, even the provider 'local_drive' is same
"""
if site == sync_server.DEFAULT_SITE:
return sync_server.DEFAULT_SITE
return sync_server.get_provider_for_site(site=site) | 58867a6dd44c83582d85fc1baf48121eff714232 | 19,626 |
def server_delete_ip(body=None): # noqa: E501
"""delete server IP
Send by server during shutdown. # noqa: E501
:param body: port of the iperf server. IP and time may be empty
:type body: dict | bytes
:rtype: List[InlineResponse200]
"""
if connexion.request.is_json:
body = ServerAddr.from_dict(connexion.request.get_json()) # noqa: E501
return 'do some magic!' | 92b2f425ae7cca1e3e42c58382f3d21b2e96f016 | 19,627 |
import re
def extract_key_and_index(field):
"""Returns the key type, key name and if key is a compound list then returns the index pointed by the field
Arguments:
field: csv header field
"""
for key_type, value in KEY_TYPES.items():
regex = re.compile(value["regex"])
match = regex.match(field)
if match:
return tuple([key_type] + list(match.groups()))
return None | aba66922117cd14f2c670df839c3ca522856caa3 | 19,628 |
import torch
def as_mask(indexes, length):
"""
Convert indexes into a binary mask.
Parameters:
indexes (LongTensor): positive indexes
length (int): maximal possible value of indexes
"""
mask = torch.zeros(length, dtype=torch.bool, device=indexes.device)
mask[indexes] = 1
return mask | 0235d66f9ee5bdc7447819122b285d29efd238c9 | 19,629 |
def interpolate_array(x, y, smooth_rate=500):
"""
:param x:
:param y:
:return:
"""
interp_obj = interpolate.PchipInterpolator(x, y)
new_x = np.linspace(x[0], x[-1], smooth_rate)
new_y = interp_obj(new_x)
return new_x, new_y | 4c6f79c3071496e6314d772651d2e6cc6a449c74 | 19,630 |
from typing import Sequence
from typing import Counter
import copy
def calculate_resource_utilization_for_slaves(
slaves: Sequence[_SlaveT], tasks: Sequence[MesosTask]
) -> ResourceUtilizationDict:
""" Given a list of slaves and a list of tasks, calculate the total available
resource available in that list of slaves, and the resources consumed by tasks
running on those slaves.
:param slaves: a list of slaves to calculate resource usage for
:param tasks: the list of tasks running in the mesos cluster
:returns: a dict, containing keys for "free" and "total" resources. Each of these keys
is a ResourceInfo tuple, exposing a number for cpu, disk and mem.
"""
resource_total_dict: _Counter[str] = Counter()
for slave in slaves:
filtered_resources = filter_mesos_state_metrics(slave["resources"])
resource_total_dict.update(Counter(filtered_resources))
resource_free_dict = copy.deepcopy(resource_total_dict)
for task in tasks:
task_resources = task["resources"]
resource_free_dict.subtract(Counter(filter_mesos_state_metrics(task_resources)))
for slave in slaves:
filtered_resources = filter_mesos_state_metrics(
reserved_maintenence_resources(slave["reserved_resources"])
)
resource_free_dict.subtract(Counter(filtered_resources))
return {
"free": ResourceInfo(
cpus=resource_free_dict["cpus"],
disk=resource_free_dict["disk"],
mem=resource_free_dict["mem"],
gpus=resource_free_dict.get("gpus", 0),
),
"total": ResourceInfo(
cpus=resource_total_dict["cpus"],
disk=resource_total_dict["disk"],
mem=resource_total_dict["mem"],
gpus=resource_total_dict.get("gpus", 0),
),
"slave_count": len(slaves),
} | 13b4856b3ef0bdf06410a58ecc0fbc29bfda4483 | 19,631 |
def check_pass(value):
"""
This test always passes (it is used for 'checking' things like the
workshop address, for which no sensible validation is feasible).
"""
return True | aa3a5f536b5bc729dc37b7f09c3b997c664b7481 | 19,632 |
def state_array_to_int(s):
"""translates a state s into an integer by interpreting the state as a
binary representation"""
return int(state_array_to_string(s), 2) | b0b50dd879b74af27946cde49e1bf805c2d6e504 | 19,633 |
import asyncio
import traceback
def async_task(coro, loop=asyncio.get_event_loop(), error_cb=None):
"""
Wrapper to always print exceptions for asyncio tasks.
"""
future = asyncio.ensure_future(coro)
def exception_logging_done_cb(future):
try:
e = future.exception()
except asyncio.CancelledError:
return
if e is not None:
log.critical('Unhandled exception in async future: {}: {}\n{}',
type(e).__name__, e, ''.join(traceback.format_tb(e.__traceback__)))
if error_cb is not None:
error_cb()
loop.call_exception_handler({
'message': 'Unhandled exception in async future',
'future': future,
'exception': e,
})
future.add_done_callback(exception_logging_done_cb)
return future | 5520aafebca17cbe32c79b69e41856f6076179f3 | 19,634 |
def is_valid_charts_yaml(content):
"""
Check if 'content' contains mandatory keys
:param content: parsed YAML file as a list of dictionaries of key/value pairs
:return: True if dict contains mandatory values, else False
"""
# Iterate on each list cell
for chart_details in content:
# If one of the keys is missing or, is None
if not all(chart_details.get(x) is not None
and x in chart_details
for x in ['chart_name', 'helm_repo_name', 'name_space', 'values_file', 'private_image']):
return False
# If one of the keys is not a string
if not all(type(chart_details.get(x)) is str
for x in ['chart_name', 'helm_repo_name', 'name_space', 'values_file']):
return False
# If one of the keys is not a boolean
if not all(type(chart_details.get(x)) is bool
for x in ['private_image']):
return False
if not all(type(chart_details.get(x)) is list
for x in ['extra_executes']):
return False
return True | cc68ba6bc9166f8d2f8c37da756accec667f471a | 19,635 |
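A minimal usage sketch for the validator above, assuming the YAML has already been parsed (e.g. with PyYAML); the chart entry shown is hypothetical.
parsed_content = [
    {
        "chart_name": "nginx",
        "helm_repo_name": "stable",
        "name_space": "web",
        "values_file": "values/nginx.yaml",
        "private_image": False,
        "extra_executes": [],
    }
]
print(is_valid_charts_yaml(parsed_content))             # True
print(is_valid_charts_yaml([{"chart_name": "nginx"}]))  # False: mandatory keys missing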
def get_trader_fcas_availability_agc_status_condition(params) -> bool:
"""Get FCAS availability AGC status condition. AGC must be enabled for regulation FCAS."""
# Check AGC status if presented with a regulating FCAS offer
if params['trade_type'] in ['L5RE', 'R5RE']:
# AGC is active='1', AGC is inactive='0'
return True if params['agc_status'] == '1' else False
# Return True if presented with a contingency FCAS offer (AGC doesn't need to be enabled)
else:
return True | fa73ae12a0934c76f12c223a05161280a6dc01f1 | 19,636 |
import importlib
def load_class(full_class_string):
"""
dynamically load a class from a string
"""
class_data = full_class_string.split(".")
module_path = ".".join(class_data[:-1])
class_str = class_data[-1]
module = importlib.import_module(module_path)
# Finally, we retrieve the Class
return getattr(module, class_str) | 95ab0a0b27d508b5bd41468cf6a4e3c008799fdf | 19,637 |
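As a quick usage sketch, `load_class` resolves a dotted import path to the class object; here it is shown with a standard-library class.
OrderedDict = load_class("collections.OrderedDict")
d = OrderedDict(a=1, b=2)
print(type(d).__name__)  # OrderedDict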
def request_credentials_from_console():
"""
Requests the credentials interactive and returns them in form (username, password)
"""
username = input('Username: ')
password = input('Password: ')
return username, password | 43102d8528502f700ab96714831c94abb6a3b0f8 | 19,638 |
def prettify_url(url):
"""Return a URL without its schema
"""
if not url:
return url
split = url.split('//', 1)
if len(split) == 2:
schema, path = split
else:
path = url
return path.rstrip('/') | 0beed0522355e4ea8170cac22e61f92e0c21ccca | 19,639 |
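A few illustrative calls to `prettify_url`, showing the schema and trailing slash being stripped.
print(prettify_url("https://example.com/some/path/"))  # example.com/some/path
print(prettify_url("example.com/"))                    # example.com
print(prettify_url(""))                                # "" (falsy input is returned unchanged)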
def CDLMORNINGDOJISTAR(data: xr.DataArray, penetration: float = 0.3) -> xr.DataArray:
"""
Morning Doji Star (Pattern Recognition)
Inputs:
data:['open', 'high', 'low', 'close']
Parameters:
penetration: 0.3
Outputs:
double series (values are -1, 0 or 1)
"""
return multiple_series_call(talib.CDLMORNINGDOJISTAR, data, ds.TIME, ds.FIELD, [f.OPEN, f.HIGH, f.LOW, f.CLOSE],
[penetration], result_divider=100) | be1f4954dda5109d067070fc97c340233085b043 | 19,640 |
def panerror_to_dict(obj):
"""Serializer function for POCS custom exceptions."""
name_match = error_pattern.search(str(obj.__class__))
if name_match:
exception_name = name_match.group(1)
else:
msg = f"Unexpected obj type: {obj}, {obj.__class__}"
raise ValueError(msg)
return {"__class__": "PanError",
"exception_name": exception_name,
"args": obj.args} | 8841d2c4b6f3ba1deae5057a6b85b70830c412a1 | 19,641 |
from typing import Optional
def build_class_instance(module_path: str, init_params: Optional[dict] = None):
"""
Create an object instance from absolute module_path string.
Parameters
----------
module_path: str
Full module_path that is valid for your project or some external package.
init_params: optional dict
These parameters will be used as init parameters for the given type.
Returns
-------
Some object instance
"""
class_ = get_type_from_module_path(module_path=module_path)
result = class_(**(init_params or {}))
return result | d50ffdbb8bbeed36b572e6e555376febabecb745 | 19,642 |
def maintainers_mapper(maintainers, package):
"""
Update package maintainers and return package.
https://docs.npmjs.com/files/package.json#people-fields-author-contributors
npm also sets a top-level "maintainers" field with your npm user info.
"""
# note this is the same code as contributors_mappers... should be refactored
maintains = []
if isinstance(maintainers, list):
for contrib in maintainers:
name, email, url = parse_person(contrib)
maintains.append(models.Party(type=models.party_person, name=name, email=email, url=url))
else: # a string or dict
name, email, url = parse_person(maintainers)
maintains.append(models.Party(type=models.party_person, name=name, email=email, url=url))
package.maintainers = maintains
return package | 57e76ec2cbc1727778bfe980c5cd6d82798a1800 | 19,643 |
def calc_original_pressure(pressure_ratio):
"""
calculates the original pressure value given the <code>AUDITORY_THRESHOLD</code>.
The results are only correct if the pressure ratio is build using the <code>AUDITORY_THRESHOLD</code>.
:param pressure_ratio: the pressure ratio that shall be converted to the original value
:return: the pressure value the ratio is based on
"""
return pressure_ratio * (AUDITORY_THRESHOLD ** 2) | 1a0680073cb739fef47952887e6dedf4487f2aa0 | 19,644 |
def normalize_field_names(fields):
"""
Map field names to a normalized form to check for collisions like 'coveredText' vs 'covered_text'
"""
return set(s.replace('_','').lower() for s in fields) | 55bdac50fd1fcf23cfec454408fbcbbae96e507e | 19,645 |
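A short sketch of how the normalization can be used to detect colliding field names such as 'coveredText' vs 'covered_text'.
fields = ["coveredText", "covered_text", "begin", "end"]
normalized = normalize_field_names(fields)
print(normalized)                    # {'coveredtext', 'begin', 'end'}
print(len(normalized) < len(fields))  # True -> at least one collision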
def powspec_disc_n(n, fs, mu, s, kp, km, vr, vt, tr):
"""Return the n'th Lorentzian and its width"""
Td = ifana.LIF().Tdp(mu, s, vr, vt) + tr
Ppp = (kp*exp(-(kp+km)*tr)+km)/(kp+km)
kpbar = (kp*(Td-tr)-log(Ppp))/Td
return 1./Td * 2*kpbar/(kpbar**2 + (2*pi*(fs - n*1./Td))**2), kpbar | 60335e496b734c32116424d857a3c59de7f5b567 | 19,647 |
def list_keys(request):
"""
Tags: keys
---
Lists all added keys.
READ permission required on key.
---
"""
auth_context = auth_context_from_request(request)
return filter_list_keys(auth_context) | 564d03457265c0461e82b55abfb23cb4d45ad0ac | 19,648 |
def rounder(money_dist: list, pot: int, to_coin: int = 2) -> list:
"""
Rounds the money distribution while preserving total sum
stolen from https://stackoverflow.com/a/44740221
"""
def custom_round(x):
""" Rounds a number to be divisible by to_coin specified """
return int(to_coin * round(x / to_coin))
rs = [custom_round(x) for x in money_dist]
k = pot - sum(rs)
assert k == custom_round(k)
fs = [x - custom_round(x) for x in money_dist]
indices = [
i
for order, (e, i) in enumerate(
reversed(sorted((e, i) for i, e in enumerate(fs)))
)
if order < k
]
return [r + 1 if i in indices else r for i, r in enumerate(rs)] | f315027def4646252aa7d4ee7c05ca3085625583 | 19,649 |
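A usage sketch of `rounder`, using `to_coin=1` for simplicity: the share with the largest fractional remainder absorbs the leftover so the total is preserved.
shares = [33.3, 33.3, 33.4]
rounded = rounder(shares, pot=100, to_coin=1)
print(rounded)       # [33, 33, 34]
print(sum(rounded))  # 100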
def find_wcscorr_row(wcstab, selections):
""" Return an array of indices from the table (NOT HDU) 'wcstab' that matches the
selections specified by the user.
The row selection criteria must be specified as a dictionary with
column name as key and value(s) representing the valid desired row values.
For example, {'wcs_id':'OPUS','extver':2}.
"""
mask = None
for i in selections:
bmask = (wcstab.field(i) == selections[i])
if mask is None:
mask = bmask.copy()
else:
mask = np.logical_and(mask,bmask)
del bmask
return mask | dcd2b4025ec3756319911e6626dd403a6efda1c4 | 19,650 |
import urllib
def _gftRead(url, step):
"""
Reads in a gtf file from a specific db given the url.
Some gtf files have a certain number of header lines that
are skipped however.
Input: url where gtf is fetched from
Input: number of lines to skip while reading in the frame
Output: gtf in a pandas frame
"""
urllib.request.urlretrieve(url, "/tmp/conversion.gtf.gz")
gtf = pd.read_csv("/tmp/conversion.gtf.gz",
compression = "gzip",
engine = "python",
delimiter = '\t',
skiprows = step,
header = None)
return gtf | ef05d2747188def526612bdd27931e0420e275dd | 19,652 |
def add() -> jsonify:
"""
Adds a new item in the server and returns the updated list to the front-end
"""
# Passed Items from Front-End
name = request.form['name']
priority = request.form['priority']
price = request.form['price'].replace(",", "")  # strip thousands separators so the price can be parsed as a float
money = request.form['money']
# Adds item to the server and check the status of the addition
is_right = mysqlcommands.add_item(name, priority, price, money)
# Pass the status of the addition to this variable
message = constants.ADD_ITEM_SUCCESS_MESSAGE if is_right else constants.ADD_ITEM_FAILURE_MESSAGE
# Get the content from the JSON file
my_obj = jsoncommands.get_json(constants.JSON_FILE_PATH)
# Re-allocate the budget with the new added item
utilities.money_allocation(mysqlcommands.get_all_items(), my_obj['Total'] * my_obj['Percentage'],
mysqlcommands.priority_count())
return jsonify({
"color": is_right,
"message": message,
"allItems": mysqlcommands.get_all_items()
}) | e8a32aa47ee057c6f9653554955cddd0b003ef1a | 19,653 |
def calculate(formula, **params):
""" Calculate formula and return a dictionary of coin and amounts """
formula = Formula.get(formula)
if formula is None:
raise InvalidFormula(formula)
if not formula.expression:
return {}
return calculate_expression(formula.expression, formula, **params) | e8a237d2677581296bb1491badecf83264c0d44a | 19,655 |
import torch
def pack_batch_tensor(inputs):
"""default pad_ids = 0
"""
input_max_length = max([d.size(0) for d in inputs])
# prepare batch tensor
input_ids = torch.LongTensor(len(inputs), input_max_length).zero_()
input_mask = torch.LongTensor(len(inputs), input_max_length).zero_()
for i, d in enumerate(inputs):
input_ids[i, :d.size(0)].copy_(d)
input_mask[i, :d.size(0)].fill_(1)
return {
"input_ids":input_ids,
"input_mask":input_mask,
} | 33e59acbc8facaf41064e2dd7031bdd314211878 | 19,656 |
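A small usage sketch: two variable-length index sequences are padded with 0 and the mask marks the real positions.
import torch

sequences = [torch.tensor([5, 2, 9]), torch.tensor([7, 1])]
batch = pack_batch_tensor(sequences)
print(batch["input_ids"])   # tensor([[5, 2, 9], [7, 1, 0]])
print(batch["input_mask"])  # tensor([[1, 1, 1], [1, 1, 0]])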
def build_network(network_class=None,
dataset_dirs_args=None,
dataset_dirs_class=None,
dataset_dirs=None,
dataset_spec_args=None,
dataset_spec_class=None,
dataset_spec=None,
network_spec_args=None,
network_spec_class=None,
network_spec=None,
json_spec_path=None,
spec_cont=None,
class_priority=False):
"""
build network
"""
# build network specification
network_spec = ivy.default(
network_spec,
build_network_specification(
dataset_dirs_args=dataset_dirs_args,
dataset_dirs_class=dataset_dirs_class,
dataset_dirs=dataset_dirs,
dataset_spec_args=dataset_spec_args,
dataset_spec_class=dataset_spec_class,
dataset_spec=dataset_spec,
network_spec_args=network_spec_args,
network_spec_class=network_spec_class,
json_spec_path=json_spec_path,
spec_cont=spec_cont))
# override network_class if specified in network_spec
network_class = ivy.default(ivy.default(
_import_arg_specified_class_if_present(network_spec, 'network_class'),
network_class, rev=class_priority),
None)
# verify network_class exists
if not ivy.exists(network_class):
raise Exception('network_class must either be specified in this build_network() method,'
'or network_class attribute must be specified in the network_spec instance')
# network
return network_class(network_spec) | 4fae693408b385629c6ae645c69778276c16b915 | 19,657 |
def lookup_service_root(service_root):
"""Dereference an alias to a service root.
A recognized server alias such as "staging" gets turned into the
appropriate URI. A URI gets returned as is. Any other string raises a
ValueError.
"""
if service_root == EDGE_SERVICE_ROOT:
# This will trigger a deprecation warning and use production instead.
service_root = 'edge'
return _dereference_alias(service_root, service_roots) | 8cc5384ba26639438e4c7e18264fb39ee2445fcf | 19,659 |
import warnings
def get_initial_configuration():
"""
Return (pos, type)
pos: (1, 1) - (9, 9)
type will be 2-letter strings like CSA format.
(e.g. "FU", "HI", etc.)
"""
warnings.warn(
"""get_initial_configuration() returns ambiguous cell state.
Use get_initial_configuration_with_dir() instead.""",
DeprecationWarning)
initial_state_top = {
(1, 1): "KY",
(2, 1): "KE",
(3, 1): "GI",
(4, 1): "KI",
(5, 1): "OU",
(6, 1): "KI",
(7, 1): "GI",
(8, 1): "KE",
(9, 1): "KY",
(2, 2): "KA",
(8, 2): "HI",
(1, 3): "FU",
(2, 3): "FU",
(3, 3): "FU",
(4, 3): "FU",
(5, 3): "FU",
(6, 3): "FU",
(7, 3): "FU",
(8, 3): "FU",
(9, 3): "FU",
}
initial_state = {}
for (pos, ty) in initial_state_top.items():
x, y = pos
initial_state[pos] = ty
initial_state[(10 - x, 10 - y)] = ty
return initial_state | c96f7f70e258d09090abeffc9815082265245cf2 | 19,660 |
def request_pull_to_diff_or_patch(
repo, requestid, username=None, namespace=None, diff=False
):
"""Returns the commits from the specified pull-request as patches.
:arg repo: the `pagure.lib.model.Project` object of the current pagure
project browsed
:type repo: `pagure.lib.model.Project`
:arg requestid: the identifier of the pull-request to convert to patch
or diff
:type requestid: int
:kwarg username: the username of the user who forked then project when
the project viewed is a fork
:type username: str or None
:kwarg namespace: the namespace of the project if it has one
:type namespace: str or None
:kwarg diff: a boolean whether the data returned is a patch or a diff
:type diff: boolean
:return: the patch or diff representation of the specified pull-request
:rtype: str
"""
repo = flask.g.repo
if not repo.settings.get("pull_requests", True):
flask.abort(404, description="No pull-requests found for this project")
request = pagure.lib.query.search_pull_requests(
flask.g.session, project_id=repo.id, requestid=requestid
)
if not request:
flask.abort(404, description="Pull-request not found")
if request.remote:
repopath = pagure.utils.get_remote_repo_path(
request.remote_git, request.branch_from
)
parentpath = pagure.utils.get_repo_path(request.project)
else:
repo_from = request.project_from
parentpath = pagure.utils.get_repo_path(request.project)
repopath = parentpath
if repo_from:
repopath = pagure.utils.get_repo_path(repo_from)
repo_obj = pygit2.Repository(repopath)
orig_repo = pygit2.Repository(parentpath)
branch = repo_obj.lookup_branch(request.branch_from)
commitid = None
if branch:
commitid = branch.peel().hex
diff_commits = []
if request.status != "Open":
commitid = request.commit_stop
try:
for commit in repo_obj.walk(commitid, pygit2.GIT_SORT_NONE):
diff_commits.append(commit)
if commit.oid.hex == request.commit_start:
break
except KeyError:
# This happens when repo.walk() cannot find commitid
pass
else:
try:
diff_commits = pagure.lib.git.diff_pull_request(
flask.g.session, request, repo_obj, orig_repo, with_diff=False
)
except pagure.exceptions.PagureException as err:
flask.flash("%s" % err, "error")
return flask.redirect(
flask.url_for(
"ui_ns.view_repo",
username=username,
repo=repo.name,
namespace=namespace,
)
)
except SQLAlchemyError as err: # pragma: no cover
flask.g.session.rollback()
_log.exception(err)
flask.flash(
"Could not update this pull-request in the database", "error"
)
diff_commits.reverse()
patch = pagure.lib.git.commit_to_patch(
repo_obj, diff_commits, diff_view=diff
)
return flask.Response(patch, content_type="text/plain;charset=UTF-8") | a7b543294cb66561700b7db7800277d74ed1267c | 19,661 |
def GL(alpha, f_name, domain_start = 0.0, domain_end = 1.0, num_points = 100):
""" Computes the GL fractional derivative of a function for an entire array
of function values.
Parameters
==========
alpha : float
The order of the differintegral to be computed.
f_name : function handle, lambda function, list, or 1d-array of
function values
This is the function that is to be differintegrated.
domain_start : float
The left-endpoint of the function domain. Default value is 0.
domain_end : float
The right-endpoint of the function domain; the point at which the
differintegral is being evaluated. Default value is 1.
num_points : integer
The number of points in the domain. Default value is 100.
Examples:
>>> DF_poly = GL(-0.5, lambda x: x**2 - 1)
>>> DF_sqrt = GL(0.5, lambda x: np.sqrt(x), 0., 1., 100)
"""
# Flip the domain limits if they are in the wrong order.
if domain_start > domain_end:
domain_start, domain_end = domain_end, domain_start
# Check inputs.
checkValues(alpha, domain_start, domain_end, num_points)
f_values, step_size = functionCheck(f_name, domain_start, domain_end, num_points)
# Get the convolution filter.
b_coeffs = GLcoeffs(alpha, num_points-1)
# Real Fourier transforms for convolution filter and array of function values.
B = np.fft.rfft(b_coeffs)
F = np.fft.rfft(f_values)
result = np.fft.irfft(F*B)*step_size**-alpha
return result | 226d5d8b49be9a243ac4508a7691c8997503cb4d | 19,662 |
def check_lint(root_dir, ignore, verbose, dry_run, files_at_a_time,
max_line_len, continue_on_error):
"""Check for lint.
Unless `continue_on_error` is selected, returns `False` on the first
iteration where lint is found, or where the lint checker otherwise
returned failure.
:return: Whether the check found everything OK.
"""
success = True
# Suffixes for types of file that pocketlint can check for us.
pocketlint_suffixes = C_LIKE_SUFFIXES + PERL_SUFFIXES + [
'.ini',
# Don't check for now. Styles differ too much.
# '.css',
'.js',
'.md',
'.cgi',
'.php',
'.py',
'.sh',
]
lintable_files = find_files(
root_dir, ignore=ignore, suffixes=pocketlint_suffixes)
command_line = ['pocketlint', '-m', '%d' % max_line_len, '--']
for chunk in chunk_file_list(lintable_files, files_at_a_time):
try:
run_command(
command_line + chunk, verbose=verbose, dry_run=dry_run)
except CalledProcessError:
success = False
if not success and not continue_on_error:
return False
return success | c43b7a05bf47b9108281bb7c0cef4ff1d6e107d3 | 19,663 |
def remove_comments(s):
"""
Examples
--------
>>> code = '''
... # comment 1
... # comment 2
... echo foo
... '''
>>> remove_comments(code)
'echo foo'
"""
return "\n".join(l for l in s.strip().split("\n") if not l.strip().startswith("#")) | 1d3e1468c06263d01dd204c5ac89235a17f50972 | 19,664 |
def generate_wsl(ws):
"""
Generates watershed line that correspond to areas of
touching objects.
"""
se = square(3)
ero = ws.copy()
ero[ero == 0] = ero.max() + 1
ero = erosion(ero, se)
ero[ws == 0] = 0
grad = dilation(ws, se) - ero
grad[ws == 0] = 0
grad[grad > 0] = 255
grad = grad.astype(np.uint8)
return grad | 6d61b2b366ca6a4b94f8a6513f1a0a5fb1bfd8c9 | 19,665 |
def train_lstm_model(x, y,
epochs=200,
patience=10,
lstm_dim=48,
batch_size=128,
lr=1e-3):
"""
Train an LSTM to predict purchase (1) or abandon (0)
:param x: session sequences
:param y: target labels
:param epochs: num training epochs
:param patience: early stopping patience
:param lstm_dim: lstm units
:param batch_size: batch size
:param lr: learning rate
:return:
"""
# Verfiy if GPU/CPU is being used
print("Print out system device...")
print(device_lib.list_local_devices())
print("Starting training now...")
X_train, X_test, y_train, y_test = train_test_split(x,y)
# pad sequences for training in batches
max_len = max(len(_) for _ in x)
X_train = pad_sequences(X_train, padding="post",value=7, maxlen=max_len)
X_test = pad_sequences(X_test, padding="post", value=7, maxlen=max_len)
# convert to one-hot
X_train = tf.one_hot(X_train, depth=7)
X_test = tf.one_hot(X_test, depth=7)
y_train = np.array(y_train)
y_test = np.array(y_test)
# Define Model
model = keras.Sequential()
model.add(keras.layers.InputLayer(input_shape=(None,7)))
# Masking layer ignores padded time-steps
model.add(keras.layers.Masking())
model.add(keras.layers.LSTM(lstm_dim))
model.add(keras.layers.Dense(1,activation='sigmoid'))
model.summary()
# Some Hyper Params
opt = keras.optimizers.Adam(learning_rate=lr)
loss = keras.losses.BinaryCrossentropy()
es = keras.callbacks.EarlyStopping(monitor='val_loss',
patience=patience,
verbose=1,
restore_best_weights=True)
# Include wandb callback for tracking
callbacks = [es, WandbCallback()]
model.compile(optimizer=opt,
loss=loss,
metrics=['accuracy'])
# Train Model
model.fit(X_train, y_train,
validation_data=(X_test,y_test),
batch_size=batch_size,
epochs=epochs,
callbacks=callbacks)
# return trained model
# NB: to store model as Metaflow Artifact it needs to be pickle-able!
return model.to_json(), model.get_weights(), model | 159ad99e419d659e655bcdd9556a6ae3202071ae | 19,666 |
def nash_sutcliffe_efficiency(predicted, observed):
""" implements Nash-Sutcliffe Model Efficiencobserved Coefficient where predicted is modeled and observed is observed"""
if np.isnan(np.min(predicted)) or np.isnan(np.min(observed)):
return np.asarray([np.nan])
return 1 - np.sum((predicted - observed)**2) / np.sum((observed - observed.mean())**2) | b9820cf95472499d6e6c24e47ffb4bbd5574e439 | 19,667 |
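Two sanity-check calls, assuming `numpy` is imported as `np`: a perfect prediction scores 1, and predicting the observed mean scores 0.
import numpy as np

observed = np.array([1.0, 2.0, 3.0, 4.0])
print(nash_sutcliffe_efficiency(observed, observed))                     # 1.0
print(nash_sutcliffe_efficiency(np.full(4, observed.mean()), observed))  # 0.0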
def tf_Affine_transformer(points, theta):
"""
Arguments:
points: `Matrix` [2, np] of grid points to transform
theta: `Matrix` [bs, 2, 3] with a batch of transformations
"""
with tf.name_scope('Affine_transformer'):
num_batch = tf.shape(theta)[0]
grid = tf.tile(tf.expand_dims(points, 0), [num_batch, 1, 1])
# Transform A x (x_t, y_t, 1)^T -> (x_s, y_s)
T_g = tf.matmul(theta, grid)
return T_g | a024609ac386b7d12173fb3965233ae4c21233d2 | 19,668 |
def read_COCO_gt(filename, n_imgs=None, ret_img_sizes=False, ret_classes=False, bbox_gt=False):
"""
Function for reading COCO ground-truth files and converting them to GroundTruthInstances format.
:param filename: filename of the annotation.json file with all COCO ground-truth annotations
:param n_imgs: number of images ground-truth is being extracted from. If None extract all (default None)
:param ret_img_sizes: Boolean flag dictating if the image sizes should be returned
:param ret_classes: Boolean flag dictating if the class mapping dictionary should be returned
:param bbox_gt: Boolean flag dictating if the GroundTruthInstance should ignore the segmentation mask and only use
bounding box information.
:return: ground-truth instances as GTLoader and optionally image sizes or class mapping dictionary if requested
"""
# read the json file
coco_obj = COCO(filename)
gt_instances = GTLoader(coco_obj, n_imgs, bbox_gt=bbox_gt)
# Return image sizes if requested
if ret_img_sizes:
return gt_instances, [
[coco_obj.imgs[img_id]['height'], coco_obj.imgs[img_id]['width']]
for img_id in sorted(coco_obj.imgs.keys())
]
# Return class mapping dictionary if requested
if ret_classes:
return gt_instances, {
coco_obj.cats[cat_id]['name']: idx
for idx, cat_id in enumerate(sorted(coco_obj.cats.keys()))
}
return gt_instances | dfcfa69ee620ac3546b1f646c9c23f126a9822c3 | 19,669 |
def get_metrics_from_file(metric_file):
"""Gets all metric functions within a file
:param str metric_file: The name of the file to look in
:return: Tuples containing (function name, function object)
:rtype: list
"""
try:
metrics = import_module(metric_file)
metrics = get_sorted_metric_function_tuples(metrics)
except ImportError:
raise NoMetricFileFound
if not metrics:
raise NoMetricFunctionsFound
return metrics | 12e59feb03b6d8571fb7a5ef8f02e81e53605b0b | 19,670 |
def mnist_model(inputs, mode):
"""Takes the MNIST inputs and mode and outputs a tensor of logits."""
# Input Layer
# Reshape X to 4-D tensor: [batch_size, width, height, channels]
# MNIST images are 28x28 pixels, and have one color channel
inputs = tf.reshape(inputs, [-1, 28, 28, 1])
data_format = 'channels_last'
if tf.test.is_built_with_cuda():
# When running on GPU, transpose the data from channels_last (NHWC) to
# channels_first (NCHW) to improve performance.
# See https://www.tensorflow.org/performance/performance_guide#data_formats
data_format = 'channels_first'
inputs = tf.transpose(inputs, [0, 3, 1, 2])
# Convolutional Layer #1
# Computes 32 features using a 5x5 filter with ReLU activation.
# Padding is added to preserve width and height.
# Input Tensor Shape: [batch_size, 28, 28, 1]
# Output Tensor Shape: [batch_size, 28, 28, 32]
conv1 = tf.layers.conv2d(
inputs=inputs,
filters=32,
kernel_size=[5, 5],
padding='same',
activation=tf.nn.relu,
data_format=data_format)
# Pooling Layer #1
# First max pooling layer with a 2x2 filter and stride of 2
# Input Tensor Shape: [batch_size, 28, 28, 32]
# Output Tensor Shape: [batch_size, 14, 14, 32]
pool1 = tf.layers.max_pooling2d(inputs=conv1, pool_size=[2, 2], strides=2,
data_format=data_format)
# Convolutional Layer #2
# Computes 64 features using a 5x5 filter.
# Padding is added to preserve width and height.
# Input Tensor Shape: [batch_size, 14, 14, 32]
# Output Tensor Shape: [batch_size, 14, 14, 64]
conv2 = tf.layers.conv2d(
inputs=pool1,
filters=64,
kernel_size=[5, 5],
padding='same',
activation=tf.nn.relu,
data_format=data_format)
# Pooling Layer #2
# Second max pooling layer with a 2x2 filter and stride of 2
# Input Tensor Shape: [batch_size, 14, 14, 64]
# Output Tensor Shape: [batch_size, 7, 7, 64]
pool2 = tf.layers.max_pooling2d(inputs=conv2, pool_size=[2, 2], strides=2,
data_format=data_format)
# Flatten tensor into a batch of vectors
# Input Tensor Shape: [batch_size, 7, 7, 64]
# Output Tensor Shape: [batch_size, 7 * 7 * 64]
pool2_flat = tf.reshape(pool2, [-1, 7 * 7 * 64])
# Dense Layer
# Densely connected layer with 1024 neurons
# Input Tensor Shape: [batch_size, 7 * 7 * 64]
# Output Tensor Shape: [batch_size, 1024]
dense = tf.layers.dense(inputs=pool2_flat, units=1024,
activation=tf.nn.relu)
# Add dropout operation; 0.6 probability that element will be kept
dropout = tf.layers.dropout(
inputs=dense, rate=0.4, training=(mode == tf.estimator.ModeKeys.TRAIN))
# Logits layer
# Input Tensor Shape: [batch_size, 1024]
# Output Tensor Shape: [batch_size, 10]
logits = tf.layers.dense(inputs=dropout, units=10)
return logits | 7807adce4030d070c79eea5f3a1991ff4c4e1cd6 | 19,671 |
import base64
import io
def show_local_mp4_video(file_name, width=640, height=480):
"""Renders a mp4 video on a Jupyter notebook
Args:
file_name (str): Path to file.
width (int): Video width.
height (int): Video height.
Returns:
obj: Video render as HTML object.
"""
video_encoded = base64.b64encode(io.open(file_name, 'rb').read())
return HTML(data='''<video width="{0}" height="{1}" alt="test" controls>
<source src="data:video/mp4;base64,{2}" type="video/mp4" />
</video>'''.format(width, height, video_encoded.decode('ascii'))) | 4f2b4660b005edcf865eca1c6632ffa6c0899fe8 | 19,672 |
import datetime
def change_status(sid, rev, status, **kwargs):
"""
[INCOMPLETE]
- DISABLE OTHER REVISIONS OF THE SAME SIGNATURE WHEN DEPLOYING ONE
Change the status of a signature
Variables:
sid => ID of the signature
rev => Revision number of the signature
status => New state
Arguments:
None
Data Block:
None
Result example:
{ "success" : true } #If saving the rule was a success or not
"""
DEPLOYED_STATUSES = ['DEPLOYED', 'NOISY', 'DISABLED']
DRAFT_STATUSES = ['STAGING', 'TESTING']
STALE_STATUSES = ['INVALID']
user = kwargs['user']
if status == 'INVALID':
return make_api_response("",
"INVALID signature status is reserved for service use only.",
403)
if not user['is_admin'] and status in DEPLOYED_STATUSES:
return make_api_response("",
"Only admins are allowed to change the signature status to a deployed status.",
403)
key = "%sr.%s" % (sid, rev)
data = STORAGE.get_signature(key)
if data:
if not Classification.is_accessible(user['classification'], data['meta'].get('classification',
Classification.UNRESTRICTED)):
return make_api_response("", "You are not allowed change status on this signature", 403)
if data['meta']['al_status'] in STALE_STATUSES and status not in DRAFT_STATUSES:
return make_api_response("",
"Only action available while signature in {} status is to change "
"signature to a DRAFT status"
.format(data['meta']['al_status']),
403)
if data['meta']['al_status'] in DEPLOYED_STATUSES and status in DRAFT_STATUSES:
return make_api_response("", "You cannot change the status of signature %s r.%s from %s to %s." %
(sid, rev, data['meta']['al_status'], status), 403)
query = "meta.al_status:{status} AND _yz_rk:{sid}* AND NOT _yz_rk:{key}"
today = datetime.date.today().isoformat()
uname = user['uname']
if status not in ['DISABLED', 'INVALID', 'TESTING']:
for other in STORAGE.get_signatures(
STORAGE.list_filtered_signature_keys(
query.format(key=key, sid=sid, status=status)
)
):
other['meta']['al_state_change_date'] = today
other['meta']['al_state_change_user'] = uname
other['meta']['al_status'] = 'DISABLED'
other_sid = other['meta']['id']
other_rev = other['meta']['rule_version']
other_key = "%sr.%s" % (other_sid, other_rev)
STORAGE.save_signature(other_key, other)
data['meta']['al_state_change_date'] = today
data['meta']['al_state_change_user'] = uname
data['meta']['al_status'] = status
STORAGE.save_signature(key, data)
return make_api_response({"success": True})
else:
return make_api_response("", "Signature not found. (%s r.%s)" % (sid, rev), 404) | dce934db6c7fe34e184ff98a63f2bc5e32efaffe | 19,673 |
from re import X
def trilinear_interpolation(a: np.ndarray, factor: float) -> np.ndarray:
"""Resize an three dimensional array using trilinear
interpolation.
:param a: The array to resize. The array is expected to have at
least three dimensions.
:param factor: The amount to resize the array. Given how the
interpolation works, you probably don't get great results
with factor less than or equal to .5. Consider multiple
passes of interpolation with larger factors in those cases.
:return: A :class:ndarray object.
:rtype: numpy.ndarray
Usage::
>>> import numpy as np
>>>
>>> a = np.array([
... [
... [0, 1],
... [1, 0],
... ],
... [
... [1, 0],
... [0, 1],
... ],
... ])
>>> trilinear_interpolation(a, 2)
array([[[0. , 0.5, 1. , 1. ],
[0.5, 0.5, 0.5, 0.5],
[1. , 0.5, 0. , 0. ],
[1. , 0.5, 0. , 0. ]],
<BLANKLINE>
[[0.5, 0.5, 0.5, 0.5],
[0.5, 0.5, 0.5, 0.5],
[0.5, 0.5, 0.5, 0.5],
[0.5, 0.5, 0.5, 0.5]],
<BLANKLINE>
[[1. , 0.5, 0. , 0. ],
[0.5, 0.5, 0.5, 0.5],
[0. , 0.5, 1. , 1. ],
[0. , 0.5, 1. , 1. ]],
<BLANKLINE>
[[1. , 0.5, 0. , 0. ],
[0.5, 0.5, 0.5, 0.5],
[0. , 0.5, 1. , 1. ],
[0. , 0.5, 1. , 1. ]]])
"""
# Return the array unchanged if the array won't be magnified.
if factor == 1:
return a
# Perform a defensive copy of the original array to avoid
# unexpected side effects.
a = a.copy()
# Since we are magnifying the given array, the new array's shape
# will increase by the magnification factor.
mag_size = tuple(int(s * factor) for s in a.shape)
# Map out the relationship between the old space and the
# new space.
indices = np.indices(mag_size)
if factor > 1:
whole = (indices // factor).astype(int)
parts = (indices / factor - whole).astype(float)
else:
new_ends = [s - 1 for s in mag_size]
old_ends = [s - 1 for s in a.shape]
true_factors = [n / o for n, o in zip(new_ends, old_ends)]
for i in range(len(true_factors)):
if true_factors[i] == 0:
true_factors[i] = .5
whole = indices.copy()
parts = indices.copy()
for i in Z, Y, X:
whole[i] = (indices[i] // true_factors[i]).astype(int)
parts[i] = (indices[i] / true_factors[i] - whole[i]).astype(float)
del indices
# Trilinear interpolation determines the value of a new pixel by
# comparing the values of the eight old pixels that surround it.
# The hashes are the keys to the dictionary that contains those
# old pixel values. The key indicates the position of the pixel
# on each axis, with one meaning the position is ahead of the
# new pixel, and zero meaning the position is behind it.
hashes = [f'{n:>03b}'[::-1] for n in range(2 ** 3)]
hash_table = {}
# The original array needs to be made one dimensional for the
# numpy.take operation that will occur as we build the tables.
raveled = np.ravel(a)
# Build the table that contains the old pixel values to
# interpolate.
for hash in hashes:
hash_whole = whole.copy()
# Use the hash key to adjust the which old pixel we are
# looking at.
for axis in Z, Y, X:
if hash[axis] == '1':
hash_whole[axis] += 1
# Handle the pixels that were pushed off the far
# edge of the original array by giving them the
# value of the last pixel along that axis in the
# original array.
m = np.zeros(hash_whole[axis].shape, dtype=bool)
m[hash_whole[axis] >= a.shape[axis]] = True
hash_whole[axis][m] = a.shape[axis] - 1
# Since numpy.take() only works in one dimension, we need to
# map the three dimensional indices of the original array to
# the one dimensional indices used by the raveled version of
# that array.
raveled_indices = hash_whole[Z] * a.shape[Y] * a.shape[X]
raveled_indices += hash_whole[Y] * a.shape[X]
raveled_indices += hash_whole[X]
# Get the value of the pixel in the original array.
hash_table[hash] = np.take(raveled, raveled_indices.astype(int))
# Once the hash table has been built, clean up the working arrays
# in case we are running short on memory.
else:
del hash_whole, raveled_indices, whole
# Everything before this was to set up the interpolation. Now that
# it's set up, we perform the interpolation. Since we are doing
# this across three dimensions, it's a three stage process. Stage
# one is along the X axis.
x1 = lerp(hash_table['000'], hash_table['001'], parts[X])
x2 = lerp(hash_table['010'], hash_table['011'], parts[X])
x3 = lerp(hash_table['100'], hash_table['101'], parts[X])
x4 = lerp(hash_table['110'], hash_table['111'], parts[X])
# Stage two is along the Y axis.
y1 = lerp(x1, x2, parts[Y])
y2 = lerp(x3, x4, parts[Y])
del x1, x2, x3, x4
# And stage three is along the Z axis. Since this is the last step
# we can just return the result.
return lerp(y1, y2, parts[Z]) | a3ed2c13f13bdc37cbe47cd7ed6862a67cdebd66 | 19,674 |
def load_data(path):
"""将材料的label与text进行分离,得到两个list"""
label_list = []
text_list = []
with open(path, 'r') as f:
for line in f.readlines():
data = line.strip().split('\t')
data[1] = data[1].strip().split()
label = [0 for i in range(8)]
total = 0
for i in range(0, 8):
label[i] = float(data[1][1 + i].split(':')[1])
total += label[i]
for i in range(len(label)):
label[i] /= total
label_list.append(label)
text_list.append(data[2].strip().split())
return label_list, text_list | 2274e0e327844c4dedae229b3c03a344653f7342 | 19,675 |
def invoke(request):
"""Where the magic happens..."""
with monitor(labels=_labels, name="transform_request"):
transformed_request = _transform_request(request)
with monitor(labels=_labels, name="invoke"):
response = _model.predict(transformed_request)
with monitor(labels=_labels, name="transform_response"):
transformed_response = _transform_response(response)
return transformed_response | 4547de901cfd2cc153ea67632c2a002a17a15d8b | 19,676 |
def munge_pocket_response(resp):
"""Munge Pocket Article response."""
articles = resp['list']
result = pd.DataFrame([articles[id] for id in articles])
# only munge if actual articles present
if len(result) != 0:
result['url'] = (result['resolved_url'].combine_first(result['given_url']))
for time_col in ['time_added', 'time_updated', 'time_read']:
result[time_col] = pd.to_datetime(result[time_col], unit='s')
return (
result.drop_duplicates(subset=['resolved_id'])[[
'item_id', 'resolved_id', 'given_title', 'url', 'resolved_title', 'time_added',
'time_read', 'time_updated', 'status', 'word_count'
]]
) | 32695526a784cc95aeb428ca2481dcf9053e72ed | 19,678 |
import time
def fake_data_PSBL_phot(outdir='', outroot='psbl',
raL=259.5, decL=-29.0,
t0=57000.0, u0_amp=0.8, tE=500.0,
piE_E=0.02, piE_N=0.02,
q=0.5, sep=5.0, phi=75.0, b_sff1=0.5, mag_src1=16.0,
parallax=True, target='Unknown', animate=False):
"""
Optional Inputs
---------------
outdir : str
The output directory where figures and data are saved.
outroot : str
The output file name root for a saved figure.
raL : float (deg)
The right ascension in degrees. Needed if parallax=True.
decL : float (deg)
The declination in degrees. Needed if parallax=False.
t0: float
Time of photometric peak, as seen from Earth [MJD]
u0_amp: float
Angular distance between the lens and source on the plane of the
sky at closest approach in units of thetaE. It can be
positive (u0_hat cross thetaE_hat pointing away from us) or
negative (u0_hat cross thetaE_hat pointing towards us).
tE: float
Einstein crossing time. [MJD]
piE_E: float
The microlensing parallax in the East direction in units of thetaE
piE_N: float
The microlensing parallax in the North direction in units of thetaE
q: float
Mass ratio (low-mass / high-mass)
sep: float
Angular separation of the two lenses in units of thetaE where
thetaE is defined with the total binary mass.
phi: float
Angle made between the binary axis and the relative proper motion vector,
measured in degrees.
b_sff: array or list
The ratio of the source flux to the total (source + neighbors + lens)
b_sff = f_S / (f_S + f_L + f_N). This must be passed in as a list or
array, with one entry for each photometric filter.
mag_src: array or list
Photometric magnitude of the source. This must be passed in as a
list or array, with one entry for each photometric filter.
"""
start = time.time()
if parallax:
psbl = model.PSBL_Phot_Par_Param1(t0, u0_amp, tE, piE_E, piE_N, q, sep, phi,
[b_sff1], [mag_src1],
raL=raL, decL=decL, root_tol=1e-8)
else:
psbl = model.PSBL_Phot_noPar_Param1(t0, u0_amp, tE, piE_E, piE_N, q, sep, phi,
[b_sff1], [mag_src1],
root_tol=1e-8)
# Simulate
# photometric observations every 1 day and
# for the bulge observing window. Observations missed
# for 125 days out of 365 days for photometry.
t_pho = np.array([], dtype=float)
for year_start in np.arange(54000, 60000, 365.25):
phot_win = 240.0
phot_start = (365.25 - phot_win) / 2.0
t_pho_new = np.arange(year_start + phot_start,
year_start + phot_start + phot_win, 1)
t_pho = np.concatenate([t_pho, t_pho_new])
t_mod = np.arange(t_pho.min(), t_pho.max(), 1)
i_pho, A_pho = psbl.get_all_arrays(t_pho)
i_mod, A_mod = psbl.get_all_arrays(t_mod)
imag_pho = psbl.get_photometry(t_pho, amp_arr=A_pho)
imag_mod = psbl.get_photometry(t_mod, amp_arr=A_mod)
# Make the photometric observations.
# Assume 0.05 mag photoemtric errors at I=19.
# This means Signal = 400 e- at I=19.
flux0 = 400.0
imag0 = 19.0
flux_pho = flux0 * 10 ** ((imag_pho - imag0) / -2.5)
flux_pho_err = flux_pho ** 0.5
flux_pho += np.random.randn(len(t_pho)) * flux_pho_err
imag_pho = -2.5 * np.log10(flux_pho / flux0) + imag0
imag_pho_err = 1.087 / flux_pho_err
stop = time.time()
fmt = 'It took {0:.2f} seconds to evaluate the model at {1:d} time steps'
print(fmt.format(stop - start, len(t_mod) + len(t_pho)))
##########
# Plot photometry
##########
plt.figure(1)
plt.clf()
plt.errorbar(t_pho, imag_pho, yerr=imag_pho_err, fmt='k.', label='Sim Obs',
alpha=0.2)
plt.plot(t_mod, imag_mod, color='red', label='Model')
plt.gca().invert_yaxis()
plt.xlabel('Time (MJD)')
plt.ylabel('I (mag)')
plt.legend()
data = {}
data['t_phot1'] = t_pho
data['mag1'] = imag_pho
data['mag_err1'] = imag_pho_err
data['phot_files'] = ['fake_data_parallax_phot1']
data['ast_files'] = ['fake_data_parallax_ast1']
data['target'] = target
data['phot_data'] = 'sim'
data['ast_data'] = 'sim'
data['raL'] = raL
data['decL'] = decL
params = {}
params['t0'] = t0
params['u0_amp'] = u0_amp
params['tE'] = tE
params['piE_E'] = piE_E
params['piE_N'] = piE_N
params['q'] = q
params['sep'] = sep
params['phi'] = phi
params['b_sff'] = b_sff1
params['mag_src'] = mag_src1
out_name = outdir + outroot + '_movie.gif'
if animate:
ani = plot_models.animate_PSBL(psbl, outfile=out_name)
else:
ani = None
return data, params, psbl, ani | bde4d98d0936be3b0cd655879bbfdbde2e1a5826 | 19,680 |
import pathlib
def is_dicom(path: pathlib.Path) -> bool:
"""Check if the input is a DICOM file.
Args:
path (pathlib.Path): Path to the file to check.
Returns:
bool: True if the file is a DICOM file.
"""
path = pathlib.Path(path)
is_dcm = path.suffix.lower() == ".dcm"
is_dcm_dir = path.is_dir() and any(
p.suffix.lower() == ".dcm" for p in path.iterdir()
)
return is_dcm or is_dcm_dir | 1e20ace9c645a41817bf23a667bd4e1ac815f63f | 19,681 |
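A usage sketch; note the check is based on the ".dcm" suffix only, not on the file contents.
import pathlib
import tempfile

tmp = pathlib.Path(tempfile.mkdtemp())
(tmp / "slice001.dcm").touch()
print(is_dicom(tmp / "slice001.dcm"))  # True  (file with .dcm suffix)
print(is_dicom(tmp))                   # True  (directory containing a .dcm file)
print(is_dicom(tmp / "notes.txt"))     # False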
from typing import Optional
def _hessian(model: 'BinaryLogReg', data: Dataset, data_weights: Optional[jnp.ndarray]) -> jnp.ndarray:
"""Ravelled Hessian matrix of the objective function with respect to the model parameters"""
params_flat, unravel = ravel_pytree(model.params)
random_params = model.random_params
h = jax.hessian(lambda p: _objective(unravel(p), random_params, data, model.lamb, model.pos_label, data_weights))
return h(params_flat) | 5824fe0d2def5d03e8ac3f773641d19d6aebfa3e | 19,682 |
import re
def promax2meta(doc, target):
"""
Return meta information (Line or Area) of csv Promax geometry file.
Arguments:
doc -- csv Promax geometry file
target -- meta information to get (Line or Area)
"""
ptarget = r'' + re.escape(target) + r'\s*[=:]\s*\"?([\w-]+)\"?'
for line in open(doc):
result = (re.search(ptarget, line, re.I))
if result:
return result.group(1) | 7dce362112aa7fb6fa24999c4f870107b24c3d40 | 19,683 |
def axLabel(value, unit):
"""
Return axis label for given strings.
:param value: Value for axis label
:type value: int
:param unit: Unit for axis label
:type unit: str
:return: Axis label as \"<value> (<unit>)\"
:rtype: str
"""
return str(value) + " (" + str(unit) + ")" | cc553cf4334222a06ae4a2bcec5ec5acb9668a8f | 19,684 |
import hashlib
import time
def save_notebook(filename, timeout=10):
"""
Force-saves a Jupyter notebook by displaying JavaScript.
Args:
filename (``str``): path to notebook file being saved
timeout (``int`` or ``float``): number of seconds to wait for save before timing-out
Returns
``bool``: whether the notebook was saved successfully
"""
timeout = timeout * 10**9
if get_ipython() is not None:
with open(filename, "rb") as f:
md5 = hashlib.md5(f.read()).hexdigest()
start = time.time_ns()
display(Javascript("Jupyter.notebook.save_checkpoint();"))
curr = md5
while curr == md5 and time.time_ns() - start <= timeout:
time.sleep(1)
with open(filename, "rb") as f:
curr = hashlib.md5(f.read()).hexdigest()
return curr != md5
return True | 4d02f1eb48459c412a119fcab7d8df7515c1b465 | 19,685 |
def test_gen():
"""Create the test system."""
project_name = "test_grid_sinfactory"
return PFactoryGrid(project_name=project_name).gens["SM1"] | cd8009b55bfced7fbafc2914b80e0dd2cd2851fc | 19,686 |
def validate_api_key():
"""Validates an API key submitted via POST."""
api_key_form = ApiKeyForm()
api_key_form.organization.choices = session['orgs_list']
if api_key_form.validate_on_submit():
session['org_id'] = api_key_form.organization.data
return jsonify(True)
return jsonify(api_key_form.errors), 422 | 1cf72017600222992cb9d622c6b718b8dc84bae8 | 19,687 |
def plot_clickForPlane():
""" Create a Plane at location of one mouse click in the view or
onto a clicked object or
at a pre-selected point location:
Create a Plane perpendicular to the view at location of one mouse click.
- Click first on the Button then click once on the View.
- Click first on the Button then click once on one object of the View
to attach the plane at the object.
But you can also select an already existing point first and click the button
to attach the plane.
"""
msg = verbose
createFolders('WorkPlanes')
m_actDoc = get_ActiveDocument(info=msg)
if m_actDoc.Name is None:
return None
m_selEx = Gui.Selection.getSelectionEx(m_actDoc.Name)
if len(m_selEx) >= 1:
SelectedObjects = get_SelectedObjects(info=1)
Number_of_Points = SelectedObjects[0]
if (Number_of_Points == 1):
Point_List = SelectedObjects[3]
name = "Plane"
part = "Part::Feature"
# return view direction as a vector
Plane_Normal = Gui.ActiveDocument.ActiveView.getViewDirection()
# Set the base of the plane at location of mouse click
Plane_Point = Point_List[-1].Point
# Create a Plane
Plane_User_Name, plane = plot_plane(m_lengthPlane, m_widthPlane, Plane_Point, Plane_Normal, part, name)
else:
printError_msg("Either select first one Point and Click the button or \n" +
"Click the button and one free mouse click in the view or" +
"Click the button and one mouse click on an object of the view !")
else:
global m_callback
#view = Gui.ActiveDocument.ActiveView
view = get_ActiveView()
# m_callback = view.addEventCallbackPivy(SoMouseButtonEvent.getClassTypeId(),getClickedPlane)
m_callback = view.addEventCallback("SoMouseButtonEvent", getClickedPlane2) | 62acc0e41ce165047f6dabf99d0df8fd2b1db000 | 19,688 |
def is_logged_in(f):
"""
is logged in decorator
"""
@wraps(f)
def wrap(*args, **kwargs):
"""
wrap from template
"""
if 'logged_in' in session:
return f(*args, **kwargs)
else:
flash('Unauthorized, Please login', 'danger')
return redirect(url_for('login'))
return wrap | 732bf60bf0901fc341f81c3e6db3516052ecfd12 | 19,689 |
def dataframe_from_mult_files(filenames):
"""@param filenames (List[Str]): list of filenames"""
dfs = []
for filename in filenames:
dfs.append(dataframe_from_file(filename))
return pd.concat(dfs, axis=0) | 76f37d5cef6a8b44946ef25536a135db339beca6 | 19,690 |
def batch_euclidean_dist(x, y, min_val):
""" euclidean_dist function over batch
x and y are batches of matrices x' and y':
x' = (x'_1, | x'_2 | ... | x'_m).T
y' = (y'_1, | y'_2 | ... | y'_n).T
Where x_i and y_j are vectors. We calculate the distances between each pair x_i and y_j.
res'[i, j] = dist(x'_i, y'_j)
res (batch of res') will have the shape [batch_size, m, n]
For calculation we use the formula x^2 - 2xy + y^2.
Clipped to prevent zero distances for numerical stability.
"""
_, m, _ = x.shape
_, n, _ = y.shape
# shape [N, m, n]
xx = ops.pows(x, 2).sum(-1, keepdims=True).repeat(n, axis=-1)
yy = ops.pows(y, 2).sum(-1, keepdims=True).repeat(m, axis=-1).transpose(0, 2, 1)
dist = xx + yy
dist = 1 * dist - 2 * ops.batch_dot(x, y.transpose(0, 2, 1))
# Avoiding zeros for numerical stability
dist = ops.maximum(
dist,
min_val,
)
dist = ops.sqrt(dist)
return dist | 1eec65330ba8970fd84c7d9c7c57e91cd79e0e6f | 19,691 |
def outgroup_reformat(newick, outgroup):
"""
Move the location of the outgroup in a newick string to be at the end of the string
Inputs:
newick --- a newick string to be reformatted
outgroup --- the outgroup
Output:
newick --- the reformatted string
"""
# Replace the outgroup and comma with an empty string
newick = newick.replace(outgroup + ",", "")
newick = newick[:-2] + "," + outgroup + ");"
return newick | a45be59deb95d7bb61ea82a111d4390e49d4b7a8 | 19,692 |
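A small example, assuming the outgroup label is followed by a comma in the input, as in this simple Newick string.
tree = "(A,(B,C));"
print(outgroup_reformat(tree, "A"))  # ((B,C),A);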
def get_source_token(request):
"""
Perform token validation for the presqt-source-token header.
Parameters
----------
request : HTTP request object
Returns
-------
Returns the token if the validation is successful.
Raises a custom AuthorizationException error if the validation fails.
"""
# Validate that the proper token exists in the request.
try:
return request.META['HTTP_PRESQT_SOURCE_TOKEN']
except KeyError:
raise PresQTValidationError(
"PresQT Error: 'presqt-source-token' missing in the request headers.",
status.HTTP_400_BAD_REQUEST) | db53bba32f8471a17d44fae2d3f44749d5a83c86 | 19,693 |
def read_train_valid(filename):
"""
Read a training or validation file.
:param filename: name of the training/validation set file
:return:
the texts and labels of the set,
where the texts are a list and the labels are a list (each element an int)
Example return: ['我很开心', '你不是真正的快乐', '一切都是假的'], [1, 0, 0]
"""
fp = pd.read_table(filename, sep='\t', error_bad_lines=False)
return fp['review'].tolist(), list(map(int, fp['sentiment'].tolist())) | f6990db50453e4dd88f8ecd13e1eb345ab15fc87 | 19,694 |
def weighted_regularization_matrix_from(
regularization_weights: np.ndarray,
pixel_neighbors: np.ndarray,
pixel_neighbors_sizes: np.ndarray,
) -> np.ndarray:
"""
From the pixel-neighbors, setup the regularization matrix using the weighted regularization scheme.
Parameters
----------
regularization_weights
The regularization weight of each pixel, which governs how much smoothing is applied to that individual pixel.
pixel_neighbors
An array of length (total_pixels) which provides the index of all neighbors of every pixel in
the Voronoi grid (entries of -1 correspond to no neighbor).
pixel_neighbors_sizes
An array of length (total_pixels) which gives the number of neighbors of every pixel in the
Voronoi grid.
Returns
-------
np.ndarray
The regularization matrix computed using an adaptive regularization scheme where the effective regularization
coefficient of every source pixel is different.
"""
pixels = len(regularization_weights)
regularization_matrix = np.zeros(shape=(pixels, pixels))
regularization_weight = regularization_weights ** 2.0
for i in range(pixels):
regularization_matrix[i, i] += 1e-8
for j in range(pixel_neighbors_sizes[i]):
neighbor_index = pixel_neighbors[i, j]
regularization_matrix[i, i] += regularization_weight[neighbor_index]
regularization_matrix[
neighbor_index, neighbor_index
] += regularization_weight[neighbor_index]
regularization_matrix[i, neighbor_index] -= regularization_weight[
neighbor_index
]
regularization_matrix[neighbor_index, i] -= regularization_weight[
neighbor_index
]
return regularization_matrix | ecc6301e327adc608530c933ae769bd92ffcaf84 | 19,695 |
def child_at_time(
self,
search_time,
shallow_search=False,
):
"""Return the child that overlaps with time search_time.
search_time is in the space of self.
If shallow_search is false, will recurse into compositions.
"""
range_map = self.range_of_all_children()
        # find the first item whose end_time_exclusive is after the search_time
first_inside_range = _bisect_left(
seq=self,
tgt=search_time,
key_func=lambda child: range_map[child].end_time_exclusive(),
)
        # find the last item whose start_time is before the search_time
last_in_range = _bisect_right(
seq=self,
tgt=search_time,
key_func=lambda child: range_map[child].start_time,
lower_search_bound=first_inside_range,
)
# limit the search to children who are in the search_range
possible_matches = self[first_inside_range:last_in_range]
result = None
for thing in possible_matches:
if range_map[thing].overlaps(search_time):
result = thing
break
# if the search cannot or should not continue
if (
result is None
or shallow_search
or not hasattr(result, "child_at_time")
):
return result
# before you recurse, you have to transform the time into the
# space of the child
child_search_time = self.transformed_time(search_time, result)
return result.child_at_time(child_search_time, shallow_search) | 5961a6d20a962b7b698822610bf4cecdf9c33257 | 19,696 |
def weight_variable_truncated_normal(input_dim, output_dim, name=""):
"""Create a weight variable with truncated normal distribution, values
that are more than 2 stddev away from the mean are redrawn."""
initial = tf.truncated_normal([input_dim, output_dim], stddev=0.5)
return tf.Variable(initial, name=name) | 4c645fc5a914ff99f5b1063f5ecc0b4878481517 | 19,697 |
def get_dummy_vm_create_spec(client_factory, name, data_store_name):
"""Builds the dummy VM create spec."""
config_spec = client_factory.create('ns0:VirtualMachineConfigSpec')
config_spec.name = name
config_spec.guestId = "otherGuest"
vm_file_info = client_factory.create('ns0:VirtualMachineFileInfo')
vm_file_info.vmPathName = "[" + data_store_name + "]"
config_spec.files = vm_file_info
tools_info = client_factory.create('ns0:ToolsConfigInfo')
tools_info.afterPowerOn = True
tools_info.afterResume = True
tools_info.beforeGuestStandby = True
tools_info.beforeGuestShutdown = True
tools_info.beforeGuestReboot = True
config_spec.tools = tools_info
config_spec.numCPUs = 1
config_spec.memoryMB = 4
controller_key = -101
controller_spec = create_controller_spec(client_factory, controller_key)
disk_spec = create_virtual_disk_spec(client_factory, 1024, controller_key)
device_config_spec = [controller_spec, disk_spec]
config_spec.deviceChange = device_config_spec
return config_spec | 6564197d319b87f0a9dd05ae3053de6ebc11cf5c | 19,698 |
def test_comparison_ops_eq_t():
"""Check the equal-to operator for a truthy result."""
return """
fn main() {
{dest} = 1 == 1;
}
""" | 1d6562c26b1103deaf3a8eed4bc8d341a4a7d3e0 | 19,699 |
def binary_accuracy(*, logits, labels):
"""Accuracy of binary classifier, from logits."""
p = jax.nn.sigmoid(logits)
return jnp.mean(labels == (p > 0.5)) | f7795c8d7a945e5e5e97475888cf9e5b65aa1415 | 19,700 |
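Example call with a batch of four (arguments are keyword-only):
import jax.numpy as jnp

logits = jnp.array([2.3, -1.1, 0.4, -3.0])
labels = jnp.array([1, 0, 0, 0])
binary_accuracy(logits=logits, labels=labels)   # 0.75 (third prediction is wrong)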
def extract_tag(inventory, url):
"""
extract data from sphinx inventory.
The extracted datas come from a C++ project
documented using Breathe. The structure of the inventory
is a dictionary with the following keys
- cpp:class (class names)
- cpp:function (functions or class methods)
- cpp:type (type names)
each value of this dictionary is again a dictionary with
- key : the name of the element
- value : a tuple where the third index is the url to the corresponding documentation
Parameters
----------
inventory : dict
sphinx inventory
url : url of the documentation
Returns
-------
dictionary with keys class, class_methods, func, type
but now the class methods are with their class.
"""
classes = {}
class_methods = {}
functions = {}
types = {}
get_relative_url = lambda x: x[2].replace(url, '')
for c, v in inventory.get('cpp:class', {}).items():
classes[c] = get_relative_url(v)
class_methods[c] = {}
for method, v in inventory.get('cpp:function', {}).items():
found = False
for c in class_methods.keys():
find = c + '::'
if find in method:
class_methods[c][method.replace(find, '')] = get_relative_url(v)
found = True
break
if not found:
functions[method] = get_relative_url(v)
for typename, v in inventory.get('cpp:type', {}).items():
types[typename] = get_relative_url(v)
return {'class': classes,
'class_methods': class_methods,
'func':functions,
'type': types
} | dcda1869fb6a44bea3b17f1d427fe279ebdc3a11 | 19,702 |
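An illustration with a hypothetical inventory; only the third element of each tuple (the URI) is read, so the other slots are left as placeholders:
inventory = {
    'cpp:class': {'ns::Mesh': (None, None, 'https://docs.example.org/classns_1_1Mesh.html', None)},
    'cpp:function': {
        'ns::Mesh::refine': (None, None, 'https://docs.example.org/classns_1_1Mesh.html#refine', None),
        'ns::free_func': (None, None, 'https://docs.example.org/free_func.html', None),
    },
    'cpp:type': {'ns::index_t': (None, None, 'https://docs.example.org/types.html#idx', None)},
}
tags = extract_tag(inventory, 'https://docs.example.org/')
# tags['class']         -> {'ns::Mesh': 'classns_1_1Mesh.html'}
# tags['class_methods'] -> {'ns::Mesh': {'refine': 'classns_1_1Mesh.html#refine'}}
# tags['func']          -> {'ns::free_func': 'free_func.html'}
# tags['type']          -> {'ns::index_t': 'types.html#idx'}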
def strip_parens(s):
"""Strip parentheses around string"""
if not s:
return s
if s[0] == "(" and s[-1] == ")":
return strip_parens(s[1:-1])
else:
return s | ee4c9ce6ee769a86a2e2e39159aa9eaa5fd422c6 | 19,703 |
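A few calls showing the behaviour, including the caveat that only the outermost characters are inspected (they are not required to be a balanced pair):
strip_parens("((x + y))")   # 'x + y'
strip_parens("(a)(b)")      # 'a)(b'
strip_parens("")            # ''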
import ast
def custom_eval(node, value_map=None):
"""
for safely using `eval`
"""
if isinstance(node, ast.Call):
        values = [custom_eval(v, value_map=value_map) for v in node.args]
func_name = node.func.id
if func_name in {"AVG", "IF"}:
return FUNCTIONS_MAP[func_name](*values)
elif func_name in FUNCTIONS_MAP:
return FUNCTIONS_MAP[func_name](values)
else:
raise NotImplementedError(func_name)
elif isinstance(node, ast.Num):
return node.n
elif isinstance(node, ast.Str):
return node.s
elif isinstance(node, ast.BinOp):
return OPERATORS[type(node.op)](
custom_eval(node.left, value_map=value_map),
custom_eval(node.right, value_map=value_map),
)
elif isinstance(node, ast.UnaryOp):
return OPERATORS[type(node.op)](custom_eval(node.operand, value_map=value_map))
elif isinstance(node, ast.Compare):
return OPERATORS[type(node.ops[0])](
custom_eval(node.left, value_map=value_map),
custom_eval(node.comparators[0], value_map=value_map),
)
elif isinstance(node, ast.Name):
name = node.id
if value_map is None:
raise ValueError("value_map must not be None")
if name not in value_map:
raise KeyError()
try:
return value_map[name]
except KeyError as e:
raise e
else:
raise ArithmeticError() | a9ff29455ee90a83f5c54197633153d5b9d0fdbc | 19,704 |
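custom_eval relies on module-level OPERATORS and FUNCTIONS_MAP tables that are not shown; the sketch below is an assumed minimal version of those tables plus a call on a parsed expression (the contents are illustrative, not the original project's definitions). On Python 3.8+ numbers parse as ast.Constant, but the deprecated ast.Num/ast.Str checks above still match them.
import ast
import operator

OPERATORS = {
    ast.Add: operator.add, ast.Sub: operator.sub,
    ast.Mult: operator.mul, ast.Div: operator.truediv,
    ast.USub: operator.neg, ast.Gt: operator.gt, ast.Lt: operator.lt,
}
FUNCTIONS_MAP = {
    "SUM": sum, "MIN": min, "MAX": max,           # called with a list of args
    "AVG": lambda *xs: sum(xs) / len(xs),         # called with unpacked args
    "IF": lambda cond, a, b: a if cond else b,    # called with unpacked args
}

tree = ast.parse("IF(x > 2, x * 10, 0)", mode="eval")
custom_eval(tree.body, value_map={"x": 3})        # 30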
def validate_dict(input,validate):
"""
This function returns true or false if the dictionaries pass regexp
validation.
Validate format:
{
keyname: {
substrname: "^\w{5,10}$",
subintname: "^[0-9]+$"
}
}
Validates that keyname exists, and that it contains a substrname
that is 5-10 word characters, and that it contains subintname which
is only integers.
"""
# Create a local copy to work our magic on.
input = dict(input)
    if not (type(input) == dict and type(validate) == dict):
        raise ValueError("Values to validate_dict must be dicts.")
for key in validate.keys():
if not input.get(key):
# Key didn't exist.
return False
else:
            if not type(input[key]) == type(validate[key]) and not type(input[key]) == str:
# The types of keys didn't match.
return False
elif type(input[key]) == dict:
if not validate_dict(input[key],validate[key]):
# The sub-validate didn't pass.
return False
else:
del input[key]
            elif type(input[key]) == str:
if not validate_str(input[key],validate[key]):
# The sub-validate didn't pass.
return False
else:
del input[key]
elif type(input[key]) == int:
del input[key]
pass
elif type(input[key]) == float:
del input[key]
pass
else:
# I don't know how to deal with this case!
return False
if input == {}:
return True
else:
return False | 0a221f5586f4464f4279ab4ce3d22019e247659b | 19,705 |
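validate_dict delegates string checks to a validate_str helper that is not shown; assuming that helper simply applies re.match, usage could look like this:
import re

def validate_str(value, pattern):
    # Assumed helper: True when the value matches the regexp.
    return re.match(pattern, value) is not None

schema = {"user": {"name": r"^\w{5,10}$", "age": r"^[0-9]+$"}}
validate_dict({"user": {"name": "alice", "age": "30"}}, schema)   # True
validate_dict({"user": {"name": "al", "age": "30"}}, schema)      # False (name too short)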
def build_windows_and_pods_from_events(backpressure_events, window_width_in_hours=1) -> (list, list):
"""
Generate barchart-friendly time windows with counts of backpressuring durations within each window.
:param backpressure_events: a list of BackpressureEvents to be broken up into time windows
:param window_width_in_hours: how wide each time window should be in hours
    :return: a tuple: (list of BackpressureWindow objects, sorted list of all pod names seen in the events)
"""
# The logic below is highly dependent on events being sorted by start timestamp oldest to newest.
sorted_events = backpressure_events.copy()
sorted_events.sort(key=lambda e: e.start)
interval = sorted_events[0].start.replace(minute=0, second=0, microsecond=0)
next_interval = interval + timedelta(hours=window_width_in_hours)
all_pods = set(())
windows = [BackpressureWindow(interval)]
for event in sorted_events:
all_pods.add(event.pod)
while event.start >= next_interval:
interval = next_interval
windows.append(BackpressureWindow(interval))
next_interval = next_interval + timedelta(hours=window_width_in_hours)
windows[-1].add_event(event)
all_pods_list = list(all_pods)
all_pods_list.sort()
return windows, all_pods_list | 78adebe54d883a7251c04e250f7c14e47043d40e | 19,706 |
import requests
import json
def package_search(api_url, org_id=None, params=None, start_index=0, rows=100, logger=None, out=None):
"""
package_search: run the package_search CKAN API query, filtering by org_id, iterating by 100, starting with 'start_index'
perform package_search by owner_org:
https://data.ioos.us/api/3/action/package_search?q=owner_org:
"""
action = "package_search"
if org_id is not None:
if params is not None:
payload = {'q': "owner_org:{id}+{params}".format(id=org_id, params="+".join(params)), 'start': start_index, 'rows': rows}
print(payload)
else:
payload = {'q': "owner_org:{id}".format(id=org_id), 'start': start_index, 'rows': rows}
print(payload)
else:
if params is not None:
payload = {'q': "{params}".format(params=" ".join(params)), 'start': start_index, 'rows': rows}
print(payload)
else:
payload = {'start': start_index, 'rows': rows}
print(payload)
url = ("/").join([api_url, "action", action])
if logger:
logger.info("Executing {action}. URL: {url}. Parameters {params}".format(action=action, url=url, params=payload))
#r = requests.get(url=url, headers = {'content-type': 'application/json'}, params=payload)
#r = requests.post(url=url, headers = {'content-type': 'application/json'}, data=json.dumps(payload))
r = requests.post(url=url, headers = {'content-type': 'application/json'}, json=payload)
print(json.dumps(payload))
print(r.text)
# either works:
#result = json.loads(r.text)
result = r.json()
# this is the full package_search result:
#if out:
# out.write(json.dumps(result, indent=4, sort_keys=True, ensure_ascii=False))
return result | 642a869931d45fe441a146cb8e931dc530170c37 | 19,707 |
def voigt_fit(prefix,x,slice,c,vary):
"""
    This function fits a Voigt profile to a spectral slice. The center value can be held constant or floated; everything else is floated.
Parameters:
prefix: prefix for lmfit to distinguish variables during multiple fits
x: x values to use in fit
slice: slice to be fit
c: center of voigt obtained from max value of the slice
        vary: Boolean, determines whether c is floated (default is True)
Returns:
out: lmfit fit output
"""
model = VoigtModel(prefix=prefix)
pars = model.guess(slice,x=x)
pars[str(prefix)+'center'].set(c,vary=vary)
out = model.fit(slice,pars,x=x)
return out | 034810cb6a0ac8efb311182df3d65cf0bd6002d9 | 19,708 |
from typing import List
def turn_coordinates_into_list_of_distances(list_of_coordinates: List[tuple]):
"""
Function to calculate the distance between coordinates in a list. Using the
'great_circle' for measuring here, since it is much faster (but less precise
than 'geodesic').
Parameters
----------
list_of_coordinates : List[tuple]
A list containing tuples with coordinates
Returns
-------
list_of_distances : List[float]
A list containing the distance in kilometers between two coordinates.
Subsequent values are added up, thus the values are increasing.
"""
list_of_distances = []
previous_coordinates = None
for coordinates in list_of_coordinates:
if not previous_coordinates:
list_of_distances.append(0.)
else:
dist = distance.great_circle([previous_coordinates[1], previous_coordinates[0]], [coordinates[1], coordinates[0]])
list_of_distances.append(round(list_of_distances[-1] + dist.km, 4))
previous_coordinates = coordinates
return list_of_distances | 5fdc0198b533604ec3d935224c7b2b634670083e | 19,709 |
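A quick call; note the input tuples are (longitude, latitude), since the function swaps the order before handing them to geopy (the module is assumed to do `from geopy import distance`):
coords = [(13.4050, 52.5200),    # (lon, lat) -- roughly Berlin
          (13.7373, 51.0504)]    # (lon, lat) -- roughly Dresden
turn_coordinates_into_list_of_distances(coords)
# -> [0.0, ~165]  (cumulative great-circle kilometres, rounded to 4 decimal places)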
import json
def getPileupDatasetSizes(datasets, phedexUrl):
"""
Given a list of datasets, find all their blocks with replicas
available, i.e., blocks that have valid files to be processed,
and calculate the total dataset size
:param datasets: list of dataset names
:param phedexUrl: a string with the PhEDEx URL
:return: a dictionary of datasets and their respective sizes
NOTE: Value `None` is returned in case the data-service failed to serve a given request.
"""
sizeByDset = {}
if not datasets:
return sizeByDset
urls = ['%s/blockreplicas?dataset=%s' % (phedexUrl, dset) for dset in datasets]
data = multi_getdata(urls, ckey(), cert())
for row in data:
dataset = row['url'].split('=')[-1]
if row['data'] is None:
print("Failure in getPileupDatasetSizes for dataset %s. Error: %s %s" % (dataset,
row.get('code'),
row.get('error')))
sizeByDset.setdefault(dataset, None)
continue
rows = json.loads(row['data'])
sizeByDset.setdefault(dataset, 0) # flat dict in the format of blockName: blockSize
try:
for item in rows['phedex']['block']:
sizeByDset[dataset] += item['bytes']
except Exception as exc:
print("Failure in getPileupDatasetSizes for dataset %s. Error: %s" % (dataset, str(exc)))
sizeByDset[dataset] = None
return sizeByDset | 48d77aa47998204ff99df188ef830cae647ac9b9 | 19,710 |
def convertpo(inputpofile, outputpotfile, template, reverse=False):
"""reads in inputpofile, removes the header, writes to outputpotfile."""
inputpo = po.pofile(inputpofile)
templatepo = po.pofile(template)
if reverse:
swapdir(inputpo)
templatepo.makeindex()
header = inputpo.header()
if header:
inputpo.units = inputpo.units[1:]
for i, unit in enumerate(inputpo.units):
for location in unit.getlocations():
templateunit = templatepo.locationindex.get(location, None)
if templateunit and templateunit.source == unit.source:
break
else:
templateunit = templatepo.findunit(unit.source)
unit.othercomments = []
if unit.target and not unit.isfuzzy():
unit.source = unit.target
elif not reverse:
if inputpo.filename:
unit.addnote("No translation found in %s" % inputpo.filename, origin="programmer")
else:
unit.addnote("No translation found in the supplied source language", origin="programmer")
unit.target = ""
unit.markfuzzy(False)
if templateunit:
unit.addnote(templateunit.getnotes(origin="translator"))
unit.markfuzzy(templateunit.isfuzzy())
unit.target = templateunit.target
if unit.isobsolete():
del inputpo.units[i]
outputpotfile.write(str(inputpo))
return 1 | 6954354db5ca9c660e326eeae23906853743eb57 | 19,711 |
def do_fk5(l, b, jde):
"""[summary]
Parameters
----------
l : float
longitude
b : float
latitude
jde : float
Julian Day of the ephemeris
Returns
-------
tuple
tuple(l,b)
"""
T = (jde - JD_J2000) / CENTURY
lda = l - deg2rad(1.397)*T - deg2rad(0.00031)*T*T
delta_lon = -deg2rad(0.09033/3600) + deg2rad(0.03916/3600)*(cos(lda)+sin(lda))*tan(b)
delta_lat = deg2rad(0.03916/3600)*(np.cos(lda)- np.sin(lda))
l += delta_lon
b += delta_lat
return l,b | 2ccc96aab8ddfcbe93d7534a01b0f262c0330053 | 19,712 |
def adjust_learning_rate(optimizer, epoch):
"""Sets the learning rate to the initial LR decayed by 10 every 30 epochs"""
lr = args.lr * (0.8 ** (epoch // 1))
for param_group in optimizer.param_groups:
param_group['lr'] = lr
return lr | ce4b5a3aa70ab07791af3bd41e14758e568fb1cc | 19,713 |
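Worked schedule for the decay actually implemented above (factor 0.8 per epoch, since `epoch // 1 == epoch`), assuming args.lr = 0.1:
# epoch 0: 0.1 * 0.8**0 = 0.1
# epoch 1: 0.1 * 0.8**1 = 0.08
# epoch 2: 0.1 * 0.8**2 = 0.064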
import yaml
def get_defaults(module, *args):
"""
Find an internal defaults data file, load it using YAML, and return the resulting
dictionary.
Takes the dot-separated module path (e.g. "abscal.wfc3.reduce_grism_extract"), splits
off the last item (e.g. ["abscal.wfc3", "reduce_grism_extract"]), adds ".yaml" to the
end of the second item (e.g. ["abscal.wfc3", "reduce_grism_extract.yaml"]), adds
".defaults" to the first item
(e.g. ["abscal.wfc3.defaults", "reduce_grism_extract.yaml"]), and feeds the result
into :code:`get_data_file()`. Then loads the resulting file as a dictionary, and
builds a new dictionary consisting of:
- All key/value pairs in the "all" dictionary
- All key/value pairs in any dictionary matching any of the keyword arguments
- The above two items from any dictionary matching any of the keyword arguments,
extending recursively into the depths of the dictionary.
The result will be a flat (i.e. single-level) dictionary.
Parameters
----------
module : str
The module to search in, using standard dot separators (e.g. abscal.wfc3)
args : list
A list of specific keyword arguments, provided to ensure the inclusion of
specific sub-values or sub-dictionaries.
Returns
-------
defaults : dict
Dictionary of default parameters.
"""
items = module.split(".")
module = ".".join(items[:-1])
file_name = items[-1]+".yaml"
defaults_file = get_data_file(module, file_name, defaults=True)
with open(defaults_file, "r") as inf:
defaults_dict = yaml.safe_load(inf)
defaults = _extract_dict(defaults_dict, {}, args)
return defaults | a92e37f75c4f967c2b23391a817fb14118b89a8f | 19,714 |
import shlex
def get(using=None):
"""Return a browser launcher instance appropriate for the environment."""
if _tryorder is None:
with _lock:
if _tryorder is None:
register_standard_browsers()
if using is not None:
alternatives = [using]
else:
alternatives = _tryorder
for browser in alternatives:
if '%s' in browser:
# User gave us a command line, split it into name and args
browser = shlex.split(browser)
if browser[-1] == '&':
return BackgroundBrowser(browser[:-1])
else:
return GenericBrowser(browser)
else:
# User gave us a browser name or path.
try:
command = _browsers[browser.lower()]
except KeyError:
command = _synthesize(browser)
if command[1] is not None:
return command[1]
elif command[0] is not None:
return command[0]()
raise Error("could not locate runnable browser") | 12c2ca5fdd93964527330a0694d69a8d4e84ee12 | 19,715 |
import binascii
def _bin_to_long(x):
"""
Convert a binary string into a long integer
This is a clever optimization for fast xor vector math
"""
return int(binascii.hexlify(x), 16) | 54b50ffea715bf127eabd7e82aada36e4717c288 | 19,716 |
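Two quick calls; the result is the big-endian integer value of the bytes, i.e. equivalent to int.from_bytes(x, 'big'):
_bin_to_long(b"\x01\x02")        # 258 (0x0102)
_bin_to_long(b"\xff\xff\xff")    # 16777215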
def update_book(username, book_id, data):
"""Update book data"""
cursor, conn = db_sql.connect('books.db')
keys = list(data.keys())
sql = ("UPDATE " + username + " SET " + " = ?, ".join(keys) +
" = ? WHERE _id = ?")
temp_list = []
for key in keys:
temp_list.append(data[key])
temp_list.append(book_id)
cursor.execute(sql, tuple(temp_list))
conn.commit()
conn.close()
return cursor.lastrowid | 2dcc2970cec8f53c90c72f341092f7cb7c7d6232 | 19,717 |
def score_retrievals(label, retrievals):
"""
Evaluating the current retrieval experiment
Args:
-----
label: string
label corresponding to the query
    retrievals: list
        list of strings containing the ranked labels corresponding to the retrievals
    """
# retrievals = retrievals[1:] # we do not account rank-0 since it's self-retrieval
relevant_mask = np.array([1 if r==label else 0 for r in retrievals])
num_relevant_retrievals = np.sum(relevant_mask)
if(num_relevant_retrievals == 0):
print(label)
metrics = {
"label": label,
"p@1": -1,
"p@5": -1,
"p@10": -1,
"p@50": -1,
"p@rel": -1,
"mAP": -1,
"r@1": -1,
"r@5": -1,
"r@10": -1,
"r@50": -1,
"r@rel": -1,
"mAR": -1
}
return metrics
# computing precision based metrics
precision_at_rank = np.cumsum(relevant_mask) / np.arange(1, len(relevant_mask) + 1)
precision_at_1 = precision_at_rank[0]
precision_at_5 = precision_at_rank[4]
precision_at_10 = precision_at_rank[9]
precision_at_50 = precision_at_rank[49]
precision_at_rel = precision_at_rank[num_relevant_retrievals - 1]
average_precision = np.sum(precision_at_rank * relevant_mask) / num_relevant_retrievals
# computing recall based metrics
recall_at_rank = np.cumsum(relevant_mask) / num_relevant_retrievals
recall_at_1 = recall_at_rank[0]
recall_at_5 = recall_at_rank[4]
recall_at_10 = recall_at_rank[9]
recall_at_50 = recall_at_rank[49]
recall_at_rel = recall_at_rank[num_relevant_retrievals - 1]
average_recall = np.sum(recall_at_rank * relevant_mask) / num_relevant_retrievals
metrics = {
"label": label,
"p@1": precision_at_1,
"p@5": precision_at_5,
"p@10": precision_at_10,
"p@10": precision_at_50,
"p@rel": precision_at_rel,
"mAP": average_precision,
"r@1": recall_at_1,
"r@5": recall_at_5,
"r@10": recall_at_10,
"r@10": recall_at_50,
"r@rel": recall_at_rel,
"mAR": average_recall
}
return metrics | c9a3a24c2c6e5a2986387db88710da43984bd862 | 19,718 |
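A small sanity check; the ranked list must contain at least 50 items, since p@50/r@50 index position 49:
retrievals = ["cat", "dog", "cat", "cat"] + ["dog"] * 46   # 50 ranked labels
m = score_retrievals("cat", retrievals)
# m["p@1"] == 1.0      (top hit is relevant)
# m["p@5"] == 0.6      (3 of the first 5 are relevant)
# m["mAP"] ~= 0.806    (mean of precision at the 3 relevant ranks)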
def default_add_one_res_2_all_res(one_res: list, all_res: list) -> list:
"""
    Default function 1: append the elements of one_res to all_res
:param one_res:
:param all_res:
:return:
"""
for i in one_res:
for j in i:
all_res.append(j)
return all_res | 9c2e83ffaa7c67759f8b3d7cf30354d7cf7ca030 | 19,719 |
from typing import Iterable
import re
def search_gene(search_string: str, **kwargs) -> Iterable[Gene]:
""" Symbols have been separated into search_gene_symbol - this returns Gene objects """
CONSORTIUM_REGEX = {
r"(ENSG\d+)": AnnotationConsortium.ENSEMBL,
r"Gene:(\d+)": AnnotationConsortium.REFSEQ,
r"GeneID:(\d+)": AnnotationConsortium.REFSEQ,
r"Gene ID:(\d+)": AnnotationConsortium.REFSEQ,
}
for c_regex, annotation_consortium in CONSORTIUM_REGEX.items():
if m := re.match(c_regex, search_string, re.IGNORECASE):
gene_id = m.group(1)
return Gene.objects.filter(identifier=gene_id, annotation_consortium=annotation_consortium)
return [] | 768c6ac712b6660b78b2bead3be6f0000541696f | 19,720 |
def check_logged(request):
"""Check if user is logged and have the permission."""
permission = request.GET.get('permission', '')
if permission:
has_perm = request.user.has_perm(permission)
if not has_perm:
msg = (
"User does not have permission to exectute this action:\n"
"expected permission: {permission}").format(
permission=permission)
raise exceptions.PumpWoodUnauthorized(
message=msg, payload={
"permission": permission})
return Response(True) | 2cf03f7336b7c8814fd380aae5209b0b8fe6dca9 | 19,723 |
def _deprecated_configs(agentConfig):
""" Warn about deprecated configs
"""
deprecated_checks = {}
deprecated_configs_enabled = [v for k, v in OLD_STYLE_PARAMETERS if len([l for l in agentConfig if l.startswith(k)]) > 0]
for deprecated_config in deprecated_configs_enabled:
msg = "Configuring %s in datadog.conf is not supported anymore. Please use conf.d" % deprecated_config
deprecated_checks[deprecated_config] = {'error': msg, 'traceback': None}
log.error(msg)
return deprecated_checks | e47a47a1a7dd40d04a21927730479500f934a1d1 | 19,724 |
def check_number_of_calls(object_with_method, method_name, maximum_calls, minimum_calls=1, stack_depth=2):
"""
Instruments the given method on the given object to verify the number of calls to the method is
less than or equal to the expected maximum_calls and greater than or equal to the expected minimum_calls.
"""
return check_sum_of_calls(
object_with_method,
[method_name],
maximum_calls,
minimum_calls,
stack_depth=stack_depth + 1
) | 64bdc512753b159128e34aa7c95d60a741745fce | 19,725 |
def _get_span(succ, name, resultidx=0, matchidx=0, silent_fail=False):
"""
Helper method to return the span for the given result index and name, or None.
Args:
succ: success instance
name: name of the match info, if None, uses the entire span of the result
resultidx: index of the result in success
matchidx: if there is more than one match info with that name, which one to return, if no name, ignored
silent_fail: if True, return None, if False, raise an exception if the match info is not present
Returns:
the span or None if no Span exists
"""
if resultidx >= len(succ):
if not silent_fail:
raise Exception(f"No resultidx {resultidx}, only {len(succ)} results")
return None
res = succ[resultidx]
if name:
matches = res.matches4name(name)
if not matches:
if not silent_fail:
raise Exception(f"No match info with name {name} in result")
return None
if matchidx >= len(matches):
if not silent_fail:
raise Exception(
f"No match info with index {matchidx}, length is {len(matches)}"
)
return None
ret = matches[matchidx].get("span")
else:
ret = res.span
if ret is None:
if silent_fail:
return None
else:
raise Exception("No span found")
return ret | 1fc6208f1aa7289a53e4e64c041abb71498a2eeb | 19,727 |
import random
def gen_k_arr(K, n):
"""
Arguments:
K {int} -- [apa numbers]
n {int} -- [trial numbers]
"""
def random_sel(K, trial=200):
count_index = 0
pool = np.arange(K)
last = None
while count_index < trial:
count_index += 1
random.shuffle(pool)
if pool[0] == last:
swap_with = random.randrange(1, len(pool))
pool[0], pool[swap_with] = pool[swap_with], pool[0]
for item in pool:
yield item
last = pool[-1]
if K <= 1:
return np.repeat(K - 1, n)
else:
k_lst = list(random_sel(K, trial=n))
return np.array(k_lst) | c66084faa8903455835973226ea6ca570239a1ec | 19,728 |
def tau_data(spc_dct_i,
spc_mod_dct_i,
run_prefix, save_prefix, saddle=False):
""" Read the filesystem to get information for TAU
"""
# Set up all the filesystem objects using models and levels
pf_filesystems = filesys.models.pf_filesys(
spc_dct_i, spc_mod_dct_i, run_prefix, save_prefix, saddle)
[harm_cnf_fs, _,
harm_min_locs, harm_save, _] = pf_filesystems['harm']
# [tors_cnf_fs, _, tors_min_locs, _, _] = pf_filesystems['tors']
# Get the conformer filesys for the reference geom and energy
if harm_min_locs:
geom = harm_cnf_fs[-1].file.geometry.read(harm_min_locs)
min_ene = harm_cnf_fs[-1].file.energy.read(harm_min_locs)
# Set the filesystem
tau_save_fs = autofile.fs.tau(harm_save)
# Get the rotor info
rotors = tors.build_rotors(spc_dct_i, pf_filesystems, spc_mod_dct_i)
run_path = filesys.models.make_run_path(pf_filesystems, 'tors')
tors_strs = tors.make_hr_strings(
rotors, run_path, spc_mod_dct_i)
[_, hr_str, flux_str, prot_str, _] = tors_strs
# Use model to determine whether to read grads and hessians
vib_model = spc_mod_dct_i['vib']['mod']
freqs = ()
_, _, proj_zpve, harm_zpve = vib.tors_projected_freqs_zpe(
pf_filesystems, hr_str, prot_str, run_prefix, zrxn=None)
zpe_chnlvl = proj_zpve * phycon.EH2KCAL
# Set reference energy to harmonic zpve
db_style = 'directory'
reference_energy = harm_zpve * phycon.EH2KCAL
if vib_model == 'tau':
if db_style == 'directory':
tau_locs = [locs for locs in tau_save_fs[-1].existing()
if tau_save_fs[-1].file.hessian.exists(locs)]
elif db_style == 'jsondb':
tau_locs = [locs for locs in tau_save_fs[-1].json_existing()
if tau_save_fs[-1].json.hessian.exists(locs)]
else:
if db_style == 'directory':
tau_locs = tau_save_fs[-1].existing()
elif db_style == 'jsondb':
tau_locs = tau_save_fs[-1].json_existing()
# Read the geom, ene, grad, and hessian for each sample
samp_geoms, samp_enes, samp_grads, samp_hessians = [], [], [], []
for locs in tau_locs:
# ioprinter.info_message('Reading tau info at path {}'.format(
# tau_save_fs[-1].path(locs)))
if db_style == 'directory':
geo = tau_save_fs[-1].file.geometry.read(locs)
elif db_style == 'jsondb':
geo = tau_save_fs[-1].json.geometry.read(locs)
geo_str = autofile.data_types.swrite.geometry(geo)
samp_geoms.append(geo_str)
if db_style == 'directory':
tau_ene = tau_save_fs[-1].file.energy.read(locs)
elif db_style == 'jsondb':
tau_ene = tau_save_fs[-1].json.energy.read(locs)
rel_ene = (tau_ene - min_ene) * phycon.EH2KCAL
ene_str = autofile.data_types.swrite.energy(rel_ene)
samp_enes.append(ene_str)
if vib_model == 'tau':
if db_style == 'directory':
grad = tau_save_fs[-1].file.gradient.read(locs)
elif db_style == 'jsondb':
grad = tau_save_fs[-1].json.gradient.read(locs)
grad_str = autofile.data_types.swrite.gradient(grad)
samp_grads.append(grad_str)
if db_style == 'directory':
hess = tau_save_fs[-1].file.hessian.read(locs)
elif db_style == 'jsondb':
hess = tau_save_fs[-1].json.hessian.read(locs)
hess_str = autofile.data_types.swrite.hessian(hess)
samp_hessians.append(hess_str)
# Read a geometry, grad, and hessian for a reference geom if needed
ref_geom, ref_grad, ref_hessian = [], [], []
if vib_model != 'tau':
# Get harmonic filesystem information
[harm_save_fs, _, harm_min_locs, _, _] = pf_filesystems['harm']
        # Read the geometry, gradient, and Hessian
geo = harm_save_fs[-1].file.geometry.read(harm_min_locs)
geo_str = autofile.data_types.swrite.geometry(geo)
ref_geom.append(geo_str)
grad = harm_save_fs[-1].file.gradient.read(harm_min_locs)
grad_str = autofile.data_types.swrite.gradient(grad)
ref_grad.append(grad_str)
hess = harm_save_fs[-1].file.hessian.read(harm_min_locs)
hess_str = autofile.data_types.swrite.hessian(hess)
ref_hessian.append(hess_str)
# Obtain symmetry factor
ioprinter.info_message('Determining the symmetry factor...', newline=1)
sym_factor = symm.symmetry_factor(
pf_filesystems, spc_mod_dct_i, spc_dct_i, rotors,
)
# Create info dictionary
keys = ['geom', 'sym_factor', 'elec_levels', 'freqs', 'flux_mode_str',
'samp_geoms', 'samp_enes', 'samp_grads', 'samp_hessians',
'ref_geom', 'ref_grad', 'ref_hessian',
'zpe_chnlvl', 'reference_energy']
vals = [geom, sym_factor, spc_dct_i['elec_levels'], freqs, flux_str,
samp_geoms, samp_enes, samp_grads, samp_hessians,
ref_geom, ref_grad, ref_hessian,
zpe_chnlvl, reference_energy]
inf_dct = dict(zip(keys, vals))
return inf_dct | d27742140929d79dd4bb7a36094b5e5caf7173e2 | 19,729 |
def get_atten(log, atten_obj):
"""Get attenuator current attenuation value.
Args:
log: log object.
atten_obj: attenuator object.
Returns:
Current attenuation value.
"""
return atten_obj.get_atten() | 22d69d326846105491b1fa90f319eb9e0da69a20 | 19,730 |
def lfs_hsm_remove(log, fpath, host=None):
"""
HSM remove
"""
command = ("lfs hsm_remove %s" % (fpath))
extra_string = ""
if host is None:
retval = utils.run(command)
else:
retval = host.sh_run(log, command)
extra_string = ("on host [%s]" % host.sh_hostname)
if retval.cr_exit_status != 0:
log.cl_error("failed to run command [%s]%s, "
"ret = [%d], stdout = [%s], stderr = [%s]",
command, extra_string,
retval.cr_exit_status, retval.cr_stdout,
retval.cr_stderr)
return -1
return 0 | ddca4a626786dfecfef231737761924527d136d5 | 19,731 |
def area_under_curve_score(table,scoring_function):
"""Takes a run and produces the total area under the curve until the end of the run.
mean_area_under_curve_score is probably more informative."""
assert_run(table)
scores = get_scores(table,scoring_function)
return np.trapz(scores) | f84fd0a2adede09c17aa6254906bee36a2738983 | 19,732 |