content (string, lengths 35 to 762k) | sha1 (string, length 40) | id (int64, 0 to 3.66M) |
---|---|---|
from time import time
from astropy.coordinates import SkyCoord, EarthLocation
from astropy.time import Time
import astropy.units as u
from astroplan import FixedTarget, Observer
def vectorize_timing(n_targets):
"""
Calculate the rise time of ``n_targets`` targets, return the
run time in seconds.
"""
vega_coord = SkyCoord(279.23473479*u.degree, 38.78368896*u.degree)
vega = FixedTarget(name="Vega", coord=vega_coord)
target_list = n_targets*[vega]
t = Time("2008-02-27 22:00:00")
obs = Observer(location=EarthLocation(10*u.deg, 20*u.deg, 0*u.m))
start = time()
obs.target_rise_time(t, target_list)
end = time()
return end-start | 287f723d66efedc9eaa874e3b1db9d6724598c10 | 5,348 |
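A minimal usage sketch for the timing helper above, assuming astropy and astroplan are installed; it simply reports how the run time scales with the number of targets.

for n in (1, 10, 100):
    elapsed = vectorize_timing(n)
    print(f"{n} targets: {elapsed:.3f} s")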
import json
def get_game(name, all=False):
"""
Get the game information for a particular game.
For response object structure, see:
https://dev.twitch.tv/docs/v5/reference/search/#search-games
May throw exceptions on network/Twitch error.
"""
search_opts = {
'query': name,
'type': 'suggest',
'live': 'false',
}
headers = {
'Client-ID': config['twitch_clientid'],
'Accept': 'application/vnd.twitchtv.v5+json',
}
res = common.http.request("https://api.twitch.tv/kraken/search/games", search_opts, headers=headers)
res = json.loads(res)
if all:
return res['games'] or []
else:
for game in res['games'] or []:
if game['name'] == name:
return game
return None | 0946516ca7062087d0dc01daa89b328a26367145 | 5,349 |
def compute_correlation_prob_class_target(candidates_per_query_target):
"""This function computes the overall correlation between the probability of being in
the positive class and the value of the target column
"""
probs_per_query_target = []
gains_per_query_target = []
for key in candidates_per_query_target.keys():
candidates = candidates_per_query_target[key].keys()
tmp_probs = [candidates_per_query_target[key][candidate]['pred_prob'] for candidate in candidates]
tmp_gains = [candidates_per_query_target[key][candidate][TARGET_COLUMN] for candidate in candidates]
probs_per_query_target += tmp_probs
gains_per_query_target += tmp_gains
return pearsonr(probs_per_query_target, gains_per_query_target) | e0412cfa3940149d88c75f680aab55dece9b36a2 | 5,350 |
def get(sql: str):
""" execute select SQL and return unique result.
select count(1) form meters
or
select lass(ts) from meters where tag = 'xxx'
:return: only value
"""
result = _query(sql)
try:
value = result.next()
except StopIteration:
return None
except taos.error.OperationalError:
return None
if len(value) == 1:
return value[0]
else:
raise MultiColumnsError('Expect only one column.') | 10b03b64c0a18b4cd5a3c83e6d101d05566b251c | 5,351 |
import tensorflow as tf
def is_fully_defined(x):
"""Returns True iff `x` is fully defined in every dimension.
For more details, see `help(tf.TensorShape.is_fully_defined)`.
Args:
x: object representing a shape; convertible to `tf.TensorShape`.
Returns:
is_fully_defined: `bool` indicating that the shape is fully known.
"""
return tf.TensorShape(x).is_fully_defined() | d3d419864fb9d6168adce54afae84f089c9a680c | 5,353 |
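A quick illustration of the wrapper above (assumes TensorFlow is installed): a shape containing an unknown dimension is not fully defined, a concrete shape is.

print(is_fully_defined([2, 3]))     # True
print(is_fully_defined([None, 3]))  # False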
from flask import current_app
def make_shell_context():
"""
Creates a python REPL with several default imports
in the context of the current_app
:return:
"""
return dict(current_app=current_app) | 8db290ccfa51681ac63e8e5d88b29c4e82176f36 | 5,354 |
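A sketch of how this factory is typically registered in a Flask app (assumed setup, not shown in the snippet); once registered, `current_app` is available in `flask shell` without an explicit import.

from flask import Flask

app = Flask(__name__)
app.shell_context_processor(make_shell_context)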
def recommend_hybrid_user(
df, model, interactions, user_id, user_dict,
item_dict, topn, new_only=True, threshold=3,
show=True):
"""Function to produce user recommendations. Hybrid version of
recommend_known_user
Args:
model: trained matrix factorization model
interactions: dataset used for training the model
user_id: user ID for which we need to generate recommendation
user_dict: Dictionary type input containing user_id as key and
interaction_index as value
item_dict: Dictionary type input containing item_id as key and
item_name as value
threshold: value above which the rating is favorable in interaction
matrix
topn: Number of output recommendations needed
new_only: whether to only recommend items that users have not visited
show: whether to show the result of function
Returns:
Prints list of items the given user has already visited
Prints list of N recommended items which user hopefully will be
interested in
"""
print('Recommending items for user {}...'.format(user_id))
n_users, n_items = interactions.shape
user_features, item_features, user_x, _ = feature_matrix(
df, user_id=user_id)
scores = pd.Series(model.predict(
user_x, interactions.values[user_x, :], user_features=user_features,
item_features=item_features))
scores.index = interactions.columns
scores = list(pd.Series(scores.sort_values(ascending=False).index))
known_items = list(pd.Series(
interactions.loc[user_id, :]
[interactions.loc[user_id, :] > threshold].index).sort_values(
ascending=False))
if new_only:
scores = [x for x in scores if x not in known_items]
item_list = scores[:topn]
known_items = list(pd.Series(known_items).apply(lambda x: item_dict[x]))
recommended_items = list(pd.Series(item_list).apply(
lambda x: item_dict[x]))
if show is True:
print("Known Likes:")
counter = 1
for i in known_items:
print(str(counter) + '- ' + i)
counter += 1
print("Recommended Items:")
counter = 1
for i in recommended_items:
print(str(counter) + '- ' + i)
counter += 1
return item_list | f6f1bb5486dcd3a7848ca006998587a8efce4939 | 5,355 |
def i(mu_i, mu_ij, N) :
"""Calcule le tableau I[i, j]"""
return [[I_ij(i, j, mu_i, mu_ij, N) for j in range(0, N)] for i in range(0, N)] | 518609bbe91088d94267515ccd07b3fa16525d4f | 5,356 |
import dateutil
def format_datetime(this, date, date_format=None):
"""Convert datetime to a required format."""
date = dateutil.parser.isoparse(date)
if date_format is None:
date_format = "%d-%m-%Y"
return date.strftime(date_format) | 0311eb918540dbb0c5751244b89de220073b9dcd | 5,357 |
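Example calls for the helper above; the first argument (`this`) is unused by the function, so None is passed here.

print(format_datetime(None, "2021-03-07T15:30:00"))              # 07-03-2021
print(format_datetime(None, "2021-03-07T15:30:00", "%Y/%m/%d"))  # 2021/03/07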
import numpy
def _estimate_melting_levels(latitudes_deg, valid_time_unix_sec):
"""Estimates melting level at each point.
This estimate is based on linear regression with respect to latitude. There
is one set of regression coefficients for each month.
:param latitudes_deg: numpy array of latitudes (deg N).
:param valid_time_unix_sec: Valid time.
:return: melting_levels_m_asl: numpy array of melting levels (metres above
sea level), with same shape as `latitudes_deg`.
"""
month_index = int(
time_conversion.unix_sec_to_string(valid_time_unix_sec, '%m')
)
return (
MELT_LEVEL_INTERCEPT_BY_MONTH_M_ASL[month_index - 1] +
MELT_LEVEL_SLOPE_BY_MONTH_M_DEG01[month_index - 1] *
numpy.absolute(latitudes_deg)
) | d72ac1cf0c23eadb49fc15e55c2a71e273120500 | 5,358 |
def edit_municipality(self, request, form):
""" Edit a municipality. """
layout = EditMunicipalityLayout(self, request)
if form.submitted(request):
form.update_model(self)
request.message(_("Municipality modified."), 'success')
return redirect(layout.success_url)
if not form.errors:
form.apply_model(self)
return {
'layout': layout,
'form': form,
'button_text': _("Save"),
'cancel': layout.cancel_url,
} | 4d233f97cbc7672b38348eb982cecc68f88ade17 | 5,359 |
import copy
def operate_monitor(params):
""" different apps has different required params"""
ret_obj = copy.deepcopy(RET_OBJ)
group_id = params.get("group_id", type=int, default=None)
app_name = params.get("app_name")
operate = "update" if key_exist(group_id, app_name) else "insert"
valid_key = "_".join([app_name, operate])
if valid_key not in param_valids:
raise CustomException("operate_monitor Not found corresponding valid function for %s" % app_name, 1005)
params = param_valids[valid_key](params)
api_create(params) if operate == "insert" else api_update(params)
ret_obj['msg'] = operate + " monitor successfully."
return ret_obj | 8d78d61dc44acf2fdcc85a8e3cd4d6fd68c47bf6 | 5,361 |
import numpy as np
def construct_rgba_vector(img, n_alpha=0):
"""
Construct RGBA vector to be used to color faces of pcolormesh
This function was taken from Flamingo.
----------
Args:
img [Mandatory (np.ndarray)]: NxMx3 RGB image matrix
n_alpha [Mandatory (float)]: Number of border pixels
to use to increase alpha
----------
Returns:
rgba [Mandatory (np.ndarray)]: (N*M)x4 RGBA image vector
"""
alpha = np.ones(img.shape[:2])
if n_alpha > 0:
for i, a in enumerate(np.linspace(0, 1, n_alpha)):
alpha[:, [i, -2-i]] = a
rgb = img[:, :-1, :].reshape((-1, 3)) # we have 1 less faces than grid
rgba = np.concatenate((rgb, alpha[:, :-1].reshape((-1, 1))), axis=1)
if np.any(img > 1):
rgba[:, :3] /= 255.0
return rgba | 028275930ad4d2a3b98ce32e48021da8ff1e6c43 | 5,362 |
import pandas as pd
def nice(val):
"""Return None for missing values (NaN/None/NaT); otherwise return the value unchanged."""
if pd.isna(val):
return None
return val | a2d0c3c64c7c2e01d66d171902e85a3d0056cc73 | 5,363 |
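Example usage of the helper above: pandas treats NaN/None/NaT as missing, everything else passes through unchanged.

import numpy as np

print(nice(np.nan))  # None
print(nice(42))      # 42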
import logging
import requests
import json
def retrieve_tree(issue_id):
"""Retrieve a tree of issues from Redmine, starting at `issue_id`."""
logging.info(f" Retrieving issue #{issue_id} ...")
params = {
'issue_id': issue_id
}
response = requests.get(ISSUES_ENDPOINT, params=params, headers=HEADERS)
data = json.loads(response.text)
issue = data['issues'][0]
issue['children'] = retrieve_children(issue_id)
return issue | 928d2f3d68e5b9033a062d5e24d3f34f74781357 | 5,364 |
from typing import Sequence
from typing import Dict
def cmdline_args(argv: Sequence[str], options: Sequence[Option], *, process: callable = None,
error: callable = None, results: dict = None) -> (Dict, Sequence[str]):
"""
Take an array of command line args, process them
:param argv: argument array
:param options: sequence of options to parse
:param process: process function
:param error: error function
:param results: optional dict to contain results (alternative to process callable)
:return: parsed results, remaining unprocessed arguments
"""
def select_option(short_opt, long_opt):
selected_option = None
for current_opt in options:
if short_opt is not None and short_opt == current_opt.short:
selected_option = current_opt
break
elif long_opt is not None and current_opt.long is not None:
if current_opt.long.startswith(long_opt) or long_opt.startswith(current_opt.long):
selected_option = current_opt
break
else:
if error is not None:
if short_opt:
error(f"unknown short option '-{short_opt}'")
else:
error(f"unknown long option '--{long_opt}'")
return selected_option
def dispatch_option(_option: Option, _opt: str, _args):
if _option.fn is not None:
return _option.fn(_option, _opt, _args) if callable(_option.fn) else _option.fn
if process:
tmp = process(_option, _opt, _args)
if tmp is not None:
return tmp
return _args if _option.has_arg else True
if results is None:
results = dict()
index = skip_count = 0
saved_args = []
for index, arg in enumerate(argv):
if skip_count:
skip_count -= 1
elif arg.startswith('--'): # long arg
skip_count = 0
longopt = arg[2:]
option = select_option(None, longopt)
if option is None:
saved_args.append(f"--{longopt}")
else:
args = None
if option.has_arg:
if '=' in longopt:
longopt, args = longopt.split('=', maxsplit=1)
else:
skip_count += 1
args = argv[index + skip_count]
results[option.long] = dispatch_option(option, longopt, args)
elif arg.startswith('-'):
skip_count = 0
for opt in arg[1:]:
option = select_option(opt, None)
if option is None:
saved_args.append(f"-{opt}")
else:
if option.has_arg:
skip_count += 1
args = argv[index + skip_count] if option.has_arg else None
results[option.long] = dispatch_option(option, opt, args)
else:
break
return results, saved_args + [arg for arg in argv[index + skip_count:]] | c9c78d5a6b5fb6147a8b392647ec9a7e4abc2800 | 5,365 |
def trailing_zeroes(value):
# type: (int) -> int
"""Count the number of trailing zeros in a given 8-bit integer"""
return CTZ_TABLE[value] | a98968aa38c886de9aa38bae71e52d0e012c432c | 5,366 |
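CTZ_TABLE is a module-level lookup table that is not shown in the snippet; a plausible precomputation (an assumption, defining 8 as the count for the value 0) would be:

CTZ_TABLE = [8 if v == 0 else (v & -v).bit_length() - 1 for v in range(256)]

print(trailing_zeroes(0b10100000))  # 5
print(trailing_zeroes(1))           # 0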
def _calc_WaterCirculation(heat_load, CT_design, WBT, DBT, fixedCWT_ctrl, pump_ctrl, ignore_CT_eff, max_CT_eff=0.85):
"""Calculates the water circulation loop. Used by simulate_CT().
Parameters:
Returns:
All (time x CT) arrays as
HWT Hot water temp [pint, C]
CWT Cold water temp [pint, C]
waterflow Water mass flow rate [pint, kg/s]. This is the input water stream to the CTs.
Notes:
1) This routine determines the temperatures of the water circuit (HWT, CWT) and the water flow rate to
transfer the heat load to the CT.
2) The WBT serves as a lower limit to CWT.
(variables: WBT is an iterable (length nTime), whereas WBT2 is a 2d array (time x CT))
"""
nTime = len(WBT)
nCT = CT_design.shape[0]
# .......................................................... 1) Calc CWT (based on WBT) and approach
# i) CWT
if fixedCWT_ctrl:
raise NotImplementedError
# This ctrl is not as simple as setting CWT to rated, because what if ambient WBT + min approach is above this?
# CWT fixed at design value
# CWT = Q_(np.tile(CT_design['CWT [°C]'].values, (Nsimul, 1)), 'degC')
else:
# CWT from CT performance curves
perf_m = CT_design['CT perf slope'].values
perf_b = CT_design['CT perf y-int'].values
# time x CT
CWT = Q_(np.outer(WBT, perf_m) + np.tile(perf_b, (nTime, 1)), 'degC')
# ii) Approach
WBT2 = Q_(np.transpose(np.tile(WBT, (nCT, 1))), 'degC')
approach = CWT - WBT2
# .......................................................... 2) Calc water circulation loop
# (calc deltaT, waterflow, assuming loaded)
# Forms a time-invariant array with shape (time x CT) and as a Pint quantity
tile_and_pint = lambda arr, units: Q_(np.tile(arr, (nTime, 1)), units)
HWT_r = tile_and_pint(CT_design['HWT [°C]'].values, 'degC')
waterflow_r = tile_and_pint(CT_design['water flow [kg/s]'].values, 'kg/s')
if pump_ctrl == 'fixed HWT':
deltaT = HWT_r - CWT
waterflow = (heat_load / (cp_water * deltaT)).to_base_units()
elif pump_ctrl == 'range limit':
# Calc range as if HWT = HWT_r
deltaT = HWT_r - CWT
# i) Adjust deltaT
deltaT_min = np.tile(CT_design['Min Range [C°]'].values, (nTime, 1))
deltaT = Q_(np.clip((deltaT).magnitude, deltaT_min, None), 'delta_degC')
# ii) Calc water flow
waterflow = (heat_load / (cp_water * deltaT)).to_base_units()
elif pump_ctrl == 'c':
# Calc range & water flow as if HWT = HWT_r
deltaT = HWT_r - CWT
waterflow = (heat_load / (cp_water * deltaT)).to_base_units()
waterflow_units = waterflow.units
# i) Adjust water flow
# Clip violating values
waterflow_ub = np.tile((CT_design['Max per unit water flow'] * CT_design['water flow [kg/s]']).values,
(nTime, 1))
waterflow_lb = np.tile((CT_design['Min per unit water flow'] * CT_design['water flow [kg/s]']).values,
(nTime, 1))
_wf = np.clip(waterflow.magnitude, waterflow_lb, waterflow_ub)
# Back to pint
waterflow = Q_(_wf, waterflow_units)
# ii) Calc deltaT
deltaT = (heat_load / (cp_water * waterflow)).to('delta_degC')
else:
waterflow = waterflow_r
deltaT = (heat_load / (cp_water * waterflow)).to('delta_degC')
# .......................................................... 3) No-load fix
# This part is necessary for all control modes because the operational limits applied
# in the step 2 assumed loaded operation. After this step, water flow and deltaT are final.
CT_load_mask = (heat_load != 0).astype('int') # 0 if no load, 1 otherwise
waterflow = waterflow * CT_load_mask
deltaT = deltaT * CT_load_mask
HWT = CWT + deltaT
# .......................................................... 4) HWT and CWT adjustment
# HWT cannot be less than DBT; in which case, HWT is limited to DBT and CWT rises.
# Vectorize DBT into (time x CT)
DBT = np.tile(DBT, (nCT, 1)).transpose()
HWT = Q_(np.maximum(HWT.magnitude, DBT), 'degC')
CWT = HWT - deltaT
# .......................................................... 5) Checks and return
assert waterflow.units == ureg.kg / ureg.s
assert deltaT.units == ureg.delta_degC, deltaT.units
# Check that CT efficiency is realistic. In practice, efficiency is 65-70% (normal operating conditions)
CT_eff = deltaT / (deltaT + approach)
assert ignore_CT_eff or np.all(CT_eff < max_CT_eff), \
"CT efficiency exceeded the limit: {}".format(CT_eff)
assert all(obj.shape == (nTime, nCT) for obj in (HWT, CWT, waterflow, deltaT, approach, CT_eff))
# Check energy balance
assert np.allclose(heat_load.magnitude, (cp_water * deltaT * waterflow).to(heat_load.units).magnitude)
res = {
'HWT': HWT,
'CWT': CWT,
'water flow': waterflow,
'range': deltaT,
'approach': approach,
'CT_eff': CT_eff,
}
return res | 46d4d7e8fb1c718821b9f64fe86fa268e05c459a | 5,367 |
import pandas as pd
import tensorflow as tf
def read_dataset_from_csv(data_type, path):
"""Read dataset from csv
Args:
data_type (str): train/valid/test
Returns:
pd: data
"""
data = pd.read_csv(tf.io.gfile.glob(path + data_type + "*")[0])
return data | 3b5fb8318d6b7297166b381d199fe206f4240d84 | 5,369 |
from acora import AcoraBuilder
def search_ignore_case(s, *keywords):
"""Convenience function to search a string for keywords. Case
insensitive version.
"""
acora = AcoraBuilder(keywords, ignore_case=True).build()
return acora.findall(s) | bf093c3864353278a9ff91267b0806bc1e2362a3 | 5,370 |
from typing import Union
from typing import List
import pathlib
def _prepare_directory(data_dir: Union[str, PathLike],
ignore_bad: bool = True,
confirm_uids: bool = True) -> List[str]:
"""
Reorganizes PPMI `data_dir` to a structure compatible with ``heudiconv``
PPMI data starts off with a sub-directory structure that is not conducive
to use with ``heudiconv``. By default, scans are grouped by scan type
rather than by session, and there are a number of redundant sub-directories
that we don't need. This script reorganizes the data, moving things around
so that the general hierarchy is {subject}/{session}/{scan}, which makes
for a much easier time converting the PPMI dataset into BIDS format.
An added complication is that a minority of the scans in the PPMI database
are "bad" to some degree. For most, it is likely that there was some issue
with exporting/uploading the DICOM files. For others, the conversion
process we intend to utilize (``heudiconv`` and ``dcm2niix``) fails to
appropriately convert the files due to some idiosyncratic reason that could
be fixed but we don't have the patience to fix at the current juncture.
Nonetheless, these scans need to be removed so that we can run the batch of
subjects through ``heudiconv`` without any abrupt failures. By default,
these scans are moved to a sub-directory of `data_dir`; setting
`ignore_bad` to False will retain these scans (but be warned!)
Parameters
----------
data_dir : str or pathlib.Path
Filepath to PPMI dataset, as downloaded from https://ppmi-info.org
ignore_bad : bool, optional
Whether to ignore "bad" scans (i.e., ones that are known to fail
conversion or reconstruction)
confirm_uids : bool, optional
Whether to check that DICOM study instance UIDs for provided subject
are all consistent for a given session. Only applicable if `pydicom`
is installed. Default: True
Returns
-------
subjects : list
List of subjects who are ready to be converted / reconstructed with
``heudiconv``
coerce : list
List of paths to data directories where subjects / sessions may have
had inconsistent study instance UIDs that should be coerced
"""
if isinstance(data_dir, str):
data_dir = pathlib.Path(data_dir).resolve()
# location where "bad" scans will be moved
if ignore_bad:
timeout = data_dir / 'bad'
timeout.mkdir(exist_ok=True)
else:
timeout = None
subjects, coerce = [], []
for subj_dir in sorted(data_dir.glob('*')):
if not subj_dir.is_dir() or subj_dir.name == 'bad':
continue
subj, force = _prepare_subject(subj_dir, timeout=timeout,
confirm_uids=confirm_uids)
subjects.append(subj)
coerce.extend(force)
return subjects, coerce | 2220dec499f875501ab7fd80e4bdcf25c61c641d | 5,372 |
import re
def find_log_for(tool_code, form_id, log_f):
"""Returns an array of lines from log for
given tool code (P1,N3,...) and form_id. The
form_id is taken from runner - thus we search for
formula number ``form_id+1``
"""
log = open(log_f,'r')
current_f = -1
formula = re.compile(r'.*ltl:(\d+): (.*)$')
tool = re.compile(r'.*\[([PN]\d+)\]: (.*)$')
gather = re.compile('Performing sanity checks and gathering statistics')
output = []
for line in log:
m_form = formula.match(line)
if m_form:
current_f = int(m_form.group(1))
curr_tool = ''
if current_f < form_id+1:
continue
if current_f > form_id+1:
break
m_tool = tool.match(line)
if m_tool:
curr_tool = m_tool.group(1)
if gather.match(line):
curr_tool = 'end'
if curr_tool == tool_code:
output.append(line.strip())
log.close()
return output | c28659ce832dcc8ad372188a556699f20c9116db | 5,373 |
def create_patric_boolean_dict(genome_dict,all_ECs):
"""
Create new dict of dicts to store genome names
:param genome_dict: dict of key=genome_id, value=dict of genome's name, id, ec_numbers
:param all_ECs: set of all ECs found across all genomes
"""
## new format: key=genome, value={EC:0 or 1}
## This makes it easy to write to file with pandas
boolean_genome_dict = {}
for genome_id in genome_dict:
boolean_genome_dict[genome_id] = {}
boolean_genome_dict[genome_id]['genome_name'] = genome_dict[genome_id]['genome_name']
boolean_genome_dict[genome_id]['genome_name_with_id'] = genome_dict[genome_id]['genome_name_with_id']
boolean_genome_dict[genome_id]['duplicate'] = genome_dict[genome_id]['duplicate']
for EC in all_ECs:
if EC in genome_dict[genome_id]['ECs']:
boolean_genome_dict[genome_id][EC] = 1
else:
boolean_genome_dict[genome_id][EC] = 0
return boolean_genome_dict | 7ab3554bbf705ee8ce99d1d99ff453b06e3d2b53 | 5,376 |
def append_ast_if_req(field):
""" Adds a new filter to template tags that for use in templates. Used by writing {{ field | append_ast_if_req }}
@register registers the filter into the django template library so it can be used in template.
:param Form.field field:
a field of a form that you would like to return the label and potentially an asterisk for.
:returns:
The field label and, if it's a required field, an asterisk
:rtype: string
"""
if field.field.required:
return field.label + '*'
else:
return field.label | 76e36ead3387729b0536bf84f288c400f376a041 | 5,377 |
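A sketch of how this filter would be registered and used, assuming a standard Django template-tag module (the snippet's docstring refers to an existing `@register`):

from django import template

register = template.Library()
register.filter('append_ast_if_req', append_ast_if_req)

# In a template: {{ form.email|append_ast_if_req }}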
def getPileupMixingModules(process):
"""
Method returns two lists:
1) list of mixing modules ("MixingModule")
2) list of data mixing modules ("DataMixingModules")
The first gets added only pileup files of type "mc", the
second pileup files of type "data".
"""
mixModules, dataMixModules = [], []
prodsAndFilters = {}
prodsAndFilters.update(process.producers)
prodsAndFilters.update(process.filters)
for key, value in prodsAndFilters.items():
if value.type_() in ["MixingModule", "DataMixingModule", "PreMixingModule"]:
mixModules.append(value)
if value.type_() == "DataMixingModule":
dataMixModules.append(value)
return mixModules, dataMixModules | 4ee3cc5f7b11e4ad6a846f14dc99e4f82bd04905 | 5,378 |
from typing import Hashable
from typing import Type
from typing import Any
from typing import ForwardRef
import typing
def GetFirstTypeArgImpl_(type_: Hashable, parentClass: Type[Any]) -> Type[Any]:
""" Returns the actual type, even if type_ is a string. """
if isinstance(type_, type):
return type_
if not isinstance(type_, str):
# It's not a type and it's not a str.
# We don't know what to do with it.
raise ValueError("Bad type argument: {}".format(type_))
forwardRef = ForwardRef(type_, is_argument=False)
# pylint: disable=protected-access
evaluated = forwardRef._evaluate(GetClassNamespace_(parentClass), None)
if evaluated is None:
raise RuntimeError("Unable to resolve type {}".format(type_))
if isinstance(evaluated, typing._GenericAlias): # type: ignore
if isinstance(
evaluated.__args__[0], typing._GenericAlias): # type: ignore
# Now use the origin to retrieve the default value type.
return evaluated.__args__[0].__origin__
return evaluated.__args__[0]
return evaluated | f6fd63c4080af886de24465d866f87e716b49992 | 5,379 |
from typing import Callable
from typing import Any
from typing import Sequence
def tree_map_zipped(fn: Callable[..., Any], nests: Sequence[Any]):
"""Map a function over a list of identical nested structures.
Args:
fn: the function to map; must have arity equal to `len(nests)`.
nests: a list of identical nested structures.
Returns:
a nested structure whose leaves are outputs of applying `fn`.
"""
if not nests:
return nests
tree_def = tree_structure(nests[0])
if any([tree_structure(x) != tree_def for x in nests[1:]]):
raise ValueError('All elements must share the same tree structure.')
return jax.tree_unflatten(
tree_def, [fn(*d) for d in zip(*[jax.tree_leaves(x) for x in nests])]) | 8117efd93402fb7ab5e34b4015950c77a24dc038 | 5,380 |
def square_area(side):
"""Returns the area of a square"""
# You have to code here
# REMEMBER: Tests first!!!
return pow(side,2) | e3cc1a0d404c62a9b1d50de63ea924087c77066a | 5,381 |
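The docstring asks for tests first; a couple of quick assertions for the function above:

assert square_area(3) == 9
assert square_area(2.5) == 6.25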
def match_collision_name_to_mesh_name(properties):
"""
This function matches the selected collision to the selected mesh.
:param object properties: The property group that contains variables that maintain the addon's correct state.
:return str: The changed collision name.
"""
collisions = get_from_collection(properties.collision_collection_name, 'MESH', properties)
meshes = get_from_collection(properties.mesh_collection_name, 'MESH', properties)
if collisions and meshes:
selected_mesh = [mesh for mesh in meshes if mesh.select_get()][0]
selected_collision = [collision for collision in collisions if collision.select_get()][0]
name = f'{selected_collision.name.split("_")[0]}_{selected_mesh.name}'
selected_collision.name = name
return name
return '' | 3658435cdaa21408664a511e3555f3976c1b3614 | 5,382 |
def _handle_event_colors(color_dict, unique_events, event_id):
"""Create event-integer-to-color mapping, assigning defaults as needed."""
default_colors = dict(zip(sorted(unique_events), cycle(_get_color_list())))
# warn if not enough colors
if color_dict is None:
if len(unique_events) > len(_get_color_list()):
warn('More events than default colors available. You should pass '
'a list of unique colors.')
else:
custom_colors = dict()
for key, color in color_dict.items():
if key in unique_events: # key was a valid event integer
custom_colors[key] = color
elif key in event_id: # key was an event label
custom_colors[event_id[key]] = color
else: # key not a valid event, warn and ignore
warn('Event ID %s is in the color dict but is not '
'present in events or event_id.' % str(key))
# warn if color_dict is missing any entries
unassigned = sorted(set(unique_events) - set(custom_colors))
if len(unassigned):
unassigned_str = ', '.join(str(e) for e in unassigned)
warn('Color was not assigned for event%s %s. Default colors will '
'be used.' % (_pl(unassigned), unassigned_str))
default_colors.update(custom_colors)
return default_colors | 31ae730af0a184b5de469687b74334960c2939ef | 5,383 |
import logging
import warnings
def redirect_logs_and_warnings_to_lists(
used_logs: list[logging.LogRecord], used_warnings: list
) -> RedirectedLogsAndWarnings:
"""For example if using many processes with multiprocessing, it may be beneficial to log from one place.
It's possible to log to variables (logs as well as warnings), pass it to the main process and then log it
with workings filter etc.
To log stored logs and warnings, use
Args:
used_logs (list): List where logs will be stored
used_warnings (list): List where warnings will be stored
Returns:
RedirectedLogsAndWarnings: Object, where you can reset redirect. Logs and warnings you already have
from inserted parameters.
"""
showwarning_backup = warnings.showwarning
OUTPUT_backup = config.OUTPUT
STREAM_backup = config.STREAM
def custom_warn(message, category, filename, lineno, file=None, line=None):
used_warnings.append(
{
"message": message,
"category": category,
"filename": filename,
"lineno": lineno,
"file": file,
"line": line,
}
)
warnings.showwarning = custom_warn
config.OUTPUT = None
config.STREAM = None
config.TO_LIST = used_logs
return RedirectedLogsAndWarnings(
logs=used_logs,
warnings=used_warnings,
showwarning_backup=showwarning_backup,
OUTPUT_backup=OUTPUT_backup,
STREAM_backup=STREAM_backup,
) | 31cda3f036c8438371811b6421a8af2b0f6ac215 | 5,384 |
def get_file_picker_settings():
"""Return all the data FileUploader needs to start the Google Drive Picker."""
google_settings = frappe.get_single("Google Settings")
if not (google_settings.enable and google_settings.google_drive_picker_enabled):
return {}
return {
"enabled": True,
"appId": google_settings.app_id,
"developerKey": google_settings.api_key,
"clientId": google_settings.client_id
} | 3b1840e22512e1f9112f9fa4dfb6697299aa248a | 5,385 |
def match_subset(pattern: oechem.OEMol, target:oechem.OEMol):
"""Check if target is a subset of pattern."""
# Atoms are equal if they have same atomic number (so explicit Hydrogens are needed as well for a match)
atomexpr = oechem.OEExprOpts_AtomicNumber
# single or double bonds are considered identical (resonance,chirality fix)
bondexpr = oechem.OEExprOpts_EqSingleDouble
ss = oechem.OESubSearch(pattern, atomexpr, bondexpr )
oechem.OEPrepareSearch(target, ss)
return ss.SingleMatch(target) | 99d8d5d73f465f929b6710ec53b5e01f92c1e229 | 5,387 |
def get_quad_strike_vector(q):
"""
Compute the unit vector pointing in the direction of strike for a
quadrilateral in ECEF coordinates. Top edge assumed to be horizontal.
Args:
q (list): A quadrilateral; list of four points.
Returns:
Vector: The unit vector pointing in strike direction in ECEF coords.
"""
P0, P1, P2, P3 = q
p0 = Vector.fromPoint(P0) # fromPoint converts to ECEF
p1 = Vector.fromPoint(P1)
v1 = (p1 - p0).norm()
return v1 | 169b8043a5a385843b92225cba8677ef39bb43a5 | 5,388 |
def CP_to_TT(
cp_cores,
max_rank,
eps=1e-8,
final_round=None,
rsvd_kwargs=None,
verbose=False,
):
"""
Approximate a CP tensor by a TT tensor.
All cores of the TT are rounded to have a TT-rank of most `max_rank`, and singular values of at
most `eps` times the largest singular value. For the first core and last core this rounding is
done using SVD, for all other cores a randomized SVD is employed. Uses
`sklearn.utils.extmath.randomized_svd` for randomized SVD. After forming the TT, it is
optionally rounded again with an accuracy of `final_round`.
Parameters
----------
cp_cores: list<np.ndarray>
List of CP cores
max_rank: int
eps: float (default: 1e-8)
rsvd_kwargs: dict (optional)
keyword arguments to pass to the randomized svd method.
verbose: bool (default: False)
"""
d = len(cp_cores)
tt_cores = [None] * d
prev_rank = 1
if rsvd_kwargs is None:
rsvd_kwargs = {}
for alpha in range(d):
core = cp_cores[alpha]
dim = core.shape[0]
if alpha == 0:
U, S, V = svd(cp_cores[0], full_matrices=False)
elif alpha < d - 1: # Use randomized SVD for middle cores
core = np.einsum("ik,jk->ijk", SV, core)
core_mat = core.reshape(
core.shape[0] * core.shape[1], core.shape[2]
)
U, S, V = randomized_svd(
core_mat, n_components=max_rank, **rsvd_kwargs
)
else: # alpha = d - 1
core = np.einsum("ik,jk->ij", SV, core)
U, S, V = svd(core)
r = 1
r = max(1, min(max_rank, np.sum(S > eps)))
U = U[:, :r]
S = S[:r]
V = V[:r, :]
SV = (S * V.T).T
if alpha == d - 1:
tt_cores[alpha - 1] = np.einsum(
"ijk,kl->ijl", tt_cores[alpha - 1], U
)
tt_cores[alpha] = SV.reshape(SV.shape + (1,))
else:
tt_cores[alpha] = U.reshape((prev_rank, dim, r))
if verbose:
print(
f"feature {alpha+1}/{d}, compressed TT core size is {tt_cores[alpha].shape}"
)
prev_rank = r
if verbose:
print("Orthogonalizing")
tt = TensorTrain(tt_cores, is_orth=True)
if final_round is not None:
if verbose:
print(f"Rounding to {final_round}...")
tt.round(eps=final_round)
if verbose:
print(f"Final TT rank: {tt.tt_rank}")
return tt | 54c30dec3f18271050150dfcc443fcbfe74c4df5 | 5,389 |
def _create_key_list(entries):
"""
Checks if entries are from FieldInfo objects and extracts keys
:param entries: to create key list from
:return: the list of keys
"""
if len(entries) == 0:
return []
if all(isinstance(entry, FieldInfo) for entry in entries):
return [entry.key for entry in entries]
# this should be a regular list of strings
return entries | bb87bbfbfc1856d4041c12d8babaaa8d8ce42249 | 5,390 |
def compose_rule_hierarchies(rule_hierarchy1, lhs_instances1, rhs_instances1,
rule_hierarchy2, lhs_instances2, rhs_instances2):
"""Compose two rule hierarchies."""
if len(rule_hierarchy1["rules"]) == 0:
return rule_hierarchy2, lhs_instances2, rhs_instances2
if len(rule_hierarchy2["rules"]) == 0:
return rule_hierarchy1, lhs_instances1, rhs_instances1
graphs = set(rule_hierarchy1["rules"].keys()).union(
rule_hierarchy2["rules"].keys())
homomorphisms = set(rule_hierarchy1["rule_homomorphisms"].keys()).union(
rule_hierarchy2["rule_homomorphisms"].keys())
new_rule_hierarchy = {
"rules": {},
"rule_homomorphisms": {}
}
new_lhs_instances = {}
new_rhs_instances = {}
composition_data = {}
# Compose rules
for graph in graphs:
if graph in rule_hierarchy1["rules"]:
rule1 = rule_hierarchy1["rules"][graph]
lhs_instance1 = lhs_instances1[graph]
rhs_instance1 = rhs_instances1[graph]
else:
rule1 = Rule.identity_rule()
lhs_instance1 = {}
rhs_instance1 = {}
if graph in rule_hierarchy2["rules"]:
rule2 = rule_hierarchy2["rules"][graph]
lhs_instance2 = lhs_instances2[graph]
rhs_instance2 = rhs_instances2[graph]
else:
rule2 = Rule.identity_rule()
lhs_instance2 = {}
rhs_instance2 = {}
new_rule, new_lhs_instance, new_rhs_instance, data = compose_rules(
rule1, lhs_instance1, rhs_instance1,
rule2, lhs_instance2, rhs_instance2, return_all=True)
new_rule_hierarchy["rules"][graph] = new_rule
new_lhs_instances[graph] = new_lhs_instance
new_rhs_instances[graph] = new_rhs_instance
composition_data[graph] = data
# Compute rule homomorphisms
for source, target in homomorphisms:
lhs_hom1, p_hom1, rhs_hom1 = rule_hierarchy1["rule_homomorphisms"][
(source, target)]
lhs_hom2, p_hom2, rhs_hom2 = rule_hierarchy2["rule_homomorphisms"][
(source, target)]
source_data = composition_data[source]
target_data = composition_data[target]
# H_G -> H_T
h_hom = get_unique_map_from_pushout(
source_data["h"].nodes(),
source_data["rhs1_h"],
source_data["lhs2_h"],
compose(rhs_hom1, target_data["rhs1_h"]),
compose(lhs_hom2, target_data["lhs2_h"])
)
# P*G_1 -> P*T_1
p1_p_hom = get_unique_map_to_pullback_complement(
target_data["p1_p1_p"], target_data["p1_p_h"],
p_hom1, source_data["p1_p1_p"],
compose(source_data["p1_p_h"], h_hom))
# P*G_2 -> P*T_2
p2_p_hom = get_unique_map_to_pullback_complement(
target_data["p2_p2_p"], target_data["p2_p_h"],
p_hom2, source_data["p2_p2_p"],
compose(source_data["p2_p_h"], h_hom))
# Pi_G -> Pi_T
pi_hom = get_unique_map_to_pullback(
new_rule_hierarchy["rules"][target].p.nodes(),
target_data["pi_p1_p"], target_data["pi_p2_p"],
compose(source_data["pi_p1_p"], p1_p_hom),
compose(source_data["pi_p2_p"], p2_p_hom))
# L_G -> L_T
lambda_hom = get_unique_map_from_pushout(
new_rule_hierarchy["rules"][source].lhs.nodes(),
source_data["lhs1_lambda"], source_data["p1_p_lambda"],
compose(lhs_hom1, target_data["lhs1_lambda"]),
compose(p1_p_hom, target_data["p1_p_lambda"]))
# R_G -> R_T
rho_hom = get_unique_map_from_pushout(
new_rule_hierarchy["rules"][source].rhs.nodes(),
source_data["p2_p_rho"], source_data["rhs2_rho"],
compose(p2_p_hom, target_data["p2_p_rho"]),
compose(rhs_hom2, target_data["rhs2_rho"]))
new_rule_hierarchy["rule_homomorphisms"][(source, target)] = (
lambda_hom, pi_hom, rho_hom
)
return new_rule_hierarchy, new_lhs_instances, new_rhs_instances | 0cfab451af31bfc7b41d610381efb47e8c5c0fb5 | 5,391 |
import tensorflow as tf
def expand_tile(value, size):
"""Add a new axis of given size."""
value = tf.convert_to_tensor(value=value, name='value')
ndims = value.shape.ndims
return tf.tile(tf.expand_dims(value, axis=0), [size] + [1]*ndims) | 50adf652fff47418d1f8f1250a2a6d01f712da76 | 5,392 |
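A small example for the helper above (assumes TensorFlow is installed): tiling a rank-1 tensor of shape (3,) along a new leading axis of size 2 yields shape (2, 3).

x = tf.constant([1, 2, 3])
print(expand_tile(x, 2).shape)  # (2, 3)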
import logging
import requests
import time
import json
import pytz
def fetch_exchange(zone_key1='DK-DK1', zone_key2='DK-DK2', session=None,
target_datetime=None, logger=logging.getLogger(__name__)):
"""
Fetches 5-minute frequency exchange data for Danish bidding zones
from api.energidataservice.dk
"""
r = session or requests.session()
sorted_keys = '->'.join(sorted([zone_key1, zone_key2]))
# pick the correct zone to search
if 'DK1' in sorted_keys and 'DK2' in sorted_keys:
zone = 'DK1'
elif 'DK1' in sorted_keys:
zone = 'DK1'
elif 'DK2' in sorted_keys:
zone = 'DK2'
elif 'DK-BHM' in sorted_keys:
zone = 'DK2'
else:
raise NotImplementedError(
'Only able to fetch exchanges for Danish bidding zones')
exch_map = {
'DE->DK-DK1': '"ExchangeGermany"',
'DE->DK-DK2': '"ExchangeGermany"',
'DK-DK1->DK-DK2': '"ExchangeGreatBelt"',
'DK-DK1->NO-NO2': '"ExchangeNorway"',
'DK-DK1->NL': '"ExchangeNetherlands"',
'DK-DK1->SE': '"ExchangeSweden"',
'DK-DK1->SE-SE3': '"ExchangeSweden"',
'DK-DK2->SE': '("ExchangeSweden" - "BornholmSE4")', # Exchange from Bornholm to Sweden is included in "ExchangeSweden"
'DK-DK2->SE-SE4': '("ExchangeSweden" - "BornholmSE4")', # but Bornholm island is reported separately from DK-DK2 in eMap
'DK-BHM->SE': '"BornholmSE4"',
}
if sorted_keys not in exch_map:
raise NotImplementedError(
'Exchange {} not implemented'.format(sorted_keys))
timestamp = arrow.get(target_datetime).strftime('%Y-%m-%d %H:%M')
# fetch real-time/5-min data
sqlstr = 'SELECT "Minutes5UTC" as timestamp, {0} as "netFlow" \
from "{1}" WHERE "PriceArea" = \'{2}\' AND \
"Minutes5UTC" >= (timestamp\'{3}\'-INTERVAL \'24 hours\') AND \
"Minutes5UTC" <= timestamp\'{3}\' \
ORDER BY "Minutes5UTC" ASC'.format(exch_map[sorted_keys],
ids['real_time'],
zone,
timestamp)
url = 'https://api.energidataservice.dk/datastore_search_sql?sql={}'.format(sqlstr)
response = r.get(url)
# raise errors for responses with an error or no data
retry_count = 0
while response.status_code in [429, 403, 500]:
retry_count += 1
if retry_count > 5:
raise Exception('Retried too many times..')
# Wait and retry
logger.warn('Retrying..')
time.sleep(5 ** retry_count)
response = r.get(url)
if response.status_code != 200:
j = response.json()
if 'error' in j and 'info' in j['error']:
error = j['error']['__type']
text = j['error']['info']['orig']
msg = '"{}" fetching exchange data for {}: {}'.format(
error, sorted_keys, text)
else:
msg = 'error while fetching exchange data for {}: {}'.format(
sorted_keys, json.dumps(j))
raise requests.exceptions.HTTPError(msg)
if not response.json()['result']['records']:
raise ParserException(
"DK.py", 'API returned no data', zone_key=sorted_keys)
df = pd.DataFrame(response.json()['result']['records'])
df = df.set_index('timestamp')
df.index = pd.DatetimeIndex(df.index)
# drop empty rows
df.dropna(how='all', inplace=True)
# all exchanges are reported as net import,
# where as eMap expects net export from
# the first zone in alphabetical order
if 'DE' not in sorted_keys:
df['netFlow'] = -1 * df['netFlow']
# Format output
output = []
for dt in df.index:
data = {
'sortedZoneKeys': sorted_keys,
'datetime': None,
'netFlow': None,
'source': 'api.energidataservice.dk'
}
data['datetime'] = dt.to_pydatetime()
data['datetime'] = data['datetime'].replace(tzinfo=pytz.utc)
data['netFlow'] = df.loc[dt, 'netFlow']
output.append(data)
return output | 93488b4bc24a6a899232a5a1fd0e694d0747ad12 | 5,393 |
def snitch(func):
"""
This method is used to add test function to TestCase classes.
snitch method gets test function and returns a copy of this function
with 'test_' prefix at the beginning (to identify this function as
an executable test).
It provides a way to implement a storage (python module that
contains non-executable test functions) for tests and to include
different set of functions into different test cases.
"""
return FunctionType(func.func_code, func.func_globals,
'test_' + func.func_name, closure=func.func_closure) | b8b54d55269951cb3db4c1f45c375ac36cbd3bdf | 5,394 |
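The snippet uses Python 2 function attributes (func_code, func_globals, ...); a Python 3 equivalent, shown here as an assumption rather than part of the original, would read:

from types import FunctionType

def snitch_py3(func):
    return FunctionType(func.__code__, func.__globals__,
                        'test_' + func.__name__, closure=func.__closure__)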
def average_h5(path, path_dc):
"""Return averaged data from HDF5 DC measurements.
Subtracts dark current from the signal measurements.
Args:
- path, path_dc: paths to signal and dark measurement files.
Returns:
- 2D array containing averaged and DC-subtracted measurement.
"""
with h5.File(path, 'r') as f:
with h5.File(path_dc, 'r') as fdc:
arr = (f['data'][...].mean(axis=0) -
fdc['data'][...].mean(axis=0))
return arr | 8daaa7efcdbaf7137d320407b64a96b73f847289 | 5,395 |
def edit_distance(hypothesis, truth, normalize=True, name="edit_distance"):
"""Computes the Levenshtein distance between sequences.
This operation takes variable-length sequences (`hypothesis` and `truth`),
each provided as a `SparseTensor`, and computes the Levenshtein distance.
You can normalize the edit distance by length of `truth` by setting
`normalize` to true.
For example, given the following input:
```python
# 'hypothesis' is a tensor of shape `[2, 1]` with variable-length values:
# (0,0) = ["a"]
# (1,0) = ["b"]
hypothesis = tf.SparseTensor(
[[0, 0, 0],
[1, 0, 0]],
["a", "b"],
(2, 1, 1))
# 'truth' is a tensor of shape `[2, 2]` with variable-length values:
# (0,0) = []
# (0,1) = ["a"]
# (1,0) = ["b", "c"]
# (1,1) = ["a"]
truth = tf.SparseTensor(
[[0, 1, 0],
[1, 0, 0],
[1, 0, 1],
[1, 1, 0]],
["a", "b", "c", "a"],
(2, 2, 2))
normalize = True
```
This operation would return the following:
```python
# 'output' is a tensor of shape `[2, 2]` with edit distances normalized
# by 'truth' lengths.
output ==> [[inf, 1.0], # (0,0): no truth, (0,1): no hypothesis
[0.5, 1.0]] # (1,0): addition, (1,1): no hypothesis
```
Args:
hypothesis: A `SparseTensor` containing hypothesis sequences.
truth: A `SparseTensor` containing truth sequences.
normalize: A `bool`. If `True`, normalizes the Levenshtein distance by
length of `truth.`
name: A name for the operation (optional).
Returns:
A dense `Tensor` with rank `R - 1`, where R is the rank of the
`SparseTensor` inputs `hypothesis` and `truth`.
Raises:
TypeError: If either `hypothesis` or `truth` are not a `SparseTensor`.
"""
if not isinstance(
hypothesis,
(sparse_tensor.SparseTensor, sparse_tensor.SparseTensorValue)):
raise TypeError("Hypothesis must be a SparseTensor.")
if not isinstance(
truth, (sparse_tensor.SparseTensor, sparse_tensor.SparseTensorValue)):
raise TypeError("Truth must be a SparseTensor.")
return gen_array_ops.edit_distance(
hypothesis.indices,
hypothesis.values,
hypothesis.dense_shape,
truth.indices,
truth.values,
truth.dense_shape,
normalize=normalize,
name=name) | 9dfcebb6f49de41c5d5d6bfcc849873f14e2b3f9 | 5,396 |
import numpy as np
def kl_bernoulli(p: np.ndarray, q: np.ndarray) -> np.ndarray:
"""
Compute KL-divergence between 2 probabilities `p` and `q`. `len(p)` divergences are calculated
simultaneously.
Parameters
----------
p
Probability.
q
Probability.
Returns
-------
Array with the KL-divergence between `p` and `q`.
"""
m = np.clip(p, 0.0000001, 0.9999999999999999).astype(float)
n = np.clip(q, 0.0000001, 0.9999999999999999).astype(float)
return m * np.log(m / n) + (1. - m) * np.log((1. - m) / (1. - n)) | 91567169da22ae42bd90c15292f1699f53a184ab | 5,398 |
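A worked call for the function above: KL(0.5 || 0.1) for a Bernoulli variable is about 0.51, and the divergence between identical probabilities is 0.

print(kl_bernoulli(np.array([0.5, 0.2]), np.array([0.1, 0.2])))
# -> approximately [0.51, 0.  ]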
def _incremental_mean_and_var(X, last_mean, last_variance, last_sample_count):
"""Calculate mean update and a Youngs and Cramer variance update.
last_mean and last_variance are statistics computed at the last step by the
function. Both must be initialized to 0.0. In case no scaling is required
last_variance can be None. The mean is always required and returned because
necessary for the calculation of the variance. last_n_samples_seen is the
number of samples encountered until now.
From the paper "Algorithms for computing the sample variance: analysis and
recommendations", by Chan, Golub, and LeVeque.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Data to use for variance update
last_mean : array-like, shape: (n_features,)
last_variance : array-like, shape: (n_features,)
last_sample_count : array-like, shape (n_features,)
Returns
-------
updated_mean : array, shape (n_features,)
updated_variance : array, shape (n_features,)
If None, only mean is computed
updated_sample_count : array, shape (n_features,)
Notes
-----
NaNs are ignored during the algorithm.
References
----------
T. Chan, G. Golub, R. LeVeque. Algorithms for computing the sample
variance: recommendations, The American Statistician, Vol. 37, No. 3,
pp. 242-247
Also, see the sparse implementation of this in
`utils.sparsefuncs.incr_mean_variance_axis` and
`utils.sparsefuncs_fast.incr_mean_variance_axis0`
"""
# old = stats until now
# new = the current increment
# updated = the aggregated stats
last_sum = last_mean * last_sample_count
new_sum = _safe_accumulator_op(np.nansum, X, axis=0)
new_sample_count = np.sum(~np.isnan(X), axis=0)
updated_sample_count = last_sample_count + new_sample_count
updated_mean = (last_sum + new_sum) / updated_sample_count
if last_variance is None:
updated_variance = None
else:
new_unnormalized_variance = (
_safe_accumulator_op(np.nanvar, X, axis=0) * new_sample_count)
last_unnormalized_variance = last_variance * last_sample_count
with cupyx.errstate(divide=None, invalid=None):
last_over_new_count = last_sample_count / new_sample_count
updated_unnormalized_variance = (
last_unnormalized_variance + new_unnormalized_variance +
last_over_new_count / updated_sample_count *
(last_sum / last_over_new_count - new_sum) ** 2)
zeros = last_sample_count == 0
updated_unnormalized_variance[zeros] = new_unnormalized_variance[zeros]
updated_variance = updated_unnormalized_variance / updated_sample_count
return updated_mean, updated_variance, updated_sample_count | 8fac3715bed8431f0910bbf7a37d3924afece9c0 | 5,399 |
def avg_pds_from_events(
times,
gti,
segment_size,
dt,
norm="frac",
use_common_mean=True,
silent=False,
fluxes=None,
errors=None,
):
"""Calculate the average periodogram from a list of event times or a light curve.
If the input is a light curve, the time array needs to be uniformly sampled
inside GTIs (it can have gaps outside), and the fluxes need to be passed
through the ``fluxes`` array.
Otherwise, times are interpreted as photon arrival times.
Parameters
----------
times : float `np.array`
Array of times
gti : [[gti00, gti01], [gti10, gti11], ...]
good time intervals
segment_size : float
length of segments
dt : float
Time resolution of the light curves used to produce periodograms
Other Parameters
----------------
norm : str, default "frac"
The normalization of the periodogram. "abs" is absolute rms, "frac" is
fractional rms, "leahy" is Leahy+83 normalization, and "none" is the
unnormalized periodogram
use_common_mean : bool, default True
The mean of the light curve can be estimated in each interval, or on
the full light curve. This gives different results (Alston+2013).
Here we assume the mean is calculated on the full light curve, but
the user can set ``use_common_mean`` to False to calculate it on a
per-segment basis.
silent : bool, default False
Silence the progress bars
fluxes : float `np.array`, default None
Array of counts per bin or fluxes
errors : float `np.array`, default None
Array of errors on the fluxes above
Returns
-------
freq : `np.array`
The periodogram frequencies
power : `np.array`
The normalized periodogram powers
n_bin : int
the number of bins in the light curves used in each segment
n_ave : int
the number of averaged periodograms
mean : float
the mean flux
"""
if segment_size is None:
segment_size = gti.max() - gti.min()
n_bin = np.rint(segment_size / dt).astype(int)
dt = segment_size / n_bin
flux_iterable = get_flux_iterable_from_segments(
times, gti, segment_size, n_bin, fluxes=fluxes, errors=errors
)
cross = avg_pds_from_iterable(
flux_iterable, dt, norm=norm, use_common_mean=use_common_mean, silent=silent
)
if cross is not None:
cross.meta["gti"] = gti
return cross | 904f43b36380e07e115d23c6677b61bca155d898 | 5,400 |
import random
def check_random_state(seed):
"""
Turn seed into a random.Random instance
If seed is None, return the Random singleton used by random.
If seed is an int, return a new Random instance seeded with seed.
If seed is already a Random instance, return it.
Otherwise raise ValueError.
"""
# Code slightly adjusted from scikit-learn utils/validation.py
if seed is None or isinstance(seed, int):
rng = random.Random(seed)
elif isinstance(seed, random.Random):
rng = seed
else:
raise ValueError(
"### error: '{}' cannot be used to seed random.Random instance.".format(
seed
)
)
return rng | 347481de01f4a3bba59bc9a2c484c10d4857e1e2 | 5,401 |
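Example usage of the helper above, covering the int and Random call patterns:

rng = check_random_state(42)
print(rng.random())                    # deterministic for a fixed seed
print(check_random_state(rng) is rng)  # True: existing instances pass through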
def chunk(seq, size, groupByList=True):
"""Returns list of lists/tuples broken up by size input"""
func = tuple
if groupByList:
func = list
return [func(seq[i:i + size]) for i in range(0, len(seq), size)] | e7cece99822a01476b46351cebc1345793485cbd | 5,402 |
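A couple of example calls for the helper above:

print(chunk(list(range(7)), 3))               # [[0, 1, 2], [3, 4, 5], [6]]
print(chunk("abcdef", 2, groupByList=False))  # [('a', 'b'), ('c', 'd'), ('e', 'f')]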
def registerNamespace(namespace, prefix):
"""
Register a namespace in libxmp.exempi
@namespace : the namespace to register
@prefix : the prefix to use with this namespace
"""
try:
registered_prefix = libxmp.exempi.namespace_prefix(namespace)
# The namespace already exists, return actual prefix.
return registered_prefix
except libxmp.XMPError:
# This namespace does not exist, that's cool
pass
try:
libxmp.exempi.prefix_namespace_uri(prefix)
# Prefix is already used, but not by us.
raise NameError("Prefix is already used")
except libxmp.XMPError:
# This prefix is not used yet, that's cool
pass
return libxmp.exempi.register_namespace(namespace, prefix)[:-1] | 6e7dbed515651f252222a283dc1cda08941fa4c5 | 5,403 |
def definition():
"""
Lists the parent-child relationships through the curriculum structure.
"""
sql = """
--Course to session
SELECT c.course_id as parent_id,
CASE WHEN cc.course_id IS NULL THEN 0 ELSE 1 END as linked,
cs.course_session_id as child_id, 'course' as parent,
cs.description + ' ' + cast(cs.session as char(1)) as description,
-1 as ratio,0 as changed
FROM c_course c
LEFT OUTER JOIN c_course_session cs on cs.curriculum_id = c.curriculum_id
LEFT OUTER JOIN c_course_config cc on c.course_id = cc.course_id
AND cc.course_session_id = cs.course_session_id
UNION ALL
--session to group
SELECT a.course_session_id as parent_id,
CASE WHEN c.course_session_id IS NULL THEN 0 ELSE 1 END as linked,
b.cgroup_id as child_id, 'course_session' as parent,
b.description,
-1 as ratio, 0 as changed
FROM c_course_session a
LEFT OUTER JOIN c_cgroup b ON a.curriculum_id = b.curriculum_id
LEFT OUTER JOIN c_course_session_config c on a.course_session_id = c.course_session_id
AND b.cgroup_id = c.cgroup_id
UNION ALL
--CGroup to component
SELECT a.cgroup_id as parent_id,
CASE WHEN c.component_id IS NULL THEN 0 ELSE 1 END as linked,
b.component_id as child_id, 'cgroup' as parent, b.description,
ISNULL(c.ratio, 1) as ratio, 0 as changed
FROM c_cgroup a
LEFT OUTER JOIN c_component b ON a.curriculum_id = b.curriculum_id
LEFT OUTER JOIN c_cgroup_config c on a.cgroup_id = c.cgroup_id
AND b.component_id = c.component_id
"""
return sql | e8dc6a720dcd5f62854ce95e708a88b43859e2cc | 5,404 |
def create_user(strategy, details, backend, user=None, *args, **kwargs):
"""Aggressively attempt to register and sign in new user"""
if user:
return None
request = strategy.request
settings = request.settings
email = details.get("email")
username = kwargs.get("clean_username")
if not email or not username:
return None
try:
validate_email(email)
validate_new_registration(request, {"email": email, "username": username})
except ValidationError:
return None
activation_kwargs = {}
if settings.account_activation == "admin":
activation_kwargs = {"requires_activation": User.ACTIVATION_ADMIN}
new_user = User.objects.create_user(
username, email, joined_from_ip=request.user_ip, **activation_kwargs
)
setup_new_user(settings, new_user)
send_welcome_email(request, new_user)
return {"user": new_user, "is_new": True} | afdff23d6ca578ef652872ba11bcfe57264b0a9b | 5,405 |
def prendreTresorPlateau(plateau,lig,col,numTresor):
"""
take the treasure numTresor that is on the card at lig,col of the board
return True if the operation went well (the treasure really was on
the card)
parameters: plateau: the board considered
lig: the row where the card is located
col: the column where the card is located
numTresor: the number of the treasure to take from the card
result: a boolean indicating whether the treasure really was on the considered card
"""
if getTresor(getVal(plateau,lig,col))==numTresor:
prendreTresor(getVal(plateau,lig,col))
return True
else:
return False | 5fa94fb875e34068f4e391c66952fe4cc4248ddf | 5,406 |
def task_get_all(context, filters=None, marker=None, limit=None,
sort_key='created_at', sort_dir='desc', admin_as_user=False):
"""
Get all tasks that match zero or more filters.
:param filters: dict of filter keys and values.
:param marker: task id after which to start page
:param limit: maximum number of tasks to return
:param sort_key: task attribute by which results should be sorted
:param sort_dir: direction in which results should be sorted (asc, desc)
:param admin_as_user: For backwards compatibility. If true, then return to
an admin the equivalent set of tasks which it would see
if it were a regular user
:returns: tasks set
"""
filters = filters or {}
session = get_session()
query = session.query(models.Task)
if not (context.is_admin or admin_as_user) and context.owner is not None:
query = query.filter(models.Task.owner == context.owner)
_task_soft_delete(context, session=session)
showing_deleted = False
if 'deleted' in filters:
deleted_filter = filters.pop('deleted')
query = query.filter_by(deleted=deleted_filter)
showing_deleted = deleted_filter
for (k, v) in filters.items():
if v is not None:
key = k
if hasattr(models.Task, key):
query = query.filter(getattr(models.Task, key) == v)
marker_task = None
if marker is not None:
marker_task = _task_get(context, marker,
force_show_deleted=showing_deleted)
sort_keys = ['created_at', 'id']
if sort_key not in sort_keys:
sort_keys.insert(0, sort_key)
query = _paginate_query(query, models.Task, limit,
sort_keys,
marker=marker_task,
sort_dir=sort_dir)
task_refs = query.all()
tasks = []
for task_ref in task_refs:
tasks.append(_task_format(task_ref, task_info_ref=None))
return tasks | 4d03e8f7ae15411c2cb597aeb06e25cd40c4f033 | 5,409 |
def updateAppMonConf(AppID, requestModel):
"""Update an Application Monitoring Configuration for a given application
Args:
AppID (str): This is the application ID of the Web App you are interested in.
requestModel: This is the data you wish to update and you need to put it in this
format:
{
"enabled": true,
"scanUrl": "https://mywebapp.com/directory"
}
explanation:
{
enabled (boolean): Enable Application Monitoring ,
scanUrl (string): Scan Url
}
Returns:
dict: Dictionary with the following layout
{
"success": true,
"errors": [
"string"
]
}
In the case of a return code 204, the update will take place but you will not
get the above layout, instead you will get a custom layout like this:
{'Response_Text': u'', 'Status_code': 204}
"""
url = "https://api.ams.fortify.com/api/v3/applications/{applicationId}/application-monitoring/configuration".format(applicationId=AppID)
req = fodRequest()
r = req.put(url, params=requestModel)
return r
#*******************************************************Applications************************************************************** | 0b61eae04b79702b2722d0c0bc5dafe48dcdf21f | 5,410 |
def plot_morphology(morphology,
order=MORPHOLOGY_ORDER,
colors=MORPHOLOGY_COLORS,
metastases=None,
metastasis_color=METASTASIS_COLOR,
ax=None,
bg_color='#f6f6f6',
**kwargs):
"""Plots morphology matrix as 2D heatmap.
Plots a morphology matrix (typically obtained from the parse_morphology
function) as a 2D heatmap. Matrix is expected to correspond with the
three categories returned by parse_morphology (ILC, Spindle cell
and Squamous).
Parameters
----------
morphology : pd.DataFrame
Boolean matrix of samples-by-morphologies.
order :
colors :
metastases : pd.DataFrame
Optional dataframe (single column) indicating which samples have
a metastasis. Used to draw metastases as an extra row in the heatmap.
metastasis_color :
ax : matplotlib.Axis
Axis to use for plotting.
bg_color : str
**kwargs
Any kwargs are passed to seaborns heatmap function.
Returns
-------
matplotlib.Axis
Axis that was used for plotting.
"""
if ax is None:
_, ax = plt.subplots()
# Add metastasis data if given.
if metastases is not None:
morphology = pd.concat([morphology, metastases], axis=1)
order = list(order) + [metastases.columns[0]]
colors = list(colors) + [metastasis_color]
# Sort by rows/columns.
morphology = morphology[list(order)]
morphology = sort_matrix(morphology, sort_columns=False)
# Convert to numeric matrix (for heatmap).
num_matrix = pd.DataFrame(
{
col: morphology[col].astype(int) * (i + 1)
for i, col in enumerate(morphology)
},
columns=morphology.columns)
# Draw heatmap.
cmap = ListedColormap([bg_color] + list(colors))
sns.heatmap(
num_matrix.T,
ax=ax,
cbar=False,
cmap=cmap,
vmin=0,
vmax=len(colors),
**kwargs)
ax.set_xticks([])
ax.set_xlim(0, num_matrix.shape[0])
ax.set_title('Tumor morphology')
ax.set_xlabel('Samples ({})'.format(morphology.shape[0]))
# Add counts to labels.
ax.set_yticklabels(
['{} ({})'.format(k, v) for k, v in morphology.sum().items()][::-1],
rotation=0)
return ax | 62048ced8ede14e9aa505c6e45dd5b196d12297d | 5,411 |
import requests
from datetime import datetime as dt
def post(name,url,message,params=None):
"""Wrap a post in some basic error reporting"""
start = dt.now()
s = requests.session()
if params is None:
response = s.post(url,json=message)
else:
response = s.post(url,json=message,params=params)
end = dt.now()
if not response.status_code == 200:
print(name, 'error:',response.status_code)
print(response.json())
return response.json()
print(f'{name} returned in {end-start}s')
m = response.json()
if 'message' in m:
if 'results' in m['message']:
print(f'Num Results: {len(m["message"]["results"])}')
print_errors(m)
return m | 9180424171cdf4cb7bf16a938d7207a99af0987f | 5,413 |
def get_lin_reg_results(est, true, zero_tol=0):
"""
Parameters
----------
est: an Estimator
A covariance estimator.
true: array-like, shape (n_features, n_features)
zero_tol: float
Output
------
out: dict with keys 'utri' and 'graph'
"""
est_coef = get_coef(est)[0]
est_adj = fill_hollow_sym(est_coef)
    # presumably the true coefficients, not the estimate, were intended here
    true_adj = fill_hollow_sym(true)
coef_results = compare_vecs(est=est_coef, truth=true,
zero_tol=zero_tol)
graph_results = compare_adj_mats(est=est_adj, truth=true_adj,
zero_tol=zero_tol)
results = merge_dicts(coef_results, graph_results, allow_key_overlap=False)
return results | 0f963c135d0bd74a70714ef47ed6f2b0191df846 | 5,414 |
import requests
import logging
def download_file(requested_url: str) -> str:
"""Download a file from github repository"""
url = f"https://github.com/{requested_url.replace('blob', 'raw')}"
resp = requests.get(url)
logging.info(F"Requested URL: {requested_url}")
if resp.status_code != 200:
logging.info(f"Can not download {url}")
raise NotebookDownloadException("Can not download the file. Please, check the URL")
return resp.text | f96d68843f6291aa3497a6e7a5b1e30e2ea4005e | 5,415 |
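# Usage sketch (added for illustration; the repository path below is hypothetical):
# text = download_file("someuser/somerepo/blob/main/notebooks/example.ipynb")
# The 'blob' segment is rewritten to 'raw' so the raw file contents are fetched.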
import warnings
def readBody(response):
"""
Get the body of an L{IResponse} and return it as a byte string.
This is a helper function for clients that don't want to incrementally
receive the body of an HTTP response.
@param response: The HTTP response for which the body will be read.
@type response: L{IResponse} provider
@return: A L{Deferred} which will fire with the body of the response.
Cancelling it will close the connection to the server immediately.
"""
def cancel(deferred):
"""
Cancel a L{readBody} call, close the connection to the HTTP server
immediately, if it is still open.
@param deferred: The cancelled L{defer.Deferred}.
"""
abort = getAbort()
if abort is not None:
abort()
d = defer.Deferred(cancel)
protocol = _ReadBodyProtocol(response.code, response.phrase, d)
def getAbort():
return getattr(protocol.transport, 'abortConnection', None)
response.deliverBody(protocol)
if protocol.transport is not None and getAbort() is None:
warnings.warn(
'Using readBody with a transport that does not have an '
'abortConnection method',
category=DeprecationWarning,
stacklevel=2)
return d | bbc693fca1536a3699b0e088941d9577de94d8dd | 5,416 |
def is_valid_mac(address):
"""Verify the format of a MAC address."""
class mac_dialect(netaddr.mac_eui48):
word_fmt = '%.02x'
word_sep = ':'
try:
na = netaddr.EUI(address, dialect=mac_dialect)
except Exception:
return False
return str(na) == address.lower() | f8bb59a986773307f803dd52154ec03eaddb8597 | 5,417 |
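# Expected behaviour (added for illustration; not part of the original snippet):
# is_valid_mac('aa:bb:cc:dd:ee:ff')  -> True
# is_valid_mac('AA:BB:CC:DD:EE:FF')  -> True   (input is lower-cased before comparison)
# is_valid_mac('aa-bb-cc-dd-ee-ff')  -> False  (the dialect formats with ':' separators)
# is_valid_mac('not a mac')          -> False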
def build_state_abstraction(similar_states, mdp, tol=0.1):
"""
"""
bools = similar_states + np.eye(similar_states.shape[0]) < tol # approximate abstraction
if bools.sum() == 0:
raise ValueError('No abstraction')
mapping, parts = partitions(bools)
idx = list(set(np.array([p[0] for p in parts]))) # pick a representative set of states. one from each partition
f = construct_abstraction_fn(mapping, idx, mdp.S, len(idx))
# print('Abstracting from {} states to {} states'.format(mdp.S, len(parts)))
# print('idx', idx)
# print('mapping', mapping)
# print('parts', parts)
# mapping, parts = fix_mapping(mapping)
# print(f)
# print(f.shape, abs_mdp.S)
abs_mdp = abstract_the_mdp(mdp, idx)
# want a way to do this stuff in numpy!?
# should calculate the error of the abstraction?! check it is related to tol!?
return idx, abs_mdp, f | d4d9354507172ee92ea11c915de0376f0c873878 | 5,418 |
def diagram(source, rstrip=True):
"""High level API to generate ASCII diagram.
This function is equivalent to:
.. code-block:: python
Diagram(source).renders()
:param source: The ADia source code.
:type source: str or file-like
    :param rstrip: If ``True``, trailing whitespace at the end of each
        line will be removed.
:type rstrip: bool, optional, default: True
:return: ASCII diagram.
:rtype: str
"""
return Diagram(source).renders(rstrip) | 2a386b49052a7f4dd31eb4f40dec15d774d86b94 | 5,419 |
def eng_to_kong(eng_word: str)-> list[str]:
"""
    Translate the given English word into Korean characters that match its pronunciation,
    following the English Loanword Orthography.
    For example, "hello" will be translated into 헐로.
    # Raises
    When given an English word that it cannot translate, `eng_to_kong` will raise a KeyError.
Example
```python
import konglog
def main():
word = "shrimp"
print(konglog.eng_to_kong(word))
```
"""
# Parse word into phonemes string for passing to Prolog Query.
prolog_arg_aras = "]"
for phe in cmudict.dict()[eng_word.lower().strip()][0]:
if phe[-1] == '0' or phe[-1] == '1' or phe[-1] == '2':
phe = phe[:-1]
prolog_arg_aras = "," + phe.lower() + prolog_arg_aras
prolog_arg_aras = "[" + prolog_arg_aras[1:]
# Execute Prolog query
with PrologMQI() as mqi:
with mqi.create_thread() as prolog_thread:
assert(prolog_thread.query("consult(\"ipa.pl\")"))
prolog_res = prolog_thread.query(f"ipa_to_kr(X,{prolog_arg_aras})")
# Parse results
jamo_lists = []
try:
for jls in prolog_res:
temp = jls['X']
temp.reverse()
jamo_lists.append(temp)
except TypeError:
raise KeyError
jamo_all = []
for jamo_list in jamo_lists:
temp_jamo_all = [""]
for jamos in jamo_list:
if isinstance(jamos, str):
for i in range(len(temp_jamo_all)):
temp_jamo_all[i] += jamos
else:
temp = []
for jamo in jamos:
for s in temp_jamo_all:
temp.append(s + jamo)
temp_jamo_all = temp
jamo_all.extend(temp_jamo_all)
# Combine jamos into Konglish word
jamo_all.sort(key = lambda x : len(x))
for jamos in jamo_all:
try:
return join_jamos(jamos, False)
except ValueError:
continue | 0b0d55fdacdea1493d73de85c21dc9c086352b99 | 5,420 |
def socfaker_dns_answers():
"""
A list of DNS answers during a DNS request
Returns:
list: A random list (count) of random DNS answers during a DNS request
"""
if validate_request(request):
return jsonify(str(socfaker.dns.answers)) | c1f641e1a0e977363067937487a6455800e6a25c | 5,422 |
def bilin(x, y, data, datax, datay): # --DC
""" x, y ARE COORDS OF INTEREST
data IS 2x2 ARRAY CONTAINING NEARBY DATA
datax, datay CONTAINS x & y COORDS OF NEARBY DATA"""
lavg = ( (y - datay[0]) * data[1,0] + (datay[1] - y) * data[0,0] ) / (datay[1] - datay[0])
ravg = ( (y - datay[0]) * data[1,1] + (datay[1] - y) * data[0,1] ) / (datay[1] - datay[0])
return ( (x - datax[0]) * ravg + (datax[1] - x) * lavg ) / (datax[1] - datax[0]) | 59a740f65c7187a08cdc09cef8aa100b01c652cf | 5,424 |
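# Usage sketch (added for illustration; not part of the original snippet).
# Assumes numpy is available; rows of `data` are indexed by y, columns by x.
def _demo_bilin():
    import numpy as np
    data = np.array([[1.0, 2.0],
                     [3.0, 4.0]])
    # Interpolating at the centre of the patch gives the mean of the four corners.
    return bilin(1.5, 2.5, data, datax=[1.0, 2.0], datay=[2.0, 3.0])  # -> 2.5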
# numpy's array is presumably what was intended; the stdlib ``array`` module is not callable
from numpy import array
def slice_data(xdata, ydata, x_range):
"""
crops or slices the data in xdata,ydata in the range x_range on the x axis
"""
data = zip(xdata, ydata)
sliced_data = [d for d in data if d[0] >= x_range[0] and d[0] <= x_range[1]]
    return array(list(zip(*sliced_data)))
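# Expected behaviour (added for illustration; not part of the original snippet):
# slice_data([1, 2, 3, 4, 5], [10, 20, 30, 40, 50], x_range=(2, 4))
#     -> array([[ 2,  3,  4],
#               [20, 30, 40]])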
def best_low_rank(A, rank):
"""
Finding the best low rank approximation by SVD
"""
u, s, vh = np.linalg.svd(A)
s = np.sqrt(s[:rank])
return u[:, range(rank)] @ np.diag(s), np.diag(s) @ vh[range(rank)] | 5df8197fc0113bfa74a36803445a6a300766880f | 5,426 |
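# Usage sketch (added for illustration; not part of the original snippet).
# Assumes numpy is imported as np, as in the function body above.
def _demo_best_low_rank():
    import numpy as np
    A = np.random.rand(6, 4)
    U, V = best_low_rank(A, rank=2)   # U: (6, 2), V: (2, 4)
    A_hat = U @ V                     # best rank-2 approximation (Frobenius norm)
    return np.linalg.norm(A - A_hat)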
def get_bangumi(uid: int, type_: str = "bangumi", limit: int = 114514, callback=None, verify: utils.Verify = None):
"""
    Automatically page through and fetch the complete followed-anime/followed-drama list.
    :param callback: callback function invoked with each page of results
    :param uid: user ID
    :param type_: list type, e.g. "bangumi"
    :param limit: maximum number of entries to fetch
    :param verify: utils.Verify instance
    :return: list of followed entries (at most ``limit``)
"""
if verify is None:
verify = utils.Verify()
bangumi = []
page = 1
count = 0
while count < limit:
data = get_bangumi_raw(uid=uid, pn=page, type_=type_, verify=verify)
if len(data["list"]) == 0:
break
bangumi += data["list"]
if callable(callback):
callback(data["list"])
count += len(data["list"])
page += 1
return bangumi[:limit] | b5a2a553a3bf13ded7eb30c53ce536b4a2b9e043 | 5,427 |
def local_cases_range(start_date='2020-06-01',end_date='2020-07-01',areaname='Hartlepool'):
"""calculate new cases in a time range"""
try:
q=DailyCases.objects.filter(areaname=areaname,specimenDate=start_date)[0]
start_total=q.totalLabConfirmedCases
q=DailyCases.objects.filter(areaname=areaname,specimenDate=end_date)[0]
end_total=q.totalLabConfirmedCases
return end_total-start_total
except Exception as e:
log.info(e)
return None | 747f03f2ef9925f7dc252798bb7f3844cd31d2c0 | 5,428 |
def ecal_phisym_flattables(process, produce_by_run : bool=False):
"""
Add the NanoAOD flat table producers.
This functions adjust also the output columns.
Should be called once nMisCalib has been set in the EcalPhiSymRecHitProducer
"""
process.load('Calibration.EcalCalibAlgos.EcalPhiSymFlatTableProducers_cfi')
nmis = process.EcalPhiSymRecHitProducerRun.nMisCalib.value()
for imis in range(1, nmis+1):
# get the naming and indexing right.
if imis<nmis/2+1:
var_name = 'sumEt_m'+str(abs(int(imis-(nmis/2)-1)))
var = Var(f'sumEt({imis})', float, doc='ECAL PhiSym rechits: '+str(imis-(nmis/2)-1)+'*miscalib et', precision=23)
else:
var_name = 'sumEt_p'+str(int(imis-(nmis/2)))
var = Var(f'sumEt({imis})', float, doc='ECAL PhiSym rechits: '+str(imis-(nmis/2))+'*miscalib et', precision=23)
if produce_by_run:
setattr(process.ecalPhiSymRecHitRunTableEB.variables, var_name, var)
setattr(process.ecalPhiSymRecHitRunTableEE.variables, var_name, var)
flattable_sequence = cms.Sequence( process.ecalPhiSymRecHitRunTableEB +
process.ecalPhiSymRecHitRunTableEE +
process.ecalPhiSymInfoRunTable )
else:
setattr(process.ecalPhiSymRecHitLumiTableEB.variables, var_name, var)
setattr(process.ecalPhiSymRecHitLumiTableEE.variables, var_name, var)
flattable_sequence = cms.Sequence( process.ecalPhiSymRecHitLumiTableEB +
process.ecalPhiSymRecHitLumiTableEE +
process.ecalPhiSymInfoLumiTable
)
return flattable_sequence | f6f48ab2e5b1df6a3e58c5e3130be56861eb1384 | 5,429 |
def nx_add_prefix(graph, prefix):
"""
Rename graph to obtain disjoint node labels
"""
assert isinstance(graph, nx.DiGraph)
if prefix is None:
return graph
def label(x):
if isinstance(x, str):
name = prefix + x
else:
name = prefix + repr(x)
return name
return nx.relabel_nodes(graph, label) | c8f05052c613adc17423637186867b70db31e70d | 5,430 |
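# Usage sketch (added for illustration; not part of the original snippet).
def _demo_nx_add_prefix():
    import networkx as nx
    g = nx.DiGraph([(1, 2), (2, 3)])
    g2 = nx_add_prefix(g, 'sub1_')
    return list(g2.nodes)   # ['sub1_1', 'sub1_2', 'sub1_3']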
def infected_asymptomatic_20x80():
"""
Real Name: b'Infected asymptomatic 20x80'
Original Eqn: b'Infected asymptomatic 20+Infected asymptomatic 80'
Units: b'person'
Limits: (None, None)
Type: component
b''
"""
return infected_asymptomatic_20() + infected_asymptomatic_80() | fd9cba446672bf8ebcc5fe2fc7f2f961129bdb71 | 5,431 |
def biosample_table_data():
"""Return a dictionary containing the expected values of the BioSample Table"""
columns = [
"id",
"BioSample_id",
"BioSampleAccession",
"BioSampleAccessionSecondary",
"BioSampleBioProjectAccession",
"BioSampleSRAAccession",
"BioSampleOrganism",
"BioSampleStrain",
"BioSampleSubmissionDate",
"BioSampleComment",
]
metadata = [
"1",
"12991206",
"SAMN12991206",
"",
"",
"SRS5502739",
"TestOrganism1",
"TestStrain1",
"2019-10-08T07:15:03.950",
"",
]
    # Populate the dict by pairing each column name with its value
    table_dict = dict(zip(columns, metadata))
    return table_dict
def grid_adapter3D(
out_dim=(100.0, 100.0),
in_dim=(50.0, 50.0),
z_dim=-10.0,
out_res=(10.0, 10.0, 10.0),
in_res=(5.0, 5.0, 5.0),
out_pos=(0.0, 0.0),
in_pos=(25.0, 25.0),
z_pos=0.0,
in_mat=0,
out_mat=0,
fill=False,
):
"""
Generate a grid adapter.
3D adapter from an outer grid resolution
to an inner grid resolution with gmsh.
Parameters
----------
out_dim : list of 2 float
xy-Dimension of the outer block
in_dim : list of 2 float
xy-Dimension of the inner block
z_dim : float
z-Dimension of the whole block
out_res : list of 3 float
Grid resolution of the outer block
in_res : list of 3 float
Grid resolution of the inner block
out_pos : list of 2 float
xy-Position of the origin of the outer block
in_pos : list of 2 float
xy-Position of the origin of the inner block
    z_pos : float
        z-Position of the origin of the whole block
in_mat : integer
Material-ID of the inner block
out_mat : integer
Material-ID of the outer block
fill : bool, optional
State if the inner block should be filled with a rectangular mesh.
Default: False.
Returns
-------
result : dictionary
Result contains one '#FEM_MSH' block of the OGS mesh file
with the following information (sorted by keys):
mesh_data : dict
dictionary containing information about
- AXISYMMETRY (bool)
- CROSS_SECTION (bool)
- PCS_TYPE (str)
- GEO_TYPE (str)
- GEO_NAME (str)
- LAYER (int)
nodes : ndarray
Array with all node postions
elements : dict
contains nodelists for elements sorted by element types
material_id : dict
contains material ids for each element sorted by element types
element_id : dict
contains element ids for each element sorted by element types
"""
out = gmsh(
gmsh_grid_adapt3D(
out_dim, in_dim, z_dim, out_res, in_res, out_pos, in_pos, z_pos
),
import_dim=3,
)
out["material_id"] = gen_std_mat_id(out["elements"], out_mat)
if fill:
element_no = [
int(in_dim[0] / in_res[0]),
int(in_dim[1] / in_res[1]),
int(abs(z_dim) / in_res[2]),
]
mesh_in = rectangular(
dim=3,
mesh_origin=in_pos + (z_pos + min(z_dim, 0.0),),
element_no=element_no,
element_size=in_res,
)
mesh_in["material_id"] = gen_std_mat_id(mesh_in["elements"], in_mat)
dec = int(np.ceil(-np.log10(min(min(in_res), min(out_res)))) + 2.0) * 2
out = combine(mesh_in, out, dec)
return out | 9c14a4f9b27ec14cdc550f81fd861207a5674616 | 5,434 |
from typing import Sequence
from typing import Tuple
from typing import Mapping
from typing import Any
from functools import reduce
from typing import cast
def concat_dtypes(ds: Sequence[np.dtype]) -> np.dtype:
"""Concat structured datatypes."""
def _concat(
acc: Tuple[Mapping[Any, Any], int], a: np.dtype
) -> Tuple[DTYPE_FIELDS_T, int]:
acc_fields, acc_itemsize = acc
fields = dtype_fields(a).throw()
intersection = set(acc_fields).intersection(set(fields))
if intersection != set():
raise ValueError(f'dtypes have overlapping fields: {intersection}')
return (
{
**acc_fields,
**{k: (d[0], d[1] + acc_itemsize) for k, d in fields.items()}
},
acc_itemsize + a.itemsize
)
# dtype.fields() doesn't match dtype constructor despite being compatible
return np.dtype(reduce(_concat, ds, (cast(DTYPE_FIELDS_T, {}), 0))[0]) | bef7d8ebe30f41297adbf8f1d8de9b93f646c8f4 | 5,435 |
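# Usage sketch (added for illustration; not part of the original snippet).
# Assumes the dtype_fields/DTYPE_FIELDS_T helpers behave as in the surrounding module.
def _demo_concat_dtypes():
    import numpy as np
    a = np.dtype([('x', np.float64), ('y', np.float64)])
    b = np.dtype([('label', np.int32)])
    combined = concat_dtypes([a, b])
    # `combined` has fields 'x', 'y' and 'label'; the offsets of b's fields are
    # shifted by a.itemsize so the two layouts do not overlap.
    return combined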
def mutation(param_space, config, mutation_rate, list=False):
"""
Mutates given configuration.
:param param_space: space.Space(), will give us information about parameters
    :param config: dict, the configuration to mutate.
:param mutation_rate: integer for how many parameters to mutate
:param list: boolean whether returning one or more alternative configs
:return: list of dicts, list of mutated configurations
"""
parameter_object_list = param_space.get_input_parameters_objects()
rd_config = dict()
for name, obj in parameter_object_list.items():
x = obj.randomly_select()
single_valued_param = False
param_type = param_space.get_type(name)
if param_type == 'real' or param_type == 'integer':
if obj.get_max() == obj.get_min():
single_valued_param = True
else:
if obj.get_size() == 1:
single_valued_param = True
mutation_attempts = 0
while x == config[name] and single_valued_param == False:
x = obj.randomly_select()
mutation_attempts += 1
if mutation_attempts > 1000000:
break
rd_config[name] = x
parameter_names_list = param_space.get_input_parameters()
nbr_params = len(parameter_names_list)
configs = []
n_configs = nbr_params if list else 1
for _ in range(n_configs):
indices = rd.permutation(nbr_params)[:mutation_rate]
for idx in indices:
mutation_param = parameter_names_list[idx]
# Should I do something if they are the same?
temp = config.copy()
temp[mutation_param] = rd_config[mutation_param]
configs.append(temp)
return configs | 38427cfee226589d72117f102d2befdbe8ebbcc0 | 5,436 |
from typing import Union
from typing import Callable
from typing import Optional
from typing import Tuple
def uncertainty_batch_sampling(classifier: Union[BaseLearner, BaseCommittee],
X: Union[np.ndarray, sp.csr_matrix],
n_instances: int = 20,
metric: Union[str, Callable] = 'euclidean',
n_jobs: Optional[int] = None,
**uncertainty_measure_kwargs
) -> Tuple[np.ndarray, Union[np.ndarray, sp.csr_matrix]]:
"""
Batch sampling query strategy. Selects the least sure instances for labelling.
This strategy differs from :func:`~modAL.uncertainty.uncertainty_sampling` because, although it is supported,
traditional active learning query strategies suffer from sub-optimal record selection when passing
`n_instances` > 1. This sampling strategy extends the interactive uncertainty query sampling by allowing for
batch-mode uncertainty query sampling. Furthermore, it also enforces a ranking -- that is, which records among the
batch are most important for labeling?
Refer to Cardoso et al.'s "Ranked batch-mode active learning":
https://www.sciencedirect.com/science/article/pii/S0020025516313949
Args:
classifier: One of modAL's supported active learning models.
X: Set of records to be considered for our active learning model.
n_instances: Number of records to return for labeling from `X`.
metric: This parameter is passed to :func:`~sklearn.metrics.pairwise.pairwise_distances`
n_jobs: If not set, :func:`~sklearn.metrics.pairwise.pairwise_distances_argmin_min` is used for calculation of
distances between samples. Otherwise it is passed to :func:`~sklearn.metrics.pairwise.pairwise_distances`.
**uncertainty_measure_kwargs: Keyword arguments to be passed for the :meth:`predict_proba` of the classifier.
Returns:
Indices of the instances from `X` chosen to be labelled; records from `X` chosen to be labelled.
"""
uncertainty = classifier_uncertainty(classifier, X, **uncertainty_measure_kwargs)
query_indices = ranked_batch(classifier, unlabeled=X, uncertainty_scores=uncertainty,
n_instances=n_instances, metric=metric, n_jobs=n_jobs)
return query_indices, X[query_indices] | eb95ad79f4326d89c94a42aa727e2e3c338e021e | 5,437 |
import re
def only_digits(raw, force_int=False):
    """Strips all non-digit characters from a string.
Args:
raw (str or unicode): source string.
Kwargs:
force_int (boolean): not to seek for dot, seek only for int value.
Returns:
        int or float: depending on the content of the "raw" argument.
        None: if raw is None, empty, or contains no digits.
"""
if isinstance(raw, (unicode, str)) and len(raw):
if not force_int and re.search(r'\d\.\d', raw):
try:
return float(u''.join(u'{0}'.format(one) for one in raw
if one.isdigit() or one == one.__class__(u'.')))
except (TypeError, ValueError):
return None
else:
try:
return int(u''.join(u'{0}'.format(one) for one in raw
if one.isdigit()))
except (TypeError, ValueError):
return None
elif isinstance(raw, (float, int)):
return raw
else:
return None | 413763588b067f335f7401fb914f1d6f3f8972fa | 5,438 |
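# Expected behaviour (Python 2; added for illustration, not part of the original snippet):
# only_digits(u'abc 12.5 xyz')                  -> 12.5
# only_digits(u'abc 12.5 xyz', force_int=True)  -> 125   (dot ignored, digits concatenated)
# only_digits(u'no numbers here')               -> None
# only_digits(42)                               -> 42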
def namedtuple(typename, field_names, verbose=False):
"""Returns a new subclass of tuple with named fields.
>>> Point = namedtuple('Point', 'x y')
>>> Point.__doc__ # docstring for the new class
'Point(x, y)'
>>> p = Point(11, y=22) # instantiate with positional args or keywords
>>> p[0] + p[1] # indexable like a plain tuple
33
>>> x, y = p # unpack like a regular tuple
>>> x, y
(11, 22)
>>> p.x + p.y # fields also accessable by name
33
>>> d = p._asdict() # convert to a dictionary
>>> d['x']
11
>>> Point(**d) # convert from a dictionary
Point(x=11, y=22)
>>> p._replace(x=100) # _replace() is like str.replace() but targets named fields
Point(x=100, y=22)
"""
# Parse and validate the field names. Validation serves two purposes,
# generating informative error messages and preventing template injection attacks.
if isinstance(field_names, basestring):
field_names = field_names.replace(',', ' ').split() # names separated by whitespace and/or commas
field_names = tuple(map(str, field_names))
for name in (typename,) + field_names:
if not all(c.isalnum() or c=='_' for c in name):
raise ValueError('Type names and field names can only contain alphanumeric characters and underscores: %r' % name)
if _iskeyword(name):
raise ValueError('Type names and field names cannot be a keyword: %r' % name)
if name[0].isdigit():
raise ValueError('Type names and field names cannot start with a number: %r' % name)
seen_names = set()
for name in field_names:
if name.startswith('_'):
raise ValueError('Field names cannot start with an underscore: %r' % name)
if name in seen_names:
raise ValueError('Encountered duplicate field name: %r' % name)
seen_names.add(name)
# Create and fill-in the class template
numfields = len(field_names)
argtxt = repr(field_names).replace("'", "")[1:-1] # tuple repr without parens or quotes
reprtxt = ', '.join('%s=%%r' % name for name in field_names)
dicttxt = ', '.join('%r: t[%d]' % (name, pos) for pos, name in enumerate(field_names))
template = '''class %(typename)s(tuple):
'%(typename)s(%(argtxt)s)' \n
__slots__ = () \n
_fields = %(field_names)r \n
def __new__(_cls, %(argtxt)s):
return _tuple.__new__(_cls, (%(argtxt)s)) \n
@classmethod
def _make(cls, iterable, new=tuple.__new__, len=len):
'Make a new %(typename)s object from a sequence or iterable'
result = new(cls, iterable)
if len(result) != %(numfields)d:
raise TypeError('Expected %(numfields)d arguments, got %%d' %% len(result))
return result \n
def __repr__(self):
return '%(typename)s(%(reprtxt)s)' %% self \n
def _asdict(t):
'Return a new dict which maps field names to their values'
return {%(dicttxt)s} \n
def _replace(_self, **kwds):
'Return a new %(typename)s object replacing specified fields with new values'
result = _self._make(map(kwds.pop, %(field_names)r, _self))
if kwds:
raise ValueError('Got unexpected field names: %%r' %% kwds.keys())
return result \n
def __getnewargs__(self):
return tuple(self) \n\n''' % locals()
for i, name in enumerate(field_names):
template += ' %s = _property(_itemgetter(%d))\n' % (name, i)
if verbose:
print template
# Execute the template string in a temporary namespace and
# support tracing utilities by setting a value for frame.f_globals['__name__']
namespace = dict(_itemgetter=_itemgetter, __name__='namedtuple_%s' % typename,
_property=property, _tuple=tuple)
try:
exec template in namespace
except SyntaxError, e:
raise SyntaxError(e.message + ':\n' + template)
result = namespace[typename]
# For pickling to work, the __module__ variable needs to be set to the frame
# where the named tuple is created. Bypass this step in enviroments where
# sys._getframe is not defined (Jython for example).
if hasattr(_sys, '_getframe'):
result.__module__ = _sys._getframe(1).f_globals.get('__name__', '__main__')
return result | ce2e4b2f6fe0243a8ac5c418d10c6352c95ea302 | 5,439 |
from typing import Optional
from typing import Sequence
def get_users(compartment_id: Optional[str] = None,
external_identifier: Optional[str] = None,
filters: Optional[Sequence[pulumi.InputType['GetUsersFilterArgs']]] = None,
identity_provider_id: Optional[str] = None,
name: Optional[str] = None,
state: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetUsersResult:
"""
This data source provides the list of Users in Oracle Cloud Infrastructure Identity service.
Lists the users in your tenancy. You must specify your tenancy's OCID as the value for the
compartment ID (remember that the tenancy is simply the root compartment).
See [Where to Get the Tenancy's OCID and User's OCID](https://docs.cloud.oracle.com/iaas/Content/API/Concepts/apisigningkey.htm#five).
## Example Usage
```python
import pulumi
import pulumi_oci as oci
test_users = oci.identity.get_users(compartment_id=var["tenancy_ocid"],
external_identifier=var["user_external_identifier"],
identity_provider_id=oci_identity_identity_provider["test_identity_provider"]["id"],
name=var["user_name"],
state=var["user_state"])
```
:param str compartment_id: The OCID of the compartment (remember that the tenancy is simply the root compartment).
:param str external_identifier: The id of a user in the identity provider.
:param str identity_provider_id: The id of the identity provider.
:param str name: A filter to only return resources that match the given name exactly.
:param str state: A filter to only return resources that match the given lifecycle state. The state value is case-insensitive.
"""
__args__ = dict()
__args__['compartmentId'] = compartment_id
__args__['externalIdentifier'] = external_identifier
__args__['filters'] = filters
__args__['identityProviderId'] = identity_provider_id
__args__['name'] = name
__args__['state'] = state
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('oci:identity/getUsers:getUsers', __args__, opts=opts, typ=GetUsersResult).value
return AwaitableGetUsersResult(
compartment_id=__ret__.compartment_id,
external_identifier=__ret__.external_identifier,
filters=__ret__.filters,
id=__ret__.id,
identity_provider_id=__ret__.identity_provider_id,
name=__ret__.name,
state=__ret__.state,
users=__ret__.users) | 4d404eb1069c829bb757f3870efc548583998434 | 5,440 |
def se_resnet20(num_classes: int = 10,
in_channels: int = 3
) -> ResNet:
""" SEResNet by Hu+18
"""
return resnet(num_classes, 20, in_channels, block=partial(SEBasicBlock, reduction=16)) | da1d1327d5e5d1b55d3b4cc9d42dbf381ece029f | 5,441 |
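# Usage sketch (added for illustration; not part of the original snippet).
# Assumes torch and the resnet/SEBasicBlock helpers used above are importable;
# the CIFAR-sized input below is an assumption about the intended use.
def _demo_se_resnet20():
    import torch
    model = se_resnet20(num_classes=10, in_channels=3)
    logits = model(torch.randn(2, 3, 32, 32))
    return logits.shape   # expected: torch.Size([2, 10])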
def parallel_threaded(function):
"""
A decorator for running a function within a parallel thread
"""
def decorator(*args, **kwargs):
t = ParallelThread(target=function,
args=args, kwargs=kwargs)
t.daemon = True
t.start()
return t
return decorator | 9f4936b0ab7de3d550b404043d6b0e37dbb3a066 | 5,442 |
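# Usage sketch (added for illustration; not part of the original snippet).
# Assumes ParallelThread from the surrounding module behaves like threading.Thread.
@parallel_threaded
def _demo_background_task(n):
    return sum(range(n))

# t = _demo_background_task(10**6)   # starts immediately in a daemon thread
# t.join()                           # optionally wait for it to finish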
def Upsample(x, size):
"""
Wrapper Around the Upsample Call
"""
return nn.functional.interpolate(x, size=size, mode="bilinear", align_corners=True) | dfaabb3999047589b2d755a2cc631b1389d172b1 | 5,443 |
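# Usage sketch (added for illustration; not part of the original snippet).
# Assumes torch is available and nn refers to torch.nn, as in the wrapper above.
def _demo_upsample():
    import torch
    x = torch.randn(1, 3, 32, 32)
    y = Upsample(x, size=(64, 64))   # bilinear resize of the spatial dimensions
    return y.shape                   # torch.Size([1, 3, 64, 64])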
def get_user_playlists(spotipy_obj, username):
"""Gets and returns all Spotify playlists owned by the username specified.
Parameters:
spotipy_obj: Spotipy object
username: Spotify username
Returns:
List of dictionaries, each dictionary a Spotify playlist object.
"""
# Grab all user playlists, including private ones
initial_playlists = spotipy_obj.user_playlists(username)
final_playlists = []
while initial_playlists:
for playlist in initial_playlists["items"]:
if playlist["owner"]["id"] == username:
final_playlists.append(playlist)
if initial_playlists["next"]:
initial_playlists = spotipy_obj.next(initial_playlists)
else:
initial_playlists = None
return final_playlists | 90c06e0ddd91a7a84f4d905dd9334f9b4c27f890 | 5,444 |
def create(dataset, target, features=None, l2_penalty=1e-2, l1_penalty=0.0,
solver='auto', feature_rescaling=True,
convergence_threshold = _DEFAULT_SOLVER_OPTIONS['convergence_threshold'],
step_size = _DEFAULT_SOLVER_OPTIONS['step_size'],
lbfgs_memory_level = _DEFAULT_SOLVER_OPTIONS['lbfgs_memory_level'],
max_iterations = _DEFAULT_SOLVER_OPTIONS['max_iterations'],
validation_set = "auto",
verbose=True):
"""
Create a :class:`~turicreate.linear_regression.LinearRegression` to
predict a scalar target variable as a linear function of one or more
features. In addition to standard numeric and categorical types, features
can also be extracted automatically from list- or dictionary-type SFrame
columns.
The linear regression module can be used for ridge regression, Lasso, and
elastic net regression (see References for more detail on these methods). By
default, this model has an l2 regularization weight of 0.01.
Parameters
----------
dataset : SFrame
The dataset to use for training the model.
target : string
Name of the column containing the target variable.
features : list[string], optional
Names of the columns containing features. 'None' (the default) indicates
that all columns except the target variable should be used as features.
The features are columns in the input SFrame that can be of the
following types:
- *Numeric*: values of numeric type integer or float.
- *Categorical*: values of type string.
- *Array*: list of numeric (integer or float) values. Each list element
is treated as a separate feature in the model.
- *Dictionary*: key-value pairs with numeric (integer or float) values
Each key of a dictionary is treated as a separate feature and the
value in the dictionary corresponds to the value of the feature.
Dictionaries are ideal for representing sparse data.
Columns of type *list* are not supported. Convert such feature
columns to type array if all entries in the list are of numeric
types. If the lists contain data of mixed types, separate
them out into different columns.
l2_penalty : float, optional
Weight on the l2-regularizer of the model. The larger this weight, the
more the model coefficients shrink toward 0. This introduces bias into
the model but decreases variance, potentially leading to better
predictions. The default value is 0.01; setting this parameter to 0
corresponds to unregularized linear regression. See the ridge
regression reference for more detail.
l1_penalty : float, optional
Weight on l1 regularization of the model. Like the l2 penalty, the
higher the l1 penalty, the more the estimated coefficients shrink toward
0. The l1 penalty, however, completely zeros out sufficiently small
coefficients, automatically indicating features that are not useful for
the model. The default weight of 0 prevents any features from being
discarded. See the LASSO regression reference for more detail.
solver : string, optional
Solver to use for training the model. See the references for more detail
on each solver.
- *auto (default)*: automatically chooses the best solver for the data
and model parameters.
- *newton*: Newton-Raphson
- *lbfgs*: limited memory BFGS
- *fista*: accelerated gradient descent
The model is trained using a carefully engineered collection of methods
that are automatically picked based on the input data. The ``newton``
method works best for datasets with plenty of examples and few features
(long datasets). Limited memory BFGS (``lbfgs``) is a robust solver for
wide datasets (i.e datasets with many coefficients). ``fista`` is the
default solver for l1-regularized linear regression. The solvers are
all automatically tuned and the default options should function well.
See the solver options guide for setting additional parameters for each
of the solvers.
See the user guide for additional details on how the solver is chosen.
feature_rescaling : boolean, optional
Feature rescaling is an important pre-processing step that ensures that
all features are on the same scale. An l2-norm rescaling is performed
to make sure that all features are of the same norm. Categorical
features are also rescaled by rescaling the dummy variables that are
used to represent them. The coefficients are returned in original scale
of the problem. This process is particularly useful when features
vary widely in their ranges.
validation_set : SFrame, optional
A dataset for monitoring the model's generalization performance.
For each row of the progress table, the chosen metrics are computed
for both the provided training dataset and the validation_set. The
format of this SFrame must be the same as the training set.
By default this argument is set to 'auto' and a validation set is
automatically sampled and used for progress printing. If
validation_set is set to None, then no additional metrics
are computed. The default value is 'auto'.
convergence_threshold : float, optional
Convergence is tested using variation in the training objective. The
variation in the training objective is calculated using the difference
between the objective values between two steps. Consider reducing this
below the default value (0.01) for a more accurately trained model.
Beware of overfitting (i.e a model that works well only on the training
data) if this parameter is set to a very low value.
lbfgs_memory_level : int, optional
The L-BFGS algorithm keeps track of gradient information from the
previous ``lbfgs_memory_level`` iterations. The storage requirement for
each of these gradients is the ``num_coefficients`` in the problem.
Increasing the ``lbfgs_memory_level`` can help improve the quality of
the model trained. Setting this to more than ``max_iterations`` has the
same effect as setting it to ``max_iterations``.
max_iterations : int, optional
The maximum number of allowed passes through the data. More passes over
the data can result in a more accurately trained model. Consider
increasing this (the default value is 10) if the training accuracy is
low and the *Grad-Norm* in the display is large.
step_size : float, optional (fista only)
The starting step size to use for the ``fista`` and ``gd`` solvers. The
default is set to 1.0, this is an aggressive setting. If the first
iteration takes a considerable amount of time, reducing this parameter
may speed up model training.
verbose : bool, optional
If True, print progress updates.
Returns
-------
out : LinearRegression
A trained model of type
:class:`~turicreate.linear_regression.LinearRegression`.
See Also
--------
LinearRegression, turicreate.boosted_trees_regression.BoostedTreesRegression, turicreate.regression.create
Notes
-----
- Categorical variables are encoded by creating dummy variables. For a
variable with :math:`K` categories, the encoding creates :math:`K-1` dummy
variables, while the first category encountered in the data is used as the
baseline.
- For prediction and evaluation of linear regression models with sparse
dictionary inputs, new keys/columns that were not seen during training
are silently ignored.
- Any 'None' values in the data will result in an error being thrown.
- A constant term is automatically added for the model intercept. This term
is not regularized.
- Standard errors on coefficients are only available when `solver=newton`
or when the default `auto` solver option chooses the newton method and if
the number of examples in the training data is more than the number of
coefficients. If standard errors cannot be estimated, a column of `None`
values are returned.
References
----------
- Hoerl, A.E. and Kennard, R.W. (1970) `Ridge regression: Biased Estimation
for Nonorthogonal Problems
<http://amstat.tandfonline.com/doi/abs/10.1080/00401706.1970.10488634>`_.
Technometrics 12(1) pp.55-67
    - Tibshirani, R. (1996) `Regression Shrinkage and Selection via the Lasso
      <http://www.jstor.org/discover/10.2307/2346178?uid=3739256&uid=2&uid=4&sid=21104169934983>`_.
      Journal of the Royal Statistical Society. Series B
(Methodological) 58(1) pp.267-288.
- Zhu, C., et al. (1997) `Algorithm 778: L-BFGS-B: Fortran subroutines for
large-scale bound-constrained optimization
<https://dl.acm.org/citation.cfm?id=279236>`_. ACM Transactions on
Mathematical Software 23(4) pp.550-560.
- Barzilai, J. and Borwein, J. `Two-Point Step Size Gradient Methods
<http://imajna.oxfordjournals.org/content/8/1/141.short>`_. IMA Journal of
Numerical Analysis 8(1) pp.141-148.
- Beck, A. and Teboulle, M. (2009) `A Fast Iterative Shrinkage-Thresholding
Algorithm for Linear Inverse Problems
<http://epubs.siam.org/doi/abs/10.1137/080716542>`_. SIAM Journal on
Imaging Sciences 2(1) pp.183-202.
- Zhang, T. (2004) `Solving large scale linear prediction problems using
stochastic gradient descent algorithms
<https://dl.acm.org/citation.cfm?id=1015332>`_. ICML '04: Proceedings of
the twenty-first international conference on Machine learning p.116.
Examples
--------
Given an :class:`~turicreate.SFrame` ``sf`` with a list of columns
[``feature_1`` ... ``feature_K``] denoting features and a target column
``target``, we can create a
:class:`~turicreate.linear_regression.LinearRegression` as follows:
>>> data = turicreate.SFrame('https://static.turi.com/datasets/regression/houses.csv')
>>> model = turicreate.linear_regression.create(data, target='price',
... features=['bath', 'bedroom', 'size'])
For ridge regression, we can set the ``l2_penalty`` parameter higher (the
default is 0.01). For Lasso regression, we set the l1_penalty higher, and
for elastic net, we set both to be higher.
.. sourcecode:: python
# Ridge regression
>>> model_ridge = turicreate.linear_regression.create(data, 'price', l2_penalty=0.1)
# Lasso
>>> model_lasso = turicreate.linear_regression.create(data, 'price', l2_penalty=0.,
l1_penalty=1.0)
# Elastic net regression
>>> model_enet = turicreate.linear_regression.create(data, 'price', l2_penalty=0.5,
l1_penalty=0.5)
"""
# Regression model names.
model_name = "regression_linear_regression"
solver = solver.lower()
model = _sl.create(dataset, target, model_name, features=features,
validation_set = validation_set,
solver = solver, verbose = verbose,
l2_penalty=l2_penalty, l1_penalty = l1_penalty,
feature_rescaling = feature_rescaling,
convergence_threshold = convergence_threshold,
step_size = step_size,
lbfgs_memory_level = lbfgs_memory_level,
max_iterations = max_iterations)
return LinearRegression(model.__proxy__) | 3988ac27163873a8feff9fd34a5e8fe87e923487 | 5,447 |
def argument(*name_or_flags, **kwargs):
"""Convenience function to properly format arguments to pass to the
subcommand decorator.
"""
args = list()
for arg in name_or_flags:
args.append(arg)
return args, kwargs | 0cae66e8b23211affc97fd8857f17b48a73cf286 | 5,448 |
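# Expected behaviour (added for illustration; not part of the original snippet):
# args, kwargs = argument('-v', '--verbose', action='store_true', help='verbose output')
# args   -> ['-v', '--verbose']
# kwargs -> {'action': 'store_true', 'help': 'verbose output'}
# The pair is typically unpacked by a subcommand decorator into parser.add_argument(*args, **kwargs).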
def get_logger():
"""
Provides the stem logger.
:returns: **logging.Logger** for stem
"""
return LOGGER | 8189cae16a244f0237f641e613783a484be5cf38 | 5,449 |
def get_graphql_type_for_model(model):
"""
Return the GraphQL type class for the given model.
"""
app_name, model_name = model._meta.label.split('.')
# Object types for Django's auth models are in the users app
if app_name == 'auth':
app_name = 'users'
class_name = f'{app_name}.graphql.types.{model_name}Type'
try:
return dynamic_import(class_name)
except AttributeError:
raise GraphQLTypeNotFound(f"Could not find GraphQL type for {app_name}.{model_name}") | d9f2b4093c290260db864cedd6b06958651bf713 | 5,450 |
from pathlib import Path
def load_image_files(container_path, dimension=(64, 64)):
"""
Load image files with categories as subfolder names
which performs like scikit-learn sample dataset
"""
image_dir = Path(container_path)
folders = [directory for directory in image_dir.iterdir() if directory.is_dir()]
categories = [fo.name for fo in folders]
descr = "A image classification dataset"
images = []
flat_data = []
target = []
for i, direc in enumerate(folders):
for file in direc.iterdir():
img = imread(file)
img_resized = resize(img, dimension, anti_aliasing=True, mode='reflect')
flat_data.append(img_resized.flatten())
images.append(img_resized)
target.append(i)
flat_data = np.array(flat_data)
target = np.array(target)
images = np.array(images)
print('done')
return Bunch(data=flat_data,
target=target,
target_names=categories,
images=images,
DESCR=descr) | 1c92309c7f8f0b99db841fed21901d37e143f41c | 5,451 |
import datetime  # the body calls datetime.datetime.now(), so import the module itself
import calendar
def create_calendar(year=None, month=None):
"""
Create an inline keyboard with the provided year and month
:param int year: Year to use in the calendar,
if None the current year is used.
:param int month: Month to use in the calendar,
if None the current month is used.
:return: Returns the InlineKeyboardMarkup object with the calendar.
"""
now = datetime.datetime.now()
if year is None:
year = now.year
if month is None:
month = now.month
data_ignore = create_callback_data("IGNORE", year, month, 0)
keyboard = []
# First row - Month and Year
row = []
row.append(InlineKeyboardButton(
calendar.month_name[month]+" "+str(year), callback_data=data_ignore)
)
keyboard.append(row)
# Second row - Week Days
row = []
for day in ["Mo", "Tu", "We", "Th", "Fr", "Sa", "Su"]:
row.append(InlineKeyboardButton(day, callback_data=data_ignore))
keyboard.append(row)
my_calendar = calendar.monthcalendar(year, month)
for week in my_calendar:
row = []
for day in week:
if day == 0:
row.append(InlineKeyboardButton(
" ", callback_data=data_ignore)
)
else:
row.append(InlineKeyboardButton(
str(day),
callback_data=create_callback_data(
"DAY",
year,
month,
day
))
)
keyboard.append(row)
# Last row - Buttons
row = []
row.append(InlineKeyboardButton(
"<", callback_data=create_callback_data(
"PREV-MONTH",
year,
month,
day
))
)
row.append(InlineKeyboardButton(
" ", callback_data=data_ignore)
)
row.append(InlineKeyboardButton(
">", callback_data=create_callback_data(
"NEXT-MONTH",
year,
month,
day
))
)
keyboard.append(row)
return InlineKeyboardMarkup(keyboard) | 232dd093b08c53f099b942d4497aef920002f5d4 | 5,452 |
def format_allowed_section(allowed):
"""Format each section of the allowed list"""
if allowed.count(":") == 0:
protocol = allowed
ports = []
elif allowed.count(":") == 1:
protocol, ports = allowed.split(":")
else:
return []
if ports.count(","):
ports = ports.split(",")
elif ports:
ports = [ports]
return_val = {"IPProtocol": protocol}
if ports:
return_val["ports"] = ports
return return_val | 0c07feec16562826a1f38a11b1d57782adf09b4d | 5,453 |
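# Expected behaviour (added for illustration; not part of the original snippet):
# format_allowed_section('tcp:80,443') -> {'IPProtocol': 'tcp', 'ports': ['80', '443']}
# format_allowed_section('udp:53')     -> {'IPProtocol': 'udp', 'ports': ['53']}
# format_allowed_section('icmp')       -> {'IPProtocol': 'icmp'}
# format_allowed_section('a:b:c')      -> []   (malformed input)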
def get_airmass(when, ra, dec):
"""Return the airmass of (ra,dec) at the specified observing time.
Uses :func:`cos_zenith_to_airmass`.
Parameters
----------
when : astropy.time.Time
Observation time, which specifies the local zenith.
ra : astropy.units.Quantity
Target RA angle(s)
dec : astropy.units.Quantity
Target DEC angle(s)
Returns
-------
array or float
Value of the airmass for each input (ra,dec).
"""
target = astropy.coordinates.ICRS(ra=ra, dec=dec)
zenith = get_observer(when, alt=90 * u.deg, az=0 * u.deg
).transform_to(astropy.coordinates.ICRS)
# Calculate zenith angle in degrees.
zenith_angle = target.separation(zenith)
# Convert to airmass.
return cos_zenith_to_airmass(np.cos(zenith_angle)) | 2d2b25963cc5814c8189b117734963feda762d88 | 5,455 |
def get_metadata(class_):
"""Returns a list of MetaDataTuple structures.
"""
return list(get_metadata_iterator(class_)) | 95bc083464431cd8bc3c273989680732f711c5c1 | 5,456 |
def get_register(regname):
"""
    Get a register value. An exception will be raised if the expression cannot be parsed;
    this function deliberately does not catch it.
@param regname: expected register
@return register value
"""
t = gdb.lookup_type("unsigned long")
reg = gdb.parse_and_eval(regname)
return long( reg.cast(t) ) | 43d077b59dc0b1cb8a6233538a2a1291216c1ec4 | 5,457 |
import re
def create_queries(project_id, ticket_number, pids_project_id, pids_dataset_id,
pids_table):
"""
Creates sandbox and truncate queries to run for EHR deactivated retraction
:param project_id: bq name of project
:param ticket_number: Jira ticket number to identify and title sandbox table
:param pids_project_id: deactivated ehr pids table in bq's project_id
:param pids_dataset_id: deactivated ehr pids table in bq's dataset_id
:param pids_table: deactivated pids table in bq's table name
:return: list of queries to run
"""
queries_list = []
dataset_list = set()
final_date_column_df = pd.DataFrame()
# Hit bq and receive df of deactivated ehr pids and deactivated date
client = get_client(project_id)
deactivated_ehr_pids_df = client.query(
DEACTIVATED_PIDS_QUERY.render(project=pids_project_id,
dataset=pids_dataset_id,
table=pids_table)).to_dataframe()
date_columns_df = get_date_info_for_pids_tables(project_id, client)
LOGGER.info(
"Dataframe creation complete. DF to be used for creation of retraction queries."
)
for date_row in date_columns_df.itertuples(index=False):
# Filter to only include tables containing deactivated pids with the earliest deactivated date
LOGGER.info(
f'Checking table: {date_row.project_id}.{date_row.dataset_id}.{date_row.table}'
)
if check_pid_exist(date_row, client, pids_project_id, pids_dataset_id,
pids_table):
dataset_list.add(date_row.dataset_id)
row = {
'project_id': date_row.project_id,
'dataset_id': date_row.dataset_id,
'table': date_row.table,
'date_column': date_row.date_column,
'start_date_column': date_row.start_date_column,
'end_date_column': date_row.end_date_column
}
final_date_column_df = final_date_column_df.append(
row, ignore_index=True)
LOGGER.info(
"Looping through the deactivated PIDS df to create queries based on the retractions needed per PID table"
)
for ehr_row in deactivated_ehr_pids_df.itertuples(index=False):
LOGGER.info(f'Creating retraction queries for PID: {ehr_row.person_id}')
for date_row in final_date_column_df.itertuples(index=False):
# Determine if dataset is deid to correctly pull pid or research_id and check if ID exists in dataset or if
# already retracted
if re.match(DEID_REGEX, date_row.dataset_id):
pid = get_research_id(date_row.project_id, date_row.dataset_id,
ehr_row.person_id, client)
else:
pid = ehr_row.person_id
# Get or create sandbox dataset
sandbox_dataset = check_and_create_sandbox_dataset(
date_row.project_id, date_row.dataset_id)
# Create queries based on type of date field
LOGGER.info(
f'Creating Query to retract {pid} from {date_row.dataset_id}.{date_row.table}'
)
if pd.isnull(date_row.date_column):
sandbox_query = SANDBOX_QUERY_END_DATE.render(
project=date_row.project_id,
sandbox_dataset=sandbox_dataset,
dataset=date_row.dataset_id,
table=date_row.table,
pid=pid,
deactivated_pids_project=pids_project_id,
deactivated_pids_dataset=pids_dataset_id,
deactivated_pids_table=pids_table,
end_date_column=date_row.end_date_column,
start_date_column=date_row.start_date_column)
clean_query = CLEAN_QUERY_END_DATE.render(
project=date_row.project_id,
dataset=date_row.dataset_id,
table=date_row.table,
pid=pid,
deactivated_pids_project=pids_project_id,
deactivated_pids_dataset=pids_dataset_id,
deactivated_pids_table=pids_table,
end_date_column=date_row.end_date_column,
start_date_column=date_row.start_date_column)
else:
sandbox_query = SANDBOX_QUERY_DATE.render(
project=date_row.project_id,
sandbox_dataset=sandbox_dataset,
dataset=date_row.dataset_id,
table=date_row.table,
pid=pid,
deactivated_pids_project=pids_project_id,
deactivated_pids_dataset=pids_dataset_id,
deactivated_pids_table=pids_table,
date_column=date_row.date_column)
clean_query = CLEAN_QUERY_DATE.render(
project=date_row.project_id,
dataset=date_row.dataset_id,
table=date_row.table,
pid=pid,
deactivated_pids_project=pids_project_id,
deactivated_pids_dataset=pids_dataset_id,
deactivated_pids_table=pids_table,
date_column=date_row.date_column)
queries_list.append({
clean_consts.QUERY:
sandbox_query,
clean_consts.DESTINATION:
date_row.project_id + '.' + sandbox_dataset + '.' +
(ticket_number + '_' + date_row.table),
clean_consts.DESTINATION_DATASET:
date_row.dataset_id,
clean_consts.DESTINATION_TABLE:
date_row.table,
clean_consts.DISPOSITION:
bq_consts.WRITE_APPEND,
'type':
'sandbox'
})
queries_list.append({
clean_consts.QUERY:
clean_query,
clean_consts.DESTINATION:
date_row.project_id + '.' + date_row.dataset_id + '.' +
date_row.table,
clean_consts.DESTINATION_DATASET:
date_row.dataset_id,
clean_consts.DESTINATION_TABLE:
date_row.table,
clean_consts.DISPOSITION:
bq_consts.WRITE_TRUNCATE,
'type':
'retraction'
})
LOGGER.info(
f"Query list complete, retracting ehr deactivated PIDS from the following datasets: "
f"{dataset_list}")
return queries_list | 2940468b76ccd4d16dfb1bbddf440be635eaaf8d | 5,458 |
def load_and_estimate(file, arguments, denoise=medfilt, data=None):
"""Loads mean+std images and evaluates noise. Required for parallelization."""
# Pipeline for µCT data
if data is not None:
# Evaluate noise on data
noises = np.zeros(len(metrics))
for m in range(len(metrics)):
noise = estimate_noise(data, metrics[m], kernel_size=kernel_size, denoise_method=denoise)
noises[m] = noise
return np.array(noises)
# Pipeline for images
# Get images
path = arguments.image_path
# Load images
image_surf, image_deep, image_calc = load_vois_h5(path, file)
# Auto crop
if arguments.auto_crop:
image_deep, cropped = auto_corner_crop(image_deep)
image_calc, cropped = auto_corner_crop(image_calc)
# Evaluate noise on mean+std images
noises_surf, noises_deep, noises_calc = np.zeros(len(metrics)), np.zeros(len(metrics)), np.zeros(len(metrics))
for m in range(len(metrics)):
noise_surf = estimate_noise(image_surf, metrics[m], kernel_size=kernel_size, denoise_method=denoise)
noise_deep = estimate_noise(image_deep, metrics[m], kernel_size=kernel_size, denoise_method=denoise)
noise_calc = estimate_noise(image_calc, metrics[m], kernel_size=kernel_size, denoise_method=denoise)
noises_surf[m] = noise_surf
noises_deep[m] = noise_deep
noises_calc[m] = noise_calc
return np.array((noises_surf, noises_deep, noises_calc)) | 63b53eb5441dd9a2e9f4b558005b640109fea220 | 5,459 |
import torch
def calc_self_attn(
bert_model: BertModel, protein: dict, device="cuda:0", **kwargs
):
"""Calculate self-attention matrices given Bert model for one protein.
Args:
bert_model: a BertModel instance
protein: a dict object from LM-GVP formatted data (json record).
device: device to do the computation
Returns:
torch.tensor of shape: [n_maps, seqlen, seqlen]
"""
bert_model = bert_model.to(device)
bert_model.eval()
with torch.no_grad():
self_attn_mats = bert_model(
protein["input_ids"].unsqueeze(0).to(device),
attention_mask=protein["attention_mask"].unsqueeze(0).to(device),
output_attentions=True,
).attentions
# gather self-attention map from all layers together
n_layers = len(self_attn_mats)
batch_size, n_heads, seqlen, _ = self_attn_mats[0].size()
self_attn_mats = torch.stack(self_attn_mats, dim=1).view(
batch_size, n_layers * n_heads, seqlen, seqlen
)
# remove [CLS] and [SEP]
self_attn_mats = self_attn_mats[..., 1:-1, 1:-1]
if self_attn_mats.size()[0] == 1:
self_attn_mats = self_attn_mats.squeeze(0)
self_attn_mats = self_attn_mats.detach().cpu()
return self_attn_mats | 4d076cc232207c9c446c4f9f52f1156af2afabf2 | 5,460 |
def compute_median_survival_time(times, surv_function):
"""
Computes a median survival time estimate by looking for where the survival
function crosses 1/2.
Parameters
----------
times : 1D numpy array
Sorted list of unique times (in ascending order).
surv_function : 1D numpy array
A survival function evaluated at each of time in `times`, in the same
order.
Returns
-------
output : float
Median survival time estimate.
"""
t_left = times[0]
t_right = times[-1]
if surv_function[-1] > 1/2:
# survival function never crosses 1/2; just output this last time point
return t_right
for t, s in zip(times, surv_function):
if s >= 0.5:
t_left = t
for t, s in zip(reversed(times), reversed(surv_function)):
if s <= 0.5:
t_right = t
return (t_left + t_right) / 2. | 22103bc705acb791c0937a403aa9c34e9145e1c2 | 5,461 |
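# Usage sketch (added for illustration; not part of the original snippet).
def _demo_median_survival_time():
    import numpy as np
    times = np.array([1.0, 2.0, 3.0, 4.0])
    surv = np.array([0.9, 0.7, 0.4, 0.2])   # crosses 1/2 between t=2 and t=3
    return compute_median_survival_time(times, surv)   # -> 2.5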
def TDMAsolver_no_vec(coeffs):
"""
TDMA solver, a b c d can be NumPy array type or Python list type.
refer to http://en.wikipedia.org/wiki/Tridiagonal_matrix_algorithm
and to http://www.cfd-online.com/Wiki/Tridiagonal_matrix_algorithm_-_TDMA_(Thomas_algorithm)
"""
a = coeffs[1:, 0]
b = coeffs[:, 1]
c = coeffs[:-1, 2]
d = coeffs[:, 3]
nf = len(d) # number of equations
ac, bc, cc, dc = map(np.array, (a, b, c, d)) # copy arrays
for it in range(1, nf):
mc = ac[it-1]/bc[it-1]
bc[it] = bc[it] - mc*cc[it-1]
dc[it] = dc[it] - mc*dc[it-1]
xc = bc
xc[-1] = dc[-1]/bc[-1]
    # back-substitution must run all the way down to index 0
    for il in range(nf-2, -1, -1):
xc[il] = (dc[il]-cc[il]*xc[il+1])/bc[il]
return xc | cdec1baa7ce0fe2baef71631b0ba678a0f7559dc | 5,462 |
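# Usage sketch (added for illustration; not part of the original snippet).
# Each row of `coeffs` holds [sub-diagonal, diagonal, super-diagonal, rhs];
# the unused corner entries (first sub-diagonal, last super-diagonal) are padding.
def _demo_tdma():
    import numpy as np
    coeffs = np.array([[ 0.0, 2.0, -1.0, 1.0],
                       [-1.0, 2.0, -1.0, 0.0],
                       [-1.0, 2.0, -1.0, 0.0],
                       [-1.0, 2.0,  0.0, 1.0]])
    return TDMAsolver_no_vec(coeffs)   # -> approximately [1., 1., 1., 1.]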
def aggregate_ant(data, sub_num, response_type="full"):
"""
Aggregate data from the ANT task.
Calculates various summary statistics for the ANT task for a given subject.
Parameters
----------
data : dataframe
Pandas dataframe containing a single subjects trial data for the task.
sub_num : str
Subject number to which the data file belongs.
response_type : {'full', 'correct', 'incorrect'}, optional
Should the summary data be calculated using all trials? Only correct
trials? Or only incorrect trials? This is not supported in all tasks.
Returns
-------
stats : list
List containing the calculated data for the subject.
"""
# Calculate times following errors and correct responses
df = data
follow_error_rt = df.loc[df.correct.shift() == 0, "RT"].mean()
follow_correct_rt = df.loc[df.correct.shift() == 1, "RT"].mean()
if response_type == "correct":
df = data[data["correct"] == 1]
elif response_type == "incorrect":
df = data[data["correct"] == 0]
elif response_type == "full":
df = data
# Aggregated descriptives
## congruency conditions
grouped_congruency = df.groupby("congruency")
neutral_rt = grouped_congruency.mean().get_value("neutral", "RT")
congruent_rt = grouped_congruency.mean().get_value("congruent", "RT")
incongruent_rt = grouped_congruency.mean().get_value("incongruent", "RT")
neutral_rtsd = grouped_congruency.std().get_value("neutral", "RT")
congruent_rtsd = grouped_congruency.std().get_value("congruent", "RT")
incongruent_rtsd = grouped_congruency.std().get_value("incongruent", "RT")
neutral_rtcov = neutral_rtsd / neutral_rt
congruent_rtcov = congruent_rtsd / congruent_rt
incongruent_rtcov = incongruent_rtsd / incongruent_rt
neutral_correct = grouped_congruency.sum().get_value("neutral", "correct")
congruent_correct = grouped_congruency.sum().get_value("congruent", "correct")
incongruent_correct = grouped_congruency.sum().get_value("incongruent", "correct")
## cue conditions
grouped_cue = df.groupby("cue")
nocue_rt = grouped_cue.mean().get_value("nocue", "RT")
center_rt = grouped_cue.mean().get_value("center", "RT")
spatial_rt = grouped_cue.mean().get_value("spatial", "RT")
double_rt = grouped_cue.mean().get_value("double", "RT")
nocue_rtsd = grouped_cue.std().get_value("nocue", "RT")
center_rtsd = grouped_cue.std().get_value("center", "RT")
spatial_rtsd = grouped_cue.std().get_value("spatial", "RT")
double_rtsd = grouped_cue.std().get_value("double", "RT")
nocue_rtcov = nocue_rtsd / nocue_rt
center_rtcov = center_rtsd / center_rt
spatial_rtcov = spatial_rtsd / spatial_rt
double_rtcov = double_rtsd / double_rt
nocue_correct = grouped_cue.sum().get_value("nocue", "correct")
center_correct = grouped_cue.sum().get_value("center", "correct")
spatial_correct = grouped_cue.sum().get_value("spatial", "correct")
double_correct = grouped_cue.sum().get_value("double", "correct")
# OLS regression
conflict_intercept, conflict_slope = congruent_rt, incongruent_rt - congruent_rt
conflict_slope_norm = conflict_slope / congruent_rt
alerting_intercept, alerting_slope = double_rt, nocue_rt - double_rt
alerting_slope_norm = alerting_slope / double_rt
orienting_intercept, orienting_slope = spatial_rt, center_rt - spatial_rt
orienting_slope_norm = orienting_slope / spatial_rt
return [
sub_num,
follow_error_rt,
follow_correct_rt,
neutral_rt,
congruent_rt,
incongruent_rt,
neutral_rtsd,
congruent_rtsd,
incongruent_rtsd,
neutral_rtcov,
congruent_rtcov,
incongruent_rtcov,
neutral_correct,
congruent_correct,
incongruent_correct,
nocue_rt,
center_rt,
spatial_rt,
double_rt,
nocue_rtsd,
center_rtsd,
spatial_rtsd,
double_rtsd,
nocue_rtcov,
center_rtcov,
spatial_rtcov,
double_rtcov,
nocue_correct,
center_correct,
spatial_correct,
double_correct,
conflict_intercept,
conflict_slope,
conflict_slope_norm,
alerting_intercept,
alerting_slope,
alerting_slope_norm,
orienting_intercept,
orienting_slope,
orienting_slope_norm,
] | be01651d450560a5c36bc6240025fe59352d6347 | 5,463 |
def parse_search_after(params):
"""Validate search_after and return it as a list of [score, ID]."""
search_pair = params.get("search_after")
sort = params.get("sort")
if not search_pair or not sort:
return
if '_' not in search_pair or len(search_pair.split("_")) != 2:
return
_score, _id = search_pair.split("_")
_sort = sort.split("_")[0]
if _sort not in ["relevance", "created"]:
log.error("{} is not a supported sort value.".format(_sort))
return
if _sort == "relevance":
score = test_float(_score)
if score is None:
log.error("Search_after relevance score is not a float.")
return
elif _sort == "created":
if not str(_score).isdigit():
log.error("Search_after date score is not an integer.")
return
score = int(_score)
return [score, _id] | f44228d4f5b47129218d122adcb29e41a81c5a1f | 5,464 |
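# Expected behaviour (added for illustration; not part of the original snippet;
# assumes the module-level test_float helper parses '0.123' to a float):
# parse_search_after({'search_after': '0.123_abc', 'sort': 'relevance_desc'})
#     -> [0.123, 'abc']
# parse_search_after({'search_after': '1617184800_abc', 'sort': 'created_desc'})
#     -> [1617184800, 'abc']
# parse_search_after({'search_after': 'bad', 'sort': 'relevance_desc'})  -> None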