content (string, lengths 35 to 762k) | sha1 (string, length 40) | id (int64, 0 to 3.66M) |
---|---|---|
from typing import List
from typing import Dict
import logging

logger = logging.getLogger(__name__)  # assumed module-level logger; the original module defines its own
def process_line(line: str, conditional_chain: List[str],
fields: Dict[str, str]):
""" Processes a line in the template, i.e. returns the output html code
after evaluating all if statements and filling the fields. Since we
oftentimes are in the middle of several if statements, we need to pass
the current conditional_chain (i.e. the list of if statments the following
line will be subject to) on (and also need to use it).
Args:
line: Line we are processing
conditional_chain: In which conditionals are we currently enclosed?
fields: field values
Returns:
(html output, conditional_chain)
"""
after = line
out = ""
while after:
before, enclosed, after = next_braces(after)
if evaluate_conditional_chain(conditional_chain, fields):
out += before
if is_pos_conditional(enclosed) or is_neg_conditional(enclosed):
conditional_chain.append(enclosed)
elif is_close_conditional(enclosed):
            if not conditional_chain:
_ = "Closing conditional '{}' found, but we didn't encounter" \
" a conditional before.".format(enclosed)
logger.error(_)
else:
field_name = get_field_name(enclosed)
if field_name not in conditional_chain[-1]:
_ = "Closing conditional '{}' found, but the last opened" \
" conditional was {}. I will " \
"ignore this.".format(enclosed, field_name)
logger.error(_)
else:
conditional_chain.pop()
elif is_field(enclosed):
field_name = get_field_name(enclosed)
if field_name in fields:
out += fields[field_name]
else:
_ = "Could not find value for field '{}'".format(field_name)
logger.error(_)
return out, conditional_chain
|
d84a679d1dc292f3f9cccaf7b6f6718c1ff7cbfc
| 20,400 |
def get_groundstation_code(gsi):
"""
Translate a GSI code into an EODS domain code.
Domain codes are used in dataset_ids.
It will also translate common gsi aliases if needed.
:type gsi: str
:rtype: str
>>> get_groundstation_code('ASA')
'002'
>>> get_groundstation_code('HOA')
'011'
>>> # Aliases should work too.
>>> get_groundstation_code('ALSP')
'002'
"""
groundstation = metadata.get_groundstation(gsi)
if not groundstation:
return None
return groundstation.eods_domain_code
|
a9a04935cfceeb4ca4b90f7ba05ff5c7076ff917
| 20,401 |
from typing import Union
import copy
def concatenate(*data_frames: DataFrame,
**data_points_or_frames: Union[DataFrame, DataPoint]) \
-> DataFrame:
"""
    Concatenate DataFrame objects or DataPoints into one DataFrame.
Example:
if one DataFrame represents as:
df1 -> {'a': 1, 'b': 2}
another as:
df2 -> {'c': 3, 'd': 4}
you can concatenate simple keys into one DataFrame:
df3 = concatenate(b=df1.get('b'), c=df2.get('c'))
    This is not a performance-friendly operation for long DataFrames (DataFrames
    which have a lot of functions), because data will be duplicated and merged
    at the end of the first pipeline graph.
    :param data_frames: list of DataFrames to concatenate together; if
    some keys overlap, they will be replaced by the latest DataFrame.
    :param data_points_or_frames: mapping of key->DataPoint or key->DataFrame
    which will be stored in the new DataFrame. If a value is a DataFrame it will
    be wrapped in a new key.
    :return: New DataFrame which represents the concatenation of the provided values.
"""
ensure_concatenate_allowed(data_frames, data_points_or_frames)
base_pipeline = None
if data_frames:
base_pipeline = data_frames[0].data_pipeline
if data_points_or_frames:
base_pipeline = list(data_points_or_frames.values())[0].data_pipeline
keys = sorted(data_points_or_frames.keys())
connector_name = "%s:%s" % ("concatenate", "/".join(keys))
last_p_component = PipelineConnectorComponent(
pipeline=base_pipeline,
name=connector_name)
# TODO: check if all data_pipelines_transformations actually transformations
data_pipeline_items = [dp.data_pipeline for dp in data_points_or_frames.values()]
# generate's result transformation object
for key, data_point_or_frame in data_points_or_frames.items():
p = data_point_or_frame.data_pipeline
leaves = pipeline_graph.get_leaves(p.graph)
assert len(leaves) == 1
if isinstance(data_point_or_frame, DataPoint):
data_point = data_point_or_frame
keys_to_transform = {data_point.get_key(): key}
transformation_func = transformations_types.KeyToKey(keys_to_transform)
leaves[0].p_component.add_context(last_p_component,
transformation_func)
else:
data_frame = data_point_or_frame
keys_to_transform = data_frame.transformation
transformation_func = transformations_types.KeysToDict(key,
keys_to_transform)
leaves[0].p_component.add_context(last_p_component,
transformation_func)
for data_frame in data_frames:
p = data_frame.data_pipeline
leaves = pipeline_graph.get_leaves(p.graph)
assert len(leaves) == 1
keys_to_transform = data_frame.transformation
transformation_func = transformations_types.AllKeys(keys_to_transform)
leaves[0].p_component.add_context(last_p_component,
transformation_func)
base_data_pipeline = copy.copy(data_pipeline_items[0])
base_graph = base_data_pipeline.graph
base_worker_info = data_pipeline_items[0].worker_info
for data_pipeline in data_pipeline_items[1:]:
if data_pipeline.worker_info is not base_worker_info:
raise RuntimeError("Not possible to use different worker configs, "
"for pipeline")
graph, base_g_item, handler_item = \
concatenate_sequentially(base_graph, data_pipeline.graph)
base_graph = graph
base_graph.add_pipeline_component(last_p_component)
return DataFrame(base_data_pipeline)
|
7f2fb0731c5f1ee8f743e086b8e5ea667f066982
| 20,402 |
def mixed_social_welfare(game, mix):
"""Returns the social welfare of a mixed strategy profile"""
return game.expected_payoffs(mix).dot(game.num_role_players)
|
72c465211bdc79c9fcf2b1b9d8c7dd5abae5d8df
| 20,403 |
def bdh(
tickers, flds=None, start_date=None, end_date='today', adjust=None, **kwargs
) -> pd.DataFrame:
"""
Bloomberg historical data
Args:
tickers: ticker(s)
flds: field(s)
start_date: start date
end_date: end date - default today
adjust: `all`, `dvd`, `normal`, `abn` (=abnormal), `split`, `-` or None
exact match of above words will adjust for corresponding events
Case 0: `-` no adjustment for dividend or split
Case 1: `dvd` or `normal|abn` will adjust for all dividends except splits
            Case 2: `split` will adjust for splits and ignore all dividends
Case 3: `all` == `dvd|split` == adjust for all
Case 4: None == Bloomberg default OR use kwargs
**kwargs: overrides
Returns:
pd.DataFrame
Examples:
>>> res = bdh(
... tickers='VIX Index', flds=['High', 'Low', 'Last_Price'],
... start_date='2018-02-05', end_date='2018-02-07',
... ).round(2).transpose()
>>> res.index.name = None
>>> res.columns.name = None
>>> res
2018-02-05 2018-02-06 2018-02-07
VIX Index High 38.80 50.30 31.64
Low 16.80 22.42 21.17
Last_Price 37.32 29.98 27.73
>>> bdh(
... tickers='AAPL US Equity', flds='Px_Last',
... start_date='20140605', end_date='20140610', adjust='-'
... ).round(2)
ticker AAPL US Equity
field Px_Last
2014-06-05 647.35
2014-06-06 645.57
2014-06-09 93.70
2014-06-10 94.25
>>> bdh(
... tickers='AAPL US Equity', flds='Px_Last',
... start_date='20140606', end_date='20140609',
... CshAdjNormal=False, CshAdjAbnormal=False, CapChg=False,
... ).round(2)
ticker AAPL US Equity
field Px_Last
2014-06-06 645.57
2014-06-09 93.70
"""
logger = logs.get_logger(bdh, level=kwargs.pop('log', logs.LOG_LEVEL))
# Dividend adjustments
if isinstance(adjust, str) and adjust:
if adjust == 'all':
kwargs['CshAdjNormal'] = True
kwargs['CshAdjAbnormal'] = True
kwargs['CapChg'] = True
else:
kwargs['CshAdjNormal'] = 'normal' in adjust or 'dvd' in adjust
kwargs['CshAdjAbnormal'] = 'abn' in adjust or 'dvd' in adjust
kwargs['CapChg'] = 'split' in adjust
con, _ = create_connection()
elms = assist.proc_elms(**kwargs)
ovrds = assist.proc_ovrds(**kwargs)
if isinstance(tickers, str): tickers = [tickers]
if flds is None: flds = ['Last_Price']
if isinstance(flds, str): flds = [flds]
e_dt = utils.fmt_dt(end_date, fmt='%Y%m%d')
if start_date is None:
start_date = pd.Timestamp(e_dt) - relativedelta(months=3)
s_dt = utils.fmt_dt(start_date, fmt='%Y%m%d')
logger.info(
f'loading historical data from Bloomberg:\n'
f'{assist.info_qry(tickers=tickers, flds=flds)}'
)
logger.debug(
f'\nflds={flds}\nelms={elms}\novrds={ovrds}\nstart_date={s_dt}\nend_date={e_dt}'
)
res = con.bdh(
tickers=tickers, flds=flds, elms=elms, ovrds=ovrds, start_date=s_dt, end_date=e_dt
)
res.index.name = None
if (len(flds) == 1) and kwargs.get('keep_one', False):
return res.xs(flds[0], axis=1, level=1)
return res
|
2c57e9e7d8f1f0155dab35b59e3a2c31cf4f7aa1
| 20,404 |
import re
def isValid(text):
"""
"Play Blackjack"
"""
return bool(re.search(r'\bblackjack\b', text, re.IGNORECASE))
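# Quick usage sketch (nothing beyond the standard library is assumed): the \b word
# boundaries plus re.IGNORECASE mean whole-word matches succeed regardless of case,
# while partial words do not.
print(isValid("Let's play Blackjack tonight"))  # True
print(isValid("I collect blackjacks"))          # False: the trailing 's' breaks the \b boundary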
|
c1960a9683bde9701b4e3900edd41e4d6e5444ac
| 20,405 |
def init_app(app):
"""init the flask application
:param app:
:return:
"""
return app
|
6e460eb1fdc19553c6c4139e60db06daec507a2d
| 20,406 |
def bounds(geometry, **kwargs):
"""Computes the bounds (extent) of a geometry.
For each geometry these 4 numbers are returned: min x, min y, max x, max y.
Parameters
----------
geometry : Geometry or array_like
**kwargs
For other keyword-only arguments, see the
`NumPy ufunc docs <https://numpy.org/doc/stable/reference/ufuncs.html#ufuncs-kwargs>`_.
Examples
--------
>>> bounds(Geometry("POINT (2 3)")).tolist()
[2.0, 3.0, 2.0, 3.0]
>>> bounds(Geometry("LINESTRING (0 0, 0 2, 3 2)")).tolist()
[0.0, 0.0, 3.0, 2.0]
>>> bounds(Geometry("POLYGON EMPTY")).tolist()
[nan, nan, nan, nan]
>>> bounds(None).tolist()
[nan, nan, nan, nan]
"""
# We need to provide the `out` argument here for compatibility with
# numpy < 1.16. See https://github.com/numpy/numpy/issues/14949
geometry_arr = np.asarray(geometry, dtype=np.object_)
out = np.empty(geometry_arr.shape + (4,), dtype="float64")
return lib.bounds(geometry_arr, out=out, **kwargs)
|
bdf90b760fc7c62d66596159136961e7840077c4
| 20,407 |
import numpy as np
def getRidgeEdge(distComponent, maxCoord, direction):
    """
    Within the band between the maximum value and (maximum value - 1), find the farthest
    and nearest points as seen from the specified direction.
    This is used to find, among points whose distance from the green region is near the
    maximum, the points farthest from and nearest to the camera, which in turn gives the
    centers of the stone's top and bottom faces.
    """
    # maximum value
    maxValue = distComponent[maxCoord]
    # coordinates of all points whose value is at least (maximum value - 1)
    ridge = np.array(np.where(distComponent >= maxValue - 1)).T
    # to avoid picking up a neighboring stone, only keep points within maxValue of maxCoord
ridge = ridge[np.apply_along_axis(
lambda pt: np.linalg.norm( np.array(pt) - maxCoord ) <= maxValue ,
axis=1,
arr=ridge)]
    # dot product of each ridge point (relative to maxCoord) with the direction
dotValue = np.apply_along_axis(
lambda pt: np.dot(np.array(pt) - maxCoord, direction),
axis=1,
arr=ridge
)
    # return the coordinates of the points with the largest and smallest dot products
maxEdgePoint = np.array(ridge[np.argmax(dotValue)])
minEdgePoint = np.array(ridge[np.argmin(dotValue)])
return maxEdgePoint, minEdgePoint
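# Self-contained usage sketch with a synthetic cone-shaped "distance map" (no real
# green-region segmentation involved); direction [0, 1] means "along +x (columns)".
yy, xx = np.mgrid[0:9, 0:9]
dist = np.maximum(0.0, 4.0 - np.hypot(yy - 4, xx - 4))  # peaks at (4, 4) with value 4.0
far_pt, near_pt = getRidgeEdge(dist, (4, 4), np.array([0.0, 1.0]))
print(far_pt, near_pt)  # -> [4 5] [4 3]: ridge points farthest/nearest along +x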
|
b22b592ee9467f1205d49e5c83dfe978b5dc2f35
| 20,408 |
def map_vL(X, w):
"""
Maps a random sample drawn from vector Langevin with orientation u = [0,...,0,1] to
a sample that follows vector Langevin with orientation w.
"""
assert w.shape[0] == X.shape[0]
#assert np.linalg.norm(w) == 1.
#print('Orientation vector length : ' + str(np.linalg.norm(w)))
d = w.shape[0]
w = w.reshape(w.shape[0],1)
H = np.eye(d) - 2 * np.dot(w, w.T)
[l, v] = np.linalg.eigh(H)
V = v[:,::-1]
if np.sum( w.flatten()*V[:,-1] ) < 0:
V[:,-1] = -V[:,-1].copy()
return np.dot(V, X)
|
a7d06a295569bb08d800c46c302c5a38ef2c2f52
| 20,409 |
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
def get_constant_schedule_with_warmup(optimizer: Optimizer, num_warmup_steps: int, last_epoch: int = -1):
"""
Create a schedule with a constant learning rate preceded by a warmup period during which the learning rate
increases linearly between 0 and the initial lr set in the optimizer.
Args:
optimizer (:class:`~torch.optim.Optimizer`):
The optimizer for which to schedule the learning rate.
num_warmup_steps (:obj:`int`):
The number of steps for the warmup phase.
last_epoch (:obj:`int`, `optional`, defaults to -1):
The index of the last epoch when resuming training.
Return:
:obj:`torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule.
"""
def lr_lambda(current_step: int):
if current_step < num_warmup_steps:
return float(current_step) / float(max(1.0, num_warmup_steps))
return 1.0
return LambdaLR(optimizer, lr_lambda, last_epoch=last_epoch)
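# Usage sketch (assumes PyTorch is installed; the toy model below is made up purely
# for illustration). The learning rate ramps linearly from 0 to the optimizer's lr
# over `num_warmup_steps`, then stays constant.
import torch

model = torch.nn.Linear(10, 1)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
scheduler = get_constant_schedule_with_warmup(optimizer, num_warmup_steps=100)
for step in range(300):
    optimizer.step()
    scheduler.step()  # after 100 steps the lr sits at 0.1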
|
825c4e14f91992c39d5be37b814e5c3bd7177c50
| 20,410 |
import numpy as np
def slid_window_avg(a, wi):
    """ a simple window-averaging function, centered on the current point """
# TODO: replace with pandas rolling average. - rth
acopy = np.array(a).copy()
a_smoothed = np.zeros(acopy.shape)
wi_half = wi // 2
wi_other_half = wi - wi_half
for i in range(acopy.shape[0]):
aslice = acopy[
max(0, i - wi_half) : min(i + wi_other_half, acopy.shape[0])
]
a_smoothed[i] = np.mean(aslice, axis=0)
# a_smoothed[i] += np.sum(aslice,axis=0)/ (aslice).shape[0]
# print(aslice,a_smoothed[i] , acopy[i])
return a_smoothed
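# Usage sketch on a simple ramp: interior points get the plain 5-point mean, while the
# window is truncated at the edges (index 0 averages a[0:3], index 9 averages a[7:10]).
a = np.arange(10, dtype=float)
print(slid_window_avg(a, 5).tolist())
# -> [1.0, 1.5, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 7.5, 8.0]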
|
bd29ca53e33b473a49fcf75cf639335fb84708fd
| 20,411 |
def find_bigrams(textdict, threshold=0.1):
"""
find bigrams in the texts
Input:
- textdict: a dict with {docid: preprocessed_text}
- threshold: for bigrams scores
Returns:
- bigrams: a list of "word1 word2" bigrams
"""
docids = set(textdict.keys())
# to identify bigrams, transform the texts into lists of words (assume texts are preprocessed)
text_words = [textdict[did].split() for did in docids]
bigram_scores = get_bigram_scores(text_words)
return [bigram for bigram in bigram_scores if bigram_scores[bigram] > threshold]
|
4f3d6e3b4b62e42a98ab8bc3f823853680ca9e6f
| 20,412 |
def _rec_filter_to_info(line):
"""Move a DKFZBias filter to the INFO field, for a record.
"""
parts = line.rstrip().split("\t")
move_filters = {"bSeq": "strand", "bPcr": "damage"}
new_filters = []
bias_info = []
for f in parts[6].split(";"):
if f in move_filters:
bias_info.append(move_filters[f])
elif f not in ["."]:
new_filters.append(f)
if bias_info:
parts[7] += ";DKFZBias=%s" % ",".join(bias_info)
parts[6] = ";".join(new_filters or ["PASS"])
return "\t".join(parts) + "\n"
|
496056126bdf390a6213dfad5c40c4a14ec35caa
| 20,413 |
def find_files(config, file_to_find, exact_filename=False):
"""finds all the files in config.diag_dir that matches the prefix or will use
the config.files string (split on ,) if present and not use a prefix but a full
file name match.
Example:
files = [my.log], diag_dir = "" => only matches my.log NOT my.log.1
file_to_find = "my.log", files = [], diag_dir = "mydir" => matches my.log, my.log.1, my.log.2, etc
"""
files = []
use_as_prefix = not exact_filename
if config.files:
files = config.files.split(",")
use_as_prefix = False
elif config.diag_dir:
try:
files = find_logs(config.diag_dir, file_to_find, use_as_prefix)
except Exception as ex:
if env.DEBUG:
print(ex)
raise UnableToReadDiagException(config.diag_dir, ex)
else:
print("no --diagdir or --files flag used")
return None
return files
|
2eea21dcc29a45871629ee081b0fcb422d6163af
| 20,414 |
import ast
import astor
def ast_node_to_source(ast_node: ast.AST) -> str:
"""
Uses astor package to produce source code from ast
Also handles low-level ast functions, such as wrapping in a module if necessary,
and fixing line numbers for modified/extracted ast
Args:
ast_node:
Returns:
"""
# Must be a module to output to source. Wrap in module if not already
if not isinstance(ast_node, ast.Module):
ast_node = ast.Module([ast_node])
# Fix line numbers
ast.fix_missing_locations(ast_node)
return astor.to_source(ast_node)
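# Minimal usage sketch (requires the third-party `astor` package to be installed):
print(ast_node_to_source(ast.parse("x = 1 + 2")))  # -> "x = 1 + 2\n" (formatting is astor's)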
|
bc4bb2a9f09907e2c9ab8a8f1629135695da6aa9
| 20,415 |
import codecs
import os
def read(*parts):
"""
returns contents of file
"""
with codecs.open(os.path.join(PROJECT, *parts), "rb", "utf-8") as file:
return file.read()
|
065320d95a93602a55c886d05ca54b8be34610ce
| 20,416 |
import typing
def _get_base(*, name: str, schemas: oa_types.Schemas) -> typing.Type:
"""
Retrieve the base class of a schema considering inheritance.
If x-inherits is True, retrieve the parent. If it is a string, verify that the
parent is valid. In either case, the model for that schema is used as the base
instead of the usual base.
If x-inherits is not present or False, return the usual base.
Raise InheritanceConstructionOrderError if the parent of the schema has not been
constructed when attempting to construct the child.
Args:
name: The name of the schema to determine the base for.
schemas: All the schemas.
Returns:
The base of the model. Either the usual base or the model parent in the case of
inheritance.
"""
schema = schemas.get(name)
if schema is None:
        raise exceptions.SchemaNotFoundError(f"Could not find schema {name}.")
if _schema_helper.inherits(schema=schema, schemas=schemas):
parent = _inheritance.retrieve_parent(schema=schema, schemas=schemas)
try:
return getattr(models, parent)
except AttributeError as exc:
raise exceptions.InheritanceError(
"Any parents of a schema must be constructed before the schema can be "
"constructed."
) from exc
return getattr(models, "Base")
|
47bcca20c82078cd3bd821a0d10e951b346a1e87
| 20,417 |
def classNumber(A):
""" Returns the number of transition classes in the matrix A """
cos = 0
if type(A[0][0]) == list:
cos = len(A)
else:
cos = 1
return cos
|
a71bce468f7429746bfe246d94f5dcebb85c41d4
| 20,418 |
def lz4_decompress_c(src, dlen, dst=None):
"""
Decompresses src, a bytearray of compressed data.
The dst argument can be an optional bytearray which will have the output appended.
If it's None, a new bytearray is created.
The output bytearray is returned.
"""
if dst is None:
dst = bytearray()
print(str(src))
b = bytes(src)
d=lz4zfs.decompress(b,dlen)
l=len(d)
if (dlen != l):
print("[-] decompress size differ from %d, got %d" %(dlen,l))
raise RuntimeError("decompress size differ from %d, got %d" %(dlen,l))
    else:
        # here dlen == l, so a single slice assignment is enough
        dst[0:l] = d
print(str(dst))
return dst
|
5b2b15f323c00a8cedec4b9caee0fea8dda89f76
| 20,419 |
def format_coordinates(obj, no_seconds=True, wgs_link=True):
"""Format WGS84 coordinates as HTML.
.. seealso:: https://en.wikipedia.org/wiki/ISO_6709#Order.2C_sign.2C_and_units
"""
def degminsec(dec, hemispheres):
_dec = abs(dec)
degrees = int(floor(_dec))
_dec = (_dec - int(floor(_dec))) * 60
minutes = int(floor(_dec))
_dec = (_dec - int(floor(_dec))) * 60
seconds = _dec
if no_seconds:
if seconds > 30:
if minutes < 59:
minutes += 1
else:
minutes = 0
degrees += 1
fmt = "{0}\xb0"
if minutes:
fmt += "{1:0>2d}'"
if not no_seconds and seconds:
fmt += '{2:0>2f}"'
fmt += hemispheres[0] if dec > 0 else hemispheres[1]
return str(fmt).format(degrees, minutes, seconds)
if not isinstance(obj.latitude, float) or not isinstance(obj.longitude, float):
return ''
return HTML.div(
HTML.table(
HTML.tr(
HTML.td(
'Coordinates ',
external_link(
'https://en.wikipedia.org/wiki/World_Geodetic_System_1984',
label="WGS84") if wgs_link else ''),
HTML.td(
HTML.span('%s, %s' % (
degminsec(obj.latitude, 'NS'), degminsec(obj.longitude, 'EW'))),
HTML.br(),
HTML.span(
'{0.latitude:.2f}, {0.longitude:.2f}'.format(obj),
class_='geo'))),
class_="table table-condensed"))
|
1fc6a151f73e8836ee935db3cf265438e597fec2
| 20,420 |
import os
import distutils.spawn
def _defaultGromacsIncludeDir():
"""Find the location where gromacs #include files are referenced from, by
searching for (1) gromacs environment variables, (2) for the gromacs binary
'pdb2gmx' or 'gmx' in the PATH, or (3) just using the default gromacs
install location, /usr/local/gromacs/share/gromacs/top """
if 'GMXDATA' in os.environ:
return os.path.join(os.environ['GMXDATA'], 'top')
if 'GMXBIN' in os.environ:
return os.path.abspath(os.path.join(os.environ['GMXBIN'], '..', 'share', 'gromacs', 'top'))
pdb2gmx_path = distutils.spawn.find_executable('pdb2gmx')
if pdb2gmx_path is not None:
return os.path.abspath(os.path.join(os.path.dirname(pdb2gmx_path), '..', 'share', 'gromacs', 'top'))
else:
gmx_path = distutils.spawn.find_executable('gmx')
if gmx_path is not None:
return os.path.abspath(os.path.join(os.path.dirname(gmx_path), '..', 'share', 'gromacs', 'top'))
return '/usr/local/gromacs/share/gromacs/top'
|
126d74d22cf0ee3e37c2e718d9fa3cddb8f7e6d5
| 20,421 |
def delete_keys_on_selected():
"""
deletes set driven keys from selected controllers.
:return: <bool> True for success.
"""
s_ctrls = object_utils.get_selected_node(single=False)
if not s_ctrls:
raise IndexError("[DeleteKeysOnSelectedError] :: No controllers are selected.")
selected_ctrls = s_ctrls[:-1]
interface_ctrl = s_ctrls[-1]
for c_ctrl in selected_ctrls:
if not check_if_object_is_control(c_ctrl):
continue
print('[DeleteKeysOnSelected] :: Deleting keys on {}.'.format(c_ctrl))
delete_keys_on_controller(c_ctrl, interface_ctrl)
return True
|
5d1b55bdcefcd8b4997432851e015e2a875eb80a
| 20,422 |
def scan_continuation(curr, prompt_tag, look_for=None, escape=False):
"""
Segment a continuation based on a given continuation-prompt-tag.
The head of the continuation, up to and including the desired continuation
prompt is reversed (in place), and the tail is returned un-altered.
The hint value |look_for| is used to determine when the continuation being
installed is a prefix of the extant continuation.
In this case, installing the continuation is much simpler, as the expensive
    merge operation needed to find common substructure in the two continuations is
    not needed.
"""
handlers = False
xs = []
while isinstance(curr, Cont):
if curr is look_for:
return None, handlers
handlers |= isinstance(curr, DynamicWindValueCont)
xs.append(curr)
if isinstance(curr, Prompt) and curr.tag is prompt_tag:
break
curr = curr.prev
if not escape and not jit.isvirtual(curr):
return _scan_continuation(curr, prompt_tag, look_for, xs, handlers)
return xs, handlers
|
1182394c26b6d9e745f469f5006b5269e5854db8
| 20,423 |
import logging
import os
import subprocess
def reloadATSConfigs(conf:Configuration) -> bool:
"""
This function will reload configuration files for the Apache Trafficserver caching HTTP
	proxy. It does this by calling ``traffic_ctl config reload``
:param conf: An object representing the configuration of :program:`traffic_ops_ort`
:returns: whether or not the reload succeeded (as indicated by the exit code of
``traffic_ctl``)
:raises OSError: when something goes wrong executing the child process
"""
# First of all, ATS must be running for this to work
if not setATSStatus(True, conf):
logging.error("Cannot reload configs, ATS not running!")
return False
cmd = [os.path.join(conf.tsroot, "bin", "traffic_ctl"), "config", "reload"]
cmdStr = ' '.join(cmd)
if ( conf.mode is Configuration.Modes.INTERACTIVE and
not getYN("Run command '%s' to reload configuration?" % cmdStr, default='Y')):
logging.warning("Configuration will not be reloaded for Apache Trafficserver!")
logging.warning("Changes will NOT be applied!")
return True
logging.info("Apache Trafficserver configuration reload will be done via: %s", cmdStr)
if conf.mode is Configuration.Modes.REPORT:
return True
sub = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = sub.communicate()
if sub.returncode:
logging.debug("========== PROCESS STDOUT ==========")
logging.debug("%s", out.decode())
logging.debug("========== PROCESS STDERR ==========")
logging.debug("%s", err.decode())
logging.debug("====================================")
return False
return True
|
cbfca3129ea6ef64bc9e30115597ce10b77063e6
| 20,424 |
def solar_wcs_frame_mapping(wcs):
"""
This function registers the coordinates frames to their FITS-WCS coordinate
type values in the `astropy.wcs.utils.wcs_to_celestial_frame` registry.
"""
dateobs = wcs.wcs.dateobs if wcs.wcs.dateobs else None
# SunPy Map adds 'heliographic_observer' and 'rsun' attributes to the WCS
# object. We check for them here, and default to None.
if hasattr(wcs, 'heliographic_observer'):
observer = wcs.heliographic_observer
else:
observer = None
if hasattr(wcs, 'rsun'):
rsun = wcs.rsun
else:
rsun = None
# First we try the Celestial sub, which rectifies the order.
# It will return anything matching ??LN*, ??LT*
wcss = wcs.sub([WCSSUB_CELESTIAL])
# If the SUB works, use it.
if wcss.naxis == 2:
wcs = wcss
xcoord = wcs.wcs.ctype[0][0:4]
ycoord = wcs.wcs.ctype[1][0:4]
if xcoord == 'HPLN' and ycoord == 'HPLT':
return Helioprojective(obstime=dateobs, observer=observer, rsun=rsun)
if xcoord == 'HGLN' and ycoord == 'HGLT':
return HeliographicStonyhurst(obstime=dateobs)
if xcoord == 'CRLN' and ycoord == 'CRLT':
return HeliographicCarrington(obstime=dateobs)
if xcoord == 'SOLX' and ycoord == 'SOLY':
return Heliocentric(obstime=dateobs, observer=observer)
|
dd6a52e8356a242c5f2dfb4808e0a3d1ed57073f
| 20,425 |
def toa_error_peak_detection(snr):
"""
Computes the error in time of arrival estimation for a peak detection
algorithm, based on input SNR.
Ported from MATLAB Code
Nicholas O'Donoughue
11 March 2021
:param snr: Signal-to-Noise Ratio [dB]
:return: expected time of arrival error variance [s^2]
"""
# Convert SNR to linear units
snr_lin = utils.unit_conversions.db_to_lin(snr)
# Compute Error
return 1/(2*snr_lin)
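# Worked numeric check of the 1/(2*SNR) relationship, with the dB conversion done
# inline instead of via the package's utils.unit_conversions helper:
snr_db = 10.0
snr_lin = 10 ** (snr_db / 10)   # = 10.0
print(1 / (2 * snr_lin))        # -> 0.05, i.e. toa_error_peak_detection(10.0)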
|
4ab2f653c81a29484d96aed58cb73ca4327dbde0
| 20,426 |
from typing import Set
from typing import List
import glob
import os
import random
def scrape_random_contracts(data_dir: str, max_contracts=10000,
verbose: bool = True, filtering: bool = True, stop_words: Set[str] = None) -> List[LabeledProvision]:
"""Randomly sample contracts to extract labeled provisions from"""
if verbose:
print('Fetching contracts from', data_dir)
contracts = glob.glob(os.path.join(data_dir, '*/*/*/*.htm'))
if verbose:
print(len(contracts), 'contracts found, sampling', max_contracts)
random.shuffle(contracts)
contracts_scraped = 0
provisions: List[LabeledProvision] = []
for contract in contracts:
if verbose:
print('Scraping', contracts_scraped, contract)
provisions_doc = scrape_exhibit_10(contract, filtering=filtering, stop_words=stop_words)
if provisions_doc:
provisions.extend(provisions_doc)
contracts_scraped += 1
if contracts_scraped == max_contracts:
break
return provisions
|
a1724f4b88661faaa6ed345b2c48e3e12cabddbc
| 20,427 |
def statCellFraction(gridLimit, gridSpace, valueFile):
"""
Calculate the fractional value of each grid cell, based on the
values stored in valueFile.
:param dict gridLimit: Dictionary of bounds of the grid.
:param dict gridSpace: Resolution of the grid to calculate values.
:param str valueFile: Path to the ascii grid file containing values to sample.
:returns: :class:`numpy.ndarray` of fractional values, with length equal to the number
of cells
Notes: Still need to include bounds checking to ensure the valueFile
data actually covers the gridLimits.
"""
gLon, gLat, gData = grdRead(valueFile)
nCells = maxCellNum(gridLimit, gridSpace) + 1
output = np.zeros(nCells)
for cellNum in range(nCells):
cellLon, cellLat = getCellLonLat(cellNum, gridLimit, gridSpace)
wLon = cellLon
eLon = cellLon + gridSpace['x']
nLat = cellLat
sLat = cellLat - gridSpace['y']
ii = np.where((gLon <= eLon) & (gLon >= wLon))
jj = np.where((gLat <= nLat) & (gLat >= sLat))
cellValues = gData[np.meshgrid(jj[0], ii[0])]
if abs(cellValues).max() == 0:
output[cellNum] = np.average(cellValues)
else:
output[cellNum] = np.average(cellValues) / abs(cellValues).max()
return output
|
e26f011c10d94435b134e9f1b3adb2d1b1cd88ce
| 20,428 |
from tcrdist.repertoire import TCRrep
import re
import multiprocessing
import os
import pandas as pd
def tabulate_metaclonotype(
file,
metaclonotype_source_path,
metaclonotype_file,
source_path,
ncpus =1,
max_radius = 36,
write = False,
project_path = "counts"):
"""
    Tabulate a set of meta-clonotypes in a single bulk repertoire
Parameters
----------
metaclonotype_source_path : str
filepath to metaclonotype file
metaclonotype_file : str
filename containing meta-clonotype definitions
source_path : str
filepath to bulk files
file: str
filename
    ncpus : int
        maximum number of cpus to use in the meta-clonotype vs. bulk distance computation
    max_radius : int
        maximum radius to store
Returns
-------
df_join : pd.DataFrame
"""
ncpus = min(multiprocessing.cpu_count(), ncpus)
df_search = pd.read_csv(os.path.join(metaclonotype_source_path, metaclonotype_file), sep = "\t")
df_bulk = pd.read_csv(os.path.join(source_path, file), sep = "\t")
    # When one wants to track each clone individually regardless of identical TRBV-CDR3-TRBJ
df_bulk = df_bulk.sort_values('count').reset_index(drop = True)
df_bulk['rank'] = df_bulk.index.to_list()
tr = TCRrep(
cell_df = df_search,
organism = "human",
chains = ['beta'],
compute_distances= False)
tr.cpus = ncpus
tr_bulk = TCRrep(
cell_df = df_bulk,
organism = "human",
chains = ['beta'],
compute_distances= False)
chunk_size = get_safe_chunk(tr.clone_df.shape[0], tr_bulk.clone_df.shape[0])
tr.compute_sparse_rect_distances(
df = tr.clone_df,
df2 = tr_bulk.clone_df,
radius = max_radius ,
chunk_size = chunk_size)
df_join = join_by_dist(
how = 'inner',
csrmat = tr.rw_beta,
left_df = tr.clone_df,
right_df = tr_bulk.clone_df,
left_cols = tr.clone_df.columns.to_list(),
right_cols = tr_bulk.clone_df.columns.to_list(),
left_suffix = '_search',
right_suffix = '_bulk',
max_n= 1000,
radius = max_radius )
# df_join has more results
df_join['RADIUS'] = df_join.apply(lambda x: x['dist'] <= x['radius_search'], axis = 1)
df_join['MOTIF'] = df_join.apply(lambda x: re.search(string = x['cdr3_b_aa_bulk'],
pattern = x['regex_search']) is not None, axis = 1)
df_join['RADIUSANDMOTIF'] = df_join['RADIUS'] & df_join['MOTIF']
df_join['EXACT'] = df_join.apply(lambda x: x['dist'] <= 0, axis = 1)
df_join['unique_clones'] = 1
df_join['feature'] = df_join['v_b_gene_search'] + "+" \
+ df_join['cdr3_b_aa_search'] + "+" \
+ df_join['radius_search'].apply(lambda x : str(x)) + "+"\
+ df_join['regex_search']
# mc_file = 'mira_epitope_55_524_ALRKVPTDNYITTY_KVPTDNYITTY.tcrdist3.csv.ranked_centers_bkgd_ctlr_1E6_manuscript.tsv'
df_mc = pd.read_csv(os.path.join(metaclonotype_source_path, metaclonotype_file), sep = "\t")
df_mc['feature'] = df_mc['v_b_gene'] + "+" \
+ df_mc['cdr3_b_aa'] + "+" \
+ df_mc['radius'].apply(lambda x : str(x)) + "+" \
+ df_mc['regex']
radius = df_join.query('RADIUS').groupby('feature')['templates_bulk'].sum()
motif = df_join.query('RADIUSANDMOTIF').groupby('feature')['templates_bulk'].sum()
exact = df_join.query('EXACT').groupby('feature')['templates_bulk'].sum()
df_result = pd.concat([pd.DataFrame(index = df_mc['feature'] ), radius, motif, exact], axis = 1)
df_result.columns = ['RADIUS','MOTIF','EXACT']
df_result['file'] = file
df_result = df_result.fillna(0)
if write:
outname = os.path.join(project_path, f"{file}.counts.tsv")
df_result.reset_index(drop = False).to_csv(outname, sep = "\t", index = False)
return (df_join, df_result)
|
af04ef49ccfa1ee8d707923a47b6f73c0267b817
| 20,429 |
def relative_url_functions(current_url, course, lesson):
"""Return relative URL generators based on current page.
"""
def lesson_url(lesson, *args, **kwargs):
if not isinstance(lesson, str):
lesson = lesson.slug
if course is not None:
absolute = url_for('course_page', course=course, lesson=lesson, *args, **kwargs)
else:
absolute = url_for('lesson', lesson=lesson, *args, **kwargs)
return get_relative_url(current_url, absolute)
def subpage_url(page_slug):
if course is not None:
absolute = url_for('course_page', course=course, lesson=lesson, page=page_slug)
else:
absolute = url_for('lesson', lesson=lesson, page=page_slug)
return get_relative_url(current_url, absolute)
def static_url(path):
absolute = url_for('lesson_static', lesson=lesson, path=path, course=course)
return get_relative_url(current_url, absolute)
return lesson_url, subpage_url, static_url
|
b49009cfdb8e9095c9ca17fee39feb8689624bd4
| 20,430 |
def fix_CompanySize(r):
"""
Fix the CompanySize column
"""
if type(r.CompanySize) != str:
if r.Employment == "Independent contractor, freelancer, or self-employed":
r.CompanySize = "0 to 1 Employees"
elif r.Employment in [
"Not employed, but looking for work",
"full-time",
"Not employed, and not looking for work",
"part-time",
"Retired",
]:
r.CompanySize = "Not Applicable"
return r
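# Usage sketch on a toy survey frame (the column names follow the function's assumptions);
# missing CompanySize values are floats (NaN), which is what triggers the fix.
import numpy as np
import pandas as pd

toy = pd.DataFrame({
    "Employment": ["Independent contractor, freelancer, or self-employed", "Retired"],
    "CompanySize": [np.nan, np.nan],
})
print(toy.apply(fix_CompanySize, axis=1)["CompanySize"].tolist())
# -> ['0 to 1 Employees', 'Not Applicable']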
|
bd34bb3e72920fb7ef37279a743198387b1c4717
| 20,431 |
import yaml
from pathlib import Path
def write_thermo_yaml(phases=None, species=None, reactions=None,
lateral_interactions=None, units=None,
filename=None, T=300., P=1., newline='\n',
ads_act_method='get_H_act',
yaml_options={'default_flow_style': None, 'indent': 2,
'sort_keys': False, 'width': 79}):
"""Writes the units, phases, species, lateral interactions, reactions and
additional options in the CTI format for OpenMKM
Parameters
----------
phases : list of :class:`~pmutt.omkm.phase.Phase` objects
Phases to write in YAML file. The species should already be assigned.
species : list of :class:`~pmutt.empirical.nasa.Nasa`, :class:`~pmutt.empirical.nasa.Nasa9` or :class:`~pmutt.empirical.shomate.Shomate`
Species to write in YAML file.
reactions : list of :class:`~pmutt.omkm.reaction.SurfaceReaction`
Reactions to write in YAML file.
lateral_interactions : list of :class:`~pmutt.mixture.cov.PiecewiseCovEffect` objects, optional
Lateral interactions to include in YAML file. Default is None.
units : dict or :class:`~pmutt.omkm.units.Unit` object, optional
Units to write file. If a dict is inputted, the key is the quantity
and the value is the unit. If not specified, uses the default units
of :class:`~pmutt.omkm.units.Unit`.
filename: str, optional
Filename for the input.yaml file. If not specified, returns file
as str.
T : float, optional
Temperature in K. Default is 300 K.
P : float, optional
Pressure in atm. Default is 1 atm.
newline : str, optional
Type of newline to use. Default is Linux newline ('\\n')
ads_act_method : str, optional
Activation method to use for adsorption reactions. Accepted
options include 'get_H_act' and 'get_G_act'. Default is
'get_H_act'.
Returns
-------
lines_out : str
If ``filename`` is None, CTI file is returned.
"""
lines = [
_get_file_timestamp(comment_char='# '),
'# See documentation for OpenMKM YAML file here:',
'# https://vlachosgroup.github.io/openmkm/input',
]
yaml_dict = {}
    '''Organize units'''
if units is None:
units = Units()
elif isinstance(units, dict):
units = Units(**units)
units_out = units.to_omkm_yaml()
'''Pre-assign IDs for lateral interactions so phases can be written'''
if lateral_interactions is not None:
interactions_out = []
i = 0
for lat_interaction in lateral_interactions:
if lat_interaction.name is None:
lat_interaction.name = 'i_{:04d}'.format(i)
i += 1
interaction_dict = lat_interaction.to_omkm_yaml(units=units)
interactions_out.append(interaction_dict)
'''Pre-assign IDs for reactions so phases can be written'''
beps = []
if reactions is not None:
reactions_out = []
i = 0
for reaction in reactions:
# Assign reaction ID if not present
if reaction.id is None:
reaction.id = 'r_{:04d}'.format(i)
i += 1
# Write reaction
reaction_dict = reaction.to_omkm_yaml(units=units, T=T)
reactions_out.append(reaction_dict)
# Add unique BEP relationship if any
try:
bep = reaction.bep
except AttributeError:
pass
else:
if bep is not None and bep not in beps:
beps.append(bep)
'''Write phases'''
if phases is not None:
phases_out = []
for phase in phases:
phase_dict = _force_pass_arguments(phase.to_omkm_yaml, units=units)
phases_out.append(phase_dict)
# yaml_dict['phases'] = phases_out
'''Write species'''
if species is not None:
species_out = []
for ind_species in species:
ind_species_dict = _force_pass_arguments(ind_species.to_omkm_yaml,
units=units)
species_out.append(ind_species_dict)
# yaml_dict['species'] = species_out
'''Organize BEPs'''
if len(beps) > 0:
beps_out = []
i = 0
for bep in beps:
# Assign name if necessary
if bep.name is None:
bep.name = 'b_{:04d}'.format(i)
i += 1
bep_dict = _force_pass_arguments(bep.to_omkm_yaml, units=units)
beps_out.append(bep_dict)
# yaml_dict['beps'] = beps_out
'''Organize fields'''
fields = ('units', 'phases', 'species', 'reactions',
'beps', 'interactions',)
for field in fields:
try:
val = locals()['{}_out'.format(field)]
except:
pass
else:
# Create a YAML string
yaml_str = yaml.dump(data={field: val},
stream=None,
**yaml_options)
lines.extend(
['',
'#' + '-' * 79,
'# {}'.format(field.upper()),
'#' + '-' * 79,
yaml_str])
# yaml_dict[field] = val
# Convert to YAML format
# yaml_str = yaml.dump(data=yaml_dict, stream=None, **yaml_options)
# Remove redundant quotes
# yaml_str = yaml_str.replace('\'', '')
# lines.append(yaml_str)
lines_out = '\n'.join(lines)
# Remove redundant strings
lines_out = lines_out.replace('\'', '')
# Add spacing between list elements
lines_out = lines_out.replace('\n-', '\n\n-')
if filename is not None:
filename = Path(filename)
with open(filename, 'w', newline=newline) as f_ptr:
f_ptr.write(lines_out)
else:
# Or return as string
return lines_out
|
f8d12226a137d2cf3b9ddecb427dc51257498145
| 20,432 |
import random
def move_weighted(state: State, nnet: NNet) -> tuple:
"""
    Returns a random move with probabilities weighted by the neural network.
:param state: State to evaluate
:param nnet: Neural network used for evaluation
:return: Move as ((origin_row, origin_column),(target_row,target_column)
"""
policy = nnet.prediction(state)[0]
moves = list(policy.keys())
weights = list(policy.values())
return random.choices(moves, weights=weights)[0]
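# Usage sketch with a stub network, just to illustrate the expected contract:
# nnet.prediction(state)[0] is a dict mapping moves to probabilities. The State and
# NNet annotation types come from the surrounding project and are not defined here.
class _StubNet:
    def prediction(self, state):
        policy = {((6, 0), (5, 0)): 0.7, ((6, 1), (5, 1)): 0.3}
        return policy, 0.0  # (policy, value); only the policy is used here
print(move_weighted(state=None, nnet=_StubNet()))  # one of the two moves, sampled ~70/30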
|
0d9ad3e6344c3c24e71530bd7bbe6dc5a0b9a254
| 20,433 |
from typing import Any
def build_get301_request(**kwargs: Any) -> HttpRequest:
"""Return 301 status code and redirect to /http/success/200.
See https://aka.ms/azsdk/python/protocol/quickstart for how to incorporate this request builder
into your code flow.
:return: Returns an :class:`~azure.core.rest.HttpRequest` that you will pass to the client's
`send_request` method. See https://aka.ms/azsdk/python/protocol/quickstart for how to
incorporate this response into your code flow.
:rtype: ~azure.core.rest.HttpRequest
"""
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", "/http/redirect/301")
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="GET", url=url, headers=header_parameters, **kwargs)
|
2ef01a4c126890fd30fd3bc656036b92d2ef0408
| 20,434 |
def error_function_latticeparameters(varying_parameters_values_array,
varying_parameters_keys,
Miller_indices,
allparameters,
absolutespotsindices,
Xexp,
Yexp,
initrot=IDENTITYMATRIX,
pureRotation=0,
verbose=0,
pixelsize=165.0 / 2048,
dim=(2048, 2048),
weights=None,
kf_direction="Z>0",
returnalldata=False,
additional_expression="none"):
"""
q = UzUyUz Ustart B0 G*
Interface error function to return array of pair (exp. - model) distances
Sum_i [weights_i((Xmodel_i-Xexp_i)**2+(Ymodel_i-Yexp_i)**2) ]
Xmodel,Ymodel comes from G*=ha*+kb*+lc*
q = refinedUzUyUz Ustart refinedB0 G*
B0 reference structure reciprocal space frame (a*,b*,c*) a* // ki b* perp to a* and perp to z (z belongs to the plane of ki and detector normal vector n)
i.e. columns of B0 are components of a*,b* and c* expressed in x,y,z LT frame
refinedB0 is obtained by refining the 5 /6 lattice parameters
possible keys for parameters to be refined are:
five detector frame calibration parameters:
det_distance,det_xcen,det_ycen,det_beta, det_gamma
three misorientation angles with respect to LT orthonormal frame (x, y, z) matrices Ux, Uy,Uz:
anglex,angley,anglez
5 lattice parameters among 6 (a,b,c,alpha, beta,gamma)
"""
# reading default parameters
# CCD plane calibration parameters
if isinstance(allparameters, np.ndarray):
calibrationparameters = (allparameters.tolist())[:5]
else:
calibrationparameters = allparameters[:5]
# allparameters[5:8] = 0,0,0
Uy, Ux, Uz = IDENTITYMATRIX, IDENTITYMATRIX, IDENTITYMATRIX
latticeparameters = np.array(allparameters[8:14])
nb_varying_parameters = len(varying_parameters_keys)
# factorscale = 1.
for varying_parameter_index, parameter_name in enumerate(varying_parameters_keys):
# print "varying_parameter_index,parameter_name", varying_parameter_index, parameter_name
if parameter_name in ("anglex", "angley", "anglez"):
# print "got angles!"
if nb_varying_parameters > 1:
anglevalue = varying_parameters_values_array[varying_parameter_index] * DEG
else:
anglevalue = varying_parameters_values_array[0] * DEG
# print "anglevalue (rad)= ",anglevalue
ca = np.cos(anglevalue)
sa = np.sin(anglevalue)
if parameter_name is "angley":
Uy = np.array([[ca, 0, sa], [0, 1, 0], [-sa, 0, ca]])
elif parameter_name is "anglex":
Ux = np.array([[1.0, 0, 0], [0, ca, sa], [0, -sa, ca]])
elif parameter_name is "anglez":
Uz = np.array([[ca, -sa, 0], [sa, ca, 0], [0, 0, 1.0]])
elif parameter_name in ("alpha", "beta", "gamma"):
# print 'got Tc elements: ', parameter_name
indparam = dict_lattice_parameters[parameter_name]
# if nb_varying_parameters > 1:
# latticeparameters[indparam] = latticeparameters[3] * np.exp(varying_parameters_values_array[varying_parameter_index] / factorscale)
# else:
# latticeparameters[indparam] = latticeparameters[3] * np.exp(varying_parameters_values_array[0] / factorscale)
if nb_varying_parameters > 1:
latticeparameters[indparam] = varying_parameters_values_array[varying_parameter_index]
else:
latticeparameters[indparam] = varying_parameters_values_array[0]
elif parameter_name in ("a", "b", "c"):
# print 'got Tc elements: ', parameter_name
indparam = dict_lattice_parameters[parameter_name]
# if nb_varying_parameters > 1:
# latticeparameters[indparam] = latticeparameters[0] * np.exp(varying_parameters_values_array[varying_parameter_index] / factorscale)
# else:
# latticeparameters[indparam] = latticeparameters[0] * np.exp(varying_parameters_values_array[0] / factorscale)
if nb_varying_parameters > 1:
latticeparameters[indparam] = varying_parameters_values_array[varying_parameter_index]
else:
latticeparameters[indparam] = varying_parameters_values_array[0]
Uxyz = np.dot(Uz, np.dot(Ux, Uy))
if additional_expression == "a==b":
indparam = dict_lattice_parameters["b"]
indparam1 = dict_lattice_parameters["a"]
latticeparameters[indparam] = latticeparameters[indparam1]
newB0matrix = CP.calc_B_RR(latticeparameters, directspace=1, setvolume=False)
# if verbose:
# print("\n-------\nvarying_parameters_keys", varying_parameters_keys)
# print("varying_parameters_values_array", varying_parameters_values_array)
# print("Uxyz", Uxyz)
# print("latticeparameters", latticeparameters)
# print("newB0matrix", newB0matrix)
# DictLT.RotY40 such as X=DictLT.RotY40 Xsample (xs,ys,zs =columns expressed in x,y,z frame)
# transform in sample frame Ts
# same transform in x,y,z LT frame T
# Ts = DictLT.RotY40-1 T DictLT.RotY40
# T = DictLT.RotY40 Ts DictLT.RotY40-1
newmatrix = np.dot(Uxyz, initrot)
# if 0: # verbose:
# print("initrot", initrot)
# print("newmatrix", newmatrix)
Xmodel, Ymodel, _, _ = calc_XY_pixelpositions(calibrationparameters,
Miller_indices,
absolutespotsindices,
UBmatrix=newmatrix,
B0matrix=newB0matrix,
pureRotation=0,
labXMAS=0,
verbose=0,
pixelsize=pixelsize,
dim=dim,
kf_direction=kf_direction)
if 0: # verbose:
print("Xmodel, Ymodel", Xmodel, Ymodel)
if 0: # verbose:
print("Xexp, Yexp", Xexp, Yexp)
distanceterm = np.sqrt((Xmodel - Xexp) ** 2 + (Ymodel - Yexp) ** 2)
if weights is not None:
allweights = np.sum(weights)
distanceterm = distanceterm * weights / allweights
# if verbose:
# # print "** distance residues = " , distanceterm, " ********"
# print("** mean distance residue = ", np.mean(distanceterm), " ********")
# print "twthe, chi", twthe, chi
alldistances_array = distanceterm
if verbose:
# print "varying_parameters_values in error_function_on_demand_strain",varying_parameters_values
# print "arr_indexvaryingparameters",arr_indexvaryingparameters
# print "Xmodel",Xmodel
# print "pixX",pixX
# print "Ymodel",Ymodel
# print "pixY",pixY
# print "newmatrix",newmatrix
# print "newB0matrix",newB0matrix
# print "deltamat",deltamat
# print "initrot",initrot
# print "param_orient",param_calib
# print "distanceterm",distanceterm
pass
# if weights is not None:
# print("***********mean weighted pixel deviation ",
# np.mean(alldistances_array), " ********")
# else:
# print(
# "***********mean pixel deviation ", np.mean(alldistances_array),
# " ********")
# print "newmatrix", newmatrix
if returnalldata:
# concatenated all pairs distances, all UB matrices, all UB.newB0matrix matrices
return alldistances_array, Uxyz, newmatrix, newB0matrix, latticeparameters
else:
return alldistances_array
|
e1c3242855354ed82d2dd164a7ae16aa76cd5e22
| 20,435 |
def run_forward_model(z_in):
"""
Run forward model and return approximate measured values
"""
x_dummy[:prm.nn]=z_in
x_dummy[prm.nn:]=prm.compute_velocity(z_in,t0)
x_meas = H_meas.dot(x_dummy)
return x_meas
|
fd6bfbbacba59e08b2bb8c4588793b969cab4b60
| 20,436 |
def optimize(name: str, circuit: cirq.Circuit) -> cirq.Circuit:
"""Applies sycamore circuit decompositions/optimizations.
Args:
name: the name of the circuit for printing messages
circuit: the circuit to optimize_for_sycamore
"""
print(f'optimizing: {name}', flush=True)
start = timer()
optimized = cirq.google.optimized_for_sycamore(circuit)
stop = timer()
print_stats(stop - start, optimized)
return optimized
|
07027dc2ad21e33ca2038cb40c3cbb2b529941e7
| 20,437 |
def download_sbr(destination=None):
"""Download an example of SBR+ Array and return the def path.
    Example files are downloaded to a persistent cache to avoid
re-downloading the same file twice.
Parameters
----------
destination : str, optional
Path where files will be downloaded. Optional. Default is user temp folder.
Returns
-------
str
Path to the example file.
Examples
--------
Download an example result file and return the path of the file
>>> from pyaedt import examples
    >>> path = examples.download_sbr()
"""
return _download_file("sbr", "Cassegrain.aedt", destination)
|
0b928977806b546325569dbf71e93e8b760868fa
| 20,438 |
from typing import Tuple
from typing import Union
def isvalid_sequence(
level: str, time_series: Tuple[Union[HSScoring, CollegeScoring]]
) -> bool:
"""Checks if entire sequence is valid.
Args:
level: 'high school' or 'college' level for sequence analysis.
time_series: Tuple of sorted match time_series events.
Raises:
ValueError: Invalid level.
ValueError: Not sorted time_series.
ValueError: Invalid position.
Returns:
bool: True if sequence is valid, otherwise raises ValueError.
"""
if level not in {"college", "high school"}:
raise ValueError(
f"Expected `level` to be one of "
f"'college' or 'high school', "
f"got {level!r}."
)
# aliases sequences based on level
sequences = COLLEGE_SEQUENCES if level == "college" else HS_SEQUENCES
position = "neutral"
# skips iteration the last value because we check the next
for i, score in enumerate(time_series[:-1]):
# current time can't be larger than next time
if time_series[i].time_stamp > time_series[i + 1].time_stamp:
raise ValueError(
f"Values in `time_series` appear to be sorted incorrectly."
)
if position == "neutral":
check_neutral(score, sequences["neutral"])
if score.formatted_label == "fT2" or score.formatted_label == "oBOT" or score.formatted_label == 'fTOP':
position = "top"
elif score.formatted_label == "oT2" or score.formatted_label == "fBOT" or score.formatted_label == 'oTOP':
position = "bottom"
elif position == "top":
check_top(score, sequences["top"])
if (
score.formatted_label == "oE1"
or score.formatted_label == "fNEU"
or score.formatted_label == "oNEU"
):
position = "neutral"
elif (
score.formatted_label == "oR2"
or score.formatted_label == "fBOT"
or score.formatted_label == "oTOP"
):
position = "bottom"
elif position == "bottom":
check_bottom(score, sequences["bottom"])
if (
score.formatted_label == "fE1"
or score.formatted_label == "fNEU"
or score.formatted_label == "oNEU"
):
position = "neutral"
elif (
score.formatted_label == "fR2"
or score.formatted_label == "oBOT"
or score.formatted_label == "fTOP"
):
position = "top"
else:
raise ValueError(
f"Invalid `position`, expected one of ['neutral', "
f"'top', 'bottom'], got {position!r}."
)
return True
|
5e32906408540c504347c745113fc303ef0d989b
| 20,439 |
def create_backend_app(): # pragma: no cover
"""Returns WSGI app for backend."""
routes = handlers.get_backend_routes() + swarming.get_backend_routes()
app = webapp2.WSGIApplication(routes, debug=utils.is_local_dev_server())
gae_ts_mon.initialize(app, cron_module='backend')
gae_ts_mon.register_global_metrics(metrics.GLOBAL_METRICS)
gae_ts_mon.register_global_metrics_callback(
'buildbucket_global', metrics.update_global_metrics
)
return app
|
395b34d8a8752664bc49af6d2c0b37fe76ab6956
| 20,440 |
import matplotlib.pyplot as plt
import numpy as np
def non_linear_relationships():
"""Plot logarithmic and exponential data along with correlation coefficients."""
# make subplots
fig, axes = plt.subplots(1, 2, figsize=(12, 3))
# plot logarithmic
log_x = np.linspace(0.01, 10)
log_y = np.log(log_x)
axes[0].scatter(log_x, log_y)
axes[0].set_title(f'ρ = {np.round(np.corrcoef(log_x, log_y)[0][1], 2):.2f}')
# plot exponential
exp_x = np.linspace(0, 10)
exp_y = np.exp(exp_x)
axes[1].scatter(exp_x, exp_y)
axes[1].set_title(f'ρ = {np.round(np.corrcoef(exp_x, exp_y)[0][1], 2):.2f}')
# labels
for ax in axes:
ax.set_xlabel('x')
ax.set_ylabel('y')
return axes
|
86ce934aebc6b6f8e6b5c1826d9d26c408efc8df
| 20,441 |
import io
def label_samples(annotation, atlas, atlas_info=None, tolerance=2):
"""
Matches all microarray samples in `annotation` to parcels in `atlas`
Attempts to place each sample provided in `annotation` into a parcel in
`atlas`, where the latter is a 3D niimg-like object that contains parcels
each idnetified by a unique integer ID.
The function tries to best match samples in `annotation` to parcels defined
in `atlas` by:
1. Determining if the sample falls directly within a parcel,
2. Checking to see if there are nearby parcels by slowly expanding the
search space to include nearby voxels, up to a specified distance
(specified via the `tolerance` parameter),
3. Assigning the sample to the closest parcel if there are multiple
nearby parcels, where closest is determined by the parcel centroid.
If at any step a sample can be assigned to a parcel the matching process is
terminated. If there is still no parcel for a given sample after this
process the sample is provided a label of 0.
Parameters
----------
annotation : (S, 13) pandas.DataFrame
Pre-loaded annotation information for a given AHBA donor
atlas : niimg-like object
A parcellation image in MNI space, where each parcel is identified by a
unique integer ID
atlas_info : pandas.DataFrame, optional
Filepath to or pre-loaded dataframe containing information about
`atlas`. Must have *at least* columns 'id', 'hemisphere', and
'structure' containing information mapping atlas IDs to hemisphere and
broad structural class (i.e., "cortex", "subcortex", "cerebellum").
Default: None
tolerance : int, optional
Distance (in mm) that a sample must be from a parcel for it to be
matched to that parcel. This is only considered if the sample is not
directly within a parcel. Default: 2
Returns
-------
labels : (S, 1) pandas.DataFrame
Dataframe with parcel labels for each of `S` samples
"""
# get annotation and atlas data
annotation = io.read_annotation(annotation)
atlas = utils.check_img(atlas)
label_data, affine = np.asarray(atlas.dataobj), atlas.affine
# load atlas_info, if provided
if atlas_info is not None:
atlas_info = utils.check_atlas_info(atlas, atlas_info)
# get ijk coordinates for microarray samples and find labels
g_ijk = utils.xyz_to_ijk(annotation[['mni_x', 'mni_y', 'mni_z']], affine)
labelled = label_data[g_ijk[:, 0], g_ijk[:, 1], g_ijk[:, 2]]
# if sample coordinates aren't directly inside a parcel, increment radius
# around sample up to `tolerance` to try and find nearby parcels.
# if still no parcel, then ignore this sample
for idx in np.where(labelled == 0)[0]:
label, tol = labelled[idx], 1
while label == 0 and tol <= tolerance:
label = _assign_sample(g_ijk[[idx]], atlas,
sample_info=annotation.iloc[idx],
atlas_info=atlas_info,
tolerance=tol)
tol += 1
labelled[idx] = label
return pd.DataFrame(labelled, dtype=int,
columns=['label'], index=annotation.index)
|
65a3f83b031871a14b250df48c9edef3cdcce7ac
| 20,442 |
import numpy as np
def group_by(x, group_by_fields='Event', return_group_indices=False):
"""
Splits x into LIST of arrays, each array with rows that have same
group_by_fields values.
Gotchas:
Assumes x is sorted by group_by_fields (works in either order, reversed
or not)
Does NOT put in empty lists if indices skip a value! (e.g. events
without peaks)
If return_indices=True, returns list of arrays with indices of group
elements in x instead
"""
# Support single index and list of indices
try:
group_by_fields[0]
except TypeError:
group_by_fields = tuple(group_by_fields)
# Define array we'll split
if return_group_indices:
to_return = np.arange(len(x))
else:
to_return = x
# Indices to determine split points from
indices = fields_view(x, group_by_fields)
# Should we split at all?
if indices[0] == indices[-1]:
return [to_return]
else:
# Split where indices change value
split_points = np.where((np.roll(indices, 1) != indices))[0]
# 0 shouldn't be a split_point, will be in it due to roll (and indices[0] != indices[-1]), so remove it
split_points = split_points[1:]
return np.split(to_return, split_points)
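# Usage sketch with a tiny structured array already sorted by 'Event'; this assumes the
# module-level helper `fields_view(x, fields)` returns the named column(s) as a view.
x = np.array([(0, 1.0), (0, 2.0), (1, 3.0)],
             dtype=[("Event", "i4"), ("area", "f8")])
print([g["area"].tolist() for g in group_by(x, group_by_fields="Event")])
# -> [[1.0, 2.0], [3.0]]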
|
12e8034556ca303a9ebd2ccaab83cbcc131b0bec
| 20,443 |
import fcntl
def unlock_file(fd):
    """unlock file. """
    try:
        fcntl.flock(fd, fcntl.LOCK_UN)
        return (True, 0)
    except IOError as ex_value:
        return (False, ex_value.errno)
|
2c6ce071072fa45607ce284b0881af5df44b5e6d
| 20,444 |
def DsseTrad(nodes_num, measurements, Gmatrix, Bmatrix, Yabs_matrix, Yphase_matrix):
"""
Traditional state estimator
It performs state estimation using rectangular node voltage state variables
and it is customized to work without PMU measurements
@param nodes_num: number of nodes of the grid
@param measurements: Vector of measurements in Input (voltages, currents, powers)
@param Gmatrix: conductance matrix
@param Bmatrix: susceptance matrix
@param Yabs_matrix: magnitude of the admittance matrix
@param Yphase_matrix: phase of the admittance matrix
return: np.array V - estimated voltages
"""
    # calculate the weights matrix (obtained as standard_deviations^-2)
weights = measurements.getWeightsMatrix()
W = np.diag(weights)
inj_code = 0
# Jacobian for Power Injection Measurements
H2, H3 = calculateJacobiMatrixSinj(measurements, nodes_num, Gmatrix, Bmatrix, inj_code, type=2)
# Jacobian for branch Power Measurements
H4, H5 = calculateJacobiBranchPower(measurements, nodes_num, Gmatrix, Bmatrix, inj_code, type=2)
# get array which contains the index of measurements type V_mag and I_mag
vidx = measurements.getIndexOfMeasurements(type=MeasType.V_mag)
iidx = measurements.getIndexOfMeasurements(type=MeasType.I_mag)
nvi = len(vidx)
nii = len(iidx)
# get array which contains the index of measurements type MeasType.Sinj_real, MeasType.Sinj_imag in the array measurements.measurements
pidx = measurements.getIndexOfMeasurements(type=MeasType.Sinj_real)
qidx = measurements.getIndexOfMeasurements(type=MeasType.Sinj_imag)
# get array which contains the index of measurements type MeasType.S_real, MeasType.S_imag in the array measurements.measurements
p1br = measurements.getIndexOfMeasurements(type=MeasType.S1_real)
p2br = measurements.getIndexOfMeasurements(type=MeasType.S2_real)
q1br = measurements.getIndexOfMeasurements(type=MeasType.S1_imag)
q2br = measurements.getIndexOfMeasurements(type=MeasType.S2_imag)
# get an array with all measured values (affected by uncertainty)
z = measurements.getMeasValues()
V = np.ones(nodes_num) + 1j * np.zeros(nodes_num)
State = np.concatenate((np.ones(nodes_num), np.zeros(nodes_num-1)), axis=0)
epsilon = 5
num_iter = 0
    # Iteration of the Newton-Raphson method: needed to solve the non-linear system of equations
while epsilon > 10 ** (-6):
""" Computation of equivalent current measurements in place of the power measurements """
# in every iteration the input power measurements are converted into currents by dividing by the voltage estimated at the previous iteration
z = convertSinjMeasIntoCurrents(measurements, V, z, pidx, qidx)
z = convertSbranchMeasIntoCurrents(measurements, V, z, p1br, q1br, p2br, q2br)
""" Voltage Magnitude Measurements """
h1, H1 = update_h1_vector(measurements, V, vidx, nvi, nodes_num, inj_code, type=2)
""" Power Injection Measurements """
# h(x) vector where power injections are present
h2 = np.inner(H2, State)
h3 = np.inner(H3, State)
""" Power Flow Measurements """
# h(x) vector where power flows are present
h4 = np.inner(H4, State)
h5 = np.inner(H5, State)
""" Current Magnitude Measurements """
h6, H6 = update_h6_vector(measurements, V, iidx, nii, Yabs_matrix, Yphase_matrix, nodes_num, num_iter, inj_code, type=2)
""" WLS computation """
        # all the sub-matrices of H calculated so far are merged into a single matrix
H = np.concatenate((H1, H2, H3, H4, H5, H6), axis=0)
# h(x) sub-vectors are concatenated
y = np.concatenate((h1, h2, h3, h4, h5, h6), axis=0)
# "res" is the residual vector. The difference between input measurements and h(x)
res = np.subtract(z, y)
# g = transpose(H) * W * res
g = np.inner(H.transpose(), np.inner(W, res))
WH = np.inner(W, H.transpose())
# G is the gain matrix, that will have to be inverted at each iteration
G = np.inner(H.transpose(), WH.transpose())
# inversion of G
Ginv = np.linalg.pinv(G)
# Delta includes the updates of the states for the current Newton Rapson iteration
Delta_State = np.inner(Ginv, g)
# state is updated
State = State + Delta_State
        # calculate the NR threshold (for the next while check)
epsilon = np.amax(np.absolute(Delta_State))
# update the voltages
V.real = State[:nodes_num]
V.imag = np.concatenate(([0], State[nodes_num:]), axis=0)
num_iter = num_iter + 1
return V
|
9e662255875970fc8df38c29e728637e53a30db5
| 20,445 |
def _get_specs(layout, surfs, array_name, cbar_range, nvals=256):
"""Get array specifications.
Parameters
----------
layout : ndarray, shape = (n_rows, n_cols)
Array of surface keys in `surfs`. Specifies how window is arranged.
surfs : dict[str, BSPolyData]
Dictionary of surfaces.
array_name : ndarray
Names of point data array to plot for each layout entry.
cbar_range : {'sym'} or tuple,
        Range for each array. If 'sym', uses a symmetric range. Only used if the
        array has positive and negative values.
nvals : int, optional
Number of lookup table values for continuous arrays.
Default is 256.
Returns
-------
specs : ndarray
Array with specifications for each array entry.
"""
nrow, ncol = layout.shape
n_overlays = max([len(a) for a in array_name.ravel()])
def _set_spec(x, rg):
if rg is None or rg == 'sym':
a, b = np.nanmin(x), np.nanmax(x)
if rg == 'sym' and np.sign(a) != np.sign(b):
b = max(np.abs(a), b)
a = -b
rg = (a, b)
if np.issubdtype(x.dtype, np.floating):
return (*rg, nvals, np.array([]), False)
vals = np.unique(x)
return (*rg, vals.size, vals, True)
dt = np.dtype([('min', 'f8'), ('max', 'f8'), ('nval', 'i8'),
('val', 'O'), ('disc', '?')])
specs = np.zeros((n_overlays, nrow, ncol), dtype=dt)
specs[:] = (np.nan, np.nan, nvals, np.array([]), False)
map_sp = {k: {} for k in surfs.keys()}
for idx, k in np.ndenumerate(layout):
if k is None:
continue
for ia, name in enumerate(array_name[idx]):
if name not in surfs[k].point_keys:
continue
if name not in map_sp[k]:
arr = surfs[k].PointData[name]
map_sp[k][name] = _set_spec(arr, cbar_range[idx][ia])
specs[(ia,) + idx] = map_sp[k][name]
return specs
|
310208c5bd8db46d37635fa8e2fcd8422a753a1b
| 20,446 |
import subprocess
def get_pool_data(index, val, field):
"""
Return val for volume based on index.
Parameters
----------
index: str
base field name.
val: str
base field value.
field: str
requested field value.
Returns
-------
str: the requested value, None if absent.
"""
cmd = [oci_kvm_path, 'list-pool', '--output-mode', 'parsable']
all_pool_data = subprocess.check_output(cmd).decode('utf-8').splitlines()
for pool in all_pool_data:
pool_list = pool.split('#')
if index not in pool_fields or field not in pool_fields:
return None
if pool_list[pool_fields.index(index)] == val:
return pool_list[pool_fields.index(field)]
return None
|
3f4934acfaae40e64bafc69048671d7f1c1c832f
| 20,447 |
def upper_camel_to_lower_camel(upper_camel: str) -> str:
"""convert upper camel case to lower camel case
Example:
CamelCase -> camelCase
:param upper_camel:
:return:
"""
return upper_camel[0].lower() + upper_camel[1:]
|
e731bbee45f5fc3d8e3e218837ccd36c00eff734
| 20,448 |
def get(isamAppliance, cert_dbase_id, check_mode=False, force=False):
"""
Get details of a certificate database
"""
return isamAppliance.invoke_get("Retrieving all current certificate database names",
"/isam/ssl_certificates/{0}/details".format(cert_dbase_id))
|
34ade7c42fcc1b1409b315f8748105ee99157986
| 20,449 |
def model_check(func):
"""Checks if the model is referenced as a valid model. If the model is
valid, the API will be ready to find the correct endpoint for the given
model.
:param func: The function to decorate
:type func: function
"""
def wrapper(*args, **kwargs):
model = None
if kwargs:
model = kwargs.get("model", None)
if not model:
if len(args) > 1:
                model = args[1]  # args[0] is the decorated function
if not constants.TRANSLATION.get(model, None):
raise ValueError(
"'{model}' doesn't exists. Allowed models: {allowed_models}".format(
model=model,
allowed_models=",\n".join(
list(constants.TRANSLATION.keys())
),
)
)
return func(*args, **kwargs)
return wrapper
|
809d7659a721ad6dedf4a651dd1fdab1b1dbf51e
| 20,450 |
def content_loss_func(sess, model):
"""Content loss function defined in the paper."""
def _content_loss(p, x):
# N is the number of filters at layer 1
N = p.shape[3]
# M is the height * width of the feature map at layer 1
M = p.shape[1] * p.shape[2]
return (1 / (4 * N * M)) * tf.reduce_mean(tf.pow(x - p, 2))
return _content_loss(sess.run(model["conv4_2"]), model["conv4_2"])
|
229866eaaf6021e7a078460dc29a6f0bfaa853bd
| 20,451 |
from os.path import exists
import joblib
def Extract_from_DF_kmeans(dfdir,num,mode=True):
"""
PlaneDFを読み込んで、client_IP毎に該当index番号の羅列をそれぞれのtxtに書き出す
modeがFalseのときはシーケンスが既にあっても上書き作成
"""
flag = exists("Database/KMeans/km_full_"+dfdir+"_database_name")#namelistが存在するかどうか
if(flag and mode):return
plane_df = joblib.load("./DFs/"+dfdir)
result_df = joblib.load("./DFs/Results/KMeans/Result_km_"+str(num)+"_full_Input_"+dfdir+"_continuous")
    iplist=list(set(plane_df["client_ip"]))  # list of client_ip values contained in the loaded DF (duplicates removed via set)
    joblib.dump(iplist,"./List/iplist_"+dfdir)  # dump iplist: used when feeding each sequence into anomaly detection
    database = []  # sequences are appended here and dumped at the end
database_name = []
    if(not(flag)):database_name = []  # record a name per sequence; naming rule: (client_ip)_(server_ip)
for ip in iplist:
        result_list = list(result_df.loc[list(plane_df[plane_df["client_ip"]==ip].index)].values.flatten())  # build the sequence for this client_IP
database.append(result_list)
database_name.append(ip)
#if(len(list(set(result_list)))>1):print(" "+ip+"_"+sip+" : "+str(result_list))
joblib.dump(database,"Database/KMeans/km_"+str(num)+"_full_"+dfdir+"_database")
if(not(flag)):joblib.dump(database_name,"Database/KMeans/km_full_"+dfdir+"_database_name")
return [database,database_name]
|
cb086c07024716022343c7e8eb5755f2de3695db
| 20,452 |
from typing import Optional
def get_workspace(workspace_id: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetWorkspaceResult:
"""
Resource schema for AWS::IoTTwinMaker::Workspace
:param str workspace_id: The ID of the workspace.
"""
__args__ = dict()
__args__['workspaceId'] = workspace_id
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('aws-native:iottwinmaker:getWorkspace', __args__, opts=opts, typ=GetWorkspaceResult).value
return AwaitableGetWorkspaceResult(
arn=__ret__.arn,
creation_date_time=__ret__.creation_date_time,
description=__ret__.description,
role=__ret__.role,
s3_location=__ret__.s3_location,
tags=__ret__.tags,
update_date_time=__ret__.update_date_time)
|
5c0970884be38923ae156511faf619fda725d004
| 20,453 |
import socket
def find_open_port():
"""
    Use socket's built-in ability to find an open port.
"""
sock = socket.socket()
sock.bind(('', 0))
host, port = sock.getsockname()
return port
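
# Usage sketch (illustrative): grab a free port for a throwaway test server.
# Note the probe socket above is only released once it is garbage-collected.
if __name__ == "__main__":
    print("found open port:", find_open_port())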
|
516540fd23259d0fe247e02c4058c5ed7f3ee3a8
| 20,454 |
import itertools
import typing as tp
def split_list_round_robin(data: tp.Iterable, chunks_num: int) -> tp.List[list]:
"""Divide iterable into `chunks_num` lists"""
result = [[] for _ in range(chunks_num)]
chunk_indexes = itertools.cycle(i for i in range(chunks_num))
for item in data:
i = next(chunk_indexes)
result[i].append(item)
return result
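
# Usage sketch (illustrative; values made up): five items distributed over
# three chunks in round-robin order.
assert split_list_round_robin([1, 2, 3, 4, 5], 3) == [[1, 4], [2, 5], [3]]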
|
a87322b2c6a3601cda6c949354e55c38e215289a
| 20,455 |
def calc_Q_loss_FH_d_t(Q_T_H_FH_d_t, r_up):
"""温水床暖房の放熱損失
Args:
Q_T_H_FH_d_t(ndarray): 温水暖房の処理暖房負荷 [MJ/h]
r_up(ndarray): 当該住戸の温水床暖房の上面放熱率 [-]
Returns:
ndarray: 温水床暖房の放熱損失
"""
return hwfloor.get_Q_loss_rad(Q_T_H_rad=Q_T_H_FH_d_t, r_up=r_up)
|
04ad561fa0090de2eb64d5514a28729da92af63c
| 20,456 |
import asyncio
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Unload a config entry."""
_LOGGER.debug("__init__ async_unload_entry")
unload_ok = all(
await asyncio.gather(
*(
hass.config_entries.async_forward_entry_unload(entry, component)
for component in PLATFORMS
)
)
)
if unload_ok:
config_data = hass.data[DOMAIN].pop(entry.data["unique_id"])
if "api" in config_data:
energy_api = config_data[CONF_API]
await energy_api.close()
return unload_ok
|
dac09bd63986488e9d6164e775ddfc5a953576da
| 20,457 |
import random
def t06_ManyGetPuts(C, pks, crypto, server):
"""Many clients upload many files and their contents are checked."""
clients = [C("c" + str(n)) for n in range(10)]
kvs = [{} for _ in range(10)]
for _ in range(200):
i = random.randint(0, 9)
uuid1 = "%08x" % random.randint(0, 100)
uuid2 = "%08x" % random.randint(0, 100)
clients[i].upload(str(uuid1), str(uuid2))
kvs[i][str(uuid1)] = str(uuid2)
good = total = 0
# verify integrity
for i, (c, kv) in enumerate(zip(clients, kvs)):
for k, v in kv.items():
vv = c.download(k)
if vv == v:
good += 1
total += 1
return float(good) / total
|
384aa2b03169da613b25d2da60cdd1ec007aeed5
| 20,458 |
def multi_lightness_function_plot(functions=None, **kwargs):
"""
Plots given *Lightness* functions.
Parameters
----------
functions : array_like, optional
*Lightness* functions to plot.
\*\*kwargs : \*\*
Keywords arguments.
Returns
-------
bool
Definition success.
Raises
------
KeyError
If one of the given *Lightness* function is not found in the factory
*Lightness* functions.
Examples
--------
>>> fs = ('CIE 1976', 'Wyszecki 1964')
>>> multi_lightness_function_plot(fs) # doctest: +SKIP
True
"""
if functions is None:
functions = ('CIE 1976', 'Wyszecki 1964')
samples = np.linspace(0, 100, 1000)
for i, function in enumerate(functions):
function, name = LIGHTNESS_METHODS.get(function), function
if function is None:
raise KeyError(
('"{0}" "Lightness" function not found in factory '
'"Lightness" functions: "{1}".').format(
name, sorted(LIGHTNESS_METHODS.keys())))
pylab.plot(samples,
[function(x) for x in samples],
label=u'{0}'.format(name),
linewidth=2)
settings = {
'title': '{0} - Lightness Functions'.format(', '.join(functions)),
'x_label': 'Luminance Y',
'y_label': 'Lightness L*',
'x_tighten': True,
'legend': True,
'legend_location': 'upper left',
'x_ticker': True,
'y_ticker': True,
'grid': True,
'limits': [0, 100, 0, 100]}
settings.update(kwargs)
bounding_box(**settings)
aspect(**settings)
return display(**settings)
|
18a4706d919c5b8822ff76a40dcd657028a6179b
| 20,459 |
def delete_notification(request):
"""
    Deletes a Notification based on user input.
    """
    print(request.POST)
# Notification's PK
Notification.objects.get(pk=int(request.POST["pk"])).delete()
return JsonResponse({})
|
c4750bfbaa8184e64293517689671dbf717e6cd4
| 20,460 |
from dateutil import tz
def parse_query_value(query_str):
""" Return value for the query string """
try:
query_str = str(query_str).strip('"\' ')
if query_str == 'now':
d = Delorean(timezone=tz)
elif query_str.startswith('y'):
d = Delorean(Delorean(timezone=tz).midnight)
d -= timedelta(days=len(query_str))
elif query_str.startswith('t'):
d = Delorean(Delorean(timezone=tz).midnight)
d += timedelta(days=len(query_str) - 1)
else:
# Parse datetime string or timestamp
try:
ts = float(query_str)
if ts >= 1000000000000:
ts /= 1000
d = epoch(float(ts))
d.shift(tz)
except ValueError:
d = parse(str(query_str), tz, dayfirst=False)
except (TypeError, ValueError):
d = None
return d
|
ac9c6845871094d043eee7004214fdcecb20daec
| 20,461 |
def build_model():
"""
Build the model
:return: the model
"""
model = keras.Sequential([
layers.Dense(64, activation='relu', input_shape=[len(train_dataset.keys())]),
layers.Dense(64, activation='relu'),
layers.Dense(1)
])
optimizer = tf.keras.optimizers.RMSprop(0.001)
model.compile(loss='mse',
optimizer=optimizer,
metrics=['mae', 'mse'])
return model
|
b5e4b0a64e7d39a0c7b72c0380ef98d8eaf9cc01
| 20,462 |
def detect(stream):
"""Returns True if given stream is a readable excel file."""
try:
opendocument.load(BytesIO(stream))
return True
except:
pass
|
a9ef5361d9f6f5ae40767f40f12b89c3d53177a4
| 20,463 |
def new_default_channel():
"""Create new gRPC channel from settings."""
channel_url = urlparse(format_url(settings.SERVICE_BIND))
return Channel(host=channel_url.hostname, port=channel_url.port)
|
4771306570213fa03cc5df08a0e8c9b216ecfd44
| 20,464 |
def iou(bbox1, bbox2):
"""
Calculates the intersection-over-union of two bounding boxes.
Args:
bbox1 (numpy.array, list of floats): bounding box in format x1,y1,x2,y2.
bbox2 (numpy.array, list of floats): bounding box in format x1,y1,x2,y2.
Returns:
        float: intersection-over-union of bbox1, bbox2
"""
bbox1 = [float(x) for x in bbox1]
bbox2 = [float(x) for x in bbox2]
(x0_1, y0_1, x1_1, y1_1) = bbox1
(x0_2, y0_2, x1_2, y1_2) = bbox2
# get the overlap rectangle
overlap_x0 = max(x0_1, x0_2)
overlap_y0 = max(y0_1, y0_2)
overlap_x1 = min(x1_1, x1_2)
overlap_y1 = min(y1_1, y1_2)
# check if there is an overlap
if overlap_x1 - overlap_x0 <= 0 or overlap_y1 - overlap_y0 <= 0:
return 0
# if yes, calculate the ratio of the overlap to each ROI size and the unified size
size_1 = (x1_1 - x0_1) * (y1_1 - y0_1)
size_2 = (x1_2 - x0_2) * (y1_2 - y0_2)
size_intersection = (overlap_x1 - overlap_x0) * (overlap_y1 - overlap_y0)
size_union = size_1 + size_2 - size_intersection
return size_intersection / size_union
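
# Usage sketch (illustrative; boxes made up): two 2x2 boxes sharing a 1x2 strip
# give an overlap of 2 over a union of 6.
assert abs(iou([0, 0, 2, 2], [1, 0, 3, 2]) - 1 / 3) < 1e-9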
|
7609bcc6eb39757240a22c28fc7c15f4024cd789
| 20,465 |
def get_version():
"""
Obtain the version of the ITU-R P.1511 recommendation currently being used.
Returns
-------
version: int
Version currently being used.
"""
return __model.__version__
|
4d36eacabebe74bfb18879ba64f190ceb1bbc22a
| 20,466 |
import copy
def sample_filepaths(filepaths_in, filepaths_out, intensity):
"""
`filepaths_in` is a list of filepaths for in-set examples.
`filepaths_out` is a list of lists, where `filepaths_out[i]` is a list of
filepaths corresponding to the ith out-of-set class.
`intensity` is the number of in-set examples as a proportion of the total
number of examples: `intensity = N_in / (N_in + N_out)`. We can
rearrange this to get `N_out = N_in * ((1 / intensity) - 1)`, which we
use to set `n_left_to_sample`. An intensity of 0.5 gives `N_in = N_out`.
"""
filepaths_out_copy = copy.deepcopy(filepaths_out)
filepaths_out_sampled = []
inds_to_sample_from = range(len(filepaths_out))
n_left_to_sample = int(len(filepaths_in) * ((1 / intensity) - 1))
while n_left_to_sample > 0:
if n_left_to_sample < len(filepaths_out):
inds_to_sample_from = np.random.choice(
inds_to_sample_from, n_left_to_sample, replace=False)
for i in inds_to_sample_from:
sample = np.random.choice(filepaths_out_copy[i])
filepaths_out_copy[i].remove(sample)
filepaths_out_sampled.append(sample)
n_left_to_sample -= len(inds_to_sample_from)
return np.random.permutation(filepaths_in + filepaths_out_sampled)
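
# Usage sketch (illustrative; file names made up): with intensity=0.5 the number
# of sampled out-of-set paths equals the number of in-set paths, so the shuffled
# result has 8 entries. Assumes numpy is importable, as the function requires.
import numpy as np
demo_in = ['in_0.png', 'in_1.png', 'in_2.png', 'in_3.png']
demo_out = [['a_0.png', 'a_1.png', 'a_2.png'], ['b_0.png', 'b_1.png', 'b_2.png']]
assert len(sample_filepaths(demo_in, demo_out, intensity=0.5)) == 8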
|
b20c1e1a019eaebc0eb7ea46b33c286b72da7af7
| 20,467 |
def makekey(s):
"""
    Generates a bitcoin private key from a secret s
"""
return CBitcoinSecret.from_secret_bytes(sha256(s).digest())
|
51658c6426a78ae2e20752542bc579f5bb7ebc01
| 20,468 |
def shape_broadcast(shape1: tuple, shape2: tuple) -> tuple:
"""
Broadcast two shapes to create a new union shape.
Args:
shape1 (tuple) : first shape
shape2 (tuple) : second shape
Returns:
tuple : broadcasted shape
Raises:
IndexingError : if cannot broadcast
"""
for shape in (shape1, shape2):
if not shape:
raise IndexingError(f"Shape must have at least one dimension: {shape}")
len_shape1 = len(shape1)
len_shape2 = len(shape2)
max_length = max(len_shape1, len_shape2)
new_shape = [0] * max_length
shape1_reversed = list(reversed(shape1))
shape2_reversed = list(reversed(shape2))
for idx in range(max_length):
# iterate over every index. check if values are broadcastable, and if
# so, add to new shape dimension
if idx >= len_shape1:
new_shape[idx] = shape2_reversed[idx]
elif idx >= len_shape2:
new_shape[idx] = shape1_reversed[idx]
else:
new_shape[idx] = max(shape1_reversed[idx], shape2_reversed[idx])
if (
shape1_reversed[idx] != new_shape[idx] and shape1_reversed[idx] != 1
) or (shape2_reversed[idx] != new_shape[idx] and shape2_reversed[idx] != 1):
raise IndexingError(
f"The size of tensor a ({shape1_reversed[idx]}) must match the size "
f"of tensor b ({shape2_reversed[idx]}) at non-singleton dimension {idx}"
)
return tuple(reversed(new_shape))
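
# Usage sketch (illustrative; shapes made up): standard NumPy-style broadcasting
# of two compatible shapes.
assert shape_broadcast((2, 3), (1, 3)) == (2, 3)
assert shape_broadcast((5, 1, 4), (3, 1)) == (5, 3, 4)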
|
4737332b371e0f16df3860d5c53e46718f68f30e
| 20,469 |
import xxhash
def hash_array(kmer):
"""Return a hash of a numpy array."""
return xxhash.xxh32_intdigest(kmer.tobytes())
|
9761316333fdd9f28e74c4f1975adfca1909f54a
| 20,470 |
def GenerateDiskTemplate(
lu, template_name, instance_uuid, primary_node_uuid, secondary_node_uuids,
disk_info, file_storage_dir, file_driver, base_index,
feedback_fn, full_disk_params):
"""Generate the entire disk layout for a given template type.
"""
vgname = lu.cfg.GetVGName()
disk_count = len(disk_info)
disks = []
CheckDiskTemplateEnabled(lu.cfg.GetClusterInfo(), template_name)
if template_name == constants.DT_DISKLESS:
pass
elif template_name == constants.DT_DRBD8:
if len(secondary_node_uuids) != 1:
raise errors.ProgrammerError("Wrong template configuration")
remote_node_uuid = secondary_node_uuids[0]
minors = lu.cfg.AllocateDRBDMinor(
[primary_node_uuid, remote_node_uuid] * len(disk_info), instance_uuid)
(drbd_params, _, _) = objects.Disk.ComputeLDParams(template_name,
full_disk_params)
drbd_default_metavg = drbd_params[constants.LDP_DEFAULT_METAVG]
names = []
for lv_prefix in _GenerateUniqueNames(lu, [".disk%d" % (base_index + i)
for i in range(disk_count)]):
names.append(lv_prefix + "_data")
names.append(lv_prefix + "_meta")
for idx, disk in enumerate(disk_info):
disk_index = idx + base_index
data_vg = disk.get(constants.IDISK_VG, vgname)
meta_vg = disk.get(constants.IDISK_METAVG, drbd_default_metavg)
disk_dev = _GenerateDRBD8Branch(lu, primary_node_uuid, remote_node_uuid,
disk[constants.IDISK_SIZE],
[data_vg, meta_vg],
names[idx * 2:idx * 2 + 2],
"disk/%d" % disk_index,
minors[idx * 2], minors[idx * 2 + 1])
disk_dev.mode = disk[constants.IDISK_MODE]
disk_dev.name = disk.get(constants.IDISK_NAME, None)
disks.append(disk_dev)
else:
if secondary_node_uuids:
raise errors.ProgrammerError("Wrong template configuration")
name_prefix = _DISK_TEMPLATE_NAME_PREFIX.get(template_name, None)
if name_prefix is None:
names = None
else:
names = _GenerateUniqueNames(lu, ["%s.disk%s" %
(name_prefix, base_index + i)
for i in range(disk_count)])
if template_name == constants.DT_PLAIN:
def logical_id_fn(idx, _, disk):
vg = disk.get(constants.IDISK_VG, vgname)
return (vg, names[idx])
elif template_name in (constants.DT_FILE, constants.DT_SHARED_FILE):
logical_id_fn = \
lambda _, disk_index, disk: (file_driver,
"%s/%s" % (file_storage_dir,
names[idx]))
elif template_name == constants.DT_BLOCK:
logical_id_fn = \
lambda idx, disk_index, disk: (constants.BLOCKDEV_DRIVER_MANUAL,
disk[constants.IDISK_ADOPT])
elif template_name == constants.DT_RBD:
logical_id_fn = lambda idx, _, disk: ("rbd", names[idx])
elif template_name == constants.DT_EXT:
def logical_id_fn(idx, _, disk):
provider = disk.get(constants.IDISK_PROVIDER, None)
if provider is None:
raise errors.ProgrammerError("Disk template is %s, but '%s' is"
" not found", constants.DT_EXT,
constants.IDISK_PROVIDER)
return (provider, names[idx])
else:
raise errors.ProgrammerError("Unknown disk template '%s'" % template_name)
dev_type = template_name
for idx, disk in enumerate(disk_info):
params = ExtractDiskParams(disk, template_name)
disk_index = idx + base_index
size = disk[constants.IDISK_SIZE]
feedback_fn("* disk %s, size %s" %
(disk_index, utils.FormatUnit(size, "h")))
disk_dev = objects.Disk(dev_type=dev_type, size=size,
logical_id=logical_id_fn(idx, disk_index, disk),
iv_name="disk/%d" % disk_index,
mode=disk[constants.IDISK_MODE],
params=params,
spindles=disk.get(constants.IDISK_SPINDLES))
disk_dev.name = disk.get(constants.IDISK_NAME, None)
disk_dev.uuid = lu.cfg.GenerateUniqueID(lu.proc.GetECId())
disks.append(disk_dev)
return disks
|
87995b08d3579fc22a8db7c8408a9c29e47a8271
| 20,471 |
def check_pc_overlap(pc1, pc2, min_point_num):
"""
Check if the bounding boxes of the 2 given point clouds overlap
"""
b1 = get_pc_bbox(pc1)
b2 = get_pc_bbox(pc2)
b1_c = Polygon(b1)
b2_c = Polygon(b2)
inter_area = b1_c.intersection(b2_c).area
union_area = b1_c.area + b2_c.area - inter_area
if b1_c.area > 11 and b2_c.area > 11:
overlap = (inter_area / union_area) > 0.5
elif inter_area > 0:
overlap = True
else:
overlap = False
pc_merged = pc2
if overlap:
bbox_min = MinimumBoundingBox.MinimumBoundingBox(
np.concatenate((pc1[:, 0:2], pc2[:, 0:2]), axis=0)
)
l01 = bbox_min.length_parallel
l02 = bbox_min.length_orthogonal
area = l01 * l02
# shape doesn't look like car bbox
if ((area < 2 or area > 12)
or ((l01 > 4.6 or l02 > 4.6))
or ((l01 < 1 or l02 < 1))
or union_area > 15
):
if b1_c.area > b2_c.area:
pc_merged = pc1
else:
pc_merged = pc2
else:
idx_overlap = np.zeros((len(pc1)))
for i in range(len(pc1)):
diff = pc2 - pc1[i]
diff = np.sum(diff ** 2, axis=1)
if 0 in diff:
idx_overlap[i] = 1
pc_merged = np.concatenate((pc_merged, pc1[idx_overlap == 0]), axis=0)
if not is_car(pc_merged, min_point_num):
overlap = False
return overlap, pc_merged
|
8caa07a42850d9ca2a4d298e9be91a44ac15f6a5
| 20,472 |
def apply_hypercube(cube: DataCube, context: dict) -> DataCube:
"""Reduce the time dimension for each tile and compute min, mean, max and sum for each pixel
over time.
Each raster tile in the udf data object will be reduced by time. Minimum, maximum, mean and sum are
computed for each pixel over time.
    Args:
        cube (DataCube): The data cube whose time dimension will be reduced
        context (dict): Additional execution context (unused here)
    Returns:
        DataCube: A new data cube whose band dimension holds the min, max, sum and mean over time
"""
# The list of tiles that were created
array: xarray.DataArray = cube.get_array()
result = xarray.concat(
[array.min(dim='t'), array.max(dim='t'), array.sum(dim='t'), array.mean(dim='t')],
dim='bands'
)
return DataCube(result)
|
c2c3b7b90a48a37f5e172111ef13e8529a3a80c5
| 20,473 |
def dateIsBefore(year1, month1, day1, year2, month2, day2):
"""Returns True if year1-month1-day1 is before year2-month2-day2. Otherwise, returns False."""
if year1 < year2:
return True
if year1 == year2:
if month1 < month2:
return True
if month1 == month2:
if day1 < day2:
return True
else:
return False
else:
return False
else:
return False
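
# Usage sketch (illustrative; dates made up).
assert dateIsBefore(2019, 12, 31, 2020, 1, 1)
assert not dateIsBefore(2020, 6, 15, 2020, 6, 15)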
|
3ba19b6e57c8e51a86e590561331057a44885d10
| 20,474 |
def all_stat(x, stat_func=np.mean, upper_only=False, stat_offset=3):
"""
Generate a matrix that contains the value returned by stat_func for
all possible sub-windows of x[stat_offset:].
stat_func is any function that takes a sequence and returns a scalar.
if upper_only is False, values are added to both the upper and lower
triangular sections of the matrix. If True, only the upper section
is populated
"""
if len(x) < stat_offset:
return np.zeros([])
stat = np.zeros((len(x), len(x)))
for i in range(0, len(x)):
for j in range(i + stat_offset, len(x)):
v = stat_func(x[i:j])
stat[i, j] = v
if not upper_only:
stat[j, i] = v
return stat
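
# Usage sketch (illustrative; series made up). Assumes numpy is importable, as
# the function itself requires.
import numpy as np
demo_series = np.array([1.0, 2.0, 3.0, 4.0, 5.0])
demo_stat = all_stat(demo_series, stat_func=np.mean, stat_offset=3)
assert demo_stat[0, 3] == np.mean(demo_series[0:3])  # mean of the sub-window x[0:3]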
|
16ef240b33a477948ae99862bb21540a230a8a2f
| 20,475 |
def PyCallable_Check(space, w_obj):
"""Determine if the object o is callable. Return 1 if the object is callable
and 0 otherwise. This function always succeeds."""
return int(space.is_true(space.callable(w_obj)))
|
e5b8ee9bbbdb0fe53d6fc7241d19f93f7ee8259a
| 20,476 |
from typing import Callable
def _multiclass_metric_evaluator(metric_func: Callable[..., float], n_classes: int, y_test: np.ndarray,
y_pred: np.ndarray, **kwargs) -> float:
"""Calculate the average metric for multiclass classifiers."""
metric = 0
for label in range(n_classes):
metric += metric_func(y_test[:, label], y_pred[:, label], **kwargs)
metric /= n_classes
return metric
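
# Usage sketch (illustrative; labels made up): average a simple per-label
# accuracy over a 3-class one-vs-rest label matrix. Assumes numpy is importable,
# matching the annotations above.
import numpy as np
def _demo_accuracy(y_true, y_pred):
    return float(np.mean(y_true == y_pred))
demo_y_test = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
demo_y_pred = np.array([[1, 0, 0], [0, 0, 1], [0, 0, 1]])
assert abs(_multiclass_metric_evaluator(_demo_accuracy, 3, demo_y_test, demo_y_pred) - 7 / 9) < 1e-9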
|
a8a61c7a2e3629ff69a6a2aefdb4565e903b82de
| 20,477 |
import glob
import os
import time
from datetime import datetime
from io import StringIO
def insert_phots_into_database(framedir,
frameglob='rsub-*-xtrns.fits',
photdir=None,
photglob='rsub-*-%s.iphot',
maxframes=None,
overwrite=False,
database=None):
"""
This makes photometry index rows in the postgresql database. Intended for
use when the sqlite3 databases get out of hand.
"""
# open a database connection
if database:
cursor = database.cursor()
closedb = False
else:
database = pg.connect(user=PGUSER,
password=PGPASSWORD,
database=PGDATABASE,
host=PGHOST)
cursor = database.cursor()
closedb = True
# first, figure out the directories
if not photdir:
photdir = framedir
# start work here
try:
if isinstance(framedir, list):
framelist = framedir
else:
# first, find all the frames
framelist = glob.glob(os.path.join(os.path.abspath(framedir),
frameglob))
# restrict to maxframes max frames
if maxframes:
framelist = framelist[:maxframes]
# turn off table logging and drop indexes for speed
cursor.execute('drop index if exists photindex_iphots_rjd_idx')
cursor.execute('drop index if exists photindex_iphots_objectid_idx')
starttime = time.time()
# go through all the frames
for ix, frame in enumerate(framelist):
print('%sZ: inserting %d frame %s into pg database' %
(datetime.utcnow().isoformat(), ix, frame))
# generate the names of the associated phot and sourcelist files
frameinfo = FRAMEREGEX.findall(os.path.basename(frame))
framekey = '%s-%s_%s' % (frameinfo[0][0],
frameinfo[0][1],
frameinfo[0][2])
photsearch = photglob % ('%s-%s_%s' % (frameinfo[0][0],
frameinfo[0][1],
frameinfo[0][2]))
originalframe = '%s-%s_%s.fits' % (frameinfo[0][0],
frameinfo[0][1],
frameinfo[0][2])
photmatch = glob.glob(os.path.join(os.path.abspath(photdir),
photsearch))
originalframe = os.path.join(os.path.abspath(framedir),
originalframe)
# check these files exist, and populate the dict if they do
if (photmatch and os.path.exists(photmatch[0])
and os.path.exists(originalframe)):
phot = photmatch[0]
# get the JD from the FITS file.
# NOTE: this is the ORIGINAL FITS frame, since the subtracted
# one contains some weird JD header (probably inherited from the
# photref frame)
framerjd = get_header_keyword(originalframe, 'JD')
# now get the phot file and read it
photf = open(phot, 'rb')
photo = StringIO()
for line in photf:
hatid = line.split()[0]
photo.write('%.5f,%s,%s,%s' % (framerjd,
hatid,
framekey,
line))
photf.close()
photo.seek(0)
# do a fast insert using pg's copy protocol
cursor.copy_from(photo,'photindex_iphots',sep=',')
photo.close()
# if some associated files don't exist for this frame, ignore it
else:
print('WRN! %sZ: ignoring frame %s, '
'photometry for this frame is not available!' %
(datetime.utcnow().isoformat(), frame))
# now we're all done with frame inserts
# regenerate the indexes and reset table logging for durability
print('%sZ: recreating indexes' % (datetime.utcnow().isoformat()))
cursor.execute('create index on photindex_iphots(rjd)')
cursor.execute('create index on photindex_iphots(objectid)')
cursor.execute('analyze photindex_iphots')
# commit the transaction
database.commit()
print('%sZ: done, time taken: %.2f minutes' %
(datetime.utcnow().isoformat(), (time.time() - starttime)/60.0))
returnval = (framedir, True)
# catch the overwrite = False scenario
except pg.IntegrityError as e:
database.rollback()
message = ('failed to insert photometry from %s '
'into DB because some of it exists already '
'and overwrite = False'
% framedir)
print('EXC! %sZ: %s\n%s' %
(datetime.utcnow().isoformat(), message, format_exc()) )
returnval = (framedir, False)
# if everything goes wrong, exit cleanly
except Exception as e:
database.rollback()
message = 'failed to insert photometry from %s into DB' % framedir
print('EXC! %sZ: %s\nexception was: %s' %
(datetime.utcnow().isoformat(),
message, format_exc()) )
returnval = (framedir, False)
finally:
cursor.close()
if closedb:
database.close()
return returnval
|
9d80f1016c4ed8c0fa70ed818c7ed44f8c09a920
| 20,478 |
def idxs_of_duplicates(lst):
""" Returns the indices of duplicate values.
"""
idxs_of = dict({})
dup_idxs = []
for idx, value in enumerate(lst):
idxs_of.setdefault(value, []).append(idx)
for idxs in idxs_of.values():
if len(idxs) > 1:
dup_idxs.extend(idxs)
return dup_idxs
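
# Usage sketch (illustrative; values made up): indices of the two values that
# occur more than once, grouped by value thanks to dict insertion order (3.7+).
assert idxs_of_duplicates([1, 2, 1, 3, 2]) == [0, 2, 1, 4]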
|
adc8a0b0223ac78f0c8a6edd3d60acfaf7ca4c04
| 20,479 |
async def store_rekey(
handle: StoreHandle,
wrap_method: str = None,
pass_key: str = None,
) -> StoreHandle:
"""Replace the wrap key on a Store."""
return await do_call_async(
"askar_store_rekey",
handle,
encode_str(wrap_method and wrap_method.lower()),
encode_str(pass_key),
)
|
e7abb35147bd7b5be5aa37b6583571e5be8f144b
| 20,480 |
import requests
def prepare_bitbucket_data(data, profile_data, team_name):
"""
    Prepare Bitbucket data by extracting the information needed.
    If the response contains a next page for this team/organisation, keep fetching
    pages until the last one.
"""
next_page = False
link = None
profile_data = append_bitbucket_data(data.json(), profile_data, team_name)
if data.json().get('next'):
next_page = True
link = data.json().get('next')
while next_page:
next_data = requests.get(link)
profile_data = append_bitbucket_data(next_data.json(), profile_data, team_name)
if next_data.json().get('next'):
link = next_data.json().get('next')
else:
next_page = False
return profile_data
|
a2fe54a4fd02e80b4bf4d41ff932e27b555afc5c
| 20,481 |
def add(x1: Array, x2: Array, /) -> Array:
"""
Array API compatible wrapper for :py:func:`np.add <numpy.add>`.
See its docstring for more information.
"""
if x1.dtype not in _numeric_dtypes or x2.dtype not in _numeric_dtypes:
raise TypeError("Only numeric dtypes are allowed in add")
# Call result type here just to raise on disallowed type combinations
_result_type(x1.dtype, x2.dtype)
x1, x2 = Array._normalize_two_args(x1, x2)
return Array._new(np.add(x1._array, x2._array))
|
7c35ea06f5bff91da283e3521185a6b9f1b55b32
| 20,482 |
def aslist(l):
"""Convenience function to wrap single items and lists, and return lists unchanged."""
if isinstance(l, list):
return l
else:
return [l]
|
99ccef940229d806d27cb8e429da9c85c44fed07
| 20,483 |
import sys
import locale
import os
def form_03(request_data):
"""
    Statistical form 066/u, Appendix No. 5 to Order No. 413 of the Russian Ministry of Health dated 30 December 2002
"""
num_dir = request_data["dir_pk"]
direction_obj = Napravleniya.objects.get(pk=num_dir)
hosp_nums_obj = hosp_get_hosp_direction(num_dir)
hosp_nums = f"- {hosp_nums_obj[0].get('direction')}"
ind_card = direction_obj.client
patient_data = ind_card.get_data_individual()
hospital: Hospitals = request_data["hospital"]
hospital_name = hospital.safe_short_title
hospital_address = hospital.safe_address
hospital_kod_ogrn = hospital.safe_ogrn
if sys.platform == 'win32':
locale.setlocale(locale.LC_ALL, 'rus_rus')
else:
locale.setlocale(locale.LC_ALL, 'ru_RU.UTF-8')
pdfmetrics.registerFont(TTFont('PTAstraSerifBold', os.path.join(FONTS_FOLDER, 'PTAstraSerif-Bold.ttf')))
pdfmetrics.registerFont(TTFont('PTAstraSerifReg', os.path.join(FONTS_FOLDER, 'PTAstraSerif-Regular.ttf')))
buffer = BytesIO()
doc = SimpleDocTemplate(buffer, pagesize=A4, leftMargin=25 * mm, rightMargin=5 * mm, topMargin=6 * mm, bottomMargin=10 * mm, allowSplitting=1, title="Форма {}".format("066/у-02"))
width, height = portrait(A4)
styleSheet = getSampleStyleSheet()
style = styleSheet["Normal"]
style.fontName = "PTAstraSerifReg"
style.fontSize = 12
style.leading = 15
style.spaceAfter = 0.5 * mm
styleBold = deepcopy(style)
styleBold.fontName = "PTAstraSerifBold"
styleCenter = deepcopy(style)
styleCenter.alignment = TA_CENTER
styleCenter.fontSize = 12
styleCenter.leading = 15
styleCenter.spaceAfter = 1 * mm
styleCenterBold = deepcopy(styleBold)
styleCenterBold.alignment = TA_CENTER
styleCenterBold.fontSize = 12
styleCenterBold.leading = 15
styleCenterBold.face = 'PTAstraSerifBold'
styleCenterBold.borderColor = black
styleJustified = deepcopy(style)
styleJustified.alignment = TA_JUSTIFY
styleJustified.spaceAfter = 4.5 * mm
styleJustified.fontSize = 12
styleJustified.leading = 4.5 * mm
objs = []
styleT = deepcopy(style)
styleT.alignment = TA_LEFT
styleT.fontSize = 10
styleT.leading = 4.5 * mm
styleT.face = 'PTAstraSerifReg'
print_district = ''
if SettingManager.get("district", default='True', default_type='b'):
if ind_card.district is not None:
print_district = 'Уч: {}'.format(ind_card.district.title)
opinion = [
[
Paragraph('<font size=11>{}<br/>Адрес: {}<br/>ОГРН: {} <br/><u>{}</u> </font>'.format(hospital_name, hospital_address, hospital_kod_ogrn, print_district), styleT),
Paragraph('<font size=9 >Код формы по ОКУД:<br/>Код организации по ОКПО: 31348613<br/>' 'Медицинская документация<br/>форма № 066/у-02</font>', styleT),
],
]
tbl = Table(opinion, 2 * [90 * mm])
tbl.setStyle(
TableStyle(
[
('GRID', (0, 0), (-1, -1), 0.75, colors.white),
('LEFTPADDING', (1, 0), (-1, -1), 80),
('VALIGN', (0, 0), (-1, -1), 'TOP'),
]
)
)
objs.append(tbl)
space_symbol = ' '
if patient_data['age'] < SettingManager.get("child_age_before", default='15', default_type='i'):
patient_data['serial'] = patient_data['bc_serial']
patient_data['num'] = patient_data['bc_num']
else:
patient_data['serial'] = patient_data['passport_serial']
patient_data['num'] = patient_data['passport_num']
card_num_obj = patient_data['card_num'].split(' ')
p_card_num = card_num_obj[0]
sex = patient_data['sex']
if sex == 'м':
sex = f'{sex} - 1'
if sex == 'ж':
sex = f'{sex} - 2'
doc_patient = f"{patient_data['type_doc']}, {patient_data['serial']} - {patient_data['num']}"
polis_data = f"{patient_data['oms']['polis_serial']} {patient_data['oms']['polis_num']}"
############################################################################################################
    # Get the data from the primary admission (the very first hosp referral)
hosp_first_num = hosp_nums_obj[0].get('direction')
primary_reception_data = primary_reception_get_data(hosp_first_num)
hospitalized = ''
if primary_reception_data['what_time_hospitalized'] and primary_reception_data['plan_hospital']:
if primary_reception_data['what_time_hospitalized'].lower().replace(' ', '') == 'впервые':
hospitalized = "первично - 1"
if primary_reception_data['what_time_hospitalized'].lower().replace(' ', '') == 'повторно':
hospitalized = "повторно - 2"
if primary_reception_data['plan_hospital'].lower().replace(' ', '') == 'да':
hospitalized = f"{hospitalized}; в плановом порядке -4"
if primary_reception_data['extra_hospital'].lower().replace(' ', '') == 'да':
hospitalized = f"{hospitalized}; по экстренным показаниям - 3"
    # Get the department - from the service title or from the main referral
hosp_depart = hosp_nums_obj[0].get('research_title')
    # take the most recent referral from hosp_dirs
hosp_last_num = hosp_nums_obj[-1].get('direction')
    # 'Discharge time', 'Discharge date', 'Main diagnosis (description)', 'Complication of the main diagnosis (description)', 'Concomitant diagnosis (description)'
date_value, time_value, outcome, result_hospital = '', '', '', ''
hosp_extract_data = hosp_extract_get_data(hosp_last_num)
days_count = '__________________________'
doc_fio = ''
if hosp_extract_data:
if hosp_extract_data['result_hospital']:
result_hospital = hosp_extract_data['result_hospital']
if hosp_extract_data['outcome']:
outcome = hosp_extract_data['outcome']
if hosp_extract_data['date_value']:
date_value = hosp_extract_data['date_value']
if hosp_extract_data['time_value']:
time_value = hosp_extract_data['time_value']
days_count = hosp_extract_data['days_count']
doc_fio = hosp_extract_data['doc_fio']
title_page = [
Indenter(left=0 * mm),
Spacer(1, 8 * mm),
Paragraph(
'<font fontname="PTAstraSerifBold" size=13>СТАТИСТИЧЕСКАЯ КАРТА ВЫБЫВШЕГО ИЗ СТАЦИОНАРА<br/> '
'круглосуточного пребывания, дневного стационара при больничном<br/> учреждении, дневного стационара при'
' амбулаторно-поликлиническом<br/> учреждении, стационара на дому<br/>'
'N медицинской карты {} {}</font>'.format(p_card_num, hosp_nums),
styleCenter,
),
Spacer(1, 2 * mm),
Spacer(1, 2 * mm),
Spacer(1, 2 * mm),
Paragraph('1. Код пациента: ________ 2. Ф.И.О.: {}'.format(patient_data['fio']), style),
Paragraph('3. Пол: {} {}4. Дата рождения {}'.format(sex, space_symbol * 24, patient_data['born']), style),
Paragraph('5. Документ, удостов. личность: (название, серия, номер) {} {}'.format(space_symbol * 2, doc_patient), style),
Paragraph('6. Адрес: регистрация по месту жительства: {}'.format(patient_data['main_address']), style),
Paragraph('7. Код территории проживания: ___ Житель: город - 1; село - 2.', style),
Paragraph('8. Страховой полис (серия, номер):{}'.format(polis_data), style),
Paragraph('Выдан: {}'.format(patient_data['oms']['polis_issued']), style),
Paragraph('9. Вид оплаты:______________', style),
Paragraph('10. Социальный статус: {}'.format(primary_reception_data['social_status']), style),
Paragraph('11. Категория льготности: {}'.format(primary_reception_data['category_privilege']), style),
Paragraph('12. Кем направлен больной: {}'.format(primary_reception_data['who_directed']), style),
Paragraph('13. Кем доставлен: _________________________________ Код______ Номер наряда__________', style),
Paragraph('14. Диагноз направившего учреждения: {}'.format(primary_reception_data['diagnos_who_directed']), style),
Paragraph('14.1 Состояние при поступлении: {}'.format(primary_reception_data['state']), style),
Paragraph('15. Диагноз приемного отделения:{}'.format(primary_reception_data['diagnos_entered']), style),
Paragraph('16. Доставлен в состоянии опьянения: Алкогольного — 1; Наркотического — 2.', style),
Paragraph('17. Госпитализирован по поводу данного заболевания в текущем году: {}'.format(hospitalized), style),
Paragraph('18.Доставлен в стационар от начала заболевания(получения травмы): {}'.format(primary_reception_data['time_start_ill']), style),
Paragraph('19. Травма: {}'.format(primary_reception_data['type_trauma']), style),
Paragraph('20. Дата поступления в приемное отделение:______________ Время__________', style),
Paragraph(
'21. Название отделения: <u>{}</u>; дата поступления: <u>{}</u>; время: <u>{}</u>'.format(
hosp_depart, primary_reception_data['date_entered_value'], primary_reception_data['time_entered_value']
),
style,
),
Paragraph('Подпись врача приемного отделения ______________ Код __________', style),
Paragraph('22. Дата выписки (смерти): {}; Время {}'.format(date_value, time_value), style),
Paragraph('23. Продолжительность госпитализации (койко - дней): {}'.format(days_count), style),
Paragraph('24. Исход госпитализации: {}'.format(outcome), style),
Paragraph('24.1. Результат госпитализации: {}'.format(result_hospital), style),
]
closed_bl_result = closed_bl(hosp_nums_obj[0].get('direction'))
title_page.append(
Paragraph(
f"25. Листок нетрудоспособности: открыт <u>{closed_bl_result['start_date']}</u> закрыт: <u>{closed_bl_result['end_date']}</u>"
f" к труду: <u>{closed_bl_result['start_work']}</u>",
style,
)
)
title_page.append(Paragraph(f"25.1. Номере ЛН : <u>{closed_bl_result['num']}</u>", style))
title_page.append(Paragraph(f"25.2. Выдан кому : {closed_bl_result['who_get']}", style))
title_page.append(Paragraph('25.3. По уходу за больным Полных лет: _____ Пол: {}'.format(sex), style))
title_page.append(Paragraph('26. Движение пациента по отделениям:', style))
objs.extend(title_page)
styleTB = deepcopy(style)
styleTB.fontSize = 8.7
styleTB.alignment = TA_CENTER
styleTB.leading = 3.5 * mm
styleTC = deepcopy(style)
styleTC.fontSize = 9.5
styleTC.alignment = TA_LEFT
styleTCright = deepcopy(styleTC)
styleTCright.alignment = TA_RIGHT
styleTCcenter = deepcopy(styleTC)
styleTCcenter.alignment = TA_CENTER
opinion = [
[
Paragraph('N', styleTB),
Paragraph('Код отделения', styleTB),
Paragraph('Профиль коек', styleTB),
Paragraph('Код врача', styleTB),
Paragraph('Дата поступления', styleTB),
Paragraph('Дата выписки, перевода', styleTB),
Paragraph('Код диагноза по МКБ', styleTB),
Paragraph('Код медицинского стандарта', styleTB),
Paragraph('Код прерванного случая', styleTB),
Paragraph('Вид оплаты', styleTB),
]
]
patient_movement = hosp_patient_movement(hosp_nums_obj)
x = 0
for i in patient_movement:
x += 1
doc_code = ''
if i['doc_confirm_code']:
doc_code = str(i['doc_confirm_code'])
tmp_data = [
[
Paragraph(str(x), styleTB),
Paragraph('', styleTB),
Paragraph(i['bed_profile_research_title'], styleTB),
Paragraph(doc_code, styleTB),
Paragraph(i['date_entered_value'], styleTB),
Paragraph(i['date_oute'], styleTB),
Paragraph(i['diagnos_mkb'], styleTB),
Paragraph('', styleTB),
Paragraph('', styleTB),
Paragraph('ОМС', styleTB),
],
]
opinion.extend(tmp_data.copy())
    # build the data structure for the table
tbl_act = Table(opinion, repeatRows=1, colWidths=(7 * mm, 15 * mm, 30 * mm, 20 * mm, 21 * mm, 21 * mm, 20 * mm, 14 * mm, 14 * mm, 20 * mm))
tbl_act.setStyle(
TableStyle(
[
('GRID', (0, 0), (-1, -1), 1.0, colors.black),
('BOTTOMPADDING', (0, 0), (-1, -1), 1.5 * mm),
('VALIGN', (0, 0), (-1, -1), 'MIDDLE'),
]
)
)
objs.append(tbl_act)
objs.append(Spacer(1, 2 * mm))
objs.append(
Paragraph('27. Хирургические операции(обозначить: основную операцию, использование спец.аппаратуры):', style),
)
opinion = [
[
Paragraph('Дата, Час', styleTB),
Paragraph('Код <br/>хирурга', styleTB),
Paragraph('Код отделения', styleTB),
Paragraph('наименование операции', styleTB),
Paragraph('код операции', styleTB),
Paragraph('наименование осложнения', styleTB),
Paragraph('Код ослонения', styleTB),
Paragraph('Анестезия (код врача)', styleTB),
Paragraph('энд.', styleTB),
Paragraph('лазер.', styleTB),
Paragraph('криог.', styleTB),
Paragraph('Вид оплаты', styleTB),
]
]
patient_operation = hosp_get_operation_data(num_dir)
operation_result = []
for i in patient_operation:
operation_template = [''] * 12
operation_template[0] = Paragraph(i['date'] + '<br/>' + i['time_start'] + '-' + i['time_end'], styleTB)
operation_template[1] = Paragraph(str(i['doc_code']), styleTB)
operation_template[3] = Paragraph(f"{i['name_operation']} <br/><font face=\"PTAstraSerifBold\" size=\"8.7\">({i['category_difficult']})</font>", styleTB)
operation_template[4] = Paragraph('{}'.format(i['code_operation'] + '<br/>' + i['plan_operation']), styleTB)
operation_template[7] = Paragraph('{}'.format(i['anesthesia method'] + '<br/> (' + i['code_doc_anesthesia'] + ')'), styleTB)
operation_template[5] = Paragraph(i['complications'], styleTB)
operation_template[11] = Paragraph(" ОМС", styleTB)
operation_result.append(operation_template.copy())
opinion.extend(operation_result)
tbl_act = Table(opinion, repeatRows=1, colWidths=(22 * mm, 12 * mm, 11 * mm, 26 * mm, 26 * mm, 20 * mm, 10 * mm, 15 * mm, 7 * mm, 7 * mm, 7 * mm, 16 * mm))
tbl_act.setStyle(
TableStyle(
[
('GRID', (0, 0), (-1, -1), 1.0, colors.black),
('BOTTOMPADDING', (0, 0), (-1, -1), 1.5 * mm),
('VALIGN', (0, 0), (-1, -1), 'MIDDLE'),
]
)
)
objs.append(tbl_act)
objs.append(Spacer(1, 2 * mm))
space_symbol = ' '
objs.append(
Paragraph('28. Обследован: RW {} AIDS '.format(space_symbol * 10), style),
)
objs.append(Spacer(1, 2 * mm))
objs.append(
Paragraph('29. Диагноз стационара(при выписке):', style),
)
opinion = [
[
Paragraph('Клинический заключительный', styleTB),
Paragraph('Основное заболевание', styleTB),
Paragraph('Код МКБ', styleTB),
Paragraph('Осложнение', styleTB),
Paragraph('Код МКБ', styleTB),
Paragraph('Сопутствующее заболевание', styleTB),
Paragraph('Код МКБ', styleTB),
]
]
hosp_last_num = hosp_nums_obj[-1].get('direction')
hosp_extract_data = hosp_extract_get_data(hosp_last_num)
opinion_diagnos = []
if hosp_extract_data:
opinion_diagnos = [
[
Paragraph('', styleTB),
Paragraph(hosp_extract_data['final_diagnos'], styleTB),
Paragraph(hosp_extract_data['final_diagnos_mkb'], styleTB),
Paragraph(hosp_extract_data['other_diagnos'], styleTB),
Paragraph(hosp_extract_data['other_diagnos_mkb'], styleTB),
Paragraph(hosp_extract_data['near_diagnos'].replace('<', '<').replace('>', '>'), styleTB),
Paragraph(hosp_extract_data['near_diagnos_mkb'], styleTB),
]
]
opinion.extend(opinion_diagnos)
opinion_pathologist = [
[
Paragraph('Патологоанатомический ', styleTB),
Paragraph('', styleTB),
Paragraph('', styleTB),
Paragraph('', styleTB),
Paragraph('', styleTB),
Paragraph('', styleTB),
Paragraph('', styleTB),
]
]
opinion.extend(opinion_pathologist)
tbl_act = Table(opinion, repeatRows=1, colWidths=(28 * mm, 45 * mm, 15 * mm, 30 * mm, 15 * mm, 30 * mm, 15 * mm))
tbl_act.setStyle(
TableStyle(
[
('GRID', (0, 0), (-1, -1), 1.0, colors.black),
('BOTTOMPADDING', (0, 0), (-1, -1), 1.5 * mm),
('VALIGN', (0, 0), (-1, -1), 'MIDDLE'),
('SPAN', (0, 0), (0, 1)),
]
)
)
objs.append(tbl_act)
objs.append(Spacer(1, 2 * mm))
objs.append(
Paragraph('30.В случае смерти указать основную причину:______________________________________________________________' 'Код МКБ', style),
)
objs.append(Spacer(1, 20 * mm))
objs.append(
Paragraph(
'31. Дефекты догоспитального этапа: несвоевременность госпитализации - 1; недостаточный объем клинико - диагностического обследования - 2; '
'неправильная тактика лечения - 3; несовпадение диагноза - 4.',
style,
),
)
objs.append(Spacer(1, 7 * mm))
objs.append(
Paragraph('Подпись лечащего врача ({}) ____________________________'.format(doc_fio), style),
)
objs.append(Spacer(1, 7 * mm))
objs.append(
Paragraph('Подпись заведующего отделением', style),
)
def first_pages(canvas, document):
canvas.saveState()
canvas.restoreState()
def later_pages(canvas, document):
canvas.saveState()
canvas.restoreState()
doc.build(objs, onFirstPage=first_pages, onLaterPages=later_pages)
pdf = buffer.getvalue()
buffer.close()
return pdf
|
29d3dba011969ce95ab3fcc99081ac2e1bee07d4
| 20,484 |
def getKeyList(rootFile,pathSplit):
"""
Get the list of keys of the directory (rootFile,pathSplit),
if (rootFile,pathSplit) is not a directory then get the key in a list
"""
if isDirectory(rootFile,pathSplit):
changeDirectory(rootFile,pathSplit)
return ROOT.gDirectory.GetListOfKeys()
else: return [getKey(rootFile,pathSplit)]
|
69d51a496ec77e00753518fee7ae8a0e5b9e7c9a
| 20,485 |
import argparse
def get_args():
"""Parse command line arguments and return namespace object"""
parser = argparse.ArgumentParser(description='Transcode some files')
parser.add_argument('-c', action="store", dest="config", required=True)
parser.add_argument('-l', action="store", dest="limit", type=int, default=None)
parser.add_argument('-p', action="store", dest="processes", type=int)
parser.add_argument('-q', action="store_true", dest="quiet")
return parser.parse_args()
|
8d811d1ff9437eef6fe5031618f9561248e40940
| 20,486 |
from typing import Any
def query_from_json(query_json: Any,
client: cl.Client = None):
"""
The function converts a dictionary or json string of Query to a Query object.
:param query_json: A dictionary or json string that contains the keys of a Query.
:type query_json: Any
:param client: An IBM PAIRS client.
:type client: ibmpairs.client.Client
:rtype: ibmpairs.query.Query
:raises Exception: if not a dict or a str.
"""
query = Query.from_json(query_json)
cli = common.set_client(input_client = client,
global_client = cl.GLOBAL_PAIRS_CLIENT)
query.client = cli
return query
|
58d1b1f3efedf0b74a0136d1edd1da13bf16bf8c
| 20,487 |
import os
def _write(info, directory, format, name_format):
"""
    Writes the string info to a file in the given directory.
    Args:
        info (str): The string content to write
        directory (str): Path to the directory where to write
format (str): Output format
name_format (str): The file name
"""
#pylint: disable=redefined-builtin
file_name = name_format
file_path = os.path.join(
directory, f"{file_name}.{format}")
os.makedirs(os.path.dirname(file_path), exist_ok=True)
with open(file_path,'w',encoding='utf-8') as f:
f.write(info)
return file_path
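
# Usage sketch (illustrative; names made up): write a small text report into a
# temporary directory and check the generated path.
import tempfile
demo_path = _write("hello", tempfile.mkdtemp(), format="txt", name_format="report")
assert demo_path.endswith("report.txt")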
|
c8f595769607151aa771b4d8d841b4beee77bc9e
| 20,488 |
def fetch_validation_annotations():
""" Returns the validation annotations
Returns:
complete_annotations: array of annotation data - [n_annotations, 4]
row format is [T, X, Y, Z]
"""
ann_gen = _annotation_generator()
data = []
for annotation in ann_gen:
if annotation[0] in VAL_TIMEPOINTS:
data.append(annotation)
data = np.asarray(data)
# scale z down to expected range
data *= [1, 1, 1, 0.2]
return data
|
1b9a8b86bbc005c79b152e1f59e653b7711e674f
| 20,489 |
def enough_data(train_data, test_data, verbose=False):
"""Check if train and test sets have any elements."""
if train_data.empty:
if verbose:
print('Empty training data\n')
return False
if test_data.empty:
if verbose:
print('Empty testing data\n')
return False
return True
|
f11014d83379a5df84a67ee3b8f1e85b23c058f7
| 20,490 |
import argparse
import sys
def parse_args(args):
"""
Parse the arguments to this application, then return the constructed namespace argument.
:param args: list of arguments to parse
:return: namespace argument
"""
parser = argparse.ArgumentParser(
description="Connects data from F prime flight software to the GDS tcp server"
)
# Setup this parser to handle MiddleWare arguments
fprime_gds.executables.cli.MiddleWareParser.add_args(parser)
# Add a parser for each adapter
subparsers = parser.add_subparsers(
help="Type of adapter used for processing", dest="subcommand"
)
for (
adapter_name
) in fprime_gds.common.adapters.base.BaseAdapter.get_adapters().keys():
adapter = fprime_gds.common.adapters.base.BaseAdapter.get_adapters()[
adapter_name
]
# Check adapter real quick before moving on
if not hasattr(adapter, "get_arguments") or not callable(
getattr(adapter, "get_arguments", None)
):
LOGGER.error(
"'%s' does not have 'get_arguments' method, skipping.", (adapter_name)
)
continue
subparse = subparsers.add_parser(adapter_name)
# Add arguments for the parser
for argument in adapter.get_arguments().keys():
subparse.add_argument(*argument, **adapter.get_arguments()[argument])
args = parser.parse_args(args)
try:
extras = fprime_gds.executables.cli.refine(parser, args)
fprime_gds.common.logger.configure_py_log(extras["logs"], "comm-adapter.log")
except ValueError as exc:
print("[ERROR] {}".format(exc), file=sys.stderr)
parser.print_help(sys.stderr)
sys.exit(-1)
return args
|
36d450bca31efd8709a6c108fa719d15c1d1e724
| 20,491 |
def calculate_tidal_offset(TIDE, GM, R, refell):
"""
Calculates the spherical harmonic offset for a tide system to change
from a tide free state where there is no permanent direct and
indirect tidal potentials
Arguments
---------
TIDE: output tidal system
R: average radius used in gravity model
    GM: geocentric gravitational constant used in gravity model
refell: reference ellipsoid name
Returns
-------
deltaC20: offset for changing from tide free system
"""
#-- get ellipsoid parameters for refell
ellip = ref_ellipsoid(refell)
#-- standard gravitational acceleration
gamma = 9.80665
trans = (-0.198*gamma*R**3)/(np.sqrt(5.0)*GM*ellip['a']**2)
#-- load love number for degree 2 from PREM (Han and Wahr, 1995)
k2 = -0.30252982142510
#-- conversion for each tidal system
if (TIDE == 'mean_tide'):
conv = (1.0 + k2)
elif (TIDE == 'zero_tide'):
conv = k2
#-- return the C20 offset
return conv*trans
|
278b27b2a1378cf0ccb44055a37baf9def7d6c6a
| 20,492 |
def get_questions(set_id, default_txt=None):
"""Method to get set of questions list."""
try:
cache_key = 'question_list_%s' % (set_id)
cache_list = cache.get(cache_key)
if cache_list:
v_list = cache_list
print('FROM Cache %s' % (cache_key))
else:
v_list = ListAnswers.objects.filter(
answer_set_id=set_id, is_void=False)
cache.set(cache_key, v_list, 300)
my_list = v_list.values_list(
'answer_code', 'answer').order_by('the_order')
if default_txt:
initial_list = ('', default_txt)
final_list = [initial_list] + list(my_list)
return final_list
except Exception as e:
print('error - %s' % (e))
return ()
else:
return my_list
|
0153ab71caa705f7a4f2a07ce5ef210b02618dd4
| 20,493 |
import pathlib
import tempfile
import logging
import glob
import os
def _export_photo_uuid_applescript(
uuid,
dest,
filestem=None,
original=True,
edited=False,
live_photo=False,
timeout=120,
burst=False,
):
""" Export photo to dest path using applescript to control Photos
If photo is a live photo, exports both the photo and associated .mov file
uuid: UUID of photo to export
dest: destination path to export to
filestem: (string) if provided, exported filename will be named stem.ext
where ext is extension of the file exported by photos (e.g. .jpeg, .mov, etc)
If not provided, file will be named with whatever name Photos uses
        If filestem.ext exists, it will be overwritten
original: (boolean) if True, export original image; default = True
edited: (boolean) if True, export edited photo; default = False
If photo not edited and edited=True, will still export the original image
caller must verify image has been edited
*Note*: must be called with either edited or original but not both,
will raise error if called with both edited and original = True
live_photo: (boolean) if True, export associated .mov live photo; default = False
timeout: timeout value in seconds; export will fail if applescript run time exceeds timeout
burst: (boolean) set to True if file is a burst image to avoid Photos export error
Returns: list of paths to exported file(s) or None if export failed
Note: For Live Photos, if edited=True, will export a jpeg but not the movie, even if photo
has not been edited. This is due to how Photos Applescript interface works.
"""
# setup the applescript to do the export
export_scpt = AppleScript(
"""
on export_by_uuid(theUUID, thePath, original, edited, theTimeOut)
tell application "Photos"
set thePath to thePath
set theItem to media item id theUUID
set theFilename to filename of theItem
set itemList to {theItem}
if original then
with timeout of theTimeOut seconds
export itemList to POSIX file thePath with using originals
end timeout
end if
if edited then
with timeout of theTimeOut seconds
export itemList to POSIX file thePath
end timeout
end if
return theFilename
end tell
end export_by_uuid
"""
)
dest = pathlib.Path(dest)
    if not dest.is_dir():
raise ValueError(f"dest {dest} must be a directory")
if not original ^ edited:
raise ValueError(f"edited or original must be True but not both")
tmpdir = tempfile.TemporaryDirectory(prefix="osxphotos_")
# export original
filename = None
try:
filename = export_scpt.call(
"export_by_uuid", uuid, tmpdir.name, original, edited, timeout
)
except Exception as e:
logging.warning("Error exporting uuid %s: %s" % (uuid, str(e)))
return None
if filename is not None:
# need to find actual filename as sometimes Photos renames JPG to jpeg on export
# may be more than one file exported (e.g. if Live Photo, Photos exports both .jpeg and .mov)
# TemporaryDirectory will cleanup on return
filename_stem = pathlib.Path(filename).stem
files = glob.glob(os.path.join(tmpdir.name, "*"))
exported_paths = []
for fname in files:
path = pathlib.Path(fname)
if len(files) > 1 and not live_photo and path.suffix.lower() == ".mov":
# it's the .mov part of live photo but not requested, so don't export
logging.debug(f"Skipping live photo file {path}")
continue
if len(files) > 1 and burst and path.stem != filename_stem:
# skip any burst photo that's not the one we asked for
logging.debug(f"Skipping burst photo file {path}")
continue
if filestem:
# rename the file based on filestem, keeping original extension
dest_new = dest / f"{filestem}{path.suffix}"
else:
# use the name Photos provided
dest_new = dest / path.name
logging.debug(f"exporting {path} to dest_new: {dest_new}")
_copy_file(str(path), str(dest_new))
exported_paths.append(str(dest_new))
return exported_paths
else:
return None
|
1602969314a3530b8a9312a3e009f2f0c21268a9
| 20,494 |
def get_mms_operation(workspace, operation_id):
"""
Retrieve the operation payload from MMS.
    :return: The JSON-encoded content of the response.
:rtype: dict
"""
response = make_mms_request(workspace, 'GET', '/operations/' + operation_id, None)
return response.json()
|
c88aca93803ab5075a217a10b7782ae791f168bc
| 20,495 |
def _check_data_nan(data):
"""Ensure data compatibility for the series received by the smoother.
(Without checking for inf and nans).
Returns
-------
data : array
Checked input.
"""
data = np.asarray(data)
if np.prod(data.shape) == np.max(data.shape):
data = data.ravel()
if data.ndim > 2:
raise ValueError(
"The format of data received is not appropriate. "
"Pass an objet with data in this format (series, timesteps)")
if data.ndim == 0:
raise ValueError(
"Pass an object with data in this format (series, timesteps)")
if data.dtype not in [np.float16, np.float32, np.float64,
np.int8, np.int16, np.int32, np.int64]:
raise ValueError("data contains not numeric types")
return data
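
# Usage sketch (illustrative; arrays made up): a single series passed as a
# column is flattened, while a (2, 5) batch of series is kept two-dimensional.
# Assumes numpy is importable, as the function itself requires.
import numpy as np
assert _check_data_nan(np.zeros((5, 1))).shape == (5,)
assert _check_data_nan(np.zeros((2, 5))).shape == (2, 5)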
|
1cde49f2836405deb0c1328d5ce53c69ffbcb721
| 20,496 |
def function(row, args):
"""Execute a named function
function(arg, arg...)
@param row: the HXL data row
@param args: the arguments parsed (the first one is the function name)
@returns: the result of executing the function on the arguments
"""
f = FUNCTIONS.get(args[0])
if f:
return f(row, args[1:], True)
else:
logger.error("Unknown function %s", args[0])
return ''
|
3b6e2e20c09c6cefebb4998d40376ff1b1aa63f2
| 20,497 |
import os
def currencyrates():
"""
    print a sh-friendly set of variables representing today's currency rates for $EXCHANGERATES which is a semicolon-
separated list of currencyexchange names from riksbanken.se using daily avg aggregation
:return: none
"""
#print(ratesgroup())
rates = os.environ.get("EXCHANGERATES",'SEKEURPMI;SEKUSDPMI')
series = [dict(groupid=foreign_rates_groupid, seriesid=id) for id in rates.split(';')]
#print(series)
query = dict(languageid='en',
min=False,
max=True,
ultimo=False,
aggregateMethod='D',
avg=True,
dateto=date.today(),
datefrom=date.today() - timedelta(days=7),
searchGroupSeries=series)
result = client.service.getInterestAndExchangeRates(searchRequestParameters=query)
#print(result)
def avg(s):
vals = list(filter(lambda x: x is not None, [x['value'] for x in s['resultrows']]))
return '{0:.4f}'.format(sum(vals)/len(vals))
print (";".join(["{}={}".format(str(s['seriesid']).strip(),avg(s)) for s in result['groups'][0]['series']]))
|
64cefe54afe6fdb47496923a3bab9bc33b93cd3a
| 20,498 |
def extract_rfc2822_addresses(text):
"""Returns a list of valid RFC2822 addresses
that can be found in ``source``, ignoring
malformed ones and non-ASCII ones.
"""
if not text: return []
candidates = address_pattern.findall(tools.ustr(text).encode('utf-8'))
return filter(try_coerce_ascii, candidates)
|
b256bd585a30900e09a63f0cc29889044da8e0e0
| 20,499 |