content (string, 35-762k chars) | sha1 (string, 40 chars) | id (int64, 0-3.66M) |
---|---|---|
def upload():
"""
Implements the upload page form
"""
return render_template('upload.html') | 4dda3418621b3894234b049e22f810304050a398 | 6,800 |
def detect_changepoints(points, min_time, data_processor=acc_difference):
""" Detects changepoints on points that have at least a specific duration
Args:
points (:obj:`Point`)
min_time (float): Minimum duration that a sub-segment, bounded by two changepoints, must have
data_processor (function): Function to extract data to feed to the changepoint algorithm.
Defaults to `acc_difference`
Returns:
:obj:`list` of int: Indexes of changepoints
"""
data = data_processor(points)
changepoints = pelt(normal_mean(data, np.std(data)), len(data))
changepoints.append(len(points) - 1)
result = []
for start, end in pairwise(changepoints):
time_diff = points[end].time_difference(points[start])
if time_diff > min_time:
result.append(start)
# adds the first point
result.append(0)
# adds the last changepoint detected
result.append(len(points) - 1)
return sorted(list(set(result))) | 872c2e4d5d8cbb33de100495bc8d9ddb050400c8 | 6,801 |
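The loop above relies on a `pairwise` helper that is not shown in this snippet; a minimal sketch of that assumed helper, following the standard itertools recipe (Python 3.10+ ships an equivalent itertools.pairwise):

# Assumed shape of the missing `pairwise` helper (itertools recipe):
# pairwise([0, 5, 9]) yields (0, 5), (5, 9)
from itertools import tee

def pairwise(iterable):
    a, b = tee(iterable)
    next(b, None)
    return zip(a, b)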
from qalgebra.core.hilbert_space_algebra import LocalSpace
from qalgebra.core.scalar_algebra import ScalarValue
from qalgebra.library.spin_algebra import SpinSpace
def Sum(idx, *args, **kwargs):
"""Instantiator for an arbitrary indexed sum.
This returns a function that instantiates the appropriate
:class:`QuantumIndexedSum` subclass for a given term expression. It is the
preferred way to "manually" create indexed sum expressions, closely
resembling the normal mathematical notation for sums.
Args:
idx (IdxSym): The index symbol over which the sum runs
args: arguments that describe the values over which `idx` runs,
kwargs: keyword-arguments, used in addition to `args`
Returns:
callable: an instantiator function that takes an
arbitrary `term` that should generally contain the `idx` symbol, and
returns an indexed sum over that `term` with the index range specified
by the original `args` and `kwargs`.
There is considerable flexibility to specify concise `args` for a variety
of index ranges.
Assume the following setup::
>>> i = IdxSym('i'); j = IdxSym('j')
>>> ket_i = BasisKet(FockIndex(i), hs=0)
>>> ket_j = BasisKet(FockIndex(j), hs=0)
>>> hs0 = LocalSpace('0')
Giving `i` as the only argument will sum over the indices of the basis
states of the Hilbert space of `term`::
>>> s = Sum(i)(ket_i)
>>> unicode(s)
'∑_{i ∈ ℌ₀} |i⟩⁽⁰⁾'
You may also specify a Hilbert space manually::
>>> Sum(i, hs0)(ket_i) == Sum(i, hs=hs0)(ket_i) == s
True
Note that using :func:`Sum` is vastly more readable than the equivalent
"manual" instantiation::
>>> s == KetIndexedSum.create(
... ket_i, ranges=(IndexOverFockSpace(i, hs=hs0),))
True
By nesting calls to `Sum`, you can instantiate sums running over multiple
indices::
>>> unicode( Sum(i)(Sum(j)(ket_i * ket_j.dag())) )
'∑_{i,j ∈ ℌ₀} |i⟩⟨j|⁽⁰⁾'
Giving two integers in addition to the index `i` in `args`, the index will
run between the two values::
>>> unicode( Sum(i, 1, 10)(ket_i) )
'∑_{i=1}^{10} |i⟩⁽⁰⁾'
>>> Sum(i, 1, 10)(ket_i) == Sum(i, 1, to=10)(ket_i)
True
You may also include an optional step width, either as a third integer or
using the `step` keyword argument.
>>> #unicode( Sum(i, 1, 10, step=2)(ket_i) ) # TODO
Lastly, by passing a tuple or list of values, the index will run over all
the elements in that tuple or list::
>>> unicode( Sum(i, (1, 2, 3))(ket_i) )
'∑_{i ∈ {1,2,3}} |i⟩⁽⁰⁾'
"""
dispatch_table = {
tuple(): _sum_over_fockspace,
(LocalSpace,): _sum_over_fockspace,
(SpinSpace,): _sum_over_fockspace,
(list,): _sum_over_list,
(tuple,): _sum_over_list,
(int,): _sum_over_range,
(int, int): _sum_over_range,
(int, int, int): _sum_over_range,
}
key = tuple((type(arg) for arg in args))
try:
idx_range_func = dispatch_table[key]
except KeyError:
raise TypeError("No implementation for args of type %s" % str(key))
def sum(term):
if isinstance(term, ScalarValue._val_types):
term = ScalarValue.create(term)
idx_range = idx_range_func(term, idx, *args, **kwargs)
return term._indexed_sum_cls.create(term, ranges=(idx_range,))
return sum | 8d1a1c97e28153b24a6e958b2694aac269b69f22 | 6,802 |
from fractions import Fraction
def quad1(P):
"""Return the squared difference of the two fractions packed in P.
Arguments:
P (tuple): Four integers (x1, z1, x2, z2) encoding the fractions x1/z1 and x2/z2.
Returns:
Fraction: The value (x1/z1 - x2/z2)**2.
"""
x1, z1, x2, z2 = P
return (Fraction(x1, z1) - Fraction(x2, z2))**2 | f3e9c34740038242c29f4abbe168df573da12390 | 6,803 |
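A quick sanity check of the arithmetic, assuming the argument is packed as (x1, z1, x2, z2) and using Fraction as imported above:

# (1/2 - 1/3)**2 == (1/6)**2 == 1/36
assert quad1((1, 2, 1, 3)) == Fraction(1, 36)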
def update_position(position, velocity):
"""
:param position: the previous (current) position of a particle
:param velocity: the newest velocity calculated during this iteration (the new velocity is computed
before the new position)
:return: list - new position
"""
pos = []
length = len(position)
for i in range(length):
pos.append(position[i] + velocity[i])
return pos | 7734e4021d958f42d974401b78331bcd2911ac92 | 6,804 |
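For example, the update is a simple element-wise sum:

# Element-wise sum of position and velocity (with numpy arrays this would simply be position + velocity).
assert update_position([0.0, 1.0, 2.0], [0.5, -1.0, 0.25]) == [0.5, 0.0, 2.25]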
def respond_batch():
"""
responds with [{"batch": [{blacklist_1_name: true}, ]}]
"""
result = get_result(request)
return jsonify([{"batch": result}]) | 97b1ceaafa88aacba09fc0ba6c564e87bfb07b66 | 6,805 |
from typing import Union
import types
from typing import Iterable
def get_iterable_itemtype(obj):
"""Attempts to get an iterable's itemtype without iterating over it,
not even partly. Note that iterating over an iterable might modify
its inner state, e.g. if it is an iterator.
Note that obj is expected to be an iterable, not a typing.Iterable.
This function leverages various alternative ways to obtain that
info, e.g. by looking for type annotations of '__iter__' or '__getitem__'.
It is intended for (unknown) iterables, where the type cannot be obtained
via sampling without the risk of modifying inner state.
"""
# support further specific iterables on demand
if isinstance(obj, _typechecked_Iterable):
return obj.itemtype
try:
if isinstance(obj, range):
tpl = (deep_type(obj.start), deep_type(obj.stop), deep_type(obj.step))
return Union[tpl]
except TypeError:
# We're running Python 2
pass
if type(obj) is tuple:
tpl = tuple(deep_type(t) for t in obj)
return Union[tpl]
elif type(obj) is types.GeneratorType:
return get_generator_yield_type(obj)
else:
tp = deep_type(obj)
if is_Generic(tp):
if issubclass(tp.__origin__, Iterable):
if len(tp.__args__) == 1:
return tp.__args__[0]
return _select_Generic_superclass_parameters(tp, Iterable)[0]
if is_iterable(obj):
if type(obj) is str:
return str
if hasattr(obj, '__iter__'):
if has_type_hints(obj.__iter__):
itrator = _funcsigtypes(obj.__iter__, True, obj.__class__)[1]
if is_Generic(itrator) and itrator.__origin__ is _orig_Iterator:
return itrator.__args__[0]
if hasattr(obj, '__getitem__'):
if has_type_hints(obj.__getitem__):
itrator = _funcsigtypes(obj.__getitem__, True, obj.__class__)[1]
if is_Generic(itrator) and itrator.__origin__ is _orig_Iterator:
return itrator.__args__[0]
return None # means that type is unknown
else:
raise TypeError('Not an iterable: '+str(type(obj))) | 103a3928e4a161f119c8664e58ba7e6270a94a14 | 6,806 |
import re
import docker
def getTimerIPs():
"""
returns list of ip addr
"""
client = docker.from_env()
container_list = client.containers.list()
timer_ip_list = []
for container in container_list:
if re.search("^timer[1-9][0-9]*", container.name):
out = container.exec_run("awk 'END{print $1}' /etc/hosts", stdout=True)
timer_ip_list.append(out.output.decode().split("\n")[0])
client.close()
return timer_ip_list | e4bc28407fb8b292df9a813809998bf6b323c938 | 6,807 |
def country_buttons():
"""Generates the country buttons for the layout
TODO(@[email protected])
Fix to use this one instead of the dropdown menu
Returns:
dbcButtonGroup -- A button group of all countries
"""
countries = [{'label': '🇸🇪 Sweden',
'value': 'Sweden'
},
{
'label': '🇫🇮 Finland',
'value': 'Finland'
},
{
'label': '🇳🇴 Norway',
'value': 'Norway'
},
{
'label': '🇩🇰 Denmark',
'value': 'Denmark'
},
{
'label': '🇮🇸 Iceland',
'value': 'Iceland'
}]
button_style = {
'padding': '.25rem .5rem',
'font-size': '10px',
'line-height': '1',
'border-radius': '10px',
'height': '25px',
'align-items': 'center',
'justify-content': 'center',
}
buttons = []
for country in countries:
buttons.append(dbc.Button(
country['label'], id=country['value'], style=button_style))
return dbc.ButtonGroup(buttons, id="country_buttons") | 3a149d37ae2457c84cd2fd7ae774b5a7e4c7bbbf | 6,808 |
def BRepBlend_BlendTool_NbSamplesV(*args):
"""
:param S:
:type S: Handle_Adaptor3d_HSurface &
:param v1:
:type v1: float
:param v2:
:type v2: float
:rtype: int
"""
return _BRepBlend.BRepBlend_BlendTool_NbSamplesV(*args) | 99af0513463ce64d5369fde226e679e48a7e397a | 6,809 |
def patch_urllib(monkeypatch, requests_monitor):
"""
Patch urllib to provide the following features:
- Retry failed requests. Makes test runs more stable.
- Track statistics with RequestsMonitor.
Retries could have been implemented differently:
- In test.geocoders.util.GeocoderTestBase._make_request. The issue
is that proxy tests use raw urlopen on the proxy server side,
which will not be covered by _make_request.
- With pytest plugins, such as pytest-rerunfailures. This
might be a good alternative, however, they don't distinguish
between network and test logic failures (the latter shouldn't
be re-run).
"""
def mock_factory(do_open):
def wrapped_do_open(self, conn, req, *args, **kwargs):
requests_monitor.record_request(req)
retries = max_retries
netloc = netloc_from_req(req)
is_proxied = req.host != netloc
if is_proxied or netloc in no_retries_for_hosts:
# XXX If there's a system proxy enabled, the failed requests
# won't be retried at all because of this check.
# We need to disable retries for proxies in order to
# not retry requests to the local proxy server set up in
# tests/proxy_server.py, which breaks request counters
# in tests/test_proxy.py.
# Perhaps we could also check that `req.host` points
# to localhost?
retries = 0
for i in range(retries + 1):
try:
start = default_timer()
resp = do_open(self, conn, req, *args, **kwargs)
end = default_timer()
if i == retries or resp.getcode() not in retry_status_codes:
# Note: we shouldn't blindly retry on any >=400 code,
# because some of them are actually expected in tests
# (like input validation verification).
# TODO Retry failures with the 200 code?
# Some geocoders return failures with 200 code
# (like GoogleV3 for Quota Exceeded).
# Should we detect this somehow to restart such requests?
requests_monitor.record_response(req, resp, end - start)
return resp
except: # noqa
if i == retries:
raise
requests_monitor.record_retry(req)
sleep(error_wait_seconds)
raise RuntimeError("Should not have been reached")
return wrapped_do_open
original_http_do_open = HTTPHandler.do_open
original_https_do_open = HTTPSHandler.do_open
monkeypatch.setattr(http_handler_do_open, mock_factory(original_http_do_open))
monkeypatch.setattr(https_handler_do_open, mock_factory(original_https_do_open)) | 2e7dec88873351a0e95a3b6f963115d065b90a39 | 6,810 |
def get_response(session, viewstate, event_validation, event_target, outro=None, stream=False, hdfExport=''):
"""
Handles all the responses received from every request made to the website.
"""
url = "http://www.ssp.sp.gov.br/transparenciassp/"
data = [
('__EVENTTARGET', event_target),
('__EVENTARGUMENT', ''),
('__VIEWSTATE', viewstate),
('__EVENTVALIDATION', event_validation),
('ctl00$cphBody$hdfExport', hdfExport),
]
if outro:
data.append(('ctl00$cphBody$filtroDepartamento', '0'))
data.append(('__LASTFOCUS', ''))
response = session.post(url, headers=headers, data=data, stream=stream)
return response | 0bb31b29fb8fb8a0fe007f87d88b8d131fd4308c | 6,811 |
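A hedged sketch of how the hidden ASP.NET form fields might be scraped before calling get_response; BeautifulSoup and the exact field ids are assumptions, not part of the original module:

# Hypothetical helper: extract __VIEWSTATE / __EVENTVALIDATION from a fetched page
# so they can be passed to get_response(). Assumes bs4 is installed and the page
# exposes the standard ASP.NET hidden inputs.
from bs4 import BeautifulSoup

def extract_state_fields(html_text):
    soup = BeautifulSoup(html_text, "html.parser")
    viewstate = soup.find("input", id="__VIEWSTATE")["value"]
    event_validation = soup.find("input", id="__EVENTVALIDATION")["value"]
    return viewstate, event_validation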
def matchVuln(vuln, element, criteria):
"""
================================================================================
Name:
matchVuln
Description:
Checks whether the given element of a VULN matches the supplied criteria.
Parameter(s):
vuln: The VULN element to be searched.
element: The element to find.
criteria: The search criteria against which to match.
Returns:
True: If a match is found.
False: If a match is not found.
Notes:
N/A
================================================================================
"""
if (getVulnElementValue(vuln, element) == criteria): return True
return False | 7158b263fd70e921b0b131fd8f2537223521570f | 6,812 |
def require(*modules):
"""Check if the given modules are already available; if not add them to
the dependency list."""
deplist = []
for module in modules:
try:
__import__(module)
except ImportError:
deplist.append(module)
return deplist | 88df83cd33d8bddea63e4d2fbfb4d8351a3c23b1 | 6,813 |
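Example usage (the second module name is assumed not to be installed):

# 'os' imports fine, so only the missing module ends up in the dependency list.
assert require("os", "module_that_does_not_exist") == ["module_that_does_not_exist"]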
def fixture_base_context(
env_name: str,
) -> dict:
"""Return a basic context"""
ctx = dict(
current_user="a_user",
current_host="a_host",
)
return ctx | fbfed439f784bdd64e93910bbb581955200af2bb | 6,814 |
import os
import pandas as pd
def parse_pharmvar(fn):
"""
Parse PharmVar gene data.
Parameters
----------
fn : str
Gene data directory.
"""
gene = os.path.basename(fn).split('-')[0]
rs_dict = {}
vfs = {'GRCh37': [], 'GRCh38': []}
alleles = {}
for i, assembly in enumerate(['GRCh37', 'GRCh38']):
for r, d, f in os.walk(f'{fn}/{assembly}'):
for file in f:
if file.endswith('.tsv'):
df = pd.read_table(f'{r}/{file}', comment='#')
if file.endswith('.vcf'):
vf = pyvcf.VcfFrame.from_file(f'{r}/{file}')
vfs[assembly].append(vf)
chrom = vfs['GRCh37'][0].contigs[0]
for j, r in df.iterrows():
name = r['Haplotype Name'].replace(gene, '')
if name not in alleles:
alleles[name] = [[], []]
if pd.isna(r['Variant Allele']):
continue
variant = f"{chrom}-{r['Variant Start']}-{r['Reference Allele']}-{r['Variant Allele']}"
rs_dict[variant] = r['rsID']
alleles[name][i].append(variant)
variants = {'GRCh37': {}, 'GRCh38': {}}
for name in alleles:
for i, assembly in enumerate(['GRCh37', 'GRCh38']):
for variant in alleles[name][i]:
if variant not in variants[assembly]:
variants[assembly][variant] = []
if name not in variants[assembly][variant]:
variants[assembly][variant].append(name)
for name in alleles:
alleles[name] = [','.join(alleles[name][0]), ','.join(alleles[name][1])]
df1 = pd.DataFrame(alleles).T
df1.columns = ['GRCh37', 'GRCh38']
df1 = df1.replace('', 'N/A')
df1.to_csv(f'{gene}-allele-table.csv')
def func(r):
if len(r.REF) == len(r.ALT) == 1:
return f'{r.CHROM}-{r.POS}-{r.REF}-{r.ALT}'
elif len(r.REF) == 1 and len(r.ALT) > 1:
return f'{r.CHROM}-{r.POS}---{r.ALT[1:]}'
elif len(r.REF) > 1 and len(r.ALT) == 1:
return f'{r.CHROM}-{r.POS+1}-{r.REF[1:]}--'
else:
raise ValueError('Something went wrong')
for assembly in ['GRCh37', 'GRCh38']:
df2 = pyvcf.merge(vfs[assembly]).update_chr_prefix(mode='remove').df
df2['Name'] = df2.apply(func, axis=1)
df2['Alleles'] = df2.apply(lambda r: ','.join(variants[assembly][r.Name]), axis=1)
df2['rsID'] = df2.apply(lambda r: rs_dict[r.Name], axis=1)
df2.to_csv(f'{gene}-{assembly}.csv') | a62f1c57602f3093f9c2c1e5a70389baef6094bd | 6,815 |
from typing import Optional
from typing import List
import yaml
def definition(server: KedroLanguageServer, params: TextDocumentPositionParams) -> Optional[List[Location]]:
"""Support Goto Definition for a dataset or parameter.
Currently only support catalog defined in `conf/base`
"""
if not server.is_kedro_project():
return None
document = server.workspace.get_document(params.text_document.uri)
word = _word_at_position(params.position, document)
if word.startswith("params:"):
param_location = _get_param_location(server.project_metadata, word)
if param_location:
return [param_location]
catalog_paths = get_conf_paths(server.project_metadata)
for catalog_path in catalog_paths:
catalog_conf = yaml.load(catalog_path.read_text(), Loader=SafeLineLoader)
if word in catalog_conf:
line = catalog_conf[word]["__line__"]
location = Location(
uri=f"file://{catalog_path}",
range=Range(
start=Position(line=line - 1, character=0),
end=Position(
line=line,
character=0,
),
),
)
return [location]
return None | 7713d92fafa2f0acf68ee34b4dc83f1d5100a9b3 | 6,816 |
def augmented_neighbors_list(q_id,
neighbors,
is_training,
processor,
train_eval=False):
"""Retrieve and convert the neighbors to a list.
Args:
q_id: a question id
neighbors: a table mapping q_id to a list of top candidates
is_training: True for training set examples
processor: Helper object
train_eval: If this is on, we have a sub-set of the training set for which
we don't add the gold answer if it is not in the neighbors list
Returns:
lists of passage ids, list of corresponding labels, list of scores,
and the index of the first random negative
"""
n_pb = neighbors[q_id]
n_list = []
n_labels = []
n_scores = [] # the higher, the better
n_positive = 0
answers = processor.get_answers(q_id)
for n in range(len(n_pb)):
if n >= FLAGS.max_neighbors:
break # ignore any later neighbors
next_n = n_pb[n]
if processor.answer_match(q_id, next_n[0], answers):
n_list.append(next_n[0])
n_labels.append(1)
n_scores.append(-next_n[1])
n_positive += 1
else:
# see if we keep it
n_list.append(next_n[0])
n_labels.append(0)
n_scores.append(-next_n[1])
if not n_positive:
if (is_training or FLAGS.add_gold_to_eval):
gold_p_id = processor.get_gold_passage_id(q_id)
if gold_p_id is None and is_training:
print("Did not find answer matches.")
return [], [], [], 0
if gold_p_id is not None:
n_list.append(gold_p_id)
n_labels.append(1)
prior_gold = 0
n_scores.append(prior_gold)
n_positive += 1
else:
if is_training:
print("Did not find answer matches.")
return [], [], [], 0
# add the same number of random examples as we have neighbors
# we should add about
# (FLAGS.num_candidates -1) * FLAGS. train_records_per_query/2 random
index_rand_start = len(n_list)
num_random = index_rand_start
if is_training and not train_eval: # getting fewer random for speed
num_random = (int)(
(FLAGS.num_candidates - 1) * FLAGS.train_records_per_query / 2)
if FLAGS.add_random:
random_passages = processor.get_random(num_random)
random_labels = []
random_scores = [0] * num_random
for r in range(len(random_passages)):
n_scores.append(random_scores[r])
if processor.answer_match(q_id, random_passages[r], answers):
random_labels.append(1)
else:
random_labels.append(0)
n_list.extend(random_passages)
n_labels.extend(random_labels)
return n_list, n_labels, n_scores, index_rand_start | 3ae8756a60fdfa4ce3fc6de91d364c5edebcc0ff | 6,817 |
import numpy as np
from scipy.optimize import leastsq
def estimate_tau_exp(chains, **kwargs):
"""
Estimate the exponential auto-correlation time for all parameters in a chain.
"""
# Calculate the normalised autocorrelation function in each parameter.
rho = np.nan * np.ones(chains.shape[1:])
for i in range(chains.shape[2]):
try:
rho[:, i] = autocorr.function(np.mean(chains[:, :, i], axis=0),
**kwargs)
except:
continue
# Take the max rho at any step.
rho_max = np.max(rho, axis=1)
# Now fit the max rho with an exponential profile.
x = np.arange(rho_max.size)
func = lambda tau_exp: np.exp(-x/tau_exp)
chi = lambda tau_exp: func(tau_exp[0]) - rho_max # tau_exp is a list
# Start with 50% of the chain length. probably OK.
tau_exp, ier = leastsq(chi, [chains.shape[1]/2.])
return (tau_exp, rho, func(tau_exp)) | 3de72cec6fa079913489c9c1b9b72ff572cedf60 | 6,818 |
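A self-contained sketch of the fitting step on synthetic data (numpy and scipy assumed), showing how the single-exponential profile recovers a known autocorrelation time:

# Standalone illustration of the exponential-profile fit used above.
import numpy as np
from scipy.optimize import leastsq

true_tau = 40.0
x = np.arange(500)
rho_max = np.exp(-x / true_tau)               # synthetic max-autocorrelation curve
chi = lambda p: np.exp(-x / p[0]) - rho_max   # residuals; p is a 1-element array
tau_fit, ier = leastsq(chi, [250.0])          # start at 50% of the "chain length"
print(tau_fit[0])                             # recovers ~40.0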
import time
from gensim.models import LdaMulticore
def lda_model_onepass(dictionary, corpus, topics):
"""Create a single pass LDA model"""
start_time = time.time()
model = LdaMulticore(corpus, id2word = dictionary, num_topics = topics)
model.save(""./data/lda/all_topics_single.lda"")
print(model.print_topics(-1))
print("\nDone in {}".format(time.time() - start_time))
return model | 3a88250af8c83fb23112b15cacaad39eeaebb27c | 6,819 |
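A hedged sketch of how the (dictionary, corpus) pair might be prepared before calling the function; the toy documents are made up, and gensim's standard Dictionary/doc2bow API is assumed:

# Hypothetical preparation of the inputs expected by lda_model_onepass().
from gensim.corpora import Dictionary

texts = [["human", "interface", "computer"],
         ["graph", "trees", "computer"],
         ["graph", "minors", "survey"]]
dictionary = Dictionary(texts)                          # token -> id mapping
corpus = [dictionary.doc2bow(text) for text in texts]   # bag-of-words vectors
# model = lda_model_onepass(dictionary, corpus, topics=2)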
import dataclasses
import pydantic
def paramclass(cls: type) -> type:
""" Parameter-Class Creation Decorator
Transforms a class-definition full of Params into a type-validated dataclass,
with methods for default value and description-dictionary retrieval.
Hdl21's `paramclass`es are immutable, strongly-typed data-storage structures.
They are defined through a syntax similar to `@dataclass`, but using the `Param`
constructor, and assignment rather than type annotation.
@paramclass
class C:
reqd = Param(dtype=int, desc="A Required Parameter")
optn = Param(dtype=int, desc="An Optional Parameter", default=11)
`Param`s each have required datatype (`dtype`) and description (`desc`) fields,
and optional default values.
Each `paramclass` constructor can be called with ordered arguments,
in the order defined in the `paramclass`, or with named arguments.
Named arguments are highly recommended for more than a single parameter.
Note Python's function-argument ordering requirements also dictate
that all `paramclass` required-arguments be declared *before* any optional arguments.
This also reinforces good practice for communicating which parameters are required.
Each `paramclass` comes with class-methods `descriptions` and `defaults`,
which return dictionaries of the parameter names to descriptions and
names to default values (for those with defaults), respectively.
Requirements of the input `cls`:
* *All* non-Python-internal fields must be of type `Param`
* Inheritance is not supported
"""
if cls.__bases__ != (object,):
raise RuntimeError(f"Invalid @hdl21.paramclass inheriting from {cls.__bases__}")
protected_names = ["descriptions", "defaults"]
dunders = dict()
params = dict()
# Take a lap through the class dictionary, type-check everything and grab Params
for key, val in cls.__dict__.items():
if key in protected_names:
raise RuntimeError(f"Invalid field name {key} in paramclass {cls}")
elif key.startswith("__"):
dunders[key] = val
elif isinstance(val, Param):
params[key] = val
else:
raise RuntimeError(
f"Invalid class-attribute {key} in paramclass {cls}. All attributes should be `hdl21.Param`s."
)
# Translate the Params into dataclass.field-compatible tuples
fields = list()
for name, par in params.items():
field = [name, par.dtype]
if par.default is not _default:
field.append(dataclasses.field(default=par.default))
# Default factories: not supported, yet. See `Param` below.
# elif par.default_factory is not _default:
# field.append(dataclasses.field(default_factory=par.default_factory))
fields.append(tuple(field))
# Add a few helpers to the class namespace
ns = dict(
__params__=params,
__paramclass__=True,
descriptions=classmethod(
lambda cls: {k: v.desc for k, v in cls.__params__.items()}
),
defaults=classmethod(
lambda cls: {
k: v.default
for k, v in cls.__params__.items()
if v.default is not _default
}
),
)
# Create ourselves a (std-lib) dataclass
cls = dataclasses.make_dataclass(cls.__name__, fields, namespace=ns, frozen=True)
# Pass this through the pydantic dataclass-decorator-function
cls = pydantic.dataclasses.dataclass(cls, frozen=True)
# Pydantic seems to want to add this one *after* class-creation
def _brick_subclassing_(cls, *_, **__):
msg = f"Error: attempt to sub-class `hdl21.paramclass` {cls} is not supported"
raise RuntimeError(msg)
cls.__init_subclass__ = classmethod(_brick_subclassing_)
# And don't forget to return it!
return cls | 5f5b4b6612d3afc7858a4b26419d9238aaf6ec92 | 6,820 |
import string
from nltk.corpus import stopwords
def text_process(mess):
"""
Takes in a string of text, then performs the following:
1. Remove all punctuation
2. Remove all stopwords
3. Returns a list of the cleaned text
"""
# Check characters to see if they are in punctuation
nopunc = [char for char in mess if char not in string.punctuation]
# Join the characters again to form the string.
nopunc = ''.join(nopunc)
# Now just remove any stopwords
words = [word for word in nopunc.split() if word.lower() not in stopwords.words('english')]
words = [word for word in words if word.lower() not in pills['BrandName'].values]
# words = [word for word in words if word.lower() not in pills['ChemName'].values]
words = [word.lower() for word in words if word.isalpha()]
words = [word.lower() for word in words if len(word) > 2]
return words | 9069df05eb4d1b87c2091a64e7dd55754e362334 | 6,821 |
def IntCurveSurface_ThePolyhedronToolOfHInter_IsOnBound(*args):
"""
:param thePolyh:
:type thePolyh: IntCurveSurface_ThePolyhedronOfHInter &
:param Index1:
:type Index1: int
:param Index2:
:type Index2: int
:rtype: bool
"""
return _IntCurveSurface.IntCurveSurface_ThePolyhedronToolOfHInter_IsOnBound(*args) | bd64cb058793730197a805d7660fe4c8dc4f7af5 | 6,822 |
import mrcfile
def read_mrc_like_matlab(mrc_file):
""" Read MRC stack and make sure stack is 'Fortran indexed' before returning it. """
mrc_stack = mrcfile.open(mrc_file).data
fortran_indexed_stack = c_to_fortran(mrc_stack)
return fortran_indexed_stack | 245a7371e94ae6c05248d24e231ce56afc937dd1 | 6,823 |
from typing import List
def freeze_session(session: tf.Session,
keep_var_names: List[str] = None,
output_names: List[str] = None,
clear_devices: bool = True) -> tf.GraphDef:
"""
Freezes the state of a session into a pruned computation graph.
Creates a new computation graph where variable nodes are replaced by
constants taking their current value in the session. The new graph will be
pruned so subgraphs that are not necessary to compute the requested
outputs are removed.
:param session: The TensorFlow session to be frozen.
:param keep_var_names: A list of variable names that should not be frozen,
or None to freeze all the variables in the graph.
:param output_names: Names of the relevant graph outputs.
:param clear_devices: Remove the device directives from the graph for better
portability.
:return The frozen graph definition.
"""
graph = session.graph
with graph.as_default():
freeze_var_names = list(
set(v.op.name for v in tf.global_variables()).difference(
keep_var_names or []))
output_names = output_names or []
output_names += [v.op.name for v in tf.global_variables()]
input_graph_def = graph.as_graph_def()
if clear_devices:
for node in input_graph_def.node:
node.device = ""
frozen_graph = tf.graph_util.convert_variables_to_constants(
session, input_graph_def, output_names, freeze_var_names)
return frozen_graph | 4754de754217031ac6151bc3360b6969a46a4e66 | 6,824 |
import numpy as np
def linear_regression_noreg(X, y):
"""
Compute the weight parameter given X and y.
Inputs:
- X: A numpy array of shape (num_samples, D) containing feature.
- y: A numpy array of shape (num_samples, ) containing label
Returns:
- w: a numpy array of shape (D, )
"""
#####################################################
# TODO 2: Fill in your code here #
#####################################################
tmp_mult_x = np.matmul(np.linalg.inv(np.matmul(X.transpose(),X)),X.transpose())
if len(tmp_mult_x.shape) == 1:
xr = 1
xc = tmp_mult_x.shape[0]
else:
xr,xc = tmp_mult_x.shape
if len(y.shape) == 1:
yr = 1
yc = y.shape[0]
else:
yr,yc = y.shape
# if X, y both right
if xc == yr:
return np.matmul(tmp_mult_x,y)
# if X is not right and y right
elif xr == yr:
# transpose() is not in-place; the result must be re-assigned
tmp_mult_x = tmp_mult_x.transpose()
# if X is right and y is wrong
elif xc == yc:
y = y.transpose()
# if X and y are both wrong
elif xr == yc:
tmp_mult_x = tmp_mult_x.transpose()
y = y.transpose()
return np.matmul(tmp_mult_x, y)
# w = None
# return w | e1a130dbf1c75db929779cc3d9d6b6097521c02e | 6,825 |
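A standalone numerical check of the closed-form (normal-equation) solution the function implements; numpy's lstsq is used only as a reference:

# Verify w = (X^T X)^{-1} X^T y against np.linalg.lstsq on toy data.
import numpy as np

rng = np.random.default_rng(0)
X = rng.normal(size=(50, 3))
y = X @ np.array([1.5, -2.0, 0.5]) + 0.01 * rng.normal(size=50)

w_closed_form = np.linalg.inv(X.T @ X) @ X.T @ y
w_reference, *_ = np.linalg.lstsq(X, y, rcond=None)
assert np.allclose(w_closed_form, w_reference)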
import os
def _update_core_rrds(data, core_metrics_dir, rrdclient, step, sys_maj_min):
"""Update core rrds"""
interval = int(step) * 2
total = 0
for cgrp in data:
rrd_basename = CORE_RRDS[cgrp]
rrdfile = os.path.join(core_metrics_dir, rrd_basename)
rrd.prepare(rrdclient, rrdfile, step, interval)
if rrd.update(rrdclient, rrdfile, data[cgrp], sys_maj_min):
total += 1
return total | 21ce7c4002b7c07c7f072cfb802702262a8fdaeb | 6,826 |
def evaluation(evaluators, dataset, runners, execution_results, result_data):
"""Evaluate the model outputs.
Args:
evaluators: List of tuples of series and evaluation functions.
dataset: Dataset against which the evaluation is done.
runners: List of runners (contains series ids and loss names).
execution_results: Execution results that include the loss values.
result_data: Dictionary from series names to list of outputs.
Returns:
Dictionary of evaluation names and their values which includes the
metrics applied on respective series loss and loss values from the run.
"""
eval_result = {}
# losses
for runner, result in zip(runners, execution_results):
for name, value in zip(runner.loss_names, result.losses):
eval_result["{}/{}".format(runner.output_series, name)] = value
# evaluation metrics
for generated_id, dataset_id, function in evaluators:
if (not dataset.has_series(dataset_id)
or generated_id not in result_data):
continue
desired_output = dataset.get_series(dataset_id)
model_output = result_data[generated_id]
eval_result["{}/{}".format(generated_id, function.name)] = function(
model_output, desired_output)
return eval_result | ef3470edb8b2336bdc54507a5df8023f8095b995 | 6,827 |
def bestof(reps, func, *args, **kwargs):
"""Quickest func() among reps runs.
Returns (best time, last result)
"""
best = 2 ** 32
for i in range(reps):
start = timer()
ret = func(*args, **kwargs)
elapsed = timer() - start
if elapsed < best: best = elapsed
return (best, ret) | 975d106a79b79cab3bc287d8b658585f45dd648d | 6,828 |
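Example usage, assuming the module-level `timer` referenced above is something like `timeit.default_timer` (its definition is not shown in the snippet):

# Assumes `timer` is timeit.default_timer.
from timeit import default_timer as timer

best_time, result = bestof(5, sum, range(1000))
print(result)        # 499500
print(best_time)     # best wall-clock time of the five runs, in seconds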
import tempfile
def gdal_aspect_analysis(dem, output=None, flat_values_are_zero=False):
"""Return the aspect of the terrain from the DEM.
The aspect is the compass direction of the steepest slope (0: North, 90: East, 180: South, 270: West).
Parameters
----------
dem : str
Path to file storing DEM.
output : str
Path to output file.
flat_values_are_zero: bool
If True, flat areas are assigned the value zero; otherwise they are assigned -9999. Default: False.
Returns
-------
ndarray
Aspect array.
Notes
-----
Ensure that the DEM is in a *projected coordinate*, not a geographic coordinate system, so that the
horizontal scale is the same as the vertical scale (m).
"""
if output is None:
output = tempfile.NamedTemporaryFile().name
DEMProcessing(destName=output, srcDS=dem, processing='aspect', zeroForFlat=flat_values_are_zero,
format='GTiff', band=1, creationOptions=[GDAL_TIFF_COMPRESSION_OPTION, ])
with rasterio.open(output) as src:
return np.ma.masked_values(src.read(1), value=-9999) | ec8aa51f799368508f78dcff81ae991087b56132 | 6,829 |
import requests
import json
def _handle_braze_response(response: requests.Response) -> int:
"""Handles server response from Braze API.
The amount of requests made is well
below the limits for the given API endpoint therefore Too Many Requests
API errors are not expected. In case they do, however, occur - the API
calls will be re-tried, up to `MAX_API_RETRIES`, using exponential delay.
In case of a server error, the same strategy will be applied. After max
retries have been reached, the execution will terminate.
In case users were posted but there were minor mistakes, the errors will be
logged. In case the API received data in an unexpected format, the data
that caused the issue will be logged.
In any unexpected client API error (other than 400), the function execution
will terminate.
:param response: Response from the API
:return: Number of users that resulted in an error
:raise APIRetryError: On a 429 or 500 server error
:raise FatalAPIError: After `MAX_API_RETRIES` unsuccessful retries, or on
any non-400 client error
"""
res_text = json.loads(response.text)
if response.status_code == 201 and 'errors' in res_text:
print(
f"Encountered errors processing some users: {res_text['errors']}")
return len(res_text['errors'])
if response.status_code == 400:
print(f"Encountered error for user chunk. {response.text}")
return 0
server_error = response.status_code == 429 or response.status_code >= 500
if server_error:
raise APIRetryError("Server error. Retrying..")
if response.status_code > 400:
raise FatalAPIError(res_text.get('message', response.text))
return 0 | da8aca622f7a4812235797501a1afe56cc760ea4 | 6,830 |
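A minimal sketch of the calling side described in the docstring, retrying with an exponential delay on APIRetryError; the endpoint URL, payload shape, and MAX_API_RETRIES constant are assumptions, and APIRetryError is assumed to be in scope alongside the function above:

# Hypothetical caller implementing the retry strategy described above.
import time
import requests

MAX_API_RETRIES = 5   # assumed constant, referenced in the docstring

def post_users_with_retry(url, headers, payload):
    for attempt in range(MAX_API_RETRIES):
        response = requests.post(url, headers=headers, json=payload)
        try:
            return _handle_braze_response(response)
        except APIRetryError:
            if attempt == MAX_API_RETRIES - 1:
                raise
            time.sleep(2 ** attempt)   # exponential delay before the next retry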
def unpack_file(filepath, tmpdir):
"""
Attempt to unpack file.
filepath is the path to the file that should be attempted unpacked.
tmpdir is a path to a temporary directory unique to this thread where
the thread will attempt to unpack files to.
Returns a list of unpacked files or an empty list.
"""
# Other unpacking tools have been removed due to
# lacking reliability and usefulness of the tools.
# If multiple unpacking tools are to be used here,
# subdirectories below tmpdir should be created for each
# tool to avoid tools overwriting output of each other.
# Attempt static unpacking with ClamAV. Return unpacked files.
return clam_unpack(filepath, tmpdir) | 79fb80fe61145e865b128587525bc743d19e2ad0 | 6,831 |
def xarray_image_as_png(img_data, loop_over=None, animate=False, frame_duration=1000):
"""
Render an Xarray image as a PNG.
:param img_data: An xarray dataset, containing 3 or 4 uint8 variables: red, greed, blue, and optionally alpha.
:param loop_over: Optional name of a dimension on img_data. If set, xarray_image_as_png is called in a loop
over all coordinate values for the named dimension.
:param animate: Optional. Generate an animated PNG (APNG) instead of a list of stills.
:param frame_duration: Duration of each animation frame in milliseconds (used only when animate is True). Defaults to 1000.
:return: A list of bytes representing a PNG image file. (Or a list of lists of bytes, if loop_over was set.)
"""
if loop_over and not animate:
return [
xarray_image_as_png(img_data.sel(**{loop_over: coord}))
for coord in img_data.coords[loop_over].values
]
xcoord = None
ycoord = None
for cc in ("x", "longitude", "Longitude", "long", "lon"):
if cc in img_data.coords:
xcoord = cc
break
for cc in ("y", "latitude", "Latitude", "lat"):
if cc in img_data.coords:
ycoord = cc
break
if not xcoord or not ycoord:
raise Exception("Could not identify spatial coordinates")
width = len(img_data.coords[xcoord])
height = len(img_data.coords[ycoord])
img_io = BytesIO()
# Render XArray to APNG via Pillow
# https://pillow.readthedocs.io/en/stable/handbook/image-file-formats.html#apng-sequences
if loop_over and animate:
time_slices_array = [
xarray_image_as_png(img_data.sel(**{loop_over: coord}), animate=True)
for coord in img_data.coords[loop_over].values
]
images = []
for t_slice in time_slices_array:
im = Image.fromarray(t_slice, "RGBA")
images.append(im)
images[0].save(img_io, "PNG", save_all=True, default_image=True, loop=0, duration=frame_duration, append_images=images)
img_io.seek(0)
return img_io.read()
if "time" in img_data.dims:
img_data = img_data.squeeze(dim="time", drop=True)
pillow_data = render_frame(img_data.transpose(xcoord, ycoord), width, height)
if not loop_over and animate:
return pillow_data
# Change PNG rendering to Pillow
im_final = Image.fromarray(pillow_data, "RGBA")
im_final.save(img_io, "PNG")
img_io.seek(0)
return img_io.read() | 201cb29144054417d7eb743690601ae37558dfbd | 6,832 |
def x_section_from_latlon(elevation_file,
x_section_lat0,
x_section_lon0,
x_section_lat1,
x_section_lon1,
as_polygon=False,
auto_clean=False):
"""
This workflow extracts a cross section from a DEM
based on the input latitude and longitude point pairs.
Parameters:
-----------
elevation_file: str
Path to the elevation DEM.
x_section_lat0: float
The first coordinate latitude.
x_section_lon0: float
The first coordinate longitude.
x_section_lat1: float
The second coordinate latitude.
x_section_lon1: float
The second coordinate longitude.
as_polygon: bool, optional
If True, will return cross section as a
:obj:`shapely.geometry.Polygon`. Default is False.
auto_clean: bool, optional
If True, will attempt to clean any issues from the polygon.
Default is False.
Returns:
--------
list or :obj:`shapely.geometry.Polygon`
Cross section information.
The list will be xy coordinate pairs.
Example::
from shapely.geometry import Polygon
from xman.xsect import x_section_from_latlon
elevation_file = '/path/to/elevation.tif'
lat1 = 34.105265417341442
lon1 = 38.993958690587505
lat2 = 34.107264451129197
lon2 = 38.99355588515526
x_sect_list = x_section_from_latlon(elevation_file,
lat1,
lon1,
lat2,
lon2)
"""
utm_proj = utm_proj_from_latlon(x_section_lat0, x_section_lon0,
as_osr=True)
sp_ref = osr.SpatialReference()
sp_ref.ImportFromEPSG(4326)
geo_to_utm_trans = osr.CoordinateTransformation(sp_ref, utm_proj)
x_line_m = LineString((
geo_to_utm_trans.TransformPoint(x_section_lon0, x_section_lat0)[:2],
geo_to_utm_trans.TransformPoint(x_section_lon1, x_section_lat1)[:2]
))
elevation_utm_ggrid = GDALGrid(elevation_file).to_projection(utm_proj)
x_sect_list = []
for x_step in np.linspace(0, x_line_m.length, num=20):
x_point = x_line_m.interpolate(x_step)
x_sect_list.append((
x_step, elevation_utm_ggrid.get_val_coord(x_point.x, x_point.y)
))
if as_polygon or auto_clean:
x_sect_poly = Polygon(x_sect_list)
if not x_sect_poly.is_valid and auto_clean:
x_sect_poly = x_sect_poly.buffer(0)
print("WARNING: Cross section cleaned up.")
if hasattr(x_sect_poly, 'geoms'):
if len(x_sect_poly.geoms) > 1:
largest_poly = x_sect_poly.geoms[0]
for geom_poly in x_sect_poly.geoms[1:]:
if geom_poly.area > largest_poly.area:
largest_poly = geom_poly
x_sect_poly = largest_poly
if as_polygon:
return x_sect_poly
x_coords, y_coords = x_sect_poly.exterior.coords.xy
return list(zip(x_coords, y_coords))
return x_sect_list | 0773cf535ee18ff91db805d692687c67bf6b2ed4 | 6,833 |
import re
def convert_not_inline(line):
""" Convert the rest of part which are not inline code but might impact inline code
This part deals with the following markdown syntax
- strong
- scratch
- italics
- image
- link
- checkbox
- highlight
:param line: str, the not inline code part of markdown
:return: str, the html format
"""
# deal with strong
line = strong(line)
# Scratch
line = scratch(line)
# italics
line = italics(line)
# highlight
line = highlight(line)
# image
while len(re.match(r'((?P<pre_text>.*)!\[(?P<alt_text>.*)\]\((?P<link>.*)\)(?P<after_text>.*))*', line).group()) \
!= 0:
match = re.match(r'((?P<pre_text>.*)!\[(?P<alt_text>.*)\]\((?P<link>.*)\)(?P<after_text>.*))*', line)
pre_text = match.group('pre_text')
alt_text = match.group('alt_text')
link = match.group('link')
after_text = match.group('after_text')
# scale image
if len(re.match(r'((?P<pre_link>.*)#scale=(?P<scale>[0-9]*))*', link).group()) != 0:
match_scale = re.match(r'((?P<pre_link>.*)#scale=(?P<scale>[0-9]*))*', link)
scale = match_scale.group('scale')
img_html = '<img style="display: block; margin-left: auto; margin-right: auto; height:' + str(scale) + '%" src="' + link + '" alt="' + alt_text + '">'
else:
img_html = '<img style="display: block; margin-left: auto; margin-right: auto;" src="' + link + '" alt="' + alt_text + '">'
line = pre_text + img_html + after_text
# link
while len(re.match(r'((?P<pre_text>.*)\[(?P<alt_text>.*)\]\((?P<link>.*)\)(?P<after_text>.*))*', line).group()) \
!= 0:
match = re.match(r'((?P<pre_text>.*)\[(?P<alt_text>.*)\]\((?P<link>.*)\)(?P<after_text>.*))*', line)
pre_text = match.group('pre_text')
alt_text = match.group('alt_text')
link = match.group('link')
if len(link) != 0 and link[0] == '#':
link = link.replace(' ', '-')
after_text = match.group('after_text')
img_html = '<a href="' + link + '">' + alt_text + '</a>'
line = pre_text + img_html + after_text
return line | 871abca2977fc494036c6d5aa19de789cfbfd5b9 | 6,834 |
import numpy as NP
def uniform_square_aperture(side, skypos, frequency, skyunits='altaz',
east2ax1=None, pointing_center=None,
power=False):
"""
-----------------------------------------------------------------------------
Compute the electric field or power pattern pattern at the specified sky
positions due to a uniformly illuminated square aperture
Inputs:
side [scalar] Sides of the square (in m)
skypos [list or numpy vector] Sky positions at which the power pattern
is to be estimated. Size is M x N where M is the number of
locations, N = 2 (if skyunits = altaz denoting Alt-Az
coordinates), or N = 3 (if skyunits = dircos denoting direction
cosine coordinates). If skyunits = altaz, then altitude and
azimuth must be in degrees
frequency [list or numpy vector] frequencies (in GHz) at which the power
pattern is to be estimated. Frequencies differing by too much
and extending over the usual bands cannot be given.
Keyword Inputs:
skyunits [string] string specifying the coordinate system of the sky
positions. Accepted values are 'altaz', and 'dircos'.
Default = 'altaz'. If 'dircos', the direction cosines are
aligned with the local East, North, and Up. If 'altaz', then
altitude and azimuth must be in degrees.
east2ax1 [scalar] Angle (in degrees) the primary axis of the array makes
with the local East (positive anti-clockwise).
pointing_center
[list or numpy array] coordinates of pointing center (in the same
coordinate system as that of sky coordinates specified by
skyunits). 2-element vector if skyunits='altaz'. 2- or
3-element vector if skyunits='dircos'.
power [boolean] If set to True, compute power pattern, otherwise
compute field pattern (default=False).
Output:
Electric field pattern or power pattern, number of rows equal to the number
of sky positions (which is equal to the number of rows in skypos), and number
of columns equal to the number of wavelengths.
-----------------------------------------------------------------------------
"""
try:
side, skypos, frequency
except NameError:
raise NameError('Square antenna side, skypos, frequency must be specified')
if not isinstance(side, (int,float)):
raise TypeError('Antenna side must be a scalar')
sides = NP.asarray([side]*2, dtype=float)
ab = uniform_rectangular_aperture(sides, skypos, frequency,
skyunits=skyunits,
east2ax1=east2ax1,
pointing_center=pointing_center,
power=power)
return ab | 275249164bba5fae8f8652f8af1f2c8dc13c9525 | 6,835 |
from urllib.parse import quote_plus, urlsplit
def sources_table(citator):
"""
Return the content for an HTML table listing every template that the
citator can link to.
"""
rows = []
for template in citator.templates.values():
# skip templates that can't make URLs
if not template.__dict__.get('URL_builder'):
continue
URL = urlsplit(''.join(template.URL_builder.parts))
domain_URL = f'{URL.scheme}://{URL.netloc}'
domain_name = URL.hostname
regex = unify_regex(template, simplify_for_regexper=True)
rows.append(SOURCES_TABLE_ROW.format(
name=template.name,
domain_URL=domain_URL,
domain_name=domain_name,
escaped_regex=quote_plus(regex).replace('+', '%20')
))
return SOURCES_TABLE.format(rows=''.join(rows)) | 460a06e03e7ec6d5cee465001d9b828976a4da1b | 6,836 |
def parse_bot_commands(data, starterbot_id):
"""
Parses a list of events coming from the Slack RTM API to find bot commands.
If a bot command is found, this function returns a tuple of command and channel.
If its not found, then this function returns None, None.
"""
user_id, message = parse_direct_mention(data["text"])
print(f'user_id: {user_id}')
print(f'starterbot_id: {starterbot_id}')
if user_id == starterbot_id:
return message, data["channel"]
return None, None | 5d614cfdf55133180425a87aedac34896a54b552 | 6,837 |
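The helper `parse_direct_mention` is not shown; a sketch of what it is assumed to do, following the common Slack starter-bot pattern:

# Assumed shape of the missing helper: split a direct @-mention into
# (user_id, rest_of_message), or (None, None) when there is no mention.
import re

MENTION_REGEX = "^<@(|[WU].+?)>(.*)"

def parse_direct_mention(message_text):
    matches = re.search(MENTION_REGEX, message_text)
    return (matches.group(1), matches.group(2).strip()) if matches else (None, None)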
from copy import deepcopy
from typing import Callable
def construct_obj_in_dict(d: dict, cls: Callable) -> dict:
"""
Args
d (dict):
d[name][charge][annotation]
"""
if not isinstance(d, dict):
return d
else:
new_d = deepcopy(d)
for key, value in d.items():
if value.get("@class", "") == cls.__name__:
new_d[key] = cls.from_dict(value)
else:
new_d[key] = construct_obj_in_dict(value, cls)
return new_d | c069fb474a6a675f8d917483435856df506ff331 | 6,838 |
def signup_user(request):
"""
Function to sign up users that are not admins
:param request: This param contain all the information associated to the request
:param type request: Request
:return: The URL to render
:rtype: str
"""
try:
log = LoggerManager('info', 'singup_manager-info', session=request.session)
if request.method == 'POST':
form = ClientRegistrationForm(request.POST)
if form.is_valid():
form.save()
max_id = Account.objects.all().aggregate(Max('id'))['id__max']
user = Account.objects.filter(id=max_id)
web_group, created = Group.objects.get_or_create(name=request.user.email)
web_group.user_set.add(request.user.id)
web_group.user_set.add(user.get().id)
log.write_info(form.data)
return redirect('client_list')
else:
form = ClientRegistrationForm()
return render(request, 'registration/signup.html', {
'form': form
})
except Exception as ex:
log = LoggerManager('exception', 'singup_manager-exception', session=request.session)
log.write_exception(ex) | 3ea45a9b84cb8281f3afc6997230fdcbab75f045 | 6,839 |
import numpy as np
def haar_rand_state(dim: int) -> np.ndarray:
"""
Given a Hilbert space dimension dim this function returns a vector
representing a random pure state operator drawn from the Haar measure.
:param dim: Hilbert space dimension.
:return: Returns a dim by 1 vector drawn from the Haar measure.
"""
unitary = haar_rand_unitary(dim)
fiducial_vec = np.zeros((dim, 1))
fiducial_vec[0] = 1
return np.matmul(unitary, fiducial_vec) | 3d374fe32fee91667747df86d79f9feb08836c61 | 6,840 |
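`haar_rand_unitary` is not shown in this snippet; a sketch of the standard QR-based construction (Mezzadri's method), assuming that is what the helper does, using numpy as imported above:

# Sketch of the assumed haar_rand_unitary: QR-decompose a complex Ginibre matrix
# and fix the phases so the resulting unitary is Haar-distributed.
def haar_rand_unitary(dim, rng=None):
    rng = rng or np.random.default_rng()
    z = (rng.normal(size=(dim, dim)) + 1j * rng.normal(size=(dim, dim))) / np.sqrt(2)
    q, r = np.linalg.qr(z)
    d = np.diagonal(r)
    return q * (d / np.abs(d))   # rescale each column by a unit-modulus phase

# A Haar-random pure state then has unit norm: np.linalg.norm(haar_rand_state(4)) is ~1.0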
from pathlib import Path
def is_src_package(path: Path) -> bool:
"""Checks whether a package is of the form:
├─ src
│ └─ packagename
│ ├─ __init__.py
│ └─ ...
├─ tests
│ └─ ...
└─ setup.py
The check for the path will be if its a directory with only one subdirectory
containing an __init__.py file.
Parameters
----------
path : Path
Full path pointing to a dir.
Returns
-------
check : bool
If the package is an src package, returns True, False otherwise.
See Also
--------
is_package
"""
check: bool = False
if path.is_dir():
maybe_subdirs = list(path.iterdir())
if len(maybe_subdirs) == 1:
check = is_package(path / maybe_subdirs[0])
return check | 36bfd704a0a71a41e9943dc9a9d19cf5e46746f8 | 6,841 |
# NOTE: T, I, R, Optional, OneOrMore, merge, join, lbrct and rbrct used below are
# ChemDataExtractor parse elements/actions, not members of `re` or `typing`; the
# auto-inserted `from re import T/I` and `from typing import Optional` were incorrect and have been dropped.
def value_element(units=(OneOrMore(T('NN')) | OneOrMore(T('NNP')) | OneOrMore(T('NNPS')) | OneOrMore(T('NNS')))('raw_units').add_action(merge)):
"""
Returns an Element for values with given units. By default, uses tags to guess that a unit exists.
:param BaseParserElement units: (Optional) A parser element for the units that are to be looked for. Default option looks for nouns.
:returns: An Element to look for values and units.
:rtype: BaseParserElement
"""
number = R('^[\+\-–−]?\d+(\.\d+)?$')
joined_range = R('^[\+\-–−]?\d+(\.\d+)?[\-–−~∼˜]\d+(\.\d+)?$')('raw_value').add_action(merge)
spaced_range = (number + Optional(units).hide() + (R('^[\-–−~∼˜]$') + number | number))('raw_value').add_action(merge)
to_range = (number + Optional(units).hide() + I('to') + number)('raw_value').add_action(join)
plusminus_range = (number + R('±') + number)('value').add_action(join)
between_range = (I('between').hide() + number + I('and') + number).add_action(join)
value_range = (Optional(R('^[\-–−]$')) + (plusminus_range | joined_range | spaced_range | to_range | between_range))('raw_value').add_action(merge)
value_single = (Optional(R('^[~∼˜\<\>]$')) + Optional(R('^[\-–−]$')) + number)('raw_value').add_action(merge)
value = Optional(lbrct).hide() + (value_range | value_single)('raw_value') + Optional(rbrct).hide()
return value + units | 1b111fb30369d0d3b6c506d5f02e80b5c88044d5 | 6,842 |
def get_pattern(model_id, release_id) -> list:
"""
content demo:
[
'...',
{
0.1: [
['if', 'checker.check'],
3903,
['if', 'checker.check', '*', Variable(name="ip", value='10.0.0.1')],
['if checker.check():', 'if checker.check()'],
[282. 1877],
27886975249790003104399390262688492018705644758766193963474214767849400520551
]
},
'...',
'...'
]
sensitive_pattern [List]:
- representative tokens: one of the tokens that matches the pattern
- numbers: number of logs belonging to this pattern
- pattern: the clustering pattern
- raw_log: list of all raw logs
- log_index: indexes of all the raw logs
- log_signature: signature of the clustering model
"""
content = AiopsModelHandler.pickle_decode(
content=AiopsModelHandler().aiops_release_model_release_id_model_file(
model_id=model_id, model_release_id=release_id
)["file_content"]
)
patterns = []
for _, sensitive_patterns in content[CONTENT_PATTERN_INDEX].items():
for sensitive_pattern in sensitive_patterns:
signature = sensitive_pattern[PATTERN_SIGNATURE_INDEX]
pattern_list = []
for pattern in sensitive_pattern[PATTERN_INDEX]:
if hasattr(pattern, "name"):
pattern_list.append("#{}#".format(pattern.name))
continue
pattern_list.append(str(pattern))
patterns.append({"signature": str(signature), "pattern": " ".join(pattern_list)})
return patterns | 2307180d26e687fd7057c326e15e21b7aaf81471 | 6,843 |
def bit_remove(bin_name, byte_offset, byte_size, policy=None):
"""Creates a bit_remove_operation to be used with operate or operate_ordered.
Remove bytes from bitmap at byte_offset for byte_size.
Args:
bin_name (str): The name of the bin containing the bitmap.
byte_offset (int): Position of bytes to be removed.
byte_size (int): How many bytes to remove.
policy (dict, optional): The bit_policy policy dictionary. See :ref:`aerospike_bit_policies`. default: None
Returns:
A dictionary usable in operate or operate_ordered. The format of the dictionary
should be considered an internal detail, and subject to change.
"""
return {
OP_KEY: aerospike.OP_BIT_REMOVE,
BIN_KEY: bin_name,
POLICY_KEY: policy,
BYTE_OFFSET_KEY: byte_offset,
BYTE_SIZE_KEY: byte_size
} | 356ff2f3421b67790f7e224ecc49c844335864f8 | 6,844 |
def has_session_keys_checksums(session_key_checksums):
"""Check if this session key is (likely) already used."""
assert session_key_checksums, 'Eh? No checksum for the session keys?'
LOG.debug('Check if session keys (hash) are already used: %s', session_key_checksums)
with connection.cursor() as cur:
LOG.debug('SELECT * FROM local_ega.has_session_keys_checksums_sha256(%s);', session_key_checksums)
cur.execute('SELECT * FROM local_ega.has_session_keys_checksums_sha256(%(sk_checksums)s);',
{'sk_checksums': list(session_key_checksums)})
found = cur.fetchone()
LOG.debug("Check session keys: %s", found)
return (found and found[0]) | 9f6240f5ba2640c43ed5ae75de46a7e04d0041e0 | 6,845 |
def four2five(data, format_, dst_dtype='float16', need_custom_tiling=True):
"""
Convert 4-dims "data" to 5-dims,the format of "data" is defined in "format_"
Args:
data (tvm.tensor.Tensor): 4-dims tensor of type float16, float32
format_ (str): a str defined the format of "data"
dst_dtype (str): a str defined the type of output, could be float16 or float32
Returns:
5-dims tvm.tensor.Tensor,type is defined by dst_dtype,
which shape is [N, ceil(C / 16), H, W, 16] and attr about tiling args
Raises:
ValueError: If the type of format_ is invalid.
"""
# Check dtype
vc_util.ops_dtype_check(data.dtype, vc_util.DtypeForDavinci.ALL_FLOAT)
# Check shape
shape = get_shape(data)
vc_util.davinci_format_check(shape, format_, dim=4)
# Check format
if format_ not in ['NCHW', 'NHWC']:
raise ValueError("{} format is not support, four2five only support NCHW and NHWC format input"
.format(format_))
last_channel = 16
if format_ == "NCHW":
bs, c, h, w = get_shape(data)
else:
bs, h, w, c = get_shape(data)
pad_c = c
if c % last_channel != 0:
pad_c = (c + 15) // last_channel * last_channel
c1 = pad_c // last_channel
c0 = last_channel
is_dynamic = ds.shape_is_dynamic(data)
if not is_dynamic:
attrs = get_attrs()
else:
attrs = get_dynamic_attrs()
# Check size c when casting happens
if data.dtype != dst_dtype and c0 * c1 >= C_LIMIT_FOR_CAST:
raise ValueError("When input and output data type is not matched, shape of 'c' axis should not exceed {}, "
"while currently set is {}".format(C_LIMIT_FOR_CAST, c0 * c1))
@script(capture=locals())
def nchw_to_nc1hwc0_step(inputs, bs, c1, h, w, c0):
output = allocate((bs, c1, h, c0, w), inputs.dtype, "local")
for n_i in range(bs):
for c_i in range(c1):
for h_i in range(h):
for w_i in range(w):
for c_i0 in range(c0):
output[n_i, c_i, h_i, c_i0, w_i] = inputs[n_i, c_i * last_channel + c_i0, h_i, w_i]
output1 = allocate((bs, c1, h, w, c0), inputs.dtype, "local")
for n_i in range(bs):
for c_i in range(c1):
for h_i in range(h):
for w_i in range(w):
for c_i0 in range(c0):
output1[n_i, c_i, h_i, w_i, c_i0] = output[n_i, c_i, h_i, c_i0, w_i]
return output1
@script(capture=locals())
def nchw_to_nc1hwc0(inputs, bs, c1, h, w, c0):
output = allocate((bs, c1, h, w, c0), inputs.dtype, "local")
for n_i in range(bs):
for c_i in range(c1):
for h_i in range(h):
for w_i in range(w):
for c_i0 in range(c0):
output[n_i, c_i, h_i, w_i, c_i0] = inputs[n_i, c_i * last_channel + c_i0, h_i, w_i]
return output
@script(capture=locals())
def nhwc_to_nc1hwc0(inputs, zero, bs, c1, h, w, c0):
output = allocate((bs, c1, h, w, c0), inputs.dtype, "local")
for n_i in range(bs):
for c_i in range(c1):
for h_i in range(h):
for w_i in range(w):
for c_i0 in range(c0):
if c_i * last_channel + c_i0 < c:
output[n_i, c_i, h_i, w_i, c_i0] = inputs[n_i, h_i, w_i, c_i * last_channel + c_i0]
else:
output[n_i, c_i, h_i, w_i, c_i0] = zero
return output
cast_data = data
need_cast = data.dtype == 'float32' and dst_dtype == 'float16'
if c % last_channel != 0 or need_cast:
expansion = int(ct_util.BLOCK_SIZE / get_bytes(data.dtype))
else:
expansion = None
# float32 -> float16, need to cast before transform
if need_cast:
cast_data = akg.lang.cce.cast_to(data, dst_dtype)
zero_ = akg.tvm.const(0.0, cast_data.dtype)
if format_ == "NCHW":
if c % last_channel != 0:
pad_shape = [bs, pad_c, h, w]
if h == 1 and w == 1:
# if h and w both are 1, it is pad last dim case
output_shape = [bs, pad_c // last_channel, h, w, last_channel]
output = akg.tvm.compute(output_shape,
lambda i, c1, k, l, c0: akg.tvm.expr.Select(
c0 < c - c1 * last_channel, cast_data[i, c1 * last_channel + c0, k, l],
akg.tvm.const(0, cast_data.dtype)),
name="output")
else:
# if need to pad c dim, separate transpose to two steps
# first is nchw -> nc1hc0w, second is nc1hc0w -> nc1hwc0
pad_data = akg.tvm.compute(pad_shape,
lambda i, j, k, l: akg.tvm.expr.Select(j < c, cast_data[i, j, k, l], zero_),
name="pad_data")
output = nchw_to_nc1hwc0_step(
pad_data,
to_tvm_const(bs),
to_tvm_const(c1),
to_tvm_const(h),
to_tvm_const(w),
to_tvm_const(c0))
else:
if not is_dynamic and data.dtype == "float16" and h * w % last_channel == 0 and h * w < 3600:
output_shape = [bs, c1, h, w, c0]
output = akg.tvm.compute(output_shape, lambda n, c1, h, w, c0:
akg.lang.cce.four2five_nchw(cast_data[n, c1 * last_channel + c0, h, w]),
name="output")
else:
output = nchw_to_nc1hwc0(
cast_data,
to_tvm_const(bs),
to_tvm_const(c1),
to_tvm_const(h),
to_tvm_const(w),
to_tvm_const(c0))
else:
if not is_dynamic and c < last_channel:
rank = 5 # (n, c1, h, w, c0)
pad_before = []
pad_after = []
for _ in range(rank):
pad_before.append(0)
pad_after.append(0)
pad_after[-1] = last_channel - c
# As c < last_channel, c1 is 1
output = akg.tvm.compute((bs, c1, h, w, c), lambda bs_i, _, h_i, w_i, c_i: cast_data[
bs_i, h_i, w_i, c_i], name="output")
output = tvm_pad(output, pad_before, pad_after=pad_after, name='pad_output')
else:
output = nhwc_to_nc1hwc0(
cast_data,
zero_,
to_tvm_const(bs),
to_tvm_const(c1),
to_tvm_const(h),
to_tvm_const(w),
to_tvm_const(c0))
# float16 -> float32, need to cast after transform
if data.dtype == 'float16' and dst_dtype == 'float32':
output = akg.lang.cce.cast_to(output, dst_dtype)
vc_util.davinci_format_check(output.shape, "NC1HWC0", dim=5)
if not is_dynamic:
dim_info, _ = four2five_set_dim_func(data, format_, dst_dtype)
if dim_info != "":
attrs["dim"] = dim_info
if need_custom_tiling:
attrs["custom_tiling"] = four2five_tiling_strategy(output, format_, expansion)
elif need_custom_tiling:
attrs["custom_tiling"] = four2five_tiling_strategy_dynamic(output, format_)
if is_dynamic:
attrs["enable_feature_library_pre_poly"] = True
return output, attrs | 71de138f15e5b407a244c1670c48eb806b3be765 | 6,846 |
def MT33_SDR(MT33):
"""Converts 3x3 matrix to strike dip and rake values (in radians)
Converts the 3x3 Moment Tensor to the strike, dip and rake.
Args
MT33: 3x3 numpy matrix
Returns
(float, float, float): tuple of strike, dip, rake angles in radians
(Note: Function from MTFIT.MTconvert)
"""
T,N,P,E=MT33_TNPE(MT33)
N1,N2=TP_FP(T,P)
return FP_SDR(N1,N2) | 0b80df0e43bc546aa36a06858f144f32d75cb478 | 6,847 |
from generate_changelog.utilities import pairs
from typing import Optional
from typing import List
import re
def get_commits_by_tags(repository: Repo, tag_filter_pattern: str, starting_tag: Optional[str] = None) -> List[dict]:
"""
Group commits by the tags they belong to.
Args:
repository: The git repository object
tag_filter_pattern: A regular expression pattern that matches valid tags as versions
starting_tag: Only include tags after this one
Returns:
A list of dictionaries with tag information with most recent first
"""
tags = [tag for tag in get_tags(repository) if re.match(tag_filter_pattern, tag.name)]
head_commit = repository.commit("HEAD")
head_tagger = head_commit.committer.name
if head_commit.committer.email:
head_tagger += f" <{head_commit.committer.email}>"
head = TagInfo(
name="HEAD",
commit=head_commit.hexsha,
tagger=head_tagger,
tagged_datetime=head_commit.committed_datetime,
)
tags.insert(0, head)
groups = []
for end_tag, start_tag in pairs(tags):
start_tag_name = getattr(start_tag, "name", None)
groups.append(
{
"tag_name": end_tag.name,
"tag_info": end_tag,
"commits": parse_commits(repository, start_tag_name, end_tag.name),
}
)
if starting_tag and start_tag_name == starting_tag:
break
return groups | 1e870d2169496e183c1df8d90ddceb4e63cb1689 | 6,848 |
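`pairs` is imported from generate_changelog.utilities but its implementation is not shown; from the usage above (the final tag paired with None and read via getattr) it is assumed to behave roughly like this sketch:

# Assumed behaviour of the imported `pairs` helper:
# pairs([3, 2, 1]) -> (3, 2), (2, 1), (1, None)
def pairs(iterable):
    items = list(iterable)
    return zip(items, items[1:] + [None])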
def GetObject():
"""
Required module function.
@returns class object of the implemented adapter.
"""
return SpacePacketAdapter | 38ddac47a1ac7a58f203fc5552a00340a542518d | 6,849 |
import os
import copy
import io  # NOTE: expected to be a project-local io module providing filter_paths, not the stdlib io
import math
def filter_paths(ctx, raw_paths, path_type="repo", **kwds):
"""Filter ``paths``.
``path_type`` is ``repo`` or ``file``.
"""
cwd = os.getcwd()
filter_kwds = copy.deepcopy(kwds)
changed_in_commit_range = kwds.get("changed_in_commit_range", None)
diff_paths = None
if changed_in_commit_range is not None:
diff_files = git.diff(ctx, cwd, changed_in_commit_range)
if path_type == "repo":
diff_dirs = set(os.path.dirname(p) for p in diff_files)
diff_paths = set()
for diff_dir in diff_dirs:
while diff_dir:
if os.path.isfile(os.path.join(diff_dir, SHED_CONFIG_NAME)):
diff_paths.add(diff_dir)
break
diff_dir = os.path.dirname(diff_dir)
else:
diff_paths = diff_files
unique_paths = set(os.path.relpath(p, cwd) for p in raw_paths)
if diff_paths is not None:
new_unique_paths = []
for path in unique_paths:
if path in diff_paths:
new_unique_paths.append(path)
unique_paths = new_unique_paths
filtered_paths = sorted(io.filter_paths(unique_paths, cwd=cwd, **filter_kwds))
excluded_paths = sorted(set(unique_paths) - set(filtered_paths))
if excluded_paths:
ctx.log("List of excluded paths: %s" % excluded_paths)
path_count = len(filtered_paths)
chunk_size = ((1.0 * path_count) / kwds["chunk_count"])
chunk = kwds["chunk"]
chunked_paths = []
for i, path in enumerate(filtered_paths):
if int(math.floor(i / chunk_size)) == chunk:
chunked_paths.append(path)
return chunked_paths | b4eae04a1d836174793c02cf831116db69dfef31 | 6,850 |
def parse(args):
"""Parse the command-line arguments of the `inpaint` command.
Parameters
----------
args : list of str
List of arguments, without the command name.
Returns
-------
InPaint
Filled structure
"""
struct = InPaint()
struct.files = []
while cli.next_isvalue(args):
val, *args = args
struct.files.append(val)
while args:
if cli.next_isvalue(args):
raise ParseError(f'Value {args[0]} does not seem to belong '
f'to a tag.')
tag, *args = args
if tag in ('-m', '--missing'):
struct.missing = []
while cli.next_isvalue(args):
val, *args = args
struct.missing.append(float(val))
elif tag in ('-nrls', '--max-rls'):
cli.check_next_isvalue(args, tag)
struct.max_rls, *args = args
struct.max_rls = int(struct.max_rls)
elif tag in ('-trls', '--tol-rls'):
cli.check_next_isvalue(args, tag)
struct.tol_rls, *args = args
struct.tol_rls = float(struct.tol_rls)
elif tag in ('-ncg', '--max-cg'):
cli.check_next_isvalue(args, tag)
struct.max_cg, *args = args
struct.max_cg = int(struct.max_cg)
elif tag in ('-tcg', '--tol-cg'):
cli.check_next_isvalue(args, tag)
struct.tol_cg, *args = args
struct.tol_cg = float(struct.tol_cg)
elif tag in ('-cpu', '--cpu'):
struct.device = 'cpu'
elif tag in ('-gpu', '--gpu'):
struct.device = 'cuda'
if cli.next_isvalue(args):
gpu, *args = args
struct.device = 'cuda:{:d}'.format(int(gpu))
elif tag in ('-o', '--output'):
struct.output = []
while cli.next_isvalue(args):
val, *args = args
struct.output.append(val)
elif tag in ('-v', '--verbose'):
struct.verbose = 1
if cli.next_isvalue(args):
struct.verbose, *args = args
struct.verbose = int(struct.verbose)
elif tag in ('-h', '--help'):
print(help)
return None
else:
raise ParseError(f'Unknown tag {tag}')
return struct | 6b3eb929ce13559f9bd3d50ee2b15dd25e967d33 | 6,851 |
from unittest.mock import Mock
from datetime import datetime, timezone
def log_context(servicer_context: Mock) -> LogContext:
"""Mock LogContext."""
context = LogContext(
servicer_context,
"/abc.test/GetTest",
Mock(name="Request"),
Mock(name="Response", ByteSize=Mock(return_value=10)),
datetime(2021, 4, 3, 0, 0, 0, 0, timezone.utc),
datetime(2021, 4, 3, 0, 1, 0, 0, timezone.utc),
)
return context | 1ea1b8d4e6dad80ac8d95925a70a7f79ec84a686 | 6,852 |
import os
def filename(name):
""" Get filename without extension"""
return os.path.splitext(name)[0] | 9899b6e187684ddb95ff9d1bd7974163a7e3e78b | 6,853 |
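# Usage sketch for filename() above: splitext keeps the directory part and drops only
# the final extension.
print(filename("report.pdf"))        # -> report
print(filename("archive.tar.gz"))    # -> archive.tar
print(filename("/tmp/data.csv"))     # -> /tmp/data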
import base64
def base64_decode(string):
"""
Decodes data encoded with MIME base64
"""
return base64.b64decode(string) | 38870882fca9e6595e3f5b5f8943d0bf781f006c | 6,854 |
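# Usage sketch for base64_decode() above: the result is bytes, so decode it yourself
# if a str is needed.
print(base64_decode("aGVsbG8="))            # -> b'hello'
print(base64_decode("aGVsbG8=").decode())   # -> hello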
import re
def convert_operand_kind(operand_tuple):
"""Returns the corresponding operand type used in spirv-tools for the given
operand kind and quantifier used in the JSON grammar.
Arguments:
- operand_tuple: a tuple of two elements:
- operand kind: used in the JSON grammar
- quantifier: '', '?', or '*'
Returns:
a string of the enumerant name in spv_operand_type_t
"""
kind, quantifier = operand_tuple
# The following cases are where we differ between the JSON grammar and
# spirv-tools.
if kind == 'IdResultType':
kind = 'TypeId'
elif kind == 'IdResult':
kind = 'ResultId'
elif kind == 'IdMemorySemantics' or kind == 'MemorySemantics':
kind = 'MemorySemanticsId'
elif kind == 'IdScope' or kind == 'Scope':
kind = 'ScopeId'
elif kind == 'IdRef':
kind = 'Id'
elif kind == 'ImageOperands':
kind = 'Image'
elif kind == 'Dim':
kind = 'Dimensionality'
elif kind == 'ImageFormat':
kind = 'SamplerImageFormat'
elif kind == 'KernelEnqueueFlags':
kind = 'KernelEnqFlags'
elif kind == 'LiteralExtInstInteger':
kind = 'ExtensionInstructionNumber'
elif kind == 'LiteralSpecConstantOpInteger':
kind = 'SpecConstantOpNumber'
elif kind == 'LiteralContextDependentNumber':
kind = 'TypedLiteralNumber'
elif kind == 'PairLiteralIntegerIdRef':
kind = 'LiteralIntegerId'
elif kind == 'PairIdRefLiteralInteger':
kind = 'IdLiteralInteger'
elif kind == 'PairIdRefIdRef': # Used by OpPhi in the grammar
kind = 'Id'
if kind == 'FPRoundingMode':
kind = 'FpRoundingMode'
elif kind == 'FPFastMathMode':
kind = 'FpFastMathMode'
if quantifier == '?':
kind = 'Optional{}'.format(kind)
elif quantifier == '*':
kind = 'Variable{}'.format(kind)
return 'SPV_OPERAND_TYPE_{}'.format(
re.sub(r'([a-z])([A-Z])', r'\1_\2', kind).upper()) | 3d26a0b330ae64209655b24dfe86578cb4b8724c | 6,855 |
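# Usage sketch for convert_operand_kind() above, showing how the quantifier is folded
# into the spirv-tools enumerant name.
print(convert_operand_kind(('IdRef', '')))           # -> SPV_OPERAND_TYPE_ID
print(convert_operand_kind(('IdRef', '*')))          # -> SPV_OPERAND_TYPE_VARIABLE_ID
print(convert_operand_kind(('ImageOperands', '?')))  # -> SPV_OPERAND_TYPE_OPTIONAL_IMAGE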
import datetime
import numpy as np
import pandas as pd
import niimpy
def screen_missing_data(database,subject,begin=None,end=None):
    """ Returns a DataFrame containing the percentage (range [0,1]) of loss data
calculated based on the transitions of screen status. In general, if
screen_status(t) == screen_status(t+1), we declared we have at least one
missing point.
Parameters
----------
database: Niimpy database
user: string
begin: datetime, optional
end: datetime, optional
Returns
-------
count: Dataframe
"""
assert isinstance(database, niimpy.database.Data1),"database not given in Niimpy database format"
assert isinstance(subject, str),"usr not given in string format"
screen = database.raw(table='AwareScreen', user=subject)
if(begin!=None):
assert isinstance(begin,pd.Timestamp),"begin not given in timestamp format"
else:
begin = screen.iloc[0]['datetime']
if(end!= None):
assert isinstance(end,pd.Timestamp),"end not given in timestamp format"
else:
end = screen.iloc[len(screen)-1]['datetime']
screen=screen.drop_duplicates(subset=['datetime'],keep='first')
screen = screen.drop(['device','user','time'],axis=1)
screen=screen.loc[begin:end]
screen['screen_status']=pd.to_numeric(screen['screen_status'])
#Include the missing points that are due to shutting down the phone
shutdown = shutdown_info(database,subject,begin,end)
shutdown=shutdown.rename(columns={'battery_status':'screen_status'})
shutdown['screen_status']=0
screen = screen.merge(shutdown, how='outer', left_index=True, right_index=True)
screen['screen_status'] = screen.fillna(0)['screen_status_x'] + screen.fillna(0)['screen_status_y']
screen = screen.drop(['screen_status_x','screen_status_y'],axis=1)
dates=screen.datetime_x.combine_first(screen.datetime_y)
screen['datetime']=dates
screen = screen.drop(['datetime_x','datetime_y'],axis=1)
#Detect missing data points
screen['missing']=0
screen['next']=screen['screen_status'].shift(-1)
screen['dummy']=screen['screen_status']-screen['next']
screen['missing'] = np.where(screen['dummy']==0, 1, 0)
screen['missing'] = screen['missing'].shift(1)
screen = screen.drop(['dummy','next'], axis=1)
screen = screen.fillna(0)
screen['datetime'] = screen['datetime'].apply( lambda screen : datetime.datetime(year=screen.year, month=screen.month, day=screen.day))
screen = screen.drop(['screen_status'], axis=1)
count=pd.pivot_table(screen,values='missing',index='datetime', aggfunc='count')
count = screen.groupby(['datetime','missing'])['missing'].count().unstack(fill_value=0)
count['missing'] = count[1.0]/(count[0.0]+count[1.0])
count = count.drop([0.0,1.0], axis=1)
if (pd.Timestamp.tzname(count.index[0]) != 'EET'):
if pd.Timestamp.tzname(count.index[0]) != 'EEST':
count.index = pd.to_datetime(count.index).tz_localize('Europe/Helsinki')
return count | 70666ed7ddfea359c4c91afd8b52f9821580bda6 | 6,856 |
def check(text):
"""Check the text."""
error_code = "example.first"
msg = "First line always has an error."
reverse(text)
return [(1, 1, error_code, msg)] | 50d8406322225153c055b925609af702bb86d7b6 | 6,857 |
def figure(**kwargs):
"""
Create a new figure with the given settings.
Settings like the current colormap, title or axis limits as stored in the
current figure. This function creates a new figure, restores the default
settings and applies any settings passed to the function as keyword
arguments.
**Usage examples:**
>>> # Restore all default settings
>>> mlab.figure()
>>> # Restore all default settings and set the title
>>> mlab.figure(title="Example Figure")
"""
global _plt
_plt = _Figure()
_plt.kwargs.update(kwargs)
return _plt | a7de48597ecc80872d8d4b108a642956200adcc2 | 6,858 |
from itertools import chain
from typing import List
from bs4 import BeautifulSoup
def parse(content: str, target: str = "all") -> List[Inline]:
"""Parses an HTML document and extracts."""
soup = BeautifulSoup(content, "html.parser")
if target == "all":
search_queries = chain(*_VALID_TARGETS.values())
elif target in _VALID_TARGETS.keys():
search_queries = chain(_VALID_TARGETS[target])
else:
raise ValueError("Invalid Target")
elements = []
for q in search_queries:
for tag in soup.find_all(q.search_function):
if q.attr_name:
inline = Inline(tag[q.attr_name], tag.sourceline, tag.sourcepos)
else:
if not tag.contents:
continue
inline = Inline(tag.contents[0], tag.sourceline, tag.sourcepos)
elements.append(inline)
return elements | 56238a4def01713220c7d59b266cfcc55f1daf7f | 6,859 |
def create_output(verified_specific_headers_list:list) -> str:
""" Design Output """
if args.verbose is True:
print("[!] INFO: Outputting Specific Header Information")
return_output = ""
for specific_header in verified_specific_headers_list:
split_header = specific_header.split(":")
if split_header[1] != "":
return_output += f"{split_header[0]:<25} is declared -> DATA:{split_header[1]:30}\n"
else:
return_output += f"{split_header[0]:<25} is NOT declared -> NO DATA\n"
return return_output | e38fdf467d1f01167ff00040e7d0f7d8816e4915 | 6,860 |
def registered_response_data():
"""Body (bytes) of the registered response."""
return b"response data" | 1ee44d70592747947d76ff757901f44fde5c9946 | 6,861 |
def parse_log(log_file):
"""
Parses a log file into a list of lists containing the messages logged
:param log_file: path-like: Path to the log file
:return: list of lists containing messages in the log file
"""
parsed_logs = [[] for i in range(5)]
with open(log_file, 'r') as f:
for line in f.readlines():
parts = line.split(':')
for i in range(0, len(parts)):
parts[i] = parts[i].strip()
if parts[0] == LogLevel.ERROR:
parsed_logs[0].append(":".join(parts[1:]))
elif parts[0] == LogLevel.WARNING:
parsed_logs[1].append(":".join(parts[1:]))
elif parts[0] == LogLevel.INFO:
parsed_logs[2].append(":".join(parts[1:]))
elif parts[0] == LogLevel.STARTUP:
parsed_logs[3].append(":".join(parts[1:]))
else:
                parsed_logs[4].append(line)  # lines with no recognised level go to the last bucket
return parsed_logs | 065618c66470a8c538cbe9346ba66949819672b9 | 6,862 |
def generate_experiment_fn(train_files,
eval_files,
num_epochs=None,
train_batch_size=40,
eval_batch_size=40,
embedding_size=8,
first_layer_size=100,
num_layers=4,
scale_factor=0.7,
**experiment_args):
"""Create an experiment function given hyperparameters.
See command line help text for description of args.
Returns:
A function (output_dir) -> Experiment where output_dir is a string
representing the location of summaries, checkpoints, and exports.
this function is used by learn_runner to create an Experiment which
executes model code provided in the form of an Estimator and
input functions.
All listed arguments in the outer function are used to create an
Estimator, and input functions (training, evaluation, serving).
Unlisted args are passed through to Experiment.
"""
# Check verbose logging flag
verbose_logging = experiment_args.pop('verbose_logging')
model.set_verbose_logging(verbose_logging)
def _experiment_fn(output_dir):
# num_epochs can control duration if train_steps isn't
# passed to Experiment
train_input = model.generate_input_fn(
train_files,
num_epochs=num_epochs,
batch_size=train_batch_size,
)
# Don't shuffle evaluation data
eval_input = model.generate_input_fn(
eval_files,
batch_size=eval_batch_size,
shuffle=False
)
return tf.contrib.learn.Experiment(
model.build_estimator(
output_dir,
embedding_size=embedding_size,
                # Construct layer sizes with exponential decay
hidden_units=[
max(2, int(first_layer_size * scale_factor**i))
for i in range(num_layers)
]
),
train_input_fn=train_input,
eval_input_fn=eval_input,
# export strategies control the prediction graph structure
# of exported binaries.
export_strategies=[saved_model_export_utils.make_export_strategy(
model.serving_input_fn,
default_output_alternative_key=None,
exports_to_keep=1
)],
**experiment_args
)
return _experiment_fn | a7af08f955d1d93c1c9735b08b5e18b2fd9e405a | 6,863 |
def get_inclination_and_azimuth_from_locations(self, locations):
"""
self must to point to Main_InputWindow
"""
"""
Return "Inc" and "Azi" array objects in reference units.
"""
Inc = []
Azi = []
for MD in locations:
tangentVector = get_ASCT_from_MD(self, MD)
verticalVector = np.array([0.0,0.0,1.0,0.0])
if np.allclose( tangentVector, verticalVector, atol=1e-2, rtol=0.0 ):
tangentVector = verticalVector
inc = np.arccos( tangentVector[2] )
if inc==0.0:
azi = 0.0
else:
sinazi = tangentVector[0]/np.sin(inc)
cosazi = tangentVector[1]/np.sin(inc)
if sinazi>=0:
azi = np.arccos( cosazi )
elif sinazi<0:
azi = 2*np.pi-np.arccos( cosazi )
Inc.append(inc)
Azi.append(azi)
return np.array(Inc), np.array(Azi) | 2761137c670d3ad90c40d0689db062baf743d7a5 | 6,864 |
def _ensure_package(base, *parts):
"""Ensure that all the components of a module directory path exist, and
contain a file __init__.py."""
bits = []
for bit in parts[:-1]:
bits.append(bit)
base.ensure(*(bits + ['__init__.py']))
return base.ensure(*parts) | fc9bb95445cc1b0e8ec819dfafdaff7d5afbf372 | 6,865 |
def make_cat_matrix(n_rows: int, n_cats: int) -> tm.CategoricalMatrix:
"""Make categorical matrix for benchmarks."""
mat = tm.CategoricalMatrix(np.random.choice(np.arange(n_cats, dtype=int), n_rows))
return mat | 5c1f314a9582685d6c6da0f9ac0ee58fe9046952 | 6,866 |
import numpy as np
def add_stabilizer_nodes(boundaries_raw, electrodes, nr_nodes_between):
"""
Segmentation of nodes:
we have the existing nodes
N.F is the ratio of required nodes and existing nodes
first, add N nodes to each segment
then, add one more node to the F first segments
* assume ordered boundaries
"""
boundaries = []
boundaries = boundaries_raw
# find first electrode in boundary
for nr in range(electrodes.shape[0] - 1):
index0 = np.where(
(boundaries[:, 0] == electrodes[nr, 0]) &
(boundaries[:, 1] == electrodes[nr, 1])
)[0]
index1 = np.where(
(boundaries[:, 0] == electrodes[nr + 1, 0]) &
(boundaries[:, 1] == electrodes[nr + 1, 1])
)[0]
index0 = index0[0]
index1 = index1[0]
if index1 - index0 < 0:
index0, index1 = index1, index0
running_index = index0
nr_nodes = index1 - index0 - 1
while nr_nodes < nr_nodes_between:
# determine line equation
xy0 = boundaries[running_index, 0:2]
xy1 = boundaries[running_index + 1, 0:2]
direction = xy1 - xy0
heading = direction / np.sqrt(np.sum(direction ** 2))
# new node
xy_new = xy0 + heading * direction / 2.0
a = boundaries[running_index, 2][np.newaxis]
xyb = np.hstack((xy_new, a))
boundaries = np.insert(boundaries, running_index + 1, xyb, axis=0)
# 2, because we have to count the new one
running_index += 2
index1 += 1
nr_nodes += 1
if running_index == index1:
running_index = index0
return boundaries | fe8ff9618ee34cb9caedd828a880af05a1c964f0 | 6,867 |
def read_data(creds):
"""Read court tracking data in and drop duplicate case numbers"""
# try:
df = gsheet.read_data(gsheet.open_sheet(gsheet.init_sheets(creds),"01_Community_lawyer_test_out_final","Frontend"))
# df.drop_duplicates("Case Number",inplace=True) #Do we want to drop duplicates???
return df | 95bb588305c230c2f3aaa306e367da2602788f67 | 6,868 |
def _build_indie_lyrics(
root: str, num_workers: int = 8, max_size: int = 200000
) -> DocumentArray:
"""
Builds the indie lyrics dataset. Download the CSV files from:
https://www.kaggle.com/datasets/neisse/scrapped-lyrics-from-6-genres
:param root: the dataset root folder.
:param num_workers: the number of parallel workers to use.
:param max_size: used to randomly subsample from dataset if greater than 0
:return: DocumentArray
"""
return _build_lyrics(
genre='Indie',
root=root.replace('indie-lyrics', 'lyrics'),
num_workers=num_workers,
max_size=max_size,
) | 9eaaf9742c587a649d036e5a7da30dc5ca37db79 | 6,869 |
def getHostname(request):
"""
Utility method for getting hostname of client.
"""
if request.getClientIP() in LOOPBACK_ADDRESSES and has_headers(request, X_FORWARDED_FOR):
# nginx typically returns ip addresses
addr = get_headers(request, X_FORWARDED_FOR)
if isIPAddress(addr):
# we really shouldn't do such blocking calls in twisted,
# but the twisted dns interface is rather terrible and
# odd things happen when using it
# Set timeout to 1 second to limit the possible damage
try:
socket.setdefaulttimeout(1)
info = socket.gethostbyaddr(addr)
return info[0]
            except socket.error as msg:
log.msg("Error performing reverse lookup: %s" % msg)
return addr
else:
            return addr
else:
hostname = request.getClient()
if hostname is None:
hostname = request.getClientIP()
return hostname | 41ab9ed3a01d1e1bc53565115a8336a5eac741b3 | 6,870 |
def CollapseSolutionPosition(x,x0):
"""
Calculate a free-fall collapse solution
x - position to calculate time at in cm
x0 - initial position in cm
Sam Geen, March 2018
"""
X = x/x0
t = (np.arccos(np.sqrt(X)) + np.sqrt(X * (1.0-X))) * x0**1.5 / np.sqrt(2.0*units.G*gravity.centralmass)
return t | 3d0aaeef997a688b72df38ea2188ea34d62c1d55 | 6,871 |
import numpy as np
from scipy import signal
from nsdata import bfixpix
def scaleSpectralSky_cor(subframe, badpixelmask=None, maxshift=20, fitwidth=2, pord=1, nmed=3, dispaxis=0, spatial_index=None, refpix=None, tord=2):
"""
Use cross-correlation to subtract tilted sky backgrounds.
subframe : NumPy array
data subframe containing sky data to be subtracted (and,
perhaps, an object's spectral trace).
badpixelmask : None or NumPy array
A boolean array, equal to zero for good pixels and unity for bad
pixels. If this is set, the first step will be a call to
:func:`nsdata.bfixpix` to interpolate over these values.
nmed : int
size of 2D median filter for pre-smoothing.
pord : int
degree of spectral tilt. Keep this number low!
maxshift : int
Maximum acceptable shift. NOT YET IMPLEMENTED!
fitwidth : int
Maximum radius (in pixels) for fitting to the peak of the
cross-correlation.
nmed : int
Size of window for 2D median filter (to reject bad pixels, etc.)
dispaxis : int
set dispersion axis: 0 = horizontal and 1 = vertical
spatial_index : None, or 1D NumPy array of type *bool*
Which spatial rows (if dispaxis=0) to use when fitting the tilt
of sky lines across the spectrum. If you want to use all, set
to None. If you want to ignore some (e.g., because there's a
bright object's spectrum there) then set those rows' elements
of spatial_index to 'False'.
refpix : scalar
Pixel along spatial axis to which spectral fits should be
aligned; if a spectral trace is present, one should set
"refpix" to the location of the trace.
tord : int
Order of polynomial fits along spatial direction in aligned
2D-spectral frame, to account for misalignments or
irregularities of tilt direction.
:RETURNS:
a model of the sky background, of the same shape as 'subframe.'
"""
# 2012-09-22 17:04 IJMC: Created
# 2012-12-27 09:53 IJMC: Edited to better account for sharp edges
# in backgrounds.
# Parse inputs
if not isinstance(subframe, np.ndarray):
subframe = pyfits.getdata(subframe)
if badpixelmask is None:
pass
else:
badpixelmask = np.array(badpixelmask).astype(bool)
subframe = bfixpix(subframe, badpixelmask, retdat=True)
if dispaxis==1:
subframe = subframe.transpose()
# Define necessary variables and vectors:
npix, nlam = subframe.shape
if spatial_index is None:
spatial_index = np.ones(npix, dtype=bool)
else:
spatial_index = np.array(spatial_index, copy=False)
if refpix is None:
refpix = npix/2.
lampix = np.arange(nlam)
tpix = np.arange(npix)
alllags = np.arange(nlam-maxshift*2) - np.floor(nlam/2 - maxshift)
# Median-filter the input data:
if nmed > 1:
ssub = signal.medfilt2d(subframe, nmed)
else:
ssub = subframe.copy()
ref = np.median(ssub, axis=0)
#allcor = np.zeros((npix, nlam-maxshift*2))
shift = np.zeros(npix, dtype=float)
for ii in tpix:
# Cross-correlate to measure alignment at each row:
cor = np.correlate(ref[maxshift:-maxshift], signal.medfilt(ssub[ii], nmed)[maxshift:-maxshift], mode='same')
# Measure offset of each row:
maxind = alllags[(cor==cor.max())].mean()
fitind = np.abs(alllags - maxind) <= fitwidth
quadfit = np.polyfit(alllags[fitind], cor[fitind], 2)
shift[ii] = -0.5 * quadfit[1] / quadfit[0]
shift_polyfit = an.polyfitr(tpix[spatial_index], shift[spatial_index], pord, 3) #, w=weights)
refpos = np.polyval(shift_polyfit, refpix)
#pdb.set_trace()
fitshift = np.polyval(shift_polyfit, tpix) - refpos
# Interpolate each row to a common frame to create an improved reference:
newssub = np.zeros((npix, nlam))
for ii in tpix:
newssub[ii] = np.interp(lampix, lampix+fitshift[ii], ssub[ii])
#pdb.set_trace()
newref = np.median(newssub[spatial_index,:], axis=0)
tfits = np.zeros((nlam, tord+1), dtype=float)
newssub2 = np.zeros((npix, nlam))
for jj in range(nlam):
tfits[jj] = an.polyfitr(tpix, newssub[:,jj], tord, 3)
newssub2[:, jj] = np.polyval(tfits[jj], tpix)
# Create the final model of the sky background:
skymodel = np.zeros((npix, nlam), dtype=float)
shiftmodel = np.zeros((npix, nlam), dtype=float)
for ii in tpix:
#skymodel[ii] = np.interp(lampix, lampix-fitshift[ii], newref)
skymodel[ii] = np.interp(lampix, lampix-fitshift[ii], newssub2[ii])
shiftmodel[ii] = np.interp(lampix, lampix+fitshift[ii], ssub[ii])
#pdb.set_trace()
if dispaxis==1:
skymodel = skymodel.transpose()
return skymodel, shiftmodel, newssub, newssub2 | 50ee28ff81c4e981dca47e67ed525b7d9a421288 | 6,872 |
def login():
"""
Implements the login feature for the app.
Errors are shown if incorrect details are used. If the user tried
to access a page requiring login without being authenticated,
they are redirected there after sign in.
"""
if current_user.is_authenticated:
return redirect(url_for("auth.index"))
form = LoginForm()
if form.validate_on_submit():
user = User.query.filter_by(
username=form.username.data
).first() # None if invalid
if user is None or not user.check_password(form.password.data):
flash("Invalid username or password")
return redirect(url_for("auth.login"))
login_user(user, remember=form.remember_me.data)
next_page = request.args.get("next")
"""To prevent malicious users from adding a malicious site into the parameters,
this checks to see if the url is relative.
"""
if not next_page or url_parse(next_page).netloc != "" or next_page == "/logout":
next_page = url_for("auth.index")
return redirect(next_page)
return render_template("login.html", form=form) | 43c60504648aa4e93e24150b1aceb98293a4064d | 6,873 |
import numpy as np
def _get_plot_axes(grid):
"""Find which axes are being plotted.
Parameters
----------
grid : Grid
Returns
-------
tuple
"""
plot_axes = [0, 1, 2]
if np.unique(grid.nodes[:, 0]).size == 1:
plot_axes.remove(0)
if np.unique(grid.nodes[:, 1]).size == 1:
plot_axes.remove(1)
if np.unique(grid.nodes[:, 2]).size == 1:
plot_axes.remove(2)
return tuple(plot_axes) | 3112ba7d954c7b39bec035e31b5281919dc78244 | 6,874 |
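# Minimal sketch for _get_plot_axes() above: any object exposing a .nodes array works,
# so a SimpleNamespace stands in for a real Grid here.
from types import SimpleNamespace
grid = SimpleNamespace(nodes=np.array([[0.0, 0.0, 0.0], [0.5, 0.0, 1.0], [1.0, 0.0, 2.0]]))
print(_get_plot_axes(grid))  # -> (0, 2), since the second coordinate is constant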
import argparse
def make_parser(inheritable=False):
"""Make parser.
Parameters
----------
inheritable: bool
whether the parser can be inherited from (default False).
if True, sets ``add_help=False`` and ``conflict_hander='resolve'``
Returns
-------
parser: ArgumentParser
"""
parser = argparse.ArgumentParser(
description="get_globular_clusters",
        add_help=not inheritable,
        conflict_handler="resolve" if inheritable else "error",
)
parser.add_argument(
"output_dir",
type=str,
# default="../../data",
help="The data directory",
)
# parser.add_argument(
# "--data_dir",
# type=str,
# default="data",
# help="The input data directory",
# )
return parser | 444c242cb79dd28e7fba8c5d71a92af80e4b3178 | 6,875 |
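# Usage sketch for make_parser() above: only the positional output_dir is required.
parser = make_parser()
args = parser.parse_args(["../../data"])
print(args.output_dir)  # -> ../../data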
def _read_uint(addr):
""" Read a uint """
value = gdb.parse_and_eval("*(unsigned int*)0x%x" % addr)
try:
if value is not None:
return _cast_uint(value)
except gdb.MemoryError:
pass
print("Can't read 0x%x to lookup KASLR uint value" % addr)
return None | abe969c2f8595fdf1efdc98157536131d7a8a5ca | 6,876 |
def line_at_infinity(n):
"""the line at infinity just contains the points at infinity"""
return points_at_infinity(n) | 8a787b4598e072c101f8babbe948c4996b121a9a | 6,877 |
def check_section(config:Namespace, name:str) -> Namespace:
"""Check that a section with the specified name is present."""
section = config._get(name)
if section is None:
raise ConfigurationError(f"Section {name} not found in configuration")
if not isinstance(section, Namespace):
raise ConfigurationError(f"Configuration error: {name} not a section")
return section | 09a315a77bd25a3a78b8e80592a32c8709aa511f | 6,878 |
import math
def ceil(a):
"""The ceil function.
Args:
a (Union[:class:`~taichi.lang.expr.Expr`, :class:`~taichi.lang.matrix.Matrix`]): A number or a matrix.
Returns:
The least integer greater than or equal to `a`.
"""
return _unary_operation(_ti_core.expr_ceil, math.ceil, a) | 456436d8d1104b4df16327665dd477139528f6fa | 6,879 |
from typing import Any
from typing import Optional
def Body(
default: Any = Undefined,
*,
default_factory: Optional[NoArgAnyCallable] = None,
alias: str = None,
title: str = None,
description: str = None,
const: bool = None,
gt: float = None,
ge: float = None,
lt: float = None,
le: float = None,
multiple_of: float = None,
min_items: int = None,
max_items: int = None,
min_length: int = None,
max_length: int = None,
regex: str = None,
**extra: Any,
) -> Any:
"""
Used to provide extra information about a field, either for the model schema or complex validation. Some arguments
apply only to number fields (``int``, ``float``, ``Decimal``) and some apply only to ``str``.
:param default: since this is replacing the field’s default, its first argument is used
to set the default, use ellipsis (``...``) to indicate the field is required
:param default_factory: callable that will be called when a default value is needed for this field
If both `default` and `default_factory` are set, an error is raised.
:param alias: the public name of the field
:param title: can be any string, used in the schema
:param description: can be any string, used in the schema
:param const: this field is required and *must* take it's default value
:param gt: only applies to numbers, requires the field to be "greater than". The schema
will have an ``exclusiveMinimum`` validation keyword
:param ge: only applies to numbers, requires the field to be "greater than or equal to". The
schema will have a ``minimum`` validation keyword
:param lt: only applies to numbers, requires the field to be "less than". The schema
will have an ``exclusiveMaximum`` validation keyword
:param le: only applies to numbers, requires the field to be "less than or equal to". The
schema will have a ``maximum`` validation keyword
:param multiple_of: only applies to numbers, requires the field to be "a multiple of". The
schema will have a ``multipleOf`` validation keyword
:param min_items: only applies to list or tuple and set, requires the field to have a minimum length.
:param max_items: only applies to list or tuple and set, requires the field to have a maximum length.
:param min_length: only applies to strings, requires the field to have a minimum length. The
schema will have a ``maximum`` validation keyword
:param max_length: only applies to strings, requires the field to have a maximum length. The
schema will have a ``maxLength`` validation keyword
:param regex: only applies to strings, requires the field match again a regular expression
pattern string. The schema will have a ``pattern`` validation keyword
:param extra: any additional keyword arguments will be added as is to the schema
"""
if default is not Undefined and default_factory is not None:
raise ValueError("cannot specify both default and default_factory")
return BodyInfo(
default,
default_factory=default_factory,
alias=alias,
title=title,
description=description,
const=const,
gt=gt,
ge=ge,
lt=lt,
le=le,
multiple_of=multiple_of,
min_items=min_items,
max_items=max_items,
min_length=min_length,
max_length=max_length,
regex=regex,
**extra,
) | efc636d1b0e42736cecb04857afa67f636fd0bb6 | 6,880 |
import cv2
import numpy as np
def warp(img, pers_margin=425, margin_bottom=50, margin_top=450, margin_sides=150, reverse=False):
"""
This function warps an image. For the transformation a src polygon and a destination
polygon are used. The source polygon is calculated by the image shape and the margins
given. The destination polygon is calculated solely on the image shape.
:param img: Input image
:param pers_margin: This value determines how sharp the polygon is
:param margin_bottom: This value sets the distance between the polygon and the bottom of
the image
:param margin_top: This value sets the distance between the polygon and the top of the
image
:param margin_sides: This value sets the distance between the polygon and the sides of the
image
:param reverse: If True, src and dst will be swapped, thus the image will be unwarped
:return: Warped image
"""
img_size = (img.shape[1], img.shape[0])
# Four source coordinates
src = np.float32(
[[img_size[0] - margin_sides - pers_margin, margin_top],
[img_size[0] - margin_sides, img_size[1] - margin_bottom],
[margin_sides, img_size[1] - margin_bottom],
[margin_sides + pers_margin, margin_top]])
# Four destination coordinates
dst = np.float32(
[[img_size[0]*3//4, 0],
[img_size[0]*3//4, img_size[1]],
[img_size[0]//4, img_size[1]],
[img_size[0]//4, 0]])
# Compute perspective transform matrix
if not reverse:
m = cv2.getPerspectiveTransform(src, dst)
else:
m = cv2.getPerspectiveTransform(dst, src)
# Warp image
warped = cv2.warpPerspective(img, m, img_size, flags=cv2.INTER_LINEAR)
return warped | 06c4b08e43a3efcfaf3a44bd58727c6b0db833da | 6,881 |
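# A hedged usage sketch for warp() above: "road.jpg" is a placeholder path, and the
# default margins assume a roughly camera-sized frame.
img = cv2.imread("road.jpg")
birdseye = warp(img)                     # perspective -> top-down view
restored = warp(birdseye, reverse=True)  # top-down -> original perspective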
def get_all_doorstations(hass):
"""Get all doorstations."""
return [
entry[DOOR_STATION]
for entry in hass.data[DOMAIN].values()
if DOOR_STATION in entry
] | a6e785e6c667b956ef41ad98681e38b142d99ef5 | 6,882 |
import requests
import json
def get_weather() -> dict:
"""Makes an api request for the weather api
country code queries the specific country
city name queries the specific city within that country
units determines the type of numerical data returned (centigrade or Fahrenheit)
:return: the response from the api
"""
query = f"{city_name},{country_code}"
url_current_weather = f"https://api.openweathermap.org/data/2.5/weather?q={query}" \
f"&appid={api_key}&units={units}"
response = requests.get(url_current_weather).json()
if response["cod"] != 200:
log.error(json.dumps(response, indent=4))
response = None
return response | 023253ec2466182515a345d2bca1f10adf7b67ab | 6,883 |
def _create_off_value():
"""create off value"""
return Tensor(0.0, mstype.float32) | 9cddddc27810fdfc4dbe3970aaa5c5a064f4345c | 6,884 |
import datetime
def is_datetime(value):
"""
Check if an object is a datetime
:param value:
:return:
"""
result = False
if isinstance(value, datetime.datetime):
result = True
# else:
# result = is_datetime_str(str(value))
return result | 95c2392c9a3da9e4fccb43bd50c54914ffe91b8e | 6,885 |
import math
def sigmoid(z):
"""Sigmoid function"""
if z > 100:
return 0
return 1.0 / (1.0 + math.exp(z)) | 097e1a85fc46264cb1c7cd74498d6cfab97e5b88 | 6,886 |
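# Usage sketch for sigmoid() above: note it evaluates 1 / (1 + e^z), the mirror image
# of the usual 1 / (1 + e^-z), and short-circuits for large z.
print(sigmoid(0))    # -> 0.5
print(sigmoid(200))  # -> 0 (overflow guard)
print(sigmoid(-2))   # -> ~0.88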
async def get_company_sumary(symbol: str, db: Session = Depends(get_db)):
"""
    This method receives a symbol; if it does not exist in our database,
    its data is extracted, stored in our database, and the stored
    record is returned.
"""
company_solver = CompanySolver(company_symbol=symbol)
_ = company_solver.get_company_data(db)
return _ | 9cd4a5e6dfe4f308f564d956280cb6cd522c6296 | 6,887 |
def make_dataloaders(params: MinkLocParams, debug=False):
"""
Create training and validation dataloaders that return groups of k=2 similar elements
    :param params: MinkLocParams with dataset and model configuration
    :param debug: if True, build the reduced debug datasets
    :return: dict of dataloaders keyed by 'train' (and 'val' when available)
"""
datasets = make_datasets(params, debug=debug)
dataloders = {}
train_sampler = BatchSampler(datasets['train'], batch_size=params.batch_size,
batch_size_limit=params.batch_size_limit,
batch_expansion_rate=params.batch_expansion_rate)
# Collate function collates items into a batch and applies a 'set transform' on the entire batch
train_collate_fn = make_collate_fn(datasets['train'], params.model_params.version, params.dataset_name,
params.model_params.mink_quantization_size)
dataloders['train'] = DataLoader(datasets['train'], batch_sampler=train_sampler, collate_fn=train_collate_fn,
num_workers=params.num_workers, pin_memory=True)
if 'val' in datasets:
val_sampler = BatchSampler(datasets['val'], batch_size=params.batch_size)
# Collate function collates items into a batch and applies a 'set transform' on the entire batch
# Currently validation dataset has empty set_transform function, but it may change in the future
val_collate_fn = make_collate_fn(datasets['val'], params.model_params.version, params.dataset_name,
params.model_params.mink_quantization_size)
dataloders['val'] = DataLoader(datasets['val'], batch_sampler=val_sampler, collate_fn=val_collate_fn,
num_workers=params.num_workers, pin_memory=True)
return dataloders | 3868c414d77492814ba57c5872ea55dda7c3d108 | 6,888 |
def get_attn_pad_mask(seq_q, seq_k):
"""
    Sentences differ in length, so PAD tokens are used to fill every sentence up to a fixed length;
    those PAD tokens carry no meaning, so attention should not be paid to them.
    This attention-mask function marks the PAD positions so that attention stays on the real tokens.
:param seq_q: [batch_size, seq_len]
:param seq_k: [batch_size, seq_len]
"""
batch_size, len_q = seq_q.size()
_, len_k = seq_k.size()
    pad_attn_mask = seq_k.data.eq(0).unsqueeze(1)  # [batch_size, 1, len_k]; 0 denotes PAD, eq(0) returns a boolean tensor shaped like seq_k
    # a position is True when the corresponding element of seq_k is 0 (PAD), otherwise False
# [1, 2, 3, 0] -> [F, F, F, T]
return pad_attn_mask.expand(batch_size, len_q, len_k) | 522fc244c02ec767b80da2f0c9b5cf6720e931c0 | 6,889 |
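# Usage sketch for get_attn_pad_mask() above: positions holding the PAD id (0) are
# marked True across every query position.
import torch
seq = torch.tensor([[1, 2, 3, 0]])
mask = get_attn_pad_mask(seq, seq)
print(mask.shape)  # -> torch.Size([1, 4, 4])
print(mask[0, 0])  # -> tensor([False, False, False,  True])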
import numpy as np
def convert_str_to_float(string):
    """Convert a string to float.
    Returns NaN when the string cannot be parsed (the edge case handled here).
Args:
string (str): string
Returns:
f (float): float value
"""
try:
f = float(string)
except Exception:
f = np.nan
return f | f597d9d59c00f484d9b5183fc610fabf84529218 | 6,890 |
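# Usage sketch for convert_str_to_float() above: invalid input falls back to NaN
# instead of raising.
print(convert_str_to_float("3.14"))  # -> 3.14
print(convert_str_to_float("1e-3"))  # -> 0.001
print(convert_str_to_float("n/a"))   # -> nan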
def node_tree(node: str):
"""Format printing for locate"""
str2list = list(node.replace(' ', ''))
count = 0
for i, e in enumerate(str2list):
if e == '(':
count += 1
str2list[i] = '(\n{}'.format('| ' * count)
elif e == ')':
count -= 1
str2list[i] = '\n{})'.format('| ' * count)
elif e == ',':
str2list[i] = ',\n{}'.format('| ' * count)
elif e == '[':
count += 1
str2list[i] = '[\n{}'.format('| ' * count)
elif e == ']':
count -= 1
str2list[i] = '\n{}]'.format('| ' * count)
return ''.join(str2list) | 010805499cb6e886ec8811949a1d1d013db1d15f | 6,891 |
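# Usage sketch for node_tree() above: nesting depth is rendered with '| ' markers.
print(node_tree("f(a, g(b, c))"))
# f(
# | a,
# | g(
# | | b,
# | | c
# | )
# )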
def process_data(datas):
    """
    Annotate each entry's question tokens with their schema type (column, table, aggregation, value, ...).
    :param datas: list of dataset entries to process
    :return: the processed entries
    """
# copy of the origin question_toks
for d in datas:
if 'origin_question_toks' not in d:
d['origin_question_toks'] = d['question_toks']
for entry in datas:
entry['question_toks'] = symbol_filter(entry['question_toks'])
origin_question_toks = symbol_filter([x for x in entry['origin_question_toks'] if x.lower() != 'the'])
question_toks = [wordnet_lemmatizer.lemmatize(x.lower()) for x in entry['question_toks'] if x.lower() != 'the']
entry['question_toks'] = question_toks
table_names = []
table_names_pattern = []
for y in entry['table_names']:
x = [wordnet_lemmatizer.lemmatize(x.lower()) for x in y.split(' ')]
table_names.append(" ".join(x))
x = [re_lemma(x.lower()) for x in y.split(' ')]
table_names_pattern.append(" ".join(x))
header_toks = []
header_toks_list = []
header_toks_pattern = []
header_toks_list_pattern = []
for y in entry['col_set']:
x = [wordnet_lemmatizer.lemmatize(x.lower()) for x in y.split(' ')]
header_toks.append(" ".join(x))
header_toks_list.append(x)
x = [re_lemma(x.lower()) for x in y.split(' ')]
header_toks_pattern.append(" ".join(x))
header_toks_list_pattern.append(x)
num_toks = len(question_toks)
idx = 0
tok_concol = []
type_concol = []
nltk_result = nltk.pos_tag(question_toks)
while idx < num_toks:
# fully header
end_idx, header = fully_part_header(question_toks, idx, num_toks, header_toks)
if header:
tok_concol.append(question_toks[idx: end_idx])
type_concol.append(["col"])
idx = end_idx
continue
# check for table
end_idx, tname = group_header(question_toks, idx, num_toks, table_names)
if tname:
tok_concol.append(question_toks[idx: end_idx])
type_concol.append(["table"])
idx = end_idx
continue
# check for column
end_idx, header = group_header(question_toks, idx, num_toks, header_toks)
if header:
tok_concol.append(question_toks[idx: end_idx])
type_concol.append(["col"])
idx = end_idx
continue
# check for partial column
end_idx, tname = partial_header(question_toks, idx, header_toks_list)
if tname:
tok_concol.append(tname)
type_concol.append(["col"])
idx = end_idx
continue
# check for aggregation
end_idx, agg = group_header(question_toks, idx, num_toks, AGG)
if agg:
tok_concol.append(question_toks[idx: end_idx])
type_concol.append(["agg"])
idx = end_idx
continue
if nltk_result[idx][1] == 'RBR' or nltk_result[idx][1] == 'JJR':
tok_concol.append([question_toks[idx]])
type_concol.append(['MORE'])
idx += 1
continue
if nltk_result[idx][1] == 'RBS' or nltk_result[idx][1] == 'JJS':
tok_concol.append([question_toks[idx]])
type_concol.append(['MOST'])
idx += 1
continue
# string match for Time Format
if num2year(question_toks[idx]):
question_toks[idx] = 'year'
end_idx, header = group_header(question_toks, idx, num_toks, header_toks)
if header:
tok_concol.append(question_toks[idx: end_idx])
type_concol.append(["col"])
idx = end_idx
continue
def get_concept_result(toks, graph):
for begin_id in range(0, len(toks)):
for r_ind in reversed(range(1, len(toks) + 1 - begin_id)):
tmp_query = "_".join(toks[begin_id:r_ind])
if tmp_query in graph:
mi = graph[tmp_query]
for col in entry['col_set']:
if col in mi:
return col
end_idx, symbol = group_symbol(question_toks, idx, num_toks)
if symbol:
tmp_toks = [x for x in question_toks[idx: end_idx]]
assert len(tmp_toks) > 0, print(symbol, question_toks)
pro_result = get_concept_result(tmp_toks, english_IsA)
if pro_result is None:
pro_result = get_concept_result(tmp_toks, english_RelatedTo)
if pro_result is None:
pro_result = "NONE"
for tmp in tmp_toks:
tok_concol.append([tmp])
type_concol.append([pro_result])
pro_result = "NONE"
idx = end_idx
continue
end_idx, values = group_values(origin_question_toks, idx, num_toks)
if values and (len(values) > 1 or question_toks[idx - 1] not in ['?', '.']):
tmp_toks = [wordnet_lemmatizer.lemmatize(x) for x in question_toks[idx: end_idx] if x.isalnum() is True]
assert len(tmp_toks) > 0, print(question_toks[idx: end_idx], values, question_toks, idx, end_idx)
pro_result = get_concept_result(tmp_toks, english_IsA)
if pro_result is None:
pro_result = get_concept_result(tmp_toks, english_RelatedTo)
if pro_result is None:
pro_result = "NONE"
for tmp in tmp_toks:
tok_concol.append([tmp])
type_concol.append([pro_result])
pro_result = "NONE"
idx = end_idx
continue
result = group_digital(question_toks, idx)
if result is True:
tok_concol.append(question_toks[idx: idx + 1])
type_concol.append(["value"])
idx += 1
continue
if question_toks[idx] == ['ha']:
question_toks[idx] = ['have']
tok_concol.append([question_toks[idx]])
type_concol.append(['NONE'])
idx += 1
continue
entry['question_arg'] = tok_concol
entry['question_arg_type'] = type_concol
entry['nltk_pos'] = nltk_result
return datas | 3e2ab0daa83e48abc121b72cbf1970c8b5fabe87 | 6,892 |
from typing import Union
from typing import Collection
import typing
from typing import Literal
from typing import Callable
from typing import Optional
from typing import Any
from typing import Mapping
def concat(
adatas: Union[Collection[AnnData], "typing.Mapping[str, AnnData]"],
*,
axis: Literal[0, 1] = 0,
join: Literal["inner", "outer"] = "inner",
merge: Union[StrategiesLiteral, Callable, None] = None,
uns_merge: Union[StrategiesLiteral, Callable, None] = None,
label: Optional[str] = None,
keys: Optional[Collection] = None,
index_unique: Optional[str] = None,
fill_value: Optional[Any] = None,
pairwise: bool = False,
) -> AnnData:
"""Concatenates AnnData objects along an axis.
See the :doc:`concatenation` section in the docs for a more in-depth description.
.. warning::
This function is marked as experimental for the `0.7` release series, and will
supercede the :meth:`AnnData.concatenate() <anndata.AnnData.concatenate>` method
in future releases.
Params
------
adatas
The objects to be concatenated. If a Mapping is passed, keys are used for the `keys`
argument and values are concatenated.
axis
Which axis to concatenate along.
join
How to align values when concatenating. If "outer", the union of the other axis
is taken. If "inner", the intersection. See :doc:`concatenation` for more.
merge
How elements not aligned to the axis being concatenated along are selected.
Currently implemented strategies include:
* `None`: No elements are kept.
* `"same"`: Elements that are the same in each of the objects.
* `"unique"`: Elements for which there is only one possible value.
* `"first"`: The first element seen at each from each position.
* `"only"`: Elements that show up in only one of the objects.
uns_merge
How the elements of `.uns` are selected. Uses the same set of strategies as
the `merge` argument, except applied recursively.
label
Column in axis annotation (i.e. `.obs` or `.var`) to place batch information in.
If it's None, no column is added.
keys
Names for each object being added. These values are used for column values for
`label` or appended to the index if `index_unique` is not `None`. Defaults to
incrementing integer labels.
index_unique
Whether to make the index unique by using the keys. If provided, this
        is the delimiter between "{orig_idx}{index_unique}{key}". When `None`,
the original indices are kept.
fill_value
When `join="outer"`, this is the value that will be used to fill the introduced
indices. By default, sparse arrays are padded with zeros, while dense arrays and
DataFrames are padded with missing values.
pairwise
Whether pairwise elements along the concatenated dimension should be included.
This is False by default, since the resulting arrays are often not meaningful.
Notes
-----
.. warning::
If you use `join='outer'` this fills 0s for sparse data when
variables are absent in a batch. Use this with care. Dense data is
filled with `NaN`.
Examples
--------
Preparing example objects
>>> import anndata as ad, pandas as pd, numpy as np
>>> from scipy import sparse
>>> a = ad.AnnData(
... X=sparse.csr_matrix(np.array([[0, 1], [2, 3]])),
... obs=pd.DataFrame({"group": ["a", "b"]}, index=["s1", "s2"]),
... var=pd.DataFrame(index=["var1", "var2"]),
... varm={"ones": np.ones((2, 5)), "rand": np.random.randn(2, 3), "zeros": np.zeros((2, 5))},
... uns={"a": 1, "b": 2, "c": {"c.a": 3, "c.b": 4}},
... )
>>> b = ad.AnnData(
... X=sparse.csr_matrix(np.array([[4, 5, 6], [7, 8, 9]])),
... obs=pd.DataFrame({"group": ["b", "c"], "measure": [1.2, 4.3]}, index=["s3", "s4"]),
... var=pd.DataFrame(index=["var1", "var2", "var3"]),
... varm={"ones": np.ones((3, 5)), "rand": np.random.randn(3, 5)},
... uns={"a": 1, "b": 3, "c": {"c.b": 4}},
... )
>>> c = ad.AnnData(
... X=sparse.csr_matrix(np.array([[10, 11], [12, 13]])),
... obs=pd.DataFrame({"group": ["a", "b"]}, index=["s1", "s2"]),
... var=pd.DataFrame(index=["var3", "var4"]),
... uns={"a": 1, "b": 4, "c": {"c.a": 3, "c.b": 4, "c.c": 5}},
... )
Concatenating along different axes
>>> ad.concat([a, b]).to_df()
var1 var2
s1 0.0 1.0
s2 2.0 3.0
s3 4.0 5.0
s4 7.0 8.0
>>> ad.concat([a, c], axis=1).to_df()
var1 var2 var3 var4
s1 0.0 1.0 10.0 11.0
s2 2.0 3.0 12.0 13.0
Inner and outer joins
>>> inner = ad.concat([a, b]) # Joining on intersection of variables
>>> inner
AnnData object with n_obs × n_vars = 4 × 2
obs: 'group'
>>> (inner.obs_names, inner.var_names) # doctest: +NORMALIZE_WHITESPACE
(Index(['s1', 's2', 's3', 's4'], dtype='object'),
Index(['var1', 'var2'], dtype='object'))
>>> outer = ad.concat([a, b], join="outer") # Joining on union of variables
>>> outer
AnnData object with n_obs × n_vars = 4 × 3
obs: 'group', 'measure'
>>> outer.var_names
Index(['var1', 'var2', 'var3'], dtype='object')
>>> outer.to_df() # Sparse arrays are padded with zeroes by default
var1 var2 var3
s1 0.0 1.0 0.0
s2 2.0 3.0 0.0
s3 4.0 5.0 6.0
s4 7.0 8.0 9.0
Keeping track of source objects
>>> ad.concat({"a": a, "b": b}, label="batch").obs
group batch
s1 a a
s2 b a
s3 b b
s4 c b
>>> ad.concat([a, b], label="batch", keys=["a", "b"]).obs # Equivalent to previous
group batch
s1 a a
s2 b a
s3 b b
s4 c b
>>> ad.concat({"a": a, "b": b}, index_unique="-").obs
group
s1-a a
s2-a b
s3-b b
s4-b c
Combining values not aligned to axis of concatenation
>>> ad.concat([a, b], merge="same")
AnnData object with n_obs × n_vars = 4 × 2
obs: 'group'
varm: 'ones'
>>> ad.concat([a, b], merge="unique")
AnnData object with n_obs × n_vars = 4 × 2
obs: 'group'
varm: 'ones', 'zeros'
>>> ad.concat([a, b], merge="first")
AnnData object with n_obs × n_vars = 4 × 2
obs: 'group'
varm: 'ones', 'rand', 'zeros'
>>> ad.concat([a, b], merge="only")
AnnData object with n_obs × n_vars = 4 × 2
obs: 'group'
varm: 'zeros'
The same merge strategies can be used for elements in `.uns`
>>> dict(ad.concat([a, b, c], uns_merge="same").uns)
{'a': 1, 'c': {'c.b': 4}}
>>> dict(ad.concat([a, b, c], uns_merge="unique").uns)
{'a': 1, 'c': {'c.a': 3, 'c.b': 4, 'c.c': 5}}
>>> dict(ad.concat([a, b, c], uns_merge="only").uns)
{'c': {'c.c': 5}}
>>> dict(ad.concat([a, b, c], uns_merge="first").uns)
{'a': 1, 'b': 2, 'c': {'c.a': 3, 'c.b': 4, 'c.c': 5}}
"""
# Argument normalization
merge = resolve_merge_strategy(merge)
uns_merge = resolve_merge_strategy(uns_merge)
if isinstance(adatas, Mapping):
if keys is not None:
raise TypeError(
"Cannot specify categories in both mapping keys and using `keys`. "
"Only specify this once."
)
keys, adatas = list(adatas.keys()), list(adatas.values())
else:
adatas = list(adatas)
if keys is None:
keys = np.arange(len(adatas)).astype(str)
if axis == 0:
dim = "obs"
elif axis == 1:
dim = "var"
alt_axis, alt_dim = _resolve_dim(axis=1 - axis)
# Label column
label_col = pd.Categorical.from_codes(
np.repeat(np.arange(len(adatas)), [a.shape[axis] for a in adatas]),
categories=keys,
)
# Combining indexes
concat_indices = pd.concat(
[pd.Series(dim_indices(a, axis=axis)) for a in adatas], ignore_index=True
)
if index_unique is not None:
concat_indices = concat_indices.str.cat(label_col.map(str), sep=index_unique)
concat_indices = pd.Index(concat_indices)
alt_indices = resolve_index(
[dim_indices(a, axis=1 - axis) for a in adatas], join=join
)
reindexers = [
gen_reindexer(alt_indices, dim_indices(a, axis=1 - axis)) for a in adatas
]
# Annotation for concatenation axis
concat_annot = pd.concat(
[getattr(a, dim) for a in adatas], join=join, ignore_index=True
)
concat_annot.index = concat_indices
if label is not None:
concat_annot[label] = label_col
# Annotation for other axis
alt_annot = merge_dataframes(
[getattr(a, alt_dim) for a in adatas], alt_indices, merge
)
X = concat_arrays(
[a.X for a in adatas], reindexers, axis=axis, fill_value=fill_value
)
if join == "inner":
layers = inner_concat_aligned_mapping(
[a.layers for a in adatas], axis=axis, reindexers=reindexers
)
concat_mapping = inner_concat_aligned_mapping(
[getattr(a, f"{dim}m") for a in adatas], index=concat_indices
)
if pairwise:
concat_pairwise = concat_pairwise_mapping(
mappings=[getattr(a, f"{dim}p") for a in adatas],
shapes=[a.shape[axis] for a in adatas],
join_keys=intersect_keys,
)
else:
concat_pairwise = {}
elif join == "outer":
layers = outer_concat_aligned_mapping(
[a.layers for a in adatas], reindexers, axis=axis, fill_value=fill_value
)
concat_mapping = outer_concat_aligned_mapping(
[getattr(a, f"{dim}m") for a in adatas],
index=concat_indices,
fill_value=fill_value,
)
if pairwise:
concat_pairwise = concat_pairwise_mapping(
mappings=[getattr(a, f"{dim}p") for a in adatas],
shapes=[a.shape[axis] for a in adatas],
join_keys=union_keys,
)
else:
concat_pairwise = {}
# TODO: Reindex lazily, so we don't have to make those copies until we're sure we need the element
alt_mapping = merge(
[
{k: r(v, axis=0) for k, v in getattr(a, f"{alt_dim}m").items()}
for r, a in zip(reindexers, adatas)
],
)
alt_pairwise = merge(
[
{k: r(r(v, axis=0), axis=1) for k, v in getattr(a, f"{alt_dim}p").items()}
for r, a in zip(reindexers, adatas)
]
)
uns = uns_merge([a.uns for a in adatas])
raw = None
has_raw = [a.raw is not None for a in adatas]
if all(has_raw):
raw = concat(
[
AnnData(
X=a.raw.X,
obs=pd.DataFrame(index=a.obs_names),
var=a.raw.var,
varm=a.raw.varm,
)
for a in adatas
],
join=join,
label=label,
keys=keys,
index_unique=index_unique,
fill_value=fill_value,
axis=axis,
)
elif any(has_raw):
warn(
"Only some AnnData objects have `.raw` attribute, "
"not concatenating `.raw` attributes.",
UserWarning,
)
return AnnData(
**{
"X": X,
"layers": layers,
dim: concat_annot,
alt_dim: alt_annot,
f"{dim}m": concat_mapping,
f"{alt_dim}m": alt_mapping,
f"{dim}p": concat_pairwise,
f"{alt_dim}p": alt_pairwise,
"uns": uns,
"raw": raw,
}
) | bf85455f3cebb61d4711c2022442d7bbcc75d6b2 | 6,893 |
from copy import copy
import numpy as np
def repeated_parity_data_binning(shots, nr_of_meas:int):
"""
Used for data binning of the repeated parity check experiment.
Assumes the data qubit is alternatively prepared in 0 and 1.
Args:
shots (1D array) : array containing all measured values of 1 qubit
nr_of_meas (int) : number of measurement per prepared state.
used to determine the period for data binning. Includes
the initialization measurement.
Returns
prep_0 (1D array) outcomes of the initialization measurement
meas_0 (1D array) outcomes of the first measurement
trace_0 (2D array) traces
prep_1 (1D array)
meas_1 (1D array)
trace_1 (2D array)
"""
prep_0 = copy(shots[::nr_of_meas*2])
meas_0 = copy(shots[1::nr_of_meas*2])
prep_1 = copy(shots[nr_of_meas::nr_of_meas*2])
meas_1 = copy(shots[nr_of_meas+1::nr_of_meas*2])
trace_0 = np.zeros((len(prep_0), nr_of_meas-1))
trace_1 = np.zeros((len(prep_1), nr_of_meas-1))
for i in range(len(prep_0)):
trace_0[i, :] = shots[1+(2*i)*nr_of_meas: (2*i+1)*nr_of_meas]
trace_1[i, :] = shots[1+(2*i+1)*nr_of_meas: (2*i+2)*nr_of_meas]
return (prep_0, meas_0, trace_0, prep_1, meas_1, trace_1) | 3cd724579738f5ccf4bd664cf1b023d1c7c08f27 | 6,894 |
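# Usage sketch for repeated_parity_data_binning() above: with nr_of_meas = 3, each
# preparation contributes one initialization shot plus nr_of_meas - 1 = 2 follow-up points.
shots = np.arange(12)  # two rounds of (|0> prep, |1> prep), 3 measurements each
prep_0, meas_0, trace_0, prep_1, meas_1, trace_1 = repeated_parity_data_binning(shots, 3)
print(prep_0, meas_0)  # -> [0 6] [1 7]
print(trace_0.shape)   # -> (2, 2)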
def get_user_activities(user_id, timestamp_start, timestamp_end):
""" Returns the activities for a user, between two times"""
activities = Activity.query \
.filter(Activity.user_id == user_id) \
.filter(Activity.timestamp_end >= timestamp_start) \
.filter(Activity.timestamp_start <= timestamp_end).all()
# If required, add the current_activity (The above loop will not get it)
current_activity_id = get_current_user_activity_id(target_user_id=user_id)
if current_activity_id is not None:
current_act = Activity.query.get(current_activity_id)
# Don't add the current activity if it started after the requested end
if current_act.timestamp_start <= timestamp_end:
activities.append(current_act)
return activities | 0b58c1e6a430e0179d34b0ee6d8fdb70f6b102c1 | 6,895 |
import numpy as np
def _find_matches(ref, pred):
""" find potential matches between objects in the reference and
predicted images. These need to have at least 1 pixel of overlap.
"""
matches = {}
for label in ref.labels:
mask = ref.labeled == label
matches[label] = [m for m in np.unique(pred.labeled[mask]) if m>0]
return matches | 82ea5c5a0c73996187d7f5409745b947b7e17960 | 6,896 |
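# Minimal sketch for _find_matches() above: ref/pred only need .labels / .labeled
# attributes, so SimpleNamespace stands in for real segmentation objects.
from types import SimpleNamespace
ref = SimpleNamespace(labels=[1, 2], labeled=np.array([[1, 1, 0], [2, 2, 0]]))
pred = SimpleNamespace(labeled=np.array([[3, 3, 0], [0, 4, 4]]))
print(_find_matches(ref, pred))  # -> {1: [3], 2: [4]}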
def _process(config: ConfigType, should_make_dir: bool) -> ConfigType:
"""Process the config
Args:
config (ConfigType): Config object
should_make_dir (bool): Should make dir for saving logs, models etc
Returns:
[ConfigType]: Processed config
"""
config = _process_general_config(config=config)
config = _process_logbook_config(config=config, should_make_dir=should_make_dir)
config = _process_experiment_config(config=config, should_make_dir=should_make_dir)
return config | 3bf2cc4eff379fcfe8f7d58332ae33658e7e5540 | 6,897 |
import pandas as pd
def calendar_heatmap_echarts(data_frame: pd.DataFrame, date_field: str = None, value_field: str = None,
title: str = "",
width: str = "100%", height: str = "300px") -> Echarts:
"""
    Calendar heatmap showing how strongly a value is expressed on each date.
    :param data_frame:
    :param date_field: name of the date column
    :param value_field: name of the value column
    :param title: optional chart title
    :param width: width of the output div; supports pixels and percentages, e.g. 800px / 100%
    :param height: height of the output div; supports pixels and percentages, e.g. 800px / 100%
:return:
"""
df = data_frame[[date_field, value_field]].copy()
value_max = df[value_field].max()
value_min = df[value_field].min()
date_start = pd.to_datetime(df[date_field].min()).strftime("%Y-%m-%d")
date_end = pd.to_datetime(df[date_field].max()).strftime("%Y-%m-%d")
df[date_field] = pd.to_datetime(df[date_field]).dt.strftime("%Y-%m-%d")
options = {
'title': {
'text': title
},
'tooltip': {'formatter': "{c}"},
'visualMap': {
'text': ['高', '低'],
'min': value_min,
'max': value_max,
'type': 'continuous',
'orient': 'horizontal',
'inRange': {
'color': ["#313695", "#4575b4", "#74add1", "#abd9e9", "#e0f3f8", "#ffffbf", "#fee090", "#fdae61",
"#f46d43", "#d73027", "#a50026"]
},
'left': 'center',
'top': 0,
'hoverLink': True
},
'calendar': {
'top': 60,
'left': 30,
'right': 30,
'cellSize': ['auto', 'auto'],
'range': [date_start, date_end],
'itemStyle': {
'borderWidth': 0.5
},
'dayLabel': {
'firstDay': 1
},
'monthLabel': {
'nameMap': 'cn'
},
'yearLabel': {'show': True}
},
'series': {
'type': 'heatmap',
'coordinateSystem': 'calendar',
'emphasis': {
'itemStyle': {
'borderColor': "#333",
'borderWidth': 1,
'shadowColor': 'rgba(0, 0, 0, 0.5)',
'shadowBlur': 15
}
},
'data': df[[date_field, value_field]].values.tolist()
}
}
return Echarts(options=options, width=width, height=height) | e92a41dcb533f5fdb0fba91bb1f80b0199d1523e | 6,898 |
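# A hedged usage sketch for calendar_heatmap_echarts() above: assumes the Echarts
# wrapper class from the same package; the column names here are made up.
df = pd.DataFrame({
    "trade_date": pd.date_range("2021-01-01", periods=365, freq="D"),
    "pnl": [i % 7 for i in range(365)],
})
chart = calendar_heatmap_echarts(df, date_field="trade_date", value_field="pnl", title="Daily PnL")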
from typing import Union
import numpy as np
import torch
def adj_to_edge_indices(adj: Union[torch.Tensor, np.ndarray]) -> Union[torch.Tensor, np.ndarray]:
"""
Args:
adj: a (N, N) adjacency matrix, where N is the number of nodes
Returns:
A (2, E) array, edge_idxs, where E is the number of edges,
and edge_idxs[0], edge_idxs[1] are the source & destination nodes, respectively.
"""
edge_tuples = torch.nonzero(adj, as_tuple=True) if torch.is_tensor(adj) else np.nonzero(adj)
edge_src = edge_tuples[0].unsqueeze(0) if torch.is_tensor(adj) else np.expand_dims(edge_tuples[0], axis=0)
edge_dest = edge_tuples[1].unsqueeze(0) if torch.is_tensor(adj) else np.expand_dims(edge_tuples[1], axis=0)
if torch.is_tensor(adj):
edge_idxs = torch.cat((edge_src, edge_dest), dim=0)
else:
edge_idxs = np.concatenate((edge_src, edge_dest), axis=0)
return edge_idxs | b84d978e7ea6b24cf9b4e8aaa074581d4516435d | 6,899 |
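# Usage sketch for adj_to_edge_indices() above, with a 3-node path graph.
adj = torch.tensor([[0, 1, 0],
                    [1, 0, 1],
                    [0, 1, 0]])
print(adj_to_edge_indices(adj))
# tensor([[0, 1, 1, 2],
#         [1, 0, 2, 1]])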