content (string, 35–762k chars) | sha1 (string, 40 chars) | id (int64, 0–3.66M)
---|---|---|
def trajnet_batch_multi_eval(preds, gt, seq_start_end):
"""Calculate Top-k ADE, Top-k FDE for batch of samples.
    preds = Num_modes x Num_ped x Num_timesteps x 2
gt = Num_ped x Num_timesteps x 2
seq_start_end (batch delimiter) = Num_batches x 2
"""
s_topk_ade = 0
s_topk_fde = 0
for (start, end) in seq_start_end:
s_preds = [pred[start:end] for pred in preds]
s_topk_ade += topk_ade(s_preds, gt[start:end])
s_topk_fde += topk_fde(s_preds, gt[start:end])
return s_topk_ade, s_topk_fde | ff93309e61d871a2d337810cc1836950f883c184 | 10,200 |
def disemvowel(sentence):
"""Disemvowel:
Given a sentence, return the sentence with all vowels removed.
>>> disemvowel('the quick brown fox jumps over the lazy dog')
'th qck brwn fx jmps vr th lzy dg'
"""
vowels = ('a','e','i','o','u')
    for x in sentence:
        if x.lower() in vowels:
            sentence = sentence.replace(x, "")
    return sentence | d9b6d873c29e82cb65e43f71e2b6298af18b25fd | 10,201 |
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import PolynomialFeatures
from sklearn.linear_model import LinearRegression
from sklearn import metrics
def runPolyReg(xValueList, yValueList, degrees):
"""
    Performs *Polynomial Regression* based on the arguments provided.
Note that we split the data by the *First* 80 percent of the data and then the *Last* 20 percent of the data, rather than randomly splitting the data by 80/20 for the Train/Test split.
Args:
        xValueList (list of floats) : List of X values used for polynomial regression. Offset 1 day earlier than the y values so we have something to predict. Prepared by *prepDataSets*. Can change based on the values saved in the configuration file.
yValueList (list of floats) : Close values tied to the X value list for the following day.
        degrees (int) : Degree the polynomial will be operating at.
:return:
model: The actual machine Learning model.
float: the R^2 score for the model.
"""
    splitValue = int(len(xValueList) * 0.2)
    xTrain, xTest, yTrain, yTest = (
        xValueList.iloc[:-splitValue],
        xValueList.iloc[-splitValue:],
        yValueList[:-splitValue],
        yValueList[-splitValue:],
    )
polyreg = make_pipeline(PolynomialFeatures(degree=degrees), LinearRegression())
polyreg.fit(xTrain, yTrain)
yPred = polyreg.predict(xTest)
    results = metrics.r2_score(yTest, yPred)
return (polyreg, results) | 25d4699f720d943dc49264edc12f2246df51f053 | 10,202 |
import numpy as np
def unfold_phi_vulpiani(phidp, kdp):
"""Alternative phase unfolding which completely relies on :math:`K_{DP}`.
    This unfolding should be used in order to iteratively reconstruct
    :math:`\Phi_{DP}` and :math:`K_{DP}` (see :cite:`Vulpiani2012`).
Parameters
----------
phidp : :class:`numpy:numpy.ndarray`
array of floats
kdp : :class:`numpy:numpy.ndarray`
array of floats
"""
# unfold phidp
shape = phidp.shape
phidp = phidp.reshape((-1, shape[-1]))
kdp = kdp.reshape((-1, shape[-1]))
for beam in range(len(phidp)):
below_th3 = kdp[beam] < -20
try:
idx1 = np.where(below_th3)[0][2]
phidp[beam, idx1:] += 360
except Exception:
pass
return phidp.reshape(shape) | 72386a05500c4ba11385e3b57288655e0a207352 | 10,203 |
import numpy as np
import pandas as pd
def get_result_df(session):
"""
query the match table and put results into pandas dataframe,
to train the team-level model.
"""
df_past = pd.DataFrame(
np.array(
[
[s.fixture.date, s.fixture.home_team, s.fixture.away_team, s.home_score, s.away_score]
for s in session.query(Result).all()
]
),
columns=["date", "home_team", "away_team", "home_goals", "away_goals"],
)
df_past["home_goals"] = df_past["home_goals"].astype(int)
df_past["away_goals"] = df_past["away_goals"].astype(int)
df_past["date"] = pd.to_datetime(df_past["date"])
return df_past | 364d9e7f9ef1a97018402fa964f246954f51f945 | 10,204 |
def permute1d(preserve_symmetry=True):
    """Choose order to rearrange rows or columns of puzzle."""
    bp = block_permutation(preserve_symmetry)
    ip = [block_permutation(False), block_permutation(preserve_symmetry)]
    if preserve_symmetry:
        ip.append([2 - ip[0][2], 2 - ip[0][1], 2 - ip[0][0]])
    else:
        ip.append(block_permutation(False))
    return [bp[i] * 3 + ip[i][j] for i in [0, 1, 2] for j in [0, 1, 2]] | a9ccd2cb486e0ee3d50840c6ab41871396f3ca93 | 10,205 |
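block_permutation is not defined in this snippet; a minimal hypothetical sketch consistent with how permute1d uses it (a random permutation of [0, 1, 2] that, when preserve_symmetry is set, commutes with reversal, i.e. p[2-i] == 2 - p[i]) could be:
import random

def block_permutation(preserve_symmetry=True):
    """Hypothetical helper: random permutation of [0, 1, 2]. With
    preserve_symmetry, only the identity or the full reversal qualify."""
    if preserve_symmetry:
        return random.choice([[0, 1, 2], [2, 1, 0]])
    p = [0, 1, 2]
    random.shuffle(p)
    return p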
import os
import subprocess
def find_diff(sha, files=None):
"""Find the diff since the given sha."""
if files:
for file_or_dir in files:
msg = f"{file_or_dir} doesn't exist. Please provide a valid path."
assert os.path.exists(file_or_dir), msg
else:
files = ['*.py']
res = subprocess.run(
['git', 'diff', '--unified=0', sha, '--'] + files,
stdout=subprocess.PIPE,
encoding='utf-8'
)
res.check_returncode()
return res.stdout | a9cdd27318180c1e9f6572dbb1c49b4a17384236 | 10,206 |
import os
def split_name_with_nii(filename):
"""
Returns the clean basename and extension of a file.
Means that this correctly manages the ".nii.gz" extensions.
:param filename: The filename to clean
:return: A tuple of the clean basename and the full extension
"""
base, ext = os.path.splitext(filename)
if ext == ".gz":
# Test if we have a .nii additional extension
temp_base, add_ext = os.path.splitext(base)
if add_ext == ".nii":
ext = add_ext + ext
base = temp_base
return base, ext | d897804e4a0b773a1c23bff8ad8d7e7e678a9799 | 10,207 |
from itertools import islice
from typing import Iterable
from typing import List
from typing import TypeVar
T_ = TypeVar("T_")
def take(n: int, iterable: Iterable[T_]) -> List[T_]:
"""Return first n items of the iterable as a list"""
return list(islice(iterable, n)) | 491cdaaa20ad67b480ea92acaeb53e4edf2b4d56 | 10,208 |
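For illustration, take never raises on short inputs; islice simply stops early:
assert take(3, range(10)) == [0, 1, 2]
assert take(5, "ab") == ["a", "b"]  # shorter iterables are not padded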
def abs(rv):
"""
Returns the absolute value of a random variable
"""
return rv.abs() | 6bf2f8420f8a5e883dfddfc9a93106662a8f1a74 | 10,209 |
import numpy as np
from scipy.spatial import distance
def compute_ssm(X, metric="cosine"):
    """Computes the self-similarity matrix of X."""
    D = distance.pdist(X, metric=metric)
    D = distance.squareform(D)
    D[np.isnan(D)] = 0  # guard against undefined pairwise distances
    D /= D.max()
    return 1 - D | 646d9af2134db13b69391817ddfeace0fef1217d | 10,210 |
def escape(instruction):
"""
Escape used dot graph characters in given instruction so they will be
displayed correctly.
"""
instruction = instruction.replace('<', r'\<')
instruction = instruction.replace('>', r'\>')
instruction = instruction.replace('|', r'\|')
instruction = instruction.replace('{', r'\{')
instruction = instruction.replace('}', r'\}')
instruction = instruction.replace(' ', ' ')
return instruction | 936ed1d6c55650bf5f9ce52af8f113a9d466a534 | 10,211 |
from collections import namedtuple
def _json_object_hook(d):
"""
JSON to object helper
:param d: data
:return: namedtuple
"""
keys = []
for k in d.keys():
if k[0].isdigit():
k = 'd_{}'.format(k)
keys.append(k)
return namedtuple('X', keys)(*d.values()) | a4a534a975d6faff440f66065d4954e2a5a91ff2 | 10,212 |
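A typical use is as the object_hook of json.loads, turning each JSON object into a namedtuple; the d_ prefix keeps digit-leading keys valid as field names:
import json

obj = json.loads('{"name": "x", "2nd": 5}', object_hook=_json_object_hook)
assert obj.name == "x"
assert obj.d_2nd == 5  # "2nd" was renamed to a valid identifier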
import numpy as np
from scipy.interpolate import interp1d
def _fourier_interpolate(x, y):
    """Resample y onto a uniformly spaced x grid via linear interpolation (as needed for FFTs)."""
xs = np.linspace(x[0], x[-1], len(x))
intp = interp1d(x, y, kind="linear", fill_value="extrapolate")
ys = intp(xs)
return xs, ys | cfe663b9e261bbaea2ab6fe58366f4ec3726468c | 10,213 |
import hashlib
def compute_hash_json_digest(*args, **kwargs):
"""compute json hash of given args and kwargs and return md5 hex digest"""
as_json = compute_hash_json(*args, **kwargs)
return hashlib.md5(as_json).hexdigest() | 98dfedb000e2780dba5007d9fe6abd7a74a43a31 | 10,214 |
def hello_world():
"""Print welcome message as the response body."""
return '{"info": "Refer to internal http://metadata-db for more information"}' | ecb2208053e4ff530bcc0dcc117172449a51afbd | 10,215 |
def get_role(server: discord.Server, role_arg: str) -> discord.Role:
"""
Get a role from a passed command parameter (name, mention or ID).
:return:
"""
try:
role_id = extract_role_id(role_arg)
except discord.InvalidArgument: # no ID, treat as a role name
try:
role = get_named_role(server, role_arg) # type: discord.Role
except discord.InvalidArgument:
logger.warning("Cannot find role {!r} as name or ID".format(role_arg))
role = None
else:
logger.debug("Found role ID in {!r}".format(role_arg))
role = discord.utils.get(server.roles, id=role_id) # type: discord.Role
if role is None:
        raise commands.BadArgument('No such role: {}'.format(role_arg))
return role | 2f7b2ae3ec3ed950c70eec6338e2f06f771e4bde | 10,216 |
import datetime
import google.protobuf.timestamp_pb2
def build_timestamp(timestamp=None) -> google.protobuf.timestamp_pb2.Timestamp:
    """Convert Python datetime to Protobuf Timestamp"""
    # https://github.com/protocolbuffers/protobuf/issues/3986
    proto_timestamp = google.protobuf.timestamp_pb2.Timestamp()
    # FromDatetime mutates in place and returns None, so return the message itself
    proto_timestamp.FromDatetime(timestamp or datetime.datetime.utcnow())
    return proto_timestamp | ae2278b66c200f007240ca5f683a60ebc1ebddf2 | 10,217 |
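A quick round-trip check (Timestamp.ToDatetime is the inverse conversion):
ts = build_timestamp(datetime.datetime(2021, 1, 2, 3, 4, 5))
assert ts.ToDatetime() == datetime.datetime(2021, 1, 2, 3, 4, 5)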
from typing import Dict
from typing import Any
import os
def set_workspace(data: Dict[str, Any]) -> Dict[str, Any]:
"""Set workspace."""
workspace_path = data.get("path", None)
if not workspace_path:
raise ClientErrorException("Parameter 'path' is missing in request.")
os.makedirs(workspace_path, exist_ok=True)
workdir = Workdir()
workdir.set_active_workspace(workspace_path)
return {"message": "SUCCESS"} | 5feb25f748e2fc57eb0ab7bca9f1d1d8c964156c | 10,218 |
import cPickle
def read_blosum():
"""Read blosum dict and delete some keys and values."""
with open('./psiblast/blosum62.pkl', 'rb') as f:
blosum_dict = cPickle.load(f)
temp = blosum_dict.pop('*')
temp = blosum_dict.pop('B')
temp = blosum_dict.pop('Z')
temp = blosum_dict.pop('X')
temp = blosum_dict.pop('alphas')
for key in blosum_dict:
for i in range(4):
temp = blosum_dict[key].pop()
return blosum_dict | ddbf71c03e05bd156ad688a9fe9692da1d0a3dc4 | 10,219 |
from typing import List
from typing import Tuple
def parse_spans_bio_with_errors(seq: List[str]) -> Tuple[List[Span], List[Error]]:
"""Parse a sequence of BIO labels into a list of spans but return any violations of the encoding scheme.
Note:
        In the case where labels violate the span encoding scheme, for example the
tag is a new type (like ``I-ORG``) in the middle of a span of another type
(like ``PER``) without a proper starting token (``B-ORG``) we will finish
the initial span and start a new one, resulting in two spans. This follows
the ``conlleval.pl`` script.
Note:
        Spans are returned sorted by their starting location. Because spans
        are not allowed to overlap, no tie-breaking policy is needed for two
        spans with the same starting location.
Note:
Errors are returned sorted by the location where the violation occurred. In the
case a single transition triggered multiple errors they are sorted lexically based
on the error type.
Args:
seq: The sequence of labels
Returns:
A list of spans and a list of errors.
"""
errors = []
spans = []
# This tracks the type of the span we are building out
span = None
# This tracks the tokens of the span we are building out
tokens = []
for i, s in enumerate(seq):
func = extract_function(s)
_type = extract_type(s)
# A `B` ends a span and starts a new one
if func == BIO.BEGIN:
# Save out the old span
if span is not None:
spans.append(Span(span, start=tokens[0], end=tokens[-1] + 1, tokens=tuple(tokens)))
# Start the new span
span = _type
tokens = [i]
# An `I` will continue a span when types match and start a new one otherwise.
elif func == BIO.INSIDE:
# A span is already being built
if span is not None:
# The types match so we just add to the current span
if span == _type:
tokens.append(i)
# Types mismatch so create a new span
else:
# Log error from type mismatch
LOGGER.warning("Illegal Label: I doesn't match previous token at %d", i)
errors.append(Error(i, "Illegal Transition", s, safe_get(seq, i - 1), safe_get(seq, i + 1)))
# Save out the previous span
spans.append(Span(span, start=tokens[0], end=tokens[-1] + 1, tokens=tuple(tokens)))
# Start a new span
span = _type
tokens = [i]
            # No span was being built, so start a new one with this I
else:
# Log error from starting with I
LOGGER.warning("Illegal Label: starting a span with `I` at %d", i)
errors.append(Error(i, "Illegal Start", s, safe_get(seq, i - 1), safe_get(seq, i + 1)))
span = _type
tokens = [i]
# An `O` will cut off a span being built out.
else:
if span is not None:
spans.append(Span(span, start=tokens[0], end=tokens[-1] + 1, tokens=tuple(tokens)))
# Set so no span is being built
span = None
tokens = []
    # If we fell off the end, save the entity we were building.
if span is not None:
spans.append(Span(span, start=tokens[0], end=tokens[-1] + 1, tokens=tuple(tokens)))
return sort_spans(spans), sort_errors(errors) | 6cea777cfb8bf96325f2695af2c48cc22c4884cf | 10,220 |
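safe_get is used above for the error context but is not defined here; a minimal hypothetical sketch consistent with that use (bounds-checked indexing that yields None instead of raising) would be:
def safe_get(seq, i):
    """Hypothetical helper: seq[i], or None when i is out of bounds."""
    return seq[i] if 0 <= i < len(seq) else None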
from difflib import SequenceMatcher
from typing import Sequence
from typing import Tuple
def find_best_similar_match(i1: int, i2: int, j1: int, j2: int, a: Sequence, b: Sequence, sm: SequenceMatcher = None) \
-> Tuple[int, int, float]:
"""
Finds most similar pair of elements in sequences bounded by indexes a[i1:i2], b[j1: j2].
:param i1: starting index in "a" sequence.
:param i2: ending index in "a" sequence.
:param j1: starting index in "b" sequence.
:param j2: ending index in "b" sequence.
:param a: first sequence.
:param b: second sequence.
:param sm: SequenceMatcher object. Creates new difflib.SequenceMatcher instance if not passed.
:return: Tuple (best_i, best_j, best_ratio) where:
best_i: is index of most similar element in sequence "a".
best_j: is index of most similar element in sequence "b".
best_ratio: similarity ratio of elements a[best_i] and b[best_j], where 1.0 means elements are identical
and 0.0 means that elements are completely different.
"""
best_ratio = 0.0
best_i = best_j = None
if not sm:
sm = SequenceMatcher()
for i in range(i1, i2):
sm.set_seq1(a[i])
for j in range(j1, j2):
if a[i] == b[j]:
continue
sm.set_seq2(b[j])
if sm.real_quick_ratio() > best_ratio and sm.quick_ratio() > best_ratio and sm.ratio() > best_ratio:
best_i = i
best_j = j
best_ratio = sm.ratio()
return best_i, best_j, best_ratio | ca6e73c2315e2d2419b631cb505131f3daabea4b | 10,221 |
def ConvUpscaleBlock(inputs, n_filters, kernel_size=[3, 3], scale=2):
"""
Basic conv transpose block for Encoder-Decoder upsampling
    Applies, successively: transposed convolution, batch normalization, ReLU nonlinearity
"""
    net = slim.conv2d_transpose(inputs, n_filters, kernel_size=kernel_size, stride=[scale, scale], activation_fn=None)
net = tf.nn.relu(slim.batch_norm(net, fused=True))
return net | 787104a3015bd901105383b203551573f9f07fcb | 10,222 |
import logging
def create_ticket(
client, chat_id, user_id, group_id, recipient_email, subject,
slack_message_url
):
"""Create a new zendesk ticket in response to a new user question.
:param client: The Zendesk web client to use.
:param chat_id: The conversation ID on slack.
:param user_id: Who to create the ticket as.
:param group_id: Which group the ticket belongs to.
    :param recipient_email: The email address to CC on the issue.
:param subject: The title of the support issue.
:param slack_message_url: The link to message on the support slack channel.
:returns: A Zenpy.Ticket instance.
"""
log = logging.getLogger(__name__)
log.debug(
f'Assigning new ticket subject:<{subject}> to '
f'user:<{user_id}> and group:<{group_id}> '
)
# And assign this ticket to them. I can then later filter comments that
# should go to the ZenSlackChat webhook to just those in the ZenSlackChat
# group.
issue = Ticket(
type='ticket',
external_id=chat_id,
        requester_id=user_id,
        submitter_id=user_id,
        assignee_id=user_id,
group_id=group_id,
subject=subject,
description=subject,
recipient=recipient_email,
comment=Comment(
body=f'This is the message on slack {slack_message_url}.',
author_id=user_id
)
)
log.debug(f'Creating new ticket with subject:<{subject}>')
ticket_audit = client.tickets.create(issue)
ticket_id = ticket_audit.ticket.id
log.debug(f'Ticket for subject:<{subject}> created ok:<{ticket_id}>')
return ticket_audit.ticket | ea71cd7055b64997660f7da4ea25dec0b41465ba | 10,223 |
from django.utils.crypto import get_random_string
def make_random_password(self, length=10, allowed_chars='abcdefghjkmnpqrstuvwxyzABCDEFGHJKLMNPQRSTUVWXYZ23456789'):
"""
Generate a random password with the given length and given
allowed_chars. The default value of allowed_chars does not have "I" or
"O" or letters and digits that look similar -- just to avoid confusion.
"""
return get_random_string(length, allowed_chars) | be155b2537b062a396ed1d5aed6367857b21d49e | 10,224 |
def autocov_vector(x, nlags=None):
"""
This method computes the following function
.. math::
R_{xx}(k) = E{ x(t)x^{*}(t-k) } = E{ x(t+k)x^{*}(k) }
k \in {0, 1, ..., nlags-1}
(* := conjugate transpose)
Note: this is related to
the other commonly used definition for vector autocovariance
.. math::
R_{xx}^{(2)}(k) = E{ x(t-k)x^{*}(k) } = R_{xx}^{*}(k) = R_{xx}(-k)
Parameters
----------
x: ndarray (nc, N)
nlags: int, optional
compute lags for k in {0, ..., nlags-1}
Returns
-------
rxx : ndarray (nc, nc, nlags)
"""
return crosscov_vector(x, x, nlags=nlags) | 8725b2695b51c014e8234605bc5e64ad1ca0c26b | 10,225 |
def sequence_masking(x, mask, mode=0, axis=None, heads=1):
"""为序列条件mask的函数
mask: 形如(batch_size, sequence)的0-1矩阵;
mode: 如果是0,则直接乘以mask;
如果是1,则在padding部分减去一个大正数。
axis: 序列所在轴,默认为1;
heads: 相当于batch这一维要被重复的次数。
"""
if mask is None or mode not in [0, 1]:
return x
else:
        if heads != 1:
mask = K.expand_dims(mask, 1)
mask = K.tile(mask, (1, heads, 1))
mask = K.reshape(mask, (-1, K.shape(mask)[2]))
if axis is None:
axis = 1
if axis == -1:
axis = K.ndim(x) - 1
assert axis > 0, "axis must be greater than 0"
for _ in range(axis - 1):
mask = K.expand_dims(mask, 1)
for _ in range(K.ndim(x) - K.ndim(mask) - axis + 1):
mask = K.expand_dims(mask, K.ndim(mask))
if mode == 0:
return x * mask
else:
return x - (1 - mask) * 1e12 | ac7e0da24eca87ab3510c1c274f0caeb2d527816 | 10,226 |
def declare_encoding(log, labelling, encoding, additional_columns, cols=None): #TODO JONAS
"""creates and returns the DataFrame encoded using the declare encoding
:param log:
:param labelling:
:param encoding:
:param additional_columns:
:param cols:
:return:
"""
filter_t = True
print("Filter_t", filter_t)
templates = template_sizes.keys()
constraint_threshold = 0.1
candidate_threshold = 0.1
#apply prefix
log = [Trace(trace[:encoding.prefix_length], attributes=trace.attributes) for trace in log]
# Read into suitable data structure
transformed_log = xes_to_positional(log)
labels = {trace.attributes['concept:name']: trace.attributes['label'] for trace in log}
# Extract unique activities from log
events_set = {event_label for tid in transformed_log for event_label in transformed_log[tid]}
# Brute force all possible candidates
if cols is None:
candidates = [(event,) for event in events_set] + [(e1, e2) for e1 in events_set for e2 in events_set if e1 != e2]
else:
candidates = list({
make_tuple(c.split(':')[1]) if len(c.split(':')) > 1 else c
for c in cols
if c not in ['label', 'trace_id']
})
print("Start candidates:", len(candidates))
# Count by class
true_count = len([trace.attributes['concept:name'] for trace in log if trace.attributes['label'] == 'true'])
false_count = len(log) - true_count
print("{} deviant and {} normal traces in set".format(false_count, true_count))
ev_support_true = int(true_count * candidate_threshold)
ev_support_false = int(false_count * candidate_threshold)
if filter_t and cols is None:
print(filter_t)
print("Filtering candidates by support")
candidates = filter_candidates_by_support(candidates, transformed_log, labels, ev_support_true, ev_support_false)
print("Support filtered candidates:", len(candidates))
constraint_support_false = int(false_count * constraint_threshold)
constraint_support_true = int(true_count * constraint_threshold)
train_results = generate_train_candidate_constraints(candidates, templates, transformed_log, labels, constraint_support_true, constraint_support_false, filter_t=filter_t)
print("Candidate constraints generated")
# transform to numpy
# get trace names
data, labels, featurenames, train_names = transform_results_to_numpy(train_results, labels, transformed_log, cols)
df = pd.DataFrame(data, columns=featurenames)
df["trace_id"] = train_names
df["label"] = labels.tolist()
return df | 956bc6e37d2909abaa96abe862187179bc7b50df | 10,227 |
def __long_description() -> str:
"""Returns project long description."""
return f"{__readme()}\n\n{__changelog()}" | 53260637e4e4f1e59e6a67238577fb6969e7769c | 10,228 |
def captains_draft(path=None, config=None):
"""Similar to captains mode with a 27 heroes, only 3 bans per teams"""
game = _default_game(path, config=config)
game.options.game_mode = int(DOTA_GameMode.DOTA_GAMEMODE_CD)
return game | 05af49626cff0827ff1b78ffb1da082bba160d29 | 10,229 |
def create(width, height, pattern=None):
"""Create an image optionally filled with the given pattern.
:note: You can make no assumptions about the return type; usually it will
be ImageData or CompressedImageData, but patterns are free to return
any subclass of AbstractImage.
:Parameters:
`width` : int
Width of image to create
`height` : int
Height of image to create
`pattern` : ImagePattern or None
Pattern to fill image with. If unspecified, the image will
initially be transparent.
:rtype: AbstractImage
"""
if not pattern:
pattern = SolidColorImagePattern()
return pattern.create_image(width, height) | dcd287353c84924afcdd0a56e9b51f00cde7bb85 | 10,230 |
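Assuming this is the create function from pyglet.image (with SolidColorImagePattern in scope), usage looks like:
img = create(64, 64)  # fully transparent 64x64 image
red = create(8, 8, SolidColorImagePattern((255, 0, 0, 255)))  # solid red 8x8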
import logging
import numpy as np
import rdkit.Chem
def compute_conformer(smile: str, max_iter: int = -1) -> np.ndarray:
"""Computes conformer.
Args:
smile: Smile string.
max_iter: Maximum number of iterations to perform when optimising MMFF force
field. If set to <= 0, energy optimisation is not performed.
Returns:
        Per-atom conformer features as a float32 array.
Raises:
RuntimeError: If unable to convert smile string to RDKit mol.
"""
mol = rdkit.Chem.MolFromSmiles(smile)
if not mol:
raise RuntimeError('Unable to convert smile to molecule: %s' % smile)
conformer_failed = False
try:
mol = generate_conformers(
mol,
max_num_conformers=1,
random_seed=45,
prune_rms_thresh=0.01,
max_iter=max_iter)
except IOError as e:
logging.exception('Failed to generate conformers for %s . IOError %s.',
smile, e)
conformer_failed = True
except ValueError:
logging.error('Failed to generate conformers for %s . ValueError', smile)
conformer_failed = True
except: # pylint: disable=bare-except
logging.error('Failed to generate conformers for %s.', smile)
conformer_failed = True
atom_features_list = []
conformer = None if conformer_failed else list(mol.GetConformers())[0]
for atom in mol.GetAtoms():
atom_features_list.append(atom_to_feature_vector(atom, conformer))
conformer_features = np.array(atom_features_list, dtype=np.float32)
return conformer_features | 0b923d7616741312d8ac129d7c7c99081a2c3f97 | 10,231 |
def get_api_key():
    """Load API key."""
    with open('mailgun_api_key.txt', 'r') as api_key_file:
        api_key = api_key_file.read()
    return api_key.strip() | 55c87d15d616f0f6dfbc727253c2222128b63560 | 10,232 |
def bitserial_conv2d_strategy_hls(attrs, inputs, out_type, target):
"""bitserial_conv2d hls strategy"""
strategy = _op.OpStrategy()
layout = attrs.data_layout
if layout == "NCHW":
strategy.add_implementation(
wrap_compute_bitserial_conv2d(topi.nn.bitserial_conv2d_nchw),
wrap_topi_schedule(topi.hls.schedule_bitserial_conv2d_nchw),
name="bitserial_conv2d_nchw.hls",
)
elif layout == "NHWC":
strategy.add_implementation(
wrap_compute_bitserial_conv2d(topi.nn.bitserial_conv2d_nhwc),
wrap_topi_schedule(topi.hls.schedule_bitserial_conv2d_nhwc),
name="bitserial_conv2d_nhwc.hls",
)
else:
raise ValueError("Data layout {} not supported.".format(layout))
return strategy | f009b1f7ac073573877b1ddab616868cdf1d42c7 | 10,233 |
import multiprocessing as mp
import os
import numpy as np
import yaml
from astropy.nddata import CCDData
def visit(planfile, tracefile=None):
""" Reduce an APOGEE visit
Driver to do 3 chips in parallel
Makes median flux plots
"""
# reduce channels in parallel
chan=['a','b','c' ]
procs=[]
for channel in [1] :
kw={'planfile' : planfile, 'channel' : channel, 'clobber' : False}
procs.append(mp.Process(target=do_visit,kwargs=kw))
for proc in procs : proc.start()
for proc in procs : proc.join()
plan=yaml.load(open(planfile,'r'), Loader=yaml.FullLoader)
fig,ax=plots.multi(1,1)
allmags=[]
allinst=[]
for ichan,channel in enumerate([1]) :
mags=[]
inst=[]
for obj in plan['APEXP'] :
if obj['flavor'] != 'object' : continue
name='ap1D-{:s}-{:08d}.fits'.format(chan[channel],obj['name'])
out=CCDData.read(name)
print(name,out.header['NREAD'])
mapname=plan['plugmap']
if np.char.find(mapname,'conf') >=0 :
plug=sdss.config(out.header['CONFIGID'],specid=2)
hmag=plug['h_mag']
else :
plug=sdss.config(os.environ['MAPPER_DATA_N']+'/'+mapname.split('-')[1]+'/plPlugMapM-'+mapname+'.par',specid=2,struct='PLUGMAPOBJ')
plate=int(mapname.split('-')[0])
holes=yanny('{:s}/plates/{:04d}XX/{:06d}/plateHolesSorted-{:06d}.par'.format(
os.environ['PLATELIST_DIR'],plate//100,plate,plate))
h=esutil.htm.HTM()
m1,m2,rad=h.match(plug['ra'],plug['dec'],holes['STRUCT1']['target_ra'],holes['STRUCT1']['target_dec'],0.1/3600.,maxmatch=500)
hmag=plug['mag'][:,1]
hmag[m1]=holes['STRUCT1']['tmass_h'][m2]
i1,i2=match.match(300-np.arange(300),plug['fiberId'])
mag='H'
rad=np.sqrt(plug['xFocal'][i2]**2+plug['yFocal'][i2]**2)
plots.plotp(ax,hmag[i2],+2.5*np.log10(np.median(out.data/(out.header['NREAD']-2),axis=1))[i1],color=None,
zr=[0,300],xr=[8,15],size=20,label=name,xt=mag,yt='-2.5*log(cnts/read)')
mags.append(hmag[i2])
inst.append(-2.5*np.log10(np.median(out.data/(out.header['NREAD']-2),axis=1))[i1])
ax.grid()
ax.legend()
allmags.append(mags)
allinst.append(inst)
fig.suptitle(planfile)
fig.tight_layout()
fig.savefig(planfile.replace('.yaml','.png'))
return allmags,allinst | c8a17a07a355ad2eb493e247769ea441458735e0 | 10,234 |
def get_fpga_bypass_mode(serverid):
""" Read back FPGA bypass mode setting
"""
try:
interface = get_ipmi_interface(serverid, ["ocsoem", "fpgaread", "mode"])
return parse_get_fpga_bypass_mode(interface, "mode")
    except Exception as e:
return set_failure_dict("get_fpga_bypass_mode() Exception {0}".format(e), completion_code.failure) | b572a372c6c73bb0f65686b3235e3362d31e8655 | 10,235 |
def lookup_complement(binding):
"""
Extracts a complement link from the scope of the given binding.
Returns an instance of :class:`htsql.core.tr.binding.Recipe`
or ``None`` if a complement link is not found.
`binding` (:class:`htsql.core.tr.binding.Binding`)
A binding node.
"""
probe = ComplementProbe()
return lookup(binding, probe) | 104f7b0139a8ca6390cb90dc10529d3be9a723ea | 10,236 |
import itertools
def flatten(colours):
"""Flatten the cubular array into one long list."""
return list(itertools.chain.from_iterable(itertools.chain.from_iterable(colours))) | 41576ef947354c30d1995fefdd30ad86bddbfe6f | 10,237 |
def efficientnet_b6(pretrained=False, num_classes=1000, in_chans=3, **kwargs):
"""EfficientNet-B6"""
# NOTE for train, drop_rate should be 0.5
# kwargs['drop_connect_rate'] = 0.2 # set when training, TODO add as cmd arg
model_name = "tf_efficientnet_b6"
default_cfg = default_cfgs[model_name]
model = _gen_efficientnet(
model_name=model_name,
channel_multiplier=1.8,
depth_multiplier=2.6,
num_classes=num_classes,
in_chans=in_chans,
**kwargs
)
model.default_cfg = default_cfg
if pretrained:
load_pretrained(model, default_cfgs[model_name], num_classes)
return model | 0f47b42a000e0d58dd01e7254dc5187e298ad8e5 | 10,238 |
import numpy
def create_word_search_board(number: int):
"""
This function creates a numpy array of zeros, with dimensions of
number x number, which is set by the user. The array is then
iterated through, and zeros are replaced with -1's to avoid
confusion with the alphabet (A) beginning at 0.
"""
board = numpy.zeros((number, number))
for i in range(len(board)):
for x in range(number):
board[i][x] = -1
return board | 31f22d56c947f61840ba87d028eb7de275d33cc9 | 10,239 |
from django.utils.safestring import mark_safe
def get_parent_choices(menu, menu_item=None):
"""
Returns flat list of tuples (possible_parent.pk, possible_parent.caption_with_spacer).
    If 'menu_item' is not given or None, returns every item of the menu. If given, intentionally omits it and its descendants from the list.
"""
def get_flat_tuples(menu_item, excepted_item=None):
if menu_item == excepted_item:
return []
else:
choices = [(menu_item.pk, mark_safe(menu_item.caption_with_spacer()))]
if menu_item.has_children():
for child in menu_item.children():
choices += get_flat_tuples(child, excepted_item)
return choices
return get_flat_tuples(menu.root_item, menu_item) | c88ca93f7e8a7907425a51323ba53bb75bdf29c2 | 10,240 |
import jax.scipy.linalg
def _update_jacobian(state, jac):
"""
we update the jacobian using J(t_{n+1}, y^0_{n+1})
following the scipy bdf implementation rather than J(t_n, y_n) as per [1]
"""
J = jac(state.y0, state.t + state.h)
n_jacobian_evals = state.n_jacobian_evals + 1
LU = jax.scipy.linalg.lu_factor(state.M - state.c * J)
n_lu_decompositions = state.n_lu_decompositions + 1
return state._replace(
J=J,
n_jacobian_evals=n_jacobian_evals,
LU=LU,
n_lu_decompositions=n_lu_decompositions,
) | 31570ad29dca3ee01281819865e6efe1aec4050d | 10,241 |
import tensorflow as tf
from typing import Tuple
from typing import List
def reduce_pad(sess: tf.Session, op_tensor_tuple: Tuple[Op, List[tf.Tensor]], _) -> (str, tf.Operation, tf.Operation):
"""
Pad module reducer
:param sess: current tf session
:param op_tensor_tuple: tuple containing the op to reduce, and a list of input tensors to the op
"""
name = "reduced_" + op_tensor_tuple[0].dotted_name
pad_op = op_tensor_tuple[0].get_module()
# Get padding tensor dimensions
# Padding dimension information is captured in an input tensor to the pad op, index 1 of pad op inputs
# Dimensions of this tensor are always (N, 2), where N is the dimensionality of the input tensor coming into pad.
# The value of padding[N][0] gives the amount to pad in dimension N prior to the contents of the input to pad, while
# padding[N][1] gives the amount to pad in dimension N after the contents of the input.
# Currently we do not support reducing a pad op that modifies the channel dimension, which is the last dimension,
# indexed by -1 below. So check to make sure that indices [-1][0] and [-1][1] remain 0 (no padding).
padding_tensor_eval = sess.run(pad_op.inputs[1])
if padding_tensor_eval[-1][0] != 0 or padding_tensor_eval[-1][1] != 0:
raise NotImplementedError("Attempting to reduce pad operation that modifies channel size, not supported.")
new_padding_tensor = tf.constant(padding_tensor_eval) # No need to actually modify padding tensor
# Get constant value for padding
# If pad op takes a non default constant value (default = 0), it appears as a third input tensor to pad op, index 2
const_val = 0
if len(pad_op.inputs) > 2:
const_val = sess.run(pad_op.inputs[2])
# Get mode
# Mode can be 'CONSTANT', 'SYMMETRIC', or 'REFLECT'. 'CONSTANT' is default, and will not appear as a mode attribute
# if it is the case.
try:
mode = pad_op.get_attr('mode')
mode = mode.decode('utf-8')
except ValueError:
mode = 'CONSTANT'
new_tensor = tf.pad(op_tensor_tuple[1][0],
new_padding_tensor,
constant_values=const_val,
mode=mode,
name=name)
module = sess.graph.get_operation_by_name(name)
return name, new_tensor.op, module | 29d7e8daf85a9fe8fee118fd5ec5dc00018120a9 | 10,242 |
def parse_fastq(fh):
""" Parse reads from a FASTQ filehandle. For each read, we
return a name, nucleotide-string, quality-string triple. """
reads = []
while True:
first_line = fh.readline()
if len(first_line) == 0:
break # end of file
name = first_line[1:].rstrip()
seq = fh.readline().rstrip()
fh.readline() # ignore line starting with +
qual = fh.readline().rstrip()
reads.append((name, seq, qual))
return reads | d33d3efebdd1c5f61e25397328c6b0412f1911dd | 10,243 |
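A small self-contained check with an in-memory FASTQ record:
import io

fq = io.StringIO("@read1\nACGT\n+\nIIII\n")
assert parse_fastq(fq) == [("read1", "ACGT", "IIII")]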
def minhash_256(features):
# type: (List[int]) -> bytes
"""
Create 256-bit minimum hash digest.
:param List[int] features: List of integer features
:return: 256-bit binary from the least significant bits of the minhash values
:rtype: bytes
"""
return compress(minhash(features), 4) | 1dba3d02dd05bfd2358211fa97d99ce136cc198d | 10,244 |
def coalesce(*values):
"""Returns the first not-None arguement or None"""
return next((v for v in values if v is not None), None) | 245177f43962b4c03c2347725a2e87f8eb5dc08a | 10,245 |
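Note that, unlike a chain of `or`, falsy values such as 0 or "" are returned as long as they are not None:
assert coalesce(None, 0, 5) == 0
assert coalesce(None, "", "x") == ""
assert coalesce(None, None) is None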
import enoki as ek
from mitsuba.core.xml import load_string
def test06_load_various_features(variant_scalar_rgb, mesh_format, features, face_normals):
"""Tests the OBJ & PLY loaders with combinations of vertex / face normals,
presence and absence of UVs, etc.
"""
def test():
shape = load_string("""
<shape type="{0}" version="2.0.0">
<string name="filename" value="resources/data/tests/{0}/rectangle_{1}.{0}" />
<boolean name="face_normals" value="{2}" />
</shape>
""".format(mesh_format, features, str(face_normals).lower()))
assert shape.has_vertex_normals() == (not face_normals)
positions = shape.vertex_positions_buffer()
normals = shape.vertex_normals_buffer()
texcoords = shape.vertex_texcoords_buffer()
faces = shape.faces_buffer()
(v0, v2, v3) = [positions[i*3:(i+1)*3] for i in [0, 2, 3]]
assert ek.allclose(v0, [-2.85, 0.0, -7.600000], atol=1e-3)
assert ek.allclose(v2, [ 2.85, 0.0, 0.599999], atol=1e-3)
assert ek.allclose(v3, [ 2.85, 0.0, -7.600000], atol=1e-3)
if 'uv' in features:
assert shape.has_vertex_texcoords()
(uv0, uv2, uv3) = [texcoords[i*2:(i+1)*2] for i in [0, 2, 3]]
# For OBJs (and .serialized generated from OBJ), UV.y is flipped.
if mesh_format in ['obj', 'serialized']:
assert ek.allclose(uv0, [0.950589, 1-0.988416], atol=1e-3)
assert ek.allclose(uv2, [0.025105, 1-0.689127], atol=1e-3)
assert ek.allclose(uv3, [0.950589, 1-0.689127], atol=1e-3)
else:
assert ek.allclose(uv0, [0.950589, 0.988416], atol=1e-3)
assert ek.allclose(uv2, [0.025105, 0.689127], atol=1e-3)
assert ek.allclose(uv3, [0.950589, 0.689127], atol=1e-3)
if shape.has_vertex_normals():
for n in [normals[i*3:(i+1)*3] for i in [0, 2, 3]]:
assert ek.allclose(n, [0.0, 1.0, 0.0])
return fresolver_append_path(test)() | a0117fe48b53e448181014e006ce13368c777d90 | 10,246 |
import torch
def euc_reflection(x, a):
"""
Euclidean reflection (also hyperbolic) of x
Along the geodesic that goes through a and the origin
(straight line)
"""
xTa = torch.sum(x * a, dim=-1, keepdim=True)
norm_a_sq = torch.sum(a ** 2, dim=-1, keepdim=True).clamp_min(MIN_NORM)
proj = xTa * a / norm_a_sq
return 2 * proj - x | 83b5a8559e783b24d36a18fb30059dce82bf9cf7 | 10,247 |
import httplib
def is_online():
"""Check if host is online"""
conn = httplib.HTTPSConnection("www.google.com", timeout=1)
try:
conn.request("HEAD", "/")
return True
except Exception:
return False
finally:
conn.close() | 4dd9d2050c94674ab60e0dfbcfa0c713915aa2f3 | 10,248 |
def text_value(s):
"""Convert a raw Text property value to the string it represents.
Returns an 8-bit string, in the encoding of the original SGF string.
This interprets escape characters, and does whitespace mapping:
- linebreak (LF, CR, LFCR, or CRLF) is converted to \n
- any other whitespace character is replaced by a space
- backslash followed by linebreak disappears
- other backslashes disappear (but double-backslash -> single-backslash)
"""
s = _newline_re.sub(b"\n", s)
s = s.translate(_whitespace_table)
is_escaped = False
result = []
for chunk in _chunk_re.findall(s):
if is_escaped:
if chunk != b"\n":
result.append(chunk)
is_escaped = False
elif chunk == b"\\":
is_escaped = True
else:
result.append(chunk)
return b"".join(result) | 24d40367dbefcfbdd0420eb466cf6d09657b2768 | 10,249 |
def modifica_immobile_pw():
    """Receives the ID of the property to modify and changes an attribute chosen by the user."""
    s = input("Do you want the list of properties to pick the property ID to modify? (Y/N)")
    if s == "Y" or s == "y":
        stampa_immobili_pw()
    s = input("Give me the property ID to modify -")
    immo = Immobile.select().where(Immobile.id == int(s)).get()
    scel = input("What do you want to modify?\ni=owner ID -\nd=address -\np=price -\nc=energy class ")
    if scel == "i":
        # check: if immo were a list it would need to be iterated; as a single object it does not
        id_cliente = input("Give me the new client ID of the owner -")
        immo.cliente_id = int(id_cliente)
    elif scel == "d":
        new_indirizzo = input("Give me the new address of the property -")
        immo.indirizzo = new_indirizzo
    elif scel == "p":
        new_prezzo = input("Give me the new price of the property -")
        immo.prezzo = int(new_prezzo)
    elif scel == "c":
        new_classe = input("Give me the new energy class of the property -")
        immo.classe_energ = new_classe
    immo.save()
    return True | 99905d61d91178092dba8860265b2034b3f8430b | 10,250 |
def hpat_pandas_series_len(self):
"""
Pandas Series operator :func:`len` implementation
.. only:: developer
Test: python -m hpat.runtests hpat.tests.test_series.TestSeries.test_series_len
Parameters
----------
series: :class:`pandas.Series`
Returns
-------
:obj:`int`
number of items in the object
"""
_func_name = 'Operator len().'
if not isinstance(self, SeriesType):
raise TypingError('{} The object must be a pandas.series. Given: {}'.format(_func_name, self))
def hpat_pandas_series_len_impl(self):
return len(self._data)
return hpat_pandas_series_len_impl | 57bdd2a7f7ae54861943fb44f3bc51f1f6544911 | 10,251 |
import numpy as np
from typing import List
def arrays_not_same_size(inputs: List[np.ndarray]) -> bool:
    """Checks whether the input arrays differ in size.
    Args:
        inputs (List[np.ndarray]): Input arrays to validate
    Returns:
        True if the arrays are not all the same size, False if they are
    """
shapes = [i.shape for i in inputs]
shp_first = shapes[0]
shp_rest = shapes[1:]
return not np.array_equiv(shp_first, shp_rest) | 8b9988f49d766bc7a27b79cf6495182e98a8fe18 | 10,252 |
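Consistent with the function's name, it answers "are the sizes different?":
assert not arrays_not_same_size([np.zeros((2, 3)), np.ones((2, 3))])
assert arrays_not_same_size([np.zeros((2, 3)), np.zeros((3, 2))])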
from vtk import vtkPNGReader, vtkPNMReader, vtkJPEGReader, vtkTIFFReader
def GetReaderForFile(filename):
"""
Given a filename return a VTK reader that can read it
"""
r = vtkPNGReader()
if not r.CanReadFile(filename):
r = vtkPNMReader()
if not r.CanReadFile(filename):
r = vtkJPEGReader()
if not r.CanReadFile(filename):
r = vtkTIFFReader()
if not r.CanReadFile(filename):
return None
r.SetFileName(filename)
return r | f574417df44f8a43277e62967ec6fd4c986fa85a | 10,253 |
import plotly.express as px
import plotly.graph_objects as go
def build_figure_nn(df, non_private, semantic):
"""
Dataframe with one semantic and one model
"""
l = df.query("epsilon > 0").sort_values(["train_size", "epsilon"])
naive, low, high = get_plot_bounds(df)
fig = px.line(
l,
x="train_size",
y="accuracy",
range_y=[low, high],
color="epsilon",
hover_data=["n_blocks", "delta", "noise"],
title=f"{list(l['task'])[0]} {list(l['model'])[0]} {semantic} accuracy",
log_y=False,
).update_traces(mode="lines+markers")
fig.add_trace(
go.Scatter(
x=non_private.sort_values("train_size")["train_size"],
y=non_private.sort_values("train_size")["accuracy"],
mode="lines+markers",
name="Non private",
)
)
fig.add_trace(
go.Scatter(
x=l["train_size"],
y=[naive] * len(l),
mode="lines",
name="Naive baseline",
)
)
return fig | 5eab366e20eaec721d7155d82e42d9222cacd3b5 | 10,254 |
import numpy as np
def get_incomplete_sample_nrs(df):
""" Returns sample nrs + topologies if at least 1 algorithm result is missing """
topology_incomplete_sample_nr_map = dict()
n_samples = df.loc[df['sample_idx'].idxmax()]['sample_idx'] + 1
for ilp_method in np.unique(df['algorithm_complete']):
dfx = df[df['algorithm_complete'] == ilp_method]
dfg_tops = dfx.groupby(by='topology_name')
for key, group in dfg_tops:
if n_samples > group.shape[0]:
if key not in topology_incomplete_sample_nr_map:
topology_incomplete_sample_nr_map[key] = set()
for s_nr in range(n_samples):
if s_nr not in list(group['sample_idx']):
topology_incomplete_sample_nr_map[key].add(s_nr)
return topology_incomplete_sample_nr_map | 2d816d80bb2f0c2686780ca49d0c01e89c69e7b5 | 10,255 |
from typing import Optional
from pysam import AlignedSegment
def _read_pos_at_ref_pos(rec: AlignedSegment,
ref_pos: int,
previous: Optional[bool] = None) -> Optional[int]:
"""
Returns the read or query position at the reference position.
If the reference position is not within the span of reference positions to which the
read is aligned an exception will be raised. If the reference position is within the span
but is not aligned (i.e. it is deleted in the read) behavior is controlled by the
"previous" argument.
Args:
rec: the AlignedSegment within which to find the read position
ref_pos: the reference position to be found
previous: Controls behavior when the reference position is not aligned to any
read position. True indicates to return the previous read position, False
indicates to return the next read position and None indicates to return None.
Returns:
The read position at the reference position, or None.
"""
if ref_pos < rec.reference_start or ref_pos >= rec.reference_end:
raise ValueError(f"{ref_pos} is not within the reference span for read {rec.query_name}")
pairs = rec.get_aligned_pairs()
index = 0
read_pos = None
for read, ref in pairs:
if ref == ref_pos:
read_pos = read
break
else:
index += 1
    if read_pos is None and previous is not None:
if previous:
while read_pos is None and index > 0:
index -= 1
read_pos = pairs[index][0]
else:
while read_pos is None and index < len(pairs):
read_pos = pairs[index][0]
index += 1
return read_pos | 51270a1c1a5f69b179e3623824632443775ec9c7 | 10,256 |
from astropy.io import fits as pf
import numpy as np
import logging
def load_gtis(fits_file, gtistring=None):
"""Load GTI from HDU EVENTS of file fits_file."""
gtistring = _assign_value_if_none(gtistring, 'GTI')
logging.info("Loading GTIS from file %s" % fits_file)
lchdulist = pf.open(fits_file, checksum=True)
lchdulist.verify('warn')
gtitable = lchdulist[gtistring].data
gti_list = np.array([[a, b]
for a, b in zip(gtitable.field('START'),
gtitable.field('STOP'))],
dtype=np.longdouble)
lchdulist.close()
return gti_list | c1a8019d052ce437680e6505e65134a5ed66a1a3 | 10,257 |
import demjson
import pandas as pd
import requests
def macro_australia_unemployment_rate():
    """
    Eastmoney - economic data - Australia - unemployment rate
    http://data.eastmoney.com/cjsj/foreign_5_2.html
    :return: unemployment rate
    :rtype: pandas.DataFrame
    """
url = "http://datainterface.eastmoney.com/EM_DataCenter/JS.aspx"
params = {
"type": "GJZB",
"sty": "HKZB",
"js": "({data:[(x)],pages:(pc)})",
"p": "1",
"ps": "2000",
"mkt": "5",
"stat": "2",
"_": "1625474966006",
}
r = requests.get(url, params=params)
data_text = r.text
data_json = demjson.decode(data_text[1:-1])
temp_df = pd.DataFrame([item.split(",") for item in data_json["data"]])
temp_df.columns = [
"时间",
"前值",
"现值",
"发布日期",
]
temp_df["前值"] = pd.to_numeric(temp_df["前值"])
temp_df["现值"] = pd.to_numeric(temp_df["现值"])
return temp_df | 260debcfaf342d08acacfe034da51b3d3162393e | 10,258 |
import math
import numpy as np
from typing import List
def _convert_flattened_paths(
paths: List,
quantization: float,
scale_x: float,
scale_y: float,
offset_x: float,
offset_y: float,
simplify: bool,
) -> "LineCollection":
"""Convert a list of FlattenedPaths to a :class:`LineCollection`.
Args:
paths: list of FlattenedPaths
quantization: maximum length of linear elements to approximate curve paths
scale_x, scale_y: scale factor to apply
offset_x, offset_y: offset to apply
simplify: should Shapely's simplify be run
Returns:
new :class:`LineCollection` instance containing the converted geometries
"""
lc = LineCollection()
for result in paths:
        # Here we load the sub-parts of the path element. If such sub-parts are connected,
        # we merge them into a single line (e.g. line string, etc.). If there are discontinuities
        # in the path (e.g. multiple "M" commands), we create several lines
sub_paths: List[List[complex]] = []
for elem in result:
if isinstance(elem, svg.Line):
coords = [elem.start, elem.end]
else:
# This is a curved element that we approximate with small segments
step = int(math.ceil(elem.length() / quantization))
coords = [elem.start]
coords.extend(elem.point((i + 1) / step) for i in range(step - 1))
coords.append(elem.end)
# merge to last sub path if first coordinates match
if sub_paths:
if sub_paths[-1][-1] == coords[0]:
sub_paths[-1].extend(coords[1:])
else:
sub_paths.append(coords)
else:
sub_paths.append(coords)
for sub_path in sub_paths:
path = np.array(sub_path)
# transform
path += offset_x + 1j * offset_y
path.real *= scale_x
path.imag *= scale_y
lc.append(path)
if simplify:
mls = lc.as_mls()
lc = LineCollection(mls.simplify(tolerance=quantization))
return lc | 876421cd7f89dc5f3d64357e76f302c633e41ba7 | 10,259 |
def _CustomSetAttr(self, sAttr, oValue):
""" Our setattr replacement for DispatchBaseClass. """
try:
return _g_dCOMForward['setattr'](self, ComifyName(sAttr), oValue)
except AttributeError:
return _g_dCOMForward['setattr'](self, sAttr, oValue) | 8a0fea986531aec66564bafcc679fed3b8631c10 | 10,260 |
def reduce_to_contemporaneous(ts):
"""
Simplify the ts to only the contemporaneous samples, and return the new ts + node map
"""
samples = ts.samples()
contmpr_samples = samples[ts.tables.nodes.time[samples] == 0]
return ts.simplify(
contmpr_samples,
map_nodes=True,
keep_unary=True,
filter_populations=False,
filter_sites=False,
record_provenance=False,
filter_individuals=False,
) | 7661a58b6f4b95d5cb4b711db39bb28852151304 | 10,261 |
import datetime
import subprocess
def mk_inv_part_txt_file(filename):
"""This function downloads the inventory pdf file given by 'part' and
saves it in the 'data' directory.
It also saves the retrieval time of the file.
It produces a txt file for the pdf file with pdftotext.
"""
url = ('http://www.pinakothek.de/sites/default/files/files/' + filename)
print(url)
# RequestObj = urllib.request.urlopen(url)
now = datetime.datetime.utcnow().isoformat()
print(now)
# RequestObjRead = RequestObj.read()
# with open('data/' + filename, 'w+b') as pdffile:
# pdffile.write(RequestObjRead)
txtoutput = subprocess.check_output(['pdftotext', '-layout',
'data/' + filename, '-'])
# With PyPDF2 I got only blank lines ...
txtoutput = txtoutput.decode('utf-8')
return url, now, txtoutput | 192f5e42a5c876461767262c50fd2525315aaf62 | 10,262 |
import numpy as np
from scipy.signal import fftconvolve
def scenario_map_fn(
example,
*,
snr_range: tuple = (20, 30),
sync_speech_source=True,
add_speech_reverberation_early=True,
add_speech_reverberation_tail=True,
early_rir_samples: int = int(8000 * 0.05), # 50 milli seconds
details=False,
):
"""
This will care for convolution with RIR and also generate noise.
The random noise generator is fixed based on example ID. It will
therefore generate the same SNR and same noise sequence the next time
you use this DB.
Args:
example: Example dictionary.
snr_range: required for noise generation
sync_speech_source: pad and/or cut the source signal to match the
length of the observations. Considers the offset.
        add_speech_reverberation_early:
            Calculate the speech_reverberation_early signal.
add_speech_reverberation_tail:
Calculate the speech_reverberation_tail signal.
Returns:
"""
h = example['audio_data']['rir'] # Shape (K, D, T)
# Estimate start sample first, to make it independent of channel_mode
rir_start_sample = np.array([get_rir_start_sample(h_k) for h_k in h])
_, D, rir_length = h.shape
# TODO: SAMPLE_RATE not defined
# rir_stop_sample = rir_start_sample + int(SAMPLE_RATE * 0.05)
# Use 50 milliseconds as early rir part, excluding the propagation delay
# (i.e. "rir_start_sample")
assert isinstance(early_rir_samples, int), (type(early_rir_samples), early_rir_samples)
rir_stop_sample = rir_start_sample + early_rir_samples
log_weights = example['log_weights']
# The two sources have to be cut to same length
K = example['num_speakers']
T = example['num_samples']['observation']
if 'original_source' not in example['audio_data']:
# legacy code
example['audio_data']['original_source'] = example['audio_data']['speech_source']
if 'original_source' not in example['num_samples']:
# legacy code
example['num_samples']['original_source'] = example['num_samples']['speech_source']
s = example['audio_data']['original_source']
def get_convolved_signals(h):
assert s.shape[0] == h.shape[0], (s.shape, h.shape)
x = [fftconvolve(s_[..., None, :], h_, axes=-1)
for s_, h_ in zip(s, h)]
assert len(x) == len(example['num_samples']['original_source'])
for x_, T_ in zip(x, example['num_samples']['original_source']):
assert x_.shape == (D, T_ + rir_length - 1), (
x_.shape, D, T_ + rir_length - 1)
# This is Jahn's heuristic to be able to still use WSJ alignments.
offset = [
offset_ - rir_start_sample_
for offset_, rir_start_sample_ in zip(
example['offset'], rir_start_sample)
]
assert len(x) == len(offset)
x = [extract_piece(x_, offset_, T) for x_, offset_ in zip(x, offset)]
x = np.stack(x, axis=0)
assert x.shape == (K, D, T), x.shape
return x
x = get_convolved_signals(h)
# Note: scale depends on channel mode
std = np.maximum(
np.std(x, axis=(-2, -1), keepdims=True),
np.finfo(x.dtype).tiny,
)
# Rescale such that invasive SIR is as close as possible to `log_weights`.
scale = (10 ** (np.asarray(log_weights)[:, None, None] / 20)) / std
# divide by 71 to ensure that all values are between -1 and 1
scale /= 71
x *= scale
example['audio_data']['speech_image'] = x
if add_speech_reverberation_early:
h_early = h.copy()
# Replace this with advanced indexing
for i in range(h_early.shape[0]):
h_early[i, ..., rir_stop_sample[i]:] = 0
x_early = get_convolved_signals(h_early)
x_early *= scale
example['audio_data']['speech_reverberation_early'] = x_early
if details:
example['audio_data']['rir_early'] = h_early
if add_speech_reverberation_tail:
h_tail = h.copy()
for i in range(h_tail.shape[0]):
h_tail[i, ..., :rir_stop_sample[i]] = 0
x_tail = get_convolved_signals(h_tail)
x_tail *= scale
example['audio_data']['speech_reverberation_tail'] = x_tail
if details:
example['audio_data']['rir_tail'] = h_tail
if sync_speech_source:
example['audio_data']['speech_source'] = synchronize_speech_source(
example['audio_data']['original_source'],
offset=example['offset'],
T=T,
)
else:
# legacy code
example['audio_data']['speech_source'] = \
example['audio_data']['original_source']
clean_mix = np.sum(x, axis=0)
rng = _example_id_to_rng(example['example_id'])
snr = rng.uniform(*snr_range)
example["snr"] = snr
rng = _example_id_to_rng(example['example_id'])
n = get_white_noise_for_signal(clean_mix, snr=snr, rng_state=rng)
example['audio_data']['noise_image'] = n
example['audio_data']['observation'] = clean_mix + n
return example | a3e6e5bf368bdfbb29b9d1c6684daa4077de9061 | 10,263 |
def get_name(tree, from_='name'):
"""
Get the name (token) of the AST node.
    :param ast tree:
:rtype: str|None
"""
# return tree['name']['name']
if 'name' in tree and isinstance(tree['name'], str):
return tree['name']
if 'parts' in tree:
return djoin(tree['parts'])
if from_ in tree:
return get_name(tree[from_])
return None | b5f1e97eb570859b01bf9489c6b9d4874511fdcc | 10,264 |
import numpy as n
def pcaTable(labels, vec_mean, vec_std, val_mean, val_std):
    """Make table with PCA formation mean and std"""
    header = "\\begin{center}\n\\begin{tabular}{| l |" + " c |"*6 + "}\\cline{2-7}\n"
    header += "\\multicolumn{1}{c|}{} & \\multicolumn{2}{c|}{PC1} & \\multicolumn{2}{c|}{PC2} & \\multicolumn{2}{c|}{PC3} \\\\\\cline{2-7}"
    header += "\\multicolumn{1}{c|}{} & $\\mu$ & $\\sigma$ & $\\mu$ & $\\sigma$ & $\\mu$ & $\\sigma$ \\\\\\hline\n"
tt=n.zeros((vec_mean.shape[0],6))
tt[:,::2]=vec_mean
tt[:,1::2]=vec_std
tt_=n.zeros(6)
tt_[::2]=val_mean
tt_[1::2]=val_std
tab_data=n.vstack((tt,tt_))
footer="\\hline\\end{tabular}\n\\end{center}"
table=header + makeTables(labels,tab_data,True) + footer
return table | 646fc1b5344a716b8f30714f112a477063bf91ce | 10,265 |
def render_reference_page(conn: Connection, reference: str) -> str:
"""Create HTML section that lists all notes that cite the reference."""
sql = """
SELECT note, Bibliography.html,Notes.html FROM Citations
JOIN Notes ON Citations.note = Notes.id
JOIN Bibliography ON Bibliography.key = Citations.reference
WHERE reference = ?
ORDER BY note
"""
notes = []
text = ""
for note, _text, html in conn.execute(sql, (reference,)):
assert not text or text == _text
text = _text
notes.append(Note(note, get_section_title(html)))
section = Elem("section",
Elem("h1", '@' + reference[4:]),
Elem("p", text),
note_list(notes),
id=reference,
title=reference,
**{"class": "level1"})
return render(section) | 6ab73d0d85da28676e7bb3cf42b3304cd0d6ad47 | 10,266 |
import logging
def normalize_bridge_id(bridge_id: str):
"""Normalize a bridge identifier."""
bridge_id = bridge_id.lower()
    # zeroconf: properties['id'], field contains a colon after every 2 chars
    if len(bridge_id) == 17 and bridge_id.count(":") == 5:
        return bridge_id.replace(':', '')
# nupnp: contains 4 extra characters in the middle: "fffe"
if len(bridge_id) == 16 and bridge_id[6:10] == "fffe":
return bridge_id[0:6] + bridge_id[-6:]
# SSDP/UPNP and Hue Bridge API contains right ID.
if len(bridge_id) == 12:
return bridge_id
    logging.getLogger(__name__).warning("Received unexpected bridge id: %s",
                                        bridge_id)
return bridge_id | 5370cc49e4c0272da2a471006bbbf3fd5e5521bf | 10,267 |
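The three accepted shapes normalize to the same 12-character ID:
assert normalize_bridge_id("AA:BB:CC:DD:EE:FF") == "aabbccddeeff"  # zeroconf
assert normalize_bridge_id("aabbccfffeddeeff") == "aabbccddeeff"   # nupnp
assert normalize_bridge_id("aabbccddeeff") == "aabbccddeeff"       # SSDP/UPNP and API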
import imp
def pyc_file_from_path(path):
"""Given a python source path, locate the .pyc.
See http://www.python.org/dev/peps/pep-3147/
#detecting-pep-3147-availability
http://www.python.org/dev/peps/pep-3147/#file-extension-checks
"""
has3147 = hasattr(imp, 'get_tag')
if has3147:
return imp.cache_from_source(path)
else:
return path + "c" | 459011ca1f07a023b139695cd2368767d46ca396 | 10,268 |
def get_bytes_per_data_block(header):
"""Calculates the number of bytes in each 128-sample datablock."""
N = 128 # n of amplifier samples
# Each data block contains N amplifier samples.
bytes_per_block = N * 4 # timestamp data
bytes_per_block += N * 2 * header['num_amplifier_channels']
# DC amplifier voltage (absent if flag was off)
# bytes_per_block += N * 2 * header['dc_amplifier_data_saved']
if header['dc_amplifier_data_saved'] > 0:
bytes_per_block += N * 2 * header['num_amplifier_channels']
# Stimulation data, one per enabled amplifier channels
bytes_per_block += N * 2 * header['num_amplifier_channels']
# Board analog inputs are sampled at same rate as amplifiers
bytes_per_block += N * 2 * header['num_board_adc_channels']
# Board analog outputs are sampled at same rate as amplifiers
bytes_per_block += N * 2 * header['num_board_dac_channels']
# Board digital inputs are sampled at same rate as amplifiers
if header['num_board_dig_in_channels'] > 0:
bytes_per_block += N * 2
# Board digital outputs are sampled at same rate as amplifiers
if header['num_board_dig_out_channels'] > 0:
bytes_per_block += N * 2
return bytes_per_block | 524e9015dacaf99042dd1493b24a418fff8c6b04 | 10,269 |
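As a worked example, a hypothetical header with 32 amplifier channels, DC data saved, 8 ADC and 8 DAC channels, and both digital ports enabled:
header = {
    'num_amplifier_channels': 32,
    'dc_amplifier_data_saved': 1,
    'num_board_adc_channels': 8,
    'num_board_dac_channels': 8,
    'num_board_dig_in_channels': 16,
    'num_board_dig_out_channels': 16,
}
# 512 (timestamps) + 3 * 8192 (amp, DC, stim) + 2 * 2048 (ADC, DAC) + 2 * 256 (digital)
assert get_bytes_per_data_block(header) == 29696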
def recovered():
"""
Real Name: b'Recovered'
Original Eqn: b'INTEG ( RR, 0)'
Units: b'Person'
Limits: (None, None)
Type: component
b''
"""
return integ_recovered() | 1a5133a3cc9231e3f7a90b54557ea9e836975eae | 10,270 |
def Mux(sel, val1, val0):
"""Choose between two values.
Parameters
----------
sel : Value, in
Selector.
val1 : Value, in
val0 : Value, in
Input values.
Returns
-------
Value, out
Output ``Value``. If ``sel`` is asserted, the Mux returns ``val1``, else ``val0``.
"""
sel = Value.cast(sel)
if len(sel) != 1:
sel = sel.bool()
return Operator("m", [sel, val1, val0]) | 62fa5abf293a1321af5e4a209427b896756e5617 | 10,271 |
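A sketch of typical use inside an nMigen module (assuming the usual Signal/Module API):
from nmigen import Module, Signal

m = Module()
sel, a, b, out = Signal(), Signal(8), Signal(8), Signal(8)
m.d.comb += out.eq(Mux(sel, a, b))  # out follows a when sel is 1, else b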
def get_identity_list(user, provider=None):
"""
Given the (request) user
return all identities on all active providers
"""
identity_list = CoreIdentity.shared_with_user(user)
if provider:
identity_list = identity_list.filter(provider=provider)
return identity_list | f5a9a7e461813edbc293338dca242d4dd4877281 | 10,272 |
def get_user_pool_domain(prefix: str, region: str) -> str:
"""Return a user pool domain name based on the prefix received and region.
Args:
prefix: The domain prefix for the domain.
region: The region in which the pool resides.
"""
return "%s.auth.%s.amazoncognito.com" % (prefix, region) | dc1eec674379d04bd8b23318207ac5b2e6a905f3 | 10,273 |
import re
import os
import numpy as np
import scipy.io as so
import matplotlib.pyplot as plt
from scipy.stats import linregress
from functools import reduce
def corr_activity(ppath, recordings, states, nskip=10, pzscore=True, bands=[]):
"""
correlate DF/F during states with delta power, theta power, sigma power and EMG amplitude
:param ppath: base filder
:param recordings: list of recordings
:param states: list of len 1 to 3, states to correlate EEG power with; if you want to correlate power during
NREM and REM, then set states = [3,1]
:param nskip: number of seconds in the beginning to be skipped
    :param pzscore: if True, z-score activity, i.e. (DF/F - mean(DF/F)) / std(DF/F)
:return: n/a
"""
# Fixed Parameters
sf_spectrum = 5
if len(bands) == 0:
eeg_bands = [[0.5, 4], [6, 10], [10, 15], [100, 150]]
else:
eeg_bands = bands
# EMG band
emg_bands = [[10, 100]]
bands = eeg_bands + emg_bands
bands = {k:bands[k] for k in range(len(bands))}
nbands = len(bands)
mice = dict()
for rec in recordings:
idf = re.split('_', rec)[0]
if not idf in mice:
mice[idf] = 1
mice = list(mice.keys())
# dict, Band -> Mouse ID -> values
Pow = {m:{} for m in mice}
DFF = {m:[] for m in mice}
for m in mice:
d = {b:[] for b in range(nbands)}
Pow[m] = d
for rec in recordings:
idf = re.split('_', rec)[0]
sr = get_snr(ppath, rec)
# number of time bins for each time bin in spectrogram
nbin = int(np.round(sr) * 2.5)
sdt = nbin * (1 / sr)
        nskip_bins = int(nskip / sdt)  # seconds -> bins; keep nskip in seconds for later recordings
        M = sleepy.load_stateidx(ppath, rec)[0][nskip_bins:]
ddir = os.path.join(ppath, rec)
if os.path.isfile(os.path.join(ddir, 'dffd.mat')):
dff_rec = so.loadmat(os.path.join(ddir, 'dffd.mat'), squeeze_me=True)['dffd']
else:
dff_rec = so.loadmat(os.path.join(ddir, 'DFF.mat'), squeeze_me=True)['dffd']
print('%s - saving dffd.mat' % rec)
so.savemat(os.path.join(ddir, 'dffd.mat'), {'dffd': dff_rec})
#dff_rec = so.loadmat(os.path.join(ppath, rec, 'DFF.mat'), squeeze_me=True)['dffd'][nskip:]*100.0
if pzscore:
dff_rec = (dff_rec - dff_rec.mean()) / dff_rec.std()
# collect all brain state indices
idx = []
for s in states:
idx.append(np.where(M==s)[0])
bs_idx = reduce(lambda x,y:np.concatenate((x,y)), idx)
# load spectrogram and normalize
P = so.loadmat(os.path.join(ppath, rec, 'sp_%s.mat' % rec), squeeze_me=True)
SP = P['SP']
freq = P['freq']
df = freq[1] - freq[0]
sp_mean = SP.mean(axis=1)
SP = np.divide(SP, np.tile(sp_mean, (SP.shape[1], 1)).T)
# load EMG
MP = so.loadmat(os.path.join(ppath, rec, 'msp_%s.mat' % rec), squeeze_me=True)['mSP']
# calculate EEG bands
for b in range(nbands-1):
ifreq = np.where((freq >= bands[b][0]) & (freq <= bands[b][1]))[0]
tmp = SP[ifreq,:].sum(axis=0)*df
tmp = sleepy.smooth_data(tmp, sf_spectrum)
Pow[idf][b] = np.concatenate((Pow[idf][b], tmp[bs_idx]))
# add EMG band
b = nbands-1
ifreq = np.where((freq >= bands[b][0]) & (freq <= bands[b][1]))[0]
tmp = MP[ifreq, :].sum(axis=0) * df
tmp = sleepy.smooth_data(tmp, sf_spectrum)
Pow[idf][b] = np.concatenate((Pow[idf][b], tmp[bs_idx]))
DFF[idf] = np.concatenate((DFF[idf], dff_rec[bs_idx]))
# collapse all Power values and dff values
PowAll = {b:[] for b in bands}
DFFAll = []
for b in bands:
for m in mice:
PowAll[b] = np.concatenate((PowAll[b], Pow[m][b]))
for m in mice:
DFFAll = np.concatenate((DFFAll, DFF[m]))
r_values = {}
for b in bands:
p = linregress(PowAll[b], DFFAll)
r_values[b] = p
plt.ion()
plt.figure(figsize=(12,6))
nx = 1.0/nbands
dx = 0.2 * nx
i=0
for b in bands:
ax = plt.axes([nx * i + dx, 0.15, nx - dx - dx / 2.0, 0.3])
j=0
for m in mice:
ax.plot(Pow[m][b], DFF[m], '.', color=[j*nx,j*nx,j*nx])
j+=1
i+=1
if b>0:
#ax.set_yticklabels([])
pass
if b<nbands-1:
plt.xlabel('EEG Power')
else:
plt.xlabel('EMG Power')
plt.title('%.2f<f<%.2f, r2=%.2f' % (bands[b][0], bands[b][1], r_values[b][2]), fontsize=10)
if b==0:
if pzscore:
plt.ylabel('DF/F (z-scored)')
else:
plt.ylabel('DF/F')
sleepy.box_off(ax)
x = np.linspace(PowAll[b].min(), PowAll[b].max(), 100)
ax.plot(x, x*r_values[b][0]+r_values[b][1], color='blue')
plt.draw()
return r_values | c16cc2b347f7ac1cd2f53d9092e3c0b678f1799c | 10,274 |
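A hedged invocation sketch; the base folder and recording name are hypothetical:

# r_values = corr_activity('/data/photometry', ['M1_010120n1'], states=[3], nskip=10)
# r_values[0] is the scipy.stats.linregress result for the first (delta) band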
def add_dep_info(tgt_tokens, lang, spacy_nlp, include_detail_tag=True):
"""
    :param tgt_tokens: a list of CoNLLUP_Token_Template() objects from the CoNLL_Annotations.py file
    :param lang: language code of the target sentence (e.g. "ES", "FR", "DE")
    :param spacy_nlp: spaCy language model of the target sentence, used to build the dependency tree
    :param include_detail_tag: if True, also keep the detailed morphological tag (only applies to ES/FR models)
    :return: the same token list, enriched with lemma, head, POS, dependency, ancestor and child information
"""
doc = spacy_nlp.tokenizer.tokens_from_list([t.word for t in tgt_tokens])
spacy_nlp.tagger(doc)
spacy_nlp.parser(doc)
for ix, token in enumerate(doc):
tgt_tokens[ix].lemma = token.lemma_ or "_"
tgt_tokens[ix].head = token.head.i + 1
if lang in ["ES", "FR"]:
detail_tag = token.tag_.split("__") # [VERB , Mood=Sub|Number=Plur|Person=3|Tense=Imp|VerbForm=Fin]
tgt_tokens[ix].pos_tag = detail_tag[0] or "_"
if include_detail_tag:
tgt_tokens[ix].detail_tag = detail_tag[-1] or "_"
else:
tgt_tokens[ix].pos_tag = token.tag_ or "_"
tgt_tokens[ix].pos_universal = token.pos_ or "_" # Is SpaCy already Universal?
tgt_tokens[ix].dep_tag = token.dep_ or "_"
tgt_tokens[ix].ancestors = [(t.i, t.text) for t in token.ancestors]
tgt_tokens[ix].children = [(t.i, t.text) for t in token.children]
# print(token.i, token.text, token.pos_, token.dep_, token.head.text, token.head.i, token.tag_)
assert len(doc) == len(tgt_tokens), f"LEN Mismatch! Spacy has {len(doc)} tokens and CoNLL has {len(tgt_tokens)} tokens"
return tgt_tokens | 0083d16f4344a6afaeb5fba9a6b2e9282d617ef3 | 10,275 |
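A hedged usage sketch; the model name is illustrative and assumes a spaCy v2-era model (matching tokens_from_list):

# import spacy
# nlp = spacy.load('es_core_news_sm')
# enriched = add_dep_info(conll_tokens, 'ES', nlp, include_detail_tag=True)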
async def apod(request: Request) -> dict:
"""Get the astronomy picture of the day."""
http_client = request.app.state.http_client
async with http_client.session.get(
f"https://api.nasa.gov/planetary/apod?api_key={NASA_API}"
) as resp:
data = await resp.json()
return {
"title": data["title"],
"explanation": data["explanation"],
"img": data["hdurl"],
} | edc526732904c5f0a29c144023df7fefb6d7743c | 10,276 |
def main_view(request, url, preview=False):
"""
@param request: HTTP request
@param url: string
@param preview: boolean
"""
url_result = parse_url(url)
current_site = get_site()
# sets tuple (template_name, posts_on_page)
current_template = get_template()
language = get_language(url_result)
if not url_result['page']:
page = get_index_page(language)
else:
page = get_page(url_result['page'], language, preview)
menuitems = get_main_menuitems(url_result['page'], page, preview)
meta_data = get_metadata(page)
page_num = url_result['page_num'] or 1
if url_result['post']:
posts = get_post(page, url_result['post'], preview)
template_page = 'post.html'
form = handle_comment_form(request, posts)
else:
posts = get_paginated_posts(page, page_num, page.items_per_menu)
template_page = 'page.html'
site_content = {'site': current_site,
'languages': get_languages(),
'current_language': language,
'menuitems': menuitems,
'page': page,
'scripts': get_scripts(),
'metadata': meta_data,
'posts': posts, }
if has_other_menu():
site_content['other_menuitems'] = get_other_menuitems()
try:
        site_content['form'] = form  # 'form' is only bound on single-post pages
except NameError:
pass
template = '{}/{}'.format(current_template[0], template_page)
return render_to_response(
template,
{'site_content': site_content},
RequestContext(request)
) | 6febddd1e98f94865a364b8cf9a339574a303809 | 10,277 |
def parse_configs(code_config, field_config, time_config):
"""
Wrapper to validate and parse each of the config files. Returns a
a dictionary with config types as keys and parsed config files as values.
"""
# performing basic validation of config paths, obtaining dictionary of
    # config types and corresponding raw dataframes
raw_dfs = validate_config_dfs(code_config, field_config, time_config)
# performing additional config-specific validation and parsing
config_dict = {}
for config_type, df in raw_dfs.items():
if config_type in validation_functions:
validation_functions[config_type](df)
if config_type in parse_functions:
config_dict[config_type] = parse_functions[config_type](df)
else:
config_dict[config_type] = df
# concatenating code and field configs
if CODE_CONFIG in config_dict:
if FIELD_CONFIG in config_dict:
config_dict[FIELD_CONFIG] = pd.concat([config_dict[CODE_CONFIG],
config_dict[FIELD_CONFIG]], sort=True)
else:
config_dict[FIELD_CONFIG] = config_dict[CODE_CONFIG]
config_dict.pop(CODE_CONFIG)
return config_dict | 1d625e0b56ea4d197280b91a7993a16c82a2461d | 10,278 |
from scipy.signal import lfilter

def butter_highpass_filter_eda(data):
    """High-pass filter a 1-D EDA (electrodermal activity) signal."""
    # eda_hpf() is assumed to return Butterworth (b, a) coefficients defined elsewhere in the module
    b, a = eda_hpf()
    y = lfilter(b, a, data)
return y | 1449d09a810e0c1ff78ab325106a6235cb94d26b | 10,279 |
import numpy as np

def normalize_null_vals(reported_val):
    """
    Takes a reported value and returns a normalized NaN if the value is null, nan, empty, etc.
    Otherwise returns the reported value unchanged.
"""
    # is_empty_value() is assumed to be a module-level predicate for null-like values
    if is_empty_value(reported_val):
return np.NaN
else:
return reported_val | 790ebbad188390752401699f2d04fddbd08bcc7e | 10,280 |
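Illustrative behaviour, assuming is_empty_value() treats None and '' as empty:

# normalize_null_vals('')    -> nan
# normalize_null_vals(3.14)  -> 3.14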
import sys
def in_notebook() -> bool:
"""Evaluate whether the module is currently running in a jupyter notebook."""
return "ipykernel" in sys.modules | 3be74bda76eaf0ff32c1d48d23c52f8d5f0ea728 | 10,281 |
def test_insert(type):
"""
>>> test_insert(int_)
[0, 1, 2, 3, 4, 5]
"""
tlist = nb.typedlist(type, [1,3])
tlist.insert(0,0)
tlist.insert(2,2)
tlist.insert(4,4)
tlist.insert(8,5)
return tlist | a0eb1f5bbf861863b47c6639d1159afffb63093e | 10,282 |
def get_next_month_range(unbounded=False):
"""获取 下个月的开始和结束时间.
:param unbounded: 开区间
"""
return get_month_range(months=1, unbounded=unbounded) | 989054b3e523400ed28ab0d3d9a840d6606ee8cc | 10,283 |
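A hedged usage sketch, assuming get_month_range() returns a (start, end) pair:

# start, end = get_next_month_range()                # closed interval
# start, end = get_next_month_range(unbounded=True)  # open interval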
import functools
def probit_regression(
dataset_fn,
name='probit_regression',
):
"""Bayesian probit regression with a Gaussian prior.
Args:
dataset_fn: A function to create a classification data set. The dataset must
have binary labels.
name: Name to prepend to ops created in this function, as well as to the
`code_name` in the returned `TargetDensity`.
Returns:
target: `TargetDensity`.
"""
with tf.name_scope(name) as name:
dataset = dataset_fn()
num_train_points = dataset.train_features.shape[0]
num_test_points = dataset.test_features.shape[0]
have_test = num_test_points > 0
# Add bias.
train_features = tf.concat(
[dataset.train_features,
tf.ones([num_train_points, 1])], axis=-1)
train_labels = tf.convert_to_tensor(dataset.train_labels)
test_features = tf.concat(
[dataset.test_features,
tf.ones([num_test_points, 1])], axis=-1)
test_labels = tf.convert_to_tensor(dataset.test_labels)
num_features = int(train_features.shape[1])
root = tfd.JointDistributionCoroutine.Root
zero = tf.zeros(num_features)
one = tf.ones(num_features)
def model_fn(features):
weights = yield root(tfd.Independent(tfd.Normal(zero, one), 1))
probits = tf.einsum('nd,...d->...n', features, weights)
yield tfd.Independent(tfd.ProbitBernoulli(probits=probits), 1)
train_joint_dist = tfd.JointDistributionCoroutine(
functools.partial(model_fn, features=train_features))
test_joint_dist = tfd.JointDistributionCoroutine(
functools.partial(model_fn, features=test_features))
dist = joint_distribution_posterior.JointDistributionPosterior(
train_joint_dist, (None, train_labels))
expectations = {
'params':
target_spec.expectation(
fn=lambda params: params[0],
human_name='Parameters',
)
}
if have_test:
expectations['test_nll'] = target_spec.expectation(
fn=lambda params: ( # pylint: disable=g-long-lambda
-test_joint_dist.sample_distributions(value=params)
[0][-1].log_prob(test_labels)),
human_name='Test NLL',
)
expectations['per_example_test_nll'] = target_spec.expectation(
fn=lambda params: ( # pylint: disable=g-long-lambda
-test_joint_dist.sample_distributions(value=params)
[0][-1].distribution.log_prob(test_labels)),
human_name='Per-example Test NLL',
)
return target_spec.TargetDensity.from_distribution(
distribution=dist,
constraining_bijectors=(tfb.Identity(),),
expectations=expectations,
code_name='{}_{}'.format(dataset.code_name, name),
human_name='{} Probit Regression'.format(dataset.human_name),
) | b4cca9054d0ebd8c349cdb148443246616ed6120 | 10,284 |
def block_inception_c(inputs, scope=None, reuse=None):
"""Builds Inception-C block for Inception v4 network."""
# By default use stride=1 and SAME padding
with slim.arg_scope([slim.conv2d, slim.avg_pool2d, slim.max_pool2d],
stride=1, padding='SAME'):
with tf.variable_scope(scope, 'BlockInceptionC', [inputs], reuse=reuse):
with tf.variable_scope('Branch_0'):
branch_0 = slim.conv2d(inputs, 256, [1, 1], scope='Conv2d_0a_1x1')
with tf.variable_scope('Branch_1'):
branch_1 = slim.conv2d(inputs, 384, [1, 1], scope='Conv2d_0a_1x1')
branch_1 = tf.concat(axis=3, values=[
slim.conv2d(branch_1, 256, [1, 3], scope='Conv2d_0b_1x3'),
slim.conv2d(branch_1, 256, [3, 1], scope='Conv2d_0c_3x1')])
with tf.variable_scope('Branch_2'):
branch_2 = slim.conv2d(inputs, 384, [1, 1], scope='Conv2d_0a_1x1')
branch_2 = slim.conv2d(branch_2, 448, [3, 1], scope='Conv2d_0b_3x1')
branch_2 = slim.conv2d(branch_2, 512, [1, 3], scope='Conv2d_0c_1x3')
branch_2 = tf.concat(axis=3, values=[
slim.conv2d(branch_2, 256, [1, 3], scope='Conv2d_0d_1x3'),
slim.conv2d(branch_2, 256, [3, 1], scope='Conv2d_0e_3x1')])
with tf.variable_scope('Branch_3'):
branch_3 = slim.avg_pool2d(inputs, [3, 3], scope='AvgPool_0a_3x3')
branch_3 = slim.conv2d(branch_3, 256, [1, 1], scope='Conv2d_0b_1x1')
return tf.concat(axis=3, values=[branch_0, branch_1, branch_2, branch_3]) | b6cfe5d6eeaaef2d4a02420577884393f4bfcd4b | 10,285 |
import six
from datetime import date
def from_json(js):
"""
Helper to parse json values from server into python types
"""
if js is None or js is True or js is False or isinstance(js, six.text_type):
# JsNull, JsBoolean, JsString
return js
if not isinstance(js, dict) or 'type' not in js or 'data' not in js:
raise ValueError('Expected a dict, got {!r}'.format(js))
t = js['type']
data = js['data']
if t in ('byte', 'short', 'int', 'long'):
return int(data)
if t in ('float', 'double'):
return float(data)
    if t == 'timestamp':
        # the server returns timestamps in milliseconds, which is not the Python convention
        return float(data) / 1E3
    if t == 'date':
        # the server returns timestamps in milliseconds
        return date.fromtimestamp(float(data) / 1E3)
if t == 'byte_array':
return bytearray([int(x) for x in data])
if t in ('wrapped_array', 'seq', 'array'):
return [from_json(x) for x in data]
if t == 'map':
d = {}
for entry in data:
if 'key' not in entry or 'val' not in entry:
raise ValueError('Invalid map entry: {!r}'.format(entry))
d[from_json(entry['key'])] = from_json(entry['val'])
return d
raise ValueError('Failed to parse value: {!r}'.format(js)) | 0e4f94e8fdfb634ea3a1f1f84d3ff3d5bb125175 | 10,286 |
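Behaviour follows directly from the dispatch above:

# from_json({'type': 'int', 'data': '42'})        -> 42
# from_json({'type': 'seq', 'data': ['a', 'b']})  -> ['a', 'b']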
def exposexml(func):
"""
Convenience decorator function to expose XML
"""
def wrapper(self, data, expires, contentType="application/xml"):
data = func(self, data)
_setCherryPyHeaders(data, contentType, expires)
return self.templatepage('XML', data=data,
config=self.config,
path=request.path_info)
wrapper.__doc__ = func.__doc__
wrapper.__name__ = func.__name__
wrapper.exposed = True
return wrapper | 57c62490e51693551801aa4722de2e08d3fd3817 | 10,287 |
import os
import uuid
def _SetSource(build_config,
messages,
is_specified_source,
no_source,
source,
gcs_source_staging_dir,
ignore_file,
hide_logs=False):
"""Set the source for the build config."""
default_gcs_source = False
default_bucket_name = None
if gcs_source_staging_dir is None:
default_gcs_source = True
default_bucket_name = staging_bucket_util.GetDefaultStagingBucket()
gcs_source_staging_dir = 'gs://{}/source'.format(default_bucket_name)
gcs_client = storage_api.StorageClient()
# --no-source overrides the default --source.
if not is_specified_source and no_source:
source = None
gcs_source_staging = None
if source:
suffix = '.tgz'
if source.startswith('gs://') or os.path.isfile(source):
_, suffix = os.path.splitext(source)
# Next, stage the source to Cloud Storage.
staged_object = '{stamp}-{uuid}{suffix}'.format(
stamp=times.GetTimeStampFromDateTime(times.Now()),
uuid=uuid.uuid4().hex,
suffix=suffix,
)
gcs_source_staging_dir = resources.REGISTRY.Parse(
gcs_source_staging_dir, collection='storage.objects')
try:
gcs_client.CreateBucketIfNotExists(
gcs_source_staging_dir.bucket, check_ownership=default_gcs_source)
except api_exceptions.HttpForbiddenError:
raise BucketForbiddenError(
'The user is forbidden from accessing the bucket [{}]. Please check '
'your organization\'s policy or if the user has the "serviceusage.services.use" permission'
.format(gcs_source_staging_dir.bucket))
except storage_api.BucketInWrongProjectError:
# If we're using the default bucket but it already exists in a different
# project, then it could belong to a malicious attacker (b/33046325).
raise c_exceptions.RequiredArgumentException(
'gcs-source-staging-dir',
'A bucket with name {} already exists and is owned by '
'another project. Specify a bucket using '
'--gcs-source-staging-dir.'.format(default_bucket_name))
if gcs_source_staging_dir.object:
staged_object = gcs_source_staging_dir.object + '/' + staged_object
gcs_source_staging = resources.REGISTRY.Create(
collection='storage.objects',
bucket=gcs_source_staging_dir.bucket,
object=staged_object)
if source.startswith('gs://'):
gcs_source = resources.REGISTRY.Parse(
source, collection='storage.objects')
staged_source_obj = gcs_client.Rewrite(gcs_source, gcs_source_staging)
build_config.source = messages.Source(
storageSource=messages.StorageSource(
bucket=staged_source_obj.bucket,
object=staged_source_obj.name,
generation=staged_source_obj.generation,
))
else:
if not os.path.exists(source):
raise c_exceptions.BadFileException(
'could not find source [{src}]'.format(src=source))
if os.path.isdir(source):
source_snapshot = snapshot.Snapshot(source, ignore_file=ignore_file)
size_str = resource_transform.TransformSize(
source_snapshot.uncompressed_size)
if not hide_logs:
log.status.Print(
'Creating temporary tarball archive of {num_files} file(s)'
' totalling {size} before compression.'.format(
num_files=len(source_snapshot.files), size=size_str))
staged_source_obj = source_snapshot.CopyTarballToGCS(
gcs_client,
gcs_source_staging,
ignore_file=ignore_file,
hide_logs=hide_logs)
build_config.source = messages.Source(
storageSource=messages.StorageSource(
bucket=staged_source_obj.bucket,
object=staged_source_obj.name,
generation=staged_source_obj.generation,
))
elif os.path.isfile(source):
unused_root, ext = os.path.splitext(source)
      if ext not in _ALLOWED_SOURCE_EXT:
        raise c_exceptions.BadFileException(
            'Local file [{src}] is none of {exts}'.format(
                src=source, exts=', '.join(_ALLOWED_SOURCE_EXT)))
if not hide_logs:
log.status.Print('Uploading local file [{src}] to '
'[gs://{bucket}/{object}].'.format(
src=source,
bucket=gcs_source_staging.bucket,
object=gcs_source_staging.object,
))
staged_source_obj = gcs_client.CopyFileToGCS(source, gcs_source_staging)
build_config.source = messages.Source(
storageSource=messages.StorageSource(
bucket=staged_source_obj.bucket,
object=staged_source_obj.name,
generation=staged_source_obj.generation,
))
else:
# No source
if not no_source:
raise c_exceptions.InvalidArgumentException(
'--no-source', 'To omit source, use the --no-source flag.')
return build_config | 41abbce50eeabee9780b5822ecb31921611bcae4 | 10,288 |
import numpy as np

def transform_categorical_by_percentage(TRAIN, TEST=None,
handle_unknown="error", verbose=0):
"""
Transform categorical features to numerical. The categories are encoded
by their relative frequency (in the TRAIN dataset).
To be consistent with scikit-learn transformers having categories
in transform that are not present during training will raise an error
by default.
-----
Arguments:
TRAIN: DataFrame.
TEST: DataFrame, optional (default=None).
handle_unknown: str, "error", "ignore" or "NaN",
optional (default="error").
Whether to raise an error, ignore or replace by NA if an unknown
category is present during transform.
verbose: integer, optional (default=0).
Controls the verbosity of the process.
-----
Returns:
TRAIN: DataFrame.
TEST: DataFrame.
This second DataFrame is returned if two DataFrames were provided.
"""
categorical = TRAIN.select_dtypes(include=["object"]).columns
if TEST is not None:
if len(categorical) > 0:
for col in categorical:
cat_counts = TRAIN[col].value_counts(normalize=True,
dropna=False)
dict_cat_counts = dict(zip(cat_counts.index, cat_counts))
not_in_train = list(set(TEST[col].unique()) -
set(cat_counts.index))
if len(not_in_train) > 0:
if handle_unknown == "error":
raise ValueError("TEST contains new labels: {0} "
"in variable {1}."
.format(not_in_train, col))
if handle_unknown == "ignore":
print("\n-----\n")
print("Variable: {0}".format(col))
print("Unknown category(ies) {0} present during "
"transform has(ve) been ignored."
.format(not_in_train))
if handle_unknown == "NaN":
print("\n-----\n")
print("Variable: {0}".format(col))
print("Unknown category(ies) {0} present during "
"transform has(ve) been replaced by NA."
.format(not_in_train))
for item in not_in_train:
dict_cat_counts[item] = np.nan
TRAIN[col] = TRAIN[col].replace(dict_cat_counts)
TEST[col] = TEST[col].replace(dict_cat_counts)
if verbose > 0:
print("\n-----\n")
print("Feature: {0}".format(col))
if verbose > 1:
print(cat_counts)
return (TRAIN, TEST)
else:
for col in categorical:
cat_counts = TRAIN[col].value_counts(normalize=True, dropna=False)
dict_cat_counts = dict(zip(cat_counts.index, cat_counts))
TRAIN[col] = TRAIN[col].replace(dict_cat_counts)
if verbose > 0:
print("\n-----\n")
print("Feature: {0}".format(col))
if verbose > 1:
print(cat_counts)
return TRAIN | ce2c568b40109e11d1920a211314aebdc076be7f | 10,289 |
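A small worked example of the frequency encoding (pandas assumed to be imported as pd):

# df = pd.DataFrame({'color': ['red', 'red', 'blue', 'green']})
# transform_categorical_by_percentage(df)
# -> 'red' maps to 0.50, 'blue' and 'green' each map to 0.25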
def buildDescription(flinfoDescription='', flickrreview=False, reviewer='',
override='', addCategory='', removeCategories=False):
"""Build the final description for the image.
The description is based on the info from flickrinfo and improved.
"""
description = '== {{int:filedesc}} ==\n{}'.format(flinfoDescription)
if removeCategories:
description = textlib.removeCategoryLinks(description,
pywikibot.Site(
'commons', 'commons'))
if override:
description = description.replace('{{cc-by-sa-2.0}}\n', '')
description = description.replace('{{cc-by-2.0}}\n', '')
description = description.replace('{{flickrreview}}\n', '')
description = description.replace(
'{{copyvio|Flickr, licensed as "All Rights Reserved" which is not '
'a free license --~~~~}}\n',
'')
description = description.replace('=={{int:license}}==',
'=={{int:license}}==\n' + override)
elif flickrreview:
if reviewer:
description = description.replace(
'{{flickrreview}}',
'{{flickrreview|%s|'
'{{subst:CURRENTYEAR}}-{{subst:CURRENTMONTH}}-'
'{{subst:CURRENTDAY2}}}}' % reviewer)
if addCategory:
description = description.replace('{{subst:unc}}\n', '')
description = description + '\n[[Category:' + addCategory + ']]\n'
description = description.replace('\r\n', '\n')
return description | 94a529e5a26a4390536359e0233f26d32465e3ed | 10,290 |
def allowed_file(filename, extensions):
"""
Check file is image
:param filename: string
:param extensions: list
:return bool:
"""
return '.' in filename and \
filename.rsplit('.', 1)[1].lower() in extensions | c61e77205e40cd05fc0ea6e4e4f770180f15e6d8 | 10,291 |
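Behaviour follows from the case-insensitive check on the last suffix:

# allowed_file('photo.JPG', ['jpg', 'png'])  -> True
# allowed_file('archive.tar.gz', ['gz'])     -> True   (rsplit keeps only the last suffix)
# allowed_file('README', ['md'])             -> False  (no dot)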
import os
def read(infile):
"""Read result from disk."""
_, ext = os.path.splitext(infile)
ext = ext.strip('.')
return read_funcs[ext](infile) | 36601f9d7bb2c83e14aabdf3569062f0e7e509b7 | 10,292 |
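read_funcs is assumed to be a module-level dispatch table keyed by extension, along the lines of:

# read_funcs = {'json': read_json, 'csv': read_csv, 'h5': read_hdf5}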
import hashlib
import os
def _url_in_cache(url):
"""
Determine if a URL's response exists in the cache.
Parameters
----------
url : string
the url to look for in the cache
Returns
-------
filepath : string
path to cached response for url if it exists in the cache,
otherwise None
"""
# hash the url to generate the cache filename
filename = hashlib.md5(url.encode("utf-8")).hexdigest()
filepath = os.path.join(settings.cache_folder, os.extsep.join([filename, "json"]))
# if this file exists in the cache, return its full path
if os.path.isfile(filepath):
return filepath | 4c3d43f6b6ce4efb4b3ee9b627e1c4e1ab17687b | 10,293 |
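The cached filename can be reproduced directly from the hashing scheme:

# hashlib.md5('https://example.com'.encode('utf-8')).hexdigest() + '.json'
# is the name looked up inside settings.cache_folder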
def get_number_of_recovery_codes(userid):
"""
Get and return the number of remaining recovery codes for `userid`.
Parameters:
userid: The userid for which to check the count of recovery codes.
Returns:
An integer representing the number of remaining recovery codes.
"""
return d.engine.scalar("""
SELECT COUNT(*)
FROM twofa_recovery_codes
WHERE userid = %(userid)s
""", userid=userid) | f292fbbc2e2ed55f53136988ef7d6f13ab2881e6 | 10,294 |
import platform
import os
def creation_time(path_to_file):
"""The file creation time.
Try to get the date that a file was created, falling back to when
it was last modified if that isn't possible.
See http://stackoverflow.com/a/39501288/1709587 for explanation.
"""
if platform.system() == 'Windows':
return os.path.getctime(path_to_file)
else:
stat = os.stat(path_to_file)
try:
return stat.st_birthtime
except AttributeError:
# We're probably on Linux. No easy way to get creation dates here,
# so we'll settle for when its content was last modified.
return stat.st_mtime | 5d342f0f1b8e006e94ee5e8192b9fc3f4d9b53eb | 10,295 |
async def mailbox_search(search_term: str, Authorize: AuthJWT = Depends(),Token = Depends(auth_schema)):
"""Search email with a search term"""
Authorize.jwt_required()
try:
return JSONResponse(dumps({"success": True, "email": database.search(search_term)}))
except Exception as err:
return JSONResponse({"success": False, "error": str(err)}) | 28d82605b78e124eb029125e9b2c9625bc207a64 | 10,296 |
def otel_service(docker_ip, docker_services):
"""Ensure that port is listening."""
# `port_for` takes a container port and returns the corresponding host port
port = docker_services.port_for("otel-collector", 4317)
docker_services.wait_until_responsive(
timeout=30.0, pause=5, check=lambda: is_portListening(docker_ip, port)
)
return True | 19bd5f21ce30fa3cf5202369bf8e0a43456fdf9e | 10,297 |
# T is assumed to be the project's tensor-backend alias (providing T.Tensor);
# note that `from re import T` would only import the re.TEMPLATE flag and break these annotations.
import numpy
def broadcast(vec: T.Tensor, matrix: T.Tensor) -> T.Tensor:
"""
Broadcasts vec into the shape of matrix following numpy rules:
vec ~ (N, 1) broadcasts to matrix ~ (N, M)
vec ~ (1, N) and (N,) broadcast to matrix ~ (M, N)
Args:
vec: A vector (either flat, row, or column).
matrix: A matrix (i.e., a 2D tensor).
Returns:
tensor: A tensor of the same size as matrix containing the elements
of the vector.
Raises:
BroadcastError
"""
try:
        return numpy.broadcast_to(vec, shape(matrix))  # shape() and BroadcastError are module-level helpers
except ValueError:
raise BroadcastError('cannot broadcast vector of dimension {} \
onto matrix of dimension {}'.format(shape(vec), shape(matrix))) | 3d471489ecef50a70a668db0262d0d21a8b76c86 | 10,298 |
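Concrete shapes, following numpy's broadcasting rules:

# broadcast(numpy.ones((3, 1)), numpy.zeros((3, 4))).shape  -> (3, 4)
# broadcast(numpy.ones(4),      numpy.zeros((3, 4))).shape  -> (3, 4)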
import numpy as np

def neighbour_list_n_out(nlist_i: NeighbourList,
nlist_j: NeighbourList) -> np.ndarray:
"""
Compute n^out between two NeighbourList object.
Args:
nlist_i (NeighbourList): A NeighbourList object for neighbour lists at time 0.
nlist_j (NeighbourList): A NeighbourList object for neighbour lists at time t.
Returns:
(np.ndarray(float)): A 1D array of normalised correlation terms.
Raises:
        ValueError: If the two NeighbourList objects hold differently shaped
            arrays of neighbour-list vectors.
Note:
For each neighbour list vector, computes (l_i.l_i) - (l_i.l_j).
See Rabani et al. J. Chem. Phys. 1997 doi:https://doi.org/10.1063/1.474927
Eqn. 8 for details.
"""
if nlist_i.vectors.shape != nlist_j.vectors.shape:
raise ValueError(f'NeighbourList vector shapes are not equal: {nlist_i.vectors.shape} != {nlist_j.vectors.shape}')
return (np.einsum('ij,ij->i', nlist_i.vectors, nlist_i.vectors) -
np.einsum('ij,ij->i', nlist_i.vectors, nlist_j.vectors)) | 15069fbb3995f7f65f61b9145353df7116d45e23 | 10,299 |
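The einsum calls are row-wise dot products; on plain arrays the correlation term reduces to:

# li = np.array([[1., 1., 0.]]); lj = np.array([[1., 0., 0.]])
# np.einsum('ij,ij->i', li, li) - np.einsum('ij,ij->i', li, lj)  -> array([1.])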