content (string, lengths 35 to 762k) | sha1 (string, length 40) | id (int64, 0 to 3.66M) |
---|---|---|
def get_social_profile_provider_list(profile):
""" """
sp = None
sp = SocialProfileProvider.objects.filter(user=profile.user).order_by('provider', 'website',)
return sp
|
9e157c97dfd0d7daa1a2d0ad80bd84add3dcc281
| 33,288 |
import socket
import struct
def ip2long(ip):
""" Convert an IP string to long """
packedIP = socket.inet_aton(ip)
return struct.unpack("!L", packedIP)[0]
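# Illustrative usage (not part of the original snippet): round-trip a dotted-quad
# IPv4 address through its big-endian unsigned 32-bit representation.
def long2ip(packed):
    """Inverse of ip2long: convert a 32-bit integer back to a dotted-quad string."""
    return socket.inet_ntoa(struct.pack("!L", packed))

assert ip2long("192.168.1.1") == 3232235777
assert long2ip(ip2long("10.0.0.1")) == "10.0.0.1"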
|
fbcd7e6255590fa5f67c90bb077d2aa9858abf0a
| 33,290 |
def item_link_copy(request, op):
""" Objekt zum Einblenden markieren """
if request.GET.has_key('id'):
item_container = get_item_container_by_id(request.GET['id'])
else:
item_container = get_my_item_container(request, op)
if item_container.parent_item_id != -1:
#request.session['dms_link_copy_id'] = item_container.item.id
request.session['dms_link_copy_id'] = item_container.id
if 'dms_cut_id' in request.session:
del request.session['dms_cut_id']
elif 'dms_copy_id' in request.session:
del request.session['dms_copy_id']
    if 'base_url' in request.GET:
path = request.GET['base_url'] + '/manage/'
else:
parent = item_container.get_parent()
path = get_site_url(parent, 'index.html/manage/')
return HttpResponseRedirect(path)
|
1696eaa219193f80d4f61a9b573d00c4a06f077b
| 33,291 |
def get_aggregation_fn_cls(rng):
"""Sample aggregation function for feature"""
return rng.choice(AGGREGATION_OPERATORS)
|
843fc97c7216148e2bdfb40009da5a56f77b7008
| 33,292 |
def resnet_mvgcnn(depth, pretrained=False, **kwargs):
"""Constructs a MVGCNN based on ResNet-18 model."""
model = ResNetMVGCNN(BasicBlock,
resnet_layers[depth],
**kwargs)
if pretrained:
pretrained_dict = model_zoo.load_url(
model_urls['resnet{}'.format(depth)])
model_dict = model.state_dict()
pretrained_dict = {k: v for k, v in pretrained_dict.items()
if k in model_dict and v.shape == model_dict[k].shape}
print('Loading {} inputs from pretrained model...'
.format(len(pretrained_dict)))
model_dict.update(pretrained_dict)
model.load_state_dict(model_dict)
return model
|
433b028c8af6c99a17b4cb35fc217bffaeee1439
| 33,293 |
def _smooth_samples_by_weight(values, samples):
"""Add Gaussian noise to each bootstrap replicate.
The result is used to compute a "smoothed bootstrap," where the added noise
ensures that for small samples (e.g. number of bins in the segment) the
bootstrapped CI is close to the standard error of the mean, as it should be.
Conceptually, sample from a KDE instead of the values themselves.
This addresses the issue that small segments (#bins < #replicates) don't
fully represent the underlying distribution, in particular the extreme
values, so the CI is too narrow. For single-bin segments in particular,
the confidence interval will always have zero width unless the samples are
smoothed.
Standard deviation of the noise added to each bin comes from each bin's
weight, which is an estimate of (1-variance).
Parameters
----------
values : np.ndarray
Original log2 values within the segment.
samples : list of np.ndarray
Bootstrap replicates as (value_sample, weight_sample).
Returns
-------
`samples` with random N(0, pop_sd) added to each value, and
weights unchanged.
"""
k = len(values)
# KDE bandwidth narrows for larger sample sizes
# Following Silverman's Rule and Polansky 1995,
# but requiring k=1 -> bw=1 for consistency
bw = k ** (-1/4)
samples = [(v + (bw * np.sqrt(1-w) * np.random.randn(k)), w)
for v, w in samples]
return samples
|
470c2263f81212daf56450e43f64b56d32b654a0
| 33,294 |
def lpc_ref(signal, order):
"""Compute the Linear Prediction Coefficients.
Return the order + 1 LPC coefficients for the signal. c = lpc(x, k) will
find the k+1 coefficients of a k order linear filter:
    xp[n] = -c[1] * x[n-1] - ... - c[k] * x[n-k]
    such that the sum of the squared error e[i] = xp[i] - x[i] is minimized.
Parameters
----------
signal: array_like
input signal
order : int
LPC order (the output will have order + 1 items)
Notes
    -----
    This is just for reference, as it is using the direct inversion of the
    Toeplitz matrix, which is really slow."""
if signal.ndim > 1:
raise ValueError("Array of rank > 1 not supported yet")
if order > signal.size:
raise ValueError("Input signal must have a lenght >= lpc order")
if order > 0:
p = order + 1
r = np.zeros(p, 'float32')
# Number of non zero values in autocorrelation one needs for p LPC
# coefficients
nx = np.min([p, signal.size])
x = np.correlate(signal, signal, 'full')
r[:nx] = x[signal.size-1:signal.size+order]
phi = np.dot(sp.linalg.inv(sp.linalg.toeplitz(r[:-1])), -r[1:])
return np.concatenate(([1.], phi))
else:
return np.ones(1, dtype = 'float32')
|
bd148dd367fa179933b7f318e071e1017fd707ce
| 33,295 |
def make_mpo_networks(
action_spec,
policy_layer_sizes = (300, 200),
critic_layer_sizes = (400, 300),
):
"""Creates networks used by the agent."""
num_dimensions = np.prod(action_spec.shape, dtype=int)
critic_layer_sizes = list(critic_layer_sizes) + [1]
policy_network = snt.Sequential([
networks.LayerNormMLP(policy_layer_sizes),
networks.MultivariateNormalDiagHead(num_dimensions)
])
# The multiplexer concatenates the (maybe transformed) observations/actions.
critic_network = networks.CriticMultiplexer(
critic_network=networks.LayerNormMLP(critic_layer_sizes),
action_network=networks.ClipToSpec(action_spec))
return {
'policy': policy_network,
'critic': critic_network,
'observation': tf_utils.batch_concat,
}
|
c52cb76f96390ab633ca05f33e1e40b20d78ae93
| 33,296 |
from typing import Callable
from typing import Any
def numgrad_x(f: Callable[[Any], Any], x: Tensor, eps: float = 1e-6) -> Tensor:
"""get numgrad with the same shape of x
Args:
f (Callable[[Any], Any]): the original function
x (Tensor): the source x
eps (float, optional): default error. Defaults to 1e-6.
Returns:
        Tensor: the numgrad with the same shape as x
"""
g = numgrad(f, x, eps)
return numgrad_to_dim(g, x.ndim)
|
fd8b680e122de3adaa3a3246f8d7f786bffd24a2
| 33,297 |
def throw_darts_serial(n_darts):
"""Throw darts at a square. Execute serially! Count
how many end up in an inscribed circle. Approximate pi.
Parameters
----------
n_darts : int
Number of darts to throw
Returns
-------
pi_approx : float
Approximation of pi
"""
# count the number of hits in the circle
n_success = 0
for dart in range(n_darts):
if hit_target(dart):
n_success += 1
# approximate pi
pi_approx = 4 * n_success / n_darts
return pi_approx
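# The helper hit_target is not shown in this snippet; a minimal stand-in
# (hypothetical, for illustration only) throws one dart at the unit square and
# reports whether it lands inside the inscribed quarter circle.
import random

def hit_target(_dart):
    x, y = random.random(), random.random()
    return x * x + y * y <= 1.0

# With this stand-in, throw_darts_serial(100_000) returns a value close to pi.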
|
1aee3692af97eb2ac6d68ccfd573fa55e9e98a6c
| 33,298 |
def sample_category(user, name='Movie', slug='film'):
"""Create and return a sample ingredient"""
return Category.objects.create(user=user, name=name, slug=slug)
|
c7291808b63244a74e97700819584d3630fd2f04
| 33,299 |
def get_all():
"""
gets all genres.
:rtype: list[GenreEntity]
"""
return get_component(GenresPackage.COMPONENT_NAME).get_all()
|
091ecec08926542df9d923a180a359c2adaa7a1c
| 33,300 |
def dist(node1, node2):
"""Calculate the distance between two vertices
"""
return np.sqrt(np.sum((node1 - node2)**2))
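# Illustrative usage (not part of the original snippet). numpy is assumed to be
# imported as np by the surrounding module; it is imported here so the example runs.
import numpy as np

assert dist(np.array([0.0, 0.0]), np.array([3.0, 4.0])) == 5.0  # 3-4-5 triangle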
|
54458b1f32fc9af2bd426fa91711f802bdfb5d2c
| 33,301 |
def propagate_errors(f, x, dx, jac=None, n_samples=1e4, seed=42):
"""Propagates the errors over all the elements of the array x
To see how the error propagation is performed see the documentation
of dy
Returns:
[array]: Array with the same size of x with the errors
"""
return np.sqrt([dy(f, x[i], dx[i]**2, jac, n_samples, seed+i) for i in range(len(x))])
|
cc0f790293fbfdccafded53cf182ab296df183a8
| 33,303 |
def getFibonacciIterative(n: int) -> int:
"""
Calculate the fibonacci number at position n iteratively
"""
a = 0
b = 1
for _ in range(n):
a, b = b, a + b
return a
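# Illustrative usage (not part of the original snippet):
assert [getFibonacciIterative(n) for n in range(7)] == [0, 1, 1, 2, 3, 5, 8]
assert getFibonacciIterative(10) == 55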
|
19920a0dfc83f6dc17b5445294c206003ebe04f7
| 33,304 |
def louvain(graph, min_progress=1000, progress_tries=1):
"""Compute best partition on the `graph` by louvain.
Args:
graph (:class:`Graph`): A projected simple undirected graph.
min_progress: The minimum delta X required to be considered progress, where X is the number of nodes
that have changed their community on a particular pass.
Delta X is then the difference in number of nodes that changed communities
on the current pass compared to the previous pass.
        progress_tries: number of times the min_progress setting is not met
            before exiting from the current level and compressing the graph.
Returns:
:class:`graphscope.framework.context.VertexDataContextDAGNode`:
A context with each vertex assigned with id of community it belongs to, evaluated in eager mode.
References:
[1] Blondel, V.D. et al. Fast unfolding of communities in large networks. J. Stat. Mech 10008, 1-12(2008).
[2] https://github.com/Sotera/distributed-graph-analytics
[3] https://sotera.github.io/distributed-graph-analytics/louvain/
Notes:
        louvain currently only supports undirected graphs. If the input graph is directed, louvain raises
        an InvalidArgumentError.
Examples:
.. code:: python
import graphscope as gs
s = gs.session()
g = s.g(directed=False)
g = g.add_vertices('The parameters for loading a graph...')
g = g.add_edges('The parameters for loading a graph...')
pg = g.project(vertices={"vlabel": []}, edges={"elabel": ["weight"]})
r = gs.louvain(pg)
s.close()
"""
if graph.is_directed():
        raise InvalidArgumentError("Louvain does not support directed graphs.")
return AppAssets(algo="louvain", context="vertex_data")(
graph, min_progress, progress_tries
)
|
a79fee324f3f7a79cb568166f91adfa5af095ab4
| 33,305 |
def compat(data):
"""
Check data type, transform to string if needed.
Args:
data: The data.
Returns:
The data as a string, trimmed.
"""
if not isinstance(data, str):
data = data.decode()
return data.rstrip()
|
51d2d37b427e77b038d8f18bebd22efa4b4fdcce
| 33,306 |
from typing import Tuple
def cascade(u: np.ndarray, balanced: bool = True, phase_style: str = PhaseStyle.TOP,
error_mean_std: Tuple[float, float] = (0., 0.), loss_mean_std: Tuple[float, float] = (0., 0.)):
"""Generate an architecture based on our recursive definitions programmed to implement unitary :math:`U`,
or a set of :math:`K` mutually orthogonal basis vectors.
Args:
u: The (:math:`k \\times n`) mutually orthogonal basis vectors (unitary if :math:`k=n`) to be configured.
balanced: If balanced, does balanced tree (:code:`m = n // 2`) otherwise linear chain (:code:`m = n - 1`).
phase_style: Phase style for the nodes (see the :code:`PhaseStyle` enum).
error_mean_std: Mean and standard deviation for errors (in radians).
loss_mean_std: Mean and standard deviation for losses (in dB).
Returns:
Node list, thetas and phis.
"""
subunits = []
thetas = np.array([])
phis = np.array([])
gammas = np.array([])
n_rails = u.shape[0]
num_columns = 0
num_nodes = 0
w = u.conj().T.copy()
for i in reversed(range(n_rails + 1 - u.shape[1], n_rails)):
# Generate the architecture as well as the theta and phi for each row of u.
network, w = vector_unit(w[:i + 1, :i + 1], n_rails, balanced, phase_style,
error_mean_std, loss_mean_std)
# Update the phases.
thetas = np.hstack((thetas, network.thetas))
phis = np.hstack((phis, network.phis))
gammas = np.hstack((network.gammas[-1], gammas))
# We need to index the thetas and phis correctly based on the number of programmed nodes in previous subunits
# For unbalanced architectures (linear chains), we can actually pack them more efficiently into a triangular
# architecture.
network.offset(num_nodes).offset_column(num_columns if balanced else 2 * (n_rails - 1 - i))
# Add the nodes list to the subunits
subunits.append(network)
# The number of columns and nodes in the architecture are incremented by the subunit size (log_2(i))
num_columns += subunits[-1].num_columns
num_nodes += subunits[-1].num_nodes
gammas = np.hstack((-np.angle(w[0, 0]), gammas))
unit = ForwardMesh.aggregate(subunits)
unit.params = thetas, phis, gammas
return unit
|
20fc298b8bd2c68ad46570c2815e674deb2d7020
| 33,307 |
def rsa_decrypt(cipher: int, d: int, n: int) -> int:
"""
decrypt ciphers with the rsa cryptosystem
:param cipher: the ciphertext
:param d: your private key
:param n: your public key (n)
:return: the plaintext
"""
return pow(cipher, d, n)
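# Illustrative usage (not part of the original snippet), with the classic textbook
# key pair p=61, q=53 -> n=3233, e=17, d=2753 (far too small for real use):
def rsa_encrypt(plain: int, e: int, n: int) -> int:
    """Textbook RSA encryption, the counterpart of rsa_decrypt."""
    return pow(plain, e, n)

assert rsa_encrypt(65, 17, 3233) == 2790
assert rsa_decrypt(2790, 2753, 3233) == 65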
|
33822a0a683eca2f86b0e2b9b319a42806ae56cc
| 33,308 |
import json
def ajax_update_report_findings(request):
"""
Update the ``position`` and ``severity`` fields of all :model:`reporting.ReportFindingLink`
attached to an individual :model:`reporting.Report`.
"""
data = {"result": "error"}
if request.method == "POST" and request.is_ajax():
pos = request.POST.get("positions")
report_id = request.POST.get("report")
severity_class = request.POST.get("severity").replace("_severity", "")
order = json.loads(pos)
logger.info(
"Received AJAX POST to update report %s's %s severity group findings in this order: %s",
report_id,
severity_class,
", ".join(order),
)
try:
severity = Severity.objects.get(severity__iexact=severity_class)
except Severity.DoesNotExist:
severity = None
if severity:
counter = 1
for finding_id in order:
if "placeholder" not in finding_id:
finding_instance = ReportFindingLink.objects.get(id=finding_id)
if finding_instance:
finding_instance.severity = severity
finding_instance.position = counter
finding_instance.save()
counter += 1
else:
logger.error(
"Received a finding ID, %s, that did not match an existing finding",
finding_id,
)
            # If all went well, return success
            data = {"result": "success"}
        else:
            data = {"result": "specified severity, {}, is invalid".format(severity_class)}
else:
data = {"result": "error"}
return JsonResponse(data)
|
61c62d76e458bc7f752d6e9cecedd4cf5b941b91
| 33,309 |
def head_pos_to_trans_rot_t(quats):
"""Convert Maxfilter-formatted head position quaternions.
Parameters
----------
quats : ndarray, shape (N, 10)
MaxFilter-formatted position and quaternion parameters.
Returns
-------
translation : ndarray, shape (N, 3)
Translations at each time point.
rotation : ndarray, shape (N, 3, 3)
Rotations at each time point.
t : ndarray, shape (N,)
The time points.
See Also
--------
read_head_pos
write_head_pos
"""
t = quats[..., 0].copy()
rotation = quat_to_rot(quats[..., 1:4])
translation = quats[..., 4:7].copy()
return translation, rotation, t
|
9244452c59d132240a0b040effd35f68f80891df
| 33,311 |
def geo_to_h3(lats, lngs, res):
"""
    Convert arrays describing lat/lng pairs to cells.
Parameters
----------
lats, lngs : arrays of floats
res: int
Resolution for output cells.
Returns
-------
array of H3Cells
"""
assert len(lats) == len(lngs)
out = np.zeros(len(lats), dtype='uint64')
_vect.geo_to_h3_vect(lats, lngs, res, out)
return out
|
acc004b6ecbc861e17e7e6421040fb3d36ca1e24
| 33,312 |
def create_roads(roads):
""" Create roads information from roads reference data
Args:
roads: dataframe from roads reference data
Returns:
list of road curves for all roads
"""
road_curves = []
for _, row in roads.iterrows():
splitcoords = row["coordinates"].split(",")
longlats = list(zip(*[iter(splitcoords)]*2))
latlongs = [tuple(reversed(item))
for item in longlats] # correct to lat/long, reversed
# LineString (Spatial Lines based on road coords)
shape_points = []
for point in latlongs:
shape_point = Point(float(point[0]), float(point[1]))
shape_points.append(shape_point)
line = LineString(shape_points)
# Road curvature/lengths based on line points
dist = haversine_np(
line.coords.xy[0][0], # First X
line.coords.xy[1][0], # First Y
line.coords.xy[0][len(line.coords.xy[0])-1], # End X
line.coords.xy[1][len(line.coords.xy[0])-1] # End Y
)
curve = (line.length / dist) if dist != 0 else 0
road_curves.append(curve)
return road_curves
|
764c8f77edb3603824c0de2bd50d71147b2e8abe
| 33,313 |
def set_extentions(doc):
"""No-param function. Sets 'frequency' and 'instance_list' variable for each token.
The frequency is calculated by lemma_.lower() word of the noun phrase.
And lemma_.lower() is used to add instance to instance list.
Returns: None
"""
freq_count = defaultdict(int)
instance_list = defaultdict(list)
for t in doc:
freq_count[t.lemma_.lower()] += 1
        instance_list[t.lemma_.lower()].append(t)
def get_freq(t): return freq_count[t.lemma_.lower()]
def get_instance_list(t): return instance_list[t.lemma_.lower()]
spacy.tokens.Token.set_extension('frequency', getter=get_freq, force=True)
spacy.tokens.Token.set_extension('instance_list', getter=get_instance_list, force=True)
return doc
|
485f2afe1947144da38bb5cf5cf1c788a1a3adb1
| 33,314 |
import time
def stream_occurrences(read_token, project_slug, last_id=None):
"""
read_token: Rollbar project_access_token with read_scope
project_slug: The name of the project that this token is for
    last_id: The lowest occurrence_id we are interested in
returns:
Occurrence list
Highest occurrence_id in the returned list (None if list empty)
"""
now_epoch_seconds = int(time.time())
occ_list = []
occ_list, highest_id, lowest_id = get_occurrence_batch(read_token,
project_slug)
    if last_id is None:
print('last_id was None')
last_id = lowest_id
while lowest_id > last_id:
batch_list, highest_id, lowest_id = \
get_occurrence_batch(read_token, project_slug, lowest_id)
occ_list = occ_list + batch_list
if not continue_getting_occs(occ_list, now_epoch_seconds):
break
occ_list = [x for x in occ_list if x.id > last_id]
occ_list.sort(key=lambda x: x.id, reverse=True)
max_id = None
if len(occ_list) > 0:
max_id = occ_list[0].id
else:
max_id = last_id
return occ_list, max_id
|
9ede2738c9485047b807bb0917ac5c18f4d7a0a5
| 33,315 |
def git_describe(repos):
"""Try running git describe on the given repos"""
return run(["git", "-C", str(repos), "describe", "--tags", "--abbrev=8"])
|
46faa032e08f09df3aeaa7c07478125fdac6b66f
| 33,316 |
def convertToModelZ(z):
""" scale Z-axis coordinate to the model
"""
return z * SCALE_2_MODEL
|
b6a48f434bd9bebe57f724474bcc462647d55659
| 33,318 |
def opt_get_model_rest_api(model_id):
"""Retrieve model data
"""
model_id = Markup.escape(model_id)
return retrieve_model_data(model_id)
|
2c69c4573736a455284df1098ef9b397588a7c35
| 33,319 |
import torch
def tokenize(texts, context_length=77, truncate=False) -> torch.Tensor:
"""
Returns the tokenized representation of given input string(s)
Parameters
----------
texts : Union[str, List[str]]
An input string or a list of input strings to tokenize
context_length : int
The context length to use; all CLIP models use 77 as the context length
truncate: bool
Whether to truncate the text in case its encoding is longer than the context length
Returns
-------
A two-dimensional tensor containing the resulting tokens, shape = [number of input strings, context_length]
"""
if isinstance(texts, str):
texts = [texts]
sot_token = _tokenizer.encoder["<|startoftext|>"]
eot_token = _tokenizer.encoder["<|endoftext|>"]
all_tokens = [[sot_token] + _tokenizer.encode(text) + [eot_token] for text in texts]
result = torch.zeros(len(all_tokens), context_length, dtype=torch.long)
for i, tokens in enumerate(all_tokens):
if len(tokens) > context_length:
if truncate:
tokens = tokens[:context_length]
tokens[-1] = eot_token
else:
raise RuntimeError(f"Input {texts[i]} is too long for context length {context_length}")
result[i, :len(tokens)] = torch.tensor(tokens)
return result
|
d6fbe8b863e4a1153a84f39851482f7211153dd9
| 33,320 |
import yaml
def config_get_timesteps(filepath):
"""Get list of time steps from YAML configuration file.
Parameters
----------
filepath : pathlib.Path or str
Path of the YAML configuration file.
Returns
-------
list
List of time-step indices (as a list of integers).
"""
with open(filepath, 'r') as infile:
config = yaml.safe_load(infile)['parameters']
config.setdefault('startStep', 0)
nstart, nt, nsave = config['startStep'], config['nt'], config['nsave']
return list(range(nstart, nt + 1, nsave))
|
8a51e1437edbf2d73884cb633dbf05e9cfe5a98d
| 33,321 |
def join_expressions(positions, labels, sep="\t"):
"""Join mean expressions.
Join expressions from different time points and return only those that are
in all samples.
"""
dfs = []
for position, replicates in positions:
dfs.append(
get_mean_expression(
replicates,
name=labels[position],
sep=sep,
)
)
inner = pd.concat(dfs, axis=1, join="inner")
outer = pd.concat(dfs, axis=1, join="outer")
excluded = sorted(outer.index.difference(inner.index))
return inner, excluded
|
48cc69bf02add561c3d5297b4d90f6e148117d18
| 33,322 |
def current_snapshot(id_or_symbol):
"""
    Get the current market snapshot. This can only be called during the intraday trading phase,
    and returns the snapshot as of the moment of the call on the current day.
    The snapshot records the data from the day's open up to the present and can be thought of as a
    dynamically updated day bar.
    In the current minute-level backtest, the snapshot is accumulated from all minute bars of the day;
    in general, the snapshot obtained on the last minute bar should agree with the day's daily bar.
    Note that in live simulation this function returns the snapshot at the time of the call, so calls
    at different points within the same handle_bar may return different data.
    If the instrument has had no trades so far on the day of the call, the close, high, low and last
    prices in the snapshot are all reported as 0.
    :param str order_book_id: instrument code or symbol
    :return: :class:`~Snapshot`
    :example:
    Calling this function in handle_bar, assuming the strategy's current time is 2016-01-04 09:33:
.. code-block:: python3
:linenos:
[In]
logger.info(current_snapshot('000001.XSHE'))
[Out]
2016-01-04 09:33:00.00 INFO
Snapshot(order_book_id: '000001.XSHE', datetime: datetime.datetime(2016, 1, 4, 9, 33), open: 10.0, high: 10.025, low: 9.9667, last: 9.9917, volume: 2050320, total_turnover: 20485195, prev_close: 9.99)
"""
env = Environment.get_instance()
frequency = env.config.base.frequency
order_book_id = assure_order_book_id(id_or_symbol)
dt = env.calendar_dt
if env.config.base.run_type == RUN_TYPE.BACKTEST:
if ExecutionContext.phase() == EXECUTION_PHASE.BEFORE_TRADING:
dt = env.data_proxy.get_previous_trading_date(env.trading_dt.date())
return env.data_proxy.current_snapshot(order_book_id, "1d", dt)
elif ExecutionContext.phase() == EXECUTION_PHASE.AFTER_TRADING:
return env.data_proxy.current_snapshot(order_book_id, "1d", dt)
    # For paper trading and live trading, fetch the latest snapshot directly; the frequency and dt arguments are ignored.
return env.data_proxy.current_snapshot(order_book_id, frequency, dt)
|
25c7493fdd380504854d4eb6be17af440966b639
| 33,323 |
def infection_rate_symptomatic_50x40():
"""
Real Name: b'infection rate symptomatic 50x40'
Original Eqn: b'Susceptible 40*Infected symptomatic 40x50*contact infectivity symptomatic 40x50*(self quarantine policy SWITCH self 40\\\\ * self quarantine policy 40+(1-self quarantine policy SWITCH self 40))/non controlled pop 40x50'
Units: b'person/Day'
Limits: (None, None)
Type: component
b''
"""
return susceptible_40() * infected_symptomatic_40x50() * contact_infectivity_symptomatic_40x50(
) * (self_quarantine_policy_switch_self_40() * self_quarantine_policy_40() +
(1 - self_quarantine_policy_switch_self_40())) / non_controlled_pop_40x50()
|
0ae1831493e02e413b0bd67d78f4fd88163ecd7e
| 33,324 |
def cut_limits(x_data: tuple, y_data: tuple, x_lims: tuple):
"""Selecting range of x/y data based on x_lims.
Args:
x_data (tuple): x axis data
y_data (tuple): y axis values
x_lims (tuple): limits to select range from x and y
Raises:
ValueError: If x_lims have wrong shape
Returns:
list: limited x data, limited y data
"""
    # extending x axis to match the number of datasets
y_data = nest_data(y_data)
n_dat = len(y_data)
x_data = duplicate_nesting(x_data, y_data)
print(f'number of datasets: {n_dat}.')
# no limits, take all positive x
x = []
data = []
if x_lims is None:
for i in range(n_dat):
x.append(x_data[i][x_data[i] > 0])
data.append(y_data[i][x_data[i] > 0])
# x limits global for all kinetics
elif len(x_lims) == 2:
for i in range(n_dat):
beg, end = sup.get_idx(*x_lims, axis=x_data[i])
x.append(x_data[i][beg:end+1])
data.append(y_data[i][beg:end+1])
# x limits for each kinetic specified
elif len(x_lims) == n_dat*2:
for i in range(n_dat):
beg, end = sup.get_idx(*x_lims[2*i:2*i+2], axis=x_data[i])
x.append(x_data[i][beg:end+1])
data.append(y_data[i][beg:end+1])
else:
        raise ValueError('Wrong shape of x_lims.')
return x, data
|
144f47231e80c7bc00e6d7ea9038c2040748ff3b
| 33,327 |
import re
def str_to_number_with_uncert(representation):
"""
Given a string that represents a number with uncertainty, returns the
nominal value and the uncertainty.
See the documentation for ufloat_fromstr() for a list of accepted
formats.
When no numerical error is given, an uncertainty of 1 on the last
digit is implied.
Raises ValueError if the string cannot be parsed.
"""
match = NUMBER_WITH_UNCERT_GLOBAL_EXP_RE_MATCH(representation)
# The representation is simplified, but the global factor is
# calculated:
if match:
# We have a form with a factored exponent: (1.23 +/- 0.01)e10,
# etc.
exp_value_str = match.group('exp_value')
try:
exponent = from_superscript(exp_value_str)
except ValueError:
raise ValueError(cannot_parse_ufloat_msg_pat % representation)
factor = 10.**exponent
representation = match.group('simple_num_with_uncert')
else:
factor = 1 # No global exponential factor
match = re.match(u'(.*)(?:\+/-|±)(.*)', representation)
if match:
(nom_value, uncert) = match.groups()
try:
# Simple form 1234.45+/-1.2 or 1234.45±1.2, or 1.23e-10+/-1e-23
# or -1.2×10⁻¹²±1e23:
parsed_value = (to_float(nom_value)*factor,
to_float(uncert)*factor)
except ValueError:
raise ValueError(cannot_parse_ufloat_msg_pat % representation)
else:
# Form with error parentheses or no uncertainty:
try:
parsed_value = parse_error_in_parentheses(representation)
except NotParenUncert:
raise ValueError(cannot_parse_ufloat_msg_pat % representation)
return parsed_value
|
fbdafed553697c3052765afc5127fdfcc75e03f6
| 33,328 |
import json
def saveRecipe():
"""
Processing POST method from local for saving the new generated recipe
:return:
"""
method = request.method
if(method == "POST" and checkPassword(request, app.admin_password)):
j = json.loads(request.json)
fields = ['name','url', 'ingredients','picture']
values = []
for f in fields:
values.append(j[f])
Parser.updateSave(app.recipe_file, 'recipe', fields, values)
return "200"
|
922d4c1ec66e43bd0aa254ac561e5436cf9002ee
| 33,329 |
from typing import Dict
from typing import Any
def list_queues_command(client: Client, args: Dict[str, Any]) -> CommandResults:
"""
List queues in Azure storage account.
Args:
client (Client): Azure Queue Storage API client.
args (dict): Command arguments from XSOAR.
Returns:
CommandResults: outputs, readable outputs and raw response for XSOAR.
"""
limit = args.get('limit') or '50'
prefix = args.get('prefix')
page = arg_to_number(args.get('page') or '1')
marker = ''
readable_message = f'Queues List:\n Current page size: {limit}\n Showing page {page} out others that may exist'
if page > 1: # type: ignore
marker = get_pagination_next_marker_element(limit=limit, page=page, # type: ignore
client_request=client.list_queues_request,
params={"prefix": prefix})
if not marker:
return CommandResults(
readable_output=readable_message,
outputs_prefix='AzureStorageQueue.Queue',
outputs=[],
raw_response=[]
)
response = client.list_queues_request(limit, prefix, marker)
xml_response = parse_xml_response(xml_string_response=response, tag_path="./Queues/Queue/Name", find_tag=True)
raw_response = [{"name": element.text} for element in xml_response]
readable_output = tableToMarkdown(
readable_message,
raw_response,
headers='name',
headerTransform=pascalToSpace
)
command_results = CommandResults(
readable_output=readable_output,
outputs_prefix='AzureStorageQueue.Queue',
outputs_key_field='name',
outputs=raw_response,
raw_response=raw_response
)
return command_results
|
7a54a290795d00d6c3594a78ace03fc3c3001961
| 33,330 |
def isUnsaturated(mol):
"""
Does the molecule have a bond that's not single?
Eg. a bond that is double or triple or benzene
"""
cython.declare(atom1=Atom,
atom2=Atom,
bonds=dict,
bond=Bond)
for atom1 in mol.atoms:
bonds = mol.getBonds(atom1)
        for atom2, bond in bonds.items():
if not bond.isSingle():
return True
return False
|
eaf58e2d958b02055e0eb969aec2fd983fcdd7cd
| 33,331 |
import random
def run_with_fixed_seeds(count=128, master_seed=0x243F6A8885A308D3):
"""
decorator run test method w/ multiple fixed seeds.
"""
def builder(func):
@wraps(func)
def wrapper(*args, **kwds):
rng = random.Random(master_seed)
for _ in irange(count):
kwds['seed'] = rng.getrandbits(32)
func(*args, **kwds)
return wrapper
return builder
|
1b2c75b8e57e5c090cf25307189e86ba99a13589
| 33,332 |
def merge_cv_results(cv_results):
"""
Means across CV
"""
dtypes = ["train", "dev", "test"]
props_l1 = ["mean_loss", "mean_accuracy", "mean_positive_f1", "UL-A", "Joint-A"]
props_l2 = ["accuracy", "positive_f1"]
merged_results = {}
for dtype in dtypes:
merged_results[dtype] = {}
for prop in props_l1:
summ = 0.0
for item in cv_results:
summ += item[dtype][prop]
merged_results[dtype][prop] = summ/len(cv_results)
num_labels = len(cv_results[0][dtype]["label_wise"])
merged_results[dtype]["label_wise"] = [{} for _ in range(num_labels)]
for i in range(num_labels):
for prop in props_l2:
summ = 0.0
for item in cv_results:
summ += item[dtype]["label_wise"][i][prop]
merged_results[dtype]["label_wise"][i][prop] = summ/len(cv_results)
return merged_results
|
854b0672ec31103136ad3c7285311f865a098159
| 33,333 |
from typing import List
from typing import Tuple
def _broads_cores(sigs_in: List[Tuple[str]],
shapes: Tuple[Tuple[int, ...]],
msg: str
) -> Tuple[List[Tuple[int, ...]], List[Tuple[int, ...]]]:
"""Extract broadcast and core shapes of arrays
Parameters
----------
sigs_in : Tuple[str, ...]
Core signatures of input arrays
shapes : Tuple[int, ...]
Shapes of input arrays
msg : str
Potential error message
Returns
-------
broads : List[Tuple[int, ...]]
Broadcast shape of input arrays
cores : List[Tuple[int, ...]]
Core shape of input arrays
Raises
------
ValueError
If arrays do not have enough dimensions.
"""
dims = [len(sig) for sig in sigs_in]
broads, cores = [], []
if any(len(shape) < dim for shape, dim in zip(shapes, dims)):
raise ValueError('Core array does not have enough ' + msg)
for shape, dim in zip(shapes, dims):
if dim:
broads.append(shape[:-dim])
cores.append(shape[-dim:])
else:
broads.append(shape)
cores.append(())
return broads, cores
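# Illustrative usage (not part of the original snippet): one input with core
# signature ('n', 'm') and shape (5, 3, 4) splits into broadcast (5,) and core (3, 4).
assert _broads_cores([('n', 'm')], ((5, 3, 4),), "dimensions") == ([(5,)], [(3, 4)])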
|
228cbe06e4bc1e092cf92034255bfd60f01664c1
| 33,334 |
def batch_iou(boxes, box):
"""Compute the Intersection-Over-Union of a batch of boxes with another
box.
    Args:
        boxes: 2D array of boxes, each [cx, cy, width, height].
        box: a single array of [cx, cy, width, height]
Returns:
ious: array of a float number in range [0, 1].
"""
lr = np.maximum(np.minimum(boxes[:,0]+0.5*boxes[:,2], box[0]+0.5*box[2]) - \
np.maximum(boxes[:,0]-0.5*boxes[:,2], box[0]-0.5*box[2]),
0)
tb = np.maximum(
np.minimum(boxes[:,1]+0.5*boxes[:,3], box[1]+0.5*box[3]) - \
np.maximum(boxes[:,1]-0.5*boxes[:,3], box[1]-0.5*box[3]),
0)
inter = lr*tb
union = boxes[:,2]*boxes[:,3] + box[2]*box[3] - inter
return inter/union
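# Illustrative usage (not part of the original snippet). numpy is assumed to be
# imported as np by the surrounding module; it is imported here so the example runs.
import numpy as np

boxes = np.array([[1.0, 1.0, 2.0, 2.0], [2.0, 1.0, 2.0, 2.0]])
box = np.array([1.0, 1.0, 2.0, 2.0])
ious = batch_iou(boxes, box)
assert ious[0] == 1.0                    # identical box -> IoU of 1
assert abs(ious[1] - 1.0 / 3.0) < 1e-9   # box shifted by half its width -> IoU of 1/3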
|
e7064a2f04370b5b3ffa78a585ab9341cd2321be
| 33,335 |
import random
def createRandomIntID(length):
"""
Creates a random number.\n
Useful for identifiers.
"""
global dict_of_ids
final_id = ''
for _ in range(length):
        final_id += str(random.randint(0, 9))
if int(final_id) in dict_of_ids:
final_id = createRandomIntID(length)
dict_of_ids[int(final_id)] = True
BrowserObject.dict_of_ids[int(final_id)] = True
return int(final_id)
|
aebd025cf3275a5a07b303c1c296e86f1f17aac5
| 33,336 |
def get_all_colors():
"""
:return: Color Options
"""
return color
|
2104c1195c6e70c20e595d25f369d99c6f00269f
| 33,337 |
import getpass
def get_username():
"""Get Windows username
Get current logged in user's username
:return: Username
:Example:
>>> get_username()
'Automagica'
Keywords
windows, login, logged in, lockscreen, user, password, account, lock, locked, freeze, hibernate, sleep
Icon
las la-user
"""
only_supported_for("Windows")
return getpass.getuser()
|
91e5713317d72577d236e45963c7037446dad572
| 33,338 |
import json
import logging
def _ParseStepLogIfAppropriate(data, log_name):
"""PConditionally parses the contents of data, based on the log type."""
if not data:
return None
if log_name.lower() == 'json.output[ninja_info]':
# Check if data is malformatted.
try:
json.loads(data)
except ValueError:
logging.error('json.output[ninja_info] is malformatted')
return None
if log_name.lower() not in ['stdout', 'json.output[ninja_info]']:
try:
return json.loads(data) if data else None
except ValueError:
logging.error(
'Failed to json load data for %s. Data is: %s.' % (log_name, data))
return None
return data
|
4f2ef1f451c271adf0285ae90f88cf10b6e8d9be
| 33,339 |
def RSIZJ(df, N=3, LL=6, LH=6):
"""
    Relative strength expert system (RSI-based entry/exit signals)
:param df:
:param N:
:param LL:
:param LH:
:return:
"""
CLOSE = df['close']
LC = REF(CLOSE, 1)
WRSI = SMA(MAX(CLOSE - LC, 0), N, 1) / SMA(ABS(CLOSE - LC), N, 1) * 100
ENTERLONG = CROSS(WRSI, LL)
EXITLONG = CROSS(LH, WRSI)
return pd.DataFrame({
'ENTERLONG': ENTERLONG, 'EXITLONG': EXITLONG
})
|
563f4866ecb76dd732c9da192dc8a00a1ea44126
| 33,340 |
def autocomplete_service_env(actions, objects):
"""
Returns current service_env for object. Used as a callback for
`default_value`.
Args:
actions: Transition action list
objects: Django models objects
Returns:
service_env id
"""
service_envs = [obj.service_env_id for obj in objects]
# if service-env for all objects are the same
if len(set(service_envs)) == 1:
return service_envs[0]
return None
|
04d75c96619cc60433d3df51c44f621c2a86df25
| 33,341 |
def SoftwareProvenanceConst_get_decorator_type_name():
"""SoftwareProvenanceConst_get_decorator_type_name() -> std::string"""
return _RMF.SoftwareProvenanceConst_get_decorator_type_name()
|
eda72a471a2baf9ecfc5937ec5f9a4151be1ef6d
| 33,342 |
import torch
def collate_fn(batch, train=True):
""" list of tensors to a batch tensors """
premise_batch, _ = pad_batch([row['premise'] for row in batch])
hypothesis_batch, _ = pad_batch([row['hypothesis'] for row in batch])
label_batch = torch.stack([row['label'] for row in batch])
# PyTorch RNN requires batches to be transposed for speed and integration with CUDA
transpose = (lambda b: b.t_().squeeze(0).contiguous())
return (transpose(premise_batch), transpose(hypothesis_batch), transpose(label_batch))
|
980beccc7eb84165a625ad1d58c28e00a095ac4f
| 33,343 |
def weights_and_neighbours(graph):
"""Compute weight and neighbors matrices from graph
Parameters
----------
graph : scipy.sparse.csr_matrix
sparse distance matrix representing an undirected graph
Returns
-------
weights : array_like (N,M)
corresponding weights for N nodes
with M neighbors each
nbs : array_like int (N,M)
        neighbors array in the same format
        as would be returned by
sklearn.neighbors.NearestNeighbors
for N nodes with M neighbors each
"""
wres = np.full((graph.shape[0], graph.getnnz(axis=1).max()), 0.)
nbres = np.full((graph.shape[0], graph.getnnz(axis=1).max()), 0.)
for i in range(graph.shape[0]):
row = graph.getrow(i)
wres[i, :len(row.data)] = row.data
nbres[i, :len(row.indices)] = row.indices
return wres, nbres.astype(int)
|
78c563fa52652d25f3bc408b8eff6c418a897aa6
| 33,344 |
def aabb_with_result(a: Rectangle, b: Rectangle, result: Rectangle) -> bool:
"""
Does Axis Aligned Bounding Box collision detection between two rectangles a and b.
Also calculates the intersection rectangle.
:param a:
:param b:
:param result:
:return:
"""
# Horizontal
amin = a.x
amax = a.x + a.width
bmin = b.x
bmax = b.x + b.width
if bmin > amin:
amin = bmin
result.x = amin
if bmax < amax:
amax = bmax
result.width = amax - amin
# Vertical
amin = a.y
amax = a.y + a.height
bmin = b.y
bmax = b.y + b.height
if bmin > amin:
amin = bmin
result.y = amin
if bmax < amax:
amax = bmax
result.height = amax - amin
return not utils.rect_empty(result)
|
799348704e400ecc6b79ca15b76eee352062a7fe
| 33,346 |
from typing import List
import pickle
def make_visualizations_from_config(config: Config,
extension: str = 'pdf') -> List[Figure]:
"""
Used to generate the embedding visualizations from a given Config object
:param config: the Config used
:param extension: the extension to be saved the artifacts
:return: the list of figures for each categorical variable
"""
with open(config.get_labels_path(), 'rb') as f:
labels = pickle.load(f)
with open(config.get_weights_path(), 'rb') as f:
embeddings = pickle.load(f)
return make_visualizations(labels, embeddings, config.df, config.get_visualizations_dir(), extension)
|
687bfa06bd18112ebc01b032c9f3b6f377c7b401
| 33,347 |
import re
def get_intervals(bin_type, thresholds):
""" Returns a list of interval objects. If bin_type is *within*, then
intervals are formed by using each pair of consecutive thresholds. For
bin_type below* the interval [-np.inf, threshold] is used and for bin_type
    above* the interval [threshold, np.inf] is used.
Arguments:
bin_type one of below, below=, within, =within, within=, =within=,
above, above=
    thresholds numpy array of thresholds
"""
if thresholds is None:
return [verif.interval.Interval(-np.inf, np.inf, True, True)]
intervals = list()
N = len(thresholds)
if re.compile(".*within.*").match(bin_type):
N = N - 1
for i in range(0, N):
lower_eq = False
upper_eq = False
if bin_type in ["below", "below="]:
lower = -np.inf
upper = thresholds[i]
elif bin_type in ["above", "above="]:
lower = thresholds[i]
upper = np.inf
elif bin_type in ["within", "=within", "within=", "=within="]:
lower = thresholds[i]
upper = thresholds[i+1]
else:
verif.util.error("Unrecognized bintype")
if bin_type in ["below=", "within=", "=within="]:
upper_eq = True
if bin_type in ["above=", "=within", "=within="]:
lower_eq = True
intervals.append(verif.interval.Interval(lower, upper, lower_eq, upper_eq))
return intervals
|
847c99e295c3200a1c10895b373083a49698d1ef
| 33,348 |
def shapely_formatter(_, vertices, codes=None):
"""`Shapely`_ style contour formatter.
Contours are returned as a list of :class:`shapely.geometry.LineString`,
:class:`shapely.geometry.LinearRing`, and :class:`shapely.geometry.Point`
geometry elements.
Filled contours return a list of :class:`shapely.geometry.Polygon`
elements instead.
.. note:: If possible, `Shapely speedups`_ will be enabled.
.. _Shapely: http://toblerity.org/shapely/manual.html
.. _Shapely speedups: http://toblerity.org/shapely/manual.html#performance
See Also
--------
`descartes <https://bitbucket.org/sgillies/descartes/>`_ : Use `Shapely`_
or GeoJSON-like geometric objects as matplotlib paths and patches.
"""
elements = []
if codes is None:
for vertices_ in vertices:
if np.all(vertices_[0, :] == vertices_[-1, :]):
# Contour is single point.
                if len(vertices_) < 3:
elements.append(Point(vertices_[0, :]))
# Contour is closed.
else:
elements.append(LinearRing(vertices_))
# Contour is open.
else:
elements.append(LineString(vertices_))
else:
for vertices_, codes_ in zip(vertices, codes):
starts = np.nonzero(codes_ == MPLPATHCODE.MOVETO)[0]
stops = np.nonzero(codes_ == MPLPATHCODE.CLOSEPOLY)[0]
try:
rings = [LinearRing(vertices_[start:stop+1, :])
for start, stop in zip(starts, stops)]
elements.append(Polygon(rings[0], rings[1:]))
except ValueError as err:
# Verify error is from degenerate (single point) polygon.
                if np.any(stops - starts - 1 == 0):
# Polygon is single point, remove the polygon.
if stops[0] < starts[0]+2:
pass
# Polygon has single point hole, remove the hole.
else:
rings = [
LinearRing(vertices_[start:stop+1, :])
for start, stop in zip(starts, stops)
if stop >= start+2]
elements.append(Polygon(rings[0], rings[1:]))
else:
raise(err)
return elements
|
d6e0951f2ed75c37ffcbd9d18ebaa65ca2e2368b
| 33,350 |
def _patch_redirect(session):
# type: (requests.Session) -> None
"""Whether redirect policy should be applied based on status code.
HTTP spec says that on 301/302 not HEAD/GET, should NOT redirect.
But requests does, to follow browser more than spec
https://github.com/requests/requests/blob/f6e13ccfc4b50dc458ee374e5dba347205b9a2da/requests/sessions.py#L305-L314
This patches "requests" to be more HTTP compliant.
Note that this is super dangerous, since technically this is not public API.
"""
def enforce_http_spec(resp, request):
if resp.status_code in (301, 302) and \
request.method not in ['GET', 'HEAD']:
return False
return True
redirect_logic = session.resolve_redirects
def wrapped_redirect(resp, req, **kwargs):
attempt = enforce_http_spec(resp, req)
return redirect_logic(resp, req, **kwargs) if attempt else []
wrapped_redirect.is_msrest_patched = True # type: ignore
session.resolve_redirects = wrapped_redirect
|
fc3e79475c86c9aeee6c780dc25ddd83d373a27b
| 33,351 |
def cal_rank_from_proc_loc(pnx: int, pi: int, pj: int):
"""Given (pj, pi), calculate the rank.
Arguments
---------
pnx : int
Number of MPI ranks in x directions.
pi, pj : int
The location indices of this rank in x and y direction in the 2D Cartesian topology.
Returns
-------
rank : int
"""
# pylint: disable=invalid-name
return pj * pnx + pi
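# Illustrative usage (not part of the original snippet): with 4 ranks along x,
# the process at (pi=2, pj=3) sits in row 3, column 2 of the Cartesian topology.
assert cal_rank_from_proc_loc(pnx=4, pi=2, pj=3) == 14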
|
97146de9f69dd2f62173c19dfdb98d8281036697
| 33,352 |
import numpy
def width_trailing(sdf):
"""Return the FWHM width in arcmin for the trailing tail"""
# Go out to RA=245 deg
trackRADec_trailing=\
bovy_coords.lb_to_radec(sdf._interpolatedObsTrackLB[:,0],
sdf._interpolatedObsTrackLB[:,1],
degree=True)
cindx= range(len(trackRADec_trailing))[\
numpy.argmin(numpy.fabs(trackRADec_trailing[:,0]-245.))]
ws= numpy.zeros(cindx)
for ii,cc in enumerate(range(1,cindx+1)):
xy= [sdf._interpolatedObsTrackLB[cc,0],None,None,None,None,None]
ws[ii]= numpy.sqrt(sdf.gaussApprox(xy=xy,lb=True,cindx=cc)[1][0,0])
# return 2.355*60.*ws
return 2.355*60.*numpy.mean(ws)
|
a9fd52c584252470422fb39f555a64fe37a208e7
| 33,354 |
from typing import Optional
def load_report_fn(report_path: gpath.GPath) -> Optional[ExperimentReport]:
"""Tries to load report from path, if it exists. Returns None otherwise."""
report = None
if not gfile.Exists(report_path):
print(f'File {report_path} does not exist.')
return report
try:
report = hparams_utils.load_dataclass_from_disk(ExperimentReport,
report_path)
except gfile.FileError as file_error:
print(f'Failed to load file {report_path}.')
print(file_error)
return report
|
a859751c3065eb03029c7fa11885421cfe025b29
| 33,355 |
from io import StringIO
import csv
from datetime import datetime
def get_proposal_statistics_report(from_date, to_date, all_fields=False):
"""Gets the proposal statistics report from Cayuse"""
# List of fields we want to retrieve from Cayuse and include in the CSV
REPORT_FIELDS = (
'application_type_code',
'proposal_id',
'employee_id',
'first_name',
'last_name',
'division_name',
'department_name',
'agency_name',
'project_title',
'project_start_date',
'project_end_date',
'submission_date',
'department_code',
'total_costs',
'total_direct_costs',
'total_indirect_costs',
'total_direct_costs_y1',
'total_indirect_costs_y1',
'budget_first_per_start_date',
'budget_first_per_end_date',
)
response = _make_cayuse_request('custom/summary')
    f = StringIO(response.text)
reader = csv.reader(f, delimiter=',')
    header = next(reader)
if all_fields:
header_fields = header
else:
header_fields = REPORT_FIELDS
proposals = []
for row in reader:
proposal = dict(zip(header, row))
status = proposal['award_proposal_status']
if status == 'SUBMITTED' and proposal['submission_date']:
submission_date = datetime.strptime(
proposal['submission_date'],
"%Y-%m-%d %H:%M:%S.%f").date()
if submission_date >= from_date and submission_date <= to_date:
entry = [proposal[field] for field in header_fields]
proposals.append(entry)
return header_fields, proposals
|
08148bd1dcc763d73477e13df56327c3bf639051
| 33,356 |
def emitter_for_format(construct_format):
""" Creates a factory method for the relevant construct format. """
def _factory():
return ConstructEmitter(construct_format)
return _factory
|
4472d4790f7469cc556a0c958003ec65673d0653
| 33,357 |
def get_client(settings):
"""Return a client for the Elasticsearch index."""
host = settings["elasticsearch_url"]
kwargs = {}
# nb. No AWS credentials here because we assume that if using AWS-managed
# ES, the cluster lives inside a VPC.
return Elasticsearch([host], **kwargs)
|
65260462e9154ee911b686ee66405fe7009f4add
| 33,358 |
def merge_model_predict(
predict: TModelPredict, predict_append: TModelPredict
) -> TModelPredict:
"""Append model predictions to an existing set of model predictions.
TModelPredict is of the form:
        ({metric_name: [mean1, mean2, ...]},
         {metric_name: {metric_name: [var1, var2, ...]}})
    This will append the predictions from predict_append to predict.
Args:
predict: Initial set of predictions.
        predict_append: Predictions to be appended.
Returns:
TModelPredict with the new predictions appended.
"""
mu, cov = predict
mu_append, cov_append = predict_append
if len(mu) != len(mu_append) or len(cov) != len(cov_append):
raise ValueError("Both sets of model predictions must have the same metrics")
# Iterate down to the list level and simply add.
for metric_name, metric_values in mu.items():
mu[metric_name] = metric_values + mu_append[metric_name]
for metric_name, co_cov in cov.items():
for co_metric_name, cov_values in co_cov.items():
cov[metric_name][co_metric_name] = (
cov_values + cov_append[metric_name][co_metric_name]
)
return mu, cov
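# Illustrative usage (not part of the original snippet), assuming the TModelPredict
# alias referenced in the signature is in scope (it is defined elsewhere in the
# source module): append one batch of predictions for a single metric to another.
mu_a, cov_a = {"m": [1.0, 2.0]}, {"m": {"m": [0.1, 0.2]}}
mu_b, cov_b = {"m": [3.0]}, {"m": {"m": [0.3]}}
mu, cov = merge_model_predict((mu_a, cov_a), (mu_b, cov_b))
assert mu == {"m": [1.0, 2.0, 3.0]}
assert cov == {"m": {"m": [0.1, 0.2, 0.3]}}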
|
bddff5101bd85de3a02087a48aa89f9acc4ebf52
| 33,359 |
def voucher_objects(states=voucher_states()):
"""
Build ``Voucher`` instances.
"""
return builds(
Voucher,
number=vouchers(),
created=one_of(none(), datetimes()),
expected_tokens=integers(min_value=1),
state=states,
)
|
4ffd0c9071fc375dd369f8c89e0312d0eeb4e346
| 33,360 |
from urllib.parse import urljoin
from bs4 import BeautifulSoup
import requests
def fetch_events_sciencehistory(base_url='https://www.sciencehistory.org'):
"""
Fetch events from Science History Institute, https://www.sciencehistory.org/events
"""
events = []
page_soup = BeautifulSoup(requests.get(
urljoin(base_url, '/events')).content, 'html.parser')
all_events = page_soup.find('div', attrs={'class': 'eventpageleft'})
all_events = all_events.find_all('div', attrs={'class': 'views-row'})
for event in all_events:
title = event.find('div', attrs={'class': 'eventtitle'}).text.strip()
date = event.find('div', attrs={'class': 'eventdate'}).text.strip()
event_url = urljoin(base_url, event.find(
'div', attrs={'class': 'eventtitle'}).find('a')['href'])
event_soup = BeautifulSoup(requests.get(
event_url).content, 'html.parser')
location = event_soup.find('div', attrs={'class': 'event-location'})
location = ', '.join(
[div.text.strip() for div in location.find_all('div') if div is not None])
event_time = event_soup.find('div', attrs={'class': 'event-time'})
event_time = event_time.text.strip() if event_time is not None else ''
starttime, endtime = find_startend_time(event_time)
descriptions = event_soup.find(
'div', attrs={'class': 'content event_padding'})
descriptions = descriptions.find_all('p')
if len(descriptions) >= 5:
descriptions = ' '.join([p.text.strip()
for p in descriptions[0:5] if p is not None])
else:
descriptions = ' '.join([p.text.strip()
for p in descriptions if p is not None])
events.append({
'title': title,
'speaker': '',
'date': date,
'location': location,
'description': descriptions,
'starttime': starttime,
'endtime': endtime,
'url': event_url,
'owner': 'Science History Institute'
})
return events
|
571586136255973baeb00598afddfb6f309e82b0
| 33,364 |
def _create_buffer(size):
"""Create a ctypes buffer of a given size."""
buftype = (CHAR * size)
return buftype()
|
8aa09592ebea8d799c1cfa69edc35dd0b6f93ff9
| 33,366 |
from typing import Optional
from typing import List
from typing import Dict
def collect_files(config: Config,
files: Optional[List[str]] = None,
method: Optional[str] = None) -> DFs:
"""Read a filtered memory map from a set of files."""
filenames = files if files else config.get('args.inputs', [])
if method is None:
method = config.get('collect.method', 'csv')
frames: Dict[str, List[DF]] = {}
for filename in filenames:
dfs: DFs = FILE_READERS[method](config, filename, method)
postprocess_file(config, dfs)
for k, frame in dfs.items():
if k not in frames:
frames[k] = []
frames[k].append(frame)
dfs = {}
for k, v in frames.items():
dfs[k] = pd.concat(v, ignore_index=True)
postprocess_collected(config, dfs)
return dfs
|
3870e4da457ba2a1780e262acdfd8aad81990115
| 33,367 |
def ModelInfoAddShader(builder, shader):
"""This method is deprecated. Please switch to AddShader."""
return AddShader(builder, shader)
|
69e21b53cc3c01b977115fd46f09d77f1499f3ef
| 33,368 |
def configuration(request):
"""Various things stored in settings.py"""
return {"version": VERSION,
"current_url": request.get_full_path(),
"project_long_name": settings.project_long_name,
"project_short_name": settings.project_short_name,
"project_description": settings.project_description,
"acknowledgements": settings.acknowledgements,
"semantic_domains": settings.semantic_domains,
"structural_features": settings.structural_features}
|
02a0727c93c786305c1aa019fc79c624e7dd3f91
| 33,369 |
def percentile(event_collection, target_property, percentile, timeframe=None, timezone=None, interval=None,
filters=None, group_by=None, max_age=None):
""" Performs a percentile query
Finds the percentile of a target property for events that meet the given criteria.
:param event_collection: string, the name of the collection to query
:param target_property: string, the name of the event property you would like use
:param percentile: float, the specific percentile you wish to calculate,
supporting 0-100 with two decimal places of precision for example, 99.99
:param timeframe: string or dict, the timeframe in which the events
happened example: "previous_7_days"
:param timezone: int, the timezone you'd like to use for the timeframe
and interval in seconds
:param interval: string, the time interval used for measuring data over
time example: "daily"
:param filters: array of dict, contains the filters you'd like to apply to the data
example: [{"property_name":"device", "operator":"eq", "property_value":"iPhone"}]
:param group_by: string or array of strings, the name(s) of the properties you would
like to group you results by. example: "customer.id" or ["browser","operating_system"]
:param max_age: an integer, greater than 30 seconds, the maximum 'staleness' you're
willing to trade for increased query performance, in seconds
"""
_initialize_client_from_environment()
return _client.percentile(
event_collection=event_collection,
timeframe=timeframe,
percentile=percentile,
timezone=timezone,
interval=interval,
filters=filters,
group_by=group_by,
target_property=target_property,
max_age=max_age,
)
|
36208dae6b19b28b00ba311b5d15bef856fb0992
| 33,370 |
def MolAsMolWithQueryAtoms(
mol: rdChem.Mol, strict: bool = False, includeIsotopes: bool = True
) -> rdChem.Mol:
"""If Mol contains Chem.Atoms, convert to Mol with Chem.QueryAtoms
If Mol already is composed of QueryAtoms, returns the same object
"""
if all(isinstance(atom, rdChem.QueryAtom) for atom in mol.GetAtoms()):
return mol
return MolToMolWithQueryAtoms(mol, strict=strict, includeIsotopes=includeIsotopes)
|
1a2d0e159294efa19686d2f442e8a8e223eb96d1
| 33,372 |
import pathlib
from typing import Tuple
from typing import List
def find_images(
directory: pathlib.Path,
specs: specsmod.Specs) -> Tuple[List[pathlib.Path], List[str]]:
"""
Find all the sample images beneath the directory.
:param directory: where to search
:param specs: specification of the problem
:return: list of image paths, list of class labels
"""
pths_lables = [] # type: List[Tuple[pathlib.Path, str]]
for pth in directory.glob("**/*"):
lbl = _extract_label(filename=pth.name)
if not lbl:
continue
if lbl not in specs.class_set:
raise ValueError("Unexpected label: {}, expected one of: {}".format(
lbl, specs.classes))
pths_lables.append((pth, lbl))
pths_lables = sorted(pths_lables)
return [pth for pth, _ in pths_lables], [lbl for _, lbl in pths_lables]
|
360743837c3da3142cb7060590102149adca24c4
| 33,373 |
def login():
"""
Log in user
---
tags:
- Auth
requestBody:
content:
application/x-www-form-urlencoded:
schema:
type: object
properties:
pin:
description: Redirects to next page if login successful
type: string
required:
- pin
responses:
"200":
description: OK
"""
pin = request.form.get('pin')
if 'next' in request.args:
session['next'] = request.args['next']
else:
session['next'] = None
if not pin:
flash('PIN is required')
return redirect(url_for('authentication_api.login', next=session['next']))
if not pin.isdigit():
flash('PIN must only contain digits')
return redirect(url_for('authentication_api.login', next=session['next']))
if not Executer.instance.authentication_executer.validate_pin(pin):
flash('PIN must be at least 4 digits long')
return redirect(url_for('authentication_api.login', next=session['next']))
if pin != Executer.instance.authentication_executer.DEFAULT_PIN:
flash('Invalid PIN')
return redirect(url_for('authentication_api.login', next=session['next']))
if pin == Executer.instance.authentication_executer.DEFAULT_PIN:
Executer.instance.authentication_executer.login()
if session['next'] is not None:
if Executer.instance.authentication_executer.is_safe_url(session['next']):
return redirect(session['next'])
return redirect("/")
return render_template('login.html')
|
f89e9190c5e9a29e0629e16011c4332d16874996
| 33,374 |
def T_asy(x, beta):
"""Symmetry breaking transformation.
.. math::
        T_{asy}^{\\beta}(x_i) =
        \\begin{cases}
        x_i^{1 + \\beta \\frac{i-1}{D-1} \\sqrt{x_i}} & \\text{if } x_i > 0 \\\\
        x_i & \\text{otherwise}
        \\end{cases}
Parameters
----------
    x : array-like, shape (n_dims,)
        Input
    beta : float
        Strength of the symmetry breaking
Returns
-------
z : array-like, shape (n_dims,)
Output
"""
exponent = np.linspace(0, beta, len(x))
idx = np.where(x > 0)
z = x.copy()
z[idx] **= 1.0 + exponent[idx] * np.sqrt(x[idx])
return z
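# Illustrative usage (not part of the original snippet). numpy is assumed to be
# imported as np by the surrounding module; it is imported here so the example runs.
import numpy as np

x = np.array([-1.0, 0.5, 1.0])
z = T_asy(x, beta=0.5)
assert z[0] == x[0]                        # non-positive entries pass through unchanged
assert np.allclose(T_asy(x, beta=0.0), x)  # beta = 0 leaves every entry unchanged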
|
671925fdbfc7d3fbc76acb1da3e9ff591207d131
| 33,375 |
import numpy as np
import jax.numpy as jnp
def commonChecks(L0: float, Rtot: np.ndarray, KxStar: float, Kav: np.ndarray, Ctheta: np.ndarray):
""" Check that the inputs are sane. """
Kav = jnp.array(Kav, dtype=float)
Rtot = jnp.array(Rtot, dtype=float)
Ctheta = jnp.array(Ctheta, dtype=float)
assert Rtot.ndim <= 1
assert Rtot.size == Kav.shape[1]
assert Kav.ndim == 2
assert Ctheta.ndim <= 1
Ctheta = Ctheta / jnp.sum(Ctheta)
return L0, Rtot, KxStar, Kav, Ctheta
|
9007db3f77e2036d2db7af28d24ba40234e09168
| 33,376 |
def _threed_extract(step, var, walls=False):
"""Return suitable slices and coords for 3D fields."""
is_vector = not valid_field_var(var)
hwalls = is_vector or walls
i_x = conf.field.ix
i_y = conf.field.iy
i_z = conf.field.iz
if i_x is not None or i_y is not None:
i_z = None
if i_x is not None or i_z is not None:
i_y = None
if i_x is None and i_y is None and i_z is None:
i_x = 0
if i_x is not None:
xcoord = step.geom.y_walls if hwalls else step.geom.y_centers
ycoord = step.geom.z_walls if walls else step.geom.z_centers
i_y = i_z = slice(None)
varx, vary = var + '2', var + '3'
elif i_y is not None:
xcoord = step.geom.x_walls if hwalls else step.geom.x_centers
ycoord = step.geom.z_walls if walls else step.geom.z_centers
i_x = i_z = slice(None)
varx, vary = var + '1', var + '3'
else:
xcoord = step.geom.x_walls if hwalls else step.geom.x_centers
ycoord = step.geom.y_walls if hwalls else step.geom.y_centers
i_x = i_y = slice(None)
varx, vary = var + '1', var + '2'
if is_vector:
data = (step.fields[varx].values[i_x, i_y, i_z, 0],
step.fields[vary].values[i_x, i_y, i_z, 0])
else:
data = step.fields[var].values[i_x, i_y, i_z, 0]
return (xcoord, ycoord), data
|
a71310b0e735fabbee13ce1d4b71e3a8dbd9c809
| 33,377 |
def calc_temps(session,start_date, end_date):
"""TMIN, TAVG, and TMAX for a list of dates.
Args:
start_date (string): A date string in the format %Y-%m-%d
end_date (string): A date string in the format %Y-%m-%d
Returns:
        TMIN, TAVG, and TMAX
"""
return session.query(func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs)).\
filter(Measurement.date >= start_date).filter(Measurement.date <= end_date).all()
|
e82dcfe895e39c37fceb490bb9fb7c832bc7a6c5
| 33,378 |
from typing import List
def get_react_score(reactions: List[dict]) -> float:
"""
Returns score of the post according to reactions weights.
:reactions: list of dictionaries containing post info
"""
react_score = 0
for react in reactions:
react_score += reactions_dict[react.get("name", "")] * react.get("count", 0)
return react_score
|
0ca3ce2abeed8f151e607dfec16591e5e3bae79a
| 33,379 |
import ast
def _get_collections_abc_obj_id(node: ast.expr | None) -> str | None:
"""
If the node represents a subscripted object from collections.abc or typing,
return the name of the object.
Else, return None.
>>> _get_collections_abc_obj_id(_ast_node_for('AsyncIterator[str]'))
'AsyncIterator'
>>> _get_collections_abc_obj_id(_ast_node_for('typing.AsyncIterator[str]'))
'AsyncIterator'
>>> _get_collections_abc_obj_id(_ast_node_for('typing_extensions.AsyncIterator[str]'))
'AsyncIterator'
>>> _get_collections_abc_obj_id(_ast_node_for('collections.abc.AsyncIterator[str]'))
'AsyncIterator'
>>> _get_collections_abc_obj_id(_ast_node_for('collections.OrderedDict[str, int]')) is None
True
"""
if not isinstance(node, ast.Subscript):
return None
return _get_name_of_class_if_from_modules(
node.value, modules=_TYPING_MODULES | {"collections.abc"}
)
|
f5cb701df687b90a0f3a247526cf5349e7b94d3e
| 33,380 |
def show_index():
"""
main site, generating sections
"""
title = "Homepage"
sql_query = 'SELECT categoryId, title FROM category'
output = myDB.execute_sql(sql_query)
sections = dict(output)
return render_template("index.html", title=title,sections = sections)
|
b64d532810fc96e985b0291654e62e7d4e0e4b89
| 33,381 |
def generate_packed_decoder(wrapped_decoder):
"""Generate an decoder for a packer type from a base type decoder"""
def length_wrapper(buf, pos):
"""Decode repeat values prefixed with the length"""
length, pos = varint.decode_varint(buf, pos)
end = pos+length
output = []
while pos < end:
value, pos = wrapped_decoder(buf, pos)
output.append(value)
if pos > end:
raise decoder._DecodeError("Invalid Packed Field Length")
return output, pos
return length_wrapper
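# Usage sketch (assumption: `varint.decode_varint` is the same helper imported
# by this module and `buf` holds a length-prefixed run of varints):
#
#   decode_packed_varints = generate_packed_decoder(varint.decode_varint)
#   values, pos = decode_packed_varints(buf, 0)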
|
c213dcb49f63055dcd7ecf7ff75187c997f2a5e7
| 33,383 |
def create_ik_handle(name, start_joint, end_joint, solver_type=None, curve=None, **kwargs):
"""
Creates a new IK handle
:param name: str
:param start_joint: str
:param end_joint: str
:param solver_type: str
:param curve: str
:param kwargs:
:return: str
"""
if solver_type is None:
solver_type = ik_utils.IkHandle.SOLVER_SC
handle = ik_utils.IkHandle(name)
handle.set_solver(solver_type)
handle.set_start_joint(start_joint)
handle.set_end_joint(end_joint)
if curve and maya.cmds.objExists(curve):
handle.set_curve(curve)
return handle.create()
|
b5f56cbea793201467146475ac266b95c4469982
| 33,384 |
import math
def FinalFitness4(intermediate_outputs):
"""
    Function: FinalFitness4
========================
Compute global fitness of an individual. Intended when wanting to refine
the fitness score.
@param intermediate_outputs: the fitnesses of the tree over several sets of
values
@return: global fitness
"""
final_output=0
# each element represents one different sample or set of input data
# the size of each represents the number of examples
    #each sub-element represents the value(s) obtained at the top of a tree for one input
#In this particular case, we simply add the difference of all results with an ideal solution
# the ideal solution is : [adf1 = x+y adf2 = add1*(y-x)]
# build a corresponding list of two-elements sub lists
# then evaluate the sum of the difference with our built models
goal_function=[]
for nb in xrange(len(intermediate_outputs)):
for el in intermediate_outputs[nb]:
for el2 in el:
try:
if isinstance(el2, bool):
pass
elif math.isinf(el2):
return el2
except:
return float('inf')
# sum the absolute values of the differences over one example
    # here we use a very very puzzling python list comprehension... This deserves a bit of explanation.
# In general, the expression "T if C is true, or F if C is false" can be written as (F, T)[bool(C)].
# This single line could be replaced by a simpler but slower expression of the type:
#z=[]
#for i in range(10):
# if C:
# z.append(T)
# else:
# z.append(F)
    # In our case, if the first element of obtained_results is True (the result of the if statement)
# then use the result produce by the second branch, otherwise use the result produced by the third
# branch.
# As far as we are concerned, list comprehension are faster + compact + more memory efficient.
# so for this crucial fitness calculation bit, I chose this solution...
# May the deities of the Python programming pantheon forgive me (Sorry Guido...).
    final_output = sum(
        [(math.fabs(settings.ideal_results[x][y] - intermediate_outputs[2][x][y]),
          math.fabs(settings.ideal_results[x][y] - intermediate_outputs[1][x][y]))[intermediate_outputs[0][x][y]]
         for x in xrange(len(intermediate_outputs[1]))
         for y in xrange(len(intermediate_outputs[1][x]))])
return final_output
|
0e7337424a15439fbe1951b66fdfe52bc47edbbf
| 33,385 |
def create_with_deletion_protection(ledger_name):
"""
Create a new ledger with the specified name and with deletion protection enabled.
:type ledger_name: str
:param ledger_name: Name for the ledger to be created.
:rtype: dict
:return: Result from the request.
"""
logger.info("Let's create the ledger with name: {}...".format(ledger_name))
result = qldb_client.create_ledger(Name=ledger_name, PermissionsMode='ALLOW_ALL')
logger.info('Success. Ledger state: {}'.format(result.get('State')))
return result
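# Usage sketch (assumption: `qldb_client` is a module-level boto3 QLDB client;
# CreateLedger enables deletion protection by default, so no explicit
# DeletionProtection flag is needed here):
#
#   create_with_deletion_protection("vehicle-registration")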
|
2832988d222ab835984b2aeeabc4e9fc726e53c5
| 33,386 |
def run(command):
"""Execute command as user:<command>"""
with hide('everything'), settings(warn_only=True):
result = api.run(command)
print("["+env.host+"] " + command)
if result != '':
print(result)
return result
|
339b8d674da1962383314c66e3f8644f1427552e
| 33,387 |
def dummy_request():
"""Fixture to return a single dummy request."""
return testing.DummyRequest()
|
7a4382ec76c2047a0b8cc7be4a6408c4000b5493
| 33,388 |
def refill(pop):
"""Get rid of duplicates."""
for lst in pop:
lst[9] = sgn(lst[9])
lst[-1] = abs(lst[-1])
pop.sort()
newpop = [0] * Npop
i = 0
last = None
for lst in pop:
if lst != last:
newpop[i] = lst
i += 1
last = lst
for j in range(i, Npop):
a = rng.random() % i
b = rng.random() % i
newpop[j] = breed(newpop[a], newpop[b])
return newpop
|
701fec91d9e59085bb0d9d1d26dfd0803f9dc42b
| 33,389 |
import re
def parse_code(body):
"""
Parse the code from the body
"""
# Regex to match code block reddit comment
regex = ur"^((?:(?:(?:[ ]{4}).*|)(?:[\r\n]+|$))+)"
matches = re.findall(regex, body, re.MULTILINE)
# remove all empty lines
matches = [match for match in matches if match != "\n"]
# remove leading 4 spaces used to create code blocks in reddit comments
return re.sub(r"^ ", "", matches[0].strip('\r\n'), flags=re.M) if matches else ""
|
3e978cfb55186946c1f6c577054349ff50914ec9
| 33,390 |
from polaris.integrations import registered_deposit_integration as rdi
from typing import Tuple
from typing import Optional
def get_or_create_transaction_destination_account(
transaction: Transaction,
) -> Tuple[Optional[Account], bool, bool]:
"""
Returns:
Tuple[Optional[Account]: The account(s) found or created for the Transaction
bool: boolean, True if created, False otherwise.
bool: boolean, True if trustline doesn't exist, False otherwise.
If the account doesn't exist, Polaris must create the account using an account provided by the
anchor. Polaris can use the distribution account of the anchored asset or a channel account if
the asset's distribution account requires non-master signatures.
If the transacted asset's distribution account does not require non-master signatures, Polaris
can create the destination account using the distribution account.
If the transacted asset's distribution account does require non-master signatures, the anchor
should save a keypair of a pre-existing Stellar account to use as the channel account via
DepositIntegration.create_channel_account(). See the function docstring for more info.
On failure to create the destination account, a RuntimeError exception is raised.
"""
try:
account, json_resp = get_account_obj(
Keypair.from_public_key(transaction.stellar_account)
)
return account, False, is_pending_trust(transaction, json_resp)
except RuntimeError:
master_signer = None
if transaction.asset.distribution_account_master_signer:
master_signer = transaction.asset.distribution_account_master_signer
thresholds = transaction.asset.distribution_account_thresholds
if master_signer and master_signer["weight"] >= thresholds["med_threshold"]:
source_account_kp = Keypair.from_secret(transaction.asset.distribution_seed)
source_account, _ = get_account_obj(source_account_kp)
else:
rdi.create_channel_account(transaction)
source_account_kp = Keypair.from_secret(transaction.channel_seed)
source_account, _ = get_account_obj(source_account_kp)
builder = TransactionBuilder(
source_account=source_account,
network_passphrase=settings.STELLAR_NETWORK_PASSPHRASE,
# this transaction contains one operation so base_fee will be multiplied by 1
base_fee=settings.MAX_TRANSACTION_FEE_STROOPS
or settings.HORIZON_SERVER.fetch_base_fee(),
)
transaction_envelope = builder.append_create_account_op(
destination=transaction.stellar_account,
starting_balance=settings.ACCOUNT_STARTING_BALANCE,
).build()
transaction_envelope.sign(source_account_kp)
try:
settings.HORIZON_SERVER.submit_transaction(transaction_envelope)
except BaseHorizonError as submit_exc: # pragma: no cover
raise RuntimeError(
"Horizon error when submitting create account to horizon: "
f"{submit_exc.message}"
)
transaction.status = Transaction.STATUS.pending_trust
transaction.save()
logger.info(
f"Transaction {transaction.id} is now pending_trust of destination account"
)
account, _ = get_account_obj(
Keypair.from_public_key(transaction.stellar_account)
)
return account, True, True
except BaseHorizonError as e:
raise RuntimeError(f"Horizon error when loading stellar account: {e.message}")
|
2bea36fbeed6a6cf1c6f95f46eb2f2220571cf81
| 33,392 |
from pathlib import Path
def test_loading(tmpdir):
"""
Load an object.
"""
class A(FSObject):
_config_file = 'a.json'
def __init__(self,**kwargs):
super().__init__(**kwargs)
@property
def state(self) -> dict:
return {}
@classmethod
def load(cls, base_path: Path):
self = super().load(base_path)
return self
a = A(parent_path=tmpdir, name='a')
b = A.load(f'{tmpdir}/{a.hash}')
assert b.name == 'a'
assert b.hash == a.hash
assert b.date == a.date
|
add506c113f7fb518541a7151aa20a9d64c69097
| 33,393 |
import datetime
import random
def random_date(start=None, end=None):
"""Get a random date between two dates"""
if start is None and end is None:
end = datetime.datetime.now()
start = end - datetime.timedelta(days=365)
stime = date_to_timestamp(start)
etime = date_to_timestamp(end)
ptime = stime + random.random() * (etime - stime)
return datetime.date.fromtimestamp(ptime)
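# Usage sketch: with no arguments the result falls within the last 365 days;
# explicit bounds are also accepted (assumes `date_to_timestamp` handles them):
#
#   d = random_date()
#   d = random_date(datetime.datetime(2020, 1, 1), datetime.datetime(2020, 12, 31))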
|
67e4338808ccf6195fd786c50635371bdfcb49a4
| 33,394 |
def _chg_float(something):
"""
    Convert the value to float if it can be parsed; otherwise return it unchanged.
"""
try:
f = float(something)
return f
except ValueError:
pass
return something
|
d0119c255b0842b2de4e60293c8037ff6f75b181
| 33,395 |
def get_seeding_programs():
"""Returns the list of seeding program names"""
try:
seed_catalogs = read_plist(SEED_CATALOGS_PLIST)
return list(seed_catalogs.keys())
except (OSError, IOError, ExpatError, AttributeError, KeyError) as err:
log.warn(err)
return ""
|
aad36b6ea85fa2d8a723a958281e8711e8d12af0
| 33,396 |
import re
import requests
import time
def query_ols(iri):
""" Gets the name field of measurementTechnique, infectiousAgent, infectiousDisease, and species in our nde schema
ols api doc here: https://www.ebi.ac.uk/ols/docs/api
    Returns the formatted dictionary {name: ####, url: ####} if a URL was given, otherwise {name: ####}
"""
url = "https://www.ebi.ac.uk/ols/api/terms?"
pattern = re.compile("^https?://")
if pattern.match(iri):
params = {
            # isn't the best way, but good enough to pull out the URL: https://stackoverflow.com/questions/9760588/how-do-you-extract-a-url-from-a-string-using-python
            "iri": re.search(r"(?P<url>https?://[^\s]+)", iri).group("url")
}
request = requests.get(url, params).json()
# no documentation on how many requests can be made
time.sleep(0.5)
return {'name': request['_embedded']['terms'][0]['label'], 'url': iri}
else:
return {'name': iri}
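# Usage sketch (illustrative IRIs; an http(s) IRI is resolved through the OLS
# API, anything else is passed through as a plain name):
#
#   query_ols("http://purl.obolibrary.org/obo/NCBITaxon_9606")  # -> {'name': ..., 'url': ...}
#   query_ols("Homo sapiens")                                   # -> {'name': 'Homo sapiens'}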
|
03f6a1d6088f74b9b8a13528d5e4709c236cfd5e
| 33,397 |
def cylinder( **named ):
"""Create a cylinder, adding to current scene"""
return _newNode( geometry.VPCylinder, named )
|
5e012725d5787ffd802215f2ddbe549150a0429b
| 33,398 |
import numpy as np
def mrr_finesse(a, r):
"""
description: Calculate the finesse of the MRR, i.e., finesse=FSR/FWHM=pi*sqrt(ra)/(1-ra) (Bogaerts et al., Silicon microring resonators, Laser and Photonics Review 2011, Eq.(21))\\
    a {float} Attenuation coefficient\\
r {float} Self-coupling coefficient\\
return finesse {float} Finesse of the MRR
"""
ra = r * a
finesse = np.pi * ra ** 0.5 / (1 - ra)
return finesse
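# Worked example (illustrative values): for a = 0.99 and r = 0.98,
# ra = 0.9702 and finesse = pi * sqrt(0.9702) / (1 - 0.9702) ≈ 103.8:
#
#   f = mrr_finesse(0.99, 0.98)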
|
718bfe5a0a3d8a727a604bff7d21c1b6e3cb1715
| 33,399 |
import decimal
def is_numeric(value):
"""
Check whether *value* is a numeric value (i.e. capable of being represented as a floating point value without loss
of information).
:param value: The value to check. This value is a native Python type.
:return: Whether or not the value is numeric.
:rtype: bool
"""
if not isinstance(value, (decimal.Decimal, float, int)):
return False
if isinstance(value, bool):
return False
return True
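# Usage sketch:
#
#   is_numeric(3), is_numeric(2.5), is_numeric(decimal.Decimal("1.2"))  # all True
#   is_numeric(True), is_numeric("3")                                   # both False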
|
4224e3a1af56e75256d5c6d7f7d990ea3ae9343a
| 33,400 |
def _position_to_features(sam_reader, allele_counter, region, position,
exclude_contig):
"""Extracts the AlleleCount data from a given position."""
# We build the AlleleCount at the position.
reads = sam_reader.query(region)
for read in reads:
allele_counter.add(read)
counts = allele_counter.counts()
assert len(counts) == 1
allele_count = counts[0]
if not _is_valid(allele_count) or (position.reference_name == exclude_contig):
return None
# We turn that AlleleCount into a vector feedable to scikitlearn.
row = dict()
row['ref_nonconfident_read_count'] = allele_count.ref_nonconfident_read_count
row['ref_supporting_read_count'] = allele_count.ref_supporting_read_count
# We need to make sure that all columns exist.
for operation in _ALLELE_OP_STR.values():
row[operation] = 0
for _, allele in allele_count.read_alleles.iteritems():
row[_ALLELE_OP_STR[allele.type]] += allele.count
row['label'] = position.label
row['reference_name'] = position.reference_name
row['position'] = position.start
return row
|
7b430c6488a7f21661505a8ac9212f2fe2289744
| 33,401 |
def p_name(username):
"""
    Get the nickname for the given WeChat username.
    :param username: WeChat username
    :return: the friend's remark name or nickname, or '' if the friend is not found
"""
friend = itchat.search_friends(userName=username)
if not friend:
return ''
return friend.get('RemarkName') or friend.get('NickName')
|
4a07df0a973f533bd740b6d8d11ca2eb981617d5
| 33,402 |
def user_profile(self):
"""Return this user's UserProfile, or None if one doesn't exist."""
try:
return self.userprofile
except UserProfile.DoesNotExist:
return None
|
7ca38535ed779c591d7dc76b52861e6a9a126c8f
| 33,403 |