content | sha1 | id
---|---|---
stringlengths 35–762k | stringlengths 40 | int64 0–3.66M
import torch
def compute_accuracy(outputs, targets, topk=(1,)):
"""Computes the accuracy over the k top predictions for the specified values of k"""
with torch.no_grad():
maxk = max(topk)
batch_size = targets.size(0)
_, preds = outputs.topk(maxk, 1, True, True)
preds = preds.t()
corrects = preds.eq(targets[None])
result_list = []
for k in topk:
correct_k = corrects[:k].flatten().sum(dtype=torch.float32)
result_list.append(correct_k * (100.0 / batch_size))
return result_list | 6cfcc9e43aaaed09baae567f9cc27818c555fe5f | 9,566 |
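# Hedged usage sketch for compute_accuracy above (the inputs here are made up):
# top-1 and top-5 accuracy for a batch of random logits.
import torch

outputs = torch.randn(32, 10)          # logits for 32 samples over 10 classes
targets = torch.randint(0, 10, (32,))  # ground-truth class indices
top1, top5 = compute_accuracy(outputs, targets, topk=(1, 5))
print(f"top-1: {top1.item():.2f}%  top-5: {top5.item():.2f}%")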
import io
def unpack_text_io_wrapper(fp, encoding):
"""
If *fp* is a #io.TextIOWrapper object, this function returns the underlying
binary stream and the encoding of the IO-wrapper object. If *encoding* is not
None and does not match the encoding specified in the IO-wrapper, a
#RuntimeError is raised.
"""
if isinstance(fp, io.TextIOWrapper):
if fp.writable() and encoding is not None and fp.encoding != encoding:
msg = 'TextIOWrapper.encoding({0!r}) != {1!r}'
raise RuntimeError(msg.format(fp.encoding, encoding))
if encoding is None:
encoding = fp.encoding
fp = fp.buffer
return fp, encoding | f2c93babab4bff1f08e6fe5c04fbd97dd1ee8a84 | 9,567 |
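# Hedged usage sketch for unpack_text_io_wrapper above: wrap an in-memory binary
# buffer in a TextIOWrapper and recover the underlying stream and its encoding.
import io

raw = io.BytesIO()
wrapper = io.TextIOWrapper(raw, encoding="utf-8")
stream, enc = unpack_text_io_wrapper(wrapper, None)
print(stream is raw, enc)  # True utf-8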
def dummy_blob(size_arr=(9, 9, 9), pixdim=(1, 1, 1), coordvox=None):
"""
Create an image with non-null voxels at the coordinates specified by coordvox.
:param size_arr:
:param pixdim:
:param coordvox: If None: will create a single voxel in the middle of the FOV.
If tuple: (x,y,z): Create single voxel at specified coordinate
If list of tuples: [(x1,y1,z1), (x2,y2,z2)]: Create multiple voxels.
:return: Image object
"""
# nx, ny, nz = size_arr
data = np.zeros(size_arr)
# if not specified, voxel coordinate is set at the middle of the volume
if coordvox is None:
coordvox = tuple([round(i / 2) for i in size_arr])
data[coordvox] = 1
elif isinstance(coordvox, list):
for icoord in coordvox:
data[icoord] = 1
elif isinstance(coordvox, tuple):
data[coordvox] = 1
else:
raise ValueError("Wrong type for coordvox")
# Create image with default orientation LPI
affine = np.eye(4)
affine[0:3, 0:3] = affine[0:3, 0:3] * pixdim
nii = nib.nifti1.Nifti1Image(data, affine)
img = Image(data, hdr=nii.header, dim=nii.header.get_data_shape())
return img | 2426ca5cddfa3da660bd5e7436f8093b1d7fa109 | 9,568 |
import torch
def poly_edges(P, T):
"""
Returns the ordered edges from the given polygons
Parameters
----------
P : Tensor
a (N, D,) points set tensor
T : LongTensor
a (M, T,) topology tensor
Returns
-------
tuple
a tuple containing the edges of the given polygons
"""
p = P[torch.cat((T, T[0].unsqueeze(0)), dim=0)]
return tuple(p[1:]-p[:-1]) | c8d838bf1ada319cebc5c08719f66846959ce2c2 | 9,569 |
def make_list(v):
"""
If the object is not a list already, it converts it to one
Examples:
[1, 2, 3] -> [1, 2, 3]
[1] -> [1]
1 -> [1]
"""
if not jsoncfg.node_is_array(v):
if jsoncfg.node_is_scalar(v):
location = jsoncfg.node_location(v)
line = location.line
column = location.column
else:
line = v.line
column = v.column
a = jsoncfg.config_classes.ConfigJSONArray(line, column)
a._append(v)
return a
return v | c5288cc726d103667e5f51055bc4e8cd4a90816e | 9,570 |
def score_game(game_core):
"""Запускаем игру 1000 раз, чтобы узнать, как быстро игра угадывает число"""
count_ls = []
np.random.seed(1) # fix the RANDOM SEED so that your experiment is reproducible!
random_array = np.random.randint(1, 101, 1000)
for number in random_array:
count_ls.append(game_core(number))
score = int(np.mean(count_ls))
print(f"Your algorithm guesses the number in an average of {score} attempts")
return score | 74a8c4b44ff2caec31f38f136c3fc2336909759f | 9,571 |
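# Hedged usage sketch for score_game above: game_core_binary is a hypothetical
# guesser (not part of the original snippet) that finds the number in [1, 100]
# by binary search and returns the number of attempts it needed.
import numpy as np

def game_core_binary(number):
    count, lo, hi = 0, 1, 101
    while True:
        count += 1
        guess = (lo + hi) // 2
        if guess == number:
            return count
        if guess < number:
            lo = guess + 1
        else:
            hi = guess

score_game(game_core_binary)  # typically reports ~5 attempts on average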
def add_plot(
lon, lat, kind=None, props=None, ax=None, break_on_change=False, transform=identity
):
"""Add a plot with different props for different 'kind' values to an existing map
Parameters
----------
lon : sequence of float
lat : sequence of float
kind : sequence of hashable, optional
Controls what props are used. Length must match lon/lat and values
are used to index into the `props` map.
props : dict, optional.
Maps `kind` of first and last point of each segment to plot style.
By default, sorted values from `kind`
are mapped to 'axes.prop_cycle'. `props` for segments between
points with different `kind` value are looked up under `None`.
If `None` is missing, these points are not plotted.
ax : matplotlib axes object, optional
break_on_change : bool, optional
Whether to create a new segment when kind changes. Generally True for fishing plots
and False for vessel plots.
transform : cartopy.crs.Projection, optional
Returns
-------
dict mapping keys to Line2D
Values are suitable for passing to legend.
"""
if ax is None:
ax = plt.gca()
assert len(lon) == len(lat)
if kind is None:
kind = np.ones(len(lon))
else:
kind = np.asarray(kind)
assert len(kind) == len(lon)
if props is None:
props = styles.create_props(np.unique(kind))
handles = {}
for k1, k2 in sorted(props.keys()):
mask = _build_mask(kind, k1, k2, break_on_change)
if mask.sum():
ml_coords = _build_multiline_string_coords(lon, lat, mask, break_on_change)
mls = MultiLineString(ml_coords)
p = props[k1, k2].copy()
if "legend" in p:
key = p.pop("legend")
else:
key = k1 if (k1 == k2) else f"{k1}-{k2}"
ax.add_geometries([mls], crs=transform, **p)
if key:
handles[key] = Line2D(
[0], [0], color=p["edgecolor"], lw=p.get("linewidth", 1)
)
return handles | c5d6b5234fe560e9d954d4ea8d0a7aef0e810f89 | 9,572 |
def can_review_faults(user):
"""
users can review faults if one of the following applies:
a) No fault review groups exist and they have can_review permissions
b) Fault review groups exist, they are a member of one, and they have
review permissions
"""
can_review = user.has_perm("faults.can_review")
review_groups = [frg.group for frg in FaultReviewGroup.objects.select_related("group")]
if review_groups:
can_review = can_review and len(set(review_groups) & set(user.groups.all())) > 0
return can_review | c66f022b6f52144d8e9fde6865f0a8a263819813 | 9,573 |
import requests
def create_freshservice_object(obj_type, data):
"""Use the Freshservice v2 API to create an object.
Accepts an object name (string) and a dict of key values.
"""
url = '{}/{}'.format(settings.FRESHSERVICE_ENDPOINT, obj_type)
resp = requests.post(url, auth=FRESHSERVICE_AUTH, json=data)
return resp | 597348b744d6193beb12dcf2a3a4958808f09d24 | 9,574 |
def print_begin(*args, sep=' ', end='\n', file=None, ret_value='') -> str:
"""Print the function name and start."""
print(_prefix('begin'), *args, sep=sep, end=end, file=file, flush=True)
return ret_value | 8e9ac418d161a0d2b5b7c0c9de7b81da42ea5017 | 9,575 |
def scale_bounding_box(bounding_box,scale):
"""Scales bounding box coords (in dict from {x1,y1,x2,y2}) by x and y given by sclae in dict form {x,y}"""
scaled_bounding_box = {
"x1" : int(round(bounding_box["x1"]*scale["x"]))
,"y1" : int(round(bounding_box["y1"]*scale["y"]))
,"x2" : int(round(bounding_box["x2"]*scale["x"]))
,"y2" : int(round(bounding_box["y2"]*scale["y"]))
}
return scaled_bounding_box | 8aa374537ed2ae3ae2324bd8a4819e981f281b71 | 9,576 |
import click
def is_command(obj) -> bool:
"""
Return whether ``obj`` is a click command.
:param obj:
"""
return isinstance(obj, click.Command) | 8159aea42baca70b3218a0b82e2f4dc3f34278aa | 9,577 |
def GetContigs(orthologs):
"""get map of contigs to orthologs.
An ortholog can be part of only one contig, but the same ortholog_id can
be part of several contigs.
"""
contigs = {}
for id, oo in orthologs.items():
for o in oo:
if o.contig not in contigs:
contigs[o.contig] = []
contigs[o.contig].append(o)
return contigs | 0c449a31e60f1a149317de815d630c4d8a817ca1 | 9,578 |
def set_backwards_pass(op, backwards):
"""
Returns new operation which behaves like `op` in the forward pass but
like `backwards` in the backwards pass.
"""
return backwards + tf.stop_gradient(op - backwards) | 13287ac73c52ac01808c41c81ba5311bc3f49b91 | 9,580 |
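# Minimal sketch of a straight-through estimator built with set_backwards_pass
# above: round() in the forward pass, identity gradient in the backward pass.
# Assumes TensorFlow 2.x eager execution; the variable and loss are made up.
import tensorflow as tf

x = tf.Variable([0.2, 0.7, 1.4])
with tf.GradientTape() as tape:
    y = set_backwards_pass(tf.round(x), x)  # forward: rounded, backward: acts like x
    loss = tf.reduce_sum(y ** 2)
print(tape.gradient(loss, x))  # gradients flow through as if y were x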
def remove_hydrogens(list_of_lines):
"""
Removes hydrogen from the pdb file.
To add back the hydrogens, run the reduce program on the file.
"""
return (line for line in list_of_lines if line['element']!=" H") | 164ac79171cf6b3632fe7909ace91ffe75192b61 | 9,581 |
def crash_random_instance(org: str, space: str, appname: str, configuration: Configuration, count: int = 1):
"""
Crash one or more random application instances.
:param org: String; Cloud Foundry organization containing the application.
:param space: String; Cloud Foundry space containing the application.
:param appname: String; Application in Cloud Foundry which is to be targeted.
:param count: int; Number of instances to kill.
:param configuration: Configuration; Configuration details, see `README.md`.
:return: A JSON Object representing the application which was targeted.
"""
return run_ctk(
lambda app: app.crash_random_instance(count=count),
configuration, org, space, appname,
"Crashing {} random app instance(s)...".format(count)
) | 652ab95038d405b6a193809804aae7f3bc15978f | 9,582 |
def spc_dict_from_spc_info(spc_info: dict, resonance: bool = True) -> dict:
"""
Generate a species dictionary from species info.
Args:
spc_info (dict): Species info contains the label and species geom info.
resonance (bool): Whether generate resonance geom in the species dictionary.
Returns:
dict: The species dictionary generated from the spc_info.
"""
spc_dict = {}
for label, spc in spc_info.items():
species = species_from_spc_info(spc)
if not species:
continue
if resonance:
species.generate_resonance_structures()
spc_dict[label] = species
return spc_dict | 0a291f2fd50134b1c1259adc36b5637e30e21118 | 9,583 |
import torch
def label_smooth_loss(log_prob, label, confidence=0.9):
"""
:param log_prob: log probability
:param label: one hot encoded
:param confidence: we replace one (in the one hot) with confidence. 0 <= confidence <= 1.
:return:
"""
N = log_prob.size(0)
C = log_prob.size(1)
smoothed_label = torch.full(size=(N, C), fill_value=(1-confidence) / (C - 1)).to(log_prob)
smoothed_label.scatter_(dim=1, index=torch.unsqueeze(label, dim=1), value=confidence)
loss = - torch.sum(log_prob * smoothed_label) / N
return loss | f1164d1a41d2c275ae4e406e2a46a0d50a2d240d | 9,584 |
def update(self, using=None, **kwargs):
"""
Updates specified attributes on the current instance.
"""
assert self.pk, "Cannot update an instance that has not yet been created."
using = using or router.db_for_write(self.__class__, instance=self)
for field in self._meta.fields:
if getattr(field, 'auto_now', False) and field.name not in kwargs:
kwargs[field.name] = field.pre_save(self, False)
affected = self.__class__._base_manager.using(using).filter(pk=self.pk).update(**kwargs)
for k, v in kwargs.items():
if isinstance(v, ExpressionNode):
v = resolve_expression_node(self, v)
setattr(self, k, v)
if affected == 1:
signals.post_save.send(sender=self.__class__, instance=self, created=False)
return True
elif affected == 0:
return False
elif affected < 0:
raise ValueError("Somehow we have updated a negative amount of rows, you seem to have a problem with your db backend.")
else:
raise ValueError("Somehow we have updated multiple rows, and you are now royally fucked.") | b3400f43c0a744de17225ee6c029fc41465b784d | 9,585 |
def differential_privacy_with_risk( dfg_freq, dfg_time, delta, precision, aggregate_type=AggregateType.SUM):
"""
This method adds the differential privacy to the DFG of both time and frequencies.
* It calculates the epsilon using the guessing advantage technique.
* It adds laplace noise to the DFGs.
* It calculates the distance resulted from the noise
"""
accuracy=1
# calculate epsilon
epsilon_freq,senstivity_freq=calculate_epsilon_freq(dfg_freq,delta)
epsilon_time,senstivity_time=calculate_epsilon_time(dfg_time,delta,precision, aggregate_type)
# adding laplace noise to DFG freq
dfg_freq_new = add_laplace_noise_freq(dfg_freq, epsilon_freq)
# adding laplace noise to DFG time
dfg_time, dfg_time_new = add_laplace_noise_time(aggregate_type, dfg_time, epsilon_time)
# Calculate earth moving distance
emd_freq=earth_mover_dist(dfg_freq,dfg_freq_new)
emd_time=earth_mover_dist(dfg_time,dfg_time_new)
#calculating the APE, MAPE, and SMAPE
MAPE_freq, SMAPE_freq, APE_dist_freq,SMAPE_dist_freq=error_calculation(dfg_freq,dfg_freq_new)
MAPE_time, SMAPE_time, APE_dist_time, SMAPE_dist_time = error_calculation(dfg_time,dfg_time_new)
# return dfg_freq_new, dfg_time_new, epsilon_freq,epsilon_time, emd_freq, emd_time, percent_freq,percent_time,percent_freq_dist,percent_time_dist
return dfg_freq_new, dfg_time_new, epsilon_freq,epsilon_time, MAPE_freq, SMAPE_freq, APE_dist_freq, MAPE_time, SMAPE_time, APE_dist_time, SMAPE_dist_freq, SMAPE_dist_time | a85035b8786bb6bf9a5cc0af88433a490faac77f | 9,586 |
from typing import Iterable
from typing import Counter
def get_all_values(string: str) -> Iterable[int]:
"""Return all kinds of candidates, with ordering: Dec, Hex, Oct, Bin."""
if string.startswith('0x'):
return filter(bool, [parse_hex(string[2:])]) # type: ignore[list-item]
if string.startswith('0o'):
return filter(bool, [parse_oct(string[2:])]) # type: ignore[list-item]
if string.startswith('0b'):
return filter(bool, [parse_bin(string[2:])]) # type: ignore[list-item]
# try each base when no prefix
return Counter(filter(bool, map(lambda f: f(string), # type: ignore[arg-type,return-value]
[parse_dec, parse_hex, parse_oct, parse_bin]))) | d9e12290339cbf31dc572c9e3d49ec503949250d | 9,587 |
def svn_auth_get_simple_provider(*args):
"""svn_auth_get_simple_provider(apr_pool_t pool)"""
return _core.svn_auth_get_simple_provider(*args) | e91c2198f5ee214fb1db9e8969711a806caf19c6 | 9,588 |
def preferred_language():
""" It just returns first language from acceptable
"""
return acceptable_languages()[0] | 6e5c2b069f84c5a6601b579616858457598f2cf4 | 9,589 |
def get_frequencies(trial = 1):
"""
get frequency lists
"""
if trial == "run_fast_publish":
lb_targ, ub_targ, obs_hz = 340, 350, 10
elif trial == 1:
lb_targ, ub_targ, obs_hz = 210, 560, int(320 / 2)
elif trial == 2:
lb_targ, ub_targ, obs_hz = 340, 640, 280
elif trial == 3:
lb_targ, ub_targ, obs_hz = 340, 350, 20  # 40
elif trial == 4:
lb_targ, ub_targ, obs_hz = 60, 350, 40
elif trial == 5:
lb_targ, ub_targ, obs_hz = 50, 200, 40
elif trial == 6:
lb_targ, ub_targ, obs_hz = 130, 530, 130
elif trial == 7:
lb_targ, ub_targ, obs_hz = 500, 900, 250
obs_list = list( range( lb_targ - obs_hz, lb_targ))
obs_list += list( range( ub_targ, ub_targ + obs_hz))
resp_list = list( range( lb_targ, ub_targ))
return obs_list, resp_list | e6c7f33865ffd76532a19426f0748d4dd22e37f8 | 9,590 |
import re
def parse_field_pubblicazione(field):
"""
Extracts year, place and publisher from the field `pubblicazione` by applying a cascade of regexps.
"""
exp2 = r'^(?P<place>\D+)(?:\s?\W\s?)(?P<publisher>.*?)\D{1}?(?P<year>\d+)?$'
exp1 = r'^(?P<place>.*?)(?::)(?P<publisher>.*?)\D{1}?(?P<year>\d+)?$'
exp3 = r'(?:.*?)?(?P<year>\d{4})'
exp4 = r'^(?P<place>\D{3,})$'
not_matched = 0
partly_matched = 0
result = {}
result1 = re.match(exp1,field)
if(result1 is None):
result2 = re.match(exp2,field)
if(result2 is None):
result3 = re.match(exp3,field)
if(result3 is None):
result4 = re.match(exp4,field)
if(result4 is None):
not_matched += 1
else:
result = result4.groupdict()
else:
result = result3.groupdict()
else:
result = result2.groupdict()
else:
result = result1.groupdict()
return result | 91aee4dabf62b3ec5bccff2a07d664312226448c | 9,592 |
def test_api_calendar():
"""Return a test calendar object used in API responses."""
return TEST_API_CALENDAR | 1c73e63bf19cef92dbbe328825c2ae4e867c1e84 | 9,593 |
def apply_custom_colormap(image_gray, cmap=plt.get_cmap("seismic")):
"""
Implementation of applyColorMap in OpenCV using colormaps in Matplotlib.
"""
assert image_gray.dtype == np.uint8, "must be np.uint8 image"
if image_gray.ndim == 3:
image_gray = image_gray.squeeze(-1)
# Initialize the matplotlib color map
sm = plt.cm.ScalarMappable(cmap=cmap)
# Obtain linear color range
color_range = sm.to_rgba(np.linspace(0, 1, 256))[:, 0:3] # color range RGBA => RGB
color_range = (color_range * 255.0).astype(np.uint8) # [0,1] => [0,255]
color_range = np.squeeze(
np.dstack([color_range[:, 2], color_range[:, 1], color_range[:, 0]]), 0
) # RGB => BGR
# Apply colormap for each channel individually
channels = [cv2.LUT(image_gray, color_range[:, i]) for i in range(3)]
return np.dstack(channels) | e2f3c9a8900f47c0e7183f4ebe72f41a7f6d26b9 | 9,594 |
from typing import Callable
def _cond_with_per_branch_args(pred,
true_operand, true_fun: Callable,
false_operand, false_fun: Callable):
"""Conditionally apply ``true_fun`` or ``false_fun``.
Has equivalent semantics to this Python implementation::
def cond(pred, true_operand, true_fun, false_operand, false_fun):
if pred:
return true_fun(true_operand)
else:
return false_fun(false_operand)
Pred has to be a scalar type, collection types (list, tuple) are not supported
"""
if not (callable(true_fun) and callable(false_fun)):
raise TypeError("lax.cond: true_fun and false_fun arguments should be callable.")
return _cond(pred,
lambda op: true_fun(op[0]),
lambda op: false_fun(op[1]),
(true_operand, false_operand)) | e942124beafebb69fed80e3175164a34f088cb9e | 9,595 |
import urllib
def msgSet(key, notUsed, queryString, body):
"""no treatment on the body (we send exactly the body like we received it)"""
dict = urllib.parse.parse_qs(body.decode('utf-8'))
#sendSMS.writeRawMsg(body)
user = dict['user'][0]
print(dict)
sendSMS.writeMsgUser(dict['msg'][0], user)
return "Message sent to " + user | b67663f516f54af9a7dbbece67933ee1d04ee7a2 | 9,596 |
import re
def _strip_build_number(api_version):
"""Removes the build number component from a full api version string."""
match = re.match(r"^([A-Z]+-)?([0-9]+)(\.[0-9]+){2}$", api_version)
if match:
return api_version[:match.start(3)]
# if there aren't exactly 3 version number components, just leave it unchanged
return api_version | 20d8023281f05dfcb8c9fdd021b77796c72e1001 | 9,597 |
def get_me():
"""サインインしている自分自身の情報を取得"""
jia_user_id = get_user_id_from_session()
return {"jia_user_id": jia_user_id} | c31f6a1a8c794e4a2aa70779f9c8b2559baccd84 | 9,598 |
def dec_file(name, out=None, **kwargs):
"""
This is a helper function to decrypt a file and return its contents.
You can provide an optional output file using `out`
`name` can be a local file or when not using `salt-run` can be a url like `salt://`, `https://` etc.
CLI Examples:
.. code-block:: bash
salt-run nacl.dec_file name=/tmp/id_rsa.nacl
salt-call nacl.dec_file name=salt://crt/mycert.nacl out=/tmp/id_rsa
salt-run nacl.dec_file name=/tmp/id_rsa.nacl box_type=secretbox \
sk_file=/etc/salt/pki/master/nacl.pub
"""
kwargs["opts"] = __opts__
return salt.utils.nacl.dec_file(name, out, **kwargs) | 3ff74b9300fa8b441a22daf65d546f329e414447 | 9,599 |
async def conversation_steps(month: int = Query(default=1, ge=2, le=6), current_user: User = Depends(Authentication.get_current_user_and_bot)):
"""
Fetches the number of conversation steps that took place in the chat between the users and the agent
"""
return Utility.trigger_history_server_request(
current_user.get_bot(),
f'/api/history/{current_user.get_bot()}/metrics/conversation/steps',
{'month': month}
) | 9845cf39290f056395351953e3d7accbcb14ae06 | 9,600 |
def off(app: str) -> dict:
"""
Switches the app offline, if it isn't already.
:param app: The name of the Heroku app in which you want formation
:return: dictionary containing information about the app
"""
return Herokron(app).off() | 8aa6cef16d8924ce682fa9a7b886cee87d4e02c5 | 9,601 |
def count_weekday(start, stop, wd_target=0):
"""
Returns the number of days between start and stop inclusive which is the
first day of the month and is the specified weekday, with 0 being Monday.
"""
counter = 0
while start != stop + timedelta(days=1):
if start.weekday() == wd_target and start.day == 1:
counter += 1
start += timedelta(days=1)
return counter | 27dd8ce6493ac1c24c65c92208767159a6406348 | 9,603 |
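# Hedged usage sketch for count_weekday above; it assumes `timedelta` (and `date`)
# come from the standard datetime module, which the original snippet uses but
# does not import. Counts months whose 1st day is a Monday during 2020-2023.
from datetime import date, timedelta

print(count_weekday(date(2020, 1, 1), date(2023, 12, 31), wd_target=0))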
import collections
def _get_ngrams(segment, max_order):
"""Extracts all n-grams upto a given maximum order from an input segment.
Args:
segment: text segment from which n-grams will be extracted.
max_order: maximum length in tokens of the n-grams returned by this
methods.
Returns:
The Counter containing all n-grams up to max_order in segment
with a count of how many times each n-gram occurred.
"""
ngram_counts = collections.Counter()
for order in range(1, max_order + 1):
for i in range(0, len(segment) - order + 1):
ngram = tuple(segment[i:i+order])
ngram_counts[ngram] += 1
return ngram_counts | c4b388d71b2c16e6c324718b8a07db8531c83413 | 9,604 |
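# Hedged usage sketch for _get_ngrams above: extract unigrams and bigrams from a
# tokenized segment (a list of tokens, as the docstring implies).
counts = _get_ngrams("the cat sat on the mat".split(), max_order=2)
print(counts[("the",)])        # 2
print(counts[("the", "cat")])  # 1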
def pkg_topics_list(data_dict):
"""
Get a list of topics
"""
pkg = model.Package.get(data_dict['id'])
vocabulary = model.Vocabulary.get('Topics')
topics = []
if vocabulary:
topics = pkg.get_tags(vocab=vocabulary)
return topics | 7594ea421ade2a530d8e08490b542bbd05d1a962 | 9,605 |
def five_five(n):
"""
This checks if n is a power of 2 (or 0).
This is because the only way that n and (n-1) have none of the same bits (the
& check) is when n is a power of 2, or 0.
"""
return ((n & (n-1)) == 0) | 0b1cc310b5d8bd6dab6299b6a999a5dd0720ea80 | 9,607 |
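# Quick check of five_five above: it returns True for 0 and for powers of two.
print([n for n in range(17) if five_five(n)])  # [0, 1, 2, 4, 8, 16]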
from main import bot
from typing import Optional
import asyncio
async def send_message(user_id: int,
text: str,
buttons: Optional[list[dict[str, str]]] = None,
disable_notification: bool = False) -> bool:
"""
Safe messages sender
:param user_id:
:param text:
:param buttons: List of inline buttons in format [{'text': 'text', 'callback_data': 'callback_data', **kwargs}].
A button can have all the same keys that InlineKeyboardButton() take
:param disable_notification:
:return:
"""
try:
await bot.send_message(user_id, text, reply_markup=InlineKeyboardMarkup(
row_width=2,
resize_keyboard=True,
one_time_keyboard=True, ).add(
*[InlineKeyboardButton(**button) for button in buttons])
if buttons else None,
disable_notification=disable_notification)
log.info(f"Sent message to target [ID:{user_id}]")
except exceptions.BotBlocked:
log.error(f"Target [ID:{user_id}]: blocked by user")
except exceptions.ChatNotFound:
log.error(f"Target [ID:{user_id}]: invalid user ID")
except exceptions.RetryAfter as e:
log.error(f"Target [ID:{user_id}]: Flood limit is exceeded. Sleep {e.timeout} seconds.")
await asyncio.sleep(e.timeout)
return await send_message(user_id, text, buttons) # Recursive call
except exceptions.UserDeactivated:
log.error(f"Target [ID:{user_id}]: user is deactivated")
except exceptions.TelegramAPIError:
log.exception(f"Target [ID:{user_id}]: failed")
else:
log.info(f"Target [ID:{user_id}]: success")
return True
return False | e2cb9879a1eea95d639f6ff3c7b7bf7c5b19ef68 | 9,608 |
def convert_bytes_to_size(some_bytes):
"""
Convert number of bytes to appropriate form for display.
:param some_bytes: A string or integer
:return: A string
"""
some_bytes = int(some_bytes)
suffix_dict = {
'0': 'B',
'1': 'KiB',
'2': 'MiB',
'3': 'GiB',
'4': 'TiB',
'5': 'PiB'
}
counter = 0
while some_bytes > 1 and counter <= 5:
tmp = some_bytes / 1024
if tmp < 1:
break
else:
some_bytes = tmp
counter += 1
return str(format(some_bytes, '.2f')) + ' ' + str(suffix_dict[str(counter)]) | d1579e0fc0850a98145910c056b3fac8be7c66f1 | 9,609 |
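# Hedged usage sketch for convert_bytes_to_size above.
print(convert_bytes_to_size(1536))         # 1.50 KiB
print(convert_bytes_to_size(3 * 1024**3))  # 3.00 GiB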
def create_bbregister_func_to_anat(fieldmap_distortion=False,
name='bbregister_func_to_anat'):
"""
Registers a functional scan in native space to structural. This is meant to be used
after create_nonlinear_register() has been run and relies on some of its outputs.
Parameters
----------
fieldmap_distortion : bool, optional
If field map-based distortion correction is being run, FLIRT should
take in the appropriate field map-related inputs.
name : string, optional
Name of the workflow.
Returns
-------
register_func_to_anat : nipype.pipeline.engine.Workflow
Notes
-----
Workflow Inputs::
inputspec.func : string (nifti file)
Input functional scan to be registered to MNI space
inputspec.anat_skull : string (nifti file)
Corresponding full-head scan of subject
inputspec.linear_reg_matrix : string (mat file)
Affine matrix from linear functional to anatomical registration
inputspec.anat_wm_segmentation : string (nifti file)
White matter segmentation probability mask in anatomical space
inputspec.bbr_schedule : string (.sch file)
Boundary based registration schedule file for flirt command
Workflow Outputs::
outputspec.func_to_anat_linear_xfm : string (mat file)
Affine transformation from functional to anatomical native space
outputspec.anat_func : string (nifti file)
Functional data in anatomical space
"""
register_bbregister_func_to_anat = pe.Workflow(name=name)
inputspec = pe.Node(util.IdentityInterface(fields=['func',
'anat_skull',
'linear_reg_matrix',
'anat_wm_segmentation',
'bbr_schedule',
'fieldmap',
'fieldmapmask'
]),
name='inputspec')
inputNode_echospacing = pe.Node(
util.IdentityInterface(fields=['echospacing']),
name='echospacing_input')
inputNode_pedir = pe.Node(util.IdentityInterface(fields=['pedir']),
name='pedir_input')
outputspec = pe.Node(util.IdentityInterface(fields=['func_to_anat_linear_xfm',
'anat_func']),
name='outputspec')
wm_bb_mask = pe.Node(interface=fsl.ImageMaths(),
name='wm_bb_mask')
wm_bb_mask.inputs.op_string = '-thr 0.5 -bin'
register_bbregister_func_to_anat.connect(inputspec, 'anat_wm_segmentation',
wm_bb_mask, 'in_file')
def bbreg_args(bbreg_target):
return '-cost bbr -wmseg ' + bbreg_target
bbreg_func_to_anat = pe.Node(interface=fsl.FLIRT(),
name='bbreg_func_to_anat')
bbreg_func_to_anat.inputs.dof = 6
register_bbregister_func_to_anat.connect(inputspec, 'bbr_schedule',
bbreg_func_to_anat, 'schedule')
register_bbregister_func_to_anat.connect(wm_bb_mask, ('out_file', bbreg_args),
bbreg_func_to_anat, 'args')
register_bbregister_func_to_anat.connect(inputspec, 'func',
bbreg_func_to_anat, 'in_file')
register_bbregister_func_to_anat.connect(inputspec, 'anat_skull',
bbreg_func_to_anat, 'reference')
register_bbregister_func_to_anat.connect(inputspec, 'linear_reg_matrix',
bbreg_func_to_anat, 'in_matrix_file')
if fieldmap_distortion:
def convert_pedir(pedir):
# FSL Flirt requires pedir input encoded as an int
conv_dct = {'x': 1, 'y': 2, 'z': 3, '-x': -1, '-y': -2, '-z': -3}
if not isinstance(pedir, str):
raise Exception("\n\nPhase-encoding direction must be a "
"string value.\n\n")
if pedir not in conv_dct.keys():
raise Exception("\n\nInvalid phase-encoding direction "
"entered: {0}\n\n".format(pedir))
return conv_dct[pedir]
register_bbregister_func_to_anat.connect(inputNode_pedir, ('pedir', convert_pedir),
bbreg_func_to_anat, 'pedir')
register_bbregister_func_to_anat.connect(inputspec, 'fieldmap',
bbreg_func_to_anat, 'fieldmap')
register_bbregister_func_to_anat.connect(inputspec, 'fieldmapmask',
bbreg_func_to_anat, 'fieldmapmask')
register_bbregister_func_to_anat.connect(inputNode_echospacing, 'echospacing',
bbreg_func_to_anat, 'echospacing')
register_bbregister_func_to_anat.connect(bbreg_func_to_anat, 'out_matrix_file',
outputspec, 'func_to_anat_linear_xfm')
register_bbregister_func_to_anat.connect(bbreg_func_to_anat, 'out_file',
outputspec, 'anat_func')
return register_bbregister_func_to_anat | 0598cef86fdebe697bfdc1627554c4340303a86b | 9,610 |
def pointShiftFromRange(dataSize, x = all, y = all, z = all, **args):
"""Calculate shift of points given a specific range restriction
Arguments:
dataSize (str): data size of the full image
x,y,z (tuples or all): range specifications
Returns:
tuple: shift of points from original origin of data to origin of range reduced data
"""
if isinstance(dataSize, str):
dataSize = self.dataSize(dataSize)
dataSize = list(dataSize)
d = len(dataSize)
rr = []
if d > 0:
rr.append(toDataRange(dataSize[0], r = x))
if d > 1:
rr.append(toDataRange(dataSize[1], r = y))
if d > 2:
rr.append(toDataRange(dataSize[2], r = z))
if d > 3 or d < 1:
raise RuntimeError('shiftFromRange: dimension %d too big' % d)
return [r[0] for r in rr] | dbe5c2049c5b76bfdbb839faa2a3e6cb942c8249 | 9,611 |
def callparser():
"""Parses a group of expressions."""
def cull_seps(tokens):
return tokens[0] or tokens[1]
return RepeatParser(exprparser() + OptionParser(dlmparser(',')) ^ cull_seps) | af8fbf81044b90d6a1a9ea769a513109237692d4 | 9,612 |
def write_section(section_name, section, keys, writer) -> bool:
"""
Saves the specified section to the specified writer starting at the current
point in the writer. It will not throw an exception. On error (IO exception
or not being able to write the section) it will return false. WARNING: It can
not scan the destination to see if this section has already been written, so
typically this method is called when writing out an entire configuration with
multiple sections in sequence.
Returns True on success and False on failure.
"""
keys = keys if keys else section.keys()
ret = False
# OBSOLETE with io.TextIOWrapper(writer) as writer2:
try:
writer.write(section_name + ":\n")
for k in keys:
val = section.get(k)
if val:
output = " " + k + _COLONSPACE + val + "\n"
writer.write(output)
ret = True
except OSError as err:
_printerr(err) # Just return false
return ret | 368f0cac04d392b9ea8946d30538a3fb0265c593 | 9,613 |
def _rotation_270(image):
"""Rotate an image with 270 degrees (clockwise).
Parameters
----------
image : np.ndarray
Image to rotate with shape (y, x, channels).
Returns
-------
image_rotated : np.ndarray
Image rotated with shape (y, x, channels).
"""
image_rotated = _flip_v(image)
image_rotated = _transpose(image_rotated)
return image_rotated | 3cd291c9283a32d0bc66902bff7861db855f4420 | 9,614 |
def classification_id_for_objs(object_id: str, url: str, token: str):
"""
Get classification id for a given object
Arguments
----------
object_id : str
Object id to get classification id for
url : str
Skyportal url
token : str
Skyportal token
Returns
----------
status_code : int
HTTP status code
data : list
List of classification ids and their author ids
"""
classifications = api(
"GET",
f"{url}/api/sources/{object_id}/classifications",
token=token,
)
data = {}
if classifications.status_code == 200:
data = {
"id": classifications.json()["data"][0]["id"],
"author_id": classifications.json()["data"][0]["author_id"],
}
return classifications.status_code, data | b03bb7ff18235cafd1b171e5042d64c65c19cffc | 9,615 |
import math
def ciede2000(Lab_1, Lab_2):
"""Calculates CIEDE2000 color distance between two CIE L*a*b* colors."""
C_25_7 = 6103515625 # 25**7
L1, a1, b1 = Lab_1[0], Lab_1[1], Lab_1[2]
L2, a2, b2 = Lab_2[0], Lab_2[1], Lab_2[2]
C1 = math.sqrt(a1**2 + b1**2)
C2 = math.sqrt(a2**2 + b2**2)
C_ave = (C1 + C2) / 2
G = 0.5 * (1 - math.sqrt(C_ave**7 / (C_ave**7 + C_25_7)))
L1_, L2_ = L1, L2
a1_, a2_ = (1 + G) * a1, (1 + G) * a2
b1_, b2_ = b1, b2
C1_ = math.sqrt(a1_**2 + b1_**2)
C2_ = math.sqrt(a2_**2 + b2_**2)
if b1_ == 0 and a1_ == 0: h1_ = 0
elif a1_ >= 0: h1_ = math.atan2(b1_, a1_)
else: h1_ = math.atan2(b1_, a1_) + 2 * math.pi
if b2_ == 0 and a2_ == 0: h2_ = 0
elif a2_ >= 0: h2_ = math.atan2(b2_, a2_)
else: h2_ = math.atan2(b2_, a2_) + 2 * math.pi
dL_ = L2_ - L1_
dC_ = C2_ - C1_
dh_ = h2_ - h1_
if C1_ * C2_ == 0: dh_ = 0
elif dh_ > math.pi: dh_ -= 2 * math.pi
elif dh_ < -math.pi: dh_ += 2 * math.pi
dH_ = 2 * math.sqrt(C1_ * C2_) * math.sin(dh_ / 2)
L_ave = (L1_ + L2_) / 2
C_ave = (C1_ + C2_) / 2
_dh = abs(h1_ - h2_)
_sh = h1_ + h2_
C1C2 = C1_ * C2_
if _dh <= math.pi and C1C2 != 0: h_ave = (h1_ + h2_) / 2
elif _dh > math.pi and _sh < 2 * math.pi and C1C2 != 0: h_ave = (h1_ + h2_) / 2 + math.pi
elif _dh > math.pi and _sh >= 2 * math.pi and C1C2 != 0: h_ave = (h1_ + h2_) / 2 - math.pi
else: h_ave = h1_ + h2_
T = 1 - 0.17 * math.cos(h_ave - math.pi / 6) + 0.24 * math.cos(2 * h_ave) + 0.32 * math.cos(3 * h_ave + math.pi / 30) - 0.2 * math.cos(4 * h_ave - 63 * math.pi / 180)
h_ave_deg = h_ave * 180 / math.pi
if h_ave_deg < 0: h_ave_deg += 360
elif h_ave_deg > 360: h_ave_deg -= 360
dTheta = 30 * math.exp(-(((h_ave_deg - 275) / 25)**2))
R_C = 2 * math.sqrt(C_ave**7 / (C_ave**7 + C_25_7))
S_C = 1 + 0.045 * C_ave
S_H = 1 + 0.015 * C_ave * T
Lm50s = (L_ave - 50)**2
S_L = 1 + 0.015 * Lm50s / math.sqrt(20 + Lm50s)
R_T = -math.sin(dTheta * math.pi / 90) * R_C
k_L, k_C, k_H = 1, 1, 1
f_L = dL_ / k_L / S_L
f_C = dC_ / k_C / S_C
f_H = dH_ / k_H / S_H
dE_00 = math.sqrt(f_L**2 + f_C**2 + f_H**2 + R_T * f_C * f_H)
return dE_00 | f95bc8338fbabe09f2038cea34e7a8fcad87f3bf | 9,616 |
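# Hedged usage sketch for ciede2000 above with two made-up CIE L*a*b* triples;
# a result near 0 means the colors are perceptually identical.
lab_1 = (53.2, 80.1, 67.2)
lab_2 = (74.9, 23.9, 78.9)
print(ciede2000(lab_1, lab_2))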
import torch
def ifft2c_new(data: torch.Tensor) -> torch.Tensor:
"""
Apply centered 2-dimensional Inverse Fast Fourier Transform.
Args:
data: Complex valued input data containing at least 3 dimensions:
dimensions -3 & -2 are spatial dimensions and dimension -1 has size
2. All other dimensions are assumed to be batch dimensions.
Returns:
The IFFT of the input.
"""
if not data.shape[-1] == 2:
raise ValueError("Tensor does not have separate complex dim.")
data = ifftshift(data, dim=[-3, -2])
data = torch.view_as_real(
torch.fft.ifftn( # type: ignore
torch.view_as_complex(data), dim=(-2, -1), norm="ortho"
)
)
data = fftshift(data, dim=[-3, -2])
return data | 6752dd94c690d8a8d3d0d625a693cd711c12c9c0 | 9,617 |
def _make_options(context, base):
"""Return pyld options for given context and base."""
options = {}
if context is None:
context = default_context()
options['expandContext'] = context
if base is not None:
options['base'] = base
return options | 8fcd514d9b0d11020ea197a29af6e76a53201306 | 9,618 |
def datatable(table_config: DatatableConfig, table_id: str, class_name: str = ''):
"""
Deprecated, use instead
<table id="{table_id}" data-datatable-url="{url}" class="{class_name}"></table>
"""
return {
"rich_columns": table_config.enabled_columns,
"search_box_enabled": table_config.search_box_enabled,
"table_id": table_id,
"class_name": class_name,
"expand_client_renderer": table_config.expand_client_renderer
} | 777d19f0eaa6f1adbb53cc1fa6042fbec3df4398 | 9,619 |
def npelpt(point, ellipse):
"""npelpt(ConstSpiceDouble [3] point, ConstSpiceDouble [NELLIPSE] ellipse)"""
return _cspyce0.npelpt(point, ellipse) | f81ff9a993f0166ed4899338c66b58e5329382ce | 9,620 |
def register_module():
"""Registers this module in the registry."""
# provide parser to verify
verify.parse_content = content.parse_string_in_scope
# setup routes
courses_routes = [('/faq', utils_faq.FaqHandler),('/allresources', utils_allresources.AllResourcesHandler)]
global custom_module
custom_module = custom_modules.Module(
'Course',
'FAQ Module',
[], courses_routes, notify_module_enabled = notify)
return custom_module | e4fe1ae4d3b05a4c396155ae3b471e941de56f7d | 9,621 |
def degreeList(s):
"""Convert degrees given on command line to a list.
For example, the string '1,2-5,7' is converted to [1,2,3,4,5,7]."""
l = []
for r in s.split(','):
t = r.split('-')
if len(t) == 1:
l.append(int(t[0]))
else:
a = int(t[0])
b = int(t[1])
l.extend(range(a,b+1, (1 if a <= b else -1)))
return sorted(l) | 3b517831ddab47da5cd0e36fa5913d6d59e73715 | 9,622 |
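# Quick check of degreeList above, using the example from its docstring.
print(degreeList("1,2-5,7"))  # [1, 2, 3, 4, 5, 7]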
def _get_corrected_msm(msm: pd.DataFrame, elevation: float, ele_target: float):
"""MSMデータフレーム内の気温、気圧、重量絶対湿度を標高補正
Args:
df_msm(pd.DataFrame): MSMデータフレーム
ele(float): 平均標高 [m]
elevation(float): 目標地点の標高 [m]
Returns:
pd.DataFrame: 補正後のMSMデータフレーム
"""
TMP = msm['TMP'].values
PRES = msm['PRES'].values
MR = msm['MR'].values
# 標高差
ele_gap = ele_target - elevation
# 気温補正
TMP_corr = get_corrected_TMP(TMP, ele_gap)
# 気圧補正
PRES_corr = get_corrected_PRES(PRES, ele_gap, TMP_corr)
# 重量絶対湿度補正
MR_corr = get_corrected_mixing_ratio(
MR=MR,
TMP=TMP_corr,
PRES=PRES_corr
)
# 補正値をデータフレームに戻す
msm = msm.copy()
msm['TMP'] = TMP_corr
msm['PRES'] = PRES_corr
msm['MR'] = MR_corr
# なぜ 気圧消すのか?
# msm.drop(['PRES'], axis=1, inplace=True)
return msm | 5cbfafa077c02ff5b7b74e47eff30c99e6201ff8 | 9,624 |
def get_answers_by_qname(sim_reads_sam_file):
"""Get a dictionary of Direction Start CIGAR MDtag by ReadID (qname)."""
answers_by_qname = {}
reads_file = open(sim_reads_sam_file)
next(reads_file)  # skip header line
for line in reads_file:
id, dir, start, cigar, mdtag = line.strip().split('\t')
answers_by_qname[id] = (dir, start, cigar, mdtag)
reads_file.close()
return answers_by_qname | eae27387f4ac0e20b16392ca699fad7e6489c6e9 | 9,625 |
def post_times(post: Post) -> html_tag:
"""Display time user created post.
If user has edited their post show the timestamp for that as well.
:param post: Post ORM object.
:return: Rendered paragraph tag with post's timestamp information.
"""
p = tags.p(cls="small")
p.add(f"{_('Posted')}: ")
p.add(moment(post.created).fromNow())
if post.edited is not None:
p.add(tags.br(), f"{_('Edited')}: ", moment(post.edited).fromNow())
return p | 8e64d6f49ed5bcf8f9a9ea1f3a5350880bbe7b39 | 9,626 |
def read_articles_stat(path):
"""
读取articles_stat文件,生成可以读取法条正负样本数量的字典列表
:param path: articles_stat文件位置
:return: ret: [{'第一条': (负样本数量, 正样本数量), ...}, {...}, ..., {...}]
"""
df = pd.read_csv(path, header=0, index_col=0)
ret = [{} for i in range(4)]
for index, row in df.iterrows():
ret[row['name']][row['number']] = (row['negatives'], row['positives'])
# print(ret)
return ret | be35e11a508e22241188b4719dc6fa0db14f4395 | 9,627 |
def get_bounding_box(font):
""" Returns max and min bbox of given truetype font """
ymin = 0
ymax = 0
if font.sfntVersion == 'OTTO':
ymin = font['head'].yMin
ymax = font['head'].yMax
else:
for g in font['glyf'].glyphs:
char = font['glyf'][g]
if hasattr(char, 'yMin') and ymin > char.yMin:
ymin = char.yMin
if hasattr(char, 'yMax') and ymax < char.yMax:
ymax = char.yMax
return ymin, ymax | 98161ef3426c2bb9b6dc4079c69f5c1f9d4e93a2 | 9,628 |
def create_user(client, profile, user, resend=False):
""" Creates a new user in the specified user pool """
try:
if resend:
# Resend the confirmation email so the user can recover their password
response = client.admin_create_user(
UserPoolId=profile["user_pool_id"],
Username=user.email,
MessageAction="RESEND",
)
else:
response = client.admin_create_user(
UserPoolId=profile["user_pool_id"],
Username=user.email,
UserAttributes=[
{"Name": "email", "Value": user.email},
{"Name": "email_verified", "Value": "true"},
],
)
if response["ResponseMetadata"]["HTTPStatusCode"] == 200:
if resend:
print(f"Resend confirmation to user {user.email} successfully")
else:
print(f"User {user.email} was created successfully")
return response
except client.exceptions.UsernameExistsException as error:
print(f"User {user.email} exists")
return error.response
except client.exceptions.ClientError as error:
print(f"Fail to create user {user.email}: {error.response}")
return error.response | 4c1f83c0ab7fd28dc7b1e2d8f2efa224360dfdb1 | 9,629 |
def generate_move_probabilities(
in_probs: np.ndarray,
move_dirn: float,
nu_par: float,
dir_bool: np.ndarray
):
""" create move probabilities from a 1d array of values"""
out_probs = np.asarray(in_probs.copy())
if np.isnan(out_probs).any():
print('NANs in move probabilities!')
out_probs = get_directional_probs(move_dirn * np.pi / 180.)
out_probs = out_probs.clip(min=0.)
out_probs[4] = 0.
out_probs = [ix * float(iy) for ix, iy in zip(out_probs, dir_bool)]
if np.count_nonzero(out_probs) == 0:
out_probs = get_directional_probs(move_dirn * np.pi / 180.)
#out_probs = np.random.rand(len(out_probs))
out_probs[4] = 0.
out_probs = [ix * float(iy) for ix, iy in zip(out_probs, dir_bool)]
if np.count_nonzero(out_probs) == 0:
out_probs = get_directional_probs(move_dirn * np.pi / 180.)
out_probs /= np.sum(out_probs)
out_probs = np.power(out_probs, nu_par)
out_probs /= np.sum(out_probs)
return out_probs | 4ea9ef914b905b6ab79933ba90a3604b0391f038 | 9,630 |
def _is_diagonal(x):
"""Helper to identify if `LinearOperator` has only a diagonal component."""
return (isinstance(x, tf.linalg.LinearOperatorIdentity) or
isinstance(x, tf.linalg.LinearOperatorScaledIdentity) or
isinstance(x, tf.linalg.LinearOperatorDiag)) | de3bb0ab2313c5432abab4bf7b0c1e227bc682d7 | 9,631 |
def index():
"""Index Controller"""
return render_template('login.html') | 53499d68c734e6315e3f24927d70cb7cddca346a | 9,632 |
def match_twosided(desc1,desc2):
""" Two-sided symmetric version of match(). """
matches_12 = match(desc1,desc2)
matches_21 = match(desc2,desc1)
ndx_12 = matches_12.nonzero()[0]
# remove matches that are not symmetric
for n in ndx_12:
if matches_21[int(matches_12[n])] != n:
matches_12[n] = 0
return matches_12 | a86d1cfb19afa5404d8c4950dd8b24a130a6a003 | 9,633 |
import re
def parse_header_links(value):
"""Return a list of parsed link headers proxies.
i.e. Link: <http:/.../front.jpeg>; rel=front; type="image/jpeg",<http://.../back.jpeg>; rel=back;type="image/jpeg"
:rtype: list
"""
links = []
replace_chars = ' \'"'
value = value.strip(replace_chars)
if not value:
return links
for val in re.split(', *<', value):
try:
url, params = val.split(';', 1)
except ValueError:
url, params = val, ''
link = {'url': url.strip('<> \'"')}
for param in params.split(';'):
try:
key, value = param.split('=')
except ValueError:
break
link[key.strip(replace_chars)] = value.strip(replace_chars)
links.append(link)
return links | 58e1a73a524333cbd019387866047d434c7de494 | 9,634 |
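# Hedged usage sketch for parse_header_links above, with a typical paginated
# API Link header (the URLs are made up).
value = '<https://api.example.com/items?page=2>; rel="next", <https://api.example.com/items?page=5>; rel="last"'
for link in parse_header_links(value):
    print(link["rel"], link["url"])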
def _friends_bootstrap_radius(args):
"""Internal method used to compute the radius (half-side-length) for each
ball (cube) used in :class:`RadFriends` (:class:`SupFriends`) using
bootstrapping."""
# Unzipping.
points, ftype = args
rstate = np.random
# Resampling.
npoints, ndim = points.shape
idxs = rstate.randint(npoints, size=npoints) # resample
idx_in = np.unique(idxs) # selected objects
sel = np.ones(npoints, dtype='bool')
sel[idx_in] = False
idx_out = np.where(sel)[0] # "missing" objects
if len(idx_out) < 2: # edge case
idx_out = np.append(idx_out, [0, 1])
points_in, points_out = points[idx_in], points[idx_out]
# Construct KDTree to enable quick nearest-neighbor lookup for
# our resampled objects.
kdtree = spatial.KDTree(points_in)
if ftype == 'balls':
# Compute distances from our "missing" points its closest neighbor
# among the resampled points using the Euclidean norm
# (i.e. "radius" of n-sphere).
dists, ids = kdtree.query(points_out, k=1, eps=0, p=2)
elif ftype == 'cubes':
# Compute distances from our "missing" points its closest neighbor
# among the resampled points using the Euclidean norm
# (i.e. "half-side-length" of n-cube).
dists, ids = kdtree.query(points_out, k=1, eps=0, p=np.inf)
# Conservative upper-bound on radius.
dist = max(dists)
return dist | 0492f316c53b434faf79445313ec853830f87867 | 9,636 |
def _clip_grad(clip_value, grad):
"""
Clip gradients.
Inputs:
clip_value (float): Specifies how much to clip.
grad (tuple[Tensor]): Gradients.
Outputs:
tuple[Tensor], clipped gradients.
"""
dt = ops.dtype(grad)
new_grad = nn.ClipByNorm()(grad, ops.cast(ops.tuple_to_array((clip_value,)), dt))
return new_grad | 31cd4693a2bd80af7d3dd4be6a830b2982f8fce8 | 9,638 |
def sample_cast(user, name='David'):
"""Creates a sample Cast"""
return Cast.objects.create(user=user, name=name) | 3e4d03878697dfac931babbeaacaa7687d520189 | 9,639 |
import re
def sort_special_vertex_groups(vgroups,
special_vertex_group_pattern='STYMO:',
global_special_vertex_group_suffix='Character'):
"""
Given a list of special vertex group names, all with the prefix of
special_vertex_group_pattern, selects all that start with global_special_vertex_group_suffix
and puts them at the start of the list. This enables e.g. to easily define
top-level vertex groups that always go first, followed by details that
overwrite top level assignments.
"""
global_vg_name_pattern = special_vertex_group_pattern + \
global_special_vertex_group_suffix
first = []
last = []
for g in vgroups:
if re.match(global_vg_name_pattern, g) is not None:
first.append(g)
else:
last.append(g)
first.sort()
last.sort()
first.extend(last)
return first | 0cc8f0992553e5da5b37ea9a9886996cb9013582 | 9,640 |
def _GetFullDesktopName(window_station, desktop) -> str:
"""Returns a full name to a desktop.
Args:
window_station: Handle to window station.
desktop: Handle to desktop.
"""
return "\\".join([
win32service.GetUserObjectInformation(handle, win32service.UOI_NAME)
for handle in [window_station, desktop]
]) | e9a2aeebdb6f705efab1a0c1997ca66f4079cc07 | 9,641 |
def decrypt(plain_text: str, a: np.ndarray, b: np.ndarray, space: str) -> str:
"""Decrypts the given text with given a, b and space
:param plain_text: Text you want to decrypt
:type plain_text: str
:param a: An integer that corresponds to the A parameter in block cypher
:type a: np.ndarray
:param b: An integer that corresponds to the B parameter in block cypher
:type b: np.ndarray
:param space: Target space
:type space: str
:return: Decrypted text in string form
:rtype: str
"""
result = []
t = math_utils.get_inverse_matrix(a)
pairs = cryption_utils.get_pairs_of_int_two_from_text(plain_text, space)
for pair in pairs:
c = math_utils.create_nested_list_from_flat_list(pair)
subtracted_matrix = math_utils.sub_matrices(c, b)
dot_product = math_utils.dot_product_with_multiple_matrices(
[t, np.array(subtracted_matrix)]
)
result_list = space_utils.convert_nested_ints_to_char(dot_product, space)
result.append("".join(result_list))
return "".join(result) | 642b47d3459c64c5c7b280401aa96bd8f37cfa59 | 9,642 |
def well2D_to_df1D(xlsx_path, sheet, data_col):
"""
Convert new 2D output format (per well) to 1D dataframe
:param str xlsx_path: path to the xlsx file
:param str sheet: sheet name to load
:param str data_col: new column name of the linearized values
:return dataframe df: linearized dataframe
"""
df = pd.read_excel(xlsx_path, sheet_name=sheet, index_col=0)
df = df.unstack().reset_index(name=data_col) # unpivot (linearize) the table
df.rename(columns={'level_1': 'row_id', 'level_0': 'col_id'}, inplace=True)
df['well_id'] = df.row_id + df.col_id.map(str)
df = df[['well_id', data_col]]
return df | 0d8403b311c50cbc7f723e044f3aa93c50f17e80 | 9,644 |
import hashlib
def obtain_file_hash(path, hash_algo="md5"):
"""Obtains the hash of a file using the specified hash algorithm
"""
hash_algo = hashlib.sha256() if hash_algo=="sha256" else hashlib.md5()
block_size = 65535
with open(path, 'rb') as f:
for chunk in iter(lambda: f.read(block_size),b''):
hash_algo.update(chunk)
return hash_algo.hexdigest() | daa996339c638eaab4f3d067dcaaa4b865a6f923 | 9,645 |
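# Hedged usage sketch for obtain_file_hash above: hash this script with both
# supported algorithms (any readable file path would work).
print(obtain_file_hash(__file__))                      # MD5 hex digest
print(obtain_file_hash(__file__, hash_algo="sha256"))  # SHA-256 hex digest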
def b_q_bar(z_c):
"""Result of integrating from z_c to 1/2 of the
hard collinear part of the quark splitting function"""
b_q_zc = CF * (-3. + 6. * z_c + 4.* np.log(2. - 2.*z_c))/2.
return b_q_zc | c7e68a2b4b17e035081fd07784aeef017fcedabc | 9,646 |
def getsize(store, path=None):
"""Compute size of stored items for a given path."""
path = normalize_storage_path(path)
if hasattr(store, 'getsize'):
# pass through
return store.getsize(path)
elif isinstance(store, dict):
# compute from size of values
prefix = _path_to_prefix(path)
size = 0
for k in listdir(store, path):
try:
v = store[prefix + k]
except KeyError:
pass
else:
try:
size += buffer_size(v)
except TypeError:
return -1
return size
else:
return -1 | e537a231c49ac1edb6153d4751bd7f1b01979778 | 9,647 |
def split_2DL5AB(GL, cursor, log):
"""
splits the KIR2DL5 GL-string into 2 separate GL strings for 2DL5A and 2DL5B
:param GL: GL-string for KIR2DL5, combining both A and B
:param cursor: cursor to a connection to the nextype archive
:param log: logger instance
"""
log.info("Splitting 2DL5-alleles...")
proc_name = "GL_STRINGS_MGMT.SPLIT_GL_STRING_2DL5@ngsa"
proc_params = [GL]
proc_params2 = [2, 'KIR', 'J', 'J', '2DL5', 'J', '2DL5', 'N']
success, values = call_procedure(proc_name, proc_params, 2, proc_params2, cursor, log)
if success:
log.info("\t=> Success!")
[part1, part2] = values
if "2DL5A" in part1:
A = part1
B = part2
else:
A = part2
B = part1
A_alleles = A.replace("2DL5A*", "")
B_alleles = B.replace("2DL5B*", "")
else:
log.info("\t=> Procedure call did not work. :-(")
A_alleles = ""
B_alleles = ""
return A_alleles, B_alleles | e4c5eb51927b9e9cd607f95c1e2d1f853f4f2a3e | 9,648 |
def set_system_bios(context, settings, system_id=None, workaround=False):
"""
Finds a system matching the given ID and sets the BIOS settings
Args:
context: The Redfish client object with an open session
settings: The settings to apply to the system
system_id: The system to locate; if None, perform on the only system
workaround: Indicates if workarounds should be attempted for non-conformant services
Returns:
The response of the PATCH
"""
# Locate the system
system = get_system(context, system_id)
# Get the BIOS resource and determine if the settings need to be applied to the resource itself or the settings
# object
if "Bios" not in system.dict:
raise RedfishSystemBiosNotFoundError("System '{}' does not support representing BIOS".format(system.dict["Id"]))
bios_uri = system.dict["Bios"]["@odata.id"]
bios = context.get(bios_uri)
etag = bios.getheader("ETag")
if "@Redfish.Settings" in bios.dict:
bios_settings = get_system_bios_settings(context, bios, system.dict["Id"], workaround)
bios_uri = bios_settings.dict["@odata.id"]
etag = bios_settings.getheader("ETag")
# Update the settings
payload = {"Attributes": settings}
headers = None
if etag is not None:
headers = {"If-Match": etag}
response = context.patch(bios_uri, body=payload, headers=headers)
verify_response(response)
return response | c28f52db53363399df534efacc506a7e25c99930 | 9,649 |
def geometric_augmentation(images,
flow = None,
mask = None,
crop_height = 640,
crop_width = 640,
probability_flip_left_right = 0.5,
probability_flip_up_down = 0.1,
probability_scale = 0.8,
probability_relative_scale = 0.,
probability_stretch = 0.8,
probability_rotation = 0.0,
probability_relative_rotation = 0.0,
probability_crop_offset = 0.0,
min_bound_scale = -0.2,
max_bound_scale = 0.6,
max_strech_scale = 0.2,
min_bound_relative_scale = -0.1,
max_bound_relative_scale = 0.1,
max_rotation_deg = 15,
max_relative_rotation_deg = 3,
max_relative_crop_offset = 5,
return_full_scale=False):
"""Applies geometric augmentations to an image pair and corresponding flow.
Args:
images: Image pair of shape [2, height, width, channels].
flow: Corresponding forward flow field of shape [height, width, 2].
mask: Mask indicating which positions in the flow field hold valid flow
vectors of shape [height, width, 1]. Non-valid poisitions are encoded with
0, valid positions with 1.
crop_height: Height of the final augmented output.
crop_width: Width of the final augmented output.
probability_flip_left_right: Probability of applying left/right flip.
probability_flip_up_down: Probability of applying up/down flip
probability_scale: Probability of applying scale augmentation.
probability_relative_scale: Probability of applying scale augmentation to
only the second frame of the the image pair.
probability_stretch: Probability of applying stretch augmentation (scale
without keeping the aspect ratio).
probability_rotation: Probability of applying rotation augmentation.
probability_relative_rotation: Probability of applying rotation augmentation
to only the second frame of the the image pair.
probability_crop_offset: Probability of applying a relative offset while
cropping.
min_bound_scale: Defines the smallest possible scaling factor as
2**min_bound_scale.
max_bound_scale: Defines the largest possible scaling factor as
2**max_bound_scale.
max_strech_scale: Defines the smallest and largest possible streching factor
as 2**-max_strech_scale and 2**max_strech_scale.
min_bound_relative_scale: Defines the smallest possible scaling factor for
the relative scaling as 2**min_bound_relative_scale.
max_bound_relative_scale: Defines the largest possible scaling factor for
the relative scaling as 2**max_bound_relative_scale.
max_rotation_deg: Defines the maximum angle of rotation in degrees.
max_relative_rotation_deg: Defines the maximum angle of rotation in degrees
for the relative rotation.
max_relative_crop_offset: Defines the maximum relative offset in pixels for
cropping.
return_full_scale: bool. If this is passed, the full size images will be
returned in addition to the geometrically augmented (cropped and / or
resized) images. In addition to the resized images, the crop height,
width, and any padding applied will be returned.
Returns:
if return_full_scale is False:
Augmented images, flow and mask (if not None).
if return_full_scale is True:
Augmented images, flow, mask, full_size_images, crop_h, crop_w, pad_h,
and pad_w.
"""
# apply geometric augmentation
if probability_flip_left_right > 0:
images, flow, mask = random_flip_left_right(
images, flow, mask, probability_flip_left_right)
if probability_flip_up_down > 0:
images, flow, mask = random_flip_up_down(
images, flow, mask, probability_flip_up_down)
if probability_scale > 0 or probability_stretch > 0:
images, flow, mask = random_scale(
images,
flow,
mask,
min_scale=min_bound_scale,
max_scale=max_bound_scale,
max_strech=max_strech_scale,
probability_scale=probability_scale,
probability_strech=probability_stretch)
if probability_relative_scale > 0:
images, flow, mask = random_scale_second(
images, flow, mask,
min_scale=min_bound_relative_scale,
max_scale=max_bound_relative_scale,
probability_scale=probability_relative_scale)
if probability_rotation > 0:
images, flow, mask = random_rotation(
images, flow, mask,
probability=probability_rotation,
max_rotation=max_rotation_deg, not_empty_crop=True)
if probability_relative_rotation > 0:
images, flow, mask = random_rotation_second(
images, flow, mask,
probability=probability_relative_rotation,
max_rotation=max_relative_rotation_deg, not_empty_crop=True)
images_uncropped = images
images, flow, mask, offset_h, offset_w = random_crop(
images, flow, mask, crop_height, crop_width,
relative_offset=max_relative_crop_offset,
probability_crop_offset=probability_crop_offset)
# Add 100 / 200 pixels to crop height / width for full scale warp
pad_to_size_h = crop_height + 200
pad_to_size_w = crop_width + 400
if return_full_scale:
if pad_to_size_w:
uncropped_shape = tf.shape(images_uncropped)
if images.shape[1] > uncropped_shape[1] or images.shape[
2] > uncropped_shape[2]:
images_uncropped = images
uncropped_shape = tf.shape(images_uncropped)
offset_h = tf.zeros_like(offset_h)
offset_w = tf.zeros_like(offset_w)
if uncropped_shape[1] > pad_to_size_h:
crop_ht = offset_h - (200 // 2)
crop_hb = offset_h + crop_height + (200 // 2)
crop_hb += tf.maximum(0, -crop_ht)
crop_ht -= tf.maximum(0, -(uncropped_shape[1] - crop_hb))
crop_ht = tf.maximum(crop_ht, 0)
crop_hb = tf.minimum(crop_hb, uncropped_shape[1])
offset_h -= crop_ht
images_uncropped = images_uncropped[:, crop_ht:crop_hb, :, :]
if uncropped_shape[2] > pad_to_size_w:
crop_wt = offset_w - (400 // 2)
crop_wb = offset_w + crop_width + (400 // 2)
crop_wb += tf.maximum(0, -crop_wt)
crop_wt -= tf.maximum(0, -(uncropped_shape[2] - crop_wb))
crop_wt = tf.maximum(crop_wt, 0)
crop_wb = tf.minimum(crop_wb, uncropped_shape[2])
offset_w -= crop_wt
images_uncropped = images_uncropped[:, :, crop_wt:crop_wb, :]
uncropped_shape = tf.shape(images_uncropped)
# remove remove_pixels_w from the width while keeping the crop centered
pad_h = pad_to_size_h - uncropped_shape[1]
pad_w = pad_to_size_w - uncropped_shape[2]
with tf.control_dependencies([
tf.compat.v1.assert_greater_equal(pad_h, 0),
tf.compat.v1.assert_greater_equal(pad_w, 0)
]):
images_uncropped = tf.pad(images_uncropped,
[[0, 0], [pad_h, 0], [pad_w, 0], [0, 0]])
images_uncropped = tf.ensure_shape(images_uncropped,
[2, pad_to_size_h, pad_to_size_w, 3])
return images, flow, mask, images_uncropped, offset_h, offset_w, pad_h, pad_w
return images, flow, mask | f1a9ce6983edfd47388360b9d777ad5909c046e7 | 9,650 |
def edit_distance_between_seqs(seq1, seq2):
"""Input is two strings. They are globally aligned
and the edit distance is returned. An indel of any length
is counted as one edit"""
aln1, aln2 = _needleman_wunsch(seq1, seq2)
return edit_distance_from_aln_strings(aln1, aln2) | 88e98475c1652311745af69c6f521bba0497e633 | 9,651 |
import torch
def sentence_prediction(sentence):
"""Predict the grammar score of a sentence.
Parameters
----------
sentence : str
The sentence to be predicted.
Returns
-------
float
The predicted grammar probability.
"""
tokenizer = config.TOKENIZER.from_pretrained(
config.MODEL_PATH, local_files_only=True
)
model = config.MODEL.from_pretrained(config.MODEL_PATH, local_files_only=True)
max_len = config.MAX_LEN
sentence = str(sentence)
sentence = " ".join(sentence.split())
inputs = tokenizer.encode_plus(
sentence,
add_special_tokens=True,
max_length=max_len,
pad_to_max_length=True,
return_attention_mask=True,
return_tensors="pt",
truncation=True,
)
ids = torch.LongTensor(inputs["input_ids"][0]).unsqueeze(0)
mask = torch.LongTensor(inputs["attention_mask"][0]).unsqueeze(0)
ids = ids.to(DEVICE)
mask = mask.to(DEVICE)
model.to(DEVICE)
outputs = model(ids, token_type_ids=None, attention_mask=mask, return_dict=True)
outputs = torch.sigmoid(outputs.logits).cpu().detach().numpy()
return outputs[0][0] | 14d7c8efa76df4727419c2d99d685707ef46eb25 | 9,652 |
def build_seq(variants, phased_genotype, ref, pre_start, ref_end=None):
"""
Build or extend the haplotype according to provided genotype. We marked the start position iterator of each haplotype and
update with variant alternative base.
"""
seqs = ""
position = pre_start
for variant, phased in zip(variants, phased_genotype):
if variant.start < pre_start:
            if variant.start == pre_start - 1 and phased != 0:  # only happens when the previous position is a deletion and the current one is an insertion
ref_base = variant.reference_bases
alt_base = variant.alternate_bases[phased - 1]
if len(alt_base) > len(ref_base): # is an insertion
# print ('has insertion and deletion overlap'.format(variant.start))
return alt_base[1:], position
            if phased != 0:  # should be impossible, but sometimes happens in a real VCF
return None, None
else:
return "", pre_start # do not do anything if 0 allele
else:
seqs += ref.query(pre_start, variant.start)
allele = variant.reference_bases if phased == 0 else variant.alternate_bases[phased - 1]
if phased == 0:
allele = allele[0]
position = variant.start + 1
seqs += allele # only add one ref base
else:
ref_base = variant.reference_bases
alt_base = variant.alternate_bases[phased-1]
ref_base, alt_base = remove_common_suffix(ref_base, [alt_base])
end = variant.start + len(ref_base)
position = end
seqs += alt_base[0]
return seqs, position | b5f5168603b941fe8a55df5bd3bbf69898db3804 | 9,654 |
def handle_str(x):
"""
handle_str returns a random string of the same length as x.
"""
return random_string(len(x)) | 856341d0e3ff6d41c4c0f14beda5133b7285478c | 9,655 |
def get_process_causality_network_activity_query(endpoint_ids: str, args: dict) -> str:
"""Create the process causality network activity query.
Args:
endpoint_ids (str): The endpoint IDs to use.
args (dict): The arguments to pass to the query.
Returns:
str: The created query.
"""
process_causality_id_list = args.get('process_causality_id', '')
if not process_causality_id_list:
raise DemistoException('Please provide a process_causality_id argument.')
process_causality_id_list = wrap_list_items_in_double_quotes(process_causality_id_list)
return f'''dataset = xdr_data | filter agent_id in ({endpoint_ids}) and event_type = NETWORK
and actor_process_causality_id in ({process_causality_id_list}) | fields agent_hostname, agent_ip_addresses,agent_id,
action_local_ip, action_remote_ip, action_remote_port, dst_action_external_hostname,dns_query_name,
action_app_id_transitions, action_total_download, action_total_upload, action_country,action_as_data,
actor_process_image_sha256, actor_process_image_name , actor_process_image_path,actor_process_signature_vendor,
actor_process_signature_product, actor_causality_id,actor_process_image_command_line, actor_process_instance_id''' | 97330c89f7599cf4096322088ed7e7bad0699d49 | 9,657 |
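# An illustrative call; the endpoint IDs and causality IDs below are made-up placeholders.
query = get_process_causality_network_activity_query(
    endpoint_ids='"ep-1","ep-2"',
    args={"process_causality_id": "CAUS_A,CAUS_B"},
)
# query is an XQL string filtering NETWORK events for the listed causality IDs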
async def validate_input(hass: core.HomeAssistant, data):
"""Validate the user input allows us to connect.
Data has the keys from DATA_SCHEMA with values provided by the user.
"""
try:
client = TickTick()
client.login(data.get("username"), data.get("password"))
except RequestException as exc:
raise CannotConnect from exc
except ValueError as exc:
raise InvalidAuth from exc
# Return some info we want to store in the config entry.
return {"title": "TickTick"} | 7f6989ae0a87579f2270aab479247634b7d1f7e8 | 9,658 |
import jax
def _shard_batch(xs):
"""Shards a batch for a pmap, based on the number of devices."""
local_device_count = jax.local_device_count()
def _prepare(x):
return x.reshape((local_device_count, -1) + x.shape[1:])
return jax.tree_map(_prepare, xs) | 5c6fb53a97af3543b9e147abfb896719f83a0a28 | 9,659 |
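# A sketch of the reshape this performs, assuming 8 local devices and a batch size divisible by 8.
import jax.numpy as jnp
batch = {"images": jnp.zeros((32, 224, 224, 3)), "labels": jnp.zeros((32,))}
sharded = _shard_batch(batch)
# with jax.local_device_count() == 8: sharded["images"].shape == (8, 4, 224, 224, 3)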
def get_time_limit(component_limit, overall_limit):
"""
Return the minimum time limit imposed by the component and overall limits.
"""
limit = component_limit
if overall_limit is not None:
try:
elapsed_time = util.get_elapsed_time()
except NotImplementedError:
returncodes.exit_with_driver_unsupported_error(CANNOT_LIMIT_TIME_MSG)
else:
remaining_time = max(0, overall_limit - elapsed_time)
if limit is None or remaining_time < limit:
limit = remaining_time
return limit | 4699ff18459a434a93fb50f8ac8bcc569ceb5e63 | 9,660 |
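# For example, with a 300 s component limit and a 1800 s overall limit (assuming the driver's
# util.get_elapsed_time() is available):
limit = get_time_limit(300, 1800)
# returns min(300, max(0, 1800 - elapsed_time)); with overall_limit=None it simply returns 300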
def keras_decay(step, decay=0.0001):
"""Learning rate decay in Keras-style"""
return 1. / (1. + decay * step) | f26f1f100ecf1622d6da9958d0a6cd95a37b8b2a | 9,661 |
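# A small numeric check of the schedule; the base learning rate and step values are arbitrary.
base_lr = 1e-3
lrs = [base_lr * keras_decay(step, decay=1e-4) for step in (0, 1000, 2000)]
# [0.001, 0.000909..., 0.000833...] - i.e. lr / (1 + decay * step), as in Keras' legacy SGD decay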
def get_swagger():
""" Request handler for the /swagger path.
GET: returns the My Cars API spec as a swagger json doc.
"""
try:
return _make_response(response=validator.get_swagger_spec())
except Exception as e:
return _make_error(500, e.message) | a7ce1def456264d180dcb15e6039cd32e4df7597 | 9,662 |
from dateutil.relativedelta import relativedelta
def subtract(value, *args, **kwargs):
"""
Return the difference between ``value`` and a :class:`relativedelta`.
:param value: initial date or datetime.
:param args: positional args to pass directly to :class:`relativedelta`.
:param kwargs: keyword args to pass directly to :class:`relativedelta`.
:return: the resulting date/datetime.
"""
return value - relativedelta(*args, **kwargs) | 9f3c17b07c4010d9b1bfcff93280f0a59247fc5f | 9,663 |
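# A quick example of relativedelta semantics: month arithmetic clamps to the last valid day.
from datetime import date
subtract(date(2020, 3, 31), months=1)  # -> date(2020, 2, 29)
subtract(date(2021, 3, 31), months=1)  # -> date(2021, 2, 28)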
import netCDF4 as nc
import numpy as np
def plot_coastline(
axes,
bathymetry,
coords='grid',
isobath=0,
xslice=None,
yslice=None,
color='black',
server='local',
zorder=2,
):
"""Plot the coastline contour line from bathymetry on the axes.
The bathymetry data may be specified either as a file path/name,
or as a :py:class:`netCDF4.Dataset` instance.
If a file path/name is given it is opened and read into a
:py:class:`netCDF4.Dataset` so,
if this function is being called in a loop,
it is best to provide it with a bathymetry dataset to avoid
the overhead of repeated file reads.
:arg axes: Axes instance to plot the coastline contour line on.
:type axes: :py:class:`matplotlib.axes.Axes`
:arg bathymetry: File path/name of a netCDF bathymetry data file
or a dataset object containing the bathymetry data.
:type bathymetry: str or :py:class:`netCDF4.Dataset`
:arg coords: Type of plot coordinates to set the aspect ratio for;
either :kbd:`grid` (the default) or :kbd:`map`.
:type coords: str
:arg isobath: Depth to plot the contour at; defaults to 0.
:type isobath: float
    :arg xslice: X dimension slice to define the region for which the
contour is to be calculated;
defaults to :kbd:`None` which means the whole domain.
If an xslice is given,
a yslice value is also required.
:type xslice: :py:class:`numpy.ndarray`
    :arg yslice: Y dimension slice to define the region for which the
contour is to be calculated;
defaults to :kbd:`None` which means the whole domain.
If a yslice is given,
an xslice value is also required.
:type yslice: :py:class:`numpy.ndarray`
:arg color: Matplotlib colour argument
:type color: str, float, rgb or rgba tuple
:arg zorder: Plotting layer specifier
:type zorder: integer
:returns: Contour line set
:rtype: :py:class:`matplotlib.contour.QuadContourSet`
"""
# Index names based on results server
if server == 'local':
lon_name = 'nav_lon'
lat_name = 'nav_lat'
bathy_name = 'Bathymetry'
elif server == 'ERDDAP':
lon_name = 'longitude'
lat_name = 'latitude'
bathy_name = 'bathymetry'
else:
raise ValueError('Unknown results server name: {}'.format(server))
if any((
xslice is None and yslice is not None,
xslice is not None and yslice is None,
)):
raise ValueError('Both xslice and yslice must be specified')
if not hasattr(bathymetry, 'variables'):
bathy = nc.Dataset(bathymetry)
else:
bathy = bathymetry
depths = bathy.variables[bathy_name]
if coords == 'map':
lats = bathy.variables[lat_name]
lons = bathy.variables[lon_name]
if xslice is None and yslice is None:
contour_lines = axes.contour(
np.array(lons), np.array(lats), np.array(depths),
[isobath], colors=color, zorder=zorder)
else:
contour_lines = axes.contour(
lons[yslice, xslice], lats[yslice, xslice],
depths[yslice, xslice].data, [isobath], colors=color,
zorder=zorder)
else:
if xslice is None and yslice is None:
contour_lines = axes.contour(
np.array(depths), [isobath], colors=color, zorder=zorder)
else:
contour_lines = axes.contour(
xslice, yslice, depths[yslice, xslice].data,
[isobath], colors=color, zorder=zorder)
if not hasattr(bathymetry, 'variables'):
bathy.close()
return contour_lines | 1166ea3e942bf5c9212cf07e69326c38f6e77f96 | 9,664 |
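# A hedged usage sketch; the bathymetry file name, isobath and colour are placeholders.
import matplotlib.pyplot as plt
fig, ax = plt.subplots()
contour_lines = plot_coastline(ax, "bathy_meter.nc", coords="map", isobath=5, color="burlywood")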
from functools import wraps
def safelog(func):
"""Version of prism.log that has prism as an optional dependency.
This prevents the sql database, which may not be available, from becoming a strict dependency."""
@wraps(func)
def inner(self, update, context):
try:
self.bot.cores["prism"].log_user(update.effective_user)
if update.effective_user.id != update.effective_chat.id: # If the chat is not a one-to-one chat with the user.
self.bot.cores["prism"].log_chat(update.effective_chat)
except KeyError: # If the prism core is not loaded.
pass
func(self, update, context)
return inner | fbd1ad03417151705640f0fd20c0caa685896496 | 9,665 |
import networkx as nx
def draw(args):
"""
Draw a GraphML with the tribe draw method.
"""
G = nx.read_graphml(args.graphml[0])
draw_social_network(G, args.write)
return "" | f5347dceaf6f79ab22218eb8838944d4f3e5a8ea | 9,666 |
import re
from nltk.corpus import words
from nltk.stem import PorterStemmer
def extract_stem_voc(x):
"""extract word from predefined vocbulary with stemming and lemmatization
Args:
x ([string]): [a sentence]
Returns:
[list]: [word after stemming and lemmatization]
"""
stem = PorterStemmer()
# wnl = WordNetLemmatizer()
all_words = set(words.words())
# lemma_word = [word for word in map(lambda x: wnl.lemmatize(stem.stem(x)), re.findall('[a-zA-Z][-._a-zA-Z]*[a-zA-Z]', x)) if word in all_words]
lemma_word = [word for word in map(lambda x: stem.stem(x), re.findall('[a-zA-Z][-._a-zA-Z]*[a-zA-Z]', x)) if word in all_words]
return lemma_word | 0e882eb8f9b938fc8eb50e69dda2864d2d8a12da | 9,667 |
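# An illustrative call; the exact output depends on the contents of NLTK's words corpus,
# so only the stemming behaviour is described.
tokens = extract_stem_voc("The cats were running quickly")
# e.g. 'cats' stems to 'cat' and 'running' to 'run'; stems missing from the corpus
# (such as 'quickli', the Porter stem of 'quickly') are dropped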
from typing import Union
from typing import Optional
from typing import List
from typing import Dict
def plot_without_vis_spec(
conditions_df: Union[str, pd.DataFrame],
grouping_list: Optional[List[IdsList]] = None,
group_by: str = 'observable',
measurements_df: Optional[Union[str, pd.DataFrame]] = None,
simulations_df: Optional[Union[str, pd.DataFrame]] = None,
plotted_noise: str = MEAN_AND_SD,
subplot_dir: Optional[str] = None,
plotter_type: str = 'mpl',
format_: str = 'png',
) -> Optional[Dict[str, plt.Subplot]]:
"""
Plot measurements and/or simulations. What exactly should be plotted is
specified in a grouping_list.
    If a grouping list is not provided, measurements (simulations) will be
grouped by observable, i.e. all measurements for each observable will be
visualized on one plot.
Parameters
----------
grouping_list:
A list of lists. Each sublist corresponds to a plot, each subplot
contains the Ids of datasets or observables or simulation conditions
for this plot.
group_by:
Grouping type.
Possible values: 'dataset', 'observable', 'simulation'
conditions_df:
A condition DataFrame in the PEtab format or path to the condition
file.
measurements_df:
A measurement DataFrame in the PEtab format or path to the data file.
simulations_df:
A simulation DataFrame in the PEtab format or path to the simulation
output data file.
plotted_noise:
A string indicating how noise should be visualized:
['MeanAndSD' (default), 'MeanAndSEM', 'replicate', 'provided']
subplot_dir:
A path to the folder where single subplots should be saved.
PlotIDs will be taken as file names.
plotter_type:
Specifies which library should be used for plot generation. Currently,
only matplotlib is supported
format_:
File format for the generated figure.
(See :py:func:`matplotlib.pyplot.savefig` for supported options).
Returns
-------
ax: Axis object of the created plot.
None: In case subplots are saved to a file.
"""
if measurements_df is None and simulations_df is None:
raise TypeError('Not enough arguments. Either measurements_data '
'or simulations_data should be provided.')
vis_spec_parser = VisSpecParser(conditions_df, measurements_df,
simulations_df)
figure, dataprovider = vis_spec_parser.parse_from_id_list(
grouping_list, group_by, plotted_noise)
if plotter_type == 'mpl':
plotter = MPLPlotter(figure, dataprovider)
else:
raise NotImplementedError('Currently, only visualization with '
'matplotlib is possible.')
return plotter.generate_figure(subplot_dir, format_=format_) | dba21fae889057e83dd8084b727e7c6312c3cd0f | 9,668 |
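# An illustrative call; the TSV paths are placeholders for PEtab condition and measurement files.
axes = plot_without_vis_spec(
    conditions_df="conditions.tsv",
    measurements_df="measurements.tsv",
    group_by="observable",
)
# one subplot per observable, with noise shown as mean and standard deviation by default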
import torch
def _get_culled_faces(face_verts: torch.Tensor, frustum: ClipFrustum) -> torch.Tensor:
"""
Helper function used to find all the faces in Meshes which are
fully outside the view frustum. A face is culled if all 3 vertices are outside
the same axis of the view frustum.
Args:
face_verts: An (F,3,3) tensor, where F is the number of faces in
the packed representation of Meshes. The 2nd dimension represents the 3 vertices
of a triangle, and the 3rd dimension stores the xyz locations of each
vertex.
frustum: An instance of the ClipFrustum class with the information on the
position of the clipping planes.
Returns:
        faces_culled: A boolean tensor of size F specifying whether or not each face should be
culled.
"""
clipping_planes = (
(frustum.left, 0, "<"),
(frustum.right, 0, ">"),
(frustum.top, 1, "<"),
(frustum.bottom, 1, ">"),
(frustum.znear, 2, "<"),
(frustum.zfar, 2, ">"),
)
faces_culled = torch.zeros(
[face_verts.shape[0]], dtype=torch.bool, device=face_verts.device
)
for plane in clipping_planes:
clip_value, axis, op = plane
# If clip_value is None then don't clip along that plane
if frustum.cull and clip_value is not None:
if op == "<":
verts_clipped = face_verts[:, axis] < clip_value
else:
verts_clipped = face_verts[:, axis] > clip_value
# If all verts are clipped then face is outside the frustum
faces_culled |= verts_clipped.sum(1) == 3
return faces_culled | edb9594b4a9d5fe6c3d7fcf24e9b0e312b94d3cb | 9,669 |
import collections
def _build_pep8_output(result):
"""
Build the PEP8 output based on flake8 results.
Results from both tools conform to the following format:
<filename>:<line number>:<column number>: <issue code> <issue desc>
with some issues providing more details in the description within
parentheses.
:param result: output from flake8
:returns: list of flake8 output lines by error
"""
# Aggregate individual errors by error
_dict = collections.defaultdict(list)
for line in str(result).split("\n"):
if line:
# Preserve only the code and brief description for each issue to
# facilitate aggregating the results. For example,
#
# E501 line too long (178 > 79 characters) -> E501 line too long
# E303 too many blank lines (4) -> E303 too many blank lines
parts = line.replace("(", ":").split(":")
line_num, col_num, base_issue = parts[1:4]
# Strip the whitespace around the base <issue code> <description>.
#
# Also restore the missing colon, stripped above, if the issue
# was 'missing whitespace' surrounding a colon.
issue = base_issue.strip()
key = "{}:'".format(issue) if issue.endswith("after '") else issue
_dict[key].append("{} ({})".format(line_num, col_num))
# Build the output as one issue per entry
return ["{}: {}".format(k, ", ".join(_dict[k])) for k in
sorted(_dict.keys())] | a4abda2f9d3a2d9b3524c60429b047cbfe0285d9 | 9,670 |
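# A small worked example of the aggregation; the input mimics two flake8 lines for the same code.
result = ("foo.py:12:80: E501 line too long (99 > 79 characters)\n"
          "foo.py:30:80: E501 line too long (101 > 79 characters)")
_build_pep8_output(result)
# -> ["E501 line too long: 12 (80), 30 (80)"]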
def form_value(request, entity, attribute):
"""
Return value from request params or the given entity.
:param request: Pyramid request.
:param entity: Instance to get attribute from if it isn't found in the request
params.
:param str attribute: Name of attribute to search for in the request params or
on as an attribute of the given entity.
"""
# Check for contains, because we want the request value even if it's empty
if attribute in request.params:
return request.params.get(attribute, '')
if entity:
# Don't provide a default value, because we want to make attribute typos clear
return getattr(entity, attribute)
return '' | 1daea77474dae5a1cb6fdab0b075a5b2f5c40865 | 9,671 |
def process_batch_data(batch_words, batch_tags=None):
"""
Padding batched dataset.
Args:
batch_words: Words in a batch.
batch_tags: Punctuations in a batch.
Returns: Words and punctuations after padding.
"""
b_words, b_words_len = pad_sequences(batch_words)
if batch_tags is None:
return {"words": b_words, "seq_len": b_words_len, "batch_size": len(b_words)}
else:
b_tags, _ = pad_sequences(batch_tags)
return {"words": b_words, "tags": b_tags, "seq_len": b_words_len, "batch_size": len(b_words)} | 2428b1009cfcaf55df8ef5be275d87f1053643fd | 9,672 |
import torch
import math
def adjust_learning_rate(
    optimizer: torch.optim.Optimizer,
base_lr: float,
iteration: int,
warm_iter: int,
max_iter: int,
) -> float:
""" warmup + cosine lr decay """
start_lr = base_lr / 10
if iteration <= warm_iter:
lr = start_lr + (base_lr - start_lr) * iteration / warm_iter
else:
lr = start_lr + (base_lr - start_lr) * 0.5 * (1 + math.cos((iteration - warm_iter) * math.pi / (max_iter - warm_iter)))
for param_group in optimizer.param_groups:
param_group['lr'] = lr
return lr | 1304e22abb712cfb6c589a2adf199971c058986f | 9,675 |
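# A hedged training-loop sketch showing the warmup-then-cosine shape of the schedule;
# the model, optimizer and iteration counts are placeholders.
model = torch.nn.Linear(10, 2)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
for it in range(1, 1001):
    lr = adjust_learning_rate(optimizer, base_lr=0.1, iteration=it, warm_iter=100, max_iter=1000)
# lr ramps from 0.01 to 0.1 over the first 100 iterations, then decays back to 0.01 by iteration 1000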
import pandas as pd
def load_data(filename: str):
"""
Load house prices dataset and preprocess data.
Parameters
----------
filename: str
Path to house prices dataset
Returns
-------
Design matrix and response vector (prices) - either as a single
DataFrame or a Tuple[DataFrame, Series]
"""
df = pd.read_csv(filename).drop_duplicates()
df = filter_data(df)
return df.drop("price", axis=1), df.filter(['price']) | a5d044fa5be8ceefdb3cee7fb212608110f8dae5 | 9,677 |
import numpy as np
def face_at_link(shape, actives=None, inactive_link_index=BAD_INDEX_VALUE):
"""Array of faces associated with links.
Returns an array that maps link ids to face ids. For inactive links,
which do not have associated faces, set their ids to
*inactive_link_index*. Use the *actives* keyword to specify an array that
contains the ids of all active links in the grid. The default assumes
that only the perimeter nodes are inactive.
Examples
--------
>>> from landlab.utils.structured_grid import face_at_link
>>> faces = face_at_link((3, 4), inactive_link_index=-1)
>>> faces # doctest: +NORMALIZE_WHITESPACE
array([-1, 0, 1, -1, -1, 2, 3,
-1, -1, -1, -1, 4, 5, 6, -1, -1, -1])
"""
if actives is None:
actives = active_links(shape)
num_links = link_count(shape)
    link_faces = np.empty(num_links, dtype=int)
link_faces.fill(inactive_link_index)
link_faces[actives] = np.arange(len(actives))
return link_faces | db7e3e87144354fb850b0741a7531d06e73227f6 | 9,678 |