content (string, lengths 35-762k) | sha1 (string, length 40) | id (int64, 0-3.66M)
---|---|---|
import random
def create_solution_board(width=6, height=6):
"""Randomly generates a new board
with width by height size
"""
if type(width) != int or type(height) != int:
raise TypeError('Arguments must be int type')
boxes = width * height
if boxes % 2 != 0:
raise ValueError('Number of boxes is not multiple of two')
numbers = list(range(1, boxes // 2 + 1))
numbers = numbers + numbers
random.shuffle(numbers)
board = []
for index in range(height):
board.append([])
for _ in range(width):
random_number = numbers.pop()
board[index].append(random_number)
return board | 0b6e30d726cec61581d93c909761f80d739eb917 | 10,842 |
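A minimal usage sketch for the row above, assuming the function is in scope: build a 4x4 board and check that every value appears exactly twice, as in a pairs/memory game.
board = create_solution_board(width=4, height=4)
for row in board:
    print(row)
flat = [n for row in board for n in row]
assert all(flat.count(n) == 2 for n in set(flat))  # every tile has exactly one partner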
import numpy as np
from pydft.poisson import _O_operator, _L_operator, _B_operator
def _getE(s,R,W,V = None):
"""The sum of the energies for the states present in the solution.
Args:
s (list of int): The number of samples points along each
basis vector.
R (numpy.ndarray): The basis vectors for the unit cell.
W (numpy.ndarray): A matrix containing the expansion coefficients
for the wavefunctions
Returns:
E (numpy.ndarray): A vector of the energies at the sample points.
"""
    if V is None:  # pragma: no cover
V = _sho_V
O_t = _O_operator(s,R,W)
U = np.dot(np.conj(W.T),_O_operator(s,R,W))
Vt = np.transpose(np.conj(_Vdual(s,R, V = V)))
IW = _B_operator(s,R,W)
Uinv = np.linalg.inv(U)
IWU = _B_operator(s,R,np.dot(W,Uinv))
n = _diagouter(IW,IWU)
Ew = np.trace(np.dot(np.conj(np.transpose(W)),_L_operator(s,R,np.dot(W,Uinv))))
E = (-1.)*Ew/2. + np.dot(Vt,n)
return E | 7759c68e5774f809cfac1038014144cabe5c9410 | 10,843 |
from django.db import connection
from django.http import JsonResponse
def get_gt_list(request):
""" This view returns the list of groundtruths associated to a user and a specific configuration of institute,
usecase and language.
.js files: InfoAboutConfiguration.js DownloadGT.js"""
groundTruths = 0
json_resp = {}
ins = request.GET.get('inst',None)
lang = request.GET.get('lang',None)
use = request.GET.get('use',None)
action = request.GET.get('action',None)
token = request.GET.get('token',None)
reptype = request.GET.get('reptype',None)
annotation_mode = request.GET.get('annotation_mode','Human')
if ins == '':
ins = None
if use == '':
use = None
if lang == '':
lang = None
if token == 'all':
ns_robot = NameSpace.objects.get(ns_id='Robot')
ns_human = NameSpace.objects.get(ns_id='Human')
# rob_user = User.objects.get(username='Robot_user',ns_id=ns_robot)
list_gt = GroundTruthLogFile.objects.filter(ns_id=ns_human).count()
groundTruths = list_gt
# gt_rob = GroundTruthLogFile.objects.filter(ns_id=ns_robot,username = rob_user)
i = 0
# print(groundTruths)
# for el in gt_rob:
# gts = GroundTruthLogFile.objects.filter(ns_id=ns_robot,gt_type = el.gt_type,id_report = el.id_report_id,language = el.language).exclude(insertion_time = el.insertion_time)
# gts_count = gts.count()
# # print('count: '+str(i)+' '+str(gts.count()))
# i = i+1
# groundTruths = groundTruths + gts_count
else:
with connection.cursor() as cursor:
if reptype == 'reports':
if annotation_mode == 'Human':
cursor.execute(
"SELECT COUNT(*) FROM report AS r INNER JOIN ground_truth_log_file AS g ON g.id_report = r.id_report AND g.language = r.language INNER JOIN topic_has_document as t on t.id_report = r.id_report and r.language = t.language WHERE r.institute = COALESCE(%s,r.institute) AND t.name = %s AND r.language = COALESCE(%s,r.language) AND g.gt_type = %s AND g.ns_id = %s and r.institute != %s",
[ins, use, lang, action, 'Human','PUBMED'])
groundTruths = cursor.fetchone()[0]
else:
if annotation_mode == 'Human':
cursor.execute(
"SELECT COUNT(*) FROM report AS r INNER JOIN ground_truth_log_file AS g ON g.id_report = r.id_report AND g.language = r.language INNER JOIN topic_has_document as t on t.id_report = r.id_report and r.language = t.language WHERE t.name = %s AND r.language = %s AND g.gt_type = %s AND g.ns_id = %s and r.institute = %s",
[use, 'english', action, 'Human','PUBMED'])
groundTruths = cursor.fetchone()[0]
json_resp['ground_truths'] = groundTruths
# print(json_resp)
return JsonResponse(json_resp) | 46cb039c9811eac5a43c08776b59b8cef12c7133 | 10,844 |
from typing import Any
from typing import MutableMapping
from typing import Hashable
def to_dict(item: Any) -> MutableMapping[Hashable, Any]:
"""Converts 'item' to a MutableMapping.
Args:
item (Any): item to convert to a MutableMapping.
Raises:
TypeError: if 'item' is a type that is not registered.
Returns:
MutableMapping: derived from 'item'.
"""
if isinstance(item, MutableMapping):
return item
else:
raise TypeError(
f'item cannot be converted because it is an unsupported type: '
f'{type(item).__name__}') | c3ba483bde73a35ed036debcc4b87575b1c8b962 | 10,845 |
import copy
from xml.dom.minidom import parseString
def node(*args, **kwargs):
"""
args[0] -- a XML tag
args[1:] -- an array of children to append to the newly created node
or if a unicode arg is supplied it will be used to make a text node
kwargs -- attributes
returns a xml.dom.minidom.Element
"""
blocked_attributes = ['tag']
tag = args[0] if len(args) > 0 else kwargs['tag']
args = args[1:]
result = DetachableElement(tag)
unicode_args = [u for u in args if type(u) == unicode]
assert len(unicode_args) <= 1
parsed_string = False
# kwargs is an xml attribute dictionary,
# here we convert it to a xml.dom.minidom.Element
for k, v in iter(kwargs.items()):
if k in blocked_attributes:
continue
if k == 'toParseString':
if v is True and len(unicode_args) == 1:
parsed_string = True
# Add this header string so parseString can be used?
s = u'<?xml version="1.0" ?><'+tag+'>' + unicode_args[0]\
+ u'</'+tag+'>'
parsed_node = parseString(s.encode("utf-8")).documentElement
# Move node's children to the result Element
# discarding node's root
for child in parsed_node.childNodes:
result.appendChild(copy.deepcopy(child))
else:
result.setAttribute(k, v)
if len(unicode_args) == 1 and not parsed_string:
text_node = PatchedText()
text_node.data = unicode_args[0]
result.appendChild(text_node)
for n in args:
if type(n) == int or type(n) == float or type(n) == bytes:
text_node = PatchedText()
text_node.data = unicode(n)
result.appendChild(text_node)
elif type(n) is not unicode:
try:
result.appendChild(n)
except:
raise Exception(type(n), n)
return result | 2a0f9a953d07a114e0a426f4225fb3c5076513ee | 10,846 |
import math
def mylog10(x):
"""Return the base-10 logarithm of x."""
return math.log10(x) | d32113c16047175125e1b79c9ce0ea8822e4853c | 10,848 |
import numpy
def get_RGB_to_RGB_matrix(in_colorspace, out_colorspace, primaries_only=False):
"""Return RGB to RGB conversion matrix.
Args:
in_colorspace (str): input colorspace.
out_colorspace (str): output colorspace.
Kwargs:
primaries_only (bool): primaries matrix only, doesn't include white point.
Returns:
.numpy.matrix (3x3)
"""
# Get colorspace in to XYZ matrix
in_matrix = get_colorspace_matrix(in_colorspace, primaries_only)
# Get XYZ to colorspace out matrix
out_matrix = get_colorspace_matrix(out_colorspace, primaries_only, inv=True)
    # Return the matrix product of the 2 matrices
return numpy.dot(out_matrix, in_matrix) | 6c864fc45d254c38bc00a381f55dc3d2ad80aa9a | 10,849 |
import re
import string
def normalize_string(s):
"""Lower text and remove punctuation, articles and extra whitespace."""
def remove_articles(text):
regex = re.compile(r'\b(a|an|the)\b', re.UNICODE)
return re.sub(regex, ' ', text)
def white_space_fix(text):
return ' '.join(text.split())
def remove_punc(text):
exclude = set(string.punctuation)
return ''.join(ch for ch in text if ch not in exclude)
def lower(text):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(s)))) | 85a77dca1110460a1c445cc32f78cadb8c70ebd5 | 10,850 |
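A short usage sketch for the row above: case, punctuation, articles and repeated whitespace are all stripped, the usual preprocessing step before answer-string comparison.
print(normalize_string("The  quick, brown fox!"))  # -> "quick brown fox"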
def full_reverse(viewname, urlconf=None, args=None, kwargs=None, current_app=None,
scheme=None, domain=None, subdomain=None):
"""
First, obtains the absolute path of the URL matching given
``viewname`` with its parameters.
Then, prepends the path with the scheme name and the authority
part (domain and subdomain) and returns it.
Args::
viewname (str): Name of the URL pattern.
urlconf (str): Path of the module containing URLconfs.
args (list): Positional arguments of the URL pattern.
kwargs (dict): Keyword arguments of the URL pattern.
current_app (str): App identifier.
scheme (str): Scheme name (commonly called protocol).
domain (str): Domain name.
subdomain (str): Subdomain name.
Returns::
The full URL matching given view with its parameters.
Examples::
>>> full_reverse('client-detail-view', args=[client.id])
'http://example.com/clients/client/123/'
>>> full_reverse('client-list-view', scheme='https', subdomain='admin')
'https://admin.example.com/clients/'
Raises::
NoReverseMatch: If no URL pattern matches the given ``viewname``.
ValueError: If both ``args`` and ``kwargs`` are given.
"""
location = reverse(viewname, urlconf, args, kwargs, current_app)
return build_full_url(location, scheme, domain, subdomain) | b061cdc1369af0c60b95da58f262563e5ea93aa3 | 10,852 |
def get_object_or_none(model_class, **kwargs):
"""Identical to get_object_or_404, except instead of returning Http404,
this returns None.
"""
try:
return model_class.objects.get(**kwargs)
except model_class.DoesNotExist:
return None | d74b84e9186d9fb4faabb7eaa70f53672665d304 | 10,853 |
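A usage sketch for the row above; it assumes a configured Django project and uses Django's built-in auth User model purely as an illustration.
from django.contrib.auth.models import User
user = get_object_or_none(User, username="alice")
if user is None:
    print("no such user")  # no exception is raised for a missing row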
def GenerateTests():
"""Generate all tests."""
filelist = []
for ii in range(len(_GROUPS)):
filename = GenerateFilename(_GROUPS[ii])
filelist.append(filename)
WriteTest(filename, ii, ii + 1)
return filelist | 1160454ae0fab7008051bf9d4f5d2b94a74888b9 | 10,854 |
def shift_df_generator(empty_df, day_lower_hr_lim, day_upper_hr_lim):
"""Generate day and night dataframe.
Parameters
----------
empty_df : DataFrame
A DataFrame with timestamp and 'Temperature (Celsius)' with all zeros.
day_lower_hr_lim : int
The lower hour limit that constitutes the start of the day shift.
day_upper_hr_lim : int
The upper hour limit that constitutes the end of the day shift.
Returns
-------
day_df : DataFrame
A DataFrame containing only dayshift values.
night_df : DataFrame
A DataFrame containing only nightshift values.
"""
# Create 2 temporary dataframes (1 for dayshift, 1 for nightshift)
day_df = empty_df.loc[(empty_df['Timestamp'].dt.hour >= day_lower_hr_lim) &
(empty_df['Timestamp'].dt.hour < day_upper_hr_lim)]
# Night dataframe will consist of rows with indices not taken by day_df
night_df = empty_df[~empty_df.index.isin(day_df.index)]
return day_df, night_df | cc8f3675d88dc920fd1762894c859cd93a523aab | 10,855 |
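A usage sketch for the row above, assuming pandas is available: 24 hourly rows are split into a 07:00-19:00 day shift (12 rows) and the remaining night shift (12 rows).
import pandas as pd
empty_df = pd.DataFrame({
    "Timestamp": pd.date_range("2021-01-01", periods=24, freq="H"),
    "Temperature (Celsius)": [0.0] * 24,
})
day_df, night_df = shift_df_generator(empty_df, 7, 19)
print(len(day_df), len(night_df))  # 12 12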
import json
import regex
def json_loads(data, handle=False):
"""
    Wrapper around json.loads.
    :param data:
    :param handle: patch flag; False: default, no special handling; True: skip the regex
:return:
"""
if handle:
return json.loads(data.strip())
    # regex.sub needs (pattern, repl, string); collapsing doubled backslashes is the assumed intent here
    return json.loads(regex.sub(r"\\\\", r"\\", data.strip())) | 34156a594b203af041fba8da65601bb17da95a3e | 10,856 |
def elina_linexpr0_size(linexpr):
"""
Return the size of an ElinaLinexpr0.
Parameters
----------
linexpr : ElinaLinexpr0Ptr
Pointer to the ElinaLinexpr0 that needs to be checked for its size.
Returns
-------
    size_linexpr : c_size_t
Size of the ElinaLinexpr0.
"""
size_linexpr = None
try:
elina_linexpr0_size_c = elina_auxiliary_api.elina_linexpr0_size
elina_linexpr0_size_c.restype = c_size_t
elina_linexpr0_size_c.argtypes = [ElinaLinexpr0Ptr]
size_linexpr = elina_linexpr0_size_c(linexpr)
except:
print('Problem with loading/calling "elina_linexpr0_size" from "libelinaux.so"')
print('Make sure you are passing ElinaLinexpr0Ptr to the function')
return size_linexpr | b68a9874dd795876dae1ff2ffe3de98728e521a7 | 10,857 |
import pandas as pd
def _make_index(df, cols=META_IDX, unique=True):
"""Create an index from the columns/index of a dataframe or series"""
def _get_col(c):
try:
return df.index.get_level_values(c)
except KeyError:
return df[c]
index = list(zip(*[_get_col(col) for col in cols]))
if unique:
index = pd.unique(index)
return pd.MultiIndex.from_tuples(index, names=tuple(cols)) | 4356de2531f150c80bc364315ebf547fd345967f | 10,858 |
import json
def handler(event, context):
""" Lambda Handler.
Returns Hello World and the event and context objects
"""
print(event)
print(context)
return {
"body": json.dumps('Hello World!')
} | 561326fec784aa72a133b217f1e2cecaf12ec1ad | 10,859 |
from typing import List
import tensorflow as tf
def flatten_concat(tensors: List[tf.Tensor], batch_dims: int = 1) -> tf.Tensor:
"""Flatten given inputs and concatenate them."""
# tensors [(B, ...), (B, ...)]
flattened: List[tf.Tensor] = list() # [(B, X), (B, Y) ...]
for tensor in tensors:
final_dim = -1
if all(i is not None for i in tensor.shape[batch_dims:]):
# We know all the dimensions
final_dim = tf.reduce_prod(tensor.shape[batch_dims:])
flat_tensor = tf.reshape(
tensor, tf.concat([tf.shape(tensor)[:batch_dims], [final_dim]], 0)
)
flattened.append(flat_tensor)
return tf.concat(flattened, -1) | 1a4b9bbf12f75aff43273a7f44c659b7afecdddc | 10,860 |
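A usage sketch for the row above (TensorFlow 2.x): two batched tensors of shapes (5, 2, 3) and (5, 4) are flattened per example and concatenated into a (5, 10) tensor.
import tensorflow as tf
a = tf.zeros([5, 2, 3])
b = tf.zeros([5, 4])
print(flatten_concat([a, b]).shape)  # (5, 10)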
def analyze_friends (names,phones,all_areacodes,all_places):
"""
names: tuple of names
phones: tuple of phone numbers (cleaned)
all_areacodes: tuple of area codes (3char ints)
all_places: tuple of places
Goal: Print out how many friends you have and every unique state
"""
# For TESTING MAKE THE PHONE NUMBER FIRST 3 DIGITS THE SAME AS THE AREA CODE
# def get_unique_area_codes():
# """
# Returns a tuple of all unique area codes
# """
# area_codes = ()
# for ph in phones:
# if ph[0:3] not in area_codes:
# area_codes += (ph[0:3],)
# return area_codes
def get_States(some_areacodes):
"""
some_areacodes: tuple of area codes
Return a tuple of states ASSOCIATED with area codes
"""
states = ()
for ac in some_areacodes:
if ac not in all_areacodes:
states += ("BAD AREA CODE",)
else:
index = all_areacodes.index(ac)
states += (all_places[index],)
return states
num_friends = len(names) # Gets number of friends
# unique_areacodes = get_unique_area_codes()
unique_states = get_States(all_areacodes)
print("You have", num_friends, "friends!")
print("They live in", unique_states)
# Function ends with the print, no returns | b90f938c9c019dc331c38cafb36d1a7e0cb3f83f | 10,861 |
import haiku as hk
import jax.numpy as jnp
def fast_autoregressive_predict_fn(context, seq_len):
"""Given a context, autoregressively generate the rest of a sine wave."""
core = hk.LSTM(32)
dense = hk.Linear(1)
state = core.initial_state(context.shape[0])
# Unroll over the context using `hk.dynamic_unroll`.
# As before, we `hk.BatchApply` the Linear for efficiency.
context_outs, state = hk.dynamic_unroll(
core,
context,
state,
time_major=False,
)
context_outs = hk.BatchApply(dense)(context_outs)
# Now, unroll one step at a time using the running recurrent state.
ar_outs = []
x = context_outs[:, -1, :]
times = range(seq_len - context.shape[1])
for _ in times:
x, state = core(x, state)
x = dense(x)
ar_outs.append(x)
ar_outs = jnp.stack(ar_outs)
ar_outs = ar_outs.transpose(1, 0, 2)
return jnp.concatenate([context_outs, ar_outs], axis=1) | bf61799a8f34045cb214fd68095e1b9346fc797f | 10,862 |
def get_entities(corpus_name):
""" Load the dataset from the filesystem corresponding to corpus_name
(to see the list of allowed names, use utils.list_corpora() ), and extract
all annotated entities.
Returns a dict, in which each key is an entity type, which contains a list
of entity mentions in the corpus.
"""
r = read_conll(corpus_name); data = list(r)
data2 = [ [(w,iob) for ((w,p),iob) in d] for d in data]
data3 = [i for u in data2 for i in u]
tags = sentence_utils.get_tagset(data, with_prefix=True)
taglist = set([t[2:] for t in list(tags) if t !='O'])
entities = {}
for key in taglist:
entities[key] = []
data3.append((u'O',u'O'))
ent = []
entitytype = 'None'
for i,item in enumerate(data3[0:-1]):
if item[1] != 'O':
if item[1][0] == 'B':
ent = []
ent.append(item[0])
else: # == I
if item[1][0] != 'I':
raise ValueError("Should be I")
ent.append(item[0])
if data3[i+1][1][2:] != item[1][2:] or data3[i+1][1][0] == 'B':
#print i, item
entitytype = item[1][2:]
entities[entitytype].append(' '.join(ent))
return entities | 274d82c4d5ae978452aaa7cf3aae14a7b86b3030 | 10,863 |
def _reporthook(t):
"""``reporthook`` to use with ``urllib.request`` that prints the
process of the download.
Uses ``tqdm`` for progress bar.
**Reference:**
https://github.com/tqdm/tqdm
"""
last_b = [0]
def inner(b: int = 1, bsize: int = 1, tsize: int = None):
"""
:param b: Number of blocks just transferred [default: 1].
:param bsize: Size of each block (in tqdm units) [default: 1].
:param tsize: Total size (in tqdm units).
If [default: None] remains unchanged.
"""
if tsize is not None:
t.total = tsize
t.update((b - last_b[0]) * bsize)
last_b[0] = b
return inner | 9a4d527ff0b964e4220db7a22a522657947e91cb | 10,864 |
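A usage sketch for the row above, assuming tqdm is installed; the URL is a placeholder. The hook returned by `_reporthook` plugs straight into `urllib.request.urlretrieve`.
from urllib.request import urlretrieve
from tqdm import tqdm
url = "https://example.com/archive.zip"  # hypothetical URL
with tqdm(unit="B", unit_scale=True, desc="download") as t:
    urlretrieve(url, filename="archive.zip", reporthook=_reporthook(t))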
from myhdl import Signal, intbv, always_comb, always_seq
def serial_rx(sysclk, reset_n, n_stop_bits_i, half_baud_rate_tick_i, baud_rate_tick_i, recieve_i, data_o, ready_o):
""" Serial
This module implements a reciever serial interface
Ports:
-----
sysclk: sysclk input
reset_n: reset input
half_baud_rate_tick_i: half baud rate tick
baud_rate_tick_i: the baud rate
n_stop_bits_i: number of stop bits
recieve_i: rx
data_o: the data output in 1 byte
ready_o: indicates data_o is valid
-----
"""
END_OF_BYTE = 7
state_reg = Signal(t_State.ST_WAIT_START_BIT)
state = Signal(t_State.ST_WAIT_START_BIT)
data_reg = Signal(intbv(0, min = 0, max = 256))
data = Signal(intbv(0, min = 0, max = 256))
ready_reg = Signal(bool(0))
ready = Signal(bool(0))
count_8_bits_reg = Signal(intbv(0, min = 0, max = 8))
count_8_bits = Signal(intbv(0, min = 0, max = 8))
count_stop_bits_reg = Signal(intbv(0, min = 0, max = 8))
count_stop_bits = Signal(intbv(0, min = 0, max = 8))
@always_comb
def outputs():
data_o.next = data_reg
ready_o.next = ready_reg
@always_seq(sysclk.posedge, reset = reset_n)
def sequential_process():
state_reg.next = state
data_reg.next = data
ready_reg.next = ready
count_8_bits_reg.next = count_8_bits
count_stop_bits_reg.next = count_stop_bits
@always_comb
def combinational_process():
state.next = state_reg
data.next = data_reg
ready.next = ready_reg
count_8_bits.next = count_8_bits_reg
count_stop_bits.next = count_stop_bits_reg
if state_reg == t_State.ST_WAIT_START_BIT:
ready.next = False
if baud_rate_tick_i == True:
if recieve_i == False:
state.next = t_State.ST_GET_DATA_BITS
elif state_reg == t_State.ST_GET_DATA_BITS:
if baud_rate_tick_i == True:
data.next[count_8_bits_reg] = recieve_i
if count_8_bits_reg == END_OF_BYTE:
count_8_bits.next = 0
state.next = t_State.ST_GET_STOP_BITS
else:
count_8_bits.next = count_8_bits_reg + 1
state.next = t_State.ST_GET_DATA_BITS
elif state_reg == t_State.ST_GET_STOP_BITS:
if baud_rate_tick_i == True:
if count_stop_bits_reg == (n_stop_bits_i - 1):
count_stop_bits.next = 0
ready.next = True
state.next = t_State.ST_WAIT_START_BIT
else:
count_stop_bits.next = count_stop_bits_reg + 1
else:
raise ValueError("Undefined State")
return outputs, sequential_process, combinational_process | 62f215644004b61738db9fd249f28a4abc1391ea | 10,865 |
def zpad(x, l):
""" Left zero pad value `x` at least to length `l`.
>>> zpad('', 1)
'\x00'
>>> zpad('\xca\xfe', 4)
'\x00\x00\xca\xfe'
>>> zpad('\xff', 1)
'\xff'
>>> zpad('\xca\xfe', 2)
'\xca\xfe'
"""
return b'\x00' * max(0, l - len(x)) + x | 605aab22fa54f9df85397793c65d46dcf2ec3588 | 10,866 |
def clf2D_slope_intercept(coef=None, intercept=None, clf=None):
"""
    Gets the slope and intercept for the separating hyperplane of a linear
classifier fit on a two dimensional dataset.
Parameters
----------
coef:
The classification normal vector.
intercept:
The classifier intercept.
clf: subclass of sklearn.linear_model.base.LinearClassifierMixin
A sklearn classifier with attributes coef_ and intercept_
Output
------
slope, intercept
"""
if clf is not None:
coef = clf.coef_.reshape(-1)
intercept = float(clf.intercept_)
else:
assert coef is not None and intercept is not None
slope = - coef[0] / coef[1]
intercept = - intercept / coef[1]
return slope, intercept | 9376c34a3836ee028c4b0497e1088ddd50bb1fc6 | 10,867 |
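A usage sketch for the row above, assuming scikit-learn: fit a logistic regression on a 2D toy set and recover the separating line y = slope * x + intercept.
import numpy as np
from sklearn.linear_model import LogisticRegression
X = np.array([[0., 0.], [1., 0.], [0., 1.], [2., 2.], [2., 3.], [3., 2.]])
y = np.array([0, 0, 0, 1, 1, 1])
clf = LogisticRegression().fit(X, y)
slope, intercept = clf2D_slope_intercept(clf=clf)
print(slope, intercept)  # a line with negative slope separating the two blobs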
def build_driver_for_task(task):
"""Builds a composable driver for a given task.
Starts with a `BareDriver` object, and attaches implementations of the
various driver interfaces to it. They come from separate
driver factories and are configurable via the database.
:param task: The task containing the node to build a driver for.
:returns: A driver object for the task.
:raises: DriverNotFound if node.driver could not be found in the
"ironic.hardware.types" namespaces.
:raises: InterfaceNotFoundInEntrypoint if some node interfaces are set
to invalid or unsupported values.
    :raises: IncompatibleInterface if the requested implementation is not
        compatible with the hardware type.
"""
node = task.node
hw_type = get_hardware_type(node.driver)
check_and_update_node_interfaces(node, hw_type=hw_type)
bare_driver = driver_base.BareDriver()
_attach_interfaces_to_driver(bare_driver, node, hw_type)
return bare_driver | 5283b91e5a42fe7ebec20b91e0f1463abbc8b724 | 10,869 |
import torch
def evaluate(eval_model, criterion, ntokens, data_source, cnf):
"""
Evaluates the training loss of the given model
"""
eval_model.eval() # Turn on the evaluation mode
total_loss = 0.0
src_mask = generate_square_subsequent_mask(cnf.input_length).to(cnf.device)
with torch.no_grad():
for i in range(0, data_source.size(0) - 1, cnf.input_length):
data, targets = get_batch(data_source, i, cnf)
if data.size(0) != cnf.input_length:
src_mask = generate_square_subsequent_mask(data.size(0)).to(cnf.device)
output = eval_model(data, src_mask)
output_flat = output.view(-1, ntokens)
total_loss += len(data) * criterion(output_flat, targets).item()
return total_loss / (len(data_source) - 1) | 4570f5e7751683157ca8f3155052e484a1b3962e | 10,870 |
def km_to_meters(kilometers):
"""
(int or float) -> float
Takes a distance in kilometers and returns the distance in meters.
"""
return kilometers * 1000.0 | 33e40914c9d2b10009889ebfcbc543863a9ca363 | 10,871 |
from typing import List
from typing import Optional
from typing import Dict
from typing import Callable
from typing import Any
def build(plan: List[Step], instances_stock: Optional[Dict[Callable, Any]] = None):
""" Build instances dictionary from a plan """
instances_stock = instances_stock or {}
instances = {}
for cls, kwargs_spec in plan:
if cls in instances_stock:
instances[cls] = instances_stock[cls]
else:
instances[cls] = cls(**kwargs_spec.kwargs(instances))
return instances | a1b3ecc98097d9a5d998cca1484b22a4b83124ca | 10,872 |
import numpy as np
import tensorflow as tf  # requires the TF1-style API (tf.placeholder etc.)
import tensorflow_hub as hub
from bilm import BidirectionalLanguageModel, weight_layers
def make_module_spec(options, weight_file):
"""Makes a module spec.
Args:
options: LM hyperparameters.
weight_file: location of the hdf5 file with LM weights.
Returns:
A module spec object used for constructing a TF-Hub module.
"""
def module_fn():
"""Spec function for a token embedding module."""
# init
_bos_id = 256
_eos_id = 257
_bow_id = 258
_eow_id = 259
_pad_id = 260
_max_word_length = 50
_parallel_iterations = 10
_max_batch_size = 1024
id_dtype = tf.int32
id_nptype = np.int32
max_word_length = tf.constant(_max_word_length, dtype=id_dtype, name='max_word_length')
version = tf.constant('from_dp_1', dtype=tf.string, name='version')
        # the character representation of the begin/end of sentence characters
def _make_bos_eos(c):
r = np.zeros([_max_word_length], dtype=id_nptype)
r[:] = _pad_id
r[0] = _bow_id
r[1] = c
r[2] = _eow_id
return tf.constant(r, dtype=id_dtype)
bos_ids = _make_bos_eos(_bos_id)
eos_ids = _make_bos_eos(_eos_id)
def token2ids(token):
with tf.name_scope("token2ids_preprocessor"):
char_ids = tf.decode_raw(token, tf.uint8, name='decode_raw2get_char_ids')
char_ids = tf.cast(char_ids, tf.int32, name='cast2int_token')
char_ids = tf.strided_slice(char_ids, [0], [max_word_length - 2],
[1], name='slice2resized_token')
ids_num = tf.shape(char_ids)[0]
fill_ids_num = (_max_word_length - 2) - ids_num
pads = tf.fill([fill_ids_num], _pad_id)
bow_token_eow_pads = tf.concat([[_bow_id], char_ids, [_eow_id], pads],
0, name='concat2bow_token_eow_pads')
return bow_token_eow_pads
def sentence_tagging_and_padding(sen_dim):
with tf.name_scope("sentence_tagging_and_padding_preprocessor"):
sen = sen_dim[0]
dim = sen_dim[1]
extra_dim = tf.shape(sen)[0] - dim
sen = tf.slice(sen, [0, 0], [dim, max_word_length], name='slice2sen')
bos_sen_eos = tf.concat([[bos_ids], sen, [eos_ids]], 0, name='concat2bos_sen_eos')
bos_sen_eos_plus_one = bos_sen_eos + 1
bos_sen_eos_pads = tf.pad(bos_sen_eos_plus_one, [[0, extra_dim], [0, 0]],
"CONSTANT", name='pad2bos_sen_eos_pads')
return bos_sen_eos_pads
# Input placeholders to the biLM.
tokens = tf.placeholder(shape=(None, None), dtype=tf.string, name='ph2tokens')
sequence_len = tf.placeholder(shape=(None, ), dtype=tf.int32, name='ph2sequence_len')
tok_shape = tf.shape(tokens)
line_tokens = tf.reshape(tokens, shape=[-1], name='reshape2line_tokens')
with tf.device('/cpu:0'):
tok_ids = tf.map_fn(
token2ids,
line_tokens,
dtype=tf.int32, back_prop=False, parallel_iterations=_parallel_iterations,
name='map_fn2get_tok_ids')
tok_ids = tf.reshape(tok_ids, [tok_shape[0], tok_shape[1], -1], name='reshape2tok_ids')
with tf.device('/cpu:0'):
sen_ids = tf.map_fn(
sentence_tagging_and_padding,
(tok_ids, sequence_len),
dtype=tf.int32, back_prop=False, parallel_iterations=_parallel_iterations,
name='map_fn2get_sen_ids')
# Build the biLM graph.
bilm = BidirectionalLanguageModel(options, str(weight_file),
max_batch_size=_max_batch_size)
embeddings_op = bilm(sen_ids)
# Get an op to compute ELMo (weighted average of the internal biLM layers)
elmo_output = weight_layers('elmo_output', embeddings_op, l2_coef=0.0)
weighted_op = elmo_output['weighted_op']
mean_op = elmo_output['mean_op']
word_emb = elmo_output['word_emb']
lstm_outputs1 = elmo_output['lstm_outputs1']
lstm_outputs2 = elmo_output['lstm_outputs2']
hub.add_signature("tokens", {"tokens": tokens, "sequence_len": sequence_len},
{"elmo": weighted_op,
"default": mean_op,
"word_emb": word_emb,
"lstm_outputs1": lstm_outputs1,
"lstm_outputs2": lstm_outputs2,
"version": version})
# #########################Next signature############################# #
# Input placeholders to the biLM.
def_strings = tf.placeholder(shape=(None), dtype=tf.string)
def_tokens_sparse = tf.string_split(def_strings)
def_tokens_dense = tf.sparse_to_dense(sparse_indices=def_tokens_sparse.indices,
output_shape=def_tokens_sparse.dense_shape,
sparse_values=def_tokens_sparse.values,
default_value=''
)
def_mask = tf.not_equal(def_tokens_dense, '')
def_int_mask = tf.cast(def_mask, dtype=tf.int32)
def_sequence_len = tf.reduce_sum(def_int_mask, axis=-1)
def_tok_shape = tf.shape(def_tokens_dense)
def_line_tokens = tf.reshape(def_tokens_dense, shape=[-1], name='reshape2line_tokens')
with tf.device('/cpu:0'):
def_tok_ids = tf.map_fn(
token2ids,
def_line_tokens,
dtype=tf.int32, back_prop=False, parallel_iterations=_parallel_iterations,
name='map_fn2get_tok_ids')
def_tok_ids = tf.reshape(def_tok_ids, [def_tok_shape[0], def_tok_shape[1], -1], name='reshape2tok_ids')
with tf.device('/cpu:0'):
def_sen_ids = tf.map_fn(
sentence_tagging_and_padding,
(def_tok_ids, def_sequence_len),
dtype=tf.int32, back_prop=False, parallel_iterations=_parallel_iterations,
name='map_fn2get_sen_ids')
# Get ops to compute the LM embeddings.
def_embeddings_op = bilm(def_sen_ids)
# Get an op to compute ELMo (weighted average of the internal biLM layers)
def_elmo_output = weight_layers('elmo_output', def_embeddings_op, l2_coef=0.0, reuse=True)
def_weighted_op = def_elmo_output['weighted_op']
def_mean_op = def_elmo_output['mean_op']
def_word_emb = def_elmo_output['word_emb']
def_lstm_outputs1 = def_elmo_output['lstm_outputs1']
def_lstm_outputs2 = def_elmo_output['lstm_outputs2']
hub.add_signature("default", {"strings": def_strings},
{"elmo": def_weighted_op,
"default": def_mean_op,
"word_emb": def_word_emb,
"lstm_outputs1": def_lstm_outputs1,
"lstm_outputs2": def_lstm_outputs2,
"version": version})
return hub.create_module_spec(module_fn) | 2293f00186438a6cc3318be6a25ab5223b8e9a91 | 10,873 |
import math
def get_initial_scoreboard():
"""
Retrieve the initial scoreboard (first pages of global and student views).
If a user is logged in, the initial pages will instead be those on which
that user appears, and their group scoreboards will also be returned.
Returns: dict of scoreboard information
"""
def get_user_pos(scoreboard, tid):
for pos, team in enumerate(scoreboard):
if team["tid"] == tid:
return pos
return 1
user = None
if api.user.is_logged_in():
user = api.user.get_user()
result = {'tid': 0, 'groups': []}
global_board = api.stats.get_all_team_scores(include_ineligible=True)
result['global'] = {
'name': 'global',
'pages': math.ceil(len(global_board) / scoreboard_page_len),
'start_page': 1
}
if user is None:
result['global']['scoreboard'] = global_board[:scoreboard_page_len]
else:
result['tid'] = user['tid']
global_pos = get_user_pos(global_board, user["tid"])
start_slice = math.floor(global_pos / 50) * 50
result['global']['scoreboard'] = global_board[start_slice:
start_slice + 50]
result['global']['start_page'] = math.ceil((global_pos + 1) / 50)
result['country'] = user["country"]
student_board = api.stats.get_all_team_scores()
student_pos = get_user_pos(student_board, user["tid"])
start_slice = math.floor(student_pos / 50) * 50
result['student'] = {
'name': 'student',
'pages': math.ceil(len(student_board) / scoreboard_page_len),
'scoreboard': student_board[start_slice:start_slice + 50],
'start_page': math.ceil((student_pos + 1) / 50),
}
for group in api.team.get_groups(user['tid']):
# this is called on every scoreboard pageload and should be
# cached to support large groups
group_board = api.stats.get_group_scores(gid=group['gid'])
group_pos = get_user_pos(group_board, user["tid"])
start_slice = math.floor(group_pos / 50) * 50
result['groups'].append({
'gid':
group['gid'],
'name':
group['name'],
'scoreboard':
group_board[start_slice:start_slice + 50],
'pages':
math.ceil(len(group_board) / scoreboard_page_len),
'start_page':
math.ceil((group_pos + 1) / 50),
})
return result | 3e5998a0cc94a6c99ca58336ef0a350a4170240e | 10,874 |
async def resolve(qname, rdtype=dns.rdatatype.A, rdclass=dns.rdataclass.IN,
tcp=False, source=None, raise_on_no_answer=True,
source_port=0, lifetime=None, search=None, backend=None):
"""Query nameservers asynchronously to find the answer to the question.
This is a convenience function that uses the default resolver
object to make the query.
See ``dns.asyncresolver.Resolver.resolve`` for more information on the
parameters.
"""
return await get_default_resolver().resolve(qname, rdtype, rdclass, tcp,
source, raise_on_no_answer,
source_port, lifetime, search,
backend) | 90a79f18d5c8887cbede733e7e05778ea78b36eb | 10,875 |
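A usage sketch for the row above; it needs dnspython 2.x and network access, and drives the coroutine with asyncio.
import asyncio
import dns.asyncresolver

async def main():
    answer = await dns.asyncresolver.resolve("example.com", "A")
    for rr in answer:
        print(rr.to_text())

asyncio.run(main())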
import re
from xml.dom.minidom import Node, parseString
def compare_xml(want, got):
"""Tries to do a 'xml-comparison' of want and got. Plain string
comparison doesn't always work because, for example, attribute
ordering should not be important. Comment nodes are not considered in the
comparison.
Based on http://codespeak.net/svn/lxml/trunk/src/lxml/doctestcompare.py
"""
_norm_whitespace_re = re.compile(r'[ \t\n][ \t\n]+')
def norm_whitespace(v):
return _norm_whitespace_re.sub(' ', v)
def child_text(element):
return ''.join([c.data for c in element.childNodes
if c.nodeType == Node.TEXT_NODE])
def children(element):
return [c for c in element.childNodes
if c.nodeType == Node.ELEMENT_NODE]
def norm_child_text(element):
return norm_whitespace(child_text(element))
def attrs_dict(element):
return dict(element.attributes.items())
def check_element(want_element, got_element):
if want_element.tagName != got_element.tagName:
return False
if norm_child_text(want_element) != norm_child_text(got_element):
return False
if attrs_dict(want_element) != attrs_dict(got_element):
return False
want_children = children(want_element)
got_children = children(got_element)
if len(want_children) != len(got_children):
return False
for want, got in zip(want_children, got_children):
if not check_element(want, got):
return False
return True
def first_node(document):
for node in document.childNodes:
if node.nodeType != Node.COMMENT_NODE:
return node
want, got = strip_quotes(want, got)
want = want.replace('\\n', '\n')
got = got.replace('\\n', '\n')
# If the string is not a complete xml document, we may need to add a
# root element. This allow us to compare fragments, like "<foo/><bar/>"
if not want.startswith('<?xml'):
wrapper = '<root>%s</root>'
want = wrapper % want
got = wrapper % got
# Parse the want and got strings, and compare the parsings.
want_root = first_node(parseString(want))
got_root = first_node(parseString(got))
return check_element(want_root, got_root) | 6632c723c2461dcb34b7e4f0bca0b4d096b5def8 | 10,876 |
def _tf_equal(a, b):
"""Overload of "equal" for Tensors."""
return gen_math_ops.equal(a, b) | 899cff2abe9613d798fb59190c1860ef6a6599d7 | 10,877 |
def faq():
"""FAQ page for SciNet"""
return render_template("faq.html") | 67bbdcc713789f71b0506206ef8a4f2a56b3f1a1 | 10,878 |
def render_table(sheet, header, width, data, header_style, data_style, tt_id_style):
"""Рендерим страницу"""
# Render table header
for i in range(len(header)):
sheet.write(0, i, header[i], header_style)
sheet.col(i).width = width[i]
sheet.row(1).height = 2500
# Render table data
i = 1
for d in data:
sheet.row(i + 1).height = 2500
cols = [i, 'name', 'location', 'link', 'theme']
for col in range(len(cols)):
if col == 0:
sheet.write(i, col, i, tt_id_style)
elif col == 1:
sheet.write(i, col, d[cols[col]], tt_id_style)
else:
try:
if col == 9:
sheet.write(i, col, (round((d[cols[col]] / 30), 2)), data_style)
else:
sheet.write(i, col, d[cols[col]], data_style)
except KeyError:
sheet.write(i, col, 0, data_style)
i = i + 1
return sheet | bc181ff96319daef3cad10e5072124a6c43172a6 | 10,879 |
def texsafe(value):
""" Returns a string with LaTeX special characters stripped/escaped out """
special = [
[ "\\xc5", 'A'], #'\\AA'
[ "\\xf6", 'o'],
[ "&", 'and'], #'\\"{o}'
]
for char in ['\\', '^', '~', '%', "'", '"']: # these mess up things
value = value.replace(char, '')
for char in ['#','$','_', '{', '}', '<', '>']: # these can be escaped properly
value = value.replace(char, '\\' + char)
for char, new_char in special:
value = eval(repr(value).replace(char, new_char))
return value | b40b60a34629f75dfdac298bd2937af52ef797b1 | 10,880 |
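A short usage sketch for the row above: characters that break LaTeX are either dropped or escaped.
print(texsafe("50% of $10 #1"))  # prints: 50 of \$10 \#1  (% is stripped; $ and # are escaped)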
def match_against_host_software_profile(db_session, hostname, software_packages):
"""
Given a software package list, return an array of dictionaries indicating if the
software package matches any software package defined in the host software profile package list.
"""
results = []
system_option = SystemOption.get(db_session)
if system_option.check_host_software_profile:
host = get_host(db_session, hostname)
if host is not None and len(software_packages) > 0:
software_profile = get_software_profile_by_id(db_session, host.software_profile_id)
if software_profile is not None:
software_profile_package_dict = get_matchable_package_dict(software_profile.packages.split(','))
software_package_dict = get_matchable_package_dict(software_packages)
for software_package, pattern in software_package_dict.items():
matched = True if pattern in software_profile_package_dict.values() else False
results.append({'software_package': software_package, 'matched': matched})
return results | 30a1bbf8a548a9578324a60aa3bc18998457671a | 10,881 |
from copy import deepcopy
import numpy as np
from typing import Mapping
from typing import Any
def get_inputs_by_op(op: Op, store: Mapping[str, Any], copy_on_write: bool = False) -> Any:
"""Retrieve the necessary input data from the data dictionary in order to run an `op`.
Args:
op: The op to run.
store: The system's data dictionary to draw inputs out of.
copy_on_write: Whether to copy read-only data to make it writeable before returning it.
Returns:
Input data to be fed to the `op` forward function.
"""
if op.in_list:
data = []
else:
data = None
if op.inputs:
data = []
for key in op.inputs:
elem = store[key]
if copy_on_write and isinstance(elem, np.ndarray) and not elem.flags.writeable:
elem = deepcopy(elem)
store[key] = elem
data.append(elem)
if not op.in_list:
data = data[0]
return data | 1f3ee5bfe98793c4e8002f2a7f7ea834bf0d93c0 | 10,882 |
from functools import wraps
from typing import Type
def finalize_post(func, store: Type['ParameterStore']):
"""Finalizes the store prior to executing the function
Parameters
----------
func : callable
The function to wrap.
store : ParameterStore
The parameter store to finalize.
Returns
-------
callable
The wrapped function.
Raises
------
MissingParameterException
If there's a parameter missing from the required parameters in
the given `store`.
"""
@wraps(func)
def wrapper(*args, **kwargs):
ret = func(*args, **kwargs)
if not store.final:
store.finalize()
return ret
return wrapper | 92195a0005b94dad7606609f99da4c824e39d5b1 | 10,883 |
def searchaftertext(filename, startterm, searchterm):
"""Start search after a certain text in a file"""
#print startterm
#print searchterm
startline = findLastString (filename, startterm)
searchtermfound = findLastString (filename, searchterm)
if searchtermfound > startline:
return True
return False | 32adc5bebab42ac721c04c8f16bceea53f9e0d79 | 10,884 |
import jax.numpy as jnp
def vec_list_to_tensor(vec_list):
"""Convert list to vector tensor."""
return jnp.stack(vec_list, axis=-1) | 8e4dd60199c17dade87392f059412e00ae9defcc | 10,885 |
import datetime
def to_ecma_datetime_string(dt, default_timezone=local):
"""
Convert a python datetime into the string format defined by ECMA-262.
See ECMA international standard: ECMA-262 section 15.9.1.15
``assume_local_time`` if true will assume the date time is in local time if the object is a naive date time object;
else assumes the time value is utc.
"""
assert isinstance(dt, datetime.datetime)
dt = get_tz_aware_dt(dt, default_timezone).astimezone(utc)
return "%4i-%02i-%02iT%02i:%02i:%02i.%03iZ" % (
dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second, dt.microsecond / 1000) | bec3a52976552a0c0cc9ff5afde5bbf5578ff020 | 10,886 |
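A usage sketch for the row above; it assumes the surrounding module provides the `utc` timezone object and the `get_tz_aware_dt` helper that the function relies on.
dt = datetime.datetime(2021, 3, 1, 12, 30)
print(to_ecma_datetime_string(dt, utc))  # `utc` is the module-level timezone referenced above; -> "2021-03-01T12:30:00.000Z"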
def _logfile_readme() -> str:
"""Returns a string containing a 'how to read this logfile' message.
Returns
-------
str
Returns a formatted paragraph-long message with tips on reading log file output.
"""
line1 = "Messages are displayed below in the format"
line2 = " <DATE> <TIME> <LOGGER NAME> @ <FILE>:<LINE> - <LEVEL> - <FUNCTION>:<MESSAGE>"
line3 = "where <DATE> is the date in 'YYYY-MM-DD' format, <TIME> is the time in 'HH:MM:SS,milliseconds' format, <LOGGER NAME> is the name of the logger that generated the message (which should be the __name__ of the file where the logger was initialized), <FILE> and <LINE> is the file name and line number where the message was generated, <LEVEL> is the priority level that the message was generated at, <FUNCTION> is the name of the function that the message was generated inside, and <MESSAGE> is the actual message that was generated. "
message = f"{line1}\n\n{line2}\n\n{line3}\n\n"
return message | 5e418b20df1ebb486d0b1c3ecf38d6c72ae8a5a7 | 10,887 |
def taxon_lookup(es, body, index, taxonomy_index_template, opts, return_type):
"""Query elasticsearch for a taxon."""
taxa = []
with tolog.DisableLogger():
res = es.search_template(body=body, index=index, rest_total_hits_as_int=True)
if "hits" in res and res["hits"]["total"] > 0:
if return_type == "taxon_id":
taxa = [hit["_source"]["taxon_id"] for hit in res["hits"]["hits"]]
else:
taxa = [hit for hit in res["hits"]["hits"]]
else:
template = taxonomy_index_template(opts["taxonomy-source"].lower(), opts)
index = template["index_name"]
with tolog.DisableLogger():
res = es.search_template(
body=body, index=index, rest_total_hits_as_int=True
)
if "hits" in res and res["hits"]["total"] > 0:
if return_type == "taxon_id":
taxa = [hit["_source"]["taxon_id"] for hit in res["hits"]["hits"]]
else:
taxa = [hit for hit in res["hits"]["hits"]]
return taxa | 52604947804581f633603d0728a68bc16f198503 | 10,888 |
async def get_south_services(request):
"""
Args:
request:
Returns:
list of all south services with tracked assets and readings count
:Example:
curl -X GET http://localhost:8081/fledge/south
"""
if 'cached' in request.query and request.query['cached'].lower() == 'false':
_get_installed_plugins.cache_clear()
storage_client = connect.get_storage_async()
cf_mgr = ConfigurationManager(storage_client)
try:
south_cat = await cf_mgr.get_category_child("South")
south_categories = [nc["key"] for nc in south_cat]
except:
return web.json_response({'services': []})
response = await _services_with_assets(storage_client, cf_mgr, south_categories)
return web.json_response({'services': response}) | a134bcf3c899212afc4b805ddaa9a19db901578a | 10,889 |
def filter_bam_file(bamfile, chromosome, outfile):
"""
filter_bam_file uses samtools to read a <bamfile> and read only
the reads that are mapped to <chromosome>.
It saves the filtered reads into <outfile>.
"""
inputs = [bamfile]
outputs = [outfile]
options = {
'cores': 1,
'memory': '4g',
'account': 'NChain',
'walltime': '01:00:00'
}
directory = "/".join(outfile.split("/")[:-1])
spec = '''
source /com/extra/samtools/1.6.0/load.sh
mkdir -p {dirc}
samtools view -b {infile} {chrom} > {out}
'''.format(infile=bamfile, chrom=chromosome, out=outfile, dirc=directory)
return inputs, outputs, options, spec | 317e1283d4722483e4bc98080ef99abd9876d045 | 10,890 |
def import_teachers():
"""
Import the teachers from Moodle.
:return: Amount of imported users.
:rtype: int
"""
course_list = dict(Course.objects.values_list("courseId", "pk"))
teachers_list = parse_get_teachers(get_teachers(list(course_list.keys())))
teacher_group = create_auth_group()
users = create_teachers(teachers_list)
add_courses_and_group_to_users(course_list, teacher_group, teachers_list, users)
return users.count() | 25b03c5b79d348171d23bce54a67ebbab2911440 | 10,891 |
import numpy as np
def baryvel(dje, deq):
"""
Calculate helio- and barycentric velocity.
.. note:: The "JPL" option present in IDL is not provided here.
Parameters
----------
dje : float
Julian ephemeris date
deq : float
Epoch of mean equinox of helio- and barycentric velocity output.
If `deq` is zero, `deq` is assumed to be equal to `dje`.
Returns
-------
dvelh : array
Heliocentric velocity vector [km/s].
dvelb : array
Barycentric velocity vector [km/s].
Notes
-----
.. note:: This function was ported from the IDL Astronomy User's Library.
:IDL - Documentation:
pro baryvel, dje, deq, dvelh, dvelb, JPL = JPL
NAME:
BARYVEL
PURPOSE:
Calculates heliocentric and barycentric velocity components of Earth.
EXPLANATION:
BARYVEL takes into account the Earth-Moon motion, and is useful for
radial velocity work to an accuracy of ~1 m/s.
CALLING SEQUENCE:
BARYVEL, dje, deq, dvelh, dvelb, [ JPL = ]
INPUTS:
DJE - (scalar) Julian ephemeris date.
DEQ - (scalar) epoch of mean equinox of dvelh and dvelb. If deq=0
then deq is assumed to be equal to dje.
OUTPUTS:
DVELH: (vector(3)) heliocentric velocity component. in km/s
DVELB: (vector(3)) barycentric velocity component. in km/s
The 3-vectors DVELH and DVELB are given in a right-handed coordinate
system with the +X axis toward the Vernal Equinox, and +Z axis
toward the celestial pole.
OPTIONAL KEYWORD SET:
JPL - if /JPL set, then BARYVEL will call the procedure JPLEPHINTERP
to compute the Earth velocity using the full JPL ephemeris.
The JPL ephemeris FITS file JPLEPH.405 must exist in either the
current directory, or in the directory specified by the
environment variable ASTRO_DATA. Alternatively, the JPL keyword
can be set to the full path and name of the ephemeris file.
A copy of the JPL ephemeris FITS file is available in
http://idlastro.gsfc.nasa.gov/ftp/data/
PROCEDURES CALLED:
Function PREMAT() -- computes precession matrix
JPLEPHREAD, JPLEPHINTERP, TDB2TDT - if /JPL keyword is set
NOTES:
Algorithm taken from FORTRAN program of Stumpff (1980, A&A Suppl, 41,1)
Stumpf claimed an accuracy of 42 cm/s for the velocity. A
comparison with the JPL FORTRAN planetary ephemeris program PLEPH
found agreement to within about 65 cm/s between 1986 and 1994
If /JPL is set (using JPLEPH.405 ephemeris file) then velocities are
given in the ICRS system; otherwise in the FK4 system.
EXAMPLE:
Compute the radial velocity of the Earth toward Altair on 15-Feb-1994
using both the original Stumpf algorithm and the JPL ephemeris
IDL> jdcnv, 1994, 2, 15, 0, jd ;==> JD = 2449398.5
IDL> baryvel, jd, 2000, vh, vb ;Original algorithm
==> vh = [-17.07243, -22.81121, -9.889315] ;Heliocentric km/s
==> vb = [-17.08083, -22.80471, -9.886582] ;Barycentric km/s
IDL> baryvel, jd, 2000, vh, vb, /jpl ;JPL ephemeris
==> vh = [-17.07236, -22.81126, -9.889419] ;Heliocentric km/s
==> vb = [-17.08083, -22.80484, -9.886409] ;Barycentric km/s
IDL> ra = ten(19,50,46.77)*15/!RADEG ;RA in radians
IDL> dec = ten(08,52,3.5)/!RADEG ;Dec in radians
IDL> v = vb[0]*cos(dec)*cos(ra) + $ ;Project velocity toward star
vb[1]*cos(dec)*sin(ra) + vb[2]*sin(dec)
REVISION HISTORY:
Jeff Valenti, U.C. Berkeley Translated BARVEL.FOR to IDL.
W. Landsman, Cleaned up program sent by Chris McCarthy (SfSU) June 1994
Converted to IDL V5.0 W. Landsman September 1997
Added /JPL keyword W. Landsman July 2001
Documentation update W. Landsman Dec 2005
"""
# Define constants
dc2pi = 2 * np.pi
cc2pi = 2 * np.pi
dc1 = 1.0
dcto = 2415020.0
dcjul = 36525.0 # days in Julian year
dcbes = 0.313
dctrop = 365.24219572 # days in tropical year (...572 insig)
dc1900 = 1900.0
AU = 1.4959787e8
# Constants dcfel(i,k) of fast changing elements.
dcfel = [1.7400353e00, 6.2833195099091e02, 5.2796e-6, 6.2565836e00, 6.2830194572674e02, -2.6180e-6, 4.7199666e00, 8.3997091449254e03, -1.9780e-5, 1.9636505e-1, 8.4334662911720e03, -5.6044e-5,
4.1547339e00, 5.2993466764997e01, 5.8845e-6, 4.6524223e00, 2.1354275911213e01, 5.6797e-6, 4.2620486e00, 7.5025342197656e00, 5.5317e-6, 1.4740694e00, 3.8377331909193e00, 5.6093e-6]
dcfel = np.resize(dcfel, (8, 3))
# constants dceps and ccsel(i,k) of slowly changing elements.
dceps = [4.093198e-1, -2.271110e-4, -2.860401e-8]
ccsel = [1.675104e-2, -4.179579e-5, -1.260516e-7, 2.220221e-1, 2.809917e-2, 1.852532e-5, 1.589963e00, 3.418075e-2, 1.430200e-5, 2.994089e00, 2.590824e-2, 4.155840e-6, 8.155457e-1, 2.486352e-2, 6.836840e-6, 1.735614e00, 1.763719e-2, 6.370440e-6, 1.968564e00, 1.524020e-2, -2.517152e-6, 1.282417e00, 8.703393e-3, 2.289292e-5, 2.280820e00,
1.918010e-2, 4.484520e-6, 4.833473e-2, 1.641773e-4, -4.654200e-7, 5.589232e-2, -3.455092e-4, -7.388560e-7, 4.634443e-2, -2.658234e-5, 7.757000e-8, 8.997041e-3, 6.329728e-6, -1.939256e-9, 2.284178e-2, -9.941590e-5, 6.787400e-8, 4.350267e-2, -6.839749e-5, -2.714956e-7, 1.348204e-2, 1.091504e-5, 6.903760e-7, 3.106570e-2, -1.665665e-4, -1.590188e-7]
ccsel = np.resize(ccsel, (17, 3))
# Constants of the arguments of the short-period perturbations.
dcargs = [5.0974222e0, -7.8604195454652e2, 3.9584962e0, -5.7533848094674e2, 1.6338070e0, -1.1506769618935e3, 2.5487111e0, -3.9302097727326e2, 4.9255514e0, -5.8849265665348e2, 1.3363463e0, -5.5076098609303e2, 1.6072053e0, -5.2237501616674e2, 1.3629480e0, -
1.1790629318198e3, 5.5657014e0, -1.0977134971135e3, 5.0708205e0, -1.5774000881978e2, 3.9318944e0, 5.2963464780000e1, 4.8989497e0, 3.9809289073258e1, 1.3097446e0, 7.7540959633708e1, 3.5147141e0, 7.9618578146517e1, 3.5413158e0, -5.4868336758022e2]
dcargs = np.resize(dcargs, (15, 2))
# Amplitudes ccamps(n,k) of the short-period perturbations.
ccamps = \
[-2.279594e-5, 1.407414e-5, 8.273188e-6, 1.340565e-5, -2.490817e-7, -3.494537e-5, 2.860401e-7, 1.289448e-7, 1.627237e-5, -1.823138e-7, 6.593466e-7, 1.322572e-5, 9.258695e-6, -4.674248e-7, -3.646275e-7, 1.140767e-5, -2.049792e-5, -4.747930e-6, -2.638763e-6, -1.245408e-7, 9.516893e-6, -2.748894e-6, -1.319381e-6, -4.549908e-6, -1.864821e-7, 7.310990e-6, -1.924710e-6, -8.772849e-7, -3.334143e-6, -1.745256e-7, -2.603449e-6, 7.359472e-6, 3.168357e-6, 1.119056e-6, -1.655307e-7, -3.228859e-6,
1.308997e-7, 1.013137e-7, 2.403899e-6, -3.736225e-7, 3.442177e-7, 2.671323e-6, 1.832858e-6, -2.394688e-7, -3.478444e-7, 8.702406e-6, -8.421214e-6, -1.372341e-6, -1.455234e-6, -4.998479e-8, -1.488378e-6, -1.251789e-5, 5.226868e-7, -2.049301e-7, 0.e0, -8.043059e-6, -2.991300e-6, 1.473654e-7, -3.154542e-7, 0.e0, 3.699128e-6, -3.316126e-6, 2.901257e-7, 3.407826e-7, 0.e0, 2.550120e-6, -1.241123e-6, 9.901116e-8, 2.210482e-7, 0.e0, -6.351059e-7, 2.341650e-6, 1.061492e-6, 2.878231e-7, 0.e0]
ccamps = np.resize(ccamps, (15, 5))
# Constants csec3 and ccsec(n,k) of the secular perturbations in longitude.
ccsec3 = -7.757020e-8
ccsec = [1.289600e-6, 5.550147e-1, 2.076942e00, 3.102810e-5, 4.035027e00, 3.525565e-1,
9.124190e-6, 9.990265e-1, 2.622706e00, 9.793240e-7, 5.508259e00, 1.559103e01]
ccsec = np.resize(ccsec, (4, 3))
# Sidereal rates.
dcsld = 1.990987e-7 # sidereal rate in longitude
ccsgd = 1.990969e-7 # sidereal rate in mean anomaly
# Constants used in the calculation of the lunar contribution.
cckm = 3.122140e-5
ccmld = 2.661699e-6
ccfdi = 2.399485e-7
# Constants dcargm(i,k) of the arguments of the perturbations of the motion
# of the moon.
dcargm = [5.1679830e0, 8.3286911095275e3, 5.4913150e0, -
7.2140632838100e3, 5.9598530e0, 1.5542754389685e4]
dcargm = np.resize(dcargm, (3, 2))
# Amplitudes ccampm(n,k) of the perturbations of the moon.
ccampm = [1.097594e-1, 2.896773e-7, 5.450474e-2, 1.438491e-7, -2.223581e-2, 5.083103e-8,
1.002548e-2, -2.291823e-8, 1.148966e-2, 5.658888e-8, 8.249439e-3, 4.063015e-8]
ccampm = np.resize(ccampm, (3, 4))
# ccpamv(k)=a*m*dl,dt (planets), dc1mme=1-mass(earth+moon)
ccpamv = [8.326827e-11, 1.843484e-11, 1.988712e-12, 1.881276e-12]
dc1mme = 0.99999696e0
# Time arguments.
dt = (dje - dcto) / dcjul
tvec = np.array([1e0, dt, dt * dt])
# Values of all elements for the instant(aneous?) dje.
temp = idlMod(np.dot(dcfel, tvec), dc2pi)
dml = temp[0]
forbel = temp[1:8]
g = forbel[0] # old fortran equivalence
deps = idlMod(np.sum(tvec * dceps), dc2pi)
sorbel = idlMod(np.dot(ccsel, tvec), dc2pi)
e = sorbel[0] # old fortran equivalence
# Secular perturbations in longitude.
dummy = np.cos(2.0)
sn = np.sin(idlMod(np.dot(ccsec[::, 1:3], tvec[0:2]), cc2pi))
# Periodic perturbations of the emb (earth-moon barycenter).
pertl = np.sum(ccsec[::, 0] * sn) + (dt * ccsec3 * sn[2])
pertld = 0.0
pertr = 0.0
pertrd = 0.0
for k in smo.range(15):
a = idlMod((dcargs[k, 0] + dt * dcargs[k, 1]), dc2pi)
cosa = np.cos(a)
sina = np.sin(a)
pertl = pertl + ccamps[k, 0] * cosa + ccamps[k, 1] * sina
pertr = pertr + ccamps[k, 2] * cosa + ccamps[k, 3] * sina
if k < 11:
pertld = pertld + (ccamps[k, 1] * cosa -
ccamps[k, 0] * sina) * ccamps[k, 4]
pertrd = pertrd + (ccamps[k, 3] * cosa -
ccamps[k, 2] * sina) * ccamps[k, 4]
# Elliptic part of the motion of the emb.
phi = (e * e / 4e0) * (((8e0 / e) - e) * np.sin(g) + 5 *
np.sin(2 * g) + (13 / 3e0) * e * np.sin(3 * g))
f = g + phi
sinf = np.sin(f)
cosf = np.cos(f)
dpsi = (dc1 - e * e) / (dc1 + e * cosf)
phid = 2 * e * ccsgd * ((1 + 1.5 * e * e) * cosf +
e * (1.25 - 0.5 * sinf * sinf))
psid = ccsgd * e * sinf / np.sqrt(dc1 - e * e)
# Perturbed heliocentric motion of the emb.
d1pdro = dc1 + pertr
drd = d1pdro * (psid + dpsi * pertrd)
drld = d1pdro * dpsi * (dcsld + phid + pertld)
dtl = idlMod((dml + phi + pertl), dc2pi)
dsinls = np.sin(dtl)
dcosls = np.cos(dtl)
dxhd = drd * dcosls - drld * dsinls
dyhd = drd * dsinls + drld * dcosls
# Influence of eccentricity, evection and variation on the geocentric
# motion of the moon.
pertl = 0.0
pertld = 0.0
pertp = 0.0
pertpd = 0.0
for k in smo.range(3):
a = idlMod((dcargm[k, 0] + dt * dcargm[k, 1]), dc2pi)
sina = np.sin(a)
cosa = np.cos(a)
pertl = pertl + ccampm[k, 0] * sina
pertld = pertld + ccampm[k, 1] * cosa
pertp = pertp + ccampm[k, 2] * cosa
pertpd = pertpd - ccampm[k, 3] * sina
# Heliocentric motion of the earth.
tl = forbel[1] + pertl
sinlm = np.sin(tl)
coslm = np.cos(tl)
sigma = cckm / (1.0 + pertp)
a = sigma * (ccmld + pertld)
b = sigma * pertpd
dxhd = dxhd + a * sinlm + b * coslm
dyhd = dyhd - a * coslm + b * sinlm
dzhd = -sigma * ccfdi * np.cos(forbel[2])
# Barycentric motion of the earth.
dxbd = dxhd * dc1mme
dybd = dyhd * dc1mme
dzbd = dzhd * dc1mme
for k in smo.range(4):
plon = forbel[k + 3]
pomg = sorbel[k + 1]
pecc = sorbel[k + 9]
tl = idlMod((plon + 2.0 * pecc * np.sin(plon - pomg)), cc2pi)
dxbd = dxbd + ccpamv[k] * (np.sin(tl) + pecc * np.sin(pomg))
dybd = dybd - ccpamv[k] * (np.cos(tl) + pecc * np.cos(pomg))
dzbd = dzbd - ccpamv[k] * sorbel[k + 13] * np.cos(plon - sorbel[k + 5])
# Transition to mean equator of date.
dcosep = np.cos(deps)
dsinep = np.sin(deps)
dyahd = dcosep * dyhd - dsinep * dzhd
dzahd = dsinep * dyhd + dcosep * dzhd
dyabd = dcosep * dybd - dsinep * dzbd
dzabd = dsinep * dybd + dcosep * dzbd
# Epoch of mean equinox (deq) of zero implies that we should use
# Julian ephemeris date (dje) as epoch of mean equinox.
if deq == 0:
dvelh = AU * np.array([dxhd, dyahd, dzahd])
dvelb = AU * np.array([dxbd, dyabd, dzabd])
return dvelh, dvelb
# General precession from epoch dje to deq.
deqdat = (dje - dcto - dcbes) / dctrop + dc1900
prema = np.transpose(premat(deqdat, deq, FK4=True))
dvelh = AU * np.dot([dxhd, dyahd, dzahd], prema)
dvelb = AU * np.dot([dxbd, dyabd, dzabd], prema)
return dvelh, dvelb | 76f6dccceb697996541748704b293de6cfe77cf6 | 10,892 |
def uniform(low=0.0, high=1.0, size=None):
    This function is equivalent to `nlcpy.random.RandomState.uniform`.
See Also
--------
nlcpy.random.RandomState.uniform : Draws samples from a uniform distribution.
"""
rs = generator._get_rand()
return rs.uniform(low, high, size=size) | 48de653a1721e5602eeefc2bf5182e0100759a31 | 10,893 |
def parse_time_interval(interval_str):
"""Convert a human-readable time interval to a tuple of start and end value.
Args:
interval_str: (`str`) A human-readable str representing an interval
(e.g., "[10us, 20us]", "<100s", ">100ms"). Supported time suffixes are
us, ms, s.
Returns:
`Interval` object where start and end are in microseconds.
Raises:
ValueError: if the input is not valid.
"""
str_interval = _parse_interval(interval_str)
interval_start = 0
interval_end = float("inf")
if str_interval.start:
interval_start = parse_readable_time_str(str_interval.start)
if str_interval.end:
interval_end = parse_readable_time_str(str_interval.end)
if interval_start > interval_end:
raise ValueError(
"Invalid interval %s. Start must be before end of interval." %
interval_str)
return Interval(interval_start, str_interval.start_included,
interval_end, str_interval.end_included) | 4edbc180722ddb84f6f2fae1e9854db14571f2d3 | 10,894 |
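A usage sketch for the row above, assuming the returned Interval exposes .start and .end: both bounds come back in microseconds.
ival = parse_time_interval("[10us, 20ms]")
print(ival.start, ival.end)  # expected: 10 20000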
import numpy as np
def submatrix(M, x):
"""If x is an array of integer row/col numbers and M a matrix,
extract the submatrix which is the all x'th rows and cols.
i.e. A = submatrix(M,x) => A_ij = M_{x_i}{x_j}
"""
return M[np.ix_(x,x)] | ba3aab45b77d8f7462fd0f2a29c96fb573618d62 | 10,895 |
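A short usage sketch for the row above: keep rows and columns 0 and 2 of a 4x4 matrix.
import numpy as np
M = np.arange(16).reshape(4, 4)
print(submatrix(M, [0, 2]))
# [[ 0  2]
#  [ 8 10]]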
def inventory_report(products: list) -> str:
"""Gives a detailed report on created products"""
unique_names, average_price, average_weight, average_flam = _build_report_metrics(products)
report = f'''ACME CORPORATION OFFICIAL INVENTORY REPORT
Unique product names: {unique_names}
Average price: {average_price}
Average weight: {average_weight}
Average flammability: {average_flam}'''
    print(report)
    return report | 96080f5aff04ae8d8578be3940f756b471fdce48 | 10,896 |
def iou(a, b):
""" Calculates intersection over union (IOU) over two tuples """
(a_x1, a_y1), (a_x2, a_y2) = a
(b_x1, b_y1), (b_x2, b_y2) = b
a_area = (a_x2 - a_x1) * (a_y2 - a_y1)
b_area = (b_x2 - b_x1) * (b_y2 - b_y1)
dx = min(a_x2, b_x2) - max(a_x1, b_x1)
dy = min(a_y2, b_y2) - max(a_y1, b_y1)
if (dx>=0) and (dy>=0):
overlap = dx * dy
iou = overlap / (a_area + b_area - overlap)
return iou
return 0 | 0e72d00a672c430cce69246cb7d7889ae41ae216 | 10,897 |
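A worked example for the row above: two 4x4 boxes overlap in a 2x2 square, so overlap = 4, union = 16 + 16 - 4 = 28 and IOU = 4/28 ≈ 0.143.
a = ((0, 0), (4, 4))
b = ((2, 2), (6, 6))
print(iou(a, b))  # 0.14285714285714285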
def svn_path_is_empty(*args):
"""svn_path_is_empty(char path) -> int"""
return _core.svn_path_is_empty(*args) | bf6db11940db6767c50a002104a528cf8c7a5363 | 10,898 |
def compute_lorentz(Phi, omega, sigma):
"""In a time-harmonic discretization with quantities
.. math::
\\begin{align}
A &= \\Re(a \\exp(\\text{i} \\omega t)),\\\\
B &= \\Re(b \\exp(\\text{i} \\omega t)),
\\end{align}
the time-average of :math:`A\\times B` over one period is
.. math::
\\overline{A\\times B} = \\frac{1}{2} \\Re(a \\times b^*),
see http://www.ece.rutgers.edu/~orfanidi/ewa/ch01.pdf.
Since the Lorentz force generated by the current :math:`J` in the magnetic
field :math:`B` is
.. math::
F_L = J \\times B,
its time average is
.. math::
\\overline{F_L} = \\frac{1}{2} \\Re(j \\times b^*).
With
.. math::
J &= \\Re(\\exp(\\text{i} \\omega t) j e_{\\theta}),\\\\
B &= \\Re\\left(
\\exp(i \\omega t) \\left(
-\\frac{\\text{d}\\phi}{\\text{d}z} e_r
+ \\frac{1}{r} \\frac{\\text{d}(r\\phi)}{\\text{d}r} e_z
\\right)
\\right),
we have
.. math::
\\overline{F_L}
&= \\frac{1}{2} \\Re\\left(j \\frac{d\\phi^*}{dz} e_z
+ \\frac{j}{r} \\frac{d(r\\phi^*)}{dr} e_r\\right)\\\\
&= \\frac{1}{2}
\\Re\\left(\\frac{j}{r} \\nabla(r\\phi^*)\\right)\\\\
In the workpiece, we can assume
.. math::
j = -\\text{i} \\sigma \\omega \\phi
which gives
.. math::
\\begin{align*}
\\overline{F_L}
&= \\frac{\\sigma\\omega}{2r} \\Im\\left(
\\phi \\nabla(r \\phi^*)
\\right)\\\\
&= \\frac{\\sigma\\omega}{2r} \\left(
\\Im(\\phi) \\nabla(r \\Re(\\phi))
-\\Re(\\phi) \\nabla(r \\Im(\\phi))
\\right)
\\end{align*}
"""
mesh = Phi[0].function_space().mesh()
r = SpatialCoordinate(mesh)[0]
return (
0.5
* sigma
* omega
/ r
* (+Phi[1] * grad(r * Phi[0]) - Phi[0] * grad(r * Phi[1]))
) | 5b82df614d8245565e3427277ace2e0ba3fd27c5 | 10,900 |
def play_db(cursor, query_string, lookup_term):
"""
Given a query string and a term, retrieve the list of plays associated with
that term
"""
play_list = []
try:
cursor.execute(query_string, [lookup_term])
play_res = cursor.fetchall()
except DatabaseError as err:
LOG.error(
"Error retrieving plays for %s: %s", lookup_term, err
)
return play_list
for row in play_res:
play_list.append(row)
if not play_list:
LOG.info("No plays for %s", lookup_term)
return play_list | 35ee0f96e122cddf65dbce7b127a8123b703b8f8 | 10,901 |
def find_nocc(two_arr, n):
"""
    Given two sorted arrays of the SAME length and a number,
find the nth smallest number a_n and use two indices to indicate
the numbers that are no larger than a_n.
n can be real. Take the floor.
"""
l = len(two_arr[0])
if n >= 2 * l: return l, l
if n == 0: return 0, 0
res, n = n % 1, int(n)
lo, hi = max(0, n - l - 1), min(l - 1, n - 1)
while lo <= hi:
mid = int((lo + hi) / 2) # image mid is the right answer
if mid + 1 < l and n - mid - 2 >= 0:
if two_arr[0][mid + 1] < two_arr[1][n - mid - 2]:
lo = mid + 1
continue
if n - mid - 1 < l:
if two_arr[1][n - mid - 1] < two_arr[0][mid]:
hi = mid
continue
break
if n - mid - 1 >= l or mid + 1 < l and two_arr[0][mid + 1] < two_arr[1][n - mid - 1]:
return mid + res + 1, n - mid - 1
else:
return mid + 1, n - mid - 1 + res | 42c8998e24095f03b0d873a0c9ad1f63facab8cb | 10,902 |
import json
def get_dict(str_of_dict: str, order_key='', sort_dict=False) -> list:
"""Function returns the list of dicts:
    :param str_of_dict: string obtained from the DB
(e.g. {"genre_id": 10, "genre_name": "name1"}, {"genre_id": 11, "genre_name": "name12"},...),
:param order_key: the key by which dictionaries will be sorted (required if flag 'sort_dict=True'),
:param sort_dict: flag for sorting the dictionary (boolean).
:return: list of dicts (e.g. [{"genre_id": 10, "genre_name": "name1"}, {"genre_id": 11, "genre_name": "name12"},...])"""
result_dict = list()
if str_of_dict:
result_dict = json.loads('[' + str_of_dict + ']')
if sort_dict and order_key:
try:
result_dict = sorted(result_dict, key=lambda i: i[order_key])
return result_dict
except KeyError:
return result_dict
return result_dict
else:
return result_dict | 81d20db2dbe929693994b5b94aa971850ef9c838 | 10,903 |
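A small usage sketch for get_dict, reusing the docstring's example-style string (illustrative only):
    s = '{"genre_id": 11, "genre_name": "name12"}, {"genre_id": 10, "genre_name": "name1"}'
    print(get_dict(s, order_key="genre_id", sort_dict=True))
    # [{'genre_id': 10, 'genre_name': 'name1'}, {'genre_id': 11, 'genre_name': 'name12'}]
    print(get_dict(""))   # empty input -> []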
import hashlib
import struct
def get_richpe_hash(pe):
"""Computes the RichPE hash given a file path or data.
If the RichPE hash is unable to be computed, returns None.
Otherwise, returns the computed RichPE hash.
If both file_path and data are provided, file_path is used by default.
Source : https://github.com/RichHeaderResearch/RichPE
"""
if pe.RICH_HEADER is None:
return None
# Get list of @Comp.IDs and counts from Rich header
# Elements in rich_fields at even indices are @Comp.IDs
# Elements in rich_fields at odd indices are counts
rich_fields = pe.RICH_HEADER.values
if len(rich_fields) % 2 != 0:
return None
# The RichPE hash of a file is computed by computing the md5 of specific
# metadata within the Rich header and the PE header
md5 = hashlib.md5()
# Update hash using @Comp.IDs and masked counts from Rich header
while len(rich_fields):
compid = rich_fields.pop(0)
count = rich_fields.pop(0)
mask = 2 ** (count.bit_length() // 2 + 1) - 1
count |= mask
md5.update(struct.pack("<L", compid))
md5.update(struct.pack("<L", count))
# Update hash using metadata from the PE header
md5.update(struct.pack("<L", pe.FILE_HEADER.Machine))
md5.update(struct.pack("<L", pe.FILE_HEADER.Characteristics))
md5.update(struct.pack("<L", pe.OPTIONAL_HEADER.Subsystem))
md5.update(struct.pack("<B", pe.OPTIONAL_HEADER.MajorLinkerVersion))
md5.update(struct.pack("<B", pe.OPTIONAL_HEADER.MinorLinkerVersion))
md5.update(struct.pack("<L", pe.OPTIONAL_HEADER.MajorOperatingSystemVersion))
md5.update(struct.pack("<L", pe.OPTIONAL_HEADER.MinorOperatingSystemVersion))
md5.update(struct.pack("<L", pe.OPTIONAL_HEADER.MajorImageVersion))
md5.update(struct.pack("<L", pe.OPTIONAL_HEADER.MinorImageVersion))
md5.update(struct.pack("<L", pe.OPTIONAL_HEADER.MajorSubsystemVersion))
md5.update(struct.pack("<L", pe.OPTIONAL_HEADER.MinorSubsystemVersion))
return md5.hexdigest() | 30e5437f36f76a6225eaba579d55218440ab46b9 | 10,904 |
def get_input(label, default=None):
"""Prompt the user for input.
:param label: The label of the prompt.
:param label: str
:param default: The default value.
:rtype: str | None
"""
if default:
_label = "%s [%s]: " % (label, default)
else:
_label = "%s: " % label
print("")
value = input(_label)
if not value:
return default
return value | 11de813f0fcfd16f1198299030656c07392f95c9 | 10,905 |
import logging
def get_pretrain_data_text(data, batch_size, num_ctxes, shuffle,
num_buckets, vocab, tokenizer, max_seq_length, short_seq_prob,
masked_lm_prob, max_predictions_per_seq, whole_word_mask,
num_parts=1, part_idx=0, num_workers=1):
"""Get a data iterator from raw text documents.
Parameters
----------
batch_size : int
The batch size per GPU.
num_ctxes : int
The number of GPUs.
shuffle : bool
Whether to shuffle the data.
num_buckets : int
The number of buckets for the FixedBucketSampler for training.
vocab : BERTVocab
The vocabulary.
tokenizer : BERTTokenizer or BERTSPTokenizer
The tokenizer.
max_seq_length : int
The hard limit of maximum sequence length of sentence pairs.
short_seq_prob : float
The probability of sampling sequences shorter than the max_seq_length.
masked_lm_prob : float
The probability of replacing texts with masks/random words/original words.
max_predictions_per_seq : int
The hard limit of the number of predictions for masked words
whole_word_mask : bool
Whether to use whole word masking.
num_parts : int
The number of partitions for the dataset.
part_idx : int
The index of the partition to read.
num_workers : int
        The number of worker processes for dataset construction.
"""
num_files = len(nlp.utils.glob(data))
logging.info('%d files are found.', num_files)
assert num_files >= num_parts, \
'The number of training text files must be no less than the number of ' \
'workers/partitions (%d). Only %d files at %s are found.'%(num_parts, num_files, data)
dataset_params = {'tokenizer': tokenizer, 'max_seq_length': max_seq_length,
'short_seq_prob': short_seq_prob, 'masked_lm_prob': masked_lm_prob,
'max_predictions_per_seq': max_predictions_per_seq, 'vocab':vocab,
'whole_word_mask': whole_word_mask}
dataset_fn = SimpleDatasetFn(BERTPretrainDataset, dataset_params)
sampler_fn = BERTSamplerFn(batch_size, shuffle, num_ctxes, num_buckets)
dataloader_fn = BERTDataLoaderFn(num_ctxes, vocab)
split_sampler = nlp.data.SplitSampler(num_files, num_parts=num_parts, part_index=part_idx)
dataloader = DatasetLoader(data, split_sampler, dataset_fn, sampler_fn, dataloader_fn,
num_dataset_workers=num_workers)
return dataloader | 986ba7afc87f8ce5b054816de365e1c2793f6876 | 10,906 |
import tensorflow as tf
def define_app_flags(scenario_num):
""" Define the TensorFlow application-wide flags
Returns:
FLAGS: TensorFlow flags
"""
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_boolean('save_model', False, 'save model to disk')
tf.app.flags.DEFINE_string('summaries_dir', './logs', 'tensorboard summaries')
tf.app.flags.DEFINE_string('ckpt_dir', './saved_models/', 'check point dir')
tf.app.flags.DEFINE_string('scenario_num', scenario_num, 'Scenario number')
tf.app.flags.DEFINE_string('errors_dir', './errors/', 'Errors dir')
return FLAGS | de79e076db37f7981633b3b2b38db6b462155709 | 10,907 |
def longitude_validator(value):
"""Perform longitude validation.
"""
valid = -180 < value < 180
if not valid:
        raise ValidationError(_('longitude not in range of -180 < value < 180'))
return value | 866c45da71d1b4d6b2d5bd60e331caecb365f297 | 10,908 |
def test_create_batch_multi_record_update_fails(shared_zone_test_context):
"""
Test recordsets with multiple records cannot be edited in batch (relies on config, skip-prod)
"""
client = shared_zone_test_context.ok_vinyldns_client
ok_zone = shared_zone_test_context.ok_zone
# record sets to setup
a_update_name = generate_record_name()
a_update_fqdn = a_update_name + ".ok."
a_update = get_recordset_json(ok_zone, a_update_name, "A", [{"address": "1.1.1.1"}, {"address": "1.1.1.2"}], 200)
txt_update_name = generate_record_name()
txt_update_fqdn = txt_update_name + ".ok."
txt_update = get_recordset_json(ok_zone, txt_update_name, "TXT", [{"text": "hello"}, {"text": "again"}], 200)
a_delete_name = generate_record_name()
a_delete_fqdn = a_delete_name + ".ok."
a_delete = get_recordset_json(ok_zone, a_delete_name, "A", [{"address": "1.1.1.1"}, {"address": "1.1.1.2"}], 200)
txt_delete_name = generate_record_name()
txt_delete_fqdn = txt_delete_name + ".ok."
txt_delete = get_recordset_json(ok_zone, txt_delete_name, "TXT", [{"text": "hello"}, {"text": "again"}], 200)
batch_change_input = {
"comments": "this is optional",
"changes": [
get_change_A_AAAA_json(a_update_fqdn, change_type="DeleteRecordSet"),
get_change_A_AAAA_json(a_update_fqdn, address="1.2.3.4"),
get_change_A_AAAA_json(a_update_fqdn, address="4.5.6.7"),
get_change_TXT_json(txt_update_fqdn, change_type="DeleteRecordSet"),
get_change_TXT_json(txt_update_fqdn, text="some-multi-text"),
get_change_TXT_json(txt_update_fqdn, text="more-multi-text"),
get_change_A_AAAA_json(a_delete_fqdn, change_type="DeleteRecordSet"),
get_change_TXT_json(txt_delete_fqdn, change_type="DeleteRecordSet"),
# adding an HVD so this will fail if accidentally run against wrong config
get_change_A_AAAA_json("high-value-domain")
]
}
to_delete = []
try:
for rs in [a_update, txt_update, a_delete, txt_delete]:
create_rs = client.create_recordset(rs, status=202)
to_delete.append(client.wait_until_recordset_change_status(create_rs, 'Complete'))
response = client.create_batch_change(batch_change_input, status=400)
def existing_err(name, type):
return 'RecordSet with name {} and type {} cannot be updated in a single '.format(name, type) + \
'Batch Change because it contains multiple DNS records (2).'
def new_err(name, type):
return 'Multi-record recordsets are not enabled for this instance of VinylDNS. ' \
'Cannot create a new record set with multiple records for inputName {} and type {}.'.format(name,
type)
assert_error(response[0], error_messages=[existing_err(a_update_fqdn, "A")])
assert_error(response[1], error_messages=[existing_err(a_update_fqdn, "A"), new_err(a_update_fqdn, "A")])
assert_error(response[2], error_messages=[existing_err(a_update_fqdn, "A"), new_err(a_update_fqdn, "A")])
assert_error(response[3], error_messages=[existing_err(txt_update_fqdn, "TXT")])
assert_error(response[4],
error_messages=[existing_err(txt_update_fqdn, "TXT"), new_err(txt_update_fqdn, "TXT")])
assert_error(response[5],
error_messages=[existing_err(txt_update_fqdn, "TXT"), new_err(txt_update_fqdn, "TXT")])
assert_error(response[6], error_messages=[existing_err(a_delete_fqdn, "A")])
assert_error(response[7], error_messages=[existing_err(txt_delete_fqdn, "TXT")])
finally:
clear_recordset_list(to_delete, client) | b2fe0cea07af57996058cdb0f9a31cbbf11a88ce | 10,911 |
from collections import OrderedDict
import seaborn as sns
def _build_colormap(data, hue, palette, order):
"""Builds a colormap."""
if hue is None:
color_map = {}
else:
if palette is None:
palette = sns.color_palette()
if order is None:
order = data[hue].unique()
color_map = OrderedDict(zip(order, palette))
return color_map | 82294634a1295fc68e5d3afb05fa00d83dfdc6ea | 10,912 |
def f_is_oword(*args):
"""
f_is_oword(F, arg2) -> bool
See 'is_oword()'
@param F (C++: flags_t)
"""
return _ida_bytes.f_is_oword(*args) | a6d75a65b527ebdd029a5d3e65a756bcbb86561a | 10,913 |
import pandas as pd
def aggregate_CSV_files(data_path):
""" Aggregate the data in CSV files, specified in the config file, into a
single pandas DataFrame object. """
merge_queue = []
for path in data_path:
data_df = pd.read_csv(path, na_values = ['.']);
data_df.index = pd.to_datetime(data_df['DATE'], format='%Y-%m-%d')
data_df = data_df[data_df.index > c.START_DATE]
del data_df['DATE']
merge_queue.append(data_df)
aggregate_df = pd.concat(merge_queue, sort = True, axis = 1)
aggregate_df.sort_index(inplace = True)
return aggregate_df | 281ca2a5e84e2dfbb2c2269083d0d2be5654fb75 | 10,914 |
import numpy as np
def dR2(angle: np_float) -> np.ndarray:
"""Derivative of a rotation matrix around the second axis with respect to the rotation angle
Args:
angle: Scalar, list or numpy array of angles in radians.
Returns:
Numpy array: Rotation matrix or array of rotation matrices.
"""
zero = _zero(angle)
cosA, sinA = np.cos(angle), np.sin(angle)
return _roll_axes(np.array([[-sinA, zero, -cosA], [zero, zero, zero], [cosA, zero, -sinA]])) | 5080c78c46505ed9e155fb76ae4a9be3b6e5d685 | 10,915 |
def clear_predecessor(n):
"""
Sets n's predecessor to None
:param n: node on which to call clear_predecessor
:return: string of response
"""
def clear(node):
node.predecessor = None
n.event_queue.put(clear)
resp_header = {"status": STATUS_OK}
return utils.create_request(resp_header, {}) | e5c071572799c8df6b629d0bb1cbde4d106a4e95 | 10,917 |
def get_local_variable_influence(model, form_data):
"""
"""
row = format_data_to_row(form_data)
model_obj = read_model(model.path, model.file_type)
df = load_dataset_sample(model.dataset, nrows=50)
df = df[model.dataset.model_columns]
explainer = load_model_explainer_from_obj(model_obj, df)
prediction = list()
prediction.append(model_obj.predict(row)[0])
if hasattr(model_obj, 'predict_proba'):
prediction.append(model_obj.predict_proba(row)[0])
base_value = explainer.explainer.expected_value
variable_influence = compute_local_influence(explainer, row)
return variable_influence, prediction, base_value | 403c2e89937a7b8bfbeb1ce44d49147fe9c35ddc | 10,919 |
def submit_experiment(body, **kwargs):
"""Submit an experiment
:param body: experiment payload
:type body: dict | bytes
:rtype: StatusSerializer
"""
serializer = ExperimentSerializer.from_dict(body)
check_experiment_permission(serializer, kwargs["token_info"])
stub = get_experiments_services_stub()
response = stub.Submit(job_pb2.Experiment(**body))
if response.status != 200:
return ErrorSerializer(status=response.status, title="Api Error",
detail=response.message), response.status
return StatusSerializer.from_dict(util.deserialize_protobuf(response)) | 21b91876f1d9ffa4b55c296a2e1dc9a2c66e1026 | 10,920 |
def obj_assert_check(cls):
"""
The body of the assert check for an accessor
We allow all versions of add/delete/modify to use the same accessors
"""
if cls in ["of_flow_modify", "of_flow_modify_strict",
"of_flow_delete", "of_flow_delete_strict",
"of_flow_add"]:
return "IS_FLOW_MOD_SUBTYPE(obj->object_id)"
else:
return "obj->object_id == %s" % cls.upper() | 4ebddebdd87c0bdb28e7687ec2b0da623507f89e | 10,921 |
from typing import List
import hashlib
def ripemd160(data: List[int]) -> List[int]:
"""
:param data:
:return:
"""
try:
bytes_data = bytes(data)
except TypeError:
raise NativeContractException
digest = hashlib.new("ripemd160", bytes_data).digest()
padded = 12 * [0] + list(digest)
return list(bytearray(bytes(padded))) | bfa29479b6d2633c0075462f558f21562fc96a04 | 10,922 |
def has_duplicates(s: list) -> bool:
"""Returns True if any element appears more than once in a sequence."""
d = dict()
for char in s:
if char in d:
return True
d[char] = 1
return False | f702e53cded0c18a0e1b7cffb58bccbff3386bce | 10,923 |
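A minimal sanity check for has_duplicates (inputs chosen arbitrarily):
    print(has_duplicates([1, 2, 3, 2]))   # True, 2 appears twice
    print(has_duplicates(["a", "b"]))     # False, all elements are unique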
def get_from_chain(J, domain, nof_coefficients, ncap=10000, disc_type='sp_quad', interval_type='lin',
mapping_type='lan_bath', permute=None, residual=True, low_memory=True, stable=False,
get_trafo=False, force_sp=False, mp_dps=30, sort_by=None, **kwargs):
"""
Returns star coefficients, constructed from chain coefficients via diagonalization
see chain.get and convert_chain_to_star for an explanation of the arguments.
Sort_by sorts the couplings and energies (if passed and not None), see utils.sorting.sort_star_coefficients
for details on the parameters.
:returns: gamma (couplings), xi (energies), info dict from both the conversion and the chain mapping
if get_trafo is set True, the dict only contains the latest transformation (from chain to star here)
"""
c0, omega, t, info = get_chain(J, domain, nof_coefficients, ncap=ncap, disc_type=disc_type,
interval_type=interval_type, mapping_type=mapping_type, permute=permute,
residual=residual, low_memory=low_memory, stable=stable,
get_trafo=False, **kwargs)
gamma, xi, trafo_info = convert_chain_to_star(c0, omega, t, force_sp=force_sp, mp_dps=mp_dps, get_trafo=get_trafo)
gamma, xi = sort_star_coefficients(gamma, xi, sort_by)
    info.update(trafo_info)
    return gamma, xi, info | d5cd09a088d4946015eb9556b0fed3ca5be55187 | 10,924
from typing import Type
def factory(kernel_type, cuda_type=None, gpu_mode=None, *args, **kwargs):
"""Return an instance of a kernel corresponding to the requested kernel_type"""
if cuda_type is None:
cuda_type = default.dtype
if gpu_mode is None:
gpu_mode = default.gpu_mode
# turn enum string to enum object
if isinstance(kernel_type, str):
try:
for c in [' ', '-']: # chars to be replaced for normalization
kernel_type = kernel_type.replace(c, '_')
kernel_type = Type[kernel_type.upper()]
        except KeyError:
raise TypeError('kernel_type ' + kernel_type + ' could not be found')
if not isinstance(kernel_type, Type):
raise TypeError('kernel_type must be an instance of KernelType Enum')
if kernel_type in [Type.UNDEFINED, Type.NO_KERNEL]:
return None
res = None
hash = AbstractKernel.hash(kernel_type, cuda_type, gpu_mode, *args, **kwargs)
if hash not in instance_map:
res = kernel_type.value(gpu_mode=gpu_mode, cuda_type=cuda_type, *args, **kwargs) # instantiate
instance_map[hash] = res
else:
res = instance_map[hash]
assert res is not None
return res | 2d9bf5fb0fd45e367d31b76656dfc611912f7202 | 10,925 |
from typing import Dict
from typing import Union
import numpy as np
from sklearn.preprocessing import MinMaxScaler, RobustScaler, StandardScaler
def init_scaler(
scaler_parameters: Dict, fit_data: np.ndarray,
) -> Union[MinMaxScaler, StandardScaler, RobustScaler]:
"""Initialize and return scaler.
Args:
scaler_parameters: Parameters of scaler.
fit_data: Data to be fit.
Returns:
Selected scaler.
"""
scaler_type = scaler_parameters["scaler_type"]
if scaler_type == "RobustScaler":
scaler = RobustScaler()
elif scaler_type == "StandardScaler":
scaler = StandardScaler()
else:
scaler = MinMaxScaler()
scaler.fit(fit_data)
return scaler | 18f15e8e6bebb32ad659636f46ee2e5f54ccc69d | 10,926 |
def get_dynamic_resource(previous_length: str):
"""Get the job with job_name.
Returns:
None.
"""
name_to_node_usage = redis_controller.get_resource_usage(
previous_length=int(previous_length)
)
return name_to_node_usage | 05efd928f66b8237e39bd04df2482d8b24259700 | 10,927 |
def _margo_bin(exe=""):
"""Returns the path of the margo executable.
"""
return gs.home_path("bin", exe or INSTALL_EXE) | a540357e84411ec84820163966440d75ae142d8b | 10,928 |
import numpy as np
def csl_density(basis, mini_cell, plane):
"""
returns the CSL density of a given plane and its d_spacing.
"""
plane = np.array(plane)
c = csl_vec(basis, mini_cell)
h = np.dot(c.T, plane)
h = smallest_integer(h)[0]
h = common_divisor(h)[0]
g = np.linalg.inv(np.dot(c.T, c))
h_norm = np.sqrt(np.dot(h.T, np.dot(g, h)))
density = 1/(h_norm * np.linalg.det(c))
return abs(density), 1 / h_norm | 852ba976f1bfc9b1fa30ba660f8b660e023bed94 | 10,929 |
def mw_Av():
"""Build the A_V attenuation by the MW towards M31."""
curve = SF11ExtinctionCurve()
ratio = curve['Landolt V'] # A_V / E(B-V) from T6 of SF2011
return ratio * 0.07 | ff53a5c302945ab6020a3734950bc8449c522faa | 10,930 |
import numpy as np
import tifffile
import skimage.io as skio
def load_data(filenames):
"""Load a single file or sequence of files using skimage.io"""
filenames = [filenames, ] if isinstance(filenames, str) else filenames
loadfunc = tifffile.imread if all(f.lower().endswith("tif")
for f in filenames) else skio.imread
if len(filenames) > 1:
return np.array([loadfunc(f) for f in filenames], dtype=float)
elif len(filenames) == 1:
return loadfunc(filenames[0]).astype(float)
else:
raise Exception("load_data received an empty list") | be9c451c5aa3469a2bcaceb2fb6ab8ab09195794 | 10,932 |
def GetInverseMatrix(matrix):
"""
    :param matrix: the 2x2 matrix to invert (note: the input is modified in place)
    :return: the inverse matrix (2x2 matrices only)
"""
matrix[0, 0], matrix[1, 1] = -matrix[1, 1], -matrix[0, 0]
matrix = matrix / -(matrix[0, 0] * matrix[1, 1] - matrix[0, 1] * matrix[1, 0])
return matrix | c4fdba364cc6b73a3b72a40f980a0fa402a1968f | 10,933 |
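A hedged usage sketch for GetInverseMatrix; it assumes NumPy and passes a copy because the function mutates its argument:
    import numpy as np
    m = np.array([[4.0, 7.0], [2.0, 6.0]])
    inv = GetInverseMatrix(m.copy())
    assert np.allclose(inv, np.linalg.inv(np.array([[4.0, 7.0], [2.0, 6.0]])))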
import re
def petsc_memory_stats(log):
"""Return the memory stats section of PETSc's -log_view output as a dictionary."""
# first search for the 'Memory usage' header, then match anything that follows
# after the first line starting with --- up until the first line starting with =====
# re.DOTALL makes . match newlines as well
try:
        memory_profile = next(re.finditer(r'Memory usage is given in bytes:.*?\n---[^\n].*?\n(.*?)\n===========', log, re.DOTALL)).group(1)
except StopIteration:
# no memory stats section found (did you run with -log_view ?)
return None
stats = {}
for line in memory_profile.split('\n'):
try:
            (object, profile) = next(re.finditer(r'(\s.*?)([0-9]+.*)', line)).groups()
except StopIteration:
continue
profile = profile.split()
stats[object.strip()] = [int(x) for x in profile[0:3]] + [float(profile[3]),]
return stats | c7756190ae2a4c25f5cf7a16764ace06da95b0f6 | 10,934 |
import numpy as np
import torch
def track2result(bboxes, labels, ids, num_classes):
"""Convert tracking results to a list of numpy arrays.
Args:
bboxes (torch.Tensor | np.ndarray): shape (n, 5)
labels (torch.Tensor | np.ndarray): shape (n, )
ids (torch.Tensor | np.ndarray): shape (n, )
num_classes (int): class number, including background class
Returns:
list(ndarray): tracking results of each class.
"""
valid_inds = ids > -1
bboxes = bboxes[valid_inds]
labels = labels[valid_inds]
ids = ids[valid_inds]
if bboxes.shape[0] == 0:
return [np.zeros((0, 6), dtype=np.float32) for i in range(num_classes)]
else:
if isinstance(bboxes, torch.Tensor):
bboxes = bboxes.cpu().numpy()
labels = labels.cpu().numpy()
ids = ids.cpu().numpy()
return [
np.concatenate((ids[labels == i, None], bboxes[labels == i, :]),
axis=1) for i in range(num_classes)
] | ae2dda3abd32d8b6c3dd0fc0c8c5f65268a8e747 | 10,935 |
import numpy as np
import pandas as pd
def build_result_dataframe(gh, pred, df):
""" Construct a datarame that contain the prediction.
:param gh: the geohas6 code of the prediction
:param pred: numpy array of prediction
:param df: the dataframe used for prediction
:returns: prediction dataframe
:rtype: pandas.core.frame.DataFrame
"""
# generate a sequence of timestamp
start_time = df.timestamp.values.max() + np.timedelta64(15, 'm')
timestamps = pd.date_range(start_time, periods=len(pred), freq='15T')
# calulate 'day' colum of the dataframe
dtdelta = (timestamps.date - df.timestamp.max().date())
dtdelta = list(map(lambda x: x.days, dtdelta))
days = dtdelta + df.day.max()
# calulate time of day
tod = list(map(lambda x: x.strftime('%H:%M'), timestamps.time))
# construct the result dictionary
res = {'geohash6': [gh] * len(pred),
'day': days,
'timestamp': tod,
'demand': pred
}
return pd.DataFrame(res) | 0b1523aa42c7a31aa286522ee81ec93690dfbf0c | 10,936 |
def day_1_puzzle_1_solution() -> int:
"""Use this function to return the total fuel requirements for all of the modules.
This function is used for reading the text file of puzzle data and returning the
total amount of fuel that is required for the modules.
:return: the total fuel requirement.
"""
return sum([calculate_fuel(int(mass)) for mass in get_puzzle_input()]) | 625497ea7e7619b1e84abc9eb8dfdfd1076af392 | 10,938 |
def is_description_style(style):
""" True if this is a style used for Relationships paragraph text """
return is_style(style, 'Normal') or is_style(style, 'Note') | 0e96d9977f7d18e8253a87e3af59f31e8326f4ae | 10,939 |
def inject_content_head_last(html, content):
"""
将文本内容插入到head的尾部
:type html: str
:type content: str
:rtype: str
"""
head_end_pos = html.find("</head") # 找到 </head> 标签结束的位置
if head_end_pos == -1:
        # if there is no </head>, skip the insertion
return html
return html[:head_end_pos] + content + html[head_end_pos:] | 61792831f859a966e8cfa01ca56a6b9be10ede4d | 10,940 |
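A short usage sketch for inject_content_head_last (the HTML snippet is made up):
    html = "<html><head><title>t</title></head><body></body></html>"
    print(inject_content_head_last(html, "<meta charset='utf-8'>"))
    # <html><head><title>t</title><meta charset='utf-8'></head><body></body></html>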
from typing import Union
def download(ticker: str,
start: Union[pd.Timestamp, str] = None,
end: Union[pd.Timestamp, str] = None,
frequency: str = "day") -> pd.DataFrame:
"""
    Download market data from Yahoo Finance using the yfinance library for ticker `ticker`, from `start` to `end`,
at a specific frequency (day, hour or minute).
:param str ticker: Ticker, e.g. "AAPL" or "GOOG".
:param pd.Timestamp,str start: Starting date for fetching the data as a pd.Timestamp or a "YYYY-MM-DD HH:MM:SS" str.
If None, the oldest possible date is used by yfinance. `start` is **always** truncated to max 730 days
        from today for `frequency="1h"` and to max 30 days for `frequency="1m"`. Default is None.
:param pd.Timestamp,str end: End date for fetching the data as a pd.Timestamp or a "YYYY-MM-DD HH:MM:SS" str.
If None, today is used ( `pd.Timestamp.today().floor("D")` ). Default is None.
:param str frequency: Frequency at which the data is sampled, can be daily ("day", "daily", "d", "1d"), hourly
("hour", "hourly", "h", "1h") or every minute ("minute", "m", "1m"). Default is "day".
:return: market data as a pd.DataFrame with columns "Open", "High", "Low", "Close", "Adj Close", "Volume".
"""
today = pd.Timestamp.today().floor('D')
if end is None:
end = today
elif isinstance(end, str):
end = pd.Timestamp(end)
day_set = {"day", "daily", "d", "1d"}
hour_set = {"hour", "hourly", "h", "1h"}
minute_set = {"minute", "m", "1m"}
if frequency.lower() in day_set:
df = yf.download(ticker, start=start, end=end, interval="1d")
elif frequency.lower() in hour_set.union(minute_set):
        if frequency.lower() in hour_set:
frequency = "1h"
# Range is limited to 730 days max (including today so 729)
limit = pd.Timedelta(days=729)
# Dummy limit for the download
batchlimit = pd.Timedelta(days=1000)
else:
frequency = "1m"
# Range is limited to 30 days max (including today)
limit = pd.Timedelta(days=29)
# Limit of 7 days for the download of minute data
batchlimit = pd.Timedelta(days=7)
# Check the start point
if start is None:
start = today - limit
start = max(end - limit, today - limit)
# Download by batches (effective only for minute data)
local_start = start
local_end = min(local_start + batchlimit, end)
df = yf.download(ticker, start=local_start, end=local_end, interval=frequency)
while local_end < end:
local_start = local_end
local_end = min(local_start + batchlimit, end)
df = pd.concat((df, yf.download(ticker, start=local_start, end=local_end, interval=frequency)))
else:
raise ValueError(f"Wrong `frequency` argument ({frequency}). "
f"Should be in {day_set}, {hour_set} or {minute_set}.")
if df is None:
raise EmptyDataError
elif not isinstance(df, pd.DataFrame):
raise EmptyDataError
else:
if len(df) == 0:
raise EmptyDataError
if df.columns.nlevels == 2:
df = df.swaplevel(axis=1)
df.sort_index(axis=1, inplace=True)
return df | 733d5ac8244ca6fdbcb73a11585067c90dd7210b | 10,941 |
def _ps_run_one_reset_kwargs(G, reset_kwargs: tuple, eval: bool):
"""
Sample one rollout with given init state and domain parameters, passed as a tuple for simplicity at the other end.
This function is used when a minimum number of rollouts was given.
"""
if len(reset_kwargs) != 2:
raise pyrado.ShapeErr(given=reset_kwargs, expected_match=(2,))
if not isinstance(reset_kwargs[0], np.ndarray):
raise pyrado.TypeErr(given=reset_kwargs[0], expected_type=np.ndarray)
if not isinstance(reset_kwargs[1], dict):
raise pyrado.TypeErr(given=reset_kwargs[1], expected_type=dict)
return rollout(
G.env, G.agent, eval=eval, reset_kwargs=dict(init_state=reset_kwargs[0], domain_param=reset_kwargs[1])
) | 95e23ad682d6afc3014bfa7932b00a955cc5bd3d | 10,944 |
from exopy_pulses.testing.context import TestContext
from collections import OrderedDict
def test_compiling_a_sequence_not_compiling2(workspace, root, monkeypatch,
exopy_qtbot, dialog_sleep):
"""Test compiling a sequence that can be evaluated but not compiled.
"""
def __raise(*args, **kwargs):
return False, {}, {'test': False}
monkeypatch.setattr(TestContext, 'compile_and_transfer_sequence',
__raise)
workbench = workspace.workbench
ui = workbench.get_plugin('enaml.workbench.ui')
ui.show_window()
exopy_qtbot.wait(10 + dialog_sleep)
root.external_vars = OrderedDict({'a': 1.5})
pulse1 = Pulse(def_1='1.0', def_2='{a}')
pulse2 = Pulse(def_1='{a} + 1.0', def_2='3.0')
pulse3 = Pulse(def_1='{4_start} + 0.5',
def_2='{4_start}+{4_duration}-0.5')
pulse4 = Pulse(def_1='2.0', def_2='0.5', def_mode='Start/Duration')
pulse5 = Pulse(def_1='3.0', def_2='0.5', def_mode='Start/Duration')
sequence2 = BaseSequence(time_constrained=True,
def_1='{3_stop} + 0.5', def_2='6')
sequence2.add_child_item(0, pulse3)
sequence1 = BaseSequence()
add_children(sequence1, (pulse2, sequence2, pulse4))
add_children(root, (pulse1, sequence1, pulse5))
workspace.state.sequence = root
dial = CompileDialog(workspace=workspace)
dial.show()
wait_for_window_displayed(exopy_qtbot, dial)
comp_widget = dial.central_widget().widgets()[0]
comp_widget.widgets()[-1].clicked = True
def assert_exec():
assert comp_widget.elapsed_time
assert comp_widget.errors
assert comp_widget.widgets()[-2].background == parse_color('red')
exopy_qtbot.wait_until(assert_exec) | eec3ee453346a75398da230e59013bfaa47f8b23 | 10,945 |
import warnings
from functools import wraps
def deprecated(message, exception=PendingDeprecationWarning):
"""Throw a warning when a function/method will be soon deprecated
Supports passing a ``message`` and an ``exception`` class
(uses ``PendingDeprecationWarning`` by default). This is useful if you
want to alternatively pass a ``DeprecationWarning`` exception for already
deprecated functions/methods.
Example::
>>> import warnings
>>> from functools import wraps
>>> message = "this function will be deprecated in the near future"
>>> @deprecated(message)
... def foo(n):
... return n+n
>>> with warnings.catch_warnings(record=True) as w:
... warnings.simplefilter("always")
... foo(4)
... assert len(w) == 1
... assert issubclass(w[-1].category, PendingDeprecationWarning)
... assert message == str(w[-1].message)
... assert foo.__name__ == 'foo'
8
"""
def decorator(func):
@wraps(func)
def wrapper(*args, **kwargs):
warnings.warn(message, exception, stacklevel=2)
return func(*args, **kwargs)
return wrapper
return decorator | 86ccfeb53048d130a7fe35a0609dc5e95440da23 | 10,946 |
def x_dot(y):
"""x_dot(y)
Describes the differential equation for position as given in CW 12.
"""
return y | 7fa01584b09c6e83e28ddf63b300323fdcb7fa0b | 10,948 |
def get_comp_depends(comp_info, comps):
""" Get comp depends from comp index """
depends = []
for comp in comps:
if comp in comp_info:
depends += comp_info[comp]["dependencies"]
if depends:
depends += get_comp_depends(comp_info, depends)
return list(set(depends)) | 79a8b51e329cf9be414391508cc0ecbe76ff0707 | 10,949 |
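A hedged example of get_comp_depends with a made-up component index (only the "dependencies" key is assumed, as read by the function):
    comp_info = {
        "app":    {"dependencies": ["kernel"]},
        "kernel": {"dependencies": ["hal"]},
        "hal":    {"dependencies": []},
    }
    print(sorted(get_comp_depends(comp_info, ["app"])))   # ['hal', 'kernel']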
def get_naiveb_model(x_train: pd.DataFrame, y_train: pd.Series) -> GaussianNB:
"""
Trains and returns a naive Bayes model
Data must all be on the same scale in order to use naive Bayes
"""
gnb = GaussianNB(priors=None)
gnb.fit(x_train, y_train)
return gnb | f1b93acf80ee88f1eb0be7a61aa0d9ac94248966 | 10,950 |
def updateDF(df, fields, id_patient):
"""
fields is a dictionary of column names and values.
The function updates the row of id_patient with the values in fields.
"""
for key in fields:
df.loc[df["id_patient"] == id_patient, key] = fields[key][0]
return df | 5ced64eca8d8736836f82dacd1750cb8ac612989 | 10,951 |
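A small usage sketch for updateDF; it assumes pandas and the layout implied by the function (an "id_patient" column plus per-field lists whose first element is the new value):
    import pandas as pd
    df = pd.DataFrame({"id_patient": [1, 2], "age": [30, 40]})
    df = updateDF(df, {"age": [35]}, id_patient=1)
    # the row with id_patient == 1 now has age == 35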
def gcd(num1: int, num2: int) -> int:
"""Computes the greatest common divisor of integers a and b using
Euclid's Algorithm.
"""
while num2 != 0:
num1, num2 = num2, num1 % num2
return num1 | c53ff5be770570278f497d7ce2a2146a3ac3d9da | 10,952 |
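A quick check of gcd on small integers:
    print(gcd(48, 18))   # 6
    print(gcd(7, 0))     # 7 (gcd with zero is the other argument)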
import json
async def check_user_name(request):
"""Check if a user exists with provided username."""
log_request(request)
conn = await create_connection()
response = await users_query.users_search_duplicate(
conn, request.args.get("username")
)
conn.close()
return json({"exists": bool(response)}) | 72ff533a02e6377b78bfbfc631e87acc5fe59779 | 10,954 |
def azip_longest(*aiterables, fillvalue=None):
"""async version of izip_longest with parallel iteration"""
return _azip(*aiterables, fillvalue=fillvalue, stop_any=False) | 22f4ef6b4f1294ccca71a59337913a64e89a9e62 | 10,955 |