content (string, lengths 35–762k) | sha1 (string, length 40) | id (int64, 0–3.66M) |
---|---|---|
def get_nearest_point_distance(points, wire1, wire2):
"""
>>> get_nearest_point_distance([(0, 0), (158, -12), (146, 46), (155, 4), (155, 11)], [((0, 0), (75, 0)), ((75, 0), (75, -30)), ((75, -30), (158, -30)), ((158, -30), (158, 53)), ((158, 53), (146, 53)), ((146, 53), (146, 4)), ((146, 4), (217, 4)), ((217, 4), (217, 11)), ((217, 11), (145, 11))], [((0, 0), (0, 62)), ((0, 62), (66, 62)), ((66, 62), (66, 117)), ((66, 117), (100, 117)), ((100, 117), (100, 46)), ((100, 46), (155, 46)), ((155, 46), (155, -12)), ((155, -12), (238, -12))])
610
>>> get_nearest_point_distance([(0, 0), (107, 47), (124, 11), (157, 18), (107, 71), (107, 51)], [((0, 0), (98, 0)), ((98, 0), (98, 47)), ((98, 47), (124, 47)), ((124, 47), (124, -16)), ((124, -16), (157, -16)), ((157, -16), (157, 71)), ((157, 71), (95, 71)), ((95, 71), (95, 51)), ((95, 51), (128, 51)), ((128, 51), (128, 104)), ((128, 104), (179, 104))], [((0, 0), (0, 98)), ((0, 98), (91, 98)), ((91, 98), (91, 78)), ((91, 78), (107, 78)), ((107, 78), (107, 11)), ((107, 11), (147, 11)), ((147, 11), (147, 18)), ((147, 18), (162, 18)), ((162, 18), (162, 24)), ((162, 24), (169, 24))])
410
"""
def get_distance(point):
d = 0
for wire in (wire1, wire2):
for part in wire:
intersection = get_intersection_point(part, (point, point))
if intersection == []:
d += abs(part[0][0] - part[1][0]) + abs(part[0][1] - part[1][1])
else:
d += abs(part[0][0] - point[0]) + abs(part[0][1] - point[1])
break
return d
points.sort(key=get_distance)
return get_distance(points[1]) | 917ae2370497ec4ea753daed89a2ee82724887fc | 22,383 |
def strip_tokens(tokenized: str) -> str:
"""Replaces all tokens with the token's arguments."""
result = []
pos = 0
match = RX_TOKEN.search(tokenized, pos)
while match:
start, end = match.span()
result.append(tokenized[pos:start])
result.append(match.groupdict()['argument'])
pos = end
match = RX_TOKEN.search(tokenized, pos)
result.append(tokenized[pos:])
return ''.join(result) | b70c58ee45fc24e88269c99067a2f161b2b37e75 | 22,384 |
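A usage sketch for strip_tokens: it relies on a module-level RX_TOKEN regex with a named 'argument' group, which is not shown in the row above. The token syntax below is an assumption chosen only for illustration.

import re

# Assumed token syntax {{NAME:argument}}; the real RX_TOKEN may differ.
RX_TOKEN = re.compile(r'\{\{(?P<name>\w+):(?P<argument>[^}]*)\}\}')

# With strip_tokens from the row above in scope:
print(strip_tokens("Hello {{BOLD:world}} and {{EM:friends}}!"))
# -> Hello world and friends!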
import numpy as _np
def circular(P=365, K=0.1, T=0, gamma=0, t=None):
    """
    circular() simulates the radial velocity signal of a planet in a
    circular orbit around a star.
    The algorithm needs improvements.
    Parameters:
    P = period in days
    K = semi-amplitude of the signal
    T = phase offset of the signal
    gamma = average velocity of the star
    t = array of observation times
    Returns:
    t = time
    RV = rv signal generated
    """
    if t is None:
        raise ValueError('An array of observation times `t` is required')
    RV = [K*_np.sin(2*_np.pi*x/P - T) + gamma for x in t]  # m/s
    return t, RV | 33d3ea97d21ce1a14b07d02216597fe8977b2400 | 22,385 |
def set(isamAppliance, dsc, check_mode=False, force=False):
"""
Updating the tracing levels
"""
check_value,warnings = _check(isamAppliance, dsc)
if force is True or check_value is False:
if check_mode is True:
return isamAppliance.create_return_object(changed=True, warnings=warnings)
else:
return isamAppliance.invoke_put(
"Updating the tracing levels",
"/isam/cluster/tracing/v1",
{
'dsc': dsc
}, requires_model=requires_model)
return isamAppliance.create_return_object(warnings=warnings) | 1451ace2ed5ef6820e34ebb05d0879e3d5e3917b | 22,386 |
from astropy.table import Table
from astropy.time import Time
def parse_logfile(logfile):
"""
Read iotime log entries from logfile
Return Table with columns function duration readwrite filename timestamp datetime
"""
rows = list()
with open(logfile) as fx:
for line in fx:
row = parse(line)
if row is not None:
rows.append(row)
timing = Table(rows=rows)
timing['datetime'] = Time(timing['timestamp']).datetime
return timing | 8fb27648694df32e9035d0d24cae80f9ff9e654a | 22,388 |
def histogram2d(x,y,n=10,range=None,density=False,keep_outliers=False,out=None):
"""2D histogram with uniform bins. Accelerated by numba
x, y: array_like
x and y coordinates of each point. x and y will be flattened
n : scalar or (nx, ny)
number of bins in x and y
range : None or ((xmin,xmax),(ymin,ymax))
range of bins. If any is None, the min/max is computed
density : optional, bool
if True, compute bin_count / (sample_count * bin_area)
keep_outliers : optional, bool
if True, add rows and columns to each edge of the histogram to count the outliers
out : array_like, optional, shape = (nx, ny)
Array to store output. Note that for compatibility with numpy's histogram2d, out
is indexed out[x,y]. If keep_outliers is True, out must have shape (nx+2,ny+2)
"""
x = np.asarray(x)
y = np.asarray(y)
if x.shape != y.shape:
raise RuntimeError("x and y must be same shape")
x = x.reshape(-1)
y = y.reshape(-1)
if range is None:
xmin,xmax = None,None
ymin,ymax = None,None
else:
xmin,xmax = range[0]
ymin,ymax = range[1]
if xmin is None or xmax is None:
xmm = aminmax(x)
if xmin is None: xmin = xmm[0]
if xmax is None: xmax = xmm[1]
if ymin is None or ymax is None:
ymm = aminmax(y)
if ymin is None: ymin = ymm[0]
if ymax is None: ymax = ymm[1]
if np.isscalar(n):
nx,ny = n,n
else:
nx,ny = n
if keep_outliers:
out_shape = (nx+2,ny+2)
else:
out_shape = (nx,ny)
if density:
# 1/ (sample_count * bin_area)
d = (nx*ny)/(len(x)*(xmax-xmin)*(ymax-ymin))
if out is None:
out = np.empty(out_shape,np.float64)
else:
d = 1
if out is None:
out = np.empty(out_shape,np.uint64)
_histogram2d(out, x,y,nx,ny,xmin,xmax,ymin,ymax,d,keep_outliers)
return out | 8faf591e769630540345565ff3a34f33e83f70ce | 22,389 |
def _anatomical_swaps(pd):
"""Return swap and flip arrays for data transform to anatomical
use_hardcoded: no-brain implementation for 90deg rots
"""
use_hardcoded = True
# hardcoded for 90degs
if use_hardcoded:
if _check90deg(pd) != True:
raise(Exception('Not implemented'))
ori = pd['orient']
if ori == 'trans':
#swap = [0,1,2]
swap = [0,2,1]
flip = [1,2]
elif ori == 'trans90':
#swap = [1,0,2]
swap = [2,0,1]
flip = [0]
elif ori == 'sag':
#swap = [1,2,0]
swap = [2,1,0]
flip = [1,2]
elif ori == 'sag90':
#swap = [2,1,0]
swap = [1,2,0]
flip = [0]
elif ori == 'cor':
#swap = [0,2,1]
swap = [0,1,2]
flip = [1]
elif ori == 'cor90':
swap = [1,0,2]
flip = []
return swap, flip
# with rot matrix
else:
rot_matrix = vj.core.niftitools._qform_rot_matrix(pd)
inv = np.linalg.inv(rot_matrix).astype(int)
swap = inv.dot(np.array([1,2,3], dtype=int))
flipaxes = []
for num, i in enumerate(swap):
if i < 0:
flipaxes.append(num)
swapaxes = (np.abs(swap) - 1).astype(int)
return swapaxes, flipaxes | 2b958571597b72ca38de1310ca9a9e6a2caa69ac | 22,391 |
from typing import List
from typing import Dict
from typing import Tuple
from pyspark.sql import DataFrame
def block_variants_and_samples(variant_df: DataFrame, sample_ids: List[str],
                               variants_per_block: int,
                               sample_block_count: int) -> Tuple[DataFrame, Dict[str, List[str]]]:
"""
Creates a blocked GT matrix and index mapping from sample blocks to a list of corresponding sample IDs. Uses the
same sample-blocking logic as the blocked GT matrix transformer.
Requires that:
- Each variant row has the same number of values
- The number of values per row matches the number of sample IDs
Args:
variant_df : The variant DataFrame
sample_ids : The list of sample ID strings
variants_per_block : The number of variants per block
sample_block_count : The number of sample blocks
Returns:
tuple of (blocked GT matrix, index mapping)
"""
assert check_argument_types()
first_row = variant_df.selectExpr("size(values) as numValues").take(1)
if not first_row:
raise Exception("DataFrame has no values.")
num_values = first_row[0].numValues
if num_values != len(sample_ids):
raise Exception(
f"Number of values does not match between DataFrame ({num_values}) and sample ID list ({len(sample_ids)})."
)
__validate_sample_ids(sample_ids)
blocked_gt = glow.transform("block_variants_and_samples",
variant_df,
variants_per_block=variants_per_block,
sample_block_count=sample_block_count)
index_map = __get_index_map(sample_ids, sample_block_count, variant_df.sql_ctx)
output = blocked_gt, index_map
assert check_return_type(output)
return output | 4cf50f74b235adf5ef92ebde3a4a9259a3a49d87 | 22,392 |
def sparql_service_update(service, update_query):
"""
Helper function to update (DELETE DATA, INSERT DATA, DELETE/INSERT) data.
"""
sparql = SPARQLWrapper(service)
sparql.setMethod(POST)
sparql.setRequestMethod(POSTDIRECTLY)
sparql.setQuery(update_query)
result = sparql.query()
#SPARQLWrapper is going to throw an exception if result.response.status != 200:
return 'Done' | 8e68c6672222a831203e457deaaac1ed73169fbf | 22,393 |
def filtered_events(request):
"""Get the most recent year of stocking and
pass the information onto our annual_events view.
"""
dataUrl = reverse("api:api-get-stocking-events")
maxEvents = settings.MAX_FILTERED_EVENT_COUNT
return render(
request,
"stocking/found_events.html",
context={"dataUrl": dataUrl, "maxEvents": maxEvents},
) | b99d772656bab246b9f412f50674e402b7ca7476 | 22,394 |
import requests
from datetime import datetime
def track_user_session(user=None, request=None):
"""Creates, filters and updates UserSessions on the core and sends UserSessions to the hub on next login
Filter the local UserSession objects per user and get their most recent user_session object
    If it's a LOGIN REQUEST and the UserSession exists, we send this UserSession to the hub
    Then FeedbackActivity model hours_used_release field is calculated on the hub
    Then we create a new UserSession on the core-service
    Else any other type of REQUEST (e.g. verify, refresh), overwrite session_end
Args:
user: The user objects from people.models
request: The request object from django_rest_framework
Returns:
None
"""
user_session = UserSession.objects.filter(user_email=user.email).order_by('session_start').last()
# serialize, then transform to JSON format (.data)
user_session_serial = UserSessionSerializer(user_session).data
request_url = request.META.get('PATH_INFO', '')
if '/api-token-auth/' in request_url: # User login, new session start
if user_session:
hub_token = ServiceToServiceAuth().generate_hub_token()
request_params = {
'url': settings.HUB_URL + 'feedback/user-sessions/',
'json': user_session_serial,
'headers': {"Authorization": "STS-JWT {0}".format(hub_token)}
}
try:
hub_response = requests.post(**request_params)
new_user_session = UserSession.objects.create(user_email=user.email, session_start=datetime.now(),
session_end=datetime.now(),
tag=settings.VERSION) # settings.VERSION
except requests.ConnectionError:
return create_error_response('Failed to connect to hub service: {url}'.format(**request_params))
    else:  # Any other request, e.g. refresh, verify
user_session.session_end = datetime.now()
user_session.save() | f5733eed845d23dac945c7d23d09a4cccb2f4e14 | 22,395 |
def score(string, goal):
    """
    Compare the candidate string to the goal and return how many
    positions hold the correct letter.
    """
    check_counter = 0
    for i in range(len(string)):
        if string[i] == goal[i]:
            check_counter += 1
    return check_counter | afcfcc565a898b2cdd22e05793144c710fb58e26 | 22,396 |
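A quick sanity check for score (assuming the function above is in scope): it counts the positions where the candidate letter matches the goal.

# Positions 0-3 match, position 4 differs.
assert score("hellp", "hello") == 4
assert score("abc", "abc") == 3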
from datetime import datetime
def validate_parent():
"""
This api validates a parent in the DB.
"""
parent_id = request.json.get('parent_id', None)
decision = request.json.get('decision', 0)
parent = query_existing_user(parent_id)
if parent:
parent.validated = decision
parent.approver_id = get_jwt_identity().get('id')
parent.approve_time = datetime.utcnow()
db.session.add(parent)
db.session.commit()
return jsonify(message="Parent validation updated"), 201
else:
return jsonify(message='User does not exist'), 201 | 3fe52863b4de24705c96aeaa67ee264916b37dbe | 22,397 |
def parse_pdu(data, **kwargs):
"""Parse binary PDU"""
command = pdu.extract_command(data)
if command is None:
return None
new_pdu = make_pdu(command, **kwargs)
new_pdu.parse(data)
return new_pdu | 0a5a84368793f8d5983b08bc2123bd0adb994be6 | 22,398 |
def alchemy_nodes(mol):
"""Featurization for all atoms in a molecule. The atom indices
will be preserved.
Parameters
----------
mol : rdkit.Chem.rdchem.Mol
RDKit molecule object
Returns
-------
atom_feats_dict : dict
Dictionary for atom features
"""
atom_feats_dict = defaultdict(list)
is_donor = defaultdict(int)
is_acceptor = defaultdict(int)
fdef_name = osp.join(RDConfig.RDDataDir, 'BaseFeatures.fdef')
mol_featurizer = ChemicalFeatures.BuildFeatureFactory(fdef_name)
mol_feats = mol_featurizer.GetFeaturesForMol(mol)
mol_conformers = mol.GetConformers()
assert len(mol_conformers) == 1
for i in range(len(mol_feats)):
if mol_feats[i].GetFamily() == 'Donor':
node_list = mol_feats[i].GetAtomIds()
for u in node_list:
is_donor[u] = 1
elif mol_feats[i].GetFamily() == 'Acceptor':
node_list = mol_feats[i].GetAtomIds()
for u in node_list:
is_acceptor[u] = 1
num_atoms = mol.GetNumAtoms()
for u in range(num_atoms):
atom = mol.GetAtomWithIdx(u)
atom_type = atom.GetAtomicNum()
num_h = atom.GetTotalNumHs()
atom_feats_dict['node_type'].append(atom_type)
h_u = []
h_u += atom_type_one_hot(atom, ['H', 'C', 'N', 'O', 'F', 'S', 'Cl'])
h_u.append(atom_type)
h_u.append(is_acceptor[u])
h_u.append(is_donor[u])
h_u += atom_is_aromatic(atom)
h_u += atom_hybridization_one_hot(atom, [Chem.rdchem.HybridizationType.SP,
Chem.rdchem.HybridizationType.SP2,
Chem.rdchem.HybridizationType.SP3])
h_u.append(num_h)
atom_feats_dict['n_feat'].append(F.tensor(np.array(h_u).astype(np.float32)))
atom_feats_dict['n_feat'] = F.stack(atom_feats_dict['n_feat'], dim=0)
atom_feats_dict['node_type'] = F.tensor(np.array(
atom_feats_dict['node_type']).astype(np.int64))
return atom_feats_dict | 8bccd62bafa77f6dc95bfba8992df48756b9ac0e | 22,399 |
def generate(*, artifacts: artifacts_types.ModelArtifacts, name: str) -> str:
"""
Generate the class source from the schema.
Args:
        artifacts: The artifacts of the model.
name: The name of the model.
Returns:
The source code for the model class.
"""
model_artifacts = models_file_artifacts.calculate(artifacts=artifacts, name=name)
return _source.generate(artifacts=model_artifacts) | 6a510f031a9971a49057e114cc129f05737226df | 22,400 |
def get_rot_mat_kabsch(p_matrix, q_matrix):
"""
Get the optimal rotation matrix with the Kabsch algorithm. Notation is from
https://en.wikipedia.org/wiki/Kabsch_algorithm
Arguments:
p_matrix: (np.ndarray)
q_matrix: (np.ndarray)
Returns:
(np.ndarray) rotation matrix
"""
h = np.matmul(p_matrix.transpose(), q_matrix)
u, _, vh = np.linalg.svd(h)
d = np.linalg.det(np.matmul(vh.transpose(), u.transpose()))
int_mat = np.identity(3)
int_mat[2, 2] = d
rot_matrix = np.matmul(np.matmul(vh.transpose(), int_mat), u.transpose())
return rot_matrix | 2cdd46af7f6f05acec23a6cf20e8ca561126c4f2 | 22,401 |
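A small self-check for get_rot_mat_kabsch (assuming numpy and the function above are in scope): rotate a point set by a known rotation and confirm the Kabsch matrix recovers it.

import numpy as np

theta = 0.3
true_rot = np.array([[np.cos(theta), -np.sin(theta), 0.0],
                     [np.sin(theta),  np.cos(theta), 0.0],
                     [0.0,            0.0,           1.0]])
p = np.random.default_rng(0).normal(size=(10, 3))
q = p.dot(true_rot.T)                 # q_i = R p_i, no translation or noise
rot = get_rot_mat_kabsch(p, q)
assert np.allclose(rot, true_rot)
assert np.allclose(p.dot(rot.T), q)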
def _msrest_next(iterator):
""""To avoid:
TypeError: StopIteration interacts badly with generators and cannot be raised into a Future
"""
try:
return next(iterator)
except StopIteration:
raise _MsrestStopIteration() | fe1877cfe4c05adb8bc082ea6a7d1ef54e324386 | 22,402 |
def record_export(record=None, export_format=None, pid_value=None, permissions=None):
"""Export marc21 record page view."""
exporter = current_app.config.get("INVENIO_MARC21_RECORD_EXPORTERS", {}).get(
export_format
)
if exporter is None:
abort(404)
options = current_app.config.get(
"INVENIO_MARC21_RECORD_EXPORTER_OPTIONS",
{
"indent": 2,
"sort_keys": True,
},
)
serializer = obj_or_import_string(exporter["serializer"])(options=options)
exported_record = serializer.serialize_object(record.to_dict())
return render_template(
"invenio_records_marc21/records/export.html",
pid_value=pid_value,
export_format=exporter.get("name", export_format),
exported_record=exported_record,
record=Marc21UIJSONSerializer().dump_one(record.to_dict()),
permissions=permissions,
) | 2875c0fb019d5b8851c7cdcc31ac6ccd8e6b4002 | 22,403 |
from typing import List
from typing import Tuple
from typing import Optional
def body_range(
operators: List[str],
font_changes: List[Tuple]) -> Tuple[Optional[int], Optional[int]]:
"""given some assumptions about how headers and footers are formatted,
    find the operations describing the body text of a page"""
# font_changes: (idx, weight, size)
thresh = 20.0
if font_changes[0][2] > thresh:
# if the first font is big, this is a chapter heading page
# we want everything after the next font change
# find the first Td after this point
if len(font_changes) < 2:
start_idx = None
else:
start_idx = font_changes[1][0]
# And last three operations (for the page number) can be discarded.
end_idx = len(operators) - 3
elif font_changes[0][1] == "regular":
# otherwise, we are looking for a (regular bold regular) pattern
if len(font_changes) < 3:
start_idx = None
else:
start_idx = font_changes[2][0] + 1
# discard the final operation
end_idx = len(operators) - 1
elif font_changes[0][1] == "bold":
# or (bold regular) pattern
if len(font_changes) < 2:
start_idx = None
else:
start_idx = font_changes[1][0] + 1 + 2 # (to skip over page number)
# discard the final operation
end_idx = len(operators) - 1
else:
start_idx = None
end_idx = None
if start_idx is not None and start_idx < len(operators):
start_idx = operators[start_idx:].index(b"Td") + start_idx
if end_idx is not None and end_idx > len(operators):
end_idx = None
return start_idx, end_idx | aac320631a53653a770ba362c4826fca9f8fe673 | 22,404 |
import math
def sine(value, default=_SENTINEL):
"""Filter and function to get sine of the value."""
try:
return math.sin(float(value))
except (ValueError, TypeError):
if default is _SENTINEL:
warn_no_default("sin", value, value)
return value
return default | 48cfcdef750ce497f8f36acee74c440ec244d31f | 22,405 |
def wmt_affine_base_1e4():
"""Set of hyperparameters."""
hparams = wmt_affine_base()
hparams.kl_reg = 1e-4
hparams.learning_rate_constant = 2.0
hparams.learning_rate_warmup_steps = 8000
return hparams | f6d047737846aa0d4518045709f87b7b5f66d6fa | 22,406 |
import random
def random_crop_with_constraints(bbox, size, height, width, min_scale=0.3, max_scale=1,
max_aspect_ratio=2, constraints=None,
max_trial=1000):
"""Crop an image randomly with bounding box constraints.
This data augmentation is used in training of
Single Shot Multibox Detector [#]_. More details can be found in
data augmentation section of the original paper.
.. [#] Wei Liu, Dragomir Anguelov, Dumitru Erhan, Christian Szegedy,
Scott Reed, Cheng-Yang Fu, Alexander C. Berg.
SSD: Single Shot MultiBox Detector. ECCV 2016.
Parameters
----------
bbox : numpy.ndarray
Numpy.ndarray with shape (N, 4+) where N is the number of bounding boxes.
The second axis represents attributes of the bounding box.
Specifically, these are :math:`(x_{min}, y_{min}, x_{max}, y_{max})`,
we allow additional attributes other than coordinates, which stay intact
during bounding box transformations.
size : tuple
Tuple of length 2 of image shape as (width, height).
min_scale : float
The minimum ratio between a cropped region and the original image.
The default value is :obj:`0.3`.
max_scale : float
The maximum ratio between a cropped region and the original image.
The default value is :obj:`1`.
max_aspect_ratio : float
The maximum aspect ratio of cropped region.
The default value is :obj:`2`.
constraints : iterable of tuples
An iterable of constraints.
Each constraint should be :obj:`(min_iou, max_iou)` format.
If means no constraint if set :obj:`min_iou` or :obj:`max_iou` to :obj:`None`.
If this argument defaults to :obj:`None`, :obj:`((0.1, None), (0.3, None),
(0.5, None), (0.7, None), (0.9, None), (None, 1))` will be used.
max_trial : int
Maximum number of trials for each constraint before exit no matter what.
Returns
-------
numpy.ndarray
Cropped bounding boxes with shape :obj:`(M, 4+)` where M <= N.
tuple
Tuple of length 4 as (x_offset, y_offset, new_width, new_height).
"""
# default params in paper
if constraints is None:
constraints = (
(0.1, None),
(0.3, None),
(0.5, None),
(0.7, None),
(0.9, None),
(None, 1),
)
if len(bbox) == 0:
constraints = []
w, h = size
candidates = []
for min_iou, max_iou in constraints:
min_iou = -np.inf if min_iou is None else min_iou
max_iou = np.inf if max_iou is None else max_iou
for _ in range(max_trial):
scale = random.uniform(min_scale, max_scale)
aspect_ratio = random.uniform(
max(1 / max_aspect_ratio, scale * scale),
min(max_aspect_ratio, 1 / (scale * scale)))
crop_h = int(height * scale / np.sqrt(aspect_ratio))
crop_w = int(width * scale * np.sqrt(aspect_ratio))
crop_t = random.randrange(h - crop_h)
crop_l = random.randrange(w - crop_w)
crop_bb = np.array((crop_l, crop_t, crop_l + crop_w, crop_t + crop_h))
iou = bbox_iou(bbox, crop_bb[np.newaxis])
if min_iou <= iou.min() and iou.max() <= max_iou:
top, bottom = crop_t, crop_t + crop_h
left, right = crop_l, crop_l + crop_w
candidates.append((left, top, right-left, bottom-top))
break
# random select one
while candidates:
crop = candidates.pop(np.random.randint(0, len(candidates)))
new_bbox = bbox_crop(bbox, crop, allow_outside_center=False)
if new_bbox.size < 1:
continue
new_crop = (crop[0], crop[1], crop[2], crop[3])
return new_bbox, new_crop
    return random_crop_with_constraints(bbox, (w, h), height, width, min_scale=0.9, max_scale=1, max_trial=50) | 787c341f3f496eeedce18c42462efa5f5ba6515b | 22,407 |
import re
def unravelContent(originalData):
"""
This is the primary function responsible for creating an alternate data stream of unraveled data.
Args:
contentData: Script content
Returns:
contentData: Unraveled additional content
"""
contentData = normalize(originalData)
loopCount = 0
while True:
modificationFlag = None
# Reversed Strings - Changes STATE
# Looks only in originalData, can be problematic flipping unraveled content back and forth.
reverseString = ["noitcnuf", "marap", "nruter", "elbairav", "tcejbo-wen", "ecalper",]
if any(entry in originalData.lower() for entry in reverseString):
contentData, modificationFlag = reverseStrings(originalData, contentData, modificationFlag)
# Decompress Streams - Changes STATE
if all(entry in contentData.lower() for entry in ["streamreader", "frombase64string"]) or \
all(entry in contentData.lower() for entry in ["deflatestream", "decompress"]) or \
all(entry in contentData.lower() for entry in ["memorystream", "frombase64string"]):
contentData, modificationFlag = decompressContent(contentData, modificationFlag)
# Base64 Decodes - Changes STATE
if re.search("[A-Za-z0-9+/=]{30,}", contentData):
contentData, modificationFlag = decodeBase64(contentData, modificationFlag)
# Decrypts SecureStrings - Changes STATE
if "convertto-securestring" in contentData.lower() and \
re.search("(?:[0-9]{1,3},){15,}[0-9]{1,3}", contentData.replace(" ", "")) and \
re.search("[A-Za-z0-9+=/]{255,}", contentData):
contentData, modificationFlag = decryptStrings(contentData, modificationFlag)
# Normalize / De-Obfuscate the new contents before proceeding.
contentData = normalize(contentData)
        if modificationFlag is None:
break
loopCount += 1
return contentData | 16c5c5b0bc1de026aa4ed8f931e171e11d3ecacc | 22,408 |
import hashlib
import struct
import M2Crypto
from M2Crypto import m2
def quote_verify(data, validation, aik, pcrvalues):
"""Verify that a generated quote came from a trusted TPM and matches the
previously obtained PCR values
:param data: The TPM_QUOTE_INFO structure provided by the TPM
:param validation: The validation information provided by the TPM
:param aik: The object representing the Attestation Identity Key
:param pcrvalues: A dictionary containing the PCRs read from the TPM
:returns: True if the quote can be verified, False otherwise
"""
select = 0
maxpcr = 0
# Verify that the validation blob was generated by a trusted TPM
pubkey = aik.get_pubkey()
n = m2.bin_to_bn(pubkey)
n = m2.bn_to_mpi(n)
e = m2.hex_to_bn("010001")
e = m2.bn_to_mpi(e)
rsa = M2Crypto.RSA.new_pub_key((e, n))
m = hashlib.sha1()
m.update(data)
md = m.digest()
try:
ret = rsa.verify(md, str(validation), algo='sha1')
except M2Crypto.RSA.RSAError:
return False
# And then verify that the validation blob corresponds to the PCR
# values we have
values = bytearray()
for pcr in sorted(pcrvalues):
values += pcrvalues[pcr]
select |= (1 << pcr)
maxpcr = pcr
if maxpcr < 16:
header = struct.pack('!H', 2)
header += struct.pack('@H', select)
header += struct.pack('!I', len(values))
else:
header = struct.pack('!H', 4)
header += struct.pack('@I', select)
header += struct.pack('!I', len(values))
pcr_blob = header + values
m = hashlib.sha1()
m.update(pcr_blob)
pcr_hash = m.digest()
if pcr_hash == data[8:28]:
return True
else:
return False | bf23af17310252ff4a6fe73d16fbd2fffbc49618 | 22,410 |
def _rectify_base(base):
"""
transforms base shorthand into the full list representation
Example:
>>> assert _rectify_base(NoParam) is DEFAULT_ALPHABET
>>> assert _rectify_base('hex') is _ALPHABET_16
>>> assert _rectify_base('abc') is _ALPHABET_26
>>> assert _rectify_base(10) is _ALPHABET_10
>>> assert _rectify_base(['1', '2']) == ['1', '2']
>>> import pytest
>>> assert pytest.raises(TypeError, _rectify_base, 'uselist')
"""
if base is NoParam or base == 'default':
return DEFAULT_ALPHABET
elif base in [26, 'abc', 'alpha']:
return _ALPHABET_26
elif base in [16, 'hex']:
return _ALPHABET_16
elif base in [10, 'dec']:
return _ALPHABET_10
else:
if not isinstance(base, (list, tuple)):
raise TypeError(
'Argument `base` must be a key, list, or tuple; not {}'.format(
type(base)))
return base | d88ce9edc3a4e134d04b97c6e5804590da90eb67 | 22,411 |
import gzip
import json
def loadjson(filename):
""" Load a python object saved with savejson."""
if filename.endswith('.gz'):
with gzip.open(filename, "rb") as f:
obj = json.loads(f.read().decode("ascii"))
else:
with open(filename, 'rt') as fh:
obj = json.load(fh)
return obj | 5c8bc1446c5d48ebee51f11711b38ddce74835d2 | 22,412 |
from lxml import etree
def _write_ffxml(xml_compiler, filename=None):
"""Generate an ffxml file from a compiler object.
Parameters
----------
xml_compiler : _TitratableForceFieldCompiler
The object that contains all the ffxml template data
filename : str, optional
Location and name of the file to save. If not supplied, returns the ffxml template as a string.
Returns
-------
str or None
"""
# Generate the string version.
xmlstring = etree.tostring(
xml_compiler.ffxml, encoding="utf-8", pretty_print=True, xml_declaration=False
)
xmlstring = xmlstring.decode("utf-8")
if filename is not None:
with open(filename, "w") as fstream:
fstream.write(xmlstring)
else:
return xmlstring | b74434aeb481b11b7a9537932778fb596be205e7 | 22,413 |
from typing import List
from typing import Union
from datetime import datetime
from datetime import timedelta
import pyexiv2
import pytz
def get_utctime(
md_keys: List[str], md: Union[pyexiv2.metadata.ImageMetadata, None]
) -> Union[datetime, None]:
"""Extract the datetime (to the nearest millisecond)"""
utctime = None
dt_key = "Exif.Image.DateTime"
if md is not None:
if dt_key in md_keys:
utctime = datetime.strptime(md[dt_key].raw_value, "%Y:%m:%d %H:%M:%S")
# utctime can also be obtained with DateTimeOriginal:
# utctime = datetime.strptime(
# md["Exif.Photo.DateTimeOriginal"].raw_value, "%Y:%m:%d %H:%M:%S"
# )
# extract the millisecond from the EXIF metadata:
subsec = int(md["Exif.Photo.SubSecTime"].raw_value)
sign = -1.0 if subsec < 0 else 1.0
millisec = sign * 1e3 * float("0.{}".format(abs(subsec)))
utctime += timedelta(milliseconds=millisec)
timezone = pytz.timezone("UTC")
utctime = timezone.localize(utctime)
return utctime | 6f95e50725e865b36a31be48722376301cca4cc1 | 22,414 |
from os import getcwd
from os.path import abspath, isabs, exists, join, dirname
def find_file_recursively(file_name, start_dir=getcwd(), stop_dir=None):
"""
This method will walk trough the directory tree upwards
starting at the given directory searching for a file with
the given name.
:param file_name: The name of the file of interest. Make sure
it does not contain any path information.
:type file_name: str
:param start_dir: The directory where the search should start.
If it is omitted, the cwd is used.
:type start_dir: str
:param stop_dir: The directory where the search should stop. If
this is omitted, it will stop at the root directory.
:type stop_dir: str
:rtype: str
:return: The file path where the file was first found.
"""
cur_dir = abspath(start_dir) if not isabs(start_dir) else start_dir
while True:
if exists(join(cur_dir, file_name)):
# The file of interest exists in the current directory
# so return it.
return join(cur_dir, file_name)
# The file was not found yet so try in the parent directory.
parent_dir = dirname(cur_dir)
if parent_dir == cur_dir or parent_dir == stop_dir:
# We are either at the root directory or reached the stop
# directory.
return None
else:
cur_dir = parent_dir | a7100e37a4f6244090c8b4363cda2b9893d27768 | 22,415 |
def text_cleaning(any_text, nlp):
"""
    The function filters out stop words from any text, lemmatizes the remaining tokens and returns them joined back into a single string
"""
doc = nlp(any_text.lower())
result = []
for token in doc:
if token.text in nlp.Defaults.stop_words:
continue
# if token.is_punct:
# continue
result.append(token.lemma_)
clean_text = " ".join(result)
return clean_text | 7383f075a501c7c11565eac2c825c55f37e2a637 | 22,416 |
def shave(q,options,undef=MISSING,has_undef=1,nbits=12):
"""
Shave variable. On input, nbits is the number of mantissa bits to keep
out of maximum of 24.
"""
# no compression, no shave
# ------------------------
if not options.zlib:
return q
# Determine shaving parameters
# ----------------------------
xbits = 24 - nbits
shp = q.shape
rank = len(shp)
if rank == 2: # yx
chunksize = shp[0]*shp[1]
elif rank == 3: # zyx
chunksize = shp[1]*shp[2]
else:
raise ValueError, "invalid rank=%d"%rank
# Shave it
# --------
qs, rc = shave32(q.ravel(),xbits,has_undef,undef,chunksize)
if rc:
raise ValueError, "error on return from shave32, rc=%d"%rc
return qs.reshape(shp) | 7d5f907f46a703c49f02f9cff3107d67ecc8c2a6 | 22,417 |
from typing import Union
from typing import List
from typing import Sized
from typing import Iterable
def any_none_nan(values: Union[List, np.ndarray, pd.Series, pd.DataFrame, object]) -> bool:
"""Can be used with a single value or a collection of values. Returns `True` if any item in `values` are
`None`, `np.Nan`, `pd.NA`, `pd.NaT` or if the length of `values` is `0`.
Args:
values:
A collection of values to check.
Returns:
bool - True if any item in `values` are None/np.NaN
"""
# pylint: disable=too-many-return-statements
if values is None or values is np.NaN or values is pd.NA or values is pd.NaT: # pylint: disable=nan-comparison
return True
if isinstance(values, Sized) and not isinstance(values, str) and len(values) == 0:
return True
if isinstance(values, pd.Series):
return values.isnull().any() or values.isna().any()
if isinstance(values, pd.DataFrame):
return values.isnull().any().any() or values.isna().any().any()
if isinstance(values, Iterable) and not isinstance(values, str):
if len(values) == 0:
return True
return any((any_none_nan(x) for x in values))
try:
if not isinstance(values, str) and None in values:
return True
except Exception: # pylint: disable=broad-except # noqa
pass
try:
if np.isnan(values).any():
return True
except TypeError:
return False
return False | 659e3d90fe02487820a3cdc711422a7054500b89 | 22,418 |
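A few illustrative checks of any_none_nan (assuming numpy, pandas and the function above are available):

import numpy as np
import pandas as pd

assert any_none_nan(None)
assert any_none_nan(np.nan)
assert any_none_nan([1, 2, None])
assert any_none_nan(pd.Series([1.0, np.nan]))
assert any_none_nan([])                  # empty collections count as missing
assert not any_none_nan([1, 2, 3])
assert not any_none_nan('abc')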
def get_or_create_package(name, epoch, version, release, arch, p_type):
""" Get or create a Package object. Returns the object. Returns None if the
package is the pseudo package gpg-pubkey, or if it cannot create it
"""
package = None
name = name.lower()
if name == 'gpg-pubkey':
return
if epoch in [None, 0, '0']:
epoch = ''
try:
with transaction.atomic():
package_names = PackageName.objects.all()
p_name, c = package_names.get_or_create(name=name)
except IntegrityError as e:
error_message.send(sender=None, text=e)
p_name = package_names.get(name=name)
except DatabaseError as e:
error_message.send(sender=None, text=e)
package_arches = PackageArchitecture.objects.all()
with transaction.atomic():
p_arch, c = package_arches.get_or_create(name=arch)
try:
with transaction.atomic():
packages = Package.objects.all()
package, c = packages.get_or_create(name=p_name,
arch=p_arch,
epoch=epoch,
version=version,
release=release,
packagetype=p_type)
except IntegrityError as e:
error_message.send(sender=None, text=e)
package = packages.get(name=p_name,
arch=p_arch,
epoch=epoch,
version=version,
release=release,
packagetype=p_type)
except DatabaseError as e:
error_message.send(sender=None, text=e)
return package | e8a13b1a34e16c5f4e6de96bb66fb314fd301c10 | 22,419 |
def sort_dict(value, case_sensitive=False, by='key', reverse=False, index=0):
"""
字典排序
:param value: 字典对象
:param case_sensitive: 是否大小写敏感
:param by: 排序对象
:param reverse: 排序方式(正序:True、倒序:False)
:param index: 索引号(此处针对 value 为 list 情况下可根据 list 的某一 index 排序)
:return:
"""
if by == 'key':
pos = 0
elif by == 'value':
pos = 1
else:
raise FilterArgumentError('You can only sort by either "key" or "value"')
def sort_func(item):
value = item[pos]
if index:
try:
value = value[index]
except:
pass
if isinstance(value, string_types) and not case_sensitive:
value = value.lower()
return value
return sorted(value.items(), key=sort_func, reverse=reverse) | a41034cbd9ebd35cddcd0b4f321ffe8dc2613791 | 22,420 |
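sort_dict is written as a template filter and refers to string_types and FilterArgumentError at module level (not shown in the row above); the imports below are assumptions about where those names come from. A usage sketch:

from six import string_types                       # assumed source of string_types
from jinja2.exceptions import FilterArgumentError  # assumed source of FilterArgumentError

scores = {'b': 3, 'A': 1, 'c': 2}
print(sort_dict(scores, by='value'))   # [('A', 1), ('c', 2), ('b', 3)]
print(sort_dict(scores, by='key'))     # [('A', 1), ('b', 3), ('c', 2)]  (case-insensitive)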
def initialize_system(name=None):
"""Initializes a distributed NPU system for use with TensorFlow.
Args:
name: Name of ops.
Returns:
The npu init ops which will open the NPU system using `Session.run`.
"""
return NPUInit(name) | 3b1d50862954e57c8206af974412f79492982e23 | 22,421 |
def zmap_1perm_2samp(X, cat1, cat2=None, rand_seed=-1, fstat=None, name=None):
""" une permutation
X (D, N, P) K points, N subjects, D dim
return:
Y (D,) zvalue at each point
"""
if fstat is None:
fstat = hotelling_2samples
#name = "MP-Hotelling"
if cat2 is None:
cat2 = np.logical_not(cat1)
    # Data
    if rand_seed < 0:
        # Without permutation (cat can be replaced by idx[cat])
        ix1 = cat1
        ix2 = cat2
    else:
        # With permutation
np.random.seed(rand_seed)
idx = np.arange(X.shape[1])[cat1 | cat2]
per = np.random.permutation(idx.size)
nsplit = cat1.sum()
ix1 = idx[per][:nsplit]
ix2 = idx[per][nsplit:]
# Run
Y = fstat(X[:, ix1, :], X[:, ix2, :])
if name is not None:
print(name + " {0}, {1}\n".format(Y.min(), Y.max()))
return Y | 87ffc6a0c49750e9a39295c2775483c4812d0205 | 22,422 |
def grids_have_same_coords(grid0, grid1):
"""Whether two `ESMF.Grid` instances have identical coordinates.
:Parameters:
grid0, grid1: `ESMF.Grid`, `ESMF.Grid`
The `ESMF` Grid instances to be compared
:Returns:
`bool`
Whether or not the Grids have identical coordinates.
"""
coords0 = grid0.coords
coords1 = grid1.coords
if len(coords0) != len(coords1):
return False
for c, d in zip(coords0, coords1):
if len(c) != len(d):
return False
for a, b in zip(c, d):
if not np.array_equal(a, b):
return False
return True | 2fc5001a85694ac9b7b31a383436cc8792b665a4 | 22,423 |
def get_mwa_eor_spec(nu_obs=150.0, nu_emit=1420.40575, bw=8.0, tint=1000.0,
area_eff=21.5, n_stations=50, bmax=100.0):
"""
Parameters
----------
nu_obs : float or array-like, optional
observed frequency [MHz]
nu_emit : float or array-like, optional
rest frequency [MHz]
bw : float or array-like, optional
frequency bandwidth [MHz]
tint : float or array-like, optional
integration time [hour]
area_eff : float or array-like, optional
effective area per station [m ** 2]
n_stations : int or array-like, optional
number of stations
bmax : float or array-like, optional
maximum baseline [wavelength]
Returns
-------
nu_obs, nu_emit, bw, tint, area_eff, n_stations, bmax
"""
return nu_obs, nu_emit, bw, tint, area_eff, n_stations, bmax | 5bc97d666df938c4e5f42d2d429505e2b7f74004 | 22,425 |
def baseModel(data):
"""
    Original model
"""
formula = "label_code ~ education_num + capital_gain + capital_loss + hours_per_week"
model = sm.Logit.from_formula(formula, data=data)
re = model.fit()
return re | 7d66020dc2b527198c0b432b8c8fa9b703335d72 | 22,426 |
def load_screen(options: list) -> int:
"""Callback for loading a screen."""
return get_selection(options) | 2c48fad6a644dad3ccf9ce1d2b4cbff8b841b043 | 22,427 |
def count_cells(notebook):
"""
The function takes a notebook and returns the number of cells
Args:
notebook(Notebook): python object representing the notebook
Returns:
len(nb_dict["cells"]): integer value representing the number of cells into the notebook
A way you might use me is
cells_count = count_cells(nb)
"""
nb_dict = notebook.nb_dict
return len(nb_dict["cells"]) | 19ec2631888ecbba51fa51870694a7217024e5ae | 22,429 |
from typing import OrderedDict
def assignments():
"""
This is called for the assignments tab on the instructor interface
When an assignment is selected get_assignment is called to gather the details
for that assignment.
"""
response.title = "Assignments"
cur_assignments = db(db.assignments.course == auth.user.course_id).select(
orderby=db.assignments.duedate
)
assigndict = OrderedDict()
for row in cur_assignments:
assigndict[row.id] = row.name
tags = []
tag_query = db(db.tags).select()
for tag in tag_query:
tags.append(tag.tag_name)
course = get_course_row(db.courses.ALL)
base_course = course.base_course
chapter_labels = []
chapters_query = db(db.chapters.course_id == base_course).select(
db.chapters.chapter_label
)
for row in chapters_query:
chapter_labels.append(row.chapter_label)
# See `models/db_ebook.py` for course_attributes table
set_latex_preamble(course.base_course)
return dict(
coursename=auth.user.course_name,
confirm=False,
course_id=auth.user.course_name,
assignments=assigndict,
tags=tags,
chapters=chapter_labels,
toc=_get_toc_and_questions(), # <-- This Gets the readings and questions
course=course,
) | ac7834a96b876cadbcc91a7df9b59b9e51794142 | 22,430 |
from typing import Iterable
from typing import Dict
def get_sentences(data: Iterable[JSON_Object],
match_by: str) -> Dict[Hash, JSON_Object]:
"""
Collect sentence objects w.r.t. matching criteria.
:param data: Iterable of sentence objects
:param match_by: Matching criteria / method
:return: Dict of hash: sentence objects
"""
return {
hash_sentence(sentence, match_by): sentence
for sentence in data
} | 53430fcd65315dc98bd4bd0ac0c8a4d51dcef651 | 22,431 |
def str2bool(value):
"""
Args:
value - text to be converted to boolean
True values: y, yes, true, t, on, 1
False values: n, no, false, off, 0
"""
    return value in ['y', 'yes', 'true', 't', 'on', '1'] | 876a58c86b449ba3fac668a4ef2124ea31fda350 | 22,433 |
def numeric_float(max_abs: float = 1e3) -> st.SearchStrategy:
"""Search strategy for numeric (non-inf, non-NaN) floats with bounded absolute value."""
return st.floats(min_value=-max_abs, max_value=max_abs, allow_nan=False, allow_infinity=False) | b44764d88147793792ef90d96c17ec9770737fde | 22,434 |
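A property-based usage sketch for numeric_float (assuming hypothesis and the strategy above are importable): every generated value stays inside the bound and is never NaN.

from hypothesis import given

@given(numeric_float(max_abs=10.0))
def check_bounded(x):
    assert -10.0 <= x <= 10.0
    assert x == x              # NaN would fail this comparison

check_bounded()                # calling the decorated function runs the property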
def add2dict(dict, parent_list, key, value):
""" Add a key/value pair to a dictionary; the pair is added following the
hierarchy of 'parents' as define in the parent_list list. That is
if parent list is: ['5', '1'], and key='k', value='v', then the new,
returned dictionary will have a value:
dict['5']['1'][k] = v
"""
d = dict
for p in parent_list:
if p not in d:
d[p] = {}
d = d[p]
d[key] = value
return dict | 32252d3253283110eee2edb2eb216cfd777a710f | 22,435 |
def transform(x):
"""
transform
x1 x2 ---> 1 x1 x2 x1**2 x2**2 x1x2 |x1 - x2| |x1 + x2|
"""
ones = np.ones(len(x))
x1 = x[:,0]
x2 = x[:,1]
x1_sqr = x1**2
x2_sqr = x2**2
x1x2 = x1 * x2
abs_x1_minus_x2 = abs(x1-x2)
abs_x1_plus_x2 = abs(x1+x2)
return np.stack([ones, x1, x2, x1_sqr, x2_sqr, x1x2, abs_x1_minus_x2, abs_x1_plus_x2], axis=1) | 380cc6e8f181cc192b11e3fd466526093a75e74b | 22,436 |
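A worked example of the feature map in transform (assuming numpy and the function above are in scope): the point (1, 2) maps to [1, 1, 2, 1, 4, 2, 1, 3].

import numpy as np

x = np.array([[1.0, 2.0],
              [3.0, -1.0]])
z = transform(x)
assert z.shape == (2, 8)
assert np.allclose(z[0], [1, 1, 2, 1, 4, 2, 1, 3])
assert np.allclose(z[1], [1, 3, -1, 9, 1, -3, 4, 2])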
import json
def gen_new_contact_json(csv_data):
"""
Generate json with data about Subnets and theirs Contacts
:param csv_data: entry data
:return: Stats about created subnets
"""
dist = {"subnets": csv_data}
with open(f'{PATH}{CONTACTS_SUFIX}', 'w') as out_file:
out_file.write(json.dumps(dist, indent=2, sort_keys=True))
stat = len(dist["subnets"])
return f'Reloaded {stat} subnets and their contacts. ' | 7615c76ea1c9fe392bd6d6d689b2b33a53beaa22 | 22,437 |
def resize(a, shape):
"""
if array a is larger than shape, crop a; if a is smaller than shape, pad a with zeros
Args:
a (numpy array): 2D array to resize
shape: desired shape of the return
Returns:
numpy array: array a resized according to shape
"""
if a.shape[0] < shape[0]:
a = np.pad(a, ((0, shape[0]-a.shape[0]), (0, 0)), mode="constant")
if a.shape[1] < shape[1]:
a = np.pad(a, ((0, 0), (0, shape[1]-a.shape[1])), mode="constant")
if a.shape[0] > shape[0]:
a = a[0:shape[0], :]
if a.shape[1] > shape[1]:
a = a[:, 0:shape[1]]
return a | 40e0829b8680ea5753b12b4bd24e591b9b222bcf | 22,438 |
def _Run(args, holder, url_map_arg, release_track):
"""Issues requests necessary to import URL maps."""
client = holder.client
url_map_ref = url_map_arg.ResolveAsResource(
args,
holder.resources,
default_scope=compute_scope.ScopeEnum.GLOBAL,
scope_lister=compute_flags.GetDefaultScopeLister(client))
data = console_io.ReadFromFileOrStdin(args.source or '-', binary=False)
try:
url_map = export_util.Import(
message_type=client.messages.UrlMap,
stream=data,
schema_path=_GetSchemaPath(release_track))
except yaml_validator.ValidationError as e:
raise exceptions.ToolException(e.message)
# Get existing URL map.
try:
url_map_old = url_maps_utils.SendGetRequest(client, url_map_ref)
except apitools_exceptions.HttpError as error:
if error.status_code != 404:
raise error
# Url Map does not exist, create a new one.
return _SendInsertRequest(client, url_map_ref, url_map)
# No change, do not send requests to server.
if url_map_old == url_map:
return
console_io.PromptContinue(
message=('Url Map [{0}] will be overwritten.').format(url_map_ref.Name()),
cancel_on_no=True)
# Populate id and fingerprint fields. These two fields are manually
# removed from the schema files.
url_map.id = url_map_old.id
url_map.fingerprint = url_map_old.fingerprint
return _SendPatchRequest(client, url_map_ref, url_map) | a04b277fa704e3cc8889a7a1feb7cf16b6040e91 | 22,439 |
import logging
def resolve_function(module, function):
"""
Locate specified Python function in the specified Python package.
:param module: A Python module
:type module: ``types.ModuleType.``
:param function: Name of Python function
    :type function: ``str``
:return: Function or None if not found.
"""
func = None
if function_exists(module, function):
func = getattr(module, function)
if not func:
nuoca_log(logging.ERROR, "Cannot find Python function %s in module %s" % (
function, module
))
return func | 5885755f485d4dc243075aa9df6677cd52f3ebf8 | 22,440 |
from sqlalchemy import select
def from_table(table, engine, limit=None):
"""
Select data in a database table and put into prettytable.
Create a :class:`prettytable.PrettyTable` from :class:`sqlalchemy.Table`.
    Put the data from a database table into a prettytable.
"""
sql = select([table])
if limit is not None:
sql = sql.limit(limit)
result_proxy = engine.execute(sql)
return from_db_cursor(result_proxy.cursor) | df66b3f179d3bde600786b3bc590810ac410b6eb | 22,441 |
from tqdm import tqdm
import requests
import zipfile
import pandas as pd
from io import StringIO, BytesIO
def futures_sgx_daily(trade_date: str = "2020/03/06", recent_day: str = "3") -> pd.DataFrame:
"""
Futures daily data from sgx
P.S. it will be slowly if you do not use VPN
:param trade_date: it means the specific trade day you want to fetch
:type trade_date: str e.g., "2020/03/06"
:param recent_day: the data range near the specific trade day
:type recent_day: str e.g. "3" means 3 day before specific trade day
:return: data contains from (trade_date - recent_day) to trade_day
:rtype: pandas.DataFrame
"""
big_df = pd.DataFrame()
index_df = get_country_index(country="新加坡", index_name="FTSE Singapore", start_date="2020/01/01", end_date=trade_date)
index_df.sort_index(inplace=True)
index_df.reset_index(inplace=True)
index_df.reset_index(inplace=True)
index_df.index = index_df["index"] + 5840
date_start = index_df.index[-1] + 1 - int(recent_day)
date_end = index_df.index[-1] + 1
for page in tqdm(range(date_start, date_end)):
# page = 5883
url = f"https://links.sgx.com/1.0.0/derivatives-daily/{page}/FUTURE.zip"
r = requests.get(url)
with zipfile.ZipFile(BytesIO(r.content)) as file:
with file.open(file.namelist()[0]) as my_file:
data = my_file.read().decode()
if file.namelist()[0].endswith("txt"):
data_df = pd.read_table(StringIO(data))
else:
data_df = pd.read_csv(StringIO(data))
big_df = big_df.append(data_df)
return big_df | 4b2aba7adb48066db1343469541b2007caa82d37 | 22,442 |
def draw_spectra(md, ds):
""" Generate best-fit spectra for all the test objects
Parameters
----------
md: model
The Cannon spectral model
ds: Dataset
Dataset object
Returns
-------
best_fluxes: ndarray
The best-fit test fluxes
best_ivars:
The best-fit test inverse variances
"""
    coeffs_all, covs, scatters, red_chisqs, pivots, label_vector = md.model
    nstars = len(ds.test_SNR)
    cannon_flux = np.zeros(ds.test_flux.shape)
    cannon_ivar = np.zeros(ds.test_ivar.shape)
    for i in range(nstars):
        x = label_vector[:,i,:]
        spec_fit = np.einsum('ij, ij->i', x, coeffs_all)
        cannon_flux[i,:] = spec_fit
        bad = ds.test_ivar[i,:] == SMALL**2
        cannon_ivar[i,:][~bad] = 1. / scatters[~bad] ** 2
return cannon_flux, cannon_ivar | 03344230339e66ac03ffa0ac2d5744475c311591 | 22,443 |
def get_response(
schema, # type: GraphQLSchema
params, # type: RequestParams
catch_exc, # type: Type[BaseException]
allow_only_query=False, # type: bool
**kwargs # type: Any
):
# type: (...) -> Optional[ExecutionResult]
"""Get an individual execution result as response, with option to catch errors.
This does the same as execute_graphql_request() except that you can catch errors
that belong to an exception class that you need to pass as a parameter.
"""
# noinspection PyBroadException
execute = (
execute_graphql_request_as_promise
if kwargs.get("return_promise", False)
else execute_graphql_request
)
try:
execution_result = execute(schema, params, allow_only_query, **kwargs)
except catch_exc:
return None
return execution_result | c451514b588956c59046140c42c18d79bb151170 | 22,444 |
def _get_status_arrays():
""" Get status for all arrays.
"""
results = []
try:
# Get array(s) status for a site.
result = get_status_arrays()
if result is not None:
results = result
return results
except Exception as err:
message = str(err)
current_app.logger.info(message)
raise Exception(message) | 911a418f728e10949e756098414b7e0809fc2108 | 22,445 |
def get_cycle_amplitude(data, cycle, metric_to_use, hourly_period_to_exclude):
"""
    given data (e.g. results[opposite_pair][substratification][substratification_level]['take_simple_means_by_group_no_individual_mean'])
and a cycle and a metric to use (max_minus_min or average_absolute_difference_from_mean)
computes the cycle amplitude.
"""
data = deepcopy(data)
assert metric_to_use in ['max_minus_min' ,'average_absolute_difference_from_mean']
assert cycle in ['date_relative_to_period', 'local_hour', 'weekday', 'month', 'week_of_year']
if cycle == 'date_relative_to_period':
data[cycle] = data[cycle].loc[data[cycle].index.map(lambda x:np.abs(x) <= 14)]
assert list(data[cycle].index) == list(range(-14, 15))
if cycle == 'local_hour':
if hourly_period_to_exclude is None:
assert list(data[cycle].index) == list(range(24))
else:
assert len(hourly_period_to_exclude) == 2
assert hourly_period_to_exclude[0] < hourly_period_to_exclude[1]
data[cycle] = data[cycle].loc[data[cycle].index.map(lambda x:(x < hourly_period_to_exclude[0]) or (x > hourly_period_to_exclude[1]))]
assert list(data[cycle].index) == [a for a in list(range(24)) if a < hourly_period_to_exclude[0] or a > hourly_period_to_exclude[1]]
if cycle == 'weekday':
assert list(data[cycle].index) == list(['Friday', 'Monday', 'Saturday', 'Sunday', 'Thursday', 'Tuesday',
'Wednesday'])
if cycle == 'month':
assert list(data[cycle].index) == list(range(1, 13))
if cycle == 'week_of_year':
assert list(data[cycle].index) == list(range(52))
y = np.array(data[cycle]['mean'])
y_mu = y.mean()
average_absolute_difference_from_mean = np.mean(np.abs(y - y_mu))
largest_difference = y.max() - y.min()
if metric_to_use == 'max_minus_min':
metric_val = largest_difference
else:
metric_val = average_absolute_difference_from_mean
return metric_val | bbbb3a0e7d97da4710319135deb583705fbb5a55 | 22,446 |
import requests
from tqdm import tqdm
from urllib.request import urlopen
def _download(url: str, dst: str) -> int:
"""
@param: url to download file
@param: dst place to put the file
"""
file_size = int(urlopen(url).info().get("Content-Length", -1))
r = requests.get(url, stream=True)
with open(get_full_data_path(dst), "wb") as f:
pbar = tqdm(total=int(r.headers['Content-Length']))
for chunk in r.iter_content(chunk_size=1024):
if chunk:
f.write(chunk)
pbar.update(len(chunk))
pbar.close()
return file_size | 55d748465d83d9d2a5408d4995c2363cf88090b2 | 22,448 |
def target2line(target, img_size, k, eval=False):
"""
    target: line representative in grid [L, grid_h, grid_w]
img_size: (width, height): Input image size, PIL Image size
eval=False : Default. For inference. Line width not big.
eval=True : For iou. Line width is bigger.
return line_img
"""
line_img = Image.new("L", img_size)
draw = ImageDraw.Draw(line_img)
resolution = 32 / pow(2, k)
grid_h = int(img_size[1] // resolution)
grid_w = int(img_size[0] // resolution)
line_width = 4
if not eval:
line_width = 2
for i in range(1, grid_h):
grid = []
grid.append(((0, i * img_size[1]/grid_h)) )
grid.append(((img_size[0], i * img_size[1]/grid_h)) )
draw.line(grid, fill='blue', width=0)
for i in range(grid_w):
grid = []
grid.append(((i * img_size[0]/grid_w, 0)) )
grid.append(((i * img_size[0]/grid_w, img_size[1])) )
draw.line(grid, fill='blue', width=0)
targets = np.transpose(target, [1, 2, 0])
targets = targets.reshape(grid_h, grid_w, -1, 4)
offset_x = np.linspace(0, grid_w - 1, grid_w)
offset_y = np.linspace(0, grid_h - 1, grid_h)
off_w, off_h = np.meshgrid(offset_x, offset_y)
indexes = np.argwhere( np.sum( targets.reshape(-1, 4) , axis=1, keepdims=False ) > 0 )
targets = np.transpose(targets, (3, 2, 0, 1))
targets[0,:] += off_w
targets[1,:] += off_h
targets[2,:] += off_w
targets[3,:] += off_h
targets = (targets * resolution)
targets = np.transpose(targets, ( 2, 3, 1, 0)).reshape(-1, 4)
detected = targets[indexes[..., 0]]
# print( 'detected lines shape: ', detected.shape)
[draw.line([(x1, y1), (x2,y2)], fill='white', width=line_width) for (x1, y1, x2, y2) in detected ]
return line_img | ca0ad3938261cdb31de5c1ad502bef2d16d3e6cb | 22,449 |
import types
def unmarshal(raw, signature):
"""Unmarshal objects.
The elements of the returned tuple will be of types according
to the column *Python OUT* in the :ref:`types summary <ref-types-table>`.
:param RawData raw: raw message data
:param signature: see :class:`~dcar.signature.Signature`
:return: tuple of unmarshalled data
:rtype: tuple
:raises ~dcar.MessageError: if the data could not be unmarshalled
"""
signature = _signature(signature)
data = []
for t, s in signature:
data.append(types[t].unmarshal(raw, s))
return tuple(data) | 958761030418450cb65aeef2d966ef3d9157167c | 22,450 |
def remove_keys(d, to_remove):
""" This function removes the given keys from the dictionary d. N.B.,
"not in" is used to match the keys.
Args:
d (dict): a dictionary
to_remove (list): a list of keys to remove from d
Returns:
dict: a copy of d, excluding keys in to_remove
"""
ret = {
k:v for k,v in d.items() if k not in to_remove
}
return ret | 94146bb19e8d39ea28c0940307c4c998fe5b7063 | 22,451 |
def get_crypto_quotes(**kwargs):
"""
Top-level function for obtaining all available cryptocurrency quotes
"""
return CryptoReader(**kwargs).fetch() | 1eb94bf698e10b43e0cb4c794d7fb66a823295c3 | 22,453 |
def mutual_information(y_true, y_pred):
"""Mutual information score.
"""
# This is a simple wrapper for returning the score as given in y_pred
return y_pred | fae45b40fb3ca285bef57e06b30c42d7f87b5286 | 22,454 |
from typing import Sequence
from typing import Optional
def AvgPool(window_shape: Sequence[int],
strides: Optional[Sequence[int]] = None,
padding: str = Padding.VALID.name,
normalize_edges: bool = False,
batch_axis: int = 0,
channel_axis: int = -1) -> InternalLayerMasked:
"""Layer construction function for an average pooling layer.
Based on `jax.example_libraries.stax.AvgPool`.
Args:
window_shape: The number of pixels over which pooling is to be performed.
strides: The stride of the pooling window. `None` corresponds to a stride of
`(1, 1)`.
padding: Can be `VALID`, `SAME`, or `CIRCULAR` padding. Here `CIRCULAR`
uses periodic boundary conditions on the image.
normalize_edges: `True` to normalize output by the effective receptive
field, `False` to normalize by the window size. Only has effect at the
edges when `SAME` padding is used. Set to `True` to retain correspondence
to `ostax.AvgPool`.
batch_axis: Specifies the batch dimension. Defaults to `0`, the leading
axis.
channel_axis: Specifies the channel / feature dimension. Defaults to `-1`,
the trailing axis. For `kernel_fn`, channel size is considered to be
infinite.
Returns:
`(init_fn, apply_fn, kernel_fn)`.
"""
return _Pool(_Pooling.AVG, window_shape, strides, padding, normalize_edges,
batch_axis, channel_axis) | 17e523a6092db0d90eb6960e76a3f8fece94d57a | 22,455 |
def pairwise_distances(x, y):
"""Computes pairwise squared l2 distances between tensors x and y.
Args:
x: Tensor of shape [n, feature_dim].
y: Tensor of shape [m, feature_dim].
Returns:
Float32 distances tensor of shape [n, m].
"""
# d[i,j] = (x[i] - y[j]) * (x[i] - y[j])'
# = sum(x[i]^2, 1) + sum(y[j]^2, 1) - 2 * x[i] * y[j]'
xs = tf.reduce_sum(x * x, axis=1)[:, tf.newaxis]
ys = tf.reduce_sum(y * y, axis=1)[tf.newaxis, :]
d = xs + ys - 2 * tf.matmul(x, y, transpose_b=True)
return d | 4928e02c0580f97b4a97db48c6d67f8e15000d46 | 22,456 |
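A small check of pairwise_distances (assuming TensorFlow 2.x eager mode, numpy, and the function above are available): the entries are squared Euclidean distances.

import numpy as np
import tensorflow as tf

x = tf.constant([[0.0, 0.0], [1.0, 0.0]])
y = tf.constant([[0.0, 0.0], [0.0, 2.0], [3.0, 4.0]])
d = pairwise_distances(x, y)
expected = np.array([[0.0, 4.0, 25.0],
                     [1.0, 5.0, 20.0]])   # e.g. d[1, 2] = (1-3)**2 + (0-4)**2 = 20
assert np.allclose(d.numpy(), expected)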
import re
def timerange(rstring):
"""
range from string specifier
| 2010-M08 -> range of August 2010
| 2009-Q1 -> range of first quarter, 2009
| 2001-S1 -> range of first "semi" 2001
| 2008 -> range of year 2008
:param rstring: range string
:rtype: timerange dictionary
"""
m_match = re.search(r'(\d{4})-M(\d{2})', rstring)
if m_match:
return month_range(int(m_match.group(1)), int(m_match.group(2)))
q_match = re.search(r'(\d{4})-Q(\d{1})', rstring)
if q_match:
return quarter_range(int(q_match.group(1)), int(q_match.group(2)))
s_match = re.search(r'(\d{4})-S(\d{1})', rstring)
if s_match:
return semi_range(int(s_match.group(1)), int(s_match.group(2)))
y_match = re.search(r'(\d{4})', rstring)
if y_match:
return year_range(int(y_match.group(1))) | d623e28ad4b040f833d96fed932117b8873f28ac | 22,457 |
def comp_height_wire(self):
"""Return bar height
Parameters
----------
self : CondType21
A CondType21 object
Returns
-------
H: float
Height of the bar [m]
"""
return self.Hbar | 98f98d021774166aa960080f353b6fbc01229eab | 22,458 |
from datetime import datetime, timedelta
import logging
def get_update_seconds(str_time: str) -> int:
"""This function calculates the seconds between the current time and the
    scheduled time utilising the datetime module.
Args:
str_time (str): Time of scheduled event taken from user input as a
string
Returns:
int: Returns the seconds until the scheduled event should occur
"""
#creates timedeltas for current time and update time
interval_bin = datetime(1900,1,1)
update_time = datetime.strptime(str_time, '%H:%M') - interval_bin
current_time = datetime.now()
current_timedelta = timedelta(hours=current_time.hour,
minutes = current_time.minute, seconds= current_time.second)
#calculates update interval by comparing the two timedeltas
if update_time >= current_timedelta:
update_interval = update_time - current_timedelta
if update_time < current_timedelta:
update_time+= timedelta(hours=24)
update_interval = update_time - current_timedelta
logging.info('UPDATE INTERVAL: ' + str(update_interval.seconds))
return update_interval.seconds | 0440ac847bc6c19290bdfa231971d33236335904 | 22,459 |
def full_name(decl, with_defaults=True):
"""
Returns declaration full qualified name.
If `decl` belongs to anonymous namespace or class, the function will return
C++ illegal qualified name.
:param decl: :class:`declaration_t`
:type decl: :class:`declaration_t`
:rtype: full name of declarations.
"""
if None is decl:
raise RuntimeError("Unable to generate full name for None object!")
if with_defaults:
if not decl.cache.full_name:
path = declaration_path(decl)
if path == [""]:
# Declarations without names are allowed (for examples class
# or struct instances). In this case set an empty name..
decl.cache.full_name = ""
else:
decl.cache.full_name = full_name_from_declaration_path(path)
return decl.cache.full_name
else:
if not decl.cache.full_partial_name:
path = partial_declaration_path(decl)
if path == [""]:
# Declarations without names are allowed (for examples class
# or struct instances). In this case set an empty name.
decl.cache.full_partial_name = ""
else:
decl.cache.full_partial_name = \
full_name_from_declaration_path(path)
return decl.cache.full_partial_name | b9828bf4045baa2edbec0c5007406309d90391c5 | 22,460 |
def zero_pad(data, window_size):
"""
Pads with window_size / 2 zeros the given input.
Args:
data (numpy.ndarray): data to be padded.
window_size (int): parameter that controls the size of padding.
Returns:
numpy.ndarray: padded data.
"""
pad_width = ceil(window_size / 2)
padded = np.pad(data, (pad_width, pad_width), 'constant', constant_values=(0,0))
return padded | 234f27f06bba9dff3a38292e1190b01a767bd56b | 22,462 |
import requests
def get_results(url_id):
"""Get the scanned results of a URL"""
r = requests.get('https://webcookies.org/api2/urls/%s' % url_id, headers=headers)
return r.json() | be5b660acd847066ec4c476dfe25d2fe21f8e2c4 | 22,463 |
def build_dense_constraint(base_name, v_vars, u_exprs, pos, ap_x):
"""Alias for :func:`same_act`"""
    return same_act(base_name, v_vars, u_exprs, pos, ap_x)
from typing import Tuple
from typing import Union
def list_snapshots(client, data_args) -> Tuple[str, dict, Union[list, dict]]:
""" List all snapshots at the system.
:type client: ``Client``
:param client: client which connects to api.
:type data_args: ``dict``
:param data_args: request arguments.
:return: human readable format, context output and the original raw response.
:rtype: ``tuple``
"""
limit = arg_to_number(data_args.get('limit'))
offset = arg_to_number(data_args.get('offset'))
params = assign_params(limit=limit, offset=offset)
raw_response = client.do_request(method='GET',
url_suffix='/plugin/products/threat-response/api/v1/snapshot',
params=params)
snapshots = raw_response.get('snapshots', [])
for snapshot in snapshots:
if created := snapshot.get('created'):
try:
snapshot['created'] = timestamp_to_datestring(created)
except ValueError:
pass
context = createContext(snapshots, removeNull=True)
headers = ['uuid', 'name', 'evidenceType', 'hostname', 'created']
outputs = {'Tanium.Snapshot(val.uuid === obj.uuid)': context}
human_readable = tableToMarkdown('Snapshots:', snapshots, headers=headers,
headerTransform=pascalToSpace, removeNull=True)
return human_readable, outputs, raw_response | 5452b162d372a016fc63fded510e9361869581b8 | 22,465 |
def data_context_path_computation_context_pathuuid_linktopology_uuidlink_uuid_get(uuid, topology_uuid, link_uuid): # noqa: E501
"""data_context_path_computation_context_pathuuid_linktopology_uuidlink_uuid_get
returns tapi.topology.LinkRef # noqa: E501
:param uuid: Id of path
:type uuid: str
    :param topology_uuid: Id of topology
:type topology_uuid: str
:param link_uuid: Id of link
:type link_uuid: str
:rtype: TapiTopologyLinkRef
"""
return 'do some magic!' | 98c1b8721fb3edcc8cb3acc196e8c5aa8eb8a4f6 | 22,466 |
from base58 import b58decode

def ipfs_qm_hash_to_32_bytes(ipfs_qm: str) -> str:
    """
    Transform an IPFS base58 Qm... hash into a 32-byte hex string (the 2 leading multihash bytes are stripped).
    :param ipfs_qm: IPFS base58 Qm... hash.
    :return: '0x'-prefixed hex string of the remaining 32 bytes.
    """
return f"0x{b58decode(ipfs_qm).hex()[4:]}" | e60386928c3836edcd3ebef3740e1bbc8b095724 | 22,467 |
def get_service_state(scheduler):
"""Return the current state of the job service."""
return {"state": get_service_state_str(scheduler)}, 200 | e18f66a2d2a2a97a37aed427178b65c2a9c8d919 | 22,468 |
def determine_file_type(filename):
"""
:param filename: str
:rtype: FileType
"""
if filename.endswith('.cls'):
return FileType.CLS
elif filename.endswith('.java'):
return FileType.JAVA
elif filename.endswith('.js'):
return FileType.JAVASCRIPT
elif filename.endswith('.php'):
return FileType.PHP
elif filename.endswith('.py'):
return FileType.PYTHON
    elif filename.endswith(('.yaml', '.yml')):
        return FileType.YAML
return FileType.OTHER | 030d11266a8b93056c1d82778ba95a67fea7a799 | 22,469 |
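# Example usage (a sketch; FileType is the enum the function above dispatches on):
if determine_file_type("plugins/secrets_check.py") == FileType.PYTHON:
    print("route to the Python analyzer")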
def sanitise_text(text):
"""When we process text before saving or executing, we sanitise it
by changing all CR/LF pairs into LF, and then nuking all remaining CRs.
This consistency also ensures that the files we save have the correct
line-endings depending on the operating system we are running on.
It also turns out that things break when after an indentation
level at the very end of the code, there is no empty line. For
example (thanks to Emiel v. IJsseldijk for reproducing!):
def hello():
print "hello" # and this is the last line of the text
Will not completely define method hello.
To remedy this, we add an empty line at the very end if there's
not one already.
"""
text = text.replace('\r\n', '\n')
text = text.replace('\r', '')
lines = text.split('\n')
if lines and len(lines[-1]) != 0:
return text + '\n'
else:
return text | 1d7d047fba7c8697748d0cf115e0f74fcad8c1c4 | 22,470 |
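# Example usage (a sketch): CR/LF pairs are normalised and a trailing newline is guaranteed.
raw = "def hello():\r\n    print('hello')"
assert sanitise_text(raw) == "def hello():\n    print('hello')\n"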
import string

import pandas as pd
from sklearn.datasets import make_regression

# `settings` is expected to be a module-level config dict providing the default sample count.
def create_regression(
    n_samples=settings["make_regression"]["n_samples"]
) -> pd.DataFrame:
"""Creates a fake regression dataset with 20 features
Parameters
----------
n_samples : int
number of samples to generate
Returns
-------
pd.DataFrame of features and targets:
feature names are lowercase letters, targets are in the column "target"
"""
X, y = make_regression(n_samples=n_samples, n_features=20, n_informative=5)
features = pd.DataFrame(X, columns=list(string.ascii_lowercase[: X.shape[1]]))
targets = pd.Series(y, name="target")
data = features.join(targets)
return data | b1d91b5e56366a8a2df7731550baedcf154e8c9a | 22,471 |
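# Example usage (a sketch; n_samples is passed explicitly so the `settings` default is not needed):
fake_data = create_regression(n_samples=100)
print(fake_data.shape)  # (100, 21): 20 lettered features plus the "target" column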
import tensorflow as tf

def _divide_no_nan(x, y, epsilon=1e-8):
"""Equivalent to tf.math.divide_no_nan but supports bfloat16."""
# need manual broadcast...
safe_y = tf.where(
tf.logical_and(tf.greater_equal(y, -epsilon), tf.less_equal(y, epsilon)),
tf.ones_like(y), y)
return tf.where(
tf.logical_and(
tf.greater_equal(tf.broadcast_to(y, x.get_shape()), -epsilon),
tf.less_equal(tf.broadcast_to(y, x.get_shape()), epsilon)),
tf.zeros_like(x), x / safe_y) | c7dc806bbdd7968fe61a9c7be76369b5608d9636 | 22,472 |
import numpy as np
import pandas as pd

def make_release(t, **params_or_funcs):
"""Create particle release table to be used for testing"""
t = np.array(t)
i = np.arange(len(t))
params = {
k: (p(i, t) if callable(p) else p) + np.zeros_like(t)
for k, p in params_or_funcs.items()
}
start_date = np.datetime64("2000-01-02T03")
minute = np.timedelta64(60, "s")
dates = start_date + np.array(t) * minute
return pd.DataFrame(data={**dict(release_time=dates.astype(str)), **params}) | 3dd2778dcf6962171d585244fc276832a300557c | 22,473 |
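# Example usage (a sketch): three particles released one minute apart, with a constant depth
# and a per-particle latitude generated from a callable of (index, time).
release_df = make_release([0, 1, 2], Z=5.0, lat=lambda i, t: 60.0 + 0.1 * i)
print(release_df["release_time"].tolist())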
import re
def get_apartment_divs(driver):
"""Scrapes the url the driver is pointing at and extract
any divs with "listitems". Those divs are used as
apartment objects at Immowelt.
Args:
driver (Webdriver): A Webdriver instance.
Returns:
        set: a set of all divs of class listitem...
"""
source = get_list_source(driver)
regex = re.compile('listitem.*relative js-listitem')
return set(source.findAll("div", regex)) | ccef9159d7731ce78d5deeff92364e4bc43f5f3e | 22,474 |
def smart_apply(tensor, static_fn, dynamic_fn):
"""
Apply transformation on `tensor`, with either `static_fn` for static
tensors (e.g., Numpy arrays, numbers) or `dynamic_fn` for dynamic
tensors.
Args:
tensor: The tensor to be transformed.
static_fn: Static transformation function.
dynamic_fn: Dynamic transformation function.
Returns:
Tensor: The transformed tensor.
"""
if isinstance(tensor, (tf.Tensor, tf.Variable, StochasticTensor,
zs.StochasticTensor)):
return dynamic_fn(tensor)
else:
return static_fn(tensor) | e2798377891ff6fc0ba4440357b83f927760bc59 | 22,475 |
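# Example usage (a sketch; assumes numpy as np plus the tensorflow/tfsnippet/zhusuan imports
# of the surrounding module, since the isinstance check above references them):
static_square = smart_apply(np.array([1.0, 2.0]), np.square, tf.square)      # NumPy path
dynamic_square = smart_apply(tf.constant([1.0, 2.0]), np.square, tf.square)  # TF path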
import time
def new_scan(host, publish = "off", start_new = "on", all = "done", ignoreMismatch = "on"):
"""This function requests SSL Labs to run new scan for the target domain."""
if helpers.is_ip(host):
print(red("[!] Your target host must be a domain, not an IP address! \
SSL Labs will onyl scan domains."))
exit()
else:
path = "analyze"
payload = {'host': host, 'publish': publish, 'start_new': start_new, 'all': all, 'ignoreMismatch': ignoreMismatch}
results = request_api(path, payload)
payload.pop('start_new')
while results['status'] != 'READY' and results['status'] != 'ERROR':
print("Scan in progress, please wait for the results.")
time.sleep(30)
results = request_api(path, payload)
return results | 7c7341778028fac5d7c7829f9b57174f0fdb251c | 22,476 |
import cv2
import numpy as np
import matplotlib.pyplot as plt

def removeBubbles(I, kernelSize = (11,11)):
    """Remove bright spots (mostly bubbles) in retardance images. Need to add a size filter.
    Parameters
    ----------
    I : ndarray
        Input retardance image.
    kernelSize : tuple of int
        Structuring-element size for the morphological opening.
    Returns
    -------
    ndarray
        Copy of the input with bright spots replaced by the local background.
    """
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, kernelSize)
Bg = cv2.morphologyEx(I, cv2.MORPH_OPEN, kernel)
    I8bit = I/np.nanmax(I[:])*255 # rescale to 8 bit; cv2.adaptiveThreshold requires an 8-bit single-channel image
I8bit = I8bit.astype(np.uint8, copy=False) # convert to 8 bit
ITh = cv2.adaptiveThreshold(I8bit,255,cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY,201,-1)
kernelSize = (3,3)
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, kernelSize)
IThBig = cv2.morphologyEx(ITh, cv2.MORPH_CLOSE, kernel)
kernelSize = (21,21)
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, kernelSize)
IThBig = cv2.morphologyEx(IThBig, cv2.MORPH_OPEN, kernel)
ITh=ITh-IThBig
    IBi = ITh.astype(np.bool_, copy=True) # convert to a boolean mask
INoBub = np.copy(I)
INoBub[IBi] = Bg[IBi]
figSize = (8,8)
fig = plt.figure(figsize = figSize)
a=fig.add_subplot(2,2,1)
plt.tick_params(labelbottom='off',labelleft='off') # labels along the bottom edge are off
plt.imshow(imadjust(I), cmap='gray')
plt.title('Retardance (MM)')
plt.show()
a=fig.add_subplot(2,2,2)
plt.tick_params(labelbottom='off',labelleft='off') # labels along the bottom edge are off
plt.imshow(IThBig, cmap='gray')
plt.title('Orientation (MM)')
plt.show()
a=fig.add_subplot(2,2,3)
plt.tick_params(labelbottom='off',labelleft='off') # labels along the bottom edge are off
plt.imshow(ITh, cmap='gray')
plt.title('Retardance (Py)')
plt.show()
a=fig.add_subplot(2,2,4)
plt.tick_params(labelbottom='off',labelleft='off') # labels along the bottom edge are off
plt.imshow(imadjust(INoBub), cmap='gray')
plt.title('Orientation (Py)')
plt.show()
return INoBub | 193863cb63ed3a1a785aa2c64367eb7a3518c671 | 22,477 |
import operator
import json
def assert_json_response(response, status_code, body, headers=None, body_cmp=operator.eq):
"""Assert JSON response has the expected status_code, body, and headers.
Asserts that the response's content-type is application/json.
body_cmp is a callable that takes the JSON-decoded response body and
expected body and returns a boolean stating whether the comparison
succeeds.
body_cmp(json.loads(response.data.decode('utf-8')), body)
"""
headers = dict(headers or {})
headers['Content-Type'] = 'application/json'
def json_cmp(response_body, body):
return body_cmp(json.loads(response_body.decode('utf-8')), body)
assert_response(response, status_code, body, headers, json_cmp) | db910cb0cb68bdbf9ad4b9490f3bbc6a87d1545d | 22,478 |
def variable(value, dtype=None, name=None, constraint=None):
"""Instantiates a variable and returns it.
# Arguments
value: Numpy array, initial value of the tensor.
dtype: Tensor type.
name: Optional name string for the tensor.
constraint: Optional projection function to be
applied to the variable after an optimizer update.
# Returns
A variable instance (with Keras metadata included).
# Examples
```python
>>> from keras import backend as K
>>> val = np.array([[1, 2], [3, 4]])
>>> kvar = K.variable(value=val, dtype='float64', name='example_var')
>>> K.dtype(kvar)
'float64'
>>> print(kvar)
example_var
>>> K.eval(kvar)
array([[ 1., 2.],
[ 3., 4.]])
```
"""
if name is None:
name = 'variable' + str(get_uid('variable'))
if constraint is not None:
raise NotImplementedError('Constraints are not supported')
if is_tensor(value):
variable = av.variable_from_node(name, value)
else:
if dtype is None:
value = np.array(value)
if value.dtype == 'int64':
value = np.array(value, dtype='int32')
dtype = 'int32'
elif value.dtype == 'float64':
dtype = floatx()
value = np.array(value, dtype=floatx())
else:
dtype = value.dtype.name
else:
value = np.array(value, dtype=dtype)
variable = av.variable(
name, value.shape, avalanche_dtype(dtype),
av.value_initializer(value))
variable._uses_learning_phase = False
variable._keras_shape = value.shape
variable._is_variable = True
return variable | 3dbb67493e4529469ca0da73159ec495b1d30f07 | 22,479 |
import json

# NOTE: `Protocol` below is the application's database model (queried via `Protocol.query`),
# assumed to be imported from the app's models module; it is not typing.Protocol.
def autoprotocol_protocol(protocol_id):
"""Get autoprotocol-python representation of a protocol."""
current_protocol = Protocol.query.filter_by(id=protocol_id).first()
if not current_protocol:
flash('No such specification!', 'danger')
return redirect('.')
if current_protocol.public:
print("PUBLIC")
else:
print("NOT PUBLIC")
if current_protocol.user != current_user and not current_protocol.public:
flash('Not your project!', 'danger')
return redirect('.')
if not current_protocol.protocol:
return ""
protocol_object = json.loads(current_protocol.protocol)
converter = AutoProtocol()
resp = make_response(converter.convert(protocol_object, current_protocol.name, current_protocol.description))
resp.headers['Content-Type'] = "text"
resp.headers['Content-Disposition'] = "attachment; filename=" + current_protocol.name + "-autoprotocol.py"
return resp | 85d0a3d5a215c50124c86b17114c82c60b07fae5 | 22,480 |
import numpy as np
from sympy import N
from sympy.physics.wigner import wigner_3j

# NOTE: `T` used below is assumed to be a module-level basis-transformation matrix defined
# elsewhere; the original `from re import T` import was spurious.
def tensor_to_P(tensor, wig3j = None):
"""
Transform an arbitray SO(3) tensor into real P which transforms under the irreducible
representation with l = 1. Wigner-3j symbols can be provided or calculated on
    the fly for faster evaluation. If provided, wig3j should be an array
    with indexing [l1,l2,m,m1,m2]
"""
P = []
n_rad, n_l = get_max(tensor)
lam = 1
# It is faster to pre-evaluate the wigner-3j symbol, even faster if it is passed
if not isinstance(wig3j, np.ndarray):
wig3j = np.zeros([n_l,n_l,2*n_l+1,2*n_l+1,2*n_l+1])
wig3j = wig3j.astype(np.complex128)
for l1 in range(n_l):
for l2 in range(n_l):
for m in range(-lam,lam+1):
for m1 in range(-n_l,n_l+1):
for m2 in range(-n_l,n_l+1):
wig3j[l2,l1,m,m1,m2] = N(wigner_3j(lam,l2,l1,m,m1,m2))
for mu in range(-lam,lam + 1):
P.append([])
for n1 in range(n_rad):
for n2 in range(n_rad):
for l1 in range(n_l):
for l2 in range(n_l):
if (l1 + l2)%2 == 0: continue
p = 0
for m in range(-n_l, n_l+1):
wig = wig3j[l2,l1,mu,(m-mu),-m]
if wig != 0:
p += tensor['{},{},{}'.format(n1,l1,m)]*tensor['{},{},{}'.format(n2,l2,m-mu)].conj() *\
(-1)**m * wig
p *= (-1)**(lam-l2)
P[mu+lam].append(p)
p_real = []
for pi in np.array(P).T:
p_real.append((T.dot(pi))[[2,0,1]])
P = np.array(p_real).T
if not np.allclose(P.imag,np.zeros_like(P)):
raise Exception('Ooops, something went wrong. P not purely real.')
return P.real.T | c4cf2746ac0376b29df437e1938644c9df8e1dd2 | 22,481 |
from soc.modules.ghop.logic.helper import notifications as ghop_notifications
from soc.modules.ghop.logic.models import comment as ghop_comment_logic
from soc.modules.ghop.logic.models import task_subscription as \
    ghop_task_subscription_logic
def createNotificationMail(request, *args, **kwargs):
"""Appengine task that sends mail to the subscribed users.
Expects the following to be present in the POST dict:
comment_key: Specifies the comment id for which to send the notifications
task_key: Specifies the task key name for which the comment belongs to
Args:
request: Django Request object
"""
# set default batch size
batch_size = 10
post_dict = request.POST
comment_key = post_dict.get('comment_key')
task_key = post_dict.get('task_key')
if not (comment_key and task_key):
# invalid task data, log and return OK
return error_handler.logErrorAndReturnOK(
'Invalid createNotificationMail data: %s' % post_dict)
comment_key = long(comment_key)
# get the task entity under which the specified comment was made
task_entity = ghop_task_logic.logic.getFromKeyName(task_key)
# get the comment for the given id
comment_entity = ghop_comment_logic.logic.getFromID(
comment_key, task_entity)
if not comment_entity:
# invalid comment specified, log and return OK
return error_handler.logErrorAndReturnOK(
'Invalid comment specified: %s/%s' % (comment_key, task_key))
# check and retrieve the subscriber_start_key that has been done last
if 'subscriber_start_index' in post_dict:
subscriber_start_index = post_dict['subscriber_start_index']
else:
subscriber_start_index = 0
# get all subscribers to GHOP task
fields = {
'task': task_entity,
}
ts_entity = ghop_task_subscription_logic.logic.getForFields(
fields, unique=True)
subscribers = db.get(ts_entity.subscribers[
subscriber_start_index:subscriber_start_index+batch_size])
task_url = "http://%(host)s%(task)s" % {
'host': system.getHostname(),
'task': redirects.getPublicRedirect(
task_entity, {'url_name': 'ghop/task'}),
}
# create the data for the mail to be sent
message_properties = {
'task_url': task_url,
'redirect_url': "%(task_url)s#c%(cid)d" % {
'task_url': task_url,
'cid': comment_entity.key().id_or_name()
},
'comment_entity': comment_entity,
'task_entity': task_entity,
}
subject = DEF_TASK_UPDATE_SUBJECT_FMT % {
'title': task_entity.title,
}
for subscriber in subscribers:
    # `entity` was undefined here in the original; the current subscriber is presumably intended.
    ghop_notifications.sendTaskUpdate(subscriber, subject, message_properties)
if len(subscribers) == batch_size:
# spawn task for sending out notifications to next set of subscribers
next_start = subscriber_start_index + batch_size
task_params = {
'comment_key': comment_key,
'task_key': task_key,
'subscriber_start_index': next_start
}
task_url = '/tasks/ghop/task/mail/create'
new_task = taskqueue.Task(params=task_params, url=task_url)
new_task.add('mail')
# return OK
return http.HttpResponse() | 859372c83fb37d440456c380cf313a27b029f018 | 22,482 |
from collections import defaultdict

def mergediscnodes(tree):
"""Reverse transformation of ``splitdiscnodes()``."""
treeclass = tree.__class__
for node in tree.subtrees():
merge = defaultdict(list) # a series of queues of nodes
# e.g. merge['VP_2*'] = [Tree('VP_2', []), ...]
# when origin is present (index after *), the node is moved to where
# the next one is expected, e.g., VP_2*1 after VP_2*0 is added.
nodes = list(node) # the original, unmerged children
node[:] = [] # the new, merged children
for child in nodes:
if not isinstance(child, Tree):
node.append(child)
continue
match = SPLITLABELRE.search(child.label)
if not match:
node.append(child)
continue
label, part, _ = match.groups()
grandchildren = list(child)
child[:] = []
if not merge[child.label]:
merge[child.label].append(treeclass(label, []))
node.append(merge[child.label][0])
merge[child.label][0].extend(grandchildren)
if part:
nextlabel = '%s*%d' % (label, int(part) + 1)
merge[nextlabel].append(merge[child.label].pop(0))
return tree | 300aaebe95604c611ecf1f65373ba83f97361438 | 22,483 |
def count_go_nogo_trials(eventcode):
"""
:param eventcode: list of event codes from operant conditioning file
:return: number of go and no go trials in the go/no go tasks
"""
lever_on = get_events_indices(eventcode, ['RLeverOn', 'LLeverOn'])
(go_trials, nogo_trials) = (0, 0)
for lever in lever_on:
if eventcode[lever + 1] in ('LightOn1', 'LightOn2'):
nogo_trials += 1
else:
go_trials += 1
return go_trials, nogo_trials | 2de71a663f158a0942d2c3f01973ed5dc999b3d7 | 22,485 |
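# Example usage (a sketch; get_events_indices comes from the same module and is expected to
# return the positions of the listed lever events): a light cue right after a lever
# presentation marks a no-go trial, anything else counts as a go trial.
events = ['LLeverOn', 'LightOn1', 'LLeverOff', 'RLeverOn', 'RPressOn', 'RLeverOff']
go, nogo = count_go_nogo_trials(events)  # expected (1, 1) for this event list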
def getScriptExecutionContext():
"""
Returns the repository description instance and
the set of items selected on script action execution.
@return: Script execution context.
@rtype: L{ScriptExecutionContext<datafinder.gui.user.script_api.ScriptExecutionContext>}
"""
scriptExecutionContext = None
    if _context.scriptController.boundScriptExecutionContext is not None:
repository, items = _context.scriptController.boundScriptExecutionContext
itemPaths = [item.path for item in items]
scriptExecutionContext = ScriptExecutionContext(RepositoryDescription(repository), itemPaths)
return scriptExecutionContext | e49e8cfd140f967859cf0e0c75bfe69fac87835f | 22,486 |
import copy
import numpy
def electrondensity_spin(ccdata, volume, mocoeffslist):
"""Calculate the magnitude of the electron density at every point in a volume for either up or down spin
Inputs:
ccdata -- ccData object
volume -- Volume object (will not be altered)
mocoeffslist -- list of molecular orbital to calculate electron density from;
i.e. [ccdata.mocoeffs[0][1:2]]
Output:
Volume object with wavefunction at each grid point stored in data attribute
Attributes:
coords -- the coordinates of the atoms
mocoeffs -- mocoeffs for all of the occupied eigenvalues
gbasis -- gbasis from a parser object
volume -- a template Volume object (will not be altered)
Note: mocoeffs is a list of NumPy arrays. The list will be of length 1.
"""
assert len(mocoeffslist) == 1, "mocoeffslist input to the function should have length of 1."
bfs = getbfs(ccdata)
density = copy.copy(volume)
density.data = numpy.zeros(density.data.shape, "d")
x, y, z = getGrid(density)
# For occupied orbitals
# `mocoeff` and `gbasis` in ccdata object is ordered in a way `homos` can specify which orbital
# is the highest lying occupied orbital in mocoeff and gbasis.
for mocoeffs in mocoeffslist:
for mocoeff in mocoeffs:
wavefn = numpy.zeros(density.data.shape, "d")
for bs in range(len(bfs)):
data = numpy.zeros(density.data.shape, "d")
for i, xval in enumerate(x):
for j, yval in enumerate(y):
tmp = []
for zval in z:
tmp.append(pyamp(bfs, bs, xval, yval, zval))
data[i, j, :] = tmp
data *= mocoeff[bs]
wavefn += data
density.data += wavefn ** 2
return density | 7a232f2dbae8ff7905b2eff680a44521b010334e | 22,487 |
import numpy as np
import pandas as pd

def create_missing_dataframe(nrows, ncols, density=.9, random_state=None, index_type=None, freq=None):
"""Create a Pandas dataframe with random missingness.
Parameters
----------
nrows : int
Number of rows
ncols : int
Number of columns
density: float
Amount of available data
random_state: float, optional
Random seed. If not given, default to 33.
    index_type: str, optional
        Accepts the following values: "dt" for timestamp, "int" for integer.
    freq: str, optional
        Sampling frequency. This option is only available if index_type is "dt".
Returns
-------
df : pandas.DataFrame
Pandas dataframe containing sample data with random missing rows.
"""
# Create a nrows x ncols matrix
data = np.random.uniform(100, size=(nrows, ncols))
df = pd.DataFrame(data)
if index_type:
if index_type == "dt":
if freq is None:
freq='h'
idx = _makeDatetimeIndex(nrows, freq=freq)
df = df.set_index(idx)
elif index_type == "int":
return
else:
raise ValueError("Can't recognize index_type. Try the following values: 'dt', 'int'.")
i_idx, j_idx = _create_missing_idx(nrows, ncols, density, random_state)
df.values[i_idx, j_idx] = None
return df | e3c7f44f5238f929928ee5ec65c33bdb91fd8705 | 22,488 |
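# Example usage (a sketch; the private helpers _create_missing_idx and _makeDatetimeIndex are
# defined in the same module): an hourly-indexed 50x4 frame with roughly 20% missing cells.
df = create_missing_dataframe(50, 4, density=0.8, index_type="dt", freq="h")
print(df.isna().mean())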
def decode_name_value_pairs(buffer):
"""
Decode a name-value pair list from a buffer.
:param bytearray buffer: a buffer containing a FastCGI name-value pair list
:raise ProtocolError: if the buffer contains incomplete data
:return: a list of (name, value) tuples where both elements are unicode strings
:rtype: list
"""
index = 0
pairs = []
while index < len(buffer):
if buffer[index] & 0x80 == 0:
name_length = buffer[index]
index += 1
elif len(buffer) - index > 4:
name_length = length4_struct.unpack_from(buffer, index)[0] & 0x7fffffff
index += 4
else:
raise ProtocolError('not enough data to decode name length in name-value pair')
if len(buffer) - index > 1 and buffer[index] & 0x80 == 0:
value_length = buffer[index]
index += 1
elif len(buffer) - index > 4:
value_length = length4_struct.unpack_from(buffer, index)[0] & 0x7fffffff
index += 4
else:
raise ProtocolError('not enough data to decode value length in name-value pair')
if len(buffer) - index >= name_length + value_length:
name = buffer[index:index + name_length].decode('ascii')
value = buffer[index + name_length:index + name_length + value_length].decode('utf-8')
pairs.append((name, value))
index += name_length + value_length
else:
raise ProtocolError('name/value data missing from buffer')
return pairs | ef302eb2c6c55605fdc9b4a9ef06f59782ba1d94 | 22,489 |
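# Example usage (a sketch): one short pair (both lengths < 128) stays on the 1-byte length
# path, so the module-level length4_struct helper is not exercised here.
buf = bytearray([4, 9]) + b"HOST" + b"127.0.0.1"
print(decode_name_value_pairs(buf))  # [('HOST', '127.0.0.1')]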
def host_is_local(host: str) -> bool:
"""
Tells whether given host is local.
:param host: host name or address
:return: True if host is local otherwise False
"""
local_names = {
"localhost",
"127.0.0.1",
}
is_local = any(local_name in host for local_name in local_names)
return is_local | ce823b8c309ec842ed1dd5bb04e41356db500658 | 22,490 |
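# Example usage (a sketch): guard a call that should only ever target a locally running service.
assert host_is_local("http://127.0.0.1:8080/health")
assert not host_is_local("api.example.com")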
import numpy as np
import numba as nb

def sigma_function(coeff_matU, coeff_matX, order, V_slack):
"""
:param coeff_matU: array with voltage coefficients
:param coeff_matX: array with inverse conjugated voltage coefficients
:param order: should be prof - 1
:param V_slack: slack bus voltage vector. Must contain only 1 slack bus
:return: sigma complex value
"""
if len(V_slack) > 1:
print('Sigma values may not be correct')
V0 = V_slack[0]
coeff_matU = coeff_matU / V0
coeff_matX = coeff_matX / V0
nbus = coeff_matU.shape[1]
complex_type = nb.complex128
sigmes = np.zeros(nbus, dtype=complex_type)
if order % 2 == 0:
M = int(order / 2) - 1
else:
M = int(order / 2)
for d in range(nbus):
a = coeff_matU[1:2 * M + 2, d]
b = coeff_matX[0:2 * M + 1, d]
C = np.zeros((2 * M + 1, 2 * M + 1), dtype=complex_type)
for i in range(2 * M + 1):
if i < M:
C[1 + i:, i] = a[:2 * M - i]
else:
C[i - M:, i] = - b[:3 * M - i + 1]
lhs = np.linalg.solve(C, -a)
sigmes[d] = np.sum(lhs[M:]) / (np.sum(lhs[:M]) + 1)
return sigmes | 1accad7b95360a0652143ca367c54bca372662a7 | 22,492 |
from typing import List

from monty.serialization import loadfn  # loadfn is assumed to come from monty.serialization

def get_dbs(db_names: List[str], db_file: str = "./db_info.pub.json") -> List:
    """Read the db_file and get the databases corresponding to <<db_names>>
    Args:
        db_names (List[str]): A list of names of the databases we want
        db_file (str): The db_file we are reading from
    Returns:
        List: the stores we need to access
    """
db_dict = loadfn(db_file)
stores = []
for j_name in db_names:
if j_name not in db_dict:
raise ValueError(
f"The store named {j_name} is missing from the db_file")
stores.append(db_dict[j_name])
return stores | ab0074f3cc5d846f7c24bf4bca6b348bfa3d6bf3 | 22,494 |