content (string, lengths 35 to 762k) | sha1 (string, length 40) | id (int64, 0 to 3.66M) |
---|---|---|
import dbm
def get_sim_data():
"""
Create the data needed to initialize a simulation
Performs the steps necessary to set up a stratified plume model simulation
and passes the input variables to the `Model` object and
`Model.simulate()` method.
Returns
-------
profile : `ambient.Profile` object
Return a profile object from the BM54 CTD data
particles : list of `PlumeParticle` objects
List of `PlumeParticle` objects containing the dispersed phase initial
conditions
z : float
Depth of the release port (m)
R : float
Radius of the release port (m)
maxit : float
Maximum number of iterations to converge between inner and outer
plumes
toler : float
Relative error tolerance to accept for convergence (--)
delta_z : float
Maximum step size to use in the simulation (m). The ODE solver
in `calculate` is set up with adaptive step size integration, so
in theory this value determines the largest step size in the
output data, but not the numerical stability of the calculation.
"""
# Get the ambient CTD data
profile = get_profile()
# Specify the release location and geometry and initialize a particle
# list
z0 = 300.
R = 0.15
particles = []
# Add a dissolving particle to the list
composition = ['oxygen', 'nitrogen', 'argon']
yk = np.array([1.0, 0., 0.])
o2 = dbm.FluidParticle(composition)
Q_N = 150. / 60. / 60.
de = 0.005
lambda_1 = 0.85
particles.append(stratified_plume_model.particle_from_Q(profile, z0, o2,
yk, Q_N, de, lambda_1))
# Add an insoluble particle to the list
composition = ['inert']
yk = np.array([1.])
oil = dbm.InsolubleParticle(True, True)
mb0 = 50.
de = 0.01
lambda_1 = 0.8
particles.append(stratified_plume_model.particle_from_mb0(profile, z0,
oil, [1.], mb0, de, lambda_1))
# Set the other simulation parameters
maxit = 2
toler = 0.2
delta_z = 1.0
# Return the results
return (profile, particles, z0, R, maxit, toler, delta_z) | e253665f451b167a188c997ff36fc406e0f3a587 | 19,171 |
import numpy as np
def _organize_arch(fils, pth):
"""Allocate data from each specific type of file (keys from the input dict) to a new dict
Arguments:
fils {dict} -- Dictionary containing type of files and list of files
Returns:
        [dict] -- Dictionary mapping each file type to per-image entries holding 'path' and 'coords'
"""
imgdata = dict()
for i in fils.keys():
images = dict()
for ii in np.arange(len(fils[i])):
images[str('img_' + str(ii+1))] = {'path': pth + str('\\') + str(fils[i][ii]),
'coords': np.loadtxt(pth + str('\\') + str(fils[i][ii]), skiprows=1, usecols=(-2, -1))}
imgdata[i] = images
return imgdata | c62c9b23bf4735c2062090d77278ce5a8acbd668 | 19,172 |
def gather_allele_freqs(record, all_samples, males, females, pop_dict, pops, no_combos = False):
"""
Wrapper to compute allele frequencies for all sex & population pairings
"""
#Get allele frequencies
calc_allele_freq(record, all_samples)
if len(males) > 0:
calc_allele_freq(record, males, prefix = 'MALE')
if len(females) > 0:
calc_allele_freq(record, females, prefix = 'FEMALE')
if len(pops) > 0:
for pop in pops:
pop_samps = [s for s in all_samples if pop_dict.get(s, None) == pop]
calc_allele_freq(record, pop_samps, prefix = pop)
if len(males) > 0 and not no_combos:
calc_allele_freq(record, [s for s in pop_samps if s in males],
prefix = pop + '_MALE')
if len(females) > 0 and not no_combos:
calc_allele_freq(record, [s for s in pop_samps if s in females],
prefix = pop + '_FEMALE')
#Get POPMAX AF for biallelic sites
if 'MULTIALLELIC' not in record.filter and len(record.alleles) <= 2:
AFs = [record.info['{0}_AF'.format(pop)][0] for pop in pops]
popmax = max(AFs)
record.info['POPMAX_AF'] = popmax
return record | 0f74616fa64ee5b3582467da27161906abf28463 | 19,173 |
def get_selected(n=1):
"""
    Return the first n selected objects (a single object if n == 1, the full list if n == -1), or an empty list if nothing is selected.
"""
if get_selection_len():
selection = bpy.context.selected_objects
if n == 1:
return selection[0]
elif n == -1:
return selection[:]
else:
return selection[:n]
else:
return [] | 6049900ef069731b1fbe9f40fff184085940c83e | 19,174 |
def label_src_vertno_sel(label, src):
""" Find vertex numbers and indices from label
Parameters
----------
label : Label
Source space label
src : dict
Source space
Returns
-------
vertno : list of length 2
Vertex numbers for lh and rh
src_sel : array of int (len(idx) = len(vertno[0]) + len(vertno[1]))
        Indices of the selected vertices in source space
"""
if src[0]['type'] != 'surf':
        raise Exception('Labels are only supported with surface source spaces')
vertno = [src[0]['vertno'], src[1]['vertno']]
if label.hemi == 'lh':
vertno_sel = np.intersect1d(vertno[0], label.vertices)
src_sel = np.searchsorted(vertno[0], vertno_sel)
vertno[0] = vertno_sel
vertno[1] = np.array([])
elif label.hemi == 'rh':
vertno_sel = np.intersect1d(vertno[1], label.vertices)
src_sel = np.searchsorted(vertno[1], vertno_sel) + len(vertno[0])
vertno[0] = np.array([])
vertno[1] = vertno_sel
elif label.hemi == 'both':
vertno_sel_lh = np.intersect1d(vertno[0], label.lh.vertices)
src_sel_lh = np.searchsorted(vertno[0], vertno_sel_lh)
vertno_sel_rh = np.intersect1d(vertno[1], label.rh.vertices)
src_sel_rh = np.searchsorted(vertno[1], vertno_sel_rh) + len(vertno[0])
src_sel = np.hstack((src_sel_lh, src_sel_rh))
vertno = [vertno_sel_lh, vertno_sel_rh]
else:
raise Exception("Unknown hemisphere type")
return vertno, src_sel | a1858258b6c789557d6bdeff9428cc7aacbe4655 | 19,175 |
def get_subjects(creative_work):
"""
Returns generated html of subjects associated with the
Creative Work HTML or 0-length string
Parameters:
creative_work -- Creative Work
"""
html_output = ''
#! Using LOC Facet as proxy for subjects
facets = list(
REDIS_DATASTORE.smembers(
"{0}:hasAnnotation".format(creative_work.redis_key)))
for facet in facets:
if facet.startswith("bf:Facet"):
subject_template = loader.get_template('subject-icon.html')
loc_key = facet.split(":")[-1]
context = {
'name': REDIS_DATASTORE.hget('bf:Annotation:Facet:LOCFirstLetters',
loc_key),
'letter': loc_key}
html_output += subject_template.render(Context(context))
return mark_safe(html_output) | 328aeadb21a22972c0843efdba251b2f6c5f937d | 19,176 |
def sort(request):
"""Boolean sort keyword for concat and DataFrame.append."""
return request.param | 83f35eb41bc0cf7eecea932ae4f14646d9e8732f | 19,178 |
def is_comprehension(leaf):
"""
Return true if the leaf is the beginning of a list/set/dict comprehension.
Returns true for generators as well
"""
if leaf.type != 'operator' or leaf.value not in {'[', '(', '{'}:
return False
sibling = leaf.get_next_sibling()
return (sibling.type in {'testlist_comp', 'dictorsetmaker'}
and sibling.children[-1].type == 'sync_comp_for') | 11fff76ff8ed19b3d57359b56db886c003603a86 | 19,179 |
def get_class(x):
"""
x: index
"""
# Example
distribution = [0, 2000, 4000, 6000, 8000, 10000]
x_class = 0
for i in range(len(distribution)):
if x > distribution[i]:
x_class += 1
return x_class | 1ae95f3d9bc6f342169232ab10cd08a42de0f692 | 19,180 |
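A quick usage sketch for get_class above, reusing the example distribution [0, 2000, 4000, 6000, 8000, 10000] hard-coded in the function.
assert get_class(4500) == 3   # strictly greater than 0, 2000 and 4000 only
assert get_class(0) == 0      # not strictly greater than any threshold
assert get_class(10001) == 6  # exceeds every threshold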
import theano.tensor as T
def square(x):
"""Elementwise square of a tensor. """
return T.sqr(x) | c052e31a450b91eb1e6a08843f99afd6e618da9d | 19,181 |
import re
def update_email_body(parsed_email, key):
"""
Finds and updates the "text/html" and "text/plain" email body parts.
Parameters
----------
parsed_email: email.message.Message, required
EmailMessage representation the downloaded email
key: string, required
The object key that will be used for storing the message in S3
Returns
-------
email.message.Message
EmailMessage representation the updated email
"""
# template in the key for purposes of optional displaying to the recipient
this_disclaimer_text = re.sub("{key}", key, disclaimer_text)
this_footer_text = re.sub("{key}", key, footer_text)
text_charset = None
if parsed_email.is_multipart():
# Walk over message parts of this multipart email.
for part in parsed_email.walk():
content_type = part.get_content_type()
content_disposition = str(part.get_content_disposition())
if content_type == 'text/plain' and 'attachment' not in content_disposition:
transfer_encoding = part['Content-Transfer-Encoding']
text_charset = part.get_content_charset()
new_text_body = update_text_content(part, this_disclaimer_text, this_footer_text)
part.set_content(new_text_body, "plain", charset=text_charset, cte=transfer_encoding)
elif content_type == 'text/html' and 'attachment' not in content_disposition:
transfer_encoding = part['Content-Transfer-Encoding']
html_charset = part.get_content_charset()
new_html_body = update_html_content(part, this_disclaimer_text, this_footer_text)
if new_html_body is not None:
part.set_content(new_html_body.encode(html_charset), "text", "html", cte=transfer_encoding)
part.set_charset(html_charset)
else:
# Its a plain email with text/plain body
transfer_encoding = parsed_email['Content-Transfer-Encoding']
text_charset = parsed_email.get_content_charset()
new_text_body = update_text_content(parsed_email, this_disclaimer_text, this_footer_text)
parsed_email.set_content(new_text_body, "plain", charset=text_charset, cte=transfer_encoding)
return parsed_email | d942a4cf47af9d7c1e36a4a2af5d0239b90464d8 | 19,182 |
import json
def create_collaborators(collaborators, destination_url, destination, credentials):
"""Post collaborators to GitHub
INPUT:
collaborators: python list of dicts containing collaborators info to be POSTED to GitHub
destination_url: the root url for the GitHub API
        destination: the team and repo '<team>/<repo>' to add collaborators to
    OUTPUT: A status dict once all collaborators have been added
"""
for collaborator in collaborators:
if collaborator['login'] == credentials['user_name']:
continue
url = destination_url + "repos/" + destination + "/collaborators/" + collaborator["login"]
perm = "push"
if collaborator["permissions"]["admin"] == True or collaborator['login'] == credentials['user_name']:
perm = "admin"
        # add the collaborator with the appropriate permission level
r = put_req(url, json.dumps({"permission": perm}), credentials)
status = check_res(r)
print(status)
return {"done": "true"} | 2d39a2970d9f52af5209b1f4717c0b4d39e1cb5c | 19,183 |
import scipy
def complexity_recurrence(signal, delay=1, dimension=3, tolerance="default", show=False):
"""Recurrence matrix (Python implementation)
Fast Python implementation of recurrence matrix (tested against pyRQA). Returns a tuple
with the recurrence matrix (made of 0s and 1s) and the distance matrix (the non-binarized
version of the former).
Parameters
----------
signal : Union[list, np.ndarray, pd.Series]
The signal (i.e., a time series) in the form of a vector of values.
delay : int
Time delay (often denoted 'Tau', sometimes referred to as 'lag'). In practice, it is common
to have a fixed time lag (corresponding for instance to the sampling rate; Gautama, 2003),
or to find a suitable value using some algorithmic heuristics (see ``delay_optimal()``).
dimension : int
Embedding dimension (often denoted 'm' or 'd', sometimes referred to as 'order'). Typically
2 or 3. It corresponds to the number of compared runs of lagged data. If 2, the embedding
returns an array with two columns corresponding to the original signal and its delayed (by
Tau) version.
tolerance : float
Tolerance (similarity threshold, often denoted as 'r'). The radius used for detecting
neighbours (states considered as recurrent). A rule of thumb is to set 'r' so that the
percentage of points classified as recurrences is about 2-5%.
show : bool
Visualise recurrence matrix.
See Also
--------
complexity_embedding, complexity_tolerance
Returns
-------
np.ndarray
The recurrence matrix.
np.ndarray
The distance matrix.
Examples
----------
>>> import neurokit2 as nk
>>>
>>> signal = nk.signal_simulate(duration=5, sampling_rate=100, frequency=[5, 6], noise=0.01)
>>>
>>> # Default r
>>> rc, _ = nk.complexity_recurrence(signal, show=True)
>>>
>>> # Larger radius
>>> rc, d = nk.complexity_recurrence(signal, tolerance=0.5, show=True)
>>>
>>> # Optimization of tolerance via recurrence matrix
>>> rc, d = nk.complexity_tolerance(signal, delay=1, dimension=3, method="recurrence", show=True)
References
----------
- Rawald, T., Sips, M., Marwan, N., & Dransch, D. (2014). Fast computation of recurrences
in long time series. In Translational Recurrences (pp. 17-29). Springer, Cham.
- Dabiré, H., Mestivier, D., Jarnet, J., Safar, M. E., & Chau, N. P. (1998). Quantification of
sympathetic and parasympathetic tones by nonlinear indexes in normotensive rats. American
Journal of Physiology-Heart and Circulatory Physiology, 275(4), H1290-H1297.
"""
if tolerance == "default":
tolerance, _ = complexity_tolerance(
signal, method="sd", delay=None, dimension=None, show=False
)
# Time-delay embedding
emb = complexity_embedding(signal, delay=delay, dimension=dimension)
# Compute distance matrix
d = scipy.spatial.distance.cdist(emb, emb, metric="euclidean")
# Flip the matrix to match traditional RQA representation
d = np.flip(d, axis=0)
# Initialize the recurrence matrix filled with 0s
recmat = np.zeros((len(d), len(d)))
# If lower than tolerance, then 1
recmat[d <= tolerance] = 1
# Plotting
if show is True:
try:
fig, axes = plt.subplots(ncols=2)
axes[0].imshow(recmat, cmap="Greys")
axes[0].set_title("Recurrence Matrix")
im = axes[1].imshow(d)
axes[1].set_title("Distance")
cbar = fig.colorbar(im, ax=axes[1], fraction=0.046, pad=0.04)
cbar.ax.plot([0, 1], [tolerance] * 2, color="r")
except MemoryError as e:
raise MemoryError(
"NeuroKit error: complexity_rqa(): the recurrence plot is too large to display. ",
"You can recover the matrix from the parameters and try to display parts of it.",
) from e
return recmat, d | cc93a80ff34fffc5774f7b52109fd09d8b0ac69e | 19,184 |
def get_awb_shutter( f ):
"""
Get AWB and shutter speed from file object
This routine extracts the R and B white balance gains and the shutter speed
from a jpeg file made using the Raspberry Pi camera. These are stored as text in
a custom Makernote.
The autoexposure and AWB white balance values are not available directly until
a picture is taken and are saved in a Jpeg.
Returns 0 for the values if they're not found
"""
f.seek(256)
s = f.read(512) # Only part of the header needed
r_pos = s.find('gain_r=')
b_pos = s.find('gain_b=')
s_pos = s.find(' exp=')
gain_r = eval(s[r_pos+7:r_pos+12].split()[0]) if r_pos > -1 else 0
gain_b = eval(s[b_pos+7:b_pos+12].split()[0]) if b_pos > -1 else 0
shutter = eval(s[s_pos+5:s_pos+12].split()[0]) if s_pos > -1 else 0
return (gain_r,gain_b,shutter) | cfafdf531809729ae0ec96ab90a60a4961b9437a | 19,186 |
def rgb2lab(rgb_arr):
"""
    Convert colour from RGB to CIE 1976 L*a*b*
Parameters
----------
rgb_arr: ndarray
Color in RGB
Returns
-------
lab_arr: ndarray
Color in CIE 1976 L*a*b*
"""
return xyz2lab(rgb2xyz(rgb_arr)) | 3eba11b8017908393e1e238e6b4ae046dd265520 | 19,187 |
def format_rfidcard(rfidcard):
"""
:type rfidcard: apps.billing.models.RfidCard
"""
return {
'atqa': rfidcard.atqa if len(rfidcard.atqa) > 0 else None,
'sak': rfidcard.sak if len(rfidcard.sak) > 0 else None,
'uid': rfidcard.uid,
'registered_at': rfidcard.registered_at.isoformat(),
'user': rfidcard.user.username,
} | 120ca8e338b01235b2ba12ae3f874fd317ffebe8 | 19,188 |
def make_exposure_shares(exposure_levels, geography="geo_nm", variable="rank"):
"""Aggregate shares of activity at different levels of exposure
Args:
exposure_levels (df): employment by lad and sector and exposure ranking
geography (str): geography to aggregate over
variable (str): variable we want to calculate shares over
"""
exp_distr = (
exposure_levels.groupby(["month_year", variable, geography])["value"]
.sum()
.reset_index(drop=False)
.groupby([geography, "month_year"])
.apply(lambda x: x.assign(share=lambda df: df["value"] / df["value"].sum()))
).reset_index(drop=True)
return exp_distr | 02d990f2b08e3acb2a2b8ac01e44848770bdea71 | 19,189 |
def boxscores(sports=["basketball/nba"], output="dict", live_only=True, verbose=False):
"""
~ 10 seconds
"""
links = boxlinks(sports=sports, live_only=live_only, verbose=verbose)
boxes = [boxscore(link) for link in links]
return boxes | 9cdc1bd4ec90d8ab9593d49316669dc6b801cf2e | 19,191 |
from bisect import bisect_left, insort
from collections import deque
from itertools import islice
def runningMedian(seq, M):
"""
Purpose: Find the median for the points in a sliding window (odd number in size)
as it is moved from left to right by one point at a time.
Inputs:
seq -- list containing items for which a running median (in a sliding window)
is to be calculated
M -- number of items in window (window size) -- must be an integer > 1
    Outputs:
medians -- list of medians with size N - M + 1
Note:
1. The median of a finite list of numbers is the "center" value when this list
is sorted in ascending order.
2. If M is an even number the two elements in the window that
are close to the center are averaged to give the median (this
is not by definition)
"""
seq = iter(seq)
s = []
m = M // 2 #// does a truncated division like integer division in Python 2
# Set up list s (to be sorted) and load deque with first window of seq
s = [item for item in islice(seq,M)]
d = deque(s)
# Simple lambda function to handle even/odd window sizes
median = lambda : s[m] if bool(M&1) else (s[m-1]+s[m])*0.5
# Sort it in increasing order and extract the median ("center" of the sorted window)
s.sort()
medians = [median()]
# Now slide the window by one point to the right for each new position (each pass through
# the loop). Stop when the item in the right end of the deque contains the last item in seq
for item in seq:
old = d.popleft() # pop oldest from left
d.append(item) # push newest in from right
del s[bisect_left(s, old)] # locate insertion point and then remove old
insort(s, item) # insert newest such that new sort is not required
medians.append(median())
return medians | b37af61c9f6f62bd6fbd395bc2c423a770ba2797 | 19,192 |
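A short usage sketch of runningMedian above; with a window of 3 the output has N - M + 1 = 6 entries.
print(runningMedian([3, 1, 4, 1, 5, 9, 2, 6], 3))  # [3, 1, 4, 5, 5, 6]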
def min_max_normalize(img):
""" Center and normalize the given array.
Parameters:
----------
img: np.ndarray
"""
min_img = img.min()
max_img = img.max()
return (img - min_img) / (max_img - min_img) | faaafbc8e0b36f26f8319b671de326dd6a97e6f9 | 19,193 |
def find_common_features(experiment: FCSExperiment,
samples: list or None = None):
"""
Generate a list of common features present in all given samples of an experiment. By 'feature' we mean
a variable measured for a particular sample e.g. CD4 or FSC-A (forward scatter)
Parameters
----------
experiment: FCSExperiment
Experiment to extract features from
samples: list, optional
List of samples to get common features of. If None, will search all samples in experiment.
Returns
-------
List
"""
if samples is None:
samples = experiment.list_samples()
assert all([s in experiment.list_samples() for s in samples]), \
'One or more samples specified do not belong to experiment'
features = [_get_features(experiment, sample_id=s) for s in samples]
common_features = set(features[0])
for f in features[1:]:
common_features.intersection_update(f)
return list(common_features) | 8022a97721eb9ff26efcba347e4f631aff3ede84 | 19,194 |
import heapq
import numpy as np
def generate_blend_weights(positions, new_p, n_neighbors):
""" Use inverse distance and K-Nearest-Neighbors Interpolation to estimate weights
according to [Johansen 2009] Section 6.2.4
"""
distances = []
for n, p in positions.items():
distance = np.linalg.norm(new_p - p)
heapq.heappush(distances, (distance, n))
    distances = heapq.nsmallest(n_neighbors, distances)  # keep the k nearest, sorted by distance
weights = dict()
if distances[0][0] <= 0:
weights[distances[0][1]] = 1.0
else:
inv_k_distance = 1.0 / distances[-1][0]
inv_distances = [(1.0 / d) - inv_k_distance for d, n in distances]
new_weights = inv_distances / np.sum(inv_distances)
for idx, v in enumerate(distances):
weights[v[1]] = new_weights[idx]
return weights | f9db2b47d5847cdb2e7367cebe9b9bf81809b11d | 19,195 |
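A minimal sketch of the inverse-distance KNN weighting above; the positions dict and query point are invented for illustration.
import numpy as np
positions = {'a': np.array([0.0, 0.0]), 'b': np.array([1.0, 0.0]), 'c': np.array([5.0, 5.0])}
weights = generate_blend_weights(positions, np.array([0.2, 0.0]), n_neighbors=2)
print(weights)  # nearest point 'a' gets weight 1.0; the k-th neighbour's weight is zero by construction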
def check_method(adata):
"""Check that method output fits expected API."""
assert "connectivities" in adata.obsp
assert "distances" in adata.obsp
return True | 0ad772187c6d2960149723df17f6b0cf3fa703d1 | 19,196 |
def load_model(Model, params, checkpoint_path='', device=None):
""" loads a model from a checkpoint or from scratch if checkpoint_path='' """
if checkpoint_path == '':
model = Model(params['model_params'], **params['data_params'])
else:
print("model:", Model)
print(f'-> Loading model checkpoint: {checkpoint_path}')
model = Model.load_from_checkpoint(checkpoint_path)
if device is not None:
model = model.eval().cuda(device)
return model | 8f1339b5548024714f731de037ef320535fc3b69 | 19,197 |
import inspect
def get_widget_type_choices():
"""
Generates Django model field choices based on widgets
in holodeck.widgets.
"""
choices = []
for name, member in inspect.getmembers(widgets, inspect.isclass):
if member != widgets.Widget:
choices.append((
"%s.%s" % (member.__module__, member.__name__),
member.name
))
return choices | 143ff91ee3bc4166e5091e344a38fb2bbb72934e | 19,198 |
from numpy import array, cos, fmod, outer, sin, tensordot
def iau2000a(jd_tt):
"""Compute Earth nutation based on the IAU 2000A nutation model.
`jd_tt` - Terrestrial Time: Julian date float, or NumPy array of floats
Returns a tuple ``(delta_psi, delta_epsilon)`` measured in tenths of
a micro-arcsecond. Each value is either a float, or a NumPy array
with the same dimensions as the input argument.
"""
# Interval between fundamental epoch J2000.0 and given date.
t = (jd_tt - T0) / 36525.0
# Compute fundamental arguments from Simon et al. (1994), in radians.
a = fundamental_arguments(t)
# ** Luni-solar nutation **
# Summation of luni-solar nutation series (in reverse order).
arg = nals_t.dot(a)
fmod(arg, TAU, out=arg)
sarg = sin(arg)
carg = cos(arg)
stsc = array((sarg, t * sarg, carg)).T
ctcs = array((carg, t * carg, sarg)).T
dpsi = tensordot(stsc, lunisolar_longitude_coefficients)
deps = tensordot(ctcs, lunisolar_obliquity_coefficients)
# Compute and add in planetary components.
if getattr(t, 'shape', ()) == ():
a = t * anomaly_coefficient + anomaly_constant
else:
a = (outer(anomaly_coefficient, t).T + anomaly_constant).T
a[-1] *= t
fmod(a, TAU, out=a)
arg = napl_t.dot(a)
fmod(arg, TAU, out=arg)
sc = array((sin(arg), cos(arg))).T
dpsi += tensordot(sc, nutation_coefficients_longitude)
deps += tensordot(sc, nutation_coefficients_obliquity)
return dpsi, deps | beadd6469a85b475dc22ca1e2a967310555140a9 | 19,199 |
from sqlalchemy import select
import json
async def _async_get_states_and_events_with_filter(
hass: HomeAssistant, sqlalchemy_filter: Filters, entity_ids: set[str]
) -> tuple[list[Row], list[Row]]:
"""Get states from the database based on a filter."""
for entity_id in entity_ids:
hass.states.async_set(entity_id, STATE_ON)
hass.bus.async_fire("any", {ATTR_ENTITY_ID: entity_id})
await async_wait_recording_done(hass)
def _get_states_with_session():
with session_scope(hass=hass) as session:
return session.execute(
select(States.entity_id).filter(
sqlalchemy_filter.states_entity_filter()
)
).all()
filtered_states_entity_ids = {
row[0]
for row in await get_instance(hass).async_add_executor_job(
_get_states_with_session
)
}
def _get_events_with_session():
with session_scope(hass=hass) as session:
return session.execute(
select(EventData.shared_data).filter(
sqlalchemy_filter.events_entity_filter()
)
).all()
filtered_events_entity_ids = set()
for row in await get_instance(hass).async_add_executor_job(
_get_events_with_session
):
event_data = json.loads(row[0])
if ATTR_ENTITY_ID not in event_data:
continue
filtered_events_entity_ids.add(json.loads(row[0])[ATTR_ENTITY_ID])
return filtered_states_entity_ids, filtered_events_entity_ids | fa0131a87ac9ac517ffd63bb563600e12bed68de | 19,201 |
def decrypt_password(private_key: PrivateKey, encrypted: str) -> str:
"""Return decrypt the given encrypted password using private_key and the RSA cryptosystem.
Your implementation should be very similar to the one from class, except now
the public key is a data class rather than a tuple.
"""
n = private_key.p * private_key.q
return ''.join([chr(pow(ord(c), private_key.d, n)) for c in encrypted]) | 607b5e33cff940aa999f56b2e39f56673d94ff7f | 19,202 |
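A round-trip sketch of the character-wise RSA scheme above; the PrivateKey dataclass shape, the public exponent e = 17 and the encrypt helper are assumptions, using the textbook toy key p = 61, q = 53, d = 2753.
from dataclasses import dataclass

@dataclass
class PrivateKey:  # hypothetical container matching the fields used in decrypt_password
    p: int
    q: int
    d: int

def encrypt_password(e: int, n: int, plaintext: str) -> str:
    # mirror of decrypt_password: each character becomes chr(c**e mod n)
    return ''.join(chr(pow(ord(c), e, n)) for c in plaintext)

key = PrivateKey(p=61, q=53, d=2753)           # toy RSA key: n = 3233, e = 17
cipher = encrypt_password(17, 61 * 53, 'hi!')
assert decrypt_password(key, cipher) == 'hi!'  # decryption recovers the plaintext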
from collections import OrderedDict
def load_jed(fn):
"""
JEDEC file generated by 1410/84 from PALCE20V8H-15 06/28/20 22:42:11*
DM AMD*
DD PALCE20V8H-15*
QF2706*
G0*
F0*
L00000 0000000000000000000000000100000000000000*
"""
ret = {}
d = OrderedDict()
with open(fn) as f:
li = 0
for l in f:
li += 1
# remove *, newline
l = l.strip()[0:-1]
if not l:
continue
if li == 2:
ret["description"] = l
continue
parts = l.split(" ")
main_line = " ".join(parts[1:])
if parts[0] == "DM":
ret["vendor"] = main_line
elif parts[0] == "DD":
ret["part"] = main_line
elif l[0:2] == "QF":
ret["len"] = int(l[2:])
elif l[0] == "L":
# L00000 0000000000000000000000000100000000000000*
addr, bits = l.split(" ")
addr = int(addr[1:], 10)
d[addr] = bits
else:
continue
ret["data"] = d
return ret | 6570bcdaabb495c13e9419a532c85b15efdf957a | 19,203 |
def plaintext(text, keeplinebreaks=True):
"""Extract the text elements from (X)HTML content
>>> plaintext('<b>1 < 2</b>')
u'1 < 2'
>>> plaintext(tag('1 ', tag.b('<'), ' 2'))
u'1 < 2'
>>> plaintext('''<b>1
... <
... 2</b>''', keeplinebreaks=False)
u'1 < 2'
:param text: `unicode` or `Fragment`
:param keeplinebreaks: optionally keep linebreaks
"""
if isinstance(text, Fragment):
text = text.as_text()
else:
text = stripentities(striptags(text))
if not keeplinebreaks:
text = text.replace(u'\n', u' ')
return text | c4e5e9a9b41fc7e0dc6b50995d7ec9a9bae1296f | 19,204 |
def fetch_rows(product):
"""
Returns the product and a list of timestamp and price for the given product in the current DATE,
ordered by timestamp.
"""
# We query the data lake by passing a SQL query to maystreet_data.query
# Note that when we filter by month/day, they need to be 0-padded strings,
# e.g. January is '01' and not 1.
query = f"""
SELECT
ExchangeTimestamp AS ts,
price
FROM
"prod_lake"."p_mst_data_lake".mt_trade
WHERE
y = '{DATE.year}'
AND m = '{str(DATE.month).rjust(2, '0')}'
AND d = '{str(DATE.day).rjust(2, '0')}'
AND product = '{product}'
ORDER BY
ExchangeTimestamp
"""
return product, list(md.query(md.DataSource.DATA_LAKE, query)) | 8b6f3df658ca38054bd49255b0842f40f6d4bffa | 19,206 |
def create_learner(sm_writer, model_helper):
"""Create the learner as specified by FLAGS.learner.
Args:
* sm_writer: TensorFlow's summary writer
* model_helper: model helper with definitions of model & dataset
Returns:
* learner: the specified learner
"""
learner = None
if FLAGS.learner == 'full-prec':
learner = FullPrecLearner(sm_writer, model_helper)
elif FLAGS.learner == 'weight-sparse':
learner = WeightSparseLearner(sm_writer, model_helper)
elif FLAGS.learner == 'channel':
learner = ChannelPrunedLearner(sm_writer, model_helper)
elif FLAGS.learner == 'chn-pruned-gpu':
learner = ChannelPrunedGpuLearner(sm_writer, model_helper)
elif FLAGS.learner == 'chn-pruned-rmt':
learner = ChannelPrunedRmtLearner(sm_writer, model_helper)
elif FLAGS.learner == 'dis-chn-pruned':
learner = DisChnPrunedLearner(sm_writer, model_helper)
elif FLAGS.learner == 'uniform':
learner = UniformQuantLearner(sm_writer, model_helper)
elif FLAGS.learner == 'uniform-tf':
learner = UniformQuantTFLearner(sm_writer, model_helper)
elif FLAGS.learner == 'non-uniform':
learner = NonUniformQuantLearner(sm_writer, model_helper)
else:
raise ValueError('unrecognized learner\'s name: ' + FLAGS.learner)
return learner | 76231a6413560ccc1e1d90fb974f90a83b3bb4f4 | 19,207 |
def load_decoder(autoencoder):
"""
Gets the decoders associated with the inputted model
"""
dim = len(autoencoder.get_config()['input_layers'])
mag_phase_flag = False
decoders = []
if dim == 2:
mag_phase_flag = True
decoders.append(autoencoder.get_layer('mag_decoder'))
decoders.append(autoencoder.get_layer('phase_decoder'))
else:
decoders.append(autoencoder.get_layer('decoder'))
return decoders,mag_phase_flag | 8e39470e48f5a6c147d93567c0bdb33a588c790d | 19,208 |
def translate_boarding_cards(boarding_cards):
"""Translate list of BoardingCards to readable travel instructions.
This function sorts list of random BoardingCard objects connecting starts
with ends of every stage of the trip then returns readable instructions
that include seat numbers, location names and additional data.
:param boarding_cards: list of :class:`BoardingCard` objects.
:return: list of human readable string that describe the whole trip.
"""
# Creating helper maps, one is keyed based on start locations, second one
# is keyed on end locations
starts_map = {
boarding_card.start_key: boarding_card for boarding_card
in boarding_cards
}
ends_map = {
boarding_card.end_key: boarding_card for boarding_card
in boarding_cards
}
# Guessing start and end of the trip
trip_start_keys = [
start_key for start_key in starts_map
if start_key not in ends_map
]
trip_end_keys = [
end_key for end_key in ends_map
if end_key not in starts_map
]
# Validating our guess of start and end of the trip
if len(trip_start_keys) > 1:
raise ValueError(u'More than 1 starting point in the trip!')
if not trip_start_keys:
raise ValueError(u'No starting point in the trip!')
if len(trip_end_keys) > 1:
raise ValueError(u'More than 1 ending point in the trip!')
if not trip_end_keys:
raise ValueError(u'No ending point in the trip!')
trip_start_key = trip_start_keys[0]
trip_end_key = trip_end_keys[0]
# Connecting boarding cards into ordered trip list
trip = [starts_map[trip_start_key]]
current_stop_index = 0
trip_reached_end = False
while not trip_reached_end:
last_stop = trip[current_stop_index]
if last_stop.end_key == trip_end_key:
trip_reached_end = True
else:
trip.append(starts_map[last_stop.end_key])
current_stop_index += 1
# building human readable messages from every stop of the trip
directions = [
boarding_card.human_readable_message for boarding_card in trip
]
if TRIP_FINISH_MESSAGE:
directions.append(TRIP_FINISH_MESSAGE)
return directions | 0986ab2669fa4376aebd28804586a1566544610e | 19,209 |
def detect_side(start: dict, point: dict, degrees):
"""detect to which side robot should rotate"""
if start['lat'] < point['lat'] and start['lng'] < point['lng']:
return f'{degrees} degrees right'
elif start['lat'] < point['lat'] and start['lng'] > point['lng']:
return f'{degrees} degrees left'
elif start['lat'] > point['lat'] and start['lng'] < point['lng']:
return f'{degrees + 90} degrees right'
elif start['lat'] > point['lat'] and start['lng'] > point['lng']:
return f'{degrees + 90} degrees left'
    elif degrees == 0:
        return f'{0} degrees'
    elif degrees == 180:
        return f'{180} degrees right'
    elif start['lat'] == point['lat'] and start['lng'] < point['lng']:
        return f'{degrees} degrees right'
    elif start['lat'] == point['lat'] and start['lng'] > point['lng']:
        return f'{degrees} degrees left' | 124833bbdcdf36c280cdde8e829f15ae5301e323 | 19,210 |
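Two illustrative calls for detect_side above, with made-up coordinates.
start = {'lat': 10.0, 'lng': 20.0}
print(detect_side(start, {'lat': 11.0, 'lng': 21.0}, 45))  # '45 degrees right'
print(detect_side(start, {'lat': 9.0, 'lng': 19.0}, 30))   # '120 degrees left'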
import pathlib
import sh
def iterate_log_lines(file_path:pathlib.Path, n:int = 0, **kwargs):
"""Reads the file in line by line
    dev note: One of the best features of this function is that we can use efficient
    unix-style operations. Because we know we are inside of a unix container
there should be no problem relying on GNU tail directly.
"""
abs_path = file_path.absolute()
def get_tail_iter(replay=0):
return sh.tail("-n", replay, "-f", str(abs_path), _iter=True)
tail_itr = get_tail_iter(replay=n)
while True:
try:
for line in tail_itr:
yield line.strip()
except KeyboardInterrupt as err:
raise err
except Exception as err:
log.error(err)
log.warning("continuing tail of file")
tail_itr = get_tail_iter(replay=0) | 54684fcc7a41b623321534202ee250e7c46760d2 | 19,212 |
import torch
def moving_sum(x, start_idx: int, end_idx: int):
"""
From MONOTONIC CHUNKWISE ATTENTION
https://arxiv.org/pdf/1712.05382.pdf
Equation (18)
x = [x_1, x_2, ..., x_N]
MovingSum(x, start_idx, end_idx)_n = Sigma_{m=n−(start_idx−1)}^{n+end_idx-1} x_m
for n in {1, 2, 3, ..., N}
x : src_len, batch_size
start_idx : start idx
end_idx : end idx
Example
src_len = 5
batch_size = 3
x =
[[ 0, 5, 10],
[ 1, 6, 11],
[ 2, 7, 12],
[ 3, 8, 13],
[ 4, 9, 14]]
MovingSum(x, 3, 1) =
[[ 0, 5, 10],
[ 1, 11, 21],
[ 3, 18, 33],
[ 6, 21, 36],
[ 9, 24, 39]]
MovingSum(x, 1, 3) =
[[ 3, 18, 33],
[ 6, 21, 36],
[ 9, 24, 39],
[ 7, 17, 27],
[ 4, 9, 14]]
"""
assert start_idx > 0 and end_idx > 0
assert len(x.size()) == 2
src_len, batch_size = x.size()
# batch_size, 1, src_len
x = x.t().unsqueeze(1)
# batch_size, 1, src_len
moving_sum_weight = x.new_ones([1, 1, end_idx + start_idx - 1])
moving_sum = (
torch.nn.functional.conv1d(
x, moving_sum_weight, padding=start_idx + end_idx - 1
)
.squeeze(1)
.t()
)
moving_sum = moving_sum[end_idx:-start_idx]
assert src_len == moving_sum.size(0)
assert batch_size == moving_sum.size(1)
return moving_sum | fa3cb672e23fccad75965da2ca10955134167c7e | 19,213 |
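A quick check of the docstring example above, assuming PyTorch is installed.
import torch
x = torch.arange(15, dtype=torch.float32).reshape(3, 5).t()  # the 5 x 3 example matrix from the docstring
print(moving_sum(x, 3, 1))  # first column: 0, 1, 3, 6, 9
print(moving_sum(x, 1, 3))  # first column: 3, 6, 9, 7, 4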
import json
import time
import redis
def _wait_for_event(event_name, redis_address, extra_buffer=0):
"""Block until an event has been broadcast.
This is used to synchronize drivers for the multi-node tests.
Args:
event_name: The name of the event to wait for.
redis_address: The address of the Redis server to use for
synchronization.
extra_buffer: An amount of time in seconds to wait after the event.
Returns:
The data that was passed into the corresponding _broadcast_event call.
"""
redis_host, redis_port = redis_address.split(":")
redis_client = redis.StrictRedis(host=redis_host, port=int(redis_port))
while True:
event_infos = redis_client.lrange(EVENT_KEY, 0, -1)
events = {}
for event_info in event_infos:
name, data = json.loads(event_info)
if name in events:
raise Exception("The same event {} was broadcast twice."
.format(name))
events[name] = data
if event_name in events:
# Potentially sleep a little longer and then return the event data.
time.sleep(extra_buffer)
return events[event_name]
time.sleep(0.1) | 0aa10f52e1682dd9d1cdc0949d245da26c26bcd4 | 19,214 |
import boto3
import botocore.exceptions
def _stack_exists(stack_name):
""" Checks if the stack exists.
Returns True if it exists and False if not.
"""
cf = boto3.client('cloudformation')
exists = False
try:
cf.describe_stacks(StackName=stack_name)
exists = True
except botocore.exceptions.ClientError as ex:
if ex.response['Error']['Code'] == 'ValidationError':
exists = False
else:
raise
return exists | 5ddc6c17342e3c03317d5da0bf8b4d0a338a8f21 | 19,215 |
def make_sequence_output(detections, classes):
"""
Create the output object for an entire sequence
:param detections: A list of lists of detections. Must contain an entry for each image in the sequence
:param classes: The list of classes in the order they appear in the label probabilities
:return:
"""
return {
'detections': detections,
'classes': classes
} | 019d3b74699af20a9f3cbc43b575e8bae5e15946 | 19,216 |
import pandas as pd
def json_to_dataframe(json, subset=0):
    """Convert a JSON record (dict) or list of records into a pandas DataFrame.
Returns:\n
Dataframe
"""
# This is to make sure it has the right format when passed to pandas
if type(json) != list:
json = [json]
try:
df = pd.DataFrame(json, [i for i in range(0, len(json))])
except KeyError as identifier:
print("There was an error")
# raise identifier
if subset == 0:
return df
return df.head(subset) | 151914e3e11759ff74283c303912a0b6842cd213 | 19,217 |
def dms_to_angle(dms):
"""
Get the angle from a tuple of numbers or strings giving its sexagesimal
representation in degrees
@param dms: (degrees, minutes, seconds)
"""
sign = 1
angle_string = dms[0]
if angle_string.startswith('-'):
sign = -1
angle_string = angle_string[1:]
angle_deg = int(angle_string)
angle_min = int(dms[1])
angle_sec = float(dms[2])
if not 0 <= angle_min < 60:
raise VdtAngleError("not a valid value for minutes: " + str(angle_min))
if not 0 <= angle_sec < 60:
raise VdtAngleError("not a valid value for seconds: " + str(angle_sec))
return sign * VAngle((angle_deg, angle_min, angle_sec), unit=u.deg) | c56c66093a877aae6474d583da6d1db81ccbc7cd | 19,218 |
def fix(text):
"""Repairs encoding problems."""
# NOTE(Jonas): This seems to be fixed on the PHP side for now.
# import ftfy
# return ftfy.fix_text(text)
return text | 7fd97db345a604131f52b272a7dd13ab4f3f9153 | 19,219 |
def generate_labeled_regions(shape, n_regions, rand_gen=None, labels=None,
                             affine=np.eye(4), dtype=int):
"""Generate a 3D volume with labeled regions.
Parameters
----------
shape: tuple
shape of returned array
n_regions: int
number of regions to generate. By default (if "labels" is None),
add a background with value zero.
labels: iterable
labels to use for each zone. If provided, n_regions is unused.
rand_gen: numpy.random.RandomState
random generator to use for generation.
affine: numpy.ndarray
affine of returned image
Returns
-------
regions: nibabel.Nifti1Image
data has shape "shape", containing region labels.
"""
n_voxels = shape[0] * shape[1] * shape[2]
if labels is None:
labels = range(0, n_regions + 1)
n_regions += 1
else:
n_regions = len(labels)
regions = generate_regions_ts(n_voxels, n_regions, rand_gen=rand_gen)
# replace weights with labels
for n, row in zip(labels, regions):
row[row > 0] = n
data = np.zeros(shape, dtype=dtype)
    data[np.ones(shape, dtype=bool)] = regions.sum(axis=0).T
return nibabel.Nifti1Image(data, affine) | 501c9bab430558fdc0cf45491498c8e3bcc7d3c4 | 19,220 |
def get_source_item_ids(portal, q=None):
"""
Get ids of hosted feature services that have an associated scene service.
Can pass in portal search function query (q).
Returns ids only for valid source items.
"""
source_item_ids = []
scene_item_ids = get_scene_service_item_ids(portal)
items = portal.search(q=q)
for item in items:
if item['type'] == 'Feature Service':
if '/Hosted/' in item['url']:
if 'Hosted Service' in item['typeKeywords']:
# if the service has been published the item
# will have 'Hosted Service' in typeKeywords
# Check if the feature service has an associated
# scene service
feat_service_name = item['url'].split('/')[-2]
for scene_id in scene_item_ids:
scene_service_name = portal.item(scene_id)['url'].split('/')[-2]
if feat_service_name == scene_service_name:
if item['id'] not in source_item_ids:
source_item_ids.append(item['id'])
return source_item_ids | 448ac2d94fda4dc3c69bd8fe9eb00587a0f0dcb2 | 19,221 |
import numpy as np
from PIL import Image, ImageDraw
def rasterize_poly(poly_xy, shape):
"""
Args:
poly_xy: [(x1, y1), (x2, y2), ...]
Returns a bool array containing True for pixels inside the polygon
"""
_poly = poly_xy[:-1]
# PIL wants *EXACTLY* a list of tuple (NOT a numpy array)
_poly = [tuple(p) for p in _poly]
img = Image.new('L', (shape[1], shape[0]), 0)
ImageDraw.Draw(img).polygon(_poly, outline=0, fill=1)
return np.array(img) == 1 | d1abf5cef5a1fb57286ff38d575a575a679a4002 | 19,222 |
def from_url_representation(url_rep: str) -> str:
"""Reconvert url representation of path to actual path"""
return url_rep.replace("__", "/").replace("-_-", "_") | 5cf4e1e8cb284c66449807ea275e4fa6b5a3e3ad | 19,223 |
from unittest.mock import patch
async def test_async_start_from_history_and_switch_to_watching_state_changes_multiple(
hass,
recorder_mock,
):
"""Test we startup from history and switch to watching state changes."""
hass.config.set_time_zone("UTC")
utcnow = dt_util.utcnow()
start_time = utcnow.replace(hour=0, minute=0, second=0, microsecond=0)
# Start t0 t1 t2 Startup End
# |--20min--|--20min--|--10min--|--10min--|---------30min---------|---15min--|---15min--|
# |---on----|---on----|---on----|---on----|----------on-----------|---off----|----on----|
def _fake_states(*args, **kwargs):
return {
"binary_sensor.state": [
ha.State(
"binary_sensor.state",
"on",
last_changed=start_time,
last_updated=start_time,
),
]
}
with patch(
"homeassistant.components.recorder.history.state_changes_during_period",
_fake_states,
):
with freeze_time(start_time):
await async_setup_component(
hass,
"sensor",
{
"sensor": [
{
"platform": "history_stats",
"entity_id": "binary_sensor.state",
"name": "sensor1",
"state": "on",
"start": "{{ utcnow().replace(hour=0, minute=0, second=0) }}",
"duration": {"hours": 2},
"type": "time",
},
{
"platform": "history_stats",
"entity_id": "binary_sensor.state",
"name": "sensor2",
"state": "on",
"start": "{{ utcnow().replace(hour=0, minute=0, second=0) }}",
"duration": {"hours": 2},
"type": "time",
},
{
"platform": "history_stats",
"entity_id": "binary_sensor.state",
"name": "sensor3",
"state": "on",
"start": "{{ utcnow().replace(hour=0, minute=0, second=0) }}",
"duration": {"hours": 2},
"type": "count",
},
{
"platform": "history_stats",
"entity_id": "binary_sensor.state",
"name": "sensor4",
"state": "on",
"start": "{{ utcnow().replace(hour=0, minute=0, second=0) }}",
"duration": {"hours": 2},
"type": "ratio",
},
]
},
)
await hass.async_block_till_done()
for i in range(1, 5):
await async_update_entity(hass, f"sensor.sensor{i}")
await hass.async_block_till_done()
assert hass.states.get("sensor.sensor1").state == "0.0"
assert hass.states.get("sensor.sensor2").state == "0.0"
assert hass.states.get("sensor.sensor3").state == "0"
assert hass.states.get("sensor.sensor4").state == "0.0"
one_hour_in = start_time + timedelta(minutes=60)
with freeze_time(one_hour_in):
async_fire_time_changed(hass, one_hour_in)
await hass.async_block_till_done()
assert hass.states.get("sensor.sensor1").state == "1.0"
assert hass.states.get("sensor.sensor2").state == "1.0"
assert hass.states.get("sensor.sensor3").state == "0"
assert hass.states.get("sensor.sensor4").state == "50.0"
turn_off_time = start_time + timedelta(minutes=90)
with freeze_time(turn_off_time):
hass.states.async_set("binary_sensor.state", "off")
await hass.async_block_till_done()
async_fire_time_changed(hass, turn_off_time)
await hass.async_block_till_done()
assert hass.states.get("sensor.sensor1").state == "1.5"
assert hass.states.get("sensor.sensor2").state == "1.5"
assert hass.states.get("sensor.sensor3").state == "0"
assert hass.states.get("sensor.sensor4").state == "75.0"
turn_back_on_time = start_time + timedelta(minutes=105)
with freeze_time(turn_back_on_time):
async_fire_time_changed(hass, turn_back_on_time)
await hass.async_block_till_done()
assert hass.states.get("sensor.sensor1").state == "1.5"
assert hass.states.get("sensor.sensor2").state == "1.5"
assert hass.states.get("sensor.sensor3").state == "0"
assert hass.states.get("sensor.sensor4").state == "75.0"
with freeze_time(turn_back_on_time):
hass.states.async_set("binary_sensor.state", "on")
await hass.async_block_till_done()
assert hass.states.get("sensor.sensor1").state == "1.5"
assert hass.states.get("sensor.sensor2").state == "1.5"
assert hass.states.get("sensor.sensor3").state == "1"
assert hass.states.get("sensor.sensor4").state == "75.0"
end_time = start_time + timedelta(minutes=120)
with freeze_time(end_time):
async_fire_time_changed(hass, end_time)
await hass.async_block_till_done()
assert hass.states.get("sensor.sensor1").state == "1.75"
assert hass.states.get("sensor.sensor2").state == "1.75"
assert hass.states.get("sensor.sensor3").state == "1"
assert hass.states.get("sensor.sensor4").state == "87.5" | 6fb66dde3fad24fbccffb0f8ce74e666e3551e56 | 19,224 |
def runningmean(data, nav):
"""
Compute the running mean of a 1-dimenional array.
Args:
data: Input data of shape (N, )
nav: Number of points over which the data will be averaged
Returns:
Array of shape (N-(nav-1), )
"""
return np.convolve(data, np.ones((nav,)) / nav, mode='valid') | 8ba55de399d8789a43624582ac14f2f4804668ef | 19,225 |
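A one-line check of the convolution-based running mean above.
import numpy as np
print(runningmean(np.array([1., 2., 3., 4., 5.]), 3))  # [2. 3. 4.]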
def test_space(gym_space, expected_size, expected_min, expected_max):
"""Test that an action or observation space is the correct size and bounds.
Parameters
----------
gym_space : gym.spaces.Box
gym space object to be tested
expected_size : int
expected size
expected_min : float or array_like
expected minimum value(s)
expected_max : float or array_like
expected maximum value(s)
Returns
-------
bool
True if the test passed, False otherwise
"""
return gym_space.shape[0] == expected_size \
and all(gym_space.high == expected_max) \
and all(gym_space.low == expected_min) | e43e2e4d064bec033e6cef6f9c1c905b13541cc7 | 19,226 |
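A usage sketch for test_space above, assuming OpenAI Gym is available; it checks a 3-dimensional Box bounded in [-1, 1].
import numpy as np
from gym.spaces import Box
space = Box(low=-1.0, high=1.0, shape=(3,), dtype=np.float32)
assert test_space(space, expected_size=3, expected_min=-1.0, expected_max=1.0)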
from typing import Optional
from typing import List
def multiindex_strategy(
pandera_dtype: Optional[DataType] = None,
strategy: Optional[SearchStrategy] = None,
*,
indexes: Optional[List] = None,
size: Optional[int] = None,
):
"""Strategy to generate a pandas MultiIndex object.
:param pandera_dtype: :class:`pandera.dtypes.DataType` instance.
:param strategy: an optional hypothesis strategy. If specified, the
pandas dtype strategy will be chained onto this strategy.
:param indexes: a list of :class:`~pandera.schema_components.Index`
objects.
:param size: number of elements in the Series.
:returns: ``hypothesis`` strategy.
"""
# pylint: disable=unnecessary-lambda
if strategy:
raise BaseStrategyOnlyError(
"The dataframe strategy is a base strategy. You cannot specify "
"the strategy argument to chain it to a parent strategy."
)
indexes = [] if indexes is None else indexes
index_dtypes = {
index.name if index.name is not None else i: str(index.dtype)
for i, index in enumerate(indexes)
}
nullable_index = {
index.name if index.name is not None else i: index.nullable
for i, index in enumerate(indexes)
}
strategy = pdst.data_frames(
[index.strategy_component() for index in indexes],
index=pdst.range_indexes(
min_size=0 if size is None else size, max_size=size
),
).map(lambda x: x.astype(index_dtypes))
# this is a hack to convert np.str_ data values into native python str.
for name, dtype in index_dtypes.items():
if dtype in {"object", "str"} or dtype.startswith("string"):
# pylint: disable=cell-var-from-loop,undefined-loop-variable
strategy = strategy.map(
lambda df: df.assign(**{name: df[name].map(str)})
)
if any(nullable_index.values()):
strategy = null_dataframe_masks(strategy, nullable_index)
return strategy.map(pd.MultiIndex.from_frame) | 580a312790d7ff5d9c5f5309f3100e4ebd490f7e | 19,227 |
def pitch_from_centers(X, Y):
"""Spot pitch in X and Y direction estimated from spot centers (X, Y).
"""
assert X.shape == Y.shape
assert X.size > 1
nspots_y, nspots_x = X.shape
if nspots_x > 1 and nspots_y == 1:
pitch_x = pitch_y = np.mean(np.diff(X, axis=1))
elif nspots_y > 1 and nspots_x == 1:
pitch_x = pitch_y = np.mean(np.diff(Y, axis=0))
else:
# both nspots_x and nspots_y are > 1
pitch_x = np.mean(np.diff(X, axis=1))
pitch_y = np.mean(np.diff(Y, axis=0))
return pitch_x, pitch_y | c9816d3bee4d658a3b00769f26f22b8c0cd0fd10 | 19,228 |
def _create_lists(config, results, current, stack, inside_cartesian=None):
"""
An ugly recursive method to transform config dict
into a tree of AbstractNestedList.
"""
# Have we done it already?
try:
return results[current]
except KeyError:
pass
# Check recursion depth and detect loops
if current in stack:
raise ConfigurationError('Rule {!r} is recursive: {!r}'.format(stack[0], stack))
if len(stack) > 99:
raise ConfigurationError('Rule {!r} is too deep'.format(stack[0]))
# Track recursion depth
stack.append(current)
try:
# Check what kind of list we have
listdef = config[current]
list_type = listdef[_CONF.FIELD.TYPE]
# 1. List of words
if list_type == _CONF.TYPE.WORDS:
results[current] = WordList(listdef['words'])
# List of phrases
elif list_type == _CONF.TYPE.PHRASES:
results[current] = PhraseList(listdef['phrases'])
# 2. Simple list of lists
elif list_type == _CONF.TYPE.NESTED:
results[current] = NestedList([_create_lists(config, results, x, stack,
inside_cartesian=inside_cartesian)
for x in listdef[_CONF.FIELD.LISTS]])
# 3. Cartesian list of lists
elif list_type == _CONF.TYPE.CARTESIAN:
if inside_cartesian is not None:
raise ConfigurationError("Cartesian list {!r} contains another Cartesian list "
"{!r}. Nested Cartesian lists are not allowed."
.format(inside_cartesian, current))
results[current] = CartesianList([_create_lists(config, results, x, stack,
inside_cartesian=current)
for x in listdef[_CONF.FIELD.LISTS]])
# 4. Scalar
elif list_type == _CONF.TYPE.CONST:
results[current] = Scalar(listdef[_CONF.FIELD.VALUE])
# Unknown type
else:
raise InitializationError("Unknown list type: {!r}".format(list_type))
# Return the result
return results[current]
finally:
stack.pop() | ef9a51023a44ae1cdbfbadbc762a0ffcd1959562 | 19,229 |
def encode(value):
"""
Encode strings in UTF-8.
:param value: value to be encoded in UTF-8
:return: encoded value
"""
return str(u''.join(value).encode('utf-8')) | 697f99f028d4b978b591d006273b9d5f688711f3 | 19,230 |
def get_season(months, str_='{}'):
"""
Creates a season string.
Parameters:
- months (list of int)
- str_ (str, optional): Formatter string, should contain exactly one {}
at the position where the season substring is included.
Returns:
str
"""
if months is None:
return ''
elif len(set(months).difference([1, 2, 12])) == 0:
return str_.format('DJF')
elif len(set(months).difference([3, 4, 5])) == 0:
return str_.format('MAM')
elif len(set(months).difference([6, 7, 8])) == 0:
return str_.format('JJA')
elif len(set(months).difference([9, 10, 11])) == 0:
return str_.format('SON')
elif len(set(months).difference([11, 12, 1, 2, 3])) == 0:
return str_.format('NDJFM')
elif len(set(months).difference([5, 6, 7, 8, 9])) == 0:
return str_.format('MJJAS')
else:
return str_.format('-'.join(map(str, months))) | 73b4e8169f08ef286a0b57779d22c3436538fc30 | 19,231 |
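A few example calls for get_season above.
print(get_season([12, 1, 2], 'season: {}'))  # 'season: DJF'
print(get_season([6, 7, 8]))                 # 'JJA'
print(get_season([4, 5, 6]))                 # '4-5-6' (no named season matches)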
def data_availability(tags):
"""
get availability based on the validation tags
Args:
tags (pandas.DataFrame): errors tagged as true (see function data_validation)
Returns:
pandas.Series: availability
"""
return ~tags.any(axis=1) | 240bed8f169d23610f11c214d3644f02e5435412 | 19,232 |
async def fetch_image_by_id(
image_uid: str
):
"""
API request to return a single image by uid
"""
image_uid = int(image_uid)
image = utils_com.get_com_image_by_uid(image_uid)
return image | 153d24fd35ce18ae9c94d1c7ecf797154bc32c0f | 19,233 |
from datetime import datetime, timedelta
def get_spring_break(soup_lst, year):
"""
Purpose:
* returns a list of the weekdays during spring break
* only relevant for spring semesters
"""
spring_break_week = set()
# search for the "Spring Break begins after last class." text
for i in range(len(soup_lst)):
if soup_lst[i] == "Spring Break begins after last class.":
pre_friday = datetime.strptime(
soup_lst[i - 1] + " " + year, "%B %d %Y")
break
next_day = pre_friday + timedelta(1)
while next_day.weekday() != 4:
if next_day.weekday() != 5 and next_day.weekday() != 6:
spring_break_week.add(next_day)
next_day += timedelta(1)
spring_break_week.add(next_day)
return spring_break_week | cfd80d12da8a22a26d66f4f64f6f8511ed7238a4 | 19,234 |
def GetProQ3Option(query_para):#{{{
"""Return the proq3opt in list
"""
yes_or_no_opt = {}
for item in ['isDeepLearning', 'isRepack', 'isKeepFiles']:
if query_para[item]:
yes_or_no_opt[item] = "yes"
else:
yes_or_no_opt[item] = "no"
proq3opt = [
"-r", yes_or_no_opt['isRepack'],
"-deep", yes_or_no_opt['isDeepLearning'],
"-k", yes_or_no_opt['isKeepFiles'],
"-quality", query_para['method_quality'],
"-output_pdbs", "yes" #always output PDB file (with proq3 written at the B-factor column)
]
if 'targetlength' in query_para:
proq3opt += ["-t", str(query_para['targetlength'])]
return proq3opt | e2fe6ba97aa96d01a19a191aabcc3e793a63c490 | 19,235 |
def is_empty_config(host):
"""
Check if any services should to be configured to run on the given host.
"""
return host.AS is None | c4ec3861c497ac49ed69ecd1d6da31ab8fe2829c | 19,236 |
def total_value(metric):
"""Given a time series of values, sum the values"""
total = 0
for i in metric:
total += i
return total | 4454bfaeb0797bc03b14819bde48dc8f5accc4d3 | 19,237 |
import re
def validate_email_add(email_str):
"""Validates the email string"""
email = extract_email_id(email_str)
return re.match("[a-z0-9!#$%&'*+/=?^_`{|}~-]+(?:\.[a-z0-9!#$%&'*+/=?^_`{|}~-]+)*@(?:[a-z0-9](?:[a-z0-9-]*[a-z0-9])?\.)+[a-z0-9](?:[a-z0-9-]*[a-z0-9])?", email.lower()) | 0f77f223b208471a960e2829efb12a85f82b1381 | 19,239 |
def get_seed_nodes_json(json_node: dict, seed_nodes_control: dict or list) -> dict:
""" We need to seed some json sections for extract_fields.
This seeds those nodes as needed. """
seed_json_output = {}
if isinstance(seed_nodes_control, dict) or isinstance(seed_nodes_control, list):
for node in seed_nodes_control:
for key, value in node.items():
if value in json_node:
seed_json_output[key] = json_node[value]
return seed_json_output | f3672ee019ff4bb72f25582daf5c83fa7c8f72d0 | 19,240 |
def load_object(import_path):
"""
Loads an object from an 'import_path', like in MIDDLEWARE_CLASSES and the
likes.
Import paths should be: "mypackage.mymodule.MyObject". It then imports the
module up until the last dot and tries to get the attribute after that dot
from the imported module.
If the import path does not contain any dots, a TypeError is raised.
If the module cannot be imported, an ImportError is raised.
If the attribute does not exist in the module, a AttributeError is raised.
"""
if not isinstance(import_path, basestring):
return import_path
if '.' not in import_path:
raise TypeError(
"'import_path' argument to 'django_load.core.load_object' " +\
"must contain at least one dot.")
module_name, object_name = import_path.rsplit('.', 1)
module = import_module(module_name)
return getattr(module, object_name) | 5fd45ee31a440cbdd4c90e875e04f4f8f1856b3a | 19,241 |
def _inufft(kspace,
trajectory,
sensitivities=None,
image_shape=None,
tol=1e-5,
max_iter=10,
return_cg_state=False,
multicoil=None,
combine_coils=True):
"""MR image reconstruction using iterative inverse NUFFT.
For the parameters, see `tfmr.reconstruct`.
"""
kspace = tf.convert_to_tensor(kspace)
trajectory = tf.convert_to_tensor(trajectory)
if sensitivities is not None:
sensitivities = tf.convert_to_tensor(sensitivities)
# Infer rank from number of dimensions in trajectory.
rank = trajectory.shape[-1]
if rank > 3:
raise ValueError(
f"Can only reconstruct images up to rank 3, but `trajectory` implies "
f"rank {rank}.")
# Check inputs and set defaults.
if image_shape is None:
# `image_shape` is required.
raise ValueError("Argument `image_shape` must be provided for NUFFT.")
image_shape = tf.TensorShape(image_shape)
image_shape.assert_has_rank(rank)
if multicoil is None:
# `multicoil` defaults to True if sensitivities were passed; False
# otherwise.
multicoil = sensitivities is not None
batch_shape = tf.shape(kspace)[:-1]
# Set up system operator and right hand side.
linop_nufft = linalg_ops.LinearOperatorNUFFT(image_shape, trajectory)
operator = tf.linalg.LinearOperatorComposition(
[linop_nufft.H, linop_nufft],
is_self_adjoint=True, is_positive_definite=True)
# Compute right hand side.
rhs = tf.linalg.matvec(linop_nufft.H, kspace)
# Solve linear system using conjugate gradient iteration.
result = linalg_ops.conjugate_gradient(operator, rhs, x=None,
tol=tol, max_iter=max_iter)
# Restore image shape.
image = tf.reshape(result.x, tf.concat([batch_shape, image_shape], 0))
# Do coil combination.
if multicoil and combine_coils:
image = coil_ops.combine_coils(image, maps=sensitivities, coil_axis=-rank-1)
return (image, result) if return_cg_state else image | 892350c74ca0b7163b4aec9278af30dd770b5e1e | 19,242 |
def adjust_cart(request, item_id):
"""Adjust the quantity of the specified product to the specified amount"""
album = get_object_or_404(Album, pk=item_id)
# Returns 404 if an invalid quantity is entered
try:
quantity = int(request.POST.get("quantity"))
except Exception as e:
return HttpResponse(status=404)
cart = request.session.get("cart", {})
# Updates quantity of existing cart item or removes from cart if quantity < 0
if quantity > 0:
cart[item_id] = quantity
messages.success(request, f"Updated {album.name} quantity to {cart[item_id]}")
else:
cart.pop(item_id)
messages.success(request, f"Removed {album.name} from your cart.")
request.session["cart"] = cart
return redirect(reverse("view_cart")) | ed455747341cf581725d2fae326292155a3b77a8 | 19,243 |
def calculate_delta_v(scouseobject, momone, momnine):
"""
Calculate the difference between the moment one and the velocity of the
channel containing the peak flux
Parameters
----------
scouseobject : instance of the scousepy class
momone : ndarray
moment one (intensity-weighted average velocity) map
momnine : ndarray
map containing the velocities of channels containing the peak flux at
each location
"""
# Generate an empty array
delta_v = np.empty(np.shape(momone))
delta_v.fill(np.nan)
delta_v = np.abs(momone.value-momnine.value)
return delta_v | a894eac64f5b88fd6230eb060583fd15552bc8d8 | 19,244 |
def _validate_image(values):
"""
Validates the incoming data and raises a Invalid exception
if anything is out of order.
:param values: Mapping of image metadata to check
"""
status = values.get('status', None)
if not status:
msg = "Image status is required."
raise exception.Invalid(msg)
if status not in STATUSES:
msg = "Invalid image status '%s' for image." % status
raise exception.Invalid(msg)
return values | d0ebb8ecbde452c3128e93e917482cff13e47947 | 19,245 |
def revcmp(x, y):
"""Does the reverse of cmp():
Return negative if y<x, zero if y==x, positive if y>x"""
return cmp(y, x) | 52e5382211379d09703996b0da89821a9521de73 | 19,246 |
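revcmp relies on the Python 2 builtin cmp; a minimal Python 3 sketch, assuming a small cmp shim, shows how such a comparator can still drive a descending sort via functools.cmp_to_key.
from functools import cmp_to_key

def cmp(a, b):  # shim for the builtin removed in Python 3
    return (a > b) - (a < b)

def revcmp(x, y):
    return cmp(y, x)

print(sorted([3, 1, 2], key=cmp_to_key(revcmp)))  # [3, 2, 1]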
import numpy as np
import pandas as pd
import statsmodels.api as sm
from statsmodels import regression
def linear_regression(data: pd.DataFrame):
    """
    https://www.statsmodels.org/
    :param data: dataset, which must contain the closing price column `Close`
    :return: the fitted y values, the slope k, the intercept b, and the angle (in degrees) corresponding to k
    """
y_arr = data.Close.values
x_arr = np.arange(0, len(y_arr))
b_arr = sm.add_constant(x_arr)
model = regression.linear_model.OLS(y_arr, b_arr).fit()
b, k = model.params # y = kx + b : params[1] = k
y_fit = x_arr * k + b
return y_fit, k, b, np.rad2deg(k) | 9b30a6d90ed1e0131e12b2f7944eb58a90676ad3 | 19,247 |
from datetime import date
from operator import and_
from dateutil.relativedelta import relativedelta
def get_expiry():
"""
Returns the membership IDs of memberships expiring within 'time_frame' amount of MONTHS
"""
time_frame = request.args.get('time_frame')
try:
time_frame = int(time_frame)
except ValueError as e:
print(e)
return jsonify({
'code': 400,
'error': 'Not valid monthly time frame, should only be int'
})
expiring_members = []
session = Session()
    now = date.today()
relativeMonths = now - relativedelta(months=time_frame)
memberShooterTable = session.query(Member, Shooter) \
.join(Shooter) \
.filter(and_(Member.endDate > relativeMonths, Member.status != Status.EXPIRED))
print("Memberships expiring with " + str(time_frame) + " months")
for row in memberShooterTable:
print(row)
print(row.Member.email)
print(row.Shooter.name)
returnMember = {'name': row.Shooter.name,
'mid': row.Member.mid,
'email': row.Member.email,
'endDate': row.Member.endDate}
expiring_members.append(returnMember)
return jsonify({
'code': 200,
'table': 'Expiring Members',
'entries': expiring_members
}) | 4fd13a5e2de1feb4b797c225c349031a903f2673 | 19,248 |
def get_functions(input_file):
"""Alias for load_data bellow."""
return load_data(input_file) | 7f286809a3c27db32e0aeb3f08d41989a7b3fad2 | 19,249 |
def is_elem_ref(elem_ref):
"""
Returns true if the elem_ref is an element reference
:param elem_ref:
:return:
"""
return (
elem_ref
and isinstance(elem_ref, tuple)
and len(elem_ref) == 3
and (elem_ref[0] == ElemRefObj or elem_ref[0] == ElemRefArr)
) | 282a5ba04b2cafedd5a043bf83b4ccbd6196ae44 | 19,250 |
from typing import Tuple
from typing import List
def analyse_subcommand(
analyser: Analyser,
param: Subcommand
) -> Tuple[str, SubcommandResult]:
"""
分析 Subcommand 部分
Args:
analyser: 使用的分析器
param: 目标Subcommand
"""
if param.requires:
if analyser.sentences != param.requires:
raise ParamsUnmatched(f"{param.name}'s required is not '{' '.join(analyser.sentences)}'")
analyser.sentences = []
if param.is_compact:
name, _ = analyser.next_data()
if name.startswith(param.name):
analyser.reduce_data(name.lstrip(param.name), replace=True)
else:
raise ParamsUnmatched(f"{name} dose not matched with {param.name}")
else:
name, _ = analyser.next_data(param.separators)
        if name != param.name:  # match the option name first
            raise ParamsUnmatched(f"{name} does not match {param.name}")
name = param.dest
res: SubcommandResult = {"value": None, "args": {}, 'options': {}}
if param.sub_part_len.stop == 0:
res['value'] = Ellipsis
return name, res
args = False
subcommand = res['options']
need_args = param.nargs > 0
for _ in param.sub_part_len:
sub_param = analyse_params(analyser, param.sub_params) # type: ignore
if sub_param and isinstance(sub_param, List):
for p in sub_param:
_current_index = analyser.current_index
_content_index = analyser.content_index
try:
subcommand.setdefault(*analyse_option(analyser, p))
break
except Exception as e:
exc = e
analyser.current_index = _current_index
analyser.content_index = _content_index
continue
else:
raise exc # type: ignore # noqa
elif not args:
res['args'] = analyse_args(analyser, param.args, param.nargs)
args = True
if need_args and not args:
raise ArgumentMissing(config.lang.subcommand_args_missing.format(name=name))
return name, res | d3be0a7709ae2ebfab414d30494fe7baeba5de8d | 19,251 |
def fetch_pg_types(columns_info, trans_obj):
"""
This method is used to fetch the pg types, which is required
to map the data type comes as a result of the query.
Args:
        columns_info: metadata of the result columns, keyed by column name
        trans_obj: transaction object carrying the server (sid) and database (did) identifiers
"""
# get the default connection as current connection attached to trans id
# holds the cursor which has query result so we cannot use that connection
# to execute another query otherwise we'll lose query result.
manager = get_driver(PG_DEFAULT_DRIVER).connection_manager(trans_obj.sid)
default_conn = manager.connection(did=trans_obj.did)
# Connect to the Server if not connected.
res = []
if not default_conn.connected():
status, msg = default_conn.connect()
if not status:
return status, msg
oids = [columns_info[col]['type_code'] for col in columns_info]
if oids:
status, res = default_conn.execute_dict(
u"""SELECT oid, format_type(oid,null) as typname FROM pg_type WHERE oid IN %s ORDER BY oid;
""", [tuple(oids)])
if not status:
return False, res
return status, res['rows']
else:
return True, [] | 87bdc81134ee4d83ffbce05a77abec555b55a661 | 19,252 |
def open_popup(text) -> bool:
""" Opens popup when it's text is updated """
if text is not None:
return True
return False | 8ced6b6e73531f97df8ac7fe38723438077ca6d1 | 19,253 |
def se_beta_formatter(value: str) -> str:
"""
SE Beta formatter.
    This formats SE beta values. A valid SE beta value
    is a non-negative float.
@param value:
@return:
"""
try:
se_beta = float(value)
if se_beta >= 0:
result = str(se_beta)
else:
            raise ValueError(f'SE beta expected non-negative float "{value}"')
except ValueError as value_error:
raise ValueError(
            f'SE beta could not be parsed as float "{value}" details : {value_error}',
) from value_error
return result | 30dde489e1a8a70c0f1093caa1ce289c759b26d6 | 19,254 |
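# Usage sketch (added illustration, not from the original source):
print(se_beta_formatter("0.025"))   # -> "0.025"
print(se_beta_formatter("1e-3"))    # -> "0.001"
# se_beta_formatter("-0.1") would raise ValueError because the value is negative.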
from typing import Optional
def replace_missing_data(
data: pd.DataFrame,
target_col: str,
source_col: str,
dropna: Optional[bool] = False,
inplace: Optional[bool] = False,
) -> Optional[pd.DataFrame]:
"""Replace missing data in one column by data from another column.
Parameters
----------
data : :class:`~pandas.DataFrame`
input data with values to replace
target_col : str
target column, i.e., column in which missing values should be replaced
source_col : str
source column, i.e., column values used to replace missing values in ``target_col``
dropna : bool, optional
whether to drop rows with missing values in ``target_col`` or not. Default: ``False``
inplace : bool, optional
whether to perform the operation inplace or not. Default: ``False``
Returns
-------
:class:`~pandas.DataFrame` or ``None``
dataframe with replaced missing values or ``None`` if ``inplace`` is ``True``
"""
_assert_is_dtype(data, pd.DataFrame)
if not inplace:
data = data.copy()
data[target_col].fillna(data[source_col], inplace=True)
if dropna:
data.dropna(subset=[target_col], inplace=True)
if inplace:
return None
return data | a94e41cb88bcf502192855276ed1f11f73b1c3a1 | 19,255 |
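# Usage sketch (added illustration, not from the original source); assumes pandas is
# installed. Note that _assert_is_dtype comes from the snippet's original package.
import pandas as pd
df = pd.DataFrame({"hr_chest": [60.0, None, 72.0], "hr_wrist": [61.0, 65.0, 70.0]})
cleaned = replace_missing_data(df, target_col="hr_chest", source_col="hr_wrist")
print(cleaned["hr_chest"].tolist())  # [60.0, 65.0, 72.0]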
def jsonpath_parse(data, jsonpath, match_all=False):
"""Parse value in the data for the given ``jsonpath``.
Retrieve the nested entry corresponding to ``data[jsonpath]``. For
example, a ``jsonpath`` of ".foo.bar.baz" means that the data section
should conform to:
.. code-block:: yaml
---
foo:
bar:
baz: <data_to_be_extracted_here>
:param data: The `data` section of a document.
:param jsonpath: A multi-part key that references a nested path in
``data``.
:param match_all: Whether to return all matches or just the first one.
:returns: Entry that corresponds to ``data[jsonpath]`` if present,
else None.
Example::
src_name = sub['src']['name']
src_path = sub['src']['path']
src_doc = db_api.document_get(schema=src_schema, name=src_name)
src_secret = utils.jsonpath_parse(src_doc['data'], src_path)
# Do something with the extracted secret from the source document.
"""
jsonpath = _normalize_jsonpath(jsonpath)
p = _jsonpath_parse(jsonpath)
matches = p.find(data)
if matches:
result = [m.value for m in matches]
return result if match_all else result[0] | 3b5ab89d8315e36f8412e874f393c414c76b8587 | 19,256 |
def extract_urlparam(name, urlparam):
"""
Attempts to extract a url parameter embedded in another URL
parameter.
"""
if urlparam is None:
return None
query = name+'='
if query in urlparam:
split_args = urlparam[urlparam.index(query):].replace(query, '').split('&')
return split_args[0] if split_args else None
else:
return None | 198771d40eeddc3b7dbf2924d9d49fe7a7f0a51d | 19,257 |
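# Usage sketch (added illustration, not from the original source):
print(extract_urlparam("token", "redirect=/home&token=abc123&lang=en"))  # 'abc123'
print(extract_urlparam("missing", "redirect=/home&token=abc123"))        # None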
from typing import Dict
from typing import Any
def _get_required_var(key: str, data: Dict[str, Any]) -> str:
"""Get a value from a dict coerced to str.
raise RequiredVariableNotPresentException if it does not exist"""
value = data.get(key)
if value is None:
raise RequiredVariableNotPresentException(f"Missing required var {key}")
return str(value) | b94db42048779df532a55c2604c7c1b5d02a4f7f | 19,259 |
def phones():
"""Return a list of phones used in the main dict."""
cmu_phones = []
for line in phones_stream():
parts = line.decode("utf-8").strip().split()
cmu_phones.append((parts[0], parts[1:]))
return cmu_phones | 972c4c0739cd3c823f98eb6314d25f07b9f720f6 | 19,260 |
def load_specific_forecast(city, provider, date, forecasts):
"""reads in the city, provider, date and forecast_path and returns the data queried from the forecast path
:param city: city for which the weather forecast is for
:type string
:param provider: provider for which the weather forecast is for
:type string
:param date: date for which the weather forecast is for, e.g. '2015-06-29'
:type datetime
:param forecasts: dataframe containing all forecasts
:type pandas dataframe
    :return: DataFrame containing the relevant forecast data
"""
# get rows with the correct city, provider and date
data_city = forecasts[forecasts['city']==city]
data_provider = data_city[data_city['Provider']==provider]
if provider != 'openweathermap':
# cut the time
data_provider.loc[:, 'Date'] = data_provider.loc[:, 'Date'].map(cut_time, na_action='ignore')
data_provider.loc[:, 'ref_date'] = data_provider.loc[:,'ref_date'].map(cut_time, na_action='ignore')
else:
data_provider.loc[:, 'ref_date'] = data_provider.loc[:,'ref_date'].map(cut_time,na_action='ignore')
data_provider.loc[:, 'Date'] = data_provider.loc[:,'pred_offset'].map(cut_time, na_action='ignore')
data_provider.loc[:, 'pred_offset'] = (data_provider.loc[:,'Date'] - data_provider['ref_date']).\
map(lambda delta: delta/np.timedelta64(1, 'D'), na_action='ignore')
return data_provider[data_provider['Date'] == date] | 95f00fd07d218f1e19eb6d771898453d2495cb1d | 19,261 |
from numpy import array, isnan
from mne.channels import Montage
def eeg_to_montage(eeg):
"""Returns an instance of montage from an eeg file"""
pos = array([eeg.info['chs'][i]['loc'][:3]
for i in range(eeg.info['nchan'])])
if not isnan(pos).all():
selection = [i for i in range(eeg.info['nchan'])]
montage = Montage(pos, eeg.info['ch_names'],
selection=selection, kind='custom')
return montage
else:
return None | 9d0823bc9633ead4081b4b068717c8f9385c3e69 | 19,262 |
def mul_inv2(x:int, k:int) -> int:
""" Computes x*2^{-1} in (Z/3^kZ)*."""
return (inv2(k)*x)%(3**k) | 5789b4b9837f5b3bf6093aa586fca8f133ff8c51 | 19,263 |
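# Worked example (added illustration, not from the original source): for k = 2 the
# inverse of 2 in (Z/9Z)* is 5, since 2*5 = 10 ≡ 1 (mod 9). Hence
# mul_inv2(4, 2) == (5*4) % 9 == 2, and indeed 2*2 = 4 (mod 9).
# The helper inv2(k) is defined elsewhere in the original module.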
def Be(Subject = P.CA(), Contract=FALSE):
"""Synonym for Agree("be")."""
return Agree("be", Subject, Contract) | e6b1f07d17c34157b9b1ca216f4c0e99c5b25c00 | 19,264 |
def line_search_armijo(f, xk, pk, gfk, old_fval, args=(), c1=1e-4, alpha0=0.99):
"""
Armijo linesearch function that works with matrices
    find an approximate minimum of f(xk+alpha*pk) that satisfies the
    Armijo conditions.
Parameters
----------
f : function
loss function
xk : np.ndarray
initial position
pk : np.ndarray
descent direction
gfk : np.ndarray
gradient of f at xk
old_fval : float
loss value at xk
args : tuple, optional
arguments given to f
c1 : float, optional
c1 const in armijo rule (>0)
alpha0 : float, optional
initial step (>0)
Returns
-------
alpha : float
step that satisfy armijo conditions
fc : int
nb of function call
fa : float
loss value at step alpha
"""
xk = np.atleast_1d(xk)
fc = [0]
def phi(alpha1):
fc[0] += 1
return f(xk + alpha1 * pk, *args)
if old_fval is None:
phi0 = phi(0.)
else:
phi0 = old_fval
derphi0 = np.sum(gfk.T * pk)
alpha, phi1 = scalar_search_armijo(phi, phi0, derphi0, c1=c1, alpha0=alpha0)
return alpha, fc[0], phi1 | aefbe34ad1b28317e4fc21b1d80beda430183660 | 19,265 |
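# Usage sketch (added illustration, not from the original source): minimise a simple
# quadratic along the steepest-descent direction; assumes numpy is available and that
# scalar_search_armijo is imported (e.g. from scipy.optimize) as in the original module.
import numpy as np
f = lambda x: float(np.sum(x ** 2))
xk = np.array([3.0, -2.0])
gfk = 2 * xk                      # gradient of f at xk
pk = -gfk                         # descent direction
alpha, n_calls, f_alpha = line_search_armijo(f, xk, pk, gfk, old_fval=f(xk))
print(alpha, n_calls, f_alpha)    # step size, number of f evaluations, new loss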
from math import exp, log, pi, sqrt
from scipy.stats import norm
def lprob2sigma(lprob):
""" translates a log_e(probability) to units of Gaussian sigmas """
if (lprob>-36.):
sigma = norm.ppf(1.-0.5*exp(1.*lprob))
else:
sigma = sqrt( log(2./pi) - 2.*log(8.2) - 2.*lprob )
return float(sigma) | b224e9b50fc2a171cbb849965946ccae804648d7 | 19,266 |
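# Usage sketch (added illustration, not from the original source); assumes
# scipy.stats.norm is importable as in the snippet.
print(lprob2sigma(-2.9957))   # ≈ log(0.05), gives roughly 1.96 sigma
print(lprob2sigma(-50.0))     # uses the asymptotic branch for very small probabilities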
def convert_from_fortran_bool(stringbool):
"""
Converts a string in this case ('T', 'F', or 't', 'f') to True or False
:param stringbool: a string ('t', 'f', 'F', 'T')
:return: boolean (either True or False)
"""
true_items = ['True', 't', 'T']
false_items = ['False', 'f', 'F']
if isinstance(stringbool, str):
if stringbool in false_items:
return False
elif stringbool in true_items:
return True
else:
raise ValueError(f"Could not convert: '{stringbool}' to boolean, "
"which is not 'True', 'False', 't', 'T', 'F' or 'f'")
elif isinstance(stringbool, bool):
return stringbool # no conversion needed...
raise TypeError(f"Could not convert: '{stringbool}' to boolean, " 'only accepts str or boolean') | b9840c41a978003e8dcc5191bd7f859fc5b0ecb7 | 19,267 |
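# Usage sketch (added illustration, not from the original source):
print(convert_from_fortran_bool('T'))     # True
print(convert_from_fortran_bool('f'))     # False
print(convert_from_fortran_bool(True))    # True (booleans pass through unchanged)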
def gaussian_device(n_subsystems):
"""Number of qubits or modes."""
return DummyDevice(wires=n_subsystems) | c2779958009ebe2dd7907a0a5f418535d782f4a0 | 19,268 |
def create_playlist(current_user, user_id):
"""
Creates a playlist.
:param user_id: the ID of the user.
:return: 200, playlist created successfully.
"""
x = user_id
user = session.query(User).filter_by(id=user_id).one()
data = request.get_json()
new_playlist = Playlist(name=data['name'],
description=data['description'],
user_id=x)
db.session.add(new_playlist)
db.session.commit()
    return jsonify({'message': 'playlist {} created successfully'.format(data['name'])})
def gram_matrix(y):
"""
Input shape: b,c,h,w
Output shape: b,c,c
"""
(b, ch, h, w) = y.size()
features = y.view(b, ch, w * h)
features_t = features.transpose(1, 2)
gram = features.bmm(features_t) / (ch * h * w)
return gram | 9ea7595870dccc1375626c374fb9db1436523e40 | 19,270 |
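# Usage sketch (added illustration, not from the original source); assumes PyTorch
# is installed.
import torch
features = torch.randn(2, 8, 16, 16)   # batch of 2 feature maps with 8 channels
print(gram_matrix(features).shape)     # torch.Size([2, 8, 8])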
import torch
def process_pair_v2(data, global_labels):
"""
:param path: graph pair data.
:return data: Dictionary with data, also containing processed DGL graphs.
"""
# print('Using v2 process_pair')
edges_1 = data["graph_1"] #diff from v1
edges_2 = data["graph_2"] #diff from v1
    edges_1 = np.array(edges_1, dtype=np.int64)
    edges_2 = np.array(edges_2, dtype=np.int64)
    G_1 = dgl.DGLGraph((edges_1[:, 0], edges_1[:, 1]))
    G_2 = dgl.DGLGraph((edges_2[:, 0], edges_2[:, 1]))
G_1.add_edges(G_1.nodes(), G_1.nodes()) #diff from v1
G_2.add_edges(G_2.nodes(), G_2.nodes()) #diff from v1
edges_1 = torch.from_numpy(edges_1.T).type(torch.long)
edges_2 = torch.from_numpy(edges_2.T).type(torch.long)
data["edge_index_1"] = edges_1
data["edge_index_2"] = edges_2
features_1, features_2 = [], []
for n in data["labels_1"]:
features_1.append([1.0 if global_labels[n] == i else 0.0 for i in global_labels.values()])
for n in data["labels_2"]:
features_2.append([1.0 if global_labels[n] == i else 0.0 for i in global_labels.values()])
    G_1.ndata['features'] = torch.FloatTensor(np.array(features_1))
    G_2.ndata['features'] = torch.FloatTensor(np.array(features_2))
    G_1.ndata['type'] = np.array(data["labels_1"])
    G_2.ndata['type'] = np.array(data["labels_2"])
    data['G_1'] = G_1
    data['G_2'] = G_2
norm_ged = data["ged"]/(0.5*(len(data["labels_1"])+len(data["labels_2"])))
data["target"] = torch.from_numpy(np.exp(-norm_ged).reshape(1, 1)).view(-1).float()
return data | 61e0194f521132cfa4e96db925f566abf6b3b427 | 19,271 |
from typing import Tuple
def calculate_line_changes(diff: Diff) -> Tuple[int, int]:
"""Return a two-tuple (additions, deletions) of a diff."""
additions = 0
deletions = 0
raw_diff = "\n".join(diff.raw_unified_diff())
for line in raw_diff.splitlines():
if line.startswith("+ "):
additions += 1
elif line.startswith("- "):
deletions += 1
return additions, deletions | 437859735c904a3c7754091c6cb97ba528dc7e72 | 19,272 |
from nltk.corpus import wordnet
def get_synonyms(token):
""" get synonyms of word using wordnet
args:
token: string
returns:
synonyms: list containing synonyms as strings
"""
synonyms = []
if len(wordnet.synsets(token)) == 0:
return None
for synset in wordnet.synsets(token):
for lemma in synset.lemmas():
synonyms.append(lemma.name())
synonyms = _remove_repeated_elements(synonyms)
    if token in synonyms:
        synonyms.remove(token)
return synonyms | ec26875e694f860c38b709979dfc8328eff17f0f | 19,273 |
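# Usage sketch (added illustration, not from the original source); requires the NLTK
# wordnet corpus to be downloaded (nltk.download('wordnet')). The helper
# _remove_repeated_elements comes from the snippet's original module.
synonyms = get_synonyms("happy")
print(synonyms)   # e.g. ['felicitous', 'glad', 'well-chosen'], depending on WordNet version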
def concat_experiments_on_channel(experiments, channel_name):
"""Combines channel values from experiments into one dataframe.
This function helps to compare channel values from a list of experiments
by combining them in a dataframe. E.g: Say we want to extract the `log_loss`
channel values for a list of experiments. The resulting dataframe will have
['id','x_log_loss','y_log_loss'] columns.
Args:
experiments(list): list of `neptune.experiments.Experiment` objects.
channel_name(str): name of the channel for which we want to extract values.
Returns:
`pandas.DataFrame`: Dataframe of ['id','x_CHANNEL_NAME','y_CHANNEL_NAME']
values concatenated from a list of experiments.
Examples:
Instantiate a session::
from neptune.sessions import Session
session = Session()
Fetch a project and a list of experiments::
project = session.get_projects('neptune-ai')['neptune-ai/Salt-Detection']
experiments = project.get_experiments(state=['aborted'], owner=['neyo'], min_running_time=100000)
Construct a channel value dataframe::
from neptunecontrib.api.utils import concat_experiments_on_channel
compare_df = concat_experiments_on_channel(experiments,'unet_0 epoch_val iout loss')
Note:
If an experiment in the list of experiments does not contain the channel with a specified channel_name
it will be omitted.
"""
combined_df = []
for experiment in experiments:
if channel_name in experiment.get_channels().keys():
channel_df = experiment.get_numeric_channels_values(channel_name)
channel_df['id'] = experiment.id
combined_df.append(channel_df)
combined_df = pd.concat(combined_df, axis=0)
return combined_df | 04c8004ccb1a2b5ec2906bb1183e685b8c8ff763 | 19,274 |
def sghmc_naive_mh_noresample_uni(u_hat_func, du_hat_func, epsilon, nt, m, M, V, theta_init, r_init, formula):
"""
    Run naive Stochastic Gradient Hamiltonian Monte Carlo with a
    Metropolis-Hastings correction in the one-dimensional case, without a
    momentum resampling step.
"""
B = 1/2*epsilon*V
theta = [theta_init]
r = [r_init]
for t in range(nt-1):
epsilon0 = max(epsilon, formula(t))
theta0, r0 = theta[-1], r[-1]
for i in range(m):
theta0 = theta0 + epsilon0*1/M*r0
r0 = r0 - epsilon0*du_hat_func(theta0) + np.random.normal(0, np.sqrt(2*B*epsilon0))
# Metropolis-Hastings correction
u = np.random.uniform()
H1 = u_hat_func(theta0) + 1/2*r0**2*1/M
H2 = u_hat_func(theta[-1]) + 1/2*r[-1]**2*1/M
p = np.exp(H2 - H1)
if u < min(1,p):
theta.append(theta0)
r.append(r0)
return [theta, r] | 4f330bf3025506bc2bafca0891025ac8b9a4f280 | 19,275 |
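# Usage sketch (added illustration, not from the original source): sample from a standard
# Gaussian, whose potential is U(theta) = theta**2 / 2; assumes numpy is imported as np in
# the original module.
import numpy as np
u_hat = lambda theta: 0.5 * theta ** 2
du_hat = lambda theta: theta
theta_chain, r_chain = sghmc_naive_mh_noresample_uni(
    u_hat, du_hat, epsilon=0.01, nt=1000, m=10, M=1.0, V=1.0,
    theta_init=0.0, r_init=1.0, formula=lambda t: 0.01 / (1 + t))
print(len(theta_chain), np.mean(theta_chain))  # accepted samples and their mean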
def detect_voices(aud, sr=44100):
"""
    Detect the presence and absence of voices in an array of audio.
    Args:
        aud: 1-D float audio array with values in [-1, 1]
        sr: sampling rate of the audio in Hz (default 44100)
    Returns:
        list of booleans, one per analysis window, True where speech was detected
"""
pcm_16 = np.round(
(np.iinfo(np.int16).max * aud)).astype(np.int16).tobytes()
voices = [
VAD.is_speech(pcm_16[2 * ix:2 * (ix + SMOOTHING_WSIZE)],
sample_rate=SAMPLING_RATE)
for ix in range(0, len(aud), SMOOTHING_WSIZE)
]
return voices | ec987cf5e3384cb20d52d07684f7afb5f38f0e98 | 19,276 |
def process_to_binary_otsu_image(img_path, inverse=False, max_threshold=255):
"""
Purpose:
Process an image to binary colours using binary otsu thresholding.
Args:
img_path - path to the image to process
inverse - if true an inverted binary thresholding will be applied (optional).
max_threshold - the max value to be given if a pixels value is more than the threshold value (optional).
Returns:
binary_image_tuple[0] - optimal threshold value found by otsu threshold.
binary_image_tuple[1] - binary image.
"""
img = cv2.imread(img_path)
gray_img = convert_bgr_to_gray(img)
if inverse:
binary_image_tuple = threshold_binary_inv_otsu(gray_img, max_threshold)
else:
binary_image_tuple = threshold_binary_otsu(gray_img, max_threshold)
return binary_image_tuple | f450d29540679f2fa7736e7cd0257a56b58c8a8d | 19,277 |
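# Usage sketch (added illustration, not from the original source); 'page.png' is a
# hypothetical input path, and the convert/threshold helpers come from the snippet's
# original module.
otsu_threshold, binary_img = process_to_binary_otsu_image("page.png", inverse=True)
print(otsu_threshold)                     # threshold value selected by Otsu's method
cv2.imwrite("page_binary.png", binary_img)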
import hashlib
from jax import random
def _fold_in_str(rng, data):
"""Folds a string into a jax.random.PRNGKey using its SHA-1 hash."""
m = hashlib.sha1()
m.update(data.encode('utf-8'))
d = m.digest()
hash_int = int.from_bytes(d[:4], byteorder='big', signed=True)
return random.fold_in(rng, hash_int) | e0b3d135a9573892cf7f4cfdcea1bc29bbc3e8c0 | 19,278 |
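# Usage sketch (added illustration, not from the original source); assumes JAX is
# installed and `random` refers to jax.random as in the snippet.
rng = random.PRNGKey(0)
dropout_rng = _fold_in_str(rng, "dropout")
params_rng = _fold_in_str(rng, "params")   # deterministic, distinct sub-keys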