content (string, 35-762k chars) | sha1 (string, 40 chars) | id (int64, 0-3.66M) |
---|---|---|
import optparse
def ParseArgs():
"""Parses command line options.
Returns:
    An options object as from optparse.OptionParser.parse_args()
"""
parser = optparse.OptionParser()
parser.add_option('--android-sdk', help='path to the Android SDK folder')
parser.add_option('--android-sdk-tools',
help='path to the Android SDK platform tools folder')
parser.add_option('--R-package', help='Java package for generated R.java')
parser.add_option('--R-dir', help='directory to hold generated R.java')
parser.add_option('--res-dir', help='directory containing resources')
parser.add_option('--crunched-res-dir',
help='directory to hold crunched resources')
(options, args) = parser.parse_args()
if args:
parser.error('No positional arguments should be given.')
# Check that required options have been provided.
required_options = ('android_sdk', 'android_sdk_tools', 'R_package',
'R_dir', 'res_dir', 'crunched_res_dir')
for option_name in required_options:
if getattr(options, option_name) is None:
parser.error('--%s is required' % option_name.replace('_', '-'))
return options | 492894d0cb4faf004f386ee0f4285180d0a6c37d | 13,270 |
def get_interface_from_model(obj: Base) -> str:
"""
Transform the passed model object into an dispatcher interface name.
For example, a :class:``Label`` model will result in a string with the value `labels` being
returned.
:param obj: the model object
:return: the interface string
"""
    try:
        return obj.__tablename__
    except AttributeError as exc:
        raise TypeError("Not a transformable model: {!r}".format(obj)) from exc | 2ee8d1ac86b0d8d6433ace8d58483dbf91af997b | 13,271 |
import codecs
def get_text(string, start, end, bom=True):
"""This method correctly accesses slices of strings using character
start/end offsets referring to UTF-16 encoded bytes. This allows
    for using character offsets generated by Rosette (and other software)
that use UTF-16 native string representations under Pythons with UCS-4
support, such as Python 3.3+ (refer to https://www.python.org/dev/peps/pep-0393/).
The offsets are adjusted to account for a UTF-16 byte order mark (BOM)
(2 bytes) and also that each UTF-16 logical character consumes 2 bytes.
'character' in this context refers to logical characters for the purpose of
character offsets; an individual character can consume up to 4 bytes (32
bits for so-called 'wide' characters) and graphemes can consume even more.
"""
if not isinstance(string, str):
raise ValueError('expected string to be of type str')
if not any(((start is None), isinstance(start, int))):
raise ValueError('expected start to be of type int or NoneType')
if not any(((end is None), isinstance(end, int))):
raise ValueError('expected end to be of type int or NoneType')
if start is not None:
start *= 2
if bom:
start += 2
if end is not None:
end *= 2
if bom:
end += 2
utf_16, _ = codecs.utf_16_encode(string)
sliced, _ = codecs.utf_16_decode(utf_16[start:end])
return sliced | ffe3c74a248215a82b0e0a5b105f5e4c94c8c2a8 | 13,272 |
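A quick illustration of the offset convention (the sample string below is mine, not part of the snippet): an astral-plane character such as an emoji occupies two UTF-16 code units, so offsets produced by UTF-16-based tools skip an extra position after it.

text = "a\U0001F600b"           # 'a', an emoji (two UTF-16 code units), 'b'
print(get_text(text, 0, 1))     # 'a'
print(get_text(text, 1, 3))     # the emoji, spanning UTF-16 offsets 1-2
print(get_text(text, 3, 4))     # 'b'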
import math
def init_distance(graph: dict, s: str) -> dict:
"""
初始化其他节点的距离为正无穷
防止后面字典越界
"""
distance = {s: 0}
for vertex in graph:
if vertex != s:
distance[vertex] = math.inf
return distance | dd8ceda3ca7435b5f02b7b47a363f017f796bc36 | 13,273 |
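A small usage sketch (the toy graph below is an assumption about the expected adjacency-dict shape):

graph = {'A': ['B', 'C'], 'B': ['C'], 'C': []}
print(init_distance(graph, 'A'))   # {'A': 0, 'B': inf, 'C': inf}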
def read_cry_data(path):
"""
Read a cry file and extract the molecule's geometry.
The format should be as follows::
U_xx U_xy U_xz
U_yx U_yy U_yz
U_zx U_zy U_zz
energy (or comment, this is ignored for now)
ele0 x0 y0 z0
ele1 x1 y1 z1
...
elen xn yn zn
Where the U matrix is made of the unit cell basis vectors as column
vectors.
Parameters
----------
path : str
A path to a file to read
Returns
-------
val : LazyValues
An object storing all the data
"""
unit = []
coords = []
elements = []
with open(path, 'r') as f:
for line in f:
parts = line.strip().split()
if len(parts) == 3:
unit.append([float(x) for x in parts])
if len(parts) == 4:
elements.append(parts[0])
coords.append([float(x) for x in parts[1:]])
return LazyValues(elements=elements, coords=coords, unit_cell=unit) | 3a7a88e9d70c5f7499ad219602062ad2d852139b | 13,274 |
import torch
def get_camera_wireframe(scale: float = 0.3):
"""
Returns a wireframe of a 3D line-plot of a camera symbol.
"""
a = 0.5 * torch.tensor([-2, 1.5, 4])
b = 0.5 * torch.tensor([2, 1.5, 4])
c = 0.5 * torch.tensor([-2, -1.5, 4])
d = 0.5 * torch.tensor([2, -1.5, 4])
C = torch.zeros(3)
F = torch.tensor([0, 0, 3])
camera_points = [a, b, d, c, a, C, b, d, C, c, C, F]
lines = torch.stack([x.float() for x in camera_points]) * scale
return lines | 65bb8fa078f2f6f3edb38ac86da0603073fd413f | 13,275 |
import struct as st

import numpy as np


def read_input(file):
    """
    Args:
        file (str): path to an IDX-format binary file (e.g. MNIST images or labels).
    Returns:
        np.ndarray: the dataset, reshaped to the dimensions stored in the file header.
    """
    with open(file, 'rb') as file:
        # IDX header: two zero bytes, a dtype code and the number of dimensions,
        # followed by one big-endian uint32 per dimension.
        zero, d_type, n_dims = st.unpack('>HBB', file.read(4))
        shape = tuple(st.unpack('>I', file.read(4))[0] for _ in range(n_dims))
return np.frombuffer(file.read(), dtype=np.uint8).reshape(shape) | 91b10314a326380680898efdb8a7d15aa7a84f24 | 13,276 |
from typing import List
def maximum_segment_sum(input_list: List):
"""
Return the maximum sum of the segments of a list
Examples::
>>> from pyske.core import PList, SList
>>> maximum_segment_sum(SList([-5 , 2 , 6 , -4 , 5 , -6 , -4 , 3]))
9
>>> maximum_segment_sum(PList.from_seq([-33 , 22 , 11 , -44]))
33
>>> maximum_segment_sum(PList.from_seq([-33 , 22 , 0, 1, -3, 11 , -44, 30, -5, -13, 12]))
31
:param input_list: a PySke list of numbers
:return: a number, the maximum sum of the segments of a list
"""
best_sum, _ = input_list.map(int_to_tuple).reduce(max_and_sum, (0, 0))
return best_sum | a42b41f3a3b020e0bcba80b557c628b2a0805caf | 13,277 |
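The helpers int_to_tuple and max_and_sum are not shown in the snippet; as a rough sequential sketch of what the map/reduce computes (not the library's actual, associativity-aware helpers), a Kadane-style equivalent is:

def maximum_segment_sum_sequential(xs):
    # (best, suffix): best segment sum so far, and best sum of a segment ending at the current element.
    best = suffix = 0
    for x in xs:
        suffix = max(0, suffix + x)
        best = max(best, suffix)
    return best

assert maximum_segment_sum_sequential([-5, 2, 6, -4, 5, -6, -4, 3]) == 9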
import pandas as pds
import pysat


def list_files(tag=None, sat_id=None, data_path=None, format_str=None):
"""Return a Pandas Series of every file for chosen satellite data
Parameters
-----------
tag : (string or NoneType)
Denotes type of file to load.
(default=None)
sat_id : (string or NoneType)
Specifies the satellite ID for a constellation. Not used.
(default=None)
data_path : (string or NoneType)
Path to data directory. If None is specified, the value previously
set in Instrument.files.data_path is used. (default=None)
format_str : (string or NoneType)
User specified file format. If None is specified, the default
formats associated with the supplied tags are used. (default=None)
Returns
--------
pysat.Files.from_os : (pysat._files.Files)
A class containing the verified available files
Notes
-----
Called by pysat. Not intended for direct use by user.
"""
if data_path is not None:
if tag == '':
# files are by month, going to add date to monthly filename for
# each day of the month. The load routine will load a month of
# data and use the appended date to select out appropriate data.
if format_str is None:
format_str = 'kp{year:2d}{month:02d}.tab'
out = pysat.Files.from_os(data_path=data_path,
format_str=format_str,
two_digit_year_break=94)
if not out.empty:
                out.loc[out.index[-1] + pds.DateOffset(months=1) -
                        pds.DateOffset(days=1)] = out.iloc[-1]
out = out.asfreq('D', 'pad')
out = out + '_' + out.index.strftime('%Y-%m-%d')
return out
elif tag == 'forecast':
format_str = 'kp_forecast_{year:04d}-{month:02d}-{day:02d}.txt'
files = pysat.Files.from_os(data_path=data_path,
format_str=format_str)
# pad list of files data to include most recent file under tomorrow
if not files.empty:
                files.loc[files.index[-1] + pds.DateOffset(days=1)] = files.values[-1]
                files.loc[files.index[-1] + pds.DateOffset(days=1)] = files.values[-1]
return files
elif tag == 'recent':
format_str = 'kp_recent_{year:04d}-{month:02d}-{day:02d}.txt'
files = pysat.Files.from_os(data_path=data_path,
format_str=format_str)
# pad list of files data to include most recent file under tomorrow
if not files.empty:
                files.loc[files.index[-1] + pds.DateOffset(days=1)] = files.values[-1]
                files.loc[files.index[-1] + pds.DateOffset(days=1)] = files.values[-1]
return files
else:
raise ValueError('Unrecognized tag name for Space Weather Index Kp')
else:
raise ValueError ('A data_path must be passed to the loading routine ' +
'for Kp') | a302202b7a12534186cfabe66a788df5c29b3266 | 13,278 |
def has_bookmark(uri):
"""
Returns true if the asset with given URI has been bookmarked by the
currently logged in user. Returns false if there is no currently logged
in user.
"""
if is_logged_in():
mongo.db.bookmarks.ensure_index('username')
mongo.db.bookmarks.ensure_index('asset.uri')
return mongo.db.bookmarks.find_one({
'username': current_user.username,
'asset.uri': uri
}) is not None
return False | 8571c8b50c28a0e8829a143b1d33fd549cfad213 | 13,279 |
import numpy as np
import six
from tqdm import tqdm


def evaluate(f, K, dataiter, num_steps):
  """Evaluates online few-shot episodes.
  Args:
    f: Feature/model function applied to each episode's support set.
    K: Episode parameter (unused in this function).
    dataiter: Dataset iterator.
    num_steps: Number of episodes; -1 iterates over the full dataset.
  """
if num_steps == -1:
it = six.moves.xrange(len(dataiter))
else:
it = six.moves.xrange(num_steps)
it = tqdm(it, ncols=0)
results = []
for i, batch in zip(it, dataiter):
# Get features.
h = f(batch['x_s'], batch['y_s'])
if type(h) is tuple:
h, (beta, gamma, beta2, gamma2, count) = h
print('beta/count', np.stack([beta, count], axis=-1))
batch['beta'] = beta.numpy()
batch['gamma'] = gamma.numpy()
batch['beta2'] = beta2.numpy()
batch['gamma2'] = gamma2.numpy()
batch['count'] = count.numpy()
batch['h'] = h.numpy()
results.append(batch)
return results | a6feebd96907f01789dd74bfb22e5a5306010bf9 | 13,280 |
def find_period_of_function(eq,slopelist,nroots):
"""This function finds the Period of the function.
It then makes a list of x values that are that period apart.
    Example Input: find_period_of_function(eq1, [0.947969, 1.278602], nroots)
"""
global tan
s1 = slopelist[0]
s2 = slopelist[1]
if tan == 1:
T = 3.14159265359
else:
T = s2-s1
periodlist = []
for i in range(nroots):
periodlist.append(s1+T*i)
return periodlist | 491d853aa99a31348a3acce1c29cc508e7ab3b69 | 13,281 |
def merge_date_tags(path, k):
"""called when encountering only tags in an element ( no text, nor mixed tag and text)
Arguments:
path {list} -- path of the element containing the tags
k {string} -- name of the element containing the tags
Returns:
whatever type you want -- the value of the element
note : if you want
"""
l=k['#alldata']
#2015/01/01 12:10:30
# if "PubMedPubDate" in path[-1]:
if "date" in path[-1].lower():
month=None
year=None
day=None
hour=None
minute=None
r=""
# it should always be a dict with one key, and a subdict as value, containing an "#alldata" key
# {'month': {'#alldata': ['09']}}
for i in l:
# month
k = next(iter(i))
# ['09']
ad = i[k]['#alldata']
if k == "Year" and len(ad) == 1 and isinstance (ad[0], str):
year=ad[0]
elif k == "Month" and len(ad) == 1 and isinstance (ad[0], str):
month=ad[0]
elif k == "Day" and len(ad) == 1 and isinstance (ad[0], str):
day=ad[0]
elif k == "Hour" and len(ad) == 1 and isinstance (ad[0], str):
hour=ad[0]
if len(hour) == 1:
hour = "0"+hour
elif k == "Minute" and len(ad) == 1 and isinstance (ad[0], str):
minute=ad[0]
if len(minute) == 1:
minute = "0"+minute
if year is not None:
r=r+year
if month is not None:
r=r+"/"+month
if day is not None:
r=r+"/"+day
if hour is not None:
r=r+ " "+hour
if minute is not None:
r=r+":"+minute
        #return only if at least "year" is present
return r
return k | 2ae3bd0dada288b138ee450103c0b4412a841336 | 13,282 |
def ocp_play():
"""Decorator for adding a method as an common play search handler."""
def real_decorator(func):
# Store the flag inside the function
# This will be used later to identify the method
if not hasattr(func, 'is_ocp_playback_handler'):
func.is_ocp_playback_handler = True
return func
return real_decorator | 9e96fe81b331820bf7485501e458cbb4efba4328 | 13,283 |
import matplotlib.pyplot as plt


def _scatter(x_arr, y_arr, attributes, xlabel=None, xlim=None, xlog=False,
ylabel=None, ylim=None, ylog=False,
show=True, save=None):
"""Private plotting utility function."""
# initialise figure and axis settings
fig = plt.figure()
ax = plt.gca()
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['bottom'].set_visible(True)
ax.spines['left'].set_visible(True)
    # plot the scatter points
plt.scatter(x_arr, y_arr,
color=attributes['color'],
alpha=attributes['opacity'],
label=attributes['label'])
# ax.set_xticks(bins + 0.5)
# final axis setting
ax.set_xlim(xlim)
ax.set_xlabel(xlabel, color="black")
    ax.set_xscale('log' if xlog else 'linear')
    ax.set_ylim(ylim)
    ax.set_ylabel(ylabel, color="black")
    ax.set_yscale('log' if ylog else 'linear')
    # add legend
    if attributes['label'] is not None:
        legend = ax.legend(loc=0)
        legend.get_frame().set_facecolor('white')
        legend.get_frame().set_edgecolor('lightgrey')
    # save/show figure
    if save is not None:
plt.savefig(save, bbox_inches='tight')
if show:
plt.show(fig, block=False)
return fig, fig.axes | 8494f9398ea0e5d197a58ea341c626e03d47b028 | 13,284 |
def first_item(iterable, default=None):
"""
Returns the first item of given iterable.
Parameters
----------
iterable : iterable
Iterable
default : object
Default value if the iterable is empty.
Returns
-------
object
First iterable item.
"""
    if iterable is None:
        return default
    for item in iterable:
        return item
    return default | f5ebbaea7cf4152382fb4b2854f68a3320d21fdc | 13,285 |
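Behaviour sketch (values chosen for illustration only):

print(first_item([3, 4, 5]))             # 3
print(first_item([], default='empty'))   # empty
print(first_item(iter([]), default=0))   # 0 - also covers exhausted generators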
from six import binary_type, text_type


def ensure_binary(s, encoding='utf-8', errors='strict'):
"""Coerce **s** to six.binary_type.
For Python 2:
- `unicode` -> encoded to `str`
- `str` -> `str`
For Python 3:
- `str` -> encoded to `bytes`
- `bytes` -> `bytes`
"""
if isinstance(s, text_type):
return s.encode(encoding, errors)
elif isinstance(s, binary_type):
return s
else:
raise TypeError("not expecting type '%s'" % type(s)) | 086d2e50b083a869fff2c06b4da7c04975b19fa3 | 13,287 |
import wx


def FileDialog(prompt='ChooseFile', indir=''):
    """
    Opens a wx dialog that allows you to select a single
    file, and returns the full path/name of that file.
    """
dlg = wx.FileDialog(None,
message = prompt,
defaultDir = indir)
if dlg.ShowModal() == wx.ID_OK:
outfile = dlg.GetPath()
else:
outfile = None
dlg.Destroy()
return outfile | cef7816a40297a5920359b2dba3d7e3e6f6d11ec | 13,288 |
def my_account():
"""
Allows a user to manage their account
"""
user = get_user(login_session['email'])
if request.method == 'GET':
return render_template('myAccount.html', user=user)
else:
new_password1 = request.form.get('userPassword1')
new_password2 = request.form.get('userPassword2')
if new_password1 != new_password2:
flash("Passwords do not match!")
return render_template('myAccount.html', user=user)
user.hash_password(new_password1) # set the new password hash
session.add(user)
session.commit()
flash("Your password has been changed.")
return redirect(url_for('index')) | f3e762931201ed82fa07c7b08c8bc9913c3729dd | 13,289 |
from typing import List

import numpy as np


def __pad_assertwith_0_array4D(grad: 'np.ndarray', pad_nums) -> 'np.ndarray':
    """
    Pad the array by inserting `pad_nums` zeros between neighbouring elements
    along the two spatial axes.
    :param grad: 4D array of shape (N, C, H, W)
    :param pad_nums: number of zeros inserted between adjacent elements
    :return: the zero-interleaved 4D array
    """
gN, gC, gH, gW = grad.shape
init1 = np.zeros((gN, gC, gH + (gH - 1) * pad_nums, gW), dtype = grad.dtype)
init2 = np.zeros((gN, gC, gH + (gH - 1) * pad_nums, gW + (gW - 1) * pad_nums), dtype = grad.dtype)
    row_idx: List[int] = [(pad_nums + 1) * i for i in range(grad.shape[2])]
    init1[:, :, row_idx, :] = grad
    col_idx: List[int] = [(pad_nums + 1) * i for i in range(grad.shape[3])]
    init2[:, :, :, col_idx] = init1
return init2 | 3562e42800c25a059f82a0d163e239badefdcfd3 | 13,290 |
def islist(data):
"""Check if input data is a list."""
return isinstance(data, list) | 98769191b0215f8f863047ccc0c37e4d0af0a444 | 13,291 |
def spatial_mean(xr_da, lon_name="longitude", lat_name="latitude"):
"""
Perform averaging on an `xarray.DataArray` with latitude weighting.
Parameters
----------
xr_da: xarray.DataArray
Data to average
lon_name: str, optional
Name of x-coordinate
lat_name: str, optional
Name of y-coordinate
Returns
-------
xarray.DataArray
Spatially averaged xarray.DataArray.
"""
weights = da.cos(da.deg2rad(xr_da[lat_name]))
res = xr_da.weighted(weights).mean(dim=[lon_name, lat_name])
return res | 5afb6cb9e9a6b88cc3368da4f3544ea9b7c217be | 13,292 |
def rank(value_to_be_ranked, value_providing_rank):
"""
Returns the rank of ``value_to_be_ranked`` in set of values, ``values``.
Works even if ``values`` is a non-orderable collection (e.g., a set).
A binary search would be an optimized way of doing this if we can constrain
``values`` to be an ordered collection.
"""
num_lesser = [v for v in value_providing_rank if v < value_to_be_ranked]
return len(num_lesser) | 18c2009eb59b62a2a3c63c69d55f84a6f51e5953 | 13,293 |
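For example (arbitrary values):

print(rank(5, {2, 9, 1, 7}))   # 2 - two of the values (1 and 2) are smaller than 5
print(rank(0, [3, 4, 5]))      # 0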
import numpy as np
import scipy.stats as st


def Characteristics(aVector):
    """
    Purpose:
        Compute descriptive statistics for the data in a vector
Inputs:
aVector an array of data
Initialize:
iMean mean
iMed median
iMin minimum
iMax maximum
iKurt kurtosis
iSkew skewness
iStd standard deviation
Return value:
aResults an array with calculated characteristics
"""
iMin = aVector.min().values[0]
iMax = aVector.max().values[0]
iMean = np.mean(aVector).values[0]
iMed = np.median(aVector)
iKurt = st.kurtosis(aVector)[0]
iSkew = st.skew(aVector)[0]
iStd = aVector.std().values[0]
aResults = np.array([iMin,iMax, iMean,iMed,iKurt,iSkew,iStd])
return aResults | 228ba375a7fca9a4e13920e6eadd0dab83b0847c | 13,294 |
import numpy as np
from scipy.special import gammaln, psi


def loglik(alpha, gamma_list, M, k):
    r"""
    Calculate $L_{[\alpha]}$ defined in A.4.2
    """
    psi_sum_gamma = np.array(list(map(lambda x: psi(np.sum(x)), gamma_list))).reshape((M, 1))  # M*1
    psi_gamma = psi(np.array(gamma_list))  # M*k matrix
    # Dirichlet term M*(ln Gamma(sum alpha) - sum ln Gamma(alpha_i)) plus the expectation term.
    L = (M * (gammaln(np.sum(alpha)) - np.sum(gammaln(alpha)))
         + np.sum((psi_gamma - psi_sum_gamma) * (alpha.reshape((1, k)) - 1)))
return L | 233307f7ef4e350bec162199a5d0cd8c773b4151 | 13,295 |
import logging

from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import ObjectDoesNotExist
from gum.indexer import indexer, NotRegistered

logger = logging.getLogger(__name__)


def handle_delete(sender_content_type_pk, instance_pk):
    """Async task to delete a model from the index.
    :param sender_content_type_pk: primary key of the sender's ContentType.
    :param instance_pk: primary key of the model instance to remove.
    """
try:
sender_content_type = ContentType.objects.get(pk=sender_content_type_pk)
sender = sender_content_type.model_class()
instance = sender.objects.get(pk=instance_pk)
except ObjectDoesNotExist:
logger.warning("Object ({}, {}) not found".format(sender_content_type_pk, instance_pk))
return None
try:
mapping_type = indexer.get_mapping_type(sender)
mapping_type.delete_document(instance)
except NotRegistered:
return None
return sender_content_type_pk, instance_pk | 049af2041e3ea33bdfddf81c72aeb95b04e5f60c | 13,296 |
def fixedcase_word(w, truelist=None):
"""Returns True if w should be fixed-case, None if unsure."""
if truelist is not None and w in truelist:
return True
if any(c.isupper() for c in w[1:]):
# tokenized word with noninitial uppercase
return True
if len(w) == 1 and w.isupper() and w not in {'A', 'K', 'N'}:
# single uppercase letter
return True
if len(w) == 2 and w[1] == '.' and w[0].isupper():
# initial with period
return True | 9047866f7117e8b1e4090c8e217c3063cfd37c38 | 13,298 |
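A few illustrative calls (note the function deliberately returns None, rather than False, when it is unsure):

print(fixedcase_word('LSTM'))                     # True - non-initial uppercase
print(fixedcase_word('N.'))                       # True - initial with period
print(fixedcase_word('transformer'))              # None - unsure
print(fixedcase_word('bert', truelist={'bert'}))  # True - whitelisted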
import torch
import types
def linspace(
start,
stop,
num=50,
endpoint=True,
retstep=False,
dtype=None,
split=None,
device=None,
comm=None,
):
"""
Returns num evenly spaced samples, calculated over the interval [start, stop]. The endpoint of the interval can
optionally be excluded.
Parameters
----------
start: scalar, scalar-convertible
The starting value of the sample interval, maybe a sequence if convertible to scalar
stop: scalar, scalar-convertible
        The end value of the sample interval, unless endpoint is set to False. In that case, the sequence consists of all but the
last of num + 1 evenly spaced samples, so that stop is excluded. Note that the step size changes when endpoint
is False.
num: int, optional
Number of samples to generate, defaults to 50. Must be non-negative.
endpoint: bool, optional
If True, stop is the last sample, otherwise, it is not included. Defaults to True.
retstep: bool, optional
If True, return (samples, step), where step is the spacing between samples.
dtype: dtype, optional
The type of the output array.
split: int, optional
The axis along which the array is split and distributed, defaults to None (no distribution).
device : str, ht.Device or None, optional
Specifies the device the tensor shall be allocated on, defaults to None (i.e. globally set default device).
comm: Communication, optional
Handle to the nodes holding distributed parts or copies of this tensor.
Returns
-------
samples: ht.DNDarray
There are num equally spaced samples in the closed interval [start, stop] or the half-open interval
[start, stop) (depending on whether endpoint is True or False).
step: float, optional
Size of spacing between samples, only returned if retstep is True.
Examples
--------
>>> ht.linspace(2.0, 3.0, num=5)
tensor([ 2. , 2.25, 2.5 , 2.75, 3. ])
>>> ht.linspace(2.0, 3.0, num=5, endpoint=False)
tensor([ 2. , 2.2, 2.4, 2.6, 2.8])
>>> ht.linspace(2.0, 3.0, num=5, retstep=True)
(array([ 2. , 2.25, 2.5 , 2.75, 3. ]), 0.25)
"""
# sanitize input parameters
start = float(start)
stop = float(stop)
num = int(num)
if num <= 0:
raise ValueError(
"number of samples 'num' must be non-negative integer, but was {}".format(num)
)
step = (stop - start) / max(1, num - 1 if endpoint else num)
# sanitize device and comm
device = devices.sanitize_device(device)
comm = sanitize_comm(comm)
# infer local and global shapes
gshape = (num,)
split = sanitize_axis(gshape, split)
offset, lshape, _ = comm.chunk(gshape, split)
balanced = True
# compose the local tensor
start += offset * step
stop = start + lshape[0] * step - step
data = torch.linspace(start, stop, lshape[0], device=device.torch_device)
if dtype is not None:
data = data.type(types.canonical_heat_type(dtype).torch_type())
# construct the resulting global tensor
ht_tensor = dndarray.DNDarray(
data, gshape, types.canonical_heat_type(data.dtype), split, device, comm, balanced
)
if retstep:
return ht_tensor, step
return ht_tensor | 0597835fae9658f65553496b1272d786678919c2 | 13,299 |
def _h1_cmp_chi2_ ( h1 ,
h2 ,
density = False ) :
"""Compare histograms by chi2
>>> h1 = ... ## the first histo
>>> h2 = ... ## the second histo (or function or anything else)
>>> chi2ndf , probability = h1.cmp_chi2 ( h2 )
"""
assert isinstance ( h1 , ROOT.TH1 ) and 1 == h1.dim () , \
"cmp_dist: invalid type of h1 %s/%s" % ( h1 , type ( h1 ) )
if isinstance ( h2 , ROOT.TH1 ) :
assert 1 == h2.dim () , "cmp_dist: invalid type of h2 %s/%s" % ( h2 , type ( h2 ) )
if density :
h1_ = h1.density() if hasattr ( h1 , 'density' ) else h1
h2_ = h2.density() if hasattr ( h2 , 'density' ) else h2
cmp = _h1_cmp_chi2_ ( h1_ , h2_ , density = False )
if h1_ is not h1 : del h1_
if h2_ is not h2 : del h2_
return cmp
chi2 = 0.0
ndf = 0
for i , x , v1 in h1.items() :
v2 = h2 ( x.value() )
chi2 += v1.chi2 ( v2 )
ndf += 1
c2ndf = chi2/ndf
return c2ndf, ROOT.TMath.Prob ( chi2 , ndf ) | ff2e5c191491c18adc29edf08a5bb837fabf045f | 13,300 |
def _questionnaire_metric(name, col):
"""Returns a metrics SQL aggregation tuple for the given key/column."""
return _SqlAggregation(
name,
"""
SELECT {col}, COUNT(*)
FROM participant_summary
WHERE {summary_filter_sql}
GROUP BY 1;
""".format(
col=col, summary_filter_sql=_SUMMARY_FILTER_SQL
),
lambda v: QuestionnaireStatus.lookup_by_number(v).name,
None,
) | abd477798670733788461ed35fc0b4814ee7081d | 13,301 |
import re
def xyzToAtomsPositions(xyzFileOrStr):
"""
Returns atom positions (order) given a molecule in an xyz format.
Inchi-based algorithm.
Use this function to set the atoms positions in a reference
molecule. The idea is to assign the positions once and to never
change them again.
Arguments:
----------
xyzFileOrStr : str
input xyz molecule (either file path or xyz string)
Returns:
----------
atomsPositions: dict
dictionary whose keys correspond to atoms positions in xyz
file and values to the newly assigned positions
"""
# get inchi with extra auxiliary log
if ioutils.fileExists(xyzFileOrStr): xyzFileOrStr= ioutils.readFile(xyzFileOrStr)
xyzFileOrStr = xyzToIntertialFrame(xyzFileOrStr)
# swap all hydrogens with a heavy atom, here I picked Cl, but any other halogen atom
    # should also work. this atom swap is to force inchi to consider all the atoms in its
    # connectivity algorithm. note that atoms from the first group (e.g. Na, Li) won't work
# as they produce solids and thus the inchi string is significantly changed
xyzFileOrStr = '\n'.join([xyz_line.replace('H','Cl') for xyz_line in xyzFileOrStr.split('\n')])
inchiWithAux = obconverter.obConvert(inputMol=xyzFileOrStr,inputMolFormat='xyz',
outputMolFormat='inchi', options=['-xa'])
inchi, inchiAux = inchiWithAux.split('\n')
# find connectivity info in the inchi string - used to detect the
# presence of heavy atoms.
atomsInchiConnectivity = re.search(r'/c(\d+?\*)?(.*?)(?=/|$)',inchi)
# read the mapping between heavy atoms (+ lone hydrogens) in xyz and inchi
# from the auxiliary log
atomsInchiAuxMap = re.search(r'/N:(.*?)(?=/|$)',inchiAux)
atomsInchiAuxEquivMap = re.search(r'/E:(.*?)(?=/|$)',inchiAux)
# create the rdkit mol object
rdkitMolFromMol = xyzconverters.xyzToMolToRdkitMol(xyzFileOrStr, removeHs=False)
numAtoms = rdkitMolFromMol.GetNumAtoms()
# initialise the atoms position dict
atomsPositions = {k:None for k in range(numAtoms)}
nextAtomId = 0
mol_frags = rdkitmolutils.rdkitMolToMolFrags(rdkitMolFromMol)
if mol_frags:
print(f'Warning: Provided xyz file contains {len(mol_frags)} molecular fragments.')
#return atomsPositions
# get the atoms based on the inchi connectivity info
if atomsInchiConnectivity is not None:
# process the atomsInchiAuxMap and extract the atoms mapping
atomsInchiAuxMap= atomsInchiAuxMap.groups()[0] \
.replace('/','').replace(';',',').split(',')
atomsInchiMatch = {int(atomId)-1: i
for i, atomId in enumerate(atomsInchiAuxMap)}
atomsInchiMatchList = list(map(lambda x: int(x)-1, atomsInchiAuxMap))
if atomsInchiMatch:
# now disambiguate any equivalent atoms
if atomsInchiAuxEquivMap:
atomsInchiAuxEquivMap= atomsInchiAuxEquivMap.groups()[0] \
.replace('/','').replace(')(','#').replace(')','') \
.replace('(','').split('#')
for i in range(len(atomsInchiAuxEquivMap)):
atomsInchiAuxEquivMap[i] = list(map(lambda x: int(x)-1, atomsInchiAuxEquivMap[i].split(',')))
atomsInchiAuxEquivMap[i] = list(map(lambda x: atomsInchiMatchList[x], atomsInchiAuxEquivMap[i]))
for equivAtomsList in atomsInchiAuxEquivMap:
atomsXYZ = rdkitmolutils.getAtomsXYZs(rdkitMolFromMol, equivAtomsList)
atomsXYZ = (atomsXYZ * 1e7).astype(int)
atomsX = atomsXYZ[:,0].tolist()
atomsY = atomsXYZ[:,1].tolist()
atomsZ = atomsXYZ[:,2].tolist()
_atomsDist = rdkitmolutils.rdkitSumAllAtomsDistFromAtoms(rdkitMolFromMol, equivAtomsList)
_atomsDist = [int(dist * 1e7) for dist in _atomsDist]
# use four invariants to disambiguate atoms
equivAtomsOrder = np.lexsort((atomsZ,atomsY,atomsX,_atomsDist)).tolist()
currentAtomsOrder = sorted([atomsInchiMatch[equivAtomId] for equivAtomId in equivAtomsList])
for equivAtomPos in equivAtomsOrder:
atomsInchiMatch[equivAtomsList[equivAtomPos]] = currentAtomsOrder.pop(0)
# add the atoms positions to the overall atomsPosition dictionary
atomsPositions = {**atomsPositions, **atomsInchiMatch}
nextAtomId = len(atomsInchiMatch)
    # assign positions to any atoms that are left
if nextAtomId < numAtoms:
loneAtomsIds = [atomId
for atomId, refId in atomsPositions.items()
if refId is None]
loneAtomsMap = {}
atomsXYZ = rdkitmolutils.getAtomsXYZs(rdkitMolFromMol, loneAtomsIds)
atomsXYZ = (atomsXYZ * 1e7).astype(int)
atomsX = atomsXYZ[:,0].tolist()
atomsY = atomsXYZ[:,1].tolist()
atomsZ = atomsXYZ[:,2].tolist()
_atomsDist = rdkitmolutils.rdkitSumAllAtomsDistFromAtoms(rdkitMolFromMol, loneAtomsIds)
_atomsDist = [int(dist * 1e7) for dist in _atomsDist]
loneAtomsOrder = np.lexsort((atomsZ,atomsY,atomsX,_atomsDist)).tolist()
for loneAtomPos in loneAtomsOrder:
loneAtomsMap[loneAtomsIds[loneAtomPos]] = nextAtomId
nextAtomId += 1
# add the remaining positions to the overall atoms positions
atomsPositions = {**atomsPositions, **loneAtomsMap}
# check for duplicate and None values at the end
hasDuplicates = len(atomsPositions.values()) > len(set(atomsPositions.values()))
hasNones = None in atomsPositions.values()
if hasDuplicates or hasNones:
        print('Error: atom canonical positions algorithm has failed.')
atomsPositions= {}
return atomsPositions | 8db5c81d0e8aca2eef686d955ed810b4b166d0db | 13,302 |
async def modify_video_favorite_list(
media_id: int,
title: str,
introduction: str = '',
private: bool = False,
credential: Credential = None):
"""
修改视频收藏夹信息。
Args:
media_id (int) : 收藏夹 ID.
title (str) : 收藏夹名。
introduction (str, optional) : 收藏夹简介. Defaults to ''.
private (bool, optional) : 是否为私有. Defaults to False.
credential (Credential, optional): Credential. Defaults to None.
Returns:
dict: API 调用结果。
"""
if credential is None:
credential = Credential()
credential.raise_for_no_sessdata()
credential.raise_for_no_bili_jct()
api = API["operate"]["modify"]
data = {
"title": title,
"intro": introduction,
"privacy": 1 if private else 0,
"cover": "",
"media_id": media_id
}
return await request("POST", api["url"], data=data, credential=credential) | e3618fc59785b63cf7f6810a0f2683bcd18d5277 | 13,303 |
def get_salesforce_log_files():
"""Helper function to get a list available log files"""
return {
"totalSize": 2,
"done": True,
"records": [
{
"attributes": {
"type": "EventLogFile",
"url": "/services/data/v32.0/sobjects/EventLogFile/0ATD00001bROAQ"
},
"Id": "0ATD000000001bROAQ",
"EventType": "API",
"LogFile": "/services/data/v32.0/sobjects/EventLogFile/0ATD00001bROAQ/LogFile",
"LogDate": "2014-03-14T00:00:00.000+0000",
"LogFileLength": 2692.0
},
{
"attributes": {
"type": "EventLogFile",
"url": "/services/data/v32.0/sobjects/EventLogFile/0ATD000000001SdOAI"
},
"Id": "0ATD000000001SdOAI",
"EventType": "API",
"LogFile": "/services/data/v32.0/sobjects/EventLogFile/0ATD00001SdOAI/LogFile",
"LogDate": "2014-03-13T00:00:00.000+0000",
"LogFileLength": 1345.0
}
]
} | 1c182898517d73c360e9f2ab36b902afea8c58d7 | 13,304 |
def remove_true_false_edges(dict_snapshots, dict_weights, index):
"""
Remove chosen true edges from the graph so the embedding could be calculated without them.
:param dict_snapshots: Dict where keys are times and values are a list of edges for each time stamp.
:param dict_weights: Dict where keys are times and values are list of weights for each edge in the time stamp, order
corresponds to the order of edges in dict_snapshots.
    :param index: Index of the pivot time. Up to the pivot time the snapshots form the train set; afterwards they form the test set.
:return: Updated dict_snapshots and dict_weights.
"""
times = list(dict_snapshots.keys())
mapping = {i: times[i] for i in range(len(times))}
keys = list(mapping.keys())
for key in keys:
if key < index:
continue
else:
del dict_snapshots[mapping[key]]
del dict_weights[mapping[key]]
return dict_snapshots, dict_weights | 3f833fda22710c20703aa7590eae0fd649b69634 | 13,305 |
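A tiny illustration with integer timestamps (the real keys can be any hashable time labels):

snaps = {0: [('a', 'b')], 1: [('b', 'c')], 2: [('c', 'd')]}
weights = {0: [1.0], 1: [0.5], 2: [2.0]}
print(remove_true_false_edges(snaps, weights, index=2))
# ({0: [('a', 'b')], 1: [('b', 'c')]}, {0: [1.0], 1: [0.5]})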
def addFavoriteDir(name:str, directory:str, type:str=None, icon:str=None, tooltip:str=None, key:str=None):
"""
addFavoriteDir(name, directory, type, icon, tooltip, key) -> None.
Add a path to the file choosers favorite directory list. The path name can contain environment variables which will be expanded when the user clicks the favourites button
@param name: Favourite path entry ('Home', 'Desktop', etc.).
@param directory: FileChooser will change to this directory path.
@param type: Optional bitwise OR combination of nuke.IMAGE, nuke.SCRIPT, nuke.FONT or nuke.GEO.
@param icon: Optional filename of an image to use as an icon.
@param tooltip: Optional short text to explain the path and the meaning of the name.
@param key: Optional shortcut key.
@return: None.
"""
return None | 28cbabd79d35151877112dd76ffe2a513a2bfcec | 13,306 |
def save(data):
"""Save cleanup annotations."""
data_and_frames = data.split("_")
data = data_and_frames[0]
frames = data_and_frames[1]
if len(data) == 1:
removed = []
else:
removed = [int(f) for f in data[1:].split(':')]
frames = [int(f) for f in frames[:].split(':')]
#fname = APP.basedir + '/' + APP.dlist[APP.targetid] + '/planttag.npz'
fname = APP.basedir + '/' + APP.dlist[APP.targetid] + '/' + APP.tag_name + '.npz'
if len(removed) == 0: # Before: if len(removed) == 0
idx = np.zeros((np.amax(APP.lbls) + 1,), APP.lbls.dtype)
_id = 1
for i in range(1, len(idx)):
if i not in removed:
idx[i] = _id
_id = _id + 1
lbls = idx[APP.lbls]
else:
lbls = APP.lbls
for j in range(len(removed)):
rem = removed[j]
frame = frames[j]
# Remove that label from the frame onwards:
if APP.tag_type == "deletion-onwards":
lbls[:,:,frame:][lbls[:,:,frame:] == rem] = 0
elif APP.tag_type == "deletion-upto":
lbls[:,:,:frame][lbls[:,:,:frame] == rem] = 0
elif APP.tag_type == "deletion-single":
lbls[:,:,frame][lbls[:,:,frame] == rem] = 0
#
tag = [-1]*lbls.max()
for i in range(len(removed)):
tag[removed[i]] = frames[i]
npz = {'removed': np.asarray(removed, np.int16), 'labels': lbls, "frames": np.asarray(frames, np.int16), \
APP.tag_name: tag}
np.savez_compressed(fname, **npz)
return ' ' | be62bd3933374ebac8e735be0f66ca79a6273b35 | 13,307 |
import re
from subprocess import CalledProcessError, check_call, PIPE, Popen


def list_runs_in_swestore(path, pattern=RUN_RE, no_ext=False):
    """
Will list runs that exist in swestore
:param str path: swestore path to list runs
:param str pattern: regex pattern for runs
"""
try:
status = check_call(['icd', path])
proc = Popen(['ils'], stdout=PIPE)
contents = [c.strip() for c in proc.stdout.readlines()]
runs = [r for r in contents if re.match(pattern, r)]
if no_ext:
runs = [r.split('.')[0] for r in runs]
return runs
except CalledProcessError:
return [] | 616089f049129b284ae6575609741620f2ac48f6 | 13,308 |
def linear_regression(
XL: ArrayLike, YP: ArrayLike, Q: ArrayLike
) -> LinearRegressionResult:
"""Efficient linear regression estimation for multiple covariate sets
Parameters
----------
XL
[array-like, shape: (M, N)]
"Loop" covariates for which N separate regressions will be run
YP
[array-like, shape: (M, O)]
Continuous traits that have had core covariates eliminated through orthogonal projection.
Q
[array-like, shape: (M, P)]
Orthonormal matrix computed by applying QR factorization to covariate matrix
Returns
-------
Dataclass containing:
beta : [array-like, shape: (N, O)]
Beta values associated with each loop covariate and outcome
t_value : [array-like, shape: (N, O)]
T statistics for each beta
p_value : [array-like, shape: (N, O)]
P values as float in [0, 1]
"""
if set([x.ndim for x in [XL, YP, Q]]) != {2}:
raise ValueError("All arguments must be 2D")
n_core_covar, n_loop_covar, n_obs, n_outcome = (
Q.shape[1],
XL.shape[1],
YP.shape[0],
YP.shape[1],
)
dof = n_obs - n_core_covar - 1
if dof < 1:
raise ValueError(
"Number of observations (N) too small to calculate sampling statistics. "
"N must be greater than number of core covariates (C) plus one. "
f"Arguments provided: N={n_obs}, C={n_core_covar}."
)
# Apply orthogonal projection to eliminate core covariates
# Note: QR factorization or SVD should be used here to find
# what are effectively OLS residuals rather than matrix inverse
# to avoid need for MxM array; additionally, dask.lstsq fails
# with numpy arrays
LS = Q @ (Q.T @ XL)
assert XL.shape == LS.shape
XLP = XL - LS
assert XLP.shape == (n_obs, n_loop_covar)
# Estimate coefficients for each loop covariate
# Note: A key assumption here is that 0-mean residuals
# from projection require no extra terms in variance
# estimate for loop covariates (columns of G), which is
# only true when an intercept is present.
XLPS = (XLP ** 2).sum(axis=0, keepdims=True).T
assert XLPS.shape == (n_loop_covar, 1)
B = (XLP.T @ YP) / XLPS
assert B.shape == (n_loop_covar, n_outcome)
# Compute residuals for each loop covariate and outcome separately
YR = YP[:, np.newaxis, :] - XLP[..., np.newaxis] * B[np.newaxis, ...]
assert YR.shape == (n_obs, n_loop_covar, n_outcome)
RSS = (YR ** 2).sum(axis=0)
assert RSS.shape == (n_loop_covar, n_outcome)
# Get t-statistics for coefficient estimates
T = B / np.sqrt(RSS / dof / XLPS)
assert T.shape == (n_loop_covar, n_outcome)
# Match to p-values
# Note: t dist not implemented in Dask so this must be delayed,
# see https://github.com/dask/dask/issues/6857
P = da.map_blocks(
lambda t: 2 * stats.distributions.t.sf(np.abs(t), dof),
map_blocks_asnumpy(T),
dtype="float64",
)
assert P.shape == (n_loop_covar, n_outcome)
P = np.asarray(P, like=T)
return LinearRegressionResult(beta=B, t_value=T, p_value=P) | 3059987940eefce0a4a401c096d8b7be0d3ce1d7 | 13,309 |
import math

import numpy as np


def orthogonal_decomposition(C, tr_error, l_exp):
"""
Orthogonal decomposition of the covariance matrix to determine the meaningful directions
:param C: covariance matrix
:param tr_error: allowed truncation error
:param l_exp: expansion order
:return: transformation matrix Wy, number of terms N_t and meaningful directions k
"""
# eigenvalues and eigenvectors
v, w = np.linalg.eig(C)
v_sum = np.sum(v)
err_v = 1
k = 0 # meaningful directions
while err_v > tr_error:
err_v = 1 - v[k] / v_sum
k += 1
N_t = int(math.factorial(l_exp + k) / (math.factorial(k) * math.factorial(l_exp))) # number of terms
Wy = w[:,:k] # and for now, do not define Wz
return Wy, N_t, k | d6920b31a0503ad15b98631de352da690b6761b8 | 13,310 |
import http
import json
def get_data():
"""Reads the current state of the world"""
server = http.client.HTTPConnection(URL)
server.request('GET','/data')
response = server.getresponse()
if (response.status == 200):
data = response.read()
response.close()
return json.loads(data.decode())
else:
return UnexpectedResponse(response) | 3c0563f2776c60ea103db154c63e2053b1d7d045 | 13,311 |
def chi_angles(filepath, model_id=0):
"""Calculate chi angles for a given file in the PDB format.
:param filepath: Path to the PDB file.
:param model_id: Model to be used for chi calculation.
:return: A list composed by a list of chi1, a list of chi2, etc.
"""
torsions_list = _sidechain_torsions(filepath, model_id)
chis = [item[2] for item in torsions_list]
return list(zip(*chis)) | 85c192fe6c272cad5cdd7dbb4f570f1f78284057 | 13,312 |
import numpy as np


def surface_sphere(radius):
    """
    Return the (x, y, z) coordinate grids of a sphere surface with the given radius.
    """
phi, theta = np.mgrid[0.0:np.pi:100j, 0.0:2.0*np.pi:100j]
x_blank_sphere = radius*np.sin(phi)*np.cos(theta)
y_blank_sphere = radius*np.sin(phi)*np.sin(theta)
z_blank_sphere = radius*np.cos(phi)
sphere_surface = np.array(([x_blank_sphere,
y_blank_sphere,
z_blank_sphere]))
return sphere_surface | 25750b7c4a57dd3a2f3ebb5a2a041fa1f5e56c89 | 13,313 |
import re
def format_bucket_objects_listing(bucket_objects):
"""Returns a formated list of buckets.
Args:
buckets (list): A list of buckets objects.
Returns:
The formated list as string
"""
out = ""
i = 1
for o in bucket_objects:
# Shorten to 24 chars max, remove linebreaks
name = re.sub(r'[\n\r]', ' ',
o.name[:63] + '..'
if len(o.name) > 65
else o.name)
size = sizeof_fmt(o.size)
time = f"{o.time_modified:%Y-%m-%d %H:%M}" \
if o.time_modified is not None else ""
out += (f"{i:>4} {name:65} {size:8} {time:16}\n")
i += 1
return out | f5268d148687338ed606b1593065a0c1842cac00 | 13,314 |
def charts(chart_type, cmid, start_date, end_date=None):
"""
Get the given type of charts for the artist.
https://api.chartmetric.com/api/artist/:id/:type/charts
**Parameters**
- `chart_type`: string type of charts to pull, choose from
'spotify_viral_daily', 'spotify_viral_weekly',
'spotify_top_daily', 'spotify_top_weekly',
'applemusic_top', 'applemusic_daily',
'applemusic_albums', 'itunes_top',
'itunes_albums', 'shazam', 'beatport'
- `cmid`: string or int Chartmetric artist ID
    - `start_date`: string of start date in ISO format
- `end_date`: string of end date in ISO format
**Returns**
A list of dictionaries of specific type of charts for the given artist.
"""
urlhandle = f"/artist/{cmid}/{chart_type}/charts"
params = {
"since": start_date,
"until": end_date if end_date else utilities.strDateToday(),
}
data = utilities.RequestData(urlhandle, params)
return utilities.RequestGet(data)["data"] | 2dfafd09f53bf2add20afcba8c85a6f081e551af | 13,315 |
def search_candidates(api_key, active_status="true"):
"""
https://api.open.fec.gov/developers#/candidate/get_candidates_
"""
query = """https://api.open.fec.gov/v1/candidates/?sort=name&sort_hide_null=false&is_active_candidate={active_status}&sort_null_only=false&sort_nulls_last=false&page=1&per_page=20&api_key={api_key}""".format(
api_key=api_key,
active_status=active_status
)
return get_response(
query=query
) | 9ec1c39541cda87f1d1618d4e5497b8215a5f4b4 | 13,317 |
from scipy.io import loadmat


def load_dat(file_name):
    """
    Loads the specified MATLAB .dat/.mat file and returns
    its contents as numpy arrays.
    """
data = loadmat(file_name)
y = data['y']
X = data['X']
ytest = data['ytest']
Xtest = data['Xtest']
yval = data['yval']
Xval = data['Xval']
return X,y,Xtest,ytest,Xval,yval | 5c3de04f60ea803e1c70dbc2103425b10ee58567 | 13,318 |
def get_specific_pos_value(img, pos):
"""
Parameters
----------
img : ndarray
image data.
pos : list
        pos[0] is the horizontal coordinate, pos[1] is the vertical coordinate.
"""
return img[pos[1], pos[0]] | 3929b29fa307a7e8b5282783c16639cacb2ab805 | 13,319 |
from typing import List
from typing import Tuple
from typing import Dict
from typing import Any
from typing import Set
def transpose_tokens(
cards: List[MTGJSONCard]
) -> Tuple[List[MTGJSONCard], List[Dict[str, Any]]]:
"""
Sometimes, tokens slip through and need to be transplanted
back into their appropriate array. This method will allow
us to pluck the tokens out and return them home.
:param cards: Cards+Tokens to iterate
:return: Cards, Tokens as two separate lists
"""
# Order matters with these, as if you do cards first
# it will shadow the tokens lookup
# Single faced tokens are easy
tokens = [
scryfall.download(scryfall.SCRYFALL_API_CARD + card.get("scryfallId"))
for card in cards
if card.get("layout") in ["token", "emblem"]
]
# Do not duplicate double faced tokens
done_tokens: Set[str] = set()
for card in cards:
if (
card.get("layout") == "double_faced_token"
and card.get("scryfallId") not in done_tokens
):
tokens.append(
scryfall.download(scryfall.SCRYFALL_API_CARD + card.get("scryfallId"))
)
done_tokens.add(card.get("scryfallId"))
# Remaining cards, without any kind of token
cards = [
card
for card in cards
if card.get("layout") not in ["token", "double_faced_token", "emblem"]
]
return cards, tokens | 339e8d18a4c80e168411c874f8afac97b14db77b | 13,320 |
from datetime import datetime
def from_local(local_dt, timezone=None):
"""Converts the given local datetime to a universal datetime."""
if not isinstance(local_dt, datetime.datetime):
raise TypeError('Expected a datetime object')
if timezone is None:
a = arrow.get(local_dt)
else:
a = arrow.get(local_dt, timezone)
return a.to('UTC').naive | 6b4eb44aa66c04a23aa8dac2bbe882e5619cd45f | 13,321 |
import re
def mrefresh_to_relurl(content):
"""Get a relative url from the contents of a metarefresh tag"""
urlstart = re.compile('.*URL=')
_, url = content.split(';')
url = urlstart.sub('', url)
return url | 90cc3dbace5d4b001698612f9263309fa95aac8b | 13,322 |
import torch
def simclr_loss_func(
z1: torch.Tensor,
z2: torch.Tensor,
temperature: float = 0.1,
extra_pos_mask=None,
) -> torch.Tensor:
"""Computes SimCLR's loss given batch of projected features z1 from view 1 and
projected features z2 from view 2.
Args:
z1 (torch.Tensor): NxD Tensor containing projected features from view 1.
z2 (torch.Tensor): NxD Tensor containing projected features from view 2.
temperature (float): temperature factor for the loss. Defaults to 0.1.
extra_pos_mask (Optional[torch.Tensor]): boolean mask containing extra positives other
than normal across-view positives. Defaults to None.
Returns:
torch.Tensor: SimCLR loss.
"""
device = z1.device
b = z1.size(0)
z = torch.cat((z1, z2), dim=0)
z = F.normalize(z, dim=-1)
logits = torch.einsum("if, jf -> ij", z, z) / temperature
logits_max, _ = torch.max(logits, dim=1, keepdim=True)
logits = logits - logits_max.detach()
# positive mask are matches i, j (i from aug1, j from aug2), where i == j and matches j, i
pos_mask = torch.zeros((2 * b, 2 * b), dtype=torch.bool, device=device)
pos_mask[:, b:].fill_diagonal_(True)
pos_mask[b:, :].fill_diagonal_(True)
# if we have extra "positives"
if extra_pos_mask is not None:
pos_mask = torch.bitwise_or(pos_mask, extra_pos_mask)
# all matches excluding the main diagonal
logit_mask = torch.ones_like(pos_mask, device=device).fill_diagonal_(0)
exp_logits = torch.exp(logits) * logit_mask
log_prob = logits - torch.log(exp_logits.sum(1, keepdim=True))
# compute mean of log-likelihood over positives
mean_log_prob_pos = (pos_mask * log_prob).sum(1) / pos_mask.sum(1)
# loss
loss = -mean_log_prob_pos.mean()
return loss | b9d7880ec1c8a66321623a0061d201f9bbaaa426 | 13,323 |
def find_node_types(G, edge_type):
"""
:param G: NetworkX graph.
:param edge_type: Edge type.
:return: Node types that correspond to the edge type.
"""
for e in G.edges:
if G[e[0]][e[1]][e[2]]['type'] == edge_type:
u, v = e[0], e[1]
break
utype = G.nodes[u]['type']
vtype = G.nodes[v]['type']
try:
if int(utype) > int(vtype):
return utype, vtype
else:
return vtype, utype
    except (ValueError, TypeError):
return utype, vtype | 970bbbabe172460a974dbf961500def2280b9fe1 | 13,324 |
import scipy
def distance_point_point(p1, p2):
"""Calculates the euclidian distance between two points or sets of points
>>> distance_point_point(np.array([1, 0]), np.array([0, 1]))
1.4142135623730951
>>> distance_point_point(np.array([[1, 1], [0, 0]]), np.array([0, 1]))
array([1., 1.])
>>> distance_point_point(np.array([[1, 0], [0, 0]]), np.array([[0, 0], [0, -3]]))
array([1., 3.])
"""
return scipy.spatial.minkowski_distance(p1, p2) | 481733330a99576540d2a80676d51d315b6406f7 | 13,325 |
def switch(
confs=None, remain=False, all_checked=False, _default=None, **kwargs
):
"""
Execute first statement among conf where task result is True.
If remain, process all statements conf starting from the first checked
conf.
:param confs: task confs to check. Each one may contain a task action at
the key 'action' in conf.
:type confs: str or dict or list
:param bool remain: if True, execute all remaining actions after the
first checked condition.
:param bool all_checked: execute all statements where conditions are
checked.
:param _default: default task to process if others have not been checked.
:type _default: str or dict
:return: statement result or list of statement results if remain.
:rtype: list or object
"""
# init result
result = [] if remain else None
# check if remain and one task has already been checked.
remaining = False
if confs is not None:
if isinstance(confs, string_types) or isinstance(confs, dict):
confs = [confs]
for conf in confs:
# check if task has to be checked or not
check = remaining
if not check:
# try to check current conf
check = run(conf=conf, **kwargs)
# if task is checked or remaining
if check:
if STATEMENT in conf: # if statements exist, run them
statement = conf[STATEMENT]
statement_result = run(statement, **kwargs)
# save result
if not remain: # if not remain, result is statement_result
result = statement_result
else: # else, add statement_result to result
result.append(statement_result)
# if remain
if remain:
# change of remaining status
if not remaining:
remaining = True
elif all_checked:
pass
else: # leave execution if one statement has been executed
break
# process _default statement if necessary
if _default is not None and (remaining or (not result) or all_checked):
last_result = run(_default, **kwargs)
if not remain:
result = last_result
else:
result.append(last_result)
return result | aba656d4a6d06f721551aa49ec1521d0fa9444d3 | 13,326 |
import time
from multiprocessing import Process, Queue


def makeProcesses(nChildren):
    """
    Create and start all the worker processes
    """
    global taskQueue, resultsQueue, workers
    if nChildren < 0:
        print('makeProcesses: ', nChildren, ' is too small')
        return False
    if nChildren > 3:
        print('makeProcesses: ', nChildren, ' is too large')
        return False
# Create a task queue for each worker to receive the image segment
taskQueue = []
for k in range(nChildren):
taskQueue.append(Queue())
resultsQueue = Queue() # Single results queue
#Create and start the workers
workers = []
for k in range(nChildren):
p = Process(target=worker, args=(k,taskQueue[k],resultsQueue))
workers.append(p)
for p in workers:
p.start()
time.sleep(2)
return True | 748372d9c83917841eeba5e400f37a5ecf5961dd | 13,327 |
def create_moleculenet_model(model_name):
"""Create a model.
Parameters
----------
model_name : str
Name for the model.
Returns
-------
Created model
"""
for func in [create_bace_model, create_bbbp_model, create_clintox_model, create_esol_model,
create_freesolv_model, create_hiv_model, create_lipophilicity_model,
create_muv_model, create_pcba_model, create_sider_model, create_tox21_model,
create_toxcast_model]:
model = func(model_name)
if model is not None:
return model
return None | 19f15eb4fd1a5c1befaef306cb7d146d7933919e | 13,328 |
from typing import Collection
from typing import Optional
from typing import Set
def detect_daml_lf_dir(paths: "Collection[str]") -> "Optional[str]":
"""
Find the biggest Daml-LF v1 version in the set of file names from a Protobuf archive, and return
the path that contains the associated files (with a trailing slash).
If there is ever a Daml-LF 2, then this logic will need to be revisited; however, when that
happens, there are likely to be even larger changes required so we won't worry about this too
much right now.
:param paths: The paths in a Protobuf zipfile to examine.
:return: The root directory of a target Daml-LF protobuf version, stripped of a prefix.
>>> detect_daml_lf_dir([
... "protos-1.15.0/com/daml/daml_lf_1_10/something.proto",
... "protos-1.15.0/com/daml/daml_lf_1_9/something.proto",
... "protos-1.15.0/com/daml/daml_lf_dev/something.proto",
... "protos-1.15.0/com/daml/daml_lf_1_what/something.proto",
... ])
'com/daml/daml_lf_1_10/'
"""
daml_lf_prefix = "com/daml/daml_lf_1_"
minor_versions = set() # type: Set[int]
for p in paths:
_, _, truncated_path = p.partition("/")
if truncated_path.startswith(daml_lf_prefix):
version_str, _, _ = truncated_path[len(daml_lf_prefix) :].partition("/")
try:
minor_versions.add(int(version_str))
except ValueError:
# skip over unrecognized directory names
pass
if minor_versions:
return f"{daml_lf_prefix}{max(minor_versions)}/"
else:
return None | acd2b99236a3534ec64c375893b40511995e6dfc | 13,329 |
import numpy as np


def random_mini_batches(X, Y, mini_batch_size):
"""
Creates a list of random minibatches from (X, Y)
Arguments:
X -- input data, of shape (m, n_H, n_W, c)
Y -- true "label" vector of shape (m, num_classes)
mini_batch_size -- size of mini-batches, integer
Returns:
mini_batches -- list of synchronous (mini_batch_X, mini_batch_Y)
"""
# Extract the input data shapes.
m = X.shape[0]
num_classes = Y.shape[1]
# Instantiate an empty list to hold mini batch X-Y tuples with size batch_size.
mini_batches = []
# Shuffle X and Y.
permutation = list(np.random.permutation(m))
shuffled_X = X[permutation, :, :, :]
shuffled_Y = Y[permutation, :]
# Divide (shuffled_X, shuffled_Y) into batches minus the end case.
num_complete_minibatches = m // mini_batch_size
for k in range(0, num_complete_minibatches):
mini_batch_X = shuffled_X[ k*mini_batch_size:(k+1)*mini_batch_size, :,:,:]
mini_batch_Y = shuffled_Y[ k*mini_batch_size:(k+1)*mini_batch_size, :]
mini_batch = (mini_batch_X, mini_batch_Y)
mini_batches.append(mini_batch)
# Handle the end case if the last mini-batch < mini_batch_size.
if m % mini_batch_size != 0:
mini_batch_X = shuffled_X[ num_complete_minibatches*mini_batch_size: , :,:,:]
mini_batch_Y = shuffled_Y[ num_complete_minibatches*mini_batch_size: , :]
mini_batch = (mini_batch_X, mini_batch_Y)
mini_batches.append(mini_batch)
return mini_batches | fbad986073bfb867f5e35bf1a0ee639b644f00bb | 13,330 |
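A quick shape check with toy data (array sizes are arbitrary):

X = np.random.rand(10, 28, 28, 3)            # 10 examples
Y = np.eye(4)[np.random.randint(0, 4, 10)]   # one-hot labels for 4 classes
batches = random_mini_batches(X, Y, mini_batch_size=4)
print([b[0].shape[0] for b in batches])      # [4, 4, 2] - two full batches plus the remainder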
# Assumption: `types` and `enums` come from the Google Cloud Natural Language
# client (google-cloud-language < 2.0), not the stdlib `types` module.
from google.cloud.language import enums, types


def classifyContent(text):
"""
Uses the NLP provider's SDK to perform a content classification operation.
Arguments:
text {String} -- Text to be analyzed.
"""
document = types.Document(
content=text,
type=enums.Document.Type.PLAIN_TEXT,
language='en')
try:
response = client.classify_text(document=document)
values = []
for category in response.categories:
values.append({
"category": category.name,
"confidence": category.confidence
})
return(Classiciation(values, ""))
except Exception as e:
return Classiciation([], str(e.args)) | eebb4ebef4811748d5fb9e130e582dae289f9ce7 | 13,331 |
def print_instance_summary(instance, use_color='auto'):
""" Print summary info line for the supplied instance """
colorize_ = partial(colorize, use_color=use_color)
name = colorize_(instance.name, "yellow")
instance_type = instance.extra['gonzo_size']
if instance.state == NodeState.RUNNING:
status_colour = "green"
else:
status_colour = "red"
instance_status = NodeState.tostring(instance.state)
status = colorize_(instance_status, status_colour)
if 'owner' in instance.extra['gonzo_tags']:
owner = instance.extra['gonzo_tags']['owner']
else:
owner = "---"
uptime = format_uptime(instance.extra['gonzo_created_time'])
uptime = colorize_(uptime, "blue")
availability_zone = instance.extra['gonzo_az']
result_list = [
name,
instance_type,
status,
owner,
uptime,
availability_zone,
]
return result_list | e250645e040fba4bbd9df0e86bc3711d3f8ac51e | 13,332 |
import datetime
def generate_blob_sas_token(blob, container, blob_service, permission=BlobPermissions.READ):
"""Generate a blob URL with SAS token."""
sas_token = blob_service.generate_blob_shared_access_signature(
container, blob.name,
permission=permission,
start=datetime.datetime.utcnow() - datetime.timedelta(minutes=15),
expiry=datetime.datetime.utcnow() + datetime.timedelta(days=FileUtils.SAS_EXPIRY_DAYS))
return blob_service.make_blob_url(container, quote(blob.name.encode('utf-8')), sas_token=sas_token) | e3993c3dd075516bce07221cf9351ab74a431a27 | 13,333 |
from typing import Sequence
def rewrite_complex_signature(function, signature: Sequence[tf.TensorSpec]):
"""Compatibility layer for testing complex numbers."""
if not all([spec.dtype.is_complex for spec in signature]):
raise NotImplementedError("Signatures with mixed complex and non-complex "
"tensor specs are not supported.")
# Rewrite the signature, replacing all complex tensors with pairs of real
# and imaginary tensors.
real_imag_signature = []
for spec in signature:
new_dtype = tf.float32 if spec.dtype.size == 8 else tf.float64
real_imag_signature.append(tf.TensorSpec(spec.shape, new_dtype))
real_imag_signature.append(tf.TensorSpec(spec.shape, new_dtype))
return _complex_wrapper(function), real_imag_signature | e541ef96c6b4f2492847443e8ca45f18fc9383ff | 13,334 |
def get_args(argv: list):
"""gets the args and dictionarize them"""
if len(argv) not in [5,7]:
Errors.args_error()
data = {}
# getting the type of the title
if "-" in argv[1]:
data["type"] = "series" if argv[1] == "-s" else "movie" if argv[1] == "-m" else None
else:
Errors.args_error()
# getting the title itself
data["title"] = argv[2]
data["format"] = argv[3]
data["language"] = argv[4]
if data["type"] == "series":
if len(argv) != 7:
Errors.args_error()
try:
data["season"] = int(argv[5])
data["episode"] = int(argv[6])
        except ValueError:
Errors.args_error()
return data | 6f29e63bc19b57cdf9f49cf2dd7b099c62a604a0 | 13,335 |
def fund_with_erc20(
to_fund_address, erc20_token_contract, ether_amount=0.1, account=None
):
"""Send a specified amount of an ERC20 token to an address.
Args:
to_fund_address (address): Address to send to the tokens to.
erc20_token_contract (Contract): Contract of the ERC20 token.
ether_amount (float, optional): Amount to be sent, in ETHER. Defaults to 0.1.
account (address, optional): Account from which to send the transaction. Defaults to None.
Returns:
TransactionReceipt
"""
account = account if account else get_account()
print(
f"Funding {to_fund_address} with {ether_amount} {erc20_token_contract.symbol()}..."
)
tx = erc20_token_contract.transfer(
to_fund_address,
Web3.toWei(ether_amount, "ether"),
{"from": account},
)
tx.wait(1)
print(
f"Funded {to_fund_address} with {ether_amount} {erc20_token_contract.symbol()}."
)
return tx | 6eaf46519645b8b6bbf36b39ea106c05924ab51f | 13,336 |
from carbonplan_trace.v1.glas_preprocess import select_valid_area # avoid circular import
def energy_adj_ground_to_sig_end(ds):
"""
Waveform energy from the ground peak. We calculated senergy_whrc as the energy of the waveform (in digital counts) from the ground peak
to the signal end multiplied by two. Ground peak defined as whichever of the two lowest peaks has greater amplitude. We then applied the
following linear transformation in order to calculate on the same scale as data published by Margolis et al. (2015)
senergy = -4.397006 + 0.006208 * senergy_whrc
"""
path = 'gs://carbonplan-climatetrace/inputs/volt_table.csv'
volt_table = pd.read_csv(path)
volt_to_digital_count = volt_table.set_index('volt_value')['ind'].to_dict()
wf_in_digital_count = xr.apply_ufunc(
volt_to_digital_count.__getitem__,
ds.rec_wf.astype(float).round(6).fillna(-0.195279),
vectorize=True,
dask='parallelized',
output_dtypes=[int],
)
ds = get_dist_metric_value(ds, metric='adj_ground_peak_dist_actual_wf')
# the processed wf is from sig beg to sig end, select adj ground peak to sig end instead
ground_energy = select_valid_area(
bins=ds.rec_wf_sample_dist,
wf=wf_in_digital_count,
signal_begin_dist=ds.adj_ground_peak_dist_actual_wf,
signal_end_dist=ds.sig_end_dist,
)
# make sure dimensions matches up
dims = ds.processed_wf.dims
ground_energy = ground_energy.transpose(dims[0], dims[1])
senergy_whrc = ground_energy.sum(dim="rec_bin") * 2
return -4.397006 + 0.006208 * senergy_whrc | b1f2f9acacb2186694aec3249632fea1fd4f7a58 | 13,337 |
import logging
def get_previous_version(versions: dict, app: str) -> str:
"""Looks in the app's .version_history to retrieve the prior version"""
try:
with open(f"{app}/.version_history", "r") as fh:
lines = [line.strip() for line in fh]
except FileNotFoundError:
logging.warning(f"No .version_history for {app}")
return ""
if versions[app] != lines[-1]:
logging.warning(
f"Mismatch in data:\n\tCurrent version is {versions[app]}"
f" but most recent line in .version_history is {lines[-1]}"
)
return ""
elif len(lines) < 2:
logging.warning("No prior version recorded")
return ""
return lines[-2] | d3a4aec5c3bc842181aa3901971774761866c3e5 | 13,338 |
def healthcheck() -> bool:
"""FastAPI server healthcheck."""
return True | 1767229ccda121e88264093c479d2bccf994a7e9 | 13,339 |
def ToHexStr(num):
"""
    Convert the returned error code to its hexadecimal representation.
    :param num: error code
    :return: hexadecimal string
"""
chaDic = {10: 'a', 11: 'b', 12: 'c', 13: 'd', 14: 'e', 15: 'f'}
hexStr = ""
if num < 0:
num = num + 2**32
while num >= 16:
digit = num % 16
hexStr = chaDic.get(digit, str(digit)) + hexStr
num //= 16
hexStr = chaDic.get(num, str(num)) + hexStr
return hexStr | b6cf482defdc9f4fcf9ce64903e7a718e096bacb | 13,340 |
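# Usage sketch for ToHexStr above (values traced through the digit loop by hand; purely illustrative):
print(ToHexStr(255))  # 'ff'
print(ToHexStr(-1))   # 'ffffffff' -- negative codes are wrapped into the unsigned 32-bit range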
import requests
def getSBMLFromBiomodelsURN(urn):
""" Get SBML string from given BioModels URN.
Searches for a BioModels identifier in the given urn and retrieves the SBML from biomodels.
For example:
urn:miriam:biomodels.db:BIOMD0000000003.xml
Handles redirects of the download page.
:param urn:
:return: SBML string for given model urn
"""
if ":" not in urn:
raise ValueError("The URN", urn, "is not in the correct format: it must be divided by colons in a format such as 'urn:miriam:biomodels.db:BIOMD0000000003.xml'.")
core = urn.split(":")[-1].split(".")[0]
url = "https://www.ebi.ac.uk/biomodels/model/download/" + core + "?filename="+ core + "_url.xml"
response = requests.get(url, allow_redirects=True)
response.raise_for_status()
sbml = response.content
# bytes array in py3
try:
sbml_str = str(sbml.decode("utf-8"))
except:
sbml_str = str(sbml)
return sbml_str | 9a28f4a0619ebed6f9e272d84331482442ae9fb8 | 13,341 |
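# Usage sketch for getSBMLFromBiomodelsURN above, reusing the URN quoted in its docstring.
# The call needs network access to EBI BioModels and is illustrative only:
sbml_string = getSBMLFromBiomodelsURN("urn:miriam:biomodels.db:BIOMD0000000003.xml")
print(sbml_string[:60])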
def draw(k, n):
"""
Select k things from a pool of n without replacement.
"""
# At k == n/4, an extra 0.15*k draws are needed to get k unique draws
if k > n/4:
result = rng.permutation(n)[:k]
else:
s = set()
result = np.empty(k, 'i')
for i in range(k):
p = rng.randint(n)
while p in s:
p = rng.randint(n)
s.add(p)
result[i] = p
return result | d7135d659fc4e702942ea2da0f794fcb9d77bfd2 | 13,342 |
def print_version(args):
"""Print the version (short or long)"""
# Long version
if len(args) > 0 and args[0] == '--full':
apk_version = dtfglobals.get_generic_global(
dtfglobals.CONFIG_SECTION_CLIENT, 'apk_version')
bundle_version = dtfglobals.get_generic_global(
dtfglobals.CONFIG_SECTION_BINDINGS, 'version')
python_version = constants.VERSION
print("Python Version: %s" % python_version)
print("dtfClient Version: %s" % apk_version)
print("Bindings Version Date: %s" % bundle_version)
else:
print(constants.VERSION)
return 0 | b7bec22239e765d3e9c2131302144b0e44360f2a | 13,343 |
from datetime import datetime
def naturalTimeDifference(value):
"""
Finds the difference between the datetime value given and now()
and returns appropriate humanize form
"""
if isinstance(value, datetime):
delta = datetime.now() - value
if delta.days > 6:
return value.strftime("%b %d") # May 15
if delta.days > 1:
return value.strftime("%A") # Wednesday
elif delta.days == 1:
return 'yesterday' # yesterday
elif delta.seconds > 3600:
if delta.seconds < 7200:
return '1 hour ago'
else:
                return str(delta.seconds // 3600) + ' hours ago'  # 3 hours ago
elif delta.seconds > 60:
if delta.seconds < 120:
return '1 minute ago'
else:
                return str(delta.seconds // 60) + ' minutes ago'  # 29 minutes ago
elif delta.seconds > 10:
return str(delta.seconds) + ' seconds ago' # 15 seconds ago
else:
return 'a moment ago' # a moment ago
return defaultfilters.date(value)
else:
return str(value) | ce285358b1b99a4b2df460e6193d2a0970aa4eff | 13,344 |
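# Usage sketch for naturalTimeDifference above (illustrative; exact output depends on the clock):
from datetime import timedelta
print(naturalTimeDifference(datetime.now() - timedelta(minutes=29)))  # '29 minutes ago'
print(naturalTimeDifference(datetime.now() - timedelta(days=1)))      # 'yesterday'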
def raises_Invalid(function):
"""A decorator that asserts that the decorated function raises
dictization_functions.Invalid.
Usage:
@raises_Invalid
def call_validator(*args, **kwargs):
return validators.user_name_validator(*args, **kwargs)
call_validator(key, data, errors)
"""
def call_and_assert(*args, **kwargs):
with pytest.raises(df.Invalid):
function(*args, **kwargs)
return call_and_assert | b1dcaea71cfe95e25029be360645c68a0906346d | 13,345 |
from pathlib import Path
def load_dataset(dataset_path: Path) -> [Instruction]:
"""Returns the program as a list of alu instructions."""
with open_utf8(dataset_path) as file:
program = []
for line in file:
if len(line.strip()) > 0:
instruction = line.strip().split(" ")
if len(instruction) == 2:
instruction.append(None) # No b value
else:
try: # B instruction is constant.
instruction[2] = int(instruction[2])
except ValueError:
pass # B instruction is string reference.
program.append(
Instruction(
func=getattr(alu, instruction[0]),
a=instruction[1],
b=instruction[2],
)
)
return program | 6d9fd90401c750a4aa5d83491c9610984b95ebd1 | 13,346 |
import json
def process_info(args):
"""
Process a single json file
"""
fname, opts = args
with open(fname, 'r') as f:
ann = json.load(f)
f.close()
examples = []
skipped_instances = 0
for instance in ann:
components = instance['components']
if len(components[0]['poly']) < 3:
continue
        if 'class_filter' in opts and instance['label'] not in opts['class_filter']:
continue
# if instance['image_url'].find('Bhoomi') == -1:
# continue
candidates = [c for c in components]
instance['components'] = candidates
if candidates:
examples.append(instance)
return examples, skipped_instances | 8ade5b21db3cca57d9de91311fc57754161673de | 13,347 |
def logout():
"""
    Log out the current user.
:return:
"""
    # session.pop removes an entry from the session dict; it returns the removed value, or None if the key does not exist
session.pop('user_id', None)
session.pop('mobile', None)
session.pop('nick_name', None)
    # The is_admin session value must be cleared as well; otherwise, after an admin logs out and a regular user logs in, the admin backend would still be reachable
session.pop('is_admin', None)
return jsonify(errno=RET.OK, errmsg="退出成功") | a60e91457ddb32bceeda01a66027209adaf8eecb | 13,348 |
def lift_to_dimension(A,dim):
"""
Creates a view of A of dimension dim (by adding dummy dimensions if necessary).
Assumes a numpy array as input
:param A: numpy array
:param dim: desired dimension of view
:return: returns view of A of appropriate dimension
"""
current_dim = len(A.shape)
if current_dim>dim:
raise ValueError('Can only add dimensions, but not remove them')
if current_dim==dim:
return A
else:
return A.reshape([1]*(dim-current_dim)+list(A.shape)) | bc21d0af45e8073f2e8da6ed57c441739a7385f5 | 13,349 |
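# Usage sketch for lift_to_dimension above (the resulting shape follows from the reshape logic;
# numpy is imported here only for the example):
import numpy as np
A = np.zeros((3, 4))
print(lift_to_dimension(A, 4).shape)  # (1, 1, 3, 4)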
def search(keyword=None):
"""
Display search results in JSON format
Parameters
----------
keyword : str
Search keyword. Default None
"""
return get_json(False, keyword) | 558a34fedb4e05e7a31b655effa47287cbc46202 | 13,350 |
from typing import List
def min_offerings(heights: List[int]) -> int:
"""
    Compute the length of the increasing run ending at each index, scanning from the left
    and from the right. Each index then contributes 1 + the larger of the two run lengths.
"""
length = len(heights)
if length < 2:
return length
left_inc = [0] * length
right_inc = [0] * length
for index in range(1, length):
if heights[index] > heights[index - 1]:
left_inc[index] = left_inc[index - 1] + 1
if heights[length - 1 - index] > heights[length - index]:
right_inc[length - 1 - index] = right_inc[length - index] + 1
return sum(1 + max(left_inc[index], right_inc[index]) for index in range(length)) | 952ea82815ecb4db6d4d0347f16b0cf5b299f7d3 | 13,351 |
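# Usage sketch for min_offerings above (the expected value was traced through the algorithm by hand):
print(min_offerings([1, 2, 2]))  # 4 -> offerings of 1, 2, 1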
def pretty(value, width=80, nl_width=80, sep='\n', **kw):
# type: (str, int, int, str, **Any) -> str
"""Format value for printing to console."""
if isinstance(value, dict):
return '{{{0} {1}'.format(sep, pformat(value, 4, nl_width)[1:])
elif isinstance(value, tuple):
return '{}{}{}'.format(
sep, ' ' * 4, pformat(value, width=nl_width, **kw),
)
else:
return pformat(value, width=width, **kw) | d2af8d83c2e116ebb1a6e65cd369c3a33adf4585 | 13,352 |
def niceNumber(v, maxdigit=6):
"""Nicely format a number, with a maximum of 6 digits."""
assert(maxdigit >= 0)
if maxdigit == 0:
return "%.0f" % v
fmt = '%%.%df' % maxdigit
s = fmt % v
if len(s) > maxdigit:
return s.rstrip("0").rstrip(".")
elif len(s) == 0:
return "0"
else:
return s | d57f83272a819d5abf12d71fdac84fe8e92eeb05 | 13,355 |
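# Usage sketch for niceNumber above (outputs follow directly from the formatting and stripping logic):
print(niceNumber(3.14159))  # '3.14159'
print(niceNumber(2.0))      # '2'
print(niceNumber(7, 0))     # '7'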
def query_incident(conditions: list, method=None, plan_status="A", multiple_fields=False):
"""
Queries incidents in Resilient/CP4S
    :param conditions: list of conditions as [field_name, field_value, method], or a list of such condition lists if multiple_fields is True
:param method: set all field conditions to this method (save user from typing it for each field)
:param plan_status: "A" == Active, "C" == Closed
:param multiple_fields: query more than one field
"""
def buildConditionDict(conditions, method=method):
return {
'field_name': conditions[0],
'value': conditions[1],
"method": method if method else conditions[2],
}
conditionList = []
query_uri = u"/incidents/query?return_level=normal"
    if not multiple_fields:
conditionList.append(buildConditionDict(conditions))
query_uri += u"&field_handle={}".format(conditions[0])
else:
for condition in conditions:
conditionList.append(buildConditionDict(condition))
query_uri += u"&field_handle={}".format(condition[0])
conditionList.append({
'field_name': 'plan_status',
'method': 'equals',
'value': plan_status
})
query = {
'filters': [{
'conditions': conditionList
}],
"sorts": [{
"field_name": "create_date",
"type": "desc"
}]
}
client = create_authenticated_client()
return client.post(query_uri, query) | 9c037b5d864248bd280db644f4c3868557b59721 | 13,356 |
def get_banner():
"""Return a banner message for the interactive console."""
global _CONN
result = ''
# Note how we are connected
result += 'Connected to %s' % _CONN.url
if _CONN.creds is not None:
result += ' as %s' % _CONN.creds[0]
# Give hint about exiting. Most people exit with 'quit()' which will
# not return from the interact() method, and thus will not write
# the history.
result += '\nPress Ctrl-D to exit'
return result | 5c5e1f2d32548d112f75c933ce4c4e842cdfc993 | 13,357 |
def generate_fish(
n,
channel,
interaction,
lim_neighbors,
neighbor_weights=None,
fish_max_speeds=None,
clock_freqs=None,
verbose=False,
names=None
):
"""Generate some fish
Arguments:
n {int} -- Number of fish to generate
channel {Channel} -- Channel instance
interaction {Interaction} -- Interaction instance
lim_neighbors {list} -- Tuple of min and max neighbors
        neighbor_weights {float|list} -- Neighbor weight or list of neighbor weights
fish_max_speeds {float|list} -- List of max speeds
clock_freqs {int|list} -- List of clock speeds
names {list} -- List of names for your fish
"""
if neighbor_weights is None:
neighbor_weights = [1.0] * n
elif not isinstance(neighbor_weights, list):
neighbor_weights = [neighbor_weights] * n
if fish_max_speeds is None:
fish_max_speeds = [1.0] * n
elif not isinstance(fish_max_speeds, list):
fish_max_speeds = [fish_max_speeds] * n
if clock_freqs is None:
clock_freqs = [1] * n
elif not isinstance(clock_freqs, list):
clock_freqs = [clock_freqs] * n
if names is None:
names = ['Unnamed'] * n
fish = []
for i in range(n):
fish.append(Fish(
id=i,
channel=channel,
interaction=interaction,
lim_neighbors=lim_neighbors,
neighbor_weight=neighbor_weights[i],
fish_max_speed=fish_max_speeds[i],
clock_freq=clock_freqs[i],
verbose=verbose,
name=names[i]
))
return fish | 58d8fb4626d18caa5b093b30588603f335074e4b | 13,358 |
import functools
def log_exception(function):
"""Exception logging wrapper."""
@functools.wraps(function)
def wrapper(*args, **kwargs):
try:
return function(*args, **kwargs)
except:
err = "There was an exception in "
err += function.__name__
logger.exception(err)
return wrapper | f2c86b168550c12d73d87f1b2001a3caab46ceda | 13,359 |
def calculate_triad_connectivity(tt1, tt2, tt3, ipi1, ipi2, tau_z_pre, tau_z_post,
base_time, base_ipi, resting_time, n_patterns):
"""
This function gives you the connectivity among a triad, assuming that all the other temporal structure outside of
the trial is homogeneus
:param tt1:
:param tt2:
:param tt3:
:param ipi1:
:param ipi2:
:param tau_z_pre:
:param tau_z_post:
:param base_time:
:param base_ipi:
:param resting_time:
:param n_patterns:
:return:
"""
Tt = (n_patterns - 3) * base_time + tt1 + tt2 + tt3 + ipi1 + ipi2 + \
(n_patterns - 2) * base_ipi + resting_time
# Single probabilities
p1_pre = calculate_probability_theo(Tp=tt1, Tstart=0.0, Ttotal=Tt, tau_z=tau_z_pre)
p2_pre = calculate_probability_theo(Tp=tt2, Tstart=0.0, Ttotal=Tt, tau_z=tau_z_pre)
p3_pre = calculate_probability_theo(Tp=tt3, Tstart=0.0, Ttotal=Tt, tau_z=tau_z_pre)
p1_post = calculate_probability_theo(Tp=tt1, Tstart=0.0, Ttotal=Tt, tau_z=tau_z_post)
p2_post = calculate_probability_theo(Tp=tt2, Tstart=0.0, Ttotal=Tt, tau_z=tau_z_post)
p3_post = calculate_probability_theo(Tp=tt3, Tstart=0.0, Ttotal=Tt, tau_z=tau_z_post)
# joint-self probabilities
p11 = calculate_self_probability_theo(T1=tt1, Tt=Tt, tau1=tau_z_pre, tau2=tau_z_post)
p22 = calculate_self_probability_theo(T1=tt2, Tt=Tt, tau1=tau_z_pre, tau2=tau_z_post)
p33 = calculate_self_probability_theo(T1=tt3, Tt=Tt, tau1=tau_z_pre, tau2=tau_z_post)
# Joint probabilities
Ts = tt1 + ipi1
p21 = calculate_joint_probabilities_theo(T1=tt1, Ts=Ts, T2=tt2, Tt=Tt, tau1=tau_z_pre, tau2=tau_z_post)
Ts = tt1 + ipi1 + tt2 + ipi2
p31 = calculate_joint_probabilities_theo(T1=tt1, Ts=Ts, T2=tt3, Tt=Tt, tau1=tau_z_pre, tau2=tau_z_post)
Ts = tt1 + ipi1
p12 = calculate_joint_probabilities_theo(T1=tt1, Ts=Ts, T2=tt2, Tt=Tt, tau1=tau_z_post, tau2=tau_z_pre)
Ts = tt2 + ipi2
p32 = calculate_joint_probabilities_theo(T1=tt2, Ts=Ts, T2=tt3, Tt=Tt, tau1=tau_z_pre, tau2=tau_z_post)
Ts = tt1 + ipi1 + tt2 + ipi2
p13 = calculate_joint_probabilities_theo(T1=tt1, Ts=Ts, T2=tt3, Tt=Tt, tau1=tau_z_post, tau2=tau_z_pre)
Ts = tt2 + ipi2
p23 = calculate_joint_probabilities_theo(T1=tt2, Ts=Ts, T2=tt3, Tt=Tt, tau1=tau_z_post, tau2=tau_z_pre)
# Weights
w11 = np.log10(p11 / (p1_pre * p1_post))
w12 = np.log10(p12 / (p1_pre * p2_post))
w13 = np.log10(p13 / (p1_pre * p3_post))
w21 = np.log10(p21 / (p2_pre * p1_post))
w22 = np.log10(p22 / (p2_pre * p2_post))
w23 = np.log10(p23 / (p2_pre * p3_post))
w31 = np.log10(p31 / (p3_pre * p1_post))
w32 = np.log10(p32 / (p3_pre * p2_post))
w33 = np.log10(p33 / (p3_pre * p3_post))
# Betas
beta1 = np.log10(p1_post)
beta2 = np.log10(p2_post)
beta3 = np.log10(p3_post)
# Bs (un-normalized)
B12 = w22 - w12 + beta2 - beta1
B13 = w33 - w13 + beta3 - beta1
B21 = w11 - w21 + beta1 - beta2
B23 = w33 - w32 + beta3 - beta2
B31 = w11 - w31 + beta1 - beta3
B32 = w22 - w32 + beta2 - beta3
return locals() | 6497a68bfdbf9db12a6cbef0784c0aacc3f5e055 | 13,360 |
import datetime
import random
def random_date_from(date,
min_td=datetime.timedelta(seconds=0),
max_td=datetime.timedelta(seconds=0)):
"""
Produces a datetime at a random offset from date.
Parameters:
date: datetime
The reference datetime.
min_td: timedelta, optional
The minimum offset from the reference datetime (could be negative).
max_td: timedelta, optional
The maximum offset from the reference datetime (could be negative).
Return:
datetime
A new_date such that (date + min_td) <= new_date < (date + max_td).
"""
min_s = min(min_td.total_seconds(), max_td.total_seconds())
max_s = max(min_td.total_seconds(), max_td.total_seconds())
offset = random.uniform(min_s, max_s)
return date + datetime.timedelta(seconds=offset) | 2392e6684de81f5e693a7e6fbe4934940df5eada | 13,361 |
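# Usage sketch for random_date_from above (illustrative; the result is random within the window,
# and relies on the module-level `import datetime` used by the function):
base = datetime.datetime(2020, 1, 1)
new_date = random_date_from(base, max_td=datetime.timedelta(days=7))
assert base <= new_date <= base + datetime.timedelta(days=7)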
def generate_log_normal_dist_value(frequency, mu, sigma, draws, seed_value):
"""
Generates random values using a lognormal distribution,
given a specific mean (mu) and standard deviation (sigma).
https://stackoverflow.com/questions/51609299/python-np-lognormal-gives-infinite-
results-for-big-average-and-st-dev
The parameters mu and sigma in np.random.lognormal are not the mean
and STD of the lognormal distribution. They are the mean and STD
of the underlying normal distribution.
Parameters
----------
mu : int
Mean of the desired distribution.
sigma : int
Standard deviation of the desired distribution.
draws : int
Number of required values.
Returns
-------
random_variation : float
Mean of the random variation over the specified itations.
"""
    if seed_value is None:
        pass
    else:
        frequency_seed_value = seed_value * frequency * 100
        np.random.seed(int(str(frequency_seed_value)[:2]))
    # The lognormal parameterisation requires the natural logarithm (not log10)
    # to recover the requested mean and standard deviation.
    normal_std = np.sqrt(np.log(1 + (sigma / mu) ** 2))
    normal_mean = np.log(mu) - normal_std ** 2 / 2
hs = np.random.lognormal(normal_mean, normal_std, draws)
return round(np.mean(hs),2) | f0613688a5af83a867825b3e91c8f1f8a99c05ba | 13,362 |
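# Usage sketch for generate_log_normal_dist_value above (parameter values are illustrative;
# with enough draws the returned sample mean should sit near mu):
value = generate_log_normal_dist_value(frequency=800, mu=2.0, sigma=1.0, draws=10000, seed_value=42)
print(value)  # roughly 2.0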
VERBOSE = False  # assumed module-level flag; the original `from re import VERBOSE` imported a regex flag that is always truthy
def compute_mean_field(
grain_index_field,
field_data,
field_name,
vx_size=(1.0, 1.0, 1.0),
weighted=False,
compute_std_dev=False,
):
"""
Compute mean shear system by grains.
Args:
grain_index_field : VTK field containing index
field_data : VTK field containing shear field
field_name : the requested name of field
vx_size=(1.,1.,1.) : the voxel size
weighted=False : whether or not the mean and stddev is weighted
by grain volume ratio
compute_std_dev=False : whether we compute standard deviation
for `field_name`
Returns:
value_by_grain: 2D numpy array with every mean value for each grains
mean_field: 3D numpy array containing mean shear field
std_field: 3D numpy array containing standard_dev grains field
if compute_std_dev is True
"""
real_indx_grains = np.unique(grain_index_field)
field = field_data.PointData[field_name]
field_dimension = field_data.GetDimensions()
mean_field = np.zeros_like(field)
std_field = np.zeros_like(field)
# volume_grains = np.zeros_like(grain_index_field)
vx_vol = np.prod(vx_size) # vx_size[0]*vx_size[1]*vx_size[2]
# print(np.prod(vx_size))
# if weighted:
volume_total = vx_vol * np.prod(field_dimension)
# else:
# volume_total = 1.0
# print(" volume_total ", volume_total)
# print(" np.prod(field_dimension) ", np.prod(field_dimension))
volume = 1.0
for index in real_indx_grains:
mask_grains = np.nonzero(grain_index_field == index)
# if weighted:
# volume = np.count_nonzero(grain_index_field == index) * vx_vol
mean = algs.mean(field[mask_grains], axis=0) # * volume / volume_total
if VERBOSE:
print(
"- index {} v_i {} v_t {} mean {} mean {}".format(
index,
volume,
volume_total,
algs.mean(field[mask_grains], axis=0),
mean,
)
)
if compute_std_dev:
std_dev = np.std(field[mask_grains], axis=0) # * volume / volume_total
std_field[mask_grains] = std_dev
# volume_grains[mask_grains] = volume
mean_field[mask_grains] = mean
# gamma_by_grain = np.row_stack(gamma_by_grain)
value_by_grain = np.unique(mean_field, axis=0)
# print(" gamma_by_grain ", gamma_by_grain.shape)
# mean_by_grains = np.column_stack((real_indx_grains,gamma_by_grain))
return value_by_grain, mean_field, std_field | 8baa187a853c1d44597cae0417b455c74db2072d | 13,363 |
import xml.etree.ElementTree as ET
def evaluate_argument_value(xpath_or_tagname, datafile):
"""This function takes checks if the given xpath_or_tagname exists in the
datafile and returns its value. Else returns None."""
tree = ET.parse(datafile)
root = tree.getroot()
if xpath_or_tagname.startswith(root.tag + "/"):
xpath_or_tagname = xpath_or_tagname[len(root.tag + "/"):]
try:
xpath_or_tagname = root.find(xpath_or_tagname).text
except Exception:
print_error("Invalid xpath: {0}".format(root.tag + "/" + xpath_or_tagname))
xpath_or_tagname = None
else:
print_error("Invalid xpath: {0}".format(xpath_or_tagname))
xpath_or_tagname = None
return xpath_or_tagname | be4597e039717a535a86edfa4b04761417d0eaf4 | 13,364 |
def normalise_genome_position(x):
"""
Normalise position (circular genome)
"""
x['PositionNorm0'] = np.where(x['Position'] > (x['GenomeLength'] / 2),
(x['GenomeLength'] - x['Position']),
x['Position'])
x['PositionNorm'] = x['PositionNorm0']**(1/2)
# Reference position
n_reads = x['readCount'].max()
start_position_ref = int(1)
end_position_ref = x['GenomeLength'].iloc[0]
end_position_ref = end_position_ref + n_reads
increase_by = (end_position_ref / n_reads)
x['ref_Position'] = list(frange(start_position_ref, end_position_ref,
increase_by))
x['ref_Position'] = x['ref_Position'].astype(int)
x['PositionNorm_ref0'] = np.where(x['ref_Position'] > (x['GenomeLength'] / 2),
(x['GenomeLength'] - x['ref_Position']),
x['ref_Position'])
x['PositionNorm_ref'] = x['PositionNorm_ref0'].astype(int)
return x | 251808a0c7ea2b4f83e8c82ec06fc2d1d9e9b887 | 13,365 |
from faker import Faker
def random_address(invalid_data):
"""
Generate Random Address
return: string containing imitation postal address.
"""
fake = Faker(['en_CA']) # localized to Canada
return fake.address().replace('\n',', '), global_valid_data | 3375f2eefd05e1575ec8caf2944ee7960a17ca46 | 13,366 |
import math
def rot_poly(angle, polygon, n):
"""rotate polygon into 2D plane in order to determine if a point exists
within it. The Shapely library uses 2D geometry, so this is done in order
to use it effectively for intersection calculations.
Parameters
----------
angle : float
Euler angle to rotate a vector with respect to n
polygon : NumPy array
Coordinates encompassing a polygon (i.e. a boundary)
n : NumPy array
Normal vector of a boundary
Returns
-------
poly_2d : Shapely Polygon object
Shapely Polygon object in 2D coordinates
Notes
-----
This is not an elegant way of doing this. This works for surfaces that are
tilted with respect to the x-axis, and will work for surfaces with a normal
that is parallel to the y-axis, but will not allow for anything else. For
the code to be fully generalizable, this function will need to be expanded.
"""
xvect = np.array([1,0,0])
frontbacktest = lc.incidence_angle(n,xvect)
# if this is a front or back surface of the LSC, rotate with respect to y
if frontbacktest == 0 or frontbacktest == math.pi:
poly_2d = rot_poly_y(angle, polygon)
# otherwise, rotate with respect to x
else:
poly_2d = rot_poly_x(angle, polygon)
return poly_2d | 3048d6fac8a5e2dffb3d621c8bec2a25aa6b31d0 | 13,367 |
def bytes_isspace(x: bytes) -> bool:
"""Checks if given bytes object contains only whitespace elements.
Compiling bytes.isspace compiles this function.
This function is only intended to be executed in this compiled form.
Args:
x: The bytes object to examine.
Returns:
Result of check.
"""
if len(x) == 0:
return False
for i in x:
if i != ord(' ') and i != ord('\t') and i != ord('\n') and i != ord('\r') and i != 0x0b and i != ord('\f'):
return False
return True | 6c28b904cb6e0ef515ce7a16725fb99a535c3192 | 13,368 |
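# Usage sketch for bytes_isspace above (results follow directly from the per-element checks):
print(bytes_isspace(b" \t\r\n"))  # True
print(bytes_isspace(b""))         # False -- empty input is rejected
print(bytes_isspace(b" a "))      # False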
import re
def snake_case(name: str):
"""
https://stackoverflow.com/a/1176023/1371716
"""
name = re.sub('(\\.)', r'_', name)
name = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', name)
name = re.sub('__([A-Z])', r'_\1', name)
name = re.sub('([a-z0-9])([A-Z])', r'\1_\2', name)
return name.lower() | 4696ca3c1a50590aa6617ee3917b8364c11f3910 | 13,369 |
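# Usage sketch for snake_case above (outputs traced through the regex substitutions):
print(snake_case("CamelCaseHTTPRequest"))  # 'camel_case_http_request'
print(snake_case("module.ClassName"))      # 'module_class_name'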
import glob
def get_loss_data():
"""
This function returns a list of paths to all .npy loss
files.
Returns
-------
path_list : list of strings
The list of paths to output files
"""
path = "./data/*_loss.npy"
path_list = glob.glob(path, recursive=True)
return path_list | bc98b0bdf60ac3f7125da82fd68956957e89a777 | 13,370 |
def ranked_avg_knn_scores(batch_states, memory, k=10, knn=batch_count_scaled_knn):
"""
Computes ranked average KNN score for each element in batch of states
\sum_{i = 1}^{K} (1/i) * d(x, x_i)
Parameters
----------
k: k neighbors
batch_states: numpy array of size [batch_size x state_size]
memory: numpy array of size [memory_size x state_size]
Returns
-------
numpy array of scores of dims [batch_size]
"""
nearest_neighbor_scores = knn(batch_states, memory, k=k)
k = nearest_neighbor_scores.shape[1]
scales = 1 / np.expand_dims(np.arange(1, k + 1), axis=0).repeat(batch_states.shape[0], axis=0)
# There may be the edge case where the number of unique distances for this particular batch
# is less than k. If that's the case, we need to reduce our scales dimension.
# This means one of two things:
# 1. you either have a very small map, or
# 2. your representation has collapsed into less than k points.
ranked_avg_scores = np.multiply(nearest_neighbor_scores, scales)
return np.sum(ranked_avg_scores, axis=-1) | 56d60d97e7c10b06deb417e0d6b52a5a76f9150e | 13,371 |
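# Usage sketch for ranked_avg_knn_scores above (shapes only; numpy is assumed to be available as np,
# as the function body itself assumes, and the knn helper comes from this module):
batch = np.random.rand(4, 16)     # 4 states of dimension 16
memory = np.random.rand(100, 16)  # 100 stored states
scores = ranked_avg_knn_scores(batch, memory, k=10)
print(scores.shape)  # (4,)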
def login_exempt(view_func):
"""登录豁免,被此装饰器修饰的action可以不校验登录."""
def wrapped_view(*args, **kwargs):
return view_func(*args, **kwargs)
wrapped_view.login_exempt = True
return wraps(view_func, assigned=available_attrs(view_func))(wrapped_view) | d0853317260d68e9ea3d3a70a02c2f1ca67681a2 | 13,372 |
def to_uint8_image(message : ImageMessage) -> ImageMessage:
"""Convert image type to uint8.
Args:
message (ImageMessage): Image to be converted
Returns:
        ImageMessage: Resulting image
"""
message.image = np.uint8(message.image*255)
if message.mask is not None:
message.mask = np.uint8(message.mask*255)
return message | fd9626800b5c0fec284ab185c1ff29e9cfefb3e7 | 13,373 |
import string
def list_zero_alphabet() -> list:
"""Build a list: 0, a, b, c etc."""
score_dirs = ['0']
for char in string.ascii_lowercase:
score_dirs.append(char)
return score_dirs | 6cd9fc9e93257dcc7729235ac3cffa01dbd80c95 | 13,374 |