content (string, lengths 35 to 762k) | sha1 (string, length 40) | id (int64, 0 to 3.66M)
---|---|---
def remove_suboptimal_parses(parses: Parses, just_one: bool) -> Parses:
""" Return all parses that have same optimal cost. """
minimum = min(parse_cost(parse) for parse in parses)
minimal_parses = [parse for parse in parses if parse_cost(parse) == minimum]
if just_one:
return Parses([minimal_parses[0]])
return Parses(minimal_parses)
|
c223229e73a5319bdb40ac58695aa6f5a8c0bb4b
| 27,042 |
def local_ranking(results):
"""
Parameters
----------
results : list
Dataset with initial hand ranking and the global hand ranking.
Returns
-------
results : list
Dataset with the initial hand ranking and the game-local hand ranking
(from 0 to nplayers - 1).
"""
for i in range(len(results)):
results[i][1] = np.argsort(results[i][1])
return results
|
2be1ff269ad18ba9439d183f5899f5034927b5d7
| 27,043 |
def is_monotonic_increasing(bounds: np.ndarray) -> bool:
"""Check if int64 values are monotonically increasing."""
n = len(bounds)
if n < 2:
return True
prev = bounds[0]
for i in range(1, n):
cur = bounds[i]
if cur < prev:
return False
prev = cur
return True
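# Illustrative usage (a minimal sketch; assumes numpy is installed and the
# function above is in scope; the sample arrays are hypothetical):
import numpy as np
print(is_monotonic_increasing(np.array([1, 2, 2, 7], dtype=np.int64)))  # expected: True
print(is_monotonic_increasing(np.array([3, 1, 4], dtype=np.int64)))     # expected: False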
|
e745ce3825f4e052b2f62c7fdc23e66b5ee5d4d1
| 27,044 |
def parse(data):
"""
Takes binary data, detects the TLS message type, parses the info into a nice
Python object, which is what is returned.
"""
if data[0] == TLS_TYPE_HANDSHAKE:
obj = TlsHandshake()
obj.version = data[1:3]
obj.length = unpack(">H", data[3:5])[0]
if data[5] == TLS_TYPE_CLIENT_HELLO:
obj.data = ClientHello()
obj.data.length = unpack(">I", (b"\x00" + data[6:9]))[0] # 3-byte length
obj.data.version = data[9:11]
obj.data.random = data[11:43] # 32 bytes of random
if data[43] == 0x00:
obj.data.session_id = None
else:
obj.data.session_id = data[44:44+data[43]]
offset = 44 + data[43]
cipher_suite_length = unpack(">H", data[offset:offset+2])[0]
offset += 2
obj.data.cipher_suites = data[offset:offset+cipher_suite_length]
offset += cipher_suite_length
obj.data.compression_methods = data[offset+1:offset+data[offset]+1]
offset += 1 + data[offset]
extensions_length = unpack(">H", data[offset:offset+2])[0]
offset += 2
extension_data = data[offset:]
obj.data.extension_data = []
while len(extension_data):
extension, extension_data = parse_tls_extension(extension_data)
obj.data.extension_data.append(extension)
return obj
raise NotImplemented("Only CLIENT_HELLO handshake message is currently implemented")
raise NotImplemented("Only handshake messages are currently implemented")
|
64698fde904d702181f4d8bacda648d9fbea68a7
| 27,045 |
def recoverSecretRanks_GPT(mod_rec, tok_rec, startingText, outInd, finishSentence=True):
"""
Calculate the secret ranks of a cover text under a GPT-2 language model, given the cover text
"""
startingInd=tok_rec.encode(startingText)
endingInd=outInd[len(startingInd):]
secretTokensRec=[]
for i in range(len(endingInd)):
token=getTokens_GPT(mod_rec, tok_rec, startingInd, endingInd[i])
if (finishSentence):
if (token==3):
break
if(token>2):
token-=1
startingInd.append(endingInd[i])
secretTokensRec.append(token[0].tolist())
return secretTokensRec
|
be08520901b5c010d89a248814f96681265bb467
| 27,046 |
def threshold_strategies(random_state=None):
"""Plan (threshold):
- [x] aggregated features: (abs(mean - median) < 3dBm) || (2*stdev(x) < 8dBm)
- [x] histogram: x < -85 dBm
- [ ] timeseries batch: p < 10**-3
"""
dummy = lambda: dummy_anomaly_injector(scaler=None, random_state=random_state)
spikes = lambda: spike_anomaly_injector(scaler=None, random_state=random_state)
norecovery = lambda: norecovery_anomaly_injector(scaler=None, random_state=random_state)
recovery = lambda: recovery_anomaly_injector(scaler=None, random_state=random_state)
slow = lambda: slow_anomaly_injector(scaler=None, random_state=random_state)
datasets = {
#'dummy': dummy,
'norecovery': norecovery,
'recovery': recovery,
'spikes': spikes,
'slow': slow,
}
lin2log = lambda x: 10 * np.log10(x)
def aggr_approach(df, offset_threshold=3.0, stdev_threshold=2.5):
y_true, y_pred = [], []
unique_pairs = df.apply(lambda row: (row['src'], row['dst'], row['noise']), axis=1).unique()
for src, dst, noise in unique_pairs:
query = (df.src==src) & (df.dst==dst) & (df.noise==noise)
view = df[query]
x = view.rss.ravel()
# Difference between mean and median has to be lower than threshold
criteria1 = np.abs(np.mean(x) - np.median(x)) > offset_threshold
# Deviation has to be lower than threshold
criteria2 = 2 * np.std(x) > stdev_threshold
#criteria2 = (np.mean(x) + 2*np.std()
result = np.any(criteria1 | criteria2)
#print(criteria1 + criteria2)
y_pred.append(result)
y_true.append(np.any(view['anomaly']))
#print(np.any(view['anomaly']), 2*np.std(x))
#print()
return y_true, y_pred
def hist_approach(df, threshold=-85.0):
y_true, y_pred = [], []
unique_pairs = df.apply(lambda row: (row['src'], row['dst'], row['noise']), axis=1).unique()
for src, dst, noise in unique_pairs:
query = (df.src==src) & (df.dst==dst) & (df.noise==noise)
view = df[query]
x = view.rss.ravel()
result = np.any(x < threshold)
y_pred.append(result)
y_true.append(np.any(view['anomaly']))
return y_true, y_pred
def ts_as_feature_vector(df, alpha=1e-3):
y_true, y_pred = [], []
unique_pairs = df.apply(lambda row: (row['src'], row['dst'], row['noise']), axis=1).unique()
for src, dst, noise in unique_pairs:
query = (df.src==src) & (df.dst==dst) & (df.noise==noise)
view = df[query]
x = view.rss.ravel()
k2, p = sp.stats.normaltest(x)
result = (p < alpha) # if p < alpha, it is not normal distribution, therefore anomaly
y_pred.append(result)
y_true.append(np.any(view['anomaly']))
return y_true, y_pred
#def fft_approach(x):
# freq = np.abs(np.fft.fft(x))
# freq_db = lin2log(freq)
# # [N/2] - 1; frequency for each sample is i * samplingFrequency / N; 10Hz / (300 / 2 - 1)
# ratio = 300 // 5
# return (np.sum(freq_db[:ratio] > -20.0) > ratio // 2)
#df = norecovery_anomaly_injector(scaler=None, random_state=SEED)
for dataset_name, dataset in datasets.items():
string = f'% computer generated: anomaly={dataset_name}\nBaseline & Threshold (Tab.~\\ref{{tab:threshold-config}})'
for name, func in (('time-value', ts_as_feature_vector), ('aggr', aggr_approach), ('hist', hist_approach)):
df = dataset()
y_true, y_pred = func(df)
#print(metrics.classification_report(y_true=y_true, y_pred=y_pred))
prec = metrics.precision_score(y_true, y_pred, labels=[False, True])
rec = metrics.recall_score(y_true, y_pred, labels=[False, True])
f1 = metrics.f1_score(y_true, y_pred, labels=[False, True])
string += f'\t& {prec:.2f} & {rec:.2f} & {f1:.2f}\\tnote{{1}} &'
string = string + '& - & - ' + '\\\\'
print(string)
|
d127a36d36360f6733e26538c37d3cbb47f199a4
| 27,047 |
import math
def ellipse_properties(x, y, w):
"""
Given the (x,y) locations of the foci of the ellipse and the width, return
the center of the ellipse, width, height, and angle relative to the x-axis.
:param double x: x-coordinates of the foci
:param double y: y-coordinates of the foci
:param double w: width of the ellipse
:rtype: tuple of doubles
:returns: (center_coordinates, width, height, angle_in_rads)
"""
p1 = [x[0], y[0]]
p2 = [x[1], y[1]]
#center point
xy = [(p1[0] + p2[0])/2, (p1[1] + p2[1])/2]
#distance between points
d = ((p1[0] - p2[0])**2 + (p1[1] - p2[1])**2)**(0.5)
#theta to positive Xaxis
angle = math.atan((p1[1] - p2[1])/(p1[0] - p2[0]))
#sin = math.sin(-angle)
#cos = math.cos(-angle)
#width will be the axis the points lie on
width = 2*((0.5*d)**2 + (0.5*w)**2)**(0.5)
height = w
return (xy, width, height, angle*180/math.pi)
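# Illustrative usage (a minimal sketch; the foci below are hypothetical and lie
# on the x-axis, so the returned angle should be ~0 degrees):
center, width, height, angle = ellipse_properties([0.0, 2.0], [0.0, 0.0], 1.0)
print(center, width, height, angle)  # expected: [1.0, 0.0] ~2.236 1.0 ~0.0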
|
95864eac0feb9c34546eefed5ca158f330f88e3d
| 27,048 |
def build_func(f, build_type):
"""
Custom decorator that is similar to the @conf decorator except that it is intended to mark
build functions specifically. All build functions must be decorated with this decorator
:param f: build method to bind
:type f: function
:param build_type: The WAF build type (see BuildTargetType)
:type build_type: string
"""
def fun(*k, **kw):
kw[BUILD_FUNC_KW] = f.__name__
kw[BUILD_TYPE_KW] = build_type
result = f(*k, **kw)
return result
setattr(OptionsContext, f.__name__, fun)
setattr(ConfigurationContext, f.__name__, fun)
setattr(BuildContext, f.__name__, fun)
return f
|
e880b7d5a3c4ac79a3caff48f1a3f991ed321262
| 27,049 |
def getnumoflinesinblob(ext_blob):
"""
Get number of lines in blob
"""
ext, blob_id = ext_blob
return (ext, blob_id, int(getpipeoutput(['git cat-file blob %s' % blob_id, 'wc -l']).split()[0]))
|
ccc492cc66e046d73389f6822ad04cd943376f7b
| 27,050 |
import requests
def fetch_data(full_query):
"""
Fetches data from the given url
"""
url = requests.get(full_query)
# Parse the json data so it can be used as a normal dict
raw_data = url.json()
# It's a good practice to always close opened urls!
url.close()
return raw_data
|
576b2548c1b89827e7586542e4d7e3f0cc89051d
| 27,052 |
import http
def post(*args, **kwargs): # pragma: no cover
"""Make a post request. This method is needed for mocking."""
return http.post(*args, **kwargs)
|
d5c91da5f39ece36183a8265f74378a35f11c4c7
| 27,053 |
def shear_x(image: tf.Tensor, level: float, replace: int) -> tf.Tensor:
"""Equivalent of PIL Shearing in X dimension."""
# Shear parallel to x axis is a projective transform
# with a matrix form of:
# [1 level
# 0 1].
image = transform(
image=wrap(image), transforms=[1., level, 0., 0., 1., 0., 0., 0.])
return unwrap(image, replace)
|
230fb5d346a966c4945b0bb39f336c1fddeb94fd
| 27,054 |
def extract_const_string(data):
"""Extract const string information from a string
Warning: a string array seems to be practically indistinguishable from a string containing ", ".
e.g.
The following is an array of two elements
const/4 v0, 0x1
new-array v0, v0, [Ljava/lang/String;
const/4 v1, 0x0
const-string v2, "NIzaSyCuxR_sUTfFJZBDkIsauakeuqXaFxhbur4, OIzaSyCuxR_sUTfFJZBDkIsauakeuqXaFxhbur4"
aput-object v2, v0, v1
It seems equal to this other case:
const/4 v0, 0x2
new-array v0, v0, [Ljava/lang/String;
const/4 v1, 0x0
const-string v2, "LIzaSyCuxR_sUTfFJZBDkIsauakeuqXaFxhbur4"
aput-object v2, v0, v1
But who says that in the second case the const-string last argument is just a string, while in the
first case the last arg is two elements of the array?
:data: Data would be something like: v0, "this is a string"
:return: Returns a const string object, otherwise None
:rtype: dict or list
"""
match = regex_var.search(data)
if match:
# A const string is usually saved in this form
# <variable name>,<value>
name = match.group('var')
value = match.group('value')
if ", " not in value:
c = {
# Variable
'name': name,
# Value of string
'value': value
}
return c
else:
c = []
for val in value.split(", "):
c.append({
'name': name,
'value': val
})
return c
else:
return None
|
70229ea1a6183218577244f185a5e37d170fe4be
| 27,056 |
def choose_action(q, sx, so, epsilon):
"""
Choose action index for given state.
"""
# Get valid action indices
a_vindices = np.where((sx+so)==False)
a_tvindices = np.transpose(a_vindices)
q_max_index = tuple(a_tvindices[np.argmax(q[a_vindices])])
# Choose next action based on epsilon-greedy policy
if np.random.random() <= epsilon:
# Choose random action from list of valid actions
a_index = tuple(a_tvindices[np.random.randint(len(a_tvindices))])
else:
# Choose valid action w/ max Q
a_index = q_max_index
return q_max_index, a_index
|
626ccda15c24d983a060bdd6dd90a836c461b1ba
| 27,057 |
def soliswets(function, sol, fitness, lower, upper, maxevals, delta):
""""
Implements the solis wets algorithm
"""
bias = zeros(delta.shape)
evals = 0
num_success = 0
num_failed = 0
dim = len(sol)
while evals < maxevals:
dif = uniform(0, delta, dim)
newsol = clip(sol+bias+dif, lower, upper)
new_fitness = function(newsol)
evals += 1
if new_fitness < fitness:
sol = newsol
fitness = new_fitness
bias = _increm_bias(bias, dif)
num_success += 1
num_failed = 0
elif evals < maxevals:
new_sol = clip(sol - bias - dif, lower, upper)
new_fitness = function(new_sol)
evals += 1
if new_fitness < fitness:
sol = new_sol
fitness = new_fitness
bias = _dec_bias(bias, dif)
num_success += 1
num_failed = 0
else:
bias = 0.5 * bias
num_success = 0
num_failed += 1
if num_success >= 5:
num_success = 0
delta *= 2
elif num_failed >= 3:
num_failed = 0
delta /= 2
return EAresult(solution=sol, fitness=fitness, evaluations=maxevals), delta
|
19104e717af6701ce3d838d526059575306018cf
| 27,059 |
def _get_precision_type(network_el):
"""Given a network element from a VRP-REP instance, returns its precision type:
floor, ceil, or decimals. If no such precision type is present, returns None.
"""
if 'decimals' in network_el:
return 'decimals'
if 'floor' in network_el:
return 'floor'
if 'ceil' in network_el:
return 'ceil'
return None
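# Illustrative usage (a minimal sketch; a plain dict stands in for the VRP-REP
# network element, since the function only tests key membership):
print(_get_precision_type({'decimals': 2, 'distance': 10.0}))  # expected: 'decimals'
print(_get_precision_type({'distance': 10.0}))                 # expected: None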
|
b3b451a26ec50ce5f2424ea7a3652123ae96321d
| 27,060 |
import json
def user_list():
"""Retrieves a list of the users currently in the db.
Returns:
A json object with 'items' set to the list of users in the db.
"""
users_json = json.dumps(({'items': models.User.get_items_as_list_of_dict()}))
return flask.Response(ufo.XSSI_PREFIX + users_json, headers=ufo.JSON_HEADERS)
|
b216b41b35b4b25c23ea2cc987ff4fe2b6464775
| 27,062 |
import hashlib
def md5_str(content):
"""
Compute the MD5 hash of a string
:param content: input string
:return: hex digest of the MD5 hash
"""
m = hashlib.md5(content.encode('utf-8'))
return m.hexdigest()
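# Illustrative usage (a minimal sketch; hashes the ASCII string "hello"):
print(md5_str("hello"))  # expected: 5d41402abc4b2a76b9719d911017c592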
|
affe4742c2b44a60ef6dafa52d7a330594a70ed9
| 27,063 |
import requests
def hurun_rank(indicator: str = "百富榜", year: str = "2020") -> pd.DataFrame:
"""
Hurun Rankings (胡润排行榜)
http://www.hurun.net/CN/HuList/Index?num=3YwKs889SRIm
:param indicator: choice of {"百富榜", "全球榜", "至尚优品"}
:type indicator: str
:param year: target year; {"百富榜": 2015 onward, "全球榜": 2015 onward, "至尚优品": 2017 onward}
:type year: str
:return: data for the given indicator and year
:rtype: pandas.DataFrame
"""
if indicator == "百富榜":
symbol_map = {
"2015": "5",
"2016": "1",
"2017": "11",
"2018": "15",
"2019": "19",
"2020": "22",
}
elif indicator == "全球榜":
symbol_map = {
"2015": "6",
"2016": "4",
"2017": "2",
"2018": "14",
"2019": "18",
"2020": "20",
}
elif indicator == "至尚优品":
symbol_map = {
"2017": "10",
"2018": "13",
"2019": "17",
"2020": "21",
}
url = f"http://www.hurun.net/CN/HuList/BobListJson/{symbol_map[year]}"
payload = {"order": "asc", "search": ""}
r = requests.post(url, json=payload)
data_json = r.json()
temp_df = pd.DataFrame(data_json)
temp_df.columns = [
"_",
"_",
"_",
"类别",
"_",
"_",
"奖项",
"_",
"排名",
"品牌",
"_",
"_",
]
temp_df = temp_df[
[
"类别",
"奖项",
"排名",
"品牌",
]
]
temp_df["类别"].replace("", np.nan, inplace=True)
temp_df.fillna(method="ffill", inplace=True)
return temp_df
url = f"http://www.hurun.net/CN/HuList/ListJson/{symbol_map[year]}"
payload = {"order": "asc", "search": ""}
r = requests.post(url, json=payload)
data_json = r.json()
temp_df = pd.DataFrame(data_json)
temp_df.columns = [
"_",
"排名",
"姓名",
"财富",
"出生日期",
"关联企业",
"主营行业",
]
temp_df = temp_df[
[
"排名",
"姓名",
"财富",
"出生日期",
"关联企业",
"主营行业",
]
]
return temp_df
|
d8540f3b7482f8f56f0ec40ac2592ef0cfae4035
| 27,064 |
def yamartino_method(a, axis=None):
"""This function calclates the standard devation along the
chosen axis of the array. This function has been writen to
calculate the mean of complex numbers correctly by taking
the standard devation of the argument & the
angle (exp(1j*theta) ). This uses the Yamartino method
which is a one pass method of estimating the standard
devation of an angle.
Input :
a : N-D numpy array
axis : The axis to perform the operation over
The Default is over all axies
Output:
This returns a an array or a one value array
Example:
"""
if a.dtype.kind == 'c':
r = np.sqrt(a.real ** 2 + a.imag ** 2).std(axis=axis)#mean
th = np.arctan2(a.imag,a.real)
if axis is None :
sa = (np.sin(th) / len(th)).sum()
ca = (np.cos(th) / len(th)).sum()
else:
sa = (np.sin(th) / len(th)).sum(axis=axis)
ca = (np.cos(th) / len(th)).sum(axis=axis)
e = np.sqrt(1. - (sa ** 2 + ca ** 2))
thsd = np.arcsin(e)*(1. + (2. / np.sqrt(3) - 1.) * e ** 3)
return r * np.exp(1j * thsd)
else:
return np.std(a, axis=axis)
|
1a313ac97495a0822de1f071191be08ec5b65269
| 27,065 |
def calc_half_fs_axis(total_points, fs):
""" Геренирует ось до половины частоты дискр. с числом
точек равным заданному
"""
freq_axis = arange(total_points)*fs/2/total_points # Hz до половины fs
return freq_axis
|
35ef0482e3062d0af6f0e03e03e58e1c3cd33406
| 27,066 |
def fetch_weather():
""" select flight records for display """
sql = "select station, latitude,longitude,visibility,coalesce(nullif(windspeed,''),cast(0.0 as varchar)) as windspeed, coalesce(nullif(precipitation,''),cast(0.00 as varchar)) as precipitation from (select station_id AS station, info ->> 'Latitude' as latitude, info ->> 'Longitude' as longitude, info ->> 'Mean_Visibility' as visibility, info ->> 'Mean_Windspeed' as windspeed, info ->> 'Precipitation' as precipitation from weather w where date_trunc('day',w.create_date) >= date_trunc('day',current_timestamp - interval '1' day) and create_date = (select max(create_date) from weather wi where wi.station_id = w.station_id) limit 300) b;"
conn = None
state_id = None
try:
# read database configuration
params = config()
# connect to the PostgreSQL database
conn = psycopg2.connect(**params)
# create a new cursor
cur = conn.cursor()
# execute the SELECT statement
cur.execute(sql)
r = [dict((cur.description[i][0], value) \
for i, value in enumerate(row)) for row in cur.fetchall()]
cur.connection.close()
return r
except (Exception, psycopg2.DatabaseError) as error:
print(error)
finally:
if conn is not None:
conn.close()
|
8ab9f20255a64cfdaa5bfd6ed9aa675ed76f2f5d
| 27,067 |
def update_params(old_param, new_param, errors="raise"):
""" Update 'old_param' with 'new_param'
"""
# Copy old param
updated_param = old_param.copy()
for k,v in new_param.items():
if k in old_param:
updated_param[k] = v
else:
if errors=="raise":
raise Exception(f"Parameters {k} not recognized as a default parameter for this estimator")
else:
pass
return updated_param
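# Illustrative usage (a minimal sketch with hypothetical parameter names;
# unknown keys raise unless errors is set to something other than "raise"):
defaults = {"lr": 0.1, "depth": 3}
print(update_params(defaults, {"lr": 0.01}))                        # {'lr': 0.01, 'depth': 3}
print(update_params(defaults, {"momentum": 0.9}, errors="ignore"))  # {'lr': 0.1, 'depth': 3}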
|
95de4e8e1278b07d2bd8ccc61af4e2dc43f87ca2
| 27,068 |
from datetime import datetime
def rng_name():
"""Generate random string for a username."""
name = "b{dt.second}{dt.microsecond}"
return name.format(dt=datetime.utcnow())
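# Illustrative usage (a minimal sketch; the output varies with the current UTC time):
print(rng_name())  # e.g. 'b42123456'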
|
81be1b40770b08ec6b9adce0c3c9970ff1f3d442
| 27,070 |
def collections(id=None):
"""
Return Collections
Parameters
----------
id : STR, optional
The default is None, which returns all known collections.
You can provide an ICOS URI or DOI to filter for a specific collection
Returns
-------
query : STR
A query, which can be run against the SPARQL endpoint.
"""
if not id:
coll = '' # create an empty string to insert into the SPARQL query
else:
coll = ''.join(['FILTER(str(?collection) = "' + id+ '" || ?doi = "' + id + '") .'])
query = """
prefix cpmeta: <http://meta.icos-cp.eu/ontologies/cpmeta/>
prefix dcterms: <http://purl.org/dc/terms/>
select * where{
?collection a cpmeta:Collection .
%s
OPTIONAL{?collection cpmeta:hasDoi ?doi} .
?collection dcterms:title ?title .
OPTIONAL{?collection dcterms:description ?description}
FILTER NOT EXISTS {[] cpmeta:isNextVersionOf ?collection}
}
order by ?title
""" % coll
return query
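# Illustrative usage (a minimal sketch; the collection URI below is hypothetical):
print(collections())  # SPARQL query for all known collections
print(collections(id="https://meta.icos-cp.eu/collections/example"))  # filtered query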
|
0cd1704d2ac43f34d6e83a3f9e9ead39db390c2e
| 27,071 |
def zmat_to_coords(zmat, keep_dummy=False, skip_undefined=False):
"""
Generate the cartesian coordinates from a zmat dict.
Considers the zmat atomic map so the returned coordinates is ordered correctly.
Most common isotopes assumed, if this is not the case, then isotopes should be reassigned to the xyz.
This function assumes that all zmat variables relate to already defined atoms with a lower index in the zmat.
This function implements the SN-NeRF algorithm as described in:
J. Parsons, J.B. Holmes, J.M Rojas, J. Tsai, C.E.M. Strauss, "Practical Conversion from Torsion Space to Cartesian
Space for In Silico Protein Synthesis", Journal of Computational Chemistry 2005, 26 (10), 1063-1068,
https://doi.org/10.1002/jcc.20237
Tested in converterTest.py rather than zmatTest
Args:
zmat (dict): The zmat.
keep_dummy (bool): Whether to keep dummy atoms ('X'), ``True`` to keep, default is ``False``.
skip_undefined (bool): Whether to skip atoms with undefined variables, instead of raising an error.
``True`` to skip, default is ``False``.
Returns:
list: The cartesian coordinates.
Returns:
list: The atomic symbols corresponding to the coordinates.
Raises:
ZMatError: If zmat is of the wrong type or does not contain all keys.
"""
if not isinstance(zmat, dict):
raise ZMatError(f'zmat has to be a dictionary, got {type(zmat)}')
if 'symbols' not in zmat or 'coords' not in zmat or 'vars' not in zmat or 'map' not in zmat:
raise ZMatError(f'Expected to find symbols, coords, vars, and map in zmat, got instead: {list(zmat.keys())}.')
if not len(zmat['symbols']) == len(zmat['coords']) == len(zmat['map']):
raise ZMatError(f'zmat sections symbols, coords, and map have different lengths: {len(zmat["symbols"])}, '
f'{len(zmat["coords"])}, and {len(zmat["map"])}, respectively.')
var_list = list(zmat['vars'].keys())
coords_to_skip = list()
for i, coords in enumerate(zmat['coords']):
for coord in coords:
if coord is not None and coord not in var_list:
if skip_undefined:
coords_to_skip.append(i)
else:
raise ZMatError(f'The parameter {coord} was not found in the "vars" section of '
f'the zmat:\n{zmat["vars"]}')
coords = list()
for i in range(len(zmat['symbols'])):
coords = _add_nth_atom_to_coords(zmat=zmat, coords=coords, i=i, coords_to_skip=coords_to_skip)
# reorder the xyz according to the zmat map and remove dummy atoms
ordered_coords, ordered_symbols = list(), list()
for i in range(len([symbol for symbol in zmat['symbols'] if symbol != 'X'])):
zmat_index = key_by_val(zmat['map'], i)
if zmat_index < len(coords) and i not in coords_to_skip:
ordered_coords.append(coords[zmat_index])
ordered_symbols.append(zmat['symbols'][zmat_index])
if keep_dummy:
for key, val in zmat['map'].items():
if 'X' in str(val):
ordered_coords.append(coords[key])
ordered_symbols.append(zmat['symbols'][key])
return ordered_coords, ordered_symbols
|
0859a549b611347b4e3d94e4f0965a8a550e198e
| 27,073 |
def get_module_version(module_name: str) -> str:
"""Check module version. Raise exception when not found."""
version = None
if module_name == "onnxrt":
module_name = "onnx"
command = [
"python",
"-c",
f"import {module_name} as module; print(module.__version__)",
]
proc = Proc()
proc.run(args=command)
if proc.is_ok:
for line in proc.output:
version = line
proc.remove_logs()
if version is None:
raise ClientErrorException(f"Could not found version of {module_name} module.")
return version
|
caadba47f46d96b0318cd90b0f85f8a2ca2275b0
| 27,074 |
def pd_images(foc_offsets=[0,0], xt_offsets = [0,0], yt_offsets = [0,0],
phase_zernikes=[0,0,0,0], amp_zernikes = [0], outer_diam=200, inner_diam=0, \
stage_pos=[0,-10,10], radians_per_um=None, NA=0.58, wavelength=0.633, sz=512, \
fresnel_focal_length=None, um_per_pix=6.0):
"""
Create a set of simulated phase diversity images.
Note that dimensions here are in microns.
Parameters
----------
foc_offsets: (n_images-1) numpy array
Focus offset in radians for the second and subsequent images
xt_offsets: (n_images-1) numpy array
X tilt offset
yt_offsets: (n_images-1) numpy array
Y tilt offset
phase_zernikes: numpy array
Zernike terms for phase, excluding piston.
amp_zernikes: numpy array
Zernike terms for amplitude, including overall normalisation.
outer_diam, inner_diam: float
Outer and inner diameter of the annular pupil in pixels. Note that a better
model would have a (slightly) variable pupil size as the focus changes.
radians_per_um: float
Radians in focus term per micron of stage movement. This is
approximately 2*np.pi * NA^2 / wavelength.
stage_pos: (n_images) numpy array
Nominal stage position in microns.
fresnel_focal_length: float
Focal length in microns if we are in the Fresnel regime. If this is None,
a Fraunhofer calculation will be made.
um_per_pix: float
If we are in the Fresnel regime, we need to define the pixel scale of the
input pupil.
"""
#Firstly, sort out focus and tilt offsets. This focus offset is a bit of a
#guess...
if radians_per_um is None:
radians_per_um = np.pi*NA**2/wavelength
total_focus = np.array(stage_pos) * radians_per_um
total_focus[1:] += np.array(foc_offsets)
#Add a zero (for ref image) to the tilt offsets
xt = np.concatenate([[0],xt_offsets])
yt = np.concatenate([[0],yt_offsets])
#Create the amplitude zernike array. Normalise so that the
#image sum is zero for a evenly illuminated pupil (amplitude zernikes
#all 0).
pup_even = circle(sz, outer_diam, interp_edge=True) - \
circle(sz, inner_diam, interp_edge=True)
pup_even /= np.sqrt(np.sum(pup_even**2))*sz
pup = pup_even*zernike_amp(sz, amp_zernikes, diam=outer_diam)
#Needed for the Fresnel calculation
flux_norm = np.sum(pup**2)/np.sum(pup_even**2)
#Prepare for fresnel propagation if needed.
if fresnel_focal_length is not None:
lens = FocusingLens(sz, um_per_pix, um_per_pix, fresnel_focal_length, wavelength)
print("Using Fresnel propagation...")
#Now iterate through the images at different foci.
n_ims = len(total_focus)
ims = np.zeros( (n_ims, sz, sz) )
for i in range(n_ims):
#Phase zernikes for this image
im_phase_zernikes = np.concatenate([[0.], phase_zernikes])
im_phase_zernikes[1] += xt[i]
im_phase_zernikes[2] += yt[i]
im_phase_zernikes[4] += total_focus[i]
wf = pup*zernike_wf(sz, im_phase_zernikes, diam=outer_diam)
if fresnel_focal_length is None:
ims[i] = np.fft.fftshift(np.abs(np.fft.fft2(wf))**2)
else:
#For a Fresnel propagation, we need to normalise separately,
#because the lens class was written with inbuilt normalisation.
ims[i] = lens.focus(wf) * flux_norm
return ims
|
71a7dd7206936541cc55d8909be7795261aeaefa
| 27,075 |
def add_tickets(create_user, add_flights):
"""Fixture to add tickets"""
user = create_user(USER)
tickets = [{
"ticket_ref": "LOS29203SLC",
"paid": False,
"flight": add_flights[0],
"type": "ECO",
"seat_number": "E001",
"made_by": user,
}, {
"ticket_ref": "LOS24933SLC",
"paid": False,
"flight": add_flights[1],
"type": "ECO",
"seat_number": "E001",
"made_by": user
}]
return [Ticket.objects.create(**ticket) for ticket in tickets]
|
27f9ed9a5231c71e98a79632a97137b73831a0e0
| 27,076 |
def compute_depth_errors(gt, pred):
"""Computation of error metrics between predicted and ground truth depths
Args:
gt (N): ground truth depth
pred (N): predicted depth
"""
thresh = np.maximum((gt / pred), (pred / gt))
a1 = (thresh < 1.25).mean()
a2 = (thresh < 1.25 ** 2).mean()
a3 = (thresh < 1.25 ** 3).mean()
rmse = (gt - pred) ** 2
rmse = np.sqrt(rmse.mean())
rmse_log = (np.log(gt) - np.log(pred)) ** 2
rmse_log = np.sqrt(rmse_log.mean())
log10 = np.mean(np.abs((np.log10(gt) - np.log10(pred))))
abs_rel = np.mean(np.abs(gt - pred) / gt)
sq_rel = np.mean(((gt - pred) ** 2) / gt)
if args.dataset == 'nyu':
return abs_rel, log10, rmse, a1, a2, a3
elif args.dataset == 'kitti':
return abs_rel, sq_rel, rmse, rmse_log, a1, a2, a3
|
a781d5a8c1e61b5562870d75124de64e05fe2789
| 27,077 |
def sanitize_bvals(bvals, target_bvals=[0, 1000, 2000, 3000]):
"""
Remove small variation in bvals and bring them to their closest target bvals
"""
for idx, bval in enumerate(bvals):
bvals[idx] = min(target_bvals, key=lambda x: abs(x - bval))
return bvals
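# Illustrative usage (a minimal sketch; small scanner deviations snap to the
# closest target b-value):
print(sanitize_bvals([5, 995, 2010, 2990]))  # expected: [0, 1000, 2000, 3000]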
|
a92b170748b5dbc64c4e62703a3c63103675b702
| 27,078 |
def fetch_engines():
"""
fetch_engines() : Fetches documents from Firestore collection as JSON
all_engines : Return all documents
"""
all_engines = []
for doc in engine_ref.stream():
engine = doc.to_dict()
engine["id"] = doc.id
all_engines.append(engine)
return make_response(jsonify(all_engines), 200)
|
a79a623140209ed4e9e7cbea2d8944b3434f720a
| 27,079 |
def isone(a: float) -> bool:
"""Work around with float precision issues"""
return np.isclose(a, 1.0, atol=1.0e-8, rtol=0.0)
|
ee44d5d7a9b00457e51501d8ce5680cd95726e3f
| 27,080 |
def kerr(E=0, U=0, gs=None):
"""
Setup the Kerr nonlinear element
"""
model = scattering.Model(
omegas=[E]*1,
links=[],
U=[U])
if gs is None:
gs = (0.1, 0.1)
channels = []
channels.append(scattering.Channel(site=0, strength=gs[0]))
channels.append(scattering.Channel(site=0, strength=gs[1]))
se = scattering.Setup(model, channels)
se.label = 'U={0}'.format(U)
return se
|
a94ecb4618405a2817267609008bc56ef97033b9
| 27,082 |
from typing import Dict
import requests
import logging
def get_estate_urls(last_estate_id: str) -> Dict:
"""Fetch urls of newly added estates
Args:
last_estate_id (str): estate_id of the most recent estate added (from last scrape)
Returns:
Dict: result dict in format {estate_id_1: {estate_url_1}, ... estate_id_N: {estate_url_N}}
"""
# Calculate number of API pages based on result size and estates per page
base_url = 'https://www.sreality.cz/api/'
res = requests.get(base_url + 'cs/v2/estates?per_page=1&page=1')
num_pages = res.json()['result_size'] // 500
# Obtain url suffix for each estate up until the newest from last scrape
estate_urls = {}
for page in range(1, num_pages):
url = base_url + f'cs/v2/estates?per_page=500&page={page}'
# EAFP
try:
res = requests.get(url)
res.raise_for_status()
except requests.exceptions.HTTPError as error:
logging.error(error)
# Some API responses are missing the content
# which causes the entire scraper to fail
res = res.json().get("_embedded")
if res is None:
continue
estates = res["estates"]
for estate in estates:
estate_url = estate["_links"]["self"]["href"]
estate_id = estate_url.split("/")[-1]
# Break once we hit an estate from last scraping
already_scraped = estate_id == last_estate_id
if already_scraped:
return estate_urls
estate_urls[estate_id] = estate_url
return estate_urls
|
d93299002204edc9d26b3c77e2dff1f56f4b93d8
| 27,083 |
from datetime import datetime
def revert_transaction():
"""Revert a transaction."""
if not (current_user.is_admin or current_user.is_bartender):
flash("You don't have the rights to access this page.", 'danger')
return redirect(url_for('main.dashboard'))
transaction_id = request.args.get('transaction_id', -1, type=int)
# Transactions that are already reverted can't be reverted again
transaction = Transaction.query.filter_by(id=transaction_id).first_or_404()
if transaction.is_reverted or 'Revert' in transaction.type:
flash("You can't revert this transaction.", 'warning')
return redirect(request.referrer)
# Revert client balance
if transaction.client:
# Check if user balance stays positive before reverting
if transaction.client.balance - transaction.balance_change < 0:
flash(transaction.client.first_name + ' '
+ transaction.client.last_name + '\'s balance would be '
+ 'negative if this transaction were reverted.', 'warning')
return redirect(request.referrer)
transaction.client.balance -= transaction.balance_change
if transaction.item and transaction.item.is_alcohol:
transaction.client.last_drink = None
# Revert item quantity
if transaction.item and transaction.item.is_quantifiable:
transaction.item.quantity += 1
# Transaction is now reverted: it won't ever be 'unreverted'
transaction.is_reverted = True
transaction = Transaction(client_id=None,
barman=current_user.username,
date=datetime.utcnow(),
type='Revert #'+str(transaction_id),
balance_change=None)
db.session.add(transaction)
db.session.commit()
flash('The transaction #'+str(transaction_id)+' has been reverted.',
'primary')
return redirect(request.referrer)
|
39f4fc0c6af9c58197c514d5d648e07da20558aa
| 27,084 |
def is_number(s):
"""returns true if input can be converted to a float"""
try:
float(s)
return True
except ValueError:
return False
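# Illustrative usage (a minimal sketch):
print(is_number("3.14"))  # expected: True
print(is_number("abc"))   # expected: False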
|
d9fc4411bbc5e5fd8d02b3c105a770e8859048e0
| 27,085 |
def bib_to_string(bibliography):
""" dict of dict -> str
Take a biblatex bibliography represented as a dictionary
and return a string representing it as a biblatex file.
"""
string = ''
for entry in bibliography:
string += '\n@{}{{{},\n'.format(
bibliography[entry]['type'],
bibliography[entry]['id']
)
for field in bibliography[entry]:
if field != 'id' and field != 'type':
string += '\t{} = {{{}}},\n'.format(
field,
bibliography[entry][field]
)
string = string[:-2] + '}\n'
return string
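# Illustrative usage (a minimal sketch with one hypothetical biblatex entry):
example_bib = {
    "knuth1984": {
        "type": "article",
        "id": "knuth1984",
        "title": "Literate Programming",
        "year": "1984",
    }
}
print(bib_to_string(example_bib))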
|
c8fc4247210f74309929fdf9b210cd6f1e2ece3f
| 27,086 |
import io
def make_plot(z, figsize=(20, 20), scale=255 * 257,
wavelength=800, terrain=None,
nir_min=0.2, offset=3.5):
"""
Make a 3-D plot of image intensity as z-axis and RGB image as an underlay on the z=0 plane.
:param z: NIR intensities
:param figsize: size of the figure, default (20, 20)
:param scale: Scale to resize intensities for aesthetics (make intensities <= 1)
:param wavelength: The wavelength to include in the legend
:param terrain: The rgb image to include as the x-y plane (default is no underlay)
:param nir_min: Cutoff for the minimum level of NIR intensity (0 - 1) so the plot is cleaner
:param offset: Shift the RGB underlay by this amount for visual appeal so there is a space
:return: a PIL Image
"""
fig = plt.figure(figsize=figsize)
ax = fig.gca(projection='3d')
z = np.float32(z)  # cast the NIR intensities to float32
Z = z / scale
X, Y = np.arange(0, z.shape[1], 1), np.arange(0, z.shape[0], 1)
X, Y = np.meshgrid(X, Y)
surf = ax.plot_surface(X, Y, Z,
rstride=3, cstride=3,
cmap=cm.coolwarm,
alpha=0.3,
linewidth=0,
antialiased=False,
vmin=0, vmax=1)
if terrain is not None:
ax.plot_surface(X, Y, -offset * np.ones_like(Z, dtype=np.float32),
rstride=5, cstride=5, facecolors=terrain / 255)
''' Now overlay the fluorescence '''
z_fluorescence = Z.copy()
z_fluorescence[z_fluorescence < nir_min] = 0
z_rgba = np.ones((Z.shape[0], Z.shape[1], 4))
z_rgba[:, :, 3] = z_fluorescence[:, :]
ax.plot_surface(X, Y, -offset * np.ones_like(Z, dtype=np.float32),
rstride=5, cstride=5, facecolors=z_rgba)
ax.set_zlim(-offset, 1)
else:
ax.plot_surface(X, Y, (Z / 257) - offset,
rstride=3, cstride=3,
cmap=cm.coolwarm,
alpha=0.4,
linewidth=0,
antialiased=False,
vmin=-offset, vmax=(1.0 / 257.0) - offset)
ax.set_zlim(-offset, 1)
ax.zaxis.set_major_locator(FixedLocator([0.0, 0.5, 1.0]))
ax.zaxis.set_major_formatter(FormatStrFormatter('%.02f'))
ax.set_xlabel('\nHeight (Pixels)')
ax.set_ylabel('\nWidth (Pixels)')
ax.set_zlabel('\nRelative NIR\nIntensity')
ax.view_init(azim=30)
# Add a color bar which maps values to colors.
fig.colorbar(surf, shrink=0.4, aspect=5, orientation="horizontal",
label='Relative Intensity of fluorescence \nat wavelength ' + r"$\lambda_{f} =$" + "{}nm".format(
wavelength))
buf = io.BytesIO()
plt.tight_layout(h_pad=1)
plt.savefig(buf, format='png')
buf.seek(0)
img = Image.open(buf)
plt.close('all')
return img
|
1a4dde23a11b320e6564b6657a871a33ecb65eea
| 27,087 |
def check_prio_and_sorted(node):
"""Check that a treap object fulfills the priority requirement and that its sorted correctly."""
if node is None:
return None # The root is empty
else:
if (node.left_node is None) and (node.right_node is None): # No children to compare with
pass # Do nothing
elif node.left_node is None: # No left child
assert node.prio <= node.right_node.prio # Check priority of right child and node
assert node.data < node.right_node.data # Check sorting
elif node.right_node is None: # No right child
assert node.prio <= node.left_node.prio # Check priority of left child and node
assert node.data > node.left_node.data # Check sorting
else: # Two children
assert node.prio <= (node.left_node.prio and node.right_node.prio) # Check priority of both left and right child with node
assert (node.data > node.left_node.data) and (node.data < node.right_node.data) # Check sorting
check_prio_and_sorted(node.left_node) # Recursion. Goes down the left tree first
check_prio_and_sorted(node.right_node) # Recursion. Goes down the right tree next
|
64100fd4ba9af699ab362d16f5bbf216effa2da5
| 27,088 |
import pickle
async def wait_for_msg(channel):
"""Wait for a message on the specified Redis channel"""
while await channel.wait_message():
pickled_msg = await channel.get()
return pickle.loads(pickled_msg)
|
dca398cb3adeb778458dd6be173a53cdd204bcb9
| 27,090 |
def abandoned_baby_bull(high, low, open_, close, periods = 10):
"""
Abandoned Baby Bull
Parameters
----------
high : `ndarray`
An array containing high prices.
low : `ndarray`
An array containing low prices.
open_ : `ndarray`
An array containing open prices.
close : `ndarray`
An array containing close prices.
periods : `int`, optional
Specifying number of periods for trend identification.
Returns
-------
pattern : `ndarray`
A numpy ndarray of type bool specifying true whether
a pattern has been found or false otherwise.
"""
type_ = "bull"
pattern = abandoned_baby_calc(high, low, open_, close, periods, type_)
return pattern
|
5fb0f2e3063e7b7aa03663d1e2d04d565ec8e885
| 27,091 |
def split_line_num(line):
"""Split each line into line number and remaining line text
Args:
line (str): Text of each line to split
Returns:
tuple consisting of:
line number (int): Line number split from the beginning of line
remaining text (str): Text for remainder of line with whitespace
stripped
"""
line = line.lstrip()
acc = []
while line and line[0].isdigit():
acc.append(line[0])
line = line[1:]
return (int(''.join(acc)), line.lstrip())
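# Illustrative usage (a minimal sketch on a BASIC-style numbered line):
print(split_line_num("10 PRINT A"))  # expected: (10, 'PRINT A')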
|
d232fd046ee60ac804fff032494c8c821456c294
| 27,092 |
def rad_to_arcmin(angle: float) -> float:
"""Convert radians to arcmins"""
return np.rad2deg(angle)*60
|
c342286befd79a311edda18e8a7a2e978d8312ad
| 27,093 |
def get_tile_prefix(rasterFileName):
"""
Returns 'rump' of raster file name, to be used as prefix for tile files.
rasterFileName is <date>_<time>_<sat. ID>_<product type>_<asset type>.tif(f)
where asset type can be any of ["AnalyticMS","AnalyticMS_SR","Visual","newVisual"]
The rump is defined as <date>_<time>_<sat. ID>_<product type>
"""
return rasterFileName.rsplit("_", 1)[0].rsplit("_AnalyticMS")[0]
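# Illustrative usage (a minimal sketch; the file name below is hypothetical):
print(get_tile_prefix("20200101_101010_1234_3B_AnalyticMS_SR.tif"))
# expected: 20200101_101010_1234_3B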
|
15b517e5ba83b2cfb5f3b0014d800402c9683815
| 27,094 |
def get_indices_by_groups(dataset):
"""
Only use this to see F1-scores for how well we can recover the subgroups
"""
indices = []
for g in range(len(dataset.group_labels)):
indices.append(
np.where(dataset.targets_all['group_idx'] == g)[0]
)
return indices
|
864aad8eef0339afd04cce34bee65f46c9fb030b
| 27,095 |
def ranksumtest(x, y):
"""Calculates the rank sum statistics for the two input data sets
``x`` and ``y`` and returns z and p.
This method returns a slight difference compared to scipy.stats.ranksums
in the two-tailed p-value. Should be test driven...
Returns: z-value for first data set ``x`` and two-tailed p-value
"""
x, y = map(np.asarray, (x, y))
n1 = len(x)
n2 = len(y)
alldata = np.concatenate((x, y))
ranked = rankdata(alldata)
x = ranked[:n1]
y = ranked[n1:]
s = np.sum(x, axis=0)
assert s + np.sum(y, axis=0) == np.sum(range(n1 + n2 + 1))
expected = n1 * (n1 + n2 + 1) / 2.0
z = (s - expected) / np.sqrt(n1 * n2 * (n1 + n2 + 1) / 12.0)
prob = 2 * (1.0 - zprob(abs(z)))
return z, prob
|
d01d0a56cf888983fa1b8358f2f6f0819ca824d9
| 27,096 |
def inchi_to_can(inchi, engine="openbabel"):
"""Convert InChI to canonical SMILES.
Parameters
----------
inchi : str
InChI string.
engine : str (default: "openbabel")
Molecular conversion engine ("openbabel" or "rdkit").
Returns
-------
str
Canonical SMILES.
"""
if engine == "openbabel":
obconversion = openbabel.OBConversion()
obconversion.SetInAndOutFormats("inchi", "can")
obmol = openbabel.OBMol()
obconversion.ReadString(obmol, inchi)
outinchi = obconversion.WriteString(obmol)
can = outinchi.rstrip()
elif engine == "rdkit":
mol = Chem.MolFromInchi(inchi)
can = Chem.MolToSmiles(mol)
else:
raise AttributeError(
"Engine must be either 'openbabel' or 'rdkit'."
)
return can
|
040d091f1cdbc1556fd60b9ee001953e1a382356
| 27,098 |
from typing import List, TypeVar
T = TypeVar("T")
def swap(arr: List[T],
i: int,
j: int) -> List[T]:
"""Swap two array elements.
:param arr:
:param i:
:param j:
:return:
"""
arr[i], arr[j] = arr[j], arr[i]
return arr
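# Illustrative usage (a minimal sketch; the list is modified in place and returned):
print(swap([10, 20, 30], 0, 2))  # expected: [30, 20, 10]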
|
e34c983b816f255a8f0fb438c14b6c81468b38c6
| 27,099 |
def is_anno_end_marker(tag):
"""
Checks whether the tag marks the end of an annotation
"""
text = tag.get_text()
m = anno_end_marker_regex.match(text)
if m:
return True
else:
return False
|
28b7d216c38dabedaef33f4d71f9749e72344b65
| 27,100 |
async def fetch_and_parse(session, url):
"""
Parse a fatality page from a URL.
:param aiohttp.ClientSession session: aiohttp session
:param str url: detail page URL
:return: a dictionary representing a fatality.
:rtype: dict
"""
# Retrieve the page.
# page = await fetch_text(session, url)
page = await fetch_detail_page(session, url)
# Parse it.
d = parse_page(page)
# Add the link.
d[Fields.LINK] = url
# Return the result.
return d
|
525bf965854a098507046b3408de5e73bcd4abc9
| 27,101 |
def wmt_diag_base():
"""Set of hyperparameters."""
hparams = iwslt_diag()
hparams.batch_size = 4096
hparams.num_hidden_layers = 6
hparams.hidden_size = 512
hparams.filter_size = 2048
hparams.num_heads = 8
# VAE-related flags.
hparams.latent_size = 512
hparams.n_posterior_layers = 4
hparams.n_decoder_layers = 6
hparams.dropout = 0.1
hparams.layer_prepostprocess_dropout = 0.1
hparams.attention_dropout = 0.1
hparams.relu_dropout = 0.1
return hparams
|
384820d2fadc13711968a666a6f4d7b1be0726c5
| 27,102 |
def K_axialbending(EA, EI_x, EI_y, x_C=0, y_C=0, theta_p=0):
"""
Axial bending problem. See KK for notations.
"""
H_xx = EI_x*cos(theta_p)**2 + EI_y*sin(theta_p)**2
H_yy = EI_x*sin(theta_p)**2 + EI_y*cos(theta_p)**2
H_xy = (EI_y-EI_x)*sin(theta_p)*cos(theta_p)
return np.array([
[EA , EA*y_C , -EA*x_C ] ,
[EA*y_C , H_xx + EA*y_C**2 , -H_xy - EA*x_C*y_C ] ,
[-EA*x_C , -H_xy - EA*x_C*y_C , H_yy + EA*x_C**2 ]
])
|
f187b35c5324a0aa46e5500a0f37aebbd2b7cc62
| 27,103 |
def get_closest_intersection_pt_dist(path1, path2):
"""Returns the manhattan distance from the start location to the closest
intersection point.
Args:
path1: the first path (list of consecutive (x,y) tuples)
path2: the second path
Returns:
int of lowest manhattan distance for an intersection
"""
pts1 = get_pts(path1)
pts2 = get_pts(path2)
intersections = get_intersections(pts1, pts2)
return get_closest_dist(intersections)
|
07bbe3a2d5f817f28b4e077989a89a78747c676f
| 27,104 |
def is_voiced_offset(c_offset):
"""
Is the offset a voiced consonant
"""
return c_offset in VOICED_LIST
|
6dfad8859ba8992e2f05c9946e9ad7bf9428d181
| 27,105 |
def add_boundary_label(lbl, dtype=np.uint16):
"""
Find boundary labels for a labelled image.
Parameters
----------
lbl : array(int)
lbl is an integer label image (not binarized).
Returns
-------
res : array(int)
res is an integer label image with boundary encoded as 2.
"""
b = find_boundaries(lbl, mode='outer')
res = (lbl > 0).astype(dtype)
res[b] = 2
return res
|
31bae32ad08c66a66b19379d30d6210ba2b61ada
| 27,107 |
def kmax(array, k):
""" return k largest values of a float32 array """
I = np.zeros(k, dtype='int64')
D = np.zeros(k, dtype='float32')
ha = float_minheap_array_t()
ha.ids = swig_ptr(I)
ha.val = swig_ptr(D)
ha.nh = 1
ha.k = k
ha.heapify()
ha.addn(array.size, swig_ptr(array))
ha.reorder()
return D, I
|
41037c924ae240636309f272b95a3c9dcfe10c5e
| 27,108 |
def adcp_ins2earth(u, v, w, heading, pitch, roll, vertical):
"""
Description:
This function converts the Instrument Coordinate transformed velocity
profiles to the Earth coordinate system. The calculation is defined in
the Data Product Specification for Velocity Profile and Echo Intensity
- DCN 1341-00750.
Implemented by:
2013-04-10: Christopher Wingard. Initial code.
2014-04-04: Russell Desiderio. Optimized code performance by replacing the for
loops previously used to calculate vectorized matrix multiplication
products with calls to np.einsum (numpy Einstein summation function).
2015-06-24: Russell Desiderio. Changed implementation of 'vertical' in the roll
calculation so that if these values are equal to the CI fill value
(-999999999), when these fill values are replaced with nans, the nans
will propagate through to the data product output.
2015-06-24: Russell Desiderio. Incorporated int fillvalue -> Nan.
Usage:
uu, vu, ww = adcp_ins2earth(u, v, w, heading, pitch, roll, vertical)
where
uu = "east" velocity profiles in earth coordinates [mm s-1]
vv = "north" velocity profiles in earth coordinates [mm s-1]
ww = "vertical" velocity profiles in earth coordinates [mm s-1]
u = east velocity profiles in instrument coordinates [mm s-1]
v = north velocity profiles in instrument coordinates [mm s-1]
w = vertical velocity profiles in instrument coordinates [mm s-1]
heading = instrument's uncorrected magnetic heading [centidegrees]
pitch = instrument pitch [centidegrees]
roll = instrument roll [centidegrees]
vertical = instrument's vertical orientation (0 = downward looking and
1 = upward looking)
References:
OOI (2012). Data Product Specification for Velocity Profile and Echo
Intensity. Document Control Number 1341-00750.
https://alfresco.oceanobservatories.org/ (See: Company Home >> OOI
>> Controlled >> 1000 System Level >>
1341-00050_Data_Product_SPEC_VELPROF_OOI.pdf)
"""
### the input beam data for adcp_ins2earth are always called using the output
### of adcp_beam2ins, so the following lines are not needed.
# insure we are dealing with array inputs
#u = np.atleast_2d(u)
#v = np.atleast_2d(v)
#w = np.atleast_2d(w)
# check for CI fill values before changing units.
# this function 'conditions' (np.atleast_1d) its inputs.
# TRDI does not apply its ADCP fill/bad value sentinels to compass data.
heading, pitch, roll, vertical = replace_fill_with_nan(None, heading, pitch, roll, vertical)
# change units from centidegrees to degrees
heading = heading / 100.0
pitch = pitch / 100.0
roll = roll / 100.0
# better way to calculate roll from the vertical orientation toggle;
# this will propagate R as nans if the vertical variable is missing from the data.
R = roll + vertical * 180.0
# roll
Rrad = np.radians(R)
cos_R = np.cos(Rrad)
sin_R = np.sin(Rrad)
# heading
Hrad = np.radians(heading)
cos_H = np.cos(Hrad)
sin_H = np.sin(Hrad)
# pitch
t1rad = np.radians(pitch)
t2rad = np.radians(roll)
Prad = np.arctan(np.tan(t1rad) * np.cos(t2rad))
cos_P = np.cos(Prad)
sin_P = np.sin(Prad)
# determine array size
n_packets = u.shape[0]
n_uvw = u.shape[1]
# initialize vectors to be used as matrix elements
ones = np.ones(n_packets)
zeros = ones * 0.0
# the rollaxis calls reorient the matrices so that their lead index is
# the data packet index
M1 = np.array([[cos_H, sin_H, zeros],
[-sin_H, cos_H, zeros],
[zeros, zeros, ones]])
M1 = np.rollaxis(M1, 2)
M2 = np.array([[ones, zeros, zeros],
[zeros, cos_P, -sin_P],
[zeros, sin_P, cos_P]])
M2 = np.rollaxis(M2, 2)
M3 = np.array([[cos_R, zeros, sin_R],
[zeros, ones, zeros],
[-sin_R, zeros, cos_R]])
M3 = np.rollaxis(M3, 2)
# construct input array of coordinates (velocities) to be transformed.
# the basis set is 3D (E,N,U) so that the middle dimension is sized at 3.
uvw = np.zeros((n_packets, 3, n_uvw))
# pack the coordinates (velocities) to be transformed into the appropriate
# slices.
uvw[:, 0, :] = u
uvw[:, 1, :] = v
uvw[:, 2, :] = w
# the Einstein summation is here configured to do the matrix
# multiplication MM(i,l) = M1(i,j) * M2(j,k) * M3(k,l) on each slice h.
MM = np.einsum('hij,hjk,hkl->hil', M1, M2, M3)
# the Einstein summation is here configured to do the matrix
# multiplication uvw_earth(i,m) = MM(i,l) * uvw(l,m) on each slice h.
uvw_earth = np.einsum('hil,hlm->him', MM, uvw)
# NOTE:
# these last two executable statements run about a factor of 2
# faster in the 10000 data packet performance tests versus combining
# these operations into the one statement:
# uvw_earth = np.einsum('hij,hjk,hkl,hlm->him', M1, M2, M3, uvw)
# break out the coordinate slices and return them
uu = uvw_earth[:, 0, :]
vv = uvw_earth[:, 1, :]
ww = uvw_earth[:, 2, :]
return (uu, vv, ww)
|
0a51db6b5d6186c4f9208e4fa2425160e8c43925
| 27,109 |
import math
def strength(data,l):
"""
Returns the strength of earthquake as tuple (P(z),S(xy))
"""
# FFT
# https://momonoki2017.blogspot.com/2018/03/pythonfft-1-fft.html
# Fast Fourier Transform
# fx = np.fft.fft(data[0])
# fy = np.fft.fft(data[1])
# fz = np.fft.fft(data[2])
# What's is the filter??
# https://www.data.jma.go.jp/svd/eqev/data/kyoshin/kaisetsu/calc_sindo.htm
# Inverse Fast Fourier Transform
# ifx = np.fft.ifft(fx)
# ify = np.fft.ifft(fy)
# ifz = np.fft.ifft(fz)
# rpi-seismometer
# https://github.com/p2pquake/rpi-seismometer
# for i in range(3):
# rv[i] = rv[i] * 0.94 + d[i]*0.06
# gals[i] = (rv[i] - avgs[i]) * 1.13426
avgs = [0,0,0]
for i in range(3):
avgs[i] = sum(data[i][-l:]) / len(data[i][-l:])
gals_z = [] # P wave?
gals_xy = [] # S wave?
for d in np.array(data).T[-l:]:
dd = 0
for i in range(2):
dd += (d[i] - avgs[i])**2
gals_xy.append(math.sqrt(dd))
gals_z.append(math.sqrt((d[2]-avgs[2])**2))
avg_z = sum(gals_z) / len(gals_z) * 100
avg_xy = sum(gals_xy) / len(gals_xy) * 100
return avg_z,avg_xy
|
705b04644002c2cf826ca6a03838cab66ccea1f8
| 27,110 |
def humanize(tag, value):
"""Make the metadata value human-readable
:param tag: The key of the metadata value
:param value: The actual raw value
:return: Returns ``None`` or a human-readable version ``str``
:rtype: ``str`` or ``None``
"""
for formatter in find_humanizers(tag):
human_readable = formatter(value)
if human_readable is not None:
return human_readable
|
42a4e1506b4655a86607495790f555cc318b6d82
| 27,112 |
import itertools
def cartesian(sequences, dtype=None):
"""
Generate a cartesian product of input arrays.
Parameters
----------
sequences : list of array-like
1-D arrays to form the cartesian product of.
dtype : data-type, optional
Desired output data-type.
Returns
-------
out : ndarray
2-D array of shape (M, len(arrays)) containing cartesian products
formed of input arrays.
Examples
--------
>>> cartesian(([1, 2, 3], [4, 5], [6, 7]))
array([[1, 4, 6],
[1, 4, 7],
[1, 5, 6],
[1, 5, 7],
[2, 4, 6],
[2, 4, 7],
[2, 5, 6],
[2, 5, 7],
[3, 4, 6],
[3, 4, 7],
[3, 5, 6],
[3, 5, 7]])
"""
if dtype is None:
dtype = np.dtype(type(sequences[0][0]))
return np.array(list(itertools.product(*sequences)), dtype=dtype)
|
51e6031c568eee425f2ea86c16b472474ae499eb
| 27,113 |
def nasa_date_to_iso(datestr):
"""Convert the day-number based NASA format to ISO.
Parameters
----------
datestr : str
Date string in the form Y-j
Returns
-------
Datestring in ISO standard yyyy-mm-ddTHH:MM:SS.MMMMMM
"""
date = dt.datetime.strptime(datestr, nasa_date_format)
return date.isoformat()
|
d77114c874fdd41a220aae907ce7eabd6dd239bf
| 27,114 |
def auto_label_color(labels):
"""
???+ note "Create a label->hex color mapping dict."
"""
use_labels = set(labels)
use_labels.discard(module_config.ABSTAIN_DECODED)
use_labels = sorted(use_labels, reverse=False)
assert len(use_labels) <= 20, "Too many labels to support (max at 20)"
palette = Category10[10] if len(use_labels) <= 10 else Category20[20]
color_dict = {
module_config.ABSTAIN_DECODED: "#dcdcdc", # gainsboro hex code
**{_l: _c for _l, _c in zip(use_labels, palette)},
}
return color_dict
|
791de575e500bf2c2e0e1d56c390c59a2f62381c
| 27,116 |
import re
def dedentString(text):
"""Dedent the docstring, so that docutils can correctly render it."""
dedent = min([len(match) for match in space_re.findall(text)] or [0])
return re.compile('\n {%i}' % dedent, re.M).sub('\n', text)
|
a384b0c9700a17a7ce621bca16175464192c9aee
| 27,117 |
def preprocess(df):
"""Preprocess the DataFrame, replacing identifiable information"""
# Usernames: <USER_TOKEN>
username_pattern = r"(?<=\B|^)@\w{1,18}"
df.text = df.text.str.replace(username_pattern, "<USERNAME>")
# URLs: <URL_TOKEN>
url_pattern = (
r"https?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]"
r"|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+"
)
df.text = df.text.str.replace(url_pattern, "<URL>")
# Email: <EMAIL_TOKEN>
email_pattern = r"[-.+\w]+@[-\w]+\.[-.\w]+"
df.text = df.text.str.replace(email_pattern, "<EMAIL>")
# Replace tokens in Wikipedia Talk dataset
df.text = df.text.str.replace("NEWLINE;?_TOKEN", "\n")
df.text = df.text.str.replace("TAB_TOKEN", "\t")
return df
|
d592d9e56af9ec17dcebede31d458dfdc001c220
| 27,118 |
def mobilenetv3_large_w7d20(**kwargs):
"""
MobileNetV3 Large 224/0.35 model from 'Searching for MobileNetV3,' https://arxiv.org/abs/1905.02244.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.chainer/models'
Location for keeping the model parameters.
"""
return get_mobilenetv3(version="large", width_scale=0.35, model_name="mobilenetv3_small_w7d20", **kwargs)
|
550f8273dfe52c67b712f8cd12d1e916f7a917cc
| 27,119 |
def random_forest_classifier(model, inputs, method="predict_proba"):
"""
Creates a SKAST expression corresponding to a given random forest classifier
"""
trees = [decision_tree(estimator.tree_, inputs, method="predict_proba", value_transform=lambda v: v/len(model.estimators_))
for estimator in model.estimators_]
return classifier(sum_(trees), method)
|
d13e28e05d01a2938116a1bac5ddbd64f7b5438c
| 27,120 |
from cowbird.utils import get_settings as real_get_settings
import functools
def mock_get_settings(test):
"""
Decorator to mock :func:`cowbird.utils.get_settings` to allow retrieval of settings from :class:`DummyRequest`.
.. warning::
Only apply on test methods (not on class TestCase) to ensure that :mod:`pytest` can collect them correctly.
"""
def mocked(container):
if isinstance(container, DummyRequest):
return container.registry.settings
return real_get_settings(container)
@functools.wraps(test)
def wrapped(*_, **__):
# mock.patch("cowbird.services.get_settings", side_effect=mocked)
with mock.patch("cowbird.utils.get_settings", side_effect=mocked):
return test(*_, **__)
return wrapped
|
8332d08846bcee6e9637f75c5c15fb763d9978a4
| 27,121 |
def _convert_to_interbatch_order(order: pd.Series,
batch: pd.Series) -> pd.Series:
"""
Convert the order values from a per-batch order to a interbatch order.
Parameters
----------
order: pandas.Series
order and batch must share the same index, size and be of dtype int.
batch: pandas.Series
Returns
-------
interbatch_order: pandas.Series
Notes
-----
If the order values are already unique, they are returned unchanged.
Examples
--------
>>>order = pd.Series([1, 2, 3, 1, 2, 3])
>>>batch = pd.Series([1, 1, 1, 2, 2, 2])
>>>_convert_to_interbatch_order(order, batch)
pd.Series([1, 2, 3, 4, 5, 6])
"""
if order.unique().size == order.size:
return order
# find a value to add to each batch to make unique and sorted order values
max_order = order.groupby(batch).max()
add_to_order = np.roll(max_order, 1)
add_to_order[0] = 0
add_to_order = add_to_order.cumsum()
add_to_order = pd.Series(data=add_to_order, index=max_order.index)
add_to_order = batch.map(add_to_order)
interbatch_order = order + add_to_order
return interbatch_order
|
235e99d8a93ebeecde7bfe274b82fe32980288dd
| 27,122 |
def CV_INIT_3X3_DELTAS(*args):
"""CV_INIT_3X3_DELTAS(double deltas, int step, int nch)"""
return _cv.CV_INIT_3X3_DELTAS(*args)
|
cbcbd6de2593d548c8e5bc02992d1df9a3d66460
| 27,123 |
def is_instance_cold_migrated_alarm(alarms, instance, guest_hb=False):
"""
Check if an instance cold-migrated alarm has been raised
"""
expected_alarm = {'alarm_id': fm_constants.FM_ALARM_ID_VM_COLD_MIGRATED,
'severity': fm_constants.FM_ALARM_SEVERITY_CRITICAL}
return _instance_alarm_raised(alarms, expected_alarm, instance)
|
8b6db3498d09d4d538382507ffac249226a2912f
| 27,124 |
def precision_macro(y_target, y_predicted):
"""
y_target: m x n 2D array. {0, 1}
real labels
y_predicted: m x n 2D array {0, 1}
prediction labels
m (y-axis): # of instances
n (x-axis): # of classes
"""
average = 'macro'
score = precision_score(y_target, y_predicted, average=average)
return score
|
4038eb838f35da93b24301809e5f0c3d5c37e2c9
| 27,125 |
def layout(mat,widths=None,heights=None):
"""layout"""
ncol=len(mat[0])
nrow=len(mat)
arr=[]
list(map(lambda m: arr.extend(m),mat))
rscript='layout(matrix(c(%s), %d, %d, byrow = TRUE),' %(str(arr)[1:-1],nrow,ncol)
if widths:
rscript+='widths=c(%s),' %(str(widths)[1:-1])
if heights:
rscript+='heights=c(%s),' %(str(heights)[1:-1])
rscript=rscript[:-1]+')\n'
return rscript
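# Illustrative usage (a minimal sketch; builds the R `layout(...)` call as text):
print(layout([[1, 2], [3, 3]], widths=[1, 2]))
# expected: layout(matrix(c(1, 2, 3, 3), 2, 2, byrow = TRUE),widths=c(1, 2))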
|
813fb351b4e09d4762255ecbbe6f9ee7e050efd0
| 27,126 |
def get_file_language(filename, text=None):
"""Get file language from filename"""
ext = osp.splitext(filename)[1]
if ext.startswith('.'):
ext = ext[1:] # file extension with leading dot
language = ext
if not ext:
if text is None:
text, _enc = encoding.read(filename)
for line in text.splitlines():
if not line.strip():
continue
if line.startswith('#!'):
shebang = line[2:]
if 'python' in shebang:
language = 'python'
else:
break
return language
|
7cfcd49d94cc1c2246f03946cfea1c99b866f941
| 27,127 |
import re
def _get_output_name(fpattern,file_ind,ind):
""" Returns an output name for volumetric image
This function returns a file output name for the image volume
based on the names of the file names of the individual z-slices.
All variables are kept the same as in the original filename,
but the z values are transformed into a range surrounded by <>.
For example, if the following files are processed:
image_c000_z000.ome.tif
image_c000_z001.ome.tif
image_c000_z002.ome.tif
then the output file will be:
image_c000_z<000-002>.ome.tif
Inputs:
fpattern - A filename pattern indicating variables in filenames
file_ind - A parsed dictionary of file names
ind - A dictionary containing the indices for the file name (i.e. {'r':1,'t':1})
Outputs:
fname - an output file name
"""
# If no regex was supplied, return default image name
if fpattern==None or fpattern=='':
return 'image.ome.tif'
for key in ind.keys():
assert key in VARIABLES, "Input dictionary key not a valid variable: {}".format(key)
# Parse variables
expr = []
variables = []
for g in re.finditer(r"\{[pxyzctr]+\}",fpattern):
expr.append(g.group(0))
variables.append(expr[-1][1])
# Return an output file name
fname = fpattern
for e,v in zip(expr,variables):
if v not in STATICS:
minval = min([int(z) for z in file_ind.keys()])
maxval = max([int(z) for z in file_ind.keys()])
fname = fname.replace(e,'<' + str(minval).zfill(len(e)-2) +
'-' + str(maxval).zfill(len(e)-2) + '>')
elif v not in ind.keys():
fname = fname.replace(e,str(0).zfill(len(e)-2))
else:
fname = fname.replace(e,str(ind[v]).zfill(len(e)-2))
return fname
|
8ce392acab2984b5012d8de7a0aa205f9a5e5e3b
| 27,128 |
import re
def MatchNameComponent(key, name_list, case_sensitive=True):
"""Try to match a name against a list.
This function will try to match a name like test1 against a list
like C{['test1.example.com', 'test2.example.com', ...]}. Against
this list, I{'test1'} as well as I{'test1.example'} will match, but
not I{'test1.ex'}. A multiple match will be considered as no match
at all (e.g. I{'test1'} against C{['test1.example.com',
'test1.example.org']}), except when the key fully matches an entry
(e.g. I{'test1'} against C{['test1', 'test1.example.com']}).
@type key: str
@param key: the name to be searched
@type name_list: list
@param name_list: the list of strings against which to search the key
@type case_sensitive: boolean
@param case_sensitive: whether to provide a case-sensitive match
@rtype: None or str
@return: None if there is no match I{or} if there are multiple matches,
otherwise the element from the list which matches
"""
if key in name_list:
return key
re_flags = 0
if not case_sensitive:
re_flags |= re.IGNORECASE
key = key.upper()
name_re = re.compile(r"^%s(\..*)?$" % re.escape(key), re_flags)
names_filtered = []
string_matches = []
for name in name_list:
if name_re.match(name) is not None:
names_filtered.append(name)
if not case_sensitive and key == name.upper():
string_matches.append(name)
if len(string_matches) == 1:
return string_matches[0]
if len(names_filtered) == 1:
return names_filtered[0]
return None
|
ad522feba9cabb3407e3b8e1e8c221f3e9800e16
| 27,129 |
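A short illustration of the matching rules described in the docstring of MatchNameComponent above:

print(MatchNameComponent('test1', ['test1.example.com', 'test2.example.com']))
# -> 'test1.example.com'
print(MatchNameComponent('test1', ['test1.example.com', 'test1.example.org']))
# -> None (multiple matches count as no match)
print(MatchNameComponent('test1.ex', ['test1.example.com', 'test2.example.com']))
# -> None (a partial label does not match)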
import requests
def news_api():
"""Uses news API and returns a dictionary containing news """
news_base_url = "https://newsapi.org/v2/top-headlines?"
news_api_key = keys["news"]
country = location["country"]
news_url = news_base_url + "country=" + country + "&apiKey=" + news_api_key
n_api = requests.get(news_url)
return n_api.json()
|
45e8a9d42d64066e2259fc95727d52e6b5bdfc9e
| 27,130 |
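A hedged sketch of the module-level configuration news_api above expects; the key and country values are placeholders, and the 'articles'/'title' fields follow the public News API response format:

keys = {"news": "<your-newsapi-key>"}      # hypothetical API-key store
location = {"country": "gb"}               # hypothetical location settings

headlines = news_api()
for article in headlines.get("articles", []):
    print(article["title"])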
def compress(mesh, engine_name="draco"):
""" Compress mesh data.
Args:
mesh (:class:`Mesh`): Input mesh.
engine_name (``string``): Valid engines are:
* ``draco``: `Google's Draco engine <https://google.github.io/draco/>`_
[#]_
Returns:
A binary string representing the compressed mesh data.
A simple usage example:
>>> mesh = pymesh.generate_icosphere(1.0, [0, 0, 0])
>>> data = pymesh.compress(mesh)
>>> with open("out.drc", 'wb') as fout:
... fout.write(data)
    .. [#] Draco uses lossy compression. Both accuracy and
vertices/face order will be lost due to compression. Draco only works
with triangular mesh or point cloud.
"""
engine = PyMesh.CompressionEngine.create(engine_name)
data = engine.compress(mesh.raw_mesh)
return data
|
67d8ec030d006f6720bacffad7bacd0c36b9df42
| 27,131 |
import configparser
def get_hotkey_next(config: configparser.RawConfigParser):
"""
获取热键:下一个桌面背景
"""
return __get_hotkey(config, 'Hotkey', 'hk_next')
|
3af499c01778a1defb0a440d042538885d829398
| 27,133 |
import urllib2  # Python 2 standard library
from bs4 import BeautifulSoup as bs4  # aliased so that bs4(html) below constructs the parser
def get_soup(url):
    """ Returns a BeautifulSoup object for the page at the given url.
    get_soup(str) -> bs4.BeautifulSoup
    """
req = urllib2.Request(url)
response = urllib2.urlopen(req)
html = response.read()
soup = bs4(html)
return soup
|
8d0bb43ae1d404cef5a3873dfd089b88461bf9fd
| 27,135 |
from flask import jsonify, make_response
def internal_server_error(error):
""" Handles unexpected server error with 500_SERVER_ERROR """
message = error.message or str(error)
app.logger.info(message)
return make_response(jsonify(status=500, error='Internal Server Error', message=message), 500)
|
8e80a4502a4656a1ccdb2c720177090dd7bcf53a
| 27,136 |
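One plausible way to wire the handler up, assuming `app` is the Flask application created elsewhere in the service:

app.register_error_handler(500, internal_server_error)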
import math
def diffsnorms(A, S, V, n_iter=20):
"""
2-norm accuracy of a Schur decomp. of a matrix.
Computes an estimate snorm of the spectral norm (the operator norm
induced by the Euclidean vector norm) of A-VSV', using n_iter
iterations of the power method started with a random vector;
n_iter must be a positive integer.
Increasing n_iter improves the accuracy of the estimate snorm of
the spectral norm of A-VSV'.
Notes
-----
To obtain repeatable results, reset the seed for the pseudorandom
number generator.
Parameters
----------
A : array_like
first matrix in A-VSV' whose spectral norm is being estimated
S : array_like
third matrix in A-VSV' whose spectral norm is being estimated
V : array_like
second matrix in A-VSV' whose spectral norm is being estimated
n_iter : int, optional
number of iterations of the power method to conduct;
n_iter must be a positive integer, and defaults to 20
Returns
-------
float
an estimate of the spectral norm of A-VSV' (the estimate fails
to be accurate with exponentially low probability as n_iter
increases; see references DS1_, DS2_, and DS3_ below)
Examples
--------
>>> from fbpca import diffsnorms, eigenn
>>> from numpy import diag
>>> from numpy.random import uniform
>>> from scipy.linalg import svd
>>>
>>> A = uniform(low=-1.0, high=1.0, size=(2, 100))
>>> A = A.T.dot(A)
>>> (U, s, Va) = svd(A, full_matrices=False)
>>> A = A / s[0]
>>>
>>> (d, V) = eigenn(A, 2)
>>> err = diffsnorms(A, diag(d), V)
>>> print(err)
This example produces a rank-2 approximation V diag(d) V' to A
such that the columns of V are orthonormal and the entries of d
are nonnegative and are nonincreasing.
diffsnorms(A, diag(d), V) outputs an estimate of the spectral norm
of A - V diag(d) V', which should be close to the machine
precision.
References
----------
.. [DS1] Jacek Kuczynski and Henryk Wozniakowski, Estimating the
largest eigenvalues by the power and Lanczos methods with
a random start, SIAM Journal on Matrix Analysis and
Applications, 13 (4): 1094-1122, 1992.
.. [DS2] Edo Liberty, Franco Woolfe, Per-Gunnar Martinsson,
Vladimir Rokhlin, and Mark Tygert, Randomized algorithms
for the low-rank approximation of matrices, Proceedings of
the National Academy of Sciences (USA), 104 (51):
20167-20172, 2007. (See the appendix.)
.. [DS3] Franco Woolfe, Edo Liberty, Vladimir Rokhlin, and Mark
Tygert, A fast randomized algorithm for the approximation
of matrices, Applied and Computational Harmonic Analysis,
25 (3): 335-366, 2008. (See Section 3.4.)
See also
--------
eigenn, eigens
"""
(m, n) = A.shape
(m2, k) = V.shape
(k2, k3) = S.shape
assert m == n
assert m == m2
assert k == k2
assert k2 == k3
assert n_iter >= 1
if np.isrealobj(A) and np.isrealobj(V) and np.isrealobj(S):
isreal = True
else:
isreal = False
# Promote the types of integer data to float data.
dtype = (A * 1.0).dtype
#
# Generate a random vector x.
#
if isreal:
x = np.random.normal(size=(n, 1)).astype(dtype)
else:
x = np.random.normal(size=(n, 1)).astype(dtype) \
+ 1j * np.random.normal(size=(n, 1)).astype(dtype)
x = x / norm(x)
#
# Run n_iter iterations of the power method.
#
for it in range(n_iter):
#
# Set y = (A-VSV')x.
#
y = mult(A, x) - V.dot(S.dot(V.conj().T.dot(x)))
#
# Set x = (A'-VS'V')y.
#
x = mult(y.conj().T, A).conj().T \
- V.dot(S.conj().T.dot(V.conj().T.dot(y)))
#
# Normalize x, memorizing its Euclidean norm.
#
snorm = norm(x)
if snorm == 0:
return 0
x = x / snorm
snorm = math.sqrt(snorm)
return snorm
|
2f446a08c6ff5d8377cca22ffcd1570c68f46748
| 27,137 |
from typing import Iterator
from typing import Tuple
def data_selection(workload: spec.Workload,
input_queue: Iterator[Tuple[spec.Tensor, spec.Tensor]],
optimizer_state: spec.OptimizerState,
current_param_container: spec.ParameterContainer,
hyperparameters: spec.Hyperparamters,
global_step: int,
rng: spec.RandomState) -> Tuple[spec.Tensor, spec.Tensor]:
"""Select data from the infinitely repeating, pre-shuffled input queue.
Each element of the queue is a single training example and label.
  We left out `current_params_types` because we do not believe that it would
  be necessary for this function.
  Return a tuple of (input, label) batches.
"""
del workload
del optimizer_state
del current_param_container
del hyperparameters
del global_step
del rng
return next(input_queue)
|
6daa0950e5ce82da081b71a01572dc29374f17f8
| 27,138 |
def graph_cases_factory(selenium):
"""
:type selenium: selenium.webdriver.remote.webdriver.WebDriver
:rtype: callable
:return: Constructor method to create a graph cases factory with a custom
host.
"""
return lambda host: GraphCaseFactory(selenium=selenium, host=host)
|
b41b02c148b340c07859e707cbaf4810db3b6004
| 27,139 |
def clean_scene_from_file(file_name):
"""
Args:
file_name: The name of the input sequence file
Returns:
Name of the scene used in the sequence file
"""
scene = scenename_from_file(file_name)
print('Scene: ', scene)
mesh_file = SCENE_PATH + scene + '/10M_clean.ply'
return mesh_file
|
cd706c900ca3e3fce6736ce5c4288cce6079b3e0
| 27,140 |
def _non_overlapping_chunks(seq, size):
"""
This function takes an input sequence and produces chunks of chosen size
    that strictly do not overlap. This is a much faster implementation than
_overlapping_chunks and should be preferred if running on very large seq.
Parameters
----------
seq : tuple or list
Sequence of integers.
size : int
Length of each produced chunk.
Returns
-------
zip
zip object that produces chunks of specified size, one at a time.
"""
return zip(*[iter(seq)] * size)
|
15b5d2b4a7d8df9785ccc02b5369a3f162704e9e
| 27,141 |
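The zip(*[iter(seq)] * size) idiom above pulls size items at a time from a single shared iterator; a quick illustration:

print(list(_non_overlapping_chunks([1, 2, 3, 4, 5, 6, 7], 3)))
# [(1, 2, 3), (4, 5, 6)]  (the trailing 7 is dropped because it cannot fill a chunk)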
import logging
def Compute_Error(X_data, pinn, K, mu, Lf, deltamean, epsilon, ndim) :
"""
Function to determine error for input data X_data
:param array X_data: input data for PINN
:param PINN pinn: PINN under investigation
:param float K: key parameter for using trapezoidal rule and estimating the number of required subintervals
    :param float mu: smoothing parameter for creating delta from the deviation R
    :param float Lf: Lipschitz constant or spectral abscissa of system under investigation
    :param float deltamean: a priori determined average deviation in ODE/PDE
    :param float epsilon: maximum contribution the numerical-integration error may make to the overall a posteriori error
:param int ndim: dimensions of input data
"""
# initialize variables for error and number of support points
E_pred = np.zeros((X_data.shape[0], 2))
N_SP = np.repeat(0, X_data.shape[0], axis=0)
# compute target value and error for all times
for x_index in range(X_data.shape[0]):
# get current item
x_item = np.reshape(X_data[x_index], (1, X_data.shape[1]))
# predict value at time 0 and compare to input values to get r0
t = x_item[0,0]
x_item[0,0] = 0
r0 = np.sqrt(np.sum((pinn.predict(x_item)[0] - x_item[0, -ndim:])**2))
x_item[0,0] = t
# compute predicted machine learning error and number of required support points
E_ML = np.exp(Lf * x_item[0,0])*(r0 + (1-np.exp(-x_item[0,0]*Lf))*deltamean/Lf)
N_SP[x_index] = np.ceil(np.sqrt(K*x_item[0,0]**3 / (12*E_ML*epsilon))).astype(int)
# compute prediction of support points
T_test = np.transpose(np.array([np.linspace(0,x_item[0,0],2*(N_SP[x_index]+1))]))
X_test = np.repeat(x_item, T_test.shape[0], axis=0)
X_test[:,0] = T_test[:,0]
_, F_pred = pinn.predict(X_test)
# compute integral for error
targetfun = (np.sqrt(np.reshape(np.sum(F_pred**2, axis=1),(F_pred.shape[0],1)) + np.full((F_pred.shape[0],1), mu, dtype="float64")**2) * np.exp(-Lf*T_test))
I_1 = compute_integral_trpz(targetfun, T_test[1]-T_test[0])
if x_item[0,0] > 0:
I_2 = compute_integral_trpz(targetfun[0::2], T_test[2]-T_test[0])
# determine error
E_pred[x_index, 0] = np.exp(Lf*x_item[0,0])*(r0)
if x_item[0,0] == 0:
E_pred[x_index, 1] = 0
else:
E_pred[x_index, 1] = np.exp(Lf*x_item[0,0])*(I_1 + 0.75*np.absolute(I_1-I_2))
if x_index % 100 == 0:
logging.info(f'Predicted error for index {x_index}: {E_pred[x_index]}')
return E_pred, N_SP
|
0789d7c52c96aed5cb40aa45c44c4df09f5cffaf
| 27,143 |
import itertools
def sort_fiducials(qr_a, qr_b):
"""Sort 2d fiducial markers in a consistent ordering based on their relative positions.
In general, when we find fiducials in an image, we don't expect them to be
returned in a consistent order. Additionally, the image coordinate may be
rotated from image to image. Here we match fiducials by trying all permutations
of matches and taking the best fit. We assume that the fiducials are all
aligned in similar directions; this is a constraint on fiducials placement.
"""
qr_a = np.array(qr_a)
qr_b = np.array(qr_b)
# Get unit vectors defining our common coordinate system in each image
ux_a = np.array([0.0, 0.0])
ux_b = np.array([0.0, 0.0])
for qr in qr_a:
ux_a += qr[1] - qr[0]
for qr in qr_b:
ux_b += qr[1] - qr[0]
ux_a /= np.linalg.norm(ux_a)
ux_b /= np.linalg.norm(ux_b)
def displacements(qrcodes, ux):
        uy = np.array([-ux[1], ux[0]])  # unit vector perpendicular to ux
displacements = []
for i in range(1, len(qrcodes)):
d = qrcodes[i][0] - qrcodes[0][0]
d2 = np.array([np.dot(ux, d), np.dot(uy, d)])
displacements.append(d2)
return np.array(displacements)
best_error = float("inf")
best_permutation = []
d_a = displacements(qr_a, ux_a)
for perm in itertools.permutations(qr_b):
d_perm = displacements(perm, ux_b)
error = np.sum(np.square(d_perm - d_a))
if error < best_error:
best_error = error
best_permutation = perm
return qr_a.tolist(), [p.tolist() for p in list(best_permutation)]
|
daa96f12ef2e94fed86970979e4d140f8a3fa3d5
| 27,144 |
from pathlib import Path
import jinja2
def form_render(path: str, **kwargs) -> str:
""" Just jinja2 """
file_text = Path(path).read_text()
template = jinja2.Template(file_text)
return template.render(**kwargs)
|
b5da5afdedcac922c164f644eabeae5f038f9169
| 27,145 |
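A tiny usage sketch for form_render above; the template file name and context are made up:

from pathlib import Path

Path("greeting.j2").write_text("Hello, {{ name }}!")
print(form_render("greeting.j2", name="world"))  # Hello, world!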
def _names(fg, bg):
"""3/4 bit encoding part
c.f. https://en.wikipedia.org/wiki/ANSI_escape_code#3.2F4_bit
    Parameters:
        fg: foreground color name (a key of _FOREGROUNDS) or None.
        bg: background color name (a key of _BACKGROUNDS) or None.
"""
if not (fg is None or fg in _FOREGROUNDS):
raise ValueError('Invalid color name fg = "{}"'.format(fg))
if not (bg is None or bg in _BACKGROUNDS):
raise ValueError('Invalid color name bg = "{}"'.format(bg))
fg_ = _FOREGROUNDS.get(fg, '')
bg_ = _BACKGROUNDS.get(bg, '')
return _join_codes(fg_, bg_)
|
50e4dfe9aa56c1f3fc7622c468045b26da9b4175
| 27,146 |
def preprocess(code):
"""Preprocess a code by removing comments, version and merging includes."""
if code:
#code = remove_comments(code)
code = merge_includes(code)
return code
|
b4ecbf28fa2043559b744e7351f268a2ba1e8200
| 27,147 |
import types
def _from_schemas_get_model(
stay_within_model: bool, schemas: _oa_types.Schemas, schema: _oa_types.Schema
) -> types.ModelArtifacts:
"""
Get artifacts for a model.
Assume the schema is valid.
Args:
schema: The schema of the model to get artifacts for.
schemas: All defined schemas used to resolve any $ref.
stay_within_model: Whether only properties from within a model should be
included.
Returns:
The artifacts of the model.
"""
model_artifacts = model.get(schemas, schema)
properties_artifacts = _from_schemas_get_properties_artifacts(
stay_within_model, schemas, schema
)
return types.ModelArtifacts(
tablename=model_artifacts.tablename,
inherits=model_artifacts.inherits,
parent=model_artifacts.parent,
description=model_artifacts.description,
mixins=model_artifacts.mixins,
kwargs=model_artifacts.kwargs,
composite_index=model_artifacts.composite_index,
composite_unique=model_artifacts.composite_unique,
backrefs=model_artifacts.backrefs,
properties=list(properties_artifacts),
)
|
0c5166c6baaabda64795729554b7bb3444a902c9
| 27,148 |
def solution2(inp):
"""Solves the second part of the challenge"""
return "done"
|
8e20e1a81911b3f2e54fac058df8a44e54945af0
| 27,149 |
import math
def juld_to_grdt(juld: JulianDay) -> GregorianDateTime:
"""ユリウス通日をグレゴリオ曆の日時に變換する."""
A = math.floor(juld.julian_day + 68569.5)
B = juld.julian_day + 0.5
a = math.floor(A / 36524.25)
b = A - math.floor(36524.25 * a + 0.75)
c = math.floor((b + 1) / 365.25025)
d = b - math.floor(365.25 * c) + 31
e = math.floor(d / 30.59)
f = math.floor(e / 11.0)
u = 100 * (a - 49) + c + f
v = e - 12 * f + 2
w = d - math.floor(30.59 * e) + (B % 1)
(hour, minute) = divmod(round(juld.second), 60 * 60)
hour = (hour + 12) % 24
(minute, second) = divmod(minute, 60)
return GregorianDateTime(u, v, math.floor(w), hour, minute, second, None)
|
94559bbec7fef45e6c7f6d8594d20c8039b58672
| 27,150 |
def users_all(request):
"""
    Returns the name, surname, and email of all users except the requester.
    Note: this kind of endpoint is only justified under the current
    circumstances: an *INTERNAL* file-sharing app used by staff. Hence, all
    names and emails may be fetched by other authenticated users.
    For example: selecting the users you want to share a folder with.
"""
users = User.objects.exclude(id=request.user.id).values(
"id", "first_name", "last_name", "email")
resp = {
"users": list(users)
}
return JsonResponse(resp)
|
53302d074ee1bbbc1156ffa2f94da4f834e9cb3c
| 27,151 |
def _resolve_categorical_entities(request, responder):
"""
This function retrieves all categorical entities as listed below and filters
the knowledge base using these entities as filters. The final search object
containing the shortlisted employee data is returned back to the calling function.
"""
# Finding all categorical entities
categorical_entities = [e for e in request.entities if e['type'] in
('state', 'sex', 'maritaldesc', 'citizendesc', 'racedesc',
'performance_score', 'employment_status', 'employee_source',
'position', 'department', 'reason_for_termination')]
# Building custom search
qa = app.question_answerer.build_search(index='employee_data')
# Querying the knowledge base for all categorical filters
if categorical_entities:
try:
for categorical_entity in categorical_entities:
key = categorical_entity['type']
if key == 'reason_for_termination':
key = 'rft'
val = categorical_entity['value'][0]['cname']
kw = {key: val}
qa = qa.filter(**kw)
except KeyError:
pass
# return size of the whole dataset to prevent the execute function from restricting
# the responses to 10 (Which is the default)
return qa, SIZE
|
d6671d030699df1b0400b1d478dc98f86be06c29
| 27,152 |
def filter_c13(df):
""" Filter predicted formulas with 13C.
Returns filtered df and n excluded """
shape_i = df.shape[0]
df = df[df['C13'] == 0]
df = df.reset_index(drop=True)
shape_f = df.shape[0]
n_excluded = shape_i - shape_f
return df, n_excluded
|
4f0d3eb6c9de7c07bc2e3f285ad5502bb6d6dd06
| 27,153 |
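A minimal sketch of filter_c13 above, assuming the prediction table carries a C13 column as the code implies (the rows are invented):

import pandas as pd

df = pd.DataFrame({"formula": ["C6H12O6", "C5H12O6"], "C13": [0, 1]})
filtered, n_excluded = filter_c13(df)
print(len(filtered), n_excluded)  # 1 1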
import random
import gzip
from urllib.request import Request, urlopen  # assumed Python 3, matching gzip.decompress below
def getContent(url):
"""
    Fetch a page that would otherwise respond with 403 Forbidden, by sending a spoofed User-Agent.
"""
random_header = random.choice(HEARDERS)
"""
    The second argument of Request, headers, is a dict, so a dict could also be
    passed in directly instead of calling add_header; its keys and values
    correspond to the pairs added below.
"""
req = Request(url)
req.add_header("User-Agent", random_header)
req.add_header("Host", "datachart.500.com")
content = urlopen(req).read()
html = gzip.decompress(content)
html = html.decode('gbk')
return html
|
da396d664fb23737ea2d87b6548521948adad709
| 27,154 |
def neighbour(x,y,image):
"""Return 8-neighbours of image point P1(x,y), in a clockwise order"""
img = image.copy()
    x_1, y_1, x1, y1 = x-1, y-1, x+1, y+1
return [img[x_1][y], img[x_1][y1], img[x][y1], img[x1][y1], img[x1][y], img[x1][y_1], img[x][y_1], img[x_1][y_1]]
|
8e645f7634d089a0e65335f6ea3363d4ed66235b
| 27,155 |
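A quick check of the clockwise ordering returned by neighbour above (it starts at the pixel directly above P1):

import numpy as np

img = np.arange(9).reshape(3, 3)
# [[0, 1, 2],
#  [3, 4, 5],
#  [6, 7, 8]]
print(neighbour(1, 1, img))  # [1, 2, 5, 8, 7, 6, 3, 0]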
def deconv2d(x, kernel, output_shape, strides=(1, 1),
border_mode='valid',
dim_ordering='default',
image_shape=None, filter_shape=None):
"""2D deconvolution (i.e. transposed convolution).
# Arguments
x: input tensor.
kernel: kernel tensor.
output_shape: 1D int tensor for the output shape.
strides: strides tuple.
border_mode: string, `"same"` or `"valid"`.
dim_ordering: `"tf"` or `"th"`.
Whether to use Theano or TensorFlow dimension ordering
            for inputs/kernels/outputs.
# Returns
A tensor, result of transposed 2D convolution.
"""
if dim_ordering == 'default':
dim_ordering = image_dim_ordering()
x = _preprocess_convnd_input(x, dim_ordering)
layout_kernel, nb_filter = _layout_kernel(dim_ordering, kernel.shape)
kernel = _preprocess_deconvnd_kernel(kernel, dim_ordering)
output_shape = _preprocess_deconvnd_output(output_shape, dim_ordering)
s = mx.sym.Deconvolution(data=x.symbol, name=kernel.name, kernel=layout_kernel, stride=strides,
num_filter=nb_filter, weight=kernel.symbol, no_bias=True, target_shape=output_shape)
out = _postprocess_convnd_output(KerasSymbol(s), dim_ordering)
return out
|
d1ed452b627764f0f08c669e4bea749886ebd0a6
| 27,157 |