content (string, lengths 35 to 762k) | sha1 (string, length 40) | id (int64, 0 to 3.66M) |
---|---|---|
def pe(cmd, shell=True):
"""
Print and execute command on system
"""
ret = []
for line in execute(cmd, shell=shell):
ret.append(line)
print(line, end="")
return ret | 0a238be68a7c383153834d45fbf3193f9b8c9a72 | 13,957 |
from PIL import ImageOps
def crop(image):
    """
    Method to crop out the unnecessary white parts of the image.
    Inputs:
        image (PIL.Image): image of the label.
    Outputs:
        image (PIL.Image): image of the label, cropped to its content bounding box.
    """
image = ImageOps.invert(image)
imageBox = image.getbbox()
image = image.crop(imageBox)
return ImageOps.invert(image) | 37a12733bcda66a9da16d72ff3fae749784481a0 | 13,959 |
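A minimal usage sketch for the crop helper above, assuming Pillow is installed; note that the function operates on PIL images, not NumPy arrays:
from PIL import Image, ImageDraw

img = Image.new("L", (100, 100), 255)                     # white canvas
ImageDraw.Draw(img).rectangle([40, 40, 60, 60], fill=0)   # black square in the middle
trimmed = crop(img)
print(img.size, "->", trimmed.size)                       # (100, 100) -> (21, 21)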
def all_pairs_normalized_distances(X):
"""
We can't really compute distances over incomplete data since
rows are missing different numbers of entries.
The next best thing is the mean squared difference between two vectors
(a normalized distance), which gets computed only over the columns that
two vectors have in common. If two vectors have no features in common
then their distance is infinity.
Parameters
----------
X : np.ndarray
Data matrix of shape (n_samples, n_features) with missing entries
marked using np.nan
Returns a (n_samples, n_samples) matrix of pairwise normalized distances.
"""
n_rows, n_cols = X.shape
    # matrix of mean squared difference between samples
D = np.ones((n_rows, n_rows), dtype="float32", order="C") * np.inf
# we can cheaply determine the number of columns that two rows share
# by taking the dot product between their finite masks
observed_elements = np.isfinite(X).astype(int)
n_shared_features_for_pairs_of_rows = np.dot(
observed_elements,
observed_elements.T)
no_overlapping_features_rows = n_shared_features_for_pairs_of_rows == 0
number_incomparable_rows = no_overlapping_features_rows.sum(axis=1)
row_overlaps_every_other_row = (number_incomparable_rows == 0)
row_overlaps_no_other_rows = number_incomparable_rows == n_rows
valid_rows_mask = ~row_overlaps_no_other_rows
valid_row_indices = np.where(valid_rows_mask)[0]
# preallocate all the arrays that we would otherwise create in the
# following loop and pass them as "out" parameters to NumPy ufuncs
diffs = np.zeros_like(X)
missing_differences = np.zeros_like(diffs, dtype=bool)
valid_rows = np.zeros(n_rows, dtype=bool)
ssd = np.zeros(n_rows, dtype=X.dtype)
for i in valid_row_indices:
x = X[i, :]
np.subtract(X, x.reshape((1, n_cols)), out=diffs)
np.isnan(diffs, out=missing_differences)
# zero out all NaN's
diffs[missing_differences] = 0
# square each difference
diffs **= 2
observed_counts_per_row = n_shared_features_for_pairs_of_rows[i]
if row_overlaps_every_other_row[i]:
# add up all the non-missing squared differences
diffs.sum(axis=1, out=D[i, :])
D[i, :] /= observed_counts_per_row
else:
np.logical_not(no_overlapping_features_rows[i], out=valid_rows)
# add up all the non-missing squared differences
diffs.sum(axis=1, out=ssd)
ssd[valid_rows] /= observed_counts_per_row[valid_rows]
D[i, valid_rows] = ssd[valid_rows]
return D | c744c6ac87cbd3760d6512178747ac60794d616a | 13,960 |
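A small usage sketch for the pairwise-distance helper above, assuming NumPy is imported; entries stay np.inf only when two rows share no observed columns:
import numpy as np

X = np.array([[1.0,    2.0, np.nan],
              [1.5, np.nan,    3.0],
              [np.nan, 2.5,    3.5]], dtype="float32")
D = all_pairs_normalized_distances(X)
# D[i, j] is the mean squared difference over the columns observed in both rows i and j
print(D)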
import torch
def forward_pass(model, target_angle, mixed_data, conditioning_label, args):
"""
Runs the network on the mixed_data
with the candidate region given by voice
"""
target_pos = np.array([
FAR_FIELD_RADIUS * np.cos(target_angle),
FAR_FIELD_RADIUS * np.sin(target_angle)
])
data, _ = utils.shift_mixture(
torch.tensor(mixed_data).to(args.device), target_pos, args.mic_radius,
args.sr)
data = data.float().unsqueeze(0) # Batch size is 1
# Normalize input
data, means, stds = normalize_input(data)
# Run through the model
valid_length = model.valid_length(data.shape[-1])
delta = valid_length - data.shape[-1]
padded = F.pad(data, (delta // 2, delta - delta // 2))
output_signal = model(padded, conditioning_label)
output_signal = center_trim(output_signal, data)
output_signal = unnormalize_input(output_signal, means, stds)
output_voices = output_signal[:, 0] # batch x n_mics x n_samples
output_np = output_voices.detach().cpu().numpy()[0]
energy = librosa.feature.rms(output_np).mean()
return output_np, energy | e9644b01ea04b08ae92d50d3c7944e0d72213b2b | 13,961 |
from sqlalchemy import select  # the query below uses SQLAlchemy's select(), not the stdlib select module
from typing import Optional
from datetime import datetime
import pytz
async def get_event_by_code(code: str, db: AsyncSession) -> Event:
"""
Get an event by its code
"""
statement = select(Event).where(Event.code == code)
result = await db.execute(statement)
event: Optional[Event] = result.scalars().first()
if event is None:
raise HTTPException(
status_code=HTTPStatus.BAD_REQUEST,
detail="invalid attendance code",
)
# Check that the code is still valid
with tracer.start_as_current_span("check-validity"):
now = datetime.now(tz=pytz.utc)
if not event.enabled or now < event.valid_from or now > event.valid_until:
raise HTTPException(status_code=HTTPStatus.FORBIDDEN, detail="invalid code")
return event | 592cd6b5aad7b12a98889bf82ea7e32a55b8832e | 13,962 |
def get(name):
"""Returns an OpDef for a given `name` or None if the lookup fails."""
with _sync_lock:
return _registered_ops.get(name) | 75e3ba3601f1ad8f67e77046a9b286bee8e60be6 | 13,963 |
def angle_detect_dnn(img, adjust=True):
"""
    Detect text orientation; returns one of 0, 90, 180 or 270 degrees
"""
h, w = img.shape[:2]
ROTATE = [0, 90, 180, 270]
if adjust:
        thresh = 0.05
        xmin, ymin, xmax, ymax = int(thresh * w), int(thresh * h), w - int(thresh * w), h - int(thresh * h)
        img = img[ymin:ymax, xmin:xmax]  # trim the image borders
inputBlob = cv2.dnn.blobFromImage(img,
scalefactor=1.0,
size=(224, 224),
swapRB=True,
                                      mean=[103.939, 116.779, 123.68], crop=False)
angleNet.setInput(inputBlob)
pred = angleNet.forward()
index = np.argmax(pred, axis=1)[0]
return ROTATE[index] | a3fc8513afce26e96a315a606acfd9be9feaa376 | 13,964 |
def get_correct_line(df_decisions):
"""
The passed df has repeated lines for the same file (same chemin_source).
We take the most recent one.
:param df_decisions: Dataframe of decisions
:return: Dataframe without repeated lines (according to the chemin_source column)
"""
return df_decisions.sort_values('timestamp_modification').drop_duplicates('chemin_source', keep='last') | 989f1aba1c5e0c61f8b7ca1c883baf4dd181ebbc | 13,965 |
def fix_1(lst1, lst2):
"""
Divide all of the elements in `lst1` by each element in `lst2`
and return the values in a list.
>>> fix_1([1, 2, 3], [0, 1])
[1.0, 2.0, 3.0]
>>> fix_1([], [])
[]
>>> fix_1([10, 20, 30], [0, 10, 10, 0])
[1.0, 2.0, 3.0, 1.0, 2.0, 3.0]
"""
out = []
for div in lst2:
for num in lst1:
try:
out.append(num / div) # add try-except block
except ZeroDivisionError:
pass
return out | 7929cfc19952a829c66c18af967668d1015f8477 | 13,966 |
def user_wants_upload():
"""
Determines whether or not the user wants to upload the extension
:return: boolean
"""
choice = input("Do you want to upload your extension right now? :")
if "y" in choice or "Y" in choice:
return True
else:
return False | 67643d1ccf8d1ffe23ddc503cd8e9f4dc4e98707 | 13,967 |
def has_genus_flag(df, genus_col="mhm_Genus", bit_col="mhm_HasGenus", inplace=False):
"""
Creates a bit flag: `mhm_HasGenus` where 1 denotes a recorded Genus and 0 denotes the contrary.
Parameters
----------
df : pd.DataFrame
A mosquito habitat mapper DataFrame
genus_col : str, default="mhm_Genus"
The name of the column in the mosquito habitat mapper DataFrame that contains the genus records.
bit_col : str, default="mhm_HasGenus"
The name of the column which will store the generated HasGenus flag
inplace : bool, default=False
        Whether to modify the DataFrame in place. If True, no DataFrame copy is returned and the operation is performed on the original.
Returns
-------
pd.DataFrame
A DataFrame with the HasGenus flag. If `inplace=True` it returns None.
"""
if not inplace:
df = df.copy()
df[bit_col] = (~pd.isna(df[genus_col].to_numpy())).astype(int)
if not inplace:
return df | 7e178f7570f8de436521047e012518e6f5ee6a72 | 13,968 |
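A brief usage sketch for the flag helper above, assuming pandas and NumPy are available:
import numpy as np
import pandas as pd

df = pd.DataFrame({"mhm_Genus": ["Aedes", np.nan, "Culex"]})
flagged = has_genus_flag(df)                 # returns a modified copy
print(flagged["mhm_HasGenus"].tolist())      # [1, 0, 1]
has_genus_flag(df, inplace=True)             # mutates df and returns None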
from typing import Tuple
def compass(
size: Tuple[float, float] = (4.0, 2.0),
layer: Layer = gf.LAYER.WG,
port_type: str = "electrical",
) -> Component:
"""Rectangular contact pad with centered ports on rectangle edges
(north, south, east, and west)
Args:
size: rectangle size
layer: tuple (int, int)
port_type:
"""
c = gf.Component()
dx, dy = size
points = [
[-dx / 2.0, -dy / 2.0],
[-dx / 2.0, dy / 2],
[dx / 2, dy / 2],
[dx / 2, -dy / 2.0],
]
c.add_polygon(points, layer=layer)
c.add_port(
name="e1",
midpoint=[-dx / 2, 0],
width=dy,
orientation=180,
layer=layer,
port_type=port_type,
)
c.add_port(
name="e2",
midpoint=[0, dy / 2],
width=dx,
orientation=90,
layer=layer,
port_type=port_type,
)
c.add_port(
name="e3",
midpoint=[dx / 2, 0],
width=dy,
orientation=0,
layer=layer,
port_type=port_type,
)
c.add_port(
name="e4",
midpoint=[0, -dy / 2],
width=dx,
orientation=-90,
layer=layer,
port_type=port_type,
)
c.auto_rename_ports()
return c | fefa0842958fb91b870eb78e2170a81d7c8daaa9 | 13,969 |
def get_service(vm, port):
"""Return the service for a given port."""
for service in vm.get('suppliedServices', []):
if service['portRange'] == port:
return service | d617771c25c69ee874b0bc64adcc735aa876f929 | 13,970 |
async def async_setup_entry(hass, config_entry):
"""Set up AirVisual as config entry."""
entry_updates = {}
if not config_entry.unique_id:
# If the config entry doesn't already have a unique ID, set one:
entry_updates["unique_id"] = config_entry.data[CONF_API_KEY]
if not config_entry.options:
# If the config entry doesn't already have any options set, set defaults:
entry_updates["options"] = DEFAULT_OPTIONS
if entry_updates:
hass.config_entries.async_update_entry(config_entry, **entry_updates)
websession = aiohttp_client.async_get_clientsession(hass)
hass.data[DOMAIN][DATA_CLIENT][config_entry.entry_id] = AirVisualData(
hass, Client(websession, api_key=config_entry.data[CONF_API_KEY]), config_entry
)
try:
await hass.data[DOMAIN][DATA_CLIENT][config_entry.entry_id].async_update()
except InvalidKeyError:
_LOGGER.error("Invalid API key provided")
raise ConfigEntryNotReady
hass.async_create_task(
hass.config_entries.async_forward_entry_setup(config_entry, "sensor")
)
async def refresh(event_time):
"""Refresh data from AirVisual."""
await hass.data[DOMAIN][DATA_CLIENT][config_entry.entry_id].async_update()
hass.data[DOMAIN][DATA_LISTENER][config_entry.entry_id] = async_track_time_interval(
hass, refresh, DEFAULT_SCAN_INTERVAL
)
config_entry.add_update_listener(async_update_options)
return True | e09b0c8e499a055123a88503cac4d1d1492a3d53 | 13,971 |
def rotation_point_cloud(pc):
"""
    Randomly rotate the point clouds to augment the dataset.
    The rotation is applied per shape with a single random angle (the active matrix below rotates about the x-axis).
:param pc: B X N X 3 array, original batch of point clouds
:return: BxNx3 array, rotated batch of point clouds
"""
# rotated_data = np.zeros(pc.shape, dtype=np.float32)
rotation_angle = np.random.uniform() * 2 * np.pi
cosval = np.cos(rotation_angle)
sinval = np.sin(rotation_angle)
# rotation_matrix = np.array([[cosval, 0, sinval],
# [0, 1, 0],
# [-sinval, 0, cosval]])
rotation_matrix = np.array([[1, 0, 0],
[0, cosval, -sinval],
[0, sinval, cosval]])
# rotation_matrix = np.array([[cosval, -sinval, 0],
# [sinval, cosval, 0],
# [0, 0, 1]])
rotated_data = np.dot(pc.reshape((-1, 3)), rotation_matrix)
return rotated_data | f1f84b9dad06bea6c377559d8b4a64be88031847 | 13,972 |
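A usage sketch for the augmentation above (NumPy assumed); since the active matrix is a pure rotation, point norms are preserved:
import numpy as np

pc = np.random.rand(128, 3).astype(np.float32)      # one cloud of 128 points
rotated = rotation_point_cloud(pc)
assert rotated.shape == pc.shape
assert np.allclose(np.linalg.norm(pc, axis=1),
                   np.linalg.norm(rotated, axis=1), atol=1e-5)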
import time
def alliance_system_oneday(mongohandle, alliance_id, system):
"""find by corp and system - one day"""
allkills = mongohandle.allkills
system = int(system)
timeframe = 24 * 60 * 60
gmtminus = time.mktime(time.gmtime()) - timeframe
cursor = allkills.find({"alliance_id": alliance_id,
"solar_system_id": system,
"unix_kill_time": {
"$gte": gmtminus}},
{"ship": 1,
"items": 1,
"_id": 0}).hint('alliancesystemtime')
(ships, items, ammos) = parsecursor.ships_and_items(cursor)
return (ships, items, ammos) | b951f11f606352dc6614e1ff1c587c3a64ed1ea8 | 13,973 |
def slit_select(ra, dec, length, width, center_ra=0, center_dec=0, angle=0):
"""
:param ra: angular coordinate of photon/ray
:param dec: angular coordinate of photon/ray
:param length: length of slit
:param width: width of slit
:param center_ra: center of slit
:param center_dec: center of slit
:param angle: orientation angle of slit, angle=0 corresponds length in RA direction
:return: bool, True if photon/ray is within the slit, False otherwise
"""
ra_ = ra - center_ra
dec_ = dec - center_dec
x = np.cos(angle) * ra_ + np.sin(angle) * dec_
y = - np.sin(angle) * ra_ + np.cos(angle) * dec_
if abs(x) < length / 2. and abs(y) < width / 2.:
return True
else:
return False | a3047a59bbc8566d261f1d52f92b437ad2b26d52 | 13,974 |
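A small usage sketch for the slit test above (NumPy assumed); the coordinate values are plain offsets chosen only for illustration:
import numpy as np

print(slit_select(ra=0.4, dec=0.05, length=1.0, width=0.2))    # True: inside the slit
print(slit_select(0.4, 0.05, 1.0, 0.2, angle=np.pi / 2))       # False: slit rotated by 90 degrees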
def login():
""" Logs in user """
req = flask.request.get_json(force=True)
username = req.get('username', None)
password = req.get('password', None)
user = guard.authenticate(username, password)
ret = {'access_token': guard.encode_jwt_token(user)}
return ret, 200 | b577c7982bf65d3a24cfd3f116f5cb128079cd1f | 13,975 |
def statuses_filter(auth, **params):
"""
Collect tweets from the twitter statuses_filter api.
"""
endpoint = "https://stream.twitter.com/1.1/statuses/filter.json"
if "follow" in params and isinstance(params["follow"], (list, tuple)):
params["follow"] = list_to_csv(params["follow"])
if "track" in params and isinstance(params["track"], (list, tuple)):
params["track"] = list_to_csv(params["track"])
params.setdefault("delimited", 0)
params.setdefault("stall_warnings", 1)
return stream_call(endpoint, auth, params, "post") | e81f85d5c747a4bcca8fc9b3b82d362905404452 | 13,976 |
def adjust_hue(image, hue_factor):
"""Adjusts hue of an image.
The image hue is adjusted by converting the image to HSV and
cyclically shifting the intensities in the hue channel (H).
The image is then converted back to original image mode.
`hue_factor` is the amount of shift in H channel and must be in the
interval `[-0.5, 0.5]`.
Args:
image (PIL.Image): PIL Image to be adjusted.
hue_factor (float): How much to shift the hue channel. Should be in
[-0.5, 0.5]. 0.5 and -0.5 give complete reversal of hue channel in
HSV space in positive and negative direction respectively.
0 means no shift. Therefore, both -0.5 and 0.5 will give an image
with complementary colors while 0 gives the original image.
Returns:
PIL.Image: Hue adjusted image.
"""
if not (-0.5 <= hue_factor <= 0.5):
raise ValueError('hue_factor ({}) is not in [-0.5, 0.5].'.format(hue_factor))
dtype = image.dtype
image = image.astype(np.uint8)
hsv_img = cv2.cvtColor(image, cv2.COLOR_RGB2HSV_FULL)
h, s, v = cv2.split(hsv_img)
alpha = np.random.uniform(hue_factor, hue_factor)
h = h.astype(np.uint8)
# uint8 addition take cares of rotation across boundaries
with np.errstate(over="ignore"):
h += np.uint8(alpha * 255)
hsv_img = cv2.merge([h, s, v])
return cv2.cvtColor(hsv_img, cv2.COLOR_HSV2RGB_FULL).astype(dtype) | 52390b83a60cc8f23632f198a558b518d687f94e | 13,977 |
import json
import requests
import time
def lambda_handler(event, context):
"""Sample pure Lambda function
Parameters
----------
event: dict, required
API Gateway Lambda Proxy Input Format
Event doc: https://docs.aws.amazon.com/apigateway/latest/developerguide/set-up-lambda-proxy-integrations.html#api-gateway-simple-proxy-for-lambda-input-format
context: object, required
Lambda Context runtime methods and attributes
Context doc: https://docs.aws.amazon.com/lambda/latest/dg/python-context-object.html
Returns
------
API Gateway Lambda Proxy Output Format: dict
Return doc: https://docs.aws.amazon.com/apigateway/latest/developerguide/set-up-lambda-proxy-integrations.html
"""
#print("Received event: " + json.dumps(event, indent=2))
body = json.loads(event['body'])
print(f"Body is: {body}")
url = body['url']
print(f"Getting image from URL: {url}")
response = requests.get(url)
print("Load image into memory")
img = PILImage.create(BytesIO(response.content))
print("Doing forward pass")
start = time.time()
pred,pred_idx,probs = learn.predict(img)
end = time.time()
inference_time = np.round((end - start) * 1000, 2)
print(f'class: {pred}, probability: {probs[pred_idx]:.04f}')
print(f'Inference time is: {str(inference_time)} ms')
return {
"statusCode": 200,
"body": json.dumps(
{
"class": pred,
"probability": "%.4f" % probs[pred_idx]
}
),
} | 05b5da6e2c2aff16c43a3822978f0cd800370bed | 13,978 |
def compareDict(a, b):
"""
Compare two definitions removing the unique Ids from the entities
"""
ignore = ['Id']
_a = [hashDict(dict(x), ignore) for x in a]
_b = [hashDict(dict(y), ignore) for y in b]
_a.sort()
_b.sort()
return _a == _b | 19f0340064c95584a4e80ecb4a090c25944f6923 | 13,979 |
import traceback
import time
def create_twitter_auth(cf_t):
"""Function to create a twitter object
Args: cf_t is configuration dictionary.
Returns: Twitter object.
"""
# When using twitter stream you must authorize.
# these tokens are necessary for user authentication
# create twitter API object
auth = OAuth(cf_t['access_token'], cf_t['access_token_secret'], cf_t['consumer_key'], cf_t['consumer_secret'])
try:
# create twitter API object
twitter = Twitter(auth = auth)
except TwitterHTTPError:
traceback.print_exc()
time.sleep(cf_t['sleep_interval'])
return twitter | 0eff78ce2dba182d739cc2bb082d5053a6a8847a | 13,980 |
def _project(doc, projection):
"""Return new doc with items filtered according to projection."""
def _include_key(key, projection):
for k, v in projection.items():
if key == k:
if v == 0:
return False
elif v == 1:
return True
else:
raise ValueError('Projection value must be 0 or 1.')
if projection and key != '_id':
return False
return True
return {k: v for k, v in doc.items() if _include_key(k, projection)} | 0f2cd190e73b39ceeec0f850054baab1dd357587 | 13,981 |
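Two quick illustrations of the projection helper above; an inclusion projection keeps `_id` by default, and an empty projection keeps the whole document:
doc = {"_id": 1, "name": "ada", "age": 36}
print(_project(doc, {"name": 1}))   # {'_id': 1, 'name': 'ada'}
print(_project(doc, {}))            # {'_id': 1, 'name': 'ada', 'age': 36}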
import random
def random_swap(words, n):
"""
Randomly swap two words in the sentence n times
Args:
        words (list): list of word tokens in the sentence
        n (int): number of times to swap a random pair of words
    Returns:
        list: a new list with the swaps applied (the input list is not modified)
"""
def swap_word(new_words):
random_idx_1 = random.randint(0, len(new_words) - 1)
random_idx_2 = random_idx_1
counter = 0
while random_idx_2 == random_idx_1:
random_idx_2 = random.randint(0, len(new_words) - 1)
counter += 1
if counter > 3:
return new_words
new_words[random_idx_1], new_words[random_idx_2] = (
new_words[random_idx_2],
new_words[random_idx_1],
)
return new_words
new_words = words.copy()
for _ in range(n):
new_words = swap_word(new_words)
return new_words | d6916404c363176f13010d006cd61354dcd4e16e | 13,982 |
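Usage sketch for the swap augmentation above (the `random` module is already imported by the snippet):
random.seed(0)
words = ["the", "quick", "brown", "fox"]
print(random_swap(words, n=2))   # e.g. ['quick', 'the', 'fox', 'brown']; exact order depends on the seed
print(words)                     # original list is unchanged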
def get_dist_for_angles(dict_of_arrays, clusters, roll, pitch, yaw, metric='3d', kind='max'):
"""
Calculate a single distance metric for a combination of angles
"""
if (dict_of_arrays['yaw_corr'] == 0).all():
rot_by_boresight = apply_boresight_same(dict_of_arrays, roll, pitch, yaw)
else:
rot_by_boresight = apply_boresight_yaw_correct(dict_of_arrays, roll, pitch, yaw)
rot_to_real_world = rotate_to_real_world(rot_by_boresight)
real_wrld_coords = shift_to_real_world(rot_to_real_world)
if kind == 'mean':
distance = get_mean_3D_distance(real_wrld_coords, clusters, metric)
elif kind == 'median':
distance = get_median_3D_distance(real_wrld_coords, clusters, metric)
else:
distance = get_max_3D_distance(real_wrld_coords, clusters, metric)
return distance | 4db8a68cebc845de942817eb9eb28e57d2db5cc4 | 13,983 |
import asyncio
async def stream():
"""Main streaming loop for PHD"""
while True:
if phd_client.is_connected and manager.active_connections:
response = await phd_client.get_responses()
if response is not None:
# Add to the websocket queue
# If it is the initial data, put in variable
if response.get('Event') == 'Version':
phd_client.initial_data = response
q.put_nowait(response)
await asyncio.sleep(STREAM_INTERVAL)
return None | 19e1934e8cb48fa66f8ab3f61ca013fd19b040fc | 13,984 |
def filter_camera_angle(places, angle=1.):
"""Filter pointclound by camera angle"""
bool_in = np.logical_and((places[:, 1] * angle < places[:, 0]),
(-places[:, 1] * angle < places[:, 0]))
return places[bool_in] | 9956c5b001989c5f64d935087a1e13ffbc6469b7 | 13,985 |
def load_nifti(path: str) \
-> tuple[np.ndarray, np.ndarray, nib.nifti1.Nifti1Header]:
"""
This function loads a nifti image using
the nibabel library.
"""
# Extract image
img = nib.load(path)
img_aff = img.affine
img_hdr = img.header
# Extract the actual data in a numpy array
data = img.get_fdata()
return data, img_aff, img_hdr | 9e76e3f6e6d200b3cd3be34b3780f8fe84cad53e | 13,986 |
def f5_list_policy_hostnames_command(client: Client, policy_md5: str) -> CommandResults:
"""
Get a list of all policy hostnames.
Args:
client (Client): f5 client.
policy_md5 (str): MD5 hash of the policy.
"""
result = client.list_policy_hostnames(policy_md5)
table_name = 'f5 data for listing policy hostname:'
readable_output, printable_result = build_command_result(result, table_name)
command_results = CommandResults(
outputs_prefix='f5.Hostname',
outputs_key_field='id',
readable_output=readable_output,
outputs=printable_result,
raw_response=result
)
return command_results | 38263c85480ba5d7de8a21509820052444b4cdab | 13,987 |
def predict(m, count, s, A):
"""predict the chain after s
calculate the probability of a m-length chain,
then return chains.
    CAUTION: the number of chains may be less than count
args:
m: the length of predict chain
count: the number of predict chain
s: the last element of the current chain
A: transition matrix
return:
some chains save in list
"""
process = []
start = {}
start[s] = [1, None]
process.append(start)
for i in range(m):
line = process[-1]
next_line = {}
for key in line.keys():
if A.get(key, None) is None:
continue
for k in A[key].keys():
p = next_line.get(k, [0, None])[0]
if p < A[key][k] * line[key][0]:
next_line[k] = [A[key][k] * line[key][0], key]
process.append(next_line)
ans = process[-1]
# sort according to probability from high to low
    ans = sorted(ans.items(), key=lambda item: item[1][0], reverse=True)
    if len(ans) == 0:
        return None  # can't predict: no answer can be found
    else:
        count = min(len(ans), count)  # the number of answers may be less than count
chains = []
length = len(process)
for i in range(count):
elem = ans[i][0]
chain = get_chain(elem, length-1, process)
chains.append(chain[1:])
return chains | f45acc67c97204efdabb48f29d73277fb4b75967 | 13,988 |
def f_multidim(anchors, basis, distance_measurements, coeffs):
"""
:param anchors: anchors dim x N
:param basis: basis vectors K x M
:param distance_measurements: matrix of squared distances M x N
:param coeffs: coefficient matrix dim x K
:return: vector of differences between estimate distance and measured distance.
"""
assert basis.shape[0] == coeffs.shape[1]
assert anchors.shape[0] == coeffs.shape[0]
assert anchors.shape[1] == distance_measurements.shape[1]
assert basis.shape[1] == distance_measurements.shape[0]
X = coeffs.dot(basis) # is (dim x M)
diff = anchors[:, :, np.newaxis] - X[:, np.newaxis, :]
distance_estimates = np.linalg.norm(diff, axis=0)**2
diff = distance_measurements.T - distance_estimates
nnz_diffs = diff[distance_measurements.T > 0].flatten()
return nnz_diffs | cd9f7fa67e6cbf3cfb5fe14e53b019713c56aa26 | 13,990 |
def getHomography(indict, outdict, outsize=None):
"""Returns a transformation to go from input pts to output pts using a homography.
'indict' and 'outdict' should contain identical keys mapping to 2-tuples.
We create A:
x1 y1 1 0 0 0 -x1*x1' -y1*x1'
0 0 0 x1 y1 1 -x1*y1' -y1*y1'
x2 y2 1 0 0 0 -x2*x2' -y2*x2'
0 0 0 x2 y2 1 -x2*y2' -y2*y2'
...
And b:
[x1' y1' x2' y2' x3' y3' ...].T
Then solve for h in Ah = b using linear least squares, where h is:
[h11 h12 h13 h21 h22 h23 h31 h32].T
and h33 is 1.
Returns (h, Ah), where the 2nd term is the transformed locations of the inputs.
"""
# initialize both matrices
A = np.zeros((2*len(outdict), 8), dtype=np.double)
b = np.zeros((2*len(outdict), 1), dtype=np.double)
inputs, outputs = getFidsFromDicts(indict, outdict, outsize=outsize)
# copy over data
for i, ((xi, yi, _), (xo, yo, _)) in enumerate(zip(inputs, outputs)):
A[2*i,:] = [xi, yi, 1, 0, 0, 0, -xi*xo, -yi*xo]
A[2*i+1, :] = [0, 0, 0, xi, yi, 1, -xi*yo, -yi*yo]
b[2*i] = xo
b[2*i+1] = yo
#print A, A.shape, b, b.shape, inputs, inputs.shape
# Linear least squares solve
h, resids, rank, s = np.linalg.lstsq(A, b)
h = h.flatten()
ret = np.ones((3,3), dtype=np.double)
ret[:, :] = [h[:3], h[3:6], [h[6], h[7], 1.0]]
ret = ret.transpose()
# we need transposed version of h throughout
ah = np.dot(inputs, ret)
ah /= ah[:, -1:]
if 0:
        print(h, len(h))
        print('ret\n', ret, ret.shape)
        print('normed ah\n', ah, ah.shape)
        print('outputs\n', outputs)
        print('inputs\n', inputs)
        print('diff %\n', 100.0*(outputs-ah)/outputs)
return ret, ah | 709fad7ffba7047e8d2c15e79611c3ac897733b7 | 13,991 |
def variables_to_restore(scope=None, strip_scope=False):
"""Returns a list of variables to restore for the specified list of methods.
It is supposed that variable name starts with the method's scope (a prefix
returned by _method_scope function).
Args:
        scope: name scope whose variables should be restored. If None, all
            restorable variables are returned.
        strip_scope: if True, returns variable names without the scope prefix.
            If scope is None, names are returned unchanged.
Returns:
a dictionary mapping variable names to variables for restore.
"""
if scope:
variable_map = {}
method_variables = slim.get_variables_to_restore(include=[scope])
for var in method_variables:
if strip_scope:
var_name = var.op.name[len(scope) + 1:]
else:
var_name = var.op.name
variable_map[var_name] = var
return variable_map
else:
return {v.op.name: v for v in slim.get_variables_to_restore()} | bc1f433b6a67898d8c010a56c6c51821f50df81a | 13,992 |
def from_strings(data, gaps="-", length=None, dtype=np.int8):
"""Convert a series of strings to an array of integer encoded alleles.
Parameters
----------
data : array_like, str
Sequence of strings of alleles.
gaps : str, optional
String of symbols to be interpreted as gaps in the sequence.
length : int, optional
Truncate or extend sequence to a set length by padding with gap values.
dtype : dtype, optional
Specify dtype of returned array.
Returns
-------
array : ndarray, int
Array of alleles encoded as integers.
"""
if isinstance(data, str):
return vector_from_string(data, gaps=gaps, length=length, dtype=dtype)
if isinstance(data, np.ndarray):
pass
else:
        data = np.asarray(data)
sequences = data.ravel()
# default to length of longest element
if length is None:
length = max(len(i) for i in sequences)
# number of sequences
n_seq = len(sequences)
    # allocate the output array; each row below is filled and padded to `length`
array = np.empty((n_seq, length), dtype=dtype)
for i in range(n_seq):
array[i] = vector_from_string(
sequences[i], gaps=gaps, length=length, dtype=dtype
)
shape = data.shape + (length,)
return array.reshape(shape) | 7405e208613aa75b132f686fcf5fe7451a4160cc | 13,993 |
def get_relationship_targets(item_ids, relationships, id2rec):
"""Get item ID set of item IDs in a relationship target set"""
# Requirements to use this function:
# 1) item Terms must have been loaded with 'relationships'
    # 2) item IDs in 'item_ids' argument must be present in id2rec
# 3) Arg, 'relationships' must be True or an iterable
reltgt_objs_all = set()
for goid in item_ids:
obj = id2rec[goid]
for reltype, reltgt_objs_cur in obj.relationship.items():
if relationships is True or reltype in relationships:
reltgt_objs_all.update(reltgt_objs_cur)
return reltgt_objs_all | 55542448af0eb2b46442bff0e0464361b669241a | 13,994 |
def cli(ctx, newick, analysis_id, name="", xref_db="null", xref_accession="", match_on_name=False, prefix=""):
"""Load a phylogenetic tree (Newick format) into Chado db
Output:
Number of inserted trees
"""
return ctx.gi.phylogeny.load_tree(newick, analysis_id, name=name, xref_db=xref_db, xref_accession=xref_accession, match_on_name=match_on_name, prefix=prefix) | 9b68dec5584a692f2fe04746d9bb179c9e002682 | 13,995 |
def roll_neighbors(sites, site, dims=None, radius=1):
""" N-dimensional pixel neighborhood
for periodic images on regular grids """
    index = np.unravel_index(site, shape=dims)  # newer NumPy uses `shape=` instead of the removed `dims=` keyword
neighs = sites.take(nbr_range+index, axis=0, mode='wrap')
return neighs.flatten() | e653604c07f4824ef766c3a7f41a6c6c8a35bad0 | 13,996 |
import requests
import json
def folder0_content(folder0_id, host, token):
"""
Modules
-------
    requests, json
----------
Parameters
----------
folder0_id : Onedata folder level 0 id containing the data to publish.
host : OneData provider (e.g., ceta-ciemat-02.datahub.egi.eu).
token : OneData personal access token.
-------
Returns
-------
all_level0: "name" and "id" of the folders contained in the folder defined by "folder0_id"
"""
OneData_urlchildren = "https://" + host + '/api/v3/oneprovider/data/' + folder0_id + "/children"
request_param = {'X-Auth-Token': token}
r_level0 = requests.get(OneData_urlchildren, headers=request_param)
all_level0 = json.loads(r_level0.text)
return (all_level0) | 8ce6ae617666f936643b9599ae115e140b30bd2b | 13,999 |
import urllib
import json
def fetch_object(object_id: int, url: str):
"""
Fetch a single object from a feature layer. We have to fetch objects one by one, because they
can get pretty big. Big enough, that if you ask for more than one at a time, you're likely to
encounter 500 errors.
object_id: object id to fetch (e.g. 1)
url: layer url to fetch (e.g. https://maps.gov.bc.ca/arcserver/rest/services/whse/bcgw_pub_whse_legal_admin_boundaries/MapServer/2)
"""
print(f'fetching object {object_id}')
params = {
'where': f'objectid={object_id}',
'geometryType': 'esriGeometryEnvelope',
'spatialRel': 'esriSpatialRelIntersects',
# 'outSR': '102100',
'outFields': '*',
'returnGeometry': 'true',
'returnIdsOnly': 'false',
'f': 'geojson'
}
encode_params = urllib.parse.urlencode(params).encode("utf-8")
print(f'{url}/query?{encode_params.decode()}')
with urllib.request.urlopen(f'{url}/query?', encode_params) as response:
json_data = json.loads(response.read())
return json_data | d193d9368eec79028beeb545a3fe411fa0c131bc | 14,000 |
def density_forecast_param(Yp, sigma, _, rankmatrix, errordist_normed, dof):
"""creates a density forecast for Yp with Schaake Schuffle
Parameters
----------
Yp: numpy.array
24-dimensional array with point-predictions of day ahead prices
sigma: numpy.array
Variance prediction for each hour
_ :
rankmatrix: numpy.array
Matrix with rank positions of forecast samples
errordist_normed: numpy.array
Realized normed prediction errors
dof: int
Degrees of Freedom of parametric margins
0: Normal distribution
>0: t-distribution
Returns
-------
newdataarray: numpy.array
Array containing the density predictions of day ahead price
"""
# Initialize
errordist=errordist_normed.copy()
nzero=np.size(rankmatrix,axis=0)
n_sample=np.size(errordist, axis=0)
sqrtsigma = np.sqrt(sigma)
#
for h in range(24):
# Assume Normal distribution for dof==0
if dof[0]==0:
errordist[:, h]=np.linspace(st.norm(Yp[0, h], sqrtsigma[h]).ppf(1 / (n_sample + 1)), st.norm(Yp[0, h], sqrtsigma[h]).ppf(n_sample / (n_sample + 1)), n_sample)
# Assume t-distribution with given degrees of freedom
else:
errordist[:, h] = np.linspace(st.t(loc=Yp[0, h], scale=sqrtsigma[h],df=dof[h]).ppf(1 / (n_sample + 1)),
st.t(loc=Yp[0, h], scale=sqrtsigma[h],df=dof[h]).ppf(n_sample / (n_sample + 1)), n_sample)
Yt = np.zeros(shape=(nzero, 24))
u_new = np.arange(1, nzero + 1) / (nzero + 1)
std_error = np.zeros(shape=(nzero, 24))
for h in range(24):
helper = np.sort(errordist[:, h])
std_error_pos = np.array(np.floor(u_new * np.size(errordist, axis=0)), dtype='int')
std_error[:, h] = helper[std_error_pos]
for i in range(nzero):
Yt[i, :] = std_error[i, :]
# order newdata according to rank-matrix
newdataarray = np.zeros(shape=(nzero, 24))
for col in range(24):
for i in range(0, nzero):
help = int(rankmatrix[i, col] - 1)
newdataarray[i, col] = Yt[help, col]
return newdataarray | 809458a7d3de0ae2997f392e52f91a9b4c02e181 | 14,001 |
def gaussian_blur(img: np.ndarray, kernel_size: int) -> np.ndarray:
"""Applies a Gaussian Noise kernel"""
if not is_valid_kernel_size(kernel_size):
raise ValueError(
"kernel_size must either be 0 or a positive, odd integer")
return cv2.GaussianBlur(img, (kernel_size, kernel_size), 0) | 6bedc6b15848c18ed52c8348f3bec1b4181f74d7 | 14,002 |
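Usage sketch for the blur wrapper above; it assumes OpenCV and NumPy are installed, that the snippet has `import cv2`, and that the `is_valid_kernel_size` helper (not shown) accepts 0 or positive odd integers:
import numpy as np

img = (np.random.rand(64, 64, 3) * 255).astype(np.uint8)
blurred = gaussian_blur(img, kernel_size=5)     # odd kernel size: OK
# gaussian_blur(img, kernel_size=4)             # would raise ValueError per the check above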
def get_controls_snapshots_count(selenium, src_obj):
"""Return dictionary with controls snapshots actual count and count taken
from tab title."""
controls_ui_service = webui_service.ControlsService(selenium)
return {
"controls_tab_count": controls_ui_service.get_count_objs_from_tab(
src_obj=src_obj),
"controls_count": len(controls_ui_service.get_list_objs_from_tree_view(
src_obj=src_obj))} | 5e6a11a2a94093850f810e0ec6c93037a9f40bca | 14,003 |
import random
import math
def fast_gnp_random_graph(n, p, seed=None, directed=False):
"""Returns a `G_{n,p}` random graph, also known as an Erdős-Rényi graph or
a binomial graph.
Parameters
----------
n : int
The number of nodes.
p : float
Probability for edge creation.
seed : int, optional
Seed for random number generator (default=None).
directed : bool, optional (default=False)
If ``True``, this function returns a directed graph.
Notes
-----
The `G_{n,p}` graph algorithm chooses each of the `[n (n - 1)] / 2`
(undirected) or `n (n - 1)` (directed) possible edges with probability `p`.
This algorithm runs in `O(n + m)` time, where `m` is the expected number of
edges, which equals `p n (n - 1) / 2`. This should be faster than
:func:`gnp_random_graph` when `p` is small and the expected number of edges
is small (that is, the graph is sparse).
See Also
--------
gnp_random_graph
References
----------
.. [1] Vladimir Batagelj and Ulrik Brandes,
"Efficient generation of large random networks",
Phys. Rev. E, 71, 036113, 2005.
"""
G = empty_graph(n)
G.name="fast_gnp_random_graph(%s,%s)"%(n,p)
    if seed is not None:
        random.seed(seed)
if p <= 0 or p >= 1:
return nx.gnp_random_graph(n,p,directed=directed)
w = -1
lp = math.log(1.0 - p)
if directed:
G = nx.DiGraph(G)
# Nodes in graph are from 0,n-1 (start with v as the first node index).
v = 0
while v < n:
lr = math.log(1.0 - random.random())
w = w + 1 + int(lr/lp)
if v == w: # avoid self loops
w = w + 1
while w >= n and v < n:
w = w - n
v = v + 1
if v == w: # avoid self loops
w = w + 1
if v < n:
G.add_edge(v, w)
else:
# Nodes in graph are from 0,n-1 (start with v as the second node index).
v = 1
while v < n:
lr = math.log(1.0 - random.random())
w = w + 1 + int(lr/lp)
while w >= v and v < n:
w = w - v
v = v + 1
if v < n:
G.add_edge(v, w)
return G | f84c577a4f575186913980c8d9a5dcc16d771291 | 14,004 |
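A usage sketch for the generator above; the snippet itself assumes `import networkx as nx` plus an `empty_graph` helper (e.g. nx.empty_graph):
G = fast_gnp_random_graph(1000, 0.002, seed=42)
print(G.number_of_nodes(), G.number_of_edges())   # expected edge count is p*n*(n-1)/2, about 999 here
D = fast_gnp_random_graph(100, 0.05, seed=7, directed=True)
print(D.is_directed())                            # True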
import math
def round_to(f: float, p: int = 0) -> float:
"""Round to the specified precision using "half up" rounding."""
# Do no rounding, just return a float with full precision
if p == -1:
return float(f)
# Integer rounding
elif p == 0:
return round_half_up(f)
# Round to the specified precision
else:
whole = int(f)
digits = 0 if whole == 0 else int(math.log10(-whole if whole < 0 else whole)) + 1
return round_half_up(whole if digits > p else f, p - digits) | ad464bced2e2b1b87208f61e7ca73b42d5e31fa5 | 14,005 |
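The snippet above relies on a `round_half_up` helper that is not included; a minimal Decimal-based sketch of it, together with a few worked calls, might look like this (the helper shown here is an assumption, not the original implementation):
from decimal import Decimal, ROUND_HALF_UP

def round_half_up(f: float, p: int = 0) -> float:
    exp = Decimal("1e{}".format(-p))   # e.g. p=2 -> Decimal('0.01'), p=-1 -> Decimal('1e1')
    return float(Decimal(str(f)).quantize(exp, rounding=ROUND_HALF_UP))

print(round_to(2.5))          # 3.0   (half up, unlike the built-in round)
print(round_to(123.456, 2))   # 120.0 (two significant digits: the 3-digit whole part is rounded to tens)
print(round_to(1.2345, 3))    # 1.23  (one digit before the point leaves two decimals)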
def get_interface_type(interface):
"""Gets the type of interface
"""
if interface.upper().startswith('GI'):
return 'GigabitEthernet'
elif interface.upper().startswith('TE'):
return 'TenGigabitEthernet'
elif interface.upper().startswith('FA'):
return 'FastEthernet'
elif interface.upper().startswith('FO'):
return 'FortyGigabitEthernet'
elif interface.upper().startswith('LON'):
return 'LongReachEthernet'
elif interface.upper().startswith('ET'):
return 'Ethernet'
elif interface.upper().startswith('VL'):
return 'Vlan'
elif interface.upper().startswith('LO'):
return 'loopback'
elif interface.upper().startswith('PO'):
return 'Port-channel'
elif interface.upper().startswith('NV'):
return 'nve'
elif interface.upper().startswith('TWE'):
return 'TwentyFiveGigE'
elif interface.upper().startswith('HU'):
return 'HundredGigE'
else:
return 'unknown' | 8a898f75e0e05715e0ced7258b8e8d4bf9905377 | 14,006 |
def __get_global_options(cmd_line_options, conf_file_options=None):
""" Get all global options
:type cmd_line_options: dict
:param cmd_line_options: Dictionary with all command line options
:type conf_file_options: dict
:param conf_file_options: Dictionary with all config file options
:returns: dict
"""
options = {}
for option in DEFAULT_OPTIONS['global'].keys():
options[option] = DEFAULT_OPTIONS['global'][option]
if conf_file_options and option in conf_file_options:
options[option] = conf_file_options[option]
if cmd_line_options and option in cmd_line_options:
options[option] = cmd_line_options[option]
return options | 3c9880616ae274f4254cdd29558f1022fdfc6ff4 | 14,007 |
def download_file(service, drive_file):
"""Download a file's content.
Args:
service: Drive API service instance.
drive_file: Drive File instance.
Returns:
File's content if successful, None otherwise.
"""
download_url = drive_file.get('downloadUrl')
if download_url:
resp, content = service._http.request(download_url)
if resp.status == 200:
#print 'Status: %s' % resp
return content
else:
#print 'An error occurred: %s' % resp
return None
else:
# The file doesn't have any content stored on Drive.
return None | fa8ad859e47dbaec0cb9a4eea0be5497239e359e | 14,008 |
def get_include_file_end_before(block: Block) -> str:
"""
>>> # test end-before set to 'end-marker'
>>> block = lib_test.get_test_block_ok()
>>> get_include_file_end_before(block)
'# end-marker'
>>> assert block.include_file_end_before == '# end-marker'
>>> # test end-before not set
>>> block = lib_test.get_test_block_end_before_not_set()
>>> get_include_file_end_before(block)
''
>>> # test end-before invalid
>>> block = lib_test.get_test_block_end_before_invalid()
>>> get_include_file_end_before(block) # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
Traceback (most recent call last):
...
ValueError: Error in File ".../README.template.rst", Line 47106: option "end-before" has no value
"""
include_file_end_before = block.include_file_end_before
if lib_block_options.is_option_in_block('end-before', block):
include_file_end_before = lib_block_options.get_option_value_from_block_or_raise_if_empty_or_invalid('end-before', block)
block.include_file_end_before = include_file_end_before
return include_file_end_before | c8ae330fb24a2a7e5304d8a5e5c5438bf9346c63 | 14,009 |
import torch
import random
def add_random_circles(tensor: torch.Tensor, n_circles: int, equalize_overlaps: bool = True):
"""Adds n_circles random circles onto the image."""
height, width = tensor.shape
circle_img = torch.zeros_like(tensor)
for _ in range(n_circles):
circle_img = add_circle(circle_img, {'x': random.randint(0, width), 'y': random.randint(0, height)}, random.randint(1, int(max(height, width) / 30)))
tensor += (circle_img != 0)
if equalize_overlaps:
tensor = (tensor != 0)
return tensor.type(torch.FloatTensor) | 17e0cf8d53cf0f8b3c542f0fd0f49151c6842ba9 | 14,010 |
def sample_quadric_surface(quadric, center, samples):
"""Samples the algebraic distance to the input quadric at sparse locations.
Args:
quadric: Tensor with shape [..., 4, 4]. Contains the matrix of the quadric
surface.
center: Tensor with shape [..., 3]. Contains the [x,y,z] coordinates of the
center of the coordinate frame of the quadric surface in NIC space with a
top-left origin.
samples: Tensor with shape [..., N, 3], where N is the number of samples to
evaluate. These are the sample locations in the same space in which the
quadric surface center is defined. Supports broadcasting the batching
dimensions.
Returns:
distances: Tensor with shape [..., N, 1]. Contains the algebraic distance
to the surface at each sample.
"""
with tf.name_scope('sample_quadric_surface'):
batching_dimensions = quadric.get_shape().as_list()[:-2]
batching_rank = len(batching_dimensions)
tf_util.assert_shape(quadric, batching_dimensions + [4, 4],
'sample_quadric_surface:quadric')
tf_util.assert_shape(center, batching_dimensions + [-1],
'sample_quadric_surface:center')
tf_util.assert_shape(samples, batching_rank * [-1] + [-1, 3],
'sample_quadric_surface:samples')
# We want to transform the coordinates so that they are in the coordinate
# frame of the conic section matrix, so we subtract the center of the
# conic.
samples = samples - tf.expand_dims(center, axis=batching_rank)
sample_count = samples.get_shape().as_list()[-2]
homogeneous_sample_ones = tf.ones(
samples.get_shape().as_list()[:-1] + [1], dtype=tf.float32)
homogeneous_sample_coords = tf.concat([samples, homogeneous_sample_ones],
axis=-1)
# When we transform the coordinates per-image, we broadcast on both sides-
# the batching dimensions broadcast up the coordinate grid, and the
# coordinate center broadcasts up along the height and width.
# Per-pixel, the algebraic distance is v^T * M * v, where M is the matrix
# of the conic section, and v is the homogeneous column vector [x y z 1]^T.
half_distance = tf.matmul(
quadric, homogeneous_sample_coords, transpose_b=True)
rank = batching_rank + 2
half_distance = tf.transpose(
half_distance, perm=list(range(rank - 2)) + [rank - 1, rank - 2])
algebraic_distance = tf.reduce_sum(
tf.multiply(homogeneous_sample_coords, half_distance), axis=-1)
return tf.reshape(algebraic_distance,
batching_dimensions + [sample_count, 1]) | e4448be0058f4a8010a72eaf9506e95695b1b35e | 14,011 |
def mol2df(mols: Mols[pd.DataFrame], multiindex=False) -> pd.DataFrame:
"""
    flattens mols into a dataframe whose columns encode the start, stop and price
    :param mols: mols to transform
    :return: a wide DataFrame with one column per (start, stop, price) combination
"""
if multiindex:
flat = {
((start, stop), price): series
for (start, stop), mol in mols.items()
for price, series in mol.items()
}
else:
flat = {
f"{start} -> {stop}: {price}": series
for (start, stop), mol in mols.items()
for price, series in mol.items()
}
return pd.concat(flat, axis="columns") | 63b16fa99a9c76a29cbef8755cf29928f05637f6 | 14,012 |
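A usage sketch, assuming Mols is a mapping from (start, stop) pairs to {price: Series} dicts and pandas is imported:
import pandas as pd

mols = {("A", "B"): {10.0: pd.Series([1, 2, 3])},
        ("A", "C"): {12.5: pd.Series([4, 5, 6])}}
print(mol2df(mols).columns.tolist())   # ['A -> B: 10.0', 'A -> C: 12.5']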
from typing import Tuple
def load_sequence_classifier_configs(args) -> Tuple[WrapperConfig, pet.TrainConfig, pet.EvalConfig]:
"""
Load the model, training and evaluation configs for a regular sequence classifier from the given command line
arguments. This classifier can either be used as a standalone model or as the final classifier for PET/iPET.
"""
model_cfg = WrapperConfig(
model_type=args.model_type,
model_name_or_path=args.model_name_or_path,
wrapper_type=SEQUENCE_CLASSIFIER_WRAPPER,
task_name=args.task_name,
label_list=args.label_list,
max_seq_length=args.sc_max_seq_length,
verbalizer_file=args.verbalizer_file,
cache_dir=args.cache_dir,
)
train_cfg = pet.TrainConfig(
device=args.device,
per_gpu_train_batch_size=args.sc_per_gpu_train_batch_size,
per_gpu_unlabeled_batch_size=args.sc_per_gpu_unlabeled_batch_size,
n_gpu=args.n_gpu,
num_train_epochs=args.sc_num_train_epochs,
max_steps=args.sc_max_steps,
min_steps=args.sc_min_steps,
temperature=args.temperature,
gradient_accumulation_steps=args.sc_gradient_accumulation_steps,
weight_decay=args.weight_decay,
learning_rate=args.learning_rate,
adam_epsilon=args.adam_epsilon,
warmup_steps=args.warmup_steps,
logging_steps=args.logging_steps,
logging_number=args.logging_number,
max_grad_norm=args.max_grad_norm,
use_logits=args.method != "sequence_classifier",
local_rank=args.local_rank,
)
eval_cfg = pet.EvalConfig(
device=args.device,
n_gpu=args.n_gpu,
metrics=args.metrics,
per_gpu_eval_batch_size=args.sc_per_gpu_eval_batch_size,
local_rank=args.local_rank,
)
return model_cfg, train_cfg, eval_cfg | 8729851faae06ed7c0331960db4f933283e7278e | 14,013 |
def gender(word):
""" Returns the gender for the given word, either:
MALE, FEMALE, (MALE, FEMALE), (MALE, PLURAL) or (FEMALE, PLURAL).
"""
w = word.lower()
# Adjectives ending in -e: cruciale, difficile, ...
if w.endswith(("ale", "ile", "ese", "nte")):
return (MALE, FEMALE)
# Most nouns ending in -a (-e) are feminine, -o (-i) masculine:
if w.endswith(("ore", "ista", "mma")):
return MALE
if w.endswith(("a", u"tà", u"tù", "ione", "rice")):
return FEMALE
if w.endswith(("e", "oni")):
return (FEMALE, PLURAL)
if w.endswith("i"):
return (MALE, PLURAL)
if w.endswith("o"):
return MALE
return MALE | 7a8384d778b9aec9fcc5eb32f26c282805cdfa0b | 14,014 |
from typing import Counter
def fcmp(d,r):
"""
Compares two files, d and r, cell by cell. Float comparisons
are made to 4 decimal places. Extending this function could
be a project in and of itself.
"""
# we need to compare the files
    dh=open(d,'r')
    rh=open(r,'r')
dlines = dh.readlines()
rlines = rh.readlines()
boolCounter = Counter()
for dline, rline in zip(dlines,rlines):
for dc,rc in zip(dline.split(','), rline.split(',')):
if _isfloat(dc):
if round(float(dc),4)!=round(float(rc),4):
boolCounter[False] += 1
else:
boolCounter[True] += 1
            else:
                # non-numeric cells are compared as raw strings
                if dc != rc:
                    boolCounter[False] += 1
                else:
                    boolCounter[True] += 1
dh.close()
rh.close()
    # files match only if no cell mismatches were recorded
    if boolCounter[False] == 0:
return True
else:
return False | 9f6f24314316fbef26ce0fb404a88d34c3049b2b | 14,015 |
def is_vector_equal(vec1, vec2, tolerance=1e-10):
"""Compare if two vectors are equal (L1-norm) according to a tolerance"""
return np.all(np.abs(vec1 - vec2) <= tolerance) | 9bb42fa3bc2cbb25edd6eabeddb2aa2d8d93e5c8 | 14,016 |
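Quick usage sketch for the comparison helper above (NumPy assumed):
import numpy as np

print(is_vector_equal(np.array([1.0, 2.0]), np.array([1.0, 2.0 + 1e-12])))   # True
print(is_vector_equal(np.array([1.0, 2.0]), np.array([1.0, 2.1])))           # False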
def partition_pair(bif_point):
"""Calculate the partition pairs at a bifurcation point.
The number of nodes in each child tree is counted. The partition
pairs is the number of bifurcations in the two child subtrees
at each branch point.
"""
n = float(sum(1 for _ in bif_point.children[0].ipreorder()))
m = float(sum(1 for _ in bif_point.children[1].ipreorder()))
return (n, m) | 7889eb95a0ac3b2a7d1138061a4651b1e79427c0 | 14,017 |
def readPyCorrFit(file):
"""
Read header and data of .csv PyCorrFit output file
========== ===============================================================
Input Meaning
---------- ---------------------------------------------------------------
file String with path to .csv file
========== ===============================================================
========== ===============================================================
Output Meaning
---------- ---------------------------------------------------------------
outputdata Object with the tau, G, Gfit, Gres in data field, and separate
fields for the fitted values n, SP, offset, and chi2
========== ===============================================================
"""
# create object
outputdata = PyCorrFitData()
# read .csv header
f = open(file, "r")
if f.mode == "r":
contents = f.read()
start = contents.find("Parameters:")
[n, start] = readPyCorrFitSingleParam(contents, "# n\t", "\n", start)
[tauD, start] = readPyCorrFitSingleParam(contents, "_diff [ms]\t", "\n", start)
[SP, start] = readPyCorrFitSingleParam(contents, "# SP\t", "\n", start)
[offset, start] = readPyCorrFitSingleParam(contents, "# offset\t", "\n", start)
start = contents.find("Fitting:", start)
[chi2, start] = readPyCorrFitSingleParam(contents, "\t", "\n", start)
[Gfitstart, start] = readPyCorrFitSingleParam(contents, "# Ival start [ms]\t", "\n", start)
[Gfitstop, start] = readPyCorrFitSingleParam(contents, "# Ival end [ms]\t", "\n", start)
outputdata.n = n
outputdata.tauD = tauD
outputdata.SP = SP
outputdata.offset = offset
outputdata.chi2 = chi2
outputdata.Gfitstart = Gfitstart
outputdata.Gfitstop = Gfitstop
# load .csv file
data = csv2array(file)
# extract data
tau = data[:,0]
G = data[:,1]
Gfit = data[:,2]
Gres = G - Gfit
outputdata.data = np.stack((tau, G, Gfit, Gres), axis=1)
return outputdata | 0dcaa26c0ef2f8270748241cbd03bc6aaa750672 | 14,018 |
def end_of_time(t):
""" Return the next hour of the passed time. e.g, 18:25:36 --> 19:00:00 """
return t + timedelta(minutes=60) - timedelta(minutes=t.minute) - timedelta(seconds=t.second) | dce1f0cde67c834580edb349e0dfbcdee0b4d171 | 14,019 |
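A worked example of the docstring's 18:25:36 -> 19:00:00 case; the snippet itself assumes `datetime` and `timedelta` are imported:
from datetime import datetime

t = datetime(2024, 5, 1, 18, 25, 36)
print(end_of_time(t))   # 2024-05-01 19:00:00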
def modf(x):
"""modf(x)
Return the fractional and integer parts of x. Both results carry the sign
of x.
"""
signx = sign(x)
absx = Abs(x)
return (signx * Mod(absx, 1), signx * floor(absx)) | 18f4e9aca22591f2960bb6ddf28fcf677bedee65 | 14,020 |
from typing import Tuple
def get_user_from_request(request, available_query_params: list) -> Tuple[User, GeneralApiResponse]:
    """
    Takes the view's request and a list of user query params that may be queried.
    Returns a user if it is the requester themselves, or if the requester may access other users.
    Returns an error response if the request cannot be completed.
"""
get_keys = list(request.GET.keys())
if len(get_keys) != 0 or (len(get_keys) > 1 and 'page' in get_keys):
if request.user.is_superuser:
if any(query_param not in available_query_params for query_param in get_keys):
                return None, GeneralApiResponse.bad_request()  # some query param in the user request is invalid
ignore_page_query_params = {key: v for key, v in request.GET.dict().items() if key not in ['page']}
users = User.objects.filter(**ignore_page_query_params) # ignora o query param "page"
if not users.exists():
                return None, GeneralApiResponse.not_found()  # no user matching the query was found
elif len(users) > 1:
return None, GeneralApiResponse.bad_request('a query retorna mais de um user')
else:
return users[0], None
else:
return None, GeneralApiResponse.unauthorized()
else:
return request.user, None | b9d0274ac5ea8e0cbc210b1f4f5e8d46398e8e6d | 14,021 |
def longest_CD(values):
"""
Return the sequence range for the longest continuous
disorder (CDl) subsequence.
"""
# Filter residues with score equal or greater than 0.5
# and store its position index
dis_res = [index for index, res in enumerate(values)
if float(res) >= 0.5]
# Initialize longest CD region
CDl = []
# Counter to store partial results of each continuous region
c = []
# Iterate over disordered residues list
for i, j in zip(dis_res, dis_res[1:]):
# Check if residues are consecutive
if j - i == 1:
# Update counter
c.append(i)
# Not consecutive
else:
# Add last residue of the interval
c.append(i)
# Update CDl
if len(c) > len(CDl):
CDl = c
# Reset counter for the next interval
c = []
return CDl | f07b74b9553c156d2d4b62e17ea02b466a16fe74 | 14,022 |
def get_read_length(filename):
""" Return the first read length of fastq file.
:param str filename: fastq file.
"""
with FastqReader(filename) as filin:
read_len = len(next(iter(filin)))
return read_len | 961af7ff12c422c68349dabee064acd465a1a090 | 14,023 |
def no_outliers_estimator(base_estimator, x, alpha=0.01):
""" Calculate base_estimator function after removal of extreme quantiles
from the sample
"""
x = np.array(x)
if len(x.shape) < 3:
x = np.expand_dims(x, -1)
low_value = np.quantile(x, alpha, axis=(0, 1))
high_value = np.quantile(x, 1 - alpha, axis=(0, 1))
result = np.zeros(x.shape[2], x.dtype)
for i in range(x.shape[2]):
x_ch = x[:, :, i]
x_ch = x_ch[(x_ch >= low_value[i]) & (x_ch <= high_value[i])]
result[i] = base_estimator(x_ch)
return result | 3c23f9cacc1108d6ecb24b690ff731e3a3554b44 | 14,024 |
from server.forms import FormNotCompleteError, FormValidationError
from typing import cast
from typing import Tuple
def error_state_to_dict(err: ErrorState) -> ErrorDict:
"""Return an ErrorDict based on the exception, string or tuple in the ErrorState.
Args:
err: ErrorState from a api error state
Returns:
An ErrorDict containing the error message a status_code and a traceback if available
"""
# Import here to prevent cyclic imports
if isinstance(err, FormValidationError):
return {
"class": type(err).__name__,
"error": str(err),
"traceback": err,
"validation_errors": err.errors, # type:ignore
"status_code": HTTPStatus.BAD_REQUEST,
}
elif isinstance(err, FormNotCompleteError):
return {
"class": type(err).__name__,
"error": str(err),
"traceback": err,
"form": err.form,
"status_code": HTTPStatus.NOT_EXTENDED,
}
elif isinstance(err, Exception):
if is_api_exception(err):
err = cast(ApiException, err)
return {
"class": type(err).__name__,
"error": err.reason,
"status_code": err.status,
"body": err.body,
"headers": "\n".join(f"{k}: {v}" for k, v in err.headers.items()),
"traceback": err,
}
return {
"class": type(err).__name__,
"error": str(err),
"traceback": show_ex(err),
}
elif isinstance(err, tuple):
cast(Tuple, err)
error, status_code = err
return {"error": str(error), "status_code": int(status_code)}
elif isinstance(err, str):
return {"error": err}
elif isinstance(err, dict) and "error" in err: # type: ignore
return err
else:
raise TypeError("ErrorState should be a tuple, exception or string") | 79cf9a971886241c8760bf0091af0c91a4d80ade | 14,025 |
from typing import Set
def english_words() -> Set[str]:
"""Return a set of english words from the nltk corpus "words".
Returns:
Set of english words.
"""
nltk_resource("corpora/words")
return set(nltk.corpus.words.words()) | 2cda38fb0026805c7792bcf45727492b09b38a89 | 14,026 |
def bipartite_matching_wrapper(a, b, score_func, symmetric=False):
"""A wrapper to `bipartite_matching()` that returns `(matches, unmatched_in_a, unmatched_in_b)`
The list of `matches` contains tuples of `(score, a_element, b_element)`. The two unmatched
lists are elements from each of the respective input lists.
"""
found_a, found_b = set(), set()
matches = []
for score, i, j in bipartite_matching(a, b, score_func, symmetric=symmetric):
matches.append((score, i, j))
found_a.add(i)
found_b.add(j)
unmatched_in_a = set(a) - found_a
unmatched_in_b = set(b) - found_b
return matches, unmatched_in_a, unmatched_in_b | 702c290b6874b595fb0249c865c5723c84d485ba | 14,027 |
def get_interface_type(interface):
"""Gets the type of interface
Args:
interface (str): full name of interface, i.e. Ethernet1/1, loopback10,
port-channel20, vlan20
Returns:
type of interface: ethernet, svi, loopback, management, portchannel,
or unknown
"""
if interface.upper().startswith('ET'):
return 'ethernet'
elif interface.upper().startswith('VL'):
return 'svi'
elif interface.upper().startswith('LO'):
return 'loopback'
elif interface.upper().startswith('MG'):
return 'management'
elif interface.upper().startswith('MA'):
return 'management'
elif interface.upper().startswith('PO'):
return 'portchannel'
else:
return 'unknown' | 8196bfa37ef25f0fa1c08577d215329ecc977c4a | 14,029 |
def create_int_feature_list(name, key, prefix="", module_dict=None):
"""Creates accessor functions for bytes feature lists.
The provided functions are has_${NAME}, get_${NAME}_size, get_${NAME}_at,
clear_${NAME}, and add_${NAME}.
example = tensorflow.train.SequenceExample()
add_image_timestamp(1000000, example)
add_image_timestamp(2000000, example)
if has_image_timestamp:
for i in range(get_image_timestamp_size(example):
timestamp = get_image_timestamp_at(i, example)
clear_image_timestamp(example)
Args:
name: the name of the feature to use in function names.
key: the key for this feature in the SequenceExample.
prefix: a prefix to append to the key in the SequenceExample
module_dict: adds the functions to the corresponding module dict.
"""
def _has(sequence_example, prefix=prefix):
return has_feature_list(key, sequence_example, prefix=prefix)
def _get_size(sequence_example, prefix=prefix):
return get_feature_list_size(key, sequence_example, prefix=prefix)
def _get_at(index, sequence_example, prefix=prefix):
return get_int_at(key, index, sequence_example, prefix=prefix)
def _clear(sequence_example, prefix=prefix):
clear_feature_list(key, sequence_example, prefix=prefix)
def _add(value, sequence_example, prefix=prefix):
add_int(key, value, sequence_example, prefix=prefix)
def _get_key(prefix=prefix):
return merge_prefix(prefix, key)
def _get_default_parser():
return tf.io.FixedLenSequenceFeature((), tf.int64)
function_dict = {
"has_" + name: _has,
"get_" + name + "_size": _get_size,
"get_" + name + "_at": _get_at,
"clear_" + name: _clear,
"add_" + name: _add,
"get_" + name + "_key": _get_key,
"get_" + name + "_default_parser": _get_default_parser,
}
add_functions_to_module(function_dict, module_dict) | 58b08f518050a67db72f0572a78f7dab5a68d468 | 14,030 |
def ROC(y_pred, y_true, positive_column = 0,draw = True):
"""
    Compute the ROC curve (TPR/FPR over thresholds 0.0 to 1.0) and optionally plot it
"""
y_pred = y_pred[:,0]
y_true = y_true[:,0]
# sort by y_pred
sort_index = np.argsort(-y_pred)
y_pred = y_pred[sort_index]
y_true = y_true[sort_index]
tprs = []
fprs = []
positive_num = (y_true == 1.0).sum()
    negative_num = len(y_true) - positive_num
for threshold in np.arange(0,1+0.1,0.1):
t = ((y_true == 1.0)& (y_pred >= threshold)).sum()
f = ((y_true == 0.0) & (y_pred >= threshold)).sum()
tprs.append(t*1.0/positive_num)
        fprs.append(f*1.0/negative_num)
if draw:
plt.plot(fprs,tprs,c='r')
plt.show()
return tprs, fprs | efeefbd570988c83f912345794cbd19e15ec67a2 | 14,031 |
def ignore_check(self, channel: discord.TextChannel, ignore_dm: bool = False, from_main: bool = False):
"""
A function that checks whether or not that channel allows command.
Args:
self: instance of the class this command calls or this can be commands.Bot
channel (discord.TextChannel): the channel the command call happened in
ignore_dm (bool): whether or not the command is being ignored in direct messages
from_main (bool): indicator for whether or not this call is from Main.py, which switches changes how self is
read
Returns:
True: if channel needs to be ignored
False: if channel is fine
"""
if ignore_dm:
if channel.type is discord.ChannelType.private:
return True
try:
if from_main:
ignore = self.get_cog("Ignores").find(channel.guild.id, channel.id)
else:
ignore = self.bot.get_cog('Ignores').find(channel.guild.id, channel.id)
except AttributeError:
return False
if ignore:
return True
return False | 284ba6432d792a3382383cf9b53a5932897b5e53 | 14,032 |
def network_count_allocated_ips(context, network_id):
"""Return the number of allocated non-reserved ips in the network."""
return IMPL.network_count_allocated_ips(context, network_id) | 33f7ce340d222c3843962e6e64a06440e5dfd526 | 14,033 |
def _parse_transform_spec( transform_spec ):
"""
Parses a transform specification into its name and parameters dictionary.
Raises ValueError if the specification is invalid, it represents an unknown
transform, or if the encoded parameters do not match the transform's expected
types.
Takes 1 argument:
transform_spec - Transform specification string. See lookup_transform() for
details.
Returns 2 values:
transform_name - Name of the specified transform.
transform_parameters - Dictionary of parameters for the specified transform.
Dictionary values are cast to the types expected
the transform.
"""
try:
# break the "<name>:<parameters>" string. make sure we don't break
# the <parameters> into multiple components so it can contain colons
# in the (key, value) pairs.
(transform_name,
transform_parameters_spec) = transform_spec.split( ":",
maxsplit=1 )
except ValueError:
raise ValueError( "Failed to get a transform name and parameters "
"specification from '{:s}'.".format(
transform_spec ) )
# make sure this is a known transform.
if transform_name not in _transform_map:
raise ValueError( "Unknown transform '{:s}'!".format(
transform_name ) )
# get the associated parameter parser for this transform.
_, parameter_parser = _transform_map[transform_name]
try:
# split the remaining <parameters> into (key, value) pairs. each
# (key, value) set is colon-delimited, and each set equal
# sign-delimited.
#
# e.g. "parameter1=value1:parameter2=value2a,value2b,value2c"
#
transform_parameters = dict( map( lambda key_value: key_value.split( "=" ),
transform_parameters_spec.split( ":" ) ) )
# map individual parameters to their expected data types.
transform_parameters = parameter_parser( transform_parameters )
except ValueError as e:
raise ValueError( "<parameters> -> (<key>, <value>) ({:s})".format(
str( e ) ) )
return (transform_name, transform_parameters) | b914d96d9ad1e8da3deb10f1c6500c2ee58b4928 | 14,034 |
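A usage sketch under the assumption that _transform_map maps a transform name to a (transform, parameter_parser) tuple, as the lookup above implies; the "scale" entry below is invented purely for illustration.

def _scale_parameter_parser(params):
    # cast the expected keys to their declared types
    return {"factor": float(params["factor"])}

_transform_map["scale"] = (None, _scale_parameter_parser)

name, params = _parse_transform_spec("scale:factor=2.5")
print(name, params)  # -> scale {'factor': 2.5}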
from typing import Union
from typing import List
def parse_text_multiline(data: Union[str, List[str]]) -> str:
"""Parse the text in multiline mode."""
if isinstance(data, str):
return data
    elif isinstance(data, list) and all(isinstance(item, str) for item in data):
return '\n'.join(data)
else:
raise ValueError(data) | ba8e50422a89de14a464d4917138c5faa051124d | 14,035 |
def _set_user_permissions_for_volumes(users, volumes):
"""
Returns the section of the user data script to create a Linux
user group and grant the group permission to access the mounted
volumes on the EC2 instance.
"""
group_name = 'volumes'
user_data_script_section = f"""
groupadd {group_name}
"""
for user in users:
user_data_script_section += f"""
usermod -a -G {group_name} {user.login}
"""
for volume in volumes:
user_data_script_section += f"""
chgrp -R {group_name} {volume.mount}
chmod -R 2775 {volume.mount}
"""
return user_data_script_section | 2d262a52cfa2f3e142da3dd7767dcc6cff14c929 | 14,037 |
def cached_examples():
"""This view should be cached for 60 sec"""
examples = ExampleModel.query()
return render_template('list_examples_cached.html', examples=examples) | f598589967f82daaf3c7e9cb88f7679786e5bf18 | 14,038 |
from typing import Callable
import datasets
def librispeech_adversarial(
split_type: str = "adversarial",
epochs: int = 1,
batch_size: int = 1,
dataset_dir: str = None,
preprocessing_fn: Callable = None,
cache_dataset: bool = True,
framework: str = "numpy",
clean_key: str = "clean",
adversarial_key: str = "adversarial",
) -> datasets.ArmoryDataGenerator:
"""
Adversarial dataset based on Librispeech-dev-clean using Universal
Perturbation with PGD.
split_type - one of ("adversarial")
returns:
Generator
"""
if clean_key != "clean":
raise ValueError(f"{clean_key} != 'clean'")
if adversarial_key != "adversarial":
raise ValueError(f"{adversarial_key} != 'adversarial'")
return datasets._generator_from_tfds(
"librispeech_adversarial:1.0.0",
split_type=split_type,
batch_size=batch_size,
epochs=epochs,
dataset_dir=dataset_dir,
preprocessing_fn=preprocessing_fn,
as_supervised=False,
supervised_xy_keys=("audio", "label"),
variable_length=bool(batch_size > 1),
cache_dataset=cache_dataset,
framework=framework,
lambda_map=lambda x, y: ((x[clean_key], x[adversarial_key]), y),
) | 2ab2da4f56194dada3cd361371ef32b1f2fd6194 | 14,040 |
def search4letters(phrase, letters='aeiou'):
"""
    Return the set of characters from 'letters' that are found in 'phrase'.
    :param phrase: phrase in which the search will be made
    :param letters: set of letters to search for in the phrase
    :return: a set of the letters found
"""
return set(letters).intersection(set(phrase)) | e58d0863aa090ac3644cd7bf26e783efe2956d35 | 14,041 |
import gc
from uuid import UUID

import pandas as pd
def merge_flights(prev_flights_filename, next_flights_filename, ids_df, log):
"""
Gets the next days flights that are the continuation of the previous days
flights and merges them with the previous days flights.
It writes the new next days and previous days flights to files prepended
with new.
    It returns True if successful, False otherwise.
"""
new_items_df = get_next_day_items(next_flights_filename, ids_df, log)
# free memory used by get_next_day_items
gc.collect()
prev_flights_df = pd.DataFrame()
try:
prev_flights_df = pd.read_csv(prev_flights_filename,
index_col='FLIGHT_ID',
converters={'FLIGHT_ID': lambda x: UUID(x)},
memory_map=True)
log.info('%s read ok', prev_flights_filename)
except EnvironmentError:
log.error('could not read file: %s', prev_flights_filename)
return False
# merge next days flight data with the previous days flight data
update_flight_data(prev_flights_df, new_items_df)
# Output the new previous flights
new_prev_flights_filename = 'new_' + prev_flights_filename
try:
is_bz2 = has_bz2_extension(prev_flights_filename)
if is_bz2:
new_prev_flights_filename = new_prev_flights_filename[:-BZ2_LENGTH]
prev_flights_df.to_csv(new_prev_flights_filename, index=True,
date_format=ISO8601_DATETIME_FORMAT)
log.info('written file: %s', new_prev_flights_filename)
except EnvironmentError:
log.error('could not write file: %s', new_prev_flights_filename)
return False
return True | 6d0cec2c8cc66d04facdde01e24ce0b3aa57dc55 | 14,043 |
def findClusters( peaks, thresh ):
"""Since the peaks are in sequence, this method follows a very simplistic
approach. For each peak it checks its distance from the previous peak. If
it is less than threshold, it clusters that peak with the previous one.
Note that in each of the clusters, input order is maintained."""
clusters, cluster = [], []
cluster.append(peaks[0])
for peak in peaks[1:]:
if euclideanDistance( cluster[-1], peak ) < thresh:
cluster.append( peak )
else:
clusters.append(cluster)
cluster = [peak]
clusters.append( cluster )
print( clusters )
return clusters | f74e504557e7c7e796d29290dccabe043ac70dc0 | 14,044 |
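findClusters() depends on a euclideanDistance() helper that is not part of this snippet; below is a minimal sketch of it, assuming peaks are coordinate tuples, plus a small example.

import math

def euclideanDistance(p, q):
    # straight-line distance between two equal-length coordinate tuples
    return math.sqrt(sum((a - b) ** 2 for a, b in zip(p, q)))

peaks = [(0, 0), (1, 1), (10, 10), (11, 10)]
clusters = findClusters(peaks, thresh=3.0)
# -> [[(0, 0), (1, 1)], [(10, 10), (11, 10)]]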
import numpy as np
from scipy.optimize import minimize


def acq_max_single_seed(ac, gp, y_max, bounds):
"""
A function to find the maximum of the acquisition function using
the 'L-BFGS-B' method.
Input Parameters
----------
ac: The acquisition function object that return its point-wise value.
gp: A gaussian process fitted to the relevant data.
y_max: The current maximum known value of the target function.
bounds: The variables bounds to limit the search of the acq max.
Returns
-------
:return: x_max, The arg max of the acquisition function.
"""
# Start with the lower bound as the argmax
x_max = bounds[:, 0]
#max_acq = None
dim=bounds.shape[0]
x_tries = np.random.uniform(bounds[:, 0], bounds[:, 1],size=(50*dim, dim))
# evaluate
y_tries=ac(x_tries,gp=gp, y_max=y_max)
#find x optimal for init
idx_max=np.argmax(y_tries)
x_init_max=x_tries[idx_max]
#x_try=np.array(bounds[:, 0])
# Find the minimum of minus the acquisition function
res = minimize(lambda x: -ac(x.reshape(1, -1), gp=gp, y_max=y_max),
x_init_max.reshape(1, -1),
bounds=bounds,
method="L-BFGS-B")
x_max = res.x
#max_acq = -res.fun
# Clip output to make sure it lies within the bounds. Due to floating
# point technicalities this is not always the case.
return np.clip(x_max, bounds[:, 0], bounds[:, 1]) | 5a705e15e41be8063f476a40b1cfae9385b98af7 | 14,047 |
import pandas as pd


def futures_pig_rank(symbol: str = "外三元") -> pd.DataFrame:
    """
    Price ranking list for pig-farming related commodities.
    https://zhujia.zhuwang.cc/lists.shtml
    :param symbol: choice of {"外三元", "内三元", "土杂猪", "玉米", "豆粕"}
    :type symbol: str
    :return: price ranking table
    :rtype: pandas.DataFrame
    """
    url_map = {
        "外三元": "https://zhujia.zhuwang.cc/lists.shtml",
        "内三元": "https://zhujia.zhuwang.cc/lists-1.shtml",
        "土杂猪": "https://zhujia.zhuwang.cc/lists-2.shtml",
        "玉米": "https://zhujia.zhuwang.cc/lists-3.shtml",
        "豆粕": "https://zhujia.zhuwang.cc/lists-4.shtml",
    }
    if symbol not in url_map:
        return None
    temp_df = pd.read_html(url_map[symbol])[0]
    temp_df.columns = [
        '排名',
        '品种',
        '省份',
        '价格-公斤',
        '价格-斤',
    ]
    temp_df['价格-公斤'] = temp_df['价格-公斤'].str.strip("元")
    temp_df['价格-斤'] = temp_df['价格-斤'].str.strip("元")
    temp_df['价格-公斤'] = pd.to_numeric(temp_df['价格-公斤'])
    temp_df['价格-斤'] = pd.to_numeric(temp_df['价格-斤'])
    return temp_df | 9afc155021afc2b8ffbef4a0e778f1ab6360219f | 14,048
import math
def psubl_T(T):
    """
    EQ 6 / Sublimation pressure as a function of temperature T in kelvin,
    returned in the same units as p_star.
    """
    T_star = 273.16
    p_star = 611.657E-6
    a = (-0.212144006E2, 0.273203819E2, -0.610598130E1)
    b = (0.333333333E-2, 0.120666667E1, 0.170333333E1)
    theta = T / T_star
    total = 0
    for i in range(0, 3):
        total += a[i] * theta ** b[i]
    pi_subl = math.exp((theta ** -1) * total)
    return pi_subl * p_star | 0e3f875fc2d249c78a5db6268dcc0df31213a7ff | 14,049
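A quick sanity check: at the triple-point temperature theta = 1, the three series terms sum to approximately zero, so the function should return essentially p_star.

# At T = 273.16 K the exponent collapses to a[0] + a[1] + a[2], which is ~0,
# so the result reproduces the triple-point pressure p_star.
print(psubl_T(273.16))  # ≈ 6.11657e-04, i.e. p_star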
def map_key_values(f, dct):
"""
Like map_with_obj but expects a key value pair returned from f and uses it to form a new dict
:param f: Called with a key and value
:param dct:
:return:
"""
return from_pairs(values(map_with_obj(f, dct))) | 0918ff4ff9ab994b10fe2543dce305f99b7278fb | 14,050 |
def plot_ppc(
ax,
length_plotters,
rows,
cols,
figsize,
animated,
obs_plotters,
pp_plotters,
posterior_predictive,
pp_sample_ix,
kind,
alpha,
linewidth,
mean,
xt_labelsize,
ax_labelsize,
jitter,
total_pp_samples,
legend,
markersize,
animation_kwargs,
num_pp_samples,
):
"""Matplotlib ppc plot."""
if ax is None:
fig, axes = _create_axes_grid(length_plotters, rows, cols, figsize=figsize)
else:
axes = np.ravel(ax)
if len(axes) != length_plotters:
raise ValueError(
"Found {} variables to plot but {} axes instances. They must be equal.".format(
length_plotters, len(axes)
)
)
if animated:
fig = axes[0].get_figure()
if not all([ax.get_figure() is fig for ax in axes]):
raise ValueError("All axes must be on the same figure for animation to work")
for i, ax_i in enumerate(axes):
var_name, selection, obs_vals = obs_plotters[i]
pp_var_name, _, pp_vals = pp_plotters[i]
dtype = posterior_predictive[pp_var_name].dtype.kind
# flatten non-specified dimensions
obs_vals = obs_vals.flatten()
pp_vals = pp_vals.reshape(total_pp_samples, -1)
pp_sampled_vals = pp_vals[pp_sample_ix]
if kind == "kde":
plot_kwargs = {"color": "C5", "alpha": alpha, "linewidth": 0.5 * linewidth}
if dtype == "i":
plot_kwargs["drawstyle"] = "steps-pre"
ax_i.plot([], color="C5", label="Posterior predictive {}".format(pp_var_name))
if dtype == "f":
plot_kde(
obs_vals,
label="Observed {}".format(var_name),
plot_kwargs={"color": "k", "linewidth": linewidth, "zorder": 3},
fill_kwargs={"alpha": 0},
ax=ax_i,
legend=legend,
)
else:
bins = get_bins(obs_vals)
_, hist, bin_edges = histogram(obs_vals, bins=bins)
hist = np.concatenate((hist[:1], hist))
ax_i.plot(
bin_edges,
hist,
label="Observed {}".format(var_name),
color="k",
linewidth=linewidth,
zorder=3,
drawstyle=plot_kwargs["drawstyle"],
)
pp_densities = []
pp_xs = []
for vals in pp_sampled_vals:
vals = np.array([vals]).flatten()
if dtype == "f":
pp_density, lower, upper = _fast_kde(vals)
pp_x = np.linspace(lower, upper, len(pp_density))
pp_densities.append(pp_density)
pp_xs.append(pp_x)
else:
bins = get_bins(vals)
_, hist, bin_edges = histogram(vals, bins=bins)
hist = np.concatenate((hist[:1], hist))
pp_densities.append(hist)
pp_xs.append(bin_edges)
if animated:
animate, init = _set_animation(
pp_sampled_vals, ax_i, dtype=dtype, kind=kind, plot_kwargs=plot_kwargs
)
else:
if dtype == "f":
ax_i.plot(np.transpose(pp_xs), np.transpose(pp_densities), **plot_kwargs)
else:
for x_s, y_s in zip(pp_xs, pp_densities):
ax_i.plot(x_s, y_s, **plot_kwargs)
if mean:
if dtype == "f":
rep = len(pp_densities)
len_density = len(pp_densities[0])
new_x = np.linspace(np.min(pp_xs), np.max(pp_xs), len_density)
new_d = np.zeros((rep, len_density))
bins = np.digitize(pp_xs, new_x, right=True)
new_x -= (new_x[1] - new_x[0]) / 2
for irep in range(rep):
new_d[irep][bins[irep]] = pp_densities[irep]
ax_i.plot(
new_x,
new_d.mean(0),
color="C0",
linestyle="--",
linewidth=linewidth,
zorder=2,
label="Posterior predictive mean {}".format(pp_var_name),
)
else:
vals = pp_vals.flatten()
bins = get_bins(vals)
_, hist, bin_edges = histogram(vals, bins=bins)
hist = np.concatenate((hist[:1], hist))
ax_i.plot(
bin_edges,
hist,
color="C0",
linewidth=linewidth,
label="Posterior predictive mean {}".format(pp_var_name),
zorder=2,
linestyle="--",
drawstyle=plot_kwargs["drawstyle"],
)
ax_i.tick_params(labelsize=xt_labelsize)
ax_i.set_yticks([])
elif kind == "cumulative":
drawstyle = "default" if dtype == "f" else "steps-pre"
ax_i.plot(
*_empirical_cdf(obs_vals),
color="k",
linewidth=linewidth,
label="Observed {}".format(var_name),
drawstyle=drawstyle,
zorder=3
)
if animated:
animate, init = _set_animation(
pp_sampled_vals,
ax_i,
kind=kind,
alpha=alpha,
drawstyle=drawstyle,
linewidth=linewidth,
)
else:
pp_densities = np.empty((2 * len(pp_sampled_vals), pp_sampled_vals[0].size))
for idx, vals in enumerate(pp_sampled_vals):
vals = np.array([vals]).flatten()
pp_x, pp_density = _empirical_cdf(vals)
pp_densities[2 * idx] = pp_x
pp_densities[2 * idx + 1] = pp_density
ax_i.plot(
*pp_densities, alpha=alpha, color="C5", drawstyle=drawstyle, linewidth=linewidth
)
ax_i.plot([], color="C5", label="Posterior predictive {}".format(pp_var_name))
if mean:
ax_i.plot(
*_empirical_cdf(pp_vals.flatten()),
color="C0",
linestyle="--",
linewidth=linewidth,
drawstyle=drawstyle,
label="Posterior predictive mean {}".format(pp_var_name)
)
ax_i.set_yticks([0, 0.5, 1])
elif kind == "scatter":
if mean:
if dtype == "f":
plot_kde(
pp_vals.flatten(),
plot_kwargs={
"color": "C0",
"linestyle": "--",
"linewidth": linewidth,
"zorder": 3,
},
label="Posterior predictive mean {}".format(pp_var_name),
ax=ax_i,
legend=legend,
)
else:
vals = pp_vals.flatten()
bins = get_bins(vals)
_, hist, bin_edges = histogram(vals, bins=bins)
hist = np.concatenate((hist[:1], hist))
ax_i.plot(
bin_edges,
hist,
color="C0",
linewidth=linewidth,
label="Posterior predictive mean {}".format(pp_var_name),
zorder=3,
linestyle="--",
drawstyle="steps-pre",
)
_, limit = ax_i.get_ylim()
limit *= 1.05
y_rows = np.linspace(0, limit, num_pp_samples + 1)
jitter_scale = y_rows[1] - y_rows[0]
scale_low = 0
scale_high = jitter_scale * jitter
obs_yvals = np.zeros_like(obs_vals, dtype=np.float64)
if jitter:
obs_yvals += np.random.uniform(low=scale_low, high=scale_high, size=len(obs_vals))
ax_i.plot(
obs_vals,
obs_yvals,
"o",
color="C0",
markersize=markersize,
alpha=alpha,
label="Observed {}".format(var_name),
zorder=4,
)
if animated:
animate, init = _set_animation(
pp_sampled_vals,
ax_i,
kind=kind,
height=y_rows.mean() * 0.5,
markersize=markersize,
)
else:
for vals, y in zip(pp_sampled_vals, y_rows[1:]):
vals = np.ravel(vals)
yvals = np.full_like(vals, y, dtype=np.float64)
if jitter:
yvals += np.random.uniform(low=scale_low, high=scale_high, size=len(vals))
ax_i.plot(
vals, yvals, "o", zorder=2, color="C5", markersize=markersize, alpha=alpha
)
ax_i.plot([], "C5o", label="Posterior predictive {}".format(pp_var_name))
ax_i.set_yticks([])
if var_name != pp_var_name:
xlabel = "{} / {}".format(var_name, pp_var_name)
else:
xlabel = var_name
ax_i.set_xlabel(make_label(xlabel, selection), fontsize=ax_labelsize)
if legend:
if i == 0:
ax_i.legend(fontsize=xt_labelsize * 0.75)
else:
ax_i.legend([])
if animated:
ani = animation.FuncAnimation(
fig, animate, np.arange(0, num_pp_samples), init_func=init, **animation_kwargs
)
return axes, ani
else:
return axes | 83d01e6b9f9f170b9e8dc2ff3cf95916106196c5 | 14,051 |
import importlib
def load_module(name):
"""Load the named module without registering it in ``sys.modules``.
Parameters
----------
name : string
Module name
Returns
-------
mod : module
Loaded module
"""
spec = importlib.util.find_spec(name)
mod = importlib.util.module_from_spec(spec)
mod.__spec__ = spec
mod.__loader__ = spec.loader
spec.loader.exec_module(mod)
return mod | 762c99efcc17f9f1d1659cdae52989c9cfa9423a | 14,052 |
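A small usage example; the loaded module object is private and does not touch sys.modules.

import sys

mod = load_module("json")
print(mod.dumps({"ok": True}))          # '{"ok": true}'
print(mod is sys.modules.get("json"))   # False: it is a fresh module object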
import numpy as np
import tensorflow as tf


def make_no_graph_input_fn(graph_data, args, treatments, outcomes, filter_test=False):
"""
A dataset w/ all the label processing, but no graph structure.
Used at evaluation and prediction time
"""
def input_fn():
vertex_dataset = tf.data.Dataset.from_tensor_slices(
({'vertex_index': np.expand_dims(np.array(range(graph_data.num_vertices)), 1),
'is_positive': np.expand_dims(np.array(range(graph_data.num_vertices)), 1)},))
data_processing = adapters.compose(
adapters.append_vertex_labels(treatments, 'treatment'),
adapters.append_vertex_labels(outcomes, 'outcome'),
adapters.make_split_vertex_labels(
graph_data.num_vertices, args.proportion_censored,
np.random.RandomState(args.seed)),
adapters.format_features_labels())
dataset = vertex_dataset.map(data_processing, 8)
if filter_test:
def filter_test_fn(features, labels):
return tf.equal(tf.squeeze(features['in_test']), 1)
dataset = dataset.filter(filter_test_fn)
batch_size = args.batch_size
dataset = dataset.batch(batch_size=batch_size, drop_remainder=False)
return dataset
return input_fn | 8526a64b55608f986ef4b000b2cb75a99160e1a0 | 14,053 |
import numpy as np
import torch
from torch import autograd
from torch.autograd import Variable

# Module-level tensor alias assumed by the function below (a common WGAN-GP pattern).
Tensor = torch.cuda.FloatTensor if torch.cuda.is_available() else torch.FloatTensor
def compute_gradient_penalty(D, real_samples, fake_samples):
"""Calculates the gradient penalty loss for WGAN GP"""
# Random weight term for interpolation between real and fake samples
alpha = torch.tensor(np.random.random((real_samples.size(0), 1, 1, 1,1)), dtype = real_samples.dtype, device = real_samples.device)
# Get random interpolation between real and fake samples
#print(alpha.shape, fake_samples.shape)
interpolates = (alpha * real_samples + ((1 - alpha) * fake_samples)).requires_grad_(True)
d_interpolates = D(interpolates)
fake = Variable(Tensor(d_interpolates.shape[0], 1).fill_(1.0), requires_grad=False).view(-1)
#print(d_interpolates.shape, interpolates.shape, fake.shape)
# Get gradient w.r.t. interpolates
gradients = autograd.grad(
outputs=d_interpolates,
inputs=interpolates,
grad_outputs=fake,
create_graph=True,
retain_graph=True,
only_inputs=True,
)[0]
gradients = gradients.view(gradients.size(0), -1)
gradient_penalty = ((gradients.norm(2, dim=1) - 1) ** 2).mean()
return gradient_penalty | 110e4854284be694c0813fd5fc71d2ff51d3b6d8 | 14,054 |
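A hypothetical call site inside a WGAN-GP discriminator update; D, real_imgs, fake_imgs and lambda_gp are placeholders for the surrounding training code.

gradient_penalty = compute_gradient_penalty(D, real_imgs.data, fake_imgs.data)
d_loss = (-torch.mean(D(real_imgs)) + torch.mean(D(fake_imgs))
          + lambda_gp * gradient_penalty)
d_loss.backward()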
def tab(num):
"""
Get tab indentation.
Parameters
----------
num : int
indentation depth
"""
return num * 4 * " " | 39311a9f28aa70f105271432916745dddeb0b46a | 14,056 |
def merge_sort(lst):
"""Sorts the input list into ascending order."""
if len(lst) < 2:
return lst
half = len(lst) // 2
# This variant of merge sort uses O(N * log N) memory, since list slicing in Python 3 creates a copy.
return merge(merge_sort(lst[:half]), merge_sort(lst[half:])) | e8cada6428fde5aa430497c3c562dc4361c11c1e | 14,057 |
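merge_sort() relies on a merge() helper that is not shown in this snippet; here is a minimal sketch of the usual two-pointer merge, followed by a small check.

def merge(left, right):
    # Merge two already-sorted lists into one sorted list.
    result = []
    i = j = 0
    while i < len(left) and j < len(right):
        if left[i] <= right[j]:
            result.append(left[i])
            i += 1
        else:
            result.append(right[j])
            j += 1
    result.extend(left[i:])
    result.extend(right[j:])
    return result

print(merge_sort([5, 2, 4, 1, 3]))  # [1, 2, 3, 4, 5]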
from typing import Optional
def get_top_experts_per_item_dispatcher(gates: Array, name: str,
num_selected_experts: int,
batch_priority: bool,
capacity: Optional[int] = None,
capacity_factor: Optional[float] = None,
**dispatcher_kwargs) -> BaseDispatcher:
"""Returns a dispatcher implementing Top-Experts-Per-Item routing.
For each item, the `num_selected_experts` experts with the largest gating
score are selected in a greedy fashion. However, because each expert has a
fixed `capacity`, if more items than `capacity` select a given expert some of
the assignments will be ignored. All top-1 choices have priority over top-2
choices and so on. In addition, the choices that are ignored also depend on
`batch_priority`. If it is False, the "Vanilla" algorithm is used, meaning
that items in earlier positions of the array have priority. If it is True, the
"Batch Priority Routing" algorithm (see https://arxiv.org/abs/2106.05974) is
used, which gives more priority to the items whose largest score is greater.
Args:
gates: (S, E) array with the gating values for each (item, expert).
These values will also be used as combine_weights for the selected pairs.
name: String with the type of dispatcher to use (supported values are
"einsum" and "indices").
num_selected_experts: Maximum number of experts to select per each item (K).
batch_priority: Whether to use batch priority routing or not.
capacity: If given, maximum number of items processed by each expert.
Either this or `capacity_factor` must be given.
capacity_factor: If given, sets the `capacity` to this factor of S * K / E.
Either this or `capacity` must be given.
**dispatcher_kwargs: Additional arguments for the dispatcher object.
Returns:
A dispatcher.
"""
if (capacity is None) == (capacity_factor is None):
raise ValueError(
"You must specify either 'capacity' or 'capacity_factor', and not both."
f" Current values are capacity = {capacity!r}, "
f"capacity_factor = {capacity_factor!r}")
if not capacity:
group_size, num_experts = gates.shape
capacity = _compute_capacity(
# Target number of tokens to split among the `num_experts` experts.
num_tokens=group_size * num_selected_experts,
num_experts=num_experts,
capacity_factor=capacity_factor)
fn_map = {
"einsum": _get_top_experts_per_item_einsum_dispatcher,
"indices": _get_top_experts_per_item_expert_indices_dispatcher,
}
if name not in fn_map:
raise ValueError(f"Unknown dispatcher type: {name!r}")
return fn_map[name](gates, num_selected_experts, capacity, batch_priority,
**dispatcher_kwargs) | 94e090bc3de59fd03903151fa2e34b2daca50198 | 14,058 |
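_compute_capacity() is defined elsewhere in the module; based on the docstring ("sets the capacity to this factor of S * K / E"), a plausible reconstruction and a worked example follow. This is a sketch, not the library's actual implementation.

import math

def _compute_capacity(num_tokens: int, num_experts: int,
                      capacity_factor: float) -> int:
    # Hypothetical sketch: split the S * K assignments evenly over the
    # experts and scale by the capacity factor, rounding up.
    return int(math.ceil(capacity_factor * num_tokens / num_experts))

# e.g. S = 1024 items, K = 2 selected experts, E = 32 experts, factor = 1.25:
# capacity = ceil(1.25 * 1024 * 2 / 32) = 80 slots per expert.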
def find_files_list(*args, **kwargs):
""" Returns a list of find_files generator"""
return list(find_files(*args, **kwargs)) | b51595dbc75308c583b75c3151c41ea84aafaeaf | 14,060 |
def bool_from_string(subject, strict=False, default=False):
"""Interpret a subject as a boolean.
A subject can be a boolean, a string or an integer. Boolean type value
will be returned directly, otherwise the subject will be converted to
a string. A case-insensitive match is performed such that strings
matching 't','true', 'on', 'y', 'yes', or '1' are considered True and,
when `strict=False`, anything else returns the value specified by
'default'.
Useful for JSON-decoded stuff and config file parsing.
If `strict=True`, unrecognized values, including None, will raise a
ValueError which is useful when parsing values passed in from an API call.
Strings yielding False are 'f', 'false', 'off', 'n', 'no', or '0'.
"""
if isinstance(subject, bool):
return subject
if not isinstance(subject, str):
subject = str(subject)
lowered = subject.strip().lower()
if lowered in TRUE_STRINGS:
return True
elif lowered in FALSE_STRINGS:
return False
elif strict:
acceptable = ", ".join(
"'%s'" % s for s in sorted(TRUE_STRINGS + FALSE_STRINGS))
msg = ("Unrecognized value '%(val)s', acceptable values are:"
" %(acceptable)s") % {"val": subject,
"acceptable": acceptable}
raise ValueError(msg)
else:
return default | b3f7728eb5fdd4c660144279200daabd25034bf3 | 14,061 |
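A few illustrative calls, assuming TRUE_STRINGS and FALSE_STRINGS are module-level tuples holding the values listed in the docstring.

print(bool_from_string("YES"))               # True
print(bool_from_string("off"))               # False
print(bool_from_string("maybe"))             # False (unrecognized, returns default)
print(bool_from_string(None, default=True))  # True  (str(None) is unrecognized)
# bool_from_string("maybe", strict=True) raises ValueError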
import logging
def get_msg_timeout(options):
"""Reads the configured sbd message timeout from each device.
Key arguments:
options -- options dictionary
Return Value:
msg_timeout (integer, seconds)
"""
# get the defined msg_timeout
msg_timeout = -1 # default sbd msg timeout
cmd = generate_sbd_command(options, "dump")
(return_code, out, err) = run_command(options, cmd)
for line in out.split("\n"):
if len(line) == 0:
continue
if "msgwait" in line:
tmp_msg_timeout = int(line.split(':')[1])
if -1 != msg_timeout and tmp_msg_timeout != msg_timeout:
                logging.warning(
                    "sbd message timeouts differ in different devices")
# we only save the highest timeout
if tmp_msg_timeout > msg_timeout:
msg_timeout = tmp_msg_timeout
return msg_timeout | 4b2df955ac796da38b5b9fa176477fec3c0470a2 | 14,063 |
import requests
import logging
def odata_getone(url, headers):
"""
Get a single object from Odata
"""
r = requests.get(url, headers=headers)
if not r.ok:
logging.warning(f"Fetch url {url} hit {r.status_code}")
return None
rjson = r.json()
if 'error' in rjson:
logging.warning(f"Fetching of {url} returned error {r.text}")
return None
return rjson | 5d6c668845132d821f175a2e8c1a924492a9eb2f | 14,064 |
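A hypothetical call against an OData endpoint; the URL and bearer token below are placeholders.

item = odata_getone(
    "https://example.com/odata/v4/Orders(1)",
    headers={"Authorization": "Bearer <token>"},
)
if item is not None:
    print(item.get("OrderID"))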
import json

import tensorflow as tf
def _tokenizer_from_json(json_string):
"""Parses a JSON tokenizer configuration file and returns a
tokenizer instance.
# Arguments
json_string: JSON string encoding a tokenizer configuration.
# Returns
A Keras Tokenizer instance
"""
tokenizer_config = json.loads(json_string)
config = tokenizer_config.get('config')
word_counts = json.loads(config.pop('word_counts'))
word_docs = json.loads(config.pop('word_docs'))
index_docs = json.loads(config.pop('index_docs'))
# Integer indexing gets converted to strings with json.dumps()
index_docs = {int(k): v for k, v in index_docs.items()}
index_word = json.loads(config.pop('index_word'))
index_word = {int(k): v for k, v in index_word.items()}
word_index = json.loads(config.pop('word_index'))
tokenizer = tf.keras.preprocessing.text.Tokenizer(**config)
tokenizer.word_counts = word_counts
tokenizer.word_docs = word_docs
tokenizer.index_docs = index_docs
tokenizer.word_index = word_index
tokenizer.index_word = index_word
return tokenizer | 665485d9faad1352927879e81c381dd81b77b5c5 | 14,065 |
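A round-trip sketch using the Keras tokenizer's built-in to_json() serialization.

original = tf.keras.preprocessing.text.Tokenizer(num_words=100)
original.fit_on_texts(["the quick brown fox", "the lazy dog"])

restored = _tokenizer_from_json(original.to_json())
print(restored.texts_to_sequences(["the fox"]))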
from typing import List
from pathlib import Path
def get_all_pip_requirements_files() -> List[Path]:
"""
    If the root-level hi-ml directory is available (e.g. it has been installed as a submodule or
    downloaded directly into a parent repo) then we must add its pip requirements to any environment
    definition. This function returns a list of the necessary pip requirements files. If the hi-ml
    root directory does not exist (e.g. hi-ml has been installed as a pip package), this is not necessary
    and so this function returns an empty list.
    :return: A list of pip requirements files in the hi-ml and hi-ml-azure packages if relevant,
or else an empty list
"""
files = []
if paths.is_himl_used_from_git_repo():
git_root = paths.git_repo_root_folder()
for folder in [Path("hi-ml") / "run_requirements.txt", Path("hi-ml-azure") / "run_requirements.txt"]:
files.append(git_root / folder)
return files | 7ce5a327af6961ad23555ba5334246b75d8bd782 | 14,066 |
import logging

from datasets import load_dataset

logger = logging.getLogger(__name__)


def load_data(dataset_name: str, split: str) -> object:
    """
    Load the requested split of a dataset from the Hugging Face datasets library.
    Parameters
    ----------
    dataset_name : str
        name of the dataset to be downloaded.
    split : str
        type of split (train or test).
    Returns
    -------
    object
        the loaded dataset split.
    """
data = load_dataset(dataset_name, split=split)
logger.info(split + " dataset downloaded!")
return data | f6dc374d8c12fa74b9f390a1766af369791bc3b2 | 14,067 |
import numpy as np


def horizontal_south_link_neighbor(shape, horizontal_ids, bad_index_value=-1):
"""ID of south horizontal link neighbor.
Parameters
----------
shape : tuple of int
Shape of grid of nodes.
horizontal_ids : array of int
Array of all horizontal link ids *must be of len(horizontal_links)*.
bad_index_value: int, optional
Value assigned to inactive indicies in the array.
Returns
-------
ndarray :
Link IDs of south horizontal neighbor links. Length of
number_of_horizontal_links.
Examples
--------
The following example uses this grid::
*--27-->*--28-->*--29-->*--30-->*
*--18-->*--19-->*--20-->*--21-->*
*---9-->*--10-->*--11-->*--12-->*
*---0-->*---1-->*---2-->*---3-->*
.. note::
Only horizontal links are shown. When no neighbor is found,
bad_index_value is returned.
``*`` indicates nodes
Numeric values correspond to the horizontal IDs.
>>> from landlab import RasterModelGrid
    >>> from landlab.components.overland_flow._links import (horizontal_link_ids,
    ...     horizontal_south_link_neighbor)
>>> rmg = RasterModelGrid((4, 5))
>>> horizontal_links = horizontal_link_ids(rmg.shape).flatten()
>>> horizontal_south_link_neighbor(rmg.shape, horizontal_links)
array([-1, -1, -1, -1, 0, 1, 2, 3, 9, 10, 11, 12, 18, 19, 20, 21])
"""
links = np.roll(horizontal_ids.reshape((shape[0], shape[1] - 1)), 1, axis=0)
links[0, :] = bad_index_value
return links.reshape(-1) | 413fdd5a4af8a0e77b0c3ab191bac60f2ba2cc26 | 14,068 |
def _get_output(algorithm, iport=0, iconnection=0, oport=0, active_scalar=None,
active_scalar_field='point'):
"""A helper to get the algorithm's output and copy input's vtki meta info"""
ido = algorithm.GetInputDataObject(iport, iconnection)
data = wrap(algorithm.GetOutputDataObject(oport))
data.copy_meta_from(ido)
if active_scalar is not None:
data.set_active_scalar(active_scalar, preference=active_scalar_field)
return data | dd70cbb1ee6c2d6ed085fc589c24e88fc62a17ab | 14,069 |