content (stringlengths 35–762k) | sha1 (stringlengths 40) | id (int64, 0–3.66M) |
---|---|---|
def get_exploration_summary_from_model(exp_summary_model):
"""Returns an ExplorationSummary domain object.
Args:
exp_summary_model: ExplorationSummary. An ExplorationSummary model
instance.
Returns:
ExplorationSummary. The summary domain object corresponding to the
given exploration summary model.
"""
return exp_domain.ExplorationSummary(
exp_summary_model.id, exp_summary_model.title,
exp_summary_model.category, exp_summary_model.objective,
exp_summary_model.language_code, exp_summary_model.tags,
exp_summary_model.ratings, exp_summary_model.scaled_average_rating,
exp_summary_model.status, exp_summary_model.community_owned,
exp_summary_model.owner_ids, exp_summary_model.editor_ids,
exp_summary_model.voice_artist_ids, exp_summary_model.viewer_ids,
exp_summary_model.contributor_ids,
exp_summary_model.contributors_summary, exp_summary_model.version,
exp_summary_model.exploration_model_created_on,
exp_summary_model.exploration_model_last_updated,
exp_summary_model.first_published_msec
) | c6561670f976e28a3869eb89c4be3ba884808da0 | 13,616 |
def get_service(api_name, api_version, scope, key_file_location,
service_account_email):
"""Get a service that communicates to a Google API.
Args:
api_name: The name of the api to connect to.
api_version: The api version to connect to.
scope: A list of auth scopes to authorize for the application.
key_file_location: The path to a valid service account p12 key file.
service_account_email: The service account email address.
Returns:
A service that is connected to the specified API.
"""
credentials = ServiceAccountCredentials.from_p12_keyfile(
service_account_email, key_file_location, scopes=scope)
http = credentials.authorize(httplib2.Http())
# Build the service object.
service = build(api_name, api_version, http=http)
return service
# Insert user email here | 6c333f43c5feb5b44128b8f592586804eba68e1e | 13,617 |
import numpy as np
from IsabelaFunctions.langlais_coeff import glm as g
from IsabelaFunctions.langlais_coeff import hlm as h
from tqdm import tqdm
def model_map(lon, lat, alt, comp, binsize = 0.1, nmax = 134, a = 3393.5):
"""
Calculates a map of one component of the crustal magnetic field model, for a given altitude.
Parameters:
lon: array
The longitude range, in degrees. Ex.: [20., 50.].
lat: array
The latitude range, in degrees.
alt: float
The altitude in which the map will be computed, in km.
comp: string
The desired magnetic field component, in spherical coordinates. Options are 'Br', 'Btheta', 'Bphi', and 'Bt'.
binsize: float, list, optional
The resolution of the grid. If a float, apply the same binsize for longitude and latitude.
If a list, the first value represents the longitude binsize and the second, the latitude binsize.
nmax: integer, optional
The maximum degree and order of the functions.
a: float, optional
The radius of the planet. Default is the radius of Mars.
Returns:
A lon X lat array containing the magnetic field component.
"""
# Raise an AssertionError if arguments are invalid
assert comp == 'Br' or comp == 'Btheta' or comp == 'Bphi' or comp == 'Bt', "Check argument for comp"
assert type(binsize) is float or type(binsize) is list, "Argument for binsize should be a float or a list"
# Import the coefficient files
# Calculate r, theta, phi, and the Legendre functions
r = a + alt
if type(binsize) is float:
binsize = [binsize, binsize]
lat_len = int(round((lat[1] - lat[0]) / binsize[1] + 1.0))
lon_len = int(round((lon[1] - lon[0]) / binsize[0] + 1.0))
longitude = np.deg2rad(np.linspace(lon[0], lon[1], lon_len))
latitude = np.linspace(lat[0], lat[1], lat_len)
P = np.empty((nmax+1, nmax+1, lat_len)) * np.nan
dP = np.empty_like(P) * np.nan
for theta in range(lat_len):
P[:, :, theta], dP[:, :, theta] = legendre_schmidt_Pyshtools(latitude[theta])
cos = np.empty((nmax+1, lon_len)) * np.nan
sen = np.empty_like(cos) * np.nan
for phi in range(lon_len):
for m in range(nmax+1):
cos[m, phi] = np.cos(m * longitude[phi])
sen[m, phi] = np.sin(m * longitude[phi])
a_over_r = np.empty((nmax+1)) * np.nan
for n in range(nmax+1):
a_over_r[n] = (a/r)**(n+2)
if comp == 'Bt':
Br = np.zeros((lon_len, lat_len))
Btheta = np.zeros((lon_len, lat_len))
Bphi = np.zeros((lon_len, lat_len))
sen_theta = np.sin(np.deg2rad(90.0 - latitude))
for n in range(1, nmax+1):
for m in range(n+1):
tmp1 = g[n, m] * cos[m, :] + h[n, m] * sen[m, :]
tmp2 = np.outer(tmp1, P[n, m, :])
tmp3 = tmp2 * (n+1) * a_over_r[n]
Br += tmp3
tmp2 = np.outer(tmp1, dP[n, m, :] * sen_theta)
tmp3 = tmp2 * a_over_r[n]
Btheta += tmp3
tmp1 = g[n, m] * sen[m, :] + h[n, m] * cos[m, :]
tmp2 = np.outer(tmp1, P[n, m, :])
tmp3 = tmp2 * m * a_over_r[n]
Bphi += tmp3
for theta in range(lat_len):
Bphi[:, theta] /= sen_theta[theta]
B = np.sqrt(Br**2 + Btheta**2 + Bphi**2)
else:
B = np.zeros((lon_len, lat_len))
if comp == 'Br':
for n in tqdm(range(1, nmax+1)):
for m in range(n+1):
tmp1 = g[n, m] * cos[m, :] + h[n, m] * sen[m, :]
tmp2 = np.outer(tmp1, P[n, m, :])
tmp3 = tmp2 * (n+1) * a_over_r[n]
B += tmp3
elif comp == 'Btheta':
sen_theta = np.sin(np.deg2rad(90.0 - latitude))
for n in tqdm(range(1, nmax+1)):
for m in range(n+1):
tmp1 = g[n, m] * cos[m, :] + h[n, m] * sen[m, :]
tmp2 = np.outer(tmp1, dP[n, m, :] * sen_theta)
tmp3 = tmp2 * a_over_r[n]
B += tmp3
else:
sen_theta = np.sin(np.deg2rad(90.0 - latitude))
for n in tqdm(range(1, nmax+1)):
for m in range(n+1):
tmp1 = g[n, m] * sen[m, :] + h[n, m] * cos[m, :]
tmp2 = np.outer(tmp1, P[n, m, :])
tmp3 = tmp2 * m * a_over_r[n]
B += tmp3
for theta in range(lat_len):
B[:, theta] /= sen_theta[theta]
return B.T | 9a49e4a1f31180cd7a26f2028c5e45d077103346 | 13,618 |
import re
def parse_command(incoming_text):
"""
incoming_text: A text string to parse for docker commands
returns: a fully validated docker command
"""
docker_action = ''
parse1 = re.compile(r"(?<=\bdocker\s)(\w+)")
match_obj = parse1.search(incoming_text)
if match_obj:
docker_action = match_obj.group()
print("Got docker action %s" % (docker_action,))
if docker_action and docker_action in DOCKER_SUPPORTED:
# Use this type of code if we want to limit the docker commands
#parse2 = re.compile(r"(?<=\b%s\s)(\w+)" % docker_action)
#match_obj = parse2.search(incoming_text)
#if match_obj:
# docker_subcommand = match_obj.group()
# if docker_subcommand in SUBCOMMAND_SUPPORTED:
# return "docker %s %s" % (docker_action, docker_subcommand)
# Otherwise let it fly and return help if it pumps mud.
print "returning docker %s%s" % (docker_action, incoming_text[match_obj.end():])
return "docker %s%s" % (docker_action, incoming_text[match_obj.end():])
return docker_usage_message() | abe82ae2fe29014b3441889c973a412a536b78f1 | 13,619 |
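A hypothetical usage sketch for parse_command above; DOCKER_SUPPORTED and docker_usage_message are assumed to be defined elsewhere in the source module, and the whitelist shown here is illustrative only.
DOCKER_SUPPORTED = ('ps', 'images', 'logs')  # assumed whitelist, not from the source
print(parse_command("please run docker ps -a"))  # -> 'docker ps -a'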
from itertools import combinations
import networkx as nx
import numpy as np
def angle_connectivity(ibonds):
"""Given the bonds, get the indices of the atoms defining all the bond
angles
A 'bond angle' is defined as any set of 3 atoms, `i`, `j`, `k` such that
atom `i` is bonded to `j` and `j` is bonded to `k`
Parameters
----------
ibonds : np.ndarray, shape=[n_bonds, 2], dtype=int
Each row in `ibonds` is a pair of indices `i`, `j`, indicating that
atoms `i` and `j` are bonded
Returns
-------
iangles : np.ndarray, shape=[n_angles, 3], dtype=int
n_angles x 3 array of indices, where each row is the index of three
atoms m,n,o such that n is bonded to both m and o.
"""
graph = nx.from_edgelist(ibonds)
iangles = []
for i in graph.nodes():
for (m, n) in combinations(graph.neighbors(i), 2):
# so now there is a bond angle m-i-n
iangles.append((m, i, n))
return np.array(iangles) | 86c992a1a8ac2d3c6b1fbc5a137ef0734a3079ed | 13,621 |
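A minimal usage sketch for angle_connectivity, using an assumed water-like bond list (not from the source).
import numpy as np

# O(0) is bonded to H(1) and H(2); the only bond angle is H(1)-O(0)-H(2).
ibonds = np.array([[0, 1], [0, 2]])
print(angle_connectivity(ibonds))  # -> [[1 0 2]]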
def BOPDS_PassKeyMapHasher_IsEqual(*args):
"""
:param aPKey1:
:type aPKey1: BOPDS_PassKey &
:param aPKey2:
:type aPKey2: BOPDS_PassKey &
:rtype: bool
"""
return _BOPDS.BOPDS_PassKeyMapHasher_IsEqual(*args) | 8da04f1755e3d2f7d10ad3ecf5ec6b0d00ca5fcb | 13,622 |
def dms2dd(s):
"""convert lat and long to decimal degrees"""
direction = s[-1]
degrees = s[0:4]
dd = float(degrees)
if direction in ('S','W'):
dd *= -1
return dd | cb76efbf8c3b6a75bcc26593fab81a8ef3e16bbf | 13,624 |
def setna(self, value, na=np.nan, inplace=False):
""" set a value as missing
Parameters
----------
value : the values to set to na
na : the replacement value (default np.nan)
Examples
--------
>>> from dimarray import DimArray
>>> a = DimArray([1,2,-99])
>>> a.setna(-99)
dimarray: 2 non-null elements (1 null)
0 / x0 (3): 0 to 2
array([ 1., 2., nan])
>>> a.setna([-99, 2]) # sequence
dimarray: 1 non-null elements (2 null)
0 / x0 (3): 0 to 2
array([ 1., nan, nan])
>>> a.setna(a > 1) # boolean
dimarray: 2 non-null elements (1 null)
0 / x0 (3): 0 to 2
array([ 1., nan, -99.])
>>> a = DimArray([[1,2,-99]]) # multi-dim
>>> a.setna([-99, a>1]) # boolean
dimarray: 1 non-null elements (2 null)
0 / x0 (1): 0 to 0
1 / x1 (3): 0 to 2
array([[ 1., nan, nan]])
"""
return self.put(_matches(self.values, value), na, cast=True, inplace=inplace) | 6ada601dee346d5440a64ffdbf8d2642873bdb08 | 13,625 |
def hbox(*items, **config):
""" Create a DeferredConstraints object composed of horizontal
abutments for a given sequence of items.
"""
return LinearBoxHelper('horizontal', *items, **config) | cdfe16a35c73a2f8406207a0262b4210ce86146f | 13,626 |
def find_columns(clause):
"""locate Column objects within the given expression."""
cols = util.column_set()
visitors.traverse(clause, {}, {'column':cols.add})
return cols | 86b4c866a8fbe20ab1d4b0a34e4940155df00744 | 13,627 |
def _preprocess_data(smiles, labels, batchsize = 100):
"""
prepares all input batches to train/test the GDNN fingerprints implementation
"""
N = len(smiles)
batches = []
num_bond_features = 6
for i in range(int(np.ceil(N*1./batchsize))):
array_rep = utils.array_rep_from_smiles(smiles[i*batchsize:min(N,(i+1)*batchsize)])
labels_b = labels[i*batchsize:min(N,(i+1)*batchsize)]
atom_features = array_rep['atom_features']
summed_bond_features_by_degree = extract_bondfeatures_of_neighbors_by_degree(array_rep)
batch_dict = {'input_atom_features':atom_features} # (num_atoms, num_atom_features)
missing_degrees = []
for degree in degrees:
atom_neighbors_list = array_rep[('atom_neighbors', degree)]
if len(atom_neighbors_list)==0:
missing_degrees.append(degree)
continue
# this matrix is used by every layer to match and sum all neighboring updated atom features to the atoms
atom_neighbor_matching_matrix = connectivity_to_Matrix(atom_neighbors_list, atom_features.shape[0])
atom_batch_matching_matrix = connectivity_to_Matrix(array_rep['atom_list'], atom_features.shape[0]).T
assert np.all(atom_batch_matching_matrix.sum(1).mean()==1)
assert np.all(atom_batch_matching_matrix.sum(0).mean()>1),'Error: looks like a single-atom molecule?'
batch_dict['bond_features_degree_'+str(degree)] = summed_bond_features_by_degree[degree]
batch_dict['atom_neighbors_indices_degree_'+str(degree)] = atom_neighbors_list
batch_dict['atom_features_selector_matrix_degree_'+str(degree)] = atom_neighbor_matching_matrix
batch_dict['atom_batch_matching_matrix_degree_'+str(degree)] = atom_batch_matching_matrix.T # (batchsize, num_atoms)
if degree==0:
print('degree 0 bond?')
print(smiles[i*batchsize:min(N,(i+1)*batchsize)])
return
# input_atom_features (292L, 62L)
# bond_features_degree_ 1 (70L, 6L)
# atom_neighbors_indices_degree_ 1 (70L, 1L)
# bond_features_degree_ 2 (134L, 6L)
# atom_neighbors_indices_degree_ 2 (134L, 2L)
# bond_features_degree_ 3 (78L, 6L)
# atom_neighbors_indices_degree_ 3 (78L, 3L)
# bond_features_degree_ 4 (10L, 6L)
# atom_neighbors_indices_degree_ 4 (10L, 4L)
num_bond_features = batch_dict['bond_features_degree_'+str(degree)].shape[1]
num_atoms = atom_neighbor_matching_matrix.shape[1]
for missing_degree in missing_degrees:
batch_dict['atom_neighbors_indices_degree_'+str(missing_degree)] = np.zeros((0, missing_degree),'int32')
batch_dict['bond_features_degree_'+str(missing_degree)] = np.zeros((0, num_bond_features),'float32')
batch_dict['atom_features_selector_matrix_degree_'+str(missing_degree)] = np.zeros((0, num_atoms),'float32')
batch_dict['atom_batch_matching_matrix_degree_'+str(missing_degree)] = atom_batch_matching_matrix.T
batches.append((batch_dict,labels_b))
return batches | 3456fe2059e386088d359ec0c2d54dff2d7fac25 | 13,628 |
def linear_activation_forward(A_prev, W, b, activation, keep_prob=1):
"""
Implement the forward propagation for the LINEAR->ACTIVATION layer
Arguments:
A_prev -- activations from previous layer (or input data): (size of previous layer, number of examples)
W -- weights matrix: numpy array of shape (size of current layer, size of previous layer)
b -- bias vector, numpy array of shape (size of the current layer, 1)
activation -- the activation to be used in this layer, stored as a text string: "sigmoid" or "relu"
Returns:
A -- the output of the activation function, also called the post-activation value
cache -- a python dictionary containing "linear_cache" and "activation_cache";
stored for computing the backward pass efficiently
"""
if activation == "sigmoid":
# Inputs: "A_prev, W, b". Outputs: "A, activation_cache".
# START CODE HERE ### (≈ 2 lines of code)
Z, linear_cache = linear_forward(A_prev, W, b)
A, activation_cache = sigmoid(Z)
### END CODE HERE ###
Dt = np.random.rand(A.shape[0], A.shape[1])
elif activation == "relu":
# Inputs: "A_prev, W, b". Outputs: "A, activation_cache".
# START CODE HERE ### (≈ 2 lines of code)
Z, linear_cache = linear_forward(A_prev, W, b)
A, activation_cache = relu(Z)
# Dropout
# Step 1: initialize matrix D2 = np.random.rand(..., ...)
Dt = np.random.rand(A.shape[0], A.shape[1])
# Step 2: convert entries of D2 to 0 or 1 (using keep_prob as the
# threshold)
Dt = Dt < keep_prob
# Step 3: shut down some neurons of A2
A = A * Dt
A = A / keep_prob
### END CODE HERE ###
assert (A.shape == (W.shape[0], A_prev.shape[1]))
cache = (linear_cache, activation_cache, Dt)
return A, cache | 0e4d12142224bfb46af0afb547abe3dde0aa6811 | 13,629 |
def load_image(image_path, size):
"""
Load an image as a Numpy array.
:param image_path: Path of the image
:param size: Target size
:return Image array, normalized between 0 and 1
"""
image = img_to_array(load_img(image_path, target_size=size)) / 255.
return image | 3d9a790b762f800a222c26578dc0572587b091fb | 13,631 |
import signal
def _signal_exit_code(signum: signal.Signals) -> int:
"""
Return the exit code corresponding to a received signal.
Conventionally, when a program exits due to a signal its exit code is 128
plus the signal number.
"""
return 128 + int(signum) | 050eee98632216fddcbd71e4eb6b0c973f6d4144 | 13,632 |
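A small usage sketch (assumed, not from the source), showing the conventional 128 + signal-number exit codes on POSIX.
import signal

print(_signal_exit_code(signal.SIGINT))   # -> 130 (128 + 2)
print(_signal_exit_code(signal.SIGTERM))  # -> 143 (128 + 15)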
import csv
import pandas as pd
def make_template_matrix(msigdb_file, blacklist, checkblacklist=True):
"""
Retrieve all genes and pathways from given msigdb .gmt file
Output:
sorted gene by pathways pandas dataframe. Entries indicate membership
"""
all_db_pathways = []
all_db_genes = []
# Get a set of all genes and all pathways in MSigDB (not blacklisted)
with open(msigdb_file, 'r') as msigdb_fh:
msigdb_reader = csv.reader(msigdb_fh, delimiter='\t')
for row in msigdb_reader:
signature_name = row[0]
signature_genes = row[2:]
if checkblacklist:
if signature_name.startswith(blacklist):
continue
all_db_pathways.append(signature_name)
all_db_genes += signature_genes
big_msigdb_df = pd.DataFrame(0, index=set(all_db_genes), columns=all_db_pathways)
big_msigdb_df = big_msigdb_df.sort_index()
big_msigdb_df = big_msigdb_df.T.sort_index().T
# Loop through file again to populate dataframe. This is a fast implementation
with open(msigdb_file, 'r') as msigdb_fh:
msigdb_reader = csv.reader(msigdb_fh, delimiter='\t')
for row in msigdb_reader:
signature_name = row[0]
signature_genes = row[2:]
if checkblacklist:
if signature_name.startswith(blacklist):
continue
for gene in signature_genes:
big_msigdb_df.at[gene, signature_name] = 1
return big_msigdb_df | b8068089279dfbe3b3cfc8b16dee016cc0994746 | 13,633 |
def unwrap_key(
security_control: SecurityControlField, wrapping_key: bytes, wrapped_key: bytes
):
"""
Simple function to unwrap a key received.
"""
validate_key(security_control.security_suite, wrapping_key)
validate_key(security_control.security_suite, wrapped_key)
unwrapped_key = aes_key_unwrap(wrapping_key, wrapped_key)
return unwrapped_key | 7720ad8905f6818b1a3fa4132b040560a9ae0dfa | 13,634 |
def checkOwnership(obj, login_session):
"""
This function helps to check if the current logged in user
is the creator of the given category or a given item.
This function returns True if the current user owns the category;
otherwise, it returns False.
"""
# the user has logged in at this moment
userID = getUserID(login_session["email"])
# comparing user_id is a better approach
# Because different user still can have same usernames
if obj.user_id == userID:
return True
else:
return False | 851d2dafae633ed92698af525b1c717091edb2b7 | 13,635 |
import logging
def transform_file_name(original_file_name):
"""
Now, this is just whatever I felt like. Whee.
So in this function I could have just used 0 and 1 as my indices directly when I look at the different parts of
the file name, but it's generally better to name these sorts of things, so people know *why* they're 0 and 1.
Another benefit is that you now know exactly why these particular things are 0 and 1 without having to guess,
and you know that these usages of 0 or 1 are different for other usages. For example, I have 2 usages of the
value 1 in this function, but they serve different purposes.
"""
# So script constants are in all caps. But when we're using constants inside a specific function or class or
# something along those lines, then we do something a little different. These values are meant to be used
# inside the function, but they're not meant to be used outside of it, returned, or anything like that. The leading
# underscore is a signal to anyone else who uses this script to indicate that.
_file_name_location = 0
_file_type_ending_location = 1
logging.info("Original file name: {}".format(original_file_name))
# Split the original filename into parts once, based on the specified separator, exactly one time.
# Also, do this by searching for the separator starting from the right-hand side of the string.
file_name_parts = original_file_name.rsplit(
# I don't want this line to be too long, so I've added line breaks here to keep things from getting too wide.
ScriptConstants.FILE_EXTENSION_SEPARATOR,
ScriptConstants.NUM_FILE_EXTENSIONS_IN_FILE_NAME
)
file_ending = file_name_parts[_file_type_ending_location]
file_name = file_name_parts[_file_name_location]
# I forget whether I mentioned this before, but when you add strings together, Python interprets it as
# an instruction to concatenate the strings together (with no separator).
new_file_name = file_name + '_derp_i_moved_this_thing' + ScriptConstants.FILE_EXTENSION_SEPARATOR + file_ending
logging.info('New file name: {}'.format(new_file_name))
return new_file_name | daa5b3be0ae7a40c9d20ac4a8aa37c51dec89c89 | 13,637 |
import networkx as nx
import pandas
def remove_overlapping_cells(graph):
"""
Takes in a graph in which each node is a cell and edges connect cells that
overlap eachother in space. Removes overlapping cells, preferentially
eliminating the cell that overlaps the most cells (i.e. if cell A overlaps
cells B, C, and D, whereas cell B only overlaps cell A, cell C only overlaps
cell A, and cell D only overlaps cell A, then cell A will be removed,
leaving cells B, C, and D remaining because there is no more overlap
within this group of cells).
Args:
graph: An undirected graph, in which each node is a cell and each
edge connects overlapping cells. nodes are expected to have
the following attributes: originalFOV, assignedFOV
Returns:
A pandas dataframe containing the feature ID of all cells after removing
all instances of overlap. There are columns for cell_id, originalFOV,
and assignedFOV
"""
connectedComponents = list(nx.connected_components(graph))
cleanedCells = []
connectedComponents = [list(x) for x in connectedComponents]
for component in connectedComponents:
if len(component) == 1:
originalFOV = graph.nodes[component[0]]['originalFOV']
assignedFOV = graph.nodes[component[0]]['assignedFOV']
cleanedCells.append([component[0], originalFOV, assignedFOV])
if len(component) > 1:
sg = nx.subgraph(graph, component)
verts = list(nx.articulation_points(sg))
if len(verts) > 0:
sg = nx.subgraph(graph,
[x for x in component if x not in verts])
allEdges = [[k, v] for k, v in nx.degree(sg)]
sortedEdges = sorted(allEdges, key=lambda x: x[1], reverse=True)
maxEdges = sortedEdges[0][1]
while maxEdges > 0:
sg = nx.subgraph(graph, [x[0] for x in sortedEdges[1:]])
allEdges = [[k, v] for k, v in nx.degree(sg)]
sortedEdges = sorted(allEdges, key=lambda x: x[1],
reverse=True)
maxEdges = sortedEdges[0][1]
keptComponents = list(sg.nodes())
cellIDs = []
originalFOVs = []
assignedFOVs = []
for c in keptComponents:
cellIDs.append(c)
originalFOVs.append(graph.nodes[c]['originalFOV'])
assignedFOVs.append(graph.nodes[c]['assignedFOV'])
listOfLists = list(zip(cellIDs, originalFOVs, assignedFOVs))
listOfLists = [list(x) for x in listOfLists]
cleanedCells = cleanedCells + listOfLists
cleanedCellsDF = pandas.DataFrame(cleanedCells,
columns=['cell_id', 'originalFOV',
'assignedFOV'])
return cleanedCellsDF | bd133c5ddd59f950d34ba16fb7bc3ff0215f0cf2 | 13,638 |
def main_page(request):
"""Renders the main page and gets n (the matrix dimension number)"""
if request.method != 'POST':
form = InputForm()
else:
form = InputForm(data=request.POST)
if form.is_valid():
return redirect('calculator:set_demensions')
context = {'form': form}
return render(request, 'calculator/main_page.html', context) | a6131ea837c8d9b986e8579a40ada1f7a0a3bb64 | 13,639 |
import itertools as it
def int2fin_reference(n):
"""Calculates a checksum for a Finnish national reference number"""
checksum = 10 - (sum([int(c) * i for c, i in zip(str(n)[::-1], it.cycle((7, 3, 1)))]) % 10)
return "%s%s" % (n, checksum) | f21e66cb917631797d62ecc8ba2728b18d36ae1c | 13,640 |
def COLSTR(str, tag):
"""
Utility function to create a colored line
@param str: The string
@param tag: Color tag constant. One of SCOLOR_XXXX
"""
return SCOLOR_ON + tag + str + SCOLOR_OFF + tag | abe3d9111a30ebb678d1f1a2011d3b8a3ad39a75 | 13,641 |
def get_instance_pricing(instance_types):
"""
Get the spot and on demand price of an instance type
in all the regions at current instant
:param instance_types: EC2 instance type
:return: a pandas DataFrame with columns as
region, spot price and on demand price
"""
all_regions = get_all_regions()
price_df = pd.DataFrame({DF_COL_INSTANCE_TYPE: [],
DF_COL_REGION: [],
DF_COL_SPOT_PRICE: [],
DF_COL_ON_DEMAND_PRICE: []})
for region_name in all_regions:
spot_prices = get_spot_price(instance_types, region_name)
on_demand_prices = get_on_demand_price(instance_types, region_name)
both_prices = pd.merge(spot_prices, on_demand_prices,
on=DF_COL_INSTANCE_TYPE)
n_rows = both_prices.shape[0]
region_list = n_rows * [region_name]
both_prices[DF_COL_REGION] = region_list
both_prices = both_prices[[DF_COL_INSTANCE_TYPE, DF_COL_REGION,
DF_COL_SPOT_PRICE,
DF_COL_ON_DEMAND_PRICE]]
price_df = price_df.append(both_prices)
return price_df | 62dba0e3c3f46ac460178da0bc4d615869819f83 | 13,642 |
import itertools
async def get_user_groups(request):
"""Returns the groups that the user in this request has access to.
This function gets the user id from the auth.get_auth function, and passes
it to the ACL callback function to get the groups.
Args:
request: aiohttp Request object
Returns:
If the ACL callback function returns None, this function returns None.
Otherwise this function returns the sequence of group permissions
provided by the callback, plus the Everyone group. If user_id is not
None, the AuthenticatedUser group and the user_id are added to the
groups returned by the function
Raises:
RuntimeError: If the ACL middleware is not installed
"""
acl_callback = request.get(GROUPS_KEY)
if acl_callback is None:
raise RuntimeError('acl_middleware not installed')
user_id = await get_auth(request)
groups = await acl_callback(user_id)
if groups is None:
return None
user_groups = (Group.AuthenticatedUser, user_id) if user_id is not None else ()
return set(itertools.chain(groups, (Group.Everyone,), user_groups)) | 9fd62d6f971c871ce290700f3abb7eb467692533 | 13,643 |
def plot_bivariate_correlations(df, path=None, dpi=150):
"""
Plots heatmaps of 2-variable correlations with the Target variable.
The bivariate correlations are assembled using both the arithmetic and geometric means for
two subplots in the figure.
Parameters
----------
df: dataframe
path: optional string path for saving
dpi: integer dots per inch
Returns
-------
fig: figure with 2 subplots of bivariate correlations (using arithmetic and geometric mean)
"""
# Plot function for subplots
def makeit(ax):
bound = np.max(np.abs(correlations))
img = ax.matshow(correlations, cmap=cm.coolwarm, vmin=-bound, vmax=bound)
ax.set(xticks=np.arange(df.shape[1]),
yticks=np.arange(df.shape[1]),
xticklabels=df.columns,
yticklabels=df.columns
)
for label in ax.xaxis.get_ticklabels():
label.set_rotation(75)
label.set_fontsize(16)
for label in ax.yaxis.get_ticklabels():
label.set_fontsize(16)
if matplotlib.__version__ == '3.1.1':
ax.set_ylim(len(df.columns) - 0.5, -0.5)
# create an axes on the right side of ax. The width of cax will be 5%
# of ax and the padding between cax and ax will be fixed at 0.05 inch.
divider = make_axes_locatable(ax)
cax = divider.append_axes("right", size="8%", pad=0.1)
cb = plt.colorbar(img, cax=cax)
cb.set_ticks([])
try:
target = df.Target
except AttributeError:
print('Not set up for working without Target series in DataFrame')
df = df.drop(columns=["Target"])
features = list(df.columns)
arr = np.array(df)
correlations = np.zeros((len(features), len(features)))
# First the arithmetic mean
for i in range(len(features)):
dic = {}
for j in range(len(features)):
dic["{}+{}".format(features[i], features[j])] = (arr[:, i] + arr[:, j]) / 2
_df = pd.DataFrame(dic)
correlations[i, :] = _df.corrwith(target)
fig, axes = plt.subplots(2, 1, figsize=(10, 20))
ax = axes[0]
makeit(ax)
ax.set_title('Arithmetic Mean Bivariate Correlation', y=1.3, fontweight="bold", fontsize=18)
correlations = np.zeros((len(features), len(features)))
# Second the geometric mean
for i in range(len(features)):
dic = {}
for j in range(len(features)):
dic["{}*{}".format(features[i], features[j])] = np.sqrt((arr[:, i] * arr[:, j]))
_df = pd.DataFrame(dic)
correlations[i, :] = _df.corrwith(target)
ax = axes[1]
makeit(ax)
ax.set_title('Geometric Mean Bivariate Correlation', y=1.3, fontweight="bold", fontsize=18)
plt.tight_layout()
if path: plt.savefig(path, dpi=dpi)
return fig | d5dc7da98228aa7b7865510bd4dcd6531e7049bc | 13,644 |
from torch.utils.data import DataLoader
def create_datastream(dataset_path, **kwargs):
""" create data_loader to stream images 1 by 1 """
if osp.isfile(osp.join(dataset_path, 'calibration.txt')):
db = ETH3DStream(dataset_path, **kwargs)
elif osp.isdir(osp.join(dataset_path, 'image_left')):
db = TartanAirStream(dataset_path, **kwargs)
elif osp.isfile(osp.join(dataset_path, 'rgb.txt')):
db = TUMStream(dataset_path, **kwargs)
elif osp.isdir(osp.join(dataset_path, 'mav0')):
db = EurocStream(dataset_path, **kwargs)
elif osp.isfile(osp.join(dataset_path, 'calib.txt')):
db = KITTIStream(dataset_path, **kwargs)
else:
# db = TartanAirStream(dataset_path, **kwargs)
db = TartanAirTestStream(dataset_path, **kwargs)
stream = DataLoader(db, shuffle=False, batch_size=1, num_workers=4)
return stream | 145f8c44e8e718fea9a9bdabf5e1f9497a00241a | 13,645 |
def is_contained(target, keys):
"""Check is the target json object contained specified keys
:param target: target json object
:param keys: keys
:return: True if all keys are contained, False if any key is missing or None.
Invalid parameters always return False.
"""
if not target or not keys:
return False
# if keys is just a string convert it to a list
if type(keys) == str:
keys = [keys]
# traverse the list to check json object
# if key does not exist or value is None then return False
try:
for key in keys:
if target[key] is None:
return False
except KeyError:
return False
# All seems to be going well
return True | 948196d4b470788199506bd7768e03554fa67b40 | 13,646 |
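A small usage sketch (assumed, not from the source): verify required keys before processing a payload.
payload = {"id": 7, "name": "sensor-a", "value": None}
print(is_contained(payload, ["id", "name"]))     # True
print(is_contained(payload, "value"))            # False -- key present but value is None
print(is_contained(payload, ["id", "missing"]))  # False -- key not present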
def map(x, in_min, in_max, out_min, out_max):
"""
Map a value from one range to another
:param x: the value to map
:param in_min: minimum of input range
:param in_max: maximum of input range
:param out_min: minimum of output range
:param out_max: maximum of output range
:return: The value scaled to the new range
:rtype: int
"""
return int((x-in_min) * (out_max-out_min) / (in_max-in_min) + out_min) | 4117af35b0061df1fd271306accf198692442dac | 13,647 |
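A quick usage sketch (assumed): scale a 10-bit ADC reading onto an 8-bit PWM range; note that this helper shadows Python's built-in map.
print(map(512, 0, 1023, 0, 255))  # -> 127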
import requests
def get_points(sess: requests.Session, console: Console, status: Status, projectID: int):
"""
Get all existing points in a project
"""
base_url = f"https://mapitfast.agterra.com/api/Points"
resp = sess.get(base_url, params={"projectId": projectID})
points_obj_list = list()
for raw_resp in resp.json():
points_obj_list.append(Points(raw_data=raw_resp))
return points_obj_list | c5f1fce542b06d1680637750f51c3bd7a6e6ebc4 | 13,648 |
def calculate_discounted_returns(rewards):
"""
Calculate discounted reward and then normalize it
(see Sutton book for definition)
Params:
rewards: list of rewards for every episode
"""
returns = np.zeros(len(rewards))
next_return = 0 # 0 because we start at the last timestep
for t in reversed(range(0, len(rewards))):
next_return = rewards[t] + args.gamma * next_return
returns[t] = next_return
# normalize for better statistical properties
returns = (returns - returns.mean()) / (returns.std() + np.finfo(np.float32).eps)
return returns | 538c3d5636bc6105ddf603f0928e4e891fea774c | 13,649 |
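A usage sketch (assumed, not from the source): `args.gamma` is read from a module-level argparse namespace in the original, so a stand-in namespace is used here.
import numpy as np
from types import SimpleNamespace

args = SimpleNamespace(gamma=0.99)  # stand-in for the module-level argparse namespace
returns = calculate_discounted_returns([1.0, 0.0, 1.0])
print(returns)  # three normalized (zero-mean, unit-std) discounted returns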
def parse_binskim_old(bin_an_dic, output):
"""Parse old version of binskim."""
current_run = output['runs'][0]
if 'results' in current_run:
rules = output['runs'][0]['rules']
for res in current_run['results']:
if res['level'] != 'pass':
if len(res['formattedRuleMessage']['arguments']) > 2:
info = ('{}, {}').format(
res['formattedRuleMessage']['arguments'][1],
res['formattedRuleMessage']['arguments'][2])
else:
info = ''
result = {
'rule_id': res['ruleId'],
'status': 'Insecure',
'info': info,
'desc': rules[res['ruleId']]['shortDescription'],
}
else:
result = {
'rule_id': res['ruleId'],
'status': 'Secure',
'info': '',
'desc': rules[res['ruleId']]['shortDescription'],
}
bin_an_dic['results'].append(result)
else:
logger.warning('binskim has no results.')
# Create a warning for the GUI
warning = {
'rule_id': 'No Binskim-Results',
'status': 'Info',
'info': '',
'desc': 'No results from Binskim.',
}
bin_an_dic['warnings'].append(warning)
if 'configurationNotifications' in current_run:
for warn in current_run['configurationNotifications']:
warning = {
'rule_id': warn['ruleId'],
'status': 'Info',
'info': '',
'desc': warn['message'],
}
bin_an_dic['warnings'].append(warning)
# Return updated dict
return bin_an_dic | bd927aa972148b1171dcf2d5c60aa219cf4527b6 | 13,651 |
import operator
def binary_elementwise_compute(
ifm: te.Tensor,
ifm2: te.Tensor,
lut: te.Tensor,
operator_type: str,
ifm_scale: float,
ifm_zero_point: int,
ifm2_scale: float,
ifm2_zero_point: int,
ofm_scale: float,
ofm_zero_point: int,
ifm_channels: int,
ifm2_channels: int,
reversed_operands: bool,
activation: str,
clip_min: int,
clip_max: int,
rounding_mode: str,
ifm_layout: str,
ifm2_layout: str,
ofm_layout: str,
ofm_dtype: str,
) -> te.Tensor:
"""A compute operator representing the capabilities of binary_elementwise for the NPU.
Parameters
----------
ifm : te.Tensor
The Input Feature Map tensor (IFM).
ifm2 : te.Tensor
The Input Feature Map tensor 2 (IFM2).
lut : te.Tensor
The look-up table values to use if activation = "LUT".
operator_type: str
The type of the binary elementwise operator.
"ADD"
"SUB"
"MUL"
"MIN"
"MAX"
"SHR"
"SHL"
ifm_scale : float
The quantization scale for the Input Feature Map tensor.
ifm_zero_point : int
The quantization zero point for the Input Feature Map tensor.
ifm2_scale : float
The quantization scale for the Input Feature Map tensor 2.
ifm2_zero_point : int
The quantization zero point for the Input Feature Map tensor 1.
ofm_scale : float
The quantization scale for the Output Feature Map tensor.
ofm_zero_point : int
The quantization zero point for the Output Feature Map tensor.
ifm_channels : int
The number of the Input Feature Map channels.
ifm2_channels : int
The number of the Input Feature Map 2 channels.
reversed_operands : bool
True if IFM2 is the first operand and IFM is the second operand.
activation : str
The activation function to use.
"NONE" - no activation function.
"CLIP" - clip the output between clip_min and clip_max.
"TANH" - tanh activation function.
"SIGMOID" - sigmoid activation function.
"LUT" - use a look-up table to perform the activation function.
Available activations for activation type:
{int8, uint8}: "NONE", "CLIP", "TANH", "SIGMOID", "LUT"
{int32}: "NONE"
clip_min : int
The minimum clipping value if activation = "CLIP".
clip_max : int
The maximum clipping value if activation = "CLIP".
rounding_mode : str
The rounding mode to apply to the Output Feature Map tensor.
"TFL" - Tensorflow Lite rounding scheme.
"TRUNCATE" - Truncate towards zero.
"NATURAL" - Round to nearest value, with x.5 rounded up towards +infinity.
ifm_layout : str, optional
The layout of the Input Feature Map tensor. Can be "NHWC" or "NHCWB16".
ifm2_layout : str, optional
The layout of the Input Feature Map tensor 2. Can be "NHWC" or "NHCWB16".
ofm_layout : str, optional
The layout of the Output Feature Map tensor. Can be "NHWC" or "NHCWB16".
ofm_dtype: str
The Output Feature Map tensor type.
MUL, ADD, SUB {IFM}->{OFM}:
{uint8, int8 int32} -> {uint8, int8, int32}, any pairing
MAX, MIN:
IFM and OFM must be of the same type, one of:
{int8, uint8}
SHR {IFM}->{OFM}:
{int32}->{int8, uint8, int32}, any pairing"
SHL:
{int32}->{int32} only
Returns
-------
te.Tensor
The Output Feature Map tensor.
"""
assert ifm.shape[0] == 1
assert ifm2.shape[0] == 1
assert ifm_layout in {"NHWC", "NHCWB16"}
assert ifm2_layout in {"NHWC", "NHCWB16"}
assert ofm_layout in {"NHWC", "NHCWB16"}
# Compute operation for the IFM DMA pipeline
dmaed_ifm = dma_ifm_compute(
ifm, ifm_layout, ifm_zero_point, ifm_scale, ifm_channels, (0, 0, 0, 0)
)
dmaed_ifm2 = dma_ifm_compute(
ifm2, ifm2_layout, ifm2_zero_point, ifm2_scale, ifm2_channels, (0, 0, 0, 0)
)
# Binary elementwise compute operation
ofm_height = dmaed_ifm.shape[1]
ofm_width = dmaed_ifm.shape[2]
binary_elementwise_attrs = {
"op": "ethosu_binary_elementwise",
"operator_type": operator_type,
"reversed_operands": reversed_operands,
"activation": activation,
"clip_min": clip_min,
"clip_max": clip_max,
"rounding_mode": rounding_mode,
}
operators = {
"ADD": operator.add,
"SUB": operator.sub,
"MUL": operator.mul,
"MIN": te.min,
"MAX": te.max,
"SHR": operator.add,
"SHL": operator.add,
}
broadcast = [value == 1 for value in dmaed_ifm2.shape]
if reversed_operands:
binary_elementwise = te.compute(
(1, ofm_height, ofm_width, ifm_channels),
lambda nn, hh, ww, cc: operators[operator_type](
dmaed_ifm2(
0 if broadcast[0] else nn,
0 if broadcast[1] else hh,
0 if broadcast[2] else ww,
0 if broadcast[3] else cc,
).astype(ifm.dtype),
dmaed_ifm(nn, hh, ww, cc).astype(ifm.dtype),
).astype(ofm_dtype),
name="ethosu_binary_elementwise",
attrs=binary_elementwise_attrs,
)
else:
binary_elementwise = te.compute(
(1, ofm_height, ofm_width, ifm_channels),
lambda nn, hh, ww, cc: operators[operator_type](
dmaed_ifm(nn, hh, ww, cc).astype(ifm.dtype),
dmaed_ifm2(
0 if broadcast[0] else nn,
0 if broadcast[1] else hh,
0 if broadcast[2] else ww,
0 if broadcast[3] else cc,
).astype(ifm.dtype),
).astype(ofm_dtype),
name="ethosu_binary_elementwise",
attrs=binary_elementwise_attrs,
)
nhwc_to_nhcwb16 = [
[1, 0, 0, 0, 0],
[0, 1, 0, 0, 0],
[0, 0, 0, 1 / 16, 0],
[0, 0, 1, 0, 0],
[0, 0, 0, 0, 16],
[0, 0, 0, 0, 1],
]
nhcwb16_to_nhwc = [
[1, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 0],
[0, 0, 16, 0, 1, -16],
[0, 0, 0, 0, 0, 1],
]
ifm_matrix = [
[1, 0, 0, 0, 0],
[0, 1, 0, 0, 0],
[0, 0, 1, 0, 0],
[0, 0, 0, 1, 0],
[0, 0, 0, 0, 1],
]
ifm2_matrix = [
[1, 0, 0, 0, 0],
[0, (1 - int(broadcast[1])), 0, 0, int(broadcast[1])],
[0, 0, (1 - int(broadcast[2])), 0, int(broadcast[2])],
[0, 0, 0, (1 - int(broadcast[3])), int(broadcast[3])],
[0, 0, 0, 0, 1],
]
if ofm_layout == "NHCWB16":
ifm_matrix = np.matmul(ifm_matrix, nhcwb16_to_nhwc).tolist()
ifm2_matrix = np.matmul(ifm2_matrix, nhcwb16_to_nhwc).tolist()
if ifm_layout == "NHCWB16":
ifm_matrix = np.matmul(nhwc_to_nhcwb16, ifm_matrix).tolist()
if ifm2_layout == "NHCWB16":
ifm2_matrix = np.matmul(nhwc_to_nhcwb16, ifm2_matrix).tolist()
ifm_propagator = Propagator(
ifm_matrix,
[0, 0, 0, 0] if ifm_layout == "NHWC" else [0, 0, 0, 0, 0],
)
ifm2_propagator = Propagator(
ifm2_matrix,
[0, 0, 0, 0] if ifm2_layout == "NHWC" else [0, 0, 0, 0, 0],
)
propagator_attrs = {
"ifm_propagator": ifm_propagator,
"ifm2_propagator": ifm2_propagator,
}
# Compute operation for the OFM DMA pipeline
return dma_ofm_compute(
binary_elementwise,
ofm_layout,
ofm_zero_point,
ofm_scale,
ifm_channels,
attrs=propagator_attrs,
) | 2bbac91e8606512180b6a652538eeac23e369c7c | 13,652 |
def x_power_dependence(n, dep_keys, ctfs=list(), force_zero=None, **kwargs):
"""Returns a fit function that allows x^n depdendence on the constants
associated with each of the dep_keys
y(x) = (a0 * b0 + a1 * b1 + ...) * x^n
where each of the a's are fit parameters and each of the b's are either
a constant associated with the keys in dep_keys or a constant constructed
by a ctf (constant transform function) in ctfs
"""
return _dependence(
f=lambda p, x: p[0] * x ** n, n_params=1,
dep_keys=dep_keys, ctfs=ctfs, force_zero=force_zero,
name='x^{} dependence'.format(n), code='x{}'.format(n)+':{}', **kwargs
) | 49b1a605001003b52f38f7f469a7c7bfafd43d6b | 13,653 |
from typing import Iterable
def get_subseqs(s, ops):
"""Returns a list of sequences given when applying the list of (ops)
on them, until a constant one is found, thus:
new[0] = next seq of s with ops[0]
new[i] = next seq of new[i-1] with op[i]
If 'ops' is not a list, then the same operation will be repeated.
The length of 'ops' should be equal to the length of 's' minus 1"""
if len(s) < 2:
# We can't get the next sequence based on two terms if there's only one
return []
if not isinstance(ops, Iterable):
ops = [ops for _ in range(len(s)-1)]
# Start with the initial subsequence
subseqs = [get_subseq(s, ops[0])]
# And base the next subsequences on the previous one until they're constant
i = 1
while not is_constant(subseqs[-1]) and len(subseqs[-1]) > 1:
subseqs.append(get_subseq(subseqs[-1], ops[i]))
i += 1
return subseqs | 3ad7a955c7b55596f327ae52d34368451ef79737 | 13,654 |
def update_s(C,k):
"""
Args: C: 2d array
k: 1d array
Return: 1d array
"""
if np.shape(C)[0]==0:
s = np.array([1])
else:
temp = np.dot(C,k)
s = np.append(temp,1)
return s | ce4604d71b05d328d6b8b60bea9f611d8d12f6eb | 13,656 |
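A minimal usage sketch (assumed, not from the source) for update_s, covering both the empty and non-empty cases.
import numpy as np

C = np.array([[1.0, 0.0], [0.0, 2.0]])
k = np.array([3.0, 4.0])
print(update_s(C, k))                 # -> [3. 8. 1.]  (C @ k with 1 appended)
print(update_s(np.empty((0, 2)), k))  # -> [1]         (empty C falls back to [1])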
def test_handler_callback_failure():
"""Test failure mode for inappropriate handlers."""
class BadHandler(object):
def handler(self, one):
return 'too many'
ob = EventTest()
handler = BadHandler()
with pytest.raises(TypeError):
ob.PublicEvent += handler.handler
ob.OnPublicEvent(EventArgsTest(10))
ob.PublicEvent -= handler.handler
class BadHandler(object):
def handler(self, one, two, three, four, five):
return 'not enough'
ob = EventTest()
handler = BadHandler()
with pytest.raises(TypeError):
ob.PublicEvent += handler.handler
ob.OnPublicEvent(EventArgsTest(10))
ob.PublicEvent -= handler.handler | c5d8daf4cca81ef8dee8ba5a10b9e572899bd23e | 13,657 |
def get_chord_type(chord):
"""'Parses' input for a chord and returns the type of chord from it"""
cleaned_chord = chord[1:]
cleaned_chord = cleaned_chord.replace('b', '')
cleaned_chord = cleaned_chord.replace('#', '')
mapping = {
'7': 'seven',
'9': 'nine',
'm7': 'minor7',
'm9': 'minor9',
'm': 'minor',
'M7': 'major7',
'M9': 'major9',
'': 'major',
}
return mapping[cleaned_chord] | 4a753eb31f1e33340a7aa4df6942c4752b208fdd | 13,658 |
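A usage sketch (assumed): the leading note letter and any accidentals are stripped before the suffix is mapped to a chord-type name.
print(get_chord_type('C#m7'))  # -> 'minor7'
print(get_chord_type('Bb'))    # -> 'major'
print(get_chord_type('F9'))    # -> 'nine'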
from typing import Union
def transpile(model: Union[SympyOpt, Model]) -> SympyOpt:
"""Transpile optimization problem into SympyOpt model
Only accepts SympyOpt or Docplex model.
:param model: model to be transpiled
:raises ValueError: if the argument is of inappropriate type
:return: transpiled model
"""
if isinstance(model, SympyOpt):
return deepcopy(model)
elif isinstance(model, Model):
return DocplexToSympyopt().transpile(model)
elif isinstance(model, LpProblem):
return PulpToSympyopt().transpile(model)
elif isinstance(model, (QuadraticProgram, PauliSumOp)):
return QiskitToSympyopt().transpile(model)
elif isinstance(model, (BinaryQuadraticModel, ConstrainedQuadraticModel)):
return DimodToSympyopt().transpile(model)
else:
raise ValueError(f"Unknown model type: {type(model)}") | f2b4895cb980e535166d9749eb93925722981828 | 13,660 |
def definition():
"""View of the finances with subtotals generated."""
return sql.format(source=source) | c0b9add49b9c7403328449b8989e29739be267a9 | 13,661 |
import math
def random_mini_batches(X, Y, mini_batch_size = 32, seed = 0):
"""
Creates a list of random minibatches from (X, Y)
Arguments:
X -- input data, of shape (input size, number of examples) (m, Hi, Wi, Ci)
Y -- true "label" vector (containing 0 if control, 1 if case), of shape (1, number of examples) (m, n_y)
mini_batch_size - size of the mini-batches, integer
seed -- this is only for the purpose of grading, so that your random minibatches are the same as ours.
Returns:
mini_batches -- list of synchronous (mini_batch_X, mini_batch_Y)
"""
m = X.shape[0] # number of training examples
mini_batches = []
np.random.seed(seed)
# Step 1: Shuffle (X, Y)
permutation = list(np.random.permutation(m))
shuffled_X = X[permutation,:,:,:]
shuffled_Y = Y[permutation,:]
# Step 2: Partition (shuffled_X, shuffled_Y). Minus the end case.
num_complete_minibatches = math.floor(m/mini_batch_size) # number of mini batches of size mini_batch_size in your partitionning
num_complete_minibatches = int(num_complete_minibatches)
for k in range(0, int(num_complete_minibatches)):
mini_batch_X = shuffled_X[k * mini_batch_size : k * mini_batch_size + mini_batch_size,:,:,:]
mini_batch_Y = shuffled_Y[k * mini_batch_size : k * mini_batch_size + mini_batch_size,:]
mini_batch = (mini_batch_X, mini_batch_Y)
mini_batches.append(mini_batch)
# Handling the end case (last mini-batch < mini_batch_size)
if m % mini_batch_size != 0:
mini_batch_X = shuffled_X[num_complete_minibatches * mini_batch_size : m,:,:,:]
mini_batch_Y = shuffled_Y[num_complete_minibatches * mini_batch_size : m,:]
mini_batch = (mini_batch_X, mini_batch_Y)
mini_batches.append(mini_batch)
return mini_batches | 8baa63be638a1706c49176a51013524594a59452 | 13,662 |
def file_base_features(path, record_type):
"""Return values for BASE_SCHEMA features."""
base_feature_dict = {
"record_id": path,
"record_type": record_type,
# "utc_last_access": os.stat(path).st_atime,
"utc_last_access": 1600000000.0,
}
return base_feature_dict | 12f16684002892d7af59a1e26e8a40501098ca4f | 13,663 |
def split_ref(ref):
"""
Split a cell reference into the column letters and the 1-based row number string.
Params:
ref(str):
Returns:
Tuple[str, str]: column, row
"""
m = re_cellref.match(ref)
if m:
return m.group(1), m.group(2)
return None, None | 1ae8e058a47ad0410b7131d4b89061dea822ed68 | 13,664 |
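A usage sketch (assumed): `re_cellref` is defined elsewhere in the source; with a typical A1-style column/row pattern the split would look like this.
print(split_ref("AB12"))  # -> ('AB', '12'), assuming a standard cell-reference pattern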
def table_definition(dataset):
"""print an azure synapse table definition for a kartothek dataset"""
index_col = list(dataset.dataset_metadata.index_columns)[
0
] ##works only with one index column
cols = synapse_columns(
dataset.dataset_metadata.table_meta[dataset.table], index_col
)
template = """
with {dataset.dataset_uuid} as (
SELECT
result.filepath(1) as [{index_col}],
*
FROM
OPENROWSET(
BULK '{dataset.url}/{index_col}=*/*.parquet',
FORMAT='PARQUET'
) with(
{cols}
) as [result]
)
select top 100 * from {dataset.dataset_uuid};
"""
return template.format(dataset=dataset, cols=cols, index_col=index_col) | 75a2f55fa31025899e9adb05e20dbc89ae8dabd4 | 13,665 |
import itertools
def node_extractor(dataframe, *columns):
"""
Extracts the set of nodes from a given dataframe.
:param dataframe: dataframe from which to extract the node list
:param columns: list of column names that contain nodes
:return: list of all unique nodes that appear in the provided dataset
"""
data_list = [dataframe[column].unique().tolist() for column in columns]
return list(set(itertools.chain.from_iterable(data_list))) | 7a4ab889257a0f2c5ddfe18e65d0a7f5f35d8d98 | 13,667 |
def _get_bag(environ, bag_name):
"""
Get the named bag out of the store.
"""
store = environ['tiddlyweb.store']
bag = Bag(bag_name)
try:
bag = store.get(bag)
except NoBagError as exc:
raise HTTP404('%s not found, %s' % (bag.name, exc))
return bag | db4e2425f6c4d839fa091c08b524ea8ecd3c7c27 | 13,668 |
def missing_values_operation(files):
"""Will take iterable file objects and eliminate features or samples with missing values or inputing missing values if necessary"""
for i in files:
with open(i,'rw') as f:
if missing_values(f)==True:
file_data=load_data(i)
#Dropping rows with missing values
file_data.dropna(axis=0)
#Dropping columns with missing values
file_data.dropna(axis=1)
return "dropped rows and columns"
else:
return "no values to be dropped" | df5a6f6809605107db9b008b877fa913a3dc686d | 13,669 |
def _object_id(value):
"""Return the object_id of the device value.
The object_id contains node_id and value instance id
to not collide with other entity_ids.
"""
object_id = "{}_{}".format(slugify(_value_name(value)),
value.node.node_id)
# Add the instance id if there is more than one instance for the value
if value.instance > 1:
return "{}_{}".format(object_id, value.instance)
return object_id | 34c21de533a99ffdabfdabf21540492f7ce33b7f | 13,670 |
def _apply_attention_constraint(
e, last_attended_idx, backward_window=1, forward_window=3
):
"""Apply monotonic attention constraint.
**Note** This function is copied from espnet.nets.pytorch_backend.rnn.attention.py
"""
if e.size(0) != 1:
raise NotImplementedError(
"Batch attention constraining is not yet supported.")
backward_idx = last_attended_idx - backward_window
forward_idx = last_attended_idx + forward_window
if backward_idx > 0:
e[:, :backward_idx] = -float("inf")
if forward_idx < e.size(1):
e[:, forward_idx:] = -float("inf")
return e | 213ef514a9cff31134185e38c57d46921eba763a | 13,671 |
from bs4 import BeautifulSoup
import re
def parse_reolink(email):
"""Parse Reolink tracking numbers."""
tracking_numbers = []
soup = BeautifulSoup(email[EMAIL_ATTR_BODY], 'html.parser')
links = [link.get('href') for link in soup.find_all('a')]
for link in links:
if not link:
continue
match = re.search('qtc_tLabels1=(.*?)$', link)
if match and match.group(1) not in tracking_numbers:
tracking_numbers.append(match.group(1))
return tracking_numbers | cc96d35edb2ace40d83464f4cc3bed1c91480f0f | 13,673 |
def HMF(state, Delta, N):
"""Computes the result of the MF hamiltonian acting on a given state."""
#kinetic term: sum_i(eps(i)*(n_i,up + n_i,down))
kinetic_state = dict_list_sum(
[dict_prod(eps(i, N), dict_sum(number_op(state, i, 0, N), number_op(state, i, 1, N))) for i in range(N)])
#interaction term: sum_i( Delta c_iUP^dag c_iDOWN^dag + conj.(Delta) c_iDOWN c_iUP )
interaction_state = dict_list_sum(
[dict_sum(dict_prod(Delta, cr(cr(state, i, 1, N), i, 0, N)), dict_prod(np.conj(Delta), an(an(state, i, 0, N), i, 1, N))) for i in range(N)])
return dict_sum(kinetic_state, interaction_state) | 3c608d42a328e05fd59c55cbaeded3b6d0b4970b | 13,674 |
def calculate_probability_of_multicoincidence(ambient_size: int = 0,
set_sizes: tuple = (),
intersection_size: int = 0):
"""
Calculates the probability that subsets of a set of a given size, themselves of
prescribed sizes, have mutual intersection of a given cardinality.
Parameters
----------
ambient_size : int
The size of the ambient set.
set_sizes : tuple
The integer sizes of some subsets.
intersection_size : int
The size of the intersection of the subsets.
Returns
-------
probability : float
The probability. Calculated as the number of configurations with the given
intersection size, divided by the number of all configurations.
"""
reduced_sizes = [size - intersection_size for size in set_sizes]
if any(size < 0 for size in reduced_sizes):
return 0
initial_choices = binom(
ambient_size=ambient_size,
subset_size=intersection_size,
)
reduced_ambient_size = ambient_size - intersection_size
covers_of_remaining = compute_number_of_covers(
set_sizes=tuple(reduced_ambient_size - size for size in reduced_sizes),
ambient_size=reduced_ambient_size,
)
all_configurations = count_all_configurations(
set_sizes=set_sizes,
ambient_size=ambient_size,
)
return initial_choices * covers_of_remaining / all_configurations | 1d9deb083f0a0397b067f6efa989a94d68d11b69 | 13,675 |
def check_date(option, opt, value):
"""check a file value
return the filepath
"""
try:
return DateTime.strptime(value, "%Y/%m/%d")
except DateTime.Error :
raise OptionValueError(
"expected format of %s is yyyy/mm/dd" % opt) | 3f817bf2286b459b11ded67abba33b654b090caf | 13,676 |
def no_cloud_fixture():
"""Multi-realization cloud data cube with no cloud present."""
cloud_area_fraction = np.zeros((3, 10, 10), dtype=np.float32)
thresholds = [0.265, 0.415, 0.8125]
return cloud_probability_cube(cloud_area_fraction, thresholds) | 5128c40485fdbc9c8646bec25d1949aac4cddb58 | 13,677 |
from typing import Iterable
def make_slicer_query(
database: Database,
base_table: Table,
joins: Iterable[Join] = (),
dimensions: Iterable[Field] = (),
metrics: Iterable[Field] = (),
filters: Iterable[Filter] = (),
orders: Iterable = (),
):
"""
Creates a pypika/SQL query from a list of slicer elements.
This is the base implementation shared by two implementations: the query to fetch data for a slicer request and
the query to fetch choices for dimensions.
This function only handles dimensions (select+group by) and filtering (where/having), which is everything needed
for the query to fetch choices for dimensions.
The slicer query extends this with metrics, references, and totals.
:param database:
:param base_table:
pypika.Table - The base table of the query, the one in the FROM clause
:param joins:
A collection of joins available in the slicer. This should include all slicer joins. Only joins required for
the query will be used.
:param dimensions:
A collection of dimensions to use in the query.
:param metrics:
A collection of metrics to use in the query.
:param filters:
A collection of filters to apply to the query.
:param orders:
A collection of orders as tuples of the metric/dimension to order by and the direction to order in.
:return:
"""
query = database.query_cls.from_(base_table, immutable=False)
elements = flatten([metrics, dimensions, filters])
# Add joins
join_tables_needed_for_query = find_required_tables_to_join(elements, base_table)
for join in find_joins_for_tables(joins, base_table, join_tables_needed_for_query):
query = query.join(join.table, how=join.join_type).on(join.criterion)
# Add dimensions
for dimension in dimensions:
dimension_term = make_term_for_field(dimension, database.trunc_date)
query = query.select(dimension_term)
query = query.groupby(dimension_term)
# Add filters
for fltr in filters:
query = (
query.having(fltr.definition)
if fltr.is_aggregate
else query.where(fltr.definition)
)
# Add metrics
metric_terms = [make_term_for_field(metric) for metric in metrics]
if metric_terms:
query = query.select(*metric_terms)
# In the case that the orders are determined by a field that is not selected as a metric or dimension, then it needs
# to be added to the query.
select_aliases = {el.alias for el in query._selects}
for (orderby_field, orientation) in orders:
orderby_term = make_term_for_field(orderby_field)
query = query.orderby(orderby_term, order=orientation)
if orderby_term.alias not in select_aliases:
query = query.select(orderby_term)
return query | 31821bdbb0ab94c8971a70d35c1165f5245d90fb | 13,678 |
def build_grid_generator(cfg, input_shape):
"""
Build a grid generator from `cfg.MODEL.GRID_GENERATOR.NAME`.
"""
grid_generator = cfg.MODEL.GRID_GENERATOR.NAME
return GRID_GENERATOR_REGISTRY.get(grid_generator)(cfg, input_shape) | 5f6edbaeece026fc56068aec0fc75549a71ce4a8 | 13,679 |
def main_page(request):
"""
This function is used to display the main page of programme_curriculum
@param:
request - contains metadata about the requested page
"""
return render(request, 'programme_curriculum/mainpage.html') | fdee3342d369112abb2560c4ecfda17a8dfe01e4 | 13,680 |
def _write_detailed_dot(graph, dotfilename):
"""Create a dot file with connection info
digraph structs {
node [shape=record];
struct1 [label="<f0> left|<f1> mid\ dle|<f2> right"];
struct2 [label="<f0> one|<f1> two"];
struct3 [label="hello\nworld |{ b |{c|<here> d|e}| f}| g | h"];
struct1:f1 -> struct2:f0;
struct1:f0 -> struct2:f1;
struct1:f2 -> struct3:here;
}
"""
text = ['digraph structs {', 'node [shape=record];']
# write nodes
edges = []
replacefunk = lambda x: x.replace('_', '').replace('.', ''). \
replace('@', '').replace('-', '')
for n in nx.topological_sort(graph):
nodename = str(n)
inports = []
for u, v, d in graph.in_edges_iter(nbunch=n, data=True):
for cd in d['connect']:
if isinstance(cd[0], str):
outport = cd[0]
else:
outport = cd[0][0]
inport = cd[1]
ipstrip = 'in' + replacefunk(inport)
opstrip = 'out' + replacefunk(outport)
edges.append('%s:%s:e -> %s:%s:w;' % (str(u).replace('.', ''),
opstrip,
str(v).replace('.', ''),
ipstrip))
if inport not in inports:
inports.append(inport)
inputstr = '{IN'
for ip in sorted(inports):
inputstr += '|<in%s> %s' % (replacefunk(ip), ip)
inputstr += '}'
outports = []
for u, v, d in graph.out_edges_iter(nbunch=n, data=True):
for cd in d['connect']:
if isinstance(cd[0], str):
outport = cd[0]
else:
outport = cd[0][0]
if outport not in outports:
outports.append(outport)
outputstr = '{OUT'
for op in sorted(outports):
outputstr += '|<out%s> %s' % (replacefunk(op), op)
outputstr += '}'
srcpackage = ''
if hasattr(n, '_interface'):
pkglist = n._interface.__class__.__module__.split('.')
interface = n._interface.__class__.__name__
if len(pkglist) > 2:
srcpackage = pkglist[2]
srchierarchy = '.'.join(nodename.split('.')[1:-1])
nodenamestr = '{ %s | %s | %s }' % (nodename.split('.')[-1],
srcpackage,
srchierarchy)
text += ['%s [label="%s|%s|%s"];' % (nodename.replace('.', ''),
inputstr,
nodenamestr,
outputstr)]
# write edges
for edge in sorted(edges):
text.append(edge)
text.append('}')
filep = open(dotfilename, 'wt')
filep.write('\n'.join(text))
filep.close()
return text | 793983b56b8fff32fde4e9dc5379a93e4edcb16e | 13,681 |
import functools
def ResidualBlock(name, input_dim, output_dim, filter_size, inputs, resample=None, he_init=True, bn=False):
"""
resample: None, 'down', or 'up'
"""
if resample=='down':
conv_shortcut = MeanPoolConv
conv_1 = functools.partial(lib.ops.conv2d.Conv2D, input_dim=input_dim, output_dim=input_dim)
conv_2 = functools.partial(ConvMeanPool, input_dim=input_dim, output_dim=output_dim)
elif resample=='up':
conv_shortcut = UpsampleConv
conv_1 = functools.partial(UpsampleConv, input_dim=input_dim, output_dim=output_dim)
conv_2 = functools.partial(lib.ops.conv2d.Conv2D, input_dim=output_dim, output_dim=output_dim)
elif resample==None:
conv_shortcut = lib.ops.conv2d.Conv2D
conv_1 = functools.partial(lib.ops.conv2d.Conv2D, input_dim=input_dim, output_dim=input_dim)
conv_2 = functools.partial(lib.ops.conv2d.Conv2D, input_dim=input_dim, output_dim=output_dim)
else:
raise Exception('invalid resample value')
if output_dim==input_dim and resample==None:
shortcut = inputs # Identity skip-connection
else:
shortcut = conv_shortcut(name+'.Shortcut', input_dim=input_dim, output_dim=output_dim, filter_size=1,
he_init=False, biases=True, inputs=inputs)
output = inputs
if bn:
output = Normalize(name+'.BN1', [0,2,3], output)
output = tf.nn.relu(output)
output = conv_1(name+'.Conv1', filter_size=filter_size, inputs=output, he_init=he_init, biases=False)
if bn:
output = Normalize(name+'.BN2', [0,2,3], output)
output = tf.nn.relu(output)
output = conv_2(name+'.Conv2', filter_size=filter_size, inputs=output, he_init=he_init)
return shortcut + output | 8871553f11975edef2a1b0bbf96aff8c54417adf | 13,682 |
from functools import wraps
def timer(func):
"""Logging elapsed time of funciton (decorator)."""
@wraps(func)
def wrapper(*args, **kwargs):
with timing(func.__name__):
return func(*args, **kwargs)
return wrapper | eb38d9856f59328188ac24e66f3bb4f9356ebe89 | 13,684 |
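A usage sketch (assumed, not from the source): `timing` is a context manager defined elsewhere in the module, and the decorated function name below is illustrative.
@timer
def slow_sum(n):
    return sum(range(n))

slow_sum(100000)  # the elapsed time is logged under the name 'slow_sum'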
def peak_ana(x, y, nb=3, plotpoints_axis=None):
""" nb = number of point (on each side) to use as background"""
## get background
xb = np.hstack((x[0:nb], x[-(nb):]))
yb = np.hstack((y[0:nb], y[-(nb):]))
a = np.polyfit(xb, yb, 1)
b = np.polyval(a, x)
yf = y - b
yd = np.diff(yf)
## determine whether peak or step
ispeak = np.abs(skew(yf)) > np.abs(skew(yd))
if ispeak:
yw = yf
xw = x
else:
yw = yd
xw = (x[1:] + x[0:-1]) / 2
## get background
xwb = np.hstack((xw[0:nb], xw[-(nb):]))
ywb = np.hstack((yw[0:nb], yw[-(nb):]))
aw = np.polyfit(xwb, ywb, 1)
bw = np.polyval(aw, xw)
yw = yw - bw
Iw = (xw[1:] - xw[0:-1]) * (yw[1:] + yw[0:-1]) / 2
if sum(Iw) < 0:
yw = -yw
## get parameters
mm = yw.argmax(0)
PEAK = xw[mm]
ywmax = yw[mm]
gg = (yw[:mm][::-1] < (ywmax / 2)).argmax()
ip = interp1d(
yw.take([mm - gg - 1, mm - gg]), xw.take([mm - gg - 1, mm - gg]), kind="linear"
)
xhm1 = ip(ywmax / 2)
gg = (yw[mm:] < (ywmax / 2)).argmax()
ip = interp1d(
yw.take([mm + gg, mm + gg - 1]), xw.take([mm + gg, mm + gg - 1]), kind="linear"
)
xhm2 = ip(ywmax / 2)
FWHM = np.abs(xhm2 - xhm1)
CEN = (xhm2 + xhm1) / 2
if plotpoints_axis and ispeak:
# plot the found points for center and FWHM edges
plotpoints_axis.plot(x, b, "g--")
plotpoints_axis.plot(x, b + ywmax, "g--")
plotpoints_axis.plot([xhm1, xhm1], np.polyval(a, xhm1) + [0, ywmax], "g--")
plotpoints_axis.plot([xhm2, xhm2], np.polyval(a, xhm2) + [0, ywmax], "g--")
plotpoints_axis.plot([CEN, CEN], np.polyval(a, CEN) + [0, ywmax], "g--")
plotpoints_axis.plot([xhm1, xhm2], [np.polyval(a, xhm1), np.polyval(a, xhm2)] + ywmax / 2, "gx")
if not ispeak:
try:
# findings start of step coming from left.
std0 = sp.std(y[0:nb])
nt = nb
while (sp.std(y[0:nt]) < (2 * std0)) and (nt < len(y)):
nt = nt + 1
lev0 = sp.mean(y[0:nt])
# findings start of step coming from right.
std0 = sp.std(y[-nb:])
nt = nb
while (sp.std(y[-nt:]) < (2 * std0)) and (nt < len(y)):
nt = nt + 1
lev1 = sp.mean(y[-nt:])
gg = np.abs(y - ((lev0 + lev1) / 2)).argmin()
ftx = y[gg - 2 : gg + 2]
fty = x[gg - 2 : gg + 2]
if ftx[-1] < ftx[0]:
ftx = ftx[::-1]
fty = fty[::-1]
ip = interp1d(ftx, fty, kind="linear")
CEN = ip((lev0 + lev1) / 2)
gg = np.abs(y - (lev1 + (lev0 - lev1) * 0.1195)).argmin()
ftx = y[gg - 2 : gg + 2]
fty = x[gg - 2 : gg + 2]
if ftx[-1] < ftx[0]:
ftx = ftx[::-1]
fty = fty[::-1]
# print " %f %f %f %f %f" % (ftx[0],ftx[1],fty[0],fty[1],lev1+(lev0-lev1)*0.1195)
ip = interp1d(ftx, fty, kind="linear")
H1 = ip((lev1 + (lev0 - lev1) * 0.1195))
# print "H1=%f" % H1
gg = np.abs(y - (lev0 + (lev1 - lev0) * 0.1195)).argmin()
ftx = y[gg - 2 : gg + 2]
fty = x[gg - 2 : gg + 2]
if ftx[-1] < ftx[0]:
ftx = ftx[::-1]
fty = fty[::-1]
# print " %f %f %f %f %f" % (ftx[0],ftx[1],fty[0],fty[1],lev0+(lev1-lev0)*0.1195)
ip = interp1d(ftx, fty, kind="linear")
H2 = ip((lev0 + (lev1 - lev0) * 0.1195))
# print "H2=%f" % abs(H2-H1)
FWHM = abs(H2 - H1)
            if plotpoints_axis:
# plot the found points for center and FWHM edges
plotpoints_axis.plot([x.min(), x.max()], [lev0, lev0], "g--")
plotpoints_axis.plot([x.min(), x.max()], [lev1, lev1], "g--")
plotpoints_axis.plot([H2, H2], [lev0, lev1], "g--")
plotpoints_axis.plot([H1, H1], [lev0, lev1], "g--")
plotpoints_axis.plot([CEN, CEN], [lev0, lev1], "g--")
plotpoints_axis.plot(
[H2, CEN, H1],
[
lev0 + (lev1 - lev0) * 0.1195,
(lev1 + lev0) / 2,
lev1 + (lev0 - lev1) * 0.1195,
],
"gx",
)
except:
CEN = np.nan
FWHM = np.nan
PEAK = np.nan
return (CEN, FWHM, PEAK) | 1f9ea444b09684ac7764ced8ba5ca3fdbd3e8593 | 13,685 |
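# Illustrative usage sketch for peak_ana (not part of the original snippet): a synthetic
# Gaussian peak on a weak linear background; the quoted values are approximate.
import numpy as np
x = np.linspace(-5, 5, 201)
y = np.exp(-0.5 * (x / 0.8) ** 2) + 0.05 * x + 0.2
cen, fwhm, peak = peak_ana(x, y, nb=5)
# cen and peak are close to 0.0; fwhm is close to 2*sqrt(2*ln 2)*0.8 ≈ 1.88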
from jams.distributions import sep_fs_mean, sep_fs_std
def sample_sep01(nn, xi=1., beta=0.):
"""
Samples from the skew exponential power distribution with location zero and scale one.
Definition
----------
def sample_sep01(nn, xi=1., beta=0.):
Input
-----
nn number of samples
Optional Input
--------------
xi parameter which controls the skewness
beta parameter which controls the kurtosis
Output
------
Samples from the standardized skew exponential power distribution
Examples
--------
None
Literature
--------
Schoups G & Vrugt JA (2010) A formal likelihood function for parameter and predictive
inference of hydrologic models with correlated, heteroscedastic, and non-Gaussian errors.
Water Resources Research 46, W10531.
--> Steps (6) described on page 5
History
-------
Written, JM, May 2016
"""
SEP_fs = sample_sep01_fs(nn, xi=xi, beta=beta)
# (6) Standardize SEP_fs
mean_sep_fs = sep_fs_mean(xi=xi, beta=beta)
std_sep_fs = sep_fs_std(xi=xi, beta=beta)
sSEP = (SEP_fs - mean_sep_fs) / std_sep_fs # standardized SEP (=Schoups and Vrugt's a_t)
return sSEP | dbeda8efa38db5d55b688c4bfc30350262c39f32 | 13,687 |
def pandas_from_feather(file: str = None) -> pd.DataFrame:
""" Load a feather file to a pandas DataFrame.
    Uses pyarrow to load a feather file into a [pyarrow.Table](https://arrow.apache.org/docs/python/generated/pyarrow.Table.html) and convert to pandas format.
Args:
file (str): the feather file path.
"""
    return feather.read_table(file).to_pandas() | 2bd7679581690095865d9f9d2cae85cf9d736f8d | 13,688
def email_coas():
"""
Email certificates of analysis to their recipients.
"""
# Get the certificate data.
# Email links (optional attachments) to the contacts.
    raise NotImplementedError | b09c6650c498618b77a5e0beab0caf63a2cbf99d | 13,690
import jax.numpy as np
from jax import random
def dropout(x, key, keep_rate):
"""Implement a dropout layer.
Arguments:
x: np array to be dropped out
key: random.PRNGKey for random bits
      keep_rate: probability of keeping each activation (1 - dropout rate)
Returns:
np array of dropped out x
"""
# The shenanigans with np.where are to avoid having to re-jit if
# keep rate changes.
do_keep = random.bernoulli(key, keep_rate, x.shape)
kept_rates = np.where(do_keep, x / keep_rate, 0.0)
return np.where(keep_rate < 1.0, kept_rates, x) | f9686e64a11e17ca35eefacaa8f0b356cc0f065e | 13,691 |
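# Illustrative usage sketch (assumes JAX): keep roughly 70% of activations during training.
import jax.numpy as jnp
from jax import random as jrandom
key = jrandom.PRNGKey(0)
acts = jnp.ones((2, 4))
dropped = dropout(acts, key, keep_rate=0.7)
# surviving entries are rescaled to 1/0.7 ≈ 1.43, dropped entries become 0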
def band_spd_spin_polarized(
folder,
output='band_spd_sp.png',
scale_factor=2,
order=['s', 'p', 'd'],
color_dict=None,
legend=True,
linewidth=0.75,
band_color='black',
unprojected_band_color='gray',
unprojected_linewidth=0.6,
fontsize=7,
annotations=['$\\uparrow$ ', '$\\downarrow$ '],
annotation_xy=(0.02, 0.98),
figsize=(4, 3),
erange=[-6, 6],
stack='vertical',
hse=False,
kpath=None,
n=None,
save=True,
):
"""
    This function generates a spin polarized s, p, d projected band structure. It produces two panels
    stacked on top of each other or next to each other. The top (or left) panel projects onto the
    spin-up bands and the bottom (or right) panel projects onto the spin-down bands.
Parameters:
folder (str): This is the folder that contains the VASP files
output (str): File name of the resulting plot.
scale_factor (float): Factor to scale weights. This changes the size of the
points in the scatter plot
order (list): This determines the order in which the points are plotted on the
graph. This is an option because sometimes certain orbitals can be hidden
under others because they have a larger weight. For example, if the
weights of the d orbitals are greater than that of the s orbitals, it
might be smart to choose ['d', 'p', 's'] as the order so the s orbitals are
plotted over the d orbitals.
        color_dict (dict[str][str]): This option allows the colors of the s, p, and d
orbitals to be specified. Should be in the form of:
{'s': <s color>, 'p': <p color>, 'd': <d color>}
legend (bool): Determines if the legend should be included or not.
linewidth (float): Line width of the plain band structure plotted in the background
band_color (str): Color of the plain band structure
unprojected_band_color (str): Color of the unprojected band
unprojected_linewidth (float): Line width of the unprojected bands
annotations (list): Annotations to put on the top and bottom (left and right) figures.
By default it will show the spin up and spin down arrows.
        annotation_xy (list / tuple): Fractional (x, y) coordinates of the annotation location
figsize (list / tuple): Desired size of the image in inches (width, height)
erange (list / tuple): Range of energy to show in the plot [low, high]
stack (str): Determines how the plots are stacked (vertical or horizontal)
kpath (str): High symmetry k-point path of band structure calculation
Due to the nature of the KPOINTS file for HSE calculations this
information is a required input for proper labeling of the figure
for HSE calculations. This information is extracted from the KPOINTS
            files for non-HSE calculations. (G is automatically converted to \\Gamma)
n (int): Number of points between each high symmetry points.
This is also only required for HSE calculations. This number should be
known by the user, as it was used to generate the KPOINTS file.
fontsize (float): Font size of the text in the figure.
        save (bool): Determines whether to automatically save the figure or not. If not,
            the figure and axes are returned for further manipulation.
Returns:
If save == True, this function will return nothing and directly save the image as
the output name. If save == False, the function will return the matplotlib figure
        and axes for further editing. (fig, ax1, ax2)
"""
band_up = Band(
folder=folder,
spin='up',
projected=True,
hse=hse,
kpath=kpath,
n=n,
)
band_down = Band(
folder=folder,
spin='down',
projected=True,
hse=hse,
kpath=kpath,
n=n,
)
if stack == 'vertical':
fig = plt.figure(figsize=(figsize[0], 2 * figsize[1]), dpi=400)
ax1 = fig.add_subplot(211)
ax2 = fig.add_subplot(212)
_figure_setup(ax=ax1, fontsize=fontsize, ylim=[erange[0], erange[1]])
_figure_setup(ax=ax2, fontsize=fontsize, ylim=[erange[0], erange[1]])
elif stack == 'horizontal':
fig = plt.figure(figsize=(2 * figsize[0], figsize[1]), dpi=400)
ax1 = fig.add_subplot(121)
ax2 = fig.add_subplot(122)
_figure_setup(ax=ax1, fontsize=fontsize, ylim=[erange[0], erange[1]])
_figure_setup(ax=ax2, fontsize=fontsize, ylim=[erange[0], erange[1]])
bbox = dict(boxstyle='round', fc='white',
edgecolor='gray', alpha=0.95, pad=0.3)
ax1.annotate(
annotations[0],
xy=annotation_xy,
xycoords='axes fraction',
va='top',
ha='left',
bbox=bbox,
fontsize=fontsize,
)
ax2.annotate(
annotations[1],
xy=annotation_xy,
xycoords='axes fraction',
va='top',
ha='left',
bbox=bbox,
fontsize=fontsize,
)
band_up.plot_spd(
ax=ax1,
scale_factor=scale_factor,
order=order,
color_dict=color_dict,
legend=legend,
linewidth=linewidth,
band_color=band_color,
)
band_down.plot_plain(
ax=ax1,
color=unprojected_band_color,
linewidth=unprojected_linewidth,
)
band_down.plot_spd(
ax=ax2,
scale_factor=scale_factor,
order=order,
color_dict=color_dict,
legend=legend,
linewidth=linewidth,
band_color=band_color,
)
band_up.plot_plain(
ax=ax2,
color=unprojected_band_color,
linewidth=unprojected_linewidth,
)
plt.tight_layout(pad=0.2)
if save:
plt.savefig(output)
else:
return fig, ax1, ax2 | 4cd0ef74a2ad4ce46d28aad296a9156ec91dc301 | 13,692 |
def initial_queries(bo):
"""
script which explores the initial query points of a BayesianOptimization
instance, reports errors to Slack
Input: instance of a BayesianOptimization
"""
# loop to try a second time in case of error
errcount = 0
for i in range(2):
try:
bo.maximize(init_points=3, n_iter=1, kappa=5) # would be just this line without errorhandling
        except KeyboardInterrupt:
raise
except:
if errcount == 1:
text = "Exception occured twice in initialization, aborting!"
print(text)
sc.api_call("chat.postMessage",channel="CA26521FW",
text=text,username="Botty",
unfurl_links="true")
raise
            errcount += 1
return bo | 3419cd89724a23296688f321469a68c8209d2a25 | 13,693 |
def cell2AB(cell):
"""Computes orthogonalization matrix from unit cell constants
:param tuple cell: a,b,c, alpha, beta, gamma (degrees)
:returns: tuple of two 3x3 numpy arrays (A,B)
A for crystal(x) to Cartesian(X) transformations A*x = np.inner(A,x) =X
B (= inverse of A) for Cartesian to crystal transformation
B*X = np.inner(B,X) = x
in reciprocal space
X* = B.T @ x* or x @ B
A = |ax bx cx| B = |a*x a*y a*z|
|ay by cy| |b*x b*y b*z|
|az bz cz| |c*x c*y c*z|
"""
G, g = cell2Gmat(cell)
cellstar = Gmat2cell(G)
A = np.zeros(shape=(3, 3))
# from Giacovazzo (Fundamentals 2nd Ed.) p.75
A[0, 0] = cell[0] # a
A[0, 1] = cell[1] * cosd(cell[5]) # b cos(gamma)
A[0, 2] = cell[2] * cosd(cell[4]) # c cos(beta)
A[1, 1] = cell[1] * sind(cell[5]) # b sin(gamma)
# - c cos(alpha*) sin(beta)
A[1, 2] = -cell[2] * cosd(cellstar[3]) * sind(cell[4])
A[2, 2] = 1. / cellstar[2] # 1/c*
B = nl.inv(A)
return A, B | 970acf484a701efcdb024e7cad5981ded314209e | 13,695 |
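# Illustrative usage sketch (assumes the surrounding GSAS-II lattice helpers cell2Gmat,
# Gmat2cell, cosd, sind and numpy/numpy.linalg are importable): for a cubic cell the
# orthogonalization matrix reduces to a*I.
A, B = cell2AB((4.0, 4.0, 4.0, 90.0, 90.0, 90.0))
# A is approximately diag(4, 4, 4) and B approximately diag(0.25, 0.25, 0.25)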
def sendMessage(qry):
"""
    Handle message sending: refresh the cached friends list if the query asks for an update, otherwise send the message.
:param qry: current query
:return: Status of Message sending.
"""
try: getUserName()
except: return _skypeError()
if(qry == "skype update"):
_writeFriends()
_getAvatars()
return len(_readFriends()).__str__()+" friends found and cached!"
else:
m = qry.partition(": ")
ret = skype("MESSAGE " + m[0]+" "+m[2])
if("SENDING" in ret):
return "Message sent to "+m[0]
else:
return "ERROR sending message to: "+m[0] | c13e187170015d3e9a786ceb7cb9a364928fa8c0 | 13,697 |
def scrape_detail_page(response):
"""
    Extract ebook details from a detail page response and return them as a dict.
"""
root = lxml.html.fromstring(response.content)
ebook = {
'url': response.url,
'title': root.cssselect('#bookTitle')[0].text_content(),
'price': root.cssselect('.buy')[0].text,
'content': [h3.text_content() for h3 in root.cssselect('#content > h3')],
}
return ebook | 5c3b7e743cd109fe2d05e0cc261e46884c673421 | 13,698 |
from tqdm import tqdm
from pathlib import Path
import torch
def reload_from_numpy(device, metadata, reload_dir):
"""Reload the output of voice conversion model."""
conv_mels = []
for pair in tqdm(metadata["pairs"]):
file_path = Path(reload_dir) / pair["mel_path"]
conv_mel = torch.load(file_path)
conv_mels.append(conv_mel.to(device))
return metadata, conv_mels | 7cf5b2c1f12886f8fcded9072a86c53384b93760 | 13,699 |
def jaccard_similarity_coefficient(A, B, no_positives=1.0):
"""Returns the jaccard index/similarity coefficient between A and B.
This should work for arrays of any dimensions.
J = len(intersection(A,B)) / len(union(A,B))
To extend to probabilistic input, to compute the intersection, use the min(A,B).
To compute the union, use max(A,B).
Assumes that a value of 1 indicates the positive values.
A value of 0 indicates the negative values.
If no positive values (1) in either A or B, then returns no_positives.
"""
# Make sure the shapes are the same.
if not A.shape == B.shape:
raise ValueError("A and B must be the same shape")
# Make sure values are between 0 and 1.
if np.any( (A>1.) | (A<0) | (B>1.) | (B<0)):
raise ValueError("A and B must be between 0 and 1")
# Flatten to handle nd arrays.
A = A.flatten()
B = B.flatten()
intersect = np.minimum(A,B)
union = np.maximum(A, B)
# Special case if neither A or B have a 1 value.
if union.sum() == 0:
return no_positives
# Compute the Jaccard.
J = float(intersect.sum()) / union.sum()
return J | fe408565827f61323513d7d3b562bd79a23e47ec | 13,700 |
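# Illustrative usage sketch: two binary masks sharing 1 positive pixel out of 3 in the union.
import numpy as np
A = np.array([[1, 1], [0, 0]])
B = np.array([[0, 1], [1, 0]])
jaccard_similarity_coefficient(A, B)  # -> 1/3 ≈ 0.333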
def get_argument_from_call(call_node: astroid.Call,
position: int = None,
keyword: str = None) -> astroid.Name:
"""Returns the specified argument from a function call.
:param astroid.Call call_node: Node representing a function call to check.
:param int position: position of the argument.
:param str keyword: the keyword of the argument.
:returns: The node representing the argument, None if the argument is not found.
:rtype: astroid.Name
:raises ValueError: if both position and keyword are None.
:raises NoSuchArgumentError: if no argument at the provided position or with
the provided keyword.
"""
if position is None and keyword is None:
raise ValueError('Must specify at least one of: position or keyword.')
if position is not None:
try:
return call_node.args[position]
except IndexError:
pass
if keyword and call_node.keywords:
for arg in call_node.keywords:
if arg.arg == keyword:
return arg.value
raise NoSuchArgumentError | e4b7e054c4728f5b74bcbbe1678816a910f64bda | 13,701 |
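# Illustrative usage sketch (assumes astroid is installed): pull arguments out of a parsed call.
import astroid
call = astroid.extract_node('open("data.txt", mode="r")')
first_arg = get_argument_from_call(call, position=0)     # Const node for "data.txt"
mode_arg = get_argument_from_call(call, keyword="mode")  # Const node for "r"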
def snake_string(ls):
"""
Question 7.11: Write a string sinusoidally
"""
result = []
strlen = len(ls)
    for idx in range(1, strlen, 4):
        result.append(ls[idx])
    for idx in range(0, strlen, 2):
        result.append(ls[idx])
    for idx in range(3, strlen, 4):
        result.append(ls[idx])
return ''.join(result) | 391f7cef4289c5746f77598501aeaa7ae93d31bc | 13,702 |
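# Sanity-check sketch of the sinusoidal ("snake") ordering implemented above.
print(snake_string("Hello World!"))  # expected: "e lHloWrdlo!"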
def _prepare_memoization_key(args, kwargs):
"""
Make a tuple of arguments which can be used as a key
for a memoized function's lookup_table. If some object can't be hashed
then used its __repr__ instead.
"""
key_list = []
for arg in args:
try:
hash(arg)
key_list.append(arg)
except:
key_list.append(repr(arg))
for (k, v) in kwargs.items():
try:
hash(k)
hash(v)
key_list.append((k, v))
except:
key_list.append((repr(k), repr(v)))
return tuple(key_list) | c83e08c42886ba0e7f6e4defe5bc8f53f5682657 | 13,703 |
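# Illustrative sketch (not part of the original snippet) of how this key builder is
# typically combined with a lookup table in a memoization decorator.
from functools import wraps
def memoized(func):
    lookup_table = {}
    @wraps(func)
    def wrapper(*args, **kwargs):
        key = _prepare_memoization_key(args, kwargs)
        if key not in lookup_table:
            lookup_table[key] = func(*args, **kwargs)
        return lookup_table[key]
    return wrapper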
import chex
import jax
import jax.numpy as jnp
def kl_divergence_with_logits(p_logits = None,
q_logits = None,
temperature = 1.):
"""Compute the KL between two categorical distributions from their logits.
Args:
p_logits: [..., dim] array with logits for the first distribution.
q_logits: [..., dim] array with logits for the second distribution.
temperature: the temperature for the softmax distribution, defaults at 1.
Returns:
an array of KL divergence terms taken over the last axis.
"""
chex.assert_type([p_logits, q_logits], float)
chex.assert_equal_shape([p_logits, q_logits])
p_logits /= temperature
q_logits /= temperature
p = jax.nn.softmax(p_logits)
log_p = jax.nn.log_softmax(p_logits)
log_q = jax.nn.log_softmax(q_logits)
kl = jnp.sum(p * (log_p - log_q), axis=-1)
## KL divergence should be positive, this helps with numerical stability
loss = jax.nn.relu(kl)
return loss | 1950dea9e5c6d040ce464e0861b09469742810c4 | 13,704 |
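# Illustrative usage sketch (assumes JAX and chex): KL between two 3-way categoricals.
import jax.numpy as jnp
p_logits = jnp.array([[2.0, 0.0, -1.0]])
q_logits = jnp.array([[0.5, 0.5, 0.5]])
kl = kl_divergence_with_logits(p_logits, q_logits)  # shape (1,), non-negative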
from typing import Any
from datetime import datetime
def convert_bosch_datetime(dt: Any = None) -> datetime:
"""Create a datetime object from the string (or give back the datetime object) from Bosch. Checks if a valid number of milliseconds is sent."""
if dt:
if isinstance(dt, str):
if dt.find(".") > 0:
return datetime.strptime(dt, "%Y-%m-%dT%H:%M:%S.%f%z")
return datetime.strptime(dt, "%Y-%m-%dT%H:%M:%S%z")
if isinstance(dt, datetime):
return dt
return None | 845e9de019b700b2ab37ebb4a1b577d0bd068638 | 13,705 |
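# Illustrative usage sketch: both timestamp variants accepted above (Python 3.7+ is
# assumed, since %z must parse the colon in the UTC offset).
dt_with_ms = convert_bosch_datetime("2021-03-01T12:30:00.123+01:00")
dt_without_ms = convert_bosch_datetime("2021-03-01T12:30:00+01:00")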
def day_log_add_id(day_log):
"""
その日のログにID(day_id)を割り振る
:param day_log:
:return:
"""
for v in range(len(day_log)):
day_log[v]['day_id'] = v + 1
return day_log | c4608b07e86c074a11cf78d171490ec152092eeb | 13,706 |
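# Illustrative usage sketch: day_id is assigned in list order, starting at 1.
log = [{"event": "wake"}, {"event": "work"}]
day_log_add_id(log)
# -> [{'event': 'wake', 'day_id': 1}, {'event': 'work', 'day_id': 2}]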
def cisco_ios_l3_acl_parsed():
"""Cisco IOS L3 Interface with ip address, acl, description and vlan."""
vlan = Vlan(id="300", encapsulation="dot1Q")
ipv4 = IPv4(address="10.3.3.13", mask="255.255.255.128")
acl_in = ACL(name="Common_Client_IN", direction="in")
acl_out = ACL(name="TEST_ACL_03", direction="out")
interface = Interface(
name="FastEthernet0/0.300",
description='"Test logical subinterface 3"',
vlans=[vlan],
ipv4=[ipv4],
acl=[acl_in, acl_out],
)
parsed_config = interface.dict()
return parsed_config | 25c7ad34695499bb6426ff71a9893c233b54a925 | 13,707 |
def brillance(p, g, m = 255):
"""
    p < 0: decrease the brightness
    p > 0: increase the brightness
"""
if (p + g < m + 1) and (p + g > 0):
return int(p + g)
elif p + g <= 0:
return 0
else:
return m | b40169e487521c146c4c0777517492205951cf16 | 13,708 |
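# Illustrative usage sketch: shift a pixel value and clamp the result to [0, m].
brillance(40, 200)   # -> 240
brillance(100, 200)  # -> 255 (clamped to the maximum m)
brillance(-300, 50)  # -> 0 (clamped to zero)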
def payback(request):
"""
微信支付回调函数
:param request:
:return:
"""
return HttpResponse('payback') | e178abe0effe6359a664dca434e181390c1a56c1 | 13,710 |
from datetime import datetime
import pandas as pd
def get_index_shares(name, end_date=None):
    """Get the list of index constituent symbols for a given trading day.
symbols = get_index_shares("上证50", "2019-01-01 09:30:00")
"""
if not end_date:
end_date = datetime.now().strftime(date_fmt)
else:
end_date = pd.to_datetime(end_date).strftime(date_fmt)
constituents = get_history_constituents(indices[name], end_date, end_date)[0]
symbol_list = [k for k, v in constituents['constituents'].items()]
return list(set(symbol_list)) | 7a9e2890d0508b00d15da4688980736776199cfa | 13,711 |
def erfcx(x):
"""Elementwise scaled complementary error function.
.. note::
Forward computation in CPU cannot be done if
`SciPy <https://www.scipy.org/>`_ is not available.
Args:
x (:class:`~chainer.Variable` or :ref:`ndarray`): Input variable.
Returns:
~chainer.Variable: Output variable.
"""
return Erfcx().apply((x,))[0] | 60f1655a6e390ca935f80d33e0d9156879b56c41 | 13,712 |
def fetch_data_async(blob, start_index, end_index, rpc=None):
"""Asynchronously fetches data for a blob.
Fetches a fragment of a blob up to `MAX_BLOB_FETCH_SIZE` in length. Attempting
to fetch a fragment that extends beyond the boundaries of the blob will return
the amount of data from `start_index` until the end of the blob, which will be
a smaller size than requested. Requesting a fragment that is entirely outside
the boundaries of the blob will return an empty string. Attempting to fetch a
negative index will raise an exception.
Args:
blob: A `BlobInfo`, `BlobKey`, string, or Unicode representation of
the `BlobKey` of the blob from which you want to fetch data.
start_index: The start index of blob data to fetch. This value must not be
negative.
end_index: The end index (inclusive) of the blob data to fetch. This value
must be greater than or equal to `start_index`.
rpc: Optional UserRPC object.
Returns:
A UserRPC whose result will be a string as returned by `fetch_data()`.
Raises:
TypeError: If `start_index` or `end_index` are not indexes, or if `blob` is
not a string, `BlobKey` or `BlobInfo`.
DataIndexOutOfRangeError: If `start_index` is set to a value that is less
than 0 or `end_index` is less than `start_index` when calling
`rpc.get_result()`.
BlobFetchSizeTooLargeError: If the requested blob fragment is larger than
`MAX_BLOB_FETCH_SIZE` when calling `rpc.get_result()`.
BlobNotFoundError: If the blob does not exist when calling
`rpc.get_result()`.
"""
if isinstance(blob, BlobInfo):
blob = blob.key()
return blobstore.fetch_data_async(blob, start_index, end_index, rpc=rpc) | 518f1ef45c19b8a7be55940d9abdeaf0fe014835 | 13,713 |
def get_legal_moves(color, size, board):
"""
Get Legal Moves
"""
legal_moves = {}
for y in range(size):
for x in range(size):
reversibles = get_reversibles(color, size, board, x, y)
if reversibles:
legal_moves[(x, y)] = reversibles
return legal_moves | eaab0b7fededbe660b02974f675877b97e3327f4 | 13,714 |
def edition(self, key, value):
"""Translates edition indicator field."""
sub_a = clean_val("a", value, str)
if sub_a:
return sub_a.replace("ed.", "")
raise IgnoreKey("edition") | 715724dffb4ef6d72c173afbf8186acfdf9f20e3 | 13,715 |
from typing import Dict
from typing import Set
import itertools
def get_site_data(hostname: str) -> SiteData:
"""Get metadata about a site from the API"""
url = f"https://{hostname}/w/api.php"
data = dict(
action="query",
meta="siteinfo",
siprop="|".join(
[
"namespaces",
"namespacealiases",
"specialpagealiases",
"magicwords",
"general",
]
),
formatversion="2",
format="json",
)
res_json = backoff_retry("get", url, params=data, output="json")
namespaces: Dict[str, Set[str]] = {}
all_namespaces = res_json["query"]["namespaces"]
namespace_aliases = res_json["query"]["namespacealiases"]
for namespace, nsdata in all_namespaces.items():
namespaces.setdefault(namespace, set()).update(
[
datasources.normal_name(nsdata.get("canonical", "").lower()),
datasources.normal_name(nsdata.get("name", "").lower()),
]
)
for nsdata in namespace_aliases:
namespaces.setdefault(str(nsdata["id"]), set()).add(
datasources.normal_name(nsdata.get("alias", "").lower())
)
specialpages = {
item["realname"]: item["aliases"]
for item in res_json["query"]["specialpagealiases"]
}
magicwords = {
item["name"]: item["aliases"] for item in res_json["query"]["magicwords"]
}
general = res_json["query"]["general"]
contribs = {datasources.normal_name(name) for name in specialpages["Contributions"]}
subst = list(
itertools.chain(
magicwords.get("subst", ["SUBST"]),
[item.lower() for item in magicwords.get("subst", ["SUBST"])],
[item[0] + item[1:].lower() for item in magicwords.get("subst", ["SUBST"])],
)
)
sitedata = SiteData(
user=namespaces["2"] - {""},
user_talk=namespaces["3"] - {""},
file=namespaces["6"] - {""},
special=namespaces["-1"] - {""},
contribs=contribs,
subst=subst,
dbname=general["wikiid"],
hostname=hostname,
)
return sitedata | 83ca853c6fb2ebadf6473b8f5da0008b145717b0 | 13,716 |
def clear_monitor(nodenet_uid, monitor_uid):
"""Leaves the monitor intact, but deletes the current list of stored values."""
micropsi_core.runtime.get_nodenet(nodenet_uid).get_monitor(monitor_uid).clear()
return True | ad39c344f41fcf307f85d09add71eeeac66b30c1 | 13,717 |
def loadGrammarFrom(filename, data=None):
"""Return the text of a grammar file loaded from the disk"""
with open(filename, 'r') as f:
text = f.read()
lookup = mako.lookup.TemplateLookup(directories=[relativePath('grammars')])
template = mako.template.Template(text, lookup=lookup)
#
base_data = {}
base_data.update(BASE_GRAMMAR_SETTINGS)
#
if data:
for k, v in data.items():
if v is not None:
base_data[k] = v
#
return str(template.render(**base_data)) | 0a0bbd0f2af5db4c673d7dbd31259a3977adb9cf | 13,718 |
def create_generator_selfatt(generator_inputs, generator_outputs_channels, flag_I=True):
"""
Add Conditional Self-Attention Modual to the U-Net Generator.
By default, 256x256 => 256x256
Args:
generator_inputs: a tensor of input images, [b, h, w, n], with each pixel value [-1, 1].
generator_outputs_channels: the number of generator output channels.
flag_I: bool flag to indicate if add conditional input to self-attention layer.
Returns:
layers[-1]: the output of generator, eg the generated images batch, [b, h, w, n], with each pixel value [-1, 1].
beta_list: list of beta matrics, save to visualize attention maps.
Note: a beta matrix is too large to view directly, visualize it row by row as attention maps
"""
# save output of layers for skip connections
layers = []
###################### encoder ###########################################
# encoder_1: [batch, 256, 256, in_channels] => [batch, 128, 128, ngf]
with tf.variable_scope("encoder_1"):
output = ops.conv(generator_inputs, channels=a.ngf, kernel=4, stride=2, pad=1, sn=a.sn)
output = ops.lrelu(output, 0.2)
# consider: append output before/after lrelu.
# Why not use batch norm in the first layer?
layers.append(output)
# encoder information, (out_channels)
encoder_layers = [
(a.ngf * 2), # encoder_2: [batch, 128, 128, ngf] => [batch, 64, 64, ngf * 2]
(a.ngf * 4), # encoder_3: [batch, 64, 64, ngf * 2] => [batch, 32, 32, ngf * 4]
(a.ngf * 8), # encoder_4: [batch, 32, 32, ngf * 4] => [batch, 16, 16, ngf * 8]
(a.ngf * 8), # encoder_5: [batch, 16, 16, ngf * 8] => [batch, 8, 8, ngf * 8]
(a.ngf * 8), # encoder_6: [batch, 8, 8, ngf * 8] => [batch, 4, 4, ngf * 8]
# a.ngf * 8, # encoder_7: [batch, 4, 4, ngf * 8] => [batch, 2, 2, ngf * 8]
# a.ngf * 8, # encoder_8: [batch, 2, 2, ngf * 8] => [batch, 1, 1, ngf * 8]
]
beta_list = []
for i, out_channels in enumerate(encoder_layers):
with tf.variable_scope("encoder_%d" % (len(layers) + 1)):
# [batch, in_height, in_width, in_channels] => [batch, in_height/2, in_width/2, out_channels]
# Conv + BN + leakyReLU + [selfatt]
output = ops.conv(layers[-1], channels=out_channels, kernel=4, stride=2, pad=1, sn=a.sn)
output = batchnorm(output) # not use ops.batch_norm, because do not know its update strategy
output = ops.lrelu(output, 0.2)
if a.enc_atten[i]=='T':
output, beta = selfatt(output, tf.image.resize_images(generator_inputs, output.shape[1:3]), out_channels, flag_I=flag_I, channel_fac=a.channel_fac)
beta_list.append(beta)
layers.append(output)
###################### decoder ###########################################
# Explictly assign decoder to /gpu:1
# Consider: layers[] is assign to /gpu:0 by default, skip connections involve communication between GPUs.
with tf.device("/gpu:1"):
# decoder information: (out_channels, dropout rate)
decoder_layers = [
# (a.ngf * 8, 0.0), # decoder_8: [batch, 1, 1, ngf * 8] => [batch, 2, 2, ngf * 8 * 2]
# (a.ngf * 8, 0.0), # decoder_7: [batch, 2, 2, ngf * 8 * 2] => [batch, 4, 4, ngf * 8 * 2]
(a.ngf * 8, 0.0), # decoder_6: [batch, 4, 4, ngf * 8 * 2] => [batch, 8, 8, ngf * 8 * 2]
(a.ngf * 8, 0.0), # decoder_5: [batch, 8, 8, ngf * 8 * 2] => [batch, 16, 16, ngf * 8 * 2]
(a.ngf * 4, 0.0), # decoder_4: [batch, 16, 16, ngf * 8 * 2] => [batch, 32, 32, ngf * 4 * 2]
(a.ngf * 2, 0.0), # decoder_3: [batch, 32, 32, ngf * 4 * 2] => [batch, 64, 64, ngf * 2 * 2]
(a.ngf, 0.0), # decoder_2: [batch, 64, 64, ngf * 2 * 2] => [batch, 128, 128, ngf * 2]
]
num_encoder_layers = len(layers)
for decoder_layer, (out_channels, dropout) in enumerate(decoder_layers):
skip_layer = num_encoder_layers - decoder_layer - 1
with tf.variable_scope("decoder_%d" % (skip_layer + 1)):
if decoder_layer == 0 or decoder_layer >= a.num_unet:
# first decoder layer is directly connected to the skip_layer
# a.num_unet controls the number of skip connections
input = layers[-1]
else:
input = tf.concat([layers[-1], layers[skip_layer]], axis=3)
# [batch, in_height, in_width, in_channels] => [batch, in_height*2, in_width*2, out_channels]
# Up-sample + 1x1 Conv + BN + leakyReLU + [selfatt] + [dropout]
output = ops.up_sample(input, scale_factor=2) #use upsample+conv replace deconv to advoid checkboard effect
output = ops.conv(output, channels=out_channels, kernel=3, stride=1, pad=1, sn=True)
output = batchnorm(output)
output = ops.lrelu(output)
                if a.dec_atten[decoder_layer]=='T':
output, beta = selfatt(output, tf.image.resize_images(generator_inputs, output.shape[1:3]), out_channels, flag_I=flag_I, channel_fac=a.channel_fac)
beta_list.append(beta)
if dropout > 0.0:
output = tf.nn.dropout(output, keep_prob=1 - dropout)
layers.append(output)
with tf.device("/gpu:1"):
# decoder_1: [batch, 128, 128, ngf * 2] => [batch, 256, 256, generator_outputs_channels]
with tf.variable_scope("decoder_1"):
output = tf.concat([layers[-1], layers[0]], axis=3)
output = tf.nn.relu(output)
output = deconv(output, generator_outputs_channels)
output = tf.tanh(output)
layers.append(output)
return layers[-1], beta_list | bfcc81955c7849e84053c45ea7a593570059bf28 | 13,719 |
def by_tag(articles_by_tag, tag):
""" Filter a list of (tag, articles) to list of articles by tag"""
for a in articles_by_tag:
if a[0].slug == tag:
return a[1] | 642472a89cb624ed02a6e8ec488b72856ac231a9 | 13,720 |
def experiment(dataset='SUPPORT', quantiles=(0.25, 0.5, 0.75), prot_att='race',
groups=('black', 'white'), model='dcm', adj='KM',
cv_folds=5, seed=100, hyperparams=None, plot=True, store=False):
"""Top level interface to train and evaluate proposed survival models.
This is the top level function that is designed to be called directly from
inside a jupyter notebook kernel. This function allows the user to run
    one of the proposed survival analysis models on the selected dataset
    in a cross-validation fashion. The function then plots and
outputs the Expected Calibration Error and ROC characteristic at various
event time quantiles.
Parameters
----------
dataset: str
a string that determines the dataset to run experiments on.
One of "FLCHAIN" or "SUPPORT".
quantiles: list
a list of event time quantiles at which the models are to be evaluated.
prot_att: str
a string that specifies the column in the dataset that is to be treated
as a protected attribute.
groups: list
a list of strings indicating groups on which the survival analysis
models are to be evaluated vis a vis discrimination and calibration.
model: str
the choice of the proposed survival analysis model.
currently supports only "dcm".
adj: str
the choice of adjustment for the L1-ECE: one of
* 'IPCW': Inverse Propensity of Censoring Weighting.
* 'KM': Kaplan-Meier.
cv_folds: int
int that determines the number of Cross Validation folds.
seed: int
numpy random seed.
hyperparams: dict
a dict with hyperparams for the DCM model.
plot: bool
binary flag to determine if the results are to be plotted.
store: bool
whether the models/results are to be stored to disk.
Returns:
a Matplotlib figure with the ROC Curves and Reliability (Calibration) curves
at various event quantiles.
"""
np.random.seed(seed)
fair_strategy = None
(x, t, e, a), folds, quantiles = load_dataset(dataset, cv_folds, prot_att, fair_strategy, quantiles)
trained_model = models.train_model(x, t, e, a, folds, groups, params=hyperparams)
if store:
        store_model(dataset, model, trained_model, hyperparams)
if plot:
outputs = predict(trained_model, model, x, t, e, a, folds, quantiles, fair_strategy)
results = plots.plot_results(outputs, x, e, t, a, folds, groups,
quantiles, strat='quantile', adj=adj)
return results | 79ec44d4d62a42dea4f7e612cd4291ce8fbc5585 | 13,721 |
def ldns_str2rdf_type(*args):
"""LDNS buffer."""
return _ldns.ldns_str2rdf_type(*args) | d121f8534c64b7597d775e5443b706c962ec738a | 13,722 |
import hashlib
from functools import partial
import _crypt
def scramble(password, message):
"""scramble message with password"""
scramble_length = 20
sha_new = partial(hashlib.new, 'sha1')
if not password:
return b''
stage1 = sha_new(password).digest()
stage2 = sha_new(stage1).digest()
buf = sha_new()
buf.update(message[:scramble_length])
buf.update(stage2)
result = buf.digest()
return _crypt(result, stage1) | 9ad006a5626d7b4ca3f8220dc4cbdd719a3cbac8 | 13,723 |
def dp_port_id(switch: str, port: str) -> str:
"""
Return a unique id of a DP switch port based on switch name and port name
:param switch:
:param port:
:return:
"""
return 'port+' + switch + ':' + port | 479891e41b51114744dcbb2b177180c19cd1bfd5 | 13,724 |
import requests
def request_item(zip_code, only_return_po_boxes=False, spatial_reference='4326'):
"""
Request data for a single ZIP code, either routes or PO boxes.
Note that the spatial reference '4326' returns latitudes and longitudes of results.
"""
url = BASE_URL.format(
zip_code=str(zip_code),
spatial_reference=str(spatial_reference),
route_or_box='B' if only_return_po_boxes else 'R'
)
response = requests.get(url)
response.raise_for_status()
return response.json() | 956a2a86f0960a888046bfd5a8e3c2d7c56bc9dc | 13,725 |
def smoothen_histogram(hist: np.array) -> np.array:
""" Smoothens a histogram with an average filter.
    The filter is defined as multiple convolutions
with a three-tap box filter [1, 1, 1] / 3.
See AOS section 4.1.B.
Args:
hist: A histogram containing gradient orientation counts.
Returns:
hist_smoothed: The histogram after average smoothing.
"""
pad_amount = round(len(smooth_kernel) / 2)
hist_pad = np.pad(hist, pad_width=pad_amount, mode='wrap')
hist_smoothed = np.convolve(hist_pad, smooth_kernel, mode='valid')
return hist_smoothed | bdcc5de3df5aa2aad33653cce237f7f07d825b9d | 13,726 |
from typing import Tuple
def end_point(min_radius: float, max_radius: float) -> Tuple[int, int]:
"""
Generate a random goal that is reachable by the robot arm
"""
# Ensure theta is not 0
theta = (np.random.random() + np.finfo(float).eps) * 2 * np.pi
# Ensure point is reachable
r = np.random.uniform(low=min_radius, high=max_radius)
x = int(r * np.cos(theta))
y = int(r * np.sin(theta))
#x = -53
#y = -84
return x, y | 8d6a79195108e8354fad986f93da5f089b6df0d7 | 13,727 |
def expand_tile(value, size):
"""Add a new axis of given size."""
value = tf.convert_to_tensor(value=value, name='value')
ndims = value.shape.ndims
return tf.tile(tf.expand_dims(value, axis=0), [size] + [1]*ndims) | 50adf652fff47418d1f8f1250a2a6d01f712da76 | 13,728 |
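# Illustrative usage sketch (assumes TensorFlow 2.x): tile a length-3 vector into a [4, 3] batch.
import tensorflow as tf
v = tf.constant([1.0, 2.0, 3.0])
batched = expand_tile(v, size=4)  # shape (4, 3)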
from typing import Mapping
from typing import Any
def parse_header(
info: Mapping[str, Any],
field_meta_data: Mapping[str, FieldMetaData],
component_meta_data: Mapping[str, ComponentMetaData]
) -> Mapping[str, MessageMemberMetaData]:
"""Parse the header.
Args:
info (Mapping[str, Any]): The header.
field_meta_data (Mapping[str, FieldMetaData]): The field metadata.
component_meta_data (Mapping[str, ComponentMetaData]): The component
metadata.
Returns:
Mapping[str, MessageMemberMetaData]: The parsed header.
"""
return _to_message_member_meta_data(info, field_meta_data, component_meta_data) | a8043c62070c540712074c60e01e3c9c3ebfe99b | 13,729 |
def amina_choo(update, context): #3.2.1
"""Show new choice of buttons"""
query = update.callback_query
bot = context.bot
keyboard = [
[InlineKeyboardButton("Yes", callback_data='0'),
InlineKeyboardButton("No", callback_data='00')],
[InlineKeyboardButton("Back",callback_data='3.2')]
]
reply_markup = InlineKeyboardMarkup(keyboard)
bot.edit_message_text(
chat_id=query.message.chat_id,
message_id=query.message.message_id,
text="""We have found a lawyer that suits your needs!""",
)
bot.send_photo(
chat_id=query.message.chat_id,
photo = open("female.jpg",'rb')
)
bot.send_message(
chat_id=query.message.chat_id,
text = """Name: Amina Choo \nCompany: Boo and Ow LLP \nYears of Experience: 8""",
)
bot.send_message(
chat_id=query.message.chat_id,
text = """See more on our website: https://eldoraboo.github.io/PairALegal/amina-choo"""
)
bot.send_message(
chat_id=query.message.chat_id,
text = """Thank you for using Pair-A-Legal bot. \nWould you like to restart?""",
reply_markup = reply_markup
)
return FIRST | b43d2e6d63e111b9a2f70fd71e5da765ef923746 | 13,730 |