content (string, 35–762k chars) | sha1 (string, 40 chars) | id (int64, 0–3.66M)
---|---|---|
from collections import defaultdict
from typing import Dict, List, Set, Union
from rdkit import Chem
from tqdm import tqdm
def scaffold_to_smiles(mols: Union[List[str], List[Chem.Mol]],
                       use_indices: bool = False) -> Dict[str, Union[Set[str], Set[int]]]:
""" Computes the scaffold for each SMILES and returns a mapping from scaffolds to sets of smiles (or indices).
Parameters
----------
mols: A list of SMILES strings or RDKit molecules.
use_indices:
        Whether to map to the SMILES's index in :code:`mols` rather than mapping to the SMILES string itself.
        This is necessary if there are duplicate SMILES strings.
Returns
-------
A dictionary mapping each unique scaffold to all SMILES (or indices) which have that scaffold.
"""
scaffolds = defaultdict(set)
for i, mol in tqdm(enumerate(mols), total=len(mols)):
scaffold = generate_scaffold(mol)
if use_indices:
scaffolds[scaffold].add(i)
else:
scaffolds[scaffold].add(mol)
return scaffolds | 2a45731a5574bb37e81042fa19ac7c2f015c21ef | 14,421 |
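A minimal usage sketch; `generate_scaffold` is not part of the snippet, so a plausible Bemis-Murcko implementation is included here purely as an illustration (assumes RDKit is installed):
from rdkit import Chem
from rdkit.Chem.Scaffolds import MurckoScaffold

def generate_scaffold(mol):
    # Hypothetical helper: accept a SMILES string or an RDKit Mol and return the Murcko scaffold SMILES.
    if isinstance(mol, str):
        mol = Chem.MolFromSmiles(mol)
    return MurckoScaffold.MurckoScaffoldSmiles(mol=mol)

smiles = ['CCc1ccccc1', 'CCCc1ccccc1', 'CCO']
print(scaffold_to_smiles(smiles, use_indices=True))
# defaultdict(set, {'c1ccccc1': {0, 1}, '': {2}})  (acyclic molecules map to an empty scaffold)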
import crypt
def encontrar_passwords():
"""
    Try every 6-letter combination, hashing each one to check whether it
    matches the hashes stored in /etc/shadow.
    For the teams wordlist, I basically kept copying and pasting plain-text
    content from different Wikipedia pages into equipos.txt until the NBA
    page worked.
"""
hashes = [
('ox', 'ox45K6RsEUfmQ', generar_palabras()), # fido
('$1$42dJ1xYh', '$1$42dJ1xYh$MfrRke8/Ej3h5.vMtNEhC.', leer_palabras('./colores.txt')), # white
('$6$SZGpKoPi', '$6$SZGpKoPi$GGGqHYKy6PO/H5nvV0AmaGB/5krnxVuz2k2uX81O.CF5nYctE5RlR/rzJQCL3ZsF8yratCRbSR2ZuwKzvve.D0', leer_palabras('./equipos.txt')), # knicks
]
encontradas = []
for algo_y_salt, hash_resultado, origen_passwords in hashes:
for password in origen_passwords:
            if crypt.crypt(password, algo_y_salt) == hash_resultado:
encontradas.append(password)
break
return encontradas | 5762fed1f5e493c2399d40dbbc1e19ad24c6718e | 14,422 |
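The `generar_palabras()` and `leer_palabras()` helpers are not included in the snippet; a minimal sketch of what they might look like (hypothetical implementations):
import itertools
import string

def generar_palabras():
    # Lazily yield every 6-letter lowercase candidate password (26**6 combinations).
    for combo in itertools.product(string.ascii_lowercase, repeat=6):
        yield ''.join(combo)

def leer_palabras(ruta):
    # Yield whitespace-separated words from a wordlist file, lowercased.
    with open(ruta, encoding='utf-8') as f:
        for linea in f:
            for palabra in linea.split():
                yield palabra.lower()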
from celery import chain, chord, group
def queue_merlin_study(study, adapter):
"""
Launch a chain of tasks based off of a MerlinStudy.
"""
samples = study.samples
sample_labels = study.sample_labels
egraph = study.dag
LOG.info("Calculating task groupings from DAG.")
groups_of_chains = egraph.group_tasks("_source")
# magic to turn graph into celery tasks
LOG.info("Converting graph to tasks.")
celery_dag = chain(
chord(
group(
[
expand_tasks_with_samples.s(
egraph,
gchain,
samples,
sample_labels,
merlin_step,
adapter,
study.level_max_dirs,
).set(queue=egraph.step(chain_group[0][0]).get_task_queue())
for gchain in chain_group
]
),
chordfinisher.s().set(
queue=egraph.step(chain_group[0][0]).get_task_queue()
),
)
for chain_group in groups_of_chains[1:]
)
LOG.info("Launching tasks.")
return celery_dag.delay(None) | 448365e799001d09281707cab69c71c3be05408e | 14,423 |
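For reference, a stripped-down sketch of the same chain-of-chords canvas built with stand-in Celery tasks (hypothetical app and task names, not Merlin's real ones); building the signatures is purely local, and a broker/result backend is only needed to actually run them:
from celery import Celery, chain, chord, group

app = Celery('sketch')  # broker and result backend would be configured for real use

@app.task
def work(step, sample):
    return f'{step}:{sample}'

@app.task
def finish(results):
    # Chord callback: runs once every task in the preceding group has finished.
    return 'SYNC'

# Immutable signatures (.si) keep one chord's result from being injected into the next.
workflow = chain(
    chord(group(work.si('step1', s) for s in range(3)), finish.s()),
    chord(group(work.si('step2', s) for s in range(3)), finish.s()),
)
# workflow.delay() would enqueue the whole graph, as queue_merlin_study does above.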
import math
def sphere_mass(density,radius):
"""Usage: Find the mass of a sphere using density and radius"""
return density*((4/3)*(math.pi)*radius**3) | 8c1a2dc949980ca96a4f56f3918bacd19568965e | 14,424 |
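A quick sanity check with illustrative values (water-like density of 1000 kg/m³ and a 0.1 m radius):
mass = sphere_mass(1000, 0.1)   # density in kg/m^3, radius in m
print(round(mass, 2))           # 4.19 (kg)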
import numpy as np
def generate_stats_table(buildings_clust_df):
"""
Generate statistical analysis table of building types in the area
Args:
buildings_clust_df: building footprints dataframe after performed building blocks assignment (HDBSCAN)
Return:
stat_table: statistical analysis results which contains means and standard deviations values for every building type in the area
"""
# Count
count_table = buildings_clust_df.groupby('building_types')[['building_types']].size().to_frame('count').reset_index()
    # Mean
    mean_table = buildings_clust_df.groupby('building_types')[['surface_area','rectangularity']].mean().reset_index()
    mean_table.columns = ['building_types','mean_surface_area','mean_rectangularity']
# Standard deviation
sd_table=buildings_clust_df.groupby('building_types')[['surface_area','rectangularity']].agg(np.std, ddof=0).reset_index()
# Rename columns
sd_table.columns = ['building_types','sd_surface_area','sd_rectangularity']
stat_table = count_table.merge(mean_table).merge(sd_table)
# Reorder columns
stat_table = stat_table[stat_table.columns[[0,1,3,2,4,5]]]
return stat_table | 732f035e591dc9f0b03673584f3a6e21dad03cad | 14,425 |
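A minimal, self-contained sketch of the expected input and output (hypothetical toy data):
import numpy as np
import pandas as pd

toy = pd.DataFrame({
    'building_types': ['house', 'house', 'shed'],
    'surface_area':   [120.0, 100.0, 20.0],
    'rectangularity': [0.9, 0.8, 0.95],
})
stats = generate_stats_table(toy)
# One row per building type, with count plus mean/sd of surface_area and rectangularity.
print(stats)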
def make_multisat(nucsat_tuples):
"""Creates a rst.sty Latex string representation of a multi-satellite RST subtree
(i.e. merge a set of nucleus-satellite relations that share the same nucleus
into one subtree).
"""
nucsat_tuples = [tup for tup in nucsat_tuples] # unpack the iterable, so we can check its length
assert len(nucsat_tuples) > 1, \
"A multisat relation bundle must contain more than one relation"
    result = "\\dirrel\n\t"
first_relation, remaining_relations = nucsat_tuples[0], nucsat_tuples[1:]
relname, nuc_types, elements = first_relation
first_nucleus_pos = current_nucleus_pos = nuc_types.index('N')
result_segments = []
# add elements (nucleus and satellite) from first relation to resulting (sub)tree
for i, nuc_type in enumerate(nuc_types):
element = elements[i]
if is_edu_segment(element):
element = wrap_edu_segment(element)
if nuc_type == 'N':
result_segments.append(NUC_TEMPLATE.substitute(nucleus=element))
else:
result_segments.append(SAT_TEMPLATE.substitute(satellite=element, relation=relname))
# reorder elements of the remaining relation and add them to the resulting (sub)tree
for (relname, nuc_types, elements) in remaining_relations:
for i, nuc_type in enumerate(nuc_types):
if nuc_type == 'N': # all relations share the same nucleus, so we don't need to reprocess it.
continue
else:
element = elements[i]
if is_edu_segment(element):
element = wrap_edu_segment(element)
result_segment = SAT_TEMPLATE.substitute(satellite=element, relation=relname)
if i < first_nucleus_pos: # satellite comes before the nucleus
result_segments.insert(current_nucleus_pos, result_segment)
current_nucleus_pos += 1
else:
result_segments.append(result_segment)
return result + '\n\t'.join(result_segments) | 16c1808267087beea6cea21811cd3c1f7d70932e | 14,426 |
import io
import matplotlib.pyplot as plt
import tensorflow as tf
def plot_to_image(figure):
"""Converts the matplotlib figure to a PNG image."""
# The function is adapted from
# github.com/tensorflow/tensorboard/blob/master/docs/image_summaries.ipynb
# Save the plot to a PNG in memory.
buf = io.BytesIO()
plt.savefig(buf, format="png")
# Closing the figure prevents it from being displayed directly.
plt.close(figure)
buf.seek(0)
# Convert PNG buffer to TF image
image = tf.image.decode_png(buf.getvalue(), channels=4)
    # tf.summary.image requires 4-D inputs: [num_samples, height, width, channels].
image = tf.expand_dims(image, 0)
return image | 24a63f7b27d47421baf2c7913bf989903e4d9545 | 14,427 |
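A sketch of how the returned tensor is typically logged (assumes TensorFlow 2.x and matplotlib; the log directory and summary name are arbitrary):
import matplotlib.pyplot as plt
import tensorflow as tf

figure = plt.figure()
plt.plot([0, 1, 2], [0, 1, 4])
image = plot_to_image(figure)

writer = tf.summary.create_file_writer('logs/plots')
with writer.as_default():
    tf.summary.image('toy_curve', image, step=0)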
async def getDiscordTwitchAlerts(cls:"PhaazebotDiscord", guild_id:str, alert_id:int=None, limit:int=0, offset:int=0) -> list:
"""
Get server discord alerts, if alert_id = None, get all
else only get one associated with the alert_id
Returns a list of DiscordTwitchAlert().
"""
sql:str = """
SELECT
`discord_twitch_alert`.*,
`twitch_user_name`.`user_name` AS `twitch_channel_name`
FROM `discord_twitch_alert`
LEFT JOIN `twitch_user_name`
ON `discord_twitch_alert`.`twitch_channel_id` = `twitch_user_name`.`user_id`
WHERE `discord_twitch_alert`.`discord_guild_id` = %s"""
values:tuple = ( str(guild_id), )
if alert_id:
sql += " AND `discord_twitch_alert`.`id` = %s"
values += (alert_id,)
if limit:
sql += f" LIMIT {limit}"
if offset:
sql += f" OFFSET {offset}"
res:list = cls.BASE.PhaazeDB.selectQuery(sql, values)
if res:
return [DiscordTwitchAlert(x) for x in res]
else:
return [] | 00c1bad85f4f7891d36e5fab5c651f10c79abf02 | 14,428 |
def is_visible_dir(file_info):
"""Checks to see if the file is a visible directory.
@param file_info: The file to check
@type file_info: a gnomevfs.FileInfo
"""
return is_dir(file_info) and not is_hidden(file_info) | 776361d4cbe16a5b3c45dc3073a37192e31e87a9 | 14,429 |
def read_file(item):
    """Read the file at item['path'] into item['image']."""
item['image'] = tf.read_file(item['path'])
return item | 06b87851717bd486b13f964ad5b45cbdc7a97142 | 14,431 |
def make_joint(withdraw, old_password, new_password):
"""Return a password-protected withdraw function that has joint access to
the balance of withdraw.
>>> w = make_withdraw(100, 'hax0r')
>>> w(25, 'hax0r')
75
>>> make_joint(w, 'my', 'secret')
'Incorrect password'
>>> j = make_joint(w, 'hax0r', 'secret')
>>> w(25, 'secret')
'Incorrect password'
>>> j(25, 'secret')
50
>>> j(25, 'hax0r')
25
>>> j(100, 'secret')
'Insufficient funds'
>>> j2 = make_joint(j, 'secret', 'code')
>>> j2(5, 'code')
20
>>> j2(5, 'secret')
15
>>> j2(5, 'hax0r')
10
>>> j2(25, 'password')
'Incorrect password'
>>> j2(5, 'secret')
"Your account is locked. Attempts: ['my', 'secret', 'password']"
>>> j(5, 'secret')
"Your account is locked. Attempts: ['my', 'secret', 'password']"
>>> w(5, 'hax0r')
"Your account is locked. Attempts: ['my', 'secret', 'password']"
>>> make_joint(w, 'hax0r', 'hello')
"Your account is locked. Attempts: ['my', 'secret', 'password']"
"""
"*** YOUR CODE HERE ***"
x = withdraw(0, old_password)
if type(x) == str:
return x
else:
def withdraw_r(amount, code):
            if code == new_password:
                return withdraw(amount, old_password)
            else:
                return withdraw(amount, code)
return withdraw_r | f40073429aea946486263a7d6e0fc8b24cd60a84 | 14,432 |
def should_parse(config, file):
"""Check if file extension is in list of supported file types (can be configured from cli)"""
return file.suffix and file.suffix.lower() in config.filetypes | 1c2258d405ef715574b557d99cdf87e461627ffd | 14,433 |
from scipy.signal import butter
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import FunctionTransformer
def _get_pipeline_per_subband(subband_name: str):
"""
Constructs a pipeline to extract the specified subband related features.
Output:
sklearn.pipeline.Pipeline object containing all steps to calculate time-domain feature on the specified subband.
"""
freq_range = FREQ_BANDS_RANGE[subband_name]
order = FREQ_BANDS_ORDERS[subband_name]
assert len(
freq_range) == 2, "Frequency range must only have 2 elements: [lower bound frequency, upper bound frequency]"
bounds = [freq / NYQUIST_FREQ for freq in freq_range]
b, a = butter(order, bounds, btype='bandpass')
def filter_epochs_in_specified_subband(epochs):
return epochs.copy().filter(
l_freq=bounds[0],
h_freq=bounds[1],
method='iir',
n_jobs=1,
iir_params={
'a': a,
'b': b
}, verbose=False)
return Pipeline([
('filter', FunctionTransformer(filter_epochs_in_specified_subband, validate=False)),
('get-values', FunctionTransformer(get_data_from_epochs, validate=False)),
('mean-energy', FunctionTransformer(
get_transformer(_get_signal_mean_energy), validate=True
)),
]) | 2b1d8bd2543ae07b861df6f979297a82e3f5e827 | 14,434 |
def get_credentials_interactively() -> Credentials:
""" Gets credentials for the bl interactively
"""
return ("placeholder-user", "placeholder-pass") | b5e4d55015155589632b958252a3c078b5920e59 | 14,435 |
def reynolds(find="Re", printEqs=True, **kwargs):
"""
Reynolds Number = Inertia / Viscosity
"""
eq = list()
eq.append("Eq(Re, rho * U * L / mu)")
return solveEqs(eq, find=find, printEq=printEqs, **kwargs) | df5ad0c0279894e8f671942ceddb64e08e35fa0d | 14,436 |
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import streamlit as st
def data_app():
""" Data Processer and Visualizer """
st.title("Data Cake")
st.subheader("A to Z Data Analysis")
file = ['./dataset/Ac1',[0,1]]
def file_selector():
filename = st.file_uploader("Upload Excel File", type=['xls','xlsx'])
if filename is not None:
sheetnames = pd.ExcelFile(filename).sheet_names
            sheet = st.selectbox("Select Sheet", sheetnames)
return [filename, sheet]
file = file_selector()
# Read Data
try :
df = pd.read_excel(file[0], sheet_name = file[1])
except Exception as e:
st.info("Please upload Excel file")
    # Show Data
try:
if st.checkbox("Show Dataset"):
number = st.number_input("Number of Rows to View",5,10)
st.dataframe(df.head(number))
except Exception as e:
st.info("Please upload Excel file")
# Show Columns
try:
if st.button("Column Names"):
st.write(df.columns)
except Exception as e:
st.info("Please upload Excel file")
# Show Shape
try:
if st.checkbox("Shape of Dataset"):
st.write(df.shape)
except Exception as e:
st.info("Please upload Excel file")
# Select Columns
try:
if st.checkbox("Select Columns To Show"):
all_columns = df.columns.tolist()
selected_columns = st.multiselect("Select",all_columns)
new_df = df[selected_columns]
st.dataframe(new_df)
except Exception as e:
st.info("Please upload Excel file")
# Show Datatypes
try:
if st.button("Data Types"):
st.write(df.dtypes)
except Exception as e:
st.info("Please upload Excel file")
# Show Summary
try:
if st.checkbox("Summary"):
st.write(df.describe().T)
except Exception as e:
st.info("Please upload Excel file")
## Plot and Visualization
st.subheader("Data Visualization")
# Correlation
# Seaborn Plot
if st.checkbox("Correlation Plot[Seaborn]"):
st.write(sns.heatmap(df.corr(),annot=True))
st.pyplot()
# Pie Chart
if st.checkbox("Pie Plot"):
all_columns_names = df.columns.tolist()
if st.button("Generate Pie Plot"):
st.success("Generating A Pie Plot")
st.write(df.iloc[:,-1].value_counts().plot.pie(autopct="%1.1f%%"))
st.pyplot()
# Count Plot
if st.checkbox("Plot of Value Counts"):
st.text("Value Counts By Target")
all_columns_names = df.columns.tolist()
        primary_col = st.selectbox("Primary Column to GroupBy",all_columns_names)
selected_columns_names = st.multiselect("Select Columns",all_columns_names)
if st.button("Plot"):
st.text("Generate Plot")
if selected_columns_names:
vc_plot = df.groupby(primary_col)[selected_columns_names].count()
else:
vc_plot = df.iloc[:,-1].value_counts()
st.write(vc_plot.plot(kind="bar"))
st.pyplot()
#Contour Plot
if st.checkbox("Contour Plot "):
st.text("3D Contour Plot")
all_columns_names = df.columns.tolist()
X = st.selectbox("Select X axis",all_columns_names)
Y = st.selectbox("Select Y axis",all_columns_names,index = 1)
VS = st.selectbox("Select Z axis",all_columns_names,index =2)
Z_F = df.pivot_table(index=X, columns=Y, values=VS).T.values
X_unique = np.sort(df[X].unique())
Y_unique = np.sort(df[Y].unique())
X_F, Y_F = np.meshgrid(X_unique, Y_unique)
pd.DataFrame(Z_F).round(3)
pd.DataFrame(X_F).round(3)
pd.DataFrame(Y_F).round(3)
fig,ax=plt.subplots(1,1)
cp = ax.contourf(X_F, Y_F, Z_F)
fig.colorbar(cp) # Add a colorbar to a plot
st.pyplot(fig=fig)
# Customizable Plot
try:
st.subheader("Customizable Plot")
all_columns_names = df.columns.tolist()
type_of_plot = st.selectbox("Select Type of Plot",["area","bar","line","hist","box","kde"])
selected_columns_names = st.multiselect("Select Columns To Plot",all_columns_names)
if st.button("Generate Plot"):
st.success("Generating Customizable Plot of {} for {}".format(type_of_plot,selected_columns_names))
# Plot By Streamlit
if type_of_plot == 'area':
cust_data = df[selected_columns_names]
st.area_chart(cust_data)
elif type_of_plot == 'bar':
cust_data = df[selected_columns_names]
st.bar_chart(cust_data)
elif type_of_plot == 'line':
cust_data = df[selected_columns_names]
st.line_chart(cust_data)
# Custom Plot
elif type_of_plot:
cust_plot= df[selected_columns_names].plot(kind=type_of_plot)
st.write(cust_plot)
st.pyplot()
if st.button("Ready to ML !"):
st.balloons()
    except Exception:
st.info("Please upload Excel file")
st.sidebar.header("Data Cake")
st.sidebar.info("Built by Veera Ragavan") | b6286064757d276a5319ef0b3cffe1515c4a7fb1 | 14,437 |
import numpy as np
def derivative(x, y, order=1):
"""Returns the derivative of y-coordinates as a function of x-coodinates.
Args:
x (list or array): 1D array x-coordinates.
y (list or array): 1D array y-coordinates.
order (number, optional): derivative order.
Returns:
x and y arrays.
"""
    if order < 1:
        raise ValueError('order must be a positive integer.')
x = np.array(x)
y = np.array(y)
x_diff = np.diff(x)
y_diff = np.diff(y)/x_diff
for i in range(order-1):
y_diff = np.diff(y_diff)/x_diff[:len(x_diff)-(i+1)]
for i in range(order):
x = moving_average(x, n=2)
return x, y_diff | 23a1213721e553e5b72a0bd92877675b499f9848 | 14,438 |
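`moving_average` is not shown in the snippet; a plausible boxcar implementation (hypothetical helper) plus a quick check against the analytic derivative of x²:
import numpy as np

def moving_average(x, n=2):
    # Boxcar average; shortens the array by n-1 points.
    return np.convolve(x, np.ones(n) / n, mode='valid')

x = np.linspace(0, 1, 11)
x_mid, dydx = derivative(x, x**2)
# dydx matches 2*x evaluated at the midpoints x_mid (up to floating point).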
from aiida.orm import Dict  # AiiDA Dict node; typing.Dict cannot be instantiated as below
def get_ff_parameters(wc_params, molecule=None, components=None):
"""Get the parameters for ff_builder."""
ff_params = {
'ff_framework': wc_params['ff_framework'],
'ff_molecules': {},
'shifted': wc_params['ff_shifted'],
'tail_corrections': wc_params['ff_tail_corrections'],
'mixing_rule': wc_params['ff_mixing_rule'],
'separate_interactions': wc_params['ff_separate_interactions']
}
if molecule is not None:
ff_params['ff_molecules'] = {molecule['name']: molecule['forcefield']}
if components is not None:
for value in components.get_dict().values():
ff = value['forcefield'] #pylint: disable=invalid-name
ff_params['ff_molecules'][value['name']] = ff
return Dict(dict=ff_params) | d87008dba0b9d835f71366eb64486b1d18fe2381 | 14,439 |
import healpy as hp
from collections import OrderedDict as odict
def healpix_header_odict(nside,nest=False,ordering='RING',coord=None, partial=True):
"""Mimic the healpy header keywords."""
hdr = odict([])
hdr['PIXTYPE']=odict([('name','PIXTYPE'),
('value','HEALPIX'),
('comment','HEALPIX pixelisation')])
ordering = 'NEST' if nest else 'RING'
hdr['ORDERING']=odict([('name','ORDERING'),
('value',ordering),
('comment','Pixel ordering scheme, either RING or NESTED')])
hdr['NSIDE']=odict([('name','NSIDE'),
('value',nside),
('comment','Resolution parameter of HEALPIX')])
if coord:
hdr['COORDSYS']=odict([('name','COORDSYS'),
('value',coord),
('comment','Ecliptic, Galactic or Celestial (equatorial)')])
if not partial:
hdr['FIRSTPIX']=odict([('name','FIRSTPIX'),
('value',0),
('comment','First pixel # (0 based)')])
hdr['LASTPIX']=odict([('name','LASTPIX'),
('value',hp.nside2npix(nside)-1),
('comment','Last pixel # (0 based)')])
hdr['INDXSCHM']=odict([('name','INDXSCHM'),
('value','EXPLICIT' if partial else 'IMPLICIT'),
('comment','Indexing: IMPLICIT or EXPLICIT')])
hdr['OBJECT']=odict([('name','OBJECT'),
('value','PARTIAL' if partial else 'FULLSKY'),
('comment','Sky coverage, either FULLSKY or PARTIAL')])
return hdr | 1202d7564b94a3c2288a513f42e4b781a583e41c | 14,440 |
def hello():
"""Test endpoint"""
return {'hello': 'world'} | 91ad620815a6371a4723e21bc79aad8c1d49e73e | 14,441 |
import numpy as np
def permute_channels(n_channels, keep_nbr_order=True):
"""Permute the order of neighbor channels
Args:
n_channels: the total number of channels
keep_nbr_order: whether to keep the relative order of neighbors
if true, only do random rotation and flip
if false, random permutation
"""
ch_idx = np.arange(1, n_channels)
if keep_nbr_order:
# rotate and flip
ch_idx = np.roll(ch_idx, np.random.randint(n_channels-1))
if np.random.randint(2) == 1:
ch_idx = ch_idx[::-1]
else:
# random permute
np.random.shuffle(ch_idx)
ch_idx = np.concatenate([[0], ch_idx])
return ch_idx | 5491f181d32a5ff77ef1d9f6ac3327e6b0a746e0 | 14,443 |
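A quick usage check (channel 0 is treated as the centre channel and always stays first):
import numpy as np

np.random.seed(0)
data = np.arange(7)                      # stand-in for 7 per-channel values
idx = permute_channels(7, keep_nbr_order=True)
print(idx[0])                            # 0: the centre channel is never moved
print(sorted(idx) == list(range(7)))     # True: idx is a permutation of 0..6
print(data[idx])                         # neighbours re-ordered by a rotation (and possible flip)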
def from_file(file,typcls):
"""Parse an instance of the given typeclass from the given file."""
s = Stream(file)
return s.read_value(typcls._ep_typedesc) | 90507278f33fe30a73c31f94ab046c07962250cc | 14,444 |
def read_test_ids():
"""
Read sample submission file, list and return all test image ids.
"""
df_test = pd.read_csv(SAMPLE_SUBMISSION_PATH)
ids_test = df_test['img'].map(lambda s: s.split('.')[0])
return ids_test | cc4d53d28631fe0e22cabe30704a1844ff3e3a5b | 14,445 |
def chuseok(year=None):
"""
:parm year: int
:return: Thanksgiving Day of Korea
"""
year = year if year else _year
return LunarDate(year, 8, 15).toSolarDate() | 28a3170153862bda2ae52176d4931ee10050c3e1 | 14,446 |
def DELETE(request):
"""Delete a user's authorization level over a simulation."""
# Make sure required parameters are there
try:
request.check_required_parameters(
path={
'simulationId': 'int',
'userId': 'int'
}
)
except exceptions.ParameterError as e:
return Response(400, e.message)
# Instantiate an Authorization
authorization = Authorization.from_primary_key((
request.params_path['userId'],
request.params_path['simulationId']
))
# Make sure this Authorization exists in the database
if not authorization.exists():
return Response(404, '{} not found.'.format(authorization))
# Make sure this User is allowed to delete this Authorization
if not authorization.google_id_has_at_least(request.google_id, 'OWN'):
return Response(403, 'Forbidden from deleting {}.'.format(authorization))
# Delete this Authorization
authorization.delete()
return Response(
200,
'Successfully deleted {}.'.format(authorization),
authorization.to_JSON()
) | 7ac7c6277126a827790e786cbfdf9f84ccaace7b | 14,447 |
def process_query(data):
"""
Concat query, question, and narrative then 'preprocess'
:data: a dataframe with queries in rows; query, question, and narrative in columns
:return: 2d list of tokens (rows: queries, columns: tokens)
"""
lst_index = []
lst_words = []
for index, row in data.iterrows():
tmp = preprocess(row["query"] +" "+ row["question"]+ " "+row["narrative"])
lst_words.append(tmp)
lst_index.append(row["number"])
return lst_words | 8a8067f1766abdc08aa7b8995508d6cdc9057bd2 | 14,448 |
def nb_view_patches(Yr, A, C, S, b, f, d1, d2, YrA=None, image_neurons=None, thr=0.99, denoised_color=None, cmap='jet'):
"""
Interactive plotting utility for ipython notebook
Args:
Yr: np.ndarray
movie
A,C,b,f: np.ndarrays
outputs of matrix factorization algorithm
d1,d2: floats
dimensions of movie (x and y)
YrA: np.ndarray
ROI filtered residual as it is given from update_temporal_components
If not given, then it is computed (K x T)
image_neurons: np.ndarray
image to be overlaid to neurons (for instance the average)
thr: double
threshold regulating the extent of the displayed patches
denoised_color: string or None
color name (e.g. 'red') or hex color code (e.g. '#F0027F')
cmap: string
name of colormap (e.g. 'viridis') used to plot image_neurons
"""
# PREPROCESSING
nr, T = C.shape
nA2 = np.ravel(np.power(A, 2).sum(0)) if type(
A) == np.ndarray else np.ravel(A.power(2).sum(0))
b = np.squeeze(b)
f = np.squeeze(f)
if YrA is None:
Y_r = np.array(spdiags(old_div(1, nA2), 0, nr, nr) *
(A.T * np.matrix(Yr) -
(A.T * np.matrix(b[:, np.newaxis])) * np.matrix(f[np.newaxis]) -
A.T.dot(A) * np.matrix(C)) + C)
else:
Y_r = C + YrA
x = np.arange(T)
if image_neurons is None:
image_neurons = A.mean(1).reshape((d1, d2), order='F')
coors = get_contours(A, (d1, d2), thr)
cc1 = [cor['coordinates'][:, 0] for cor in coors]
cc2 = [cor['coordinates'][:, 1] for cor in coors]
c1 = cc1[0]
c2 = cc2[0]
# PLOTTING
fig, axes = plt.subplots(2)
axes[0].imshow(image_neurons, cmap = 'gray')
axes[0].set_title('Neural map')
axes[1].plot(C[0], label = 'C: raw traces', c = 'blue')
axes[1].plot(Y_r[0], label = 'Y_r: residuals', c = 'red')
axes[1].plot(S[0], label = 'S: deconvolved activity', c = 'green')
plt.legend()
axes[1].set_xlabel('t [frames]')
# WIDGETS
neuron_nr_slider = IntSlider(description = 'Neuron Number', value = 0, min = 0, max = len(C) - 1)
def neuron_nr_handler(*args):
i = neuron_nr_slider.value
axes[1].clear()
axes[1].plot(C[i], label = 'C: raw traces', c = 'blue')
axes[1].plot(Y_r[i], label = 'Y_r: residuals', c = 'red')
axes[1].plot(S[i], label = 'S: deconvolved activity', c = 'green')
plt.legend()
axes[1].set_xlabel('t [frames]')
neuron_nr_slider.observe(neuron_nr_handler, 'value')
widgets = [neuron_nr_slider]
return fig, widgets | 84db2e40734b21ebb6be5eef8eeb89bbe2838542 | 14,450 |
def menu():
    """Display the menu.
    Options:
        1: Add a blood donor
        2: Add a blood recipient
        3: Review the list of donors
        4: Review the list of recipients
        5: Perform a transfusion
        6: Statistics
        7: Exit
    Returns:
        opc (num): the selected menu option"""
print("\nBienvenido a el sistema de Donacion de Sangre. Elige la accion que deseas realizar.\n1.Añadir Donante de Sangre\n2.Añadir Donatario de Sangre\n3.Revisar lista de Donantes\n4.Revisar Lista de Donatarios\n5.Realizar una transfusion\n6.Estadisticas\n7.Salir")
opc=int(input("Seleccionar: "))
return opc | 805d1ef48fbe03f8185e3c7be71ce3d9aa6104df | 14,451 |
import shutil
def get_engine(hass, config):
"""Set up Pico speech component."""
if shutil.which("pico2wave") is None:
_LOGGER.error("'pico2wave' was not found")
return False
return PicoProvider(config[CONF_LANG]) | d03a19dcff4bc8556a84b434d14468a45ffc7e6c | 14,452 |
from typing import Tuple
from typing import List
def load_cp() -> Tuple[List[str], List[List[float]]]:
"""
Loads cloud point data; target values given in Celsius
Returns:
Tuple[List[str], List[List[float]]]: (smiles strings, target values);
target values have shape (n_samples, 1)
"""
return _load_set('cp') | 84b11da1b3cd1a9ecaf5e1419d69b877c160e2aa | 14,453 |
def look_up(f, *args, **kwargs):
    """Memoize calls to ``f``: results are cached in ``global_table`` keyed by a
    hash of the positional and keyword arguments; the cached value is returned
    on subsequent calls with the same arguments.
    """
ag_hash = hash(args) + make_hash(kwargs)
if f in global_table:
if ag_hash in global_table[f]:
return global_table[f][ag_hash]
res = global_table[f][ag_hash] = f(*args, **kwargs)
return res
global_table[f] = {}
res = global_table[f][ag_hash] = f(*args, **kwargs)
return res | ebc9015066959f66cd98db226178cdf087fc897b | 14,454 |
def getenv(key, default=None):
"""Get an environment variable, return None if it doesn't exist.
The optional second argument can specify an alternate default.
"""
return environ.get(key, default) | 0e355c73dbbdc971e4442123dc5945dc04fac8fc | 14,455 |
def read_tag(request, tid, *args, **kwargs):
"""read_tag(tid) returns ..."""
s = api.read_tag(request, tid, *args, **kwargs)
return render_to_response('read/tag.html', s) | 7582e752563f35eb8d025625eaac87d8a9f45f32 | 14,456 |
import numpy as np
def ber_img(original_img_bin, decoded_img_bin):
"""Compute Bit-Error-Rate (BER) by comparing 2 binary images."""
if not original_img_bin.shape == decoded_img_bin.shape:
raise ValueError('Original and decoded images\' shapes don\'t match !')
height, width, k = original_img_bin.shape
errors_bits = abs(original_img_bin - decoded_img_bin).sum()
errors_bits = errors_bits.flatten()
total_bits = np.prod(original_img_bin.shape)
ber = errors_bits / total_bits
return(ber) | f5768aa5435a76bcd82b331d76dadf554749e82d | 14,457 |
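A small self-contained check with random binary "images" of shape (height, width, bits_per_pixel):
import numpy as np

rng = np.random.default_rng(42)
original = rng.integers(0, 2, size=(32, 32, 8))
decoded = original.copy()
decoded[0, 0, 0] ^= 1                 # flip exactly one bit
print(ber_img(original, decoded))     # 1 / (32*32*8) ~= 0.000122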
def get_fractal_patterns_WtoE_NtoS(fractal_portrait, width, height):
""" get all fractal patterns from fractal portrait, from West to East, from North to South """
fractal_patterns = []
for x in range(width):
# single fractal pattern
f_p = get_fractal_patterns_zero_amounts()
for y in range(height):
if fractal_portrait[x][y] != EMPTY_PLACE:
f_p[fractal_portrait[x][y]] += 1
if any(v > 0 for v in f_p.values()):
fractal_patterns.append(f_p)
return fractal_patterns | ad5e2025515231ae8efb256b5dc466d66fedb467 | 14,458 |
import datetime
import io
import time
import requests
import pandas as pd
def rating(date=None):
"""P2peye comprehensive rating and display results.
from https://www.p2peye.com
Args:
date: if None, download latest data, if like '201812', that download month data.
Returns:
DataFrame
"""
start = time.time()
if date is None:
date = str(pd.to_datetime(datetime.datetime.now())-pd.DateOffset(months=1))[:7].replace('-', '')
assert (isinstance(date, str) and len(date)==6), "`date` shoule format '201812' or None"
url_txt = 'https://raw.githubusercontent.com/Hourout/datasets/master/report/p2peye/rating/p2peye_rating'+date+'.txt'
s = requests.get(url_txt).content
data = pd.read_csv(io.StringIO(s.decode('utf-8')))
print('p2peye rating dataset download completed, run time %d min %.2f sec' %divmod((time.time()-start), 60))
return data | d76205c933c07764938b38fbfcdcbbe84b584471 | 14,459 |
def crop_image(src, box, expand=0):
"""Read sensor data and crop a bounding box
Args:
src: a rasterio opened path
box: geopandas geometry polygon object
expand: add padding in percent to the edge of the crop
Returns:
masked_image: a crop of sensor data at specified bounds
"""
#Read data and mask
try:
left, bottom, right, top = box.bounds
expand_width = (right - left) * expand /2
expand_height = (top - bottom) * expand / 2
#If expand is greater than increase both size
if expand >= 0:
expanded_left = left - expand_width
expanded_bottom = bottom - expand_height
expanded_right = right + expand_width
expanded_top = top+expand_height
else:
#Make sure of no negative boxes
expanded_left = left+expand_width
            expanded_bottom = bottom + expand_height
expanded_right = right-expand_width
expanded_top = top-expand_height
window = rasterio.windows.from_bounds(expanded_left, expanded_bottom, expanded_right, expanded_top, transform=src.transform)
masked_image = src.read(window=window)
except Exception as e:
raise ValueError("sensor path: {} failed at reading window {} with error {}".format(src, box.bounds,e))
#Roll depth to channel last
masked_image = np.rollaxis(masked_image, 0, 3)
#Skip empty frames
if masked_image.size ==0:
raise ValueError("Empty frame crop for box {} in sensor path {}".format(box, src))
return masked_image | 84948cf3c81fe650d15acd834f89c532e9658466 | 14,460 |
def add_msgpack_support(cls, ext, add_cls_methods=True):
    """Adds msgpack serialization support.
    Enables packing and unpacking with msgpack via 'pack.packb' and
    'pack.unpackb' methods.
    If add_cls_methods is True, also enables equality, reading and writing for the class.
    Specifically, adds methods:
        bytes <- obj.to_bytes()
        obj <- cls.from_bytes(bytes)
        boolean <- obj1 == obj2
    Args:
        cls: class
        ext: a unique code for msgpack's Ext hook
    """
def enc(obj):
return packb(obj.__dict__)
def dec(data):
obj = cls.__new__(cls)
obj.__dict__.update(unpackb(data))
return obj
def eq(a, b):
if type(a) != type(b):
return NotImplemented
return a.__dict__ == b.__dict__
if add_cls_methods:
if cls.__eq__ is object.__eq__:
cls.__eq__ = eq
cls.to_bytes = enc
cls.from_bytes = staticmethod(dec)
_pack_reg[cls] = (ext, enc)
petlib.pack.register_coders(cls, ext, enc, dec) | fa8a6fcce7103466f923b84eeb6f5bdae8383b79 | 14,461 |
def UnN(X, Z, N, sampling_type):
"""Computes block-wise complete U-statistic."""
return UN(X, Z, N, Un, sampling_type=sampling_type) | 0e2fb8c62cbcca99017bc7d0ff2c13dbecad1ab3 | 14,463 |
def view_log_view(request, model_name, object_id):
    """View the admin log entries for an object.
    Arguments:
        request {object} -- wsgi request object
        model_name {str} -- registered model name
        object_id {int} -- primary key of the object whose log entries to show
    Returns:
        return -- html view
    """
if model_name not in register_form:
return render(request, 'admin/error.html', {'error_msg': 'illegal request!'})
model = register_form[model_name]['model']
res = get_object_or_404(model, pk=object_id)
log_entries = LogEntry.objects.filter(
content_type_id=get_content_type_for_model(model).pk,
object_id=res.id
)
return render(request, 'admin/view_log.html', {
'log_data': log_entries
}) | 55ebd2d5226e06b1f5833595b0efad3de81140d7 | 14,464 |
from typing import Tuple
from typing import Dict
def parse_markdown(source: str) -> Tuple[str, Dict]:
"""Parse a Markdown document using our custom parser.
Args:
source (str): the Markdown source text
Returns:
tuple(str, dict):
1. the converted output as a string
2. any extracted metadata as a dict
"""
# Reset or we'll have leftover garbage from the previous file
_md_parser.reset()
html: str = _md_parser.convert(source)
meta: Dict = set_metadata(_md_parser.metadata)
return html, meta | 2bf5a8d43f3763d6b1356dc0496ab4ed1896fe99 | 14,465 |
def flatten_list(nested_list):
# Essentially we want to loop through each element in the list
# and check to see if it is of type integer or list
"""
Flatten a arbitrarily nested list
Args:
nested_list: a nested list with item to be either integer or list
example:
[2,[[3,[4]], 5]]
Returns:
a flattened list with only integers
example:
[2,3,4,5]
"""
result = []
for element in nested_list:
if isinstance(element, int):
result.append(element)
elif hasattr(element, '__iter__'):
#check to see if it is of type list
list_result = flatten_list(element) #recursive call
for single_integer in list_result:
result.append(single_integer)
return result | d79b350167cd1fdf35582e9b149bfb364741d566 | 14,466 |
import inspect
def detect_runner():
""" Guess which test runner we're using by traversing the stack and looking
for the first matching module. This *should* be reasonably safe, as
it's done during test discovery where the test runner should be the
stack frame immediately outside. """
if _test_runner_override is not None:
return _test_runner_override
global _test_runner_guess
if _test_runner_guess is False:
stack = inspect.stack()
for record in reversed(stack):
frame = record[0]
module = frame.f_globals.get("__name__").partition(".")[0]
if module in _test_runner_aliases:
module = _test_runner_aliases[module]
if module in _test_runners:
_test_runner_guess = module
break
if record[1].endswith("python2.6/unittest.py"):
_test_runner_guess = "unittest"
break
else:
_test_runner_guess = None
return _test_runner_guess | 881758d42e5047fe58106a99377dcc7191c0010c | 14,467 |
def retinanet(
mode,
offsets_mean=None,
offsets_std=None,
architecture='resnet50',
train_bn=False,
channels_fmap=256,
num_anchors_per_pixel=9,
num_object_classes=1,
pi=0.01,
alpha=0.25,
gamma=2.0,
confidence_threshold=0.05,
num_top_scoring=1000,
batch_size=2,
max_objects_per_class_per_img=100,
iou_threshold=0.5,
output_top_scoring=False
):
"""
Builds a RetinaNet.
Parameters
----------
mode : string
The mode of building a retinanet either in 'training' or 'inference'.
offsets_mean, offsets_std : float
The mean and std of anchor offsets for a given dataset. If offsets are
normalized, they will be used to de-normalize offsets.
architecture : string, optional
ResNet architecture in {'resnet50', 'resnet101'}. The default is
'resnet50'.
train_bn : boolean, optional
Whether one should normalize the layer input by the mean and variance
over the current batch. The default is False, i.e., use the moving
average of mean and variance to normalize the layer input.
channels_fmap : integer, optional
The number of filters in all FPN conv layers. The default is 256.
num_anchors_per_pixel : integer, optional
The number of anchors to generate at different scales for every pixel;
see anchors.anchors_from_fpn(). The default is 9.
num_object_classes : integer, optional
The number of classes containing only objects, i.e., object classes
denoted by positive integers while background denoted by 0. The default
is 1.
pi : float, optional
The bias initialization at the final conv layer of the classification
subnet, prevents the large number of anchors from generating a large
loss value in the first iteration of training. The default is 0.01.
alpha : float, optional
A weighting factor in [0,1] for the object class, addressing class
imbalance. The default is 0.25.
gamma : float, optional
A focusing parameter >= 0 for removing easy examples. The default is
2.0.
confidence_threshold : float, optional
The minimum selection's probabilites. The default is 0.05.
num_top_scoring : integer, optional
The number of top-scoring selections. The default is 1000.
batch_size : integer, optional
The batch size of input images. The default is 2.
max_objects_per_class_per_img : integer, optional
The maximum number of objects over all images for a particular class.
The default is 100.
iou_threshold : float, optional
An iou threshold for NMS. The default is 0.5.
output_top_scoring : boolean, optional
Whether to include the output of detections.select_top_scoring() in the
inference mode. The default is False.
Returns
-------
model : tf keras
The retinanet.
- Training mode
* inputs are a batch of images, anchor indicators, ground-truth
class ids and offsets generated by data_gen.data_generator();
* outputs are predicted anchor probabilities, offsets,
classification and regression losses.
- Inference mode
* inputs are a batch of raw images, a list of anchors at all
levels generated by anchors.anchors_from_fpn() and a window with
shape of [1, 4] used in clipping anchors in
detections.SelectTopScoring() where 4 is (y1, x1, y2, x2) corner
coordinates for all images in the batch.
* outputs is a list of detections, each has corresponding target
boxes, class ids and scores.
"""
assert mode in ['training', 'inference']
# input images
images = tf.keras.Input(shape=(None, None, 3), name='images')
if mode == 'training':
# inputs generated by anchors.anchors_targets()
gt_anchor_indicators = tf.keras.Input(
shape=(None,),
name='gt_anchor_indicators',
dtype=tf.int32)
gt_anchor_class_ids = tf.keras.Input(
shape=(None, num_object_classes),
name='gt_anchor_class_ids',
dtype=tf.int32)
gt_anchor_offsets = tf.keras.Input(
shape=(None, 4),
name='gt_anchor_offsets',
dtype=tf.float32)
# backbone, ResNet + FPN
fmaps = resnet_fpn.resnet_fpn(
images, architecture, train_bn, channels_fmap)
if mode == 'inference':
# input generated by anchors.anchors_from_fpn(), and then each
# element is broadcasted to batch_size, resulting in shape of
# [batch_size, num_anchors_per_fmap, 4]
anchors_fpn_batches = []
for i in range(len(fmaps)):
anchors_i = tf.keras.Input(
shape=(None, 4),
name='anchors_p'+str(i+3),
dtype=tf.float32)
anchors_fpn_batches.append(anchors_i)
# input used when clipping anchors in detections.SelectTopScoring()
window = tf.keras.Input(
shape=(4),
batch_size=1,
name='window',
dtype=tf.int32)
# classification and regression subnets
cls_subnet = subnets.cls_subnet(
num_anchors_per_pixel,
num_object_classes,
channels_fmap,
pi)
reg_subnet = subnets.reg_subnet(
num_anchors_per_pixel,
channels_fmap)
# outputs, list, each element is for one FPN level
if mode == 'training':
pred_anchor_probs, pred_anchor_offsets = [], []
else:
list_anchor_idxes = []
list_anchors, list_class_ids, list_scores = [], [], []
# loop for each FPN level
for i in range(len(fmaps)):
# fmap, [batch_size, h_i, w_i, channels_fmap] where h_i and w_i denote
# the current fmap size
p = fmaps[i]
# cls, [batch_size, h_i, w_i, num_anchors_per_pixel*num_object_classes]
pred_anchor_probs_i = cls_subnet([p])
# reshape, [batch_size, h_i*w_i*num_anchors_per_pixel, num_object_classes]
pred_anchor_probs_i = tf.keras.layers.Reshape(
(-1, num_object_classes),
name='cls_probs_p'+str(i+3)
)(pred_anchor_probs_i)
# reg, [batch_size, h_i, w_i, num_anchors_per_pixel*4]
pred_anchor_offsets_i = reg_subnet([p])
# reshape, [batch_size, h_i*w_i*num_anchors_per_pixel, 4]
pred_anchor_offsets_i = tf.keras.layers.Reshape(
(-1, 4),
name='reg_offsets_p'+str(i+3)
)(pred_anchor_offsets_i)
if mode == 'training':
pred_anchor_probs.append(pred_anchor_probs_i)
pred_anchor_offsets.append(pred_anchor_offsets_i)
else:
# filter low confidence, select top-scoring and refine anchors
anchors_i = anchors_fpn_batches[i]
select_top_scoring_inputs_i = [
anchors_i,
pred_anchor_probs_i,
pred_anchor_offsets_i,
window]
select_top_scoring_outputs_i = detections.SelectTopScoring(
confidence_threshold,
num_top_scoring,
batch_size,
offsets_mean,
offsets_std,
name='select_top_detection_p'+str(i+3)
)(select_top_scoring_inputs_i)
list_anchor_idxes.append(select_top_scoring_outputs_i[0])
list_anchors.append(select_top_scoring_outputs_i[1])
list_class_ids.append(select_top_scoring_outputs_i[2])
list_scores.append(select_top_scoring_outputs_i[3])
if mode == 'training':
# probs, [batch_size, num_anchors, num_object_classes]
pred_anchor_probs = tf.keras.layers.Concatenate(
axis=1, name='pred_anchor_probs')(pred_anchor_probs)
# offsets, [batch_size, num_anchors, 4]
pred_anchor_offsets = tf.keras.layers.Concatenate(
axis=1, name='pred_anchor_offsets')(pred_anchor_offsets)
# cls loss
cls_inputs = [
gt_anchor_indicators, gt_anchor_class_ids, pred_anchor_probs]
cls_loss = losses.ClsLoss(alpha, gamma)(cls_inputs)
# reg loss
reg_inputs = [
gt_anchor_indicators, gt_anchor_offsets, pred_anchor_offsets]
reg_loss = losses.RegLoss()(reg_inputs)
# training model's inputs and outputs
inputs = [
images,
gt_anchor_indicators,
gt_anchor_class_ids,
gt_anchor_offsets,]
outputs = [
pred_anchor_probs,
pred_anchor_offsets,
cls_loss,
reg_loss]
else:
# NMS
nms_fpn_inputs = [
list_anchor_idxes, list_anchors, list_class_ids, list_scores]
nms_fpn_outputs = detections.NMS_FPN(
max_objects_per_class_per_img,
iou_threshold,
batch_size,
name='nms'
)(nms_fpn_inputs)
# anchors_batch, class_ids_batch, scores_batch = nms_fpn_outputs
# inference model's inputs and outputs
inputs = [images, anchors_fpn_batches, window]
if output_top_scoring:
outputs = [nms_fpn_inputs, nms_fpn_outputs]
else:
outputs = nms_fpn_outputs
with tf.device('/cpu:0'):
model = tf.keras.Model(inputs, outputs, name='RetinaNet')
return model | a3cdb088345740583c7ea08049e5f03f8d496cad | 14,468 |
from typing import Union
from typing import Tuple
from typing import List
from typing import Literal
def default_chap_exec(gallery_or_id: Union[Gallery, int], chap: Chapter, only_values=False) \
-> Union[Tuple[str, dict], Tuple[int, Union[str, List[str]], int, bytes, int, Literal[0, 1]]]:
"""Pass a Gallery object or gallery id and a Chapter object"""
gid: int
if isinstance(gallery_or_id, Gallery):
gallery: Gallery = gallery_or_id
gid = gallery.id
in_archive = gallery.is_archive
else:
gid = gallery_or_id
in_archive = chap.in_archive
if only_values:
result_exec = (gid, chap.title, chap.number, str.encode(chap.path), chap.pages, in_archive)
else:
result_exec = (
"""
INSERT INTO chapters(series_id, chapter_title, chapter_number, chapter_path, pages, in_archive)
VALUES(:series_id, :chapter_title, :chapter_number, :chapter_path, :pages, :in_archive)""",
{
'series_id': gid,
'chapter_title': chap.title,
'chapter_number': chap.number,
'chapter_path': str.encode(chap.path),
'pages': chap.pages,
'in_archive': in_archive
}
)
return result_exec | 8bd8cbfc47ce3463f2ea6da313cc871d8b6dcdf5 | 14,469 |
from typing import List
from typing import Dict
from typing import Literal
from typing import Any
import numpy as np
def get_matching_based_variables(match_definitions:List[Dict[Literal['name', 'matching'],Any]],
global_dict=None,
local_dict=None,
var_lenght=0):
"""
Function to construct an array with values depending on the condition provided by user
The idea is to define things like, for example, 'region' for a table,
indicating which analysis region is used.
Example:
Assume we want to have region="SRB" when "MET>100 && mt2<450".
For ``MET=[50 ,150,250]`` and ``mt2=[300,400,500]``,
when provided with argument
``matching_definitions=[{name:"SRB","matching":["np.logical_and(MET>100,mt2<450)"]}]``
will give output of ``[None,SRB, None]``.
Args:
match_definitions: list of dictionaries defining matching conditions and
the value associated with the match.
Each dictionary has to have field 'name' (value of variable when condition is met)
and 'matching' -- list of cuts and indices for which the condition is met.
            Conditions are concatenated to each other.
            In the example above ``matching_definitions=[{name:"SRB","matching":["np.logical_and(MET>100,mt2<450)"]}``
            is equivalent to ``matching_definitions=[{name:"SRB","matching":[1]}`` (index specifying position that matches)
        global_dict: collection of variables and other known objects to be used in the transformation
        local_dict: yet another collection of variables known to be used in the transformation
        var_lenght: length of the corresponding variable/table (in case an index is chosen for the matching specification)
"""
result=None
for specification in match_definitions:
var=specification.get('name',None)
if(var is None):
raise ValueError(f"matching_definitions have to have name for each specification.")
cuts=specification.get('matching',[])
for cut in cuts:
if(type(cut)==str):
cutOutput=np.where(eval(cut,global_dict,local_dict),var,None)
ToAppend=cutOutput.reshape(len(cutOutput),1)
                if result is None:
result=ToAppend
else:
result=np.concatenate((result,ToAppend),axis=1)
elif(type(cut)==int):
if(cut>=len(cuts)):
raise RuntimeError("lenght of cut table smaller than required index.")
else:
                    ToAppend=np.array([[None]]*var_lenght)
ToAppend[cut]=var
                    if result is None:
result=ToAppend
else:
result=np.concatenate((result,ToAppend),axis=1)
else:
raise TypeError("Variable cutDefinitions has improper content.")
return result | 6722bb4f258ef69c4aab74970f8f924ca938bbf5 | 14,471 |
from heapq import heapify, heappop, heappush
def _AStar_graph(problem: BridgeProblem) -> (list, list):
"""Used for graphing, returns solution as well as all nodes in a list"""
all_nodes = [problem.initial_node]
pq = [(problem.initial_node.path_cost + problem.h(problem.initial_node.state), problem.initial_node)]
closed = set()
while True:
assert pq
priority, node = heappop(pq)
if problem.goal_test(node):
return problem.get_ancestors(node), all_nodes
closed.add(node)
children = problem.expand(node)
for node in children:
priority = node.path_cost + problem.h(node.state)
bn = (priority, node)
inpq = None
for i, (_, pq_node) in enumerate(pq):
if node == pq_node: inpq = i
if node not in closed and inpq is None:
heappush(pq, bn)
elif inpq is not None and bn < pq[inpq]:
pq.pop(inpq)
pq.append(bn)
heapify(pq)
all_nodes.extend(children) | a1616f7d499f12a229843007d7bc4939cbd02a7a | 14,472 |
import matplotlib
import matplotlib.backends.backend_pdf
import matplotlib.pyplot
def plot_setup(name, figsize=None, fontsize=9, font='paper', dpi=None):
""" Setup a PDF page for plot.
name: PDF file name. If not ending with .pdf, will automatically append.
figsize: dimension of the plot in inches, should be an array of length two.
fontsize: fontsize for legends and labels.
font: font for legends and labels, 'paper' uses Times New Roman, 'default'
uses default, a tuple of (family, font, ...) customizes font.
dpi: resolution of the figure.
"""
paper_plot(fontsize=fontsize, font=font)
if not name.endswith('.pdf'):
name += '.pdf'
pdfpage = matplotlib.backends.backend_pdf.PdfPages(name)
fig = matplotlib.pyplot.figure(figsize=figsize, dpi=dpi)
return pdfpage, fig | 4f9595757df57ee451dddc82815a91b727feb1f1 | 14,473 |
def author_endyear(pub2author_df = None, colgroupby = 'AuthorId', datecol = 'Year', show_progress=False):
"""
Calculate the year of last publication for each author.
Parameters
----------
pub2author_df : DataFrame, default None, Optional
A DataFrame with the author2publication information.
colgroupby : str, default 'AuthorId', Optional
The DataFrame column with Author Ids. If None then the database 'AuthorId' is used.
datecol : str, default 'Year', Optional
The DataFrame column with Date information. If None then the database 'Year' is used.
Returns
-------
DataFrame
Productivity DataFrame with 2 columns: 'AuthorId', 'CareerLength'
"""
newname_dict = zip2dict([str(datecol), '0'], ['EndYear']*2)
return pub2author_df.groupby(colgroupby)[datecol].max().to_frame().reset_index().rename(columns=newname_dict) | 8a3ebf5e1870a8aa79ed2cba18fbb18fa634e604 | 14,474 |
def to_gif(images, fps):
"""Converts image sequence (4D numpy array) to gif."""
imageio.mimsave('./animation.gif', images, fps=fps)
return embed.embed_file('./animation.gif') | b329da4710a5ad2da57ed2cf6b774ac4b6b8c7dd | 14,475 |
import logging
def get_half_max_down(signal, peak):
"""See `get_half_max_up` for explanation.
This is a minor modification of the above function.
"""
if peak['peak'] == 0:
return np.nan
fflag = False
half_max = signal[peak['peak']] / 2
falling_signal = signal[peak['peak']:(peak['right']+1)]
closest_idx = (np.abs(falling_signal - half_max)).argmin() + peak['peak']
if closest_idx <= 1 or closest_idx >= 98:
logging.warning('HM_DOWN: half-max too close to end of signal')
return np.nan
# If the signal at the index is nearly equal to half max, take that index
if np.allclose(half_max, signal[closest_idx]):
half_max_point = closest_idx
# ...otherwise interpolate
else:
ix = -1
triplet = signal[(closest_idx - 1):(closest_idx + 2)]
if triplet[0] > half_max > triplet[1]:
ix = 0
elif triplet[1] > half_max > triplet[2]:
ix = 1
else:
logging.warning('HM_DOWN: simple method for interpolating'
' half-max decay time failed')
fflag = True
if ix != -1:
y = [ix,ix+1]
x = [triplet[ix], triplet[ix+1]]
f = interp1d(x,y)
trip_coord = f(half_max)
half_max_point = closest_idx + (trip_coord - 1)
if fflag == True:
half_max_down = np.nan
else:
half_max_down = float(half_max_point - peak['peak'])
return half_max_down | 0b9b20b66a82d8a60aa650bc1bacd24f67f217f1 | 14,476 |
import copy
def ternary(c):
"""
Encodes the circuit with ternary values
Parameters
----------
c : Circuit
Circuit to encode.
Returns
-------
Circuit
Encoded circuit.
"""
if c.blackboxes:
raise ValueError(f"{c.name} contains a blackbox")
t = copy(c)
# add dual nodes
for n in c:
if c.type(n) in ["and", "nand"]:
t.add(f"{n}_x", "and")
t.add(
f"{n}_x_in_fi",
"or",
fanout=f"{n}_x",
fanin=[f"{p}_x" for p in c.fanin(n)],
)
t.add(f"{n}_0_not_in_fi", "nor", fanout=f"{n}_x")
for p in c.fanin(n):
t.add(
f"{p}_is_0", "nor", fanout=f"{n}_0_not_in_fi", fanin=[p, f"{p}_x"]
)
elif c.type(n) in ["or", "nor"]:
t.add(f"{n}_x", "and")
t.add(
f"{n}_x_in_fi",
"or",
fanout=f"{n}_x",
fanin=[f"{p}_x" for p in c.fanin(n)],
)
t.add(f"{n}_1_not_in_fi", "nor", fanout=f"{n}_x")
for p in c.fanin(n):
t.add(f"{p}_is_1", "and", fanout=f"{n}_1_not_in_fi", fanin=p)
t.add(f"{p}_not_x", "not", fanout=f"{p}_is_1", fanin=f"{p}_x")
elif c.type(n) in ["buf", "not"]:
p = c.fanin(n).pop()
t.add(f"{n}_x", "buf", fanin=f"{p}_x")
elif c.type(n) in ["output"]:
p = c.fanin(n).pop()
t.add(f"{n}_x", "output", fanin=f"{p}_x")
elif c.type(n) in ["xor", "xnor"]:
t.add(f"{n}_x", "or", fanin=(f"{p}_x" for p in c.fanin(n)))
elif c.type(n) in ["0", "1"]:
t.add(f"{n}_x", "0")
elif c.type(n) in ["input"]:
t.add(f"{n}_x", "input")
else:
raise ValueError(f"Node {n} has unrecognized type: {c.type(n)}")
return t | 6fd813b957da408c23cc8a37038b8f3b660fdc73 | 14,478 |
from typing import List
from typing import Dict
def eval_lane_per_frame(
gt_file: str, pred_file: str, bound_ths: List[float]
) -> Dict[str, np.ndarray]:
"""Compute mean,recall and decay from per-frame evaluation."""
task2arr: Dict[str, np.ndarray] = dict() # str -> 2d array
gt_byte = np.asarray(Image.open(gt_file))
pred_byte = np.asarray(Image.open(pred_file))
gt_foreground = get_foreground(gt_byte)
pd_foreground = get_foreground(pred_byte)
for task_name, class_func in sub_task_funcs.items():
task_scores: List[List[float]] = []
for value in range(len(sub_task_cats[task_name])):
gt_mask = class_func(gt_byte, value) & gt_foreground
pd_mask = class_func(pred_byte, value) & pd_foreground
cat_scores = [
eval_lane_per_threshold(gt_mask, pd_mask, bound_th)
for bound_th in bound_ths
]
task_scores.append(cat_scores)
task2arr[task_name] = np.array(task_scores)
return task2arr | 571cd737151576869170e33d181f89d22bc0657b | 14,479 |
import torch
def membrane(field, voxel_size=1, bound='dct2', dim=None, weights=None):
"""Precision matrix for the Membrane energy
Note
----
.. This is exactly equivalent to SPM's membrane energy
Parameters
----------
field : (..., *spatial) tensor
voxel_size : float or sequence[float], default=1
bound : str, default='dct2'
dim : int, default=field.dim()
weights : (..., *spatial) tensor, optional
Returns
-------
field : (..., *spatial) tensor
"""
if weights is None:
return _membrane_l2(field, voxel_size, bound, dim)
def mul_(x, y):
"""Smart in-place multiplication"""
if ((torch.is_tensor(x) and x.requires_grad) or
(torch.is_tensor(y) and y.requires_grad)):
return x * y
else:
return x.mul_(y)
backend = dict(dtype=field.dtype, device=field.device)
dim = dim or field.dim()
if torch.is_tensor(voxel_size):
voxel_size = make_vector(voxel_size, dim, **backend)
dims = list(range(field.dim()-dim, field.dim()))
fieldf = diff(field, dim=dims, voxel_size=voxel_size, side='f', bound=bound)
weights = torch.as_tensor(weights, **backend)
fieldf = mul_(fieldf, weights[..., None])
fieldb = diff(field, dim=dims, voxel_size=voxel_size, side='b', bound=bound)
fieldb = mul_(fieldb, weights[..., None])
dims = list(range(fieldb.dim() - 1 - dim, fieldb.dim() - 1))
fieldb = div(fieldb, dim=dims, voxel_size=voxel_size, side='b', bound=bound)
dims = list(range(fieldf.dim()-1-dim, fieldf.dim()-1))
field = div(fieldf, dim=dims, voxel_size=voxel_size, side='f', bound=bound)
del fieldf
field += fieldb
field *= 0.5
return field | 5e238ca2253fc7105b1bbfba58947ac442c05699 | 14,480 |
from typing import Tuple
import struct
def get_uint64(dgram: bytes, start_index: int) -> Tuple[int, int]:
"""Get a 64-bit big-endian unsigned integer from the datagram.
Args:
dgram: A datagram packet.
start_index: An index where the integer starts in the datagram.
Returns:
A tuple containing the integer and the new end index.
Raises:
ParseError if the datagram could not be parsed.
"""
try:
if len(dgram[start_index:]) < _UINT64_DGRAM_LEN:
raise ParseError('Datagram is too short')
return (
struct.unpack('>Q',
dgram[start_index:start_index + _UINT64_DGRAM_LEN])[0],
start_index + _UINT64_DGRAM_LEN)
except (struct.error, TypeError) as e:
raise ParseError('Could not parse datagram %s' % e) | e5ed2470656e3c0d1a8efe02bd638ac05245f187 | 14,481 |
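Assuming the module defines `_UINT64_DGRAM_LEN = 8` and a `ParseError` exception (as the snippet implies), usage looks roughly like:
import struct

_UINT64_DGRAM_LEN = 8          # assumed module constant

class ParseError(Exception):   # assumed module exception
    pass

dgram = b'\x00' * 4 + struct.pack('>Q', 1234567890)
value, end_index = get_uint64(dgram, 4)
print(value, end_index)        # 1234567890 12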
import torch
from typing import Optional
from typing import List
def fftshift(x: torch.Tensor, dim: Optional[List[int]] = None) -> torch.Tensor:
"""
Similar to np.fft.fftshift but applies to PyTorch Tensors
Args:
x: A PyTorch tensor.
dim: Which dimension to fftshift.
Returns:
fftshifted version of x.
"""
if dim is None:
# this weird code is necessary for toch.jit.script typing
dim = [0] * (x.dim())
for i in range(1, x.dim()):
dim[i] = i
# also necessary for torch.jit.script
shift = [0] * len(dim)
for i, dim_num in enumerate(dim):
shift[i] = x.shape[dim_num] // 2
return roll(x, shift, dim) | a1ff7a81df83df63dcbcf56cf89d9b0e54c16ba0 | 14,482 |
def flatten(x):
"""Flattens nested list"""
if isinstance(x, list):
return [a for i in x for a in flatten(i)]
else:
return [x] | 7d348f8287dfccfbb77a52a84a5642c265381eb1 | 14,483 |
import copy
def get_capture_points_gazebo(bag, odom_topic='/gazebo/model_states', sync_topic='/mavros/imu/data_raw', camera_freq=20, sync_topic_freq=100, method='every'):
"""
method(string): method for sampling capturing points.
'every': Sample IMU for every n msgs, and then capture odometry msg which has the closest timestamp. This requires the existence of odom_msg for every imu_msg.
"""
odom_msg_list = []
odom_time_list = []
odom_stamp_list = []
capture_time_list = []
sync_topic_num = 0
for topic, msg, t in bag:
if topic==odom_topic:
odom_msg_list.append(msg)
odom_time_list.append(t.to_time())
odom_stamp_list.append(copy.deepcopy(t))
for topic, msg, t in bag:
if topic==sync_topic:
if odom_time_list[0] > t.to_time():
continue
if sync_topic_num % (int(sync_topic_freq/camera_freq)) == 0:
capture_time_list.append(t.to_time())
sync_topic_num += 1
assert len(odom_msg_list)==len(odom_time_list) and len(odom_msg_list)==len(odom_stamp_list), 'length of odom_(msg/time/stamp)_list is not equal.'
# start sampling odometry
capture_points = []
curr_odom_idx = 0
for idx, capture_time in enumerate(capture_time_list):
# take an odometry msg which has the timestamp closest to capture_time
if capture_time < min(odom_time_list):
continue
while abs(capture_time - odom_time_list[curr_odom_idx]) >= 5*10**(-5):
curr_odom_idx += 1
if curr_odom_idx >= len(odom_time_list):
break
if curr_odom_idx >= len(odom_time_list):
break
if odom_topic=='/gazebo/gazebo_states':
capture_point = get_capture_point_from_gazebo_model_states(idx, odom_msg_list[curr_odom_idx], odom_stamp_list[curr_odom_idx])
elif odom_topic=='/odometry':
capture_point = get_capture_point_from_navmsgs_odom(idx, odom_msg_list[curr_odom_idx], odom_stamp_list[curr_odom_idx])
capture_points.append(capture_point)
return capture_points | 75d351c0ecd6ad8dad6e7cfcd2ecd04d0826405b | 14,484 |
import fileinput
def parse_input():
"""Parse input and return array of calendar
A user can either pass the calendar via the stdin or via one or several
icalendar files. This method will parse the input and return an array
of valid icalendar
"""
input_data = ''
calendars = []
for line in fileinput.input():
if 'BEGIN:VCALENDAR' in line:
calendars.append(input_data)
input_data = line
else:
input_data += line
calendars.append(input_data)
return calendars[1:] | a60a760968f139da0b7753ae5717d78b640cb232 | 14,485 |
def identity(obj):
"""Returns the ``obj`` parameter itself
:param obj: The parameter to be returned
:return: ``obj`` itself
>>> identity(5)
5
>>> foo = 2
>>> identity(foo) is foo
True
"""
return obj | a3271a831d2e91fe6eebed7e80c18e7c81996da6 | 14,486 |
import numpy as np
def percent_clipper(x, percentiles):
"""
Takes data as np.ndarray and percentiles as array-like
Returns clipped ndarray
"""
    LOWERBOUND, UPPERBOUND = np.percentile(x, [percentiles[0], percentiles[1]])
return np.clip(x, LOWERBOUND, UPPERBOUND) | 3d114a956bfd0b6b8349c39f5c42f4487a812ee7 | 14,487 |
def check_prob_vector(p):
"""
Check if a vector is a probability vector.
Args:
p, array/list.
"""
assert np.all(p >= 0), p
assert np.isclose(np.sum(p), 1), p
return True | f9a6ea74fe9e5ff8a7244e7cc8aee2cbf5ae512e | 14,488 |
def relabel_subgraph():
""" This function adapts an existing sampler by relabelling the vertices in the edge list
to have dense index.
Returns
-------
sample: a function, that when invoked, produces a sample for the input function.
"""
def relabel(edge_list, positive_vertices):
shape = edge_list.shape
vertex_index, edge_list = np.unique(edge_list, return_inverse=True)
edge_list = edge_list.astype(np.int32).reshape(shape)
# relabel the positive vertices
positive_verts = np.searchsorted(vertex_index, positive_vertices)
is_positive = np.zeros_like(vertex_index)
is_positive[positive_verts] = 1
return edge_list, vertex_index, is_positive
def sample(data):
edge_list = data['edge_list']
positive_vertices = data.get('positive_vertices', tf.unique(tf.reshape(edge_list, [-1]))[0])
vertex_index = data.get('vertex_index', None)
if isinstance(edge_list, tf.Tensor):
new_edge_list, new_vertex_index, is_positive = tf.py_func(relabel, [edge_list, positive_vertices],
[tf.int32, tf.int32, tf.int32], stateful=False)
new_edge_list.set_shape(edge_list.shape)
new_vertex_index.set_shape([None])
is_positive.set_shape([None])
else:
new_edge_list, new_vertex_index, is_positive = relabel(edge_list, positive_vertices)
if vertex_index is not None:
if isinstance(vertex_index, tf.Tensor):
vertex_index = tf.gather(vertex_index, new_vertex_index, name='resample_vertex_index')
else:
vertex_index = vertex_index[new_vertex_index]
else:
vertex_index = new_vertex_index
return {**data, 'edge_list': new_edge_list, 'vertex_index': vertex_index, 'is_positive': is_positive}
return sample | e9b8269640663b830c894c4aa4f8a8cce2b49af7 | 14,489 |
def init_binary(mocker):
"""Initialize a dummy BinaryDigitalAssetFile for testing."""
mocker.patch.multiple(
houdini_package_runner.items.digital_asset.BinaryDigitalAssetFile,
__init__=lambda x, y, z: None,
)
def _create():
return houdini_package_runner.items.digital_asset.BinaryDigitalAssetFile(
None, None
)
return _create | 34a3ee5fb09f413bf07b36f1b73189472c188f3d | 14,491 |
def with_setup_(setup=None, teardown=None):
"""Decorator like `with_setup` of nosetest but which can be applied to any
function"""
def decorated(function):
def app(*args, **kwargs):
if setup:
setup()
try:
function(*args, **kwargs)
finally:
if teardown:
teardown()
return app
return decorated | f9e8eddfd01ee99e458857de403c49b91dafa92c | 14,492 |
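# A minimal usage sketch for the with_setup_ decorator above. The setup/teardown
# callables and the decorated test function are hypothetical examples.
def _demo_with_setup():
    calls = []

    @with_setup_(setup=lambda: calls.append("setup"),
                 teardown=lambda: calls.append("teardown"))
    def run_test():
        calls.append("test")

    run_test()
    # setup runs first, teardown always runs last (even on failure).
    assert calls == ["setup", "test", "teardown"]
    return calls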
import click
def post_options():
"""Standard arguments and options for posting timeseries readings.
"""
options = [
click.argument('port'),
click.argument('value', type=JSONParamType()),
click.option('--timestamp', metavar='DATE',
help='the time of the reading'),
]
def wrapper(func):
func.__doc__ += _post_options_docs
for option in reversed(options):
func = option(func)
return func
return wrapper | 7b7c386bfcbf36f1365392a6ba2562fa0ed520ce | 14,493 |
def authenticated(f):
"""Decorator for authenticating with the Hub"""
@wraps(f)
def decorated(*args, **kwargs):
token = request.cookies.get(auth.cookie_name)
if token:
user = auth.user_for_token(token)
else:
user = None
if user:
return f(user, *args, **kwargs)
else:
# redirect to login url on failed auth
state = auth.generate_state(next_url=request.path)
response = make_response(
redirect(auth.login_url + '&state=%s' % state)
)
response.set_cookie(auth.state_cookie_name, state)
return response
return decorated | 1149f14ad540521b71efa3a3240c13719ccf8a17 | 14,494 |
def json_complex_hook(dct):
"""
Return an encoded complex number to it's python representation.
:param dct: (dict) json encoded complex number (__complex__)
:return: python complex number
"""
if isinstance(dct, dict):
if '__complex__' in dct:
parts = dct['__complex__']
assert len(parts) == 2
return parts[0] + parts[1] * 1j
return dct | a3c8cb13485279ab3b222eb63efdfdf6421c17a6 | 14,495 |
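# A minimal usage sketch for json_complex_hook above: it is meant to be passed as
# object_hook to json.loads. The encoded document below is an illustrative example.
import json
def _demo_json_complex_hook():
    encoded = '{"z": {"__complex__": [3.0, 4.0]}}'
    decoded = json.loads(encoded, object_hook=json_complex_hook)
    assert decoded["z"] == 3 + 4j
    return decoded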
def reg_logLiklihood(x, weights, y, C):
"""Regularizd log-liklihood function (cost function to minimized in logistic
regression classification with L2 regularization)
Parameters
-----------
x : {array-like}, shape = [n_samples, n_features + 1]
feature vectors. Note, first column of x must be
a vector of ones.
weights : 1d-array, shape = [1, 1 + n_features]
        Coefficients that weight each sample's feature vector
y : list, shape = [n_samples,], values = 1|0
target values
C : float
Regularization parameter. C is equal to 1/lambda
Returns
-----------
    Value of the regularized log-likelihood function with the given feature values,
weights, target values, and regularization parameter
"""
z = np.dot(x, weights)
reg_term = (1 / (2 * C)) * np.dot(weights.T, weights)
return -1 * np.sum((y * np.log(logistic_func(z))) + ((1 - y) * np.log(1 - logistic_func(z)))) + reg_term | 4a13bac09a6989463014784c72c72729ec40e718 | 14,496 |
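# A minimal usage sketch for reg_logLiklihood above. The helper logistic_func is
# not included in this snippet; a standard sigmoid is assumed here purely for
# illustration, and the feature values are made up.
import numpy as np
def logistic_func(z):
    # Assumed sigmoid; the original implementation may differ.
    return 1.0 / (1.0 + np.exp(-z))
def _demo_reg_logLiklihood():
    # Feature matrix with a leading column of ones, as required by the docstring.
    x = np.array([[1.0, 0.5], [1.0, -0.5], [1.0, 1.5], [1.0, -1.5]])
    weights = np.zeros(2)
    y = np.array([1, 0, 1, 0])
    cost = reg_logLiklihood(x, weights, y, C=1.0)
    # With zero weights every prediction is 0.5, so the cost equals 4 * ln(2).
    assert np.isclose(cost, 4 * np.log(2))
    return cost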
import itertools
def estimate_gridsearch_size(model, params):
""" Compute the total number of parameter combinations in a grid search
Parameters
----------
model: str
name of the model to train. The function currently supports feedforward neural networks (model = 'FNN'),
long-short term memory (model = 'LSTM') and naive discriminative learning (model = 'NDL') also commonly known as
Rescorla-Wagner model.
params: dict of lists
parameter set of the grid search:
Returns
-------
int
number of param combinations
"""
### FNN model
if model == 'FNN':
# Extract the dimensions of the pretrained embeddings
pretrain_embed_dim = {}
embed_inputs = params['embedding_input']
for i, e in enumerate(embed_inputs):
if embed_inputs[i] and embed_inputs[i] != 'learn':
pretrain_embed_dim.update({embed_inputs[i]:extract_embedding_dim(embed_inputs[i])})
# Create a list of dictionaries giving all possible parameter combinations
keys, values = zip(*params.items())
grid_full = [dict(zip(keys, v)) for v in itertools.product(*values)]
### Remove impossible combinations
ind_to_remove = []
for i,d in enumerate(grid_full):
# In the case of no hidden layer, no need to set the 'activation' parameter - only 'last_activation' is used
if grid_full[i]['hidden_layers'] == 0:
grid_full[i]['activation'] = None
# In the case of hot encoding or pretrained embedding, no need to set embedding_dim, otherwise,
# it is essential to set embedding_dim, so remove all cases where embedding_dim is not given with
# embeddings to be learned from scratch
if not grid_full[i]['embedding_input']:
grid_full[i]['embedding_dim'] = None
elif grid_full[i]['embedding_input'] == 'learn' and not grid_full[i]['embedding_dim']:
ind_to_remove.append(i)
elif grid_full[i]['embedding_input'] and grid_full[i]['embedding_input'] != 'learn':
grid_full[i]['embedding_dim'] = pretrain_embed_dim[grid_full[i]['embedding_input']]
# In the case of embeddings, it is essential to set 'max_len' (max_len cannot be None),
# so remove all cases where embeddings are used max_len is not given
if grid_full[i]['embedding_input'] and not grid_full[i]['max_len']:
ind_to_remove.append(i)
# First remove the detected impossible combinations (e.g. 'embedding_input = 'learn', embedding_dim = None')
for ii in sorted(ind_to_remove, reverse = True):
del grid_full[ii]
# Second remove the duplicated combinations 'embedding_input != 'learn', embedding_dim = None'
grid_full = [dict(t) for t in {tuple(d.items()) for d in grid_full}]
### LSTM model
elif model == 'LSTM':
# Extract the dimensions of the pretrained embeddings
pretrain_embed_dim = {}
embed_inputs = params['embedding_input']
for i, e in enumerate(embed_inputs):
if embed_inputs[i] and embed_inputs[i] != 'learn':
pretrain_embed_dim.update({embed_inputs[i]:extract_embedding_dim(embed_inputs[i])})
### Create a list of dictionaries giving all possible parameter combinations
keys, values = zip(*params.items())
grid_full = [dict(zip(keys, v)) for v in itertools.product(*values)]
### Remove impossible combinations
ind_to_remove = []
for i,d in enumerate(grid_full):
# In the case of hot encoding or pretrained embedding, no need to set embedding_dim, otherwise,
# it is essential to set embedding_dim, so remove all cases where embedding_dim is not given with
# embeddings to be learned from scratch
if not grid_full[i]['embedding_input']:
grid_full[i]['embedding_dim'] = None
elif grid_full[i]['embedding_input'] == 'learn' and not grid_full[i]['embedding_dim']:
ind_to_remove.append(i)
elif grid_full[i]['embedding_input'] and grid_full[i]['embedding_input'] != 'learn':
grid_full[i]['embedding_dim'] = pretrain_embed_dim[grid_full[i]['embedding_input']]
# First remove the combinations 'embedding_input = 'learn', embedding_dim = None'
for ii in sorted(ind_to_remove, reverse = True):
del grid_full[ii]
# Second remove the duplicated combinations 'embedding_input != 'learn', embedding_dim = None'
grid_full = [dict(t) for t in {tuple(d.items()) for d in grid_full}]
### NDL model
elif model == 'NDL':
### Create a list of dictionaries giving all possible parameter combinations
keys, values = zip(*params.items())
grid_full = [dict(zip(keys, v)) for v in itertools.product(*values)]
# Raise an error if a non-supported model is entered
else:
raise ValueError(f'The entered model "{model}" is not supported')
return len(grid_full) | 8105a457b3ec30c3cdc6c42bb71afd229770b376 | 14,499 |
import pytz
from datetime import datetime
from dateutil import parser
def str2posix(timelist):
""" This will take a list of strings with the date along with a start and
end time and make a list with the posix times.
Inputs
timelist - A list of strings with the data followed by two times.
The date for the second time can also be used, it will be at index
2 and the second time will be at index 3.
Outputs
dtts - A list of posix times from the original inputs"""
if len(timelist)==3:
timelist.insert(2,timelist[0])
(dt1,dt2) = parser.parse(timelist[0]+ ' '+timelist[1]),parser.parse(timelist[2]+ ' '+timelist[3])
dt1 =dt1.replace(tzinfo=pytz.utc)
dt2 = dt2.replace(tzinfo=pytz.utc)
dt1ts = (dt1 -datetime(1970,1,1,0,0,0,tzinfo=pytz.utc)).total_seconds()
dt2ts = (dt2 -datetime(1970,1,1,0,0,0,tzinfo=pytz.utc)).total_seconds()
return [dt1ts,dt2ts] | 476fc634b967419818ef41d9ff4b21f9e4f76ff1 | 14,502 |
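# A minimal usage sketch for str2posix above; the date and times are illustrative.
def _demo_str2posix():
    start_ts, end_ts = str2posix(['2015-06-01', '12:00', '13:30'])
    # Both values are POSIX timestamps (seconds since 1970-01-01 UTC),
    # so this pair should be exactly 90 minutes apart.
    assert end_ts - start_ts == 90 * 60
    return start_ts, end_ts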
def keys_verif(verif: bool = True):
"""
    Used to verify the existence of the private and/or public ElGamal keys.
"""
print("\nChecking the presence of keys in the system....")
if isFileHere("public_key.kpk", config.DIRECTORY_PROCESSING):
# from cipher.asymmetric import elGamal as elG
print(f"\nPublic key is already here.\n")
if isFileHere("private_key.kpk", config.DIRECTORY_PROCESSING):
print(f"Private key is here too.\n")
if verif and not query_yn("Do you want to keep them? (default: No)", "no"):
rmFile("public_key.kpk", config.DIRECTORY_PROCESSING)
rmFile("private_key.kpk", config.DIRECTORY_PROCESSING)
rmFile("encrypted.kat", config.DIRECTORY_PROCESSING)
return True
else:
print("Private key's missing.\n")
if query_yn("Do you want to add them now?\n"):
while not isFileHere("private_key.kpk", config.DIRECTORY_PROCESSING):
input("Please put your 'private_key.kpk' file into the 'processing' folder.")
print("Find it !")
keys_verif()
else:
katsuAsymm()
elif isFileHere("private_key.kpk", config.DIRECTORY_PROCESSING):
print("\nPrivate key's already here but not public one's.\n")
if query_yn("Do you want to add them now? ( default: No)\n", "no"):
while not isFileHere("public_key.kpk", config.DIRECTORY_PROCESSING):
input("Please put your 'public_key.kpk' file into the 'processing' folder.")
print("find it !")
keys_verif()
else:
return True
else:
return True
return False | 824b8c31ad9cc0fb1b2ef7eef585e47c2e338a8b | 14,503 |
def r2(ground_truth, simulation, join='inner', fill_value=0):
"""
R-squared value between ground truth and simulation
Inputs:
ground_truth - ground truth measurement (data frame) with measurement in the "value" column
simulation - simulation measurement (data frame) with measurement in the "value" column
join - type of join to perform between ground truth and simulation
fill_value - fill value for non-overlapping joins
"""
if simulation is None or ground_truth is None:
return None
if len(simulation) == 0 or len(ground_truth) == 0:
return None
if type(ground_truth) is list:
ground_truth = np.nan_to_num(ground_truth)
simulation = np.nan_to_num(simulation)
ground_truth = ground_truth[np.isfinite(ground_truth)]
simulation = simulation[np.isfinite(simulation)]
return np.sqrt(((np.asarray(ground_truth) - np.asarray(simulation)) ** 2).mean())
ground_truth = ground_truth[np.isfinite(ground_truth.value)]
simulation = simulation[np.isfinite(simulation.value)]
df = join_dfs(ground_truth,simulation,join=join,fill_value=fill_value)
if df.empty:
return None
else:
return r2_score(df["value_gt"],df["value_sim"]) | 75d78e575bef0a59620cbdbf1992396a8edd0929 | 14,504 |
def depth_analysis_transform_1(rgb_tensor, depth_tensor, num_filters):
"""Builds the analysis transform."""
with tf.variable_scope("analysis"):
# --------------------------------------- rgb branch
with tf.variable_scope("layer_0"):
layer = tfc.SignalConv2D(
num_filters, (9, 9), corr=True, strides_down=4, padding="same_zeros",
use_bias=True, activation=tf.nn.relu)
rgb_tensor = layer(rgb_tensor)
# --------------------------------------- depth branch
with tf.variable_scope("layer_d0"):
layer = tfc.SignalConv2D(
num_filters, (9, 9), corr=True, strides_down=4, padding="same_zeros",
use_bias=True, activation=tf.nn.relu)
depth_tensor = layer(depth_tensor)
# --------------------------------------- fusion
tf.summary.histogram('rgb_tensor', rgb_tensor)
tf.summary.histogram('depth_tensor', depth_tensor)
tensor = rgb_tensor + depth_tensor
with tf.variable_scope("layer_1"):
layer = tfc.SignalConv2D(
num_filters, (5, 5), corr=True, strides_down=2, padding="same_zeros",
use_bias=True, activation=tf.nn.relu)
tensor = layer(tensor)
with tf.variable_scope("layer_2"):
layer = tfc.SignalConv2D(
num_filters, (5, 5), corr=True, strides_down=2, padding="same_zeros",
use_bias=False, activation=None)
tensor = layer(tensor)
return tensor | 27637e35619f61e5da2b965392a39b38cdfb6a29 | 14,506 |
def hub_quantile_prediction_dict_validator(target_group_dict, prediction_dict):
"""
Does hub prediction_dict validation as documented in `json_io_dict_from_quantile_csv_file()`
"""
error_messages = [] # return value. filled next
valid_quantiles = target_group_dict['quantiles']
prediction_quantiles = prediction_dict['prediction']['quantile']
if set(valid_quantiles) != set(prediction_quantiles):
error_messages.append(f"prediction_dict quantiles != valid_quantiles. valid_quantiles={valid_quantiles}, "
f"prediction_quantiles={prediction_quantiles}")
return error_messages | ec13824557ef9533d7c4a777daadd07414752767 | 14,508 |
def allclose_periodical(x, y, a, b, atol=1e-10):
"""
    Checks np.allclose(x, y), but assumes both x and y are periodic with respect to the interval (a, b)
"""
assert(len(x) == len(y))
period = b-a
x_p = np.remainder(x-a,period) # now in 0, b-a
y_p = np.remainder(y-a,period)
return all(np.isclose(x_p[i], y_p[i], atol=atol) or np.isclose(x_p[i], y_p[i]+period, atol=atol) or np.isclose(x_p[i], y_p[i]-period, atol=atol) for i in range(len(x_p))) | bd1c58a362a9c3926bffbcb0a27e355bfc982955 | 14,509 |
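# A minimal usage sketch for allclose_periodical above: the two arrays hold the
# same angles, shifted by one full period of 2*pi.
import numpy as np
def _demo_allclose_periodical():
    two_pi = 2 * np.pi
    x = np.array([0.0, 1.5])
    y = np.array([two_pi, 1.5 - two_pi])  # same angles shifted by one period
    assert allclose_periodical(x, y, 0.0, two_pi)
    return True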
import operator
def get_categories_to_rows_ratio(df):
"""
    Gets the ratio of unique categories to the number of rows
    for each categorical variable in the DataFrame
:param df: pd.DataFrame
:return: array of tuples
"""
cat_columns = get_categorical_variable_names(df)
ratios = {col:len(df[col].unique()) / df[col].count() for col in cat_columns}
sorted_ratios = sorted(ratios.items(), key=operator.itemgetter(1), reverse=True)
return sorted_ratios | 2734b898b6c6538b65d54709be617a6dd393c3da | 14,510 |
def _width_left_set(size: int, lsize: int, value: list, fmt: str, meta: dict) -> dict:
"""Width setting of paragraph with left repositioning."""
return Plain([RawInline(fmt, '<p style="text-align:left !important;'
'text-indent:0 !important;'
'position:relative;width:{0};left:{1}">'.
format(size, lsize))]
+ value + [RawInline(fmt, '</p>')]) | 6042b0d255fe804d7423b5e49dd700bd7f0b9bdf | 14,511 |
def GetMappingKeyName(run, user):
"""Returns a str used to uniquely identify a mapping."""
return 'RunTesterMap_%s_%s' % (run.key().name(), str(user.user_id())) | b4eb80ca5f084ea956f6a458f92de1b85e722cda | 14,512 |
from boto3.dynamodb.conditions import Key
def get_invitee_from_table(invite_code: str, table):
"""
Get a dictionary of the stored information for this invite code.
Args:
invite_code: The invitation code to search for
table: A DynamoDB table for querying
Returns:
A dictionary of information stored under the invite code
Throws:
UnknownInviteCodeError: If the invite code is not in the database
"""
response = table.query(
KeyConditionExpression=Key('invite_code').eq(invite_code)
)
items = response['Items']
if len(items) == 0:
# If there were no matches to the code then throw an error
raise UnknownInviteCodeError()
# The output will be a list, so we'll just use the first one since there
# should not be duplicates
items = items[0]
# DynamoDB cannot store empty strings, so we use null instead and convert
# between it as needed. At this point in time, we have no significance for
# null so this works fine.
items = {k: convert_null_to_empty_string(v) for k, v in items.items()}
return items | 1377e20a58174f69d8984e36aab3426c0eb392bd | 14,513 |
import math
def d_beta_dr(radius, beta, mass_r, epsilon, pressure, h_r):
""" d_beta_dr """
return 2. * (1 - 2 * (mass_r/radius)) ** (-1.) * h_r * \
( -2. * math.pi * (5*epsilon + 9*pressure + f(epsilon, pressure)) + (3/radius**2.) + 2*(1 - 2 * mass_r / radius)**(-1) * \
((mass_r/radius) + 4 * math.pi*radius*pressure)**2 ) + (2 * beta/radius) *(1 - 2 * mass_r / radius)**(-1) * \
(-1 + mass_r/radius + 2 * math.pi * radius**2 * (epsilon - pressure)) | 0880439516b70e07c01be3164a3c030bb9deeaca | 14,514 |
import json
def score(capstone, student_api):
"""
Calculates the score of the students' API model
:param student_api: StudentApi object
:return: score as a float
"""
    # Check which simulators have datapoints with outcomes
simulator_ids = []
for simulator in capstone.simulators.all():
if simulator.datapoints.exclude(outcome="").count() > 0:
simulator_ids.append(simulator.id)
if len(simulator_ids) == 0:
raise RuntimeError("No simulators with outcomes found.")
qs = DueDatapoint.objects.filter(
simulator_id__in=simulator_ids,
student=student_api.student,
)
outcomes = []
predictions = []
sensitive_class_race = {}
sensitive_class_sex = {}
for ddp in qs: # loop through each entry in DueDataPoint
outcome = bool(json.loads(ddp.datapoint.outcome))
data = json.loads(ddp.datapoint.data)
if ddp.response_status != 200: # Missing or bad response
predictions.append(not outcome)
outcomes.append(outcome)
else:
try:
prediction = json.loads(ddp.response_content)["prediction"]
except (json.JSONDecodeError, KeyError):
predictions.append(not outcome)
outcomes.append(outcome)
else:
sex = data["sex"].lower()
if sex not in sensitive_class_sex:
sensitive_class_sex[sex] = {
"outcomes": [],
"predictions": [],
}
sensitive_class_sex[sex]["outcomes"].append(outcome)
sensitive_class_sex[sex]["predictions"].append(prediction)
race = data["race"].lower()
if race not in sensitive_class_race:
sensitive_class_race[race] = {
"outcomes": [],
"predictions": [],
}
sensitive_class_race[race]["outcomes"].append(outcome)
sensitive_class_race[race]["predictions"].append(prediction)
if not isinstance(prediction, bool):
predictions.append(not outcome)
else:
predictions.append(prediction)
outcomes.append(outcome)
logger.info(student_api.student)
f1_score = metrics.f1_score(outcomes, predictions, pos_label=True)
logger.info("f1_score %s" % f1_score)
race_diff = fairness_score_precision(sensitive_class_race)
sex_diff = fairness_score_precision(sensitive_class_sex)
is_fair = race_diff < 0.2 and sex_diff < 0.2
logger.info("race_diff %s" % race_diff)
logger.info("sex_diff %s" % sex_diff)
logger.info("is_fair %s" % is_fair)
if not is_fair:
f1_score -= 0.1
return f1_score | bb4f545835f480c9fac97acc698daef08a7684f2 | 14,515 |
def clean_lhdf(df: pd.DataFrame):
"""
Removes unneccessary columms from the location history data frame and computes new required columns
Parameters
----------
df : pandas.DataFrame
DataFrame to process
Returns
-------
Copy of `df`, altered the following way:
* Colums removed
* `activity`
* `altitude`
* `heading`
* Columns expected in `df`
* `time`
* `latitudeE7`
* `longitudeE7`
* Columns added
* `date` (Format `YYYY-MM-DD`)
* `weekday` (Format: `0-6`; 0 = Sunday)
* `daytime` (Format: HH:ii, 24h style)
* `lat` (Format: dd.ddddd)
        * `lng` (Format: dd.ddddd)
"""
df = df.copy()
# Drop unneccessary cols
df.drop(labels=["activity", "altitude", "heading"], axis=1, inplace=True)
# compute time cols
df.loc[:, "date"] = df.time.dt.strftime("%Y-%m-%d")
df.loc[:, "weekday"] = df.time.dt.strftime("%w") #was: %u
df.loc[:, "daytime"] = df.time.dt.strftime("%H:%M")
df.loc[:,"lat"] = pd.to_numeric(df.latitudeE7) / 1e7
df.loc[:,"lng"] = pd.to_numeric(df.longitudeE7) / 1e7
return df | 86280a333082e964553030d4e586a267e93edfae | 14,516 |
def year_from_operating_datetime(df):
"""Add a 'year' column based on the year in the operating_datetime.
Args:
df (pandas.DataFrame): A DataFrame containing EPA CEMS data.
Returns:
pandas.DataFrame: A DataFrame containing EPA CEMS data with a 'year'
column.
"""
df['year'] = df.operating_datetime_utc.dt.year
return df | 1c7bbc6465d174465151e5e777671f319ee656b7 | 14,517 |
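# A minimal usage sketch for year_from_operating_datetime above, using a tiny
# made-up frame with the expected operating_datetime_utc column.
import pandas as pd
def _demo_year_from_operating_datetime():
    df = pd.DataFrame({
        "operating_datetime_utc": pd.to_datetime(["2019-01-01 05:00", "2020-06-15 23:00"]),
    })
    out = year_from_operating_datetime(df)
    assert list(out["year"]) == [2019, 2020]
    return out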
def is_thrift(target):
"""Returns True if the target has thrift IDL sources."""
return isinstance(target, JavaThriftLibrary) | 4a56cf5cec923933fec628173cb2ab1a122b0127 | 14,518 |
def get_instance(value, model):
"""Returns a model instance from value. If value is a string, gets by key
name, if value is an integer, gets by id and if value is an instance,
returns the instance.
"""
if not issubclass(model, db.Model):
raise TypeError('Invalid type (model); expected subclass of Model.')
if isinstance(value, basestring):
return model.get_by_key_name(value)
elif isinstance(value, (int, long)):
return model.get_by_id(value)
elif isinstance(value, model):
return value
else:
raise TypeError('Invalid type (value); expected string, number or '
'%s.' % model.__name__) | 85544b057e3e6c82730ba743a625610c55b48ff0 | 14,519 |
def clean_infix(token, INFIX):
"""
Checks token for infixes. (ex. bumalik = balik)
token: word to be stemmed for infixes
returns STRING
"""
if check_validation(token):
return token
for infix in INFIX_SET:
if len(token) - len(infix) >= 3 and count_vowel(token[len(infix):]) >= 2:
if token[0] == token[4] and token[1: 4] == infix:
INFIX.append(infix)
return token[4:]
elif token[2] == token[4] and token[1: 3] == infix:
INFIX.append(infix)
return token[0] + token[3:]
elif token[1: 3] == infix and check_vowel(token[3]):
INFIX.append(infix)
return token[0] + token[3:]
return token | fdd8e90bdea14ca2344dd465622bd2e79905e4fe | 14,520 |
def seq_to_encoder(input_seq):
"""从输入空格分隔的数字id串,转成预测用的encoder、decoder、target_weight等
"""
input_seq_array = [int(v) for v in input_seq.split()]
encoder_input = [PAD_ID] * \
(input_seq_len - len(input_seq_array)) + input_seq_array
decoder_input = [GO_ID] + [PAD_ID] * (output_seq_len - 1)
encoder_inputs = [np.array([v], dtype=np.int32) for v in encoder_input]
decoder_inputs = [np.array([v], dtype=np.int32) for v in decoder_input]
target_weights = [np.array([1.0], dtype=np.float32)] * output_seq_len
return encoder_inputs, decoder_inputs, target_weights | 9a9203aa9e3005acd7d55516fbe8c5710ea25ae3 | 14,521 |
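# A minimal usage sketch for seq_to_encoder above. PAD_ID, GO_ID, input_seq_len and
# output_seq_len are module-level globals the snippet relies on; the values below
# are assumptions made only for this demonstration.
PAD_ID = 0
GO_ID = 1
input_seq_len = 5
output_seq_len = 5
def _demo_seq_to_encoder():
    encoder_inputs, decoder_inputs, target_weights = seq_to_encoder("7 8 9")
    # The encoder input is left-padded with PAD_ID up to input_seq_len.
    assert [int(v[0]) for v in encoder_inputs] == [0, 0, 7, 8, 9]
    # The decoder input starts with GO_ID followed by padding.
    assert [int(v[0]) for v in decoder_inputs] == [1, 0, 0, 0, 0]
    assert len(target_weights) == output_seq_len
    return encoder_inputs, decoder_inputs, target_weights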
def getMergers(tree, map_strain2species, options):
"""merge strains to species.
returns the new tree with species merged and
a dictionary of genes including the genes that have been merged.
Currently, only binary merges are supported.
"""
n = TreeTools.GetSize(tree) + 1
all_strains = map_strain2species.keys()
all_species = map_strain2species.values()
genes = []
for x in range(n):
g = {}
for s in all_strains:
g[s] = set()
genes.append(g)
# build list of species pairs that can be joined.
map_species2strain = IOTools.getInvertedDictionary(map_strain2species)
pairs = []
for species, strains in map_species2strain.items():
for x in range(len(strains)):
for y in range(0, x):
pairs.append((strains[x], strains[y]))
# map of genes to new genes
# each entry in the list is a pair of genes of the same species
# but different strains to be joined.
map_genes2new_genes = []
# dictionary of merged genes. This is to ensure that no gene
# is merged twice
merged_genes = {}
def count_genes(node_id):
"""record number of genes per species for each node
This is done separately for each strain. The counts are aggregated for each species
over strains by taking the maximum gene count per strain. This ignores any finer
tree structure below a species node.
"""
node = tree.node(node_id)
if node.succ:
this_node_set = genes[node_id]
# process non-leaf node
for s in node.succ:
# propagate: terminated nodes force upper nodes to terminate
# (assigned to None).
if not genes[s]:
this_node_set = None
break
# check if node merges genes that are not part of the positive
# set
for strain in all_strains:
if strain in map_strain2species:
# merge genes from all children
this_node_set[strain] = this_node_set[
strain].union(genes[s][strain])
if len(this_node_set[strain]) > 1:
# more than two genes for a single species, so no
# join
this_node_set = None
break
elif strain not in map_strain2species and \
this_node_set[strain] > 0:
this_node_set = None
break
if this_node_set is None:
genes[node_id] = None
return
for strain_x, strain_y in pairs:
if len(this_node_set[strain_x]) == 1 and len(this_node_set[strain_y]) == 1:
species = map_strain2species[strain_x]
gene_x, gene_y = tuple(this_node_set[strain_x])[0], tuple(
this_node_set[strain_y])[0]
# check if these to genes have already been merged or are
# merged with other partners already
# The merged genes are assigned the same node_id, if they have
# been already merged.
key1 = strain_x + gene_x
key2 = strain_y + gene_y
if key1 > key2:
key1, key2 = key2, key1
merge = False
if key1 in merged_genes and key2 in merged_genes:
if merged_genes[key1] == merged_genes[key2]:
merge = True
elif key1 not in merged_genes and key2 not in merged_genes:
merge = True
merged_genes[key1] = node_id
merged_genes[key2] = node_id
if merge:
map_genes2new_genes.append(
(node_id, species, strain_x, gene_x, strain_y, gene_y))
# once two genes have been joined, they can not be remapped
# further
genes[node_id] = None
return
else:
# process leaf
strain, t, g, q = parseIdentifier(node.data.taxon, options)
if strain in map_strain2species:
genes[node_id][strain].add(g)
else:
# do not process nodes that do not need to be mapped
genes[node_id] = None
tree.dfs(tree.root, post_function=count_genes)
return map_genes2new_genes | 48fb083027e00d93754ee4064edbc268ea4047a5 | 14,522 |
import numpy as np
from scipy import stats
def convolve_with_gaussian(
    data: np.ndarray, kernel_width: int = 21
) -> np.ndarray:
"""
Convolves a 1D array with a gaussian kernel of given width
"""
# create kernel and normalize area under curve
norm = stats.norm(0, kernel_width)
X = np.linspace(norm.ppf(0.0001), norm.ppf(0.9999), kernel_width)
    _kernel = norm.pdf(X)
    kernel = _kernel / np.sum(_kernel)
return np.convolve(data, kernel, mode="same") | 63949d9c235a1a467858077de8dda8455c139551 | 14,523 |
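# A minimal usage sketch for convolve_with_gaussian above: smooth a noisy sine
# wave. The signal and kernel width are illustrative choices.
def _demo_convolve_with_gaussian():
    rng = np.random.default_rng(0)
    t = np.linspace(0, 2 * np.pi, 500)
    noisy = np.sin(t) + 0.3 * rng.standard_normal(t.size)
    smoothed = convolve_with_gaussian(noisy, kernel_width=21)
    assert smoothed.shape == noisy.shape  # "same" mode preserves length
    return smoothed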
def post_netspeed(event, context):
"""
Speed test data ingestion handler
"""
return process_reading(event['query'], NETSPEED_SQL) | 8414fe3608b7433a177f0c8c54cce61d01339b67 | 14,524 |
import json
def notify_host_disabled(token, host_name):
"""
Notify OpenStack Nova that a host is disabled
"""
url = token.get_service_url(OPENSTACK_SERVICE.NOVA, strip_version=True)
if url is None:
raise ValueError("OpenStack Nova URL is invalid")
# Get the service ID for the nova-compute service.
compute_service_id = get_host_service_id(token, host_name, 'nova-compute')
api_cmd = url + "/v2.1/%s/os-services/%s" % (token.get_tenant_id(),
compute_service_id)
api_cmd_headers = dict()
api_cmd_headers['Content-Type'] = "application/json"
api_cmd_headers['X-OpenStack-Nova-API-Version'] = NOVA_API_VERSION
api_cmd_payload = dict()
api_cmd_payload['forced_down'] = True
response = rest_api_request(token, "PUT", api_cmd, api_cmd_headers,
json.dumps(api_cmd_payload))
return response | 904c951a37b8b84df4aa48951c424686a123ff30 | 14,525 |
def compute_moments_weights_slow(mu, x2, neighbors, weights):
"""
This version exaustively iterates over all |E|^2 terms
to compute the expected moments exactly. Used to test
the more optimized formulations that follow
"""
N = neighbors.shape[0]
K = neighbors.shape[1]
# Calculate E[G]
EG = 0
for i in range(N):
for k in range(K):
j = neighbors[i, k]
wij = weights[i, k]
EG += wij*mu[i]*mu[j]
# Calculate E[G^2]
EG2 = 0
for i in range(N):
EG2_i = 0
for k in range(K):
j = neighbors[i, k]
wij = weights[i, k]
for x in range(N):
for z in range(K):
y = neighbors[x, z]
wxy = weights[x, z]
s = wij*wxy
if s == 0:
continue
if i == x:
if j == y:
t1 = x2[i]*x2[j]
else:
t1 = x2[i]*mu[j]*mu[y]
elif i == y:
if j == x:
t1 = x2[i]*x2[j]
else:
t1 = x2[i]*mu[j]*mu[x]
else: # i is unique since i can't equal j
if j == x:
t1 = mu[i] * x2[j] * mu[y]
elif j == y:
t1 = mu[i] * x2[j] * mu[x]
else: # i and j are unique, no shared nodes
t1 = mu[i] * mu[j] * mu[x] * mu[y]
EG2_i += s * t1
EG2 += EG2_i
return EG, EG2 | 5a2984174f366a34f16490bb7b9252ec4eaf08db | 14,526 |
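# A minimal usage sketch for compute_moments_weights_slow above, on a tiny toy
# graph. The neighbour lists, weights and per-node moments are made up for
# illustration only.
import numpy as np
def _demo_compute_moments_weights_slow():
    mu = np.array([0.5, 1.0, 1.5])                   # per-node means
    x2 = np.array([0.5, 1.5, 3.0])                   # per-node second moments
    neighbors = np.array([[1, 2], [0, 2], [0, 1]])   # K=2 neighbours per node
    weights = np.full((3, 2), 0.5)
    eg, eg2 = compute_moments_weights_slow(mu, x2, neighbors, weights)
    # For valid moments (x2 >= mu**2), E[G^2] should be at least E[G]^2.
    assert eg2 >= eg ** 2
    return eg, eg2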
import functools
def sum_fn(fun, ndims=0):
"""Higher order helper for summing the result of fun."""
@functools.wraps(fun)
def wrapped(*args):
batch_loglik = fun(*args)
return jnp.sum(
batch_loglik.reshape((-1,) +
batch_loglik.shape[-ndims +
len(batch_loglik.shape):]),
axis=0)
return wrapped | 521a4084fee84f16de5714010be9528296f8b231 | 14,527 |
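# A minimal usage sketch for sum_fn above, using jax.numpy as the wrapped
# functions do. The lambda is an illustrative stand-in for a per-example
# log-likelihood.
import jax.numpy as jnp
def _demo_sum_fn():
    summed_sq = sum_fn(lambda x: x ** 2)   # ndims=0: reduce over the batch axis
    total = summed_sq(jnp.arange(3.0))     # per-example values [0, 1, 4]
    assert float(total) == 5.0
    return total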
from typing import Optional
def get_control_policy_attachments(language: Optional[str] = None,
output_file: Optional[str] = None,
policy_type: Optional[str] = None,
target_id: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetControlPolicyAttachmentsResult:
"""
This data source provides the Resource Manager Control Policy Attachments of the current Alibaba Cloud user.
> **NOTE:** Available in v1.120.0+.
## Example Usage
Basic Usage
```python
import pulumi
import pulumi_alicloud as alicloud
example = alicloud.resourcemanager.get_control_policy_attachments(target_id="example_value")
pulumi.export("firstResourceManagerControlPolicyAttachmentId", example.attachments[0].id)
```
:param str language: The language. Valid value `zh-CN`, `en`, and `ja`. Default value `zh-CN`
:param str policy_type: The type of policy.
:param str target_id: The Id of target.
"""
__args__ = dict()
__args__['language'] = language
__args__['outputFile'] = output_file
__args__['policyType'] = policy_type
__args__['targetId'] = target_id
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('alicloud:resourcemanager/getControlPolicyAttachments:getControlPolicyAttachments', __args__, opts=opts, typ=GetControlPolicyAttachmentsResult).value
return AwaitableGetControlPolicyAttachmentsResult(
attachments=__ret__.attachments,
id=__ret__.id,
ids=__ret__.ids,
language=__ret__.language,
output_file=__ret__.output_file,
policy_type=__ret__.policy_type,
target_id=__ret__.target_id) | cfa39ca8926c281151b5ae06ef89ce865dfd0af4 | 14,528 |
def get_gdb(chip_name=None,
gdb_path=None,
log_level=None,
log_stream_handler=None,
log_file_handler=None,
log_gdb_proc_file=None,
remote_target=None,
remote_address=None,
remote_port=None, **kwargs):
"""
    Set any parameter to a non-None value to override the default get_gdb behaviour.
Parameters
----------
chip_name : Any(None, str)
gdb_path : Any(None, str)
log_level : Any(None, str)
log_stream_handler : Any(None, str)
log_file_handler : Any(None, str)
log_gdb_proc_file : Any(None, str)
remote_target : Any(None, str)
remote_address : Any(None, str)
remote_port : Any(None, str)
Returns
-------
Gdb
"""
_gdb = _str_to_class("Gdb" + get_good_name(chip_name))
return _gdb(gdb_path=gdb_path,
log_level=log_level,
log_stream_handler=log_stream_handler,
log_file_handler=log_file_handler,
log_gdb_proc_file=log_gdb_proc_file,
remote_target=remote_target,
remote_address=remote_address,
remote_port=remote_port, **kwargs) | 15dc451b9cbf21c5f96279a17449e4169e0bae83 | 14,529 |
from typing import Callable
from typing import Awaitable
def check(func: Callable[..., Awaitable[Callable[[CommandContext], Awaitable[bool]]]]) -> Check:
"""
A decorator which creates a check from a function.
"""
return Check(func) | 2354eef311e1867333ade47996fb37cee07ce4cd | 14,531 |
def service_c(request):
""" Renders the service chair page with service submissions """
events = ServiceEvent.objects.filter(semester=get_semester())
submissions_pending = ServiceSubmission.objects.filter(semester=get_semester(), status='0').order_by("date")
submissions_submitted = ServiceSubmission.objects.filter(semester=get_semester(), status='1').order_by(
"date")
position = Position.objects.get(title=Position.PositionChoices.SERVICE_CHAIR)
hours_pending = 0
for submission in submissions_pending:
hours_pending += submission.hours
for submission in submissions_submitted:
hours_pending += submission.hours
hours_approved = 0
submissions_approved = ServiceSubmission.objects.filter(semester=get_semester(), status='2')
for submission in submissions_approved:
hours_approved += submission.hours
context = {
'events': events,
'hours_approved': hours_approved,
'hours_pending': hours_pending,
'submissions_pending': submissions_pending,
'submissions_submitted': submissions_submitted,
'position': position,
}
return render(request, 'service-chair/service-chair.html', context) | 96d9b281c562a0ddb31d09723012cc9411c4ff09 | 14,532 |
def get_tank_history(request, tankid):
"""
Returns a response listing the device history for each tank.
"""
# Sanitize tankid
tankid = int(tankid)
# This query is too complex to be worth constructing in ORM, so just use raw SQL.
cursor = connection.cursor()
cursor.execute("""\
SELECT t.time, t.device_id AS mac
FROM (SELECT d.time, d.device_id, LAG(d.device_id) OVER(ORDER BY d.time) AS prev_device_id
FROM (SELECT time, tankid, device_id
FROM devices_datum
WHERE tankid = %s
) AS d
) AS t WHERE t.device_id IS DISTINCT FROM t.prev_device_id;
""", [tankid])
history = dictfetchall(cursor)
history_serializer = TankHistorySerializer(history, many=True)
return JsonResponse(history_serializer.data, safe=False) | 39c21c9761ff0e40e1c1f8904cf9b9881faf13ed | 14,533 |
import inspect
def get_absolute_module(obj):
"""
    Get the absolute module path for the given object.
e.g. assert get_absolute_module(get_absolute_module) == 'artemis.general.should_be_builtins'
:param obj: A python module, class, method, function, traceback, frame, or code object
:return: A string representing the import path.
"""
file_path = inspect.getfile(obj)
return file_path_to_absolute_module(file_path) | ea2c85d9ba90414cddce00dcc5ed092b8c6777a2 | 14,534 |