content
stringlengths 35
416k
| sha1
stringlengths 40
40
| id
int64 0
710k
|
---|---|---|
def collatz_function(n):
    """Apply one Collatz step to *n*.

    Returns n // 2 (integer half) when n is even, and 3*n + 1 when n is odd.
    """
    return n // 2 if n % 2 == 0 else 3 * n + 1
def has_enabled_clear_method(store):
    """Returns True iff obj has a clear method that is enabled (i.e. not disabled)"""
    if not hasattr(store, 'clear'):
        return False
    # Enabled unless the method carries a truthy ``disabled`` attribute.
    return not getattr(store.clear, 'disabled', False)
import argparse
def _parser():
    """Take care of all the argparse stuff.

    Builds the CLI for the arXiv text-to-speech tool and parses sys.argv.

    :returns: the args (argparse.Namespace)
    """
    parser = argparse.ArgumentParser(description="Arxiv Text-To-Speach")
    # Positional: which arXiv paper to process.
    parser.add_argument('arxivID', help='Arxiv Identifier')
    # NOTE(review): the flag-like options below use default=False/True but no
    # action='store_true', so they require a value on the command line and
    # store it as a string (e.g. "-t x" -> saveText == "x"). Callers likely
    # only test truthiness -- confirm whether store_true was intended.
    parser.add_argument('-o', '--output', default=False,
                        help='Output Filename',)
    parser.add_argument('-t', '--saveText', default=False,
                        help='Save the text file')
    parser.add_argument('-s', '--saveAudio', default=True,
                        help='Save the audiofile')
    parser.add_argument('-k', '--keepSrc', default=False,
                        help='Keep source tar file')
    parser.add_argument('-l', '--keepTex', default=False,
                        help='Keep the latex source file name.tex')
    parser.add_argument('-e', '--ext', default="mp3",
                        help='Audio output extension')
    parser.add_argument('-a', '--autoplay', default=False,
                        help='Automatically start playing')
    parser.add_argument('-p', '--player', default="mplayer",
                        help='Mediaplayer to use for autoplay')
    parser.add_argument('--options', default="",
                        help='Extra command line player options')
    args = parser.parse_args()
    return args
import torch
def coin_flip(prob):
    """
    Return the outcome of a biased coin flip.

    Args:
        prob: the probability of True.

    Returns: bool -- True with probability ``prob``; always False for
        prob <= 0.
    """
    if prob <= 0:
        return False
    return torch.rand(1).item() < prob
def to_pass(line):
    """
    Replace a line of code with a pass statement, with
    the correct number of leading spaces

    Arguments
    ----------
    line : str, line of code

    Returns
    ----------
    passed : str, 'pass' preceded by the same leading spaces as *line*
    """
    # count the leading spaces of the original line
    indent = len(line) - len(line.lstrip(' '))
    return ' ' * indent + 'pass'
import numpy
def reverse_sort_C(C, sorted_index):
    """
    Perform the reverse of the sort described in sort_by_sorted_index, on
    rows in a numpy array: row i of C is scattered back to position
    sorted_index[i].

    Args:
        C (numpy.array): array with C.shape[0] = len(sorted_index)
        sorted_index (list of ints): desired order for rows of C

    Returns:
        numpy.array: new float array with the rows of C un-permuted.
    """
    C_new = numpy.zeros(C.shape)
    # Fancy indexing scatters every row in one vectorized step, replacing
    # the original O(rows*cols) Python double loop.
    C_new[sorted_index] = C
    return C_new
import itertools
def string_permutations(test_list, list_to_permutate):
    """Takes a list and a set, and returns a list of all the permutations as strings"""
    # Each permutation is appended to the fixed prefix and stringified.
    return [str(test_list + list(perm))
            for perm in itertools.permutations(list_to_permutate)]
def broadcastable_to_str(b):
    """Return string representation of broadcastable."""
    # Known broadcastable patterns map to conventional names; anything
    # unrecognised yields the empty string.
    named = {
        (): "scalar",
        (False,): "vector",
        (False, True): "col",
        (True, False): "row",
        (False, False): "matrix",
    }
    return named.get(b, "")
def partition(sort_list, low, high):
    """
    Partition sort_list[low:high+1] in place around the pivot sort_list[high]
    (Lomuto scheme): elements <= pivot end up on the left, greater elements
    on the right.

    Returns the final index of the pivot.
    """
    pivot = sort_list[high]
    boundary = low - 1  # index of the last element known to be <= pivot
    for cursor in range(low, high):
        if sort_list[cursor] <= pivot:
            boundary += 1
            sort_list[boundary], sort_list[cursor] = \
                sort_list[cursor], sort_list[boundary]
    # Put the pivot just after the <=-pivot region.
    sort_list[boundary + 1], sort_list[high] = \
        sort_list[high], sort_list[boundary + 1]
    return boundary + 1
def New_Dataframe(old_df, indicator_name):
    """ create a new dataframe that is composed of only one indicator

    Args:
        old_df (dataframe): general dataframe from which we extract the new one
        indicator_name (string): name of the indicator that will compose the new dataframe

    Returns:
        (dataframe): dataframe composed of only the chosen indicator
    """
    selected_rows = old_df.Indicator == indicator_name
    return old_df.loc[selected_rows]
def check_args(args):
    """
    Checks validity of command line arguments and, in some cases
    modifies them a little bit.

    :param args: The command-line arguments.

    :type args: argparse.ArgumentParser Namespace

    :returns: argparse.ArgumentParser Namespace -- The updated command-line
        arguments.

    Exits the process (SystemExit) on any validation failure.
    """
    if not args.outbase:
        print("Must specify an output base filename (--outbase).")
        raise SystemExit
    # Supports passing a list of aspect files.
    if args.aspfile:
        args.aspfile = str(args.aspfile).split(',')
    # If the band is not explicity called, attempt to derive it from filenames.
    if not args.band and args.raw6file:
        print("Trying to derive band from raw6 filename...")
        # GALEX raw6 filenames embed the detector: '-fd-' = far UV, '-nd-' = near UV.
        if '-fd-raw6' in args.raw6file:
            args.band = 'FUV'
        elif '-nd-raw6' in args.raw6file:
            args.band = 'NUV'
        else:
            print("Unable to parse band from raw6 filename. Specify band on"
                  " command line using --band.")
            raise SystemExit
    if not args.band:
        # Band may still be absent here (e.g. no raw6 file given); only warn.
        print("Band not specified.")
    else:
        # Normalize case, then restrict to the two supported bands.
        args.band = args.band.upper()
        if not args.band in ["NUV", "FUV"]:
            print("Band must be NUV or FUV. ")
            raise SystemExit
    if not (args.raw6file or args.scstfile) and not args.eclipse:
        print ("Must provide raw6 and scst files or specify eclipse.")
        raise SystemExit
    return args
from pathlib import Path
def check_path_in_dir(file_path, directory_path):
    """
    Check if a file path is in a directory

    :param file_path: Full path to a file
    :param directory_path: Full path to a directory the file may be in
    :return: True if the file is in the directory
    """
    # The file is "in" the directory exactly when the directory is its
    # immediate parent.
    return Path(file_path).parent == Path(directory_path)
def flat_out(f, *a, **kw):
    """Flatten the output of target function."""
    result = f(*a, **kw)
    # Forward all positional/keyword arguments, then flatten whatever
    # array-like the target returned.
    return result.flatten()
import argparse
def _parser():
    """Take care of all the argparse stuff.

    Builds the CLI for interactive fits-spectrum normalization and parses
    sys.argv.

    :returns: the args
    """
    p = argparse.ArgumentParser(
        description='Interactively normalize fits spectra.')
    p.add_argument("fitsname", type=str,
                   help="Specturm to continuum normalize.")
    p.add_argument("--suffix", default="inorm",
                   help="Indicator to add to filename before '.fits'.")
    p.add_argument("-e", "--flux_errors", default=False, action="store_true",
                   help="Calculate normalization fucntion errors on the flux.")
    p.add_argument("-p", "--plot", default=False, action="store_true",
                   help="Plot normalized result.")
    return p.parse_args()
import sys
def get_error_hint(ctx, opts, exc):
    """Get a hint to show to the user (if any)."""
    # Look for a status-specific hint function (e.g. get_404_error_hint)
    # defined in this module and delegate to it when present.
    this_module = sys.modules[__name__]
    handler = getattr(this_module, 'get_%s_error_hint' % exc.status, None)
    if handler is None:
        return None
    return handler(ctx, opts, exc)
def get_relation(filename):
    """Read a relation file (one relation per line) and return rel2idx.

    Each line is stripped and lower-cased; its zero-based line number
    becomes the index.

    :param filename: path of the relation file
    :return: dict mapping relation string -> line index
    """
    rel2idx = {}
    # with-statement guarantees the file is closed even if parsing fails
    # (the original left the handle open on any exception).
    with open(filename, 'r') as f:
        for n, rel in enumerate(f):
            rel2idx[rel.strip().lower()] = n
    return rel2idx
def id_for_base(val):
    """Return an id for a param set."""
    if val is None:
        return "No base params"
    # Check markers in priority order; first match wins.
    for marker, label in (("editor-command", "Long base params"),
                          ("ecmd", "Short base params")):
        if marker in val:
            return label
    return "Unknown base params"
def serialize_serializable(obj, spec, ctx):
    """ Serialize any class that defines a ``serialize`` method. """
    # NOTE(review): despite the docstring, this calls ``serafin_serialize``,
    # not ``serialize`` -- confirm which method name the serafin protocol
    # actually expects.
    return obj.serafin_serialize(spec, ctx)
def parse_labels_mapping(s):
    """
    Parse the mapping between a label type and it's feature map.
    For instance:
        '0;1;2;3' -> [0, 1, 2, 3]
        '0+2;3' -> [0, None, 0, 1]
        '3;0+2;1' -> [1, 2, 1, 0]

    Returns None for an empty string. Labels must be 0..3 and appear at
    most once overall.
    """
    if len(s) == 0:
        return None
    # ';' separates groups, '+' joins labels sharing one feature map.
    groups = [[int(tok) for tok in part.split('+')] for part in s.split(';')]
    members = sum(groups, [])
    assert all(m in range(4) for m in members)
    assert len(members) == len(set(members))
    mapping = []
    for label in range(4):
        # find the (unique) group containing this label, if any
        owners = [gi for gi, group in enumerate(groups) if label in group]
        assert len(owners) <= 1
        mapping.append(owners[0] if owners else None)
    assert len(mapping) == 4
    return mapping
from typing import Any
import dataclasses
def _is_nested(x: Any) -> bool:
"""Returns whether a value is nested."""
return isinstance(x, dict) or dataclasses.is_dataclass(x) | 798000adfd8eb900b61be988ab6a31e1b062540d | 6,880 |
def contains_pept(name):
    """Checks if the saccharide name contains the peptide fragment,
    such as EES, GS, SA etc"""
    # Names tagged '_NRE' never count as peptide-containing.
    if '_NRE' in name:
        return False
    return any(stub in name for stub in ('_E', '_G', '_S', '_A'))
import torch
def check_stacked_complex(data: torch.Tensor) -> torch.Tensor:
    """
    Check if tensor is stacked complex (real & imag parts stacked along last
    dim) and convert it to a combined complex tensor.

    Args:
        data: A complex valued tensor, where the size of the final dimension
            might be 2.

    Returns:
        A complex valued tensor.
    """
    # A trailing dimension of exactly 2 is treated as (real, imag) pairs;
    # anything else passes through untouched.
    if data.shape[-1] == 2:
        return torch.view_as_complex(data)
    return data
def circle(x, y, a, b, width):
    """
    Test whether point (x, y) lies within the circle of diameter *width*
    centred at (a, b).

    :param x: point x coordinate
    :param y: point y coordinate
    :param a: centre x coordinate
    :param b: centre y coordinate
    :param width: circle diameter
    :return: the margin r^2 - d^2 (rounded terms) when the point is inside
        or on the circle, otherwise None
    """
    dx2 = round((x - a) ** 2, 3)
    dy2 = round((y - b) ** 2, 3)
    radius2 = round((width / 2) ** 2, 3)
    dist2 = dx2 + dy2
    if dist2 > radius2:
        return None
    return radius2 - dist2
def demo__google_result_open_in_new_tab(raw_text, content_mime):
    """Force google search's result to open in new tab. to avoid iframe problem
    (Open Google search result links in a new browser tab.)
    """
    def hexlify_to_json(ascii_str):
        # Build the needle as it appears inside Google's JSON payloads:
        # quotes/angle-brackets/&/= are hex-escaped (\xNN), then backslashes
        # and slashes are JSON-escaped so a plain str.replace matches.
        _buff = ''
        for char in ascii_str:
            if char in '\'\"<>&=':
                _buff += r'\x' + hex(ord(char))[2:]
            else:
                _buff += char
        _buff = _buff.replace('\\', '\\\\')
        _buff = _buff.replace('/', r'\/')
        return _buff
    # JSON (AJAX) responses need the escaped form; plain HTML does not.
    if content_mime == 'application/json':
        raw_text = raw_text.replace(
            hexlify_to_json('<h3 class="r"><a href="'),
            hexlify_to_json('<h3 class="r"><a target="_blank" href="')
        )
        raw_text = raw_text.replace(
            hexlify_to_json('<h3 class="r"><a class="l" href="'),
            hexlify_to_json('<h3 class="r"><a target="_blank" class="l" href="')
        )
    else:
        raw_text = raw_text.replace('<h3 class="r"><a href="', '<h3 class="r"><a target="_blank" href="')
        raw_text = raw_text.replace('<h3 class="r"><a class="l" href="', '<h3 class="r"><a target="_blank" class="l" href="')
    return raw_text
def iterations_for_terms(terms):
    """
    Parameters
    ----------
    terms : int
        Number of terms in the singular value expansion.

    Returns
    -------
    Int
        The number of iterations of the power method needed to produce
        reasonably good image qualit for the given number of terms in the singular value expansion.
        By "reasonably good" we mean that using larger number of terms will not produce noticeably
        better image, thus it is wasteful.
    """
    # Threshold table: (max terms, iterations). First matching row wins.
    for limit, iterations in ((20, 10), (50, 5), (100, 3)):
        if terms <= limit:
            return iterations
    return 1
def get_leaf_nodes(struct):
    """ Get the list of indices of leaf nodes.
    """
    return [idx for idx, node in enumerate(struct) if node['is_leaf']]
def get_node_edge(docs, w2d_score, d2d_score):
    """
    Build node ids and edge features for a word/document graph.

    :param docs: sequence of documents (only iterated in lockstep with
        w2d_score; the document payload itself is unused here)
    :param w2d_score: per-document dicts of {word_id: word-to-doc score}
    :param d2d_score: sequence of doc-to-doc scores; entry di-1 scores
        doc di against doc 0 -- TODO confirm against the graph consumer
    :return: dict with node payloads (w_id, d_idx), word-doc edge endpoint
        lists and features, and doc-doc edge endpoint lists and features
    """
    wid2wnid = {}               # word id -> word node id (dedup across docs)
    w_id, d_idx = [], []        # node payloads: word ids, document indices
    w_d_wnid, w_d_dnid = [], []  # word-doc edge endpoints
    w_d_feat = {"score": [], 'dtype': []}
    d_d_dnid1, d_d_dnid2 = [], []  # doc-doc edge endpoints
    d_d_feat = {"score": [], 'dtype': []}
    wnid, dnid = 0, 0
    for di, (_, wd_scores) in enumerate(zip(docs, w2d_score)):
        for wid, wd_score in wd_scores.items():
            # Allocate a node id the first time a word is seen.
            if wid not in wid2wnid:
                wid2wnid[wid] = wnid
                w_id.append(wid)
                wnid += 1
            w_d_wnid.append(wid2wnid[wid])
            w_d_dnid.append(dnid)
            w_d_feat["score"].append(wd_score)
            w_d_feat["dtype"].append(0)  # edge type 0: word-doc
        d_idx.append(di)
        dnid += 1
        # Every later document gets one edge back to document 0.
        if di > 0:
            d_d_dnid1.append(0)
            d_d_dnid2.append(di)
            # d_d_feat["score"].append(max(0, min(round(math.log(d2d_score[di-1])), 9)))
            d_d_feat["score"].append(d2d_score[di - 1])
            d_d_feat["dtype"].append(1)  # edge type 1: doc-doc
    utils = {"w_id": w_id,
             "d_idx": d_idx,
             "w_d_wnid": w_d_wnid,
             "w_d_dnid": w_d_dnid,
             "w_d_feat": w_d_feat,
             "d_d_dnid1": d_d_dnid1,
             "d_d_dnid2": d_d_dnid2,
             "d_d_feat": d_d_feat
             }
    return utils
def _get_dir_list(names):
    """This function obtains a list of all "named"-directory [name1-yes_name2-no, name1-no_name2-yes, etc]
    The list's building occurs dynamically, depending on your list of names (and its order) in config.yaml.
    The entire algorithm is described in "img" in the root directory and in the Readme.md.

    Generates all 2**len(names) yes/no combinations, one directory name per
    combination, truth-table style (the first name toggles slowest).

    :param names: the names of judges, they have to be in the same order as they arranged in a filename.
    For example: if a file calls "name1-yes_name2-no_", therefore the name's list looks like [name1, name2]
    :return: list of compiled directories.
    """
    # Number of columns
    n = len(names)
    # Number of lines
    num_lines = 2**n
    # fill the result with '\0'
    dir_list = ['' for i in range(num_lines)]
    # In our case a name represents a column.
    for name in names:
        # NOTE(review): names.index(name) returns the FIRST occurrence, so
        # duplicate judge names would break the column mapping -- confirm
        # names are unique.
        column_index = names.index(name)
        # The partition_index is the cue when we should to switch 0 to 1 (no to yes) and vise versa.
        # NOTE(review): true division yields a float here; the modulo below
        # still behaves because every operand is a power of two, but
        # floor division (//) would be clearer.
        partition_index = num_lines / (2**(column_index + 1))
        # yes_mode means that we add '-yes_' up to a name, otherwise add '-no_'.
        yes_mode = True
        line = 0
        while line < num_lines:
            path_part = name
            # Switch the mode to the opposite one.
            if line % partition_index == 0:
                yes_mode = not yes_mode
            # Sets a decision to a name
            if not yes_mode:
                path_part += '-no_'
            else:
                path_part += '-yes_'
            # Add a path's part (column by column) to the list
            dir_list[line] += path_part
            line += 1
    return dir_list
def mapCardToImagePath(card):
    """
    Given a card, return the relative path to its image.

    Cards are encoded as a two-digit rank ("01"-"13", meaning 2 through Ace)
    followed by a suit letter: c, d, s or h. Any other code yields
    'INVALID CARD'.
    """
    # Rank "01" is the 2, ..., "09" the 10, then J, Q, K, A.
    ranks = ['2', '3', '4', '5', '6', '7', '8', '9', '10', 'J', 'Q', 'K', 'A']
    suits = {'c': 'C', 'd': 'D', 's': 'S', 'h': 'H'}
    rank_code, suit_code = card[:2], card[2:]
    if suit_code in suits and rank_code.isdigit():
        rank_idx = int(rank_code) - 1
        if 0 <= rank_idx < len(ranks):
            return 'src/imgs/{}{}.png'.format(ranks[rank_idx], suits[suit_code])
    return 'INVALID CARD'
def consolidate_gauge(df):
    """ takes in gauge columns and normalizes them all to stiches per inch

    Adds a 'gauge_per_inch' column = gauge / gauge_divisor. On failure
    (e.g. missing columns) the dataframe is returned unchanged after
    printing an error message (best-effort behaviour preserved).
    """
    try:
        df['gauge_per_inch'] = df.loc[:, 'gauge'] / df.loc[:, 'gauge_divisor']
    except Exception:
        # Narrowed from a bare ``except`` so KeyboardInterrupt/SystemExit
        # still propagate.
        print("Error occured when consolidating gauge")
    return df
import time
def parse_data(driver):
    """Return a float of the current price given the driver open to the TMX page of the specific symbol."""
    # The driver needs time to load the page before it can be parsed.
    # NOTE(review): a fixed 5s sleep is brittle; an explicit wait on the
    # element would be more reliable -- confirm against the selenium setup.
    time.sleep(5)
    content_obj = driver.find_element(by="id", value="root")
    content_text = content_obj.text
    # Page text looks like "... PRICE <value> CHANGE ..."; slice between
    # the two markers.
    price_string = content_text.split("PRICE")[1].split("CHANGE")[0].strip()
    try:
        # Strip the currency symbol and thousands separators before converting.
        price_float = float(price_string.replace("$", "").replace(",", ""))
        return price_float
    except ValueError as e:
        raise e
def add_shift_steps_unbalanced(
        im_label_list_all, shift_step=0):
    """
    Appends a fixed shift step to each large image (ROI)

    Args:
        im_label_list_all - list of tuples of [(impath, lblpath),]
    Returns:
        im_label_list_all but with an added element to each tuple (shift step)
    """
    return [(pair[0], pair[1], shift_step)
            for pair in im_label_list_all]
def are_aabb_colliding(a, b):
    """
    Return True if given AABB are colliding.

    :param AABBCollider a: AABB a
    :param AABBCollider b: AABB b
    :return: True if AABB are colliding
    :rtype bool:
    """
    for axis in range(3):
        a_lo = a.center[axis] - a.size3[axis] / 2
        a_hi = a.center[axis] + a.size3[axis] / 2
        b_lo = b.center[axis] - b.size3[axis] / 2
        b_hi = b.center[axis] + b.size3[axis] / 2
        # Separating-axis test: disjoint along any axis means no collision.
        if a_lo > b_hi or a_hi < b_lo:
            return False
    return True
def aic(X, k, likelihood_func):
    """Akaike information criterion.

    Args:
        X (np.ndarray): Data to fit on.
        k (int): Free parameters.
        likelihood_func (function): Log likelihood function that takes X as input.
    """
    # AIC = 2k - 2*ln(L)
    complexity_penalty = 2 * k
    goodness_of_fit = 2 * likelihood_func(X)
    return complexity_penalty - goodness_of_fit
def construct_policy(
bucket_name: str,
home_directory: str,
):
"""
Create the user-specific IAM policy.
Docs: https://docs.aws.amazon.com/transfer/latest/userguide/
custom-identity-provider-users.html#authentication-api-method
"""
return {
'Version': '2012-10-17',
'Statement': [{
'Condition': {
'StringLike': {
's3:prefix': [
f'{home_directory}/*',
f'{home_directory}/',
f'{home_directory}',
],
},
},
'Resource': f'arn:aws:s3:::{bucket_name}',
'Action': 's3:ListBucket',
'Effect': 'Allow',
'Sid': 'ListHomeDir',
}, {
"Sid": "AWSTransferRequirements",
"Effect": "Allow",
"Action": [
"s3:ListAllMyBuckets",
"s3:GetBucketLocation",
],
"Resource": "*",
}, {
'Resource': 'arn:aws:s3:::*',
'Action': [
's3:PutObject',
's3:GetObject',
's3:DeleteObjectVersion',
's3:DeleteObject',
's3:GetObjectVersion',
's3:GetObjectACL',
's3:PutObjectACL',
],
'Effect': 'Allow',
'Sid': 'HomeDirObjectAccess',
}],
} | 650459810d01b28cc82d320a3b42592d3bb51170 | 6,898 |
def cleanup(sender=None, dictionary=None):
    """Perform a platform-specific cleanup after the test."""
    # No-op default implementation: both arguments are accepted (and
    # ignored) so this matches the expected callback signature; always
    # reports success.
    return True
def find_n_max_vals(list_, num):
    """Function searches the num-biggest values of a given list of numbers.

    Returns the num maximas list and the index list wrapped up in a list:
    [max_vals, max_ind], values in descending order with the indices at
    which they occur in the original list.
    """
    li_ = list_.copy()
    max_vals = []  # the values
    max_ind = []   # the index of the value, can be used to get the param
    for _ in range(num):
        max_val = max(li_)
        max_id = li_.index(max_val)
        max_vals.append(max_val)
        max_ind.append(max_id)
        # Mask with -inf rather than 0 so lists containing negative numbers
        # (where 0 could outrank remaining values) are handled correctly.
        li_[max_id] = float('-inf')
    return [max_vals, max_ind]
import subprocess
def runGodot(command_args):
    """Runs godot with the command args given (a list)
    Returns a string of the output or None"""
    try:
        # Capture stderr alongside stdout so engine errors are included.
        raw_output = subprocess.check_output(command_args,
                                             stderr=subprocess.STDOUT)
    except subprocess.CalledProcessError:
        # Non-zero exit: report no output.
        return None
    return raw_output.decode('utf-8')
import json
import requests
def get_lol_version():
    """ Get current League of Legends version """
    # Fetch the full version list from Riot's Data Dragon CDN; index 0 is
    # the most recent release.
    versions = json.loads(requests.get(
        "https://ddragon.leagueoflegends.com/api/versions.json").text)
    # reformats from 10.14.5 to 10.14
    latest = ".".join(versions[0].split(".")[:2])
    return latest
def is_same_float(a, b, tolerance=1e-09):
    """Return true if the two floats numbers (a,b) are almost equal."""
    # Absolute-difference comparison (note: not relative like math.isclose).
    difference = a - b
    return -tolerance < difference < tolerance
import subprocess
import shlex
import json
def ffprobe_json(media_file):
    """Uses ffprobe to extract media information returning json format.

    Arguments:
        media_file: media file to be probed.

    Returns:
        json output of media information.
        return code indicating process result.
    """
    # Pass an argument list instead of formatting a shell string and
    # re-splitting it with shlex: this is robust against quotes and other
    # shell metacharacters in the filename.
    cmd = ['ffprobe', '-v', 'quiet', '-print_format', 'json',
           '-show_format', '-show_streams', media_file]
    process = subprocess.Popen(cmd, stdout=subprocess.PIPE)
    stdout = process.communicate()[0]
    rc = process.poll()
    j_out = json.loads(str(stdout, 'utf-8'))
    return j_out, rc
def _mp_fabber_options(wsp):
    """
    :param wsp: workspace object -- assumed to expose ``asldata`` (with
        ``nphases``, ``ntis``, ``rpts``), ``rois.mask`` and the ``mp_*``
        user options (TODO confirm against the calling pipeline)
    :return: General Fabber options for multiphase decoding
    """
    # General options. note that the phase is always PSP number 1
    options = {
        "method" : "vb",
        "noise" : "white",
        "model" : "asl_multiphase",
        "data" : wsp.asldata,
        "mask" : wsp.rois.mask,
        "nph" : wsp.asldata.nphases,
        "ntis" : wsp.asldata.ntis,
        "repeats" : wsp.asldata.rpts[0], # We have already checked repeats are fixed
        "save-mean" : True,
        "save-model-fit" : True,
        "max-iterations": 30,
        "PSP_byname1" : "phase",
        "PSP_byname1_prec" : 0.2,
    }
    # Spatial mode
    if wsp.mp_spatial:
        options.update({
            "method" : "spatialvb",
        })
        if wsp.mp_spatial_phase:
            # Make the phase a spatial prior
            options["PSP_byname1_type"] = "M"
        else:
            # Make the magnitudes and offsets spatial priors
            # (two PSP slots per PLD, starting at slot 2 since slot 1 is the phase)
            prior = 2
            for pld_idx in range(wsp.asldata.ntis):
                options.update({
                    "PSP_byname%i" % prior: "mag%i" % (pld_idx+1),
                    "PSP_byname%i_type" % prior : "M",
                    "PSP_byname%i" % (prior+1): "offset%i" % (pld_idx+1),
                    "PSP_byname%i_type" % (prior+1) : "M",
                })
            # Special case if we have only 1 PLD the magnitude and offset
            # parameters are named differently for compatibility
            options["PSP_byname2"] = "mag"
            options["PSP_byname3"] = "offset"
    # Additional user-specified multiphase fitting options override the above
    options.update(wsp.ifnone("mp_options", {}))
    return options
def default_cfg():
    """
    Set parameter defaults.

    Returns a dict of per-section config dicts (sim_spec, sim_diffuse,
    sim_beam, sim_gain, sim_noise, sim_reflection, sim_xtalk) for the
    simulation pipeline.
    """
    # Simulation specification
    cfg_spec = dict( nfreq=20,
                     start_freq=1.e8,
                     bandwidth=0.2e8,
                     start_time=2458902.33333,
                     integration_time=40.,
                     ntimes=40,
                     cat_name="gleamegc.dat",
                     apply_gains=True,
                     apply_noise=True,
                     ant_pert=False,
                     seed=None,
                     ant_pert_sigma=0.0,
                     hex_spec=(3,4),
                     hex_ants_per_row=None,
                     hex_ant_sep=14.6,
                     use_ptsrc=True )
    # Diffuse model specification
    cfg_diffuse = dict( use_diffuse=False,
                        nside=64,
                        obs_latitude=-30.7215277777,
                        obs_longitude = 21.4283055554,
                        obs_height = 1073,
                        beam_pol='XX',
                        diffuse_model='GSM',
                        eor_random_seed=42,
                        nprocs=1 )
    # Beam model parameters
    cfg_beam = dict( ref_freq=1.e8,
                     spectral_index=-0.6975,
                     seed=None,
                     perturb_scale=0.0,
                     mainlobe_scale_mean=1.0,
                     mainlobe_scale_sigma=0.0,
                     xstretch_mean=1.0,
                     xstretch_sigma=0.0,
                     ystretch_mean=1.0,
                     ystretch_sigma=0.0,
                     xystretch_same=True,
                     xystretch_dist=None,
                     rotation_dist='',
                     rotation_mean=0.0,
                     rotation_sigma=0.0,
                     mainlobe_width=0.3,
                     nmodes=8,
                     beam_coeffs=[ 0.29778665, -0.44821433, 0.27338272,
                                   -0.10030698, -0.01195859, 0.06063853,
                                   -0.04593295, 0.0107879, 0.01390283,
                                   -0.01881641, -0.00177106, 0.01265177,
                                   -0.00568299, -0.00333975, 0.00452368,
                                   0.00151808, -0.00593812, 0.00351559
                                 ] )
    # Fluctuating gain model parameters
    cfg_gain = dict(nmodes=8, seed=None)
    # Noise parameters
    cfg_noise = dict(nsamp=1., seed=None, noise_file=None)
    # reflection parameters
    cfg_reflection = dict(amp=1.e-2, dly=800.)
    # xtalk parameters
    cfg_xtalk = dict(amp=1.e-2, dly=400.)
    # Combine into single dict
    cfg = { 'sim_beam': cfg_beam,
            'sim_spec': cfg_spec,
            'sim_diffuse': cfg_diffuse,
            'sim_noise': cfg_noise,
            'sim_gain': cfg_gain,
            'sim_reflection': cfg_reflection,
            'sim_xtalk': cfg_xtalk,
          }
    return cfg
def getTail(compiler,version):
    """
    Function which generates the Tail of a
    Compiler module file.

    Emits Lmod Lua lines that extend MODULEPATH with the package tree for
    this compiler/version, plus a cluster-specific tree chosen at module
    load time from the UUFSCELL environment variable.

    @input compiler :: compiler name ('intel','pgi',..)
    @input version :: version of the compiler
    @return :: list of Lua lines
    """
    # Pre-formatted Lua fragments spliced into the line list below.
    strA = 'local version = "{0}"'.format(version)
    strB = 'local mdir = pathJoin(mroot,"Compiler/{0}",version)'.format(compiler.lower())
    strC = '-- a. compiled with {0}/{1}'.format(compiler.title(),version)
    strD = '   local mdir = pathJoin(mroot,"Compiler",CLUSTERNAME,"{0}",version)'.format(compiler)
    res = ['','',
           '-- MODULEPATH modification to include packages',
           '-- that are compiled with this version of the compiler',
           '-- and available ON ALL clusters', strA,
           'local mroot = os.getenv("MODULEPATH_ROOT")', strB,
           'prepend_path("MODULEPATH",mdir)','','',
           '-- MODULEPATH modification to include packages',
           '-- that are:', strC,
           '-- b. ONLY available ON a specific cluster','',
           'local CLUSTERNAME = nil',
           'local str = os.getenv("UUFSCELL")','',
           'if str ~= nil then',
           '   if str == "ash.peaks" then',
           '      CLUSTERNAME = "ash"',
           '   elseif str == "ember.arches" then',
           '      CLUSTERNAME = "em"',
           '   elseif str == "kingspeak.peaks" then',
           '      CLUSTERNAME = "kp"',
           '   elseif str == "lonepeak.peaks" then',
           '      CLUSTERNAME = "lp"',
           '   end','',
           '   if CLUSTERNAME ~= nil then', strD,
           '      prepend_path("MODULEPATH",mdir)',
           '   end',
           'end']
    return res
import curses
def new_border_and_win(ws):
    """
    Returns two curses windows, one serving as the border, the other as the
    inside from these *_FRAME tuples above.
    """
    # ws is ((x, y), (width, height)); curses.newwin takes
    # (nlines, ncols, begin_y, begin_x).
    (x, y), (width, height) = ws
    border = curses.newwin(height, width, y, x)
    inner = curses.newwin(height - 2, width - 2, y + 1, x + 1)
    return (border, inner)
import socket
def mk_sock(mcast_host, mcast_ip, mcast_port):
    """
    multicast socket setup

    Creates a UDP socket, joins the multicast group ``mcast_ip`` on the
    local interface ``mcast_host``, and binds it to
    (mcast_host, mcast_port).
    """
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
    # Allow quick rebinding of the address after a restart.
    sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    # Join the group: group address + interface address, each packed as a
    # 4-byte binary value (the struct ip_mreq layout).
    sock.setsockopt(
        socket.IPPROTO_IP,
        socket.IP_ADD_MEMBERSHIP,
        socket.inet_aton(mcast_ip) + socket.inet_aton(mcast_host),
    )
    sock.bind((mcast_host, mcast_port))
    return sock
def cmd(command_id):
    """
    A helper function for identifying command functions
    """
    def decorator(target):
        # Tag the function so a dispatcher can look up its command id later.
        target.__COMMAND_ID__ = command_id
        return target
    return decorator
def xover_selection(snakes, survivors, opts, num_survivors):
    """ Picks parents from the current generation of snakes for crossover

    params:
        snakes: list, current generation of snakes of class Snake
        survivors: list, snakes of class Snake that survived
        opts: dict, contains hyperparamters
        num_survivors: int, how many survivors there should be

    returns:
        list of parents of class Snake (possibly shorter than the target
        size when too few snakes are eligible)
    """
    parents = []
    max_num_parents = opts["PopulationSize"] - num_survivors
    while len(parents) < max_num_parents:
        before = len(parents)
        # Survivors are always eligible parents.
        for survivor in survivors:
            if (len(parents) < max_num_parents):
                parents.append(survivor)
        # Non-survivors join only if they were selected.
        for snake in snakes:
            if snake not in survivors and snake.selected:
                if (len(parents) < max_num_parents):
                    parents.append(snake)
        # Guard: if a full pass added nobody, no snake is eligible and the
        # original code would spin forever here -- bail out instead.
        if len(parents) == before:
            break
    return parents
def interleave_value(t0, series, begin=False, end=False):
    """Add t0 between every element of *series*.

    Optionally also prepend (begin=True) and/or append (end=True) t0.
    """
    result = [t0] if begin else []
    for position, item in enumerate(series):
        if position:
            # separator before every element after the first
            result.append(t0)
        result.append(item)
    if end:
        result.append(t0)
    return result
def menu():
    """Text-based menu for the real-estate (Immobili) management program.

    Loops until a valid choice is entered; returns the chosen option as an
    int (1-8), or 0 when the user chooses to exit.
    """
    x = 1
    while x !=0 :
        print (" Menu'")
        print(" Gestione Immobiliare")
        print(" INSERIMENTO IMMOBILE .........digita 1 --> ")
        print(" MODIFICA IMMOBILE .........digita 2 --> ")
        print(" CANCELLA IMMOBILE .........digita 3 --> ")
        print(" STAMPA TUTTI GLI IMMOBILI......digita 4 --> ")
        print(" INSERISCI NUOVO CLIENTE........digita 5 --> ")
        print(" STAMPA ANAGRAFICA CLIENTI......digita 6 --> ")
        print(" CERCA IMMOBILE PER INDIRIZZO...digita 7 --> ")
        print(" STAMPA IMMOBILI PER CATALOGO...digita 8 --> ")
        print("\n")
        print(" PER USCIRE ................digita 0 --> ")
        print("\n\n")
        # input() returns a string; valid digits map to int return values.
        x = input("scegli cosa vuoi fare digita 0 per uscire............... --> ")
        if x == "1":
            return 1
        elif x == "2":
            return 2
        elif x == "3":
            return 3
        elif x == "4":
            return 4
        elif x == "5":
            return 5
        elif x == "6":
            return 6
        elif x == "7":
            return 7
        elif x == "8":
            return 8
        elif x == "0":
            # fall out of the loop to print the goodbye message
            x = 0
        else:
            print(" Scelta non valida - solo numeri da 0 a 8")
            x = 1
    print("Hai scelto di uscire, Grazie!")
    return 0
def sort(request):
    """
    Valid values for the 'sort' parameter used in the Index
    setops methods (intersection, union, etc.)

    Caution:
        Don't confuse this one with the "sort" fixture used
        for DataFrame.append or concat. That one has
        parameters [True, False].

        We can't combine them as sort=True is not permitted
        in the Index setops methods.
    """
    # NOTE(review): this is a pytest fixture body (it reads request.param);
    # the @pytest.fixture(params=...) decorator is presumably applied where
    # it is registered -- confirm.
    return request.param
import json
def credentials_from_file(file):
    """Load credentials corresponding to an evaluation from file"""
    # Use a distinct handle name instead of shadowing the parameter.
    with open(file) as handle:
        return json.load(handle)
import requests
def get_workspace_vars(auth, workspace_id):
    """
    Function to get variables created in a workspace

    Queries the Terraform Cloud vars endpoint (via Intersight) and
    reshapes the JSON:API payload into a dict keyed by variable id.
    """
    headers = {"Content-Type": "application/json"}
    url = f"https://intersight.com/tfc/api/v2/workspaces/{workspace_id}/vars"
    response = requests.get(url, headers=headers, auth=auth)
    response_data = response.json()
    # NOTE(review): raw response dump left in -- debug output? confirm
    # whether it should be removed or routed through logging.
    print(response_data)
    workspace_vars = {}
    # Each entry follows the JSON:API shape: {"id": ..., "attributes": {...}}.
    for var in response_data["data"]:
        var_id = var["id"]
        workspace_vars[var_id] = {}
        workspace_vars[var_id]["var_name"] = var["attributes"]["key"]
        workspace_vars[var_id]["var_value"] = var["attributes"]["value"]
        workspace_vars[var_id]["sensitive"] = var["attributes"]["sensitive"]
        workspace_vars[var_id]["var_description"] = var["attributes"]["description"]
    return workspace_vars
def format_date(value, format='%Y-%m-%d'):
    """Returns a formatted time string

    :param value: The datetime object that should be formatted
    :param format: How the result should look like. A full list of available
                   directives is here: http://goo.gl/gNxMHE
    """
    formatted = value.strftime(format)
    return formatted
def DatetimeToWmiTime(dt):
    """Take a datetime tuple and return it as yyyymmddHHMMSS.mmmmmm+UUU string.

    UUU is the UTC offset in whole minutes; naive datetimes get "+000".

    Args:
      dt: A datetime object.

    Returns:
      A string in CMI_DATETIME format.
      http://www.dmtf.org/sites/default/files/standards/documents/DSP0004_2.5.0.pdf
    """
    td = dt.utcoffset()
    if td:
        # Use floor division: the original used true division, which produced a
        # float that was then fed through "%03d".  Offsets are whole minutes,
        # so // yields the exact integer value.
        offset = (td.seconds + (td.days * 60 * 60 * 24)) // 60
        if offset >= 0:
            str_offset = "+%03d" % offset
        else:
            str_offset = "%03d" % offset
    else:
        str_offset = "+000"
    return u"%04d%02d%02d%02d%02d%02d.%06d%s" % (dt.year, dt.month, dt.day,
                                                 dt.hour, dt.minute, dt.second,
                                                 dt.microsecond, str_offset)
def set_field_value(context, field_value):
    """Store ``field_value`` in the template context.

    Falsy values (None, '', 0, ...) are normalised to an empty string.
    Always returns '' so the tag renders nothing.
    """
    context['field_value'] = field_value if field_value else ''
    return ''
def show_books(object_list):
    """Build the template context for a list of books.

    Accepts either Book model instances or search-result wrappers that
    expose the instance via an ``object`` attribute (e.g. Haystack-style
    results); wrappers are unwrapped first.

    :param object_list: list of Book instances or result wrappers
    :return: dict used as the template context
    """
    if object_list and hasattr(object_list[0], 'object'):
        # Unwrap search results.  Materialise to a list (the original used a
        # lazy map object, which cannot be measured or iterated twice).
        object_list = [ele.object for ele in object_list]
    return {
        'books_list': object_list,
    }
import re
from typing import OrderedDict
def parse_dict(s, sep=None):
    """Parse a string of ``key := value`` pairs into an OrderedDict.

    :param s: the input string to parse, which should be of the format
        ``key1 := value1, key2 := value2,`` (the last comma is optional)
    :param sep: key/value separator (default is ':=')
    :returns: ``OrderedDict(('key1', 'value1'), ...)``
    :raises ValueError: if an entry does not split into exactly a key and
        a value around ``sep``
    """
    if sep is None:
        sep = ":="
    # Deal with the comma on the last line: drop a blank trailing entry.
    entries = s.split(",")
    if re.match(r"^\s*$", entries[-1]):
        del entries[-1]
    # The original wrapped this in `try: ... except: raise` followed by an
    # unreachable `return OrderedDict()`; both were dead weight.
    return OrderedDict(
        tuple(part.strip() for part in entry.split(sep))
        for entry in entries
    )
def distr_selectbox_names():
    """
    Return the distribution names offered in the selectbox.

    The identifiers match continuous distributions accessed as
    ``stats.<name>`` (presumably ``scipy.stats`` — TODO confirm).
    Commented-out entries are duplicates deliberately excluded from the
    choices.
    """
    names = ['alpha',
             'anglit',
             'arcsine',
             'argus',
             'beta',
             'betaprime',
             'bradford',
             'burr',
             'burr12',
             'cauchy',
             'chi',
             'chi2',
             'cosine',
             'crystalball',
             'dgamma',
             'dweibull',
             'erlang',
             'expon',
             'exponnorm',
             'exponpow',
             'exponweib',
             'f',
             'fatiguelife',
             'fisk',
             'foldcauchy',
             'foldnorm',
             'gamma',
             'gausshyper',
             'genexpon',
             'genextreme',
             'gengamma',
             #'gengamma',
             'genhalflogistic',
             'geninvgauss',
             'genlogistic',
             'gennorm',
             'genpareto',
             'gilbrat',
             'gompertz',
             'gumbel_l',
             'gumbel_r',
             'halfcauchy',
             'halfgennorm',
             'halflogistic',
             'halfnorm',
             'hypsecant',
             'invgamma',
             'invgauss',
             'invweibull',
             'johnsonsb',
             'johnsonsu',
             'kappa3',
             'kappa4',
             #'kappa4',
             #'kappa4',
             #'kappa4',
             'ksone',
             'kstwo',
             'kstwobign',
             'laplace',
             'laplace_asymmetric',
             'levy',
             'levy_l',
             'levy_stable',
             'loggamma',
             'logistic',
             'loglaplace',
             'lognorm',
             'loguniform',
             'lomax',
             'maxwell',
             'mielke',
             'moyal',
             'nakagami',
             'ncf',
             'nct',
             'ncx2',
             'norm',
             'norminvgauss',
             'pareto',
             'pearson3',
             'powerlaw',
             'powerlognorm',
             'powernorm',
             'rayleigh',
             'rdist',
             'recipinvgauss',
             'reciprocal',
             'rice',
             'semicircular',
             'skewnorm',
             't',
             'trapezoid',
             'triang',
             'truncexpon',
             'truncnorm',
             #'truncnorm',
             'tukeylambda',
             'uniform',
             'vonmises',
             'vonmises_line',
             'wald',
             'weibull_max',
             'weibull_min',
             'wrapcauchy']
    return names
def read_file(filename):
    """Read *filename*; return its entire contents as one string."""
    with open(filename) as handle:
        return handle.read()
def say(number):
    """
    print out a number as words in North American English using short scale terms

    :param number: value to spell out; must satisfy 0 <= number < 10**12
    :raises ValueError: for negative numbers or numbers >= one trillion
    :return: the English wording of ``number``
    """
    number = int(number)
    if number < 0 or number >= 1e12:
        raise ValueError
    if number == 0:
        return "zero"
    def quotient_and_remainder(number, divisor):
        """
        return the integer quotient and remainder of dividing number by divisor
        """
        # Divisors arrive as float literals (1e3, 1e6, 1e9); normalise to int
        # so the arithmetic below stays exact.
        divisor = int(divisor)
        remainder = number % divisor
        quotient = (number - remainder) // divisor
        return quotient, remainder
    def say_term(which, terms):
        """
        return a term from a tuple of strings as a list of one element
        """
        # Slice (not index) so an out-of-range `which` yields an empty list.
        return terms[which : which + 1]
    def say_tens(number):
        """
        return a string representing a number less than 100 in English
        """
        terms = []
        quotient, remainder = quotient_and_remainder(number, 10)
        if quotient == 1:
            # 10-19 are irregular and spelled as a single word.
            terms += say_term(remainder,
                              ("ten", "eleven", "twelve", "thirteen", "fourteen",
                               "fifteen", "sixteen", "seventeen", "eighteen", "nineteen"))
        else:
            if quotient:
                # "units"/"teens" are placeholders only: quotient 0 is filtered
                # by `if quotient:` and quotient 1 is handled above.
                terms += say_term(quotient,
                                  ("units", "teens", "twenty", "thirty", "forty",
                                   "fifty", "sixty", "seventy", "eighty", "ninety"))
            if remainder:
                terms += say_term(remainder,
                                  ("zero", "one", "two", "three", "four",
                                   "five", "six", "seven", "eight", "nine"))
        return '-'.join(terms)
    def say_hundreds(number, final=False):
        """
        return a string representing a number less than 1000 in English
        """
        terms = []
        quotient, remainder = quotient_and_remainder(number, 100)
        if quotient:
            terms += [say_tens(quotient), "hundred"]
        if remainder:
            # "and" joins hundreds with their tail ("one hundred and two") or
            # introduces the final sub-thousand part of a larger number.
            if quotient or final:
                terms += ["and"]
            terms += [say_tens(remainder)]
        return terms
    # now finally convert a number less than a million million
    terms = []
    quotient, remainder = quotient_and_remainder(number, 1e9)
    if quotient:
        terms += say_hundreds(quotient) + ["billion"]
    quotient, remainder = quotient_and_remainder(remainder, 1e6)
    if quotient:
        terms += say_hundreds(quotient) + ["million"]
    quotient, remainder = quotient_and_remainder(remainder, 1e3)
    if quotient:
        terms += say_hundreds(quotient) + ["thousand"]
    if remainder:
        terms += say_hundreds(remainder, terms != [])
    return ' '.join(terms)
import re
def rm_noise(diff):
    """Filter out noise from diff text.

    Removes newlines, runs of spaces and tseks ('་', the Tibetan syllable
    delimiter) from the text.

    Args:
        diff (str): diff text
    Returns:
        str: cleaned diff text
    """
    result = diff
    # Raw strings so the escapes reach the regex engine unmangled.
    patterns = [r"\n", r"\u0020+", r"་+?"]
    for pattern in patterns:
        # re.sub removes every match.  The original searched for the first
        # match and str.replace'd that exact text, so e.g. space runs of a
        # different length than the first one survived.
        result = re.sub(pattern, "", result)
    return result
import numpy
def word2array(ft_names, word):
    """Convert `word` [[(value, feature),...],...] to a NumPy array.

    Each row of the result is one segment and each column one feature;
    '+', '-' and '0' map to 1, -1 and 0 respectively.

    Args:
        ft_names (list): feature names (as strings) in order; this controls
                         which features appear in the output and the column
                         order
        word (list): list of lists of (value, feature) tuples (output by
                     FeatureTable.word_fts)

    Returns:
        ndarray: segments-by-features array (Fortran ordered)
    """
    value_map = {'+': 1, '-': -1, '0': 0}

    def segment_row(segment):
        lookup = {feature: value for value, feature in segment}
        return [value_map[lookup[name]] for name in ft_names]

    return numpy.array([segment_row(seg) for seg in word], order='F')
def external(field):
    """
    Mark a field as external.

    Sets the private ``_external`` flag on *field* in place and returns the
    same object so the call can be used as a decorator or inline wrapper.
    """
    field._external = True
    return field
def getExactFreePlaceIndexForCoordinate(freePlaceMap, x, y):
    """
    Returns the exact value for a given coordinate on the FreePlaceMap.

    :param freePlaceMap: The generated FreePlaceMap (list of rows)
    :param x: The X coordinate on the FreePlaceMap
    :param y: The Y coordinate on the FreePlaceMap
    :return: The index value on the FreePlaceMap, or None for a missing map,
        out-of-bounds coordinates, or an unmarked (-1) cell
    """
    # Reject missing maps and out-of-bounds coordinates up front.
    if freePlaceMap is None:
        return None
    if y < 0 or y >= len(freePlaceMap):
        return None
    if x < 0 or x >= len(freePlaceMap[0]):
        return None
    cell = freePlaceMap[y][x]
    return cell - 1 if cell != -1 else None
def split_on_text(row):
    """Split a row's original text into blocks of at most one million
    characters (e.g. to stay under Spacy's input limit).

    The original implementation derived the block count with ``round`` and
    mis-tracked its slice offsets, which skipped large parts of the text;
    this version slices the text into consecutive chunks.

    :param row: object with a ``text`` attribute (e.g. a pandas row)
    :return: list of strings of length <= 1,000,000 whose concatenation is
        ``row.text`` (empty list for empty text)
    """
    chunk_size = 1000000
    text = row.text
    return [text[start:start + chunk_size]
            for start in range(0, len(text), chunk_size)]
def get_number(number):
    # NOTE(review): the docstring below doubles as a Swagger/OpenAPI spec
    # parsed at runtime (flasgger-style — TODO confirm), so its YAML layout
    # must not be reformatted.
    """
    Repeats back a number to you
    ---
    operationId: getPetsById
    parameters:
      - name: number
        in: path
        type: string
        description: the number
    responses:
      200:
        description: Hello number!
    """
    return "Hello {}!".format(number)
def align_frontiers_on_bars(frontiers, bars):
    """
    Aligns the frontiers of segments to the closest bars (in time).

    The idea is that frontiers generally occurs on downbeats,
    and that realigning the estimation could improve perfomance for low tolerances scores.
    Generally used for comparison with techniques which don't align their segmentation on bars.

    Parameters
    ----------
    frontiers : list of float
        Time of the estimated frontiers.
    bars : list of tuple of float
        The (start, end) times of the bars of the signal.

    Returns
    -------
    frontiers_on_bars : list of floats
        Frontiers, realigned on bars.
    """
    frontiers_on_bars = []
    i = 1
    for frontier in frontiers:
        # `i` is shared across iterations, so this single forward scan
        # assumes both `frontiers` and `bars` are sorted ascending — TODO confirm.
        while i < len(bars) - 1 and bars[i][1] < frontier:
            i+=1
        if i == len(bars) - 1:
            # Ran past the last usable bar: keep the frontier unchanged.
            frontiers_on_bars.append(frontier)
        else:
            # Snap to whichever boundary of the current bar is closer in time.
            if bars[i][1] - frontier < frontier - bars[i][0]:
                frontiers_on_bars.append(bars[i][1])
            else:
                frontiers_on_bars.append(bars[i][0])
    return frontiers_on_bars
def _high_bit(value):
    """Return the index of the highest set bit of ``value``, or -1 for zero.

    NOTE: ``int.bit_length`` operates on the magnitude, so negative values
    yield the high-bit index of their absolute value (NOT -1, despite what
    the original docstring claimed); callers are expected to pass
    non-negative ints.
    """
    return value.bit_length() - 1
def qual(obj):
    """
    Return fully qualified name of a class.

    The result is ``<module>.<class>`` for the class of *obj*.
    """
    cls = obj.__class__
    return u'{}.{}'.format(cls.__module__, cls.__name__)
def default_reply(event, message):
    """Default function called to reply to bot commands.

    Sends *message* back via the event's ``unotice`` method and returns
    whatever that call returns.
    """
    return event.unotice(message)
def making_change(amt: int, coins: list) -> int:
    """Count coin-change combinations via bottom-up dynamic programming.

    :param amt (int) : Amount, in cents, to be made into change.
    :param coins (list) : List of coin denominations.
    :return (int) : Number of different combinations of change.
    """
    # ways[i] holds the number of ways to form amount i using the
    # denominations processed so far; there is exactly one way to make zero.
    ways = [1] + [0] * amt
    for denomination in coins:
        # Considering each coin once (outer loop) counts combinations,
        # not permutations.
        for amount in range(denomination, amt + 1):
            ways[amount] += ways[amount - denomination]
    return ways[amt]
def table_dispatch(kind, table, body):
    """Call body with table[kind] if it exists. Raise an error otherwise.

    :param kind: key to look up in ``table``
    :param table: mapping of kinds to handler arguments
    :param body: callable applied to ``table[kind]``
    :return: whatever ``body`` returns
    """
    if kind in table:
        return body(table[kind])
    # Python 3 raise syntax; the original used the Python 2 statement form
    # (`raise BaseException, "..."`), which is a SyntaxError on Python 3.
    raise BaseException("don't know how to handle a histogram of kind %s" % kind)
import os
def get_testcases(problem):
    """
    Gets testcases for problem, which are then displayed if user is Apprentice.

    :param problem: id of problem
    :return: (testcases, testcases_sol) as two lists of 10 strings (each
        string is the file's lines stripped and concatenated), or (-1, -1)
        if any testcase file is missing or unreadable
    """
    # Read the base directories straight from the environment instead of
    # spawning a shell per lookup (the old `os.popen('echo $VAR')` idiom).
    testcases_dir = os.path.join(os.environ.get('CG_FILES_TESTCASES', ''), problem)
    testcases_dir_sol = os.path.join(os.environ.get('CG_FILES_TESTCASES_SOL', ''), problem)
    testcases = []
    testcases_sol = []
    try:
        for i in range(10):
            name = '{0}_{1}'.format(problem, i)
            # `with` guarantees the handles are closed (the original leaked
            # them on exceptions).
            with open(os.path.join(testcases_dir, name)) as f:
                testcases.append(''.join(line.strip() for line in f))
            with open(os.path.join(testcases_dir_sol, name)) as f:
                testcases_sol.append(''.join(line.strip() for line in f))
    except IOError:
        return -1, -1  # a testcase file is missing/unreadable
    return testcases, testcases_sol
import base64
def _get_base64(data: str) -> str:
"""Base 64 encodes data."""
ebytes = base64.b64encode(data.encode("utf-8"))
estring = str(ebytes, "utf-8")
return estring | a7bd3080dba077077d96602eb35142db32b003de | 6,954 |
def setSortGroups(sortGroups=None):
    """
    Return the sorting groups, either user defined or from the default list.

    When *sortGroups* is given, the all-inclusive group is inserted at the
    front of that same list (in-place mutation) and the list is returned.
    """
    if sortGroups is None:
        # Default groups: everything, <= 100, and >= 101.
        return [('-inf', '+inf'), ('-inf', 100), (101, '+inf')]
    sortGroups.insert(0, ('-inf', '+inf'))
    return sortGroups
def ubuntu_spec(**kwargs):
    """Ubuntu specs.

    Builds the Packer boot command and builder settings for a preseed-based
    Ubuntu install.  Expects ``kwargs['data']`` with keys ``builder``,
    ``builder_spec``, ``distro`` and ``version``; ``builder_spec`` is
    updated in place.

    Returns:
        tuple: (bootstrap config filename, the updated builder_spec dict)
    """
    # Setup vars from kwargs
    builder = kwargs['data']['builder']
    builder_spec = kwargs['data']['builder_spec']
    distro = kwargs['data']['distro']
    version = kwargs['data']['version']
    bootstrap_cfg = 'preseed.cfg'
    # qemu boots faster, so it needs a shorter wait before typing:
    # https://github.com/mrlesmithjr/packer-builder/issues/83
    if builder == 'qemu':
        boot_wait = '5s'
    else:
        boot_wait = '30s'
    builder_spec.update(
        {
            # The <bs> runs erase the installer's default kernel command line
            # so the preseeded one below can be typed in its place.
            'boot_command': [
                '<enter><wait><f6><esc>',
                '<bs><bs><bs><bs><bs><bs><bs><bs><bs><bs><bs><bs><bs>',
                '<bs><bs><bs><bs><bs><bs><bs><bs><bs><bs><bs><bs><bs>',
                '<bs><bs><bs><bs><bs><bs><bs><bs><bs><bs><bs><bs><bs>',
                '<bs><bs><bs><bs><bs><bs><bs><bs><bs><bs><bs><bs><bs>',
                '<bs><bs><bs><bs><bs><bs><bs><bs><bs><bs><bs><bs><bs>',
                '<bs><bs><bs><bs><bs><bs><bs><bs><bs><bs><bs><bs><bs>',
                '<bs><bs><bs><bs><bs><bs>',
                '<wait>',
                '/install/vmlinuz',
                '<wait>',
                ' initrd=/install/initrd.gz',
                '<wait>',
                ' auto=true',
                '<wait>',
                ' priority=critical',
                '<wait>',
                # The preseed file is served by Packer's built-in HTTP server.
                ' url=http://{{ .HTTPIP }}:{{ .HTTPPort }}/'f'{distro}-{version}-{bootstrap_cfg}',  # noqa: E501
                '<wait>',
                '<enter>'
            ],
            'boot_wait': f'{boot_wait}',
            'shutdown_command': 'sudo /sbin/halt -h -p'
        }
    )
    return bootstrap_cfg, builder_spec
import subprocess
def run(cmd):
    """Run a command on the command line via ``sh -c``.

    Returns a ``(returncode, stdout, stderr)`` tuple; stdout and stderr
    are captured as raw bytes.
    """
    completed = subprocess.run(
        ['sh', '-c', cmd],
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
    )
    return completed.returncode, completed.stdout, completed.stderr
def NOT_TENSOR_FILTER(arg_value):
    """Keep a value only if it is neither a Tensor nor a SparseTensor."""
    return not (arg_value.is_tensor or arg_value.is_sparse_tensor)
def check_if_neighbors_match(src_neighbor, trg_neighbor):
    """Check if any source and target neighbors match and return matches.

    A source index matches when following its assignment into the target
    list and back leads to the same index (a mutual match).

    Args:
        src_neighbor (list): Source neighbor list (index -> target index).
        trg_neighbor (list): Target neighbor list (index -> source index).

    Returns:
        dict: mapping of matching source index -> its target index.
        (The original docstring said "list", but a dict is returned.)
    """
    matching = {}
    # enumerate replaces the range(len(...)) anti-idiom of the original.
    for src_index, trg_index in enumerate(src_neighbor):
        if int(trg_neighbor[trg_index]) == src_index:
            matching[src_index] = trg_index
    return matching
def tapisize(fieldKeyName):
    """Transforms a string into a Tapis query parameter.

    Currently this simply lower-cases the field key name.
    """
    return fieldKeyName.lower()
def calculate_average_resolution(sizes):
    """Return the average dimensions for a list of resolution tuples.

    Raises ZeroDivisionError for an empty list.
    """
    count = len(sizes)
    total_horizontal = sum(width for width, _ in sizes)
    total_vertical = sum(height for _, height in sizes)
    return (total_horizontal / count, total_vertical / count)
from pathlib import Path
def create_folder(subfolder: str,
                  folder: str) -> None:
    """
    Create the folder structure ``folder/subfolder`` for saved stationdata.

    Missing parent directories are created; existing ones are left alone.
    """
    Path(folder, subfolder).mkdir(parents=True, exist_ok=True)
    return None
def _partition_fold(v,data):
"""
partition the data ready for cross validation
Inputs:
v: (int) cross validation parameter, number of cross folds
data: (np.array) training data
Outputs:
list of partitioned indicies
"""
partition = []
for i in range(v):
if i==v-1:
partition.append(range(int(i*len(data)/5),len(data)))
else:
partition.append(range(int(i*len(data)/5),int(i*len(data)/5+(len(data)/5))))
return partition | fc833b5120c5d8e479af1758f86e0541c5d7d87c | 6,966 |
def get_distance(m, M, Av=0):
    """
    Calculate distance [in pc] from extinction-corrected magnitude
    using the equation: d = 10**((m - M + 5 - Av) / 5)

    Note: m - M = 5*log10(d) - 5 + Av
    see http://astronomy.swin.edu.au/cosmos/I/Interstellar+Reddening

    Parameters
    ----------
    m : apparent magnitude
    M : absolute magnitude
    Av : extinction (in V band)
    """
    # Guard against missing or NaN magnitudes before computing.
    assert (m is not None) & (str(m) != "nan")
    assert (M is not None) & (str(M) != "nan")
    distance_modulus = m - M + 5 - Av
    return 10 ** (0.2 * distance_modulus)
import argparse
def get_args_parser(PORT: int = 4500):
    """
    Extendable parser for input arguments

    Builds an ``argparse.ArgumentParser`` preloaded with ``--port`` and
    ``--secrets`` options; callers may add further arguments before
    calling ``parse_args``.

    Args:
        PORT: default port to be exposed

    Returns:
        argparse.ArgumentParser: the configured parser
    """
    parser = argparse.ArgumentParser(add_help=True,
                                     description="Backend service API")
    parser.add_argument('-p', '--port',
                        help="Port to expose the API end-points",
                        default=PORT, type=int)
    parser.add_argument('-s', '--secrets',
                        help="Path to json with secrets",
                        default=None, type=str)
    return parser
import json
def load_base_models_json(filename="base_models.json"):
    """Load base models json to allow selecting pre-trained model.

    Args:
        filename (str) - filename for the json file with pre-trained models
    Returns:
        base_models - python dict version of JSON key-value pairs
    """
    with open(filename) as handle:
        return json.load(handle)
import json
def load_data_from_json(jsonfile):
    """Load the data contained in a .json file and return the corresponding Python object.

    :param jsonfile: The path to the .json file
    :type jsonfile: str
    :rtype: list or dict
    """
    # Use a context manager so the handle is closed (the original left
    # `open(jsonfile)` dangling) and parse the stream directly.
    with open(jsonfile) as handle:
        return json.load(handle)
def precut(layers, links, all_terms, user_info):
    """
    Drop terms, links and layer entries that are absent from ``user_info``
    (the accuracy file of model 1).

    Layer nodes missing from ``user_info`` are removed, and layers left
    empty afterwards are dropped entirely.  A link survives only if both
    of its endpoints are present; a term survives only if it is present
    itself.

    :return:
        Cut layers, links (edges) and terms.
    """
    kept_layers = []
    for layer in layers:
        kept = [node for node in layer if node in user_info]
        if kept:
            kept_layers.append(kept)
    kept_links = {link for link in links
                  if link[0] in user_info and link[1] in user_info}
    kept_terms = {term: value for term, value in all_terms.items()
                  if term in user_info}
    return kept_layers, kept_links, kept_terms
import os
def fullPathListDir(dir: str) -> list:
    """
    Return the full path of every entry in the provided directory.
    """
    entries = os.listdir(dir)
    return [os.path.join(dir, entry) for entry in entries]
def _clean_conargs(**conargs):
"""Clean connection arguments"""
conargs['metadata'] = [x.strip() for x in conargs['metadata'].split(',') if x.strip()]
return conargs | f5942f750949ab674bd99778e79ea35c2d0bb775 | 6,976 |
def get_living_neighbors(i, j, generation):
    """
    Count the living (== 1) neighbors around cell (i, j).

    Cells outside the grid are simply ignored, so edge and corner cells
    have fewer candidate neighbors.
    """
    rows, cols = len(generation), len(generation[0])
    offsets = ((-1, 0), (1, 0), (0, -1), (0, 1),
               (-1, 1), (-1, -1), (1, 1), (1, -1))
    return sum(1
               for di, dj in offsets
               if 0 <= i + di < rows and 0 <= j + dj < cols
               and generation[i + di][j + dj] == 1)
def StrToList(val):
    """Convert a string into the list of its characters' ordinal values."""
    return list(map(ord, val))
def set(data,c):
    """
    Set Data to a Constant

    Overwrites every point along the last axis of ``data`` in place and
    returns the same array.

    Parameters:

    * data	Array of spectral data.
    * c	Constant to set data to (may be complex)

    """
    # NOTE: this function shadows the builtin `set`; the name is kept for
    # backward compatibility with existing callers.
    data[...,:]=c
    return data
from typing import Any
from typing import Dict
def get_meta(instance: Any) -> Dict[str, Any]:
    """
    Returns object pjrpc metadata.

    Falls back to an empty dict when the ``__pjrpc_meta__`` attribute is
    missing.
    """
    try:
        return instance.__pjrpc_meta__
    except AttributeError:
        return {}
def free_residents(residents_prefs_dict, matched_dict):
    """
    Return the residents that still have a non-empty preference list and
    are not matched with any hospital in ``matched_dict``.
    """
    unmatched = []
    for resident, prefs in residents_prefs_dict.items():
        if not prefs:
            continue  # exhausted preference list: cannot be matched
        if any(resident in match for match in matched_dict.values()):
            continue  # already matched somewhere
        unmatched.append(resident)
    return unmatched
import os
import asyncio
async def runCmdWithUser(cmd, addToEnv=None) :
  """Runs a command allowing the users to interact with the command and
  then returns the return code. Based upon the Python asyncio subprocesses
  documentation.

  :param cmd: sequence of program arguments (argv style)
  :param addToEnv: optional mapping merged into ``os.environ`` before the
      command runs. NOTE: this mutates the parent process's environment
      and is not undone afterwards.
  :return: the child process's exit code
  """

  if addToEnv is not None :
    for aKey, aValue in addToEnv.items() :
      os.environ[aKey] = aValue

  # stdout/stderr are left attached (None) so the user can interact with
  # the command directly in the parent's terminal.
  proc = await asyncio.create_subprocess_exec(
    *cmd, stdout=None, stderr=None
  )
  await proc.wait()
  return proc.returncode
import pickle
def load(directory):
    """Load and return the object pickled in the file at *directory*."""
    with open(directory, 'rb') as handle:
        return pickle.load(handle)
import numpy
def interleave(left, right):
    """Convert two mono sources into one stereo source.

    Samples are woven as left[0], right[0], left[1], right[1], ...
    """
    stacked = numpy.vstack((left, right))
    # Fortran (column-major) ravel walks the columns, interleaving channels.
    return numpy.ravel(stacked, order='F')
def get_seconds(time_string):
    """
    Convert a duration string such as ``1m5.928s`` to seconds.
    """
    parts = time_string.split("m")
    minutes = float(parts[0])
    seconds = float(parts[1].split("s")[0])
    return minutes * 60.0 + seconds
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.