content (string, lengths 35 to 762k) | sha1 (string, length 40) | id (int64, 0 to 3.66M) |
---|---|---|
def attrdict(d: dict) -> AttrDict:
"""Add attribute access to a dict.
This function takes a dict (possibly with nested dicts) as input and converts it into an AttrDict
object, which allows attribute access to keys.
Returns:
A dict-like object with attribute access to keys.
"""
def addattrs(d):
if not isinstance(d, dict):
return d
obj = AttrDict()
for k in d:
obj[k] = obj.__dict__[k] = addattrs(d[k])
return obj
obj = AttrDict()
obj.update(d)
obj.__dict__.update(addattrs(d))
return obj
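# A minimal usage sketch, assuming AttrDict is a dict subclass defined elsewhere in this module;
# the config keys below are illustrative only.
cfg = attrdict({"db": {"host": "localhost", "port": 5432}})
assert cfg.db.host == "localhost"     # attribute access on nested dicts
assert cfg["db"]["port"] == 5432      # plain dict access still works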
|
c4d8a102ff3bab5fb75b76fb08f4040248c48053
| 24,107 |
def form(cik, year):
"""Returns form 13F for specified CIK number. From https://fmpcloud.io/documentation#thirteenForm
Input:
cik : CIK number for which you'd like the 13F form.
year : year for which you'd like the 13F form.
Returns:
Form 13F for specified company
"""
urlroot = settings.get_urlroot()
apikey = settings.get_apikey()
url = urlroot + "form-thirteen/" + cik + "?year=" + year + "&apikey=" + apikey
response = urlopen(url)
data = response.read().decode("utf-8")
return safe_read_json(data)
|
e5bcc58251456f15ba31f6b468b6aa934fddc7f1
| 24,108 |
def check_bel_script_line_by_line(bel_script_path, error_report_file_path, bel_version):
"""Check statements in file or string for correct.
result['trees'][line_number] = {'statement': statement, 'tree': tree}
result['errors'][line_number] = {'statement': statement, 'error': ex}
lines starting with # are treated as comments
empty lines will be skipped
every statement should be on its own line
:param str bel_script_path: path to BEL script
:param str error_report_file_path: path to report
:param str bel_version: BEL version
:return: dict
"""
parser = _BELParser()
data_frame = parser.check_bel_script_line_by_line(bel_script_path, bel_version)
if error_report_file_path:
write_error_report(data_frame=data_frame, file_path=error_report_file_path)
else:
return data_frame
|
5502c0ef9adbcde9fe6feb50aeee51c1600fd5a8
| 24,109 |
def create(cls, **data):
"""Create a single instance of a Resource.
Arguments:
cls (Resource class): The resource to create.
All other keyword arguments will be provided to the request
when POSTing. For example::
create(Foo, name="bar", email="[email protected]")
...would try to create an instance of the Foo resource
with a name set to "bar" and an email set to "[email protected]".
"""
instance = cls()
instance.run_validation(data)
response = request("post", url=cls.get_collection_url(), data=data)
try:
return utils.parse_resources(cls=cls, response=response, many=False)
except IndexError:
return None
|
6dd5f76446974c4e9f37422bf1f7036a099bd576
| 24,110 |
def get_cluster_role_template_binding(cluster_id=None, name=None, role_template_id=None, opts=None):
"""
Use this data source to retrieve information about a Rancher v2 cluster role template binding.
> This content is derived from https://github.com/terraform-providers/terraform-provider-rancher2/blob/master/website/docs/d/clusterRole.html.markdown.
:param str cluster_id: The cluster id where bind cluster role template (string)
:param str name: The name of the cluster role template binding (string)
:param str role_template_id: The role template id from create cluster role template binding (string)
"""
__args__ = dict()
__args__['clusterId'] = cluster_id
__args__['name'] = name
__args__['roleTemplateId'] = role_template_id
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = utilities.get_version()
__ret__ = pulumi.runtime.invoke('rancher2:index/getClusterRoleTemplateBinding:getClusterRoleTemplateBinding', __args__, opts=opts).value
return AwaitableGetClusterRoleTemplateBindingResult(
annotations=__ret__.get('annotations'),
cluster_id=__ret__.get('clusterId'),
group_id=__ret__.get('groupId'),
group_principal_id=__ret__.get('groupPrincipalId'),
id=__ret__.get('id'),
labels=__ret__.get('labels'),
name=__ret__.get('name'),
role_template_id=__ret__.get('roleTemplateId'),
user_id=__ret__.get('userId'),
user_principal_id=__ret__.get('userPrincipalId'))
|
3e84ac201e141c28549d44fff609897afb1fbb73
| 24,112 |
def np_where(cond, x, y):
"""
Wrap np.where() to allow for keyword arguments
"""
return np.where(cond, x, y)
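# A quick usage sketch: the wrapper exists so np.where can be called with keyword
# arguments, e.g. from APIs that pass parameters by name.
a = np.array([1, -2, 3])
np_where(cond=a > 0, x=a, y=0)   # -> array([1, 0, 3])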
|
32627e5e38625148c0193d74b04390098be1d631
| 24,113 |
def score_feedback_comp_micro_shujun(pred_df, gt_df, discourse_type):
"""
A function that scores for the kaggle
Student Writing Competition
Uses the steps in the evaluation page here:
https://www.kaggle.com/c/feedback-prize-2021/overview/evaluation
"""
gt_df = gt_df.loc[gt_df['discourse_type'] == discourse_type,
['id', 'predictionstring']].reset_index(drop=True)
pred_df = pred_df.loc[pred_df['class'] == discourse_type,
['id', 'predictionstring']].reset_index(drop=True)
pred_df['pred_id'] = pred_df.index
gt_df['gt_id'] = gt_df.index
pred_df['predictionstring'] = [(int(pred.split(' ')[0]),int(pred.split(' ')[-1])) for pred in pred_df['predictionstring']]
gt_df['predictionstring'] = [(int(pred.split(' ')[0]),int(pred.split(' ')[-1])) for pred in gt_df['predictionstring']]
# print(pred_df[pred_df['predictionstring']!=pred_df['predictionstring']])
# exit()
#gt_strings=
# Step 1. all ground truths and predictions for a given class are compared.
joined = pred_df.merge(gt_df,
left_on='id',
right_on='id',
how='outer',
suffixes=('_pred','_gt')
)
overlaps = [calc_overlap_shujun(*args) for args in zip(list(joined.predictionstring_pred),
list(joined.predictionstring_gt))]
# 2. If the overlap between the ground truth and prediction is >= 0.5,
# and the overlap between the prediction and the ground truth >= 0.5,
# the prediction is a match and considered a true positive.
# If multiple matches exist, the match with the highest pair of overlaps is taken.
# we don't need to compute the match to compute the score
TP = joined.loc[overlaps]['gt_id'].nunique()
# 3. Any unmatched ground truths are false negatives
# and any unmatched predictions are false positives.
TPandFP = len(pred_df)
TPandFN = len(gt_df)
#calc microf1
my_f1_score = 2*TP / (TPandFP + TPandFN)
return my_f1_score
|
24854d61ac5afa701c89a1382ade52df8dd53714
| 24,114 |
def _url_for_language_resolve_view(url, new_language):
"""
Figure out the new URL by resolving the old URL and re-reversing it using
the new language.
"""
view = urlresolvers.resolve(url)
with language_context(new_language):
new_url = urlresolvers.reverse(view.url_name, args=view.args, kwargs=view.kwargs)
return new_url
|
8cbf1d5c7be7bf782d1dc1206f0e3075c022c4ad
| 24,115 |
def rsp_matrix(m,k):
"""
Description: This function creates the matrix used for finding the parameters of reals signal perceptron using a system of linear equations
is_Implemented:
True
Args:
(m:int): The domain size , the amount of possible variables that each variable can take
(k:int): The arity, the amount of variables that each signal can recieve
Shape:
- Input: integers that define the functional space
- Output: a matrix of m
Examples::
matrix = rsp_matrix(2,2)
print(matrix)
[[0,0,0,0],[0,1,0,0],[0,0,1,0],[1,1]]
"""
aix=np.zeros([k]); #Array of indexes (to order them)
aiw=np.zeros([k]); #Array of indexes (to order them)
ni=m**k #Number of Iterations
n=k #No. of variables
nn=m**n #|m^k| domain space
nnn=m**nn #|Delta|=|m^m^k| function space
# Matrix
A=np.zeros([nn,nn],dtype=np.float32)
divfrec=m-1
i=0; j=0
v=0;
for xi in range(0,ni,1):
kx=xi;
for xj in range(0,k,1):
aix[xj]= int ( kx % m );
kx=int(kx/m);
#print("aix=",aix)
j=0;
#First Inner nested loop that generates all combinations of w for a signal
for wi in range(0,ni,1):
kw=wi;
for wj in range(0,k,1): #Generate the indices
aiw[wj]= int ( kw % m ) ; #Store them in the array
kw=int(kw/m); #next indices
#print(i,j,A[i,j],"|",end='')
exponente=0
#Second inner loop that multiplies and sums
for ii in range(0,k,1):
exponente=exponente + aix[ii]*aiw[ii]
exponente=int(exponente)
#print("exponente=",exponente)
exponente=np.pi*exponente/divfrec
#print(exponente)
#print(np.exp(exponente))
A[i,j]=np.cos(exponente)
#print(A[i,j])
j=j+1
#print("aiw=",aiw,"j=",j)
#for aj in range(0,nc,1):
# print(i,j,A[i,j],"|",end='')
# print()
i=i+1
return A
|
00d327b5f3a1726337a90d31955d37fc3b2c1e5f
| 24,116 |
import json
def pretty_format_dict(dct):
"""
Parameters
----------
dct: dict[Any, Any]
Returns
-------
str
"""
return "{}".format(json.dumps(dct, indent=4))
|
60d9c09da62d7035bd89a6fb52e6f0a1f142f89e
| 24,117 |
def session_try_readonly(dbtype, dbfile, echo=False):
"""Creates a read-only session to an SQLite database.
If read-only sessions are not supported by the underlying sqlite3 python DB
driver, then a normal session is returned. A warning is emitted in case the
underlying filesystem does not support locking properly.
Raises:
NotImplementedError: if the dbtype is not supported.
"""
if dbtype != 'sqlite':
raise NotImplementedError(
"Read-only sessions are only currently supported for SQLite databases")
connector = SQLiteConnector(dbfile, readonly=True, lock='unix-none')
return connector.session(echo=echo)
|
2be571a27e3f876e5961557d776448104cb9246b
| 24,120 |
def generate_lab_saliva(directory, file_date, records):
"""
Generate lab saliva file.
"""
lab_saliva_description = (
lambda: {
'ORDPATNAME': _('random.custom_code', mask='SIS########', digit='#'),
'SAMPLEID': _('random.custom_code', mask='H#########', digit='#'),
'IgG Capture Result': _('choice', items=['#r', '#n', '#e'])
}
)
schema = Schema(schema=lab_saliva_description)
lab_saliva = pd.DataFrame(schema.create(iterations=records))
lab_saliva.to_csv(directory / f"lab_saliva_{file_date}.csv", index=False)
return lab_saliva
|
bc8b81e9a83cbe8f4ac3d23ff272172d3424f459
| 24,121 |
def rebuild_field_path(sort_field, resource):
"""
Convert dot-connected fields into a valid field reference.
:param sort_field: the sort field, possibly carrying a sort-direction indicator prefix
:param resource: the resource definition used to validate the field parts
:return: path_to_field
"""
stripped = strip_sort_indicator(sort_field)
split_sorted = stripped.split()
sort_with_this = ""
for s in split_sorted:
if s in resource:
sort_with_this = sort_with_this + s
return sort_with_this
|
98bdc18b2147babc35119ba6c61ec1dd3e32ccd8
| 24,123 |
def load_data(pkl_paths, use_attr, no_img, batch_size, uncertain_label=False, n_class_attr=2, image_dir='images', resampling=False, resol=299):
"""
Note: Inception needs (299,299,3) images with inputs scaled between -1 and 1
Loads data with transformations applied, and upsample the minority class if there is class imbalance and weighted loss is not used
NOTE: resampling is customized for first attribute only, so change sampler.py if necessary
"""
resized_resol = int(resol * 256/224)
is_training = any(['train.pkl' in f for f in pkl_paths])
if is_training:
transform = transforms.Compose([
#transforms.Resize((resized_resol, resized_resol)),
#transforms.RandomSizedCrop(resol),
transforms.ColorJitter(brightness=32/255, saturation=(0.5, 1.5)),
transforms.RandomResizedCrop(resol),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(), #implicitly divides by 255
transforms.Normalize(mean = [0.5, 0.5, 0.5], std = [2, 2, 2])
#transforms.Normalize(mean = [ 0.485, 0.456, 0.406 ], std = [ 0.229, 0.224, 0.225 ]),
])
else:
transform = transforms.Compose([
#transforms.Resize((resized_resol, resized_resol)),
transforms.CenterCrop(resol),
transforms.ToTensor(), #implicitly divides by 255
transforms.Normalize(mean = [0.5, 0.5, 0.5], std = [2, 2, 2])
#transforms.Normalize(mean = [ 0.485, 0.456, 0.406 ], std = [ 0.229, 0.224, 0.225 ]),
])
dataset = CUBDataset(pkl_paths, use_attr, no_img, uncertain_label, image_dir, n_class_attr, transform)
if is_training:
drop_last = True
shuffle = True
else:
drop_last = False
shuffle = False
if resampling:
sampler = BatchSampler(ImbalancedDatasetSampler(dataset), batch_size=batch_size, drop_last=drop_last)
loader = DataLoader(dataset, batch_sampler=sampler)
else:
loader = DataLoader(dataset, batch_size=batch_size, shuffle=shuffle, drop_last=drop_last)
return loader
|
79e6d5cfa8cd7a8204fcef102585f25b3d4f0442
| 24,124 |
def scalar_function(x, y):
"""
Returns the f(x,y) defined in the problem statement.
"""
if x <= y:
out = x*y
else:
out = x/y
return out
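# A quick worked example: when x <= y the values are multiplied, otherwise divided.
scalar_function(2, 3)   # -> 6 (2 <= 3, so 2*3)
scalar_function(3, 2)   # -> 1.5 (3 > 2, so 3/2)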
|
f305a5f2680dbebda9322a6fbd17f7dc3ce8a072
| 24,125 |
def getImageParticles(imagedata,stackid,noDie=True):
"""
Provided a Stack Id & imagedata, to find particles
"""
particleq = appiondata.ApParticleData(image=imagedata)
stackpdata = appiondata.ApStackParticleData()
stackpdata['particle'] = particleq
stackpdata['stack'] = appiondata.ApStackData.direct_query(stackid)
stackps = stackpdata.query()
particles = []
if not stackps:
if noDie is True:
return particles,None
apDisplay.printError("no particles from this image were found in stackid="+str(stackid))
for stackp in stackps:
particles.append(stackp['particle'])
return particles,stackps
|
4764b25c7830ae58023b7bc0ac9d148844604e50
| 24,126 |
import json
def build_evaluation(
resource_id,
compliance_type,
event,
resource_type=DEFAULT_RESOURCE_TYPE,
annotation=None,
):
"""Form an evaluation as a dictionary. Usually suited to report on scheduled rules.
Keyword arguments:
resource_id -- the unique id of the resource to report
compliance_type -- either COMPLIANT, NON_COMPLIANT or NOT_APPLICABLE
event -- the event variable given in the lambda handler
resource_type -- the CloudFormation resource type (or AWS::::Account) to report on the rule
(default DEFAULT_RESOURCE_TYPE)
annotation -- an annotation to be added to the evaluation (default None). It will be truncated to 255 if longer.
"""
eval_cc = {}
if annotation:
eval_cc["Annotation"] = build_annotation(annotation)
eval_cc["ComplianceResourceType"] = resource_type
eval_cc["ComplianceResourceId"] = resource_id
eval_cc["ComplianceType"] = compliance_type
eval_cc["OrderingTimestamp"] = str(
json.loads(event["invokingEvent"])["notificationCreationTime"]
)
return eval_cc
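# A minimal usage sketch (hedged): the event payload below is hypothetical and only
# contains the single field this helper reads from the lambda event.
event = {"invokingEvent": json.dumps({"notificationCreationTime": "2023-01-01T00:00:00.000Z"})}
build_evaluation("i-0abc123", "COMPLIANT", event, resource_type="AWS::EC2::Instance")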
|
d814e8f0d965cc36b40c0b33efbce043d43ef098
| 24,127 |
def match_countries(df_to_match, olympics):
"""Changes the names of the countries in the df_to_match df so that they match
the names of the countries in the olympics df.
Parameters
-----------
df_to_match : either of the two dataframes:
- gdp
- pop
olympics : the olympics dataframe
Returns
-----------
df_to_match : the dataframe given as the first parameter, with its countries
renamed to match the countries in the olympics df
common_countries : a list with the common countries in the two dataframes
"""
# countries in the to_match df
df_countries = set(df_to_match.columns.tolist())
# countries in the olympics df
ol_regions = set(sorted(olympics.region.unique().tolist()))
# countries in the to_match df that are not in the olympics df
not_in_ol = df_countries.difference(ol_regions)
# countries in the olympics df that are not in the to_match df
not_in_df = ol_regions.difference(df_countries)
# After printing not_in_ol and not_in_df, we see that some countries are simply named differently
# Therefore, I rename these countries in the to_match df so that they match the countries from the olympics df
df_to_match.rename(columns={"United States": "USA",
"United Kingdom": "UK",
"Antigua and Barbuda": "Antigua",
"Congo, Dem. Rep.": "Democratic Republic of the Congo",
"Lao": "Laos",
"North Macedonia": "Macedonia",
"Cote d'Ivoire": "Ivory Coast",
"Trinidad and Tobago": "Trinidad",
"Micronesia, Fed. Sts.": "Micronesia",
"St. Vincent and the Grenadines": "Saint Vincent",
"St. Lucia": "Saint Lucia",
"St. Kitts and Nevis": "Saint Kitts",
"Slovak Republic": "Slovakia",
"Kyrgyz Republic": "Kyrgyzstan",
"Bolivia": "Boliva",
"Congo, Rep.": "Republic of Congo"},
inplace=True)
# Check which countries still remain unmatched
df_countries = set(df_to_match.columns.tolist())
ol_regions = set(sorted(olympics.region.unique().tolist()))
# Countries in the to_match df that are still not in the olympics df
not_in_ol = df_countries.difference(ol_regions)
# Countries in the olympics df that are still not in the to_match df
not_in_df = ol_regions.difference(df_countries)
# Printing not_in_ol and not_in_df shows which countries are still not matched. Used as a check.
# save the resulting common countries
common_countries = ol_regions.intersection(df_countries)
return df_to_match, common_countries
|
256eaac81daee5c621e7dac4c8c27d0b96868418
| 24,128 |
import requests
import logging
def get_reply(session, url, post=False, data=None, headers=None, quiet=False):
"""
Download an HTML page using the requests session. Low-level function
that allows for flexible request configuration.
@param session: Requests session.
@type session: requests.Session
@param url: URL pattern with optional keywords to format.
@type url: str
@param post: Flag that indicates whether POST request should be sent.
@type post: bool
@param data: Payload data that is sent with request (in request body).
@type data: object
@param headers: Additional headers to send with request.
@type headers: dict
@param quiet: Flag that tells whether to print error message when status
code != 200.
@type quiet: bool
@return: Requests response.
@rtype: requests.Response
"""
request_headers = {} if headers is None else headers
request = requests.Request('POST' if post else 'GET',
url,
data=data,
headers=request_headers)
prepared_request = session.prepare_request(request)
reply = session.send(prepared_request)
try:
reply.raise_for_status()
except requests.exceptions.HTTPError as e:
if not quiet:
logging.error("Error %s getting page %s", e, url)
logging.error("The server replied: %s", reply.text)
raise
return reply
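# A minimal usage sketch, assuming network access; the URL is a placeholder.
session = requests.Session()
reply = get_reply(session, "https://httpbin.org/get")
print(reply.status_code)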
|
4baa985db090d0f88762c8f6cfadff084f2b88ad
| 24,129 |
def get_stock_rack_size():
"""
Returns the number of available positions in a stock rack.
"""
return get_stock_rack_shape().size
|
d41361ae2817e6b6de78b2ae6e2f7b7765c3bfcd
| 24,130 |
def is_valid_constant_type(x):
"""
@return: True if ``x`` is a legal constant type. Only simple types are allowed.
@rtype: bool
"""
return x in PRIMITIVE_TYPES
|
373a4c5d5a35e250ed19c06fc3ae0b6d30f8ec7a
| 24,131 |
import torch
def logsumexp(tensor: torch.Tensor, dim: int = -1, keepdim: bool = False) -> torch.Tensor:
"""
A numerically stable computation of logsumexp.
This is mathematically equivalent to `tensor.exp().sum(dim, keep=keepdim).log()`.
This function is typically used for summing log probabilities.
Parameters
----------
tensor : `torch.FloatTensor`, required.
A tensor of arbitrary size.
dim : `int`, optional (default = `-1`)
The dimension of the tensor to apply the logsumexp to.
keepdim: `bool`, optional (default = `False`)
Whether to retain a dimension of size one at the dimension we reduce over.
"""
max_score, _ = tensor.max(dim, keepdim=keepdim)
stable_vec = tensor - max_score if keepdim else tensor - max_score.unsqueeze(dim)
return max_score + stable_vec.exp().sum(dim, keepdim=keepdim).log()
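# A quick usage sketch: summing log-probabilities that correspond to probabilities summing to 1
# should give (approximately) zero.
log_probs = torch.log(torch.tensor([[0.25, 0.25, 0.5]]))
logsumexp(log_probs, dim=-1)   # -> tensor close to [0.]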
|
2bcbf60369e359daa7bc5aaaceef45f753fd4f00
| 24,132 |
def shrink_piecwise_linear(r,rvar,theta):
"""Implement the piecewise linear shrinkage function.
With minor modifications and variance normalization.
theta[...,0] : abscissa of first vertex, scaled by sqrt(rvar)
theta[...,1] : abscissa of second vertex, scaled by sqrt(rvar)
theta[...,2] : slope from origin to first vertex
theta[...,3] : slope from first vertex to second vertex
theta[...,4] : slope after second vertex
"""
ab0 = theta[...,0]
ab1 = theta[...,1]
sl0 = theta[...,2]
sl1 = theta[...,3]
sl2 = theta[...,4]
# scale each column by sqrt(rvar)
scale_out = tf.sqrt(rvar)
scale_in = 1/scale_out
rs = tf.sign(r*scale_in)
ra = tf.abs(r*scale_in)
# split the piecewise linear function into regions
rgn0 = tf.to_float( ra<ab0)
rgn1 = tf.to_float( ra<ab1) - rgn0
rgn2 = tf.to_float( ra>=ab1)
xhat = scale_out * rs*(
rgn0*sl0*ra +
rgn1*(sl1*(ra - ab0) + sl0*ab0 ) +
rgn2*(sl2*(ra - ab1) + sl0*ab0 + sl1*(ab1-ab0) )
)
dxdr = sl0*rgn0 + sl1*rgn1 + sl2*rgn2
dxdr = tf.reduce_mean(dxdr,0)
return (xhat,dxdr)
|
b47690d151dcfac2104c58637628aa78fbeb7d63
| 24,133 |
def LoadAuth(decoratee):
"""Decorator to check if the auth is valid and loads auth if not."""
@wraps(decoratee)
def _decorated(self, *args, **kwargs):
if self.auth is None: # Initialize auth if needed.
self.auth = GoogleAuth()
if self.auth.access_token_expired:
self.auth.LocalWebserverAuth()
if self.auth.service is None: # Check if drive api is built.
self.auth.Authorize()
return decoratee(self, *args, **kwargs)
return _decorated
|
87097fc527ee47af3a91a904881bc62e46181754
| 24,134 |
def cols_with_nulls(df):
""" Convert whitespace entries to NaN, Return columns with NaN
"""
# Note: Empty string will be converted to NaN automatically,
df.replace(r'^\s*$', np.nan, regex=True, inplace=True)
return df.columns[df.isnull().any()].tolist()
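# A minimal usage sketch, assuming pandas is imported as pd and numpy as np (as the body implies);
# whitespace-only cells are treated as missing.
df = pd.DataFrame({"a": [1, 2], "b": [" ", 3], "c": [4, 5]})
cols_with_nulls(df)   # -> ['b']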
|
5c7121c920c4db09f78d648a271337bf45e7f994
| 24,135 |
def recursive_feature_selection_roc_auc(clf,
X,
y,
sample_weight=None,
n_features=10,
cv_steps=10,
n_jobs=1,
forward=True,
matching_features=True):
"""Method building a feature set in a recursive fashion. Depending
on the setting it is run as a forward selection/backward elimination
searching for a set of n features with the highest/lowest mismatch.
To get the set with the size n starting from n_total features the
following approaches are used:
Forward Selection:
To get the k+1 set every not yet selected feature is used to
generate (n_total - k) sets. The set with the best score is the
k + 1 set. Those steps are repeated until n features are selected
Backward Elimination:
To get k+1 eliminated features every not yet eliminated feature is used
to generate (n_total - k) sets. The sets consist of all not yet
eliminated features minus the one that is tested. The set with the
best score determines the next feature to eliminate. Those steps are
repeated until n features are eliminated.
What the best score depends also on the settings:
matching_features:
forward: min(|auc - 0.5|)
not forward: max(|auc - 0.5|)
not matching_features:
forward: max(auc)
not forward: min(auc)
Parameters
----------
clf: object
Classifier that should be used for the classification.
It needs a fit and a predict_proba function.
X : numpy.float32array, shape=(n_samples, n_obs)
Values describing the samples.
y : numpy.float32array, shape=(n_samples)
Array of the true labels.
sample_weight : None or numpy.float32array, shape=(n_samples)
If weights are used this has to contains the sample weights.
None in the case of no weights.
n_features : int, optional (default=10)
Number of feature that are selected (forward=True) or eliminated
(forward=False)
cv_steps: int, optional (default=10)
Number of cross-validation steps used to evaluate each candidate set.
n_jobs: int, optional (default=1)
Number of parallel jobs spawned for each classification run.
Total number of used cores is the product of n_jobs from the clf
and the n_jobs of this function.
forward: bool, optional (default=True)
If True it is a 'forward selection'. If False it is a 'backward
elimination'.
matching_features: bool, optional (default=True)
Whether to search for matching or mismatching features.
Returns
-------
selected_features: list of ints
Return a list containing the indices of X that were
selected/eliminated. The order corresponds to the order the
features were selected/eliminated.
auc_scores: np.array float shape(n_features_total, n_features)
Return an array containing the auc values for all steps.
np.nan if the feature was already selected in the specific run.
"""
desired_characteristics = ClassifierCharacteristics()
desired_characteristics.opts['callable:fit'] = True
desired_characteristics.opts['callable:predict_proba'] = True
clf_characteristics = ClassifierCharacteristics(clf)
assert clf_characteristics.fulfilling(desired_characteristics), \
'Classifier sanity check failed!'
if n_features > X.shape[1]:
logger.info(' \'n_features\' higher than total number of features.'
' \'n_features\' reduced!')
n_features = X.shape[1]
auc_scores = np.zeros((X.shape[1], n_features))
selected_features = []
while len(selected_features) != n_features:
auc_scores_i = get_all_auc_scores(clf,
selected_features,
X,
y,
sample_weight=sample_weight,
cv_steps=cv_steps,
n_jobs=n_jobs,
forward=forward)
value_best = None
index_best = None
for idx, auc in enumerate(auc_scores_i):
if not np.isfinite(auc):
continue
if value_best is None:
value_best = auc
index_best = idx
if matching_features:
if forward:
if np.abs(auc - 0.5) < np.abs(value_best - 0.5):
value_best = auc
index_best = idx
else:
if np.abs(auc - 0.5) > np.abs(value_best - 0.5):
value_best = auc
index_best = idx
else:
if forward:
if auc > value_best:
value_best = auc
index_best = idx
else:
if auc < value_best:
value_best = auc
index_best = idx
auc_scores[:, len(selected_features)] = auc_scores_i
selected_features.append(index_best)
return selected_features, auc_scores
|
49073354f5f4292716fccdcaa33ec423da18c7d9
| 24,136 |
def mermin_klyshko_quantum_bound(n):
"""The quantum bound for the Mermin-Klyshko inequality is :math:`2^{3(n-1)/2}`.
:param n: The number of measurement nodes.
:type n: Int
:returns: The quantum bound.
:rtype: Float
"""
return 2 ** (3 * (n - 1) / 2)
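# Worked examples: n = 2 gives 2**1.5 (about 2.83, the Tsirelson bound), n = 3 gives 8.0.
mermin_klyshko_quantum_bound(2)   # -> 2.828...
mermin_klyshko_quantum_bound(3)   # -> 8.0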
|
721ca41b19ef72cae77baf1ad6dea5377b6eb67d
| 24,137 |
def get_serial_port_selected():
"""Get the selected serial port from the Settings.
:return: The currently selected serial port in the Settings.
"""
return ServerCompilerSettings().serial_port
|
03273c1e522a7699c7fdbd1522367f9f08a42b9f
| 24,138 |
import platform
def main(args):
"""Produce library bundle"""
if platform.system() == "Darwin":
res = gen_archive_darwin(args.output, args.libs)
else:
ar_script = gen_archive_script(
args.output, [expand_path(lpath) for lpath in args.libs]
)
res = gen_archive(ar_script)
return res
|
029f4eacabcd881827ec89dc76025b889116d02b
| 24,139 |
def run_in_background(func: callable, *args, **kwargs) -> Future:
""" run func(*args, **kwargs) in background and return Future for its outputs """
return GLOBAL_EXECUTOR.submit(func, *args, **kwargs)
|
1ef74344e63b508eb31a3ccc3cbd80e7ba2f5c4a
| 24,140 |
from typing import Union
def get_eigenvectors(
q,
dm: Union[DynamicalMatrix, DynamicalMatrixNAC],
ddm: DerivativeOfDynamicalMatrix,
perturbation=None,
derivative_order=None,
nac_q_direction=None,
):
"""Return degenerated eigenvalues and rotated eigenvalues."""
if nac_q_direction is not None and (np.abs(q) < 1e-5).all():
dm.run(q, q_direction=nac_q_direction)
else:
dm.run(q)
eigvals, eigvecs = np.linalg.eigh(dm.dynamical_matrix)
eigvals = eigvals.real
if perturbation is None:
return eigvals, eigvecs
if derivative_order is not None:
ddm.set_derivative_order(derivative_order)
dD = _get_dD(q, ddm, perturbation)
rot_eigvecs, _ = rotate_eigenvectors(eigvals, eigvecs, dD)
return eigvals, rot_eigvecs
|
8f869f00df9666dc4d343ffd030db93634e42a5a
| 24,141 |
def _get_formatted_atom_types_names_for(connection):
"""Return formatted atom_type names for a connection."""
names = []
for member in connection.connection_members:
if not member.atom_type:
label = ""
else:
label = member.atom_type.name
names.append(label)
return " --- ".join(names)
|
b9b21cb37706aa05f7807df1c252985f09fe6fad
| 24,142 |
def anti_commutator(H1,H2):
""" Calculates the anticommutator of two Hamiltonians :math:`H_1` and :math:`H_2`.
.. math::
\\{H_1,H_2\\}_+ = H_1 H_2 + H_2 H_1
Examples
--------
The following script shows how to compute the anticommutator of two `hamiltonian` objects.
.. literalinclude:: ../../doc_examples/anti_commutator-example.py
:linenos:
:language: python
:lines: 7-
Parameters
-----------
H1 : obj
`numpy.ndarray` or `hamiltonian` class object to define the Hamiltonian operator as a matrix.
H2 : obj
`numpy.ndarray` or `hamiltonian` class object to define the Hamiltonian operator as a matrix.
Returns
--------
obj
Anticommutator: :math:`\\{H_1,H_2\\}_+ = H_1 H_2 + H_2 H_1`
"""
if ishamiltonian(H1) or ishamiltonian(H2):
return H1*H2 + H2*H1
else:
return H1.dot(H2) + H2.dot(H1)
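# A minimal numpy sketch (hedged): assumes numpy is imported as np and that ishamiltonian
# returns False for plain ndarrays, so the .dot() branch is taken.
H1 = np.array([[0, 1], [1, 0]])    # sigma_x
H2 = np.array([[1, 0], [0, -1]])   # sigma_z
anti_commutator(H1, H2)            # -> [[0, 0], [0, 0]], since sigma_x and sigma_z anticommute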
|
78f3adf2b9e7934a2a84b1ea6611ce615be7b013
| 24,143 |
def download(self, auth=False):
"""
needs source url (from webs ite) and destination save location
"""
source_url = 'http://www.spitzer.caltech.edu/uploaded_files/images/0006/3034/ssc2008-11a12_Huge.jpg'
hdr = {'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.3',
'Accept-Encoding': 'none',
'Accept-Language': 'en-US,en;q=0.8',
'Connection': 'keep-alive'}
req = urllib2.Request(source_url, headers=hdr)
if auth:
out_file = str(auth) + '.jpeg'
else:
out_file = 'test2.jpeg'
try:
opened = urllib2.urlopen(req)
except urllib2.HTTPError as e:
print(e)
raise
total_size = int(opened.info().getheader('Content-Length').strip())
progress = 0
self.update_state(state='PROGRESS')
with open(out_file, 'wb') as f:
while True:
chunk = opened.read(CHUNK)
if not chunk: break
f.write(chunk)
progress += CHUNK
self.update_state(state='PROGRESS',
meta={'current': progress, 'total': total_size, 'status': 'asdfghjk'})
return {'current': total_size, 'total': total_size, 'status': 'Download completed!'}
|
ded4cbef19c4c498793006af9044ca2fb504a287
| 24,144 |
def windowed_dataset(dataset, size, shift=None, stride=1, drop_remainder=True):
"""Create a windowed `Dataset`.
Arguments:
dataset: A `Dataset` of output shape ((...), (...), ... (...)) or a `dict`
of the same.
size: A `tf.int64` scalar `tf.Tensor`, representing the number of elements
of the input dataset to combine into a window.
shift: (Optional.) A `tf.int64` scalar `tf.Tensor`, representing the forward
shift of the sliding window in each iteration. Defaults to `size`.
stride: A `tf.int64` scalar `tf.Tensor`, representing the stride of the
input elements in the sliding window.
drop_remainder: Whether to drop final windows smaller than `size`.
Returns:
A windowed `Dataset`.
"""
dataset = dataset.window(size, shift=shift, stride=stride, drop_remainder=drop_remainder)
def map_fn(nested_structure_of_datasets):
"""nested_structure_of_datasets -> dataset"""
structure_type = type(nested_structure_of_datasets)
if structure_type is dict:
for k, v in nested_structure_of_datasets.items():
nested_structure_of_datasets[k] = map_fn(v)
return tf.data.Dataset.zip(nested_structure_of_datasets)
if structure_type is tuple:
return tf.data.Dataset.zip(tuple(map(map_fn, nested_structure_of_datasets)))
return nested_structure_of_datasets.batch(size)
if type(dataset.element_spec) is tuple:
return dataset.flat_map(lambda *e: map_fn(e))
return dataset.flat_map(map_fn)
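# A minimal usage sketch (hedged), assuming TensorFlow 2.x eager execution; each window of a
# flat dataset comes back as one batched tensor.
ds = tf.data.Dataset.range(5)
windows = windowed_dataset(ds, size=3, shift=1)
list(windows.as_numpy_iterator())   # -> [array([0, 1, 2]), array([1, 2, 3]), array([2, 3, 4])]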
|
0e7fd8a4ba603e54e63622d5609fea7d6bddfceb
| 24,145 |
def dnds(seq1, seq2):
"""Main function to calculate dN/dS between two DNA sequences per Nei &
Gojobori 1986. This includes the per site conversion adapted from Jukes &
Cantor 1967.
"""
# Strip any whitespace from both strings
seq1 = clean_sequence(seq1)
seq2 = clean_sequence(seq2)
# Check that both sequences have the same length
# assert len(seq1) == len(seq2)
# Check that sequences are codons
# assert len(seq1) % 3 == 0
syn_sites = syn_sum(seq1, seq2)
non_sites = len(seq1)-syn_sites
syn_subs, non_subs = substitutions(seq1, seq2)
pn = float(non_subs)/non_sites
ps = float(syn_subs)/syn_sites
dn = -0.75 * log(1 - (4 * pn / 3))
ds = -0.75 * log(1 - (4 * ps / 3))
return round(float(pn), 3), round(float(ps), 3), round(float(dn), 3), round(float(ds), 3)
|
e3631efb181fc390d8ca47b64c54d03bc3924a98
| 24,146 |
def create_LED_indicator_rect(**kwargs) -> QPushButton:
"""
False: dim red
True : green
"""
# fmt: off
SS = (
"QPushButton {"
"background-color: " + COLOR_INDIAN_RED_2 + ";"
"color: black;"
"border: 1px solid black;"
"border-radius: 0px;"
"min-height: 30px;"
"min-width: 76px;}"
"QPushButton:checked {"
"background-color: " + COLOR_SPRING_GREEN_2 + ";}"
)
# fmt: on
button = QPushButton(checkable=True, enabled=False, **kwargs)
button.setStyleSheet(SS)
return button
|
6d1727bfa7afbf26a92f49d51e0888f104b6b0c0
| 24,147 |
def bind(task):
"""Bind a task method for use in a pipeline
This decorator method adapts a task method to work
in a pipeline. Specifically, it routes successful
Result input to the task logic, and passes through
failure Result input without performing any
additional actions.
Args:
task: A task method that returns a Result
Returns:
function: Bound task that accepts and returns a Result
Example:
<Before bind>
---> Result(Success)
------------- |
data | | |
---> | Task | ---|
| | |
------------- |
---> Result(Failure)
==============================================================
<After bind>
---> Result(Success)
------------- |
data | | |
Result(Success) ---> | Task | ---|
| | |
------------- |
Result(Failure) -------------------------> Result(Failure)
"""
@wraps(task)
def inner(result):
if result.success:
return task(result.payload)
else:
return result
return inner
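# A minimal usage sketch (hedged): assumes a Result type with `success` and `payload`
# attributes and a keyword constructor, as implied by the docstring; names are illustrative only.
@bind
def double(payload): return Result(success=True, payload=payload * 2)
double(Result(success=True, payload=21))     # task runs -> Result(success=True, payload=42)
double(Result(success=False, payload=None))  # failure passes straight through unchanged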
|
8353c22239b92b86f144d9f449c425016f1b037d
| 24,148 |
def optimal_kernel_bandwidth(spiketimes, times=None, bandwidth=None,
bootstrap=False):
"""
Calculates optimal fixed kernel bandwidth, given as the standard deviation
sigma.
Parameters
----------
spiketimes : np.ndarray
Sequence of spike times (sorted to be ascending).
times : np.ndarray, optional
Time points at which the kernel bandwidth is to be estimated.
If None, `spiketimes` is used.
Default: None.
bandwidth : np.ndarray, optional
Vector of kernel bandwidths (standard deviation sigma).
If specified, optimal bandwidth is selected from this.
If None, `bandwidth` is obtained through a golden-section search on a
log-exp scale.
Default: None.
bootstrap : bool, optional
If True, calculates the 95% confidence interval using Bootstrap.
Default: False.
Returns
-------
dict
'y' : np.ndarray
Estimated density.
't' : np.ndarray
Points at which estimation was computed.
'optw' : float
Optimal kernel bandwidth given as standard deviation sigma
'w' : np.ndarray
Kernel bandwidths examined (standard deviation sigma).
'C' : np.ndarray
Cost functions of `bandwidth`.
'confb95' : tuple of np.ndarray
Bootstrap 95% confidence interval: (lower level, upper level).
If `bootstrap` is False, `confb95` is None.
'yb' : np.ndarray
Bootstrap samples.
If `bootstrap` is False, `yb` is None.
If no optimal kernel could be found, all entries of the dictionary are
set to None.
References
----------
.. [1] H. Shimazaki, & S. Shinomoto, "Kernel bandwidth optimization in
spike rate estimation," Journal of Computational Neuroscience,
vol. 29, no. 1-2, pp. 171-82, 2010. doi:10.1007/s10827-009-0180-4.
"""
if times is None:
time = np.max(spiketimes) - np.min(spiketimes)
isi = np.diff(spiketimes)
isi = isi[isi > 0].copy()
dt = np.min(isi)
times = np.linspace(np.min(spiketimes),
np.max(spiketimes),
min(int(time / dt + 0.5),
1000)) # The 1000 seems somewhat arbitrary
t = times
else:
time = np.max(times) - np.min(times)
spiketimes = spiketimes[(spiketimes >= np.min(times)) &
(spiketimes <= np.max(times))].copy()
isi = np.diff(spiketimes)
isi = isi[isi > 0].copy()
dt = np.min(isi)
if dt > np.min(np.diff(times)):
t = np.linspace(np.min(times), np.max(times),
min(int(time / dt + 0.5), 1000))
else:
t = times
dt = np.min(np.diff(times))
yhist, bins = np.histogram(spiketimes, np.r_[t - dt / 2, t[-1] + dt / 2])
N = np.sum(yhist)
yhist = yhist / (N * dt) # density
optw = None
y = None
if bandwidth is not None:
C = np.zeros(len(bandwidth))
Cmin = np.inf
for k, w_ in enumerate(bandwidth):
C[k], yh = cost_function(yhist, N, w_, dt)
if C[k] < Cmin:
Cmin = C[k]
optw = w_
y = yh
else:
# Golden section search on a log-exp scale
wmin = 2 * dt
wmax = max(spiketimes) - min(spiketimes)
imax = 20 # max iterations
bandwidth = np.zeros(imax)
C = np.zeros(imax)
tolerance = 1e-5
phi = 0.5 * (np.sqrt(5) + 1) # The Golden ratio
a = ilogexp(wmin)
b = ilogexp(wmax)
c1 = (phi - 1) * a + (2 - phi) * b
c2 = (2 - phi) * a + (phi - 1) * b
f1, y1 = cost_function(yhist, N, logexp(c1), dt)
f2, y2 = cost_function(yhist, N, logexp(c2), dt)
k = 0
while (np.abs(b - a) > (tolerance * (np.abs(c1) + np.abs(c2)))) \
and (k < imax):
if f1 < f2:
b = c2
c2 = c1
c1 = (phi - 1) * a + (2 - phi) * b
f2 = f1
f1, y1 = cost_function(yhist, N, logexp(c1), dt)
bandwidth[k] = logexp(c1)
C[k] = f1
optw = logexp(c1)
y = y1 / (np.sum(y1 * dt))
else:
a = c1
c1 = c2
c2 = (2 - phi) * a + (phi - 1) * b
f1 = f2
f2, y2 = cost_function(yhist, N, logexp(c2), dt)
bandwidth[k] = logexp(c2)
C[k] = f2
optw = logexp(c2)
y = y2 / np.sum(y2 * dt)
k = k + 1
# Bootstrap confidence intervals
confb95 = None
yb = None
# If bootstrap is requested, and an optimal kernel was found
if bootstrap and optw:
nbs = 1000
yb = np.zeros((nbs, len(times)))
for ii in range(nbs):
idx = np.floor(np.random.rand(N) * N).astype(int)
xb = spiketimes[idx]
y_histb, bins = np.histogram(
xb, np.r_[t - dt / 2, t[-1] + dt / 2])
y_histb = y_histb / dt / N
yb_buf = fftkernel(y_histb, optw / dt).real
yb_buf = yb_buf / np.sum(yb_buf * dt)
yb[ii, :] = np.interp(times, t, yb_buf)
ybsort = np.sort(yb, axis=0)
y95b = ybsort[np.floor(0.05 * nbs).astype(int), :]
y95u = ybsort[np.floor(0.95 * nbs).astype(int), :]
confb95 = (y95b, y95u)
# Only perform interpolation if y could be calculated
if y is not None:
y = np.interp(times, t, y)
return {'y': y,
't': times,
'optw': optw,
'w': bandwidth,
'C': C,
'confb95': confb95,
'yb': yb}
|
0613665f9365c95330e766cdbf95557ef94c4d08
| 24,149 |
import logging
import time
def run_lsh_omp_coder(data, dictionary, sparsity, num_buckets=1):
"""Solve the orthogonal matching pursuit problem with LSH bucketing.
Use sklearn.linear_model.orthogonal_mp to solve the following optimization
program:
argmin ||y - X*gamma||^2,
subject to ||gamma||_0 <= n_{nonzero coefs},
where
y is 'data', size = (n_samples, n_targets),
X is 'dictionary', size = (n_samples, n_features). Columns are assumed
to have unit norm,
gamma: sparse coding, size = (n_features, n_targets).
Args:
data: The matrix y in the above program,
dictionary: The matrix X in the above program,
sparsity: n_{nonzero coefs} in the above program.
num_buckets: number of LSH buckets to use, int.
Returns:
gamma.
"""
logging.info("running LSH based sklearn.linear_model.orthogonal_mp ...")
indices = lsh_knn_map(
np.transpose(np.vstack((data, dictionary))), num_buckets, 1)
logging.info("indices shape is %s", indices.shape)
data_buckets = [[] for i in range(num_buckets)]
data_index = [[] for i in range(num_buckets)]
dict_buckets = [[] for i in range(num_buckets)]
dict_index = [[] for i in range(num_buckets)]
for i in range(data.shape[0]):
data_buckets[indices[i][0]].append(data[i, :])
data_index[indices[i][0]].append(i)
for i in range(dictionary.shape[0]):
dict_buckets[indices[data.shape[0] + i][0]].append(dictionary[i, :])
dict_index[indices[data.shape[0] + i][0]].append(i)
code = sparse.lil_matrix((data.shape[0], dictionary.shape[0]))
for i in range(num_buckets):
start_time = time.time()
if len(data_buckets[i]) > 0: # pylint: disable=g-explicit-length-test
if len(dict_buckets[i]) == 0: # pylint: disable=g-explicit-length-test
logging.error(
"lsh bucketing failed...empty bucket with no dictionary elements")
else:
small_code = sklearn.linear_model.orthogonal_mp(
np.transpose(np.vstack(dict_buckets[i])),
np.transpose(np.vstack(data_buckets[i])),
n_nonzero_coefs=sparsity)
small_code = np.transpose(small_code)
row_idx = np.asarray(data_index[i])
col_idx = np.asarray(dict_index[i])
code[row_idx[:, None], col_idx] = small_code
logging.info("running time of OMP for bucket %d = %d seconds",
i, time.time() - start_time)
return code
|
597930ec0c7da4ea4699d087ab89ebe7cd11143f
| 24,150 |
def hamming_options(seq1, seq2):
"""Calculate Hamming distance between two sequences.
Interpret ambiguity as options.
"""
sequence1 = convert_to_nosegment(seq1)
sequence2 = convert_to_nosegment(seq2)
distance = 0
for i, segment1 in enumerate(sequence1.segments):
segment2 = sequence2.segments[i]
if set(segment1.choices) & set(segment2.choices) == set():
distance += 1
return distance
|
db8379961ebe37e5490b7622bb3162faf5388eda
| 24,151 |
from typing import Tuple
import struct
def __getgyro_decoder__(port: serial.Serial, *args, **kwargs) -> Tuple[int, int, int]:
"""
Reads the gyro state from the serial port and decodes it as a (x, y, z) tuple.
"""
val = port.read(8)
if(val[7] != 0xAA):
raise Exception("Updating configuration data failed.")
x = struct.unpack(">H", val[0:2])[0]
y = struct.unpack(">H", val[2:4])[0]
z = struct.unpack(">H", val[4:6])[0]
return (x, y, z)
|
3e77e550b79dceac22d806c9f0cdf9bdedbb2b3b
| 24,152 |
def spatial_knn(coords, expression, n_neighbors=14, n_sp_neighbors=7, radius=None,
which_exprs_dims=None, sample_id=None):
"""
A variant on the standard knn neighbor graph inference procedure that also includes the spatial neighbors of each spot.
With help from Krzysztof Polanski.
:param coords: numpy.ndarray with x,y positions of spots.
:param expression: numpy.ndarray with expression of programmes / cluster expression (cols) of spots (rows).
:param n_neighbors: how many non-spatially-adjacent neighbors to report for each spot
:param n_sp_neighbors: how many spatially-adjacent neighbors to report for each spot. Use 7 for hexagonal grid.
:param radius: Supersedes `n_sp_neighbors` - radius within which to report spatially-adjacent neighbors for each spot. Pick radius based on spot size.
:param which_exprs_dims: which expression dimensions to use (cols)?
"""
# create and query spatial proximity tree within each sample
if radius is None:
coord_ind = np.zeros((coords.shape[0], n_sp_neighbors))
else:
coord_ind = np.zeros(coords.shape[0])
for sam in sample_id.unique():
coord_tree = KDTree(coords[sample_id.isin([sam]), :])
if radius is None:
coord_ind[sample_id.isin([sam]), :] = coord_tree.query(coords[sample_id.isin([sam]), :],
k=n_sp_neighbors, return_distance=False)
else:
coord_ind[sample_id.isin([sam])] = coord_tree.query_radius(coords[sample_id.isin([sam]), :],
radius, count_only=False)
# if selected dimensions not provided choose all
if which_exprs_dims is None:
which_exprs_dims = np.arange(expression.shape[1])
# print(which_exprs_dims)
# extract and index the appropriate bit of the PCA
pca = expression[:, which_exprs_dims]
ckd = cKDTree(pca)
# the actual number of neighbours - you'll get seven extra spatial neighbours in the thing
knn = n_neighbors + n_sp_neighbors
# identify the knn for each spot. this is guaranteed to contain at least n_neighbors non-adjacent spots
# this is exactly what we're after
ckdout = ckd.query(x=pca, k=knn, n_jobs=-1)
# create numeric vectors for subsetting later
numtemp = np.arange(expression.shape[0])
rowtemp = np.arange(knn)
# rejigger the neighour pool by including the spatially adjacent ones
for i in np.arange(expression.shape[0]):
# identify the spatial neighbours for the spot and compute their distance
mask = np.isin(numtemp, coord_ind[i])
# filter spatial neighbours by sample
if sample_id is not None:
mask = mask & sample_id.isin([sample_id[i]])
neigh = numtemp[mask]
ndist_temp = pca[mask, :] - pca[i, :]
ndist_temp = ndist_temp.reshape((mask.sum(), pca.shape[1]))
ndist = np.linalg.norm(ndist_temp, axis=1)
# how many non-adjacent neighbours will we get to keep?
# (this fluctuates as e.g. edge spots will have fewer hex neighbours)
kpoint = knn - len(neigh)
# the indices of the top kpoint number of non-adjacent neighbours (by excluding adjacent ones from the set)
inds = rowtemp[[nb not in neigh for nb in ckdout[1][i, :]]][:kpoint]
# keep the identified top non-adjacent neighbours
ckdout[0][i, :kpoint] = ckdout[0][i, inds]
ckdout[1][i, :kpoint] = ckdout[1][i, inds]
# add the spatial neighbours in the remaining spots of the knn graph
ckdout[0][i, kpoint:] = ndist
ckdout[1][i, kpoint:] = neigh
# sort each row of the graph in ascending distance order
# (sometimes spatially adjacent neighbours are some of the top ones)
knn_distances, knn_indices = ckdout
newidx = np.argsort(knn_distances, axis=1)
knn_indices = knn_indices[np.arange(np.shape(knn_indices)[0])[:, np.newaxis], newidx]
knn_distances = knn_distances[np.arange(np.shape(knn_distances)[0])[:, np.newaxis], newidx]
# compute connectivities and export as a dictionary
dist, cnts = compute_connectivities_umap(knn_indices, knn_distances, knn_indices.shape[0], knn_indices.shape[1])
neighbors = {'distances': dist,
'connectivities': cnts,
'params': {'n_neighbors': n_neighbors + n_sp_neighbors,
'method': 'spot_factors2knn', 'metric': 'euclidean'}}
return neighbors
|
0ed0052890b8ae5925d39bb001dedff47d824677
| 24,153 |
def parse_q(s):
"""Parse the value of query string q (?q=) into a search sub-term."""
if '=' not in s:
names = s.split()
term = '/'.join(map(lambda x: 'n.name=' + x, names))
return term
else:
subterms = s.split()
res = []
for subterm in subterms:
if '=' not in subterm:
res.append('n.name=' + subterm)
else:
res.append(subterm)
term = '&'.join(res)
return term
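# Worked examples of the two branches:
parse_q("alice bob")          # no '=': -> 'n.name=alice/n.name=bob'
parse_q("alice role=admin")   # mixed:  -> 'n.name=alice&role=admin'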
|
eae907fcb42be4a2c4be26316721ea63aa0284d6
| 24,154 |
def getCompleteData(client , response ,comp):
"""
This function is useful to receive missing data in tcp packet
Input :
Client = Tcp Object which interact with host end and client
response = received response from the host end
comp = comparitive struct defined by tcp packet
Output :
response = returns missing concatenated bytes data
or say whole packet
"""
remaining = comp.size - len(response)
while(remaining > 0 ):
read = client.recv(remaining)
response += read
remaining -= len(read)
return response
|
0f3ff5785046771f295a65116e0f79b5c7e45525
| 24,155 |
from sage.rings import sum_of_squares
from sage.rings.finite_rings.integer_mod import Mod
def two_squares(n):
"""
Write the integer `n` as a sum of two integer squares if possible;
otherwise raise a ``ValueError``.
INPUT:
- ``n`` -- an integer
OUTPUT: a tuple `(a,b)` of non-negative integers such that
`n = a^2 + b^2` with `a <= b`.
EXAMPLES::
sage: two_squares(389)
(10, 17)
sage: two_squares(21)
Traceback (most recent call last):
...
ValueError: 21 is not a sum of 2 squares
sage: two_squares(21^2)
(0, 21)
sage: a,b = two_squares(100000000000000000129); a,b
(4418521500, 8970878873)
sage: a^2 + b^2
100000000000000000129
sage: two_squares(2^222+1)
(253801659504708621991421712450521, 2583712713213354898490304645018692)
sage: two_squares(0)
(0, 0)
sage: two_squares(-1)
Traceback (most recent call last):
...
ValueError: -1 is not a sum of 2 squares
TESTS::
sage: for _ in range(100):
....: a = ZZ.random_element(2**16, 2**20)
....: b = ZZ.random_element(2**16, 2**20)
....: n = a**2 + b**2
....: aa,bb = two_squares(n)
....: assert aa**2 + bb**2 == n
ALGORITHM:
See http://www.schorn.ch/howto.html
"""
n = ZZ(n)
if n <= 0:
if n == 0:
z = ZZ.zero()
return (z, z)
raise ValueError("%s is not a sum of 2 squares"%n)
if n.nbits() <= 32:
return sum_of_squares.two_squares_pyx(n)
# Start by factoring n (which seems to be unavoidable)
F = n.factor(proof=False)
# First check whether it is possible to write n as a sum of two
# squares: all prime powers p^e must have p = 2 or p = 1 mod 4
# or e even.
for (p,e) in F:
if e % 2 == 1 and p % 4 == 3:
raise ValueError("%s is not a sum of 2 squares"%n)
# We run over all factors of n, write each factor p^e as
# a sum of 2 squares and accumulate the product
# (using multiplication in Z[I]) in a^2 + b^2.
a = ZZ.one()
b = ZZ.zero()
for (p,e) in F:
if e >= 2:
m = p ** (e//2)
a *= m
b *= m
if e % 2 == 1:
if p == 2:
# (a + bi) *= (1 + I)
a,b = a - b, a + b
else: # p = 1 mod 4
# Find a square root of -1 mod p.
# If y is a non-square, then y^((p-1)/4) is a square root of -1.
y = Mod(2,p)
while True:
s = y**((p-1)/4)
if not s*s + 1:
s = s.lift()
break
y += 1
# Apply Cornacchia's algorithm to write p as r^2 + s^2.
r = p
while s*s > p:
r,s = s, r % s
r %= s
# Multiply (a + bI) by (r + sI)
a,b = a*r - b*s, b*r + a*s
a = a.abs()
b = b.abs()
assert a*a + b*b == n
if a <= b:
return (a,b)
else:
return (b,a)
|
d07edc88a6b4c264c3df910cdbcdd80ce93320ff
| 24,156 |
def getattr_by_path(obj, attr, *default):
"""Like getattr(), but can go down a hierarchy like 'attr.subattr'"""
value = obj
for part in attr.split('.'):
if not hasattr(value, part) and len(default):
return default[0]
value = getattr(value, part)
if callable(value):
value = value()
return value
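# A minimal usage sketch: walks 'attr.subattr' chains and calls any callables it encounters.
class _Inner: name = "demo"
class _Outer: inner = _Inner()
getattr_by_path(_Outer(), "inner.name")           # -> 'demo'
getattr_by_path(_Outer(), "inner.missing", None)  # -> None (the supplied default)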
|
3eccbb39e1781a75a6f0061c1c226cefdcfb17c8
| 24,157 |
def entries_to_files(entry_ids):
"""
Format file details (retrieved using the files' entry IDs) to API expectations to include files in API call.
parameter: (list) entry_ids
List of entry ID strings for files uploaded to the warroom
returns:
List of attachment field, value tuples formatted according to API expectations
"""
attachments = []
for entry_id in entry_ids:
execute_results = demisto.getFilePath(entry_id)
file_path = execute_results['path']
file_name = execute_results['name']
attachments.append(('attachments[]', (file_name, open(file_path, 'rb'))))
return attachments
|
da21da1f9068c55738b029e190e6206e63373a03
| 24,158 |
def g_mult(a, b, p):
"""Multiply two polynomials given the irreducible polynomial of a GF"""
c = [i % 2 for i in mult(a, b)]
c, p = lenshift(c, p)
return div(c, p)
|
f1fc9fe52381faaaf23b4fa3d4767aad1bf07d35
| 24,159 |
def data_augmentation_fn(input_image: tf.Tensor, label_image: tf.Tensor, flip_lr: bool=True,
flip_ud: bool=True, color: bool=True) -> (tf.Tensor, tf.Tensor):
"""Applies data augmentation to both images and label images.
Includes left-right flip, up-down flip and color change.
:param input_image: images to be augmented [B, H, W, C]
:param label_image: corresponding label images [B, H, W, C]
:param flip_lr: option to flip image in left-right direction
:param flip_ud: option to flip image in up-down direction
:param color: option to change color of images
:return: the tuple (augmented images, augmented label images) [B, H, W, C]
"""
with tf.name_scope('DataAugmentation'):
if flip_lr:
with tf.name_scope('random_flip_lr'):
sample = tf.random_uniform([], 0, 1)
label_image = tf.cond(sample > 0.5, lambda: tf.image.flip_left_right(label_image), lambda: label_image)
input_image = tf.cond(sample > 0.5, lambda: tf.image.flip_left_right(input_image), lambda: input_image)
if flip_ud:
with tf.name_scope('random_flip_ud'):
sample = tf.random_uniform([], 0, 1)
label_image = tf.cond(sample > 0.5, lambda: tf.image.flip_up_down(label_image), lambda: label_image)
input_image = tf.cond(sample > 0.5, lambda: tf.image.flip_up_down(input_image), lambda: input_image)
channels = input_image.get_shape()[-1]
if color:
input_image = tf.image.random_contrast(input_image, lower=0.8, upper=1.0)
if channels == 3:
input_image = tf.image.random_hue(input_image, max_delta=0.1)
input_image = tf.image.random_saturation(input_image, lower=0.8, upper=1.2)
return input_image, label_image
|
512be3bb3bcce8e493aa930a0d76dc7232e372b8
| 24,160 |
def get_stack_value(stack, key):
"""Get metadata value from a cloudformation stack."""
for output in stack.outputs:
if output['OutputKey'] == key:
return output['OutputValue']
|
a6b193c7d884bac78668dfd85bc2a5cbbb6b3f3b
| 24,161 |
def cmd_example_cmd_as_module(mixcli: MixCli, **kwargs):
"""
This function would be called by the processing of ArgumentParser. The contract is the positional argument
would be a MixCli instance and the rest are passed as keyword arguments.
We recommend to name this function as cmd_<name_of_group>_<name_of_command>
:param mixcli: a MixCli instance
:param kwargs: keyword arguments
:return:
"""
mixcli.info("example command group cmd_as_module ArgumentParser support function")
example_cmd_as_module(mixcli.httpreq_handler)
return True
|
5e1450997fa439ae83c102d6ad80e903f47112fb
| 24,162 |
def display_credentials(user_name):
"""
Function to display saved account credentials
"""
return Credentials.display_credentials(user_name)
|
7f260b5a38847dcef0d50f5723f72044b77b824f
| 24,163 |
import tqdm
def sk_learn_bootstrap(x, y, z, design_matrix, kf_reg, N_bs=100,
test_percent=0.4, print_results=True):
"""Sci-kit learn bootstrap method."""
x_train, x_test, y_train, y_test = sk_modsel.train_test_split(
np.c_[x.ravel(), y.ravel()], z.ravel(),
test_size=test_percent, shuffle=False)
# Ensures we are on axis shape (N_observations, N_predictors)
y_test = y_test.reshape(-1, 1)
y_train = y_train.reshape(-1, 1)
y_pred = np.empty((y_test.shape[0], N_bs))
X_test = design_matrix(x_test)
R2_ = np.empty(N_bs)
mse_ = np.empty(N_bs)
bias2_ = np.empty(N_bs)
beta_coefs = []
X_train = design_matrix(x_train)
for i_bs in tqdm(range(N_bs), desc="SciKit-Learn bootstrap"):
x_boot, y_boot = sk_utils.resample(X_train, y_train)
# x_boot, y_boot = sk_utils.resample(x_train, y_train)
# X_boot = design_matrix(x_boot)
kf_reg.fit(x_boot, y_boot)
# y_pred[:, i_bs] = kf_reg.predict(cp.deepcopy(x_test)).ravel()
y_predict = kf_reg.predict(X_test)
# print(sk_metrics.r2_score(y_test.flatten(), y_pred[:,i_bs].flatten()))
# R2_[i_bs] = sk_metrics.r2_score(y_test.flatten(), y_pred[:,i_bs].flatten())
# R2_[i_bs] = metrics.R2(y_test, y_predict)
# mse_[i_bs] = metrics.mse(y_test.flatten(), y_pred[:, i_bs].flatten())
# bias2_[i_bs] = metrics.bias2(
# y_test.flatten(), y_pred[:, i_bs].flatten())
y_pred[:, i_bs] = y_predict.ravel()
beta_coefs.append(kf_reg.coef_)
# R2 = np.mean(R2_)
# # print("R2 from each bs step = ",R2)
# # # MSE = mse_.mean()
# # # bias = bias2_.mean()
# # R2 = np.mean(R2_list)
# # R2 = (1 - np.sum(np.average((y_test - y_pred)**2, axis=1)) /
# # np.sum((y_test - np.average(y_test)**2)))
# # print(R2)
# print(y_test.shape, y_pred.shape)
# s1 = np.sum((np.mean((y_test - y_pred)**2, axis=1)))
# s2 = np.sum((y_test - np.mean(y_test))**2)
# print ("R2=",1 - s1/s2)
# R2 = (1 - np.sum(np.mean((y_test - y_pred)**2, axis=0, keepdims=True),keepdims=True) /
# np.sum((y_test - np.mean(y_test, keepdims=True)**2,),keepdims=True))
# print(R2.mean())
# R2 = R2.mean()
R2 = np.mean(metrics.R2(y_test, y_pred, axis=0))
# Mean Square Error, mean((y - y_approx)**2)
_mse = ((y_test - y_pred))**2
MSE = np.mean(np.mean(_mse, axis=1, keepdims=True))
# Bias, (y - mean(y_approx))^2
_mean_pred = np.mean(y_pred, axis=1, keepdims=True)
bias = np.mean((y_test - _mean_pred)**2)
# Variance, var(y_predictions)
var = np.mean(np.var(y_pred, axis=1, keepdims=True))
beta_coefs_var = np.asarray(beta_coefs).var(axis=0)
beta_coefs = np.asarray(beta_coefs).mean(axis=0)
# # R^2 score, 1 - sum((y-y_approx)**2)/sum((y-mean(y))**2)
# y_pred_mean = np.mean(y_pred, axis=1)
# _y_test = y_test.reshape(-1)
# print ("R2:", metrics.R2(_y_test, y_pred_mean))
# _s1 = np.sum(((y_test - y_pred))**2, axis=1, keepdims=True)
# _s2 = np.sum((y_test - np.mean(y_test))**2)
# print (_s1.mean(), _s2)
# R2 = 1 - _s1.mean()/_s2
# print(np.array([sk_metrics.r2_score(y_test, y_pred[:,i]) for i in range(N_bs)]).mean())
# R2 = metrics.R2(y_test, y_pred, axis=1)
# R2 = np.mean(metrics.R2(y_test, y_pred, axis=1))
# print(np.mean(metrics.R2(y_test, y_pred, axis=1)))
# R2 = R2.mean()
# print(R2.mean())
if print_results:
print("R2: {:-20.16f}".format(R2))
print("MSE: {:-20.16f}".format(MSE))
print("Bias^2:{:-20.16f}".format(bias))
print("Var(y):{:-20.16f}".format(var))
print("Beta coefs: {}".format(beta_coefs))
print("Beta coefs variances: {}".format(beta_coefs_var))
print("Diff: {}".format(abs(MSE - bias - var)))
results = {
"y_pred": np.mean(y_pred, axis=1),
"y_pred_var": np.var(y_pred, axis=1),
"mse": MSE,
"r2": R2,
"var": var,
"bias": bias,
"beta_coefs": beta_coefs,
"beta_coefs_var": beta_coefs_var,
"beta_95c": np.sqrt(beta_coefs_var)*2,
"diff": abs(MSE - bias - var),
}
return results
|
38a22f6fdb2858efdf720f87c573dd7684b33000
| 24,165 |
def vAdd(v, w):
""" Return a new Vector, which is the result of v + w """
return Vector(v[0] + w[0], v[1] + w[1], v[2] + w[2])
|
9f8dcc6026093164f39479a02252dbab035bbde9
| 24,166 |
def dynamic_vm_values(trace, code_start=BADADDR, code_end=BADADDR, silent=False):
"""
Find the virtual machine context necessary for an automated static analysis.
code_start = the bytecode start -> often the param for vm_func and usually starts right after vm_func
code_end = the bytecode end -> bytecode usually a big chunk, so if we identify several x86/x64 inst in a row we reached the end
base_addr = startaddr of the jmp table -> most often used offset in the vm_trace
vm_addr = startaddr of the vm function -> biggest function in .vmp segment,
:param trace: instruction trace
:return: vm_ctx -> [code_start, code_end, base_addr, vm_func_addr, vm_funcs]
"""
base_addr = defaultdict(lambda: 0)
vm_addr = find_vm_addr(deepcopy(trace))
trace, vm_seg_start, vm_seg_end = extract_vm_segment(trace)
code_addrs = []
# try finding code_start
if code_start == BADADDR:
code_start = GetFunctionAttr(vm_addr, FUNCATTR_END)#NextHead(GetFunctionAttr(vm_addr, FUNCATTR_END), vm_seg_end)
code_start = NextHead(code_start, BADADDR)
while isCode(code_start):
code_start = NextHead(code_start, BADADDR)
for line in trace:
# construct base addr dict of offsets -> jmp table should be the one most used
if len(line.disasm) == 2:
try:
offset = re.findall(r'.*:off_([0123456789abcdefABCDEF]*)\[.*\]', line.disasm[1])[0]
base_addr[offset] += 1
except:
pass
# code_start additional search of vm_func params
if line.addr == vm_addr:
for l in trace[:trace.index(line)]:
if l.disasm[0] == 'push':
try:
arg = re.findall(r'.*_([0123456789ABCDEFabcdef]*)', l.disasm[1])
if len(arg) == 1:
code_addrs.append(int(arg[0], 16))
except Exception, e:
print e.message
# finalize base_addr
max_addr = int(max(base_addr, key=base_addr.get), 16) # now we have the base_addr used for offset computation - this will probably be the top of the table but to be sure we need to take its relative position into account
base_addr = max_addr
while GetMnem(PrevHead(base_addr)) == '':
base_addr = PrevHead(base_addr)
# finalize code_start
if not silent:
if code_start not in code_addrs:
code_start = AskAddr(code_start, "Start of bytecode mismatch! Found %x but parameter for vm seem to be %s" % (code_start, [hex(c) for c in code_addrs]))
# code_end -> follow code_start until data becomes code again
if code_end == BADADDR:
code_end = vm_seg_end
# while code_end < vm_seg_end:
# code_end = NextHead(code_end, vm_seg_end)
# if isCode(code_end):
# break
vm_ctx = VMContext()
vm_ctx.code_start = code_start
vm_ctx.code_end = code_end
vm_ctx.base_addr = base_addr
vm_ctx.vm_addr = vm_addr
print code_start, code_end, base_addr, vm_addr
return vm_ctx
|
f970275ad4a2df59f81fb1bbd1f80149e92a9faf
| 24,167 |
def getAddresses(pkt):
"""
0: ('dst', 'src', 'bssid', None), from sta to sta
1: ('dst', 'bssid', 'src', None), out of ds
2: ('bssid', 'src', 'dst', None), in ds
3: ('recv', 'transl', 'dst', 'src') between dss
"""
f = pkt.FCfield & 3 # to-DS and from-DS
if f == 0:
adrs = ('destination', 'source', 'bssid', None)
elif f == 1:
adrs = ('bssid', 'source', 'destination', None)
elif f == 2:
adrs = ('destination', 'bssid', 'source', None)
else:
adrs = (None, 'bssid', 'destination', 'source')
pktAddrs = (pkt.addr1, pkt.addr2, pkt.addr3, pkt.addr4)
class Dummy:
def __init__(self, *pargs, **kwargs):
self.__dict__.update(kwargs)
kw = dict(zip(adrs, pktAddrs))
del kw[None]
r = Dummy(**kw)
r.f = f
return r
|
39056a61992b4dce8d8e5f28e3d274526f940787
| 24,168 |
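A minimal usage sketch for the getAddresses helper above. It assumes Scapy is installed and the function is in scope; the frame below has only the to-DS bit set, so addr1/addr2/addr3 map to bssid/source/destination.

from scapy.all import Dot11

# to-DS = 1, from-DS = 0 -> addr1 = bssid, addr2 = source, addr3 = destination
frame = Dot11(FCfield=1,
              addr1="00:11:22:33:44:55",   # BSSID
              addr2="66:77:88:99:aa:bb",   # source station
              addr3="cc:dd:ee:ff:00:11")   # destination

addrs = getAddresses(frame)
print(addrs.bssid, addrs.source, addrs.destination, addrs.f)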
def _ldmodule_soversion(target, source, env, for_signature):
"""Function to determine what to use for SOVERSION"""
if 'SOVERSION' in env:
return '.$SOVERSION'
elif 'LDMODULEVERSION' in env:
ldmod_version = env.subst('$LDMODULEVERSION')
# We use only the most significant digit of LDMODULEVERSION
return '.' + ldmod_version.split('.')[0]
else:
return ''
|
21d84d9ed8bc4a186d4619b51318c4a2bd780adb
| 24,169 |
from random import choice, randrange
def jitter(
grid: Grid,
min_variance: int = None,
max_variance: int = None,
size: int = None,
clamp: bool = False,
variance_list: list[int] = None,
) -> Grid:
"""Randomly jitter all points in a grid
    Jitter will apply to both the x and y axes of the grid
    If a variance list is given, each point will be jittered by a random value from the jitter list
    If one of min_variance or max_variance is specified, points will be jittered from -v to v
    If both min_variance and max_variance are specified, points will be jittered from -max to -min or min to max
Args:
grid (Grid): Grid points to jitter
min_variance (int, optional): Minimum jitter amount. Defaults to None.
max_variance (int, optional): Maximum jitter amount. Defaults to None.
size (int, optional): Grid size - useful for clamping. Defaults to None.
clamp (bool, optional): Whether to stop points leaving the bounds. Defaults to False.
variance_list (list[int], optional): List of possible jitter amounts. Defaults to None.
Returns:
Grid: Transformed grid, with each point 'jittered'
"""
# If no size is specified, grab the largest point we have
# if jittering a grid twice this could go badly...
if size is None:
size = max(grid[0], key=lambda x: x[0])[0]
# Argument handling - there's a few cases
# This jit function is then applied to each point to spice em up
if variance_list is not None and len(variance_list) > 0:
def jit(val):
return val + choice(variance_list)
elif min_variance is None and max_variance is None:
def jit(val):
return val
elif min_variance is None and max_variance is not None:
def jit(val):
return val + choice([-1, 1]) * randrange(0, max_variance)
elif max_variance is None and min_variance is not None:
def jit(val):
return val + choice([-1, 1]) * randrange(0, min_variance)
    elif min_variance >= max_variance:
        def jit(val):
            return val + choice([-1, 1]) * min_variance
    else:
        # both bounds given with min < max: jitter by a random magnitude in [min, max)
        def jit(val):
            return val + choice([-1, 1]) * randrange(min_variance, max_variance)
def clampf(x):
# Clamp a point 0 <= x <= size *only* if the clamp flag is enabled
if clamp:
return max(0, min(x, size))
else:
return x
# Jit (and optionally clamp) all points in the grid
return [[(clampf(jit(xx)), clampf(jit(yy))) for (xx, yy) in row] for row in grid]
|
968c601f4c9b3b214fb2c8444e2ea252a40e75a7
| 24,170 |
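A quick usage sketch for the jitter function above (relying on the min/max branch filled in there). It assumes Grid is simply a list of rows of (x, y) integer tuples and that Python 3.9+ is available for the built-in generic type hints.

from random import seed

seed(42)  # reproducible jitter

# a 3x3 grid of points spaced 10 apart
grid = [[(x * 10, y * 10) for x in range(3)] for y in range(3)]

# jitter each coordinate by +/- 1..4, clamped to the 0..20 bounding box
wobbly = jitter(grid, min_variance=1, max_variance=5, size=20, clamp=True)
for row in wobbly:
    print(row)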
import re
def loadFasta(fa, sep=None, term=None, nfilter=None):
"""Returns a kapow.Array() with the contents of the file interpreted as a FASTA file, using sep if given."""
def _boundary2int(num, n, start=False):
if num == '': return 0 if start else n
mult = 1
if num[-1] == 'K': mult = 1000
elif num[-1] == 'M': mult = 1000000
elif num[-1] == 'G': mult = 1000000000
elif num[-1] == 'T': mult = 1000000000000
else: num = num + '_'
num = int(num[:-1]) * mult
if num < 0: return n + num
return num
m = re.match(r'(.*)\[([0-9]*[TGMK]?):([0-9]*[TGMK]?)\]', fa)
if m:
fa, left, right = m.groups()
res = loadFile(fa)
ores = kapow.fa_copy_cont_bind(res, sep)
# Filters the long lists of Ns
if nfilter:
ores, lres = kapow.fa_strip_n_bind(ores)
# Binds the desired portion
if m:
ores = ores[_boundary2int(left, len(ores), True):_boundary2int(right, len(ores), False)]
    if term is not None:
if ores.size < res.size:
ores = res[0:ores.size+1]
else:
ores = kapow.Array(res.size+1,1)
ores.memcpy(res)
del res
ores[ores.size-1] = term
return ores
|
982fa20121c089ed92e83f31590ad5313f6cda00
| 24,171 |
def reverse_bits(counter) -> int:
"""
Reverses the order of the bits in the given counter
    :param counter: an 8-bit value
    :return: the value with its bit order reversed
"""
# From Elephant reference code (elephant160v2 > spongent.c > retnuoCl)
return ((counter & 0x01) << 7) | ((counter & 0x02) << 5) | ((counter & 0x04) << 3) \
| ((counter & 0x08) << 1) | ((counter & 0x10) >> 1) | ((counter & 0x20) >> 3) \
| ((counter & 0x40) >> 5) | ((counter & 0x80) >> 7)
|
290f62d794e5d17c4b277a714151523835bc6c16
| 24,172 |
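A small sanity check for reverse_bits above: a set low bit moves to the top bit, and reversing any 8-bit value twice gives the value back.

assert reverse_bits(0b0000_0001) == 0b1000_0000
assert reverse_bits(0b1101_0010) == 0b0100_1011
assert all(reverse_bits(reverse_bits(v)) == v for v in range(256))
print("reverse_bits round-trips all 8-bit values")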
import requests
def check_quota():
"""
Check quota for the RANDOM.ORG API
:return: True if the request is successful AND there is remaining quota available
"""
resp = requests.request('GET', 'https://www.random.org/quota/?format=plain')
if resp.status_code != 200 or int(resp.text) <= 0:
return False
return True
|
ba882714a17dc70fcdc45de5695b139d4e766fbc
| 24,173 |
def idwt2d(input_node, wavelet, levels=1):
"""
Constructs a TF graph that computes the 2D inverse DWT for a given wavelet.
Args:
input_node (tf.placeholder): Input signal. A 3D tensor with dimensions
as [rows, cols, channels]
wavelet (tfwavelets.dwtcoeffs.Wavelet): Wavelet object.
levels (int): Number of levels.
Returns:
Output node of IDWT graph.
"""
c = int(input_node.shape[2])
results = []
for i in range(c):
results.append(
idwt2d_singlechannel(input_node[:,:,i:i+1], wavelet, levels=levels)
)
return tf.concat(results, axis=-1)
|
e75e5b656216475d3148e3ffabb53cba1a509f82
| 24,174 |
def readin_rho(filename, rhofile=True, aniso=False):
"""Read in the values of the resistivity in Ohmm.
The format is variable: rho-file or mag-file.
"""
if aniso:
a = [[0, 1, 2], [2, 3, 4]]
else:
a = [0, 2]
if rhofile:
if filename is None:
filename = 'rho/rho.dat'
with open(filename, 'r') as fid:
mag = np.loadtxt(fid, skiprows=1, usecols=(a[0]))
else:
if filename is None:
filename = read_iter()
with open(filename, 'r') as fid:
mag = np.power(10, np.loadtxt(fid, skiprows=1, usecols=(a[1])))
return mag
|
fb6a602ca2f3218b1feac755adf818f6b019894e
| 24,175 |
from typing import Dict
def cofense_report_image_download_command(client: Client, args: Dict[str, str]) -> dict:
"""
Downloads the image for a specific report.
:type client: ``Client``
:param client: Client object to be used.
:type args: ``Dict[str, str]``
:param args: The command arguments provided by the user.
:return: File Result
:rtype: ``dict``
"""
report_id = args.get("id")
if not report_id:
raise ValueError(MESSAGES["REQUIRED_ARGUMENT"].format("id"))
image_type = args.get("type", DEFAULT_REPORT_IMAGE_TYPE).lower()
if not image_type:
image_type = DEFAULT_REPORT_IMAGE_TYPE
if image_type not in VALID_IMAGE_TYPE:
raise ValueError(MESSAGES["INVALID_IMAGE_TYPE"])
# Appending the id and type to the url_suffix
url_suffix = URL_SUFFIX["REPORT_IMAGE_DOWNLOAD"].format(report_id, image_type)
headers = {
"Accept": f"image/{image_type if image_type == DEFAULT_REPORT_IMAGE_TYPE else 'jpeg'}"
}
# Sending http request
raw_response = client.http_request(url_suffix, resp_type="content", headers=headers)
filename = f"Report ID - {report_id}.{image_type}"
return fileResult(filename, data=raw_response, file_type=entryTypes["image"])
|
3d1d13697a183f612e4b445e201b9abc620f82c8
| 24,176 |
def random_indices(batch_size, num_samples):
"""\
Generate a random sequence of indices for a batch.
:param batch_size: length of the random sequence to generate
:param num_samples: number of samples available, i.e., maximum value to
include in the random sequence + 1
:return: list of integers
"""
return _eddl.random_indices(batch_size, num_samples)
|
027a4f964efa2741cdbec0f4c20cd8f873cdc420
| 24,177 |
def diff_first_last(L, *opArg):
"""
(list) -> boolean
Precondition: len(L) >= 2
Returns True if the first item of the list is different from the last; else returns False.
>>> diff_first_last([3, 4, 2, 8, 3])
False
>>> diff_first_last(['apple', 'banana', 'pear'])
True
>>> diff_first_last([4.0, 4.5])
True
--- Additional Test Cases ---
>>> diff_first_last(3, 4, 2, 8, 3)
False
>>> diff_first_last('apple', 'banana', 'pear')
True
>>> diff_first_last([5, 4], 4, 5, 4)
True
>>> diff_first_last([5, 4], 4, [5, 4])
False
>>> diff_first_last('eeee')
    Invalid length. Nothing to compare input to.
    >>> diff_first_last([5])
    Invalid length. Nothing to compare input to.
    Additional test cases show that the function can handle non-list inputs
    of various kinds, as well as invalid inputs of various kinds.
"""
print ()
print ('---Checking if first and last values are unequal---')
print ('Input is: ', L, *opArg)
if not opArg:
if type(L) == str:
print ('Invalid length. Nothing to compare input to.')
return None
elif len(L) >= 2:
print (L[0] != L[-1])
return (L[0] != L[-1])
else:
print ('Invalid length. Nothing to compare input to.')
return None
else:
print (L != opArg[-1])
return (L != opArg[-1])
|
30d6afe76c4fdf759d4a989a5c9cc8b4eb8c62c1
| 24,178 |
import time
def update_on_table(df: pd.DataFrame, keys: update_key_type, values: update_key_type, table_name: str,
engine: sa.engine.base.Engine, schema: str) -> int:
"""
:param df: a dataframe with data tha needs to be updated. Must have columns to be used as key and some for values
:param keys: the set of columns to use as key, i.e. update when matched
:param values: the set of columns to update, i.e. set when matched
:param table_name: a table name as in util_function
:param engine: the sqlalchemy engine for the database
:param schema: a schema of interest - None if default schema of database is ok
:return: the number of records updated
"""
# get table
tbl = util_function(table_name, engine, schema)
# change nan to None, make sure columns are modified so that we can easily bindparam
df_ = df.copy()
df_.columns = [f"{el.lower()}_updt" for el in df_.columns]
groups = toolz.partition_all(CHUNK_SIZE, df_.where(pd.notnull(df_), None).to_dict(orient='records'))
if not isinstance(keys, tuple) and not isinstance(keys, dict):
raise BadArgumentType("keys and values must either be both tuples or both dicts", None)
# create where clause, and update statement
update_statement: dml.Update
if isinstance(keys, tuple):
if not isinstance(values, tuple):
raise BadArgumentType("keys and values must either be both tuples or both dicts", None)
where = [tbl.c[el] == sa.bindparam(f"{el.lower()}_updt") for el in keys]
update_statement = tbl.update().where(sa.and_(*where)).values(
dict((a, sa.bindparam(f"{a.lower()}_updt")) for a in values)
)
if isinstance(keys, dict):
if not isinstance(values, dict):
raise BadArgumentType("keys and values must either be both tuples or both dicts", None)
where = [tbl.c[k] == sa.bindparam(f"{v.lower()}_updt") for k, v in keys.items()]
update_statement = tbl.update().where(sa.and_(*where)).values(
dict((k, sa.bindparam(f"{v.lower()}_updt")) for k, v in values.items())
)
# update
count, last_successful_update = 0, None
with engine.connect() as connection:
for group in groups:
try:
result = connection.execute(update_statement, group)
last_successful_update = group[-1]
count += result.rowcount
except exc.OperationalError as _:
# try again
time.sleep(2)
try:
result = connection.execute(update_statement, group)
last_successful_update = group[-1]
count += result.rowcount
except exc.OperationalError as e:
raise OperationalError(
f"Failed to update records. Last successful update: {last_successful_update}", e
)
return count
|
661f07ab08a10119a8d018dcd652ca972b2385e5
| 24,179 |
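A hedged usage sketch for update_on_table above. The engine URL, the schema, and the users table with id/name columns are assumptions for illustration only; they are not part of the original snippet.

import pandas as pd
import sqlalchemy as sa

# hypothetical database and table layout
engine = sa.create_engine("postgresql://user:password@localhost/appdb")

changes = pd.DataFrame({
    "id": [1, 2, 3],
    "name": ["Ada", "Grace", "Linus"],
})

# match rows on "id", overwrite "name" where matched
updated = update_on_table(
    changes,
    keys=("id",),
    values=("name",),
    table_name="users",
    engine=engine,
    schema=None,
)
print(f"{updated} rows updated")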
def administrar_investigaciones(request,tipo):
"""Administrar investigaciones seleccionadas, para ser eliminadas o finalizadas"""
if request.method == 'POST':
ids = request.POST.getlist('checks[]')
if tipo == "incorrecto":
            # These investigations were selected to be marked as incorrectly finished
for id in ids:
investigacion = Investigacion.objects.filter(id=id)
for c in investigacion:
if c.propietario == request.user:
c.finalizado_incorrecto = True
c.save()
else:
c.usuario.remove(request.user.id)
else:
            # These investigations were selected to be marked as correctly finished
for id in ids:
investigacion = Investigacion.objects.filter(id=id)
for c in investigacion:
if c.propietario == request.user:
c.finalizado_correcto = True
c.save()
else:
c.usuario.remove(request.user.id)
return HttpResponseRedirect(reverse('investigaciones'))
|
d2b5f02e051a51fe7993680e9015da77847e3f37
| 24,180 |
def container_images_prepare_defaults():
"""Return default dict for prepare substitutions
This can be used as the mapping_args argument to the
container_images_prepare function to get the same result as not specifying
any mapping_args.
"""
return KollaImageBuilder.container_images_template_inputs()
|
511eed4f0582476264387dc875b2d3ac849d3772
| 24,181 |
def discriminantcontrast(x, y, con, w):
"""return discriminant contrast (LDC, crossnobis, CV-Mahalanobis, whatever)."""
betas = lsbetas(x, y)
conest = con @ betas
return np.sum(conest * w, axis=1)
|
aa3043a568cecedb5180f0618277c05b59cb11c9
| 24,182 |
def calculateCosine(point, origin):
"""
    calculate the cosine of the polar angle of the given point relative to the origin point
"""
x1, y1 = point
x0, y0 = origin
if y1 == y0:
return 1.0
return round((x1 - x0) / calculateDistance(point, origin), ROUND)
|
19a31cc8318e54cca6c3475550cb29572bf6c2a3
| 24,183 |
def ValidClassWmi(class_name):
"""
    Tells whether this class of our ontology exists on a given WMI server, whatever the namespace is.
    This is used to decide whether to display the WMI URL associated with a Survol object.
    This is not an absolute rule.
"""
return class_name.startswith(("CIM_", "Win32_", "WMI_"))
|
f3fda0492bb42cefaba8a0226cb13558907bf995
| 24,184 |
def results():
"""Calculate results and route to results page"""
# get user input
user_input = dict(request.args)
user_titles = []
for x in user_input.keys():
if x == "algo":
algo_choice = user_input[x]
else:
user_titles.append(user_input[x])
# construct array
input_array = rec.create_array(user_titles, matrix)
# provide recommendations
if algo_choice == "NMF":
recommendations = rec.recommend_nmf(input_array, user_titles, matrix)
elif algo_choice == "CoSim":
recommendations = rec.recommend_cosim(input_array, user_titles, matrix)
return render_template("results.html", movies_html=recommendations)
|
2ba067dd4fad484d6f098286f1b8e7a7d977b8f2
| 24,185 |
from typing import MutableMapping
def flatten(d, separator='_', parent_key=None):
"""
Converts a nested hierarchy of key/value object (e.g. a dict of dicts) into a flat (i.e. non-nested) dict.
:param d: the dict (or any other instance of collections.MutableMapping) to be flattened.
:param separator: the separator to use when concatenating nested key names into flattened key names.
:param parent_key: used internally for recursion.
:return: a flattened dict (i.e. containing no nested dicts as values).
"""
if separator is None:
separator = '_'
if parent_key is None:
parent_key = ''
dict_type = dict if d is None else type(d)
items = []
for k, v in d.items():
new_key = parent_key + separator + k if parent_key else k
if isinstance(v, MutableMapping):
items.extend(flatten(v, separator=separator, parent_key=new_key).items())
else:
items.append((new_key, v))
return dict_type(items)
|
d07daba5007c4c4efee1ccb2033a42e9a52a7efb
| 24,186 |
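A short example of the flatten helper above on a nested dict; the key names are made up for illustration.

nested = {
    "server": {
        "host": "localhost",
        "port": 8080,
        "tls": {"enabled": True},
    },
    "debug": False,
}

flat = flatten(nested)
# {'server_host': 'localhost', 'server_port': 8080, 'server_tls_enabled': True, 'debug': False}
print(flat)

# a custom separator works the same way
print(flatten(nested, separator="."))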
def _rendered_size(text, point_size, font_file):
"""
Return a (width, height) pair representing the size of *text* in English
Metric Units (EMU) when rendered at *point_size* in the font defined in
*font_file*.
"""
emu_per_inch = 914400
px_per_inch = 72.0
font = _Fonts.font(font_file, point_size)
px_width, px_height = font.getsize(text)
emu_width = int(px_width / px_per_inch * emu_per_inch)
emu_height = int(px_height / px_per_inch * emu_per_inch)
return emu_width, emu_height
|
f779899cba10fa135b7746421798cf91441984fc
| 24,187 |
from typing import Tuple
def parse_args(args: str) -> Tuple[UFDLType, ...]:
"""
Parses the string representation of a list of type arguments.
:param args:
The type arguments to parse.
:return:
The parsed types.
"""
if args == "":
return tuple()
return tuple(parse_type(arg) for arg in split_args(args))
|
2e9f5383ab1a3e96e03cc9fd030071ad37ec12cd
| 24,188 |
from typing import Iterable
import numpy
from matplotlib import pyplot
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
def pca_biplot(
predictor: Iterable, response: Iterable, labels: Iterable[str] = None
) -> pyplot.Figure:
"""
produces a pca projection and plot the 2 most significant component score and the component coefficients.
:param predictor:
:param response:
:param labels:
:return:"""
scaler = StandardScaler()
scaler.fit(predictor)
pca = PCA()
return biplot(
pca.fit_transform(scaler.transform(predictor))[:, 0:2],
numpy.transpose(pca.components_[0:2, :]),
response,
labels,
)
|
b2e75749b2c3c504b703c4b7b849a14d2801b6d8
| 24,189 |
from typing import Tuple
import torch
def kb_spmat_interp_adjoint(
data: Tensor, interp_mats: Tuple[Tensor, Tensor], grid_size: Tensor
) -> Tensor:
"""Kaiser-Bessel sparse matrix interpolation adjoint.
See :py:class:`~torchkbnufft.KbInterpAdjoint` for an overall description of
adjoint interpolation.
To calculate the sparse matrix tuple, see
:py:meth:`~torchkbnufft.calc_tensor_spmatrix`.
Args:
data: Scattered data to be interpolated to gridded data.
interp_mats: 2-tuple of real, imaginary sparse matrices to use for
            sparse matrix KB interpolation.
        grid_size: Size of the output grid.
Returns:
``data`` calculated at gridded locations.
"""
is_complex = True
if not data.is_complex():
if not data.shape[-1] == 2:
raise ValueError("For real inputs, last dimension must be size 2.")
is_complex = False
data = torch.view_as_complex(data)
image = KbSpmatInterpAdjoint.apply(data, interp_mats, grid_size)
if is_complex is False:
image = torch.view_as_real(image)
return image
|
0c4b131e29138d8d2b1260617fd50b20b991b64b
| 24,191 |
from typing import Callable
import inspect
def help_send(command: str, help_string_call: Callable[[], str]):
"""发送帮助信息"""
class _HELP(ArgAction):
def __init__(self):
super().__init__(HelpActionManager.send_action)
def handle(self, option_dict, varargs, kwargs, is_raise_exception):
action = require_help_send_action(command=command)
if action:
return action(help_string_call())
async def handle_async(self, option_dict, varargs, kwargs, is_raise_exception):
action = require_help_send_action(command=command)
if action:
return await action(help_string_call())
HelpActionManager.helpers.setdefault(command, _HELP())
if command in HelpActionManager.cache:
HelpActionManager.helpers[command].action = HelpActionManager.cache[command]
HelpActionManager.helpers[command].awaitable = inspect.iscoroutinefunction(HelpActionManager.cache[command])
del HelpActionManager.cache[command]
return HelpActionManager.helpers[command]
|
444971ddd639cfd10ad8eee41d3bc35815cff5a2
| 24,192 |
def divide_list(array, number):
"""Create sub-lists of the list defined by number.
"""
if len(array) % number != 0:
        raise Exception("len(array) % number != 0")
else:
return [array[x:x+number] for x in range(0, len(array), number)]
|
09882945b971ce13f7983c33562df0dfde77165c
| 24,193 |
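Two quick calls to divide_list above; the length of the input must be a multiple of number, otherwise the function raises.

print(divide_list([1, 2, 3, 4, 5, 6], 2))   # [[1, 2], [3, 4], [5, 6]]
print(divide_list(list("abcdef"), 3))       # [['a', 'b', 'c'], ['d', 'e', 'f']]

try:
    divide_list([1, 2, 3], 2)
except Exception as exc:
    print("rejected:", exc)                 # rejected: len(array) % number != 0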
def _sympify(a):
"""Short version of sympify for internal usage for __add__ and __eq__
methods where it is ok to allow some things (like Python integers
and floats) in the expression. This excludes things (like strings)
that are unwise to allow into such an expression.
>>> from sympy import Integer
>>> Integer(1) == 1
True
>>> Integer(1) == '1'
False
>>> from sympy import Symbol
>>> from sympy.abc import x
>>> x + 1
x + 1
>>> x + '1'
Traceback (most recent call last):
...
TypeError: unsupported operand type(s) for +: 'Symbol' and 'str'
see: sympify
"""
return sympify(a, strict=True)
|
2b383442f407fb932937d25fa9b32ac87bda2780
| 24,194 |
def process_value(setting_info, color):
"""Called by the :class:`rivalcfg.mouse.Mouse` class when processing a
"reactive_rgbcolor" type setting.
:param dict setting_info: The information dict of the setting from the
device profile.
:param str,tuple,list,None color: The reactive color.
:rtype: [int, int, int]
"""
# Disable the reactive color
if color is None or str(color).lower() in ["off", "disable"]:
return [0x00, 0x00, 0x00, 0x00, 0x00]
# Color tuple
if type(color) in (tuple, list):
if len(color) != 3:
raise ValueError("Not a valid color %s" % str(color))
for channel in color:
if type(channel) != int or channel < 0 or channel > 255:
raise ValueError("Not a valid color %s" % str(color))
return [0x01, 0x00] + list(color)
if is_color(color):
return [0x01, 0x00] + list(parse_color_string(color))
raise ValueError("Not a valid color %s" % str(color))
|
4fb3811456cf6740ff3f079d9391a4d2c9492427
| 24,195 |
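Two calls to process_value above that only exercise the tuple and off/None paths, so nothing outside the snippet (is_color, parse_color_string) is needed; the empty dict stands in for the unused setting_info argument.

# RGB tuple -> reactive colour enabled, followed by the colour bytes
print(process_value({}, (255, 0, 0)))   # [1, 0, 255, 0, 0]

# None or "off"/"disable" turns the reactive colour off
print(process_value({}, "off"))         # [0, 0, 0, 0, 0]
print(process_value({}, None))          # [0, 0, 0, 0, 0]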
def generate_ones(num_bits):
"""Returns a numpy array with N ones."""
    return np.ones(num_bits, dtype=int)  # np.int was removed from NumPy; the builtin int is equivalent here
|
13601212494bd29bbe037c357d5ac8a95632fad4
| 24,196 |
def get_exception_message(exception: Exception) -> str:
"""Returns the message part of an exception as string"""
return str(exception).strip()
|
6e002329425f716115a5fddb32cbf36cf568ee81
| 24,197 |
from typing import Tuple
def should_commit(kwargs: Kwargs) -> Tuple[bool, Kwargs]:
"""Function for if a schema class should create a document on instance."""
return kwargs.pop('create') if 'create' in kwargs else True, kwargs
|
3e554d661b069e71da86dc8f3d43e754236a9037
| 24,200 |
def _get_frame_time(time_steps):
""" Compute average frame time.
:param time_steps: 1D array with cumulative frame times.
:type time_steps: numpy.ndarray
:return: The average length of each frame in seconds.
:rtype: float
"""
if len(time_steps.shape) != 1:
raise ValueError("ERROR: Time series must be a 1D array.")
frame_time = time_steps[-1]/(len(time_steps) - 1) # Need to ignore the first frame (0).
return frame_time
|
e849e5d6bcbc14af357365b3e7f98f1c50d93ee4
| 24,202 |
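A tiny example for _get_frame_time above: five cumulative timestamps spaced 0.5 s apart give an average frame time of 0.5 s.

import numpy as np

time_steps = np.array([0.0, 0.5, 1.0, 1.5, 2.0])
print(_get_frame_time(time_steps))   # 0.5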
import random
def next_symbol_to_learn(ls):
"""Returns the next symbol to learn. This always returns characters from the
training set, within those, gives higher probability to symbols the user
doesn't know very well yet. `ls` is the learn state. Returns a tuple like
("V", "...-")
"""
total = 0.0
candidates = [ ]
for k in ls["learning_set"]:
weight = 1.0/ls[k]
total += weight
candidates.append((k, weight))
r = random.uniform(0.0, total)
sum = 0.0
for c in candidates:
symbol = c[0]
weight = c[1]
sum += weight
if r <= sum:
return (symbol, morse.to_morse[symbol])
print("Ooops, should have selected a candidate symbol")
|
d4b574a6f841ee3f2e1ce4be9f67a508ed6fb2de
| 24,203 |
def query_table3(song):
"""
    Return the SQL necessary to get all users who listened to the given song name.
"""
return "select user_name from WHERE_SONG where song_name = '{}';".format(song)
|
ed9a3fb7eb369c17027871e28b02600b78d483a9
| 24,204 |
def train_test_data(x_,y_,z_,i):
"""
    Takes in x, y and z arrays, plus an array i of random indices.
    Returns learning arrays for x, y and z of length (N - len(i))
    and test arrays of length len(i).
"""
x_learn=np.delete(x_,i)
y_learn=np.delete(y_,i)
z_learn=np.delete(z_,i)
x_test=np.take(x_,i)
y_test=np.take(y_,i)
z_test=np.take(z_,i)
return x_learn,y_learn,z_learn,x_test,y_test,z_test
|
7430e9ea2c96356e9144d1689af03f50b36895c6
| 24,206 |
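A usage sketch for train_test_data above, drawing the held-out indices with NumPy; the array names and sizes are illustrative.

import numpy as np

rng = np.random.default_rng(0)

N = 100
x = rng.uniform(0, 1, N)
y = rng.uniform(0, 1, N)
z = x**2 + y**2 + rng.normal(0, 0.1, N)

# hold out 20 random points for testing
test_idx = rng.choice(N, size=20, replace=False)
x_tr, y_tr, z_tr, x_te, y_te, z_te = train_test_data(x, y, z, test_idx)
print(len(x_tr), len(x_te))   # 80 20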
def construct_Tba(leads, tleads, Tba_=None):
"""
Constructs many-body tunneling amplitude matrix Tba from single particle
tunneling amplitudes.
Parameters
----------
leads : LeadsTunneling
LeadsTunneling object.
tleads : dict
Dictionary containing single particle tunneling amplitudes.
tleads[(lead, state)] = tunneling amplitude.
Tba_ : None or ndarray
nbaths by nmany by nmany numpy array containing old values of Tba.
The values in tleads are added to Tba_.
Returns
-------
Tba : ndarray
nleads by nmany by nmany numpy array containing many-body tunneling amplitudes.
The returned Tba corresponds to Fock basis.
"""
si, mtype = leads.si, leads.mtype
if Tba_ is None:
Tba = np.zeros((si.nleads, si.nmany, si.nmany), dtype=mtype)
else:
Tba = Tba_
# Iterate over many-body states
for j1 in range(si.nmany):
state = si.get_state(j1)
# Iterate over single particle states
for j0 in tleads:
(j3, j2), tamp = j0, tleads[j0]
# Calculate fermion sign for added/removed electron in a given state
fsign = np.power(-1, sum(state[0:j2]))
if state[j2] == 0:
statep = list(state)
statep[j2] = 1
ind = si.get_ind(statep)
if ind is None:
continue
Tba[j3, ind, j1] += fsign*tamp
else:
statep = list(state)
statep[j2] = 0
ind = si.get_ind(statep)
if ind is None:
continue
Tba[j3, ind, j1] += fsign*np.conj(tamp)
return Tba
|
83c582535435564b8132d3bd9216690c127ccb79
| 24,207 |
def inplace_update_i(tensor_BxL, updates_B, i):
"""Inplace update a tensor. B: batch_size, L: tensor length."""
batch_size = tensor_BxL.shape[0]
indices_Bx2 = tf.stack([
tf.range(batch_size, dtype=tf.int64),
tf.fill([batch_size], tf.cast(i, tf.int64))
],
axis=-1)
return tf.tensor_scatter_nd_update(tensor_BxL, indices_Bx2, updates_B)
|
61cb7e8a030debf6ff26154d153de674645c23fe
| 24,208 |
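A small TensorFlow 2 example for inplace_update_i above: write one value per batch row into column i of a [batch, length] tensor.

import tensorflow as tf

tensor_BxL = tf.zeros([2, 4], dtype=tf.float32)
updates_B = tf.constant([5.0, 7.0])

result = inplace_update_i(tensor_BxL, updates_B, i=2)
print(result.numpy())
# [[0. 0. 5. 0.]
#  [0. 0. 7. 0.]]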
async def record_trade_volume() -> RecordTradeVolumeResponse:
"""
This api exists for demonstration purposes so you don't have to wait until the job runs again to pick up new data
"""
await deps.currency_trade_service.update_trade_volumes()
return RecordTradeVolumeResponse(success=True)
|
2921353360c71e85d7d5d64f6aed505e5f9a66b9
| 24,211 |
def logged_in():
"""
Method called by Strava (redirect) that includes parameters.
- state
- code
- error
"""
error = request.args.get('error')
state = request.args.get('state')
if error:
return render_template('login_error.html',
error=error,
competition_title=config.COMPETITION_TITLE)
else:
code = request.args.get('code')
client = Client()
token_dict = client.exchange_code_for_token(client_id=config.STRAVA_CLIENT_ID,
client_secret=config.STRAVA_CLIENT_SECRET,
code=code)
# Use the now-authenticated client to get the current athlete
strava_athlete = client.get_athlete()
athlete_model = data.update_athlete_auth(strava_athlete, token_dict)
if not athlete_model:
return render_template('login_error.html',
error="ATHLETE_NOT_FOUND",
competition_title=config.COMPETITION_TITLE)
multiple_teams = None
no_teams = False
team = None
message = None
try:
team = data.register_athlete_team(
strava_athlete=strava_athlete,
athlete_model=athlete_model,
)
except MultipleTeamsError as multx:
multiple_teams = multx.teams
message = multx
except NoTeamsError as noteamsx:
no_teams = True
message = noteamsx
if not no_teams:
auth.login_athlete(strava_athlete)
return redirect(url_for('user.rides'))
else:
return render_template(
'login_results.html',
athlete=strava_athlete,
team=team,
multiple_teams=multiple_teams,
no_teams=no_teams,
message=message,
competition_title=config.COMPETITION_TITLE,
)
|
71a2590f2f2fbcc67e73a2afb9180a5974d98252
| 24,213 |
def unit_string_to_cgs(string: str) -> float:
"""
Convert a unit string to cgs.
Parameters
----------
string
The string to convert.
Returns
-------
float
The value in cgs.
"""
# distance
if string.lower() == 'au':
return constants.au
# mass
if string.lower() in ('solarm', 'msun'):
return constants.solarm
# time
if string.lower() in ('year', 'years', 'yr', 'yrs'):
return constants.year
raise ValueError('Cannot convert unit')
|
32b16bf6a9c08ee09a57670c82da05655cb3fd16
| 24,214 |
from operator import mul
def Mul(x, x_shape, y, y_shape, data_format=None):
"""mul"""
if data_format:
x_new = broadcast_by_format(x, x_shape, data_format[0], y_shape)
y_new = broadcast_by_format(y, y_shape, data_format[1], x_shape)
else:
x_new = x
y_new = y
return mul.mul(x_new, y_new)
|
b6bf343e8a3ceb5fe5a0dc8c7bd96b34ecb7ab2f
| 24,216 |
import logging
def new_authentication_challenge(usr: User) -> str:
"""
Initiates an authentication challenge. The challenge proceeds as follows:
1. A user (:class:`sni.user`) asks to start a challenge by calling
this method.
2. This methods returns a UUID, and the user has 60 seconds to change its
teamspeak nickname to that UUID.
3. The user notifies SNI that (s)he has done so.
4. The server checks (see
:meth:`sni.teamspeak.complete_authentication_challenge`), and if
       successful, the corresponding teamspeak client is registered in the
database and bound to that user. The nickname is also automatically
assigned.
"""
logging.info(
"Starting authentication challenge for %s", usr.character_name
)
challenge_nickname = utils.random_code(20)
TeamspeakAuthenticationChallenge.objects(user=usr).update(
set__challenge_nickname=challenge_nickname,
set__created_on=utils.now(),
set__user=usr,
upsert=True,
)
return challenge_nickname
|
d0c27b211aadc94556dc285a1588ff908338b950
| 24,217 |
def create_channel(application_key):
"""Create a channel.
Args:
application_key: A key to identify this channel on the server side.
Returns:
A string id that the client can use to connect to the channel.
Raises:
    Errors returned by _ToChannelError.
"""
request = channel_service_pb.CreateChannelRequest()
response = channel_service_pb.CreateChannelResponse()
request.set_application_key(application_key)
try:
apiproxy_stub_map.MakeSyncCall(_GetService(),
'CreateChannel',
request,
response)
except apiproxy_errors.ApplicationError, e:
raise _ToChannelError(e)
return response.client_id()
|
8b54ac3204af4dbeaf603e788aa0b41829f4807b
| 24,218 |
def generate_new_admin_class():
"""
we need to generate a new dashboard view for each `setup_admin` call.
"""
class MockDashboard(DashboardView):
pass
class MockAdmin(Admin):
dashboard_class = MockDashboard
return MockAdmin
|
7f691e8f294bf6d678cb8f1ce59b4f12ca77c866
| 24,219 |
def for_default_graph(*args, **kwargs):
"""Creates a bookkeeper for the default graph.
Args:
*args: Arguments to pass into Bookkeeper's constructor.
**kwargs: Arguments to pass into Bookkeeper's constructor.
Returns:
A new Bookkeeper.
Raises:
ValueError: If args or kwargs are provided and the Bookkeeper already
exists.
"""
graph = tf.get_default_graph()
collection = graph.get_collection(_BOOKKEEPER)
if collection:
if args or kwargs:
raise ValueError('Requesting construction of a BookKeeper that already '
'exists: %s %s' % (args, kwargs))
return collection[0]
else:
books = BOOKKEEPER_FACTORY(*args, g=graph, **kwargs)
graph.add_to_collection(_BOOKKEEPER, books)
return books
|
649f2c33c5cdedf4d08c2ac991c0d1a044c50fe4
| 24,220 |