content | sha1 | id
---|---|---|
def get_trace(session, trace_uuid):
"""Retrieves traces given a uuid.
Args:
sesssion: db session
trace_uuid: uuid of trace in question
Returns 2-tuple of plop, flamegraph input or None if trace doesn't exist
(or was garbage collected.
"""
trace = session.query(PerfProfile).filter(PerfProfile.uuid == trace_uuid).first()
if not trace:
raise InvalidUUID()
return trace.plop_input, trace.flamegraph_input | 0f44ebce393277a3660810ae1b359b437f733ec1 | 20,786 |
import numpy as np
def first_kind_discrete(orientations, order=4):
"""
Calc orientation tensors of first kind for given discrete vectors
"""
# Normalize orientations
orientations = [np.array(v) / np.linalg.norm(v) for v in orientations]
# Symmetrize orientations
# orientations_reversed = [-v for v in orientations]
# orientations = orientations + orientations_reversed
einsumStrings = {
1: "ij -> j",
2: "ij, ik -> jk",
3: "ij, ik, il -> jkl",
4: "ij, ik, il, im -> jklm",
5: "ij, ik, il, im, in -> jklmn",
6: "ij, ik, il, im, in, ip -> jklmnp",
}
ori = orientations
if order == 1:
N = 1.0 / len(orientations) * np.einsum(einsumStrings[order], ori,)
elif order == 2:
N = 1.0 / len(orientations) * np.einsum(einsumStrings[order], ori, ori)
elif order == 3:
N = 1.0 / len(orientations) * np.einsum(einsumStrings[order], ori, ori, ori)
elif order == 4:
N = (
1.0
/ len(orientations)
* np.einsum(einsumStrings[order], ori, ori, ori, ori)
)
elif order == 5:
N = (
1.0
/ len(orientations)
* np.einsum(einsumStrings[order], ori, ori, ori, ori, ori)
)
elif order == 6:
N = (
1.0
/ len(orientations)
* np.einsum(einsumStrings[order], ori, ori, ori, ori, ori, ori)
)
else:
raise Exception("Not implemented")
return N | 4b28f426ea551d7ef6a744091a65be11d418e324 | 20,787 |
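A minimal usage sketch, not part of the original entry and assuming numpy is available as np: for two orthogonal unit vectors, the second-order orientation tensor is half the identity on the plane they span.

vectors = [[1.0, 0.0, 0.0], [0.0, 1.0, 0.0]]
N2 = first_kind_discrete(vectors, order=2)
# N2 is approximately [[0.5, 0, 0], [0, 0.5, 0], [0, 0, 0]]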
def fix_repo_url(repo_url, in_type='https', out_type='ssh', format_dict=format_dict):
""" Changes the repo_url format """
for old, new in izip(format_dict[in_type], format_dict[out_type]):
repo_url = repo_url.replace(old, new)
return repo_url | 6382136693bfce4b72e122cdc016fa9eee1fb78f | 20,789 |
import numpy as np
def mc(dataset):
"""
Modulus calculation.
Calculates sqrt(real^2 + imag^2)
"""
return np.sqrt(dataset.real ** 2 + dataset.imag ** 2) | 8c7d94ed07c7d4102b2650baef11ea470e5673ea | 20,791 |
from typing import List
def _get_fields_list(data: Data) -> List[Field]:
"""Extracts all nested fields from the data as a flat list."""
result = []
def map_fn(value):
if isinstance(value, GraphPieceBase):
# pylint: disable=protected-access
tf.nest.map_structure(map_fn, value._data)
else:
result.append(value)
tf.nest.map_structure(map_fn, data)
return result | ad6a9b4c11749b085edaf639e318e84e75e58cc7 | 20,792 |
def constructAdvancedQuery(qryRoot):
"""
Turns a qry object into a complex Q object by calling its helper and supplying the selected format's tree.
"""
return constructAdvancedQueryHelper(
qryRoot["searches"][qryRoot["selectedtemplate"]]["tree"]
) | 4a1a23c4810e7b4f30a86c620f0e949da8af4ef8 | 20,793 |
def slice_and_dice(text=text):
"""Strip the whitespace (newlines) off text at both ends,
split the text string on newline (\n).
Next check if the first char of each (stripped) line is lowercase,
if so split the line into words and append the last word to
the results list. Make sure that you strip off any trailing
exclamation marks (!) and dots (.). Return the results list."""
results = []
for line in text.strip().split('\n'):
line = line.strip()
if line[0].islower():
last_word = line.split()[-1]
if last_word[-1] == '.' or last_word[-1] == '!':
last_word = last_word[:-1]
results.append(last_word)
return results | 16d3bb77c60738d654a61ac40b3fc7216ee6ed52 | 20,794 |
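A hypothetical usage sketch (passing the text explicitly; the default argument also requires a module-level `text` string to exist): only lines starting with a lowercase letter contribute their last word, with trailing '.' or '!' stripped.

sample = """
First line is skipped.
keep the last word here!
and also this one.
"""
print(slice_and_dice(sample))  # ['here', 'one']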
def solution(data):
""" Solution to the problem """
seats, first_visible_seats, dim_y, dim_x = preprocess(data)
solver = Simulation(seats, first_visible_seats, dim_y, dim_x)
return solver.solve() | 6bbfa11df003b2dce24430cf50420d6d2bcf9683 | 20,795 |
from typing import Tuple
def add_received_ip_tags(
rows: beam.pvalue.PCollection[Row],
ips_with_metadata: beam.pvalue.PCollection[Tuple[DateIpKey, Row]]
) -> beam.pvalue.PCollection[Row]:
"""Add tags for answer ips (field received.ip) - asnum, asname, http, cert
Args:
rows: PCollection of measurement rows
ips_with_metadata: PCollection of dated ips with geo metadata
Returns:
PCollection of measurement rows with tag information added to the received.ip row
"""
# PCollection[Tuple[DateIpKey,Row]]
received_keyed_by_ip_and_date = (
rows | 'key by received ips and dates' >> beam.Map(
lambda row: (_make_date_received_ip_key(row), row)).with_output_types(
Tuple[DateIpKey, Row]))
# Iterable[PCollection[Tuple[DateIpKey,Row]]]
partition_by_domain = (
received_keyed_by_ip_and_date | 'partition by domain' >> beam.Partition(
_get_domain_partition, NUM_DOMAIN_PARTITIONS))
collections = []
for i in range(0, NUM_DOMAIN_PARTITIONS):
elements = partition_by_domain[i]
# PCollection[Tuple[Tuple[date,ip],Dict[input_name_key,List[Row]]]]
grouped_received_metadata_and_rows = (({
IP_METADATA_PCOLLECTION_NAME: ips_with_metadata,
ROWS_PCOLLECION_NAME: elements
}) | f'group by received ip keys {i}' >> beam.CoGroupByKey())
# PCollection[Row]
domain_rows_with_tags = (
grouped_received_metadata_and_rows | f'tag received ips {i}' >>
beam.FlatMapTuple(lambda k, v: merge_metadata_with_rows(
k, v, field='received')).with_output_types(Row))
collections.append(domain_rows_with_tags)
# PCollection[Row]
rows_with_tags = (
collections |
'merge domain collections' >> beam.Flatten().with_output_types(Row))
return rows_with_tags | 36dcbd0ced327a1f4ed0645b7c1edfe65bdeb9f8 | 20,796 |
def set_selector(*args):
"""set_selector(sel_t selector, ea_t paragraph) -> int"""
return _idaapi.set_selector(*args) | 8c4b7119979dda3d4b21b56865d20cfa60900a3a | 20,797 |
def other_features(tweet):
"""This function takes a string and returns a list of features.
These include Sentiment scores, Text and Readability scores,
as well as Twitter specific features"""
tweet_text = tweet["text"]
##SENTIMENT
sentiment = sentiment_analyzer.polarity_scores(tweet_text)
words = local_tokenizer.tokenize(tweet_text) #Get text only
num_chars = sum(len(w) for w in words) #num chars in words
num_chars_total = len(tweet_text)
num_terms = len(tweet_text.split())
num_words = len(words)
num_unique_terms = len(set([x.lower() for x in words]))
caps_count = sum([1 if x.isupper() else 0 for x in tweet_text])
caps_ratio = caps_count / num_chars_total
twitter_objs = count_twitter_objs(tweet_text) #Count #, @, and http://
num_media = 0
if "media" in tweet["entities"]:
num_media = len(tweet["entities"]["media"])
retweet = 0
if "rt" in words or "retweeted_status" in tweet:
retweet = 1
has_place = 1 if "coordinates" in tweet else 0
author = tweet["user"]
is_verified = 1 if author["verified"] else 0
log_followers = 0 if author["followers_count"] == 0 else np.log(author["followers_count"])
log_friends = 0 if author["friends_count"] == 0 else np.log(author["friends_count"])
features = [num_chars, num_chars_total, num_terms, num_words,
num_unique_terms, sentiment['neg'], sentiment['pos'],
sentiment['neu'], sentiment['compound'],
twitter_objs[2], twitter_objs[1],
twitter_objs[0], retweet, num_media,
is_verified,
# log_followers, log_friends,
# has_place,
caps_ratio,
]
return features | d4b1e158a80b6d9502c02dc0c2380e8749bb0b6f | 20,798 |
def template(spec_fn):
"""
>>> from Redy.Magic.Classic import template
>>> import operator
>>> class Point:
>>> def __init__(self, p):
>>> assert isinstance(p, tuple) and len(p) == 2
>>> self.x, self.y = p
>>> def some_metrics(p: Point):
>>> return p.x + 2 * p.y
>>> @template
>>> def comp_on_metrics(self: Point, another: Point, op):
>>> if not isinstance(another, Point):
>>> another = Point(another)
>>> return op(*map(some_metrics, (self, another)))
>>> class Space(Point):
>>> @comp_on_metrics(op=operator.lt)
>>> def __lt__(self, other):
>>> ...
>>> @comp_on_metrics(op=operator.eq)
>>> def __eq__(self, other):
>>> ...
>>> @comp_on_metrics(op=operator.gt)
>>> def __gt__(self, other):
>>> ...
>>> @comp_on_metrics(op=operator.le)
>>> def __le__(self, other):
>>> ...
>>> @comp_on_metrics(op=operator.ge)
>>> def __ge__(self, other):
>>> ...
>>> p = Space((0, 1))
>>> p > (1, 2)
>>> p < (3, 4)
>>> p >= (5, 6)
>>> p <= (7, 8)
>>> p == (9, 10)
"""
def specify(*spec_args, **spec_kwds):
def call(_):
def inner(*args, **kwds):
return spec_fn(*spec_args, *args, **spec_kwds, **kwds)
return inner
return call
return specify | a8fd64926cdbec73c1a31c20a27174c86af3405e | 20,799 |
def list_sum(*argv, **kwargs):
"""
Summarise items in provided list
Arguments:
- argv: list of item for summarise
Options:
- type: list item type (int if omitted)
Note: All types provided by this lib supported
Returns sum number in 'type' format
"""
_type_name = kwargs.get('type', 'int')
_type = type_factory(_type_name)
_result: _type = 0
try:
for _list in argv:
if isinstance(_list, (list, tuple)):
_result += sum([_type(_item) for _item in _list])
else:
_number = _type(_list)
_result += _number
except (ValueError, IndexError) as e:
raise FrameworkError(f"ROBOT_MATH.LIST_SUM: {e}")
else:
return _result | d047abe3eb99ac7c468dd8b3b1bc42ee7b98e134 | 20,800 |
def image_upload(request):
"""
If it's a post, then upload the image or store it locally based on config. Otherwise, return
the html of the upload.html template.
"""
if request.method == 'POST':
image_file = request.FILES['image_file']
image_type = request.POST['image_type']
if settings.USE_S3:
if image_type == 'private':
upload = UploadPrivate(file=image_file)
else:
upload = Upload(file=image_file)
upload.save()
image_url = upload.file.url
else:
filesystem_storage = FileSystemStorage()
filename = filesystem_storage.save(image_file.name, image_file)
image_url = filesystem_storage.url(filename)
return render(request, 'upload.html', {
'image_url': image_url
})
return render(request, 'upload.html') | 1d5f0f111dfd9f1b7b1f29ebb8d9fb27fb210a5f | 20,802 |
def werbo_c(topics, word_embedding_model, weight=0.9, topk=10):
"""
computes Word embedding based RBO - centroid
Parameters
----------
topics: a list of lists of words
word_embedding_model: word embedding space in gensim word2vec format
weight: p (float), default 0.9: Weight of each agreement at depth d:
p**(d-1). When set to 1.0, there is no weighting and the rbo reduces to average overlap.
topk: top k words on which the topic diversity will be computed
"""
if topk > len(topics[0]):
raise Exception('Words in topics are less than topk')
else:
collect = []
for list1, list2 in combinations(topics, 2):
word2index = get_word2index(list1, list2)
index2word = {v: k for k, v in word2index.items()}
indexed_list1 = [word2index[word] for word in list1]
indexed_list2 = [word2index[word] for word in list2]
rbo_val = werc(indexed_list1[:topk], indexed_list2[:topk], p=weight,
index2word=index2word, word2vec=word_embedding_model, norm=False)[2]
collect.append(rbo_val)
return np.mean(collect) | 447e3c6037196de56673a62b8841279e6f17d739 | 20,803 |
import numpy as np
def G_t(t, G, tau, Ge=0.0):
"""this function returns the relaxation modulus in time"""
G_rel = np.zeros(np.size(t))
if np.size(G) == 1: #the model is the SLS
for i in range(np.size(t)):
G_rel[i] = Ge + G*np.exp(-t[i]/tau)
else: #the model has more than one arm
for i in range(np.size(t)):
G_rel[i] = Ge + sum(G[:]*np.exp(-t[i]/tau[:]))
return G_rel | b44cb0b49e2423ee3c5aade0c625407d54889044 | 20,804 |
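A short usage sketch (assumes numpy as np): relaxation modulus of a single-arm SLS model with Ge = 0.1, G = 1.0 and tau = 0.5.

t = np.array([0.0, 0.5, 5.0])
print(G_t(t, 1.0, 0.5, Ge=0.1))  # approximately [1.1, 0.468, 0.1]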
def iou_set(set1, set2):
"""Calculate iou_set """
union = set1.union(set2)
return len(set1.intersection(set2)) / len(union) if union else 0 | f0087b640e8b9a87167d7e2b645aca3c565e09c1 | 20,805 |
import time
def time_measured(fkt):
"""
Decorator to measure execution time of a function
It prints out the measured time
Parameters
----------
fkt : function
function that shall be measured
Returns
-------
None
"""
def fkt_wrapper(*args, **kwargs):
t1 = time.time()
return_vals = fkt(*args, **kwargs)
t2 = time.time()
print("Job needed: {} seconds".format(t2-t1))
return return_vals
return fkt_wrapper | 43fe9fa24fdd27f15e988f5997424dd91f7d92c9 | 20,806 |
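A usage sketch: decorating any function prints its wall-clock run time while preserving the return value (slow_add is a made-up example function).

@time_measured
def slow_add(a, b):
    time.sleep(0.2)
    return a + b

print(slow_add(1, 2))  # prints "Job needed: 0.2... seconds", then 3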
def test_find_codon(find_codon):
"""
A function to test another function that looks for a codon within
a coding sequence.
"""
synapto_nuc = ("ATGGAGAACAACGAAGCCCCCTCCCCCTCGGGATCCAACAACAACGAGAACAACAATGCAGCCCAGAAGA"
"AGCTGCAGCAGACCCAAGCCAAGGTGGACGAGGTGGTCGGGATTATGCGTGTGAACGTGGAGAAGGTCCT"
"GGAGCGGGACCAGAAGCTATCGGAACTGGGCGAGCGTGCGGATCAGCTGGAGCAGGGAGCATCCCAGTTC"
"GAGCAGCAGGCCGGCAAGCTGAAGCGCAAGCAATGGTGGGCCAACATGAAGATGATGATCATTCTGGGCG"
"TGATAGCCGTTGTGCTGCTCATCATCGTTCTGGTGTCGCTTTTCAATTGA")
assert find_codon('ATG', synapto_nuc) == 0
assert find_codon('AAT', synapto_nuc) == 54
assert find_codon('TGT', synapto_nuc) == -1
assert find_codon('TGC', synapto_nuc) == -1
return None | 1e8906441d7812fbaefd7688a1c02876210ba8b8 | 20,807 |
def add_keys3(a, A, b, B):
"""
aA + bB
:param a:
:param A:
:param b:
:param B:
:return:
"""
return tcry.xmr_add_keys3_vartime_r(a, A, b, B) | e1f8205b93b2f944da271a8e4097c9466831ad6b | 20,808 |
def cause_state(value):
"""
Usage::
{{ value|cause_state}}
"""
try:
if isinstance(value, (str, unicode)):
value = eval(value)
if Bushfire.CAUSE_STATE_POSSIBLE==value:
return Bushfire.CAUSE_STATE_CHOICES[Bushfire.CAUSE_STATE_POSSIBLE-1][1]
return Bushfire.CAUSE_STATE_CHOICES[Bushfire.CAUSE_STATE_KNOWN-1][1]
except:
return None | 8e905921b1e7e1b498f42e70a134d21f2a146f2b | 20,809 |
def ReadRawSAData(DataDirectory, fname_prefix):
"""
This function reads in the raw SA data to a pandas dataframe
Args:
DataDirectory: the data directory
fname_prefix: the file name prefix
Returns:
pandas dataframe with the raw SA data
Author: FJC
"""
# get the csv filename
fname_suffix = "_SAvertical.csv"
fname = fname_prefix+fname_suffix
df = pd.read_csv(DataDirectory+fname)
return df | d28d91131de82c0049a046231a41eb34d9f3b185 | 20,810 |
from typing import List
import numpy as np
def not_valid_score(scores: List[int]):
"""Checks if the set of estimations is ambiguous (all scores are different)."""
return True if len(np.unique(scores)) == len(scores) else False | 9077c93a6bdfe79d0fa5277f04adb8ba58053ba9 | 20,811 |
import numpy
def biom_to_pandas(biom_otu):
"""
Convert data from biom to SparseDataFrame (pandas) for easy access
:param biom_otu: Table
:rtype: DataFrame
"""
tmp_m = biom_otu.matrix_data
df = [SparseSeries(tmp_m[i].toarray().ravel()) for i in numpy.arange(tmp_m.shape[0])]
return (SparseDataFrame(df, index=biom_otu.ids('observation'), columns=biom_otu.ids('sample')).to_dense()) | ce865ce669dd36e60b83be00f998732abc37d6f7 | 20,813 |
def is_classmethod(method: t.Callable):
"""
A python method is a wrapper around a function that also
holds a reference to the class it is a method of.
When bound, it also holds a reference to the instance.
@see https://stackoverflow.com/questions/12935241/python-call-instance-method-using-func
:param method:
"""
# print(instance.a_method) # Bounded
# print(AClass.a_method) # Unbounded
bound_to: t.Type = getattr(method, '__self__', None)
# Bound to: <class '__main__.AClass'>, False
# If double decorated with staticmethod and classmethod
# Bound to: <__main__.AClass object at 0x7ffb18699fd0>, True
if not isinstance(bound_to, type):
# must be bound to a class
return False
name: str = method.__name__
# MRO = Method resolution order
# E.g. Class A: pass
# A.__mro__
# Output: (<class '__main__.AClass'>, <class 'object'>)
for cls in bound_to.__mro__:
# Get decorator
descriptor = vars(cls).get(name)
if descriptor is not None:
return isinstance(descriptor, classmethod)
return False | 6600c9a16df0dc62304a02c2bb333af07f8709f8 | 20,815 |
def masked_accuracy(y_true, y_pred):
"""An accuracy function that masks based on targets (value: 0.5)
Args:
y_true: The true training labels
y_pred: The predicted labels
Returns:
float: the masked accuracy
"""
a = kb.sum(kb.cast(kb.equal(y_true, kb.round(y_pred)), kb.floatx()))
c = kb.sum(kb.cast(kb.not_equal(y_true, 0.5), kb.floatx()))
acc = a / c
return acc | 0d6007eafdf849ad495458d56b4e68a0031d35fa | 20,816 |
import numpy as np
import shap
def get_shap_interaction_values(x_df, explainer):
"""
Compute the shap interaction values for a given dataframe.
Also checks if the explainer is a TreeExplainer.
Parameters
----------
x_df : pd.DataFrame
DataFrame for which will be computed the interaction values using the explainer.
explainer : shap.TreeExplainer
explainer object used to compute the interaction values.
Returns
-------
shap_interaction_values : np.ndarray
Shap interaction values for each sample as an array of shape (# samples x # features x # features).
"""
if not isinstance(explainer, shap.TreeExplainer):
raise ValueError(f"Explainer type ({type(explainer)}) is not a TreeExplainer. "
f"Shap interaction values can only be computed for TreeExplainer types")
shap_interaction_values = explainer.shap_interaction_values(x_df)
# For models with vector outputs the previous function returns one array for each output.
# We sum the contributions here.
if isinstance(shap_interaction_values, list):
shap_interaction_values = np.sum(shap_interaction_values, axis=0)
return shap_interaction_values | 9b84e0a262ba0d74eb2f24406ac6f41dfbab1625 | 20,817 |
from typing import Optional
from typing import Union
def plot_roc(
y_true: np.ndarray,
y_probas: np.ndarray,
labels: Optional[dict] = None,
classes_to_plot: Optional[list] = None,
plot_micro: Optional[bool] = False,
plot_macro: Optional[bool] = False,
title: str = "ROC Curve",
ax: Optional[matplotlib.axes.Axes] = None,
figsize: Optional[tuple] = None,
cmap: Union[str, matplotlib.colors.Colormap] = "Blues",
title_fontsize: Union[str, int] = "large",
text_fontsize: Union[str, int] = "medium",
) -> matplotlib.axes.Axes:
"""Plot ROC curve.
Parameters
----------
y_true : numpy.ndarray, (n_samples,)
Actual target values.
y_probas : numpy.ndarray, (n_samples, n_classes)
Predicted probabilities of each class.
labels: Optional[dict]
labels for y.
classes_to_plot : Optional[list]
Classes for which the ROC curve should be plotted.
If the class doesn't exists it will be ignored.
If ``None``, all classes will be plotted
(the default is ``None``).
plot_micro : Optional[bool]
Plot micro averaged ROC curve (the default is False)
plot_macro : Optional[bool]
Plot macro averaged ROC curve (the default is False)
title : str
Title for the ROC.
ax: Optional[`matplotlib.axes.Axes`] object
The axes on which plot was drawn.
figsize : Optional[tuple]
Size of the plot.
cmap : Union[str, `matplotlib.colors.Colormap`]
Colormap used for plotting.
https://matplotlib.org/tutorials/colors/colormaps.html
title_fontsize : Union[str, int]
Use 'small', 'medium', 'large' or integer-values
(the default is 'large')
text_fontsize : Union[str, int]
Use 'small', 'medium', 'large' or integer-values
(the default is 'medium')
Returns
-------
`matplotlib.axes.Axes` object
The axes on which plot was drawn.
References
----------
.. [1] https://github.com/reiinakano/scikit-plot
"""
classes = np.unique(y_true)
if not classes_to_plot:
classes_to_plot = classes
if ax is None:
fig, ax = plt.subplots(figsize=figsize)
ax.set_title(label=title, fontsize=title_fontsize)
fpr_dict = {}
tpr_dict = {}
indices_to_plot = np.isin(classes, classes_to_plot)
for i, to_plot in enumerate(indices_to_plot):
fpr_dict[i], tpr_dict[i], _ = mt.roc_curve(y_true, y_probas[:, i], pos_label=classes[i])
if to_plot:
roc_auc = mt.auc(fpr_dict[i], tpr_dict[i])
color = plt.cm.get_cmap(cmap)(float(i) / len(classes))
class_name = labels[classes[i]] if labels else classes[i]
ax.plot(
fpr_dict[i],
tpr_dict[i],
lw=2,
color=color,
label=f"ROC curve of class {class_name} (AUC= {roc_auc:.2f})",
)
if plot_micro:
binarized_y_true = label_binarize(y_true, classes=classes)
if len(classes) == 2:
binarized_y_true = np.hstack((1 - binarized_y_true, binarized_y_true))
fpr, tpr, _ = mt.roc_curve(binarized_y_true.ravel(), y_probas.ravel())
# AUC expects (x, y) = (fpr, tpr)
roc_auc = mt.auc(fpr, tpr)
ax.plot(
fpr,
tpr,
label=f"micro-average ROC curve (AUC = {roc_auc:.2f})",
color="deeppink",
linestyle=":",
linewidth=4,
)
if plot_macro:
# Compute macro-average ROC curve and it's area.
# First aggregate all the false positive rates
all_fpr = np.unique(np.concatenate([fpr_dict[i] for i, _ in enumerate(classes)]))
# Then interpolate all ROC curves at this points
mean_tpr = np.zeros_like(all_fpr)
for i, _ in enumerate(classes):
mean_tpr += interp(all_fpr, fpr_dict[i], tpr_dict[i])
# Finally average it and compute AUC
mean_tpr /= len(classes)
roc_auc = mt.auc(all_fpr, mean_tpr)
ax.plot(
all_fpr,
mean_tpr,
label=f"macro-average ROC curve (AUC = {roc_auc:.2f})",
color="navy",
linestyle=":",
linewidth=4,
)
ax.plot([0, 1], [0, 1], "k--", lw=2)
ax.set(xlim=[0.0, 1.0], ylim=[0.0, 1.05])
ax.set_xlabel(f"False Positive Rate", fontsize=text_fontsize)
ax.set_ylabel(f"True Positive Rate", fontsize=text_fontsize)
ax.tick_params(labelsize=text_fontsize)
ax.legend(loc="lower right", fontsize=text_fontsize)
return ax | e1d92e9a06c3e1d677102f16401e054a7897f1b0 | 20,818 |
def _harmonize_input(data):
"""Harmonize different types of inputs by turning all inputs into dicts."""
if isinstance(data, (pd.DataFrame, pd.Series)) or callable(data):
data = {0: data}
elif isinstance(data, dict):
pass
else:
raise ValueError(
"Moments must be pandas objects or dictionaries of pandas objects."
)
return data | ad09d4bbf9b120825cd12b06e8a404b51ab2d23e | 20,820 |
import numpy as np
def herd_closest_to_cluster(
x: np.ndarray,
y: np.ndarray,
t: np.ndarray,
features: np.ndarray,
nb_per_class: np.ndarray
) -> np.ndarray:
"""Herd the samples whose features is the closest to their class mean.
:param x: Input data (images, paths, etc.)
:param y: Labels of the data.
:param t: Task ids of the data.
:param features: Features of shape (nb_samples, nb_dim).
:param nb_per_class: Number of samples to herd per class.
:return: The sampled data x, y, t.
"""
if len(features.shape) != 2:
raise ValueError(f"Expected features to have 2 dimensions, not {len(features.shape)}d.")
indexes = []
for class_id in np.unique(y):
class_indexes = np.where(y == class_id)[0]
class_features = features[class_indexes]
# mean feature vector of the class: average over samples, shape (1, nb_dim)
class_mean = np.mean(class_features, axis=0, keepdims=True)
dist_to_mean = np.linalg.norm(class_mean - class_features, axis=1)
tmp_indexes = dist_to_mean.argsort()[:nb_per_class]
indexes.append(class_indexes[tmp_indexes])
indexes = np.concatenate(indexes)
return x[indexes], y[indexes], t[indexes] | 56512598efa343a974b8694e5d157e85b2179283 | 20,821 |
from typing import List
def generate_reference_config(config_entries: List[ConfigEntry]) -> dict:
"""
Generates a dictionary containing the expected config tree filled with default and example values
:return: a dictionary containing the expected config tree
"""
return config_entries_to_dict(config_entries, use_examples=True) | c00da6c96935845d4fb388251e6c5bff718cbe59 | 20,822 |
def reshape_tensor2list(tensor, n_steps, n_input):
"""Reshape tensor [?, n_steps, n_input] to lists of n_steps items with [?, n_input]
"""
# Prepare data shape to match `rnn` function requirements
# Current data input shape (batch_size, n_steps, n_input)
# Required shape: 'n_steps' tensors list of shape (batch_size, n_input)
#
# Permuting batch_size and n_steps
tensor = tf.transpose(tensor, perm=[1, 0, 2], name='transpose')
# Reshaping to (n_steps*batch_size, n_input)
tensor = tf.reshape(tensor, [-1, n_input], name='reshape')
# Split to get a list of 'n_steps' tensors of shape (batch_size, n_input)
tensor = tf.split(0, n_steps, tensor, name='split')
return tensor | dd42869bbc6d1e97a518cd133725c66408702afa | 20,823 |
def squeeze(xray_obj, dimensions, dimension=None):
"""Squeeze the dimensions of an xray object."""
if dimension is None:
dimension = [d for d, s in dimensions.iteritems() if s == 1]
else:
if isinstance(dimension, basestring):
dimension = [dimension]
if any(dimensions[k] > 1 for k in dimension):
raise ValueError('cannot select a dimension to squeeze out '
'which has length greater than one')
return xray_obj.indexed(**{dim: 0 for dim in dimension}) | abddfe5594600d39dcaece2c0be930d97c5aea2b | 20,825 |
def server_db(request, cp_server, api_server):
"""Enable database access for unit test vectors."""
db = database.get_connection(read_only=False, integrity_check=False)
api_server.db = db # inject into api_server
cursor = db.cursor()
cursor.execute('''BEGIN''')
util_test.reset_current_block_index(db)
request.addfinalizer(lambda: cursor.execute('''ROLLBACK'''))
request.addfinalizer(lambda: util_test.reset_current_block_index(db))
return db | 40edd06d3aade01b70366cfe9f700772a1f9f2d7 | 20,826 |
import six
import click
def click_exception(exc, error_format):
"""
Return a ClickException object with the message from an input exception
in a desired error message format.
Parameters:
exc (exception or string):
The exception or the message.
error_format (string):
The error format (see ``--error-format`` general option).
Returns:
click.ClickException: The new exception.
"""
if error_format == 'def':
if isinstance(exc, zhmcclient.Error):
error_str = exc.str_def()
else:
assert isinstance(exc, six.string_types)
error_str = "classname: None, message: {msg}".format(msg=exc)
else:
assert error_format == 'msg'
if isinstance(exc, zhmcclient.Error):
error_str = "{exc}: {msg}".format(
exc=exc.__class__.__name__, msg=exc)
else:
assert isinstance(exc, six.string_types)
error_str = exc
new_exc = click.ClickException(error_str)
new_exc.__cause__ = None
return new_exc | 8ebbaf6fc42ac5ef989c0afaac770418aca3b9b1 | 20,827 |
from typing import Optional
import json
import asyncio
import time
import re
def build(program_code: str, data: Data = frozendict(), random_seed: Optional[int] = None) -> Model:
"""Build (compile) a Stan program.
Arguments:
program_code: Stan program code describing a Stan model.
data: A Python dictionary or mapping providing the data for the
model. Variable names are the keys and the values are their
associated values. Default is an empty dictionary, suitable
for Stan programs with no `data` block.
random_seed: Random seed, a positive integer for random number
generation. Used to make sure that results can be reproduced.
Returns:
Model: an instance of Model
Notes:
C++ reserved words and Stan reserved words may not be used for
variable names; see the Stan User's Guide for a complete list.
"""
# `data` must be JSON-serializable in order to send to httpstan
data = json.loads(DataJSONEncoder().encode(data))
async def go():
io = ConsoleIO()
# hack: use stdout instead of stderr because httpstan silences stderr during compilation
building_output = io.section().output
if not io.supports_ansi():
building_output.write("<comment>Building...</comment>")
async with stan.common.HttpstanClient() as client:
# Check to see if model is in cache.
model_name = httpstan.models.calculate_model_name(program_code)
resp = await client.post(f"/{model_name}/params", json={"data": data})
model_in_cache = resp.status != 404
task = asyncio.create_task(client.post("/models", json={"program_code": program_code}))
start = time.time()
while True:
done, pending = await asyncio.wait({task}, timeout=0.1)
if done:
break
if io.supports_ansi():
building_output.clear()
building_output.write(f"<comment>Building:</comment> {time.time() - start:0.1f}s")
building_output.clear() if io.supports_ansi() else building_output.write("\n")
# now that httpstan has released stderr, we can use error_output again
building_output = io.section().error_output
resp = task.result()
if resp.status != 201:
match = re.search(r"""ValueError\(['"](.*)['"]\)""", resp.json()["message"])
if not match: # unknown error, should not happen
raise RuntimeError(resp.json()["message"])
exception_body = match.group(1).encode().decode("unicode_escape")
error_type_match = re.match(r"(Semantic|Syntax) error", exception_body)
if error_type_match:
error_type = error_type_match.group(0)
exception_body_without_first_line = exception_body.split("\n", 1)[1]
building_output.write_line(f"<info>Building:</info> <error>{error_type}:</error>")
building_output.write_line(f"<error>{exception_body_without_first_line}</error>")
raise ValueError(error_type)
else:
raise RuntimeError(exception_body)
building_output.clear() if io.supports_ansi() else building_output.write("\n")
if model_in_cache:
building_output.write("<info>Building:</info> found in cache, done.")
else:
building_output.write(f"<info>Building:</info> {time.time() - start:0.1f}s, done.")
assert model_name == resp.json()["name"]
if resp.json().get("stanc_warnings"):
io.error_line("<comment>Messages from <fg=cyan;options=bold>stanc</>:</comment>")
io.error_line(resp.json()["stanc_warnings"])
resp = await client.post(f"/{model_name}/params", json={"data": data})
if resp.status != 200:
raise RuntimeError(resp.json()["message"])
params_list = resp.json()["params"]
assert len({param["name"] for param in params_list}) == len(params_list)
param_names, dims = zip(*((param["name"], param["dims"]) for param in params_list))
constrained_param_names = sum((tuple(param["constrained_names"]) for param in params_list), ())
return Model(model_name, program_code, data, param_names, constrained_param_names, dims, random_seed)
try:
return asyncio.run(go())
except KeyboardInterrupt:
return | c00f3af45b4f4d65b056653fff865246bf0ddd22 | 20,828 |
def _exec_with_broadcasting(func, *args, **keywords):
"""Main function to broadcast together the shapes of the input arguments
and return results with the broadcasted shape."""
# Identify arguments needing broadcasting
arg_ranks = []
arg_indices = []
for k in range(len(args)) + keywords.keys():
rank = func.BROADCAST_RANKS.get(k, None)
if rank is None: continue
# Get argument
if type(k) == int:
arg = args[k]
else:
arg = keywords[k]
# Ignore args that are not arrays
if not isinstance(arg, np.ndarray): continue
# Determine leading shape, if any
if rank == 0:
shape = arg.shape
else:
shape = arg.shape[:rank]
if shape == (): continue
if shape == (1,): continue
arg_ranks.append(rank)
arg_indices.append(k)
# Call function now if iteration is not needed
if not arg_indices:
return func.__call__(*args, **keywords)
# Broadcast the arrays
cspyce1.chkin(func.array.__name__)
(broadcasted_shape, reshaped_args) = _broadcast_arrays(arg_ranks, args)
if cspyce1.failed():
cspyce1.chkout(func.array.__name__)
return None
# Update the argument list with flattened arrays
args = list(args)
for (k,reshaped_arg) in zip(arg_indices, reshaped_args):
flattened_arg = np.ravel(reshaped_arg)
if type(k) == int:
args[k] = flattened_arg
else:
keywords[k] = flattened_arg
# Execute the function
results = func.__call__(*args, **keywords)
cspyce1.chkout(func.array.__name__)
if cspyce1.failed():
return results
# Reshape the results
if isinstance(results, np.ndarray):
return np.reshape(results, broadcasted_shape)
reshaped_results = []
for result in results:
reshaped_results.append(np.reshape(result, broadcasted_shape))
return reshaped_results | 920cdaa1aae4eeb61467e4ca53647a030181a6a4 | 20,829 |
def compose_paths(path_0, path_1):
"""
The binary representation of a path is a 1 (which means "stop"), followed by the
path as binary digits, where 0 is "left" and 1 is "right".
Look at the diagram at the top for these examples.
Example: 9 = 0b1001, so right, left, left
Example: 10 = 0b1010, so left, right, left
How it works: we write both numbers as binary. We ignore the terminal in path_0, since it's
not the terminating condition anymore. We shift path_1 enough places to OR in the rest of path_0.
Example: path_0 = 9 = 0b1001, path_1 = 10 = 0b1010.
Shift path_1 three places (so there is room for 0b001) to 0b1010000.
Then OR in 0b001 to yield 0b1010001 = 81, which is right, left, left, left, right, left.
"""
mask = 1
temp_path = path_0
while temp_path > 1:
path_1 <<= 1
mask <<= 1
temp_path >>= 1
mask -= 1
path = path_1 | (path_0 & mask)
return path | cffb984c5bacf16691648e0910988495149087ad | 20,830 |
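A quick sanity check of the worked example from the docstring: composing path_0 = 9 (0b1001) with path_1 = 10 (0b1010) yields 81 (0b1010001).

assert compose_paths(9, 10) == 0b1010001 == 81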
def combine(img1, img2, out_path, write=True):
"""combine(img1, img2, out_path, write=True)
Combines the data of two PyifxImages, ImageVolumes, or ImageLists to form new PyifxImages.
:type img1: pyifx.misc.PyifxImage, pyifx.misc.ImageVolume, list
:param img1: The first image to be added to the combination.
:type img2: pyifx.misc.PyifxImage, pyifx.misc.ImageVolume, list
:param img2: The second image to be added to the combination. Arguments of type ImageVolume and list can be used in conjunction, but images of type PyifxImage must be used together.
:type out_path: str
:param out_path: The path that the combine image(s) will be written to.
:type write: bool
:param write: Whether to write the image or not.
:return: PyifxImage instance, ImageVolume instance, or list with elements of type PyifxImage
:rtype: pyifx.misc.PyifxImage, pyifx.misc.ImageVolume, list
"""
INTERNAL._type_checker(img1, [PyifxImage, ImageVolume, list])
INTERNAL._type_checker(img2, [PyifxImage, ImageVolume, list])
INTERNAL._type_checker(out_path, [str])
INTERNAL._type_checker(write, [bool])
return INTERNAL._combine_handler(img1, img2, out_path, write=write) | ccdee10b6d90ea9c1274f30d3c90bbe0f9e041ce | 20,831 |
import math
import numpy as np
def invert_index(i, window, step):
"""Convert truncated squareform index back into row, col, and slice index
Task indexing for LD pruning is based on several optimizations that utilize a
cyclic, truncated squareform pattern for pairwise comparisons (between rows). This pattern
is primarily controlled by window and step parameters, where an example for window = 4 and
step = 3 would look like this:
row index row indexes of other rows to compare to
| |
0 | 1 2 3 4
1 | 2 3 4
2 | 3 4
3 | 4 5 6 7
4 | 5 6 7
5 | 6 7
6 | 7 8 9 10
... and so on ...
The parameter (`i`) indexes these comparisons where in the above, `i` = 0
corresponds to the comparison between rows 0 and 1, `i` = 1 to rows 0 and 2, `i` = 4
to rows 1 and 2, etc. This method converts this comparison index back into the
cycle number (arbitrarily called a "slice") as well as offsets within that cycle for the rows
being compared. The slice number itself indexes some row in the original array
and the offsets can be used to identify comparisons from that row index.
Examples for the same case above for given comparison index values are:
index -> (row, col, slice)
0 -> (0, 1, 0) -
1 -> (0, 2, 0) |
2 -> (0, 3, 0) |
3 -> (0, 4, 0) |
4 -> (1, 2, 0) |--> One "slice" (i.e. one cycle)
5 -> (1, 3, 0) |
6 -> (1, 4, 0) |
7 -> (2, 3, 0) |
8 -> (2, 4, 0) -
9 -> (0, 1, 1) # The pattern repeats here
Parameters
----------
i : int
Comparison index
window : int
Window size used to define pairwise comparisons
step : int
Step size used to define pairwise comparisons
Returns
-------
(i, j, s) : tuple
i = offset from slice (`s`) to first row in comparison
j = offset from slice (`s`) to second row in comparison
s = slice number/index
"""
assert window >= step
# Coerce to large float to avoid potential int overflow
window = np.float64(window)
step = np.float64(step)
# Number of pairs in a "slice" = window + (window - 1) + ... + (window - step)
p = _intsum(window) - _intsum(window - step)
# Calculate slice number (`s`) and offset into that slice (`k`)
s, k = np.int64(i // p), np.int64(i % p)
# Invert squareform index
# See: https://stackoverflow.com/questions/27086195/linear-index-upper-triangular-matrix
n = window + 1 # The "n" in this case is the size of the window + 1 since self comparisons are ignored
i = np.int64(n - 2 - math.floor(math.sqrt(-8 * k + 4 * n * (n - 1) - 7) / 2.0 - 0.5))
j = np.int64(k + i + 1 - n * (n - 1) / 2.0 + (n - i) * ((n - i) - 1) / 2.0)
assert i >= 0
assert j >= 0
assert s >= 0
return i, j, s | 1acb4ef02a7158e584af5231a120f0a81af33bc4 | 20,832 |
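A small sketch reproducing the docstring mapping for window=4, step=3; it assumes the module-level helper _intsum(n) used above returns the triangular number n*(n+1)//2.

for idx, expected in [(0, (0, 1, 0)), (4, (1, 2, 0)), (9, (0, 1, 1))]:
    assert invert_index(idx, window=4, step=3) == expected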
def group_masses(ip, dm: float = 0.25):
"""
Groups masses in an isotope pattern, looking for differences in m/z greater than the specified delta.
:param ip: a paired list of [[mz values], [intensity values]]
:param dm: Delta for looking +/- within
:return: blocks grouped by central mass
:rtype: list
"""
num = 0
out = [[[], []]]
for ind, val in enumerate(ip[0]):
out[num][0].append(ip[0][ind])
out[num][1].append(ip[1][ind])
try:
if ip[0][ind + 1] - ip[0][ind] > dm:
num += 1
out.append([[], []])
except IndexError:
continue
return out | 918fc4f20fee7c2955218e3c435f9e672dc55f7d | 20,833 |
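A usage sketch: peaks separated by more than dm (default 0.25 m/z) end up in separate groups.

pattern = [[100.00, 100.05, 102.00], [10.0, 5.0, 1.0]]
print(group_masses(pattern))
# [[[100.0, 100.05], [10.0, 5.0]], [[102.0], [1.0]]]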
def save_user_labels(*args):
"""
save_user_labels(func_ea, user_labels)
Save user defined labels into the database.
@param func_ea: the entry address of the function (C++: ea_t)
@param user_labels: collection of user defined labels (C++: const
user_labels_t *)
"""
return _ida_hexrays.save_user_labels(*args) | a4a1a9ba4e37cd7f6e79efed5d9070bcdcfa8c5f | 20,834 |
def add_play():
"""Adds a new play"""
# only proceed for authenticated users (get_id() is None for anonymous users)
if flask_login.current_user.get_id():
play_json = request.json
if request.json is None and request.data:
play_json = request.data
if play_json:
try:
# insert the created by information in play
print('play_json is %s' % type(play_json))
play_obj = play_json
if isinstance(play_json, basestring):
play_obj = json.loads(play_json)
play_obj['created_by'] = '%s' % flask_login.current_user.get_id()
play_json = json.dumps(play_obj)
#
new_play = get_db().add_play_from_json(play_json)
return jsonify(msg='added play %s' % new_play.id,
id=new_play.id,
data=new_play.to_json()), 201
except StructureError, error:
return jsonify('BAD JSON %s: %s' % (error, play_json)), 400
else:
return jsonify('Failed to find JSON data in your POST'), 404
return jsonify('You must be logged in to add a play'), 401 | 7e3e71bb007805a9c379b18c9ce1cce737ed30c9 | 20,835 |
def load_string_list(file_path, is_utf8=False):
"""
Load string list from mitok file
"""
try:
with open(file_path, encoding='latin-1') as f:
if f is None:
return None
l = []
for item in f:
item = item.strip()
if len(item) == 0:
continue
l.append(item)
except IOError:
print('open error %s' % file_path)
return None
else:
return l | 600c2678fdcdf6d5fa4894dd406f74c1ae4e5a96 | 20,837 |
def flash_dev(
disk=None, image_path=None, copy_method="default", port=None, program_cycle_s=4
):
"""Flash a firmware image to a device.
Args:
disk: Switch -d <disk>.
image_path: Switch -f <image_path>.
copy_method: Switch -c <copy_method> (default: shell).
port: Switch -p <port>.
program_cycle_s: Sleep time.
"""
if copy_method == "default":
copy_method = "shell"
result = False
result = host_tests_plugins.call_plugin(
"CopyMethod",
copy_method,
image_path=image_path,
serial=port,
destination_disk=disk,
)
sleep(program_cycle_s)
return result | 6ee569903f3d713fff620e122b4592ca0ed42cde | 20,838 |
def _get_bucket_and_object(gcs_blob_path):
"""Extract bucket and object name from a GCS blob path.
Args:
gcs_blob_path: path to a GCS blob
Returns:
The bucket and object name of the GCS blob
Raises:
ValueError: If gcs_blob_path parsing fails.
"""
if not gcs_blob_path.startswith(_GCS_PATH_PREFIX):
raise ValueError(
f'GCS blob paths must start with gs://, got {gcs_blob_path}')
path = gcs_blob_path[len(_GCS_PATH_PREFIX):]
parts = path.split('/', 1)
if len(parts) < 2:
raise ValueError(
'GCS blob paths must be in format gs://bucket-name/object-name, '
f'got {gcs_blob_path}')
return parts[0], parts[1] | affda5501fbbc932c716d830226dec1c56271294 | 20,839 |
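A usage sketch, assuming the module constant _GCS_PATH_PREFIX is "gs://":

bucket, blob = _get_bucket_and_object("gs://my-bucket/path/to/object.txt")
# bucket == "my-bucket", blob == "path/to/object.txt"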
def dq_data(request):
"""Main home method and view."""
try:
cases = []
sdate, edate = None, None
sts = {0: 'Pending', 1: 'Open', 2: 'Closed'}
# Conditions
qa = request.GET.get('q_aspect')
va = request.GET.get('variance')
age = request.GET.get('age')
from_date = request.GET.get('from_date')
to_date = request.GET.get('to_date')
org_unit = request.GET.get('org_unit')
if from_date and to_date:
sdate = convert_date(from_date)
edate = convert_date(to_date)
cage = int(age) if age else 0
vid = int(va) if va else 0
qid = int(qa) if qa else 0
q2 = Q(case_category_id__in=('CTRF', 'CCCT'), age__lt=6)
q3 = Q(case_category_id__in=('CSAB', 'CSHV', 'CCCM', 'CORP'),
age__lt=11)
if qa:
acases = RPTCaseLoad.objects.filter(is_void=False)
if qid == 1:
acases = acases.filter(
Q(age__gte=25) | Q(dob__isnull=True) | Q(age__lt=0))
elif qid == 2:
acases = acases.filter(
Q(case_category_id='CDIS',
age__gt=15) | Q(case_category_id='CSIC',
age__gt=18) | q2 | q3)
elif qid == 3:
acases = acases.filter(
case_category_id__in=('CSHV', 'CSCS'), sex_id='SMAL')
elif qid == 4:
acases = acases.filter(
case_status=1, intervention__isnull=True)
else:
acases = RPTCaseLoad.objects.filter(
Q(age__gte=25) | Q(dob__isnull=True))
if vid == 1:
acases = acases.filter(age=cage)
elif vid == 2:
acases = acases.filter(age__gt=cage)
elif vid == 3:
acases = acases.filter(age__lt=cage)
if edate and sdate:
acases = acases.filter(case_date__range=(sdate, edate))
if org_unit:
acases = acases.filter(org_unit_id=org_unit)
else:
if not request.user.is_superuser:
acases = acases.filter(org_unit_id=org_unit)
for case in acases[:1000]:
cs = case.case_status
fname = case.case.person.first_name
sname = case.case.person.surname[0]
o_name = case.case.person.other_names
oname = o_name[0] if o_name else ''
dt = {"cpims_id": case.case.person_id}
dt['age'] = case.age
dt['case_category'] = case.case_category
dt['case_date'] = case.case_date
dt['sex'] = case.sex
dt['case_status'] = sts[cs] if cs in sts else 'Open'
dt['dob'] = case.dob
dt['org_unit'] = case.org_unit_name
dt['intervention'] = case.intervention
dt['org_unit'] = case.org_unit_name
dt['names'] = '%s %s%s' % (fname, sname, oname)
cases.append(dt)
result = {"data": cases}
return JsonResponse(result, content_type='application/json',
safe=False)
except Exception as e:
print('error - %s' % (e))
raise e
else:
pass | f1270dc0ae1b2a589d3995fe6bc0a9bb2199d5ed | 20,840 |
from typing import Protocol
def create_round_model(
protocol: Protocol,
ber: float,
n_tags: int) -> 'RoundModel':
"""
Factory function for creating round model.
This routine is cached, so calling it multiple time won't add much
overhead.
Parameters
----------
protocol : Protocol
ber : float
n_tags : int
Returns
-------
model : RoundModel
"""
return RoundModel(protocol, ber=ber, n_tags=n_tags) | 2df6387abd189e5364559bb93547c789cf57f8fa | 20,841 |
def local_maxima(a_list):
"""
Takes a NoteList object.
Returns a list of tuples of the form returned by note_onsets().
Each of these (int: bar #, float: beat #) tuples will represent the onset
of a note that is a local maximum in the melody in a_list.
"""
return local_extremities(a_list, maxima=True) | c239d91c341ee6a22cf621b13fb1cdafbe8b7b54 | 20,842 |
def get_fitted_model(data: pd.DataFrame, dataframe: pd.DataFrame) -> CTGAN:
""" The function get_fitted_model uses a CTGAN Checkpoint (see chapter about checkpoints),
to load a trained CTGAN model if one is available with the desired hyperparameters, or
train a new one if none is available. The function then returns the trained CTGAN model.
The CTGAN model created here uses a 'Positive' constraint for the dataframe column 'duration',
which contains the duration of each activity. The 'reject_sampling' strategy is used as
handling strategy for this constraint.
The function logs whether a pre-trained model was loaded or a new one was generated.
"""
cp = CTGANCheckpoint(
config.get_dataset_basename(), config.EPOCHS_CTGAN, config.ENABLED_DP_CTGAN, "{:.1f}".format(config.EPSILON_CTGAN))
return cp.load_if_exists_else_generate(config.RETRAIN_CTGAN, _fit_ctgan, data, dataframe) | c95c8b913f6411e0d0003ae5c2e126305306082b | 20,843 |
import math
def point_based_matching(point_pairs):
"""
This function is based on the paper "Robot Pose Estimation in Unknown Environments by Matching 2D Range Scans"
by F. Lu and E. Milios.
:param point_pairs: the matched point pairs [((x1, y1), (x1', y1')), ..., ((xi, yi), (xi', yi')), ...]
:return: the rotation angle and the 2D translation (x, y) to be applied for matching the given pairs of points
"""
x_mean = 0
y_mean = 0
xp_mean = 0
yp_mean = 0
n = len(point_pairs)
if n == 0:
return None, None, None
for pair in point_pairs:
(x, y), (xp, yp) = pair
x_mean += x
y_mean += y
xp_mean += xp
yp_mean += yp
x_mean /= n
y_mean /= n
xp_mean /= n
yp_mean /= n
s_x_xp = 0
s_y_yp = 0
s_x_yp = 0
s_y_xp = 0
for pair in point_pairs:
(x, y), (xp, yp) = pair
s_x_xp += (x - x_mean)*(xp - xp_mean)
s_y_yp += (y - y_mean)*(yp - yp_mean)
s_x_yp += (x - x_mean)*(yp - yp_mean)
s_y_xp += (y - y_mean)*(xp - xp_mean)
rot_angle = math.atan2(s_x_yp - s_y_xp, s_x_xp + s_y_yp)
translation_x = xp_mean - (x_mean*math.cos(rot_angle) - y_mean*math.sin(rot_angle))
translation_y = yp_mean - (x_mean*math.sin(rot_angle) + y_mean*math.cos(rot_angle))
return rot_angle, translation_x, translation_y | 2d691bbf04d14e3e5b0f9273a7501d934bd0eef4 | 20,844 |
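A self-check sketch (uses the math module imported above): transform a few points by a known rotation and translation and confirm the closed form recovers them for noiseless matched pairs.

angle, tx, ty = 0.3, 1.0, -2.0
src = [(0.0, 0.0), (1.0, 0.0), (0.0, 1.0), (2.0, 3.0)]
dst = [(x * math.cos(angle) - y * math.sin(angle) + tx,
        x * math.sin(angle) + y * math.cos(angle) + ty) for x, y in src]
est_angle, est_tx, est_ty = point_based_matching(list(zip(src, dst)))
# est_angle ~ 0.3, est_tx ~ 1.0, est_ty ~ -2.0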
def kb_ids2known_facts(kb_ids):
"""Creates list of all known facts from kb dict"""
facts = set()
for struct in kb_ids:
arrays = kb_ids[struct][0]
num_facts = len(arrays[0])
for i in range(num_facts):
fact = [x[i] for x in arrays]
facts.add(tuple(fact))
return facts | 8dd7f86c8b983f4dffa79a8229c4700a4182c710 | 20,845 |
def PUtilAvgT (inUV, outUV, err, scratch=False, timeAvg=1.0):
""" Average A UV data set in Time
returns Averaged UV data object
inUV = Python UV object to copy
Any selection editing and calibration applied before average.
outUV = Predefined UV data if scratch is False, ignored if
scratch is True.
err = Python Obit Error/message stack
scratch = True if this is to be a scratch file (same type as inUV)
timeAvg = Averaging time in min
"""
################################################################
if inUV.myClass=='AIPSUVData':
raise TypeError("Function unavailable for "+inUV.myClass)
# Checks
if not inUV.UVIsA():
raise TypeError("inUV MUST be a Python Obit UV")
if ((not scratch) and (not outUV.UVIsA())):
raise TypeError("outUV MUST be a Python Obit UV")
if not OErr.OErrIsA(err):
raise TypeError("err MUST be an OErr")
#
# Save parameter
dim = [1,1,1,1,1]
inInfo = PGetList(inUV) #
InfoList.PAlwaysPutFloat (inInfo, "timeAvg", dim, [timeAvg])
# Create output for scratch
if scratch:
outUV = UV("None")
outUV.me = Obit.UVUtilAvgT(inUV.me, scratch, outUV.me, err.me)
if err.isErr:
OErr.printErrMsg(err, "Error averaging UV data")
# Get scratch file info
if scratch:
PUVInfo (outUV, err)
return outUV
# end PUtilAvgT | a1e5931a9d0e7340394e3a6227aa1a3f31e624ab | 20,847 |
def get_table_from_alter_table(line, alter_expr):
"""
Parse the content and return full qualified schema.table from the line if
schema provided, else return the table name.
Fact: if schema name or table name contains any special chars, each should be
double quoted already in dump file.
"""
dot_separator_idx = line.find('.')
last_double_quote_idx = line.rfind('"')
has_schema_table_fmt = True if dot_separator_idx != -1 else False
has_special_chars = True if last_double_quote_idx != -1 else False
if not has_schema_table_fmt and not has_special_chars:
return line[len(alter_expr):].split()[0]
elif has_schema_table_fmt and not has_special_chars:
full_table_name = line[len(alter_expr):].split()[0]
_, table = split_fqn(full_table_name)
return table
elif not has_schema_table_fmt and has_special_chars:
return line[len(alter_expr) + 1 : last_double_quote_idx + 1]
else:
if dot_separator_idx < last_double_quote_idx:
# table name is double quoted
full_table_name = line[len(alter_expr) : last_double_quote_idx + 1]
else:
# only schema name double quoted
ending_space_idx = line.find(' ', dot_separator_idx)
full_table_name = line[len(alter_expr) : ending_space_idx]
_, table = split_fqn(full_table_name)
return table | af2bb25f240b2f9b1a171e4b7e2983000ca0d545 | 20,848 |
def bind_port(socket, ip, port):
""" Binds the specified ZMQ socket. If the port is zero, a random port is
chosen. Returns the port that was bound.
"""
connection = 'tcp://%s' % ip
if port <= 0:
port = socket.bind_to_random_port(connection)
else:
connection += ':%i' % port
socket.bind(connection)
return port | 5613bae6726e2f006706b104463917e48d7ab7ca | 20,850 |
def update_target_graph(actor_tvars, target_tvars, tau):
""" Updates the variables of the target graph using the variable values from the actor, following the DDQN update
equation. """
op_holder = list()
# .assign() is performed on target graph variables with discounted actor graph variable values
for idx, variable in enumerate(target_tvars):
op_holder.append(
target_tvars[idx].assign(
(variable.value() * tau) + ((1 - tau) * actor_tvars[idx].value())
)
)
return op_holder | 15f0d192ff150c0a39495b0dec53f18a8ae01664 | 20,851 |
from typing import Callable, TypeVar
T = TypeVar("T")
def safe(function: Callable[..., T]) -> Callable[..., Result[T, Exception]]:
"""Wraps a function that may raise an exception.
e.g.:
@safe
def bad() -> int:
raise Exception("oops")
"""
def wrapped(*args, **kwargs) -> Result[T, Exception]:
try:
return Ok(function(*args, **kwargs))
except Exception as e:
return Err(e)
return wrapped | f33ce73ef9eed48e0585037f655075d78f9f6a09 | 20,852 |
def strtime(millsec, form="%i:%02i:%06.3f"):
"""
Time formatting function
Args:
millsec(int): Number of milliseconds to format
Returns:
(string) Formatted string
"""
fc = form.count("%")
days, remainder = divmod(millsec, 86400000)
hours, remainder = divmod(remainder, 3600000)
minutes, milliseconds = divmod(remainder, 60000)
seconds = float(milliseconds) / 1000
var = {1: (seconds), 2: (minutes, seconds), 3: (hours, minutes, seconds),
4: (days, hours, minutes, seconds)}
return form % var[fc] | 9a1cff92086491941d8b27857169bf7744da8324 | 20,853 |
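A usage sketch with the chained divmod fix above: 3,661,000 ms is 1 hour, 1 minute and 1 second.

print(strtime(3661000))  # "1:01:01.000"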
def parallel_categories():
"""Parallel Categories Plot."""
mean_neighborhood_sfo = sfo_data.groupby(["neighborhood"]).mean()
mean_sale_price_sfo = mean_neighborhood_sfo.sort_values("sale_price_sqr_foot", ascending=False)
sfo = mean_sale_price_sfo.head(10)
a = sfo.reset_index()
parallel_categories_top_10 = px.parallel_categories(a, color="sale_price_sqr_foot", color_continuous_scale=px.colors.sequential.Inferno, title='Average House Value/Neighborhood', labels={'neighborhood': "Neighborhood", 'sale_price_sqr_foot':'Sales Price/Square Foot', 'housing_units':'Housing Units', 'gross_rent':'Gross Rent'})
return parallel_categories_top_10 | fc03f4902f62a2e123e33012d58c58b4d7e55ddc | 20,854 |
def close_project(id_, **kwargs):
"""Close a project
:param id_: The ID of the project object to be updated
:type id_: str
:rtype: ProjectSerializer
"""
proj = get_project_object(id_)
check_project_permission(proj, kwargs["token_info"])
if proj.owner != kwargs["user"]:
raise connexion.ProblemException(status=403, title="Permission Denied",
detail="Doesn't have enough permissions to take this action")
proj.state = project_pb2.STATE.CLOSED
stub = get_projects_services_stub()
response = stub.Update(proj)
return ProjectSerializer.from_dict(util.deserialize_protobuf(response)) | 7607e9627bc9a8eddc629e0deb4a4d1c1cd3fac9 | 20,856 |
import string
def int2base(x, base):
"""
Method to convert an int to a base
Source: http://stackoverflow.com/questions/2267362
"""
digs = string.digits + string.ascii_uppercase
if x < 0: sign = -1
elif x == 0: return digs[0]
else:
sign = 1
x *= sign
digits = []
while x:
digits.append(digs[x % base])
x = int(x / base)
if sign < 0:
digits.append('-')
digits.reverse()
return ''.join(digits) | f9277608cddea0d5590d294621afdb3b7af0fb34 | 20,859 |
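Usage sketch:

print(int2base(255, 16))  # "FF"
print(int2base(-10, 2))   # "-1010"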
import numpy as np
def geometric(X):
"""
If x1,x2,...xn ~iid~ GEO(p) then the MLE is 1 / X-bar
Parameters
----------
X : array_like
Returns:
----------
geo_mle : MLE calculation for p-hat for GEO(p)
References
----------
[1] Casella, G., Berger, R. L., "Statistical Inference"
Belmont (California): Brooks/Cole Cengage Learning (2017)
[2] Tone, MAT 562: Mathematical Statistics notes, U of L
"""
_input = np.array(X)
n = len(_input)
discrete_bool = discrete_check(_input)
geo_mle = 1 / np.mean(X)
if discrete_bool == True:
return geo_mle
else:
raise ValueError("X must be a discrete data set (only integers)") | 014072a4c63f9f3d3fe067367b65689a21aa3799 | 20,860 |
def calc_dH(
e_per_atom,
stoich=None,
num_H_atoms=0,
):
"""
The original method is located in:
F:\Dropbox\01_norskov\00_git_repos\PROJ_IrOx_Active_Learning_OER\data\proj_data_irox.py
Based on a E_DFT/atom of -7.047516 for rutile-IrO2
See the following dir for derivation:
PROJ_IrOx_Active_Learning_OER/workflow/energy_treatment_deriv/calc_references
"""
# | - calc_dH
o_ref = -4.64915959
ir_metal_fit = -9.32910211636731
h_ref = -3.20624595
if stoich == "AB2":
dH = (2 + 1) * e_per_atom - 2 * o_ref - ir_metal_fit
dH_per_atom = dH / 3.
elif stoich == "AB3":
dH = (3 + 1) * e_per_atom - 3 * o_ref - ir_metal_fit
dH_per_atom = dH / 4.
elif stoich == "IrHO3" or stoich == "IrO3H" or stoich == "iro3h" or stoich == "iroh3":
dH = (3 + 1 + 1) * e_per_atom - 3 * o_ref - ir_metal_fit - h_ref
dH_per_atom = dH / 5.
return(dH_per_atom)
#__| | 1d6a6d3ed0581662d718fa0342fca76408d0a2d8 | 20,862 |
def _make_asset_build_reqs(asset):
"""
Prepare requirements and inputs lists and display it
:params str asset: name of the asset
"""
def _format_reqs(req_list):
"""
:param list[dict] req_list:
:return list[str]:
"""
templ = "\t{} ({})"
return [templ.format(req[KEY], req[DESC]) if DEFAULT not in req
else (templ + "; default: {}").format(req[KEY], req[DESC], req[DEFAULT]) for req in req_list]
reqs_list = []
if asset_build_packages[asset][REQ_FILES]:
reqs_list.append("- files:\n{}".format("\n".join(_format_reqs(asset_build_packages[asset][REQ_FILES]))))
if asset_build_packages[asset][REQ_ASSETS]:
reqs_list.append("- assets:\n{}".format("\n".join(_format_reqs(asset_build_packages[asset][REQ_ASSETS]))))
if asset_build_packages[asset][REQ_PARAMS]:
reqs_list.append("- params:\n{}".format("\n".join(_format_reqs(asset_build_packages[asset][REQ_PARAMS]))))
_LOGGER.info("\n".join(reqs_list)) | 53b586aa00e596854ffd6292da9209c144701126 | 20,863 |
def gsl_eigen_herm_alloc(*args, **kwargs):
"""gsl_eigen_herm_alloc(size_t const n) -> gsl_eigen_herm_workspace"""
return _gslwrap.gsl_eigen_herm_alloc(*args, **kwargs) | 24884e2da582d28ad87ffba91ea9218b5f6f44f6 | 20,864 |
def cosine_similarity(array1, array2):
"""
Computes the cosine similarity between two arrays (returned negated, as a loss)
"""
# -sum(l2_norm(y_true) * l2_norm(y_pred))
return -dot(array1, array2)/(norm(array1)*norm(array2)) | 975dd0e25b70c93eff3c49ecc83def2bab2f84dd | 20,865 |
def DEW_T(Y, P, all_params):
"""
Y = list of molar fractions of vapor like [0.2, 0.8] or [0.1, 0.2, 0.7]
Summation of the Y list must be 1.0
P = Pressure in kPa
all_params = list of parameters for Antonie equations
example for all params:
all_params = [[A1, B1, C1],
[A2, B2, C2],
[A3, B3, C3]]
"""
# creating root finding function
def func(T):
return (P - DEW_P(Y, T, all_params)[0])
# solving and finding Temprature
solve = root(func, 20, method='lm')
T = solve['x'][0]
# Computing X, molar fractions of the liquid
X = DEW_P(Y, T, all_params)[1]
return T, X | 3185d9b65d8c29c3df7f28e317e8a0d349db0ce5 | 20,866 |
def plugin_last():
"""This function should sort after other plug-in functions"""
return "last" | d2d6c00bc8d987363bd4db0013950d9b3f524c2f | 20,867 |
def init_seg_table(metadata, tablename, segid_colname=cn.seg_id, chunked=True):
""" Specifies a table for tracking info about a segment. """
columns = [Column("id", BigInteger, primary_key=True),
Column(cn.seg_id, Integer, index=True),
Column(cn.size, Integer),
# Centroid coordinates
Column(cn.centroid_x, Float),
Column(cn.centroid_y, Float),
Column(cn.centroid_z, Float),
# Bounding box
Column(cn.bbox_bx, Integer),
Column(cn.bbox_by, Integer),
Column(cn.bbox_bz, Integer),
Column(cn.bbox_ex, Integer),
Column(cn.bbox_ey, Integer),
Column(cn.bbox_ez, Integer)]
if chunked:
# Chunk id - None if merged across chunks
columns.append(Column(cn.chunk_tag, Text, index=True))
return Table(tablename, metadata, *columns) | f82a5285506d87d0d977661fcd45a336dc185c00 | 20,868 |
def extract_shebang_command(handle):
"""
Extract the shebang_ command line from an executable script.
:param handle: A file-like object (assumed to contain an executable).
:returns: The command in the shebang_ line (a string).
The seek position is expected to be at the start of the file and will be
reset afterwards, before this function returns. It is not an error if the
executable contains binary data.
.. _shebang: https://en.wikipedia.org/wiki/Shebang_(Unix)
"""
try:
if handle.read(2) == b'#!':
data = handle.readline()
text = data.decode('UTF-8')
return text.strip()
else:
return ''
finally:
handle.seek(0) | 27174f96f2da3167cf7a7e28c4a2f1cec72c773c | 20,869 |
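A usage sketch with an in-memory binary file object:

import io
script = io.BytesIO(b"#!/usr/bin/env python\nprint('hi')\n")
print(extract_shebang_command(script))  # "/usr/bin/env python"
print(script.tell())                    # 0, the seek position is reset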
import re
def split_abstracts(ftm_df):
"""
Split the mail abstract (item content) into different mails.
This is required to find the 'novel' email, and the rest of the threat. We
create a new row for each email in the threat, but it keeps the ID of the
'novel email'. We add two boolean flags for is_novel, and
is_threat_starter.
Parameters
----------
ftm_df : pandas.DataFrame
FTM dataset with prettified abstracts.
Returns
-------
pandas.DataFrame
FTM dataset with a new row for each email, and flags for is_novel and
is_threat_starter.
"""
# Create a list of strings from novel email and its forwards or reactions
ftm_df['new_abstract'] = ftm_df[ftm_df.title.str.contains('RE ') | ftm_df.title.str.contains(
'FW ')].abstract.apply(lambda row: re.split(FILTER_SENDER, row))
# Create a list of is_novel. First email is novel (1), the rest is
# forwards or reactions (0). Similar for is_threat_starter
ftm_df['is_novel'] = ftm_df[ftm_df.title.str.contains('RE ') | ftm_df.title.str.contains(
'FW ')].new_abstract.apply(lambda row: [1] + [0] * (len(row) - 1))
ftm_df['is_threat_starter'] = ftm_df[ftm_df.title.str.contains('RE ') |
ftm_df.title.str.contains('FW ')
].new_abstract.apply(
lambda row: [0] * (len(row) - 1) + [1])
# explode the lists
ftm_df = ftm_df.explode(['is_novel', 'new_abstract', 'is_threat_starter'])
ftm_df = ftm_df.reset_index()
ftm_df.abstract = ftm_df.new_abstract.fillna(ftm_df.abstract)
return ftm_df.drop(columns=['new_abstract']) | 5725dda53a53e049fbcc7b42ac4fd3a51ec6c38a | 20,870 |
import pymel.core as pm
def get_weight_from_alias(blend_shape, alias):
"""
Given a blend shape node and an aliased weight attribute, return the index in .weight to the
alias.
"""
# aliasAttr lets us get the alias from an attribute, but it doesn't let us get the attribute
# from the alias.
existing_indexes = blend_shape.attr('weight').get(mi=True) or []
for idx in existing_indexes:
aliasName = pm.aliasAttr(blend_shape.attr('weight').elementByLogicalIndex(idx), q=True)
if aliasName == alias:
return idx
raise Exception('Couldn\'t find the weight index for blend shape target %s.%s' % (blend_shape, alias)) | 56c870c32ee2ec05af1580fac985e09ffd5e129c | 20,871 |
from typing import List
from typing import Tuple
from typing import Any
def __check_dependences_and_predecessors(pet: PETGraphX, out_dep_edges: List[Tuple[Any, Any, Any]],
parent_task: CUNode, cur_cu: CUNode):
"""Checks if only dependences to self, parent omittable node or path to target task exists.
Checks if node is a direct successor of an omittable node or a task node.
:param pet: PET Graph
:param out_dep_edges: list of outgoing edges
:param parent_task: parent cu of cur_cu
:param cur_cu: current cu node
:return True, if a violation has been found. False, otherwise.
"""
violation = False
# check if only dependencies to self, parent omittable node or path to target task exists
for e in out_dep_edges:
if pet.node_at(e[1]) == cur_cu:
continue
elif pet.node_at(e[1]).tp_omittable is True:
continue
elif check_reachability(pet, parent_task, cur_cu, [EdgeType.DATA]):
continue
else:
violation = True
# check if node is a direct successor of an omittable node or a task node
in_succ_edges = [(s, t, e) for (s, t, e) in pet.in_edges(cur_cu.id) if
e.etype == EdgeType.SUCCESSOR]
is_successor = False
for e in in_succ_edges:
if pet.node_at(e[0]).tp_omittable is True:
is_successor = True
elif pet.node_at(e[0]).tp_contains_task is True:
is_successor = True
if not is_successor:
violation = True
return violation | 1abdb9604fec48073b536fb7cf1e45a29dfea095 | 20,872 |
import json
def clone_master_track(obj, stdata, stindex, stduration):
"""
ghetto-clone ('deep copy') an object using JSON
populate subtrack info from CUE sheet
"""
newsong = json.loads(json.dumps(obj))
newsong['subsong'] = {'index': stindex, 'start_time': stdata['index'][1][0], 'duration': stduration}
newsong['tags']['artist'] = stdata.get('PERFORMER', newsong['tags'].get('artist'))
newsong['tags']['title'] = stdata.get('TITLE', newsong['tags'].get('title'))
newsong['tags']['tracknum'] = stindex
newsong['tags']['trackstr'] = stindex
return newsong | 6721a87abfc88d9dd75f597ff24caf5857be594a | 20,873 |
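A small example of how the cloner above might be called; the master-track dict and CUE-style subtrack data are made up for illustration:

```python
master = {
    'file': 'album.flac',
    'tags': {'artist': 'Various', 'title': 'Full Album', 'album': 'Compilation'},
}
stdata = {
    'index': {1: [125.0]},          # subtrack starts at 125 s
    'PERFORMER': 'Some Band',
    'TITLE': 'Second Song',
}
track = clone_master_track(master, stdata, stindex=2, stduration=210.0)
print(track['subsong'])             # {'index': 2, 'start_time': 125.0, 'duration': 210.0}
print(track['tags']['artist'])      # 'Some Band'
```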
def create_graph(num_islands, bridge_config):
"""
Helper function to create graph using adjacency list implementation
"""
adjacency_list = [list() for _ in range(num_islands + 1)]
for config in bridge_config:
source = config[0]
destination = config[1]
cost = config[2]
adjacency_list[source].append((destination, cost))
adjacency_list[destination].append((source, cost))
#print("adjacency_list",adjacency_list)
return adjacency_list | b961f5ee2955f4b8de640152981a7cede8ca80b0 | 20,874 |
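For illustration, three islands connected by two bridges with made-up costs:

```python
# (source island, destination island, cost)
bridge_config = [(1, 2, 4), (2, 3, 7)]
adj = create_graph(3, bridge_config)
print(adj[1])   # [(2, 4)]
print(adj[2])   # [(1, 4), (3, 7)]
```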
def distinct_extractors(count=True, active=True):
""" Tool to count unique number of predictors for each Dataset/Task """
active_datasets = ms.Dataset.query.filter_by(active=active)
superset = set([v for (v, ) in ms.Predictor.query.filter_by(active=True).filter(
ms.Predictor.dataset_id.in_(
active_datasets.with_entities('id'))).join(
ms.ExtractedFeature).distinct(
'extractor_name').values('extractor_name')])
res = {}
for en in superset:
for ds in active_datasets:
for t in ds.tasks:
name = f"{ds.name}_{t.name}"
if name not in res:
res[name] = {}
preds = ms.Predictor.query.filter_by(
dataset_id=ds.id, active=True).join(
ms.ExtractedFeature).filter_by(
extractor_name=en).distinct('feature_name')
if count:
r = preds.count()
else:
r = list(preds.values('name'))
res[name][en] = r
return res | 232c10cdb69f5499d927473a6ee6d99c940870c5 | 20,875 |
def get_timepoint( data, tp=0 ):
"""Returns the timepoint (3D data volume, lowest is 0) from 4D input.
You can save memory by using [1]:
nifti.dataobj[..., tp]
instead: see get_nifti_timepoint()
Works with loop_and_save().
Call directly, or with niftify().
Ref:
[1]: http://nipy.org/nibabel/images_and_memory.html
"""
# Replicating seg_maths -tp
tp = int(tp)
if len(data.shape) < 4:
print("Data has fewer than 4 dimensions. Doing nothing...")
output = data
else:
if data.shape[3] < tp:
print("Data has fewer than {0} timepoints in its 4th dimension.".format(tp))
output = data
else:
output = data[:,:,:,tp]
return output
# elif len(data.shape) > 4:
# print("Data has more than 4 dimensions! Assuming the 4th is time ...")
# End get_timepoint() definition | f5a718e5d9f60d1b389839fc0c637bee32b500bf | 20,876 |
def method_from_name(klass, method_name: str):
"""
Given an imported class, return the given method pointer.
:param klass: An imported class containing the method.
:param method_name: The method name to find.
:return: The method pointer
"""
try:
return getattr(klass, method_name)
except AttributeError:
raise NotImplementedError() | 97274754bd89ede62ee5940fca6c4763efdbb95c | 20,877 |
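A minimal sketch of the lookup above, using a throwaway class:

```python
class Greeter:
    @staticmethod
    def hello():
        return "hi"

fn = method_from_name(Greeter, "hello")
print(fn())   # -> "hi"
# method_from_name(Greeter, "bye") would raise NotImplementedError
```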
def get_querypage(site: Site, page: str, limit: int = 500):
"""
:type site Site
:type page str
:type limit int
:rtype: list[str]
"""
# http://poznan.wikia.com/api.php?action=query&list=querypage&qppage=Nonportableinfoboxes
# http://poznan.wikia.com/api.php?action=query&list=querypage&qppage=Mostlinkedtemplates
# http://poznan.wikia.com/api.php?action=query&list=querypage&qppage=AllInfoboxes
res = site.get(action='query', list='querypage', qppage=page, qplimit=limit)
return [
# (u'value', u'69'), (u'ns', 10), (u'title', u'Template:Crew TV')
entry['title']
for entry in res['query']['querypage']['results']
] | e4d05e4fb82697867261cc680a8497b8f80c97d4 | 20,878 |
import re
def parse_value(string: str) -> str:
"""Check if value is a normal string or an arrow function
Args:
string (str): Value
Returns:
str: Value if it's normal string else Function Content
"""
content, success = re.subn(r'^\(\s*\)\s*=>\s*{(.*)}$', r'\1', string)
if not success:
return string
return content | ead42d7f300c68b6978699473de8506794bb1ab4 | 20,879 |
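Two illustrative calls: a plain string passes through unchanged, while an arrow-function string has its body extracted:

```python
print(parse_value("plain value"))            # -> "plain value"
print(parse_value("() => { return 42; }"))   # -> " return 42; "
```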
def uniqify(seq, idfun=None):
"""Return only unique values in a sequence"""
# order preserving
if idfun is None:
def idfun(x):
return x
seen = {}
result = []
for item in seq:
marker = idfun(item)
if marker in seen:
continue
seen[marker] = 1
result.append(item)
return result | aaa6de6f3e28b0de9d23b921196591bea97a67d1 | 20,880 |
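Illustrative calls, with and without a key function:

```python
print(uniqify([3, 1, 3, 2, 1]))                              # [3, 1, 2]
print(uniqify(["Apple", "apple", "Pear"], idfun=str.lower))  # ['Apple', 'Pear']
```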
import numpy as np
def inside(Sv, r, r0, r1):
"""
Mask data inside a given range.
Args:
Sv (float): 2D array with data to be masked.
r (float): 1D array with range data.
r0 (int): Upper range limit.
r1 (int): Lower range limit.
Returns:
bool: 2D array mask (inside range = True).
"""
masku = np.ma.masked_greater_equal(r, r0).mask
maskl = np.ma.masked_less(r, r1).mask
idx = np.where(masku & maskl)[0]
mask = np.zeros((Sv.shape), dtype=bool)
mask[idx,:] = True
return mask | 8a953b30a3dbd1bda2ce8f37475088705492e0f2 | 20,881 |
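A small numeric sketch of the range mask above; the Sv and r values are made up:

```python
import numpy as np

Sv = np.zeros((5, 3))               # 5 range samples x 3 pings
r = np.array([0.0, 1.0, 2.0, 3.0, 4.0])
mask = inside(Sv, r, r0=1.0, r1=3.0)
print(mask[:, 0])                   # [False  True  True False False]
```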
from resistics.common import fs_to_string
from typing import Optional
def get_solution_name(
fs: float, tf_name: str, tf_var: str, postfix: Optional[str] = None
) -> str:
"""Get the name of a solution file"""
solution_name = f"{fs_to_string(fs)}_{tf_name.lower()}"
if tf_var != "":
tf_var = tf_var.replace(" ", "_")
solution_name = solution_name + f"_{tf_var}"
if postfix is None:
return solution_name + ".json"
return solution_name + "_" + postfix + ".json" | bbf11abaad0878462c10d8ab95af13edd900982c | 20,883 |
import re
def escape(message: str) -> str:
"""Escape tags which might be interpreted by the theme tokenizer.
Should be used when passing text from external sources to `theme.echo`.
"""
return re.sub(
rf"<(/?{TAG_RE})>",
r"\<\1>",
message,
) | daf91638c8e763489d2fa529e9f1b2ebfda48cfa | 20,884 |
async def normalize_message(app: FastAPI, message: Message) -> Message:
"""
Given a TRAPI message, updates the message to include a
normalized qgraph, kgraph, and results
"""
try:
merged_qgraph = await normalize_qgraph(app, message.query_graph)
merged_kgraph, node_id_map, edge_id_map = await normalize_kgraph(app, message.knowledge_graph)
merged_results = await normalize_results(message.results, node_id_map, edge_id_map)
return Message.parse_obj({
'query_graph': merged_qgraph,
'knowledge_graph': merged_kgraph,
'results': merged_results
})
except Exception as e:
logger.error(f'normalize_message Exception: {e}') | a6e0d3d4ab0590cfbdf643ec25c7bb23c7aaa8c4 | 20,885 |
def ec_elgamal_encrypt(msg, pk, symmalg):
"""
Computes a random b, derives a key from b*g and ab*g, then encrypts it using symmalg.
Input:
msg Plaintext message string.
pk Public key: a tuple (EC, ECPt, ECPt), that is (ec, generator g, a*g)
symmalg A callable that accepts two arguments, the key and the message.
symmalg(key, msg) should output a symmetric-enciphered ciphertext.
Output:
A tuple (ECPt, str), where the first element is actually b*g.
"""
ec, g, ag=pk
b=random_with_bytes(log2(ec._p)//4)
abg=b*ag
bg=b*g
k=ec_elgamal_derive_symm_key(bg, abg)
return (bg, symmalg(k, msg)) | bc2447377f45a756e325fbed47bc28c17f4a5707 | 20,886 |
import numpy as np
import matplotlib.pyplot as plt
def plot_confusion_matrix(cm, classes,
                          normalize=False,
                          cmap=plt.cm.YlGnBu):
    """
    This function prints and plots the confusion matrix.
    Normalization can be applied by setting `normalize=True`.
    """
    if normalize:
        # normalize each row so entries are fractions of the true class
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
    np.set_printoptions(precision=2)
    fig, ax = plt.subplots()
im = ax.imshow(cm, interpolation='nearest', cmap=cmap)
ax.figure.colorbar(im, ax=ax)
# We want to show all ticks...
ax.set(xticks=np.arange(cm.shape[1]),
yticks=np.arange(cm.shape[0]),
# ... and label them with the respective list entries
xticklabels=classes, yticklabels=classes,
ylabel='True',
# fontsize=16,
xlabel='Predicted')
# Loop over data dimensions and create text annotations.
fmt = '.2f'
thresh = cm.max() / 2.
for i in range(cm.shape[0]):
for j in range(cm.shape[1]):
ax.text(j, i, format(cm[i, j], fmt),
ha="center", va="center", fontsize=14,
color="white" if cm[i, j] > thresh else "black")
plt.xlim(-0.5, len(np.unique(classes))-0.5)
plt.ylim(len(np.unique(classes))-0.5, -0.5)
plt.xlabel("Predicted", fontsize=16)
plt.ylabel('True', fontsize=16)
plt.xticks(fontsize=14)
plt.yticks(fontsize=14)
plt.tight_layout()
return ax | 6dcd9dc8d832f28035e25025705a4d13600087d0 | 20,887 |
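A usage sketch with a made-up 2x2 confusion matrix:

```python
import numpy as np
import matplotlib.pyplot as plt

cm = np.array([[50, 3],
               [ 5, 42]])
plot_confusion_matrix(cm, classes=['negative', 'positive'])
plt.show()
```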
from pylab import linspace
def meshgrid(params):
"""Returns meshgrid X that can be used for 1D plotting.
params is what is returned by finess.params.util.read_params."""
assert(params['finess', 'ndims'] == 1)
mx = params['grid', 'mx']
xlow = params['grid', 'xlow']
xhigh = params['grid', 'xhigh']
dx = (xhigh-xlow) / float(mx)
X = linspace(xlow + 0.5*dx, xhigh - 0.5*dx, mx)
return X | 3320d8e1eab5b202152f1d90610e2f0d24adda1c | 20,888 |
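The params argument comes from finess.params.util.read_params; for illustration only, a plain dict keyed by (section, option) tuples behaves the same way here:

```python
params = {
    ('finess', 'ndims'): 1,
    ('grid', 'mx'): 4,
    ('grid', 'xlow'): 0.0,
    ('grid', 'xhigh'): 1.0,
}
X = meshgrid(params)
print(X)    # [0.125 0.375 0.625 0.875]
```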
def _pbe_p12(params, passphrase, hash_alg, cipher, key_size):
"""PKCS#12 cipher selection function for password-based encryption
This function implements the PKCS#12 algorithm for password-based
encryption. It returns a cipher object which can be used to encrypt
or decrypt data based on the specified encryption parameters,
passphrase, and salt.
"""
if (not isinstance(params, tuple) or len(params) != 2 or
not isinstance(params[0], bytes) or not params[0] or
not isinstance(params[1], int) or params[1] == 0):
raise KeyEncryptionError('Invalid PBES1 PKCS#12 encryption parameters')
salt, count = params
key = _pbkdf_p12(hash_alg, passphrase, salt, count, key_size, 1)
if cipher.block_size == 1:
cipher = cipher.new(key)
else:
iv = _pbkdf_p12(hash_alg, passphrase, salt, count,
cipher.block_size, 2)
cipher = _RFC1423Pad(cipher, key, iv)
return cipher | 711376c5f653bd0888b3c35e6885942cad0c4268 | 20,889 |
def add_settings_routes(app):
""" Create routes related to settings """
@app.route('/v1/rule_settings/', methods=['GET'])
@requires_login
@use_kwargs({
'agency_code': webargs_fields.String(required=True),
'file': webargs_fields.String(validate=webargs_validate.
OneOf(FILE_TYPES, error='Must be {}, or {}'.format(', '.join(FILE_TYPES[:-1]),
FILE_TYPES[-1])),
required=True)
})
def get_rule_settings(**kwargs):
""" Returns the rule settings based on the filters provided """
agency_code = kwargs.get('agency_code')
file = kwargs.get('file')
return list_rule_settings(agency_code, file)
@app.route('/v1/save_rule_settings/', methods=['POST'])
@requires_login
@use_kwargs({
'agency_code': webargs_fields.String(required=True),
'file': webargs_fields.String(validate=webargs_validate.
OneOf(FILE_TYPES, error='Must be {}, or {}'.format(', '.join(FILE_TYPES[:-1]),
FILE_TYPES[-1])),
required=True),
'errors': webargs_fields.List(webargs_fields.Dict),
'warnings': webargs_fields.List(webargs_fields.Dict)
})
def post_save_rule_settings(**kwargs):
""" Set the rule settings based on the rules provided """
agency_code = kwargs.get('agency_code')
file = kwargs.get('file')
errors = kwargs.get('errors', [])
warnings = kwargs.get('warnings', [])
return save_rule_settings(agency_code, file, errors, warnings) | 6a199d67e40232e72dc4dcafa42a155f2257b687 | 20,890 |
def delete_by_date_paste(date):
"""
    Deletes the paste entries older than a certain date. Note that it will delete any document/index type entered into
    it for elasticsearch; the paste restriction is due to postgresql
:return: True once
"""
    # Create a connection to the database (it seemed to want it in this case)
db = SQLAlchemy(app)
# Add the start of the day to ensure anything older gets deleted
date += " 00:00:00.000000"
# Make the query to get the pastes to be deleted
old_pastes = db.session.query(Paste).filter(Paste.datetime < date)
# Attempt to delete old pastes
for item in old_pastes:
try:
delete_from_es(item)
db.session.delete(item)
db.session.commit()
except:
logger.error("Did not delete item from one or more databases: %s", item)
return True | aeb346d89f58d7894b36f633b90c608f635522f1 | 20,891 |
def convert(origDict, initialSpecies):
"""
Convert the original dictionary with species labels as keys
into a new dictionary with species objects as keys,
using the given dictionary of species.
"""
new_dict = {}
for label, value in origDict.items():
new_dict[initialSpecies[label]] = value
return new_dict | 5143f31acd1efdf1790e68bade3a1f8d8977bcde | 20,893 |
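For example, with made-up labels standing in for species objects:

```python
initial_species = {'H2': 'H2_species_object', 'O2': 'O2_species_object'}
orig = {'H2': 0.67, 'O2': 0.33}
print(convert(orig, initial_species))
# {'H2_species_object': 0.67, 'O2_species_object': 0.33}
```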
def Vector(point, direction, simple=None):
"""
    Easy to use Vector type constructor. If three arguments are passed,
    the first two are the x and y components of the point and the third is
    the direction component of the Vector.
"""
if simple is not None:
point = Point(point, direction)
direction = simple
return {
'point': point,
'direction': direction,
} | 58407760ee540fa88dbc49100a5216522bd5de94 | 20,894 |
def createGrid(nx, ny):
"""
Create a grid position array.
"""
direction = 0
positions = []
if (nx > 1) or (ny > 1):
half_x = int(nx/2)
half_y = int(ny/2)
for i in range(-half_y, half_y+1):
for j in range(-half_x, half_x+1):
if not ((i==0) and (j==0)):
if ((direction%2)==0):
positions.append([j,i])
else:
positions.append([-j,i])
direction += 1
return positions | fe74af508e1bc7185d21f9c86b4eab64a66a52f5 | 20,895 |
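A quick look at the 3x3 case; the centre cell [0, 0] is always skipped:

```python
for pos in createGrid(3, 3):
    print(pos)   # 8 positions around the centre cell
```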
from typing import Optional
from typing import Sequence
def get_private_network(filters: Optional[Sequence[pulumi.InputType['GetPrivateNetworkFilterArgs']]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetPrivateNetworkResult:
"""
Get information about a Vultr private network.
## Example Usage
Get the information for a private network by `description`:
```python
import pulumi
import pulumi_vultr as vultr
my_network = vultr.get_private_network(filters=[vultr.GetPrivateNetworkFilterArgs(
name="description",
values=["my-network-description"],
)])
```
:param Sequence[pulumi.InputType['GetPrivateNetworkFilterArgs']] filters: Query parameters for finding private networks.
"""
__args__ = dict()
__args__['filters'] = filters
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('vultr:index/getPrivateNetwork:getPrivateNetwork', __args__, opts=opts, typ=GetPrivateNetworkResult).value
return AwaitableGetPrivateNetworkResult(
date_created=__ret__.date_created,
description=__ret__.description,
filters=__ret__.filters,
id=__ret__.id,
region=__ret__.region,
v4_subnet=__ret__.v4_subnet,
v4_subnet_mask=__ret__.v4_subnet_mask) | 37b409eb5f0fc5a6ddf7110ee26bfec44e56ee17 | 20,896 |
import types
import pandas
def sdc_pandas_series_operator_le(self, other):
"""
Pandas Series operator :attr:`pandas.Series.le` implementation
.. only:: developer
**Test**: python -m sdc.runtests -k sdc.tests.test_series.TestSeries.test_series_op7*
python -m sdc.runtests -k sdc.tests.test_series.TestSeries.test_series_operator_le*
Parameters
----------
series: :obj:`pandas.Series`
Input series
other: :obj:`pandas.Series` or :obj:`scalar`
Series or scalar value to be used as a second argument of binary operation
Returns
-------
:obj:`pandas.Series`
The result of the operation
"""
_func_name = 'Operator le().'
ty_checker = TypeChecker('Operator le().')
self_is_series, other_is_series = isinstance(self, SeriesType), isinstance(other, SeriesType)
if not (self_is_series or other_is_series):
return None
if not isinstance(self, (SeriesType, types.Number, types.UnicodeType)):
ty_checker.raise_exc(self, 'pandas.series or scalar', 'self')
if not isinstance(other, (SeriesType, types.Number, types.UnicodeType)):
ty_checker.raise_exc(other, 'pandas.series or scalar', 'other')
operands_are_series = self_is_series and other_is_series
if operands_are_series:
none_or_numeric_indexes = ((isinstance(self.index, types.NoneType) or check_index_is_numeric(self))
and (isinstance(other.index, types.NoneType) or check_index_is_numeric(other)))
series_indexes_comparable = check_types_comparable(self.index, other.index) or none_or_numeric_indexes
if not series_indexes_comparable:
raise TypingError('{} Not implemented for series with not-comparable indexes. \
Given: self.index={}, other.index={}'.format(_func_name, self.index, other.index))
series_data_comparable = check_types_comparable(self, other)
if not series_data_comparable:
raise TypingError('{} Not supported for not-comparable operands. \
Given: self={}, other={}'.format(_func_name, self, other))
if not operands_are_series:
def _series_operator_le_scalar_impl(self, other):
if self_is_series == True: # noqa
return pandas.Series(self._data <= other, index=self._index, name=self._name)
else:
return pandas.Series(self <= other._data, index=other._index, name=other._name)
return _series_operator_le_scalar_impl
else:
# optimization for series with default indexes, that can be aligned differently
if (isinstance(self.index, types.NoneType) and isinstance(other.index, types.NoneType)):
def _series_operator_le_none_indexes_impl(self, other):
left_size, right_size = len(self._data), len(other._data)
if (left_size == right_size):
return pandas.Series(self._data <= other._data)
else:
raise ValueError("Can only compare identically-labeled Series objects")
return _series_operator_le_none_indexes_impl
else:
if none_or_numeric_indexes:
ty_left_index_dtype = types.int64 if isinstance(self.index, types.NoneType) else self.index.dtype
ty_right_index_dtype = types.int64 if isinstance(other.index, types.NoneType) else other.index.dtype
numba_index_common_dtype = find_common_dtype_from_numpy_dtypes(
[ty_left_index_dtype, ty_right_index_dtype], [])
def _series_operator_le_common_impl(self, other):
left_index, right_index = self.index, other.index
if sdc_check_indexes_equal(left_index, right_index):
if none_or_numeric_indexes == True: # noqa
new_index = astype(left_index, numba_index_common_dtype)
else:
new_index = self._index
return pandas.Series(self._data <= other._data,
new_index)
else:
raise ValueError("Can only compare identically-labeled Series objects")
return _series_operator_le_common_impl
return None | 101e2cf36208f76acebd06be56b1222f1e07b41e | 20,897 |
import numpy
def getArrFromFile(path = fromPath):
"""
    Read the raw csv file and return a numpy array
    :param
        path: path of the raw csv file, string, default: fromPath
    :return:
        X: array built from the raw file, 2D numpy array
"""
X = numpy.genfromtxt(path,dtype=float,delimiter=',')[1:,:-2]
return X | 8c5c053391b305354ed398311fad6fdd988c153d | 20,898 |
import numpy as np
import torch
from scipy.ndimage import binary_erosion
def erode(binary_image, erosion=1):
"""
Sets 1s at boundaries of binary_image to 0
"""
batch_array = binary_image.data.cpu().numpy()
return torch.tensor(
np.stack([
binary_erosion(
array,
iterations=erosion,
border_value=1, # so that we don't get border of zeros
).astype(array.dtype)
for array in batch_array])
).to(binary_image.device) | 60cea284d90e891e7f234174c673bd2a3a3f49dc | 20,899 |
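A small sketch of the erosion above on a made-up 5x5 image (assumes torch and scipy are installed):

```python
import torch

img = torch.zeros((1, 5, 5))
img[0, 1:4, 1:4] = 1           # a 3x3 block of ones
eroded = erode(img, erosion=1)
print(int(eroded.sum()))       # -> 1, only the centre of the block survives
```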
import numpy as np
import matplotlib.pyplot as plt
def plot_chirpam_fit(cell_mean, param_d, QI=None, fit_f=sinexp_sigm,
start=420, stop=960, ax=None):
"""
Helper function to visualize the fit of a cell response to a chirp_am stimulus.
params:
- cell_mean: Cell's mean response to the stimulus
- param_d: Parameter dictionary of the fit for fit_f
- QI: Quality index of the fit
- fit_f: Function used for the fit
- start: Where the fit started in index of cell_mean
- stop: Where the fit stopped in index of cell_mean
- ax: Axis where to plot the figure. If None, a new figure of size (50,2) is created
return:
- The axis of the figure
"""
if ax is None:
fig, ax = plt.subplots(figsize=(50,2))
ax.plot(np.linspace(0, len(cell_mean)/60, len(cell_mean), endpoint=False), cell_mean)
if param_d is not None:
ax.plot(np.linspace(start/60, stop/60, stop-start, endpoint=False),
fit_f(np.linspace(0, (stop-start)/60, stop-start, endpoint=False), **param_d))
if QI is not None:
ax.text((start/60), max(cell_mean)*80/100, str(round(QI,3)), fontdict={'size':22})
ax.set_xlim(0, len(cell_mean)/60)
if param_d is not None:
param_d = {k: round(v,2) for k, v in param_d.items()}
ax.set_title(str(param_d))
return ax | 3b55e45ac69772ed17fc154050e3714066335fbf | 20,900 |
def command_ltc(bot, user, channel, args):
"""Display current LRC exchange rates from BTC-E"""
r = bot.get_url("https://btc-e.com/api/2/ltc_usd/ticker")
j = r.json()['ticker']
return bot.say(channel, "BTC-E: avg:$%s last:$%s low:$%s high:$%s vol:%s" % (j['avg'], j['last'], j['low'], j['high'], j['vol'])) | 7aa411b6708e54b09cf2b9aef9c8b01899b95298 | 20,901 |