content (stringlengths 35–762k) | sha1 (stringlengths 40) | id (int64 0–3.66M)
---|---|---|
def status(request):
"""
Status page for the Product Database
"""
app_config = AppSettings()
is_cisco_api_enabled = app_config.is_cisco_api_enabled()
context = {
"is_cisco_api_enabled": is_cisco_api_enabled
}
if is_cisco_api_enabled:
# test access (once every 30 minutes)
cisco_eox_api_test_successful = cache.get("CISCO_EOX_API_TEST", False)
# defaults, overwritten if an exception is thrown
cisco_eox_api_available = True
cisco_eox_api_message = "successful connected to the Cisco EoX API"
if not cisco_eox_api_test_successful:
try:
result = utils.check_cisco_eox_api_access(client_id=app_config.get_cisco_api_client_id(),
client_secret=app_config.get_cisco_api_client_secret(),
drop_credentials=False)
cache.set("CISCO_EOX_API_TEST", result, 60 * 30)
except Exception as ex:
                cisco_eox_api_available = False
cisco_eox_api_message = str(ex)
context["cisco_eox_api_available"] = cisco_eox_api_available
context["cisco_eox_api_message"] = cisco_eox_api_message
# determine worker status
context['worker_status'] = mark_safe(utils.get_celery_worker_state_html())
return render(request, "config/status.html", context=context)
|
ef4c2f5444cfa38b46edd5e138531846f3ac04ee
| 26,107 |
import torch
from typing import Optional
def _evaluate_batch(
batch: torch.LongTensor, # statements
model: QualifierModel,
column: int,
evaluator: RankBasedEvaluator,
all_pos_triples: Optional[torch.LongTensor], # statements?
relation_filter: Optional[torch.BoolTensor],
filtering_necessary: bool,
data_geometric: Optional[Data] = None,
padding_idx: int = 0,
ignore_entity_mask: Optional[torch.BoolTensor] = None,
) -> torch.BoolTensor:
"""Evaluate on a single batch."""
if column not in {0, 2}:
raise ValueError(f'column must be either 0 or 2, but is column={column}')
# Predict scores once
if column == 2: # tail scores
batch_scores_of_corrupted = model.score_t(batch[:, 0:2], qualifiers=batch[:, 3:], data_geometric=data_geometric)
else:
batch_scores_of_corrupted = model.score_h(batch[:, 1:3], qualifiers=batch[:, 3:], data_geometric=data_geometric)
assert (batch[:, column] < batch_scores_of_corrupted.shape[1]).all()
# Select scores of true
batch_scores_of_true = batch_scores_of_corrupted[
torch.arange(0, batch.shape[0]),
batch[:, column],
]
# Create positive filter for all corrupted
if filtering_necessary:
# Needs all positive triples
if all_pos_triples is None:
            raise ValueError('If filtering_necessary or positive_masks_required is True, all_pos_triples has to be '
'provided, but is None.')
# Create filter
positive_filter, relation_filter = create_sparse_positive_filter_(
hrt_batch=batch,
all_pos_triples=all_pos_triples,
relation_filter=relation_filter,
filter_col=column,
)
batch_scores_of_corrupted = filter_scores_(
scores=batch_scores_of_corrupted,
filter_batch=positive_filter,
)
# The scores for the true triples have to be rewritten to the scores tensor
batch_scores_of_corrupted[
torch.arange(0, batch.shape[0]),
batch[:, column],
] = batch_scores_of_true
# mask padding idx
batch_scores_of_corrupted[:, padding_idx] = float("nan")
# Restrict evaluation to certain entities, e.g. non-qualifier entities
if ignore_entity_mask is not None:
batch_scores_of_corrupted = torch.masked_fill(batch_scores_of_corrupted, ignore_entity_mask.unsqueeze(dim=0), value=float("nan"))
evaluator._update_ranks_(
true_scores=batch_scores_of_true[:, None],
all_scores=batch_scores_of_corrupted,
side="head" if column == 0 else "tail",
)
return relation_filter
|
2cba006cf6ceb7ec0941c436cb26cf8b1a34bdeb
| 26,108 |
import codecs
def readFile(f):
"""
    Return an appropriate file handle for the given path f.
    The file is decoded as UTF-8, which is a superset of ASCII, so plain ASCII files are handled as well.
"""
return codecs.open(f, 'r', encoding='UTF-8')
|
b3eb20cf26c6ebe58aec181f1dafc77997f36b6c
| 26,109 |
import re
def _RegistryGetValue(key, value):
"""Use _winreg or reg.exe to obtain the value of a registry key.
Using _winreg is preferable because it solves an issue on some corporate
environments where access to reg.exe is locked down. However, we still need
to fallback to reg.exe for the case where the _winreg module is not available
(for example in cygwin python).
Args:
key: The registry key.
value: The particular registry value to read.
Return:
contents of the registry key's value, or None on failure.
"""
try:
return _RegistryGetValueUsingWinReg(key, value)
except ImportError:
pass
# Fallback to reg.exe if we fail to import _winreg.
text = _RegistryQuery(key, value)
if not text:
return None
# Extract value.
match = re.search(r'REG_\w+\s+([^\r]+)\r\n', text)
if not match:
return None
return match.group(1)
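# Illustrative sketch of the reg.exe parsing step above; the sample output text below is
# made up for demonstration and not taken from a real registry query.
_sample = "HKLM\\SOFTWARE\\Example\r\n    Version    REG_SZ    14.0\r\n"
_match = re.search(r'REG_\w+\s+([^\r]+)\r\n', _sample)
assert _match.group(1) == "14.0"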
|
5a458b165b999dda809b61433fec0cdfd2eb5a54
| 26,110 |
def reverse_complement_sequence(sequence, complementary_base_dict):
"""
Finds the reverse complement of a sequence.
Parameters
----------
sequence : str
The sequence to reverse complement.
complementary_base_dict: dict
A dict that maps bases (`str`) to their complementary bases
(`str`).
Returns
-------
str
The reverse complement of the input sequence.
"""
rev_comp_bases = [complementary_base_dict[b] for b in
sequence[::-1]]
return ''.join(rev_comp_bases)
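# Illustrative usage with a standard DNA complement mapping (the mapping here is an
# assumption for the example; any complete base dict works).
_dna_complements = {'A': 'T', 'T': 'A', 'C': 'G', 'G': 'C'}
assert reverse_complement_sequence('ATCG', _dna_complements) == 'CGAT'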
|
1130f5b321daf72cdd40704fc8671ba331376ded
| 26,111 |
import github
from github import PullRequest, PullRequestMergeStatus


def mergeWithFixes(pullRequest: PullRequest.PullRequest, commit_title: str,
                   commit_message: str, merge_method: str, sha: str):
"""Fixed version of PyGithub merge with commit_title, merge_method, and sha."""
post_parameters = dict()
post_parameters["commit_title"] = commit_title
post_parameters["commit_message"] = commit_message
post_parameters["merge_method"] = merge_method
post_parameters["sha"] = sha
headers, data = pullRequest._requester.requestJsonAndCheck(
"PUT",
pullRequest.url + "/merge",
input=post_parameters
)
return github.PullRequestMergeStatus.PullRequestMergeStatus(pullRequest._requester, headers, data, completed=True)
|
25709e30d209e18cffe34a6dd09c29d5e2286f08
| 26,112 |
import math
def top2daz(north, east, up):
"""Compute azimouth, zenith and distance from a topocentric vector.
Given a topocentric vector (aka north, east and up components), compute
the azimouth, zenith angle and distance between the two points.
Args:
north (float): the north component (in meters)
east (float) : the east component (in meters)
up (float) : the up component (in meters)
Returns:
tuple (floats): a tuple of three floats is returned, as:
[distance, azimouth, zenith], where distance is
in meters, and azimouth and zenith are in radians.
"""
distance = math.sqrt(north*north + east*east + up*up)
a = math.atan2(east, north) % (math.pi*2e0) # normalized [0, 2pi]
    zenith = math.acos(up/distance)
return distance, a, zenith
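# Illustrative usage: a purely eastward, horizontal vector of 100 m gives
# distance == 100.0, azimuth == pi/2 (due east) and zenith == pi/2 (horizontal).
_d, _az, _zen = top2daz(north=0.0, east=100.0, up=0.0)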
|
67a127957b0dc131a6fe5505de05b89871542009
| 26,113 |
import torch
def get_ground_truth_vector(classes : torch.Tensor, n_domains : int,
n_classes : int) -> torch.Tensor:
"""
Get the ground truth vector for the phase where the feature extractor
tries that discriminator cannot distinguish the domain that the sample
comes from.
Args:
classes (torch.Tensor): Class labels.
n_domains (int): Number of domains.
n_classes (int): Number of classes.
Returns:
torch.Tensor: Tensor containing the ground truth for each sample.
"""
# Create the ground truth tensor
total_size = n_domains * n_classes
gt = torch.zeros(len(classes), total_size)
# Value to be placed in the corresponding categories and domains positions
# It is uniform so the discriminator cannot distinguish which domain the
# sample comes from
non_zero_value = 1 / n_domains
for row in range(len(classes)):
# The indices are the corresponding position for each class into each
# domain
non_zero_indices = [i+classes[row] for i in range(0, total_size, n_classes)]
gt[row, non_zero_indices] = non_zero_value
return gt
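# Illustrative usage: with 2 domains and 3 classes, a sample of class 1 receives a
# uniform weight of 1/2 at the class-1 position of each domain block.
_gt = get_ground_truth_vector(torch.tensor([1]), n_domains=2, n_classes=3)
# _gt -> tensor([[0.0, 0.5, 0.0, 0.0, 0.5, 0.0]])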
|
429c0c69d85572073aab66372d651d4981324e2b
| 26,114 |
from numpy import array, log10
def get_midpoints(ar, mode='linear'):
"""
Returns the midpoints of an array; i.e. if you have the left edge of a set
of bins and want the middle, this will do that for you.
:param ar:
The array or list of length L to find the midpoints of
:param mode:
Whether to find the midpoint in logspace ('log') or linear
space ('linear')
:returns:
An array of the midpoints of length L - 1
"""
_valid_modes = ['linear', 'log']
if mode not in _valid_modes:
raise TypeError("Unrecognize midpoint method; must be one of {}}.".format(
_valid_modes))
if mode == 'linear':
lst = [ar[i] + (ar[i+1]-ar[i])/2 for i in range(len(ar))
if i != len(ar) - 1]
return array(lst)
elif mode == 'log':
lst = [10**(log10(ar[i]) + (log10(ar[i+1])-log10(ar[i]))/2)
for i in range(len(ar)) if i != len(ar) - 1]
return array(lst)
else:
raise TypeError("How did I get here? provided mode = {}".format(mode))
|
5bb8ccd674d6a1eb71c02ff8577149f495f3bed4
| 26,115 |
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
from nltk.tokenize import RegexpTokenizer


def tokenize(text):
"""
INPUT:
text (str): message strings which need to be cleaned
OUTPUT:
clean_tokens: tokens of the messages which are cleaned
"""
# remove punctuations
tokenizer = RegexpTokenizer(r'\w+')
tokens = tokenizer.tokenize(text)
tokens = [w for w in tokens if w not in stopwords.words('english')]
# lemmatize as shown in the classroom
lemmatizer = WordNetLemmatizer()
clean_tokens = []
# remove extra spaces if any and convert all the strings to lowercase
for tok in tokens:
clean_tok = lemmatizer.lemmatize(tok).lower().strip()
clean_tokens.append(clean_tok)
return clean_tokens
|
8d8bf0cc4933bd79db7e59870ae50d33b0fe2fb2
| 26,116 |
import random
import example
def time(is_train):
"""Questions for calculating start, end, or time differences."""
context = composition.Context()
start_minutes = random.randint(1, 24*60 - 1)
while True:
duration_minutes = random.randint(1, 12*60 - 1)
if train_test_split.is_train(duration_minutes) == is_train:
break
end_minutes = start_minutes + duration_minutes
    def format_12hr(minutes):
        """Format minutes from midnight as H:MM in 24-hour style (the 12-hour AM/PM code below is commented out)."""
hours = (minutes // 60) % 24
minutes %= 60
#am_pm = 'AM' if hours < 12 else 'PM'
#hours = (hours - 1) % 12 + 1
return '{}:{:02}'.format(hours, minutes)
#return '{}:{:02} {}'.format(hours, minutes, am_pm)
start = format_12hr(start_minutes)
end = format_12hr(end_minutes)
which_question = random.randint(0, 3)
if which_question == 0:
# Question: What is start = end - duration?
template = random.choice([
        # English gloss: "What time is it if there are {duration} minutes to go until {end}?"
        'Che ore sono se mancano {duration} minuti alle {end}?',
])
return example.Problem(
question=example.question(
context, template, duration=duration_minutes, end=end),
answer=start)
elif which_question == 1:
# Question: What is end = start + duration?
template = random.choice([
        # English gloss: "What time is it if {duration} minutes have passed since {start}?"
        'Che ore sono se sono passati {duration} minuti dalle {start}?',
])
return example.Problem(
question=example.question(
context, template, duration=duration_minutes, start=start),
answer=end)
else:
# Question: What is duration = end - start?
template = random.choice([
        # English gloss: "How many minutes are there between {start} and {end}?"
        'Quanti minuti ci sono tra le {start} e le {end}?',
])
return example.Problem(
question=example.question(context, template, start=start, end=end),
answer=duration_minutes)
|
19c4726282776db74bac09c76f1c8b59dce99f4d
| 26,117 |
def countpaths(pseq, distinct=True, trim=True):
"""
This function returns the number of paths in a path dictionary or list.
The distinct parameter excludes paths that are reverses of other paths
in the count.
The trim parameter excludes loops.
"""
if isinstance(pseq, list):
if not distinct:
count = 0
for path in pseq:
if path[0] != path[-1] or not trim:
count += 1
return count
else:
pd = distinctpaths(pseq, nolist=True)
count = 0
for src in pd:
count += len(pd[src])
if trim and src in pd[src]:
count -= 1
return count
elif isinstance(pseq, dict):
if not distinct:
count = 0
for src in pseq:
count += len(pseq[src])
if trim and src in pseq[src]:
count -= 1
return count
else:
count = 0
for src in pseq:
for dst in pseq[src]:
if src == dst:
count += int(not trim)
elif src < dst:
count += 1
else: # src > dst
count += int(not (reversepath(pseq[src][dst]) == pseq.get(dst, dict()).get(src, None)))
return count
else:
        raise TypeError("pseq must be a list of paths or a dict of path dictionaries")
|
0b15bcdc3277e812cbbc0d51a764269f5db6135a
| 26,119 |
def get_all_active_alerts(strategy):
"""
    Generate and return a list of all the active alerts:
symbol: str market identifier
id: int alert identifier
"""
results = []
with strategy._mutex:
try:
for k, strategy_trader in strategy._strategy_traders.items():
with strategy_trader._mutex:
for alert in strategy_trader.alerts:
results.append({
'mid': strategy_trader.instrument.market_id,
'sym': strategy_trader.instrument.symbol,
'id': alert._id,
'vers': alert.version(),
'name': alert.name(),
'ts': alert._created,
'tf': timeframe_to_str(alert._timeframe),
'expiry': alert._expiry,
'ctd': alert._countdown,
'msg': alert._message,
'cond': alert.condition_str(strategy_trader.instrument),
'cancel': alert.cancellation_str(strategy_trader.instrument),
})
except Exception as e:
error_logger.error(repr(e))
return results
|
85c3f1f43bb701fa6adb7dde8b2bac4423ece143
| 26,120 |
def one_hot_vector(val, lst):
"""
Converts a value to a one-hot vector based on options in lst
"""
if val not in lst:
val = lst[-1]
    return [x == val for x in lst]
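# Illustrative usage: values not in the option list fall back to the last option.
assert one_hot_vector('b', ['a', 'b', 'c']) == [False, True, False]
assert one_hot_vector('z', ['a', 'b', 'c']) == [False, False, True]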
|
101865c6ddb3fae03838ecdd73d517ca8bd19828
| 26,121 |
import numpy as np


def to_rgb(array):
"""Add a channel dimension with 3 entries"""
return np.tile(array[:, :, np.newaxis], (1, 1, 3))
|
1fba6b7ab8a0ea4c4a2b36cfbf6315b1870bcc80
| 26,122 |
import re
import keyword
def is_valid_identifier(string):
"""
    Check if string is a valid Python identifier (and therefore a valid package name)
:param string: package name as string
:return: boolean
"""
if not re.match("[_A-Za-z][_a-zA-Z0-9]*$", string):
return False
if keyword.iskeyword(string):
return False
return True
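# Illustrative usage:
assert is_valid_identifier('my_package')
assert not is_valid_identifier('2fast')    # cannot start with a digit
assert not is_valid_identifier('class')    # reserved keyword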
|
138d740dbe335f094af46bfe0f8d7b6636311d61
| 26,123 |
import time
from functools import wraps
from typing import Any, Callable
def _wait_for_indexer(func: Callable) -> Callable:
"""A decorator function to automatically wait for indexer timeout
when running `func`.
"""
# To preserve the original type signature of `func` in the sphinx docs
@wraps(func)
def wrapped(*args: Any, **kwargs: Any) -> Any:
# First wait for the indexer to catch up with the latest `algod_round`
algod_round = _algod_client().status()["last-round"]
while _indexer_client().health()["round"] < algod_round:
time.sleep(1)
# Give the indexer a number of tries before erroring out
timeout = 0
while timeout < ConfigParams.indexer_timeout:
try:
ret = func(*args, **kwargs)
break
except IndexerHTTPError:
time.sleep(1)
timeout += 1
else:
raise TimeoutError("Timeout reached waiting for indexer.")
return ret
return wrapped
|
10d3f5070a88ed3c592022442e1fdb1547b8fef7
| 26,124 |
def test10(args):
"""
CIFAR-10 test set creator.
It returns a reader creator, each sample in the reader is image pixels in
[0, 1] and label in [0, 9].
:return: Test reader creator.
:rtype: callable
"""
return reader_creator_filepath(args.data, 'test_batch', False, args)
|
cf403342c879e6e1a199d21472fd2991ec8c196d
| 26,125 |
import tensorflow as tf  # uses the TF1-style API (tf.train.Saver, tf.global_variables)


def get_saver(moving_average_decay, nontrainable_restore_names=None):
    """
    Gets the saver that restores the variables for testing.
    By default, restores the moving exponential average versions of
    trainable variables.
    If nontrainable_restore_names is set, then also restores
    nontrainable variables whose names match (this can be used
    for restoring running averages for batch normalization).
    """
variable_averages = tf.train.ExponentialMovingAverage(
moving_average_decay
)
variables_to_restore = {}
for v in tf.global_variables():
        # if the variable is trainable or its name contains the desired substring
        if v in tf.trainable_variables() or (nontrainable_restore_names is not None and nontrainable_restore_names in v.name):
print(v.name)
restore_name = variable_averages.average_name(v)
else:
restore_name = v.op.name
variables_to_restore[restore_name] = v
saver = tf.train.Saver(variables_to_restore)
return saver
|
ddb3670b323d310a7ab24a7c507d65afa5abfcaa
| 26,126 |
def _get_data(preload=False):
"""Get data."""
raw = read_raw_fif(raw_fname, preload=preload, verbose='warning')
events = read_events(event_name)
picks = pick_types(raw.info, meg=True, eeg=True, stim=True,
ecg=True, eog=True, include=['STI 014'],
exclude='bads')
return raw, events, picks
|
7fbafeb3a21d653388c35988e26871293e69fb69
| 26,128 |
import numpy as np


def make_full_filter_set(filts, signal_length=None):
"""Create the full set of filters by extending the filterbank to negative FFT
frequencies.
Args:
filts (array_like): Array containing the cochlear filterbank in frequency space,
i.e., the output of make_erb_cos_filters_nx. Each row of ``filts`` is a
single filter, with columns indexing frequency.
signal_length (int, optional): Length of the signal to be filtered with this filterbank.
This should be equal to filter length * 2 - 1, i.e., 2*filts.shape[1] - 1, and if
signal_length is None, this value will be computed with the above formula.
This parameter might be deprecated later.
Returns:
ndarray:
**full_filter_set** -- Array containing the complete filterbank in
frequency space. This output can be directly applied to the frequency
representation of a signal.
"""
if signal_length is None:
signal_length = 2 * filts.shape[1] - 1
# note that filters are currently such that each ROW is a filter and COLUMN idxs freq
if np.remainder(signal_length, 2) == 0: # even -- don't take the DC & don't double sample nyquist
neg_filts = np.flipud(filts[1:filts.shape[0] - 1, :])
else: # odd -- don't take the DC
neg_filts = np.flipud(filts[1:filts.shape[0], :])
fft_filts = np.vstack((filts, neg_filts))
# we need to switch representation to apply filters to fft of the signal, not sure why, but do it here
return fft_filts.T
|
3d5bdd176ced736c6b06468a90510e9af5a1c635
| 26,129 |
def from_autonetkit(topology):
"""Convert an AutoNetKit graph into an FNSS Topology object.
The current implementation of this function only renames the weight
    attribute from *ospf_cost* to *weight*
Parameters
----------
topology : NetworkX graph
An AutoNetKit NetworkX graph
Returns
-------
fnss_topology : FNSS Topology
FNSS topology
"""
topology = topology.copy()
rename_edge_attribute(topology, 'ospf_cost', 'weight')
return topology
|
a087af7739506d86b91aaaeb31d8db4eb1dcc075
| 26,130 |
def create_default_fake_filter():
"""Return the default temporal, spatial, and spatiotemporal filters."""
nx, ny, nt = get_default_filter_size()
time = np.arange(nt)
return create_spatiotemporal_filter(nx, ny, nt)
|
01a64ecd2b599810c61690c67c1539c5126eb160
| 26,131 |
def ClementsBekkers(data, sign='+', samplerate=0.1, rise=2.0, decay=10.0, threshold=3.5,
dispFlag=True, subtractMode=False, direction=1,
lpfilter=2000, template_type=2, ntau=5,
markercolor=(1, 0, 0, 1)):
"""Use Clementes-Bekkers algorithm to find events in a data vector.
The Clementes-Bekkers algorithm uses a template event shape (set by `template_type`) that is
"swept" across the `data` set to find potential events similar to the template.
Parameters
----------
data : array
One-dimensional array that will be searched for matching events.
sign : str {'+', '-'}, default '+'
Sign of events that will be searched.
samplerate : float (default 0.1)
the sampling rate for the data, (milliseconds)
rise : float (default 2.0)
The rise time of the events to be detected (milliseconds)
decay : float (default 10.0)
The decay time of the events to be detected (milliseconds)
threshold : float (default 3.5)
Threshold of event relative to stdev of data to pass detection.
dispFlag : boolean (True)
Controls whether or not data is displayed while searching (inactive in Python version)
subtractMode : boolean (False)
subtracts the best matched templates from the data array as we go. Used to
find overlapping events.
direction : int (1)
Controls direction of the search: 1 is forward from beginning, -1 is backwards from
end of data array
lpfilter : float (2000.)
Low pass filter to apply to the data set before starting search. (Hz)
template_type : int (2)
The template type that will be used. Templates are:
1 : alpha function (template = ((ti - predelay) / decay) *
np.exp((-(t[i] - predelay)) / decay))
2 : exp rise to power, exp fall
3 : exp rise to power, double exp fall
4 : average waveform (not implemented)
5 :
(see cb_template for details)
ntau : int (5)
number of decay time constants to use when computing template
    markercolor : (rgba) (1, 0, 0, 1)
color of the marker used to identify matches when dispFlag is True
Returns
-------
eventlist : np.array
array of event times
peaklist : np.array
array of event points in the original data
crit : numpy array
the critierion array, a waveform of the same length as the data array
scale : numpy array
the scale array, a waveform of the same length as the data array
cx : numpy array
the cx array
template : numpy array
the template used for the matching algorithm
"""
# fsamp = 1000.0/samplerate; # get sampling frequency
# fco = 1600.0; # cutoff frequency in Hz
# wco = fco/(fsamp/2); # wco of 1 is for half of the sample rate, so set it like this...
# if(wco < 1) # if wco is > 1 then this is not a filter!
# [b, a] = butter(8, wco); # fir type filter... seems to work best, with highest order min distortion of dv/dt...
# data = filter(b, a, data); # filter all the traces...
# generate the template
[template, predelay] = cb_template(funcid=template_type, samplerate=samplerate,
rise=rise, decay=decay, lpfilter=lpfilter, ntau=ntau)
N = len(template)
    if template_type == 4:  # use data
Npost = len(template)
else:
Npost = int(decay * ntau / samplerate)
isign = 1
    if sign == '-':
isign = -1.0
# template = isign*template
sumD = 0.0
sumD2 = 0.0
sumT = np.sum(template) # only need to compute once.
sumT2 = np.sum(np.multiply(template, template))
nData = len(data)
# initialize arrays used in the computation
critwave = np.zeros(nData) # waves for internal reference
scalewave = np.zeros(nData)
offsetwave = np.zeros(nData)
cx = []
scale = []
pkl = []
eventlist = []
evn = [] # list of events
isamp = []
icoff = [] # cutoff
crit = [] # criteria
nevent = 0 # number of events
minspacing = int(25.0 / samplerate) # 2.0 msec minimum dt. Events cannot
# be closer than this direction determines whether detection is done in
# forward or reverse time.
if direction == 1:
start = 0
finish = nData - N
else:
start = nData - N - 1
finish = 0
fN = float(N)
lasti = start
resetFlag = False # force reset of running sum calculations
# subtractMode determines whether we subtract the best fits from the data
# as we go
i = start
for i in range(start, finish, direction):
iEnd = N + i
if i == start or resetFlag is True:
# print "resetting i = %d" % (i)
sumD = np.sum(data[i:iEnd]) # optimization...
sumD2 = np.sum(np.multiply(data[i:iEnd], data[i:iEnd]))
ld = data[iEnd]
fd = data[i]
resetFlag = False
else: # only add or subtract the end points
if direction == 1:
sumD = sumD + data[iEnd] - fd
sumD2 = sumD2 + np.multiply(data[iEnd], data[iEnd]) - (fd * fd)
fd = data[i]
if direction == -1:
sumD = sumD - ld + data[i]
sumD2 = sumD2 - (ld * ld) + np.multiply(data[i], data[i])
ld = data[iEnd]
sumTxD = np.sum(np.multiply(data[i:iEnd], template))
S = (sumTxD - (sumT * sumD / fN)) / (sumT2 - (sumT * sumT / fN))
C = (sumD - S * sumT) / fN
# if S*isign < 0.0: # only work with correct signed matches in scaling.
# S = 0.0 # added, pbm 7/20/09
# f = S*template+C
SSE = sumD2 + (S * S * sumT2) + (fN * C * C) - 2.0 * \
(S * sumTxD + C * sumD - S * C * sumT)
if SSE < 0:
# needed to prevent round-off errors in above calculation
CRITERIA = 0.0
else:
CRITERIA = S / np.sqrt(SSE / (fN - 1.0))
critwave[i] = CRITERIA
scalewave[i] = S
offsetwave[i] = C
# best fit to template has the wrong sign, so skip it
if isign * S < 0.0:
continue
# get this peak position
peak_pos = np.argmax(isign * data[i:iEnd]) + i
addevent = False
replaceevent = False
# criteria must exceed threshold in the right direction
if isign * CRITERIA > threshold:
if len(eventlist) == 0: # always add the first event
addevent = True
else:
# and events that are adequately spaced
if abs(peak_pos - pkl[-1]) > minspacing:
addevent = True
else:
# events are close, but fit is better for this point -
# replace
if isign * CRITERIA > isign * crit[-1]:
replaceevent = True
if addevent:
eventlist.append(i)
jEnd = iEnd
pkl.append(peak_pos)
crit.append(CRITERIA)
scale.append(S)
cx.append(C)
if replaceevent:
if subtractMode is True:
j = eventlist[-1]
jEnd = j + N
data[j:jEnd] = data[j:jEnd] + \
(scale[-1] * template + cx[-1]) # add it back
# replace last event in the list with the current event
eventlist[-1] = i
pkl[-1] = peak_pos
crit[-1] = CRITERIA
scale[-1] = S
cx[-1] = C
if subtractMode is True and (addevent or replaceevent):
resetFlag = True
# and subtract the better one
data[i:iEnd] = data[i:iEnd] - (S * template + C)
il = i
i = jEnd # restart...
lasti = i
nevent = len(eventlist)
if nevent == 0:
print('ClementsBekkers: No Events Detected')
else:
print('ClementsBekkers: %d Events Detected' % (nevent))
if dispFlag is True and nevent > 0:
mpl.figure(1)
t = samplerate * np.arange(0, nData)
mpl.subplot(4, 1, 1)
mpl.plot(t, data, 'k')
mpl.plot(t[pkl], data[pkl], marker='o',
markerfacecolor=markercolor, linestyle='')
mpl.plot(t[eventlist], data[eventlist], marker='s',
markerfacecolor=markercolor, linestyle='')
for i in range(0, len(eventlist)):
tev = t[eventlist[i]: eventlist[i] + len(template)]
mpl.plot(tev, cx[i] + scale[i] * template, color=markercolor)
mpl.subplot(4, 1, 2)
mpl.plot(t, critwave, color=markercolor)
mpl.plot([t[0], t[-1]], [threshold, threshold], 'k-')
mpl.plot([t[0], t[-1]], [-threshold, -threshold], 'k-')
mpl.subplot(4, 1, 3)
mpl.plot(t, scalewave, color=markercolor, linestyle='-')
mpl.plot(t, offsetwave, color=markercolor, linestyle='--')
tt = samplerate * np.arange(0, len(template))
mpl.subplot(4, 2, 7)
mpl.plot(tt, template, color=markercolor)
mpl.draw()
return(np.array(eventlist), np.array(pkl), np.array(crit),
np.array(scale), np.array(cx), np.array(template))
|
242e7cdcbed9b8c85d08a086ed08dd2889756029
| 26,132 |
import re
def extract_words(path_to_file):
"""
Takes a path to a file and returns the non-stop
words, after properly removing nonalphanumeric chars
and normalizing for lower case
"""
words = re.findall('[a-z]{2,}', open(path_to_file).read().lower())
stopwords = set(open('../stop_words.txt').read().split(','))
return [w for w in words if w not in stopwords]
|
4f5031693d10542c31a3055341d1794d0d7bf4f0
| 26,133 |
import math
def umass_coherence(T, corpus):
"""
Find the UMass coherence of a topic
input:
T (list of strings): topic to find coherence of
"""
sum = 0
# Go through all distinct pairs in the topic
for i, item_i in enumerate(T):
w_i, weight_i = item_i
for j, item_j in enumerate(T):
w_j, weight_j = item_j
if not (i < j): continue
# Calculate how many documents w_i appears in and how many
# documents w_i and w_j both appear in
counts = D(corpus, w_i, second_word=w_j)
# Update sum
val = (counts["both"] + 1) / counts[w_i]
sum += math.log(val, 10)
return sum
|
d92a8f463b123eb62f4732db358cc8e401091c9f
| 26,134 |
import numpy as np


def get_train_test_sents(corpus, split=0.8, shuffle=True):
"""Get train and test sentences.
Args:
corpus: nltk.corpus that supports sents() function
split (double): fraction to use as training set
shuffle (int or bool): seed for shuffle of input data, or False to just
take the training data as the first xx% contiguously.
Returns:
train_sentences, test_sentences ( list(list(string)) ): the train and test
splits
"""
    sentences = np.array(corpus.sents(), dtype=object)
    fmt = (len(sentences), sum(map(len, sentences)))
    print("Loaded %d sentences (%g tokens)" % fmt)
    if shuffle:
        rng = np.random.RandomState(shuffle)
        rng.shuffle(sentences)  # in-place
    split_idx = int(split * len(sentences))
    train_sentences = sentences[:split_idx]
    test_sentences = sentences[split_idx:]
    fmt = (len(train_sentences), sum(map(len, train_sentences)))
    print("Training set: %d sentences (%d tokens)" % fmt)
    fmt = (len(test_sentences), sum(map(len, test_sentences)))
    print("Test set: %d sentences (%d tokens)" % fmt)
    return train_sentences, test_sentences
|
796ce26db107f3582e9002a5df7144fbcb2fb960
| 26,135 |
import pickle as pckl  # the pckl alias is assumed to refer to the standard pickle module


def load_cifar10(filename):
"""
Load the Preprocessed data
"""
features, labels = pckl.load(open(filename, mode="rb"))
return features, labels
|
9f6ee68c5b580370e1165287c9a26ab80d666615
| 26,136 |
import unittest
def suite():
"""Gather all the tests from this package in a test suite."""
test_suite = unittest.TestSuite()
test_suite.addTest(unittest.makeSuite(TestBodyEval, "test"))
return test_suite
|
2b3c4c642e1964d887e7b3d18db89d0cb69555c3
| 26,137 |
from typing import List

import numpy as np


def fetch_quality(data: dict, classification: ClassificationResult, attenuations: dict) -> dict:
"""Returns Cloudnet quality bits.
Args:
data: Containing :class:`Radar` and :class:`Lidar` instances.
classification: A :class:`ClassificationResult` instance.
attenuations: Dictionary containing keys `liquid_corrected`, `liquid_uncorrected`.
Returns:
Dictionary containing `quality_bits`, an integer array with the bits:
- bit 0: Pixel contains radar data
- bit 1: Pixel contains lidar data
- bit 2: Pixel contaminated by radar clutter
- bit 3: Molecular scattering present (currently not implemented!)
- bit 4: Pixel was affected by liquid attenuation
- bit 5: Liquid attenuation was corrected
- bit 6: Data gap in radar or lidar data
"""
bits: List[np.ndarray] = [np.ndarray([])] * 7
radar_echo = data["radar"].data["Z"][:]
bits[0] = ~radar_echo.mask
bits[1] = ~data["lidar"].data["beta"][:].mask
bits[2] = classification.is_clutter
bits[4] = attenuations["liquid_corrected"] | attenuations["liquid_uncorrected"]
bits[5] = attenuations["liquid_corrected"]
qbits = _bits_to_integer(bits)
return {"quality_bits": qbits}
|
4de1fd337ed31c7b3434458c31aa8bd350b7a8fa
| 26,138 |
from typing import Any
def _batch_set_multiple(batch: Any, key: Any, value: Any) -> Any:
"""Sets multiple key value pairs in a non-tuple batch."""
    # Numpy arrays and Torch tensors can take tuples and lists as keys, so try a normal
    # __getitem__ call before resorting to list comprehension.
try:
# Check if one can do a __getitem__ before doing a __setitem__ because dicts can
# do __setitem__ for elements not in the dict and we do not want that.
batch[key]
batch[key] = value
return batch
# Indexing a list with a sequence results in TypeError
# Indexing an array/tensor with a sequence that is longer than the rank of the array
# results in an IndexError.
except (IndexError, TypeError, KeyError):
pass
if not hasattr(value, '__len__') or isinstance(value, str):
raise ValueError(f'value must be a sequence or array or tensor! and not {type(value)}')
if len(key) != len(value):
raise ValueError(f'value must be the same length as key ({len(key)}), but it is {len(value)} instead')
for single_key, single_value in zip(key, value):
batch = _batch_set(batch, single_key, single_value)
return batch
|
224c560698bf00e31eb6e9df616773bb7f3ebc5e
| 26,139 |
def add_subscription_channel(username, profile_name, device_id, channel):
"""
:param username: Username of a user requesting the data
:param profile_name:
:param device_id:
:param channel:
:return:
"""
def append_channel(subscriptions):
subscriptions.append(channel)
return subscriptions
get_set_subscription_channels(username, profile_name, device_id, append_channel)
|
62ca9028a46119389bd21182c5aef84671d2ee0e
| 26,141 |
def fft2erb(fft, fs, nfft):
"""
Convert Bark frequencies to Hz.
Args:
fft (np.array) : fft bin numbers.
Returns:
(np.array): frequencies in Bark [Bark].
"""
return hz2erb((fft * fs) / (nfft + 1))
|
fbc699b3474405a1d939d38389bed62df7545e57
| 26,142 |
def get_activity_id(activity_name):
"""Get activity enum from it's name."""
activity_id = None
if activity_name == 'STAND':
activity_id = 0
elif activity_name == 'SIT':
activity_id = 1
elif activity_name == 'WALK':
activity_id = 2
elif activity_name == 'RUN':
activity_id = 3
elif activity_name == 'WALK_UPSTAIRS':
activity_id = 4
elif activity_name == 'WALK_DOWNSTAIRS':
activity_id = 5
elif activity_name == 'LIE':
activity_id = 6
elif activity_name == 'BIKE':
activity_id = 7
elif activity_name == 'DRIVE':
activity_id = 8
elif activity_name == 'RIDE':
activity_id = 9
else:
activity_id = 10
return activity_id
|
b5e18063b5448c3f57636daa732dfa2a4f07d801
| 26,143 |
import socket
def is_ipv4(v):
"""
Check value is valid IPv4 address
>>> is_ipv4("192.168.0.1")
True
>>> is_ipv4("192.168.0")
False
>>> is_ipv4("192.168.0.1.1")
False
>>> is_ipv4("192.168.1.256")
False
>>> is_ipv4("192.168.a.250")
False
>>> is_ipv4("11.24.0.09")
False
"""
X = v.split(".")
if len(X) != 4:
return False
try:
return len([x for x in X if 0 <= int(x) <= 255]) == 4 and bool(socket.inet_aton(v))
except Exception:
return False
|
e81fae435a8e59dbae9ab1805941ce3ba9909ba9
| 26,144 |
import warnings
from functools import wraps
from typing import Callable, Optional
def deprecated(help_message: Optional[str] = None):
"""Decorator to mark a function as deprecated.
Args:
help_message (`Optional[str]`): An optional message to guide the user on how to
switch to non-deprecated usage of the library.
"""
def decorator(deprecated_function: Callable):
global _emitted_deprecation_warnings
        # Append the optional help message to the base deprecation notice.
        warning_msg = (
            f"{deprecated_function.__name__} is deprecated and will be removed "
            "in the next major version of datalab."
        ) + (f" {help_message}" if help_message else "")
@wraps(deprecated_function)
def wrapper(*args, **kwargs):
func_hash = hash(deprecated_function)
if func_hash not in _emitted_deprecation_warnings:
warnings.warn(warning_msg, category=FutureWarning, stacklevel=2)
_emitted_deprecation_warnings.add(func_hash)
return deprecated_function(*args, **kwargs)
wrapper._decorator_name_ = "deprecated"
return wrapper
return decorator
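# Illustrative usage on a hypothetical function; `_emitted_deprecation_warnings` is the
# module-level set referenced above and is assumed to exist.
@deprecated("Use `new_load` instead.")
def old_load(path):
    ...

# The first call to old_load() emits a FutureWarning; later calls stay silent.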
|
cc353ac874c61e679ebac5c00e85d1266ee831e5
| 26,145 |
def permitted_info_attributes(info_layer_name, permissions):
"""Get permitted attributes for a feature info result layer.
:param str info_layer_name: Layer name from feature info result
:param obj permissions: OGC service permissions
"""
# get WMS layer name for info result layer
wms_layer_name = permissions.get('feature_info_aliases', {}) \
.get(info_layer_name, info_layer_name)
# return permitted attributes for layer
return permissions['layers'].get(wms_layer_name, {})
|
57fcb05e5cd1c7e223c163929e78ecdb00d1ad09
| 26,146 |
import networkx as nx


def generate_balanced_tree(branching, depth, return_all=False):
""" Returns a DirectedGraph that is a balanced Tree and the root of the tree. (From left to right)
branching = 2, depth = 2
Depth 0 : "ROOT"
Depth 1 : "!#_B0_" "!#_B1_"
Depth 2 : "B0_B0_" "B1_B0_" "B0_B1_" "B1_B1_"
"""
total_size = branching**depth-1
alphabet = Alphabet(branching)
graph = nx.DiGraph()
root = get_root_name(depth)
graph.add_node(root) #root
_generate_balanced_tree(graph, alphabet, root, 0, branching, depth)
if return_all:
return graph, root, alphabet.alphabet
else:
return graph
|
b78f65af8b8609cb6db2e5d7c2cd6da2634c588a
| 26,147 |
import string
def rand_ssn():
"""Random SSN. (9 digits)
Example::
>>> rand_ssn()
295-50-0178
"""
return "%s-%s-%s" % (rand_str(3, string.digits),
rand_str(2, string.digits),
rand_str(4, string.digits))
|
c878de150989e040683280493b931350e9191d40
| 26,148 |
from typing import Dict
from typing import Any
def cast_arguments(args: Dict[str, Any]) -> Dict[str, str]:
"""Cast arguments.
:param args: args
:return: casted args
"""
casted_args = {}
for arg_name, arg_value in args.items():
casted_args[arg_name] = cast_argument(arg_value)
return casted_args
|
bedf813f1b4cb07468bc59c31ac4656757b547f2
| 26,149 |
def lmax_modes(lmax):
"""Compute all (l, m) pairs with 2<=l<=lmax"""
return [(l, m) for l in range(2, lmax + 1) for m in range(-l, l + 1)]
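# Illustrative usage: lmax=2 yields only the l=2 multipole.
assert lmax_modes(2) == [(2, -2), (2, -1), (2, 0), (2, 1), (2, 2)]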
|
10ca6d19c2e4f4ffa07cf52c3b6e9b14f8242d09
| 26,150 |
def scale_image(image, flag, nodata_value=VALUE_NO_DATA):
""" Scale an image based on preset flag.
Arguments:
image - 3d array with assumed dimensions y,x,band
flag - scaling flag to use (None if no scaling)
Return:
An image matching the input image dimension with scaling applied to it.
"""
if flag is None:
return image
elif flag == "mean_std":
return scale_image_mean_std(image, nodata_value)
elif flag == "mean":
return scale_image_mean(image, nodata_value)
elif flag == "minmax":
return scale_image_minmax(image, nodata_value)
else:
raise NotImplementedError("Unknown scaling flag")
|
03bba109b7e5da098b2ef80beb399daf62dfda20
| 26,151 |
import re
def import_student_to_module(request, pk):
"""
Take .xlsx file, as produced by def:export_student_import_format and retrieve all students and metainformation from it.
Case 1 - Known User - Add the user to the active module edition with the right study and role by making a new Studying object
Case 2 - Unknown User - Add the student to the database by making a new Person object and proceed like Case 1
Case 3 - Already Added User - Ignore row
The function returns a view in which all newly added users, all users that are now added to the module edition and all users that were already in the module are shown.
:param request: Django request
:param pk: The module edition id
:return: /students-module-imported.html redirect in which all fails, successes and addeds are given
:raises: Permission denied if the user is not the Module Coordinator
:raises: SuspiciousOperation in case of faulty file input
"""
# Check if user is a module coordinator.
module_edition = get_object_or_404(ModuleEdition, pk=pk)
person = Person.objects.filter(user=request.user).first()
if not is_coordinator_or_assistant_of_module(person, module_edition):
raise PermissionDenied('Not allowed to upload students to module.')
if request.method == "POST":
student_form = ImportStudentForm(request.POST, request.FILES)
if student_form.is_valid():
            file = request.FILES['file']
            book = file.get_book_dict()
            # Select the first sheet (avoid shadowing the built-in `dict`)
            students_to_module = book[list(book.keys())[0]]
key_rows = {}
emailpattern = re.compile('e[-]?mail*')
for i in range(0,len(students_to_module[0])):
if 'number' in students_to_module[0][i].lower():
key_rows['number'] = i
elif 'name' in students_to_module[0][i].lower():
key_rows['name'] = i
elif emailpattern.match(students_to_module[0][i].lower()):
key_rows['email'] = i
elif 'role' in students_to_module[0][i].lower():
key_rows['role'] = i
# Check dimensions
if not (len(students_to_module) > 1 and len(key_rows) == 4):
return bad_request(request, {'message': 'Not all required columns [university_number, name, email, '
'role] are in the excel sheet, or no rows to import.'})
context = {'created': [], 'studying': [], 'failed': []}
for i in range(1, len(students_to_module)):
# Sanitize number input
try:
if str(students_to_module[i][key_rows['number']])[0] == 's' and int(students_to_module[i][key_rows['number']][1:]) > 0:
username = str(students_to_module[i][key_rows['number']])
elif str(students_to_module[i][key_rows['number']])[0] == 'm' and int(students_to_module[i][key_rows['number']][1:]) > 0:
return HttpResponseBadRequest('Trying to add an employee as a student to a module.')
elif int(students_to_module[i][key_rows['number']]) > 0:
username = 's{}'.format(str(students_to_module[i][key_rows['number']]))
else:
raise ValueError
except ValueError:
return bad_request(request, {'message': '{} is not a student number.'
.format(students_to_module[i][key_rows['number']])})
user, created = User.objects.get_or_create(
username=username,
defaults={
'email': students_to_module[i][key_rows['email']]
}
)
student, created = Person.objects.get_or_create(
university_number=username,
defaults={
'user': user,
'name': students_to_module[i][key_rows['name']],
'email': students_to_module[i][key_rows['email']],
}
)
# Update name and email
student.name = students_to_module[i][key_rows['name']]
student.email = students_to_module[i][key_rows['email']]
student.save()
if created:
context['created'].append([student.name, student.full_id])
studying, created = Studying.objects.get_or_create(
person=student,
module_edition=ModuleEdition.objects.get(pk=pk),
defaults={
'role': students_to_module[i][key_rows['role']],
}
)
if created:
module_ed = ModuleEdition.objects.get(id=studying.module_edition.pk)
module = Module.objects.get(moduleedition=module_ed)
context['studying'].append(
[student.name, student.full_id, module.name, module_ed.code]) # studying.study])
else:
module_ed = ModuleEdition.objects.get(id=studying.module_edition.pk)
module = Module.objects.get(moduleedition=module_ed)
context['failed'].append(
[student.name, student.full_id, module.name, module_ed.code]) # studying.study])
context['studying'].append(
[student.name, student.full_id, module.name, module_ed.code]) # studying.study])
return render(request, 'importer/students-module-imported.html', context={'context': context})
else:
raise SuspiciousOperation('Bad POST')
else: # if Module_ed.objects.filter(pk=pk):
student_form = ImportStudentModule()
return render(request, 'importer/import-module-student.html',
{'form': student_form, 'pk': pk, 'module_edition': ModuleEdition.objects.get(pk=pk)})
|
607341968909fa5442832e7e27c0df9e70020650
| 26,152 |
def adjust_poses(poses,refposes):
"""poses, poses_ref should be in direct"""
# atoms, take last step as reference
for i in range(len(poses)):
for x in range(3):
move = round(poses[i][x] - refposes[i][x], 0)
poses[i][x] -= move
refposes = poses.copy()
return poses, refposes
|
9bf0ea3a07efdd8666a3482798c28fe5256c5556
| 26,153 |
def eval_validation(
left: pd.DataFrame,
right: pd.DataFrame,
left_on: IndexLabel,
right_on: IndexLabel,
validate: str,
):
"""
Evaluate the validation relationship between left and right and ensures
that the left and right dataframes are compatible with the validation
strategy.
Parameters
----------
left : pd.DataFrame
Left dataframe
right : pd.DataFrame
Right dataframe
left_on : IndexLabel
Left index label
right_on : IndexLabel
Right index label
validate : str {'one_to_one', 'one_to_many', 'many_to_one'}
Validation strategy.
Returns
-------
tuple[pd.DataFrame, pd.DataFrame]
Left and right dataframes after validation.
"""
if validate == "one_to_many":
left = left.drop_dup_and_log(subset=left_on)
elif validate == "many_to_one":
right = right.drop_dup_and_log(subset=right_on)
elif validate == "one_to_one":
left = left.drop_dup_and_log(subset=left_on)
right = right.drop_dup_and_log(subset=right_on)
else:
raise ValueError(f'Unsupported "validate" argument {validate}')
return left, right
|
184f73e77133fef751b4aeec6cdfccbca0a817ad
| 26,154 |
import matplotlib.image
import scipy.ndimage as nd
from skimage.measure import label, regionprops


def get_region_props_binary_fill(heatmapbinary, heatmap):
"""
The function is to get location and probability of heatmap
using ndimage.
:param heatmapbinary: the binarized heatmap
    :type heatmapbinary: array
:param heatmap: the original heat_map
:type heatmap: array
:returns: regionprops
:rtype: object
"""
# heatmapbinary = closing(heatmapbinary, square[3])
# heatmapbinary = clear_border(heatmapbinary)
# open_heatmapbinary = nd.binary_opening(heatmapbinary)
# close_heatmapbinary = nd.binary_closing(open_heatmapbinary)
heatmap_binary = heatmapbinary.astype("uint8")
filled_image = nd.morphology.binary_fill_holes(heatmap_binary)
#kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (10, 10))
#kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (20, 20))
#heatmap_close = cv2.morphologyEx(heatmap_binary, cv2.MORPH_CLOSE, kernel)
#heatmap_open = cv2.morphologyEx(heatmap_close, cv2.MORPH_OPEN, kernel)
matplotlib.image.imsave('%s/%s.png' % (result_folder, label_heatmap_paths[i].split('/')[-3]),
filled_image)
labeled_img = label(filled_image, connectivity=2)
return regionprops(labeled_img, intensity_image=heatmap)
|
0b7351e7a52f0c1d71b4839894fb2f71aa4e0774
| 26,156 |
def login_response(request, username, password):
"""
Authenticates user and redirects with the appropriate state.
"""
user = authenticate(username=username, password=password)
if user:
login(request, user)
logger.info("User '{}' logged in.".format(user.username))
return redirect('index')
logger.warning("User '{}' tried to login with '{}' password IP: {}".format(
username, password, request.META.get('REMOTE_ADDR')
))
return render(request, 'index.html', {'invalid_authentication': True})
|
99c9dc48052a0b99728babd591c0c5e3edae8969
| 26,157 |
def is_metageneration_specified(query_params):
"""Return True if if_metageneration_match is specified."""
if_metageneration_match = query_params.get("ifMetagenerationMatch") is not None
return if_metageneration_match
|
d37c513f3356aef104e7f6db909d5245e2664c90
| 26,158 |
import functools
def converter(fmt):
"""
Create a converter function for some specific format
Parameters
----------
fmt : str
The format to convert to, e.g. `"html"` or `"latex"`.
Returns
-------
function
"""
return functools.partial(convert, fmt=fmt)
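# Illustrative usage: build format-specific converters once and reuse them
# (assumes the `convert(value, fmt=...)` function referenced above is defined in this module).
to_html = converter("html")
to_latex = converter("latex")
# to_html(obj) is then equivalent to convert(obj, fmt="html")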
|
b1ff4830a42eb5ced35205f1041367784eab4be6
| 26,159 |
def createRunner(name, account, user, conf={}):
"""
Creates a reusable spark runner
Params:
name: a name for the runner
account: an account
user: a user
conf: a json encoded configuration dictionary
"""
master = ""
if LOCAL:
master = "local"
else:
masterInfos = getSparkMasters()
master = "spark://%s" % ','.join(["%s:%s" % (info['host'], info['port']) for info in masterInfos])
return JobRunner(account=account, master=master, conf=conf, uploadFolder=UPLOAD_FOLDER,
iamUsername=IAM_USERNAME, accessKeyId=ACCESS_KEY_ID, accessKeySecret=ACCESS_KEY_SECRET)
|
cf434ddd8b234776eb1269fe461e1f420b8f50ef
| 26,160 |
def auth_req_generator(
rf, mocker, rq_mocker, jwks_request, settings, openid_configuration
):
"""Mock necessary functions and create an authorization request"""
def func(id_token, user=None, code="code", state="state", nonce="nonce"):
# Mock the call to the external token API
rq_mocker.post(
openid_configuration["token_endpoint"], json={"id_token": id_token}
)
# Mock the user association call
authenticate = mocker.patch("nens_auth_client.views.django_auth.authenticate")
authenticate.return_value = user
# Disable automatic RemoteUser creation
settings.NENS_AUTH_AUTO_CREATE_REMOTE_USER = False
# Create the request
request = rf.get(
"http://testserver/authorize/?code={}&state={}".format(code, state)
)
request.session = {
"_cognito_authlib_state_": state,
"_cognito_authlib_nonce_": nonce,
LOGIN_REDIRECT_SESSION_KEY: "http://testserver/success",
}
return request
return func
|
56990b36dff74a8c27dcb6261066902a83f17184
| 26,161 |
from datetime import datetime
def get_current_time():
"""
Returns the current system time
@rtype: string
@return: The current time, formatted
"""
    return datetime.now().strftime("%H:%M:%S")
|
1a3c2ae5427ba53ebdffc08d26e0d0c28643b6ef
| 26,162 |
def mock_addon_options(addon_info):
"""Mock add-on options."""
return addon_info.return_value["options"]
|
52d20879710e60b4973b302279a624f617bbc6c2
| 26,163 |
from typing import Optional
import torch
from typing import Tuple
def run(
task: Task,
num_samples: int,
num_simulations: int,
num_observation: Optional[int] = None,
observation: Optional[torch.Tensor] = None,
population_size: Optional[int] = None,
distance: str = "l2",
epsilon_decay: float = 0.2,
distance_based_decay: bool = True,
ess_min: Optional[float] = None,
initial_round_factor: int = 5,
batch_size: int = 1000,
kernel: str = "gaussian",
kernel_variance_scale: float = 0.5,
use_last_pop_samples: bool = True,
algorithm_variant: str = "C",
save_summary: bool = False,
sass: bool = False,
sass_fraction: float = 0.5,
sass_feature_expansion_degree: int = 3,
lra: bool = False,
lra_sample_weights: bool = True,
kde_bandwidth: Optional[str] = "cv",
kde_sample_weights: bool = False,
) -> Tuple[torch.Tensor, int, Optional[torch.Tensor]]:
"""Runs SMC-ABC from `sbi`
SMC-ABC supports two different ways of scheduling epsilon:
1) Exponential decay: eps_t+1 = epsilon_decay * eps_t
2) Distance based decay: the new eps is determined from the "epsilon_decay"
quantile of the distances of the accepted simulations in the previous population. This is used if `distance_based_decay` is set to True.
Args:
task: Task instance
num_samples: Number of samples to generate from posterior
num_simulations: Simulation budget
num_observation: Observation number to load, alternative to `observation`
observation: Observation, alternative to `num_observation`
population_size: If None, uses heuristic: 1000 if `num_simulations` is greater
than 10k, else 100
distance: Distance function, options = {l1, l2, mse}
epsilon_decay: Decay for epsilon; treated as quantile in case of distance based decay.
distance_based_decay: Whether to determine new epsilon from quantile of
distances of the previous population.
ess_min: Threshold for resampling a population if effective sampling size is
too small.
initial_round_factor: Used to determine initial round size
batch_size: Batch size for the simulator
kernel: Kernel distribution used to perturb the particles.
kernel_variance_scale: Scaling factor for kernel variance.
use_last_pop_samples: If True, samples of a population that was quit due to
budget are used by filling up missing particles from the previous
population.
algorithm_variant: There are three SMCABC variants implemented: A, B, and C.
            See docstrings in the SBI package for more details.
save_summary: Whether to save a summary containing all populations, distances,
etc. to file.
sass: If True, summary statistics are learned as in
Fearnhead & Prangle 2012.
sass_fraction: Fraction of simulation budget to use for sass.
sass_feature_expansion_degree: Degree of polynomial expansion of the summary
statistics.
lra: If True, posterior samples are adjusted with
linear regression as in Beaumont et al. 2002.
lra_sample_weights: Whether to weigh LRA samples
kde_bandwidth: If not None, will resample using KDE when necessary, set
e.g. to "cv" for cross-validated bandwidth selection
kde_sample_weights: Whether to weigh KDE samples
Returns:
Samples from posterior, number of simulator calls, log probability of true params if computable
"""
assert not (num_observation is None and observation is None)
assert not (num_observation is not None and observation is not None)
log = sbibm.get_logger(__name__)
smc_papers = dict(A="Toni 2010", B="Sisson et al. 2007", C="Beaumont et al. 2009")
log.info(f"Running SMC-ABC as in {smc_papers[algorithm_variant]}.")
prior = task.get_prior_dist()
simulator = task.get_simulator(max_calls=num_simulations)
kde = kde_bandwidth is not None
if observation is None:
observation = task.get_observation(num_observation)
if population_size is None:
population_size = 100
if num_simulations > 10_000:
population_size = 1000
population_size = min(population_size, num_simulations)
initial_round_size = clip_int(
value=initial_round_factor * population_size,
minimum=population_size,
maximum=max(0.5 * num_simulations, population_size),
)
inference_method = SMCABC(
simulator=simulator,
prior=prior,
simulation_batch_size=batch_size,
distance=distance,
show_progress_bars=True,
kernel=kernel,
algorithm_variant=algorithm_variant,
)
# Output contains raw smcabc samples or kde object from them.
output, summary = inference_method(
x_o=observation,
num_particles=population_size,
num_initial_pop=initial_round_size,
num_simulations=num_simulations,
epsilon_decay=epsilon_decay,
distance_based_decay=distance_based_decay,
ess_min=ess_min,
kernel_variance_scale=kernel_variance_scale,
use_last_pop_samples=use_last_pop_samples,
return_summary=True,
lra=lra,
lra_with_weights=lra_sample_weights,
sass=sass,
sass_fraction=sass_fraction,
sass_expansion_degree=sass_feature_expansion_degree,
kde=kde,
kde_sample_weights=kde_sample_weights,
kde_kwargs={} if kde_bandwidth is None else dict(kde_bandwidth=kde_bandwidth),
)
if save_summary:
log.info("Saving smcabc summary to csv.")
pd.DataFrame.from_dict(
summary,
).to_csv("summary.csv", index=False)
assert simulator.num_simulations == num_simulations
# Return samples from kde or raw samples.
if kde:
kde_posterior = output
        samples = kde_posterior.sample(num_samples)  # draw the requested number of posterior samples
# LPTP can only be returned with KDE posterior.
if num_observation is not None:
true_parameters = task.get_true_parameters(num_observation=num_observation)
log_prob_true_parameters = kde_posterior.log_prob(true_parameters.squeeze())
return samples, simulator.num_simulations, log_prob_true_parameters
else:
samples = output
return samples, simulator.num_simulations, None
|
ebcec3f8985dd30cbffbfa01cdd6a686123806dc
| 26,165 |
import keras
import numpy as np


def _preprocess_symbolic_input(x, data_format, mode, **kwargs):
"""Preprocesses a tensor encoding a batch of images.
# Arguments
x: Input tensor, 3D or 4D.
data_format: Data format of the image tensor.
mode: One of "caffe", "tf" or "torch".
- caffe: will convert the images from RGB to BGR,
then will zero-center each color channel with
respect to the ImageNet dataset,
without scaling.
- tf: will scale pixels between -1 and 1,
sample-wise.
- torch: will scale pixels between 0 and 1 and then
will normalize each channel with respect to the
ImageNet dataset.
# Returns
Preprocessed tensor.
"""
backend = keras.backend
if mode == 'tf':
x /= 127.5
x -= 1.
return x
if mode == 'torch':
x /= 255.
mean = [0.485, 0.456, 0.406]
std = [0.229, 0.224, 0.225]
# print("torch")
else:
if data_format == 'channels_first':
# 'RGB'->'BGR'
if backend.ndim(x) == 3:
x = x[::-1, ...]
else:
x = x[:, ::-1, ...]
else:
# 'RGB'->'BGR'
x = x[..., ::-1]
mean = [103.939, 116.779, 123.68]
std = None
mean_tensor = backend.constant(-np.array(mean))
# Zero-center by mean pixel
if backend.dtype(x) != backend.dtype(mean_tensor):
x = backend.bias_add(
x, backend.cast(mean_tensor, backend.dtype(x)),
data_format=data_format)
else:
x = backend.bias_add(x, mean_tensor, data_format)
if std is not None:
x /= std
return x
|
2a42723cc1c61f143b8e1df20622f5d83ce5e719
| 26,166 |
from typing import Dict
def line_column(position: Position) -> Dict[str, int]:
"""Translate pygls Position to Jedi's line / column
Returns a dictionary because this return result should be unpacked as a
function argument to Jedi's functions.
Jedi is 1-indexed for lines and 0-indexed for columns. LSP is 0-indexed for
lines and 0-indexed for columns. Therefore, add 1 to LSP's request for the
line.
"""
return dict(line=position.line + 1, column=position.character)
|
b7f41db9d748b5d3c4f29470d4c5479aa59f95ec
| 26,168 |
import math

import numpy as np


def rotZ(theta):
    """ Returns the rotation matrix R such that R*v -> v', where v' is v rotated about the z axis through theta.
    theta is in radians.
    rotZ = Rz'
    """
st = math.sin(theta)
ct = math.cos(theta)
return np.matrix([[ ct, -st, 0. ],
[ st, ct, 0. ],
[ 0., 0., 1. ]])
|
0daf8b021f0011c1bd70adf6c86fb2b91308232a
| 26,170 |
import json
def main(event, context):
"""Validate POST request and trigger Lambda copy function."""
try:
body = json.loads(event['body'])
bucket = body['bucket']
project = body['project']
token = body['token']
except (TypeError, KeyError):
return {
"statusCode": 400,
"body": json.dumps({"message": "Please provide bucket, project and token"})
}
# Simple authentication
if not token == FRAMEIO_TOKEN:
return {
"statusCode": 401,
"body": json.dumps({"message": "Unauthorized"})
}
# Validate project and bucket
root_asset_id = project_root_asset_id(project)
if not root_asset_id:
return {
"statusCode": 400,
"body": json.dumps({"message": f"Unable to find Frame.io project {project}"})
}
try:
s3.meta.client.head_bucket(Bucket=bucket)
except ClientError as e:
print(e)
return {
"statusCode": 400,
"body": json.dumps({"message": f"Unable to find bucket {bucket}"})
}
lambda_client.invoke(
FunctionName='s3-to-frameio-lambda-copy-dev-copy',
InvocationType='Event',
Payload=json.dumps({
'bucket_name': bucket,
'previous_asset': '',
'continued_run': 'false',
'parent_ids': {'': root_asset_id}}) # Dict with folder:asset_id pairs. '' is root.
)
return {
"statusCode": 200,
"body": json.dumps({"message": "Copy started!"})
}
|
66742763a71c55006072f99b8c13309bec913cde
| 26,171 |
import random
from collections import defaultdict
def random_k(word_vectors, k):
"""
:return: ({cluster_id: [cluster elements]})
"""
clusters = defaultdict(list)
for word in word_vectors.vocab:
chosen_cluster = random.randint(0, k-1)
clusters[chosen_cluster].append(word)
return clusters
|
e22e28f1836f8701edbefb92b16751713f1580af
| 26,172 |
from typing import Optional
def get_human_label(scope: str) -> Optional[str]:
"""The the human-readable label for a scope, for display to end users."""
return _HUMAN_LABELS.get(scope)
|
d3259e5dc3b1ab825c3fe9c3f0a860f0a1ebe7c0
| 26,173 |
import inspect
def lineno():
"""Returns the current line number in our program."""
return str(' - RuleDumper - line number: '+str(inspect.currentframe().f_back.f_lineno))
|
c6956415e297b675082913d8e17c606d45a01147
| 26,174 |
def run_simulation(input_dataframe, num_cells_per_module, temperature_series, lookup_table, mod_temp_path,
module_start, module_end, database_path, module_name, bypass_diodes, subcells, num_irrad_per_module):
"""
:param input_dataframe: irradiation on every cell/subcell
:param num_cells_per_module: cells_per_module
:param temperature_series:
:param lookup_table:
:return:
"""
modules_in_df = (len(input_dataframe.columns) - 4) / num_irrad_per_module
    if database_path is None:
        print("Please add the module database path")
    # Data is looked up in CEC module database
    module_df = pvsyst.retrieve_sam(path=database_path)
# e.g. [module0[hour0[i[],v[]]], module1[hour1[i[],v[]]], ..., modulen[hour8759[i[],v[]]]
# basically for every module there is a sublist for hourly values, for each hour there is a sublist with i and v
# a specific i or v list can be called as follows: modules_iv[module][hour][i/v]
# for module in range(modules_in_df):
for module in range(module_start, module_end + 1, 1):
columns_from = module * num_irrad_per_module + 4 # The module data starts at column 4
columns_to = columns_from + num_irrad_per_module - 1 # -1 because the column from already counts to the module
module_df_temp = input_dataframe.loc[:, columns_from:columns_to]
hourly_iv_curves = []
for row in range(len(module_df_temp.index)):
irradiation_on_module = module_df_temp.loc[row, :].tolist()
# print irradiation_on_module
if max(irradiation_on_module) == 0: # Filter out the hours without irradiation
i_module_sim = np.asarray(0)
v_module_sim = np.asarray(0)
hourly_iv_curves.append([i_module_sim, v_module_sim])
print "hour = " + str(row)
else:
i_module_sim, v_module_sim, lookup_table = ps.partial_shading(irradiation_on_module,
temperature=temperature_series[row],
irrad_temp_lookup_df=lookup_table,
module_df=module_df,
module_name=module_name,
numcells=num_cells_per_module,
n_bypass_diodes=bypass_diodes,
num_subcells=subcells)
hourly_iv_curves.append([i_module_sim, v_module_sim])
print "hour = " + str(row)
# The results for each module are saved in a file. This is required to lower the memory consumption
# of the program when a high amount of modules is considered
        module_path = mod_temp_path + "\\module" + str(module) + ".pkl"
with open(module_path, 'w') as f:
pickle.dump(hourly_iv_curves, f)
print "module done = " + str(module)
return lookup_table
|
43c25140d5a619479160d0f723ad7150e82db5b4
| 26,175 |
import six
def qualified_name(obj):
"""Return the qualified name (e.g. package.module.Type) for the given object."""
try:
module = obj.__module__
qualname = obj.__qualname__ if six.PY3 else obj.__name__
except AttributeError:
type_ = type(obj)
module = type_.__module__
qualname = type_.__qualname__ if six.PY3 else type_.__name__
return qualname if module in ('typing', 'builtins') else '{}.{}'.format(module, qualname)
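# Quick examples (require six, as imported above):
if __name__ == "__main__":
    print(qualified_name(dict))            # 'dict' (builtins stay unqualified)
    print(qualified_name(qualified_name))  # e.g. '__main__.qualified_name'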
|
02444c19d200650de8dc9e2214f7788fca0befd2
| 26,176 |
import numpy as np
from cplex import Cplex, SparsePair
def cast_mip_start(mip_start, cpx):
    """
    Casts the solution values and indices in a Cplex SparsePair.
    Parameters
    ----------
    mip_start : SparsePair
    cpx : Cplex
Returns
-------
Cplex SparsePair where the indices are integers and the values for each variable match the variable type specified in CPLEX Object
"""
assert isinstance(cpx, Cplex)
assert isinstance(mip_start, SparsePair)
vals = list(mip_start.val)
idx = np.array(list(mip_start.ind), dtype = int).tolist()
types = cpx.variables.get_types(idx)
for j, t in enumerate(types):
if t in ['B', 'I']:
vals[j] = int(vals[j])
elif t in ['C']:
vals[j] = float(vals[j])
return SparsePair(ind = idx, val = vals)
|
df51191d5621cf65d39897ff474785a05ae722d1
| 26,177 |
def encode(value, unchanged = None):
"""
encode/decode are performed prior to sending to mongodb or after retrieval from db.
The idea is to make object embedding in Mongo transparent to the user.
- We use jsonpickle package to embed general objects. These are encoded as strings and can be decoded as long as the original library exists when decoding.
- pandas.DataFrame are encoded to bytes using pickle while numpy arrays are encoded using the faster array.tobytes() with arrays' shape & type exposed and searchable.
:Example:
----------
>>> from pyg import *; import numpy as np
>>> value = Dict(a=1,b=2)
>>> assert encode(value) == {'a': 1, 'b': 2, '_obj': '{"py/type": "pyg_base._dict.Dict"}'}
>>> assert decode({'a': 1, 'b': 2, '_obj': '{"py/type": "pyg_base._dict.Dict"}'}) == Dict(a = 1, b=2)
>>> value = dictable(a=[1,2,3], b = 4)
>>> assert encode(value) == {'a': [1, 2, 3], 'b': [4, 4, 4], '_obj': '{"py/type": "pyg_base._dictable.dictable"}'}
>>> assert decode(encode(value)) == value
>>> assert encode(np.array([1,2])) == {'data': bytes,
>>> 'shape': (2,),
>>> 'dtype': '{"py/reduce": [{"py/type": "numpy.dtype"}, {"py/tuple": ["i4", false, true]}, {"py/tuple": [3, "<", null, null, null, -1, -1, 0]}]}',
>>> '_obj': '{"py/function": "pyg_base._encode.bson2np"}'}
:Example: functions and objects
-------------------------------
>>> from pyg import *; import numpy as np
>>> assert encode(ewma) == '{"py/function": "pyg.timeseries._ewm.ewma"}'
>>> assert encode(Calendar) == '{"py/type": "pyg_base._drange.Calendar"}'
:Parameters:
----------------
value : obj
An object to be encoded
:Returns:
-------
A pre-json object
"""
return _encode(value, unchanged)
|
c7ab759a5c25199e3de2f5e2a691c09cca77eaa7
| 26,178 |
def ConvertToCamelCase(input):
"""Converts the input string from 'unix_hacker' style to 'CamelCase' style."""
return ''.join(x[:1].upper() + x[1:] for x in input.split('_'))
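# Example of the conversion:
if __name__ == "__main__":
    print(ConvertToCamelCase("unix_hacker_style"))  # -> UnixHackerStyle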
|
8070516c61768ea097eccc62633fc6dea2fa7096
| 26,179 |
from supersmoother import SuperSmoother as supersmoother
def flatten(time,
flux,
window_length=None,
edge_cutoff=0,
break_tolerance=None,
cval=None,
return_trend=False,
method='biweight',
kernel=None,
kernel_size=None,
kernel_period=None,
proportiontocut=constants.PROPORTIONTOCUT,
robust=False,
max_splines=constants.PSPLINES_MAX_SPLINES,
return_nsplines=False,
mask=None,
verbose=False
):
"""
``flatten`` removes low frequency trends in time-series data.
Parameters
----------
time : array-like
Time values
flux : array-like
Flux values for every time point
window_length : float
The length of the filter window in units of ``time`` (usually days), or in
cadences (for cadence-based sliders ``savgol`` and ``medfilt``).
method : string, default: ``biweight``
        Detrending method. Time-windowed sliders: ``median``, ``biweight``, ``hodges``,
``tau``, ``welsch``, ``huber``, ``huber_psi``, ``andrewsinewave``, ``mean``,
``hampel``, ``ramsay``, ``trim_mean``, ``hampelfilt``, ``winsorize``. Cadence
        based slider: ``medfilt``. Splines: ``hspline``, ``rspline``, ``pspline``.
Locally weighted scatterplot smoothing: ``lowess``. Savitzky-Golay filter:
``savgol``. Gaussian processes: ``gp``. Cosine Filtering with Autocorrelation
Minimization: ``cofiam``. Cosine fitting: ``cosine``, Friedman's Super-Smoother:
``supersmoother``. Gaussian regressions: ``ridge``, ``lasso``, ``elasticnet``.
break_tolerance : float, default: window_length/2
If there are large gaps in time (larger than ``window_length``/2), flatten will
split the flux into several sub-lightcurves and apply the filter to each
individually. ``break_tolerance`` must be in the same unit as ``time`` (usually
days). To disable this feature, set ``break_tolerance`` to 0. If the method is
``supersmoother`` and no ``break_tolerance`` is provided, it will be taken as
`1` in units of ``time``.
edge_cutoff : float, default: None
Trends near edges are less robust. Depending on the data, it may be beneficial
to remove edges. The ``edge_cutoff`` defines the length (in units of time) to be
cut off each edge. Default: Zero. Cut off is maximally ``window_length``/2, as
this fills the window completely. Applicable only for time-windowed sliders.
cval : float or int
Tuning parameter for the robust estimators. See documentation for defaults.
        Larger values make the estimate more efficient but less robust. For the
super-smoother, cval determines the bass enhancement (smoothness) and can be
`None` or in the range 0 < ``cval`` < 10. For the ``savgol``, ``cval``
determines the (integer) polynomial order (default: 2).
proportiontocut : float, default: 0.1
Fraction to cut off (or filled) of both tails of the distribution using methods
``trim_mean`` (or ``winsorize``)
kernel : str, default: `squared_exp`
Choice of `squared_exp` (squared exponential), `matern`, `periodic`,
`periodic_auto`.
kernel_size : float, default: 1
The length scale of the Gaussian Process kernel.
kernel_period : float
The periodicity of the Gaussian Process kernel (in units of ``time``). Must be
provided for the kernel `periodic`. Can not be specified for the
`periodic_auto`, for which it is determined automatically using a Lomb-Scargle
periodogram pre-search.
robust : bool, default: False
If `True`, the fitting process will be run iteratively. In each iteration,
2-sigma outliers from the fitted trend will be clipped until convergence.
Supported by the Gaussian Process kernels `squared_exp` and `matern`, as well as
`cosine` fitting.
return_trend : bool, default: False
If `True`, the method will return a tuple of two elements
(``flattened_flux``, ``trend_flux``) where ``trend_flux`` is the removed trend.
Returns
-------
flatten_flux : array-like
Flattened flux.
trend_flux : array-like
Trend in the flux. Only returned if ``return_trend`` is `True`.
"""
if method not in constants.methods:
raise ValueError('Unknown detrending method')
# Numba can't handle strings, so we're passing the location estimator as an int:
if method == 'biweight':
method_code = 1
elif method == 'andrewsinewave':
method_code = 2
elif method == 'welsch':
method_code = 3
elif method == 'hodges':
method_code = 4
elif method == 'median':
method_code = 5
elif method == 'mean':
method_code = 6
elif method == 'trim_mean':
method_code = 7
elif method == 'winsorize':
method_code = 8
elif method == 'hampelfilt':
method_code = 9
elif method == 'huber_psi':
method_code = 10
elif method == 'tau':
method_code = 11
error_text = 'proportiontocut must be >0 and <0.5'
if not isinstance(proportiontocut, float):
raise ValueError(error_text)
if proportiontocut >= 0.5 or proportiontocut <= 0:
raise ValueError(error_text)
# Default cval values for robust location estimators
if cval is None:
if method == 'biweight':
cval = 5
elif method == 'andrewsinewave':
cval = 1.339
elif method == 'welsch':
cval = 2.11
elif method == 'huber':
cval = 1.5
elif method == 'huber_psi':
cval = 1.28
elif method in ['trim_mean', 'winsorize']:
cval = proportiontocut
elif method == 'hampelfilt':
cval = 3
elif method == 'tau':
cval = 4.5
elif method == 'hampel':
cval = (1.7, 3.4, 8.5)
elif method == 'ramsay':
cval = 0.3
elif method == 'savgol': # polyorder
cval = 2 # int
        elif method in ('ridge', 'lasso', 'elasticnet'):
cval = 1
else:
cval = 0 # avoid numba type inference error: None type multi with float
if cval is not None and method == 'supersmoother':
if cval > 0 and cval < 10:
supersmoother_alpha = cval
else:
supersmoother_alpha = None
# Maximum gap in time should be half a window size.
# Any larger is nonsense, because then the array has a full window of data
if window_length is None:
window_length = 2 # so that break_tolerance = 1 in the supersmoother case
if break_tolerance is None:
break_tolerance = window_length / 2
if break_tolerance == 0:
break_tolerance = inf
# Numba is very fast, but doesn't play nicely with NaN values
# Therefore, we make new time-flux arrays with only the floating point values
# All calculations are done within these arrays
# Afterwards, the trend is transplanted into the original arrays (with the NaNs)
if mask is None:
mask = np.ones(len(time))
else:
mask = array(~mask, dtype=float64) # Invert to stay consistent with TLS
time = array(time, dtype=float64)
flux = array(flux, dtype=float64)
mask_nans = isnan(time * flux)
time_compressed = np.ma.compressed(np.ma.masked_array(time, mask_nans))
flux_compressed = np.ma.compressed(np.ma.masked_array(flux, mask_nans))
mask_compressed = np.ma.compressed(np.ma.masked_array(mask, mask_nans))
# Get the indexes of the gaps
gaps_indexes = get_gaps_indexes(time_compressed, break_tolerance=break_tolerance)
trend_flux = array([])
trend_segment = array([])
nsplines = array([]) # Chosen number of splines per segment for method "pspline"
# Iterate over all segments
for i in range(len(gaps_indexes) - 1):
time_view = time_compressed[gaps_indexes[i]:gaps_indexes[i+1]]
flux_view = flux_compressed[gaps_indexes[i]:gaps_indexes[i+1]]
mask_view = mask_compressed[gaps_indexes[i]:gaps_indexes[i+1]]
methods = ["biweight", "andrewsinewave", "welsch", "hodges", "median", "mean",
"trim_mean", "winsorize", "huber_psi", "hampelfilt", "tau"]
if method in methods:
trend_segment = running_segment(
time_view,
flux_view,
mask_view,
window_length,
edge_cutoff,
cval,
method_code)
elif method in ["huber", "hampel", "ramsay"]:
trend_segment = running_segment_slow(
time_view,
flux_view,
mask_view,
window_length,
edge_cutoff,
cval,
method
)
elif method == 'lowess':
trend_segment = lowess(
time_view,
flux_view,
mask_view,
window_length
)
elif method == 'hspline':
trend_segment = detrend_huber_spline(
time_view,
flux_view,
mask_view,
knot_distance=window_length)
elif method == 'supersmoother':
            try:
                from supersmoother import SuperSmoother as supersmoother
            except ImportError:
                raise ImportError('Could not import supersmoother')
win = window_length / (max(time)-min(time))
trend_segment = supersmoother(
alpha=supersmoother_alpha,
primary_spans=(
constants.primary_span_lower * win,
win,
constants.primary_span_upper * win
),
middle_span=constants.middle_span * win,
final_span=constants.upper_span * win
).fit(time_view, flux_view,).predict(time_view)
elif method == 'cofiam':
trend_segment = detrend_cofiam(
time_view, flux_view, window_length)
elif method == 'cosine':
trend_segment = detrend_cosine(
time_view, flux_view, window_length, robust, mask_view)
elif method == 'savgol':
if window_length%2 == 0:
window_length += 1
trend_segment = savgol_filter(flux_view, window_length, polyorder=int(cval))
elif method == 'medfilt':
trend_segment = medfilt(flux_view, window_length)
elif method == 'gp':
trend_segment = make_gp(
time_view,
flux_view,
mask_view,
kernel,
kernel_size,
kernel_period,
robust
)
elif method == 'rspline':
trend_segment = iter_spline(time_view, flux_view, mask_view, window_length)
elif method == 'pspline':
if verbose:
print('Segment', i + 1, 'of', len(gaps_indexes) - 1)
trend_segment, nsplines_segment = pspline(
time_view, flux_view, edge_cutoff, max_splines, return_nsplines, verbose
)
nsplines = append(nsplines, nsplines_segment)
elif method in "ridge lasso elasticnet":
trend_segment = regression(time_view, flux_view, method, window_length, cval)
trend_flux = append(trend_flux, trend_segment)
# Insert results of non-NaNs into original data stream
trend_lc = full(len(time), nan)
mask_nans = where(~mask_nans)[0]
for idx in range(len(mask_nans)):
trend_lc[mask_nans[idx]] = trend_flux[idx]
trend_lc[trend_lc==0] = np.nan # avoid division by zero
flatten_lc = flux / trend_lc
if return_trend and return_nsplines:
return flatten_lc, trend_lc, nsplines
if return_trend and not return_nsplines:
return flatten_lc, trend_lc
if not return_trend and not return_nsplines:
return flatten_lc
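# Usage sketch (hedged): assumes the helper modules referenced above (constants,
# running_segment, pspline, ...) are importable; `time` and `flux` are 1-d arrays
# in days and normalized flux.
#
#     flatten_flux, trend_flux = flatten(time, flux, window_length=0.5,
#                                        method='biweight', return_trend=True)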
|
048ae9b148ee0e248e0078ade08c56a641b426e1
| 26,180 |
def frequency(seq, useall=False, calpc=False):
"""Count the frequencies of each bases in sequence including every letter"""
length = len(seq)
if calpc:
# Make a dictionary "freqs" to contain the frequency(in % ) of each base.
freqs = {}
else:
# Make a dictionary "base_counts" to contain the frequency(whole number) of each base.
base_counts = {}
if useall:
# If we want to look at every letter that appears in the sequence.
seqset = set(seq)
else:
# If we just want to look at the four bases A, T, C, G
seqset = ("A", "T", "G", "C")
for letter in seqset:
num = seq.count(letter)
if calpc:
# The frequency is calculated out of the total sequence length, even though some bases are not A, T, C, G
freq = round(num/length, 2)
freqs[letter] = freq
else:
# Contain the actual number of bases.
base_counts[letter] = num
if calpc:
return freqs
else:
return base_counts
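# Example showing both output modes:
if __name__ == "__main__":
    seq = "ATGCATGCNN"
    print(frequency(seq))                            # counts of A, T, G, C only
    print(frequency(seq, useall=True, calpc=True))   # fractions of every letter seen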
|
84f17aef0e02fbc927b4da49dab2e31c6121730d
| 26,181 |
def packages(request):
"""
Return a list of all packages or add a list of packages
"""
if request.method == "GET":
package_list = Package.objects.all()
serializer = PackageSerializer(package_list, many=True)
return Response(serializer.data)
if request.method == "POST":
package_license_used_differ = []
package_already_in_database = []
package_wants_to_use_unknown_license = []
package_added_to_database = []
packagereturn = {}
        # Iterate through all received Packages and sort them into one of the four above lists
        for package_dic in request.data:
            # Change the keys of package_dic to lowercase
            package_dic = dict((k.lower(), v) for k, v in package_dic.items())
# Case_1 Package is already in Database
if Package.objects.filter(package_name=package_dic["package_name"]).filter(
package_version=package_dic["package_version"]
):
package_from_database = Package.objects.filter(
package_name=package_dic["package_name"]
).filter(package_version=package_dic["package_version"])[0]
# Case_1_1 Package already in Database but is using another used_license or has no used_license
if package_dic["license_used"]:
if not (
package_from_database.license_used.title
== package_dic["license_used"]
) or not package_dic["license_used"] in package_dic["license_list"]:
package_license_used_differ.append(package_dic)
                # Case_1_2 Package is in Database and doesn't differ
else:
package_already_in_database.append(package_dic)
# Case_2 Package is not in Database yet
else:
# Case_2_1 Package is using a license which is not in the Database
if not are_all_licenses_in_database(package_dic):
package_wants_to_use_unknown_license.append(package_dic)
# Case_2_2 Package can be added to the Database
else:
package_added_to_database.append(package_dic)
        # Add all Packages collected in package_added_to_database to the Database
        serializer = PackageSerializer(data=package_added_to_database, many=True)
        if not serializer.is_valid():
            return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
        serializer.save()
        # Return all four lists of Packages in one dictionary to the LBS
        packagereturn[
            "Packages that were added to the Database"
        ] = package_added_to_database
        packagereturn[
            "Packages where the license used differs"
        ] = package_license_used_differ
        packagereturn[
            "Packages not added to the Database because they were already in the Database"
        ] = package_already_in_database
        packagereturn[
            "Packages that want to use an unknown License"
        ] = package_wants_to_use_unknown_license
        return Response(packagereturn, status=status.HTTP_201_CREATED)
    return Response(status=status.HTTP_400_BAD_REQUEST)
|
778607965b867578b9df363eee19382bc97078ca
| 26,182 |
def progressive_multiple_alignment(
Gs, cost_params, similarity_tuples, method, max_iters,
max_algorithm_iterations, max_entities, shuffle, self_discount,
do_evaluate, update_edges=False):
"""
Merge input graphs one graph at a time using any 2-network alignment method.
Input:
(same as merge_multiple_networks())
"""
assert len(Gs) > 1, "Must have at least 2 graphs to align."
# Construct candidate matches data structure
candidate_matches = {}
if similarity_tuples is not None:
for (g1, n1, g2, n2, sim) in similarity_tuples:
if (g1, n1) not in candidate_matches:
candidate_matches[(g1, n1)] = []
candidate_matches[(g1, n1)].append((g2, n2, sim))
else:
# Collect candidate matches based on 'name' attributes of nodes
for g1i, g1 in enumerate(Gs):
for n1_label in g1.nodes():
gn1 = (g1i, n1_label)
# Every node can be mapped to itself
candidate_matches[gn1] = [(g1i, n1_label, 1)]
for g2i, g2 in enumerate(Gs):
if g2i == g1i:
continue
for n2_label in g2.nodes():
if g1.node[n1_label]['name'] == \
g2.node[n2_label]['name']:
candidate_matches[gn1].append((g2i, n2_label, 1))
# Dict from (graph,node) -> master_graph_node
node2idx = {}
idx2node = {}
# Set of nodes mapped to themselves (to which we might map future nodes)
active_nodes = set()
# The assignment / asg.matches
alignment = {}
# Add the first graph
first_graph_idx = 0
master_G = Gs[first_graph_idx]
for node_idx, node_label in enumerate(master_G.nodes()):
node2idx[(first_graph_idx, node_label)] = node_idx
idx2node[node_idx] = (first_graph_idx, node_label)
active_nodes.add((first_graph_idx, node_label))
alignment[(first_graph_idx, node_label)] = (first_graph_idx, node_label)
master_G = nx.relabel_nodes(master_G, {label: i for i, label in
enumerate(master_G.nodes())})
# Start aligning other graphs one at a time
for graph_idx in range(len(Gs)):
if graph_idx == first_graph_idx:
continue
print "Progressively aligning graph {}.".format(graph_idx)
current_G = Gs[graph_idx]
current_Gs = [master_G, current_G]
# Construct a problem with only a subset of similarity_tuples
temp_sim_tuples = []
# Filter the similarity tuples between active_nodes and current graph
for node_label in current_G:
# Node can always be mapped to itself
temp_sim_tuples.append((1, node_label, 1, node_label, 1))
for cand_graph, cand_node, sim in candidate_matches[(graph_idx,
node_label)]:
if (cand_graph, cand_node) in active_nodes:
master_node_idx = node2idx[(cand_graph, cand_node)]
temp_sim_tuples.append((1, node_label, 0, master_node_idx,
sim))
x, other = amn.align_multiple_networks(
current_Gs, cost_params, temp_sim_tuples, method, max_iters,
max_algorithm_iterations, max_entities, shuffle, self_discount,
do_evaluate)
new_nodes = other['graph2nodes'][1]
old_nodes = other['graph2nodes'][0]
master_G = other['G']
for node_i, node_idx in enumerate(new_nodes):
node_label = current_G.nodes()[node_i]
node2idx[(graph_idx, node_label)] = node_idx
idx2node[node_idx] = (graph_idx, node_label)
if x[node_idx] != node_idx: # Map to master graph
assert x[node_idx] in old_nodes, \
"Node should map to itself or to master graph"
else: # Map to itself
active_nodes.add((graph_idx, node_label))
alignment[(graph_idx, node_label)] = idx2node[x[node_idx]]
if update_edges:
# Update edge weights on master graph
            current_map = {label: i for i, label in enumerate(current_G.nodes())}
for u_label, v_label in current_G.edges():
u = current_map[u_label]
v = current_map[v_label]
u_idx = new_nodes[u]
v_idx = new_nodes[v]
if x[u_idx] != u_idx or x[v_idx] != v_idx:
u_x = x[u_idx]
v_x = x[v_idx]
if v_x not in master_G.edge[u_x]:
master_G.add_edge(u_x, v_x, weight=1)
"""
elif 'weight' in master_G.edge[u_x][v_x]:
# Weighted edges similar to FLAN (hasn't been properly
# tested yet)
master_G.edge[u_x][v_x]['weight'] += 1
"""
full_x = []
for idx in master_G:
graph_idx, node_idx = idx2node[idx]
full_x.append(node2idx[alignment[(graph_idx, node_idx)]])
results = {'best_x': full_x, 'feasible_scores': [-1], 'iterations': -1,
'lb': -1, 'ub': -1}
return results, master_G
|
ed2353aa196b1666ac8b165f0b758d429e7a5a32
| 26,183 |
def meet_sigalg(dist, rvs, rv_mode=None):
"""
Returns the sigma-algebra of the meet of random variables defined by `rvs`.
Parameters
----------
dist : Distribution
The distribution which defines the base sigma-algebra.
rvs : list
A list of lists. Each list specifies a random variable to be
met with the other lists. Each random variable can defined as a
series of unique indexes. Multiple random variables can use the same
index. For example, [[0,1],[1,2]].
rv_mode : str, None
Specifies how to interpret the elements of `rvs`. Valid options are:
{'indices', 'names'}. If equal to 'indices', then the elements of
`rvs` are interpreted as random variable indices. If equal to 'names',
        the elements are interpreted as random variable names. If `None`,
then the value of `dist._rv_mode` is consulted.
Returns
-------
msa : frozenset of frozensets
The induced sigma-algebra of the meet.
"""
# We require unique indexes within each random variable and want the
# indexes in distribution order. We don't need the names.
parse = lambda rv: parse_rvs(dist, rv, rv_mode=rv_mode,
unique=False, sort=True)[1]
indexes = [parse(rv) for rv in rvs]
sigalgs = [induced_sigalg(dist, rv, rv_mode=RV_MODES.INDICES)
for rv in indexes]
# \sigma( X meet Y ) = \sigma(X) \cap \sigma(Y) )
# Intersect all the sigma algebras.
first_sa = sigalgs[0]
msa = first_sa.intersection(*sigalgs[1:])
return msa
|
c03bbceae5bcec561ffe38ad8df8a85443f84639
| 26,184 |
import json
def ongoing_series():
""" Retrieve all series that are supposedly ongoing.
This call is *very* slow and expensive when not cached;
in practice, users should *never* get a non-cached response.
:return: json list with subset representation of
:class:`marvelous.Series` instances
"""
response = cache.get('ongoing')
if response:
return response
try:
fetched = get_ongoing()
response_json = json.dumps(fetched, default=json_serial)
cache.set('ongoing', response_json, series_cache_time())
return response_json
except ApiError as a:
app.logger.error(a.args)
return abort(422)
|
05eaf4314eda6f4622c7624c6a742d23447a5e31
| 26,185 |
from sklearn.utils.validation import check_X_y
def check_inputs(Y, T, X, W=None, multi_output_T=True, multi_output_Y=True):
"""
Input validation for CATE estimators.
Checks Y, T, X, W for consistent length, enforces X, W 2d.
Standard input checks are only applied to all inputs,
such as checking that an input does not have np.nan or np.inf targets.
Converts regular Python lists to numpy arrays.
Parameters
----------
Y : array_like, shape (n, ) or (n, d_y)
Outcome for the treatment policy.
T : array_like, shape (n, ) or (n, d_t)
Treatment policy.
X : array-like, shape (n, d_x)
Feature vector that captures heterogeneity.
W : array-like, shape (n, d_w) or None (default=None)
High-dimensional controls.
multi_output_T : bool
Whether to allow more than one treatment.
multi_output_Y: bool
Whether to allow more than one outcome.
Returns
-------
Y : array_like, shape (n, ) or (n, d_y)
Converted and validated Y.
T : array_like, shape (n, ) or (n, d_t)
Converted and validated T.
X : array-like, shape (n, d_x)
Converted and validated X.
W : array-like, shape (n, d_w) or None (default=None)
Converted and validated W.
"""
X, T = check_X_y(X, T, multi_output=multi_output_T, y_numeric=True)
_, Y = check_X_y(X, Y, multi_output=multi_output_Y, y_numeric=True)
if W is not None:
W, _ = check_X_y(W, Y, multi_output=multi_output_Y, y_numeric=True)
return Y, T, X, W
|
f9514eb4d8717dfbd2aae9ef89d50353324f14f8
| 26,186 |
from hashlib import sha256
def register(request):
""" Register a new node
"""
if request.method != 'POST':
return HttpResponse("Invalid GET")
#
# Registration requires only the exchange of a PSK.
#
# hash
skey = request.POST.get('skey')
node_id = None
if 'node_id' in request.POST:
node_id = request.POST.get('node_id')
# compare
if sha256(settings.SECRET_KEY).hexdigest() != skey:
return HttpResponse("Invalid Key")
try:
node = Node.objects.get(id=node_id)
except Node.DoesNotExist:
# new node
node = Node.objects.create(
ip=request.META.get('REMOTE_ADDR'),
active=True,
start_time=utility.timestamp(),
session_name=request.POST.get("session"),
session_fuzzer=request.POST.get("fuzzer")
)
node.save()
return HttpResponse("Node %d Active" % node.id)
|
dd9ce231dfb6ff9c71ccb90595ab6c0cc3d7632d
| 26,187 |
def get_tag_messages(tags_range, log_info):
"""
Add tag messages, commit objects and update the max author length in
`log_info`.
Args:
tags_range(list[:class:`~git.refs.tag.TagReference`]): Range of tags to
get information from
log_info(dict): Dictionary containing log information from commits
Raises:
:class:`exception.ValueError`: Can't find tag info
Returns:
dict: A dictionary containing log info for tags
"""
for tag in tags_range:
# Find where info is stored (depends on whether annotated or
# lightweight tag)
if hasattr(tag.object, 'author'):
tag_info = tag.object
elif hasattr(tag.object.object, 'author'):
tag_info = tag.object.object
else:
raise ValueError("Can't find tag info")
sha = tag.object.hexsha[:7]
author = tag_info.author.name
time_stamp = tag_info.committed_date
summary = tag_info.summary.replace('\n', ' ')
# Summary is included in message, so just get extra part
message = tag_info.message[len(summary):].replace('\n', ' ')
summary += ' (RELEASE: ' + tag.name + ')'
formatted_time = convert_time_stamp(time_stamp)
log_info['logs'].append([time_stamp, sha, author, summary,
formatted_time, message])
# Add to dictionary of commit objects for creating diff info later
log_info['commit_objects'][sha] = tag.commit
# Find longest author name to pad all lines to the same length
if len(author) > log_info['max_author_length']:
log_info['max_author_length'] = len(author)
return log_info
|
743705cf191593fda37396e92905742fff3e4e06
| 26,188 |
def _profile_info(user, username=None, following=False, followers=False):
"""
Helper to give basic profile info for rendering the profile page or its child pages
"""
follows = False
if not username or user.username == username: #viewing own profile
username = user.username
if following:
msg_type = 'self_following'
elif followers:
msg_type = 'self_followers'
else:
msg_type = 'self_profile_stream'
else:
follows = user.profile.follows.filter(user__username=username).exists()
if following:
msg_type = 'following'
elif followers:
msg_type = 'followers'
else:
msg_type = 'profile_stream'
empty_search_msg = EMPTY_SEARCH_MSG[msg_type]
profile_user = get_object_or_404(User, username=username)
return username, follows, profile_user, empty_search_msg
|
e4c7bf4397468799c7a3e176f385e6209155c2fa
| 26,189 |
def _ProcessMergedIntoSD(fmt):
"""Convert a 'mergedinto' sort directive into SQL."""
left_joins = [
(fmt('IssueRelation AS {alias} ON Issue.id = {alias}.issue_id '
'AND {alias}.kind = %s'),
['mergedinto'])]
order_by = [(fmt('ISNULL({alias}.dst_issue_id) {sort_dir}'), []),
(fmt('{alias}.dst_issue_id {sort_dir}'), [])]
return left_joins, order_by
|
290dd7e79f936f5d125a12a5bb920c65f1812296
| 26,190 |
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D  # registers the 3d projection
def Plot3DImage(rgb_image,depth_image,sample=True,samplerate=.5,ignoreOutliers=True):
"""
Input:
rgb_image[pixely,pixelx,rgb] - Standard rgb image [0,255]
depth_image[pixely,pixelx,depth] - Image storing depth measurements as greyscale - 255=5m, 0=0m
sample(Bool) - Plot full image or random sample of points
samplerate(0,1) - Percentage of pixels to plot
ignoreOutliers - Ignores values at the camera lens (0,0,0)
Output:
None (Creates a matplotlib plot)
"""
XYZ=depth2XYZ(depth_image)
flatXYZ=XYZ.reshape((XYZ.shape[0]*XYZ.shape[1],XYZ.shape[2]))
flatColors=rgb_image.reshape((rgb_image.shape[0]*rgb_image.shape[1],rgb_image.shape[2]))/255.0
plotIndices=(np.ones((rgb_image.shape[0]*rgb_image.shape[1]))==1)
if(ignoreOutliers):
zeroIndices=(flatXYZ[:,0]**2+flatXYZ[:,1]**2+flatXYZ[:,2]**2)>0
plotIndices=plotIndices & zeroIndices
if(sample):
plotIndices=plotIndices & (np.random.rand(plotIndices.shape[0])<=samplerate)
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.scatter(flatXYZ[plotIndices==True,0],flatXYZ[plotIndices==True,1],flatXYZ[plotIndices==True,2],c=flatColors[plotIndices==True,:], s=8, edgecolors='none')
ax.view_init(elev=-115, azim=-90)
plt.show()
return(None)
|
0b5ee3abde0255d6a853afbf31f12d72c9aef809
| 26,192 |
import numpy as np
def smooth_curve(x, y, npts=50, deg=12):
"""Smooth curve using a polyfit
Parameters
----------
x : numpy array
X-axis data
y : numpy array
Y-axis data
npts : int
Optional, number of points to resample curve
deg : int
Optional, polyfit degree
Returns
-------
smoothed x array
smoothed y array
"""
xx = np.linspace(1, np.max(x), npts)
yhat = np.poly1d(np.polyfit(x, y, deg))
yh = yhat(xx)
return xx, yh
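# Example (uses the numpy import added above); a degree-12 polyfit on noisy data
# may emit a conditioning warning, which is harmless for this illustration.
if __name__ == "__main__":
    x = np.linspace(1, 10, 100)
    y = np.sin(x) + 0.1 * np.random.randn(100)
    xs, ys = smooth_curve(x, y, npts=50, deg=12)
    print(xs.shape, ys.shape)  # (50,) (50,)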
|
998ffaa0dfd9fc2d2e0f04e7f0d8a5fa8fe00943
| 26,193 |
def gather_allele_freqs(record, samples, males_set, females_set, parbt, pop_dict, pops,
sex_chroms, no_combos=False):
"""
Wrapper to compute allele frequencies for all sex & population pairings
"""
# Add PAR annotation to record (if optioned)
if record.chrom in sex_chroms and len(parbt) > 0:
if in_par(record, parbt):
rec_in_par = True
record.info['PAR'] = True
else:
rec_in_par = False
else:
rec_in_par = False
# Get allele frequencies for all populations
calc_allele_freq(record, samples)
if len(males_set) > 0:
if record.chrom in sex_chroms and not rec_in_par:
calc_allele_freq(record, males_set, prefix='MALE', hemi=True)
else:
calc_allele_freq(record, males_set, prefix='MALE')
if len(females_set) > 0:
calc_allele_freq(record, females_set, prefix='FEMALE')
# Adjust global allele frequencies on sex chromosomes, if famfile provided
if record.chrom in sex_chroms and not rec_in_par \
and svu.is_biallelic(record) and len(males_set) + len(females_set) > 0:
update_sex_freqs(record)
# Get allele frequencies per population
if len(pops) > 0:
for pop in pops:
pop_samps = [
s for s in samples if pop_dict.get(s, None) == pop]
calc_allele_freq(record, pop_samps, prefix=pop)
if len(males_set) > 0 and not no_combos:
if record.chrom in sex_chroms and not rec_in_par:
calc_allele_freq(record, list([s for s in pop_samps if s in males_set]),
prefix=pop + '_MALE', hemi=True)
else:
calc_allele_freq(record, list([s for s in pop_samps if s in males_set]),
prefix=pop + '_MALE')
if len(females_set) > 0 and not no_combos:
calc_allele_freq(record, list([s for s in pop_samps if s in females_set]),
prefix=pop + '_FEMALE')
# Adjust per-pop allele frequencies on sex chromosomes, if famfile provided
if record.chrom in sex_chroms and not rec_in_par \
and svu.is_biallelic(record) and len(males_set) + len(females_set) > 0:
update_sex_freqs(record, pop=pop)
# Get POPMAX AF biallelic sites only
if svu.is_biallelic(record):
AFs = [record.info['{0}_AF'.format(pop)][0] for pop in pops]
popmax = max(AFs)
record.info['POPMAX_AF'] = popmax
return record
|
8e7b1f605d6473e77128806e4e6c64e3a338fff5
| 26,194 |
from datetime import datetime, timedelta, date
import json
def get_history_events_closest():
    """ Retrieve the full list of pages as JSON """
try:
event_schema = HistoryEventsSchema(many=True)
datef = datetime.today()
further_date = datef + timedelta(days=14)
if further_date.year > datetime.today().year:
further_date = date(year=datetime.today().year, month=12, day=31)
events = HistoryEvents.query.filter(
extract('month', HistoryEvents.event_date) <= further_date.month,
extract('month', HistoryEvents.event_date) >= datef.month,
extract('day', HistoryEvents.event_date) <= further_date.day,
extract('day', HistoryEvents.event_date) >= datef.day,
)
edata = event_schema.dump(events)
edata = edata.data
response = Response(
response=json.dumps(edata),
status=200,
mimetype='application/json'
)
except Exception:
response = server_error(request.args.get("dbg"))
return response
|
a35cab21c9fbd18ff8c31fc08760995ce1d22158
| 26,195 |
def get_archive_url(url, timestamp):
"""
Returns the archive url for the given url and timestamp
"""
return WEB_ARCHIVE_TEMPLATE.format(timestamp=timestamp.strftime(WEB_ARCHIVE_TIMESTAMP_FORMAT), url=url)
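# Illustration with assumed module constants (not shown in this snippet):
#
#     WEB_ARCHIVE_TEMPLATE = "https://web.archive.org/web/{timestamp}/{url}"
#     WEB_ARCHIVE_TIMESTAMP_FORMAT = "%Y%m%d%H%M%S"
#     get_archive_url("http://example.com", datetime(2020, 1, 1))
#     # -> "https://web.archive.org/web/20200101000000/http://example.com"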
|
a7b79868ccaf1244591b8b5ffec7ac8ff2310772
| 26,196 |
import requests
import json
from pprint import pprint
def get_text(pathToImage):
"""
Accesses API to get json file containing recognized text
"""
print('Processing: ' + pathToImage)
headers = {
'Ocp-Apim-Subscription-Key': API_KEY,
'Content-Type': 'application/octet-stream'
}
params = {
'language': 'en',
'detectOrientation ': 'true'
}
payload = open(pathToImage, 'rb').read()
response = requests.post(ENDPOINT, headers=headers, params=params, data=payload)
results = json.loads(response.content)
# prints the json from the API
pprint(results)
return results
|
037f22c0fda02ddb7df59def274395c537b67369
| 26,197 |
import numpy as np
import pandas as pd
def match_summary(df):
""" summarize the race and ethnicity information in the dataframe
Parameters
----------
df : pd.DataFrame including columns from race_eth_cols
"""
s = pd.Series(dtype=float)
s['n_match'] = df.pweight.sum()
for col in race_eth_cols:
if col in df.columns:
s[col] = np.sum(df.pweight * df[col]) / df.pweight.sum()
return s
|
5886563d59426f952542bdf5ae579b612475b934
| 26,198 |
def parse_git_version(git) :
"""Parses the version number for git.
Keyword arguments:
git - The result of querying the version from git.
"""
return git.split()[2]
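# Example with a typical `git --version` string:
if __name__ == "__main__":
    print(parse_git_version("git version 2.34.1"))  # -> 2.34.1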
|
f4af03f0fad333ab87962160ed0ebf5dcbeea22a
| 26,199 |
def onRequestLogin(loginName, password, clientType, datas):
"""
KBEngine method.
    Callback invoked when an account requests to log in.
    Login queueing can also be handled here; queue information can be stored in datas.
"""
INFO_MSG('onRequestLogin() loginName=%s, clientType=%s' % (loginName, clientType))
errorno = KBEngine.SERVER_SUCCESS
if len(loginName) > 64:
errorno = KBEngine.SERVER_ERR_NAME
if len(password) > 64:
errorno = KBEngine.SERVER_ERR_PASSWORD
return (errorno, loginName, password, clientType, datas)
|
08c428f5ac27e009b9d1ffe1af9daee1c5cca2f1
| 26,200 |
import logging
from typing import Callable
from typing import Any
def log_calls(
logger: logging.Logger, log_result: bool = True
) -> GenericDecorator:
"""
Log calls to the decorated function.
Can also decorate classes to log calls to all its methods.
:param logger: object to log to
"""
def log_function(
target: Callable[..., TargetReturnT], # TargetFunctionT,
*args: Any,
**kwargs: Any
) -> TargetReturnT:
logger.info(f"{target.__name__} args: {args!r} {kwargs!r}")
result = target(*args, **kwargs)
if log_result:
logger.info(f"{target.__name__} result: {result!r}")
return result
decorator = GenericDecorator(log_function)
return decorator
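# Usage sketch (hedged): `GenericDecorator` is defined elsewhere in this module
# and is assumed to wrap the target callable with `log_function`.
#
#     logger = logging.getLogger(__name__)
#
#     @log_calls(logger)
#     def add(a, b):
#         return a + b
#
#     add(1, 2)   # logs "add args: (1, 2) {}" then "add result: 3"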
|
43e71d224ed45a4f92ddda5f4ac042922b254104
| 26,201 |
def bag(n, c, w, v):
"""
    Test data:
    n = 6 number of items,
    c = 10 weight capacity of the knapsack,
    w = [2, 2, 3, 1, 5, 2] weight of each item,
    v = [2, 3, 1, 5, 4, 3] value of each item
"""
    # Initialize the DP table to zero (initial state)
value = [[0 for j in range(c + 1)] for i in range(n + 1)]
for i in range(1, n + 1):
for j in range(1, c + 1):
value[i][j] = value[i - 1][j]
            # If the remaining capacity can hold the current item, compare with the previous state and take the better option
if j >= w[i - 1]:
value[i][j] = max(value[i-1][j],value[i - 1][j - w[i - 1]] + v[i - 1])
for x in value:
print(x)
return value
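# Example run with the test data from the docstring; the best achievable value is 15.
if __name__ == "__main__":
    table = bag(6, 10, [2, 2, 3, 1, 5, 2], [2, 3, 1, 5, 4, 3])
    print("best value:", table[6][10])  # -> 15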
|
27dd9c5f9367afe865686c8f68853bc966bcdaa6
| 26,202 |
def _to_bytes(key: type_utils.Key) -> bytes:
"""Convert the key to bytes."""
if isinstance(key, int):
return key.to_bytes(128, byteorder='big') # Use 128 as this match md5
elif isinstance(key, bytes):
return key
elif isinstance(key, str):
return key.encode('utf-8')
else:
raise TypeError(f'Invalid key type: {type(key)}')
|
8df22eb02674cb61ef0e8a413f1eed7dc92f0aef
| 26,203 |
def _online(machine):
""" Is machine reachable on the network? (ping)
"""
# Skip excluded hosts
if env.host in env.exclude_hosts:
print(red("Excluded"))
return False
with settings(hide('everything'), warn_only=True, skip_bad_hosts=True):
if local("ping -c 2 -W 1 %s" % machine).failed:
print(yellow('%s is Offline \n' % machine))
return False
return True
|
114b9b708d81c6891a2f99c5c600d9d4aa59c138
| 26,204 |
def get_expectation_value(
qubit_op: QubitOperator, wavefunction: Wavefunction, reverse_operator: bool = False
) -> complex:
"""Get the expectation value of a qubit operator with respect to a wavefunction.
Args:
qubit_op: the operator
wavefunction: the wavefunction
reverse_operator: whether to reverse order of qubit operator
before computing expectation value. This should be True if the convention
of the basis states used for the wavefunction is the opposite of the one in
the qubit operator. This is the case when the wavefunction uses
Rigetti convention (https://arxiv.org/abs/1711.02086) of ordering qubits.
Returns:
the expectation value
"""
n_qubits = wavefunction.amplitudes.shape[0].bit_length() - 1
# Convert the qubit operator to a sparse matrix. Note that the qubit indices
# must be reversed because OpenFermion and our Wavefunction use
# different conventions for how to order the computational basis states!
if reverse_operator:
qubit_op = reverse_qubit_order(qubit_op, n_qubits=n_qubits)
sparse_op = get_sparse_operator(qubit_op, n_qubits=n_qubits)
    # Compute the expectation value
exp_val = openfermion_expectation(sparse_op, wavefunction.amplitudes)
return exp_val
|
e0e634bc6c0d5c9d5252e29963333fac3ccc3acd
| 26,207 |
def create_proxy_to(logger, ip, port):
"""
:see: ``HostNetwork.create_proxy_to``
"""
action = CREATE_PROXY_TO(
logger=logger, target_ip=ip, target_port=port)
with action:
encoded_ip = unicode(ip).encode("ascii")
encoded_port = unicode(port).encode("ascii")
# The first goal is to configure "Destination NAT" (DNAT). We're just
# going to rewrite the destination address of traffic arriving on the
# specified port so it looks like it is destined for the specified ip
# instead of destined for "us". This gets the packets delivered to the
# right destination.
iptables(logger, [
# All NAT stuff happens in the netfilter NAT table.
b"--table", b"nat",
# Destination NAT has to happen "pre"-routing so that the normal
# routing rules on the machine will use the re-written destination
# address and get the packet to that new destination. Accomplish
# this by appending the rule to the PREROUTING chain.
b"--append", b"PREROUTING",
# Only re-route traffic with a destination port matching the one we
# were told to manipulate. It is also necessary to specify TCP (or
# UDP) here since that is the layer of the network stack that
# defines ports.
b"--protocol", b"tcp", b"--destination-port", encoded_port,
# And only re-route traffic directed at this host. Traffic
# originating on this host directed at some random other host that
# happens to be on the same port should be left alone.
b"--match", b"addrtype", b"--dst-type", b"LOCAL",
# Tag it as a flocker-created rule so we can recognize it later.
b"--match", b"comment", b"--comment", FLOCKER_PROXY_COMMENT_MARKER,
# If the filter matched, jump to the DNAT chain to handle doing the
# actual packet mangling. DNAT is a built-in chain that already
# knows how to do this. Pass an argument to the DNAT chain so it
# knows how to mangle the packet - rewrite the destination IP of
# the address to the target we were told to use.
b"--jump", b"DNAT", b"--to-destination", encoded_ip,
])
# Bonus round! Having performed DNAT (changing the destination) during
# prerouting we are now prepared to send the packet on somewhere else.
# On its way out of this system it is also necessary to further
# modify and then track that packet. We want it to look like it
# comes from us (the downstream client will be *very* confused if
# the node we're passing the packet on to replies *directly* to them;
# and by confused I mean it will be totally broken, of course) so we
# also need to "masquerade" in the postrouting chain. This changes
# the source address (ip and port) of the packet to the address of
# the external interface the packet is exiting upon. Doing SNAT here
# would be a little bit more efficient because the kernel could avoid
# looking up the external interface's address for every single packet.
# But it requires this code to know that address and it requires that
# if it ever changes the rule gets updated and it may require some
# steps to do port allocation (not sure what they are yet). So we'll
# just masquerade for now.
iptables(logger, [
# All NAT stuff happens in the netfilter NAT table.
b"--table", b"nat",
# As described above, this transformation happens after routing
# decisions have been made and the packet is on its way out of the
# system. Therefore, append the rule to the POSTROUTING chain.
b"--append", b"POSTROUTING",
# We'll stick to matching the same kinds of packets we matched in
# the earlier stage. We might want to change the factoring of this
# code to avoid the duplication - particularly in case we want to
# change the specifics of the filter.
#
# This omits the LOCAL addrtype check, though, because at this
# point the packet is definitely leaving this host.
b"--protocol", b"tcp", b"--destination-port", encoded_port,
# Do the masquerading.
b"--jump", b"MASQUERADE",
])
# Secret level!! Traffic that originates *on* the host bypasses the
# PREROUTING chain. Instead, it passes through the OUTPUT chain. If
# we want connections from localhost to the forwarded port to be
# affected then we need a rule in the OUTPUT chain to do the same kind
# of DNAT that we did in the PREROUTING chain.
iptables(logger, [
# All NAT stuff happens in the netfilter NAT table.
b"--table", b"nat",
# As mentioned, this rule is for the OUTPUT chain.
b"--append", b"OUTPUT",
# Matching the exact same kinds of packets as the PREROUTING rule
# matches.
b"--protocol", b"tcp",
b"--destination-port", encoded_port,
b"--match", b"addrtype", b"--dst-type", b"LOCAL",
# Do the same DNAT as we did in the rule for the PREROUTING chain.
b"--jump", b"DNAT", b"--to-destination", encoded_ip,
])
iptables(logger, [
b"--table", b"filter",
b"--insert", b"FORWARD",
b"--destination", encoded_ip,
b"--protocol", b"tcp", b"--destination-port", encoded_port,
b"--jump", b"ACCEPT",
])
# The network stack only considers forwarding traffic when certain
# system configuration is in place.
#
# https://www.kernel.org/doc/Documentation/networking/ip-sysctl.txt
# will explain the meaning of these in (very slightly) more detail.
conf = FilePath(b"/proc/sys/net/ipv4/conf")
descendant = conf.descendant([b"default", b"forwarding"])
with descendant.open("wb") as forwarding:
forwarding.write(b"1")
# In order to have the OUTPUT chain DNAT rule affect routing decisions,
# we also need to tell the system to make routing decisions about
# traffic from or to localhost.
for path in conf.children():
with path.child(b"route_localnet").open("wb") as route_localnet:
route_localnet.write(b"1")
return Proxy(ip=ip, port=port)
|
006ae25e069f3ac82887e9071170b334a0c6ea74
| 26,208 |
def get_output_shape(tensor_shape, channel_axis):
"""
    Returns a shape vector with -1 at the given channel_axis location and 1 at all other locations (e.g. for reshaping per-channel parameters for broadcasting).
Args:
tensor_shape: A shape vector of a tensor.
channel_axis: Output channel index.
Returns: A shape vector of a tensor.
"""
    return [-1 if i == channel_axis else 1 for i in range(len(tensor_shape))]
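# Example: broadcasting per-channel parameters over NHWC / NCHW tensor shapes.
if __name__ == "__main__":
    print(get_output_shape((8, 32, 32, 64), channel_axis=3))   # [1, 1, 1, -1]
    print(get_output_shape((8, 64, 32, 32), channel_axis=1))   # [1, -1, 1, 1]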
|
7c7058c2da9cb5a4cdb88377ece4c2509727894a
| 26,209 |
import numbers
import numpy as np
def process_padding(pad_to_length, lenTrials):
"""
Simplified padding interface, for all taper based methods
padding has to be done **before** tapering!
Parameters
----------
pad_to_length : int, None or 'nextpow2'
Either an integer indicating the absolute length of
the trials after padding or `'nextpow2'` to pad all trials
to the nearest power of two. If `None`, no padding is to
be performed
lenTrials : sequence of int_like
Sequence holding all individual trial lengths
Returns
-------
abs_pad : int
Absolute length of all trials after padding
"""
# supported padding options
not_valid = False
if not isinstance(pad_to_length, (numbers.Number, str, type(None))):
not_valid = True
elif isinstance(pad_to_length, str) and pad_to_length not in availablePaddingOpt:
not_valid = True
# bool is an int subclass, have to check for it separately...
if isinstance(pad_to_length, bool):
not_valid = True
if not_valid:
lgl = "`None`, 'nextpow2' or an integer like number"
actual = f"{pad_to_length}"
raise SPYValueError(legal=lgl, varname="pad_to_length", actual=actual)
# zero padding of ALL trials the same way
if isinstance(pad_to_length, numbers.Number):
scalar_parser(pad_to_length,
varname='pad_to_length',
ntype='int_like',
lims=[lenTrials.max(), np.inf])
abs_pad = pad_to_length
# or pad to optimal FFT lengths
elif pad_to_length == 'nextpow2':
abs_pad = _nextpow2(int(lenTrials.max()))
# no padding in case of equal length trials
elif pad_to_length is None:
abs_pad = int(lenTrials.max())
if lenTrials.min() != lenTrials.max():
msg = f"Unequal trial lengths present, padding all trials to {abs_pad} samples"
SPYWarning(msg)
# `abs_pad` is now the (soon to be padded) signal length in samples
return abs_pad
|
b018e32c2c12a67e05e5e324f4fcd8547a9d992b
| 26,210 |
from ..constants import asecperrad
import numpy as np
def angular_to_physical_size(angsize,zord,usez=False,**kwargs):
"""
Converts an observed angular size (in arcsec or as an AngularSeparation
object) to a physical size.
:param angsize: Angular size in arcsecond.
:type angsize: float or an :class:`AngularSeparation` object
:param zord: Redshift or distance
:type zord: scalar number
:param usez:
If True, the input will be interpreted as a redshift, and kwargs
will be passed into the distance calculation. The result will be in
pc. Otherwise, `zord` will be interpreted as a distance.
:type usez: boolean
kwargs are passed into :func:`cosmo_z_to_dist` if `usez` is True.
:returns:
A scalar value for the physical size (in pc if redshift is used,
otherwise in `zord` units)
"""
if usez:
d = cosmo_z_to_dist(zord,disttype=2,**kwargs)*1e6 #pc
else:
if len(kwargs)>0:
raise TypeError('if not using redshift, kwargs should not be provided')
d = zord
if hasattr(angsize,'arcsec'):
angsize = angsize.arcsec
sintheta = np.sin(angsize/asecperrad)
return d*(1/sintheta/sintheta-1)**-0.5
#return angsize*d/asecperrad
|
80fbd5aa90807536e06157c338d86973d6050c7c
| 26,211 |
def is_tuple(x):
"""
Check that argument is a tuple.
Parameters
----------
x : object
Object to check.
Returns
-------
bool
True if argument is a tuple, False otherwise.
"""
return isinstance(x, tuple)
|
ac129c4ab7c6a64401f61f1f03ac094b3a7dc6d4
| 26,213 |
import pynini
def find_verbalizer(tagged_text: str) -> pynini.FstLike:
"""
Given tagged text, e.g. token {name: ""} token {money {fractional: ""}}, creates verbalization lattice
This is context-independent.
Args:
tagged_text: input text
Returns: verbalized lattice
"""
lattice = tagged_text @ verbalizer.fst
return lattice
|
fef98d7a2038490f926dc89fc7aefbbda52ed611
| 26,214 |
def get_color_pattern(input_word: str, solution: str) -> str:
"""
Given an input word and a solution, generates the resulting
color pattern.
"""
color_pattern = [0 for _ in range(5)]
sub_solution = list(solution)
for index, letter in enumerate(list(input_word)):
if letter == solution[index]:
color_pattern[index] = 2
sub_solution[index] = "_"
for index, letter in enumerate(list(input_word)):
if letter in sub_solution and color_pattern[index] != 2:
color_pattern[index] = 1
sub_solution[sub_solution.index(letter)] = "_"
color_pattern = "".join([str(c) for c in color_pattern])
return color_pattern
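# Example ("2" = right letter & spot, "1" = right letter wrong spot, "0" = absent):
if __name__ == "__main__":
    print(get_color_pattern("crane", "cigar"))  # -> 21100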
|
a8746a5854067e27e0aefe451c7b950dd9848f50
| 26,215 |
import inspect
def getargspec(obj):
"""Get the names and default values of a function's arguments.
A tuple of four things is returned: (args, varargs, varkw, defaults).
'args' is a list of the argument names (it may contain nested lists).
'varargs' and 'varkw' are the names of the * and ** arguments or None.
'defaults' is an n-tuple of the default values of the last n arguments.
Modified version of inspect.getargspec from the Python Standard
Library."""
if inspect.isfunction(obj):
func_obj = obj
elif inspect.ismethod(obj):
func_obj = obj.__func__
else:
raise TypeError('arg is not a Python function')
args, varargs, varkw = inspect.getargs(func_obj.__code__)
return args, varargs, varkw, func_obj.__defaults__
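# Example with a function using defaults, *args and **kwargs:
if __name__ == "__main__":
    def example(a, b=2, *args, **kwargs):
        return a + b
    print(getargspec(example))  # (['a', 'b'], 'args', 'kwargs', (2,))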
|
bcfe75de95ccf22bcefdba52c8556f0722dbbcb7
| 26,216 |
import numpy as np
def nearest(x, xp, fp=None):
"""
Return the *fp* value corresponding to the *xp* that is nearest to *x*.
If *fp* is missing, return the index of the nearest value.
"""
if len(xp) == 1:
if np.isscalar(x):
return fp[0] if fp is not None else 0
else:
return np.array(len(x)*(fp if fp is not None else [0]))
# if fp is not provided, want to return f as an index into the array xp
# for the target values x, so set it to integer indices. if fp is
# provided, make sure it is an array.
fp = np.arange(len(xp)) if fp is None else np.asarray(fp)
# make sure that the xp array is sorted
xp = np.asarray(xp)
if np.any(np.diff(xp) < 0.):
index = np.argsort(xp)
xp, fp = xp[index], fp[index]
# find the midpoints of xp and use that as the index
xp = 0.5*(xp[:-1] + xp[1:])
return fp[np.searchsorted(xp, x)]
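# Example (uses the numpy import added above):
if __name__ == "__main__":
    xp = [0.0, 1.0, 2.0, 3.0]
    print(nearest(1.4, xp))                        # index of nearest value -> 1
    print(nearest(1.6, xp, fp=[10, 20, 30, 40]))   # nearest fp value -> 30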
|
77ebcb08adbc5b1a70d801c7fe1f4b31f1b14717
| 26,217 |