def parseAreas(area):
"""Parse the strings into address. This function is highly customized and demonstrates the general steps for transforming raw covid cases data to a list of address searchable in Google Map.
Arguments:
area: raw data downloaded from a news app
Return:
l: a list of human-readable address searchable in Google Map
"""
#FIXME: This function ideally should be generalized if the data source is still news app
l = []
ll = area.split(";")
for k in ll:
kk = k.split("、")
if len(kk)>1:
if len(kk[1])<=3: # all members of kk belong to the same residential area
l.append(kk[0][:(len(kk[0])-len(kk[1]))])
else: # members of kk belong to different residential area
l.append(kk[0])
for mm in range(1,len(kk)):
if kk[0][2]== "区":
kk[mm] = kk[0][:3] + kk[mm]
elif kk[0][3]== "区":
kk[mm] = kk[0][:4] + kk[mm]
l.append(kk[mm])
else:
l.append(k)
return(l) | f205fca1d0b76aa63839a7c00552a790110998e4 | 17,500 |
def is_valid_instruction(instr: int, cpu: Cpu = Cpu.M68000) -> bool:
"""Check if an instruction is valid for the specified CPU type"""
return bool(lib.m68k_is_valid_instruction(instr, cpu.value)) | ae528e503e24698507971334d33dc6abf0f4c39c | 17,501 |
import textwrap
import sys
def wrap_text(translations, linewrap=0):
"""Pretty print translations.
If linewrap is set to 0, disable line wrapping.
Parameters
----------
translations : list
List of word translations.
linewrap : int
Maximum line length before wrapping.
"""
# pylint: disable=too-many-locals
def wrap(text, width=linewrap, findent=0, sindent=0, bold=False):
if width == 0:
text = " " * findent + text
else:
text = textwrap.fill(
text,
width=width,
initial_indent=" " * findent,
subsequent_indent=" " * sindent,
)
# don't use bold when stdout is pipe or redirect
if bold and sys.stdout.isatty():
text = "\033[0;1m" + text + "\033[0m"
return text
indent = 5
result = []
for i1, trans in enumerate(translations):
if i1 > 0:
result.append("\n")
for w in trans.word:
result.append(wrap(w, bold=True))
for i2, t in enumerate(trans.parts_of_speech):
if i2 > 0:
result.append("")
if t.part:
result.append("[{part}]".format(part=t.part))
for i3, m in enumerate(t.meanings, 1):
if i3 > 1:
result.append("")
meaning = "{index:>3}. {meanings}".format(
index=i3, meanings=", ".join(m.meaning)
)
result.append(wrap(meaning, sindent=indent, bold=True))
eindent = indent + 1
for e in m.examples:
result.append("")
result.append(wrap(e[0], findent=eindent, sindent=eindent))
if len(e) == 2 and e[1]:
result.append(wrap(e[1], findent=eindent, sindent=eindent + 1))
return "\n".join(result) | adecfba77f017d4888fffda43c0466e9125b43ea | 17,502 |
def docker_available():
"""Check if Docker can be run."""
returncode = run.run(["docker", "images"], return_code=True)
return returncode == 0 | 43ce2c7f5cb16657b4607faa5eac61b20e539e53 | 17,503 |
from datetime import datetime
def is_bc(symbol):
"""
Determine whether the symbol shows a divergence (背驰)
:param symbol:
:return:
"""
bars = get_kline(symbol, freq="30min", end_date=datetime.now(), count=1000)
c = CZSC(bars, get_signals=get_selector_signals)
factor_ = Factor(
name="背驰选股",
signals_any=[
Signal("30分钟_倒1笔_三笔形态_向下盘背_任意_任意_0"),
Signal("30分钟_倒1笔_基础形态_底背驰_任意_任意_0"),
Signal("30分钟_倒1笔_类买卖点_类一买_任意_任意_0"),
Signal("30分钟_倒1笔_类买卖点_类二买_任意_任意_0"),
],
signals_all=[
# Signal("30分钟_倒0笔_潜在三买_构成中枢_任意_任意_0")
]
)
# c.open_in_browser()
if factor_.is_match(c.signals):
return True
else:
return False | 07dc2f01374f95544898375b8bc02b6128d70090 | 17,504 |
import time
import calendar
from datetime import timedelta
def IEEE2030_5Time(dt_obj, local=False):
""" Return a proper IEEE2030_5 TimeType object for the dt_obj passed in.
From IEEE 2030.5 spec:
TimeType Object (Int64)
Time is a signed 64 bit value representing the number of seconds
since 0 hours, 0 minutes, 0 seconds, on the 1st of January, 1970,
in UTC, not counting leap seconds.
:param dt_obj: Datetime object to convert to IEEE2030_5 TimeType object.
:param local: dt_obj is in UTC or Local time. Default to UTC time.
:return: Time XSD object
:raises: Exception if dt_obj is naive, or not UTC when local is False
"""
if dt_obj.tzinfo is None:
raise Exception("IEEE 2030.5 times should be timezone aware UTC or local")
if dt_obj.utcoffset() != timedelta(0) and not local:
raise Exception("IEEE 2030.5 TimeType should be based on UTC")
if local:
return xsd_models.TimeType(valueOf_=int(time.mktime(dt_obj.timetuple())))
else:
return xsd_models.TimeType(valueOf_=int(calendar.timegm(dt_obj.timetuple()))) | fbb9466e927f1162226760efbe609bf3e779e163 | 17,505 |
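A quick sanity check of the epoch arithmetic described in the docstring above, using only the standard library; the date is an arbitrary example.
from datetime import datetime, timezone
import calendar
# Seconds since 1970-01-01T00:00:00Z for a UTC-aware datetime, as the
# IEEE 2030.5 TimeType expects (leap seconds are not counted).
dt = datetime(2030, 1, 1, tzinfo=timezone.utc)
print(calendar.timegm(dt.timetuple()))  # 1893456000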
def learning_rate_schedule(params, global_step):
"""Handles learning rate scaling, linear warmup, and learning rate decay.
Args:
params: A dictionary that defines hyperparameters of model.
global_step: A tensor representing current global step.
Returns:
A tensor representing current learning rate.
"""
base_learning_rate = params['base_learning_rate']
lr_warmup_step = params['lr_warmup_step']
first_lr_drop_step = params['first_lr_drop_step']
second_lr_drop_step = params['second_lr_drop_step']
batch_size = params['batch_size']
scaling_factor = params['gpu_num'] * batch_size / ssd_constants.DEFAULT_BATCH_SIZE
adjusted_learning_rate = base_learning_rate * scaling_factor
learning_rate = (tf.cast(global_step, dtype=tf.float32) /
lr_warmup_step) * adjusted_learning_rate
lr_schedule = [[1.0, lr_warmup_step], [0.1, first_lr_drop_step],
[0.01, second_lr_drop_step]]
for mult, start_global_step in lr_schedule:
learning_rate = tf.where(global_step < start_global_step, learning_rate,
adjusted_learning_rate * mult)
return learning_rate | b88d67dd0d241d26bf183e90e3d3c215e0abd957 | 17,506 |
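A small NumPy sketch of the same warmup-then-step-decay shape, with invented hyperparameters; the TensorFlow function above computes this element-wise with tf.where against the current global step.
import numpy as np
# Illustrative schedule: linear warmup, then two step drops (values are made up).
base_lr, warmup, drop1, drop2 = 1e-3, 500, 4000, 8000
steps = np.arange(10000)
lr = base_lr * steps / warmup                      # linear warmup
lr = np.where(steps >= warmup, base_lr, lr)        # plateau at the adjusted rate
lr = np.where(steps >= drop1, base_lr * 0.1, lr)   # first drop
lr = np.where(steps >= drop2, base_lr * 0.01, lr)  # second drop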
import logging
import os
import subprocess
def run(
command,
cwd=None,
capture_output=False,
input=None,
check=True,
**subprocess_run_kwargs
):
""" Wrapper for subprocess.run() that sets some sane defaults """
logging.info("Running {} in {}".format(" ".join(command), cwd or os.getcwd()))
if isinstance(input, str):
input = input.encode("utf-8")
env = os.environ.copy()
env["HOMEBREW_NO_AUTO_UPDATE"] = "1"
return subprocess.run(
command,
cwd=cwd,
input=input,
stdout=subprocess.PIPE if capture_output else None,
check=check,
env=env,
**subprocess_run_kwargs
) | 5595721fc28da8a87230a1d367cbc5c3bacdc779 | 17,507 |
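A hypothetical invocation of the run() wrapper above; the command is a placeholder and assumes a Unix-like environment.
# capture_output=True pipes stdout so it can be inspected on the result object.
result = run(["echo", "hello"], capture_output=True)
print(result.stdout.decode("utf-8").strip())  # hello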
def profile(step):
"""
Profiles a Pipeline step and save the results as HTML file in the project output
directory.
Usage:
@profile
def step(self):
pass
"""
@wraps(step)
def wrapper(*arg, **kwargs):
pipeline_instance = arg[0]
project = pipeline_instance.project
with Profiler() as profiler:
result = step(*arg, **kwargs)
output_file = project.get_output_file_path("profile", "html")
output_file.write_text(profiler.output_html())
pipeline_instance.log(f"Profiling results at {output_file.resolve()}")
return result
return wrapper | f300000a0471a2439ae951a2d33b8a03aa61b333 | 17,508 |
from modin.pandas.series import Series
def make_dataframe_wrapper(DataFrame):
"""
Prepares a "delivering wrapper" proxy class for DataFrame.
It makes DF.loc, DF.groupby() and other methods listed below deliver their
arguments to remote end by value.
"""
conn = get_connection()
class ObtainingItems:
def items(self):
return conn.obtain_tuple(self.__remote_end__.items())
def iteritems(self):
return conn.obtain_tuple(self.__remote_end__.iteritems())
ObtainingItems = _deliveringWrapper(Series, mixin=ObtainingItems)
class DataFrameOverrides(_prepare_loc_mixin()):
@classmethod
def _preprocess_init_args(
cls,
data=None,
index=None,
columns=None,
dtype=None,
copy=None,
query_compiler=None,
):
(data,) = conn.deliver((data,), {})[0]
return (), dict(
data=data,
index=index,
columns=columns,
dtype=dtype,
copy=copy,
query_compiler=query_compiler,
)
@property
def dtypes(self):
remote_dtypes = self.__remote_end__.dtypes
return ObtainingItems(__remote_end__=remote_dtypes)
DeliveringDataFrame = _deliveringWrapper(
DataFrame,
[
"groupby",
"agg",
"aggregate",
"__getitem__",
"astype",
"drop",
"merge",
"apply",
"applymap",
],
DataFrameOverrides,
"DataFrame",
)
return DeliveringDataFrame | a2d523f6e9cb9d23ae722195a091d8e2b68139cc | 17,509 |
def download_cmems_ts(lats, lons, t0, tf, variables, fn=None):
"""Subset CMEMS output using OpenDAP
:params:
lats = [south, north] limits of bbox
lons = [west, east] limits of bbox
t0 = datetime for start of time series
tf = datetime for end of time series
variables = list of variables in ["zos", "uo", "vo", "so", "thetao"]
:returns:
Xarray Dataset of selected variables
"""
validate_datetime(t0)
validate_datetime(tf)
try:
validate_cmems_variable(variables)
except NameError:
raise NameError("Input 'variable' needs to be specified")
_variables, zos = fix_zos(variables)
request = (
"https://my.cmems-du.eu/thredds/dodsC/cmems_mod_glo_phy_my_0.083_P1D-m?"
"longitude[0:1:4319],latitude[0:1:2040],depth[0:1:49],time[0:1:10012]"
)
# query dataset to get coordinates and convert bbox to indices for OpenDAP
coords = xr.open_dataset(request)
lon_ll = cmemslon2index(lons[0], coords) # lower left longitude of bbox
lon_ur = cmemslon2index(lons[1], coords)
lat_ll = cmemslat2index(lats[0], coords)
lat_ur = cmemslat2index(lats[1], coords)
t0i = time2index(t0, coords)
tfi = time2index(tf, coords)
request = (
f"https://my.cmems-du.eu/thredds/dodsC/cmems_mod_glo_phy_my_0.083_P1D-m?"
f"longitude[{lon_ll}:1:{lon_ur}],latitude[{lat_ll}:1:{lat_ur}],depth[0:1:49],time[{t0i}:1:{tfi}],"
)
request = request + "".join(
[
f"{variable}[{t0i}:1:{tfi}][0:1:49][{lat_ll}:1:{lat_ur}][{lon_ll}:1:{lon_ur}]"
for variable in _variables
]
)
# append surf_el if present
if zos is not None:
request = (
request + f"{zos}[{t0i}:1:{tfi}][{lat_ll}:1:{lat_ur}][{lon_ll}:1:{lon_ur}]"
)
ds = xr.open_dataset(request)
if fn is not None:
ds.to_netcdf(fn)
return ds | b97de3a7428d6e2b50ab36b28e47afe479c24042 | 17,510 |
def construct_gpu_info(statuses):
""" util for unit test case """
m = {}
for status in statuses:
m[status.minor] = status
m[status.uuid] = status
return m | b8b2f41799b863d2e22066005b901f17a610d858 | 17,511 |
def load_data_time_machine(batch_size, num_steps, use_random_iter=False,
max_tokens=10000):
"""Return the iterator and the vocabulary of the time machine dataset."""
data_iter = SeqDataLoader(batch_size, num_steps, use_random_iter,
max_tokens)
return data_iter, data_iter.vocab | ed9d6b63c34cf9d1a750daabbdb81e03e467e939 | 17,512 |
def scan_paths(paths, only_detect, recursive, module_filter):
"""
Scans paths for known bots and dumps information from them
@rtype : dict
@param paths: list of paths to check for files
@param only_detect: only detect known bots, don't process configuration information
@param recursive: recursively traverse folders
@param module_filter: if not None, only modules in list will be used
@return: dictionary of file to dictionary of information for each file
"""
results = {}
while len(paths) != 0:
file_path = abspath(paths[0])
del paths[0]
if isfile(file_path):
with open(file_path, mode='rb') as file_handle:
file_content = file_handle.read()
r = scan_file_data(file_content, module_filter, only_detect)
if r is not None:
results[file_path] = r
elif isdir(file_path):
for p in listdir(file_path):
p = join(file_path, p)
if isfile(p) or (isdir(p) and recursive):
paths.append(p)
return results | f58216f1ed5955828738689fa67522a8cc0e497a | 17,513 |
def generate_masks(input_size, output_size=1, observed=None):
"""
Generates some basic input and output masks.
If C{input_size} is an integer, the number of columns of the mask will be
that integer. If C{input_size} is a list or tuple, a mask with multiple channels
is created, which can be used with RGB images, for example.
By default, the input region will cover the upper half of the mask, also known as a
*causal neighborhood*. If any of the channels is observed, the input region in that
channel will cover a full square neighborhood around the output region.
Examples:
>>> input_mask, output_mask = generate_masks(8, 2)
>>> input_mask, output_mask = generate_masks([3, 7, 7], 1, [1, 0, 0])
@type input_size: C{int} / C{list}
@param input_size: determines the size of the input region
@type output_size: C{int}
@param output_size: determines the size of the output region
@type observed: C{list}
@param observed: can be used to indicate channels which are observed
@rtype: C{tuple}
@return: one input mask and one output mask
"""
if not iterable(input_size):
if iterable(observed):
input_size = [input_size] * len(observed)
else:
input_size = [input_size]
if observed is None:
observed = [False] * len(input_size)
if len(observed) != len(input_size):
raise ValueError("Incompatible `input_size` and `observed`.")
num_channels = len(input_size)
num_cols = max(input_size)
num_rows = num_cols if any(observed) else (num_cols + 1) // 2 + output_size // 2
input_mask = zeros([num_rows, num_cols, num_channels], dtype='bool')
output_mask = zeros_like(input_mask)
tmp1 = (num_cols + 1) // 2
tmp2 = output_size // 2
tmp3 = (output_size + 1) // 2
for k in range(num_channels):
offset = tmp1 - (input_size[k] + 1) // 2
if observed[k]:
input_mask[
offset:num_cols - offset,
offset:num_cols - offset, k] = True
else:
input_mask[offset:tmp1 + tmp2, offset:num_cols - offset, k] = True
for i in range(output_size):
input_mask[
tmp1 + tmp2 - i - 1,
tmp1 - tmp3:, k] = False
output_mask[
tmp1 + tmp2 - i - 1,
tmp1 - tmp3:tmp1 + output_size // 2, k] = True
if input_mask.shape[2] == 1:
input_mask.resize(input_mask.shape[0], input_mask.shape[1])
output_mask.resize(output_mask.shape[0], output_mask.shape[1])
return input_mask, output_mask | dee12176f72a158e9f39036981fa1dbd6be81817 | 17,514 |
import yaml
import logging
import sys
def parse_config_file(config):
"""
Load config file (primarily for endpoints)
"""
fail = False
with open(config, 'r') as fp:
content = yaml.safe_load(fp.read())
if 'endpoints' not in content.keys():
return
for title, items in content['endpoints'].items():
if 'url' not in items:
fail = True
logging.error("no url found in endpoint '%s'", title)
continue
if not items['url'].startswith('http'):
fail = True
logging.error("non HTTP(S) url found in endpoint '%s'", title)
if not items['url'].startswith('https'):
logging.warning("non SSL url found in endpoint '%s'", title)
if fail:
logging.info("stopping execution due to blocking config issues")
sys.exit(1)
return content | 2a9d4a47d5c5b8eea9fda0b33284a1c91bfc19b6 | 17,515 |
def average(time_array,height_array,data_array,height_bin_size=100,time_bin_size=3600):
"""
average: function that averages the radar signal by height and time
Args:
time_array: numpy 1d array with timestamps
height_array: numpy 1d array with height range
data_array: numpy 2d array size len(time_array) X len(height_array)
height_bin_size: the averaging window in meters
time_bin_size: the averaging window in seconds
Returns:
time: returns the new time dimension
height: returns the new height dimension
averaged: the data averaged size len(time) X len(height)
"""
past_time = time_array[0]
bins_time = []
for time in time_array:
if past_time + time_bin_size > time:
continue
else:
bins_time.append((past_time,time))
past_time = time
bins_time.append((time,time_array[-1]))
bin_range_time = [bini[0] for bini in bins_time]
pixel_in_bin_time = []
for time in time_array:
pixel_in_bin_time.append(find_bin(time,bins_time))
max_val_time = np.max(pixel_in_bin_time)
pixel_in_bin_time = np.array(pixel_in_bin_time)
bins = create_bins(height_array[0],height_array[-1],height_bin_size)
bin_range = [bini[0] for bini in bins]
pixel_in_bin = []
for height in height_array:
pixel_in_bin.append(find_bin(height,bins))
max_val = np.max(pixel_in_bin)
pixel_in_bin = np.array(pixel_in_bin)
averaged = np.zeros((len(bins_time),len(bins)))
for i in range(max_val_time+1):
for j in range(max_val+1):
min_time = np.where(pixel_in_bin_time==i)[0][0]
max_time = np.where(pixel_in_bin_time==i)[0][-1]
min_height = np.where(pixel_in_bin==j)[0][0]
max_height = np.where(pixel_in_bin==j)[0][-1]
temp_selection = data_array[min_time:max_time,min_height:max_height]
temp_average = np.nanmean(temp_selection)
averaged[i,j] = temp_average
time = bin_range_time
height = bin_range
return time,height,averaged | 710f4c8821cffe110511bda0dd3d4fd3052f33a9 | 17,516 |
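A minimal illustration of the kind of time-by-height block averaging performed above, using plain NumPy reshaping and assuming the array divides evenly into the requested bins (the function itself handles irregular grids via create_bins/find_bin instead).
import numpy as np
# 120 time steps x 40 height gates, averaged in 30 x 10 blocks.
data = np.random.rand(120, 40)
t_bin, h_bin = 30, 10
blocks = data.reshape(120 // t_bin, t_bin, 40 // h_bin, h_bin)
averaged = np.nanmean(blocks, axis=(1, 3))  # shape (4, 4)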
def daily_report(api, space_name, charts, num_days=1, end_time=None):
"""Get a report of SLO Compliance for the previous day(s)
Returns: list of dicts of threshold breaches. Example Dict format:
{u'measure_time': 1478462400, u'value': 115.58158333333334}
:param api: An instance of sloc_report.LibratoApi
:param space_name: The name of the space where the charts are located
:param charts: A list of dicts containing the SLO thresholds, indexed
by the chart names (see _enumerate_sloc_charts() for an
example data structure)
:param num_days: Number of days to get report for. Default is 1 day
:param end_time: The time that the report should count back from.
Default: now
"""
sloc_charts = _enumerate_sloc_charts(api, space_name, charts)
if end_time is None:
end_time = sloc_time.time_now()
# get start and end times for each day
days = sloc_time.get_day_times(num_days, end_time)
threshold_breaches = []
# loop through every day for every chart
# TODO: decide on a better data structure - or return an object per chart?
for chart in sloc_charts:
chart_breaches = {
'chart_name': chart.metric(), 'total': 0, 'breaches': []
}
for day in days:
response = _get_composite_with_retry(
api, chart,
start_time=day[0],
end_time=day[1]
)
# build a list of threshold breaches
for s in response['measurements'][0]['series']:
if s['value'] > chart.threshold:
chart_breaches['total'] += 1
# chart_breaches['breaches'].append(s)
threshold_breaches.append(chart_breaches)
return threshold_breaches | 86d25f5f2dd93a827f5f7b8cd44287f549921438 | 17,517 |
def new_pitch():
"""
route to new pitch form
:return:
"""
form = PitchForm()
if form.validate_on_submit():
title = form.title.data
pitch = form.pitch.data
category = form.category.data
fresh_pitch = Pitch(title=title, pitch_actual=pitch, category=category, user_id=current_user.id)
fresh_pitch.save_pitch()
return redirect(url_for('.profile', uname=current_user.username))
title = 'New pitch'
return render_template('new_pitch.html' , title=title, pitch_form=form) | a7724149a7e6b9d545559fef643dcc8fd2f5c731 | 17,518 |
def get_entity_bios(seq,id2label):
"""Gets entities from sequence.
note: BIOS
Args:
seq (list): sequence of labels.
Returns:
list: list of (chunk_type, chunk_start, chunk_end).
Example:
# >>> seq = ['B-PER', 'I-PER', 'O', 'S-LOC']
# >>> get_entity_bios(seq)
[['PER', 0,1], ['LOC', 3, 3]]
"""
chunks = []
chunk = [-1, -1, -1]
for indx, tag in enumerate(seq):
if not isinstance(tag, str):
tag = id2label[tag]
if tag.startswith("S-"):
if chunk[2] != -1:
chunks.append(chunk)
chunk = [-1, -1, -1]
chunk[1] = indx
chunk[2] = indx
chunk[0] = tag.split('-')[1]
chunks.append(chunk)
chunk = [-1, -1, -1]
if tag.startswith("B-"):
if chunk[2] != -1:
chunks.append(chunk)
chunk = [-1, -1, -1]
chunk[1] = indx
chunk[0] = tag.split('-')[1]
elif tag.startswith('I-') and chunk[1] != -1:
_type = tag.split('-')[1]
if _type == chunk[0]:
chunk[2] = indx
if indx == len(seq) - 1:
chunks.append(chunk)
else:
if chunk[2] != -1:
chunks.append(chunk)
chunk = [-1, -1, -1]
return chunks | 25219d29ba8ecb2d44ca5a8245059432f3220d8d | 17,519 |
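The commented-out doctest above, written as a runnable check of get_entity_bios; string labels are used, so id2label is never consulted.
seq = ['B-PER', 'I-PER', 'O', 'S-LOC']
print(get_entity_bios(seq, id2label={}))  # [['PER', 0, 1], ['LOC', 3, 3]]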
import torch
import copy
import numpy as np
def early_stopping_train(model, X, Y_, x_test, y_test, param_niter=20001, param_delta=0.1):
"""Arguments:
- X: model inputs [NxD], type: torch.Tensor
- Y_: ground truth [Nx1], type: torch.Tensor
- param_niter: number of training iterations
- param_delta: learning rate
"""
best_model, best_accuracy = None, 0
N, D = X.shape[0], X.shape[1]
C = max(Y_) + 1 # nr_classes
optimizer = torch.optim.SGD(model.parameters(), lr=param_delta)
prev_loss, count = None, 0
for i in range(param_niter):
model.forward(X)
model.get_loss(X, Y_)
model.loss.backward()
if i % 1 == 0:
print("iteration {}: loss {}".format(i, model.loss))
optimizer.step()
optimizer.zero_grad()
if prev_loss is not None: # exit if no move was made for 100 iterations
if abs(model.loss - prev_loss) < 1e-9:
count += 1
else:
count = 0
if count > 100:
break
prev_loss = model.loss
# evaluate the model on the test dataset
probs = eval(model, x_test)
Y = np.argmax(probs, axis=1)
accuracy, recall, matrix = data.eval_perf_multi(Y, y_test)
print("Current accuracy on testset: ", accuracy)
if accuracy > best_accuracy:
best_model = copy.copy(model)
best_accuracy = accuracy
return best_model | 83a8acdd24a4fde3db77184c3b4a99a1c1783349 | 17,520 |
def my_vtk_grid_props(vtk_reader):
"""
Get grid properties from vtk_reader instance.
Parameters
----------
vtk_reader: vtk Reader instance
vtk Reader containing information about a vtk-file.
Returns
----------
step_x : float
For regular grid, stepsize in x-direction.
step_y : float
For regular grid, stepsize in y-direction.
npts_x : float
Number of cells in x-direction.
npts_y : float
Number of cells in y-direction.
low_m_x : float
Middle of first x cell
high_m_x : float
Middle of last x cell
low_m_y : float
Middle of first y cell
high_m_y : float
Middle of last y cell
low_x : float
Edge of first x cell
high_x : float
Edge of last x cell
low_y : float
Edge of first y cell
high_y : float
Edge of last y cell
Notes
----------
0: step_x
1: step_y
2: npts_x
3: npts_y
4: low_m_x - Middle of cells: first x cell
5: high_m_x - Middle of cells: last x cell
6: low_m_y - Middle of cells: first y cell
7: high_m_y - Middle of cells: last y cell
8: low_x - Edge of cells: first x cell
9: high_x - Edge of cells: last x cell
10: low_y - Edge of cells: first y cell
11: high_y - Edge of cells: last y cell
"""
vtk_output = vtk_reader.GetOutput()
# Read attributes of the vtk-Array
# num_cells = vtk_output.GetNumberOfCells()
# num_points = vtk_output.GetNumberOfPoints()
# whole_extent = vtk_output.GetExtent()
grid_bounds = vtk_output.GetBounds()
grid_dims = vtk_output.GetDimensions()
# Grid information
step_x = (grid_bounds[1] - grid_bounds[0]) / (grid_dims[0] - 1)
step_y = (grid_bounds[3] - grid_bounds[2]) / (grid_dims[1] - 1)
if grid_bounds[0] == 0.0: # CELLS
npts_x = grid_dims[0] - 1
npts_y = grid_dims[1] - 1
low_m_x = grid_bounds[0] + 0.5 * step_x
high_m_x = grid_bounds[1] - 0.5 * step_x
low_m_y = grid_bounds[2] + 0.5 * step_y
high_m_y = grid_bounds[3] - 0.5 * step_y
low_x = grid_bounds[0]
high_x = grid_bounds[1]
low_y = grid_bounds[2]
high_y = grid_bounds[3]
else: # POINTS
npts_x = grid_dims[0]
npts_y = grid_dims[1]
low_m_x = grid_bounds[0]
high_m_x = grid_bounds[1]
low_m_y = grid_bounds[2]
high_m_y = grid_bounds[3]
low_x = grid_bounds[0] - 0.5 * step_x
high_x = grid_bounds[1] + 0.5 * step_x
low_y = grid_bounds[2] - 0.5 * step_y
high_y = grid_bounds[3] + 0.5 * step_y
return step_x, step_y, \
npts_x, npts_y, \
low_m_x, high_m_x, low_m_y, high_m_y, \
low_x, high_x, low_y, high_y | 26ef8a51648ea487372ae06b54c8ccf953aeb414 | 17,521 |
def make_env(stack=True, scale_rew=True):
"""
Create an environment with some standard wrappers.
"""
env = grc.RemoteEnv('tmp/sock')
env = SonicDiscretizer(env)
if scale_rew:
env = RewardScaler(env)
env = WarpFrame(env)
if stack:
env = FrameStack(env, 4)
return env | 347376103fa00d4d43714f30097b0d129ef45f43 | 17,522 |
def plot_distr_cumsum(result, measure="degree", scale=['log', 'log'], figures=[], prefix="", show_std=True, show_figs=True, mode="safe", colors=('r', 'b')):
""" plots the cummulative distribution functions
special care has to be taken because averaging these is not trivial in comparison to e.g. degree
"""
maj_name=f'{measure}_distr_cumsum_maj'
min_name=f'{measure}_distr_cumsum_min'
maj_x = f'{measure}_distr_cumsum_maj_x'
min_x = f'{measure}_distr_cumsum_min_x'
tmp=result.groupby(['homophily']).agg({maj_name : list, min_name:list, min_x:list, maj_x:list})
maj = []
for x,y in zip(tmp[maj_x], tmp[maj_name]):
x_out, mean_out, std_out = cumsum_mean(x,y, mode=mode)
maj.append((x_out, mean_out, std_out))
mino = []
for x,y in zip(tmp[min_x], tmp[min_name]):
x_out, mean_out, std_out = cumsum_mean(x,y,mode=mode)
mino.append((x_out, mean_out, std_out))
if len(figures)==0:
figures = [plt.Figure() for _ in range(len(tmp.index))]
for fig in figures:
if len(fig.axes)==0:
ax = fig.add_subplot()
for h, (min_xx, min_vals, min_std), (maj_xx, maj_vals, maj_std), fig in zip(tmp.index, mino, maj, figures):
plt.figure()
x=min_xx
x2=maj_xx
ax = fig.axes[0]
ax.set_xscale(scale[0])
ax.set_yscale(scale[1])
if show_std:
ax.errorbar(x,min_vals, yerr=min_std, label=prefix + "min", color=colors[0])
ax.errorbar(x2,maj_vals,yerr=maj_std, label=prefix + "maj", color=colors[1])
else:
ax.plot(x,min_vals,label=prefix + "min", color=colors[0])
ax.plot(x2,maj_vals, label=prefix + "maj", color=colors[1])
#print(maj_vals)
ax.set_xlabel(f"{measure}")
ax.set_ylabel(f"{measure} distrubution")
ax.set_title(f"h={h}")
ax.legend()
return figures | 6b0a526cf8f09dd66ac7b0988c9445d57416be21 | 17,523 |
import socket
import sys
from threading import current_thread
def recvall(sock, n, silent=False):
"""Helper function for recv_msg()."""
data = b''
while len(data) < n:
try:
packet = sock.recv(n - len(data))
if not packet:
return None
data += packet
except (socket.error, OSError) as e:
if not silent:
print("recvall() - Socket error:\n\t" + str(e), file=sys.stderr)
print(current_thread().name, file=sys.stderr)
raise ConnectionError
return data | 0e5800929dfd2829fb922bbc0904a1ed893e79bf | 17,524 |
import sys
import os
def install_jenkins(dest_folder=".", fLOG=print, install=True, version=None):
"""
install `Jenkins <http://jenkins-ci.org/>`_ (only on Windows)
@param dest_folder where to download the setup
@param fLOG logging function
@param install install (otherwise only download)
@param version version to install (unused)
@return temporary file
.. versionadded:: 1.1
"""
if version is not None:
raise ValueError("cannot specify a version")
if not sys.platform.startswith("win"):
raise NotImplementedError(
"Jenkins can only be installed on Windows at the moment")
url = "http://mirrors.jenkins.io/war/latest/jenkins.war"
outfile = os.path.join(dest_folder, "jenkins.war")
if not os.path.exists(outfile):
download_file(url, outfile)
if install:
raise NotImplementedError("Does not install jenkins.war")
return outfile | 7c67cc69f2fe3add7a97abbd8bb19451ee36fddd | 17,525 |
def state_space_model(A, z_t_minus_1, B, u_t_minus_1):
"""
Calculates the state at time t given the state at time t-1 and
the control inputs applied at time t-1
"""
state_estimate_t = (A @ z_t_minus_1) + (B @ u_t_minus_1)
return state_estimate_t | 0e04207028df8d4162c88aad6606e792ef618f5a | 17,526 |
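A toy constant-velocity example of the update z_t = A @ z_{t-1} + B @ u_{t-1} computed above; the matrices and numbers are illustrative only.
import numpy as np
dt = 0.1
A = np.array([[1.0, dt], [0.0, 1.0]])   # position/velocity transition
B = np.array([[0.5 * dt**2], [dt]])     # effect of an acceleration input
z_prev = np.array([0.0, 1.0])           # position 0 m, velocity 1 m/s
u_prev = np.array([2.0])                # 2 m/s^2 acceleration command
print(state_space_model(A, z_prev, B, u_prev))  # -> [0.11, 1.2]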
def get_post(id , check_author=True):
"""Get a post and its author by id.
Checks that the id exists and optionally that the current user is
the author.
:param id: id of post to get
:param check_author: require the current user to be the author
:return: the post with author information
:raise 404: if a post with the given id doesn't exist
:raise 403: if the current user isn't the author
"""
#u = User.query(User ).filter(User.posts.id == id ).first()
post = db_session.query(Post).filter(Post.id == id).first()
if post is None:
abort(404, "Post id {0} doesn't exist.".format(id))
if check_author and post.author_id != g.user['id']:
abort(403)
return post | a15ac3816d134f1dd89bf690c2f800e412d7219b | 17,527 |
def get_pixel(x, y):
"""Get the RGB value of a single pixel.
:param x: Horizontal position from 0 to 7
:param y: Vertical position from 0 to 7
"""
global _pixel_map
return _pixel_map[y][x] | 47a77090683a5b8e7178b3c7d83ae5b1a090342f | 17,528 |
from typing import Callable
import re
def check_for_NAs(func: Callable) -> Callable:
"""
This decorator function checks whether the input string qualifies as an
NA. If it does it will return True immediately. Otherwise it will run
the function it decorates.
"""
def inner(string: str, *args, **kwargs) -> bool:
if re.fullmatch("^|0|NA$", string) is not None:
return True
else:
return func(string, *args, **kwargs)
return inner | e9336cca2e6cd69f81f6aef1d11dc259492774f8 | 17,529 |
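A hypothetical validator wrapped with the decorator above: NA-like inputs ('', '0', 'NA') short-circuit to True before the wrapped check runs.
@check_for_NAs
def is_positive_integer(string: str) -> bool:
    return string.isdigit() and int(string) > 0

print(is_positive_integer("NA"))   # True (short-circuited by the decorator)
print(is_positive_integer("42"))   # True
print(is_positive_integer("abc"))  # False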
from typing import Union
from typing import Callable
def integrateEP_w0_ode( w_init: np.ndarray, w0: Union[ Callable, np.ndarray ], w0prime: Union[ Callable, np.ndarray ],
B: np.ndarray, s: np.ndarray, s0: float = 0, ds: float = None,
R_init: np.ndarray = np.eye( 3 ), Binv: np.ndarray = None, arg_check: bool = True,
wv_only: bool = False ) -> (np.ndarray, np.ndarray, np.ndarray):
""" integrate Euler-Poincare equation for needle shape sensing for given intrinsic angular deformation
using scipy.integrate
Author: Dimitri Lezcano
Args:
w_init: 3-D initial deformation vector
w0: Callable function or N x 3 intrinsic angular deformation
w0prime: Callable function or N x 3 d/ds w0
B: 3 x 3 needle stiffness matrix
s: the arclengths desired (Not implemented)
s0: (Default = 0) the initial length to start off with
ds: (Default = None) the arclength increments desired
Binv: (Default = None) inv(B) Can be provided for numerical efficiency
R_init: (Default = 3x3 identity) SO3 matrix for initial rotation angle
arg_check: (Default = False) whether to check if the arguments are valid
wv_only: (Default = False) whether to only integrate wv or not.
Return:
(N x 3 needle shape, N x 3 x 3 SO3 matrices of orientations), N x 3 angular deformation)
(None, None, N x 3 angular deformation) if 'wv_only' is True
"""
if arg_check:
assert (w_init.size == 3)
w_init = w_init.flatten()
assert (B.shape == (3, 3))
assert (geometry.is_SO3( R_init ))
assert (s0 >= 0)
# if
# argument parsing
s = s[ s >= s0 ]
if Binv is None:
Binv = np.linalg.inv( B )
elif arg_check:
assert (Binv.shape == (3, 3))
# setup intrinsic curvature functions
if callable( w0 ):
w0_fn = w0
else:
w0_fn = interpolate.interp1d( s, w0.T, fill_value='extrapolate' )
# w0_fn = lambda t: jit_linear_interp1d( t, w0, s )
if callable( w0prime ):
w0prime_fn = w0prime
else:
w0prime_fn = interpolate.interp1d( s, w0prime.T, fill_value='extrapolate' )
# w0prime_fn = lambda t: jit_linear_interp1d( t, w0prime, s )
# perform integration
ode_EP = lambda s, wv: differential_EPeq( wv, s, w0_fn, w0prime_fn, B, Binv )
wv = odeint( ode_EP, w_init, s, full_output=False, hmin=ds/2, h0=ds/2, tfirst=True )
# wv = solve_ivp( ode_EP, (s0, s.max()), w_init, method='RK45', t_eval=s,
# first_step=ds ) # 'RK23' for speed (all slower than odeint)
# integrate angular deviation vector in order to get the pose
if wv_only:
pmat, Rmat = None, None
else:
pmat, Rmat = integratePose_wv( wv, s=s, s0=s0, ds=ds, R_init=R_init )
return pmat, Rmat, wv | 75a042b94ac46b7ecbb86e23abacde0d4034b9fe | 17,530 |
def change_coordinate_frame(keypoints, window, scope=None):
"""Changes coordinate frame of the keypoints to be relative to window's frame.
Given a window of the form [y_min, x_min, y_max, x_max], changes keypoint
coordinates from keypoints of shape [num_instances, num_keypoints, 2]
to be relative to this window.
An example use case is data augmentation: where we are given groundtruth
keypoints and would like to randomly crop the image to some window. In this
case we need to change the coordinate frame of each groundtruth keypoint to be
relative to this new window.
Args:
keypoints: a tensor of shape [num_instances, num_keypoints, 2]
window: a tensor of shape [4] representing the [y_min, x_min, y_max, x_max]
window we should change the coordinate frame to.
scope: name scope.
Returns:
new_keypoints: a tensor of shape [num_instances, num_keypoints, 2]
"""
with tf.name_scope(scope, 'ChangeCoordinateFrame'):
win_height = window[2] - window[0]
win_width = window[3] - window[1]
new_keypoints = scale(keypoints - [window[0], window[1]], 1.0 / win_height,
1.0 / win_width)
return new_keypoints | 2aa69a55d7f8177784afb41f50cd7ccfbffdbde3 | 17,531 |
import os
def eval_log_type(env_var_name):
"""get the log type from environment variable"""
ls_log = os.environ.get(env_var_name, '').lower().strip()
return ls_log if ls_log in LOG_LEVELS else False | 7d2711075b830fc5861961ed52714b58fd01b96b | 17,532 |
import random
def _get_name(filename: str) -> str:
"""
Function returns a random name (first or last)
from the filename given as the argument.
Internal function. Not to be imported.
"""
LINE_WIDTH: int = 20 + 1 # 1 for \n
with open(filename) as names:
try:
total_names = int(next(names))
nth_name_to_read: int = random.randint(1, total_names)
# Here 'nth_name_to_read' lines are skipped that include
# the first line (with no of lines) and n-1 names
# Next read would always be the desired name
bytes_to_seek: int = LINE_WIDTH * nth_name_to_read
_ = names.seek(bytes_to_seek) # Now skipped n - 1 names
name: str = next(names).strip()
return name
except StopIteration:
# Return empty string if the file is empty
return '' | 1b4cd75488c6bd1814340aee5669d1631318e77f | 17,533 |
def map_to_udm_section_associations(enrollments_df: DataFrame) -> DataFrame:
"""
Maps a DataFrame containing Canvas enrollments into the Ed-Fi LMS Unified Data
Model (UDM) format.
Parameters
----------
enrollments_df: DataFrame
Pandas DataFrame containing all Canvas enrollments
Returns
-------
DataFrame
A LMSSectionAssociations-formatted DataFrame
DataFrame columns are:
SourceSystemIdentifier: A unique number or alphanumeric code assigned to a the section-association by
the source system
SourceSystem: The system code or name providing the user data
EnrollmentStatus: Possible values are Active, Expired, Invite pending, Request Pending, Archived
LMSUserSourceSystemIdentifier: A unique number or alphanumeric code assigned to a user by the source
system
LMSSectionSourceSystemIdentifier: A unique number or alphanumeric code assigned to a section by the
source system
CreateDate: Date/time at which the record was first retrieved
LastModifiedDate: Date/time when the record was modified, or when first retrieved
SourceCreateDate: Date this record was created in the LMS
SourceLastModifiedDate: Date this record was last updated in the LMS
"""
if enrollments_df.empty:
return enrollments_df
assert "id" in enrollments_df.columns
assert "enrollment_state" in enrollments_df.columns
assert "user_id" in enrollments_df.columns
assert "course_section_id" in enrollments_df.columns
assert "created_at" in enrollments_df.columns
assert "updated_at" in enrollments_df.columns
enrollments_df = enrollments_df[
[
"id",
"enrollment_state",
"user_id",
"course_section_id",
"created_at",
"updated_at",
"CreateDate",
"LastModifiedDate",
]
].copy()
enrollments_df.rename(
columns={
"id": "SourceSystemIdentifier",
"enrollment_state": "EnrollmentStatus",
"user_id": "LMSUserSourceSystemIdentifier",
"course_section_id": "LMSSectionSourceSystemIdentifier",
"created_at": "SourceCreateDate",
"updated_at": "SourceLastModifiedDate",
},
inplace=True,
)
enrollments_df["SourceCreateDate"] = enrollments_df["SourceCreateDate"].apply(
_get_date_formated
)
enrollments_df["SourceLastModifiedDate"] = enrollments_df[
"SourceLastModifiedDate"
].apply(_get_date_formated)
enrollments_df["EnrollmentStatus"] = enrollments_df["EnrollmentStatus"].apply(
_get_enrollment_status
)
enrollments_df["SourceSystem"] = SOURCE_SYSTEM
return enrollments_df | 303223302e326854f7a19b2f3c9d0b626a2807bc | 17,534 |
def plot_electrodes(mris, grid, values=None, ref_label=None, functional=None):
"""
"""
surf = mris.get('pial', None)
if surf is None:
surf = mris.get('dura', None)
pos = grid['pos'].reshape(-1, 3)
norm = grid['norm'].reshape(-1, 3)
labels = grid['label'].reshape(-1)
right_or_left = sign(mean(surf['pos'][:, 0]))
if values is None:
iswire = labels == WIRE
colors = labels.copy()
colors[iswire] = 'red'
colors[~iswire] = 'black'
if ref_label is not None:
colors[labels == ref_label] = 'green'
marker = dict(
size=MARKER_SIZE,
color=colors,
)
hovertext = labels
else:
values = values['value'].reshape(-1)
marker = dict(
size=MARKER_SIZE,
color=values,
colorscale=COLORSCALE,
showscale=True,
cmin=nanmin(values),
cmax=nanmax(values),
colorbar=dict(
title='electrode values',
),
)
hovertext = [f'{x0}<br>{x1:0.3f}' for x0, x1 in zip(labels, values)]
traces = [
go.Mesh3d(
x=surf['pos'][:, 0],
y=surf['pos'][:, 1],
z=surf['pos'][:, 2],
i=surf['tri'][:, 0],
j=surf['tri'][:, 1],
k=surf['tri'][:, 2],
color='pink',
hoverinfo='skip',
flatshading=False,
lighting=dict(
ambient=0.18,
diffuse=1,
fresnel=0.1,
specular=1,
roughness=0.1,
),
lightposition=dict(
x=0,
y=0,
z=-1,
),
),
]
if functional is not None:
traces.append(
go.Scatter3d(
x=functional['pos'][:, 0],
y=functional['pos'][:, 1],
z=functional['pos'][:, 2],
mode='markers',
hoverinfo='skip',
marker=dict(
size=5,
color=functional['value'],
symbol='diamond',
colorscale='RdBu',
reversescale=True,
cmid=0,
colorbar=dict(
x=1.2,
title='functional values',
),
),
opacity=1,
))
elif False:
"""do not show Cone, it's not easy to see"""
traces.append(
go.Cone(
x=pos[:, 0],
y=pos[:, 1],
z=pos[:, 2],
u=norm[:, 0] * -1,
v=norm[:, 1] * -1,
w=norm[:, 2] * -1,
sizeref=2,
sizemode='absolute',
anchor='tail',
text=labels,
showscale=False,
colorscale=[
[0, 'rgb(0, 0, 0)'],
[1, 'rgb(0, 0, 0)'],
],
hoverinfo='skip',
),
)
traces.append(
go.Scatter3d(
x=pos[:, 0],
y=pos[:, 1],
z=pos[:, 2],
text=labels,
mode='markers',
hovertext=hovertext,
hoverinfo='text',
marker=marker,
),
)
fig = go.Figure(
data=traces,
layout=go.Layout(
showlegend=False,
scene=dict(
xaxis=AXIS,
yaxis=AXIS,
zaxis=AXIS,
camera=dict(
eye=dict(
x=right_or_left,
y=0,
z=0.5,
),
projection=dict(
type='orthographic',
),
),
),
),
)
return fig | 0bcc5545c625675be080e6b70bf7a74d247ba1c9 | 17,535 |
from typing import Tuple
def _get_laplace_matrix(bcs: Boundaries) -> Tuple[np.ndarray, np.ndarray]:
"""get sparse matrix for laplace operator on a 1d Cartesian grid
Args:
bcs (:class:`~pde.grids.boundaries.axes.Boundaries`):
{ARG_BOUNDARIES_INSTANCE}
Returns:
tuple: A sparse matrix and a sparse vector that can be used to evaluate
the discretized laplacian
"""
dim = bcs.grid.dim
if dim == 1:
result = _get_laplace_matrix_1d(bcs)
elif dim == 2:
result = _get_laplace_matrix_2d(bcs)
else:
raise NotImplementedError(f"{dim:d}-dimensional Laplace matrix not implemented")
return result | 80880c7fb1d54a7d4502e1096c2f2ade4d30ce21 | 17,536 |
import warnings
def column_or_1d(y, warn=False):
""" Ravel column or 1d numpy array, else raises an error
Parameters
----------
y : array-like
warn : boolean, default False
To control display of warnings.
Returns
-------
y : array
"""
shape = np.shape(y)
if len(shape) == 1:
return np.ravel(y)
if len(shape) == 2 and shape[1] == 1:
if warn:
warnings.warn("A column-vector y was passed when a 1d array was"
" expected. Please change the shape of y to "
"(n_samples, ), for example using ravel().",
stacklevel=2)
return np.ravel(y)
raise ValueError("bad input shape {0}".format(shape)) | ef3a5bfe7a1ae07b925c1d9b897bce0eff29b275 | 17,537 |
def conv_tower(
inputs,
filters_init,
filters_end=None,
filters_mult=None,
divisible_by=1,
repeat=1,
**kwargs
):
"""Construct a reducing convolution block.
Args:
inputs: [batch_size, seq_length, features] input sequence
filters_init: Initial Conv1D filters
filters_end: End Conv1D filters
filters_mult: Multiplier for Conv1D filters
divisible_by: Round filters to be divisible by (eg a power of two)
repeat: Tower repetitions
Returns:
[batch_size, seq_length, features] output sequence
"""
def _round(x):
return int(np.round(x / divisible_by) * divisible_by)
# flow through variable current
current = inputs
# initialize filters
rep_filters = filters_init
# determine multiplier
if filters_mult is None:
assert filters_end is not None
filters_mult = np.exp(np.log(filters_end / filters_init) / (repeat - 1))
for ri in range(repeat):
# convolution
current = conv_block(current, filters=_round(rep_filters), **kwargs)
# update filters
rep_filters *= filters_mult
return current | 82ff878423309e2963090a9569f14090a85d30e5 | 17,538 |
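The filter-growth rule used by conv_tower, shown in isolation with made-up values: filters_mult is chosen so that after `repeat` blocks the filter count lands on filters_end, rounded to the nearest multiple of divisible_by.
import numpy as np
filters_init, filters_end, repeat, divisible_by = 64, 512, 4, 8
mult = np.exp(np.log(filters_end / filters_init) / (repeat - 1))
filters = filters_init
for _ in range(repeat):
    print(int(np.round(filters / divisible_by) * divisible_by))  # 64, 128, 256, 512
    filters *= mult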
def edit_coach(request, coach_id):
""" Edit a coach's information """
if not request.user.is_superuser:
messages.error(request, 'Sorry, only the owners can do that.')
return redirect(reverse('home'))
coach = get_object_or_404(Coach, pk=coach_id)
if request.method == 'POST':
form = CoachForm(request.POST, request.FILES, instance=coach)
if form.is_valid():
coach = form.save()
messages.success(request, 'Successfully updated coach!')
return redirect(reverse('view_coach', args=[coach.id]))
else:
messages.error(request, (
'Failed to update coach. Please ensure the form is valid.'))
else:
form = CoachForm(instance=coach)
messages.info(request, f'You are editing {coach.first_name}')
template = 'coaches/edit_coach.html'
context = {
'form': form,
'coach': coach,
}
return render(request, template, context) | ecaf07df3249d3349928b4e9da9c0524b27e603e | 17,539 |
import torch
import numpy as np
def estimate_translation(S,
joints_2d,
focal_length=5000.,
img_size=224.,
use_all_joints=False,
rotation=None):
"""Find camera translation that brings 3D joints S closest to 2D the corresponding joints_2d.
Input:
S: (B, 49, 3) 3D joint locations
joints: (B, 49, 3) 2D joint locations and confidence
Returns:
(B, 3) camera translation vectors
"""
device = S.device
if rotation is not None:
S = torch.einsum('bij,bkj->bki', rotation, S)
# Use only joints 25:49 (GT joints)
if use_all_joints:
S = S.cpu().numpy()
joints_2d = joints_2d.cpu().numpy()
else:
S = S[:, 25:, :].cpu().numpy()
joints_2d = joints_2d[:, 25:, :].cpu().numpy()
joints_conf = joints_2d[:, :, -1]
joints_2d = joints_2d[:, :, :-1]
trans = np.zeros((S.shape[0], 3), dtype=np.float32)
# Find the translation for each example in the batch
for i in range(S.shape[0]):
S_i = S[i]
joints_i = joints_2d[i]
conf_i = joints_conf[i]
trans[i] = estimate_translation_np(S_i,
joints_i,
conf_i,
focal_length=focal_length,
img_size=img_size)
return torch.from_numpy(trans).to(device) | 70b5cc75dc28919b6bb6cea70b49eae8ca593452 | 17,540 |
import random
def create_midterm_data(all_students):
"""
Create the midterm data set
Ten questions, two from each topic, a percentage of students did not
show up, use it as an example of merge
Rules:
- International students have a 10% drop out rate
- Performance changes by PROGRAM!
:param all_students:
:return: dictionary with the midterm answers
"""
midterm_choices = ['A', 'B', 'C', 'D']
midterm_solution = []
for _ in range(0, 10):
midterm_solution.append(random.choice(midterm_choices))
# Insert the solution row
midterm_answers = pd.DataFrame(
[[0, '', 'SOLUTION', 'SOLUTION'] + midterm_solution + ['100']],
columns=midterm_answers_fields)
for idx, student_info in all_students.iterrows():
midterm_score = {}
# Detect if a student has to be dropped
skip = False
for enrolment, rate in midterm_dropout_rates:
# print random.random(), rate
if student_info['Enrolment Type'] == enrolment and \
random.random() <= rate:
skip = True
if skip:
continue
midterm_score['SID'] = student_info['SID']
midterm_score['email'] = student_info['email']
midterm_score['Last Name'] = student_info['Surname']
midterm_score['First Name'] = student_info['GivenName']
# Select the score based on the program
prg = student_info['Program']
score = int(round(random.normalvariate(
midterm_score_average[prg][0] / 10,
midterm_score_average[prg][1] / 10)))
if score > 10:
score = 10
if score < 0:
score = 0
# Score contains the number of questions that are correct
text_score = str(10 * score)
midterm_score['Total'] = text_score
# Add the score also to the all_student database for further reference
student_info['MIDTERM_SCORE'] = text_score
# Generate the set of answers for the midterm
correct_answers = random.sample(list(range(0, 10)), score)
for x in range(0, 10):
field = midterm_answers_fields[x + 4]
if x in correct_answers:
answer = midterm_solution[x]
score = 1
else:
incorrect = list(midterm_choices)
incorrect.remove(midterm_solution[x])
answer = random.choice(incorrect)
score = 0
midterm_score[field] = answer
midterm_score[field[1:]] = score
midterm_answers = midterm_answers.append(midterm_score,
ignore_index=True)
return midterm_answers | b1f946ebab616362113ada54a17cc3e857b33f98 | 17,541 |
def identify_outliers(x_vals, y_vals, obj_func, outlier_fraction=0.1):
"""Finds the indices of outliers in the provided data to prune for subsequent curve fitting
Args:
x_vals (np.array): the x values of the data being analyzed
y_vals (np.array): the y values of the data being analyzed
obj_func (str): the objective function to use for curve fitting to determine outliers
outlier_fraction (float): the fractional deviation from predicted value required in
order to classify a data point as an outlier
Returns:
np.array: the indices of the identified outliers"""
# get objective function
objective = create_objective_function(obj_func)
# get fitted values
popt, _ = curve_fit(objective, x_vals, y_vals)
# create generate function
func = create_prediction_function(name=obj_func, weights=popt)
# generate predictions
preds = func(x_vals)
# specify outlier bounds based on multiple of predicted value
upper_bound = preds * (1 + outlier_fraction)
lower_bound = preds * (1 - outlier_fraction)
# identify outliers
outlier_mask = np.logical_or(y_vals > upper_bound, y_vals < lower_bound)
outlier_idx = np.where(outlier_mask)[0]
return outlier_idx | e1742747ac63b34c39d1e57cbc896b9df5af85e0 | 17,542 |
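A stand-alone sketch of the outlier rule applied above, with the curve fitting replaced by a known line so the example stays self-contained: flag points deviating from the predicted value by more than the given fraction.
import numpy as np
x = np.arange(10, dtype=float)
y = 2 * x + 1
y[3] *= 1.5                                # inject one outlier
preds = 2 * x + 1                          # stands in for the fitted curve
mask = np.logical_or(y > preds * 1.1, y < preds * 0.9)
print(np.where(mask)[0])  # [3]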
def GetTypeMapperFlag(messages):
"""Helper to get a choice flag from the commitment type enum."""
return arg_utils.ChoiceEnumMapper(
'--type',
messages.Commitment.TypeValueValuesEnum,
help_str=(
'Type of commitment. `memory-optimized` indicates that the '
'commitment is for memory-optimized VMs.'),
default='general-purpose',
include_filter=lambda x: x != 'TYPE_UNSPECIFIED') | f00e645a2dbfcae94a33fc5b016809f72e87c0a9 | 17,543 |
from typing import List
def gaussian_2Dclusters(n_clusters: int,
n_points: List[int],
means: List[float],
cov_matrices: List[float]):
"""
Creates a set of clustered data points, where the distribution within each
cluster is Gaussian.
Parameters
----------
n_clusters:
The number of clusters
n_points:
A list of the number of points in each cluster
means:
A list of the means [x,y] coordinates of each cluster in the plane
i.e. their centre)
cov_matrices:
A list of the covariance matrices of the clusters
Returns
-------
data
A dict whose keys are the cluster labels, and whose values are arrays of
the points' (x, y) coordinates (one point per row).
TODO
Output data as Pandas DataFrame?
"""
args_in = [len(means), len(cov_matrices), len(n_points)]
assert all(item == n_clusters for item in args_in),\
"Insufficient data provided for specified number of clusters"
data = {}
for i in range(n_clusters):
cluster_mean = means[i]
x, y = np.random.multivariate_normal(cluster_mean, cov_matrices[i], n_points[i]).T
coords = np.array([x, y])
tmp_dict = {str(i): coords.T}
data.update(tmp_dict)
return data | 9c950f9c5541c343a3a9a27dc3bff34be2006f8b | 17,544 |
def prepare_concepts_index(create=False):
"""
Creates the settings and mappings in Elasticsearch to support term search
"""
index_settings = {
"settings": {"analysis": {"analyzer": {"folding": {"tokenizer": "standard", "filter": ["lowercase", "asciifolding"]}}}},
"mappings": {
"_doc": {
"properties": {
"top_concept": {"type": "keyword"},
"conceptid": {"type": "keyword"},
"language": {"type": "keyword"},
"id": {"type": "keyword"},
"category": {"type": "keyword"},
"provisional": {"type": "boolean"},
"type": {"type": "keyword"},
"value": {
"analyzer": "standard",
"type": "text",
"fields": {"raw": {"type": "keyword"}, "folded": {"analyzer": "folding", "type": "text"}},
},
}
}
},
}
if create:
se = SearchEngineFactory().create()
se.create_index(index=CONCEPTS_INDEX, body=index_settings)
return index_settings | a33e7e6172c7a7c8577abab77cb467125e629e39 | 17,545 |
def pack_wrapper(module, att_feats, att_masks):
"""
for batch computation, pack sequences of different lengths, explicitly setting the batch size at each time step
"""
if att_masks is not None:
packed, inv_ix = sort_pack_padded_sequence(att_feats, att_masks.data.long().sum(1))
return pad_unsort_packed_sequence(PackedSequence(module(packed[0]), packed[1]), inv_ix)
else:
return module(att_feats) | ff5e02ac5977cf525a0e2f2a96714ff8a6cf1fe3 | 17,546 |
from sys import exc_info
from traceback import format_exception_only
def processor(preprocessed_data_id, param_id, param_constructor):
"""Dispatch the processor work"""
preprocessed_data = PreprocessedData(preprocessed_data_id)
params = param_constructor(param_id)
sp = StudyProcessor()
try:
process_out = sp(preprocessed_data, params)
except Exception as e:
error_msg = ''.join(format_exception_only(type(e), e))
preprocessed_data.processing_status = "failed: %s" % error_msg
process_out = None
return process_out | 25c2771c1f627d8eb24b4e214e419ebab779352e | 17,547 |
def recommendation_inspiredby(film: str, limit: int=20) -> list:
"""Movie recommandations from the same inspiration with selected movie
Args:
film (str): URI of the selected movie
limit (int, optional): Maximum number of results to return. Defaults to 20.
Returns:
list: matching movies with URI, title, inspiration list,
number of awards received, score on Rotten Tomatoes and a "relevance score"
"""
# In the query, we assume that movies have a score < 100
# (removes noise - movies with few reviews)
query = f"""
{get_prefix()}
SELECT ?film ?filmLabel
(GROUP_CONCAT(DISTINCT ?inspiredbyLabel; separator="; ") AS ?inspiredbyList)
(COUNT(DISTINCT ?award) AS ?numAwards)
?score
((?score + ?numAwards)*100/138 AS ?totalScore)
WHERE {{
{{
SELECT ?originInspiredby
WHERE {{ wd:{film} wdt:P941 ?originInspiredby . }}
}}
?film wdt:P31 wd:Q11424;
wdt:P941 ?inspiredby;
wdt:P444 ?brutScore.
OPTIONAL {{?film wdt:P166 ?award.}}
SERVICE wikibase:label {{
bd:serviceParam wikibase:language "[AUTO_LANGUAGE],en".
?film rdfs:label ?filmLabel.
?inspiredby rdfs:label ?inspiredbyLabel.
}}
FILTER (?inspiredby IN (?originInspiredby))
FILTER regex(?brutScore, "^[0-9]+%$")
BIND(xsd:integer(REPLACE(?brutScore, "%$", "")) AS ?score)
FILTER (?score != 100)
FILTER(?film != wd:{film})
}}
GROUP BY ?film ?filmLabel ?score
ORDER BY DESC(?totalScore)
LIMIT {limit}
"""
print(query)
sp_wrapper = get_sparql()
sp_wrapper.setQuery(query)
sp_wrapper.setReturnFormat(JSON)
return resp_format(sp_wrapper.query().convert()['results']['bindings']) | d70d6a30eabc5d1a5b5a7c3b0cebc28a9dcb0fa9 | 17,548 |
import string
def str2twixt(move):
""" Converts one move string to a twixt backend class move.
Handles T1-style coordinates (e.g. 'd5', 'f18') as well as tsgf-
style coordinates (e.g. 'fg', 'bi') and the special strings
('swap' and 'resign'). Letters may be upper- or lowercase.
Args:
move: string with a move
Returns:
twixt.SWAP or twixt.RESIGN or twixt.Point
Raises
ValueError if the move_str can't be parsed in any valid format
Examples:
>>> str2twixt('b3')
b3
>>> str2twixt('i18')
i18
>>> str2twixt('fj')
f10
>>> str2twixt('swap')
'swap'
>>> str2twixt('resign')
'resign'
>>> str2twixt('123')
ValueError: Can't parse move: '123'
>>> str2twixt('invalid')
ValueError: Can't parse move: 'invalid'
"""
# Handle swap and resign
if move.lower() == twixt.SWAP.lower():
return twixt.SWAP
elif move.lower() == twixt.RESIGN.lower():
return twixt.RESIGN
# Handle T1-style moves
elif move[0] in string.ascii_letters and move[-1] in string.digits:
return twixt.Point(move)
# Handle tsgf-stype moves
elif len(move) == 2 and all(c in string.ascii_letters for c in move):
return twixt.Point(move[0] + str(ord(move[1].lower()) - ord('a') + 1))
# Can't handle move. Throw exception
raise ValueError(f"Can't parse move: '{move}'") | fe1e644519f7d6fe7df2be8a38754ba230981a91 | 17,549 |
from datetime import datetime
import re
def celery_health_view(request):
"""Admin view that displays the celery configuration and health."""
if request.method == 'POST':
celery_health_task.delay(datetime.now())
messages.success(request, 'Health task created.')
return HttpResponseRedirect(request.path)
capital = re.compile('^[A-Z]')
settings = [key for key in dir(current_app.conf) if capital.match(key)]
sorted_settings = [
{
'key': key,
'value': ('*****' if 'password' in key.lower()
else getattr(current_app.conf, key))
} for key in sorted(settings)
]
return render(request, 'admin/celery_health_view.html', {
'settings': sorted_settings,
'title': 'Celery Settings and Health'
}) | 52f7fb76af5dc5557e22976b1930c19e6249f1cc | 17,550 |
def get_n_runs(slurm_array_file):
"""Reads the run.sh file to figure out how many conformers or rotors were meant to run
"""
with open(slurm_array_file, 'r') as f:
for line in f:
if 'SBATCH --array=' in line:
token = line.split('-')[-1]
n_runs = 1 + int(token.split('%')[0])
return n_runs
return 0 | 5574ef40ef87c9ec5d9bbf2abd7d80b62cead2ab | 17,551 |
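A hypothetical run.sh that the parser above expects: from the line '#SBATCH --array=0-63%8' it returns 1 + 63 = 64 runs.
with open("run.sh", "w") as f:
    f.write("#!/bin/bash\n#SBATCH --array=0-63%8\n")
print(get_n_runs("run.sh"))  # 64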
def consume(pipeline, data, cleanup=None, **node_contexts):
"""Handles node contexts before/after calling pipeline.consume()
Note
----
It would have been better to subclass Pipeline and implement this logic
right before/after the core consume() call, but there is a bug in pickle
that prevents that from working with multiprocessing.
"""
update_node_contexts(pipeline, node_contexts)
try:
contexts = get_node_contexts(pipeline)
dbg("size=%s\n%s" % (size(data, "n/a"), pf(contexts)), indent="label")
try:
if data is None:
return consume_none(pipeline)
else:
return pipeline.consume(iterize(data))
finally:
if cleanup:
clean_up_nodes(cleanup, contexts)
finally:
reset_node_contexts(pipeline, node_contexts) | b4d3df619600892fe02d418a19993af1f0715d84 | 17,552 |
def get_field_attribute(field):
"""
Format and return a whole attribute string
consists of attribute name in snake case and field type
"""
field_name = get_field_name(field.name.value)
field_type = get_field_type(field)
strawberry_type = get_strawberry_type(
field_name, field.description, field.directives
)
field_type += strawberry_type if strawberry_type else ""
return f"{str_converters.to_snake_case(field.name.value)}: {field_type}" | fbbe2dbdf6c5f0427365fbbb0d5f43df8bb74678 | 17,553 |
def shuffle_data(data):
"""
Shuffle the data
"""
rng_state = np.random.get_state()
for c, d in data.items():
np.random.set_state(rng_state)
np.random.shuffle(d)
data[c] = d
return data | 5a1fa1f81fbec54092c8d7b50ebf75f8edb526c7 | 17,554 |
def index(current_user=None):
""" Display home page """
return render_template('homepage.html', username=current_user['name'], \
logged_in=current_user['is_authenticated'], \
display_error=request.cookies.get('last_attempt_error') == 'True', \
login_banner=APP.config['LOGIN_BANNER']) | 287d8101ef318cb7ca308340e0d11ab157538450 | 17,555 |
import os
import pandas
def _read_data_file(data_path):
"""
Reads a data file into a :class:`pandas:pandas.DataFrame` object.
Parameters
----------
data_path : str
Path of the data file with extension. Supports ``.csv``, ``.xlsx``/``.xls``, ``.json``, and ``.xml``.
Returns
-------
:class:`pandas:pandas.DataFrame`
A dataframe object of the file data.
Author
------
Richard Wen <[email protected]>
Example
-------
.. code::
from msdss_data_api.cli import _read_data_file
data = _read_data_file('path/to/data.json')
"""
data_ext = os.path.splitext(data_path)[1].lower()
if data_ext == '.csv':
out = pandas.read_csv(data_path)
elif data_ext in ('.xlsx', '.xls'):
out = pandas.read_excel(data_path)
elif data_ext == '.json':
out = pandas.read_json(data_path)
elif data_ext == '.xml':
out = pandas.read_xml(data_path)
else:
raise ValueError('Unsupported data file extension: {}'.format(data_ext))
return out | 9ddfed58134a564ed63f712e305090ad6d9b6d41 | 17,556 |
def disable_admin_access(session, return_type=None, **kwargs):
"""
Disable admin access
:type session: zadarapy.session.Session
:param session: A valid zadarapy.session.Session object. Required.
:type return_type: str
:param return_type: If this is set to the string 'json', this function
will return a JSON string. Otherwise, it will return a Python
dictionary. Optional (will return a Python dictionary by default).
:rtype: dict, str
:returns: A dictionary or JSON data set as a string depending on
return_type parameter.
"""
path = "/api/users/admin_access/disable.json"
return session.post_api(path=path, return_type=return_type, **kwargs) | 5ac6c09ed3098f5b99baa2d5749d7b42a465e9f4 | 17,557 |
def _get_bbox_indices(x, y, bbox):
"""
Convert bbox values to array indices
:param x, y: arrays with the X, Y coordinates
:param bbox: minx, miny, maxx, maxy values
:return: bbox converted to array indices
"""
minx, miny, maxx, maxy = bbox
xindices, = np.where((x >= minx) & (x <= maxx))
yindices, = np.where((y >= miny) & (y <= maxy))
return xindices[0], xindices[-1]+1, yindices[0], yindices[-1]+1 | dd71f1852971dbd2d3026c1720f9f477b3093fc8 | 17,558 |
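A small worked example on a regular half-degree grid (values invented); the returned indices are intended to be used as array slices, hence the +1 on the upper bounds.
import numpy as np
x = np.arange(0.0, 10.0, 0.5)
y = np.arange(0.0, 5.0, 0.5)
x0, x1, y0, y1 = _get_bbox_indices(x, y, (2.0, 1.0, 4.0, 3.0))
print(x0, x1, y0, y1)  # 4 9 2 7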
def skill_competencies():
"""
Called by S3OptionsFilter to provide the competency options for a
particular Skill Type
"""
table = s3db.hrm_skill
ttable = s3db.hrm_skill_type
rtable = s3db.hrm_competency_rating
query = (table.id == request.args[0]) & \
(table.skill_type_id == ttable.id) & \
(rtable.skill_type_id == table.skill_type_id)
records = db(query).select(rtable.id,
rtable.name,
orderby = ~rtable.priority,
)
response.headers["Content-Type"] = "application/json"
return records.json() | 7edc87d20d36d25b05337365ed903126ef02742f | 17,559 |
def calc_field_changes(element, np_id):
"""
Walk up the tree of geo-locations, finding the new parents
These will be set onto all the museumobjects.
"""
fieldname = element._meta.concrete_model.museumobject_set.\
related.field.name
field_changes = {}
field_changes[fieldname] = element.id
if hasattr(element, 'parent'):
field_changes.update(
calc_field_changes(element.parent, element.parent.id))
return field_changes | cba816488dcf10a774bc18b1b3f6498e1d8dc3d8 | 17,560 |
import logging

from django.http import JsonResponse
from django.shortcuts import render

logger = logging.getLogger(__name__)

def index(request, _):
    """
    Route the request.
    ``request``: the incoming request object
    """
if request.method == 'GET' or request.method == 'get':
return index_page(request)
elif request.method == 'POST' or request.method == 'post':
return send_wxmsg(request)
else:
        rsp = JsonResponse({'code': -1, 'errorMsg': 'Unsupported request method'},
                           json_dumps_params={'ensure_ascii': False})
logger.info('response result: {}'.format(rsp.content.decode('utf-8')))
return render(request, 'index.html') | 5006cf1e5cb23e49b17e9083fca66c7731f5559b | 17,561 |
def get_daisy_client():
"""Get Daisy client instance."""
endpoint = conf.get('discoverd', 'daisy_url')
return daisy_client.Client(version=1, endpoint=endpoint) | 6ed0df1259672becfca3197f2d115a1c789306a1 | 17,562 |
import multiprocessing
from pathlib import Path

import numpy as np
def intermediate_statistics(
scores, ground_truth, audio_durations, *,
segment_length=1., time_decimals=6, num_jobs=1,
):
"""
Args:
scores (dict, str, pathlib.Path): dict of SED score DataFrames
(cf. sed_scores_eval.utils.scores.create_score_dataframe)
or a directory path (as str or pathlib.Path) from where the SED
scores can be loaded.
ground_truth (dict, str or pathlib.Path): dict of lists of ground truth
event tuples (onset, offset, event label) for each audio clip or a
file path from where the ground truth can be loaded.
audio_durations: The duration of each audio file in the evaluation set.
segment_length: the segment length of the segments that are to be
evaluated.
time_decimals (int): the decimal precision used for evaluation. If
chosen to high detected or ground truth events that have
onsets or offsets right on a segment boundary may swap over to the
adjacent segment because of small deviations due to limited
floating point precision.
num_jobs (int): the number of processes to use. Default is 1 in which
case no multiprocessing is used.
Returns:
"""
if not isinstance(num_jobs, int) or num_jobs < 1:
raise ValueError(
f'num_jobs has to be an integer greater or equal to 1 but '
f'{num_jobs} was given.'
)
scores, ground_truth, audio_ids = parse_inputs(scores, ground_truth)
if isinstance(audio_durations, (str, Path)):
audio_durations = Path(audio_durations)
assert audio_durations.is_file(), audio_durations
audio_durations = read_audio_durations(audio_durations)
if audio_durations is not None and not audio_durations.keys() == set(audio_ids):
raise ValueError(
f'audio_durations audio ids do not match audio ids in scores. '
f'Missing ids: {set(audio_ids) - audio_durations.keys()}. '
f'Additional ids: {audio_durations.keys() - set(audio_ids)}.'
)
_, event_classes = validate_score_dataframe(scores[audio_ids[0]])
single_label_ground_truths = multi_label_to_single_label_ground_truths(
ground_truth, event_classes)
def worker(audio_ids, output_queue=None):
segment_scores = None
segment_targets = None
for audio_id in audio_ids:
scores_k = scores[audio_id]
timestamps, _ = validate_score_dataframe(
scores_k, event_classes=event_classes)
timestamps = np.round(timestamps, time_decimals)
if segment_scores is None:
segment_scores = {class_name: [] for class_name in event_classes}
segment_targets = {class_name: [] for class_name in event_classes}
scores_k = scores_k[event_classes].to_numpy()
if audio_durations is None:
duration = max(
[timestamps[-1]] + [t_off for t_on, t_off, _ in ground_truth[audio_id]]
)
else:
duration = audio_durations[audio_id]
n_segments = int(np.ceil(duration / segment_length))
segment_boundaries = np.round(
np.arange(n_segments+1) * segment_length,
time_decimals
)
segment_onsets = segment_boundaries[:-1]
segment_offsets = segment_boundaries[1:]
for class_name in event_classes:
gt = single_label_ground_truths[class_name][audio_id]
if len(gt) == 0:
segment_targets[class_name].append(
np.zeros(n_segments, dtype=np.bool_))
else:
segment_targets[class_name].append(
np.any([
(segment_onsets < gt_offset)
* (segment_offsets > gt_onset)
* (segment_offsets > segment_onsets)
for gt_onset, gt_offset in
single_label_ground_truths[class_name][audio_id]
], axis=0)
)
for i in range(n_segments):
idx_on = get_first_index_where(
timestamps, "gt", segment_onsets[i]) - 1
idx_on = max(idx_on, 0)
idx_off = get_first_index_where(
timestamps, "geq", segment_offsets[i])
idx_off = min(idx_off, len(timestamps)-1)
if idx_off <= idx_on:
scores_ki = np.zeros(scores_k.shape[-1])
else:
scores_ki = np.max(scores_k[idx_on:idx_off], axis=0)
for c, class_name in enumerate(event_classes):
segment_scores[class_name].append(scores_ki[c])
if output_queue is not None:
output_queue.put((segment_scores, segment_targets))
return segment_scores, segment_targets
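    # Either run the worker in-process or shard the audio ids across worker processes.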
if num_jobs == 1:
segment_scores, segment_targets = worker(audio_ids)
else:
queue = multiprocessing.Queue()
shard_size = int(np.ceil(len(audio_ids) / num_jobs))
shards = [
audio_ids[i*shard_size:(i+1)*shard_size] for i in range(num_jobs)
if i*shard_size < len(audio_ids)
]
processes = [
multiprocessing.Process(
target=worker, args=(shard, queue), daemon=True,
)
for shard in shards
]
try:
for p in processes:
p.start()
segment_scores, segment_targets = None, None
count = 0
while count < len(shards):
seg_scores_i, seg_targets_i = queue.get()
if segment_scores is None:
segment_scores = seg_scores_i
segment_targets = seg_targets_i
else:
for class_name in segment_scores:
segment_scores[class_name].extend(seg_scores_i[class_name])
segment_targets[class_name].extend(seg_targets_i[class_name])
count += 1
finally:
for p in processes:
p.terminate()
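    # Merge per-class scores and targets, sort by score, and accumulate TP/FP/TN counts per unique threshold.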
stats = {}
for class_name in event_classes:
segment_scores[class_name] = np.array(segment_scores[class_name]+[np.inf])
sort_idx = np.argsort(segment_scores[class_name])
segment_scores[class_name] = segment_scores[class_name][sort_idx]
segment_targets[class_name] = np.concatenate(
segment_targets[class_name]+[np.zeros(1)])[sort_idx]
tps = np.cumsum(segment_targets[class_name][::-1])[::-1]
n_sys = np.arange(len(tps))[::-1]
segment_scores[class_name], unique_idx = np.unique(segment_scores[class_name], return_index=True)
n_ref = tps[0]
fns = n_ref - tps
tns = n_sys[0] - n_sys - fns
stats[class_name] = {
'tps': tps[unique_idx],
'fps': n_sys[unique_idx] - tps[unique_idx],
'tns': tns,
'n_ref': n_ref,
}
return {
class_name: (segment_scores[class_name], stats[class_name])
for class_name in event_classes
} | 5039bec8ceafed7952833aa2f39c5d44d0909790 | 17,563 |
def vnorm(v1):
"""vnorm(ConstSpiceDouble [3] v1) -> SpiceDouble"""
return _cspyce0.vnorm(v1) | 00016eaa6a765f564ce247c4126c4a360aa2b60d | 17,564 |
import numpy as np
import tensorflow as tf
from keras import backend as K  # assumes TF 1.x-style APIs (tf.to_int32, tf.metrics.mean_iou)

def mean_iou(y_true, y_pred):
    """Mean IoU metric averaged over thresholds from 0.5 to 0.95."""
    prec = []
for t in np.arange(0.5, 1.0, 0.05):
y_pred_ = tf.to_int32(y_pred > t)
score, up_opt = tf.metrics.mean_iou(y_true, y_pred_, 2)
K.get_session().run(tf.local_variables_initializer())
with tf.control_dependencies([up_opt]):
score = tf.identity(score)
prec.append(score)
return K.mean(K.stack(prec), axis=0) | a2503703bae7c8c83b42ac93406178bc4c52a675 | 17,565 |
def _encode_string_parts(value, encodings):
"""Convert a unicode string into a byte string using the given
list of encodings.
This is invoked if `encode_string` failed to encode `value` with a single
encoding. We try instead to use different encodings for different parts
of the string, using the encoding that can encode the longest part of
the rest of the string as we go along.
Parameters
----------
value : text type
The unicode string as presented to the user.
encodings : list
The encodings needed to encode the string as a list of Python
encodings, converted from the encodings in Specific Character Set.
Returns
-------
byte string
The encoded string, including the escape sequences needed to switch
between different encodings.
Raises
------
ValueError
If `value` could not be encoded with the given encodings.
"""
encoded = bytearray()
unencoded_part = value
while unencoded_part:
# find the encoding that can encode the longest part of the rest
# of the string still to be encoded
max_index = 0
best_encoding = None
for encoding in encodings:
try:
unencoded_part.encode(encoding)
# if we get here, the whole rest of the value can be encoded
best_encoding = encoding
max_index = len(unencoded_part)
break
except UnicodeError as e:
if e.start > max_index:
# e.start is the index of first character failed to encode
max_index = e.start
best_encoding = encoding
# none of the given encodings can encode the first character - give up
if best_encoding is None:
            raise ValueError(
                'None of the given encodings can encode the next character')
# encode the part that can be encoded with the found encoding
encoded_part = unencoded_part[:max_index].encode(best_encoding)
if best_encoding not in handled_encodings:
encoded += ENCODINGS_TO_CODES.get(best_encoding, b'')
encoded += encoded_part
# set remaining unencoded part of the string and handle that
unencoded_part = unencoded_part[max_index:]
# unencoded_part is empty - we are done, return the encoded string
return encoded | 58f514ed7cbd9a6e2c10e6d8b22f32a32d71d6a7 | 17,566 |
import re
import os
def get_files(path, pattern):
"""
Recursively find all files rooted in <path> that match the regexp <pattern>
"""
L = []
if not path.endswith('/'): path += '/'
# base case: path is just a file
if (re.match(pattern, os.path.basename(path)) != None) and os.path.isfile(path):
L.append(path)
return L
# general case
if not os.path.isdir(path):
return L
contents = os.listdir(path)
for item in contents:
item = path + item
if (re.search(pattern, os.path.basename(item)) != None) and os.path.isfile(item):
L.append(item)
        elif os.path.isdir(item):
L.extend(get_files(item + '/', pattern))
return L | 0e71a8290b6d011eeebd75fc1b07ba5fb945521a | 17,567 |
from tornado import websocket

def SocketHandler(qt):
""" `SocketHandler` wraps a websocket connection.
HTTP GET /ws
"""
class _handler(websocket.WebSocketHandler):
def check_origin(self, origin):
return True
def open(self):
qt.log("new socket open ...")
qt.register_socket(self)
def on_close(self):
qt.remove_socket(self)
def on_message(self, msg):
qt.log("Got socket command: %s" % (msg))
qt.command(msg)
return _handler | 001f9dbee77560d4d5970fce731084b5a9cca7af | 17,568 |
from typing import Optional
def var_swap(asset: Asset, tenor: str, forward_start_date: Optional[str] = None,
*, source: str = None, real_time: bool = False) -> Series:
"""
Strike such that the price of an uncapped variance swap on the underlying index is zero at inception. If
forward start date is provided, then the result is a forward starting variance swap.
:param asset: asset object loaded from security master
:param tenor: relative date representation of expiration date e.g. 1m
:param forward_start_date: forward start date e.g. 2m, 1y; defaults to none
:param source: name of function caller
:param real_time: whether to retrieve intraday data instead of EOD
:return: implied volatility curve
"""
if forward_start_date is None:
_logger.debug('where tenor=%s', tenor)
where = dict(tenor=[tenor])
df = _get_var_swap_df(asset, where, source, real_time)
series = ExtendedSeries() if df.empty else ExtendedSeries(df[Fields.VAR_SWAP.value])
series.dataset_ids = getattr(df, 'dataset_ids', ())
return series
else:
if not isinstance(forward_start_date, str):
raise MqTypeError('forward_start_date must be a relative date')
x = _tenor_to_month(tenor)
y = _tenor_to_month(forward_start_date)
z = x + y
yt = _month_to_tenor(y)
zt = _month_to_tenor(z)
tenors = _var_swap_tenors(asset)
if yt not in tenors or zt not in tenors:
series = ExtendedSeries()
series.dataset_ids = ()
return series
_logger.debug('where tenor=%s', f'{yt},{zt}')
where = dict(tenor=[yt, zt])
df = _get_var_swap_df(asset, where, source, real_time)
dataset_ids = getattr(df, 'dataset_ids', ())
if df.empty:
series = ExtendedSeries()
else:
grouped = df.groupby(Fields.TENOR.value)
try:
yg = grouped.get_group(yt)[Fields.VAR_SWAP.value]
zg = grouped.get_group(zt)[Fields.VAR_SWAP.value]
except KeyError:
_logger.debug('no data for one or more tenors')
series = ExtendedSeries()
series.dataset_ids = ()
return series
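            # Forward variance strike from total-variance additivity between the y- and z-month tenors.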
series = ExtendedSeries(sqrt((z * zg ** 2 - y * yg ** 2) / x))
series.dataset_ids = dataset_ids
return series | 0991ff1fb889b14b6a3d3e850caa2364a4f4d044 | 17,569 |
def extract_flow_global_roi(flow_x, flow_y, box):
"""
create global roi cropped flow image (for numpy image)
image:
numpy array image
box:
list of [xmin, ymin, xmax, ymax]
"""
flow_x_roi = extract_global_roi(flow_x, box)
flow_y_roi = extract_global_roi(flow_y, box)
if flow_x_roi is None or flow_y_roi is None:
return None
else:
return (flow_x_roi, flow_y_roi) | 1b6d22d413693e978dc31cfbf1708c93d9256cf1 | 17,570 |
from unittest.mock import patch
def patch_shell(response=None, error=False):
"""Mock the `AdbDeviceTcpFake.shell` and `DeviceFake.shell` methods."""
def shell_success(self, cmd):
"""Mock the `AdbDeviceTcpFake.shell` and `DeviceFake.shell` methods when they are successful."""
self.shell_cmd = cmd
return response
def shell_fail_python(self, cmd):
"""Mock the `AdbDeviceTcpFake.shell` method when it fails."""
self.shell_cmd = cmd
raise AttributeError
def shell_fail_server(self, cmd):
"""Mock the `DeviceFake.shell` method when it fails."""
self.shell_cmd = cmd
raise ConnectionResetError
if not error:
return {"python": patch("{}.AdbDeviceTcpFake.shell".format(__name__), shell_success), "server": patch("{}.DeviceFake.shell".format(__name__), shell_success)}
return {"python": patch("{}.AdbDeviceTcpFake.shell".format(__name__), shell_fail_python), "server": patch("{}.DeviceFake.shell".format(__name__), shell_fail_server)} | cdf4df2bb383c4c8b49b59442550e2c73ca828aa | 17,571 |
def __setAdjacent_square__(self, pos):
"""
Sets all adjacencies in the map for a map with square tiles.
"""
self.__checkIndices__(pos)
i, j = pos; adjacent = []
# Function to filter out nonexistent cells.
def filterfn(p):
do_not_filter = 0 <= p[0] < self.__numrows__ and 0 <= p[1] < self.__numcols__
return do_not_filter and not self.__isdisabled__[p[0]][p[1]]
for cell in filter(filterfn, ( (i+1,j), (i-1,j), (i,j+1), (i,j-1) )):
adjacent += [cell]
self.__adjacent__[i][j] = adjacent | ebdd3ee3d0104b5bd26cc48e07760de027615263 | 17,572 |
from copy import deepcopy

import numpy as np

def model_definition_nested_events():
"""Test model for state- and parameter-dependent heavisides.
ODEs
----
d/dt x_1:
inflow_1 - decay_1 * x1
d/dt x_2:
- decay_2 * x_2
Events:
-------
event_1:
trigger: x_1 > inflow_1 / decay_2
bolus: [[ 0],
[ -1 / time]]
event_2:
trigger: x_2 > 0.5
bolus: [[ bolus],
[ bolus]]
"""
# Model components
species = ['x_1', 'x_2']
initial_assignments = {
'x_1': 'k1',
'x_2': 'k2',
}
rate_rules = {
'x_1': 'inflow_1 - decay_1 * x_1',
'x_2': '- decay_2 * x_2',
}
parameters = {
'k1': 0,
'k2': 0,
'inflow_1': 4,
'decay_1': 2,
'decay_2': 5,
'bolus': 0, # for bolus != 0, nested event sensitivities are off!
}
events = {
'event_1': {
'trigger': 'x_1 > inflow_1 / decay_2',
'target': 'x_2',
'assignment': 'x_2 - 1 / time'
},
'event_2': {
'trigger': 'x_2 < - 0.5',
'target': ['x_1', 'x_2'],
'assignment': ['x_1 + bolus', 'x_2 + bolus'],
}
}
timepoints = np.linspace(0, 1, 101)
# Analytical solution
def x_pected(t, k1, k2, inflow_1, decay_1, decay_2, bolus):
# gather temporary variables
# event_time = x_1 > inflow_1 / decay_2
equil = inflow_1 / decay_1
tmp1 = inflow_1 / decay_2 - inflow_1 / decay_1
tmp2 = k1 - inflow_1 / decay_1
event_time = (- 1 / decay_1) * np.log( tmp1 / tmp2)
def get_early_x(t):
# compute dynamics before event
x_1 = equil * (1 - np.exp(-decay_1 * t)) + k1*np.exp(-decay_1 * t)
x_2 = k2 * np.exp(-decay_2 * t)
return np.array([[x_1], [x_2]])
if t < event_time:
x = get_early_x(t).flatten()
else:
# compute state after event
x_tau = get_early_x(event_time)
tau_x1 = x_tau[0] + bolus
tau_x2 = x_tau[1] - 1 / event_time + bolus
# compute dynamics after event
inhom = np.exp(decay_1 * event_time) * tau_x1
x_1 = equil * (1 - np.exp(decay_1 * (event_time - t))) + \
inhom * np.exp(- decay_1 * t)
x_2 = tau_x2 * np.exp(decay_2 * event_time) * np.exp(-decay_2 * t)
x = np.array([[x_1], [x_2]])
return x.flatten()
def sx_pected(t, parameters):
# get sx, w.r.t. parameters, via finite differences
sx = []
for ip in parameters:
eps = 1e-6
perturbed_params = deepcopy(parameters)
perturbed_params[ip] += eps
sx_p = x_pected(t, **perturbed_params)
perturbed_params[ip] -= 2*eps
sx_m = x_pected(t, **perturbed_params)
sx.append((sx_p - sx_m) / (2 * eps))
return np.array(sx)
return (
initial_assignments,
parameters,
rate_rules,
species,
events,
timepoints,
x_pected,
sx_pected
) | f42a5c7c01fd6f966ecec11b28c9620022dd7aaf | 17,573 |
from fastapi import HTTPException, Query, Request

async def address_balance_history(
request: Request,
address: Address,
token_id: TokenID = Query(None, description="Optional token id"),
timestamps: bool = Query(
False, description="Include timestamps in addition to block heights"
),
flat: bool | None = Query(True, description="Return data as flat arrays."),
limit: int | None = Query(50, gt=0, le=10000),
offset: int | None = Query(0, ge=0),
desc: bool | None = Query(True, description="Most recent first"),
):
"""
ERG or token balance history of an address.
"""
query = f"""
select d.height
{', h.timestamp' if timestamps else ''}
, sum(d.value) over (order by d.height) as balance
from bal.{'erg' if token_id is None else 'tokens'}_diffs d
join core.headers h on h.height = d.height
where d.address = $1
{'' if token_id is None else 'and token_id = $4'}
order by 1 {'desc' if desc else ''}
limit $2 offset $3;
"""
opt_args = [] if token_id is None else [token_id]
async with request.app.state.db.acquire() as conn:
rows = await conn.fetch(query, address, limit, offset, *opt_args)
if not rows:
raise HTTPException(status_code=404, detail=DETAIL_404)
if flat:
if timestamps:
return {
"heights": [r["height"] for r in rows],
"timestamps": [r["timestamp"] for r in rows],
"balances": [r["balance"] for r in rows],
}
else:
return {
"heights": [r["height"] for r in rows],
"balances": [r["balance"] for r in rows],
}
else:
return rows | 2fcae2ab775611e51fd056e98928afbcb6bf1278 | 17,574 |
def load(as_pandas=None):
"""
Loads the Grunfeld data and returns a Dataset class.
Parameters
----------
as_pandas : bool
Flag indicating whether to return pandas DataFrames and Series
or numpy recarrays and arrays. If True, returns pandas.
Returns
-------
Dataset
See DATASET_PROPOSAL.txt for more information.
Notes
-----
raw_data has the firm variable expanded to dummy variables for each
firm (ie., there is no reference dummy)
"""
return du.as_numpy_dataset(load_pandas(), as_pandas=as_pandas) | 183c37228619b835a36dc4a1cc1e1a7649fca6ec | 17,575 |
def rule_if_system(system_rule, non_system_rule, context):
"""Helper function to pick a rule based on system-ness of context.
This can be used (with functools.partial) to choose between two
rule names, based on whether or not the context has system
scope. Specifically if we will fail the parent of a nested policy
check based on scope_types=['project'], this can be used to choose
the parent rule name for the error message check in
common_policy_check().
"""
if context.system_scope:
return system_rule
else:
return non_system_rule | 2149c2ffdd6afdd64f7d33a2de4c6a23b3143dee | 17,576 |
def find_inactive_ranges(note_sequence):
"""Returns ranges where no notes are active in the note_sequence."""
start_sequence = sorted(
note_sequence.notes, key=lambda note: note.start_time, reverse=True)
end_sequence = sorted(
note_sequence.notes, key=lambda note: note.end_time, reverse=True)
notes_active = 0
time = start_sequence[-1].start_time
inactive_ranges = []
if time > 0:
inactive_ranges.append(0.)
inactive_ranges.append(time)
start_sequence.pop()
notes_active += 1
# Iterate through all note on events
while start_sequence or end_sequence:
if start_sequence and (start_sequence[-1].start_time <
end_sequence[-1].end_time):
if notes_active == 0:
time = start_sequence[-1].start_time
inactive_ranges.append(time)
notes_active += 1
start_sequence.pop()
else:
notes_active -= 1
if notes_active == 0:
time = end_sequence[-1].end_time
inactive_ranges.append(time)
end_sequence.pop()
# if the last note is the same time as the end, don't add it
# remove the start instead of creating a sequence with 0 length
if inactive_ranges[-1] < note_sequence.total_time:
inactive_ranges.append(note_sequence.total_time)
else:
inactive_ranges.pop()
assert len(inactive_ranges) % 2 == 0
inactive_ranges = [(inactive_ranges[2 * i], inactive_ranges[2 * i + 1])
for i in range(len(inactive_ranges) // 2)]
return inactive_ranges | 8db86584908283385958c5f710fb36d95795f7b1 | 17,577 |
def is_connected(G):
"""Returns True if the graph is connected, False otherwise.
Parameters
----------
G : NetworkX Graph
An undirected graph.
Returns
-------
connected : bool
True if the graph is connected, false otherwise.
Raises
------
NetworkXNotImplemented:
If G is directed.
Examples
--------
>>> G = nx.path_graph(4)
>>> print(nx.is_connected(G))
True
See Also
--------
is_strongly_connected
is_weakly_connected
is_semiconnected
is_biconnected
connected_components
Notes
-----
For undirected graphs only.
"""
if len(G) == 0:
        raise nx.NetworkXPointlessConcept('Connectivity is undefined '
                                          'for the null graph.')
return sum(1 for node in _plain_bfs(G, arbitrary_element(G))) == len(G) | 03a2602629db60565702bee044a1d70ba026a8aa | 17,578 |
import math

import cv2
import numpy as np
from mmcv import imshow, imwrite  # assumed mmcv image helpers
def show_result(img,
result,
skeleton=None,
kpt_score_thr=0.3,
bbox_color=None,
pose_kpt_color=None,
pose_limb_color=None,
radius=4,
thickness=1,
font_scale=0.5,
win_name='',
show=False,
show_keypoint_weight=False,
wait_time=0,
out_file=None):
"""Draw `result` over `img`.
Args:
img (str or Tensor): The image to be displayed.
result (list[dict]): The results to draw over `img`
(bbox_result, pose_result).
skeleton (list[list]): The connection of keypoints.
kpt_score_thr (float, optional): Minimum score of keypoints
to be shown. Default: 0.3.
pose_kpt_color (np.array[Nx3]`): Color of N keypoints.
If None, do not draw keypoints.
pose_limb_color (np.array[Mx3]): Color of M limbs.
If None, do not draw limbs.
radius (int): Radius of circles.
thickness (int): Thickness of lines.
font_scale (float): Font scales of texts.
win_name (str): The window name.
show (bool): Whether to show the image. Default: False.
show_keypoint_weight (bool): Whether to change the transparency
using the predicted confidence scores of keypoints.
wait_time (int): Value of waitKey param.
Default: 0.
out_file (str or None): The filename to write the image.
Default: None.
Returns:
Tensor: Visualized image only if not `show` or `out_file`
"""
img = cv2.imread(img)
img = img[:,:,::-1]
img = img.copy()
img_h, img_w, _ = img.shape
pose_result = []
for res in result:
pose_result.append(res['keypoints'])
for _, kpts in enumerate(pose_result):
# draw each point on image
if pose_kpt_color is not None:
assert len(pose_kpt_color) == len(kpts)
for kid, kpt in enumerate(kpts):
x_coord, y_coord, kpt_score = int(kpt[0]), int(
kpt[1]), kpt[2]
if kpt_score > kpt_score_thr:
if show_keypoint_weight:
img_copy = img.copy()
r, g, b = pose_kpt_color[kid]
cv2.circle(img_copy, (int(x_coord), int(y_coord)),
radius, (int(r), int(g), int(b)), -1)
transparency = max(0, min(1, kpt_score))
cv2.addWeighted(
img_copy,
transparency,
img,
1 - transparency,
0,
dst=img)
else:
r, g, b = pose_kpt_color[kid]
cv2.circle(img, (int(x_coord), int(y_coord)),
radius, (int(r), int(g), int(b)), -1)
# draw limbs
if skeleton is not None and pose_limb_color is not None:
assert len(pose_limb_color) == len(skeleton)
for sk_id, sk in enumerate(skeleton):
pos1 = (int(kpts[sk[0] - 1, 0]), int(kpts[sk[0] - 1, 1]))
pos2 = (int(kpts[sk[1] - 1, 0]), int(kpts[sk[1] - 1, 1]))
if (pos1[0] > 0 and pos1[0] < img_w and pos1[1] > 0
and pos1[1] < img_h and pos2[0] > 0
and pos2[0] < img_w and pos2[1] > 0
and pos2[1] < img_h
and kpts[sk[0] - 1, 2] > kpt_score_thr
and kpts[sk[1] - 1, 2] > kpt_score_thr):
r, g, b = pose_limb_color[sk_id]
if show_keypoint_weight:
img_copy = img.copy()
X = (pos1[0], pos2[0])
Y = (pos1[1], pos2[1])
mX = np.mean(X)
mY = np.mean(Y)
length = ((Y[0] - Y[1])**2 + (X[0] - X[1])**2)**0.5
angle = math.degrees(
math.atan2(Y[0] - Y[1], X[0] - X[1]))
stickwidth = 2
polygon = cv2.ellipse2Poly(
(int(mX), int(mY)),
(int(length / 2), int(stickwidth)), int(angle),
0, 360, 1)
cv2.fillConvexPoly(img_copy, polygon,
(int(r), int(g), int(b)))
transparency = max(
0,
min(
1, 0.5 *
(kpts[sk[0] - 1, 2] + kpts[sk[1] - 1, 2])))
cv2.addWeighted(
img_copy,
transparency,
img,
1 - transparency,
0,
dst=img)
else:
cv2.line(
img,
pos1,
pos2, (int(r), int(g), int(b)),
thickness=thickness)
if show:
imshow(img, win_name, wait_time)
if out_file is not None:
imwrite(img, out_file)
return img | af90da2b30ff9891613654d70724162ce7b4d702 | 17,579 |
from tensorflow.keras import backend as K  # assumed backend import

def D2(X, Y, Y2=None, YT=None):
""" Calculate the pointwise (squared) distance.
Arguments:
X: of shape (n_sample, n_feature).
Y: of shape (n_center, n_feature).
Y2: of shape (1, n_center).
YT: of shape (n_feature, n_center).
Returns:
pointwise distances (n_sample, n_center).
"""
X2 = K.sum(K.square(X), axis = 1, keepdims=True)
if Y2 is None:
if X is Y:
Y2 = X2
else:
Y2 = K.sum(K.square(Y), axis = 1, keepdims=True)
Y2 = K.reshape(Y2, (1, K.shape(Y)[0]))
if YT is None:
YT = K.transpose(Y)
d2 = K.reshape(X2, (K.shape(X)[0], 1)) \
+ Y2 - 2 * K.dot(X, YT) # x2 + y2 - 2xy
return d2 | daa8940e939eb2806e043f9b4521bf8cd1aefd2e | 17,580 |
from six import string_types

def read_table(path):
    """Read a tabular file (CSV or XLSX) into a list of dictionaries.
    The file extension must be ".csv" or ".xlsx"; it determines which
    reader is used.
    If a list is received, check that all of its dictionaries share the
    same keys and, if so, return it unchanged. Raise an exception
    otherwise.
    Args:
        path (str or list): As 'str', path to a CSV or XLSX file.
    Returns:
        list: List of dictionaries with identical keys representing the
            original file.
    """
    assert isinstance(path, string_types + (list, )), """
    {} is not a valid `path`""".format(path)
    # If `path` is a list, return it unchanged when it has a tabular format;
    # otherwise raise an exception.
    if isinstance(path, list):
        if helpers.is_list_of_matching_dicts(path):
            return path
        else:
            raise ValueError("""
            The given list is not made up of dictionaries sharing the same keys.""")
    # Deduce the file format from `path` and dispatch to the matching reader.
    suffix = path.split(".")[-1]
    if suffix == "csv":
        return _read_csv_table(path)
    elif suffix == "xlsx":
        return _read_xlsx_table(path)
    else:
        raise ValueError("""
        {} is not a recognized suffix. Try .csv or .xlsx""".format(suffix)) | e2de4230f64cd45f3ff4caaa068c7df85c0c30df | 17,581
from typing import Dict
from typing import Any
from typing import cast
def spec_from_json_dict(
json_dict: Dict[str, Any]
) -> FieldSpec:
""" Turns a dictionary into the appropriate FieldSpec object.
:param dict json_dict: A dictionary with properties.
:raises InvalidSchemaError:
:returns: An initialised instance of the appropriate FieldSpec
subclass.
"""
try:
if json_dict.get('ignored', False):
return Ignore(json_dict['identifier'])
type_str = json_dict['format']['type']
spec_type = cast(FieldSpec, FIELD_TYPE_MAP[type_str])
except KeyError as e:
raise InvalidSchemaError("the feature definition {} is incomplete. Must contain: {}".format(json_dict, e))
return spec_type.from_json_dict(json_dict) | 9bf557364a7a17cea0c84c65ece5b1d0e3983b2f | 17,582 |
import numpy as np
import scipy.special
def hyp_pfq(A, B, x, out=None, n=0):
"""
This function is decorated weirdly because its extra params are lists.
"""
out = np_hyp_pfq([a+n for a in A], [b+n for b in B], x, out)
with np.errstate(invalid='ignore'):
out *= np.prod([scipy.special.poch(a, n) for a in A])
out /= np.prod([scipy.special.poch(b, n) for b in B])
return out | f1d9e0454fa63d24b1a8a403bbae12e00b818bb2 | 17,583 |
from datetime import datetime, timedelta
from typing import Optional

import jwt  # PyJWT assumed
def create_new_token(
data: dict,
expires_delta: Optional[timedelta] = None,
page_only: bool = False):
"""Creates a token with the given permission and expiry"""
to_encode = data.copy()
if page_only:
expires = datetime.max
elif expires_delta:
expires = datetime.utcnow() + timedelta(minutes=expires_delta)
else:
expires = datetime.utcnow() + timedelta(minutes=TOKEN_EXPIRATION_TIME)
to_encode.update({"exp": expires})
to_encode.update({"scope": "userauth:none" if page_only else "userauth:full"})
return jwt.encode(to_encode, SECRET, ALGORITHM) | 3a0a2aebc6b814850333a5d4f5db72b1396cf208 | 17,584 |
import re
from numbers import Number
from pathlib import Path
from typing import Union

def parse_json_year_date(year: Number, fullpath: Path) -> Union[Path, None]:
    """
    Filter the JSON files by year.
    """
    if not isinstance(fullpath, Path):
        raise TypeError("The path parameter must be of type Path.")
pattern_finder = re.search(f"_{year}\.json", fullpath.name)
if pattern_finder:
return fullpath
else:
return None | 1d482bf916c3574225fdc31e700fb570c47555b1 | 17,585 |
from malaya_speech.utils import describe_availability
def available_fastspeech2():
"""
List available FastSpeech2, Text to Mel models.
"""
return describe_availability(
_fastspeech2_availability,
text = '`husein` and `haqkiem` combined loss from training set',
) | b7fa7f6132eb478cf27068a4377688f8b3ec5c7b | 17,586 |
from datetime import datetime as dt

import numpy as np

def solve(A, b, method='gauss', verbose=0, eps=1e-6, max_itration_times=100000, omega=1.9375):
"""
Solve equations in specified method.
:param A: coefficient matrix of the equations
:param b: vector
:param method: the way to solve equations
:param verbose: whether show the running information
:param eps: *epsilon*
:param max_itration_times: the maximum *rounds* of iteration
:param omega: *relaxation factor* for SOR method.
:return: the solution x or 'None' if error occurs
"""
# _show_equations(A, b) # only when dim <= 10
start = dt.now()
global _verbose, _eps, _max_itration_times, _omega
_verbose = verbose
_eps = eps
_max_itration_times = max_itration_times
_omega = omega
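    # Map the requested method name to its solver routine; unrecognized names are rejected below.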
func = {
'gauss': gauss,
'lu': lu,
'chase': chase,
'square_root': square_root,
'jacobi': jacobi,
'gauss_seidel': gauss_seidel,
'sor': sor,
'cg': cg,
'qr': qr
}.get(method, 'other_method')
if func == 'other_method':
_raise_equasolerror_no_method(method)
# make a copy of A and b to make sure they will not be changed.
# show_equations(A, b)
A0 = np.copy(A)
b0 = np.copy(b)
answer = func(A0, b0)
if _verbose == 1:
print('[%s] time cost: %.4f s.' % (method, (dt.now() - start).total_seconds()))
return answer | 50c7cdc5a2c8b146a062c028c4cb684c0b7efc2f | 17,587 |
def returns_unknown():
"""Tuples are a not-supported type."""
return 1, 2, 3 | 9fc003c890b4e053362c684b1a5f0dfca59bbe42 | 17,588 |
def get_user(
cmd,
app_id: str,
token: str,
assignee: str,
api_version: str,
central_dns_suffix=CENTRAL_ENDPOINT,
) -> User:
"""
Get information for the specified user.
Args:
cmd: command passed into az
app_id: name of app (used for forming request URL)
token: (OPTIONAL) authorization token to fetch device details from IoTC.
MUST INCLUDE type (e.g. 'SharedAccessToken ...', 'Bearer ...')
assignee: unique ID of the user
central_dns_suffix: {centralDnsSuffixInPath} as found in docs
Returns:
users: dict
"""
result = _make_call(
cmd,
app_id=app_id,
method="get",
path=assignee,
payload=None,
token=token,
central_dns_suffix=central_dns_suffix,
api_version=api_version,
)
return _utility.get_object(result, MODEL, api_version) | cc387259c97ebfecadd5d82dc6acf8f970d19478 | 17,589 |
import subprocess
def run_test(
bess_addr,
ptfdir,
trex_server_addr=None,
extra_args=(),
):
"""
Runs PTF tests included in provided directory.
"""
# create a dummy interface for PTF
if not create_dummy_interface() or not set_up_interfaces([DUMMY_IFACE_NAME]):
return False
pypath = "/upf-tests/lib"
# build the ptf command to be run
cmd = ["ptf"]
cmd.extend(["--test-dir", ptfdir])
cmd.extend(["--pypath", pypath])
cmd.extend(["-i", f"296@{DUMMY_IFACE_NAME}"])
test_params = "bess_upf_addr='{}'".format(bess_addr)
if trex_server_addr is not None:
test_params += ";trex_server_addr='{}'".format(trex_server_addr)
cmd.append("--test-params={}".format(test_params))
cmd.extend(extra_args)
info("Executing PTF command: {}".format(" ".join(cmd)))
try:
# run ptf and send output to stdout
p = subprocess.Popen(cmd)
p.wait()
except Exception:
error("Error when running PTF tests")
return False
finally:
# always clean up the dummy interface
remove_dummy_interface()
return p.returncode == 0 | c1a41561a0d6f37b8f5693dbaa8a1fc2799e0786 | 17,590 |
def get(fg, bg=None, attribute = 0):
"""
Return string with ANSI escape code for set text colors
fg: html code or color index for text color
attribute: use Attribute class variables
"""
if type(fg) is str:
bg = bg if bg else "#000000"
return by_hex(fg, bg, attribute=attribute)
elif type(fg) is int and 0 <= fg <= 255:
bg = bg if bg else 0
return by_index(fg, bg, attribute=attribute)
else:
raise TypeError("You can use only string or int.") | 16ee7ea3bd5c66c415a6466632cee1c5b337696b | 17,591 |
import numpy as np
from math import factorial, sqrt  # assumed sources for factorial/sqrt
from scipy.special import binom

def get_Qi(Q,i,const_ij,m):
"""
Aim:
----
Equalising two polynomials where one is obtained by a SOS
decomposition in the canonical basis and the other one is expressed
in the Laguerre basis.
Parameters
----------
Q : matrix for the SOS decomposition
i : integer
degree at which we compte the coefficients.
const_ij : list
contains indices of Q at which coefficients i+j= const.
Returns
-------
Real that is a sum of coefficients
"""
return sum(factorial(l)*binom(l,i)*\
sum(Q[j]/sqrt(factorial(j[0])*factorial(j[1])) \
for j in const_ij[2*l]) for l in np.arange(i,m+1)) | a54313c8763777840c4a018dedb2fe6363e09d55 | 17,592 |
import os
def run_openlego(analyze_mdao_definitions, cmdows_dir=None, initial_file_path=None,
data_folder=None, run_type='test', approx_totals=False, driver_debug_print=False):
# type: (Union[int, list, str], Optional[str], Optional[str], Optional[str], Optional[str], Optional[bool], Optional[bool]) -> Union[tuple, LEGOProblem]
"""Run OpenLEGO for a list of MDAO definitions.
Parameters
----------
analyze_mdao_definitions : list
List of MDAO definitions to be analyzed.
cmdows_dir : str
Path to directory with CMDOWS files
initial_file_path : str
Path to file containing initial values
data_folder : str
Path to directory where results will be stored
run_type : str
Option to indicate the type of run, as this changes the return statement used
approx_totals : bool
Setting on whether to use approx_totals on the model
driver_debug_print : bool
Setting on whether to print debug information in the log
Returns
-------
Union[Tuple[float], LEGOProblem]
"""
# Check and analyze inputs
mdao_defs_loop = get_loop_items(analyze_mdao_definitions)
file_dir = os.path.dirname(__file__)
if not cmdows_dir:
cmdows_dir = os.path.join(file_dir, 'cmdows_files')
if not initial_file_path:
initial_file_path = os.path.join(file_dir, 'SSBJ-base.xml')
if not data_folder:
data_folder = ''
    # Run OpenLEGO for each selected MDAO definition
for mdao_def in mdao_defs_loop:
print('\n-----------------------------------------------')
print('Running the OpenLEGO of Mdao_{}.xml...'.format(mdao_def))
print('------------------------------------------------')
"""Solve the SSBJ problem using the given CMDOWS file."""
# 1. Create Problem
prob = LEGOProblem(cmdows_path=os.path.join(cmdows_dir, 'Mdao_{}.xml'.format(mdao_def)),
kb_path=os.path.join(file_dir, 'kb'), # Knowledge base path
data_folder=data_folder, # Output directory
base_xml_file=os.path.join(data_folder,
'ssbj-output-{}.xml'.format(mdao_def)))
if driver_debug_print:
prob.driver.options['debug_print'] = ['desvars', 'nl_cons', 'ln_cons', 'objs']
prob.set_solver_print(0) # Set printing of solver information
if approx_totals:
prob.model.approx_totals()
# 2. Initialize the Problem and export N2 chart
prob.store_model_view()
prob.initialize_from_xml(initial_file_path) # Set the initial values from an XML file
# 3. Run the Problem
test_distributed = mdao_def in ['CO', 'BLISS-2000'] and run_type == 'test'
if test_distributed:
prob.run_model()
else:
prob.run_driver() # Run the driver (optimization, DOE, or convergence)
# 4. Read out the case reader
if not test_distributed:
prob.collect_results()
if run_type == 'test':
# 5. Collect test results for test assertions
tc = prob['/dataSchema/aircraft/geometry/tc'][0]
h = prob['/dataSchema/reference/h'][0]
M = prob['/dataSchema/reference/M'][0]
AR = prob['/dataSchema/aircraft/geometry/AR'][0]
Lambda = prob['/dataSchema/aircraft/geometry/Lambda'][0]
Sref = prob['/dataSchema/aircraft/geometry/Sref'][0]
if mdao_def not in ['CO', 'BLISS-2000']:
lambda_ = prob['/dataSchema/aircraft/geometry/lambda'][0]
section = prob['/dataSchema/aircraft/geometry/section'][0]
Cf = prob['/dataSchema/aircraft/other/Cf'][0]
T = prob['/dataSchema/aircraft/other/T'][0]
R = prob['/dataSchema/scaledData/R/value'][0]
extra = prob['/dataSchema/aircraft/weight/WT'][0]
elif mdao_def == 'CO':
lambda_ = prob.model.SubOptimizer0.prob['/dataSchema/aircraft/geometry/lambda'][0]
section = prob.model.SubOptimizer0.prob['/dataSchema/aircraft/geometry/section'][0]
Cf = prob.model.SubOptimizer1.prob['/dataSchema/aircraft/other/Cf'][0]
T = prob.model.SubOptimizer2.prob['/dataSchema/aircraft/other/T'][0]
R = prob['/dataSchema/scaledData/R/value'][0]
extra = (prob['/dataSchema/distributedArchitectures/group0/objective'],
prob['/dataSchema/distributedArchitectures/group1/objective'],
prob['/dataSchema/distributedArchitectures/group2/objective'])
else:
lambda_, section, Cf, T, R, extra = None, None, None, None, None, None
# 6. Cleanup and invalidate the Problem afterwards
prob.invalidate()
return tc, h, M, AR, Lambda, Sref, lambda_, section, Cf, T, R, extra
elif run_type == 'validation':
return prob
else:
prob.invalidate() | b198f9dfbbf051a3b119a1c4fa8002f3c2e8183a | 17,593 |
def strToBool(s):
"""
Converts string s to a boolean
"""
assert type(s) == str or type(s) == unicode
b_dict = {'true': True, 'false': False, 'yes': True, 'no': False}
return b_dict[s.lower()] | 84e59429523e6e59a90739b0f1b160fa9e84bdc8 | 17,594 |
import json
def publish_to_sns(topic_name, message, region=None):
"""
Post a message to an SNS topic
"""
AWS = AWSCachedClient(region) # cached client object
partition = None
if region:
partition = partition_from_region(region)
else:
partition = 'aws'
region = 'us-east-1'
topic_arn = 'arn:' + partition + ':sns:' + region + ':' + AWS.account + ':' + topic_name
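    # With MessageStructure='json', SNS requires the message body to carry a top-level "default" key.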
json_message = json.dumps({"default":json.dumps(message)})
message_id = AWS.get_connection('sns', region).publish(
TopicArn=topic_arn,
Message=json_message,
MessageStructure='json'
).get('MessageId', 'error')
return message_id | 5a3c35c0367873e2c0b3c79a176b7c384d2b74ed | 17,595 |
import os

import nibabel as nb
import numpy as np
from scipy.fftpack import fft, ifft  # assumed FFT backend
def bandpass_voxels(realigned_file, bandpass_freqs, sample_period=None):
"""
Performs ideal bandpass filtering on each voxel time-series.
Parameters
----------
realigned_file : string
Path of a realigned nifti file.
bandpass_freqs : tuple
Tuple containing the bandpass frequencies. (LowCutoff_HighPass HighCutoff_LowPass)
sample_period : float, optional
Length of sampling period in seconds. If not specified,
this value is read from the nifti file provided.
Returns
-------
bandpassed_file : string
Path of filtered output (nifti file).
"""
def ideal_bandpass(data, sample_period, bandpass_freqs):
# Derived from YAN Chao-Gan 120504 based on REST.
sample_freq = 1. / sample_period
sample_length = data.shape[0]
data_p = np.zeros(int(2**np.ceil(np.log2(sample_length))))
data_p[:sample_length] = data
LowCutoff, HighCutoff = bandpass_freqs
if (LowCutoff is None): # No lower cutoff (low-pass filter)
low_cutoff_i = 0
elif (LowCutoff > sample_freq / 2.):
# Cutoff beyond fs/2 (all-stop filter)
low_cutoff_i = int(data_p.shape[0] / 2)
else:
low_cutoff_i = np.ceil(
LowCutoff * data_p.shape[0] * sample_period).astype('int')
if (HighCutoff > sample_freq / 2. or HighCutoff is None):
# Cutoff beyond fs/2 or unspecified (become a highpass filter)
high_cutoff_i = int(data_p.shape[0] / 2)
else:
high_cutoff_i = np.fix(
HighCutoff * data_p.shape[0] * sample_period).astype('int')
freq_mask = np.zeros_like(data_p, dtype='bool')
freq_mask[low_cutoff_i:high_cutoff_i + 1] = True
freq_mask[
data_p.shape[0] -
high_cutoff_i:data_p.shape[0] + 1 - low_cutoff_i
] = True
f_data = fft(data_p)
f_data[freq_mask != True] = 0.
data_bp = np.real_if_close(ifft(f_data)[:sample_length])
return data_bp
nii = nb.load(realigned_file)
data = nii.get_data().astype('float64')
mask = (data != 0).sum(-1) != 0
Y = data[mask].T
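    # Demean each voxel time-series before applying the ideal bandpass filter.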
Yc = Y - np.tile(Y.mean(0), (Y.shape[0], 1))
if not sample_period:
hdr = nii.get_header()
sample_period = float(hdr.get_zooms()[3])
# Sketchy check to convert TRs in millisecond units
if sample_period > 20.0:
sample_period /= 1000.0
Y_bp = np.zeros_like(Y)
for j in range(Y.shape[1]):
Y_bp[:, j] = ideal_bandpass(Yc[:, j], sample_period, bandpass_freqs)
data[mask] = Y_bp.T
img = nb.Nifti1Image(data, header=nii.get_header(),
affine=nii.get_affine())
bandpassed_file = os.path.join(os.getcwd(),
'bandpassed_demeaned_filtered.nii.gz')
img.to_filename(bandpassed_file)
return bandpassed_file | 0792647b7faa1beaa517d99f448d554168401446 | 17,596 |
from typing import List, Tuple

import numpy as np
def get_subset(
classes: List,
train_data,
train_labels,
val_data,
val_labels,
test_data,
test_labels,
) -> Tuple:
"""
creates a binary subset of training, validation, and testing set using the specified list of classes to select
:param classes: list of classes in the labels that are to be selected in the subset (only specify two)
:param train_data: list or numpy array containing training data
:param train_labels: list or numpy array containing training labels
:param val_data: list or numpy array containing validation/training phase 2 data
:param val_labels: list or numpy array containing validation/training phase 2 labels
:param test_data: list or numpy array containing testing data
:param test_labels: list or numpy array containing testing labels
:return: tuple of training sub-set, validation/training phase 2 sub-set, testing sub-set.
"sub-set" here is a tuple of training and testing numpy arrays
"""
train_set = np.isin(train_labels, classes)
val_set = np.isin(val_labels, classes)
test_set = np.isin(test_labels, classes)
train_data = train_data[train_set]
train_labels = train_labels[train_set] == classes[0]
val_data = val_data[val_set]
val_labels = val_labels[val_set] == classes[0]
test_data = test_data[test_set]
test_labels = test_labels[test_set] == classes[0]
return (train_data, train_labels), (val_data, val_labels), (test_data, test_labels) | 8857b7f5c4563692b3236b68889201bd3a28507e | 17,597 |
import numpy as np

def to_xyzw(matrix):
    """Convenience/readability function to bring spatial (trailing) axis to start.
    Args:
matrix (...x4 array): Input matrix.
Returns:
4x... array
"""
return np.rollaxis(matrix, -1) | 7c74b9bd6dc271db4a5dd925bbcfec4eef7ca791 | 17,598 |
import numpy
from tensorflow.keras import backend as K  # assumed backend import
def do_3d_pooling(feature_matrix, stride_length_px=2,
pooling_type_string=MAX_POOLING_TYPE_STRING):
"""Pools 3-D feature maps.
:param feature_matrix: Input feature maps (numpy array). Dimensions must be
M x N x H x C or 1 x M x N x H x C.
:param stride_length_px: See doc for `do_2d_pooling`.
:param pooling_type_string: Pooling type (must be accepted by
`_check_pooling_type`).
:return: feature_matrix: Output feature maps (numpy array). Dimensions will
be 1 x m x n x h x C.
"""
error_checking.assert_is_numpy_array_without_nan(feature_matrix)
error_checking.assert_is_integer(stride_length_px)
error_checking.assert_is_geq(stride_length_px, 2)
_check_pooling_type(pooling_type_string)
if len(feature_matrix.shape) == 4:
feature_matrix = numpy.expand_dims(feature_matrix, axis=0)
error_checking.assert_is_numpy_array(feature_matrix, num_dimensions=5)
feature_tensor = K.pool3d(
x=K.variable(feature_matrix), pool_mode=pooling_type_string,
pool_size=(stride_length_px, stride_length_px, stride_length_px),
strides=(stride_length_px, stride_length_px, stride_length_px),
padding='valid', data_format='channels_last'
)
return feature_tensor.numpy() | 180ceae7364dcd1dd55d23a00389d0c3bb43cc38 | 17,599 |