content (string, 35 to 762k chars) | sha1 (string, 40 chars) | id (int64, 0 to 3.66M)
---|---|---|
def fix_lng_degrees(lng: float) -> float:
"""
For a longitude in degrees outside [-180, 180], return the equivalent
degree, assuming -180 = 180°W and 180 = 180°E.
"""
sign = 1 if lng > 0 else -1
lng_adj = (abs(lng) % 360) * sign
if lng_adj > 180:
return (lng_adj % 180) - 180
elif lng_adj < -180:
return lng_adj % 180
return lng_adj | bde58152883874095b15ec38cfb24ea68d73c188 | 16,700 |
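# A minimal usage sketch of fix_lng_degrees() as defined above: longitudes
# outside [-180, 180] wrap around to the equivalent in-range value.
assert fix_lng_degrees(190) == -170    # 190°E wraps to 170°W
assert fix_lng_degrees(-200) == 160    # 200°W wraps to 160°E
assert fix_lng_degrees(45.5) == 45.5   # in-range values are unchanged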
def create_code(traits):
"""Assign bits to list of traits.
"""
code = 1
result = {INVALID: code}
if not traits:
return result
for trait in traits:
code = code << 1
result[trait] = code
return result | cfc7b1662edaf7f3e3763009a460157f7ec677bb | 16,701 |
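# A small sketch of create_code(), assuming INVALID is the module's sentinel key
# (a placeholder value is used here); each trait gets the next power-of-two bit.
INVALID = 'invalid'  # assumption: stand-in for the module-level constant
codes = create_code(['red', 'green', 'blue'])
assert codes == {'invalid': 1, 'red': 2, 'green': 4, 'blue': 8}
assert codes['red'] | codes['blue'] == 10  # bit flags combine with bitwise OR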
from typing import List
from typing import Dict
from typing import Any
from typing import Optional
def get_current_table(grid_id: str) -> List[Dict[Any, Any]]:
""" Get current Data from the grid
Args:
grid_id: Grid ID to retrieve data from.
Returns:
list: Existing grid data.
"""
current_table: Optional[List[dict]] = demisto.incidents()[0].get("CustomFields", {}).get(grid_id)
if current_table is None:
raise ValueError(f"The grid id isn't valid: {grid_id}")
return pd.DataFrame(current_table) | d1a8c21398aa2aca54ca587aa577c8ff50d8d46f | 16,702 |
def read_graph(filepath):
"""Creates a graph based on the content of the file at given filepath.
Parameters
----------
filepath : str
Path to a file containing an adjacency matrix.
"""
g_data = np.loadtxt(open(filepath, "rb"), delimiter=",")
return nx.from_numpy_matrix(g_data) | 74e0b687c6cf9e404d9446505799a84b5680c5b3 | 16,703 |
def get_seed(seed=None):
"""Get valid Numpy random seed value"""
# https://groups.google.com/forum/#!topic/briansupport/9ErDidIBBFM
random = np.random.RandomState(seed)
return random.randint(0, 2147483647) | 5ac1280a30265518edcf8bb07a03cfe5fb0ae21d | 16,704 |
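# A quick sketch of get_seed(): the same input seed deterministically yields the
# same derived NumPy seed, and the result is always a valid 31-bit value.
assert get_seed(42) == get_seed(42)
assert 0 <= get_seed() < 2147483647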
import traceback
import sys
def explain_predictions_best_worst(pipeline, input_features, y_true, num_to_explain=5, top_k_features=3,
include_shap_values=False, metric=None, output_format="text"):
"""Creates a report summarizing the top contributing features for the best and worst points in the dataset as measured by error to true labels.
XGBoost models and CatBoost multiclass classifiers are not currently supported.
Arguments:
pipeline (PipelineBase): Fitted pipeline whose predictions we want to explain with SHAP.
input_features (ww.DataTable, pd.DataFrame): Input data to evaluate the pipeline on.
y_true (ww.DataColumn, pd.Series): True labels for the input data.
num_to_explain (int): How many of the best, worst, random data points to explain.
top_k_features (int): How many of the highest/lowest contributing features to include in the table for each
data point.
include_shap_values (bool): Whether SHAP values should be included in the table. Default is False.
metric (callable): The metric used to identify the best and worst points in the dataset. Function must accept
the true labels and predicted value or probabilities as the only arguments and lower values
must be better. By default, this will be the absolute error for regression problems and cross entropy loss
for classification problems.
output_format (str): Either "text" or "dict". Default is "text".
Returns:
str, dict, or pd.DataFrame - A report explaining the top contributing features for the best/worst predictions in the input_features.
For each of the best/worst rows of input_features, the predicted values, true labels, metric value,
feature names, prediction contribution, and SHAP Value (optional) will be listed.
Raises:
ValueError: if input_features does not have more than twice the requested features to explain.
ValueError: if y_true and input_features have mismatched lengths.
ValueError: if an output_format outside of "text", "dict" or "dataframe" is provided.
"""
input_features = infer_feature_types(input_features)
input_features = _convert_woodwork_types_wrapper(input_features.to_dataframe())
y_true = infer_feature_types(y_true)
y_true = _convert_woodwork_types_wrapper(y_true.to_series())
if not (input_features.shape[0] >= num_to_explain * 2):
raise ValueError(f"Input features must be a dataframe with more than {num_to_explain * 2} rows! "
"Convert to a dataframe and select a smaller value for num_to_explain if you do not have "
"enough data.")
if y_true.shape[0] != input_features.shape[0]:
raise ValueError("Parameters y_true and input_features must have the same number of data points. Received: "
f"true labels: {y_true.shape[0]} and {input_features.shape[0]}")
if output_format not in {"text", "dict", "dataframe"}:
raise ValueError(f"Parameter output_format must be either text, dict, or dataframe. Received {output_format}")
if not metric:
metric = DEFAULT_METRICS[pipeline.problem_type]
try:
if is_regression(pipeline.problem_type):
if is_time_series(pipeline.problem_type):
y_pred = pipeline.predict(input_features, y=y_true).to_series()
else:
y_pred = pipeline.predict(input_features).to_series()
y_pred_values = None
y_true_no_nan, y_pred_no_nan = drop_rows_with_nans(y_true, y_pred)
errors = metric(y_true_no_nan, y_pred_no_nan)
else:
if is_time_series(pipeline.problem_type):
y_pred = pipeline.predict_proba(input_features, y=y_true).to_dataframe()
y_pred_values = pipeline.predict(input_features, y=y_true).to_series()
else:
y_pred = pipeline.predict_proba(input_features).to_dataframe()
y_pred_values = pipeline.predict(input_features).to_series()
y_true_no_nan, y_pred_no_nan, y_pred_values_no_nan = drop_rows_with_nans(y_true, y_pred, y_pred_values)
errors = metric(pipeline._encode_targets(y_true_no_nan), y_pred_no_nan)
except Exception as e:
tb = traceback.format_tb(sys.exc_info()[2])
raise PipelineScoreError(exceptions={metric.__name__: (e, tb)}, scored_successfully={})
errors = pd.Series(errors, index=y_pred_no_nan.index)
sorted_scores = errors.sort_values()
best_indices = sorted_scores.index[:num_to_explain]
worst_indices = sorted_scores.index[-num_to_explain:]
index_list = best_indices.tolist() + worst_indices.tolist()
pipeline_features = pipeline.compute_estimator_features(input_features, y_true).to_dataframe()
data = _ReportData(pipeline, pipeline_features, input_features, y_true, y_pred, y_pred_values, errors, index_list, metric)
report_creator = _report_creator_factory(data, report_type="explain_predictions_best_worst",
output_format=output_format, top_k_features=top_k_features,
include_shap_values=include_shap_values, num_to_explain=num_to_explain)
return report_creator(data) | 9138b92d13e3aa44a31b91d512cbbbd08e35ecd6 | 16,705 |
import typing
import inspect
def resolve_lookup(
context: dict, lookup: str, call_functions: bool = True
) -> typing.Any:
"""
Helper function to extract a value out of a context-dict.
A lookup string can access attributes, dict-keys, methods without parameters and indexes by using the dot-accessor (e.g. ``person.name``)
This is based on the implementation of the variable lookup of the django template system:
https://github.com/django/django/blob/master/django/template/base.py
"""
current = context
for bit in lookup.split("."):
try:
current = current[bit]
except (TypeError, AttributeError, KeyError, ValueError, IndexError):
try:
current = getattr(current, bit)
except (TypeError, AttributeError):
# Reraise if the exception was raised by a @property
if not isinstance(current, dict) and bit in dir(current):
raise
try: # list-index lookup
current = current[int(bit)]
except (
IndexError, # list index out of range
ValueError, # invalid literal for int()
KeyError, # current is a dict without `int(bit)` key
TypeError,
): # unsubscriptable object
return None
# raise LookupError(
# "Failed lookup for key " "[%s] in %r", (bit, current)
# ) # missing attribute
if callable(current) and call_functions:
try: # method call (assuming no args required)
current = current()
except TypeError:
signature = inspect.signature(current) # type: ignore
try:
signature.bind()
except TypeError: # arguments *were* required
pass # but we continue because we might use an attribute on the object instead of calling it
else:
raise
return current | a2090f2488ee10f7c11684952fd7a2498f6d4979 | 16,706 |
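# A minimal sketch of resolve_lookup(): the dotted path can traverse dict keys,
# list indexes, attributes and zero-argument methods; failed lookups yield None.
context = {'person': {'name': 'ada', 'tags': ['x', 'y']}}
assert resolve_lookup(context, 'person.name') == 'ada'
assert resolve_lookup(context, 'person.tags.1') == 'y'        # list-index lookup
assert resolve_lookup(context, 'person.name.upper') == 'ADA'  # zero-arg method call
assert resolve_lookup(context, 'person.missing') is None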
def check_actions_tool(tool):
"""2.2.x to 2.3.0 upgrade step checker
"""
atool = getToolByName(tool, 'portal_actions')
try:
atool['user']['change_password']
except KeyError:
return True
try:
atool['global']['members_register']
except KeyError:
return True
try:
atool['global']['search_form']
except KeyError:
return True
try:
atool['global']['search']
except KeyError:
return True
try:
atool['global']['syndication']
except KeyError:
return True
return False | 2ecc6064cd26aa670743c25018dd27e2ce0f41ca | 16,707 |
def integer_byte_length(number):
"""
Number of bytes needed to represent an integer, excluding any prefix 0 bytes.
:param number:
Integer value. If number is 0, returns 0.
:returns:
The number of bytes in the integer.
"""
quanta, remainder = divmod(integer_bit_length(number), 8)
if remainder:
quanta += 1
return quanta | 0de5828117107461e23e36cf3c38bab0850b7203 | 16,708 |
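# A worked sketch, assuming integer_bit_length() behaves like int.bit_length():
# 255 needs 8 bits -> 1 byte, 256 needs 9 bits -> 2 bytes, and 0 stays 0.
assert integer_byte_length(255) == 1
assert integer_byte_length(256) == 2
assert integer_byte_length(0) == 0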
def ones(input_dim, output_dim, name=None):
"""All zeros."""
initial = tf.ones((input_dim, output_dim), dtype=tf.float32)
return tf.Variable(initial, name=name) | 02867b278e224e436e470a9eaeac32b44e99a99a | 16,709 |
def enrichment_score2(mat, idx, line_width, norm_factors, distance_range=(20, 40), window_size=10,
stats_test_log=({}, {})):
"""
Calculate the enrichment score of a stripe given its location, width and the contact matrix
Parameters:
----------
mat: np.array (2D)
Contact matrix generated with strata2horizontal() or strata2vertical()
idx: int
The location (index) of the candidate stripe
line_width: int
Stripe width (# of bins)
norm_factors: np.array (1D)
The vector of normalization factors of the contact map.
distance_range: tuple
The distance range (# of bins) from the diagonal used for calculating the scores
window_size: int
Window size (# of bins)
stats_test_log: tuple of dict
Previous log for accelerating statistical tests
Returns
----------
new_mat: np.array (1D)
The enrichment score of each pixel along the candidate stripe
"""
_calculated_values, _poisson_stats = stats_test_log
half = int(line_width // 2)
x1, x2 = idx - half, idx - half + line_width
if x1 == x2:
x2 += 1
new_mat = np.zeros((distance_range[1] - distance_range[0],))
for j in range(distance_range[0], distance_range[1]):
y = j - distance_range[0]
_min_temp = subsetNpMatrix(mat, (x1, x2), (j - window_size - half, j + window_size + half + 1))
line_min = np.median([_min_temp])
# print(_min_temp, line_min)
_inner_neighbor = subsetNpMatrix(mat, (idx - half - window_size, x1),
(j - window_size - half, j + window_size + half + 1))
_outer_neighbor = subsetNpMatrix(mat, (x2 + 1, idx + half + window_size + 1),
(j - window_size - half, j + window_size + half + 1))
if _outer_neighbor.size == 0 or _inner_neighbor.size == 0:
continue
neighbor_mean = max(np.mean(_inner_neighbor), np.mean(_outer_neighbor))
# There should be a lower bound for the expected value,
# otherwise situations like (exp=0.01 and obs=0.02) would also be significant
# Currently we can set this to 0 until KR norm factors can be loaded
lower_b = 1 / norm_factors[idx] # This should be (1 / KR_norm_factors) if we refer to JuiceTools HICCUPS
_exp = max(neighbor_mean, lower_b)
_obs = int(line_min) # the same as floor function when line_min > 0
# _calculated_values: store all calculated exp-obs pairs in dictionary, in which keys are obs since
# they are always integers. Each _calculated_values[obs] is a binary tree for quick searching,
# and each tree leaf is a exp value corresponding to the obs value. Since exp values are float,
# there is also an integer index attached for searching the exp-obs in dictionary _poisson_stats
# (float cannot be dict keys).
# _poisson_stats: record all calculated result in a dict. It should be
# _poisson_stats[(_exp, _obs)] = -log10(p). But _exp is a float and cannot be a dict key, we give
# each _exp a unique index and use the index.
# stats_log: record all p value calculation. Just for benchmarking. Delete this when publishing.
# global _calculated_values, _poisson_stats # , stats_log
tolerance = 0.02
# check if obs is a value calculated before
if _obs in _calculated_values:
# Find the nearest _exp values which were calculated before
# One larger, one smaller
(_upper, _lower) = _calculated_values[_obs].search(_exp)
# If _upper is close enough to _exp, directly use the p value from (_upper-_obs) pair
if _upper is not None and (_upper.key - _exp) < tolerance * _exp:
_exp = _upper.key
_exp_idx = _upper.val # The integer index for _upper (float cannot be dict keys!)
mlog_p_val = _poisson_stats[(_exp_idx, _obs)]
else:
# Else, calculate p value for _obs-_exp pair and store them in _calculated_values and _poisson_stats
_exp_idx = _calculated_values[_obs].insert(_exp) # insert to the binary tree and return an index
Poiss = poisson(_exp)
p_val = 1 - Poiss.cdf(_obs)
if 0 < p_val < 1:
mlog_p_val = - np.log10(p_val)
else: # Some p values are too small, -log(0) will return an error, so we use -1 to temporarily replace
mlog_p_val = -1
_poisson_stats[(_exp_idx, _obs)] = mlog_p_val
# stats_log.append([_exp, _obs, mlog_p_val])
else: # If _obs is not used before, generate a new binary tree _calculated_values[_obs]
_calculated_values[_obs] = AVLTree()
_exp_idx = _calculated_values[_obs].insert(_exp)
# calculate p value for _obs-_exp pair and store them in _calculated_values and _poisson_stats
Poiss = poisson(_exp)
p_val = 1 - Poiss.cdf(_obs)
if 0 < p_val < 1:
mlog_p_val = - np.log10(p_val)
else: # Some p values are too small, -log(0) will return an error, so we use -1 to temporarily replace
mlog_p_val = -1
_poisson_stats[(_exp_idx, _obs)] = mlog_p_val
# stats_log.append([_exp, _obs, mlog_p_val])
# Store enrichment score in new_mat
new_mat[y] = mlog_p_val
new_mat[new_mat < 0] = np.max(new_mat) # Replace all "-1"s with the largest -log(p)
return new_mat | bfb987bd2e2d0770d81f811ba2486893b62d269d | 16,710 |
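# A minimal sketch of the statistic computed above, assuming scipy.stats.poisson
# and numpy are available: each pixel's enrichment score is -log10 of the
# upper-tail Poisson p-value of the observed count given the expected count.
import numpy as np
from scipy.stats import poisson

_exp, _obs = 5.0, 12                 # hypothetical expected/observed counts
p_val = 1 - poisson(_exp).cdf(_obs)
mlog_p_val = -np.log10(p_val)        # larger score == stronger enrichment
assert mlog_p_val > 2                # P(X >= 13 | mean 5) is well below 1%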
def paginate(data, page=1, per_page=None):
"""Create a paginated response of the given query set.
Arguments:
data -- A flask_mongoengine.BaseQuerySet instance
"""
per_page = app.config['DEFAULT_PER_PAGE'] if not per_page else per_page
pagination_obj = data.paginate(page=page, per_page=per_page)
return {
'data': build_pagination_data(pagination_obj),
'meta': build_pagination_metadata(pagination_obj),
} | c5a692067e5f58a971762316c83bcfe6f75051bf | 16,711 |
def compute_mean_wind_dirs(res_path, dset, gids, fracs):
"""
Compute mean wind directions for given dset and gids
"""
with Resource(res_path) as f:
wind_dirs = np.radians(f[dset, :, gids])
sin = np.mean(np.sin(wind_dirs) * fracs, axis=1)
cos = np.mean(np.cos(wind_dirs) * fracs, axis=1)
mean_wind_dirs = np.degrees(np.arctan2(sin, cos))
mask = mean_wind_dirs < 0
mean_wind_dirs[mask] += 360
return mean_wind_dirs | bd3f91cc0f4b05f630d252f6026e3f27c56cd134 | 16,712 |
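# A small worked example of the circular (vector) mean used above: a naive
# average of 350° and 30° gives 190°, but averaging sin/cos components gives 10°.
import numpy as np

dirs = np.radians([350.0, 30.0])
mean_dir = np.degrees(np.arctan2(np.mean(np.sin(dirs)), np.mean(np.cos(dirs))))
if mean_dir < 0:
    mean_dir += 360
assert np.isclose(mean_dir, 10.0)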
import numpy
def plot_area_and_score(samples: SampleList, compound_name: str, include_none: bool = False):
"""
Plot the peak area and score for the compound with the given name
:param samples: A list of samples to plot on the chart
:param compound_name:
:param include_none: Whether samples where the compound was not found
should be plotted.
"""
peak_areas, scores = samples.get_areas_and_scores(compound_name, include_none)
fig, ax1 = plt.subplots()
y_positions = numpy.arange(len(peak_areas))
y_positions = [x * 1.5 for x in y_positions]
bar_width = 0.5
offset = bar_width / 2
area_y_pos = [x + offset for x in y_positions]
area_bar = ax1.barh(
area_y_pos,
list(peak_areas.values()),
label="Peak Area",
color="tab:orange",
height=bar_width,
)
ax1.set_xscale("log")
ax1.set_xlabel("Log10(Peak Area)")
ax2 = ax1.twiny()
score_scatter = ax2.scatter(list(scores.values()), area_y_pos, label="Score", color="tab:blue")
ax2.set_xlabel("Score")
ax1.barh([], [], label="Score", color="tab:blue", height=bar_width)
ax1.set_yticks(y_positions)
ax1.set_yticklabels(list(peak_areas.keys()))
fig.suptitle(f"Peak Area and Score for {compound_name}\n")
fig.set_size_inches(A4_landscape)
plt.tight_layout()
plt.subplots_adjust(top=0.9)
ax1.legend()
return fig, ax1, ax2 | cce2dd3c3fca742627dca5c893f498d83e0d7840 | 16,713 |
def get_strides(fm: NpuFeatureMap) -> NpuShape3D:
"""Calculates STRIDE_C/Y/X"""
if fm.strides is not None:
return fm.strides
elem_size = fm.data_type.size_in_bytes()
if fm.layout == NpuLayout.NHWC:
stride_c = elem_size
stride_x = fm.shape.depth * stride_c
stride_y = fm.shape.width * stride_x
else:
stride_x = 16 * elem_size
stride_c = stride_x * fm.shape.width
stride_y = elem_size * fm.shape.width * numeric_util.round_up(fm.shape.depth, 16)
return NpuShape3D(depth=stride_c, height=stride_y, width=stride_x) | e933fd3b06fb53e44b81bcb28341137a14990dec | 16,714 |
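# A worked sketch of the NHWC branch above (plain numbers, not the real
# NpuFeatureMap API): for an int8 feature map with depth 16 and width 8,
# the channel/x/row strides are 1, 16 and 128 bytes respectively.
elem_size, depth, width = 1, 16, 8
stride_c = elem_size            # step between channels
stride_x = depth * stride_c     # step between x positions
stride_y = width * stride_x     # step between rows
assert (stride_c, stride_x, stride_y) == (1, 16, 128)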
def gram_linear(x):
"""Compute Gram (kernel) matrix for a linear kernel.
Args:
x: A num_examples x num_features matrix of features.
Returns:
A num_examples x num_examples Gram matrix of examples.
"""
return x.dot(x.T) | f0a625d3ca6b846396c3c7c723b1bc8130a6c140 | 16,715 |
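# A tiny check of gram_linear(), assuming numpy arrays: entry (i, j) of the Gram
# matrix is the dot product between examples i and j.
import numpy as np

x = np.array([[1.0, 0.0], [0.0, 2.0]])
assert np.allclose(gram_linear(x), [[1.0, 0.0], [0.0, 4.0]])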
def to_feature(shape, properties={}):
"""
Create a GeoJSON Feature object for the given shapely.geometry :shape:.
Optionally give the Feature a :properties: dict.
"""
collection = to_feature_collection(shape)
feature = collection["features"][0]
feature["properties"] = properties
# remove some unecessary and redundant data
if "id" in feature:
del feature["id"]
if isinstance(shape, shapely.geometry.Point) and "bbox" in feature:
del feature["bbox"]
return dict(feature) | 39d8e7658ae2043c081d137f0a69ddd4344876fc | 16,716 |
def read_responses(file):
"""
Read dialogs from file
:param file: str, file path to the dataset
:return: list, a list of dialogue (context) contained in file
"""
with open(file, 'r') as f:
samples = f.read().split('<|endoftext|>')
samples = samples[1:] # responses = [i.strip() for i in f.readlines() if len(i.strip()) != 0]
return samples | e654a075622f04c3eca6c18e3d092593387ef237 | 16,717 |
def build_parametric_ev(data, onset, name, value, duration=None,
center=None, scale=None):
"""Make design info for a multi-column constant-value ev.
Parameters
----------
data : DataFrame
Input data; must have "run" column and any others specified.
onset : string
Column name containing event onset information.
name : string
Condition name to use for this ev.
value : string
Column name containing event amplitude information.
duration : string, float, or ``None``
Column name containing event duration information, or a value
to use for all events, or ``None`` to model events as impulses.
center : float, optional
Value to center the ``value`` column at before scaling. If absent,
center at the mean across runs.
scale : callable, optional
Function to scale the centered value column with.
Returns
-------
ev : DataFrame
Returned DataFrame will have "run", "onset", "duration", "value",
and "condition" columns.
"""
ev = data[["run", onset, value]].copy()
ev.columns = ["run", "onset", "value"]
# Center the event amplitude
if center is None:
ev["value"] -= ev.value.mean()
else:
ev["value"] = ev.value - center
# (Possibly) scale the event amplitude
if scale is not None:
ev["value"] = scale(ev["value"])
# Set a condition name for all events
ev["condition"] = name
# Determine the event duration
ev = _add_duration_information(data, ev, duration)
return ev | 47400052e2b2f4bf8217d9eaf71a83257180f5c4 | 16,718 |
import operator
import bisect
def time_aware_indexes(t, train_size, test_size, granularity, start_date=None):
"""Return a list of indexes that partition the list t by time.
Sorts the list of dates t before dividing into training and testing
partitions, ensuring a 'history-aware' split in the ensuing classification
task.
Args:
t (np.ndarray): Array of timestamp tags.
train_size (int): The training window size W (in τ).
test_size (int): The testing window size Δ (in τ).
granularity (str): The unit of time τ, used to denote the window size.
Acceptable values are 'year|quarter|month|week|day'.
start_date (date): The date to begin partitioning from (e.g. to align with
the start of the year).
Returns:
(list, list):
Indexing for the training partition.
List of indexings for the testing partitions.
"""
# Order the dates as well as their original positions
with_indexes = zip(t, range(len(t)))
ordered = sorted(with_indexes, key=operator.itemgetter(0))
# Split out the dates from the indexes
dates = [tup[0] for tup in ordered]
indexes = [tup[1] for tup in ordered]
# Get earliest date
start_date = utils.resolve_date(start_date) if start_date else ordered[0][0]
# Slice out training partition
boundary = start_date + get_relative_delta(train_size, granularity)
to_idx = bisect.bisect_left(dates, boundary)
train = indexes[:to_idx]
tests = []
# Slice out testing partitions
while to_idx < len(indexes):
boundary += get_relative_delta(test_size, granularity)
from_idx = to_idx
to_idx = bisect.bisect_left(dates, boundary)
tests.append(indexes[from_idx:to_idx])
return train, tests | 96e27c7a3f7284476d615a8d03f7c365f0406187 | 16,719 |
def send_invite_mail(invite, request):
"""
Send an email invitation to user not yet registered in the system.
:param invite: ProjectInvite object
:param request: HTTP request
:return: Number of emails sent (int)
"""
invite_url = build_invite_url(invite, request)
message = get_invite_body(
project=invite.project,
issuer=invite.issuer,
role_name=invite.role.name,
invite_url=invite_url,
date_expire_str=localtime(invite.date_expire).strftime(
'%Y-%m-%d %H:%M'
),
)
message += get_invite_message(invite.message)
message += get_email_footer()
subject = get_invite_subject(invite.project)
return send_mail(subject, message, [invite.email], request) | 4554bb6bea20e03749739026583d6215714febbf | 16,720 |
import os
import json
import google
def get_credentials(quota_project_id=None):
"""Obtain credentials object from json file and environment configuration."""
credentials_path = os.environ.get("GOOGLE_APPLICATION_CREDENTIALS")
with open(credentials_path, "r", encoding="utf8") as file_handle:
credentials_data = file_handle.read()
credentials_dict = json.loads(credentials_data)
if "client_email" in credentials_dict:
return service_account.Credentials.from_service_account_file(credentials_path)
if "audience" in credentials_dict:
return identity_pool.Credentials.from_info(credentials_dict)
return google.auth.default(quota_project_id=quota_project_id)[0] | b88c2de4adb034d7373fc8191ecc590c2201d344 | 16,721 |
def binary_n(total_N, min_n=50):
"""
Creates a list of values by successively halving the total length total_N
until the resulting value is less than min_n.
Non-integer results are rounded down.
Args:
total_N (int):
total length
Kwargs:
min_n (int):
minimal length after division
Returns:
list of integers:
total_N/2, total_N/4, total_N/8, ... until total_N/2^i < min_n
"""
max_exp = np.log2(1.0 * total_N / min_n)
max_exp = int(np.floor(max_exp))
return [int(np.floor(1.0 * total_N / (2**i))) for i in range(1, max_exp + 1)] | 240296c6024243da5750cb5aa7e64bea45ae91ca | 16,722 |
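# A worked example of binary_n(), assuming numpy is imported as np: halving 1000
# until the result would drop below min_n=50 gives 500, 250, 125 and 62.
assert binary_n(1000) == [500, 250, 125, 62]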
def thresholding(pred,label,thres):
""" Given the threshold return boolean matrix with 1 if > thres 0 if <= 1 """
conf =[]
for i in thres:
pr_th,lab_th = (pred>i),(label>i)
conf += confusion(pr_th,lab_th)
return np.array(conf).reshape(-1,4) | 97727a75b4f7648c82a095c7804709e9a52f13ed | 16,723 |
def unicode_test(request, oid):
"""Simple view to test funky characters from the database."""
funky = News.objects.using('livewhale').get(pk=oid)
return render(request, 'bridge/unicode.html', {'funky': funky}) | 8357d76bfc22fdc3f12176332a4b19fd3bfb79c9 | 16,724 |
from typing import Optional
from typing import Dict
from typing import Any
def _field_to_schema_object(field: BaseType, apistrap: Optional[Apistrap]) -> Optional[Dict[str, Any]]:
"""
Convert a field definition to OpenAPI 3 schema.
:param field: the field to be converted
:param apistrap: the extension used for adding reusable schema definitions
:return: a schema
"""
if isinstance(field, ModelType):
return _model_field_to_schema_object(field, apistrap)
elif isinstance(field, ListType):
if isinstance(field.field, ModelType):
return _model_array_to_schema_object(field, apistrap)
elif isinstance(field.field, BaseType):
return _primitive_array_to_schema_object(field)
elif isinstance(field, DictType):
if isinstance(field.field, ModelType):
return _model_dict_to_schema_object(field, apistrap)
elif isinstance(field.field, UnionType):
return _union_dict_to_schema_object(field, apistrap)
elif isinstance(field.field, ListType) and isinstance(field.field.field, ModelType):
return _dict_of_model_lists_to_schema_object(field, apistrap)
elif isinstance(field.field, BaseType):
return _primitive_dict_to_schema_object(field)
elif isinstance(field, StringType):
return _string_field_to_schema_object(field, apistrap)
elif isinstance(field, AnyType):
return {}
elif isinstance(field, UnionType):
return _union_field_to_schema_object(field, apistrap)
elif isinstance(field, DiscriminatedModelType):
return _discriminated_model_field_to_schema_object(field, apistrap)
elif isinstance(field, PolyModelType):
return _poly_model_field_to_schema_object(field, apistrap)
elif isinstance(field, BaseType):
return _primitive_field_to_schema_object(field)
return None | 1451f8795dc39d3168c141fd0ad8dd2615903163 | 16,725 |
import os
import subprocess
def linux_gcc_name():
"""Returns the name of the `gcc` compiler. Might happen that we are cross-compiling and the
compiler has a longer name.
Args:
None
Returns:
str: Name of the `gcc` compiler or None
"""
cc_env = os.getenv('CC')
if cc_env is not None:
if subprocess.Popen([cc_env, "--version"], stdout=subprocess.DEVNULL):
return cc_env
return "gcc" if subprocess.Popen(["gcc", "--version"], stdout=subprocess.DEVNULL) else None | 47cf0ee9af4b1b7c9169632b86bc9b5e1db96ad3 | 16,726 |
from typing import Dict
from typing import Any
def drop_test(robot, *, z_rot: float, min_torque: bool, initial_height: float = 1.) -> Dict[str, Any]:
"""Params which have been tested for this task:
nfe = 20, total_time = 1.0, vary_timestep_with=(0.8,1.2), 5 mins for solving.
If min_torque is True, quite a bit more time is needed as IPOPT refines things.
"""
nfe = len(robot.m.fe)
ncp = len(robot.m.cp)
tested_models = ('3D monoped', '3D biped',
'3D quadruped', '3D prismatic monoped')
if robot.name not in tested_models:
visual.warn(
f'This robot configuration ("{robot.name}") hasn\'t been tested!\n'
f'Tested models are: {tested_models}')
body = robot['base_B'] if robot.name == '3D quadruped' else robot['base']
# start at the origin
body['q'][1, ncp, 'x'].fix(0)
body['q'][1, ncp, 'y'].fix(0)
body['q'][1, ncp, 'z'].fix(initial_height)
# fix initial angle
for link in robot.links:
for ang in ('phi', 'theta'):
link['q'][1, ncp, ang].fix(0)
link['q'][1, ncp, 'psi'].fix(z_rot)
# start stationary
for link in robot.links:
for q in link.pyomo_sets['q_set']:
link['dq'][1, ncp, q].fix(0)
# init to y plane
for link in robot.links:
for ang in ('phi', 'theta'):
link['q'][:, :, ang].value = 0
link['q'][:, :, 'psi'].value = z_rot
# legs slightly forward at the end
uplopairs = (('upper', 'lower'),) if robot.name == '3D monoped' \
else (('UL', 'LL'), ('UR', 'LR')) if robot.name == '3D biped' \
else (('UFL', 'LFL'), ('UFR', 'LFR'), ('UBL', 'LBL'), ('UBR', 'LBR')) if robot.name == '3D quadruped' \
else tuple() # <- iterating over this will result in the body not being evaluated
for upper, lower in uplopairs:
ang = 0.01 if not (
robot.name == '3D quadruped' and upper[1] == 'B') else -0.01
robot[upper]['q'][nfe, ncp, 'theta'].setlb(ang)
robot[lower]['q'][nfe, ncp, 'theta'].setub(-ang)
# but not properly fallen over
body['q'][nfe, ncp, 'z'].setlb(0.2)
# objective: reduce CoT, etc
utils.remove_constraint_if_exists(robot.m, 'cost')
torque_cost = torque_squared_penalty(robot)
pen_cost = feet_penalty(robot)
robot.m.cost = Objective(expr=(torque_cost if min_torque else 0)
+ 1000*pen_cost)
return {'torque': torque_cost, 'penalty': pen_cost} | e6a070fd52356a314e5d2992d03fa61ead40f950 | 16,727 |
from typing import List
from typing import Union
from datetime import date
from datetime import datetime
from typing import Dict
from typing import Any
import httpx
from typing import cast
def get_user_list(
*, client: Client, an_enum_value: List[AnEnum], some_date: Union[date, datetime],
) -> Union[
List[AModel], HTTPValidationError,
]:
""" Get a list of things """
url = "{}/tests/".format(client.base_url)
headers: Dict[str, Any] = client.get_headers()
json_an_enum_value = []
for an_enum_value_item_data in an_enum_value:
an_enum_value_item = an_enum_value_item_data.value
json_an_enum_value.append(an_enum_value_item)
if isinstance(some_date, date):
json_some_date = some_date.isoformat()
else:
json_some_date = some_date.isoformat()
params: Dict[str, Any] = {
"an_enum_value": json_an_enum_value,
"some_date": json_some_date,
}
response = httpx.get(url=url, headers=headers, params=params,)
if response.status_code == 200:
return [AModel.from_dict(item) for item in cast(List[Dict[str, Any]], response.json())]
if response.status_code == 422:
return HTTPValidationError.from_dict(cast(Dict[str, Any], response.json()))
else:
raise ApiResponseError(response=response) | 77ca30fafb6c29f4cb04d25b52b7cca37e3ede04 | 16,728 |
def old_func5(self, x):
"""Summary.
Bizarre indentation.
"""
return x | 5bc9cdbc406fa49960613578296e81bdd4eeb771 | 16,729 |
def set_(
computer_policy=None,
user_policy=None,
cumulative_rights_assignments=True,
adml_language="en-US",
):
"""
Set a local server policy.
Args:
computer_policy (dict):
A dictionary of "policyname: value" pairs of computer policies to
set. 'value' should be how it is displayed in the gpedit GUI, i.e.
if a setting can be 'Enabled'/'Disabled', then that should be passed
Administrative Template data may require dicts within dicts, to
specify each element of the Administrative Template policy.
Administrative Templates policies are always cumulative.
Policy names can be specified in a number of ways based on the type
of policy:
Windows Settings Policies:
These policies can be specified using the GUI display name
or the key name from the _policy_info class in this module.
The GUI display name is also contained in the _policy_info
class in this module.
Administrative Template Policies:
These can be specified using the policy name as displayed in
the GUI (case sensitive). Some policies have the same name,
but a different location (for example, "Access data sources
across domains"). These can be differentiated by the "path"
in the GUI (for example, "Windows Components\\Internet
Explorer\\Internet Control Panel\\Security Page\\Internet
Zone\\Access data sources across domains").
Additionally, policies can be specified using the "name" and
"id" attributes from the ADMX files.
For Administrative Templates that have policy elements, each
element can be specified using the text string as seen in
the GUI or using the ID attribute from the ADMX file. Due to
the way some of the GUI text is laid out, some policy
element names could include descriptive text that appears
before the policy element in the GUI.
Use the get_policy_info function for the policy name to view
the element ID/names that the module will accept.
user_policy (dict):
The same setup as the computer_policy, except with data to configure
the local user policy.
cumulative_rights_assignments (bool):
Determine how user rights assignment policies are configured.
If True, user right assignment specifications are simply added to
the existing policy
If False, only the users specified will get the right (any existing
will have the right revoked)
adml_language (str):
The language files to use for looking up Administrative Template
policy data (i.e. how the policy is displayed in the GUI). Defaults
to 'en-US' (U.S. English).
Returns:
bool: True is successful, otherwise False
CLI Example:
.. code-block:: bash
salt '*' lgpo.set computer_policy="{'LockoutDuration': 2, 'RestrictAnonymous': 'Enabled', 'AuditProcessTracking': 'Success, Failure'}"
"""
if computer_policy and not isinstance(computer_policy, dict):
raise SaltInvocationError("computer_policy must be specified as a dict")
if user_policy and not isinstance(user_policy, dict):
raise SaltInvocationError("user_policy must be specified as a dict")
policies = {}
policies["User"] = user_policy
policies["Machine"] = computer_policy
if policies:
adml_policy_resources = _get_policy_resources(language=adml_language)
for p_class in policies:
_secedits = {}
_netshs = {}
_advaudits = {}
_modal_sets = {}
_admTemplateData = {}
_regedits = {}
_lsarights = {}
_policydata = _policy_info()
if policies[p_class]:
for policy_name in policies[p_class]:
_pol = None
policy_key_name = policy_name
if policy_name in _policydata.policies[p_class]["policies"]:
_pol = _policydata.policies[p_class]["policies"][policy_name]
else:
# Case-sensitive search first
for policy in _policydata.policies[p_class]["policies"]:
_p = _policydata.policies[p_class]["policies"][policy][
"Policy"
]
if _p == policy_name:
_pol = _policydata.policies[p_class]["policies"][policy]
policy_key_name = policy
if _pol is None:
# Still not found, case-insensitive search
for policy in _policydata.policies[p_class]["policies"]:
_p = _policydata.policies[p_class]["policies"][policy][
"Policy"
]
# Case-insensitive comparison
if _p.lower() == policy_name.lower():
_pol = _policydata.policies[p_class]["policies"][
policy
]
policy_key_name = policy
if _pol:
# transform and validate the setting
_value = _transform_value(
value=policies[p_class][policy_name],
policy=_policydata.policies[p_class]["policies"][
policy_key_name
],
transform_type="Put",
)
if not _validateSetting(
value=_value,
policy=_policydata.policies[p_class]["policies"][
policy_key_name
],
):
raise SaltInvocationError(
"The specified value {} is not an acceptable setting"
" for policy {}.".format(
policies[p_class][policy_name], policy_name
)
)
if "Registry" in _pol:
# set value in registry
log.trace("%s is a registry policy", policy_name)
_regedits[policy_name] = {"policy": _pol, "value": _value}
elif "Secedit" in _pol:
# set value with secedit
log.trace("%s is a Secedit policy", policy_name)
if _pol["Secedit"]["Section"] not in _secedits:
_secedits[_pol["Secedit"]["Section"]] = []
_secedits[_pol["Secedit"]["Section"]].append(
" ".join([_pol["Secedit"]["Option"], "=", str(_value)])
)
elif "NetSH" in _pol:
# set value with netsh
log.trace("%s is a NetSH policy", policy_name)
_netshs.setdefault(
policy_name,
{
"profile": _pol["NetSH"]["Profile"],
"section": _pol["NetSH"]["Section"],
"option": _pol["NetSH"]["Option"],
"value": str(_value),
},
)
elif "AdvAudit" in _pol:
# set value with advaudit
_advaudits.setdefault(
policy_name,
{
"option": _pol["AdvAudit"]["Option"],
"value": str(_value),
},
)
elif "NetUserModal" in _pol:
# set value via NetUserModal
log.trace("%s is a NetUserModal policy", policy_name)
if _pol["NetUserModal"]["Modal"] not in _modal_sets:
_modal_sets[_pol["NetUserModal"]["Modal"]] = {}
_modal_sets[_pol["NetUserModal"]["Modal"]][
_pol["NetUserModal"]["Option"]
] = _value
elif "LsaRights" in _pol:
log.trace("%s is a LsaRights policy", policy_name)
_lsarights[policy_name] = {"policy": _pol, "value": _value}
else:
_value = policies[p_class][policy_name]
log.trace('searching for "%s" in admx data', policy_name)
(
success,
the_policy,
policy_name_list,
msg,
) = _lookup_admin_template(
policy_name=policy_name,
policy_class=p_class,
adml_language=adml_language,
)
if success:
policy_name = the_policy.attrib["name"]
policy_namespace = the_policy.nsmap[the_policy.prefix]
if policy_namespace not in _admTemplateData:
_admTemplateData[policy_namespace] = {}
_admTemplateData[policy_namespace][policy_name] = _value
else:
raise SaltInvocationError(msg)
if (
policy_namespace
and policy_name in _admTemplateData[policy_namespace]
and the_policy is not None
):
log.trace(
"setting == %s",
str(
_admTemplateData[policy_namespace][policy_name]
).lower(),
)
log.trace(
str(
_admTemplateData[policy_namespace][policy_name]
).lower()
)
if (
str(
_admTemplateData[policy_namespace][policy_name]
).lower()
!= "disabled"
and str(
_admTemplateData[policy_namespace][policy_name]
).lower()
!= "not configured"
):
if ELEMENTS_XPATH(the_policy):
if isinstance(
_admTemplateData[policy_namespace][policy_name],
dict,
):
for elements_item in ELEMENTS_XPATH(the_policy):
for child_item in elements_item:
# check each element
log.trace(
"checking element %s",
child_item.attrib["id"],
)
temp_element_name = None
this_element_name = _getFullPolicyName(
policy_item=child_item,
policy_name=child_item.attrib["id"],
return_full_policy_names=True,
adml_language=adml_language,
)
log.trace(
'id attribute == "%s" '
' this_element_name == "%s"',
child_item.attrib["id"],
this_element_name,
)
if (
this_element_name
in _admTemplateData[
policy_namespace
][policy_name]
):
temp_element_name = (
this_element_name
)
elif (
child_item.attrib["id"]
in _admTemplateData[
policy_namespace
][policy_name]
):
temp_element_name = (
child_item.attrib["id"]
)
else:
raise SaltInvocationError(
'Element "{}" must be included'
" in the policy configuration"
" for policy {}".format(
this_element_name,
policy_name,
)
)
if (
"required" in child_item.attrib
and child_item.attrib[
"required"
].lower()
== "true"
):
if not _admTemplateData[
policy_namespace
][policy_name][temp_element_name]:
raise SaltInvocationError(
'Element "{}" requires a value '
"to be specified".format(
temp_element_name
)
)
if (
etree.QName(child_item).localname
== "boolean"
):
if not isinstance(
_admTemplateData[
policy_namespace
][policy_name][
temp_element_name
],
bool,
):
raise SaltInvocationError(
"Element {} requires a boolean "
"True or False".format(
temp_element_name
)
)
elif (
etree.QName(child_item).localname
== "decimal"
or etree.QName(child_item).localname
== "longDecimal"
):
min_val = 0
max_val = 9999
if "minValue" in child_item.attrib:
min_val = int(
child_item.attrib[
"minValue"
]
)
if "maxValue" in child_item.attrib:
max_val = int(
child_item.attrib[
"maxValue"
]
)
if (
int(
_admTemplateData[
policy_namespace
][policy_name][
temp_element_name
]
)
< min_val
or int(
_admTemplateData[
policy_namespace
][policy_name][
temp_element_name
]
)
> max_val
):
raise SaltInvocationError(
'Element "{}" value must be between '
"{} and {}".format(
temp_element_name,
min_val,
max_val,
)
)
elif (
etree.QName(child_item).localname
== "enum"
):
# make sure the value is in the enumeration
found = False
for enum_item in child_item:
if (
_admTemplateData[
policy_namespace
][policy_name][
temp_element_name
]
== _getAdmlDisplayName(
adml_policy_resources,
enum_item.attrib[
"displayName"
],
).strip()
):
found = True
break
if not found:
raise SaltInvocationError(
'Element "{}" does not have'
" a valid value".format(
temp_element_name
)
)
elif (
etree.QName(child_item).localname
== "list"
):
if (
"explicitValue"
in child_item.attrib
and child_item.attrib[
"explicitValue"
].lower()
== "true"
):
if not isinstance(
_admTemplateData[
policy_namespace
][policy_name][
temp_element_name
],
dict,
):
raise SaltInvocationError(
"Each list item of element "
'"{}" requires a dict '
"value".format(
temp_element_name
)
)
elif not isinstance(
_admTemplateData[
policy_namespace
][policy_name][
temp_element_name
],
list,
):
raise SaltInvocationError(
'Element "{}" requires a'
" list value".format(
temp_element_name
)
)
elif (
etree.QName(child_item).localname
== "multiText"
):
if not isinstance(
_admTemplateData[
policy_namespace
][policy_name][
temp_element_name
],
list,
):
raise SaltInvocationError(
'Element "{}" requires a'
" list value".format(
temp_element_name
)
)
_admTemplateData[policy_namespace][
policy_name
][
child_item.attrib["id"]
] = _admTemplateData[
policy_namespace
][
policy_name
].pop(
temp_element_name
)
else:
raise SaltInvocationError(
'The policy "{}" has elements which must be'
" configured".format(policy_name)
)
else:
if (
str(
_admTemplateData[policy_namespace][
policy_name
]
).lower()
!= "enabled"
):
raise SaltInvocationError(
'The policy {} must either be "Enabled", '
'"Disabled", or "Not Configured"'.format(
policy_name
)
)
if _regedits:
for regedit in _regedits:
log.trace("%s is a Registry policy", regedit)
# if the value setting is None or "(value not set)", we will delete the value from the registry
if (
_regedits[regedit]["value"] is not None
and _regedits[regedit]["value"] != "(value not set)"
):
_ret = __utils__["reg.set_value"](
_regedits[regedit]["policy"]["Registry"]["Hive"],
_regedits[regedit]["policy"]["Registry"]["Path"],
_regedits[regedit]["policy"]["Registry"]["Value"],
_regedits[regedit]["value"],
_regedits[regedit]["policy"]["Registry"]["Type"],
)
else:
_ret = __utils__["reg.read_value"](
_regedits[regedit]["policy"]["Registry"]["Hive"],
_regedits[regedit]["policy"]["Registry"]["Path"],
_regedits[regedit]["policy"]["Registry"]["Value"],
)
if _ret["success"] and _ret["vdata"] != "(value not set)":
_ret = __utils__["reg.delete_value"](
_regedits[regedit]["policy"]["Registry"]["Hive"],
_regedits[regedit]["policy"]["Registry"]["Path"],
_regedits[regedit]["policy"]["Registry"]["Value"],
)
if not _ret:
raise CommandExecutionError(
"Error while attempting to set policy {} via the"
" registry. Some changes may not be applied as"
" expected".format(regedit)
)
if _lsarights:
for lsaright in _lsarights:
_existingUsers = None
if not cumulative_rights_assignments:
_existingUsers = _getRightsAssignments(
_lsarights[lsaright]["policy"]["LsaRights"]["Option"]
)
if _lsarights[lsaright]["value"]:
for acct in _lsarights[lsaright]["value"]:
_ret = _addAccountRights(
acct,
_lsarights[lsaright]["policy"]["LsaRights"][
"Option"
],
)
if not _ret:
raise SaltInvocationError(
"An error occurred attempting to configure the"
" user right {}.".format(lsaright)
)
if _existingUsers:
for acct in _existingUsers:
if acct not in _lsarights[lsaright]["value"]:
_ret = _delAccountRights(
acct,
_lsarights[lsaright]["policy"]["LsaRights"][
"Option"
],
)
if not _ret:
raise SaltInvocationError(
"An error occurred attempting to remove previously "
"configured users with right {}.".format(
lsaright
)
)
if _secedits:
# we've got secedits to make
log.trace(_secedits)
ini_data = "\r\n".join(["[Unicode]", "Unicode=yes"])
_seceditSections = [
"System Access",
"Event Audit",
"Registry Values",
"Privilege Rights",
]
for _seceditSection in _seceditSections:
if _seceditSection in _secedits:
ini_data = "\r\n".join(
[
ini_data,
"".join(["[", _seceditSection, "]"]),
"\r\n".join(_secedits[_seceditSection]),
]
)
ini_data = "\r\n".join(
[ini_data, "[Version]", 'signature="$CHICAGO$"', "Revision=1"]
)
log.trace("ini_data == %s", ini_data)
if not _write_secedit_data(ini_data):
raise CommandExecutionError(
"Error while attempting to set policies via "
"secedit. Some changes may not be applied as "
"expected"
)
if _netshs:
# we've got netsh settings to make
for setting in _netshs:
log.trace("Setting firewall policy: %s", setting)
log.trace(_netshs[setting])
_set_netsh_value(**_netshs[setting])
if _advaudits:
# We've got AdvAudit settings to make
for setting in _advaudits:
log.trace("Setting Advanced Audit policy: %s", setting)
log.trace(_advaudits[setting])
_set_advaudit_value(**_advaudits[setting])
if _modal_sets:
# we've got modalsets to make
log.trace(_modal_sets)
for _modal_set in _modal_sets:
try:
_existingModalData = win32net.NetUserModalsGet(
None, _modal_set
)
_newModalSetData = dictupdate.update(
_existingModalData, _modal_sets[_modal_set]
)
log.trace("NEW MODAL SET = %s", _newModalSetData)
_ret = win32net.NetUserModalsSet(
None, _modal_set, _newModalSetData
)
# TODO: This needs to be more specific
except Exception as exc: # pylint: disable=broad-except
msg = (
"An unhandled exception occurred while "
"attempting to set policy via "
"NetUserModalSet\n{}".format(exc)
)
log.exception(msg)
raise CommandExecutionError(msg)
if _admTemplateData:
_ret = False
log.trace(
"going to write some adm template data :: %s", _admTemplateData
)
_ret = _writeAdminTemplateRegPolFile(
_admTemplateData,
adml_language=adml_language,
registry_class=p_class,
)
if not _ret:
raise CommandExecutionError(
"Error while attempting to write Administrative Template"
" Policy data. Some changes may not be applied as expected"
)
return True
else:
raise SaltInvocationError("You have to specify something!") | ee000c61cf4eb0653367340d87160b2e8e0f09d2 | 16,730 |
def get_dotenv_variable(var_name: str) -> str:
""" """
try:
return config.get(var_name)
except KeyError:
error_msg = f"{var_name} not found!\nSet the '{var_name}' environment variable"
raise ImproperlyConfigured(error_msg) | e3a06f3a439f5eb238688805985fb54eea7221e4 | 16,731 |
def load_dataset():
"""
Create a PyTorch Dataset for the images.
Notes
-----
- See https://discuss.pytorch.org/t/computing-the-mean-and-std-of-dataset/34949
"""
transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.9720, 0.9720, 0.9720),
(0.1559, 0.1559, 0.1559)) # Normalize with the mean and std of the whole dataset
])
dataset = ImageFolder(root='images', transform=transform)
return dataset | 204e33cb7cb79ef81349b083e21ed6779c04dad0 | 16,732 |
import sys
def load():
"""Load list of available storage managers."""
storage_type = config.get("rights", "type")
if storage_type == "custom":
rights_module = config.get("rights", "custom_handler")
__import__(rights_module)
module = sys.modules[rights_module]
else:
root_module = __import__("rights.regex", globals=globals(), level=2)
module = root_module.regex
sys.modules[__name__].authorized = module.authorized
return module | f65c76af008c4cf2d41347d84815921d6897edc7 | 16,733 |
def vote(pred1, pred2, pred3=None):
"""Hard voting for the ensembles"""
vote_ = []
index = []
if pred3 is None:
mean = np.mean([pred1, pred2], axis=0)
for s, x in enumerate(mean):
if x == 1 or x == 0:
vote_.append(int(x))
else:
vote_.append(pred2[s])
index.append(s)
else:
mean = np.mean([pred1, pred2, pred3], axis=0)
for s, x in enumerate(mean):
if x == 1 or x == 0:
vote_.append(int(x))
elif x > 0.5:
vote_.append(1)
index.append(s)
else:
vote_.append(0)
index.append(s)
return mean, vote_, index | a24858c0f14fe51a70ff2c0146a4e1aa3a2afdeb | 16,734 |
from pathlib import Path
def generate_master_flat(
science_frame : CCDData,
bias_path : Path,
dark_path : Path,
flat_path : Path,
use_cache : bool=True
) -> CCDData:
"""
"""
cache_path = generate_cache_path(science_frame, flat_path) / 'flat'
if use_cache and cache_path.is_dir():
flat_frames = ccdp.ImageFileCollection(location=cache_path)
else:
cache_path.mkdir(parents=True, exist_ok=True)
flat_frames = calibrate_flat(science_frame=science_frame, bias_path=bias_path, dark_path=dark_path, flat_path=flat_path, output_path=cache_path)
ccd = select_flat_frame(science_frame=science_frame, flat_frames=flat_frames)
return ccd | 5448e6f95e1d2c9249ac9419c767e675dc424c5b | 16,735 |
def natrix_mqttclient(client_id):
"""Generate a natrix mqtt client.
This function encapsulates all configurations about natrix mqtt client.
Include:
- client_id
The unique id about mqtt connection.
- username & password
Username is device serial number which used to identify who am I;
:return:
"""
client = NatrixMQTTClient(client_id)
return client | 20960873f265068aff035ec554b880fa93c49e32 | 16,736 |
def insert_singletons(words, singletons, p=0.5):
"""
Replace singletons by the unknown word with a probability p.
"""
new_words = []
for word in words:
if word in singletons and np.random.uniform() < p:
new_words.append(0)
else:
new_words.append(word)
return new_words | c99e9ed38287c175cd97f9cec0c0f8fb8f3629a7 | 16,737 |
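# A small sketch of insert_singletons(), assuming numpy is imported as np: with
# p=1.0 every singleton word id is replaced by 0 (the unknown-word index).
words = [5, 17, 8, 17]
assert insert_singletons(words, singletons={8}, p=1.0) == [5, 17, 0, 17]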
def talk(text, is_yelling=False, trim=False, verbose=True):
"""
Prints text
is_yelling capitalizes text
trim - trims whitespace from both ends
verbose - if you want to print something on screen
returns transformed text
"""
if trim:
text = text.strip()
if is_yelling:
text = text.upper()
if verbose:
print(text) # printing is considered a side effect inside a function
return text | 22728a877460b4504653e2a0ea9ecdf81fa422f9 | 16,738 |
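# A quick check of talk(): trimming and yelling are applied before the optional
# print, and the transformed text is returned.
assert talk("  hello  ", is_yelling=True, trim=True, verbose=False) == "HELLO"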
def getNorthPoleAngle(target, position, C, B, camera):
"""
Get the angle the target's north pole makes with the image y-axis, in radians.
"""
# get target spin axis
# the last row of the matrix is the north pole vector, *per spice docs*
# seems correct, as it's nearly 0,0,1
Bz = B[2]
print('Bz=north pole spin axis', Bz)
# get target radius, km
nvalues, radii = spice.bodvrd(target, 'RADII', 3)
targetRadiusEquator = (radii[0] + radii[1]) / 2
targetRadiusPoles = radii[2]
targetRadius = sum(radii) / 3
# flatteningCoefficient = (targetRadiusEquator - targetRadiusPoles) / targetRadiusEquator
# print('target radius in km', targetRadius)
# get north pole location
positionNP = position + targetRadius * Bz
print('positionNP=north pole in world coords', positionNP)
# get target position in camera space
c = np.dot(C, position)
cNP = np.dot(C, positionNP)
print('c=position in camera space', c)
print('cNP=north pole in camera space', cNP)
# get camera fov and focal length
fovDegrees = config.cameraFOVs[camera] # 0.424 or 3.169 deg
fovRadians = fovDegrees * math.pi / 180
f = 1.0 / math.tan(fovRadians/2) # focal length (relative to screen halfwidth of 1.0)
print('f=focal length', f)
# get camera-to-screen matrix S
cz = c[2]
fz = f/cz
# print('fz=f/cz', fz)
S = np.array([[fz,0,0],[0,fz,0]])
# get screen coordinate (-1 to 1, -1 to 1)
s = np.dot(S, c)
sNP = np.dot(S, cNP)
# ie sx=cx*f/cz; sy=cy*f/cz
print('s=screen space (-1 to 1)', s)
print('sNP=screen space north pole (-1 to 1)', sNP)
# get angle between north pole and image y-axis
npDelta = sNP-s
npRadians = math.atan(npDelta[0]/npDelta[1])
npAngle = npRadians * 180/math.pi
print('npAngle', npAngle)
return npRadians | cf6d79ef3af005a170694d5fe00b93e9dd2665dd | 16,739 |
def ray_casting(polygon, ray_line):
""" checks number of intersection a ray makes with polygon
parameters: Polygon, ray (line)
output: number of intersection
"""
vertex_num = polygon.get_count()
ray_casting_result = [False] * vertex_num
''' count of vertices that are colinear with the ray and intersect it '''
vertex_colinear_intersect_with_ray = 0
cursor = polygon.head
for index in range(vertex_num):
edge = LineSegment(cursor.data, cursor.next.data)
ray_casting_result[index] = does_lines_intersect(edge, ray_line)
cursor = cursor.next
''' added to check whether vertex is colinear with ray '''
if is_vertex_colinear(ray_line, cursor.data) and ray_casting_result[index]:
vertex_colinear_intersect_with_ray = vertex_colinear_intersect_with_ray + 1
# print(ray_casting_result)
# print(vertex_colinear_intersect_with_ray)
''' adjusted for colinear vertices '''
return ray_casting_result.count(True) - vertex_colinear_intersect_with_ray | 004c73fbef35bec5af35b6b93cc5b2bdb2e40f33 | 16,740 |
import os
import subprocess
import re
import glob
import shutil
from pathlib import Path
def main_install(args, config=None):
"""
Main function for the 'install' command.
"""
if not config:
# Load configuration file
config = autokernel.config.load_config(args.autokernel_config)
# Use correct umask when installing
saved_umask = os.umask(config.install.umask.value)
# Mount
new_mounts = []
for i in config.install.mount:
if not os.access(i, os.R_OK):
log.die("Permission denied on accessing '{}'. Aborting.".format(i))
if not os.path.ismount(i):
log.info("Mounting {}".format(i))
new_mounts.append(i)
try:
subprocess.run(['mount', '--', i], check=True)
except subprocess.CalledProcessError as e:
log.die("Could not mount '{}', mount returned code {}. Aborting.".format(i, e.returncode))
# Check mounts
for i in config.install.mount + config.install.assert_mounted:
if not os.access(i, os.R_OK):
log.die("Permission denied on accessing '{}'. Aborting.".format(i))
if not os.path.ismount(i):
log.die("'{}' is not mounted. Aborting.".format(i))
# Execute pre hook
execute_command(args, 'install.hooks.pre', config.install.hooks.pre, replace_common_vars)
kernel_version = autokernel.kconfig.get_kernel_version(args.kernel_dir)
target_dir = replace_common_vars(args, config.install.target_dir)
# Config output is "{KERNEL_DIR}/.config"
config_output = os.path.join(args.kernel_dir, '.config.autokernel')
# Initramfs basename "initramfs-{KERNEL_VERSION}.cpio"
# The .cpio suffix is cruical, as the kernel makefile requires it to detect initramfs archives
initramfs_basename = 'initramfs-{}.cpio'.format(kernel_version)
# Initramfs output is "{KERNEL_DIR}/initramfs-{KERNEL_VERSION}.cpio"
initramfs_output = os.path.join(args.kernel_dir, initramfs_basename)
# bzImage output
bzimage_output = os.path.join(args.kernel_dir, 'arch', autokernel.kconfig.get_uname_arch(), 'boot/bzImage')
def _purge_old(path):
keep_old = config.install.keep_old.value
# Disable purging on negative count
if keep_old < 0:
return
# Disable purging for non versionated paths
if '{KERNEL_VERSION}' not in path:
return
tokens = path.split('{KERNEL_VERSION}')
if len(tokens) > 2:
log.warn("Cannot purge path with more than one {{KERNEL_VERSION}} token: '{}'".format(path))
return
re_semver = re.compile(r'^[\d\.]+\d')
def _version_sorter(i):
suffix = i[len(tokens[0]):]
basename = suffix.split('/')[0]
st = os.stat(i)
try:
time_create = st.st_birthtime
except AttributeError:
time_create = st.st_mtime
semver = re_semver.match(basename).group()
val = autokernel.config.semver_to_int(semver)
return val, time_create
escaped_kv = re.escape('{KERNEL_VERSION}')
# matches from {KERNEL_VERSION} until first / exclusive in an regex escaped path
match_basename = re.compile(re.escape(escaped_kv) + r"(.+?(?=\\\/|$)).*$")
# derive regex to check if a valid semver is contained and prefix and suffix are given
re_match_valid_paths = re.compile('^' + match_basename.sub(lambda m: r'[0-9]+(\.[0-9]+(\.[0-9]+)?)?(-[^\/]*)?' + m.group(1) + r'.*$', re.escape(path)))
# matches from {KERNEL_VERSION} until first / exclusive in a normal path
re_replace_wildcard = re.compile(escaped_kv + r"[^\/]*")
# replace {KERNEL_VERSION}-* component with *
wildcard_path = re_replace_wildcard.sub('*', glob.escape(path))
# sort out paths that don't contain valid semvers
valid_globbed = [i for i in glob.glob(wildcard_path) if re_match_valid_paths.match(i)]
for i in sorted(valid_globbed, key=_version_sorter)[:-(keep_old + 1)]:
# For security, we will not call rmtree on a path that doesn't end with a slash,
# or if the realpath has less then two slash characters in it.
# Otherwise we only call unlink
if i[-1] == '/' and os.path.realpath(i).count('/') >= 2:
try:
shutil.rmtree(i)
except OSError as e:
log.warn("Could not remove {}: {}".format(i, str(e)))
else:
try:
os.unlink(i)
except IOError as e:
log.warn("Could not remove {}: {}".format(i, str(e)))
def _move_to_old(path):
re_old_suffix = re.compile(r'^.*\.old(\.\d+)?\/*$')
dst = path + '.old'
highest_num = -1
for i in glob.glob(glob.escape(dst) + '*'):
m = re_old_suffix.match(i)
old_num = int((m.group(1) or '.0')[1:]) if m else 0
if highest_num < old_num:
highest_num = old_num
if highest_num >= 0:
dst += ".{:d}".format(highest_num + 1)
shutil.move(path, dst)
def _install(name, src, target_var):
# If the target is disabled, return.
if not target_var:
return
# Figure out destination, and move existing filed if necessary
dst = os.path.join(target_dir, replace_common_vars(args, target_var))
if os.path.exists(dst):
_move_to_old(dst)
# Create directory if it doesn't exist
Path(os.path.dirname(dst)).mkdir(parents=True, exist_ok=True)
log.info("Installing {:<11s} {}".format(name + ':', dst))
# Install target file
shutil.copyfile(src, dst)
# Purge old files
_purge_old(os.path.join(target_dir, str(target_var)))
# Move target_dir, if it is dynamic
if '{KERNEL_VERSION}' in str(config.install.target_dir) and os.path.exists(target_dir):
_move_to_old(os.path.realpath(target_dir))
# Load symbols from Kconfig
kconfig = autokernel.kconfig.load_kconfig(args.kernel_dir)
sym_modules = kconfig.syms['MODULES']
# Install modules
if config.install.modules_prefix and sym_modules.str_value != 'n':
modules_prefix = str(config.install.modules_prefix)
modules_prefix_with_lib = os.path.join(modules_prefix, "lib/modules")
modules_dir = os.path.join(modules_prefix_with_lib, kernel_version)
if os.path.exists(modules_dir):
_move_to_old(os.path.realpath(modules_dir))
log.info("Installing modules: {}".format(modules_prefix_with_lib))
install_modules(args, prefix=modules_prefix)
_purge_old(modules_prefix_with_lib + "/{KERNEL_VERSION}/")
# Install targets
_install('bzimage', bzimage_output, config.install.target_kernel)
_install('config', config_output, config.install.target_config)
if config.initramfs.enabled:
_install('initramfs', initramfs_output, config.install.target_initramfs)
# Purge old target_dirs (will only be done if it is dynamic)
_purge_old(str(config.install.target_dir) + '/')
# Execute post hook
execute_command(args, 'install.hooks.post', config.install.hooks.post, replace_common_vars)
# Undo what we have mounted
for i in reversed(new_mounts):
log.info("Unmounting {}".format(i))
try:
subprocess.run(['umount', '--', i], check=True)
except subprocess.CalledProcessError as e:
log.warn("Could not umount '{}' (returned {})".format(i, e.returncode))
# Restore old umask
os.umask(saved_umask) | 647e2558532274d061c823682e831f928aafaadc | 16,741 |
import logging
import os
import numpy as np
from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_score
from tqdm import tqdm
def kmeans_clustering_missing(reduced_components, output_path,
n_clusters=2, max_iter=10):
"""
Performs a K-means clustering with missing data.
:param reduced_components: reduced components matrix
:type reduced_components: np.ndarray
:param output_path: path to output directory
:type output_path: str
:param n_clusters: number of clusters
:type n_clusters: int
:param max_iter: maximum iterations for convergence
:type max_iter: int
:return: clustered array, centroids of clusters, filled matrix
:rtype: np.ndarray, list, np.ndarray
"""
logging.basicConfig(filename=os.path.join(output_path,
'clustering_FC_states_missing.log'),
level=logging.INFO)
# Initialize missing values to their column means
missing = ~np.isfinite(reduced_components)
mu = np.nanmean(reduced_components, axis=0)
X_filled = np.where(missing, mu, reduced_components)
for i in tqdm(range(max_iter)):
if i > 0:
# k means with previous centroids
cls = KMeans(n_clusters, init=prev_centroids)
else:
# do multiple random initializations in parallel
cls = KMeans(n_clusters, n_jobs=-1)
# perform clustering on the filled-in data
labels = cls.fit_predict(X_filled)
centroids = cls.cluster_centers_
# fill in the missing values based on their cluster centroids
X_filled[missing] = centroids[labels][missing]
# when the labels have stopped changing then we have converged
if i > 0 and np.all(labels == prev_labels):
break
prev_labels = labels
prev_centroids = cls.cluster_centers_
# perform the silhouette analysis as a metric for the clustering model
silhouette_avg = silhouette_score(X_filled, cls.labels_,
sample_size=300)
logging.info('For n_clusters = {}, the average silhouette score is: {}'
.format(n_clusters, silhouette_avg))
logging.info('For n_clusters = {}, the cluster centers are: {} and the '
'sum of squared distances of samples to their closest '
'cluster center are: {}'.format(n_clusters, centroids,
cls.inertia_))
np.savez(os.path.join(output_path, 'clustered_matrix'), labels)
return labels, centroids, X_filled | f6e5310982526b581a99ef2961ea8c5b30008f04 | 16,742 |
def ErrorWrapper(err, resource_name):
"""Wraps http errors to handle resources names with more than 4 '/'s.
Args:
err: An apitools.base.py.exceptions.HttpError.
resource_name: The requested resource name.
Returns:
A googlecloudsdk.api_lib.util.exceptions.HttpException.
"""
exc = exceptions.HttpException(err)
if exc.payload.status_code == 404:
# status_code specific error message
exc.error_format = ('{{api_name}}: {resource_name} not found.').format(
resource_name=resource_name)
else:
# override default error message
exc.error_format = ('Unknown error. Status code {status_code}.')
return exc | ebcce6241f88d0fa4f093f6823d0ccb9ae1bd431 | 16,743 |
def get_str_cmd(cmd_lst):
"""Returns a string with the command to execute"""
params = []
for param in cmd_lst:
if len(param) > 12:
params.append('"{p}"'.format(p=param))
else:
params.append(param)
return ' '.join(params) | a7cc28293eb381604112265a99b9c03e762c2f2c | 16,744 |
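# Usage sketch with hypothetical arguments: items longer than 12 characters are
# quoted for display, shorter ones are passed through as-is.
assert get_str_cmd(['ls', '-la', '/a/rather/long/path']) == 'ls -la "/a/rather/long/path"'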
def calculate_score(arr):
    """Return the blackjack score of a hand.
    A two-card hand summing to 21 (ace + ten-card) is a blackjack and is reported as 0.
    If the score exceeds 21 while the hand holds an ace counted as 11, that ace is
    re-counted as 1 before the sum is returned."""
if sum(arr) == 21 and len(arr) == 2:
return 0 # represents blackjack
if sum(arr) > 21 and 11 in arr:
arr.remove(11)
arr.append(1)
return sum(arr) | 0890c55068b8a92d9f1f577ccf2c5a770f7887d4 | 16,745 |
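# Usage sketch with hypothetical hands: 0 marks a blackjack, and an ace drops from 11 to 1
# when the hand would otherwise bust.
assert calculate_score([11, 10]) == 0      # ace + ten on two cards -> blackjack
assert calculate_score([11, 9, 5]) == 15   # 25 busts, so the ace counts as 1
assert calculate_score([10, 5, 4]) == 19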
def tt_true(alpha):
"""Is the propositional sentence alpha a tautology? (alpha will be
coerced to an expr.)
>>> tt_true(expr("(P >> Q) <=> (~P | Q)"))
True
"""
return tt_entails(TRUE, expr(alpha)) | 91ca0d445407f50d4b985e16428dfeb7f1e1b5a2 | 16,746 |
from e2e_simulators import webbpsf_imaging as webbim
import time
import os
def contrast_jwst_ana_num(matdir, matrix_mode="analytical", rms=1. * u.nm, im_pastis=False, plotting=False):
"""
Calculate the contrast for an RMS WFE with image PASTIS, matrix PASTIS
:param matdir: data directory to use for matrix and calibration coefficients from
:param matrix_mode: use 'analytical or 'numerical' matrix
:param rms: RMS wavefront error in pupil to calculate contrast for; in NANOMETERS
:param im_pastis: default False, whether to also calculate contrast from image PASTIS
:param plotting: default False, whether to save E2E and PASTIS DH PSFs; works only if im_pastis=True
    :return: mean DH contrast from E2E, from image PASTIS, and from matrix PASTIS
"""
log.warning("THIS ONLY WORKS FOR PISTON FOR NOW")
# Keep track of time
start_time = time.time() # runtime currently is around 12 min
# Parameters
dataDir = os.path.join(CONFIG_PASTIS.get('local', 'local_data_path'), matdir)
which_tel = CONFIG_PASTIS.get('telescope', 'name')
nb_seg = CONFIG_PASTIS.getint(which_tel, 'nb_subapertures')
filter = CONFIG_PASTIS.get(which_tel, 'filter_name')
fpm = CONFIG_PASTIS.get(which_tel, 'focal_plane_mask') # focal plane mask
lyot_stop = CONFIG_PASTIS.get(which_tel, 'pupil_plane_stop') # Lyot stop
inner_wa = CONFIG_PASTIS.getint(which_tel, 'IWA')
outer_wa = CONFIG_PASTIS.getint(which_tel, 'OWA')
tel_size_px = CONFIG_PASTIS.getint('numerical', 'tel_size_px')
sampling = CONFIG_PASTIS.getfloat(which_tel, 'sampling')
#real_samp = sampling * tel_size_px / im_size
zern_number = CONFIG_PASTIS.getint('calibration', 'local_zernike')
zern_mode = util.ZernikeMode(zern_number)
zern_max = CONFIG_PASTIS.getint('zernikes', 'max_zern')
# Import PASTIS matrix
matrix_pastis = None
if matrix_mode == 'numerical':
filename = 'PASTISmatrix_num_' + zern_mode.name + '_' + zern_mode.convention + str(zern_mode.index)
matrix_pastis = fits.getdata(os.path.join(dataDir, 'matrix_numerical', filename + '.fits'))
elif matrix_mode == 'analytical':
filename = 'PASTISmatrix_' + zern_mode.name + '_' + zern_mode.convention + str(zern_mode.index)
matrix_pastis = fits.getdata(os.path.join(dataDir, 'matrix_analytical', filename + '.fits'))
# Create random aberration coefficients
aber = np.random.random([nb_seg]) # piston values in input units
#log.info(f'PISTON ABERRATIONS: {aber}')
# Normalize to the RMS value I want
rms_init = util.rms(aber)
aber *= rms.value / rms_init
calc_rms = util.rms(aber) * u.nm
aber *= u.nm # making sure the aberration has the correct units
log.info(f"Calculated RMS: {calc_rms}")
# Remove global piston
aber -= np.mean(aber)
# Make equivalent aberration array that goes into the WebbPSF function
Aber_WSS = np.zeros([nb_seg, zern_max])
Aber_WSS[:,0] = aber.to(u.m).value # index "0" works because we're using piston currently; convert to meters
### BASELINE PSF - NO ABERRATIONS, NO CORONAGRAPH
log.info('Generating baseline PSF from E2E - no coronagraph, no aberrations')
psf_perfect = webbim.nircam_nocoro(filter, np.zeros_like(Aber_WSS))
normp = np.max(psf_perfect)
psf_perfect = psf_perfect / normp
### WEBBPSF
log.info('Generating E2E coro contrast')
start_webb = time.time()
# Set up NIRCam and coronagraph, get PSF
psf_webbpsf = webbim.nircam_coro(filter, fpm, lyot_stop, Aber_WSS)
psf_webbpsf = psf_webbpsf / normp
# Create dark hole
dh_area = util.create_dark_hole(psf_webbpsf, inner_wa, outer_wa, sampling)
# Get the mean contrast from the WebbPSF coronagraph
webb_dh_psf = psf_webbpsf * dh_area
contrast_webbpsf = np.mean(webb_dh_psf[np.where(webb_dh_psf != 0)])
end_webb = time.time()
#TODO: save plots of phase on segmented pupil
# Load in baseline contrast
contrastname = 'base-contrast_' + zern_mode.name + '_' + zern_mode.convention + str(zern_mode.index)
coro_floor = float(np.loadtxt(os.path.join(dataDir, 'calibration', contrastname+'.txt')))
### IMAGE PASTIS
contrast_am = np.nan
if im_pastis:
log.info('Generating contrast from image-PASTIS')
start_impastis = time.time()
# Create calibrated image from analytical model
psf_am, full_psf = impastis.analytical_model(zern_number, aber, cali=True)
# Get the mean contrast from image PASTIS
contrast_am = np.mean(psf_am[np.where(psf_am != 0)]) + coro_floor
end_impastis = time.time()
### MATRIX PASTIS
log.info('Generating contrast from matrix-PASTIS')
start_matrixpastis = time.time()
# Get mean contrast from matrix PASTIS
contrast_matrix = util.pastis_contrast(aber, matrix_pastis) + coro_floor # calculating contrast with PASTIS matrix model
end_matrixpastis = time.time()
ratio = None
if im_pastis:
ratio = contrast_am / contrast_matrix
# Outputs
log.info('\n--- CONTRASTS: ---')
log.info(f'Mean contrast from E2E: {contrast_webbpsf}')
log.info(f'Mean contrast with image PASTIS: {contrast_am}')
log.info(f'Contrast from matrix PASTIS: {contrast_matrix}')
log.info(f'Ratio image PASTIS / matrix PASTIS: {ratio}')
log.info('\n--- RUNTIMES: ---')
log.info(f'E2E: {end_webb-start_webb}sec = {(end_webb-start_webb)/60}min')
if im_pastis:
log.info(f'Image PASTIS: {end_impastis-start_impastis}sec = {(end_impastis-start_impastis)/60}min')
log.info(f'Matrix PASTIS: {end_matrixpastis-start_matrixpastis}sec = {(end_matrixpastis-start_matrixpastis)/60}min')
end_time = time.time()
runtime = end_time - start_time
log.info(f'Runtime for contrast_calculation_simple.py: {runtime} sec = {runtime/60} min')
# Save the PSFs
if im_pastis:
if plotting:
# As fits files
util.write_fits(util.zoom_cen(webb_dh_psf, psf_am.shape[0]/2), os.path.join(dataDir, 'results',
'dh_images_'+matrix_mode, '{:.2e}'.format(rms.value)+str(rms.unit)+'RMS_e2e.fits'))
util.write_fits(psf_am, os.path.join(dataDir, 'results', 'dh_images_'+matrix_mode,
'{:.2e}'.format(rms.value)+str(rms.unit)+'RMS_am.fits'))
# As PDF plot
plt.clf()
plt.figure()
plt.suptitle('{:.2e}'.format(rms.value) + str(rms.unit) + " RMS")
plt.subplot(1, 2, 1)
plt.title("E2E")
plt.imshow(util.zoom_cen(webb_dh_psf, psf_am.shape[0]/2), norm=LogNorm())
plt.colorbar()
plt.subplot(1, 2, 2)
plt.title("PASTIS image")
plt.imshow(psf_am, norm=LogNorm())
plt.colorbar()
plt.savefig(os.path.join(dataDir, 'results', 'dh_images_'+matrix_mode,
'{:.2e}'.format(rms.value)+'DH_PSFs.pdf'))
#TODO: check image rotation, I think there is a 90 degree difference in them for the JWST simulations
return contrast_webbpsf, contrast_am, contrast_matrix | 93336020d4d1c276fd7918d12b648476d63505f1 | 16,747 |
import configparser
def show_config_data_by_section(data:configparser.ConfigParser, section:str):
"""Print a section's data by section name
Args:
data (configparser.ConfigParser): Data
section (str): Section name
"""
if not _check_data_section_ok(data, section):
return None
val = data[section]
print("[{}]".format(section))
for k, v in val.items():
print("{} = {}".format(k, v))
print() | 620abac7791a9e34707236ea6186b4e77591a393 | 16,748 |
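# Usage sketch, assuming the module's _check_data_section_ok helper accepts the section:
cfg = configparser.ConfigParser()
cfg.read_string("[server]\nhost = 127.0.0.1\nport = 8080\n")
show_config_data_by_section(cfg, "server")
# [server]
# host = 127.0.0.1
# port = 8080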
from gensim import corpora, models
def train_save_tfidf(filein, target):
"""input is a bow corpus saved as a tfidf file. The output is
a saved tfidf corpus"""
try:
corpus = corpora.MmCorpus(filein)
except:
        raise NameError('HRMMPH. The file does not seem to exist. Create a file '
                        'first by running the "train_save_dictionary_corpus" function.')
tfidf = models.TfidfModel(corpus)
tfidf.save(f'nlp_training_data/{target}_tfidf_model.tfidf')
tfidf_corpus = tfidf[corpus]
return tfidf_corpus | e4d41443d27f8b55f9fd6ba4b8c13a42d381a980 | 16,749 |
import numpy as np
from modern_robotics import (CubicTimeScaling, QuinticTimeScaling, MatrixExp6,
                             MatrixLog6, TransInv)
def ScrewTrajectoryList(Xstart, Xend, Tf, N, method, gripper_state, traj_list):
""" Modified from the modern_robotics library ScrewTrajectory
Computes a trajectory as a list of SE(3) matrices with a gripper value and
converts into a list of lists
Args:
Xstart : The initial end-effector configuration
Xend : The final end-effector configuration
Tf : Total time of the motion in seconds from rest to rest
N : The number of points N > 1 in the discrete representation of the trajectory
method : The time-scaling method
gripper_state : The gripper open (0) and closed (1) value
Returns:
traj_list : list of rotations, positions, and gripper state
"""
N = int(N)
timegap = Tf / (N - 1.0)
traj = [[None]] * N
for i in range(N):
if method == 3:
s = CubicTimeScaling(Tf, timegap * i)
else:
s = QuinticTimeScaling(Tf, timegap * i)
traj[i] = np.dot(Xstart, MatrixExp6(MatrixLog6(np.dot(TransInv(Xstart), Xend)) * s))
traj = np.asarray(traj)
for i in range(N):
r11 = traj[i][0][0]
r12 = traj[i][0][1]
r13 = traj[i][0][2]
r21 = traj[i][1][0]
r22 = traj[i][1][1]
r23 = traj[i][1][2]
r31 = traj[i][2][0]
r32 = traj[i][2][1]
r33 = traj[i][2][2]
px = traj[i][0][3]
py = traj[i][1][3]
pz = traj[i][2][3]
traj_list.append([r11, r12, r13, r21, r22, r23, r31, r32, r33, px, py, pz, gripper_state])
return traj_list | 146f4f7b96207c74bbe0ed08e162c3ba656d7a43 | 16,750 |
def calculate_phase(time, period):
"""Calculates phase based on period.
Parameters
----------
time : type
Description of parameter `time`.
period : type
Description of parameter `period`.
Returns
-------
list
Orbital phase of the object orbiting the star.
"""
return (time % period) / period | a537810a7705b5d8b0144318469b249f64a01456 | 16,751 |
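# Worked example: a time of 12.5 days folded on a 5-day period gives phase 0.5;
# numpy arrays of observation times fold element-wise the same way.
assert calculate_phase(12.5, 5.0) == 0.5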
import os
import re
def get_sbappname(filepath):
""" Given a file path, find an acceptable name on the BL filesystem """
filename = os.path.split(filepath)[1]
filename = filename.split('.')[0]
return re.sub(r'[:*?"<>|]', "", filename)[:24] | c81154ee8ec97cd51f3fae55ba8b1778170de1b6 | 16,752 |
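# Usage sketch with a hypothetical path: the extension is dropped, characters that are
# invalid on the BL filesystem are stripped, and the result is capped at 24 characters.
assert get_sbappname("/tmp/My*App?.sbapp") == "MyApp"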
import cv2
import numpy as np
def perspective_transform(img):
"""
Do a perspective transform over an image.
Points are hardcoded and depend on the camera and it's positioning
:param img:
:return:
"""
pts1 = np.float32([[250, 686], [1040, 680], [740, 490], [523, 492]])
pts2 = np.float32([[295, 724], [980, 724], [988, 164], [297, 150]])
M = cv2.getPerspectiveTransform(pts1, pts2)
transformed_image = cv2.warpPerspective(img, M, (img.shape[1], img.shape[0]))
return transformed_image | 51411c1fc73e897a657e2e89c44275796b16a1b6 | 16,753 |
def login():
"""Log user in"""
# Forget any user_id
session.clear()
# User reached route via POST (as by submitting a form via POST)
if request.method == "POST":
# Ensure username was submitted
if not request.form.get("username"):
return apology("must provide username", 400)
# Ensure password was submitted
elif not request.form.get("password"):
return apology("must provide password", 400)
# Query database for username
rows = db.execute("SELECT * FROM users WHERE username = :username",
username=request.form.get("username"))
# Ensure username exists and password is correct
if len(rows) != 1 or not check_password_hash(rows[0]["hash"], request.form.get("password")):
return apology("invalid username and/or password", 400)
# Remember which user has logged in
session["user_id"] = rows[0]["id"]
# Redirect user to home page
return redirect("/")
# User reached route via GET (as by clicking a link or via redirect)
else:
return render_template("login.html") | ffa2152cffdbfd161f3e8aa23aefa3c49993e630 | 16,754 |
def value_iteration(P, nS, nA, gamma=0.9, tol=1e-3):
"""
Learn value function and policy by using value iteration method for a given
gamma and environment.
Parameters:
----------
P, nS, nA, gamma:
defined at beginning of file
tol: float
Terminate value iteration when
max |value(s) - prev_value(s)| < tol
Returns:
----------
value: np.ndarray[nS]
policy: np.ndarray[nS]
"""
value = np.zeros(nS) # value function initialized
policy = np.zeros(nS, dtype=int) # policy initialized
while True: # until convergence or finite horizon overflow
new_value = np.zeros(nS)
for state in range(nS): # for each state
best_Q_value = -float("inf") # we are looking for the best action in term of Q value
for action in range(nA): # for each action
p = P[state][action] # {(probability, nextstate, reward, terminal),...}[state,action]
reward = sum([i[0]*i[2] for i in p]) # expected reward for this state-action
Q_value = reward + gamma*(sum([i[0]*value[i[1]] for i in p])) # expected reward + gamma * expected value for this state-action
if Q_value > best_Q_value:
new_value[state] = Q_value # max_a Q for this state
policy[state] = action # argmax_a Q for this state
best_Q_value = Q_value
if np.max(np.abs(new_value - value)) < tol: # convergence
value = new_value
break
value = new_value
return value, policy | 7362b95cd453f0983e6b82acf73d0350eae2734c | 16,755 |
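# Minimal sketch: a deterministic 2-state, 2-action MDP in the gym-style P format
# (probability, nextstate, reward, terminal), where action 1 always moves to state 1
# and pays a reward of 1.0, and action 0 stays in place for nothing.
P = {
    0: {0: [(1.0, 0, 0.0, False)], 1: [(1.0, 1, 1.0, False)]},
    1: {0: [(1.0, 1, 0.0, False)], 1: [(1.0, 1, 1.0, False)]},
}
V, pi = value_iteration(P, nS=2, nA=2, gamma=0.9, tol=1e-6)
# Both states prefer action 1; V approaches 1 / (1 - 0.9) = 10 for each state.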
def get_clients():
"""
Determine if the current user has a connected client.
"""
return jsonify(g.user.user_id in clients) | 602d77b25b6608db24fca66d5bc55bc83a0530e8 | 16,756 |
def get_split_indices(word, curr_tokens, include_joiner_token, joiner):
"""Gets indices for valid substrings of word, for iterations > 0.
For iterations > 0, rather than considering every possible substring, we only
want to consider starting points corresponding to the start of wordpieces in
the current vocabulary.
Args:
word: string we want to split into substrings
curr_tokens: string to int dict of tokens in vocab (from previous iteration)
include_joiner_token: bool whether to include joiner token
joiner: string used to indicate suffixes
Returns:
list of ints containing valid starting indices for word
"""
indices = []
start = 0
while start < len(word):
end = len(word)
while end > start:
subtoken = word[start:end]
# Subtoken includes the joiner token.
if include_joiner_token and start > 0:
subtoken = joiner + subtoken
# If subtoken is part of vocab, 'end' is a valid start index.
if subtoken in curr_tokens:
indices.append(end)
break
end -= 1
if end == start:
return None
start = end
return indices | 495d924716cfd0e14430d225e50b313fea305dbb | 16,757 |
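# Usage sketch with a toy WordPiece-style vocab where '##' marks suffix pieces.
vocab = {'hell': 0, '##o': 1}
assert get_split_indices('hello', vocab, include_joiner_token=True, joiner='##') == [4, 5]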
from numbers import Number  # assumed source of the Number type used in the hints below
def perspective(
vlist: list[list[Number,
Number,
Number]],
rotvec: list[list[float, float],
list[float, float],
list[float, float]],
dispvec: list[Number,
Number,
Number],
d: float) -> tuple:
"""Projects 3D points to 2D and
apply rotation and translation
vectors
Args:
vlist : list of 3D vertices
rotvec : 3D rotation vector
dispvec: 3D translation vector
d : Distance of observer
from the screen
Returns:
tuple (list, list)
"""
projvlist = []
rotvlist = []
((sroll, croll),
(spitch, cpitch),
(syaw, cyaw)) = rotvec
for p in vlist:
(px, py, pz) = p
x1 = -cyaw * px - syaw * pz
y1 = croll * py - sroll * x1
z1 = -syaw * px + cyaw * pz
x = croll * x1 + sroll * py
y = spitch * z1 + cpitch * y1
z = cpitch * z1 - spitch * y1
x += dispvec[0]
y += dispvec[1]
z += dispvec[2]
rotvlist.append([x, y, z])
projvlist.append([-d * x / z,
-d * y / z])
return (rotvlist, projvlist) | daece49851ecca55ba30d4f6f82fe59d5deb5497 | 16,758 |
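# Usage sketch: identity rotation (each angle given as a (sin, cos) pair), the scene
# pushed 5 units along z, and an observer distance d of 1.
rotvec = [[0.0, 1.0], [0.0, 1.0], [0.0, 1.0]]   # roll, pitch, yaw
rotated, projected = perspective([[1, 1, 0]], rotvec, [0, 0, 5], d=1.0)
# rotated   == [[-1, 1, 5]]   (the x flip comes from the rotation step itself)
# projected == [[0.2, -0.2]]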
import os
def _check_shebang(filename, disallow_executable):
"""Return 0 if the filename's executable bit is consistent with the
presence of a shebang line and the shebang line is in the whitelist of
acceptable shebang lines, and 1 otherwise.
If the string "# noqa: shebang" is present in the file, then this check
will be ignored.
"""
with open(filename, mode='r', encoding='utf8') as file:
content = file.read()
if "# noqa: shebang" in content:
# Ignore.
return 0
is_executable = os.access(filename, os.X_OK)
if is_executable and disallow_executable:
print("ERROR: {} is executable, but should not be".format(filename))
print("note: fix via chmod a-x '{}'".format(filename))
return 1
lines = content.splitlines()
assert len(lines) > 0, f"Empty file? {filename}"
shebang = lines[0]
has_shebang = shebang.startswith("#!")
if is_executable and not has_shebang:
print("ERROR: {} is executable but lacks a shebang".format(filename))
print("note: fix via chmod a-x '{}'".format(filename))
return 1
if has_shebang and not is_executable:
print("ERROR: {} has a shebang but is not executable".format(filename))
print("note: fix by removing the first line of the file")
return 1
shebang_whitelist = {
"bash": "#!/bin/bash",
"python": "#!/usr/bin/env python3",
}
if has_shebang and shebang not in list(shebang_whitelist.values()):
print(("ERROR: shebang '{}' in the file '{}' is not in the shebang "
"whitelist").format(shebang, filename))
for hint, replacement_shebang in shebang_whitelist.items():
if hint in shebang:
print(("note: fix by replacing the shebang with "
"'{}'").format(replacement_shebang))
return 1
return 0 | 205fa2bc88b4c80899223e691b6f9cd00492c011 | 16,759 |
def actor_is_contact(api_user, nick, potential_contact):
"""Determine if one is a contact.
PARAMETERS:
potential_contact - stalkee.
RETURNS: boolean
"""
nick = clean.user(nick)
potential_contact = clean.user(potential_contact)
key_name = Relation.key_from(relation='contact',
owner=nick,
target=potential_contact)
rel_ref = Relation.get_by_key_name(key_name)
return rel_ref and True | 93a3bfd0a52b2acb043c162428f0fa45754702bf | 16,760 |
def compute_mem(w, n_ring=1, spectrum='nonzero', tol=1e-10):
"""Compute Moran eigenvectors map.
Parameters
----------
w : BSPolyData, ndarray or sparse matrix, shape = (n_vertices, n_vertices)
Spatial weight matrix or surface. If surface, the weight matrix is
built based on the inverse geodesic distance between each vertex
and the vertices in its `n_ring`.
n_ring : int, optional
Neighborhood size to build the weight matrix. Only used if user
provides a surface mesh. Default is 1.
spectrum : {'all', 'nonzero'}, optional
Eigenvalues/vectors to select. If 'all', recover all eigenvectors
except the smallest one. Otherwise, select all except non-zero
eigenvectors. Default is 'nonzero'.
tol : float, optional
Minimum value for an eigenvalue to be considered non-zero.
Default is 1e-10.
Returns
-------
w : 1D ndarray, shape (n_components,)
Eigenvalues in descending order. With ``n_components = n_vertices - 1``
if ``spectrum == 'all'`` and ``n_components = n_vertices - n_zero`` if
``spectrum == 'nonzero'``, and `n_zero` is number of zero eigenvalues.
mem : 2D ndarray, shape (n_vertices, n_components)
Eigenvectors of the weight matrix in same order.
See Also
--------
:func:`.moran_randomization`
:class:`.MoranRandomization`
References
----------
* Wagner H.H. and Dray S. (2015). Generating spatially constrained
null models for irregularly spaced data using Moran spectral
randomization methods. Methods in Ecology and Evolution, 6(10):1169-78.
"""
if spectrum not in ['all', 'nonzero']:
raise ValueError("Unknown autocor '{0}'.".format(spectrum))
# If surface is provided instead of affinity
if not (isinstance(w, np.ndarray) or ssp.issparse(w)):
w = me.get_ring_distance(w, n_ring=n_ring, metric='geodesic')
w.data **= -1 # inverse of distance
# w /= np.nansum(w, axis=1, keepdims=True) # normalize rows
if not is_symmetric(w):
w = make_symmetric(w, check=False, sparse_format='coo')
# Doubly centering weight matrix
if ssp.issparse(w):
m = w.mean(axis=0).A
wc = w.mean() - m - m.T
if not ssp.isspmatrix_coo(w):
w_format = w.format
w = w.tocoo(copy=False)
row, col = w.row, w.col
w = getattr(w, 'to' + w_format)(copy=False)
else:
row, col = w.row, w.col
wc[row, col] += w.data
else:
m = w.mean(axis=0, keepdims=True)
wc = w.mean() - m - m.T
wc += w
# when using float64, eigh is unstable for sparse matrices
ev, mem = np.linalg.eigh(wc.astype(np.float32))
ev, mem = ev[::-1], mem[:, ::-1]
# Remove zero eigen-value/vector
ev_abs = np.abs(ev)
mask_zero = ev_abs < tol
n_zero = np.count_nonzero(mask_zero)
if n_zero == 0:
raise ValueError('Weight matrix has no zero eigenvalue.')
# Multiple zero eigenvalues
if spectrum == 'all':
if n_zero > 1:
n = w.shape[0]
memz = np.hstack([mem[:, mask_zero], np.ones((n, 1))])
q, _ = np.linalg.qr(memz)
mem[:, mask_zero] = q[:, :-1]
idx_zero = mask_zero.argmax()
else:
idx_zero = ev_abs.argmin()
ev[idx_zero:-1] = ev[idx_zero+1:]
mem[:, idx_zero:-1] = mem[:, idx_zero + 1:]
ev = ev[:-1]
mem = mem[:, :-1]
else: # only nonzero
mask_nonzero = ~mask_zero
ev = ev[mask_nonzero]
mem = mem[:, mask_nonzero]
return mem, ev | fe622d75816629aaf5fce34405eb7a3021393d7d | 16,761 |
def eval_BenchmarkModel(x, a, y, model, loss):
"""
Given a dataset (x, a, y) along with predictions,
loss function name
evaluate the following:
- average loss on the dataset
- DP disp
"""
pred = model(x) # apply model to get predictions
n = len(y)
if loss == "square":
err = mean_squared_error(y, pred) # mean square loss
elif loss == "absolute":
err = mean_absolute_error(y, pred) # mean absolute loss
## functions from sklearn.metrics library.
## The strange thing is that in the evaluate_FairModel function, the author uses his own function.
elif loss == "logistic": # assuming probabilistic predictions
# take the probability of the positive class
pred = pd.DataFrame(pred).iloc[:, 1]
err = log_loss(y, pred, eps=1e-15, normalize=True)
else:
raise Exception('Loss not supported: ', str(loss))
disp = pred2_disp(pred, a, y, loss)
## this function seems incomplete
## because i cannot find the definition of function argument quantization.
loss_vec = loss_vec2(pred, y, loss)
## Isn't this equal to the error part???
loss_mean, loss_std = norm.fit(loss_vec)
evaluation = {}
evaluation['pred'] = pred
evaluation['average_loss'] = err
evaluation['DP_disp'] = disp['DP']
evaluation['disp_std'] = KS_confbdd(n, alpha=0.05)
evaluation['loss_std'] = loss_std / np.sqrt(n)
return evaluation | cdb4e82004d94c7b25a705d33f716ac3d81e38de | 16,762 |
def parse_sgf_game(s):
"""Read a single SGF game from a string, returning the parse tree.
s -- 8-bit string
Returns a Coarse_game_tree.
Applies the rules for FF[4].
Raises ValueError if can't parse the string.
If a property appears more than once in a node (which is not permitted by
the spec), treats it the same as a single property with multiple values.
Identifies the start of the SGF content by looking for '(;' (with possible
whitespace between); ignores everything preceding that. Ignores everything
following the first game.
"""
game_tree, _ = _parse_sgf_game(s, 0)
if game_tree is None:
raise ValueError("no SGF data found")
return game_tree | 4315277a91f732f92c3001cf570221ab6aa657a7 | 16,763 |
import re
def retrieve(
framework,
region,
version=None,
py_version=None,
instance_type=None,
accelerator_type=None,
image_scope=None,
container_version=None,
distribution=None,
base_framework_version=None,
):
"""Retrieves the ECR URI for the Docker image matching the given arguments.
Args:
framework (str): The name of the framework or algorithm.
region (str): The AWS region.
version (str): The framework or algorithm version. This is required if there is
more than one supported version for the given framework or algorithm.
py_version (str): The Python version. This is required if there is
more than one supported Python version for the given framework version.
instance_type (str): The SageMaker instance type. For supported types, see
https://aws.amazon.com/sagemaker/pricing/instance-types. This is required if
there are different images for different processor types.
accelerator_type (str): Elastic Inference accelerator type. For more, see
https://docs.aws.amazon.com/sagemaker/latest/dg/ei.html.
image_scope (str): The image type, i.e. what it is used for.
Valid values: "training", "inference", "eia". If ``accelerator_type`` is set,
``image_scope`` is ignored.
container_version (str): the version of docker image
distribution (dict): A dictionary with information on how to run distributed training
(default: None).
Returns:
str: the ECR URI for the corresponding SageMaker Docker image.
Raises:
ValueError: If the combination of arguments specified is not supported.
"""
config = _config_for_framework_and_scope(framework, image_scope, accelerator_type)
original_version = version
version = _validate_version_and_set_if_needed(version, config, framework)
version_config = config["versions"][_version_for_config(version, config)]
if framework == HUGGING_FACE_FRAMEWORK:
if version_config.get("version_aliases"):
full_base_framework_version = version_config["version_aliases"].get(
base_framework_version, base_framework_version
)
_validate_arg(full_base_framework_version, list(version_config.keys()), "base framework")
version_config = version_config.get(full_base_framework_version)
py_version = _validate_py_version_and_set_if_needed(py_version, version_config, framework)
version_config = version_config.get(py_version) or version_config
registry = _registry_from_region(region, version_config["registries"])
hostname = utils._botocore_resolver().construct_endpoint("ecr", region)["hostname"]
repo = version_config["repository"]
processor = _processor(
instance_type, config.get("processors") or version_config.get("processors")
)
if framework == HUGGING_FACE_FRAMEWORK:
pt_or_tf_version = (
re.compile("^(pytorch|tensorflow)(.*)$").match(base_framework_version).group(2)
)
tag_prefix = f"{pt_or_tf_version}-transformers{original_version}"
else:
tag_prefix = version_config.get("tag_prefix", version)
tag = _format_tag(
tag_prefix,
processor,
py_version,
container_version,
)
if _should_auto_select_container_version(instance_type, distribution):
container_versions = {
"tensorflow-2.3-gpu-py37": "cu110-ubuntu18.04-v3",
"tensorflow-2.3.1-gpu-py37": "cu110-ubuntu18.04",
"tensorflow-2.3.2-gpu-py37": "cu110-ubuntu18.04",
"tensorflow-1.15-gpu-py37": "cu110-ubuntu18.04-v8",
"tensorflow-1.15.4-gpu-py37": "cu110-ubuntu18.04",
"tensorflow-1.15.5-gpu-py37": "cu110-ubuntu18.04",
"mxnet-1.8-gpu-py37": "cu110-ubuntu16.04-v1",
"mxnet-1.8.0-gpu-py37": "cu110-ubuntu16.04",
"pytorch-1.6-gpu-py36": "cu110-ubuntu18.04-v3",
"pytorch-1.6.0-gpu-py36": "cu110-ubuntu18.04",
"pytorch-1.6-gpu-py3": "cu110-ubuntu18.04-v3",
"pytorch-1.6.0-gpu-py3": "cu110-ubuntu18.04",
}
key = "-".join([framework, tag])
if key in container_versions:
tag = "-".join([tag, container_versions[key]])
if tag:
repo += ":{}".format(tag)
return ECR_URI_TEMPLATE.format(registry=registry, hostname=hostname, repository=repo) | eeee1aec620de5b29650b9605c7fb2b13aed76e5 | 16,764 |
def deconstruct_DMC(G, alpha, beta):
"""Deconstruct a DMC graph over a single step."""
# reverse complementation
if G.has_edge(alpha, beta):
G.remove_edge(alpha, beta)
w = 1
else:
w = 0
# reverse mutation
alpha_neighbors = set(G.neighbors(alpha))
beta_neighbors = set(G.neighbors(beta))
x = len(alpha_neighbors & beta_neighbors)
y = len(alpha_neighbors | beta_neighbors)
for neighbor in alpha_neighbors:
G.add_edge(beta, neighbor)
# reverse duplication
G.remove_node(alpha)
return (w, x, y) | fa32a325fd49435e3191a20b908ac0e9c3b992f8 | 16,765 |
def new_followers_view(request):
"""
View to show new followers.
:param request:
:return:
"""
current_author = request.user.user
followers_new = FollowRequest.objects.all().filter(friend=current_author).filter(acknowledged=False)
for follow in followers_new:
follow.acknowledged = True
follow.save()
request.context['followers_new'] = followers_new
return render(request, 'authors/follower_request.html', request.context) | 88277967b8185c47b9bb955dabf6fcd79ea3a530 | 16,766 |
def inv(a, p):
"""Inverse of a in :math:`{mathbb Z}_p`
:param a,p: non-negative integers
:complexity: O(log a + log p)
"""
return bezout(a, p)[0] % p | d2caab3a564d5f58d1be345900382e762350a2ea | 16,767 |
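# bezout() is defined elsewhere in the original module; a hedged extended-Euclid sketch
# returning (u, v) with a*u + b*v = gcd(a, b) is enough to exercise inv().
def bezout(a, b):
    if b == 0:
        return (1, 0)
    u, v = bezout(b, a % b)
    return (v, u - (a // b) * v)
assert inv(3, 7) == 5   # 3 * 5 == 15 == 1 (mod 7)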
import os
import zipfile
def get_iSUN(location=None):
"""
Loads or downloads and caches the iSUN dataset.
@type location: string, defaults to `None`
@param location: If and where to cache the dataset. The dataset
will be stored in the subdirectory `iSUN` of
location and read from there, if already present.
@return: Training stimuli, validation stimuli, testing stimuli, training fixation trains, validation fixation trains
.. seealso::
P. Xu, K. A. Ehinger, Y. Zhang, A. Finkelstein, S. R. Kulkarni, and J. Xiao.: TurkerGaze: Crowdsourcing Saliency with Webcam based Eye Tracking
http://lsun.cs.princeton.edu/
http://vision.princeton.edu/projects/2014/iSUN/
"""
if location:
location = os.path.join(location, 'iSUN')
if os.path.exists(location):
stimuli_training = _load(os.path.join(location, 'stimuli_training.hdf5'))
stimuli_validation = _load(os.path.join(location, 'stimuli_validation.hdf5'))
stimuli_testing = _load(os.path.join(location, 'stimuli_testing.hdf5'))
fixations_training = _load(os.path.join(location, 'fixations_training.hdf5'))
fixations_validation = _load(os.path.join(location, 'fixations_validation.hdf5'))
return stimuli_training, stimuli_validation, stimuli_testing, fixations_training, fixations_validation
os.makedirs(location)
with TemporaryDirectory(cleanup=True) as temp_dir:
download_and_check('http://lsun.cs.princeton.edu/challenge/2015/eyetracking/data/training.mat',
os.path.join(temp_dir, 'training.mat'),
'5a8b15134b17c7a3f69b087845db1363')
download_and_check('http://lsun.cs.princeton.edu/challenge/2015/eyetracking/data/validation.mat',
os.path.join(temp_dir, 'validation.mat'),
'f68e9b011576e48d2460b883854fd86c')
download_and_check('http://lsun.cs.princeton.edu/challenge/2015/eyetracking/data/testing.mat',
os.path.join(temp_dir, 'testing.mat'),
'be008ef0330467dcb9c9cd9cc96a8546')
download_and_check('http://lsun.cs.princeton.edu/challenge/2015/eyetracking/data/fixation.zip',
os.path.join(temp_dir, 'fixation.zip'),
'aadc15784e1b0023cda4536335b7839c')
download_and_check('http://lsun.cs.princeton.edu/challenge/2015/eyetracking/data/image.zip',
os.path.join(temp_dir, 'image.zip'),
'0a3af01c5307f1d44f5dd309f71ea963')
# Stimuli
print('Creating stimuli')
f = zipfile.ZipFile(os.path.join(temp_dir, 'image.zip'))
namelist = f.namelist()
namelist = filter_files(namelist, ['.DS_Store'])
f.extractall(temp_dir, namelist)
def get_stimuli_names(name):
data_file = os.path.join(temp_dir, '{}.mat'.format(name))
data = loadmat(data_file)[name]
stimuli_names = [d[0] for d in data['image'][:, 0]]
stimuli_names = ['{}.jpg'.format(n) for n in stimuli_names]
return stimuli_names
stimulis = []
stimuli_src_location = os.path.join(temp_dir, 'images')
for name in ['training', 'validation', 'testing']:
print("Creating {} stimuli".format(name))
stimuli_target_location = os.path.join(location, 'stimuli_{}'.format(name)) if location else None
images = get_stimuli_names(name)
stimulis.append(create_stimuli(stimuli_src_location, images, stimuli_target_location))
# FixationTrains
print('Creating fixations')
def get_fixations(name):
data_file = os.path.join(temp_dir,'{}.mat'.format(name))
data = loadmat(data_file)[name]
gaze = data['gaze'][:, 0]
ns = []
train_xs = []
train_ys = []
train_ts = []
train_subjects = []
for n in range(len(gaze)):
fixation_trains = gaze[n]['fixation'][0, :]
for train in fixation_trains:
xs = train[:, 0]
ys = train[:, 1]
ns.append(n)
train_xs.append(xs)
train_ys.append(ys)
train_ts.append(range(len(xs)))
train_subjects.append(0)
fixations = FixationTrains.from_fixation_trains(train_xs, train_ys, train_ts, ns, train_subjects)
return fixations
fixations = []
for name in ['training', 'validation']:
print("Creating {} fixations".format(name))
fixations.append(get_fixations(name))
if location:
stimulis[0].to_hdf5(os.path.join(location, 'stimuli_training.hdf5'))
stimulis[1].to_hdf5(os.path.join(location, 'stimuli_validation.hdf5'))
        stimulis[2].to_hdf5(os.path.join(location, 'stimuli_testing.hdf5'))
fixations[0].to_hdf5(os.path.join(location, 'fixations_training.hdf5'))
fixations[1].to_hdf5(os.path.join(location, 'fixations_validation.hdf5'))
return stimulis + fixations | 76f429a8caf4090b1df09550e3455888928f71d6 | 16,768 |
def metadata_columns(request, metadata_column_headers):
"""Make a metadata column header and column value dictionary."""
template = 'val{}'
columns = {}
for header in metadata_column_headers:
columns[header] = []
for i in range(0, request.param):
columns[header].append(template.format(i))
return columns | ca1f89935260e9d55d57df5fe5fbb0946b5948ac | 16,769 |
def all_done_tasks_for_person(person, client=default):
"""
Returns:
list: Tasks that are done for given person (only for open projects).
"""
person = normalize_model_parameter(person)
return raw.fetch_all("persons/%s/done-tasks" % person["id"], client=client) | 68883d7ac9c1e0cd009ff02ae4944782ae6fc637 | 16,770 |
def transform_cfg_to_wcnf(cfg: CFG) -> CFG:
"""
Transform given cfg into Weakened Normal Chomsky Form (WNCF)
Parameters
----------
cfg: CFG
CFG object to transform to WNCF
Returns
-------
wncf: CFG
CFG in Weakened Normal Chomsky Form (WNCF)
"""
wncf = (
cfg.remove_useless_symbols()
.eliminate_unit_productions()
.remove_useless_symbols()
)
new_productions = wncf._get_productions_with_only_single_terminals()
new_productions = wncf._decompose_productions(new_productions)
return CFG(start_symbol=wncf.start_symbol, productions=new_productions) | 55d72634b02feab7150d290619b40fc2976ffae3 | 16,771 |
def add_def() -> bool:
""" Retrieves the definition from the user and
enters it into the database.
"""
logger.info("Start <add_def>")
fields = ["what", "def_body", "subject"]
fields_dict = {"what": '', "def_body": '', "subject": ''}
for fi in fields:
phrase = PHRASES[fi]
fields_dict[fi] = input(colored(phrase[0], "green"))
if not fields_dict[fi]:
cprint(phrase[1], "green")
fields_dict[fi] = input(colored(phrase[2], "green"))
if not fields_dict[fi]:
cprint("Mm-m, no - some help?..")
return False
lecture = input(colored("Lecture #", "green"))
    if lecture and not lecture.isdigit():
        cprint("Number of lecture must be integer, did you know that?",
               color="yellow")
        lecture = input(colored("Lecture #", "green"))
        if lecture and not lecture.isdigit():
            cprint("Mm-m, no - some help?..")
            return False
# what = what.lower()
lecture = int(lecture) if lecture else -1
result = [
fields_dict["what"], fields_dict["def_body"], fields_dict["subject"],
lecture
]
result[2] = result[2].capitalize()
logger.info(f"Get what=<{result[0]}>")
logger.debug(f"Get def_body=<{result[1]}>")
logger.debug(f"Get subject=<{result[2]}>")
logger.debug(f"Get lecture=<{result[3]}>")
data_base.add_def(*result)
cprint(f"All done! New definition of '{result[0]}' has been saved",
color="green")
return True | 20888544e2e7293c5226b62532ffa32a4aa2b874 | 16,772 |
def insert_scope_name(urls):
"""
given a tuple of URLs for webpy with '%s' as a placeholder for
SCOPE_NAME_REGEXP, return a finalised tuple of URLs that will work for all
SCOPE_NAME_REGEXPs in all schemas
"""
regexps = get_scope_name_regexps()
result = []
for i in range(0, len(urls), 2):
if "%s" in urls[i]:
# add a copy for each unique SCOPE_NAME_REGEXP
for scope_name_regexp in regexps:
result.append(urls[i] % scope_name_regexp)
result.append(urls[i + 1])
else:
# pass through unmodified
result.append(urls[i])
result.append(urls[i + 1])
return tuple(result) | 28cda0956f232adf176666c776b39463caca9847 | 16,773 |
def fit_cochrane_orcutt(ts, regressors, maxIter=10, sc=None):
"""
Fit linear regression model with AR(1) errors , for references on Cochrane Orcutt model:
See [[https://onlinecourses.science.psu.edu/stat501/node/357]]
See : Applied Linear Statistical Models - Fifth Edition - Michael H. Kutner , page 492
The method assumes the time series to have the following model
Y_t = B.X_t + e_t
e_t = rho*e_t-1+w_t
e_t has autoregressive structure , where w_t is iid ~ N(0,&sigma 2)
Outline of the method :
1) OLS Regression for Y (timeseries) over regressors (X)
2) Apply auto correlation test (Durbin-Watson test) over residuals , to test whether e_t still
have auto-regressive structure
3) if test fails stop , else update update coefficients (B's) accordingly and go back to step 1)
Parameters
----------
ts:
Vector of size N for time series data to create the model for as a Numpy array
regressors:
Matrix N X K for the timed values for K regressors over N time points as a Numpy array
maxIter:
maximum number of iterations in iterative cochrane-orchutt estimation
Returns instance of class [[RegressionARIMAModel]]
"""
    assert sc is not None, "Missing SparkContext"
jvm = sc._jvm
fnord = _nparray2breezematrix(sc, regressors)
print(fnord)
jmodel = jvm.com.cloudera.sparkts.models.RegressionARIMA.fitCochraneOrcutt(
_nparray2breezevector(sc, ts), _nparray2breezematrix(sc, regressors), maxIter
)
return RegressionARIMAModel(jmodel=jmodel, sc=sc) | 958ca88e6ac37ebd58c7f1ff88c191d801e4cb87 | 16,774 |
def get_nodeweight(obj):
"""
utility function that returns a
node class and it's weight
can be used for statistics
to get some stats when NO Advanced Nodes are available
"""
k = obj.__class__.__name__
if k in ('Text',):
return k, len(obj.caption)
elif k == 'ImageLink' and obj.isInline():
return 'InlineImageLink', 1
return k, 1 | 1ab88f73621c8396fca08551dd14c9a757d019ad | 16,775 |
def CMYtoRGB(C, M, Y):
""" convert CMY to RGB color
:param C: C value (0;1)
:param M: M value (0;1)
:param Y: Y value (0;1)
:return: RGB tuple (0;255) """
RGB = [(1.0 - i) * 255.0 for i in (C, M, Y)]
return tuple(RGB) | cfc2c7b91dd7f1faf93351e28ffdd9906613471a | 16,776 |
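# Worked example: pure cyan in CMY corresponds to (0, 255, 255) in RGB.
assert CMYtoRGB(1.0, 0.0, 0.0) == (0.0, 255.0, 255.0)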
def update_local_artella_root():
"""
Updates the environment variable that stores the Artella Local Path
NOTE: This is done by Artella plugin when is loaded, so we should not do it manually again
"""
metadata = get_metadata()
if metadata:
metadata.update_local_root()
return True
return False | 23fb9f0eb47aec566dc6b9862474535545b963dc | 16,777 |
def app_tests(enable_migrations, tags, verbosity):
"""Gets the TestRunner and runs the tests"""
# prepare the actual test environment
setup(enable_migrations, verbosity)
# reuse Django's DiscoverRunner
if not hasattr(settings, 'TEST_RUNNER'):
settings.TEST_RUNNER = 'django.test.runner.DiscoverRunner'
TestRunner = get_runner(settings)
test_runner = TestRunner(
verbosity=verbosity,
tags=tags,
)
failures = test_runner.run_tests(['.'])
return failures | c56ca20ea98dadf97f39a30e2f07c0eb3952b418 | 16,778 |
def quicksort(numbers, low, high):
"""Python implementation of quicksort."""
if low < high:
pivot = _partition(numbers, low, high)
quicksort(numbers, low, pivot)
quicksort(numbers, pivot + 1, high)
return numbers | 064aa30f032036aa73f08b2b94ce4556ffc565fd | 16,779 |
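# _partition is not shown in this snippet; below is a hedged Hoare-style sketch that
# matches the (low, pivot) / (pivot + 1, high) recursion used above.
def _partition(numbers, low, high):
    pivot = numbers[(low + high) // 2]
    i, j = low - 1, high + 1
    while True:
        i += 1
        while numbers[i] < pivot:
            i += 1
        j -= 1
        while numbers[j] > pivot:
            j -= 1
        if i >= j:
            return j
        numbers[i], numbers[j] = numbers[j], numbers[i]

assert quicksort([3, 1, 2], 0, 2) == [1, 2, 3]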
import numpy as np
import scipy.stats
from numpy import exp
def bsput_delta(k, t, *, x0=1., r=0., q=0., sigma=1.):
"""
bsput_delta(k, t, *, x0=1., r=0., q=0., sigma=1.)
Black-Scholes put option delta.
See Also
--------
bscall
"""
r, q = np.asarray(r), np.asarray(q)
d1, d2 = bsd1d2(k, t, x0=x0, r=r, q=q, sigma=sigma)
return -exp(-q*t) * scipy.stats.norm.cdf(-d1) | d6e1e3e6c2f97fa856b156170ac49ea3d5530423 | 16,780 |
from typing import Sequence
import re
def regex_filter(patterns: Sequence[Regex], negate: bool = False, **kwargs) -> SigMapper:
"""Filter out the signals that do not match regex patterns (or do match if negate=True)."""
patterns = list(map(re.compile, patterns))
def filt(sigs):
def map_sig(sig):
return _regex_map(sig, patterns,
on_match = lambda s, p: (s if not negate else None),
on_no_match = lambda s: (None if not negate else s),
**kwargs)
return list(filter(None, map(map_sig, sigs)))
return filt | 4c76d4bd5f76d5d35373ec14c910291b155cd4db | 16,781 |
def cpncc(img, vertices_lst, tri):
"""cython version for PNCC render: original paper"""
h, w = img.shape[:2]
c = 3
pnccs_img = np.zeros((h, w, c))
for i in range(len(vertices_lst)):
vertices = vertices_lst[i]
pncc_img = crender_colors(vertices, tri, pncc_code, h, w, c)
pnccs_img[pncc_img > 0] = pncc_img[pncc_img > 0]
pnccs_img = pnccs_img.squeeze() * 255
return pnccs_img | 8c7e380b56e26197cfb6b9b65c8d373ada0be4b1 | 16,782 |
import time
def run_net(X, y, batch_size, dnn, data_layer_name, label_layer_name,
loss_layer, accuracy_layer, accuracy_sink, is_train):
"""Runs dnn on given data"""
start = time.time()
total_loss = 0.
run_iter = dnn.learn if is_train else dnn.run
math_engine = dnn.math_engine
accuracy_layer.reset = True # Reset previous statistics
for X_batch, y_batch in irnn_data_iterator(X, y, batch_size, math_engine):
run_iter({data_layer_name: X_batch, label_layer_name: y_batch})
        total_loss += loss_layer.last_loss * y_batch.batch_width
accuracy_layer.reset = False # Don't reset statistics within one epoch
avg_loss = total_loss / y.shape[0]
acc = accuracy_sink.get_blob().asarray()[0]
run_time = time.time() - start
return avg_loss, acc, run_time | 43121dff269df6a03763f130e8f75f0ce6984a57 | 16,783 |
from typing import Any
import json
def json_safe(arg: Any):
"""
Checks whether arg can be json serialized and if so just returns arg as is
otherwise returns none
"""
try:
json.dumps(arg)
return arg
except:
return None | 97ac87464fb4b31b4fcfc7896252d23a10e57b72 | 16,784 |
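# Usage sketch: JSON-serializable values pass through unchanged, everything else maps to None.
assert json_safe({"a": 1}) == {"a": 1}
assert json_safe(set()) is None   # sets cannot be serialized by json.dumps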
def _key_iv_check(key_iv):
"""
密钥或初始化向量检测
"""
# 密钥
if key_iv is None or not isinstance(key_iv, string_types):
raise TypeError('Parameter key or iv:{} not a basestring'.format(key_iv))
if isinstance(key_iv, text_type):
key_iv = key_iv.encode(encoding=E_FMT)
if len(key_iv) > BLOCK_BYTE:
raise ValueError('Parameter key or iv:{} byte greater than {}'.format(key_iv.decode(E_FMT),
BLOCK_BYTE))
return key_iv | 809ff811a433f9843b330a56be926411871d8b7a | 16,785 |
def decomposeArbitraryLength(number):
"""
    Returns the power-of-two decomposition of a number
Examples
--------
number 42 : 32 + 8 + 2
powers : 5, 3, 1
"""
if number < 1:
raise WaveletException("Number should be greater than 1")
tempArray = list()
current = number
position = 0
while current >= 1.:
power = getExponent(current)
tempArray.append(power)
current = current - scalb(1., power)
position += 1
return tempArray[:position] | 5645c9024dd93aa3bfaf904d7a69f4d46977fb5a | 16,786 |
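# getExponent, scalb and WaveletException are not part of this snippet; a hedged sketch
# of the numeric helpers (scalb maps to math.ldexp) reproduces the docstring example.
import math
def getExponent(value):
    return math.frexp(value)[1] - 1
def scalb(value, power):
    return math.ldexp(value, power)
assert decomposeArbitraryLength(42) == [5, 3, 1]   # 42 = 32 + 8 + 2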
def ax_draw_macd2(axes, ref, kdata, n1=12, n2=26, n3=9):
"""绘制MACD
:param axes: 指定的坐标轴
:param KData kdata: KData
:param int n1: 指标 MACD 的参数1
:param int n2: 指标 MACD 的参数2
:param int n3: 指标 MACD 的参数3
"""
macd = MACD(CLOSE(kdata), n1, n2, n3)
bmacd, fmacd, smacd = macd.getResult(0), macd.getResult(1), macd.getResult(2)
text = 'MACD(%s,%s,%s) DIF:%.2f, DEA:%.2f, BAR:%.2f'%(n1,n2,n3,fmacd[-1],smacd[-1],bmacd[-1])
#axes.text(0.01,0.97, text, horizontalalignment='left', verticalalignment='top', transform=axes.transAxes)
total = len(kdata)
x = [i for i in range(total)]
y = bmacd
y1,y2,y3 = [y[0]],[y[0]],[y[0]]
for i in range(1, total):
if ref[i]-ref[i-1]>0 and y[i]-y[i-1]>0:
y2.append(y[i])
y1.append('-')
y3.append('-')
elif ref[i]-ref[i-1]<0 and y[i]-y[i-1]<0:
y3.append(y[i])
y1.append('-')
y2.append('-')
else:
y1.append(y[i])
y2.append('-')
y3.append('-')
style = gcf().get_style(axes)
bar = Bar(subtitle=text, title_pos='10%', title_top='8%')
bar.add('1', x, y1, is_stack=True, is_legend_show=False, **style)
bar.add('2', x, y2, is_stack=True, is_legend_show=False, **style)
bar.add('3', x, y3, is_stack=True, is_legend_show=False, **style)
axes.add(bar)
fmacd.plot(axes=axes, line_type='dotted')
smacd.plot(axes=axes)
gcf().add_axis(axes)
return gcf() | 3bcb73756211a8906f3bf601207092177aa45ade | 16,787 |
import scipy
def scipy_bfgs(
criterion_and_derivative,
x,
*,
convergence_absolute_gradient_tolerance=CONVERGENCE_ABSOLUTE_GRADIENT_TOLERANCE,
stopping_max_iterations=STOPPING_MAX_ITERATIONS,
norm=np.inf,
):
"""Minimize a scalar function of one or more variables using the BFGS algorithm.
For details see :ref:`list_of_scipy_algorithms`.
"""
options = {
"gtol": convergence_absolute_gradient_tolerance,
"maxiter": stopping_max_iterations,
"norm": norm,
}
res = scipy.optimize.minimize(
fun=criterion_and_derivative,
x0=x,
method="BFGS",
jac=True,
options=options,
)
return process_scipy_result(res) | e1d61454e7ea782d37b4ab222599c69b2c89df1b | 16,788 |
from random import shuffle
import six
def assign_to_coders_backend(sample,
limit_to_unassigned,
shuffle_pieces_before_assigning,
assign_each_piece_n_times,
max_assignments_per_piece,
coders, max_pieces_per_coder,
creation_time, creator):
"""Assignment to coders currently uses the following algorithm:
#. Get a list of all pieces in the sample.
#. If "shuffle pieces before assigning" is checked, shuffle the list of pieces
#. Make a numbering of "target coders" for this assignment, determine a
coder whose "turn" it is.
#. For each piece in the list of pieces, do the following:
#. If "limit to unassigned" is checked, and the piece is assigned to
someone, continue to the next piece.
#. Find how often this piece has already been assigned as
``n_piece_assignments``.
#. Determine number of new assignments *n* for this piece as::
n = min(
max_assignments_per_piece-n_piece_assignments,
assign_each_piece_n_times))
#. Do the following *n* times:
#. Try to assign the piece to the coder whose 'turn' it is.
#. If that coder already has this article assigned, go
round-robin among coders until someone does not have the article
assigned to them.
#. If no-one is found, skip this piece.
#. Advance the "turn", taking into account ``pieces_per_coder``.
If all coders have reached their ``pieces_per_coder`` (in this
assignment round), stop.
"""
log_lines = []
coder_idx_to_count = {}
num_coders = len(coders)
pieces = sample.pieces.all()
if shuffle_pieces_before_assigning:
pieces = list(pieces)
shuffle(pieces)
quit_flag = False
coder_idx = 0
for piece in pieces:
n_piece_assignments = CodingAssignment.objects.filter(
sample=sample, piece=piece).count()
if (limit_to_unassigned and n_piece_assignments):
log_lines.append("%s already assigned to someone, skipping."
% six.text_type(piece))
continue
assign_times = assign_each_piece_n_times
if max_assignments_per_piece is not None:
max_assign_times = assign_times = max(
0,
max_assignments_per_piece
- n_piece_assignments)
assign_times = min(
max_assign_times,
assign_times)
if assign_times == 0:
log_lines.append("Piece '%s' has reached max assignment count, skipping."
% six.text_type(piece))
continue
for i_assignment in range(assign_times):
local_coder_idx = coder_idx
assignment_tries = 0
# was this piece already assigned to this coder? (if so, try next)
# Note that, in its desperation, this may assign a few more items
# to a coder than are technically allowed by their limit.
while (
CodingAssignment.objects.filter(
sample=sample, piece=piece,
coder=coders[local_coder_idx]).count()
and assignment_tries < num_coders):
local_coder_idx = (local_coder_idx + 1) % num_coders
assignment_tries += 1
if assignment_tries >= num_coders:
log_lines.append("Piece '%s' already assigned "
"to all coders, skipping." % six.text_type(piece))
break
assmt = CodingAssignment()
assmt.coder = coders[local_coder_idx]
assmt.piece = piece
assmt.sample = sample
assmt.state = assignment_states.not_started
assmt.latest_state_time = creation_time
assmt.creation_time = creation_time
assmt.creator = creator
assmt.save()
coder_idx_to_count[local_coder_idx] = \
coder_idx_to_count.get(local_coder_idx, 0) + 1
# {{{ advance coder turn
find_coder_tries = 0
while find_coder_tries < num_coders:
coder_idx = (coder_idx + 1) % num_coders
if (
max_pieces_per_coder is None
or coder_idx_to_count.get(coder_idx, 0)
< max_pieces_per_coder):
break
find_coder_tries += 1
if find_coder_tries >= num_coders:
log_lines.append("All coders have reached their item limit, "
"stopping.")
quit_flag = True
break
# }}}
if quit_flag:
break
for coder_idx, coder in enumerate(coders):
log_lines.append("%s: %d new items assigned"
% (coder, coder_idx_to_count.get(coder_idx, 0)))
return log_lines | ffe59dce1b85f7b77e652a1823f298b643d104c7 | 16,789 |
def mask_rcnn_heads_add_mask_rcnn_losses(model, blob_mask):
"""Add Mask R-CNN specific losses."""
loss_mask = model.net.SigmoidCrossEntropyLoss(
[blob_mask, 'masks_int32'],
'loss_mask',
scale=model.GetLossScale() * cfg.MRCNN.WEIGHT_LOSS_MASK
)
loss_gradients = blob_utils_get_loss_gradients(model, [loss_mask])
model.AddLosses('loss_mask')
return loss_gradients | 1f94662948d2576874ca4bb13a602e0a0482d787 | 16,790 |
def parse_ns_headers(ns_headers):
"""Ad-hoc parser for Netscape protocol cookie-attributes.
The old Netscape cookie format for Set-Cookie can for instance contain
an unquoted "," in the expires field, so we have to use this ad-hoc
parser instead of split_header_words.
XXX This may not make the best possible effort to parse all the crap
that Netscape Cookie headers contain. Ronald Tschalar's HTTPClient
parser is probably better, so could do worse than following that if
this ever gives any trouble.
Currently, this is also used for parsing RFC 2109 cookies.
"""
known_attrs = ("expires", "domain", "path", "secure",
# RFC 2109 attrs (may turn up in Netscape cookies, too)
"version", "port", "max-age")
result = []
for ns_header in ns_headers:
pairs = []
version_set = False
# XXX: The following does not strictly adhere to RFCs in that empty
# names and values are legal (the former will only appear once and will
# be overwritten if multiple occurrences are present). This is
# mostly to deal with backwards compatibility.
for ii, param in enumerate(ns_header.split(';')):
param = param.strip()
key, sep, val = param.partition('=')
key = key.strip()
if not key:
if ii == 0:
break
else:
continue
# allow for a distinction between present and empty and missing
# altogether
val = val.strip() if sep else None
if ii != 0:
lc = key.lower()
if lc in known_attrs:
key = lc
if key == "version":
# This is an RFC 2109 cookie.
if val is not None:
val = strip_quotes(val)
version_set = True
elif key == "expires":
# convert expires date to seconds since epoch
if val is not None:
val = http2time(strip_quotes(val)) # None if invalid
pairs.append((key, val))
if pairs:
if not version_set:
pairs.append(("version", "0"))
result.append(pairs)
return result | 91d1006d6495b1ad86ff65abbc1575d9c759f183 | 16,791 |
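# Usage sketch on a Netscape-style cookie header (no expires field, so no date-parsing
# helpers are needed); note the implicit "version" attribute appended for RFC 2109.
print(parse_ns_headers(["SID=31d4d96e; Path=/; Secure"]))
# [[('SID', '31d4d96e'), ('path', '/'), ('secure', None), ('version', '0')]]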
def same_kind_right_null(a: DataType, _: Null) -> bool:
"""Return whether `a` is nullable."""
return a.nullable | 005e9d62702d8f9c6d1e1a4911c7dedf7d81bb73 | 16,792 |
def unary_col(op, v):
"""
interpretor for executing unary operator expressions on columnars
"""
if op == "+":
return v
if op == "-":
return compute.subtract(0.0, v)
if op.lower() == "not":
return compute.invert(v)
raise Exception("unary op not implemented") | ff4eec1f333cd0425cb1b7c533ec4dc94179512e | 16,793 |
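# Usage sketch, assuming `compute` in the snippet above is pyarrow.compute as in the
# surrounding module.
import pyarrow as pa
import pyarrow.compute as compute
v = pa.array([1.5, -2.0, 3.0])
print(unary_col("-", v))                            # negated values via compute.subtract(0.0, v)
print(unary_col("not", pa.array([True, False])))    # inverted booleans via compute.invert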
def test_start_sep_graph() -> nx.Graph:
"""test graph with known clique partition that needs start_separate"""
G = nx.Graph()
G.add_nodes_from(range(6))
G.add_edges_from([(0, 1, {'weight': 1.0}), (0, 2, {'weight': -10}), (0, 3, {'weight': 1}), (0, 4, {'weight': -10}), (0, 5, {'weight': -10}),
(1, 2, {'weight': 1.2}), (1, 3, {'weight': -10}), (1, 4, {'weight': -10}), (1, 5, {'weight': -10}),
(2, 3, {'weight': 1}), (2, 4, {'weight': -1}), (2, 5, {'weight': 0.5}),
(3, 4, {'weight': 0.5}), (3, 5, {'weight': -1})])
return G | 84bd5a140ff7c8882513395a305f69d64d1830a7 | 16,794 |
def structure(table_toplevels):
"""
Accepts an ordered sequence of TopLevel instances and returns a navigable object structure representation of the
TOML file.
"""
table_toplevels = tuple(table_toplevels)
obj = NamedDict()
last_array_of_tables = None # The Name of the last array-of-tables header
for toplevel in table_toplevels:
if isinstance(toplevel, toplevels.AnonymousTable):
obj[''] = toplevel.table_element
elif isinstance(toplevel, toplevels.Table):
if last_array_of_tables and toplevel.name.is_prefixed_with(last_array_of_tables):
seq = obj[last_array_of_tables]
unprefixed_name = toplevel.name.without_prefix(last_array_of_tables)
seq[-1] = CascadeDict(seq[-1], NamedDict({unprefixed_name: toplevel.table_element}))
else:
obj[toplevel.name] = toplevel.table_element
else: # It's an ArrayOfTables
if last_array_of_tables and toplevel.name != last_array_of_tables and \
toplevel.name.is_prefixed_with(last_array_of_tables):
seq = obj[last_array_of_tables]
unprefixed_name = toplevel.name.without_prefix(last_array_of_tables)
if unprefixed_name in seq[-1]:
seq[-1][unprefixed_name].append(toplevel.table_element)
else:
cascaded_with = NamedDict({unprefixed_name: [toplevel.table_element]})
seq[-1] = CascadeDict(seq[-1], cascaded_with)
else:
obj.append(toplevel.name, toplevel.table_element)
last_array_of_tables = toplevel.name
return obj | c34590f604d52ff4bfcf3cf1bae1fc41a7a1f3ec | 16,795 |
import math
def h(q):
"""Binary entropy func"""
if q in {0, 1}:
return 0
return (q * math.log(1 / q, 2)) + ((1 - q) * math.log(1 / (1 - q), 2)) | ad3d02d6e7ddf622c16ec8df54752ac5c77f8972 | 16,796 |
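# Worked examples: the binary entropy peaks at one bit for a fair coin and
# vanishes at the deterministic extremes.
assert h(0) == 0 and h(1) == 0
assert h(0.5) == 1.0
assert abs(h(0.25) - 0.8112781244591328) < 1e-12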
def has_next_page(page_info: dict) -> bool:
"""
Extracts value from a dict with hasNextPage key, raises an error if the key is not available
:param page_info: pagination info
:return: a bool indicating if response hase a next page
"""
has_next_page = page_info.get('hasNextPage')
if has_next_page is None:
raise KeyNotFoundException('hasNextPage key not available')
else:
return has_next_page | 13c7bf0096127e054adaa8a331d2168bfb76c1d3 | 16,797 |
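# Usage sketch against a hypothetical GraphQL-style pageInfo payload.
assert has_next_page({'hasNextPage': True}) is True
assert has_next_page({'hasNextPage': False}) is False
# has_next_page({}) raises KeyNotFoundException('hasNextPage key not available')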
def _stuw_code(current_name=None):
""""
Zoekt door TYPESTUW naar de naam van het stuwtype, geeft attribuut waarde uit DAMO
"""
if current_name not in TYPESTUW.values():
return 99
for i, name in TYPESTUW.items():
if name == current_name:
return i | f0444885fd9956bdb150442dc1de7de09a0ac693 | 16,798 |
def _build_init_nodes(context, device):
"""
Build initial inputs for beam search algo
"""
decoder_input = _prepare_init_inputs(context, device)
root_node = BeamSearchNode(None, None, decoder_input, 0, len(context))
return [root_node] | 009cf7b09f39eb5c9722015d310ecab0b32f7c59 | 16,799 |