content (stringlengths 35 to 762k) | sha1 (stringlengths 40) | id (int64 0 to 3.66M)
---|---|---|
def atomic_coordinates_as_json(pk):
"""Get atomic coordinates from database."""
subset = models.Subset.objects.get(pk=pk)
vectors = models.NumericalValue.objects.filter(
datapoint__subset=subset).filter(
datapoint__symbols__isnull=True).order_by(
'datapoint_id', 'counter')
data = {'vectors':
[[x.formatted('.10g') for x in vectors[:3]],
[x.formatted('.10g') for x in vectors[3:6]],
[x.formatted('.10g') for x in vectors[6:9]]]}
# Here counter=1 filters out the first six entries
symbols = models.Symbol.objects.filter(
datapoint__subset=subset).filter(counter=1).order_by(
'datapoint_id').values_list('value', flat=True)
coords = models.NumericalValue.objects.filter(
datapoint__subset=subset).filter(
datapoint__symbols__counter=1).select_related('error').order_by(
'counter', 'datapoint_id')
tmp = models.Symbol.objects.filter(
datapoint__subset=subset).annotate(
num=models.models.Count('datapoint__symbols')).filter(
num=2).first()
if tmp:
data['coord-type'] = tmp.value
data['coordinates'] = []
N = int(len(coords)/3)
for symbol, coord_x, coord_y, coord_z in zip(
symbols, coords[:N], coords[N:2*N], coords[2*N:3*N]):
data['coordinates'].append((symbol,
coord_x.formatted('.9g'),
coord_y.formatted('.9g'),
coord_z.formatted('.9g')))
return data | 515854e789a15e845b0dbcd754e17bedfc0bcf69 | 11,000 |
def additional_bases():
""""Manually added bases that cannot be retrieved from the REST API"""
return [
{
"facility_name": "Koltyr Northern Warpgate",
"facility_id": 400014,
"facility_type_id": 7,
"facility_type": "Warpgate"
},
{
"facility_name": "Koltyr Eastern Warpgate",
"facility_id": 400015,
"facility_type_id": 7,
"facility_type": "Warpgate"
},
{
"facility_name": "Koltyr Southern Warpgate",
"facility_id": 400016,
"facility_type_id": 7,
"facility_type": "Warpgate"
},
{
"facility_name": "Zorja",
"facility_id": 400017,
"facility_type_id": 2,
"facility_type": "Amp Station"
},
{
"facility_name": "Xander",
"facility_id": 400018,
"facility_type_id": 3,
"facility_type": "Bio Lab"
},
{
"facility_name": "Svarog",
"facility_id": 400019,
"facility_type_id": 4,
"facility_type": "Tech Plant"
},
{
"facility_name": "Koltyr Tech Plant Outpost",
"facility_id": 400020,
"facility_type_id": 5,
"facility_type": "Large Outpost"
},
{
"facility_name": "Koltyr Biolab Outpost",
"facility_id": 400021,
"facility_type_id": 5,
"facility_type": "Large Outpost"
},
{
"facility_name": "Koltyr Amp Station Outpost",
"facility_id": 400022,
"facility_type_id": 5,
"facility_type": "Large Outpost"
}
] | e2a5ad97ca1b424466f5ebe340466eaf9f627e7e | 11,001 |
def get_all_label_values(dataset_info):
"""Retrieves possible values for modeled labels from a `Seq2LabelDatasetInfo`.
Args:
dataset_info: a `Seq2LabelDatasetInfo` message.
Returns:
A dictionary mapping each label name to a tuple of its permissible values.
"""
return {
label_info.name: tuple(label_info.values)
for label_info in dataset_info.labels
} | 929db286b3f7ee8917618e9f46feabdff630d3b2 | 11,002 |
import numpy as np
from numpy.typing import ArrayLike
def load_input(file: str) -> ArrayLike:
"""Load the puzzle input and duplicate 5 times in each direction,
adding 1 to the array for each copy.
"""
input = puzzle_1.load_input(file)
input_1x5 = np.copy(input)
for _ in range(4):
input = np.clip(np.mod(input + 1, 10), a_min=1, a_max=None)
input_1x5 = np.concatenate([input_1x5, input], axis=1)
input_5x5 = np.copy(input_1x5)
for _ in range(4):
input_1x5 = np.clip(np.mod(input_1x5 + 1, 10), a_min=1, a_max=None)
input_5x5 = np.concatenate([input_5x5, input_1x5], axis=0)
return input_5x5 | 91b2cd7854a793ebbbfee2400eddb22304fc18bd | 11,003 |
import numpy as np
def _get_xvals(end, dx):
"""Returns a integer numpy array of x-values incrementing by "dx"
and ending with "end".
Args:
end (int)
dx (int)
"""
arange = np.arange(0, end-1+dx, dx, dtype=int)
xvals = arange[1:]
return xvals | 24a4d7b7c470abb881700a1775008d16c35c1fc3 | 11,004 |
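# A minimal usage sketch for _get_xvals above (assumes the function and its
# numpy import are in scope): arange starts at 0, which is then dropped, so
# the result steps by dx and ends with end.
print(_get_xvals(10, 2))  # array([ 2,  4,  6,  8, 10])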
import torch
import torch.nn.functional as F
def top_filtering(logits, top_k=0, top_p=0.0, filter_value=-float('Inf')):
""" Filter a distribution of logits using top-k, top-p (nucleus) and/or threshold filtering
Args:
logits: logits distribution shape (vocabulary size)
top_k: <=0: no filtering, >0: keep only top k tokens with highest probability.
top_p: <=0.0: no filtering, >0.0: keep only a subset S of candidates, where S is the smallest subset
whose total probability mass is greater than or equal to the threshold top_p.
In practice, we select the highest probability tokens whose cumulative probability mass exceeds
the threshold top_p.
"""
# batch support!
if top_k > 0:
values, _ = torch.topk(logits, top_k)
min_values = values[:, -1].unsqueeze(1).repeat(1, logits.shape[-1])
logits = torch.where(logits < min_values,
torch.ones_like(logits, dtype=logits.dtype) * -float('Inf'),
logits)
if top_p > 0.0:
# Compute cumulative probabilities of sorted tokens
sorted_logits, sorted_indices = torch.sort(logits, descending=True)
cumulative_probabilities = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1)
# Remove tokens with cumulative probability above the threshold
sorted_indices_to_remove = cumulative_probabilities > top_p
# Shift the indices to the right to keep also the first token above the threshold
sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[..., :-1].clone()
sorted_indices_to_remove[..., 0] = 0
sorted_logits = sorted_logits.masked_fill_(sorted_indices_to_remove, filter_value)
logits = torch.zeros_like(logits).scatter(1, sorted_indices, sorted_logits)
return logits | 5cbbd9959a80e72364f098fe031e5e3c78485826 | 11,005 |
def get_reference_shift( self, seqID ):
"""Get a ``reference_shift`` attached to a particular ``seqID``.
If none was provided, it will return **1** as default.
:param str seqID: |seqID_param|.
:type shift: Union[:class:`int`, :class:`list`]
:raises:
:TypeError: |indf_error|.
.. rubric:: Example
.. ipython::
In [1]: from rstoolbox.io import parse_rosetta_file
...: import pandas as pd
...: pd.set_option('display.width', 1000)
...: pd.set_option('display.max_columns', 500)
...: df = parse_rosetta_file("../rstoolbox/tests/data/input_ssebig.minisilent.gz",
...: {'sequence': 'C', 'structure': 'C'})
...: df.add_reference_structure('C', df.iloc[0].get_structure('C'))
...: df.add_reference_shift('C', 3)
...: df.get_reference_shift('C')
"""
if not isinstance(self, (pd.DataFrame, pd.Series)):
raise TypeError("Data container has to be a DataFrame/Series or a derived class.")
if self._subtyp != "sequence_frame" and (seqID not in self.get_available_structures() and
seqID not in self.get_available_sequences()):
raise KeyError("Data container does not have data for structure {}".format(seqID))
if seqID in self._reference:
return self._reference[seqID]["sft"]
else:
return 1 | 4a8f9fe683c9cf0085754ca2ebb9132bbae427ea | 11,006 |
import os
import sys
def which(cmd, mode=os.F_OK | os.X_OK, path=None):
"""Given a command, mode, and a PATH string, return the path which
conforms to the given mode on the PATH, or None if there is no such
file.
`mode` defaults to os.F_OK | os.X_OK. `path` defaults to the result
of os.environ.get("PATH"), or can be overridden with a custom search
path.
"""
# Check that a given file can be accessed with the correct mode.
# Additionally check that `file` is not a directory, as on Windows
# directories pass the os.access check.
def _access_check(fn, mode):
return (os.path.exists(fn) and os.access(fn, mode) and
not os.path.isdir(fn))
# Short circuit. If we're given a full path which matches the mode
# and it exists, we're done here.
if _access_check(cmd, mode):
return cmd
path = (path or os.environ.get("PATH", os.defpath)).split(os.pathsep)
if sys.platform == "win32":
# The current directory takes precedence on Windows.
        if os.curdir not in path:
            path.insert(0, os.curdir)
# PATHEXT is necessary to check on Windows.
pathext = os.environ.get("PATHEXT", "").split(os.pathsep)
# See if the given file matches any of the expected path extensions.
# This will allow us to short circuit when given "python.exe".
matches = [cmd for ext in pathext if cmd.lower().endswith(ext.lower())]
# If it does match, only test that one, otherwise we have to try
# others.
files = [cmd] if matches else [cmd + ext.lower() for ext in pathext]
else:
# On other platforms you don't have things like PATHEXT to tell you
# what file suffixes are executable, so just pass on cmd as-is.
files = [cmd]
seen = set()
for dir in path:
dir = os.path.normcase(dir)
if dir not in seen:
seen.add(dir)
for thefile in files:
name = os.path.join(dir, thefile)
if _access_check(name, mode):
return name
return None | 31fff48dd7984de5008bedf8a1da9687111fcfbf | 11,007 |
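# Illustrative call for the which() helper above; the result depends on the
# local PATH, so the printed path (or None) will vary by machine.
print(which("python3"))  # e.g. '/usr/bin/python3', or None if not found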
def load_and_resolve_feature_metadata(eval_saved_model_path: Text,
graph: tf.Graph):
"""Get feature data (feature columns, feature) from EvalSavedModel metadata.
Like load_feature_metadata, but additionally resolves the Tensors in the given
graph.
Args:
eval_saved_model_path: Path to EvalSavedModel, for the purposes of loading
the feature_metadata file.
graph: tf.Graph to resolve the Tensors in.
Returns:
Same as load_feature_metadata, except associated_tensors and features
contain the Tensors resolved in the graph instead of TensorInfos.
"""
result = load_feature_metadata(eval_saved_model_path=eval_saved_model_path)
# Resolve Tensors in graph
result['associated_tensors'] = [
tf.compat.v1.saved_model.get_tensor_from_tensor_info(tensor_info, graph)
for tensor_info in result['associated_tensors']
]
result['features'] = {
k: tf.compat.v1.saved_model.get_tensor_from_tensor_info(v, graph)
for k, v in result['features'].items()
}
return result | 3377d66c962ccccab7b62abf563f88032a8a7b14 | 11,008 |
def greater_than_or_eq(quant1, quant2):
"""Binary function to call the operator"""
return quant1 >= quant2 | 920c28da125b567bc32a149aec6aaade3645ef87 | 11,009 |
def pr_define_role(pe_id,
role=None,
role_type=None,
entity_type=None,
sub_type=None):
"""
Back-end method to define a new affiliates-role for a person entity
@param pe_id: the person entity ID
@param role: the role name
@param role_type: the role type (from pr_role_types), default 9
@param entity_type: limit selection in CRUD forms to this entity type
@param sub_type: limit selection in CRUD forms to this entity sub-type
@return: the role ID
"""
if not pe_id:
return None
s3db = current.s3db
if role_type not in s3db.pr_role_types:
role_type = 9 # Other
data = {"pe_id": pe_id,
"role": role,
"role_type": role_type,
"entity_type": entity_type,
"sub_type": sub_type}
rtable = s3db.pr_role
if role:
query = (rtable.pe_id == pe_id) & \
(rtable.role == role)
duplicate = current.db(query).select(rtable.id,
rtable.role_type,
limitby=(0, 1)).first()
else:
duplicate = None
if duplicate:
if duplicate.role_type != role_type:
# Clear paths if this changes the role type
if str(role_type) != str(OU):
data["path"] = None
s3db.pr_role_rebuild_path(duplicate.id, clear=True)
duplicate.update_record(**data)
record_id = duplicate.id
else:
record_id = rtable.insert(**data)
return record_id | 3f09ac9eca47347b51069a20b7b08b2192e2d452 | 11,010 |
import numpy as np
import pandas as pd
def inherently_superior(df):
"""
Find rows in a dataframe with all values 'inherently superior',
meaning that all values for certain metrics are as high or higher
    than for all other rows.
Parameters
----------
df : DataFrame
Pandas dataframe containing the columns to be compared. The columns
should be in a format in which higher values are superior.
Returns
-------
DataFrame with index of best values and values compared.
"""
# Copy dataframe to prevent altering the columns.
df_copy = df.copy()
# Reset index to reference location of values. Also, convert to numpy.
df_copy.reset_index(inplace=True)
arr = df_copy.values
# Repeat and tile the array for comparison. Given indices [1, 2], arr1 is
# in format [1, 1, 2, 2], and arr2 is in format [1, 2, 1, 2].
arr1 = np.repeat(arr, arr.shape[0], axis=0)
arr2 = np.tile(arr, (arr.shape[0], 1))
# Check if any values are greater than for other rows.
any_arr = np.all(arr1[:, 1:] >= arr2[:, 1:], axis=1)
# Adjust array so that all points at which a row is being compared to itself
# are labeled as superior.
same_idx = np.array(range(0, len(any_arr), arr.shape[0])) + np.array(range(arr.shape[0]))
any_arr[same_idx] = 1
# Concatenate arr1 and array with superior labels.
arr1_any = np.concatenate([arr1, any_arr.reshape(-1, 1)], axis=1)
# Split data at unique indices. Used to check if greater than all other rows.
splits = np.array(np.split(arr1_any, np.unique(arr1[:, 0], return_index=True)[1][1:]))
perc_sup = np.mean(splits[:, :, -1], axis=1)
idx = np.all(splits[:, :, -1], axis=1)
# Choose superior data idx and create dataframe.
columns = df_copy.columns.tolist() + ['perc_sup', 'fully_sup']
data = np.concatenate([arr, perc_sup.reshape(-1, 1), idx.reshape(-1, 1)], axis=1)
arr_df = pd.DataFrame(data, columns=columns)
arr_df.drop('index', axis=1, inplace=True)
arr_df['fully_sup'] = arr_df['fully_sup'].astype(bool)
return arr_df | 02dd6db624efd4f1daa4c0ef4f126c6c60c0376e | 11,011 |
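# A small, hypothetical sketch of inherently_superior above (column names are
# made up). Row 'b' is at least as good as every other row on both metrics,
# so it is the only one flagged fully_sup.
example = pd.DataFrame({'speed': [1.0, 3.0, 2.0],
                        'accuracy': [0.5, 0.9, 0.9]},
                       index=['a', 'b', 'c'])
result = inherently_superior(example)
print(result[result['fully_sup']])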
def LineColourArray():
"""Line colour options array"""
Colour = [
'Black',
'dimgrey',
'darkgrey',
'silver',
'lightgrey',
'maroon',
'darkred',
'firebrick',
'red',
'orangered',
'darkorange',
'orange',
'saddlebrown',
'darkgoldenrod',
'goldenrod',
'gold',
'darkolivegreen',
'olivedrab',
'olive',
'y',
'darkkhaki',
'khaki',
'darkgreen',
'Green',
'limegreen',
'lime',
'mediumspringgreen',
'palegreen',
'greenyellow',
'midnightblue',
'navy',
'darkblue',
'mediumblue',
'blue',
'slateblue',
'indigo',
'purple',
'darkmagenta',
'darkorchid',
'mediumorchid',
'orchid',
'plum',
'crimson',
'deeppink',
'magenta',
'hotpink',
'pink' ]
return Colour | 94f91d17c6e539983ab38ca7fdadd211e6268bfb | 11,012 |
import errno
def os_to_maestral_error(exc, dbx_path=None, local_path=None):
"""
Gets the OSError and tries to add a reasonably informative error message.
.. note::
The following exception types should not typically be raised during syncing:
InterruptedError: Python will automatically retry on interrupted connections.
NotADirectoryError: If raised, this likely is a Maestral bug.
IsADirectoryError: If raised, this likely is a Maestral bug.
:param OSError exc: Python Exception.
:param str dbx_path: Dropbox path of file which triggered the error.
:param str local_path: Local path of file which triggered the error.
:returns: :class:`MaestralApiError` instance or :class:`OSError` instance.
"""
title = 'Cannot upload or download file'
if isinstance(exc, PermissionError):
err_cls = InsufficientPermissionsError # subclass of SyncError
text = 'Insufficient read or write permissions for this location.'
elif isinstance(exc, FileNotFoundError):
err_cls = NotFoundError # subclass of SyncError
text = 'The given path does not exist.'
elif isinstance(exc, FileExistsError):
err_cls = ExistsError # subclass of SyncError
title = 'Could not download file'
text = 'There already is an item at the given path.'
elif isinstance(exc, IsADirectoryError):
err_cls = IsAFolderError # subclass of SyncError
text = 'The given path refers to a folder.'
elif isinstance(exc, NotADirectoryError):
err_cls = NotAFolderError # subclass of SyncError
text = 'The given path refers to a file.'
elif exc.errno == errno.ENAMETOOLONG:
err_cls = PathError # subclass of SyncError
title = 'Could not create local file'
text = 'The file name (including path) is too long.'
elif exc.errno == errno.EFBIG:
err_cls = FileSizeError # subclass of SyncError
title = 'Could not download file'
        text = 'The file size is too large.'
elif exc.errno == errno.ENOSPC:
err_cls = InsufficientSpaceError # subclass of SyncError
title = 'Could not download file'
text = 'There is not enough space left on the selected drive.'
elif exc.errno == errno.ENOMEM:
err_cls = OutOfMemoryError # subclass of MaestralApiError
text = 'Out of memory. Please reduce the number of memory consuming processes.'
else:
return exc
return err_cls(title, text, dbx_path=dbx_path, local_path=local_path) | 7a99ce147e2fbe0a3cc94535ee1d84c9337b3791 | 11,013 |
from typing import Any
def parse_ccu_sys_var(data: dict[str, Any]) -> tuple[str, Any]:
"""Helper to parse type of system variables of CCU."""
# pylint: disable=no-else-return
if data[ATTR_TYPE] == ATTR_HM_LOGIC:
return data[ATTR_NAME], data[ATTR_VALUE] == "true"
if data[ATTR_TYPE] == ATTR_HM_ALARM:
return data[ATTR_NAME], data[ATTR_VALUE] == "true"
elif data[ATTR_TYPE] == ATTR_HM_NUMBER:
return data[ATTR_NAME], float(data[ATTR_VALUE])
elif data[ATTR_TYPE] == ATTR_HM_LIST:
return data[ATTR_NAME], int(data[ATTR_VALUE])
return data[ATTR_NAME], data[ATTR_VALUE] | 8b77dbbaa93739457a2e92aad79ac5b6bd3a6af0 | 11,014 |
def one_time_log_fixture(request, workspace) -> Single_Use_Log:
"""
Pytest Fixture for setting up a single use log file
At test conclusion, runs the cleanup to delete the single use text file
:return: Single_Use_Log class
"""
log_class = Single_Use_Log(workspace)
request.addfinalizer(log_class.cleanup)
return log_class | 73332892ece76ee90c15d84294b70d935e8a2f4c | 11,015 |
import json
def details(request, path):
"""
Returns detailed information on the entity at path.
:param path: Path to the entity (namespaceName/.../.../.../)
:return: JSON Struct: {property1: value, property2: value, ...}
"""
item = CACHE.get(ENTITIES_DETAIL_CACHE_KEY)
# ENTITIES_DETAIL : {"namespaceName": {"name":"", "description": "", "stream":{}, "artifact":"", "dataset":"",
# "application":""}, {}...} Each part in path.split('/') matches the key name in ENTITIES_DETAIL
# The detailed information of entity at path stores in the last dict
for k in path.strip('/').split('/'):
item = item[k]
item["privileges"] = _get_privileges_for_path(request.user, path)
return HttpResponse(json.dumps(item), content_type='application/json') | b460dc76f18f35b48509a1b2d8daa104bc89fbb5 | 11,016 |
def ca_get_container_capability_set(slot, h_container):
"""
Get the container capabilities of the given slot.
:param int slot: target slot number
:param int h_container: target container handle
:return: result code, {id: val} dict of capabilities (None if command failed)
"""
slot_id = CK_SLOT_ID(slot)
cont_id = CK_ULONG(h_container)
cap_ids = AutoCArray()
cap_vals = AutoCArray()
@refresh_c_arrays(1)
def _get_container_caps():
"""Closer for retries to work w/ properties"""
return CA_GetContainerCapabilitySet(
slot_id, cont_id, cap_ids.array, cap_ids.size, cap_vals.array, cap_vals.size
)
ret = _get_container_caps()
return ret, dict(list(zip(cap_ids, cap_vals))) | cf97db8f201d0c5fce12902b92abdc3a819ac394 | 11,017 |
def load_pyfunc(model_file):
"""
Loads a Keras model as a PyFunc from the passed-in persisted Keras model file.
:param model_file: Path to Keras model file.
:return: PyFunc model.
"""
return _KerasModelWrapper(_load_model(model_file)) | eb21f47a55f35bf3707ba7c5cb56e72948d24866 | 11,018 |
def business_days(start, stop):
"""
Return business days between two datetimes (inclusive).
"""
return dt_business_days(start.date(), stop.date()) | 1fa8c38e6cceca448bc988cd0c1eb24a27508a78 | 11,019 |
from xml.dom import minidom
def empty_nzb_document():
""" Creates xmldoc XML document for a NZB file. """
# http://stackoverflow.com/questions/1980380/how-to-render-a-doctype-with-pythons-xml-dom-minidom
imp = minidom.getDOMImplementation()
dt = imp.createDocumentType("nzb", "-//newzBin//DTD NZB 1.1//EN",
"http://www.newzbin.com/DTD/nzb/nzb-1.1.dtd")
doc = imp.createDocument("http://www.newzbin.com/DTD/2003/nzb", "nzb", dt)
# http://stackoverflow.com/questions/2306149/how-to-write-xml-elements-with-namespaces-in-python
doc.documentElement.setAttribute('xmlns',
'http://www.newzbin.com/DTD/2003/nzb')
return doc | 7cd8aa73f201b4f432aa6adaed18d133ec08fa48 | 11,020 |
def get_output_directory(create_statistics=None, undersample=None, oversample=None):
"""
Determines the output directory given the balance of the dataset as well as columns.
Parameters
----------
create_statistics: bool
Whether the std, min and max columns have been created
undersample: bool
Whether the data has been undersampled
oversample: bool
Whether the data has been oversampled
Returns
-------
Output directory
"""
if create_statistics is None:
create_statistics = AppConfig.create_statistics
if undersample is None:
undersample = AppConfig.balance_data
if oversample is None:
oversample = AppConfig.oversample
stat = 'st' if create_statistics else 'ns'
bal = 'us' if undersample else 'ub'
bal = 'os' if oversample else bal
return f'./output/{stat}_{bal}/' | c10859e1eba4afb61d967e56be8a8206f5202618 | 11,021 |
def removePrefixes(word, prefixes):
"""
Attempts to remove the given prefixes from the given word.
Args:
word (string): Word to remove prefixes from.
prefixes (collections.Iterable or string): Prefixes to remove from given word.
Returns:
(string): Word with prefixes removed.
"""
if isinstance(prefixes, str):
return word.split(prefixes)[-1]
for prefix in prefixes:
word = word.split(prefix)[-1]
return word | 6932e5605b11eee004a350c7f9be831d8bb7ca9d | 11,022 |
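# Quick examples for removePrefixes above. Note that str.split keeps only the
# text after the *last* occurrence, so the prefix is removed wherever it
# appears in the word, not just at the start.
print(removePrefixes("pre_value", "pre_"))               # 'value'
print(removePrefixes("foo_bar_baz", ["foo_", "bar_"]))   # 'baz'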
def isSol(res):
"""
    Check if the string is of the form a^i b^j c^k (with i, j, k >= 1)
"""
if not res or res[0] != 'a' or res[-1] != 'c':
return False
l = 0
r = len(res)-1
while res[l] == "a":
l+=1
while res[r] == "c":
r-=1
if r-l+1 <= 0:
return False
    # check that everything between the leading a's and trailing c's is 'b'
    for x in res[l:r+1]:
if x != 'b':
return False
return True | 14030e52a588dc13029602e81a5f2068707bca17 | 11,023 |
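# A few illustrative calls for isSol above: the string must consist of a run
# of 'a', then a run of 'b', then a run of 'c', each non-empty.
print(isSol("aabbcc"))   # True
print(isSol("abc"))      # True
print(isSol("aabcbcc"))  # False ('c' appears before the final run of c's)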
import pandas
def _h1_to_dataframe(h1: Histogram1D) -> pandas.DataFrame:
"""Convert histogram to pandas DataFrame."""
return pandas.DataFrame(
{"frequency": h1.frequencies, "error": h1.errors},
index=binning_to_index(h1.binning, name=h1.name),
) | 28aa8cc36abd21a17e0a30f4bde2bb996753864b | 11,024 |
import numpy as np
def wgt_area_sum(data, lat_wgt, lon_wgt):
    """wgt_area_sum() performs a weighted area sum over a geographical area.
    data: data whose last 2 dimensions are lat and lon. Strictly needs to be a masked array
    lat_wgt: weights over latitude of area (usually cos(lat * pi/180))
    lon_wgt: weights over longitude of area (usually 1)
    Returns a numpy masked array with the last two dimensions summed away.
    The mask is False if no mask was supplied with the input data;
    otherwise the mask is derived from the input data."""
# Get data shape
shp = data.shape
ndims = data.ndim
if(isinstance(lat_wgt, float)):
lat_wgt = [lat_wgt] * shp[ndims - 2]
if(isinstance(lon_wgt, float)):
lon_wgt = [lon_wgt] * shp[ndims - 1]
lat_wgt = np.array(lat_wgt).reshape(len(lat_wgt), 1)
lon_wgt = np.array(lon_wgt)
# Make grid of lon_wgt, lat_wgt with lat and lon coordinates (last 2 axis of data)
wy = np.broadcast_to(lon_wgt, data.shape[ndims - 2:ndims])
wx = np.broadcast_to(lat_wgt, data.shape[ndims - 2:ndims])
# Mask the array
# Get 2D mask from the array
ds = data[0]
for el in shp[1:ndims-2]:
ds = ds[0]
if(isinstance(ds, np.ma.masked_array)):
msk = ds.mask
else:
msk = False
wy = np.ma.masked_array(wy, msk)
wx = np.ma.masked_array(wx, msk)
data_wgt = data * wy * wx
sm_wgt = data_wgt.sum(axis = (ndims - 2, ndims - 1))
# sm_wgt = sm_wgt/np.sum(wy * wx)
return sm_wgt | 725f7f199e634cf56afb846ebff2a0917a92c685 | 11,025 |
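# A hedged sketch of wgt_area_sum above on a tiny 2x3 (lat x lon) grid of
# ones, with cos(lat) latitude weights and unit longitude weights, as the
# docstring suggests. The input is a masked array, as the function expects.
lats = np.array([0.0, 60.0])
grid = np.ma.masked_array(np.ones((2, 3)), mask=False)
lat_wgt = list(np.cos(lats * np.pi / 180.0))  # ~[1.0, 0.5]
print(wgt_area_sum(grid, lat_wgt, 1.0))       # ~4.5 = 3*1.0 + 3*0.5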
import os
def get_files_from_path(path, recurse=False, full_path=True):
"""
Get Files_Path From Input Path
:param full_path: Full path flag
:param path: Input Path
:param recurse: Whether Recursive
:return: List of Files_Path
"""
files_path_list = []
if not os.path.exists(path):
return []
dir_list = SimpleProgressBar(os.listdir(path))
dir_list.show_title("Processing")
for file_path in dir_list:
if full_path:
file_path = os.path.join(path, file_path)
if os.path.isdir(file_path):
if recurse:
files_path_list += get_files_from_path(file_path, recurse=True)
else:
pass
else:
files_path_list.append(file_path)
return files_path_list | 156088cd175a24bdb0bdd04c00fa6229470aab1f | 11,026 |
def load(filename):
"""Load the labels and scores for Hits at K evaluation.
Loads labels and model predictions from files of the format:
Query \t Example \t Label \t Score
:param filename: Filename to load.
:return: list_of_list_of_labels, list_of_list_of_scores
"""
result_labels = []
result_scores = []
current_block_name = ""
current_block_scores = []
current_block_labels = []
with open(filename,'r') as fin:
for line in fin:
splt = line.strip().split("\t")
block_name = splt[0]
block_example = splt[1]
example_label = int(splt[2])
example_score = float(splt[3])
if block_name != current_block_name and current_block_name != "":
result_labels.append(current_block_labels)
result_scores.append(current_block_scores)
current_block_labels = []
current_block_scores = []
current_block_labels.append(example_label)
current_block_scores.append(example_score)
current_block_name = block_name
result_labels.append(current_block_labels)
result_scores.append(current_block_scores)
return result_labels,result_scores | 8d9570d794ebf09eb393342f926a5536dd0c1a75 | 11,027 |
def expanding_sum(a, axis = 0, data = None, state = None):
"""
equivalent to pandas a.expanding().sum().
- works with np.arrays
- handles nan without forward filling.
- supports state parameters
:Parameters:
------------
a : array, pd.Series, pd.DataFrame or list/dict of these
timeseries
axis : int, optional
0/1/-1. The default is 0.
data: None
unused at the moment. Allow code such as func(live, **func_(history)) to work
state: dict, optional
state parameters used to instantiate the internal calculations, based on history prior to 'a' provided.
:Example: agreement with pandas
--------------------------------
>>> from pyg import *; import pandas as pd; import numpy as np
>>> a = pd.Series(np.random.normal(0,1,10000), drange(-9999))
>>> panda = a.expanding().sum(); ts = expanding_sum(a)
>>> assert eq(ts,panda)
:Example: nan handling
----------------------
Unlike pandas, timeseries does not forward fill the nans.
>>> a[a<0.1] = np.nan
>>> panda = a.expanding().sum(); ts = expanding_sum(a)
>>> pd.concat([panda,ts], axis=1)
>>> 0 1
>>> 1993-09-23 NaN NaN
>>> 1993-09-24 NaN NaN
>>> 1993-09-25 0.645944 0.645944
>>> 1993-09-26 2.816321 2.816321
>>> 1993-09-27 2.816321 NaN
>>> ... ...
>>> 2021-02-03 3976.911348 3976.911348
>>> 2021-02-04 3976.911348 NaN
>>> 2021-02-05 3976.911348 NaN
>>> 2021-02-06 3976.911348 NaN
>>> 2021-02-07 3976.911348 NaN
:Example: state management
--------------------------
One can split the calculation and run old and new data separately.
>>> old = a.iloc[:5000]
>>> new = a.iloc[5000:]
>>> ts = expanding_sum(a)
>>> old_ts = expanding_sum_(old)
>>> new_ts = expanding_sum(new, **old_ts)
>>> assert eq(new_ts, ts.iloc[5000:])
:Example: dict/list inputs
---------------------------
>>> assert eq(expanding_sum(dict(x = a, y = a**2)), dict(x = expanding_sum(a), y = expanding_sum(a**2)))
>>> assert eq(expanding_sum([a,a**2]), [expanding_sum(a), expanding_sum(a**2)])
"""
state = state or {}
return first_(_expanding_sum(a, axis = axis, **state)) | ec3fb41784f7ce5ef268ec8e7d8fe8e65f222157 | 11,028 |
def accuracy(output, target, top_k=(1,)):
"""Calculate classification accuracy between output and target.
:param output: output of classification network
:type output: pytorch tensor
:param target: ground truth from dataset
:type target: pytorch tensor
    :param top_k: top k of metric, k is an integer
    :type top_k: tuple of integers
:return: results of top k
:rtype: list
"""
max_k = max(top_k)
batch_size = target.size(0)
_, pred = output.topk(max_k, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in top_k:
correct_k = correct[:k].view(-1).float().sum(0)
res.append(correct_k.mul_(100.0 / batch_size))
return res | 68b7c48e5bd832a637e7a06353c48ffa09b449cd | 11,029 |
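# A small sketch of accuracy above: batch of 4 predictions over 3 classes.
# Two argmax predictions match the target, so top-1 accuracy is 50% and
# top-2 accuracy is 75%.
import torch
logits = torch.tensor([[0.1, 0.8, 0.3],
                       [0.7, 0.2, 0.1],
                       [0.2, 0.3, 0.5],
                       [0.6, 0.3, 0.1]])
targets = torch.tensor([1, 0, 1, 2])
print(accuracy(logits, targets, top_k=(1, 2)))  # [tensor(50.), tensor(75.)]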
from typing import Dict
from typing import Any
import os
import json
def read_configuration_from_file(path: str) -> Dict[str, Any]:
"""
Read the JSON file and return a dict.
:param path: path on file system
:return: raw, unchanged dict
"""
if os.path.isfile(path):
with open(path) as json_file:
return json.load(json_file)
else:
raise ConfigNotFoundException | 57a8ea69507d4941b00e9ec2849084536eb7d44f | 11,030 |
import typing
import logging
def logwrap(
func: typing.Optional[typing.Callable] = None,
*,
log: logging.Logger = _log_wrap_shared.logger,
log_level: int = logging.DEBUG,
exc_level: int = logging.ERROR,
max_indent: int = 20,
spec: typing.Optional[typing.Callable] = None,
blacklisted_names: typing.Optional[typing.List[str]] = None,
blacklisted_exceptions: typing.Optional[typing.List[typing.Type[Exception]]] = None,
log_call_args: bool = True,
log_call_args_on_exc: bool = True,
log_result_obj: bool = True
) -> typing.Union[LogWrap, typing.Callable]:
"""Log function calls and return values. Python 3.4+ version.
:param func: function to wrap
:type func: typing.Optional[typing.Callable]
:param log: logger object for decorator, by default used 'logwrap'
:type log: logging.Logger
:param log_level: log level for successful calls
:type log_level: int
:param exc_level: log level for exception cases
:type exc_level: int
:param max_indent: maximum indent before classic `repr()` call.
:type max_indent: int
:param spec: callable object used as spec for arguments bind.
This is designed for the special cases only,
when impossible to change signature of target object,
but processed/redirected signature is accessible.
Note: this object should provide fully compatible signature
with decorated function, or arguments bind will be failed!
:type spec: typing.Optional[typing.Callable]
:param blacklisted_names: Blacklisted argument names. Arguments with this names will be skipped in log.
:type blacklisted_names: typing.Optional[typing.Iterable[str]]
:param blacklisted_exceptions: list of exceptions, which should be re-raised without producing log record.
:type blacklisted_exceptions: typing.Optional[typing.Iterable[typing.Type[Exception]]]
:param log_call_args: log call arguments before executing wrapped function.
:type log_call_args: bool
:param log_call_args_on_exc: log call arguments if exception raised.
:type log_call_args_on_exc: bool
:param log_result_obj: log result of function call.
:type log_result_obj: bool
:return: built real decorator.
:rtype: _log_wrap_shared.BaseLogWrap
.. versionchanged:: 3.3.0 Extract func from log and do not use Union.
.. versionchanged:: 3.3.0 Deprecation of *args
.. versionchanged:: 4.0.0 Drop of *args
"""
wrapper = LogWrap(
log=log,
log_level=log_level,
exc_level=exc_level,
max_indent=max_indent,
spec=spec,
blacklisted_names=blacklisted_names,
blacklisted_exceptions=blacklisted_exceptions,
log_call_args=log_call_args,
log_call_args_on_exc=log_call_args_on_exc,
log_result_obj=log_result_obj
)
if func is not None:
return wrapper(func)
return wrapper | 4c48dafc6c4f062fd1d165fd30bcc99209eabed3 | 11,031 |
def sum_digits(number):
"""
Write a function named sum_digits which takes a number as input and
returns the sum of the absolute value of each of the number's decimal digits.
"""
return sum(int(n) for n in str(number) if n.isdigit()) | b6d8083a78d67a268316716174723f47d84b2287 | 11,032 |
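# Worked examples for sum_digits above; the minus sign is skipped because
# '-'.isdigit() is False, so negative inputs work as well.
print(sum_digits(1234))   # 10
print(sum_digits(-1234))  # 10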
import cupy
import numpy
def label(input, structure=None, output=None):
"""Labels features in an array.
Args:
input (cupy.ndarray): The input array.
structure (array_like or None): A structuring element that defines
feature connections. ```structure``` must be centersymmetric. If
None, structure is automatically generated with a squared
connectivity equal to one.
output (cupy.ndarray, dtype or None): The array in which to place the
output.
Returns:
label (cupy.ndarray): An integer array where each unique feature in
```input``` has a unique label in the array.
num_features (int): Number of features found.
.. warning::
This function may synchronize the device.
.. seealso:: :func:`scipy.ndimage.label`
"""
if not isinstance(input, cupy.ndarray):
raise TypeError('input must be cupy.ndarray')
if input.dtype.char in 'FD':
raise TypeError('Complex type not supported')
if structure is None:
structure = _generate_binary_structure(input.ndim, 1)
elif isinstance(structure, cupy.ndarray):
structure = cupy.asnumpy(structure)
structure = numpy.array(structure, dtype=bool)
if structure.ndim != input.ndim:
raise RuntimeError('structure and input must have equal rank')
for i in structure.shape:
if i != 3:
raise ValueError('structure dimensions must be equal to 3')
if isinstance(output, cupy.ndarray):
if output.shape != input.shape:
raise ValueError("output shape not correct")
caller_provided_output = True
else:
caller_provided_output = False
if output is None:
output = cupy.empty(input.shape, numpy.int32)
else:
output = cupy.empty(input.shape, output)
if input.size == 0:
# empty
maxlabel = 0
elif input.ndim == 0:
# 0-dim array
maxlabel = 0 if input.item() == 0 else 1
output[...] = maxlabel
else:
if output.dtype != numpy.int32:
y = cupy.empty(input.shape, numpy.int32)
else:
y = output
maxlabel = _label(input, structure, y)
if output.dtype != numpy.int32:
output[...] = y[...]
if caller_provided_output:
return maxlabel
else:
return output, maxlabel | fe3e4b7ee30f7dc1ae0541133f7db3d02c7d3157 | 11,033 |
import functools
def get_experiment_fn(nnObj,data_dir, num_gpus,variable_strategy,use_distortion_for_training=True):
"""Returns an Experiment function.
Experiments perform training on several workers in parallel,
in other words experiments know how to invoke train and eval in a sensible
fashion for distributed training. Arguments passed directly to this
function are not tunable, all other arguments should be passed within
tf.HParams, passed to the enclosed function.
Args:
data_dir: str. Location of the data for input_fns.
num_gpus: int. Number of GPUs on each worker.
variable_strategy: String. CPU to use CPU as the parameter server
and GPU to use the GPUs as the parameter server.
use_distortion_for_training: bool. See cifar10.Cifar10DataSet.
Returns:
A function (tf.estimator.RunConfig, tf.contrib.training.HParams) ->
tf.contrib.learn.Experiment.
Suitable for use by tf.contrib.learn.learn_runner, which will run various
methods on Experiment (train, evaluate) based on information
about the current runner in `run_config`.
"""
def _experiment_fn(run_config, hparams):
"""Returns an Experiment."""
# Create estimator.
train_input_fn = functools.partial(
cifar_main.input_fn,
data_dir,
subset='train',
num_shards=num_gpus,
batch_size=hparams.train_batch_size,
use_distortion_for_training=use_distortion_for_training)
eval_input_fn = functools.partial(
cifar_main.input_fn,
data_dir,
subset='eval',
batch_size=hparams.eval_batch_size,
num_shards=num_gpus)
num_eval_examples = cifar10.Cifar10DataSet.num_examples_per_epoch('eval')
if num_eval_examples % hparams.eval_batch_size != 0:
raise ValueError(
'validation set size must be multiple of eval_batch_size')
train_steps = hparams.train_steps
eval_steps = num_eval_examples // hparams.eval_batch_size
classifier = tf.estimator.Estimator(
model_fn=cifar_main.get_model_fn(nnObj,num_gpus, variable_strategy,
run_config.num_worker_replicas or 1),
config=run_config,
params=hparams)
vail_accuracy=[]
for loop in range(20):
classifier.train(train_input_fn,steps=train_steps)
vail_accuracy.append(classifier.evaluate(eval_input_fn,steps=eval_steps))
print("finished iter:"+str((loop+1)*train_steps))
print("accuracy:")
print(vail_accuracy)
# Create experiment.
return tf.contrib.learn.Experiment(
classifier,
train_input_fn=train_input_fn,
eval_input_fn=eval_input_fn,
train_steps=train_steps,
eval_steps=eval_steps)
return _experiment_fn | 07ddb4ebac493826127464f76fd79ea17e7bf474 | 11,034 |
import numpy as np
def calc_psnr(tar_img, ref_img):
""" Compute the peak signal to noise ratio (PSNR) for an image.
Parameters
----------
tar_img : sitk
Test image.
ref_img : sitk
Ground-truth image.
Returns
-------
psnr : float
The PSNR metric.
References
----------
.. [1] https://en.wikipedia.org/wiki/Peak_signal-to-noise_ratio
"""
tar_vol = tar_img
ref_vol = ref_img
ref_vol, tar_vol = _as_floats(ref_vol, tar_vol)
err = calc_mse(ref_img, tar_img)
return 10 * np.log10((256 ** 2) / err) | 61097170fb439b85583cd8aac8002c70d02c094b | 11,035 |
import networkx as nx
import numpy as np
import os
def celegans(path):
"""Load the neural network of the worm C. Elegans [@watts1998collective].
The neural network consists of around 300 neurons. Each connection
between neurons is associated with a weight (positive integer)
capturing the strength of the connection.
Args:
path: str.
Path to directory which either stores file or otherwise file will
be downloaded and extracted there. Filename is `celegansneural.gml`.
Returns:
Adjacency matrix as a np.darray `x_train` with 297 rows and 297
columns.
"""
path = os.path.expanduser(path)
filename = 'celegansneural.gml'
if not os.path.exists(os.path.join(path, filename)):
url = 'http://www-personal.umich.edu/~mejn/netdata/celegansneural.zip'
maybe_download_and_extract(path, url)
graph = nx.read_gml(os.path.join(path, filename))
x_train = np.zeros([graph.number_of_nodes(), graph.number_of_nodes()],
dtype=np.int)
for i, j in graph.edges():
x_train[i, j] = int(graph[i][j][0]['value'])
return x_train | fc7f63af1b70c58fab7655d33f6d9630d4bd003e | 11,036 |
from typing import Callable
from typing import Dict
from typing import Any
import functools
def glacier_wrap(
f: Callable[..., None],
enum_map: Dict[str, Dict[str, Any]],
) -> Callable[..., None]:
"""
Return the new function which is click-compatible
(has no enum signature arguments) from the arbitrary glacier compatible
function
"""
# Implemented the argument convert logic
@functools.wraps(f)
def wrapped(*args: Any, **kwargs: Any) -> None:
# convert args and kwargs
converted_kwargs = {}
for name, value in kwargs.items():
if name in enum_map:
converted_kwargs[name] = enum_map[name][value]
else:
converted_kwargs[name] = value
return f(*args, **converted_kwargs)
return wrapped | 01f3a90179bb0dba29ffb0b2fa9d91be15e0ee7e | 11,037 |
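# A minimal sketch of glacier_wrap above: the wrapper maps string values from
# the command line back to enum members before calling the original function.
# The Color enum and paint function here are purely illustrative.
from enum import Enum

class Color(Enum):
    RED = 1
    BLUE = 2

def paint(name: str, color: Color) -> None:
    print(name, color)

wrapped = glacier_wrap(paint, {'color': {'red': Color.RED, 'blue': Color.BLUE}})
wrapped(name='wall', color='red')  # prints: wall Color.RED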
def _cluster_spec_to_device_list(cluster_spec, num_gpus_per_worker):
"""Returns a device list given a cluster spec."""
cluster_spec = multi_worker_util.normalize_cluster_spec(cluster_spec)
devices = []
for task_type in ("chief", "worker"):
for task_id in range(len(cluster_spec.as_dict().get(task_type, []))):
if num_gpus_per_worker == 0:
devices.append("/job:%s/task:%d" % (task_type, task_id))
else:
devices.extend([
"/job:%s/task:%d/device:GPU:%i" % (task_type, task_id, gpu_id)
for gpu_id in range(num_gpus_per_worker)
])
return devices | 3032a28f80dbed1fd870e4fc2ea06d724fc529ce | 11,038 |
import pandas as pd
def group_by_time(df, col, by='day', fun='max', args=(), kwargs={}, index='categories'):
""" See <https://pandas.pydata.org/pandas-docs/stable/api.html#groupby>_ for the set of `fun` parameters
available. Examples are: 'count', 'max', 'min', 'median', etc
.. Tip:: Since Access inherits from TimeIntervalTable, the underlaying data format
is a `pandas.DataFrame`, not a `pandas.Series`. Consequently, only the groupby
functions of a generic GroupBy or DataFrameGroupBy are valid. Functions of SeriesGroupBy
are not allowed.
"""
if col == 'index':
t = df.index
else:
t = df.loc[:, col].dt
if by.lower() in ['y', 'year']:
group = df.groupby([t.year])
group = getattr(group, fun)(*args, **kwargs)
group.index.names = ['year']
elif by.lower() in ['m', 'month']:
group = df.groupby([t.year, t.month])
group = getattr(group, fun)(*args, **kwargs)
group.index.names = ['year', 'month']
elif by.lower() in ['d', 'day']:
group = df.groupby([t.year, t.month, t.day])
group = getattr(group, fun)(*args, **kwargs)
group.index.names = ['year', 'month', 'day']
elif by.lower() in ['h', 'hour']:
group = df.groupby([t.year, t.month, t.day, t.hour])
group = getattr(group, fun)(*args, **kwargs)
group.index.names = ['year', 'month', 'day', 'hour']
elif by.lower() in ['m', 'min', 'minute']:
group = df.groupby([t.year, t.month, t.day, t.hour, t.minute])
group = getattr(group, fun)(*args, **kwargs)
group.index.names = ['year', 'month', 'day', 'hour', 'min']
elif by.lower() in ['s', 'sec', 'second']:
group = df.groupby([t.year, t.month, t.day, t.hour, t.minute, t.second])
group = getattr(group, fun)(*args, **kwargs)
group.index.names = ['year', 'month', 'day', 'hour', 'min', 'sec']
else:
raise KeyError('Grouping can be by "year", "month", "day", "min" and "sec" only')
# Choose index
if index == 'categories':
return group
elif index == 'times':
group.index = pd.DatetimeIndex([pd.Timestamp(*i) for i, _ in group.iterrows()])
return group
else:
raise KeyError('Argument "index={}"" is not valid. Options are "categories" or "times"') | 6695d285b52757ee7dfd32ad5943aa433504322f | 11,039 |
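# A hedged sketch of group_by_time above: daily maxima of an hourly series,
# grouped on the DatetimeIndex and returned with a time index.
import numpy as np
import pandas as pd
hourly = pd.DataFrame({'value': np.arange(48)},
                      index=pd.date_range('2021-01-01', periods=48, freq='h'))
print(group_by_time(hourly, 'index', by='day', fun='max', index='times'))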
def fetch(url, params=None, keepalive=False, requireValidCert=False,
debug=False):
"""
Fetches the desired @url using an HTTP GET request and appending and
@params provided in a dictionary.
If @keepalive is False, a fresh connection will be made for this request.
If @requireValidCert is True, then an exception is thrown if the remote
server cannot provide a valid TLS certificate.
If @keepalive is False, connections are closed and so subsequent connections
must make fresh (cold) HTTPS connections.
@returns the result as a dictionary, decoded from server-provided JSON.
@raises an exception if there are any problems connecting to the remote
    server, receiving a valid HTTP status 200 response, or decoding the
resulting JSON response.
"""
# Set the certificate verification flag
httpClient.disable_ssl_certificate_validation = not requireValidCert
# Assemble the URL
url = getUrl(url, params)
if debug:
print "Fetching " + url
# Fetch the URL with a GET request.
response, content = httpClient.request(url, "GET")
# Check the status code.
if response.status != 200:
m = "Remote service reported an error (status:{} {}) for "\
"URL {}".format(response.status, response.reason, url)
raise Exception(m)
# Close the connection if requested.
if not keepalive:
map(lambda (k,v): v.close(), httpClient.connections.iteritems())
# Parse the response
return json.loads(content) | 8edfa089e9ae40d32f4843e6c684b3a06783150a | 11,040 |
def param_rischDE(fa, fd, G, DE):
"""
Solve a Parametric Risch Differential Equation: Dy + f*y == Sum(ci*Gi, (i, 1, m)).
Given a derivation D in k(t), f in k(t), and G
= [G1, ..., Gm] in k(t)^m, return h = [h1, ..., hr] in k(t)^r and
a matrix A with m + r columns and entries in Const(k) such that
Dy + f*y = Sum(ci*Gi, (i, 1, m)) has a solution y
in k(t) with c1, ..., cm in Const(k) if and only if y = Sum(dj*hj,
(j, 1, r)) where d1, ..., dr are in Const(k) and (c1, ..., cm,
d1, ..., dr) is a solution of Ax == 0.
Elements of k(t) are tuples (a, d) with a and d in k[t].
"""
m = len(G)
q, (fa, fd) = weak_normalizer(fa, fd, DE)
# Solutions of the weakly normalized equation Dz + f*z = q*Sum(ci*Gi)
# correspond to solutions y = z/q of the original equation.
gamma = q
G = [(q*ga).cancel(gd, include=True) for ga, gd in G]
a, (ba, bd), G, hn = prde_normal_denom(fa, fd, G, DE)
# Solutions q in k<t> of a*Dq + b*q = Sum(ci*Gi) correspond
# to solutions z = q/hn of the weakly normalized equation.
gamma *= hn
A, B, G, hs = prde_special_denom(a, ba, bd, G, DE)
# Solutions p in k[t] of A*Dp + B*p = Sum(ci*Gi) correspond
# to solutions q = p/hs of the previous equation.
gamma *= hs
g = A.gcd(B)
a, b, g = A.quo(g), B.quo(g), [gia.cancel(gid*g, include=True) for
gia, gid in G]
# a*Dp + b*p = Sum(ci*gi) may have a polynomial solution
# only if the sum is in k[t].
q, M = prde_linear_constraints(a, b, g, DE)
# q = [q1, ..., qm] where qi in k[t] is the polynomial component
# of the partial fraction expansion of gi.
# M is a matrix with m columns and entries in k.
# Sum(fi*gi, (i, 1, m)), where f1, ..., fm are elements of k,
# is a polynomial if and only if M*Matrix([f1, ..., fm]) == 0,
# in which case the sum is equal to Sum(fi*qi).
M, _ = constant_system(M, zeros(M.rows, 1), DE)
# M is a matrix with m columns and entries in Const(k).
# Sum(ci*gi) is in k[t] for c1, ..., cm in Const(k)
# if and only if M*Matrix([c1, ..., cm]) == 0,
# in which case the sum is Sum(ci*qi).
## Reduce number of constants at this point
V = M.nullspace()
# V = [v1, ..., vu] where each vj is a column matrix with
# entries aj1, ..., ajm in Const(k).
# Sum(aji*gi) is in k[t] and equal to Sum(aji*qi) (j = 1, ..., u).
# Sum(ci*gi) is in k[t] if and only is ci = Sum(dj*aji)
# (i = 1, ..., m) for some d1, ..., du in Const(k).
# In that case,
# Sum(ci*gi) = Sum(ci*qi) = Sum(dj*Sum(aji*qi)) = Sum(dj*rj)
# where rj = Sum(aji*qi) (j = 1, ..., u) in k[t].
if not V: # No non-trivial solution
return [], eye(m)
Mq = Matrix([q]) # A single row.
r = [(Mq*vj)[0] for vj in V] # [r1, ..., ru]
# Solutions of a*Dp + b*p = Sum(dj*rj) correspond to solutions
# y = p/gamma of the initial equation with ci = Sum(dj*aji).
try:
# We try n=5. At least for prde_spde, it will always
# terminate no matter what n is.
n = bound_degree(a, b, r, DE, parametric=True)
except NotImplementedError:
# A temporary bound is set. Eventually, it will be removed.
# the currently added test case takes large time
# even with n=5, and much longer with large n's.
n = 5
h, B = param_poly_rischDE(a, b, r, n, DE)
# h = [h1, ..., hv] in k[t]^v and and B is a matrix with u + v
# columns and entries in Const(k) such that
# a*Dp + b*p = Sum(dj*rj) has a solution p of degree <= n
# in k[t] if and only if p = Sum(ek*hk) where e1, ..., ev are in
# Const(k) and B*Matrix([d1, ..., du, e1, ..., ev]) == 0.
# The solutions of the original equation for ci = Sum(dj*aji)
# (i = 1, ..., m) are then y = Sum(ek*hk, (k, 1, v))/gamma.
## Build combined relation matrix with m + u + v columns.
A = -eye(m)
for vj in V:
A = A.row_join(vj)
A = A.row_join(zeros(m, len(h)))
A = A.col_join(zeros(B.rows, m).row_join(B))
## Eliminate d1, ..., du.
W = A.nullspace()
# W = [w1, ..., wt] where each wl is a column matrix with
# entries blk (k = 1, ..., m + u + v) in Const(k).
# The vectors (bl1, ..., blm) generate the space of those
# constant families (c1, ..., cm) for which a solution of
# the equation Dy + f*y == Sum(ci*Gi) exists. They generate
# the space and form a basis except possibly when Dy + f*y == 0
# is solvable in k(t}. The corresponding solutions are
# y = Sum(blk'*hk, (k, 1, v))/gamma, where k' = k + m + u.
v = len(h)
M = Matrix([wl[:m] + wl[-v:] for wl in W]) # excise dj's.
N = M.nullspace()
# N = [n1, ..., ns] where the ni in Const(k)^(m + v) are column
# vectors generating the space of linear relations between
# c1, ..., cm, e1, ..., ev.
C = Matrix([ni[:] for ni in N]) # rows n1, ..., ns.
return [hk.cancel(gamma, include=True) for hk in h], C | afb910a9590195fa637be9c64382419c1c79a885 | 11,041 |
from sys import path
import yaml
def main(df: pyam.IamDataFrame) -> pyam.IamDataFrame:
"""Main function for validation and processing (for the ARIADNE-intern instance)"""
# load list of allowed scenario names
with open(path / "scenarios.yml", "r") as stream:
scenario_list = yaml.load(stream, Loader=yaml.FullLoader)
# validate list of submitted scenarios
illegal_scens = [s for s in df.scenario if s not in scenario_list]
if illegal_scens:
raise_error("scenarios", illegal_scens)
# call validation function for variables, regions and subannual time resolution
df = _validate(df)
# call validation function for meta indicators
df = _validate_meta(df, ALLOWED_META_ARIADNE)
return df | 8872d698e0e00baedfe24784b2db6b206ff32e04 | 11,042 |
import torch
def huber_loss(x, delta=1.):
""" Standard Huber loss of parameter delta
https://en.wikipedia.org/wiki/Huber_loss
returns 0.5 * x^2 if |a| <= \delta
\delta * (|a| - 0.5 * \delta) o.w.
"""
if torch.abs(x) <= delta:
return 0.5 * (x ** 2)
else:
return delta * (torch.abs(x) - 0.5 * delta) | b3493eb9d4e38fa36f92db80dc52a47c32caf3c9 | 11,043 |
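# Quick checks for huber_loss above (scalar tensors only, since
# `torch.abs(x) <= delta` has a single truth value only for one-element tensors).
import torch
print(huber_loss(torch.tensor(0.5)))  # tensor(0.1250) = 0.5 * 0.5**2
print(huber_loss(torch.tensor(3.0)))  # tensor(2.5000) = 1.0 * (3.0 - 0.5)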
def licenses_mapper(license, licenses, package): # NOQA
"""
Update package licensing and return package based on the `license` and
`licenses` values found in a package.
Licensing data structure has evolved over time and is a tad messy.
https://docs.npmjs.com/files/package.json#license
license(s) is either:
- a string with:
- an SPDX id or expression { "license" : "(ISC OR GPL-3.0)" }
- some license name or id
- "SEE LICENSE IN <filename>"
- (Deprecated) an array or a list of arrays of type, url.
- "license": "UNLICENSED" means this is proprietary
"""
declared_license = get_declared_licenses(license) or []
declared_license.extend(get_declared_licenses(licenses) or [])
package.declared_license = declared_license
return package | 5568c323b342cc09d966ddef3455381abdca1ccc | 11,044 |
def send_command(target, data):
"""sends a nudge api command"""
url = urljoin(settings.NUDGE_REMOTE_ADDRESS, target)
req = urllib2.Request(url, urllib.urlencode(data))
try:
return urllib2.urlopen(req)
except urllib2.HTTPError, e:
raise CommandException(
'An exception occurred while contacting %s: %s' %
(url, e), e) | fc6967f84568b755db7f132f5fc511ef9687369f | 11,045 |
def logistic_log_partial_ij(x_i, y_i, beta, j):
"""i is index of point and j is index of derivative"""
return (y_i - logistic(dot(x_i, beta))) * x_i[j] | a24f704bc3178c6f2d8b37ad075f1beea3666964 | 11,046 |
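# A self-contained sketch for logistic_log_partial_ij above. `logistic` and
# `dot` are assumed helpers (sigmoid and dot product); the versions below are
# illustrative stand-ins.
import math

def logistic(z):
    return 1.0 / (1.0 + math.exp(-z))

def dot(v, w):
    return sum(v_k * w_k for v_k, w_k in zip(v, w))

print(logistic_log_partial_ij([1.0, 2.0], 1, [0.1, -0.2], j=0))  # ~0.574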
def expected_win(theirs, mine):
"""Compute the expected win rate of my strategy given theirs"""
assert abs(theirs.r + theirs.p + theirs.s - 1) < 0.001
assert abs(mine.r + mine.p + mine.s - 1) < 0.001
wins = theirs.r * mine.p + theirs.p * mine.s + theirs.s * mine.r
losses = theirs.r * mine.s + theirs.p * mine.r + theirs.s * mine.p
return wins - losses | 92de2010287e0c027cb18c3dd01d95353e4653c4 | 11,047 |
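# A worked example for expected_win above. The strategies only need .r/.p/.s
# attributes, so a namedtuple serves as a stand-in. Against a pure-rock
# opponent, always playing paper wins every round.
from collections import namedtuple

Strategy = namedtuple('Strategy', ['r', 'p', 's'])
rock_only = Strategy(r=1.0, p=0.0, s=0.0)
paper_only = Strategy(r=0.0, p=1.0, s=0.0)
print(expected_win(rock_only, paper_only))  # 1.0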
def get_first_where(data, compare):
"""
    Gets the first dictionary in the list that fits the compare-dictionary.
    :param data: List of dictionaries
    :param compare: Dictionary with keys for comparison {'key': 'expected value'}
    :return: the first dictionary that fits compare
"""
l = get_all_where(data, compare)
if len(l) < 1:
raise Exception('Data not found! (' + str(compare) + ')')
return l[0] | fc961d7154aa265efd101a658f668ad2025c121f | 11,048 |
import numpy
def parse_megam_weights(s, features_count, explicit=True):
"""
Given the stdout output generated by ``megam`` when training a
model, return a ``numpy`` array containing the corresponding weight
vector. This function does not currently handle bias features.
"""
if numpy is None:
raise ValueError("This function requires that numpy be installed")
assert explicit, "non-explicit not supported yet"
lines = s.strip().split("\n")
weights = numpy.zeros(features_count, "d")
for line in lines:
if line.strip():
fid, weight = line.split()
weights[int(fid)] = float(weight)
return weights | db172935fe7af892b420d515391565ccc2b44c55 | 11,049 |
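# A small sketch of parse_megam_weights above: megam's explicit output is one
# "feature_id weight" pair per line, and unmentioned features stay at 0.
sample_output = "0 0.75\n2 -1.25\n"
print(parse_megam_weights(sample_output, 4))  # [ 0.75  0.   -1.25  0.  ]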
from typing import Counter
def project_statistics(contributions):
"""Returns a dictionary containing statistics about all projects."""
projects = {}
for contribution in contributions:
# Don't count unreviewed contributions
if contribution["status"] == "unreviewed":
continue
project = contribution["repository"]
utopian_vote = contribution["utopian_vote"]
# Set default in case category doesn't exist
projects.setdefault(
project, {
"project": project,
"average_score": [],
"average_without_0": [],
"voted": 0,
"not_voted": 0,
"unvoted": 0,
"task-requests": 0,
"moderators": [],
"average_payout": [],
"total_payout": 0,
"utopian_total": []
}
)
# Check if contribution was voted on or unvoted
if contribution["status"] == "unvoted":
projects[project]["unvoted"] += 1
projects[project]["not_voted"] += 1
elif contribution["voted_on"]:
projects[project]["voted"] += 1
else:
projects[project]["not_voted"] += 1
# If contribution was a task request count this
if "task" in contribution["category"]:
projects[project]["task-requests"] += 1
# Add moderator and score
projects[project]["moderators"].append(contribution["moderator"])
projects[project]["average_score"].append(contribution["score"])
projects[project]["total_payout"] += contribution["total_payout"]
projects[project]["utopian_total"].append(utopian_vote)
if contribution["score"] > 0:
projects[project]["average_without_0"].append(
contribution["score"])
project_list = []
for project, value in projects.items():
# Set new keys and append value to list
value["reviewed"] = value["voted"] + value["not_voted"]
value["average_score"] = average(value["average_score"])
value["average_without_0"] = average(value["average_without_0"])
value["average_payout"] = value["total_payout"] / value["reviewed"]
value["moderators"] = Counter(value["moderators"]).most_common()
value["pct_voted"] = percentage(value["reviewed"], value["voted"])
# Add Utopian.io's vote statistics
value["utopian_total"] = [vote for vote in value["utopian_total"]
if vote != 0]
value["average_utopian_vote"] = average(value["utopian_total"])
value["utopian_total"] = sum(value["utopian_total"])
project_list.append(value)
return {"projects": project_list} | 91c27b504fc974b26f4e76b8a3f78e3665a21efa | 11,050 |
def exportSDFVisual(visualobj, linkobj, visualdata, indentation, modelname):
"""Simple wrapper for visual data of links.
The visual object is required to determine the position (pose) of the
object.
If relative poses are used the data found in visualdata (key pose) is used.
Otherwise the pose of the visual object will be combined with all
collected links up to the rootobject (see
phobos.utils.editing.getCombinedTransform).
Args:
visualobj: object to be used for pose
visualdata: data as provided by dictionary (should contain name,
geometry)
indentation: indentation at current level
relative: True for usage of sdf relative pathing
modelname: the name of the model (required for geometry)
linkobj:
Returns:
: str -- writable xml line
"""
tagger = xmlTagger(initial=indentation)
tagger.descend('visual', attribs={'name': visualdata['name']})
# OPT: tagger.attrib('cast_shadows', ...)
# OPT: tagger.attrib('laser_retro', ...)
# OPT: tagger.attrib('transparency', ...)
# OPT: tagger.descend('meta')
# OPT: tagger.attrib('layer', ...)
# tagger.ascend()
# OPT: tagger.write(exportSDFFrame(..., tagger.get_indent()))
# Pose data of the visual is transformed by link --> use local matrix
matrix = visualobj.matrix_local
posedata = {
'rawmatrix': matrix,
'matrix': [list(vector) for vector in list(matrix)],
'translation': list(matrix.to_translation()),
'rotation_euler': list(matrix.to_euler()),
'rotation_quaternion': list(matrix.to_quaternion()),
}
# overwrite absolute position of the visual object
tagger.write(exportSDFPose(posedata, tagger.get_indent()))
# write material data if available
if 'material' in visualdata:
tagger.write(exportSDFMaterial(visualdata['material'], tagger.get_indent()))
tagger.write(exportSDFGeometry(visualdata['geometry'], tagger.get_indent(), modelname))
tagger.ascend()
return "".join(tagger.get_output()) | f556a1eb1cef42adfde28c481a3443f149219518 | 11,051 |
import resource
def register_module():
"""Callback for module registration. Sets up URL routes."""
global custom_module # pylint: disable=global-statement
permissions = [
roles.Permission(EDIT_STUDENT_GROUPS_PERMISSION,
messages.EDIT_STUDENT_GROUPS_PERMISSION_DESCRIPTION),
]
def permissions_callback(unused_application_context):
return permissions
def notify_module_enabled():
"""Callback at module-enable time, just after module registration.
Responsible for registering module's callbacks and other items with
core and other modules.
"""
model_caching.CacheFactory.build(
MODULE_NAME_AS_IDENTIFIER, MODULE_NAME + " Caching",
messages.ENABLE_GROUP_CACHING,
max_size_bytes=(
StudentGroupAvailabilityRestHandler.MAX_NUM_MEMBERS * 1024 * 4),
ttl_sec=60 * 60, dao_class=StudentGroupDAO)
# Tell permissioning system about permission for this module.
roles.Roles.register_permissions(custom_module, permissions_callback)
# Navigation sub-tab for showing list of student groups, and
# associated role-level permission.
dashboard.DashboardHandler.add_sub_nav_mapping(
'settings', MODULE_NAME_AS_IDENTIFIER, 'Student Groups',
action=StudentGroupListHandler.ACTION,
contents=StudentGroupListHandler.render_groups_view)
dashboard.DashboardHandler.map_get_action_to_permission(
StudentGroupListHandler.ACTION, custom_module,
EDIT_STUDENT_GROUPS_PERMISSION)
# Register action for add/edit/delete of student group.
dashboard.DashboardHandler.add_custom_get_action(
StudentGroupRestHandler.ACTION,
handler=StudentGroupRestHandler.edit_student_group,
in_action=StudentGroupListHandler.ACTION)
dashboard.DashboardHandler.map_get_action_to_permission(
StudentGroupRestHandler.ACTION, custom_module,
EDIT_STUDENT_GROUPS_PERMISSION)
# Override existing action for availability. For UX convenience,
# we want to have the same page modify overall course availability
# as well as per-group availability.
dashboard.DashboardHandler.add_custom_get_action(
availability.AvailabilityRESTHandler.ACTION,
StudentGroupAvailabilityRestHandler.get_form, overwrite=True)
# Register a callback to add the user's student group ID (if any) to
# recorded events.
models.EventEntity.EVENT_LISTENERS.append(
_add_student_group_to_event)
# Register a component with the student-aggregator data pump source
# so that student-aggregate records get marked with the group ID
# for that student.
student_aggregate.StudentAggregateComponentRegistry.register_component(
AddToStudentAggregate)
# Register a callback with models.models.StudentProfileDAO to let us
# know when a student registers. This allows us to move the
# Definitive Truth about group membership to the Student record.
models.StudentProfileDAO.STUDENT_CREATION_HOOKS.append(
StudentGroupMembership.user_added_callback)
# Register a callback with Course so that when anyone asks for the
# student-facing list of units and lessons we can modify them as
# appropriate.
courses.Course.COURSE_ELEMENT_STUDENT_VIEW_HOOKS.append(
modify_unit_and_lesson_attributes)
# Register a callback with Course so that when the environment is
# fetched, we can submit overwrite items.
courses.Course.COURSE_ENV_POST_COPY_HOOKS.append(
modify_course_environment)
# Register student group as a generically handle-able translatable
# resource.
resource.Registry.register(ResourceHandlerStudentGroup)
# Register student group as a translatable item; the title and
# description can appear on student profile pages.
i18n_dashboard.TranslatableResourceRegistry.register(
TranslatableResourceStudentGroups)
# Register a section on the student profile to add the current
# student's group - if any.
utils.StudentProfileHandler.EXTRA_PROFILE_SECTION_PROVIDERS.append(
_add_student_group_to_profile)
# Register with gradebook to add student group as a filterable
# item.
gradebook.RawAnswersDataSource.FILTERS.append(StudentGroupFilter)
# Register with generator feeding gradebook to add some handling to
# the map and reduce steps so we can generate our filter-able data
# column in the generator's output.
gradebook.RawAnswersGenerator.register_hook(
MODULE_NAME,
_add_student_group_to_map_result,
_add_student_group_to_kwargs)
# Add our types to the set of DB tables for download/upload of course.
courses.ADDITIONAL_ENTITIES_FOR_COURSE_IMPORT.add(StudentGroupEntity)
courses.ADDITIONAL_ENTITIES_FOR_COURSE_IMPORT.add(
StudentGroupMembership)
custom_module = custom_modules.Module(
MODULE_NAME, 'Define and manage groups of students.',
global_routes=[
(EmailToObfuscatedUserIdCleanup.URL,
EmailToObfuscatedUserIdCleanup),
], namespaced_routes=[
(StudentGroupRestHandler.URL,
StudentGroupRestHandler),
(StudentGroupAvailabilityRestHandler.URL,
StudentGroupAvailabilityRestHandler)
],
notify_module_enabled=notify_module_enabled)
return custom_module | 82e8d57c2b0f73ae21b460da61ce047b4a25ebe3 | 11,052 |
from sys import version_info
import texttable
def build_texttable(events):
"""
value['date'], value["target"], value['module_name'], value['scan_unique_id'],
value['options'], value['event']
    build a text table with the generated events related to the scan
:param events: all events
:return:
array [text table, event_number]
"""
_table = texttable.Texttable()
table_headers = [
'target',
'module_name',
'scan_unique_id',
'options',
'event',
'date'
]
_table.add_rows(
[
table_headers
]
)
for event in events:
_table.add_rows(
[
table_headers,
[
event['target'],
event['module_name'],
event['scan_unique_id'],
event['options'],
event['event'],
event['date']
]
]
)
return _table.draw().encode('utf8') + b'\n\n' + messages("nettacker_version_details").format(
        version_info[0],
        version_info[1],
now()
).encode('utf8') + b"\n" | 477c40ce240aae8848d960c6c3bba1e52a4c6b67 | 11,053 |
def for_all_regions(get_client_func, catalog_entry, action_func, parsed_args):
"""
Run the provided function on all the available regions.
Available regions are determined based on the user service catalog entries.
"""
result = []
cache_key = 'todo'
cache_item = CACHE.get(cache_key, None)
if cache_item is None:
client = get_client_func(parsed_args)
CACHE[cache_key] = client
else:
client = cache_item
catalog = client.connection.get_service_catalog()
urls = catalog.get_public_urls(service_type=catalog_entry,
name=catalog_entry)
auth_connection = client.connection.get_auth_connection_instance()
driver_kwargs = {'ex_auth_connection': auth_connection}
def run_in_pool(client):
item = action_func(client)
result.extend(item)
for api_url in urls:
parsed_args.api_url = api_url
client = get_client_func(parsed_args, driver_kwargs=driver_kwargs)
run_function(pool, run_in_pool, client)
join_pool(pool)
return result | 9b1dfba7939aada8ca3c4c894d2ebbde08e757c6 | 11,054 |
import scipy.stats
def KL_distance(image1, image2):
"""
Given two images, calculate the KL divergence between the two
    2-D arrays are not supported, so we flatten both arrays and compare each pixel in image1 to the corresponding pixel in image2.
"""
return scipy.stats.entropy(image1.ravel(), image2.ravel()) | 6419c2f6456365e027fc7eff6f4b171e5eb4fc5f | 11,055 |
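# Usage sketch (illustrative, not part of the corpus entry above): comparing two small
# positive-valued "images"; scipy.stats.entropy normalizes the flattened arrays to
# probability distributions before computing the KL divergence.
import numpy as np
img_a = np.random.rand(16, 16) + 1e-9
img_b = np.random.rand(16, 16) + 1e-9
print(KL_distance(img_a, img_b))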
def stop_all_bots():
"""
    This function addresses the RestAPI call to stop polling for all bots which
have ever started polling.
:return:
"""
bots_stopped = procedures.stop_all() # Stop all bots.
    botapi_logger.info('Successfully stopped {count} bots from polling in '
                       'stop_all api call.'.format(count=len(bots_stopped)))
    if len(bots_stopped) > 0:
return jsonify({
"result": "success",
"message": "Successfully stopped {count} previously running "
"bots.".format(count=len(bots_stopped)),
"ids": [bot_id for bot_id in bots_stopped]
}), 200
else:
return internal_server_error(
message="No to stop previously running bots.") | 7e0bdaa0ae631e631cfbc56966311e59fc510d52 | 11,056 |
def load_word_embedding_dict(embedding, embedding_path, normalize_digits=True):
"""
load word embeddings from file
:param embedding:
:param embedding_path:
    :return: embedding dict, embedding dimension, caseless
"""
print "loading embedding: %s from %s" % (embedding, embedding_path)
if embedding == 'word2vec':
# loading word2vec
word2vec = Word2Vec.load_word2vec_format(embedding_path, binary=True)
embedd_dim = word2vec.vector_size
return word2vec, embedd_dim, False
elif embedding == 'glove':
# loading GloVe
embedd_dim = -1
embedd_dict = dict()
with gzip.open(embedding_path, 'r') as file:
for line in file:
line = line.strip()
line = line.decode('utf-8')
if len(line) == 0:
continue
tokens = line.split()
            if len(tokens) < 101:
continue
if embedd_dim < 0:
embedd_dim = len(tokens) - 1
else:
assert (embedd_dim + 1 == len(tokens))
embedd = np.empty([1, embedd_dim], dtype=theano.config.floatX)
embedd[:] = tokens[1:]
word = data_utils.DIGIT_RE.sub(b"0", tokens[0]) if normalize_digits else tokens[0]
embedd_dict[word] = embedd
return embedd_dict, embedd_dim, True
elif embedding == 'senna':
# loading Senna
embedd_dim = -1
embedd_dict = dict()
with gzip.open(embedding_path, 'r') as file:
for line in file:
line = line.strip()
line = line.decode('utf-8')
if len(line) == 0:
continue
tokens = line.split()
if embedd_dim < 0:
embedd_dim = len(tokens) - 1
else:
assert (embedd_dim + 1 == len(tokens))
embedd = np.empty([1, embedd_dim], dtype=theano.config.floatX)
embedd[:] = tokens[1:]
word = data_utils.DIGIT_RE.sub(b"0", tokens[0]) if normalize_digits else tokens[0]
embedd_dict[word] = embedd
return embedd_dict, embedd_dim, True
elif embedding == 'sskip':
embedd_dim = -1
embedd_dict = dict()
with gzip.open(embedding_path, 'r') as file:
# skip the first line
file.readline()
for line in file:
line = line.strip()
line = line.decode('utf-8')
if len(line) == 0:
continue
tokens = line.split()
if embedd_dim < 0:
embedd_dim = len(tokens) - 1
embedd = np.empty([1, embedd_dim], dtype=theano.config.floatX)
start = len(tokens) - embedd_dim
word = ' '.join(tokens[0:start])
embedd[:] = tokens[start:]
word = data_utils.DIGIT_RE.sub(b"0", word) if normalize_digits else word
embedd_dict[word] = embedd
return embedd_dict, embedd_dim, True
elif embedding == 'polyglot':
words, embeddings = pickle.load(open(embedding_path, 'rb'))
_, embedd_dim = embeddings.shape
embedd_dict = dict()
for i, word in enumerate(words):
embedd = np.empty([1, embedd_dim], dtype=theano.config.floatX)
embedd[:] = embeddings[i, :]
word = data_utils.DIGIT_RE.sub(b"0", word) if normalize_digits else word
embedd_dict[word] = embedd
return embedd_dict, embedd_dim, False
else:
raise ValueError("embedding should choose from [word2vec, senna]") | 98cda8061aa49c708bc6986a6ab036e8941967f6 | 11,057 |
import sys
def fibonacci(n: int) -> int:
"""Returns nth fib number, fib_0 = 0, fib_1 = 1, ..."""
print(sys.platform)
return nfibonacci(n + 1)[-1] | 4f6b0c61709ad76e0600c395495b5b94c03c15ae | 11,058 |
import numpy as np
def random_exponential(shape=(40, 60), a0=100, dtype=float):
"""Returns numpy array of requested shape and type filled with exponential distribution for width a0.
"""
a = a0*np.random.standard_exponential(size=shape)
return a.astype(dtype) | 29d3e438145d4495191868c956942b9626b76918 | 11,059 |
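# Usage sketch (illustrative): drawing a 2x3 float32 array of exponentially
# distributed samples with width a0=50.
samples = random_exponential(shape=(2, 3), a0=50, dtype=np.float32)
print(samples.shape, samples.dtype)  # (2, 3) float32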
import json
def get_mpi_components_from_files(fileList, threads=False):
"""
Given a list of files to read input data from, gets a percentage of time
spent in MPI, and a breakdown of that time in MPI
"""
percentDict = dict()
timeDict = dict()
for filename in fileList:
filename = filename.strip()
try:
# Open the file for reading
with open(filename, "r") as infile:
# Read the json
jsonDict = json.load(infile)
runtime = get_runtime(jsonDict)
numprocs = get_num_threads(jsonDict) if threads else get_num_processes(jsonDict)
# Read the overview data and get the percentage of overall time spent in mpi
subDict = get_overview_data(jsonDict)
mpiPercent = get_dict_field_val(subDict, ["mpi", "percent"]) #mpiTime = (percent / 100.) * runtime
# Now get the sub-percentage of the mpi time
mpiEntry = get_dict_field_val(jsonDict, ["data", "mpi"])
# Get all of the percentages (as a percentage of total time)
mpiSubPercent = [float(get_dict_field_val(mpiEntry, [field])) * mpiPercent / 100. for field in mpiSubPercentages]
mpiSubTime = [runtime * subpercent / 100. for subpercent in mpiSubPercent]
percentDict[numprocs] = mpiSubPercent
timeDict[numprocs] = mpiSubTime
except IOError:
print("File " + filename + " does not exist. Skipping.")
pass
return percentDict, timeDict | 34549198676b823cf9e02ec927cb1e5fc30de2b8 | 11,060 |
import urllib
def get_character_url(name):
"""Gets a character's tibia.com URL"""
return url_character + urllib.parse.quote(name.encode('iso-8859-1')) | 62dc27528b7b9b303367551b8cba0a02204d0eb6 | 11,061 |
def parse_input(lines):
"""Parse the input document, which contains validity rules for the various
ticket fields, a representation of my ticket, and representations of a
number of other observed tickets.
Return a tuple of (rules, ticket, nearby_tickets)
"""
section = parse_sections(lines)
rules = parse_rules(section[0])
my_ticket = parse_ticket(section[1][1])
tickets = [parse_ticket(line) for line in section[2][1:]]
return (rules, my_ticket, tickets) | cccf2a9b47768428b2004caab1b3cab15a369a68 | 11,062 |
import toolz as tz
def _cnv_prioritize(data):
"""Perform confidence interval based prioritization for CNVs.
"""
supported = {"cnvkit": {"inputs": ["call_file", "segmetrics"], "fn": _cnvkit_prioritize}}
pcall = None
priority_files = None
for call in data.get("sv", []):
if call["variantcaller"] in supported:
priority_files = [call.get(x) for x in supported[call["variantcaller"]]["inputs"]]
priority_files = [x for x in priority_files if x is not None and utils.file_exists(x)]
if len(priority_files) == len(supported[call["variantcaller"]]["inputs"]):
pcall = call
break
prioritize_by = tz.get_in(["config", "algorithm", "svprioritize"], data)
if pcall and prioritize_by:
out_file = "%s-prioritize.tsv" % utils.splitext_plus(priority_files[0])[0]
gene_list = _find_gene_list_from_bed(prioritize_by, out_file, data)
if gene_list:
with open(gene_list) as in_handle:
genes = [x.strip() for x in in_handle]
args = [dd.get_sample_name(data), genes] + priority_files
df = supported[pcall["variantcaller"]]["fn"](*args)
with file_transaction(data, out_file) as tx_out_file:
df.to_csv(tx_out_file, sep="\t", index=False)
pcall["priority"] = out_file
return data | a35c8b1d1fb7f38fc23439bbe5b9778062fc6aa7 | 11,063 |
from typing import Optional
def maximum(
left_node: NodeInput,
right_node: NodeInput,
auto_broadcast: str = "NUMPY",
name: Optional[str] = None,
) -> Node:
"""Return node which applies the maximum operation to input nodes elementwise."""
return _get_node_factory_opset1().create(
"Maximum", [left_node, right_node], {"auto_broadcast": auto_broadcast.upper()}
) | 9ca2ac093059a9c7c2a1b310635c551d1982b1bb | 11,064 |
def create_template_error():
"""
    Creates a template for generating errors
"""
return {'response': False} | f15c27cc980cf1bda6b82353d01bbe7871fdbff1 | 11,065 |
from typing import Any
from typing import Tuple
import os
import logging
import sys
def download_and_extract_index(storage_bucket: Any, extract_destination_path: str) -> Tuple[str, Any, int]:
"""Downloads and extracts index zip from cloud storage.
Args:
storage_bucket (google.cloud.storage.bucket.Bucket): google storage bucket where index.zip is stored.
extract_destination_path (str): the full path of extract folder.
Returns:
str: extracted index folder full path.
Blob: google cloud storage object that represents index.zip blob.
str: downloaded index generation.
"""
if storage_bucket.name == GCPConfig.PRODUCTION_PRIVATE_BUCKET:
index_storage_path = os.path.join(GCPConfig.PRIVATE_BASE_PATH, f"{GCPConfig.INDEX_NAME}.zip")
else:
index_storage_path = os.path.join(GCPConfig.STORAGE_BASE_PATH, f"{GCPConfig.INDEX_NAME}.zip")
download_index_path = os.path.join(extract_destination_path, f"{GCPConfig.INDEX_NAME}.zip")
index_blob = storage_bucket.blob(index_storage_path)
index_folder_path = os.path.join(extract_destination_path, GCPConfig.INDEX_NAME)
index_generation = 0 # Setting to 0 makes the operation succeed only if there are no live versions of the blob
if not os.path.exists(extract_destination_path):
os.mkdir(extract_destination_path)
if not index_blob.exists():
os.mkdir(index_folder_path)
logging.error(f"{storage_bucket.name} index blob does not exists")
return index_folder_path, index_blob, index_generation
index_blob.reload()
index_generation = index_blob.generation
index_blob.download_to_filename(download_index_path, if_generation_match=index_generation)
if os.path.exists(download_index_path):
with ZipFile(download_index_path, 'r') as index_zip:
index_zip.extractall(extract_destination_path)
if not os.path.exists(index_folder_path):
logging.critical(f"Failed creating {GCPConfig.INDEX_NAME} folder with extracted data.")
sys.exit(1)
os.remove(download_index_path)
logging.success(f"Finished downloading and extracting {GCPConfig.INDEX_NAME} file to "
f"{extract_destination_path}")
return index_folder_path, index_blob, index_generation
else:
logging.critical(f"Failed to download {GCPConfig.INDEX_NAME}.zip file from cloud storage.")
sys.exit(1) | 6d4ab60f6c0e95a9b9a52afc58b510f03d32c8d1 | 11,066 |
from pyomo.environ import quicksum  # assumed source of quicksum for this Pyomo-style model expression
def e_qest(model, m):
"""
Calculation of photocounting statistics estimation from
photon-number statistics estimation
Parameters
----------
model : InvPBaseModel
m : int
Photocount number.
"""
return quicksum(model.T[m, n] * model.PEST[n]
for n in model.PSET) | b4b5f9fb4ba1c142af3d91d170fdb90ae960dd0e | 11,067 |
def load_input(fname):
"""Read in the data, return as a list."""
data = [""]
with open(fname, "r") as f:
for line in f.readlines():
if line.strip("\n"):
data[-1] += line.strip("\n") + " "
else:
data[-1] = data[-1].strip(" ")
data.append("")
    data[-1] = data[-1].strip(" ")
return data | f83021dd416e3a959996a16bb8d0a0e7352a471f | 11,068 |
import json
def parse_repo_layout_from_json(file_):
"""Parse the repo layout from a JSON file.
Args:
file_ (File): The source file.
Returns:
RepoLayout
Raises:
InvalidConfigFileError: The configuration file is invalid.
"""
def encode_dict(data):
new_data = {}
for key, value in data.items():
# Waf Node API requires String objects
if not isinstance(key, str):
new_data[key.encode('utf-8')] = [i.encode('utf-8')
for i in value]
else:
new_data[key] = value
return new_data
try:
loaded_dict = json.load(file_, object_hook=encode_dict)
except ValueError as e:
raise blderror.InvalidConfigFileError('Invalid .bdelayoutconfig: %s' %
e.message)
repo_layout = repolayout.RepoLayout()
for key in loaded_dict:
if key in repo_layout.__dict__:
setattr(repo_layout, key, loaded_dict[key])
else:
logutil.warn('Invalid field in .bdelayoutconfig: %s.' %
key)
return repo_layout | db1b7843c26ecc6796233e0cc193b41336fecf2d | 11,069 |
def SizeArray(input_matrix):
"""
Return the size of an array
"""
nrows=input_matrix.shape[0]
ncolumns=input_matrix.shape[1]
return nrows,ncolumns | 3ac45e126c1fea5a70d9d7b35e967896c5d3be0b | 11,070 |
def show_fun_elem_state_machine(fun_elem_str, xml_state_list, xml_transition_list,
xml_fun_elem_list):
"""Creates lists with desired objects for <functional_element> state, send them to
plantuml_adapter.py then returns url_diagram"""
new_fun_elem_list = set()
main_fun_elem = check_get_object(fun_elem_str, **{'xml_fun_elem_list': xml_fun_elem_list})
if not main_fun_elem:
return None
if not main_fun_elem.allocated_state_list:
print(f"No state allocated to {main_fun_elem.name} (no display)")
return None
new_fun_elem_list.add(main_fun_elem)
new_state_list = {s for s in xml_state_list if s.id in main_fun_elem.allocated_state_list}
new_transition_list = get_transitions(new_state_list, xml_transition_list)
_, url_diagram = plantuml_adapter.get_state_machine_diagram(new_state_list,
new_transition_list,
xml_fun_elem_list)
print("State Machine Diagram for " + fun_elem_str + " generated")
return url_diagram | 3d8b1426e791bcc40c9850723da9bf350bea361f | 11,071 |
def get_bank_account_rows(*args, **kwargs):
"""
    Get a list of bank account rows
:param args:
:param kwargs:
:return:
"""
return db_instance.get_rows(BankAccount, *args, **kwargs) | 0599b2bbae3b7bb044789db6c18f47604c3c9171 | 11,072 |
import importlib
def load_class(class_name, module_name):
"""Dynamically load a class from strings or raise a helpful error."""
# TODO remove this nasty python 2 hack
try:
ModuleNotFoundError
except NameError:
ModuleNotFoundError = ImportError
try:
loaded_module = importlib.import_module(module_name)
class_ = getattr(loaded_module, class_name)
except ModuleNotFoundError as e:
raise PluginModuleNotFoundError(module_name=module_name)
except AttributeError as e:
raise PluginClassNotFoundError(
module_name=module_name,
class_name=class_name
)
return class_ | 02ce2988e45b30da7603acc41fea4846481a94e3 | 11,073 |
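# Usage sketch (illustrative): dynamically loading a class from the standard library.
# The Plugin*NotFoundError exceptions raised on failure are defined in the surrounding
# plugin module, not in this snippet.
OrderedDictClass = load_class("OrderedDict", "collections")
print(OrderedDictClass(a=1, b=2))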
def pybo_mod(tokens, tag_codes=[]):
"""extract text/pos tuples from Token objects"""
txt_tags = []
for token in tokens:
tags = []
tags.append(token.text)
# Select and order the tags
for tag_code in tag_codes:
tags.append(get_tag(token, tag_code))
txt_tags.append(tags)
return txt_tags | e96bb6a4774a0e983f2288536921e98207aeaa4b | 11,074 |
def acf(
da: xr.DataArray, *, lag: int = 1, group: str | Grouper = "time.season"
) -> xr.DataArray:
"""Autocorrelation function.
Autocorrelation with a lag over a time resolution and averaged over all years.
Parameters
----------
da : xr.DataArray
Variable on which to calculate the diagnostic.
lag: int
Lag.
group : {'time.season', 'time.month'}
Grouping of the output.
E.g. If 'time.month', the autocorrelation is calculated over each month separately for all years.
Then, the autocorrelation for all Jan/Feb/... is averaged over all years, giving 12 outputs for each grid point.
Returns
-------
xr.DataArray
lag-{lag} autocorrelation of the variable over a {group.prop} and averaged over all years.
See Also
--------
statsmodels.tsa.stattools.acf
References
----------
Alavoine M., and Grenier P. (under review) The distinct problems of physical inconsistency and of multivariate bias potentially involved in the statistical adjustment of climate simulations. International Journal of Climatology, submitted on September 19th 2021. (Preprint: https://doi.org/10.31223/X5C34C)
Examples
--------
>>> from xclim.testing import open_dataset
>>> pr = open_dataset(path_to_pr_file).pr
>>> acf(da=pr, lag=3, group="time.season")
"""
attrs = da.attrs
def acf_last(x, nlags):
# noqa: D403
"""statsmodels acf calculates acf for lag 0 to nlags, this return only the last one."""
# As we resample + group, timeseries are quite short and fft=False seems more performant
out_last = stattools.acf(x, nlags=nlags, fft=False)
return out_last[-1]
@map_groups(out=[Grouper.PROP], main_only=True)
def _acf(ds, *, dim, lag, freq):
out = xr.apply_ufunc(
acf_last,
ds.dat.resample({dim: freq}),
input_core_dims=[[dim]],
vectorize=True,
kwargs={"nlags": lag},
)
out = out.mean("__resample_dim__")
return out.rename("out").to_dataset()
out = _acf(da.rename("dat").to_dataset(), group=group, lag=lag, freq=group.freq).out
out.attrs.update(attrs)
out.attrs["long_name"] = f"lag-{lag} autocorrelation"
out.attrs["units"] = ""
out.name = "acf"
return out | 630eb27574edb40f363f41656a23801f11cefb1c | 11,075 |
import requests
def username(UID: str) -> str:
"""
Get a users username from their user ID.
>>> username("zx7gd1yx")
'1'
>>> username("7j477kvj")
'AnInternetTroll'
>>> username("Sesame Street")
Traceback (most recent call last):
...
utils.UserError: User with uid 'Sesame Street' not found.
"""
R: dict = requests.get(f"{API}/users/{UID}").json()
try:
return R["data"]["names"]["international"]
except KeyError:
raise UserError(f"User with uid '{UID}' not found.") | c2d66af182a970783ef6e2236c1db3e5a3f80b50 | 11,076 |
import logging
def handle_exceptions(func):
"""Exception handler helper function."""
    logging.basicConfig(level=logging.INFO)
def wrapper_func(*args, **kwargs):
try:
return func(*args, **kwargs)
except Exception as e:
logging.error(f'{func.__name__} raised an error: {e}')#, exc_info = True)
return None
return wrapper_func | 2d5c428e65cfb823d1afbf2d2c77f98b8722d685 | 11,077 |
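# Usage sketch (illustrative): decorating a function so that any exception is logged
# and swallowed, returning None instead of propagating.
@handle_exceptions
def divide(a, b):
    return a / b
print(divide(10, 2))  # 5.0
print(divide(10, 0))  # logs "divide raised an error: division by zero" and returns None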
import numpy as np
def apply_hamming_window(image):
"""Cross correlate after applying hamming window to compensate side effects"""
window_h = np.hamming(image.shape[0])
window_v = np.hamming(image.shape[1])
image = np.multiply(image.T, window_h).T
return np.multiply(image, window_v) | f319506e9a51350664683ede7411e677bbf96ab3 | 11,078 |
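# Usage sketch (illustrative): tapering a small 2-D array with the separable Hamming
# window before cross-correlation to reduce edge effects.
image = np.ones((4, 6))
tapered = apply_hamming_window(image)
print(tapered.shape)  # (4, 6); the corners are damped towards zero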
from typing import Tuple
from functools import reduce
from math import pi, exp, cos, erfc
from numpy import dot
import numpy as np
def calc_ewald_sum(dielectric_tensor: np.ndarray,
real_lattice_set: np.ndarray,
reciprocal_lattice_set: np.ndarray,
mod_ewald_param: float,
root_det_epsilon: float,
volume: float,
) -> Tuple[float, float]:
"""Return real and reciprocal Ewald summations at given parameters"""
epsilon_inv = np.linalg.inv(dielectric_tensor)
real_sum = 0
# Skip the potential caused by the defect itself
for v in real_lattice_set:
root_r_inv_epsilon_r = np.sqrt(reduce(dot, [v.T, epsilon_inv, v]))
real_sum += \
erfc(mod_ewald_param * root_r_inv_epsilon_r) / root_r_inv_epsilon_r
real_part = real_sum / (4 * pi * root_det_epsilon)
# Ewald reciprocal part
# sum exp(-g * epsilon * g / (4 * ewald ** 2)) / g * epsilon * g [1/A]
reciprocal_sum = 0
for g in reciprocal_lattice_set:
g_epsilon_g = reduce(dot, [g.T, dielectric_tensor, g])
reciprocal_sum += \
(exp(- g_epsilon_g / 4.0 / mod_ewald_param ** 2)
/ g_epsilon_g * cos(dot(g, np.zeros(3)))) # [A^2]
reciprocal_part = reciprocal_sum / volume
return real_part, reciprocal_part | 5be08f833c8e44a4afeab48af0f5160278fbf88a | 11,079 |
import time
import numpy as np
def proximal_descent(
x0, grad, prox, step_size, momentum='fista', restarting=None,
max_iter=100, early_stopping=True, eps=np.finfo(np.float64).eps,
obj=None, benchmark=False):
""" Proximal descent algorithm.
Parameters
----------
x0 : array, shape (n_length, ), initial variables
grad : func, gradient function
prox : func, proximal operator function
step_size : float, step-size for the gradient descent
    momentum : str or None, (default='fista'), momentum to choose, possible
        choices are ('fista', 'greedy', None)
    restarting : str or None, (default=None), restarting to choose, possible
        choices are ('obj', 'descent', None), if restarting == 'obj', obj
function should be given
max_iter : int, (default=100), maximum number of iterations to perform the
analysis
early_stopping : bool, (default=True), whether to early stop the analysis
    eps : float, (default=np.finfo(np.float64).eps), stopping parameter w.r.t
evolution of the cost-function
obj : func, (default=None), cost-function function
benchmark : bool, (default=False), whether or not to save the cost-function
        and the duration of computation of each iteration
Return
------
x : array, shape (n_atoms, n_voxels), the estimated variable
pobj : array or None, shape (n_iter,) or (3 * n_iter,), the saved
cost-function
    times : array or None, shape (n_iter,) or (3 * n_iter,), the saved duration
        per step
"""
if benchmark and obj is None:
raise ValueError("If 'benchmark' is set True 'obj' should be given.")
if restarting == 'obj' and obj is None:
raise ValueError("If 'restarting' is set 'obj' 'obj' should be given.")
x_old, x, y, y_old = np.copy(x0), np.copy(x0), np.copy(x0), np.copy(x0)
t = t_old = 1
if benchmark:
pobj, times = [obj(y)], [0.0]
for ii in range(max_iter):
if benchmark:
t0 = time.process_time()
y -= step_size * grad(y)
x = prox(y, step_size)
if momentum == 'fista':
t = 0.5 * (1.0 + np.sqrt(1.0 + 4.0 * t_old**2))
y = x + (t_old - 1.0) / t * (x - x_old)
elif momentum == 'greedy':
y = x + (x - x_old)
elif momentum is None:
y = x
restarted = False
if restarting == 'obj' and (ii > 0) and (pobj[-1] > pobj[-2]):
if momentum == 'fista':
x = x_old
t = 1.0
elif momentum == 'greedy':
y = x
restarted = True
if restarting == 'descent':
angle = (y_old - x).ravel().dot((x - x_old).ravel())
if angle >= 0.0:
if momentum == 'fista':
x = x_old
t = 1.0
elif momentum == 'greedy':
y = x
restarted = True
if benchmark:
t1 = time.process_time()
pobj.append(obj(y))
converged = np.linalg.norm(x - x_old) < eps * np.linalg.norm(x_old)
if early_stopping and converged and not restarted:
break
t_old = t
x_old = x
y_old = y
if benchmark:
times.append(t1 - t0)
if benchmark:
return x, np.array(pobj), np.array(times)
else:
return x | e6a05c2ef4295b67e3bc3ac2b1608b16d43bc09e | 11,080 |
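# Usage sketch (illustrative): solving a small LASSO problem with this solver.
# The gradient is that of the least-squares term and the proximal operator is
# soft-thresholding; the step size 1 / ||A||_2^2 is a standard safe choice.
rng = np.random.RandomState(0)
A, b, lmbd = rng.randn(20, 10), rng.randn(20), 0.1
grad = lambda x: A.T.dot(A.dot(x) - b)
prox = lambda x, step: np.sign(x) * np.maximum(np.abs(x) - lmbd * step, 0.0)
step_size = 1.0 / np.linalg.norm(A, ord=2) ** 2
x_hat = proximal_descent(np.zeros(10), grad, prox, step_size, momentum='fista')
print(x_hat)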
import json
def sign_tx(path,
multisig_address,
redeemscript,
utxo_file,
output_file,
testnet=False):
"""
Sign a spend of a bitcoin 2-of-3 P2SH-multisig address
using a Trezor One Hardware Wallet
Args:
path: BIP32 path of key with which to sign
multisig_address: Address that is being spent
redeemscript: redeem script corresponding to multisig_address
utxo_file: JSON file of UTXOs for multisig_address
(see get_utxo_set.py)
output_file: JSON file of destination addresses and amounts
(see generate_outputs.py)
testnet: Is this a testnet or mainnet address?
Returns:
Dictionary with two keys:
pubkey: public key corresponding to the private key used for signing
signatures: a list of signatures, one per utxo
Raises:
ValueError: If multisig_address is not correct for the given redeemscript
Example:
TODO
"""
with open(utxo_file, 'r') as f:
utxos = json.load(f)
with open(output_file, 'r') as f:
outputs = json.load(f)
# Verify that Pubkeys and Address match
check_address = generate_multisig_address(redeemscript, testnet)
parsed_redeem_script = btc_utils.parse_redeem_script(redeemscript)
if multisig_address != check_address:
raise ValueError("Incorrect Redeem Script")
if testnet:
coin = 'Testnet'
else:
coin = 'Bitcoin'
input_script_type = proto.InputScriptType.SPENDMULTISIG
output_script_type = proto.OutputScriptType.PAYTOADDRESS
tx_api = trezorlib.coins.tx_api[coin]
client = trezor_utils.get_trezor_client()
#client.set_tx_api(tx_api)
# Get signing node:
expanded_path = trezorlib.tools.parse_path(path)
signer = trezorbtc.get_public_node(client, expanded_path, show_display=True).node
# blank HDNodes with public_keys
nodes = [proto.HDNodePathType(node=proto.HDNodeType(public_key=bytes.fromhex(h),
depth=0,
fingerprint=0,
child_num=0,
chain_code=b'0'*32),
address_n=[]
) for h in parsed_redeem_script['pubkeys']]
trezor_inputs = []
for utxo in utxos:
multisig = proto.MultisigRedeemScriptType(
pubkeys=nodes,
m=parsed_redeem_script['m']
)
_input = proto.TxInputType(
prev_hash=bytes.fromhex(utxo['txid']),
prev_index=utxo['n'],
amount=utxo['amount'],
address_n=trezorlib.tools.parse_path(path),
script_type=input_script_type,
multisig=multisig
)
trezor_inputs.append(_input)
txes = {}
for tx in trezor_inputs:
tmptx = tx_api[tx.prev_hash]
txes[tx.prev_hash] = tmptx
# make this multi-output, probably from file
trezor_outputs = []
for output in outputs:
trezor_outputs.append(
proto.TxOutputType(
address=output['address'],
amount=output['amount'],
script_type=output_script_type,
)
)
output_signatures, serialized_tx = trezorbtc.sign_tx(client, coin, trezor_inputs, trezor_outputs, prev_txes=txes)
signature_blob = {"pubkey": signer.public_key.hex(),
"signatures": [s.hex() for s in output_signatures]
}
client.close()
return signature_blob | d9e16f5ce241b52f0cef0edb675936e52d3d2cf8 | 11,081 |
from stable_baselines_custom.common.atari_wrappers import wrap_deepmind
def wrap_atari_dqn(env):
"""
wrap the environment in atari wrappers for DQN
:param env: (Gym Environment) the environment
:return: (Gym Environment) the wrapped environment
"""
return wrap_deepmind(env, frame_stack=True, scale=False) | 6c47492fe412b5620f22db17a45aa42968ed9a62 | 11,082 |
def get_Theta_CR_i_d_t(pv_setup, Theta_A_d_t, I_s_i_d_t):
"""加重平均太陽電池モジュール温度 (6)
Args:
pv_setup(str): 太陽電池アレイ設置方式
Theta_A_d_t(ndarray): 日付dの時刻tにおける外気温度(℃)
I_s_i_d_t(ndarray): 日付dの時刻tにおける太陽電池アレイiの設置面の単位面積当たりの日射量(W/m2)
Returns:
ndarray: 日付dの時刻tにおける太陽電池アレイiの加重平均太陽電池モジュール温度
"""
# 係数 f_A, f_B
if pv_setup == '架台設置型':
f_A_i = get_table_6()[0][0]
f_B_i = get_table_6()[0][1]
elif pv_setup == '屋根置き型':
f_A_i = get_table_6()[1][0]
f_B_i = get_table_6()[1][1]
elif pv_setup == 'その他':
f_A_i = get_table_6()[2][0]
f_B_i = get_table_6()[2][1]
else:
raise NotImplementedError()
    # Wind speed at the PV array installation surface
V_i_d_t = get_V_i_d_t()
return Theta_A_d_t + (f_A_i/(f_B_i * V_i_d_t**0.8 + 1)+2) * I_s_i_d_t * 10**(-3) - 2 | 6c96d9c4692de19909feccf647fd39126358b29c | 11,083 |
from typing import Set
def or_equality(input_1: Variable, input_2: Variable, output: Variable) -> Set[Clause]:
"""
Encode an OR-Gate into a CNF.
:param input_1: variable representing the first input of the OR-Gate
:param input_2: variable representing the second input of the OR-Gate
:param output: variable representing the output of the OR-Gate
:return: A set of clauses encoding the OR-Gate
"""
return {
frozenset([-input_1, output]),
frozenset([-input_2, output]),
frozenset([input_1, input_2, -output])
} | f101b1d7ae3d70e7849133562cd274275f8419a8 | 11,084 |
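# Usage sketch (illustrative): encoding "out <-> (a OR b)" with DIMACS-style integer
# literals a=1, b=2, out=3 (the Variable/Clause aliases in the entry above are assumed
# to be int and frozenset-of-int respectively).
clauses = or_equality(1, 2, 3)
for clause in clauses:
    print(sorted(clause))  # [-1, 3], [-2, 3] and [-3, 1, 2] in some order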
import math
def keyPosition_to_keyIndex(key_position: int, key: int) -> int:
"""
キーポジションからどのキーのノーツなのかを変換します
引数
----
key_position : int
-> キーポジション
key : int
-> 全体のキー数、4Kなら4と入力
戻り値
------
int
-> キーインデックス、指定したキーの0~キー-1の間の数
"""
return math.floor(key_position * key / 512) | e6edcc1711a283336da046e1f8f174cc7ff87760 | 11,085 |
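# Worked example (illustrative): in a 4K chart the 512-wide position axis splits into
# columns of width 128, so centre positions 64, 192, 320 and 448 map to key indices
# 0, 1, 2 and 3.
print([keyPosition_to_keyIndex(p, 4) for p in (64, 192, 320, 448)])  # [0, 1, 2, 3]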
import json
def load_file_recipes(fh, enabled_only=False, expensive=False, logger=logger):
"""
Load all the recipes from a given file handle.
:param enabled_only: Set True to limit to only enabled recipes.
:param expensive: Set True to use 'expensive' configurations.
:return: dict(name -> {recipe})
"""
logger.info("Loading recipes from %s", fh.name)
lua_text = fh.read().strip()
logger.debug("Loaded %d bytes", len(lua_text))
# Strip the non-table wrapper.
if not lua_text.startswith(RECIPE_PREFIX) or not lua_text.endswith(RECIPE_SUFFIX):
logger.warning("%s does not appear to be a recipe definition file.", fh.name)
return {}
lua_table = lua_text[len(RECIPE_PREFIX):-len(RECIPE_SUFFIX)].strip()
definitions = {}
for table in slpp.decode(lua_table):
own_version = {}
# Only handle 'recipe's.
if table.get('type') != "recipe":
logger.debug("Ignoring: %s", table)
continue
name = table.get('name').lower()
if not name:
logger.warning("Malformed entry: %s", table)
continue
own_version['name'] = name
# Check if we're skipping disabled recipes.
if enabled_only:
if table.get('enabled', True) is False:
logger.debug("Skipping %s: disabled" % name)
continue
own_version['enabled'] = table['enabled']
# Make sure it has a unique name.
if name in definitions:
raise ParseError("%s: Duplicated recipe: %s" % (fh.name, name))
inset = table.get('normal')
if expensive:
inset = table.get('expensive', inset)
if inset:
if enabled_only and inset.get('enabled', True) is False:
logger.debug("Skipping %s: inset dsabled" % name)
continue
if 'ingredients' in inset:
table = inset
ingredients = table.get('ingredients')
if not ingredients:
logger.warning("Entry with no ingredients: %s", table)
continue
own_version['ingredients'] = {}
for entry in ingredients:
if isinstance(entry, (tuple, list)):
assert len(entry) == 2
assert isinstance(entry[1], int)
own_version['ingredients'][entry[0]] = entry[1]
else:
assert isinstance(entry, dict)
assert len(entry) == 3
own_version['ingredients'][entry['name']] = int(entry['amount'])
if 'energy_required' in table:
own_version['energy_required'] = table['energy_required']
logger.debug("\"%s\": %s", name, json.dumps(own_version, sort_keys=True))
definitions[name] = own_version
return definitions | 8a372981da76f9dc060c79e6cf282612fec8a4b6 | 11,086 |
from masonite.routes import Patch
def patch(url, controller):
"""Shortcut for Patch HTTP class.
Arguments:
url {string} -- The url you want to use for the route
controller {string|object} -- This can be a string controller or a normal object controller
Returns:
masonite.routes.Patch -- The Masonite Patch class.
"""
return Patch().route(url, controller) | c267ca8c2e2c55369584a94cd07aaf26b0b7ae4b | 11,087 |
def get_user(message: discord.Message, username: str):
""" Get member by discord username or osu username. """
member = utils.find_member(guild=message.guild, name=username)
if not member:
for key, value in osu_tracking.items():
if value["new"]["username"].lower() == username.lower():
member = discord.utils.get(message.guild.members, id=int(key))
return member | 323ac71e24e4da516263df3a4683ed5fd87138ce | 11,088 |
import colorsys
import matplotlib.colors
import numpy as np
def resaturate_color(color, amount=0.5):
"""
Saturates the given color by setting saturation to the given amount.
Input can be matplotlib color string, hex string, or RGB tuple.
"""
if not isinstance(color, np.ndarray) and color in matplotlib.colors.cnames:
color = matplotlib.colors.cnames[color]
hls = colorsys.rgb_to_hls(*matplotlib.colors.to_rgb(color))
new_hls = hls[0], hls[1], amount
new_color = colorsys.hls_to_rgb(*new_hls)
return tuple(np.minimum(np.maximum(0, new_color), 1)) | 2bd1b9b4d9e1d11390efc79f56a89bf7555cbe71 | 11,089 |
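# Usage sketch (illustrative): lowering and raising the saturation of a named
# matplotlib colour; the hue and lightness components are preserved.
print(resaturate_color("steelblue", 0.1))  # washed-out blue
print(resaturate_color("steelblue", 0.9))  # vivid blue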
def create_reach_segment(upstream_point, downstream_point, polyline, identifier="HA",
junctionID=0, isEnd=False):
"""Returns a polyline based on two bounding vertices found on the line. """
part = polyline.getPart (0)
total_length = polyline.length
lineArray = arcpy.Array ()
#Identifies bounding vertices and associated distance along the line.
if isEnd:
last_point= polyline.lastPoint
upstream_point_dist = round (total_length - polyline.measureOnLine (downstream_point , False) , 2)
downstream_point_dist = round(total_length - polyline.measureOnLine (last_point , False), 2)
else:
upstream_point_dist = round (total_length - polyline.measureOnLine (upstream_point , False) , 2)
downstream_point_dist = round(total_length - polyline.measureOnLine (downstream_point , False), 2)
#Retrieves all vertices between bounding vertices of a polyline.
for pnt in part:
pnt_dist = round(total_length - polyline.measureOnLine (pnt , False), 2)
if pnt_dist <= upstream_point_dist and pnt_dist>=downstream_point_dist:
if lineArray.count == 0:
lineArray.add(upstream_point)
lineArray.add (pnt)
else:
lineArray.add (pnt)
    #Make sure the ending downstream point is added to the array
if lineArray[lineArray.count -1].X != downstream_point.X and lineArray[lineArray.count -1].Y != downstream_point.Y:
lineArray.add(downstream_point)
#Creates a new polyline from point array
new_polyline = arcpy.Polyline(lineArray)
identifier = str(identifier)
junc = identifier
if identifier.upper().find('J') == len(identifier)-1:
identifier =identifier.upper()[0:len(identifier)-1] + 'R'
else:
identifier = identifier.upper() + 'R'
return {'name':identifier,'polyline':new_polyline, 'DJunc':junc, 'JuncID':junctionID} | c378fb05c1eda5cde35d5caf60a9d732578ae6d8 | 11,090 |
def sample_recipe(user, **params):
""" Helper function for creating recipes """
""" for not writing every single time this fields """
defaults = {
'title': 'Sample recipe',
'time_minutes': 10,
'price': 5.00
}
"""
Override any field of the defaults dictionary.
Updating the keys:field from params to defaults
if params has any similar key.
If params has a new key, then it appends to defaults.
"""
defaults.update(params)
return Recipe.objects.create(user=user, **defaults) | 11fe56c88cc0c641b1c04b279b2346615b2257c9 | 11,091 |
import numpy as np
def _unary_geo(op, left, *args, **kwargs):
# type: (str, np.array[geoms]) -> np.array[geoms]
"""Unary operation that returns new geometries"""
# ensure 1D output, see note above
data = np.empty(len(left), dtype=object)
data[:] = [getattr(geom, op, None) for geom in left]
return data | d302bdb41c74f7b127df4ccd24dd6bc56c694a56 | 11,092 |
import shlex
import subprocess as sp
def map_func(h, configs, args):
"""Polygons command line in parallel.
"""
if args.verbose:
cmd = "python {} -i {}/threshold{}.tif -o {}/threshold{}.shp -v".format(
configs["path"]["polygons"],
configs["path"]["output"],
h,
configs["path"]["output"],
h
)
print cmd
else:
cmd = "python {} -i {}/threshold{}.tif -o {}/threshold{}.shp".format(
configs["path"]["polygons"],
configs["path"]["output"],
h,
configs["path"]["output"],
h
)
cmd_args = shlex.split(cmd)
stdout,stderr = sp.Popen(
cmd_args,
stdin = sp.PIPE,
stdout = sp.PIPE,
stderr = sp.PIPE
).communicate()
if args.verbose:
print stdout, stderr
return True | 4ff4e961b2d0eb9a19b277a0b8e2ef165aa43819 | 11,093 |
import os
def _runopenssl(pem, *args):
"""
Run the command line openssl tool with the given arguments and write
the given PEM to its stdin. Not safe for quotes.
"""
if os.name == 'posix':
command = "openssl " + " ".join(["'%s'" % (arg.replace("'", "'\\''"),) for arg in args])
else:
command = "openssl " + quoteArguments(args)
write, read = popen2(command, "b")
write.write(pem)
write.close()
return read.read() | ce61d5855d73365d13b28c447566f6ebb75aa030 | 11,094 |
from django.http import HttpRequest
def check_health(request: HttpRequest) -> bool:
"""Check app health."""
return True | 20d572edd68e1518e51cbdbe331c17798bc850fe | 11,095 |
def return_galo_tarsilo(message):
"""Middle function for returning "gaucho" vídeo.
Parameters
----------
message : telebot.types.Message
The message object.
Returns
-------
    url : str
        The YouTube URL of the video.
"""
return 'https://www.youtube.com/watch?v=MVYEwZFixJ8' | 58307b763d139dc38220b9a93af15644ccd32959 | 11,096 |
def preimage_func(f, x):
    Pre-image a function at a set of input points.
Parameters
----------
f : typing.Callable
The function we would like to pre-image. The output type must be hashable.
x : typing.Iterable
Input points we would like to evaluate `f`. `x` must be of a type acceptable by `f`.
Returns
-------
D : dict(object, list(object))
This dictionary maps the output of `f` to the list of `x` values that produce it.
"""
D = {}
for xx in x:
D.setdefault(f(xx), []).append(xx)
return D | 6ca0496aff52cff1ce07e327f845df4735e3266a | 11,097 |
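# Usage sketch (illustrative): grouping the integers 0..6 by their value modulo 3.
print(preimage_func(lambda n: n % 3, range(7)))
# {0: [0, 3, 6], 1: [1, 4], 2: [2, 5]}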
import dbm
import sys
def get_spec_id(mat_quality, mat_faction=None):
"""
Get the material_spec id corresponding to the material quality and faction.
Args:
mat_quality (str): A material quality like Basic, Fine, Choice etc...
mat_faction (str): A material faction like Matis, Zoraï etc...
Returns:
int - The id of the corresponding material_spec.
Example:
>>> get_spec_id('Basic', 'Generic')
1
"""
if mat_faction:
dbm.query(
"SELECT id FROM material_spec WHERE quality = ? AND faction = ?",
(mat_quality, mat_faction)
)
else:
dbm.query(
"SELECT id FROM material_spec WHERE quality = ?",
(mat_quality,)
)
try:
return dbm.cur.fetchone()[0]
except TypeError:
print(
"Wrong quality: {} or faction: {}".format(
mat_quality, mat_faction),
file=sys.stderr
)
sys.exit() | dbabe3fb6fd042a3510272aa8c353efd161b5651 | 11,098 |
def print_raw_data(raw_data, start_index=0, limit=200, flavor='fei4b', index_offset=0, select=None, tdc_trig_dist=False, trigger_data_mode=0):
"""Printing FEI4 raw data array for debugging.
"""
if not select:
select = ['DH', 'TW', "AR", "VR", "SR", "DR", 'TDC', 'UNKNOWN FE WORD', 'UNKNOWN WORD']
total_words = 0
for index in range(start_index, raw_data.shape[0]):
dw = FEI4Record(raw_data[index], chip_flavor=flavor, tdc_trig_dist=tdc_trig_dist, trigger_data_mode=trigger_data_mode)
if dw in select:
print index + index_offset, '{0:12d} {1:08b} {2:08b} {3:08b} {4:08b}'.format(raw_data[index], (raw_data[index] & 0xFF000000) >> 24, (raw_data[index] & 0x00FF0000) >> 16, (raw_data[index] & 0x0000FF00) >> 8, (raw_data[index] & 0x000000FF) >> 0), dw
total_words += 1
if limit and total_words >= limit:
break
return total_words | 23464a46d5a3d05702fae3381e3d7623ad9017b5 | 11,099 |