content | sha1 | id
---|---|---|
stringlengths 35–762k | stringlengths 40–40 | int64 0–3.66M
def reportData_to_report(report_data: ReportData) -> Report:
"""Create a report object from the given thrift report data."""
main = {
"check_name": report_data.checkerId,
"description": report_data.checkerMsg,
"issue_hash_content_of_line_in_context": report_data.bugHash,
"location": {
"line": report_data.line,
"col": report_data.column,
"file": 0,
},
}
bug_path = None
files = {0: report_data.checkedFile}
    # TODO Cannot reconstruct because only the analyzer name was stored;
    # it should be an analyzer_name + analyzer_version pair
return Report(main, bug_path, files, metadata=None) | 7b8d210e08113af405225ef7497f6531c4054185 | 7,000 |
import numpy as np
def softmax(x):
"""A softmax implementation."""
e_x = np.exp(x - np.max(x))
return e_x / e_x.sum(axis=0) | 3c8e38bf30304733e957cabab35f8fec1c5fba55 | 7,001 |
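A minimal usage sketch for the softmax snippet above (assuming it is in scope together with NumPy): the max-subtraction is a standard numerical-stability trick that does not change the result, and the outputs sum to 1.

```python
import numpy as np

# Illustrative call to softmax() from the snippet above.
x = np.array([2.0, 1.0, 0.1])
p = softmax(x)
print(p)        # approx. [0.659 0.242 0.099]
print(p.sum())  # 1.0 (up to floating-point rounding)
```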
import logging
def get_cazy_class_fam_genbank_records(args, session, config_dict):
"""GenBank acc query results from the local CAZyme database for CAZyme from specific classes/fams
:param args: cmd-line argument parser
:param session: open SQLite db session
:param config_dict: dict, defines CAZy classes and families to get sequences for
Return CAZy class and CAZy family GenBank accession query results
"""
logger = logging.getLogger(__name__)
if args.update: # retrieve all GenBank accessions
if args.primary:
logger.warning(
"Retrieving sequences for PRIMARY GenBank accessions that:\n"
"belong to specific CAZy classes/families AND\n"
"do not have a sequence in the db OR the sequence has been updated in NCBI"
)
(
genbank_query_class,
genbank_query_family,
) = query_sql_db.get_prim_gnbk_acc_from_clss_fams(
session,
config_dict,
)
else:
logger.warning(
"Retrieving sequences for PRIMARY GenBank accessions that:\n"
"belong to specific CAZy classes/families AND\n"
"do not have a sequence in the db OR the sequence has been updated in NCBI"
)
(
genbank_query_class,
genbank_query_family,
) = query_sql_db.get_all_gnbk_acc_from_clss_fams(
session,
config_dict,
)
else: # retrieve GenBank accesions of records that don't have a sequence
if args.primary:
logger.warning(
"Retrieving sequences for PRIMARY GenBank accessions that:\n"
"belong to specific CAZy classes/families AND do not have a sequence in the db"
)
(
genbank_query_class,
genbank_query_family,
) = query_sql_db.get_prim_gnbk_acc_from_clss_fams_no_seq(
session,
config_dict,
)
else:
logger.warning(
"Retrieving sequences for PRIMARY GenBank accessions that:\n"
"belong to specific CAZy classes/families AND do not have a sequence in the db"
)
(
genbank_query_class,
genbank_query_family,
) = query_sql_db.get_all_gnbk_acc_from_clss_fams_no_seq(
session,
config_dict,
)
return genbank_query_class, genbank_query_family | 3f2d8f65f811be1de6b839753242e51457f8e03e | 7,002 |
import numpy as np
def assign_bias_ID(data, bias_params=None, bias_name='bias_ID', key_name=None, bias_model=None):
"""
Assign a value to each data point that determines which biases are applied to it.
parameters:
data: pointCollection.data instance
        bias_params: a list of parameters, each unique combination of which defines a different bias
bias_name: a name for the biases
key_name: an optional parameter which will be used as the dataset name, otherwise a key will be built from the parameter values
bias_model: a dict containing entries:
            E_bias: a dict of expected bias values for each bias_ID, determined from the sigma_corr parameter of the data
bias_ID_dict: a dict giving the parameter values for each bias_ID (or the key_name if provided)
bias_param_dict: a dict giving the mapping from parameter values to bias_ID values
"""
if bias_model is None:
bias_model={'E_bias':dict(), 'bias_param_dict':dict(), 'bias_ID_dict':dict()}
    bias_ID=np.zeros(data.size)-9999
p0=len(bias_model['bias_ID_dict'].keys())
if bias_params is None:
# assign all data the same bias
bias_model['bias_ID_dict'][p0+1]=key_name
bias_ID=p0+1
bias_model['E_bias'][p0+1]=np.nanmedian(data.sigma_corr)
else:
bias_ID=np.zeros(data.size)
temp=np.column_stack([getattr(data, bp) for bp in bias_params])
u_p, i_p=unique_by_rows(temp, return_index=True)
bias_model['bias_param_dict'].update({param:list() for param in bias_params})
bias_model['bias_param_dict'].update({'ID':list()})
for p_num, param_vals in enumerate(u_p):
this_mask=np.ones(data.size, dtype=bool)
param_vals_dict={}
#Identify the data that match the parameter values
for i_param, param in enumerate(bias_params):
this_mask = this_mask & (getattr(data, param)==param_vals[i_param])
param_vals_dict[param]=param_vals[i_param]
#this_name += '%s%3.2f' % (param, param_vals[i_param])
bias_model['bias_param_dict'][param].append(param_vals[i_param])
bias_model['bias_param_dict']['ID'].append(p0+p_num)
this_ind=np.where(this_mask)[0]
bias_ID[this_ind]=p0+p_num
bias_model['bias_ID_dict'][p0+p_num]=param_vals_dict
bias_model['E_bias'][p0+p_num]=np.nanmedian(data.sigma_corr[this_ind])
data.assign({bias_name:bias_ID})
return data, bias_model | 8f2145b5efcd7b892b3f156e1e0c4ff59dac9d43 | 7,003 |
def check(s):
    """
    :param s: str, the input of letters
    :return: bool, True if s is 7 characters long and made of 4 space-separated alphabetic units
    """
    if len(s) == 7 and len(s.split(' ')) == 4:
        return all(unit.isalpha() for unit in s.split(' '))
    return False | 86e1270af299ba83b68d0dab9f8afc3fc5b7d7c5 | 7,004 |
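A few illustrative calls against the corrected check() above; the inputs are hypothetical and the expected pattern is 7 characters forming 4 space-separated single letters.

```python
# Hypothetical inputs exercising check() from the snippet above.
print(check("a b c d"))  # True  - 7 characters, 4 alphabetic units
print(check("a b c 1"))  # False - last unit is not alphabetic
print(check("ab c d"))   # False - only 6 characters / 3 units
```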
def pyeval(*args):
"""
.. function:: pyeval(expression)
Evaluates with Python the expression/s given and returns the result
>>> sql("pyeval '1+1'")
pyeval('1+1')
-------------
2
>>> sql("select var('test')") # doctest: +NORMALIZE_WHITESPACE
Traceback (most recent call last):
...
OperatorError: Madis SQLError:
Operator VAR: Variable 'test' does not exist
>>> sql("select var('test', pyeval('1+1'))")
var('test', pyeval('1+1'))
--------------------------
2
>>> sql("select var('test')")
var('test')
-----------
2
>>> sql('''pyeval '1+1' '"-"' '3+1' ''')
pyeval('1+1','"-"','3+1')
-------------------------
2-4
>>> sql("var 'testvar' of select 5")
var('testvar',(select 5))
-------------------------
5
>>> sql("pyeval 'testvar+5'")
pyeval('testvar+5')
-------------------
10
>>> sql('''pyeval keywords('lala') ''')
pyeval('keywords(''lala'')')
----------------------------
lala
"""
if len(args) == 0:
return
r = ''
for i in args:
r = r + str(eval(i, functions.variables.__dict__, functions.rowfuncs.__dict__))
return r | fa7febed8f25860eee497ce670dc9465526cbbc1 | 7,005 |
from typing import Dict
def is_buggy(battery: Dict) -> bool:
"""
    This method returns True if an ACPI bug has occurred.
    In this case the battery is flagged as unavailable and has no capacity information.
:param battery: the battery dictionary
:return: bool
"""
return battery['design_capacity'] is None | 3508b2ab6eae3f3c643f539c1ea5094e5052278b | 7,006 |
def with_hyperparameters(uri: Text):
"""Constructs an ImporterNode component that imports a `standard_artifacts.HyperParameters`
artifact to use for future runs.
Args:
uri (Text): Hyperparameter artifact's uri
Returns: ImporterNode
"""
return ImporterNode(
instance_name='with_hyperparameters',
source_uri=uri,
artifact_type=standard_artifacts.HyperParameters) | e06cc33d043e6abd4a9ee30648f72dcea2ad1814 | 7,007 |
def update_user_controller(user_repository_spy): # pylint: disable=W0621
"""montagem de update_user_controller utilizando spy"""
usecase = UpdateUser(user_repository_spy, PasswordHash())
controller = UpdateUserController(usecase)
return controller | 474c2bf42c932d71181bebbf7096cd628ba6956a | 7,008 |
from numpy import zeros
def blockList2Matrix(l):
""" Converts a list of matrices into a corresponding big block-diagonal one. """
dims = [m.shape[0] for m in l]
s = sum(dims)
res = zeros((s, s))
index = 0
for i in range(len(l)):
d = dims[i]
m = l[i]
res[index:index + d, index:index + d] = m
index += d
return res | b13a67cd203930ca2d88ec3cd6dae367b313ae94 | 7,009 |
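A small usage sketch for blockList2Matrix() (assuming the NumPy import added above): two square blocks land on the diagonal and all other entries stay zero.

```python
from numpy import array

# Combine a 2x2 and a 1x1 matrix into a 3x3 block-diagonal matrix.
a = array([[1, 2], [3, 4]])
b = array([[5]])
print(blockList2Matrix([a, b]))
# [[1. 2. 0.]
#  [3. 4. 0.]
#  [0. 0. 5.]]
```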
def log_new_fit(new_fit, log_gplus, mode='residual'):
"""Log the successful refits of a spectrum.
Parameters
----------
new_fit : bool
If 'True', the spectrum was successfully refit.
log_gplus : list
Log of all previous successful refits of the spectrum.
mode : str ('positive_residual_peak', 'negative_residual_peak', 'broad', 'blended')
Specifies the feature that was refit or used for a new successful refit.
Returns
-------
log_gplus : list
Updated log of successful refits of the spectrum.
"""
if not new_fit:
return log_gplus
modes = {'positive_residual_peak': 1, 'negative_residual_peak': 2, 'broad': 3, 'blended': 4}
log_gplus.append(modes[mode])
return log_gplus | 16761ca135efbdb9ee40a42cb8e9e1d62a5dc05e | 7,010 |
def prepare_hr_for_compromised_credentials(hits: list) -> str:
"""
Prepare human readable format for compromised credentials
:param hits: List of compromised credentials
:return: Human readable format of compromised credentials
"""
hr = []
for hit in hits:
source = hit.get('_source', {})
created_date = source.get('breach', {}).get('created_at', {}).get('date-time')
created_date = arg_to_datetime(created_date)
if created_date:
created_date = created_date.strftime(READABLE_DATE_FORMAT) # type: ignore
first_observed_date = source.get('breach', {}).get('first_observed_at', {}).get('date-time')
first_observed_date = arg_to_datetime(first_observed_date)
if first_observed_date:
first_observed_date = first_observed_date.strftime(READABLE_DATE_FORMAT) # type: ignore
data = {
'FPID': source.get('fpid', ''),
'Email': source.get('email', ''),
'Breach Source': source.get('breach', {}).get('source'),
'Breach Source Type': source.get('breach', {}).get('source_type'),
'Password': source.get('password'),
'Created Date (UTC)': created_date,
'First Observed Date (UTC)': first_observed_date
}
hr.append(data)
return tableToMarkdown("Compromised Credential(s)", hr, ['FPID', 'Email', 'Breach Source', 'Breach Source Type',
'Password', 'Created Date (UTC)',
'First Observed Date (UTC)'], removeNull=True) | 846144700d3fe21628306de5aff72a77d2cc9864 | 7,011 |
def red_bg(text):
""" Adds a red background to the given text. """
return colorize(text, "\033[48;5;167m") | edc2741f3246de2c90c9722c4dbd2d813708fe90 | 7,012 |
def model_utils(decoy: Decoy) -> ModelUtils:
"""Get mock ModelUtils."""
return decoy.mock(cls=ModelUtils) | eb5d3eaf8f280086521209f62025e42fca7aec93 | 7,013 |
def getLeftTopOfTile(tilex, tiley):
"""Remember from the comments in the getStartingBoard() function that we have two sets of coordinates in this program. The first set are the pixel coordinates, which on the x-axis ranges from 0 to WINDOWWIDTH - 1, and the y-axis ranges from 0 to WINDOWHEIGHT - 1.
Lembrando que a partir dos comentários na função getStartingBoard() temos dois conjuntos de coordenadas neste programa. O primeiro conjunto são as coordenadas dos pixels, que no intervalo do eixo-x vai de 0 até WINDOWWIDTH - 1 e no intervalo do eixo-y vai de 0 até WINDOWHEIGHT - 1.
The other coordinate system is used to refer to the tiles on the game board. The upper left tile is at 0, 0. The x-axis ranges from 0 to COLS - 1, and the y-axis ranges from 0 to ROWS - 1.
O outro sistema de coordenadas é usado para se referir as peças do jogo no tabuleiro. A peça superior esquerda está em 0,0. O intervalo do eixo-x vai de 0 até COLS -1, e o intervalo do eixo-y vai de 0 até ROWS -1."""
left = XMARGIN + (tilex * TILESIZE) + (tilex - 1)
top = YMARGIN + (tiley * TILESIZE) + (tiley - 1)
return (left, top) | fad5a9df02b05e76ba62013a49d77941b71f6f5f | 7,014 |
def count_str(text, sub, start=None, end=None):
"""
Computes the number of non-overlapping occurrences of substring ``sub`` in ``text[start:end]``.
Optional arguments start and end are interpreted as in slice notation.
:param text: The string to search
:type text: ``str``
:param sub: The substring to count
:type sub: ``str``
:param start: The start of the search range
:type start: ``int``
:param end: The end of the search range
:type end: ``int``
:return: The number of non-overlapping occurrences of substring ``sub`` in ``text[start:end]``.
:rtype: ``int``
"""
assert isinstance(text,str), '%s is not a string' % text
return text.count(sub,start,end) | 1578f868a4f1a193ec9907494e4af613ca2a6d4d | 7,015 |
def tanh(x, out=None):
"""
Raises a ValueError if input cannot be rescaled to a dimensionless
quantity.
"""
if not isinstance(x, Quantity):
return np.tanh(x, out)
return Quantity(
np.tanh(x.rescale(dimensionless).magnitude, out),
dimensionless,
copy=False
) | 3d86565fb512bfe6f8034dd7436b65c1c322cde6 | 7,016 |
from typing import Optional
from typing import Sequence
from pathlib import Path
import dask.dataframe as dd
import pudl
def epacems(
states: Optional[Sequence[str]] = None,
years: Optional[Sequence[int]] = None,
columns: Optional[Sequence[str]] = None,
epacems_path: Optional[Path] = None,
) -> dd.DataFrame:
"""Load EPA CEMS data from PUDL with optional subsetting.
Args:
states: subset by state abbreviation. Defaults to None (which gets all states).
years: subset by year. Defaults to None (which gets all years).
columns: subset by column. Defaults to None (which gets all columns).
epacems_path: path to parquet dir. By default it automatically loads the path
from :mod:`pudl.workspace`
Returns:
The requested epacems data
"""
all_states = pudl.constants.WORKING_PARTITIONS['epacems']['states']
if states is None:
states = all_states # all states
else:
nonexistent = [state for state in states if state not in all_states]
if nonexistent:
raise ValueError(
f"These input states are not in our dataset: {nonexistent}")
states = list(states)
all_years = pudl.constants.WORKING_PARTITIONS['epacems']['years']
if years is None:
years = all_years
else:
nonexistent = [year for year in years if year not in all_years]
if nonexistent:
raise ValueError(f"These input years are not in our dataset: {nonexistent}")
years = list(years)
# columns=None is handled by dd.read_parquet; gives all columns
if columns is not None:
# nonexistent columns are handled by dd.read_parquet; raises ValueError
columns = list(columns)
if epacems_path is None:
pudl_settings = pudl.workspace.setup.get_defaults()
epacems_path = Path(pudl_settings["parquet_dir"]) / "epacems"
epacems = dd.read_parquet(
epacems_path,
use_nullable_dtypes=True,
columns=columns,
filters=year_state_filter(
states=states,
years=years,
),
)
return epacems | 79213c5adb0b56a3c96335c0c7e5cb1faa734752 | 7,017 |
from typing import Optional
from typing import Tuple
import logging
def check_termination_criteria(
theta: Optional[float],
num_iterations: Optional[int]
) -> Tuple[float, int]:
"""
Check theta and number of iterations.
:param theta: Theta.
:param num_iterations: Number of iterations.
:return: Normalized values.
"""
# treat theta <= 0 as None, as the caller wants to ignore it.
if theta is not None and theta <= 0:
theta = None
# treat num_iterations <= 0 as None, as the caller wants to ignore it.
if num_iterations is not None and num_iterations <= 0:
num_iterations = None
if theta is None and num_iterations is None:
raise ValueError('Either theta or num_iterations (or both) must be provided.')
logging.info(f'Starting evaluation (theta={theta}, num_iterations={num_iterations}).')
return theta, num_iterations | 536cd70b8e8b04d828f0a4af1db96809ab607ff3 | 7,018 |
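A short sketch of how the normalization in check_termination_criteria() behaves (each call also emits a logging.info line); values at or below zero are treated as "ignore this criterion", and dropping both raises.

```python
# Illustrative calls to check_termination_criteria() from the snippet above.
print(check_termination_criteria(theta=0.001, num_iterations=None))  # (0.001, None)
print(check_termination_criteria(theta=-1.0, num_iterations=100))    # (None, 100)
# check_termination_criteria(theta=0, num_iterations=0) raises ValueError
```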
import os
def _get_rank_info():
"""
get rank size and rank id
"""
rank_size = int(os.environ.get("RANK_SIZE", 1))
if rank_size > 1:
rank_size = int(os.environ.get("RANK_SIZE"))
rank_id = int(os.environ.get("RANK_ID"))
else:
rank_size = 1
rank_id = 0
return rank_size, rank_id | 35ef60e41678c4e108d133d16fd59d2f43c6d3dd | 7,019 |
def verify_password(password, hash):
"""Verify if a hash was generated by the password specified.
:password: a string object (plaintext).
:hash: a string object.
:returns: True or False.
"""
method = get_hash_algorithm(flask.current_app.config['HASH_ALGORITHM'])
return method.verify(password, hash) | 484ad9f2debbd8856b9b7fbdd2a7588f9a279f62 | 7,020 |
import re
import numpy as np
def _conversion_sample2v_from_meta(meta_data):
"""
Interpret the meta data to extract an array of conversion factors for each channel
so the output data is in Volts
Conversion factor is: int2volt / channelGain
For Lf/Ap interpret the gain string from metadata
For Nidq, repmat the gains from the trace counts in `snsMnMaXaDw`
:param meta_data: dictionary output from spikeglx.read_meta_data
:return: numpy array with one gain value per channel
"""
def int2volts(md):
""" :return: Conversion scalar to Volts. Needs to be combined with channel gains """
if md.get('typeThis', None) == 'imec':
return md.get('imAiRangeMax') / 512
else:
return md.get('niAiRangeMax') / 32768
int2volt = int2volts(meta_data)
# interprets the gain value from the metadata header:
if 'imroTbl' in meta_data.keys(): # binary from the probes: ap or lf
sy_gain = np.ones(int(meta_data['snsApLfSy'][-1]), dtype=np.float32)
# imroTbl has 384 entries regardless of no of channels saved, so need to index by n_ch
n_chn = _get_nchannels_from_meta(meta_data) - 1
# the sync traces are not included in the gain values, so are included for broadcast ops
gain = re.findall(r'([0-9]* [0-9]* [0-9]* [0-9]* [0-9]*)', meta_data['imroTbl'])[:n_chn]
out = {'lf': np.hstack((np.array([1 / np.float32(g.split(' ')[-1]) for g in gain]) *
int2volt, sy_gain)),
'ap': np.hstack((np.array([1 / np.float32(g.split(' ')[-2]) for g in gain]) *
int2volt, sy_gain))}
elif 'niMNGain' in meta_data.keys(): # binary from nidq
gain = np.r_[
np.ones(int(meta_data['snsMnMaXaDw'][0],)) / meta_data['niMNGain'] * int2volt,
np.ones(int(meta_data['snsMnMaXaDw'][1],)) / meta_data['niMAGain'] * int2volt,
np.ones(int(meta_data['snsMnMaXaDw'][2], )) * int2volt, # no gain for analog sync
np.ones(int(np.sum(meta_data['snsMnMaXaDw'][3]),))] # no unit for digital sync
out = {'nidq': gain}
return out | e8cd2ea376bdb44ee999459ffcc28c4c4db39458 | 7,021 |
def read_split_csv(input_files, delimiter='\t', names=['src', 'dst'],
dtype=['int32', 'int32']):
"""
Read csv for large datasets which cannot be read directly by dask-cudf
read_csv due to memory requirements. This function takes large input
split into smaller files (number of input_files > number of gpus),
reads two or more csv per gpu/worker and concatenates them into a
single dataframe. Additional parameters (delimiter, names and dtype)
can be specified for reading the csv file.
"""
client = default_client()
n_files = len(input_files)
n_gpus = get_n_gpus()
n_files_per_gpu = int(n_files/n_gpus)
worker_map = []
for i, w in enumerate(client.has_what().keys()):
files_per_gpu = input_files[i*n_files_per_gpu: (i+1)*n_files_per_gpu]
worker_map.append((files_per_gpu, w))
new_ddf = [client.submit(_read_csv, part, delimiter, names, dtype,
workers=[worker]) for part, worker in worker_map]
wait(new_ddf)
return new_ddf | cd1f2ccd487cf808af1de6a504bc0f6a3a8e34a1 | 7,022 |
def _gnurl( clientID ):
"""
Helper function to form URL to Gracenote_ API service.
:param str clientID: the Gracenote_ client ID.
:returns: the lower level URL to the Gracenote_ API.
:rtype: str
"""
clientIDprefix = clientID.split('-')[0]
return 'https://c%s.web.cddbp.net/webapi/xml/1.0/' % clientIDprefix | 6d1935c8b634459892e4ec03d129c791b1d8a06a | 7,023 |
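A quick sketch of _gnurl() with a made-up client ID: only the prefix before the first dash ends up in the hostname.

```python
# Hypothetical Gracenote client ID; only "1234567" is used in the URL.
print(_gnurl("1234567-ABCDEFGHIJKLMNOP"))
# https://c1234567.web.cddbp.net/webapi/xml/1.0/
```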
def flatatt(attrs):
"""
Convert a dictionary of attributes to a single string.
The returned string will contain a leading space followed by key="value",
XML-style pairs. It is assumed that the keys do not need to be XML-escaped.
If the passed dictionary is empty, then return an empty string.
jsled: stolen from django newforms/util.py...
"""
return u''.join([u' %s="%s"' % (k, escape(v)) for k, v in attrs.items()]) | 01d9ee3ec96b5a096758f60c2defe6c491d94817 | 7,024 |
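A usage sketch for flatatt(), assuming an escape() helper such as django.utils.html.escape is importable in the same module; each key/value pair becomes a leading-space key="value" fragment.

```python
from django.utils.html import escape  # assumed escape() helper

print(flatatt({"class": "btn", "id": "save"}))
# prints: class="btn" id="save"   (with a leading space)
```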
from PIL import Image, ImageDraw
def draw_bboxes(img, boxes, classes):
"""
Draw bounding boxes on top of an image
Args:
img : Array of image to be modified
boxes: An (N,4) array of boxes to draw, where N is the number of boxes.
classes: An (N,1) array of classes corresponding to each bounding box.
Outputs:
    A PIL Image of the same size as 'img' with bounding boxes
    and classes drawn
"""
source = Image.fromarray(img)
draw = ImageDraw.Draw(source)
for i in range(len(boxes)):
xmin,ymin,xmax,ymax = boxes[i]
c = classes[i]
draw.text((xmin+15,ymin+15), str(c))
for j in range(4):
draw.rectangle(((xmin+j, ymin+j), (xmax+j, ymax+j)), outline="red")
return source | 6b60550206aaaa9e5033850c293e6c48a7b13e6d | 7,025 |
def markdown(context, template_path):
""" {% markdown 'terms-of-use.md' %} """
return mark_safe(get_markdown(context, template_path)[0]) | ea6cb711c1a669ad7efdf277baab82ea2a65ba9c | 7,026 |
def investorMasterGetSubaccAssetDetails(email, recvWindow=""):
"""# Query managed sub-account asset details(For Investor Master Account)
#### `GET /sapi/v1/managed-subaccount/asset (HMAC SHA256)`
### Weight:
1
### Parameters:
Name |Type |Mandatory |Description
--------|--------|--------|--------
email |STRING |YES |
recvWindow |LONG |NO |
timestamp |LONG |YES |
"""
endpoint = '/sapi/v1/managed-subaccount/asset'
params = {
"email": email
}
if recvWindow: params["recvWindow"] = recvWindow
return getbinancedata_sig(endpoint, params) | 7d4f4c5cbd069144319268dcb7235926e55f85d8 | 7,027 |
def ema_indicator(close, n=12, fillna=False):
"""EMA
Exponential Moving Average via Pandas
Args:
close(pandas.Series): dataset 'Close' column.
        n(int): n period short-term.
fillna(bool): if True, fill nan values.
Returns:
pandas.Series: New feature generated.
"""
ema_ = ema(close, n, fillna)
return pd.Series(ema_, name='ema') | 9ddb20ddc6e0cc4b1f08a4e3347f719dbe84b55b | 7,028 |
import os
import uuid
import tempfile
def connect(addr=None, proto=None, name=None, pgrok_config=None, **options):
"""
Establish a new ``pgrok`` tunnel for the given protocol to the given port, returning an object representing
the connected tunnel.
    If a tunnel definition in pgrok's config file matches the given ``name``, it will be loaded and used to
    start the tunnel. When ``name`` is ``None`` and a "pgrok_default" tunnel definition exists in ``pgrok``'s
    config, it will be loaded and used. Any ``kwargs`` passed as ``options`` will
override properties from the loaded tunnel definition.
If ``pgrok`` is not installed at :class:`~pgrok.PgrokConfig`'s ``pgrok_path``, calling this method
will first download and install ``pgrok``.
If ``pgrok`` is not running, calling this method will first start a process with
:class:`~pgrok.PgrokConfig`.
.. note::
``pgrok``'s default behavior for ``http`` when no additional properties are passed is to open *two* tunnels,
one ``http`` and one ``https``. This method will return a reference to the ``http`` tunnel in this case. If
only a single tunnel is needed, pass ``bind_tls=True`` and a reference to the ``https`` tunnel will be returned.
"""
if pgrok_config is None:
pgrok_config = get_default_config()
config = get_pgrok_config(pgrok_config.config_path) if os.path.exists(pgrok_config.config_path) else {}
# If a "pgrok-default" tunnel definition exists in the pgrok config, use that
tunnel_definitions = config.get("tunnels", {})
if not name and "pgrok_default" in tunnel_definitions:
name = "pgrok_default"
# Use a tunnel definition for the given name, if it exists
if name and name in tunnel_definitions:
tunnel_definition = tunnel_definitions[name]
proto_map = tunnel_definition.get("proto", {})
protocol = [k for k in proto_map.keys() if k in ['http', 'https', 'tcp']]
assert len(protocol) > 0, \
ValueError("Invalid proto in config should be http|https|tcp")
addr = proto_map[protocol[0]] if not addr else addr
proto = proto if proto else protocol[0]
# Use the tunnel definition as the base, but override with any passed in options
tunnel_definition.update(options)
options = tunnel_definition
addr = str(addr) if addr else "80"
if not proto:
proto = "http"
if not name:
if not addr.startswith("file://"):
name = "{}-{}-{}".format(proto, addr, uuid.uuid4())
else:
name = "{}-file-{}".format(proto, uuid.uuid4())
logger.info("Opening tunnel named: {}".format(name))
# Create a temporary config yml and if config_path not set earlier
if not os.path.exists(pgrok_config.config_path) or not validate_config(config):
with tempfile.NamedTemporaryFile(suffix='.yml') as tmp:
_default_config['tunnels'].pop('pgrok_default', None)
tunnel_name = {}
tunnel_name['proto'] = {proto: addr}
tunnel_name['proto'].update(options)
_default_config['tunnels'][name] = tunnel_name
pgrok_config.config_path = tmp.name
process = get_pgrok_process(pgrok_config, service_name=name)
# Set tunnel parameter
_tunnelcfg = {
"name": name,
"addr": addr,
"proto": proto
}
options.update(_tunnelcfg)
options['api_url'] = process.api_url
options['public_url'] = process.public_url
tunnel = PgrokTunnel(options, pgrok_config)
logger.debug("Creating tunnel with options: {}".format(options))
_current_tunnels[tunnel.public_url] = tunnel
return tunnel | b71b8fd145e36b66f63f2a93a32c6bfc89ec9c13 | 7,029 |
import numpy as np
def u_glob(U, elements, nodes, resolution_per_element=51):
"""
Compute (x, y) coordinates of a curve y = u(x), where u is a
finite element function: u(x) = sum_i of U_i*phi_i(x).
    Method: Run through each element and compute coordinates
over the element.
"""
x_patches = []
u_patches = []
for e in range(len(elements)):
Omega_e = (nodes[elements[e][0]], nodes[elements[e][-1]])
local_nodes = elements[e]
d = len(local_nodes) - 1
X = np.linspace(-1, 1, resolution_per_element)
x = affine_mapping(X, Omega_e)
x_patches.append(x)
u_element = 0
for r in range(len(local_nodes)):
i = local_nodes[r] # global node number
u_element += U[i]*phi_r(r, X, d)
u_patches.append(u_element)
x = np.concatenate(x_patches)
u = np.concatenate(u_patches)
return x, u | 2c9cabf97b9904d80043a0102c0ac8cd156388ae | 7,030 |
def keyring_rgw_create(**kwargs):
"""
Create rgw bootstrap keyring for cluster.
Args:
**kwargs: Arbitrary keyword arguments.
cluster_uuid : Set the cluster UUID. Defaults to value found in
ceph config file.
cluster_name : Set the cluster name. Defaults to "ceph".
"""
params = dict(kwargs)
params["keyring_type"] = "rgw"
return keyring_create(**params) | 077a3536a6e1ce2e762b14d8fb046617136fe941 | 7,031 |
import pandas as pd
import datetime
import matplotlib.dates
from dateutil import parser
def read_tm224_data(filename: str, folder: str = None) -> pd.DataFrame:
"""
Read data stored by Lakeshore TM224 temperature monitor software.
Args:
filename: string
name of ".xls" file on disk
folder: string
location of file on disk
Returns:
df : pandas.DataFrame
DataFrame with all .xls columns and converted matplotlib timestamps
"""
if not filename.endswith(".xls"):
filename += ".xls"
# Extract only the timestamp
timestamp = pd.read_excel(folder + filename, skiprows=1, nrows=1, usecols=[1], header=None)[1][0]
# Parse datetime object from timestamp
timestamp_dt = parser.parse(timestamp, tzinfos={"CET": 0 * 3600})
# Create DataFrame
df = pd.read_excel(folder + filename, skiprows=3)
# Add matplotlib datetimes to DataFrame
time_array = []
for milliseconds in df["Time"]:
time_array.append(timestamp_dt + datetime.timedelta(milliseconds=milliseconds))
# noinspection PyUnresolvedReferences
df["MPL_datetimes"] = matplotlib.dates.date2num(time_array)
return df | 430e5a64b5b572b721177c5adce7e222883e4512 | 7,032 |
import oci.mysql
import mysqlsh
from mds_plugin import compartment, compute, network, object_store
import datetime
import time
def create_db_system(**kwargs):
"""Creates a DbSystem with the given id
If no id is given, it will prompt the user for the id.
Args:
**kwargs: Optional parameters
Keyword Args:
db_system_name (str): The new name of the DB System.
description (str): The new description of the DB System.
availability_domain (str): The name of the availability_domain
shape (str): The compute shape name to use for the instance
subnet_id (str): The OCID of the subnet to use
configuration_id (str): The OCID of the MySQL configuration
data_storage_size_in_gbs (int): The data storage size in gigabytes
mysql_version (str): The MySQL version
admin_username (str): The name of the administrator user account
admin_password (str): The password of the administrator account
private_key_file_path (str): The file path to an SSH private key
par_url (str): The PAR url used for initial data import
perform_cleanup_after_import (bool): Whether the bucket and PARs should
be kept or deleted if an import took place
source_mysql_uri (str): The MySQL Connection URI if data should
be imported from an existing MySQL Server instance
        source_mysql_password (str): The password to use when data
should be imported from an existing MySQL Server instance
source_local_dump_dir (str): The path to a local directory that
contains a dump
source_bucket (str): The name of the source bucket that contains
a dump
host_image_id (str): OCID of the host image to use for this Instance.
Private API only.
defined_tags (dict): The defined_tags of the dynamic group.
freeform_tags (dict): The freeform_tags of the dynamic group
compartment_id (str): The OCID of the compartment
config (object): An OCI config object or None.
interactive (bool): Ask the user for input if needed
return_object (bool): Whether to return the object when created
Returns:
None or the new DB System object if return_object is set to true
"""
db_system_name = kwargs.get("db_system_name")
description = kwargs.get("description")
availability_domain = kwargs.get("availability_domain")
shape = kwargs.get("shape")
subnet_id = kwargs.get("subnet_id")
configuration_id = kwargs.get("configuration_id")
data_storage_size_in_gbs = kwargs.get("data_storage_size_in_gbs")
mysql_version = kwargs.get("mysql_version")
admin_username = kwargs.get("admin_username")
admin_password = kwargs.get("admin_password")
private_key_file_path = kwargs.get(
"private_key_file_path", "~/.ssh/id_rsa")
par_url = kwargs.get("par_url")
perform_cleanup_after_import = kwargs.get(
"perform_cleanup_after_import")
source_mysql_uri = kwargs.get("source_mysql_uri")
source_mysql_password = kwargs.get("source_mysql_password")
source_local_dump_dir = kwargs.get("source_local_dump_dir")
source_bucket = kwargs.get("source_bucket")
host_image_id = kwargs.get("host_image_id")
defined_tags = kwargs.get("defined_tags")
# Conversion from Shell Dict type
if defined_tags:
defined_tags = dict(defined_tags)
freeform_tags = kwargs.get("freeform_tags")
# Conversion from Shell Dict type
if freeform_tags:
freeform_tags = dict(freeform_tags)
compartment_id = kwargs.get("compartment_id")
config = kwargs.get("config")
interactive = kwargs.get("interactive", True)
return_object = kwargs.get("return_object", False)
try:
# Get the active config and compartment
config = configuration.get_current_config(config=config)
compartment_id = configuration.get_current_compartment_id(
compartment_id=compartment_id, config=config)
# Set the import_source_type to 0 to default to a clean new DB System
import_source_type = 0
# Check if source_* parameters are given and if so, set the correct
# import_source_type
if source_mysql_uri is not None:
# Import from an existing MySQL Server instance
import_source_type = 1
elif source_local_dump_dir is not None:
# Import from a local data dir
import_source_type = 2
elif source_bucket is not None:
# Import from an existing bucket
import_source_type = 3
        # If the user did not specify a par_url or another source parameter,
        # let the user choose whether to import data from a given source
if interactive and import_source_type == 0 and par_url is None:
print("Choose one of the following options of how to create the "
"MySQL DB System:\n")
import_sources = [
"Create a clean MySQL DB System",
("Create a MySQL DB System from an existing MySQL Server "
"instance"),
"Create a MySQL DB System from a local dump",
("Create a MySQL DB System from a dump stored on OCI "
"Object Storage")
]
import_source = core.prompt_for_list_item(
item_list=import_sources,
prompt_caption=("Please enter the index of an option listed "
"above: "),
prompt_default_value='', print_list=True)
if import_source == "":
print("Operation cancelled.")
return
import_source_type = import_sources.index(import_source)
# Get a name
if not db_system_name and interactive:
db_system_name = core.prompt(
"Please enter the name for the new DB System: ").strip()
if not db_system_name:
raise Exception("No name given. "
"Operation cancelled.")
# Get a description
if not description and interactive:
description = core.prompt(
"Please enter a description for the new DB System: ").strip()
# Get an admin_username
if not admin_username and interactive:
admin_username = core.prompt(
"MySQL Administrator account name [admin]: ",
{'defaultValue': 'admin'}).strip()
if not admin_username:
raise Exception("No admin username given. "
"Operation cancelled.")
# Get an admin_password
if not admin_password and interactive:
admin_password = get_validated_mysql_password(
password_caption="MySQL Administrator account")
if not admin_password:
raise Exception("No admin password given. "
"Operation cancelled.")
# Get data_storage_size_in_gbs
if not data_storage_size_in_gbs and interactive:
data_storage_size_in_gbs = core.prompt(
"Please enter the amount of data storage size in gigabytes "
"with a minimum of 50 GB [50]: ",
{'defaultValue': '50'}).strip()
try:
data_storage_size_in_gbs = int(data_storage_size_in_gbs)
except ValueError:
ValueError("Please enter a number for data storage size.\n")
if not data_storage_size_in_gbs:
raise Exception("No data storage size given. "
"Operation cancelled.")
# Get the availability_domain name
availability_domain_obj = compartment.get_availability_domain(
random_selection=not interactive,
compartment_id=compartment_id,
availability_domain=availability_domain,
config=config, interactive=interactive,
return_python_object=True)
if not availability_domain_obj:
raise Exception("No availability domain selected. "
"Operation cancelled.")
availability_domain = availability_domain_obj.name
if interactive:
print(f"Using availability domain {availability_domain}.")
# Get the shapes
shape_id = compute.get_shape_name(
shape_name=shape, limit_shapes_to=[
"VM.Standard.E2.1", "VM.Standard.E2.2",
"VM.Standard.E2.4", "VM.Standard.E2.8"],
compartment_id=compartment_id,
availability_domain=availability_domain, config=config,
interactive=interactive)
if shape_id is None or shape_id == "":
print("Compute Shape not set or found. Operation cancelled.")
return
if interactive:
print(f"Using shape {shape_id}.")
# Get private subnet
subnet = network.get_subnet(
subnet_id=subnet_id, public_subnet=False,
compartment_id=compartment_id, config=config,
interactive=interactive)
if subnet is None:
print("Operation cancelled.")
return
if interactive:
print(f"Using subnet {subnet.display_name}.")
# Get mysql_version
mysql_version = get_mysql_version(compartment_id=compartment_id,
config=config)
if mysql_version is None:
print("Operation cancelled.")
return
print(f"Using MySQL version {mysql_version}.")
# Get mysql_configuration
mysql_configuration = get_db_system_configuration(
configuration_id=configuration_id, shape=shape_id,
availability_domain=availability_domain,
compartment_id=compartment_id, config=config)
if mysql_configuration is None:
print("Operation cancelled.")
return
print(f"Using MySQL configuration {mysql_configuration.display_name}.")
# TODO Check Limits
# limits.list_limit_values(config["tenancy"], "mysql").data
# limits.get_resource_availability(
# service_name="mysql", limit_name="vm-standard-e2-4-count",
# compartment_id=config["tenancy"],
# availability_domain="fblN:US-ASHBURN-AD-1").data
# limits.get_resource_availability(
# service_name="compute", limit_name="standard-e2-core-ad-count",
# compartment_id=config["tenancy"],
# availability_domain="fblN:US-ASHBURN-AD-1").data
# If requested, prepare import
if import_source_type > 0:
# If a bucket needs to be created, define a name for it
if import_source_type == 1 or import_source_type == 2:
# Take all alphanumeric chars from the DB System name
# to create the bucket_name
bucket_name = (
f"{''.join(e for e in db_system_name if e.isalnum())}_import_"
f"{datetime.datetime.now():%Y%m%d%H%M%S}")
print(f"\nCreating bucket {bucket_name}...")
bucket = object_store.create_bucket(
bucket_name=bucket_name, compartment_id=compartment_id,
config=config, return_object=True)
if bucket is None:
print("Cancelling operation")
return
if perform_cleanup_after_import is None:
perform_cleanup_after_import = True
# Create a MySQL DB System from an existing MySQL Server instance
if import_source_type == 1:
# Start the dump process
if not util.dump_to_bucket(bucket_name=bucket.name,
connection_uri=source_mysql_uri,
connection_password=source_mysql_password,
create_bucket_if_not_exists=True,
object_name_prefix="",
interactive=interactive,
return_true_on_success=True):
print(f"Could not dump the given instance to the object "
f"store bucket {bucket.name}")
return
# Create a MySQL DB System from local dir
elif import_source_type == 2:
if interactive and source_local_dump_dir is None:
source_local_dump_dir = mysqlsh.globals.shell.prompt(
"Please specify the directory path that contains the "
"dump: ",
{'defaultValue': ''}).strip()
if source_local_dump_dir == "":
print("Operation cancelled.")
return
elif source_local_dump_dir is None:
print("No directory path given. Operation cancelled.")
return
# Upload the files from the given directory to the bucket
file_count = object_store.create_bucket_objects_from_local_dir(
local_dir_path=source_local_dump_dir,
bucket_name=bucket.name,
object_name_prefix="",
compartment_id=compartment_id, config=config,
interactive=False)
if file_count is None:
print("Cancelling operation")
return
elif import_source_type == 3:
# Create a MySQL DB System from a bucket
bucket = object_store.get_bucket(
bucket_name=source_bucket,
compartment_id=compartment_id,
config=config)
if bucket is None:
print("Cancelling operation")
return
bucket_name = bucket.name
if perform_cleanup_after_import is None:
perform_cleanup_after_import = False
# Create PAR for import manifest and progress files
par, progress_par = util.create_bucket_import_pars(
object_name_prefix="",
bucket_name=bucket.name,
db_system_name=db_system_name,
compartment_id=compartment_id,
config=config)
if par is None or progress_par is None:
return
# Build URLs
par_url_prefix = object_store.get_par_url_prefix(config=config)
par_url = par_url_prefix + par.access_uri
# progress_par_url = par_url_prefix + progress_par.access_uri
# Once the API supports the new PAR based import, build the
# import_details using the given par_url
# if par_url:
# import urllib.parse
# import_details = oci.mysql.models.\
# CreateDbSystemSourceImportFromUrlDetails(
# source_type=oci.mysql.models.
# CreateDbSystemSourceImportFromUrlDetails.
# SOURCE_TYPE_IMPORTURL,
# source_url=(f'{par_url}?progressPar='
# f'{urllib.parse.quote(progress_par_url)}'))
db_system_details = oci.mysql.models.CreateDbSystemDetails(
description=description,
admin_username=admin_username,
admin_password=admin_password,
compartment_id=compartment_id,
configuration_id=mysql_configuration.id,
data_storage_size_in_gbs=data_storage_size_in_gbs,
display_name=db_system_name,
mysql_version=mysql_version,
shape_name=shape_id,
availability_domain=availability_domain,
subnet_id=subnet.id,
defined_tags=defined_tags,
freeform_tags=freeform_tags,
host_image_id=host_image_id
# source=import_details
)
# Get DbSystem Client
db_sys = core.get_oci_db_system_client(config=config)
# Create DB System
new_db_system = db_sys.create_db_system(db_system_details).data
# If there was a PAR URL given, wait till the system becomes
# ACTIVE and then perform the clean up work
if par_url is not None:
print("Waiting for MySQL DB System to become active.\n"
"This can take up to 20 minutes or more...", end="")
# Wait until the lifecycle_state == ACTIVE, 20 minutes max
cycles = 0
while cycles < 240:
db_system = db_sys.get_db_system(new_db_system.id).data
if db_system.lifecycle_state == "ACTIVE" or \
db_system.lifecycle_state == "FAILED":
break
else:
time.sleep(10)
print(".", end="")
cycles += 1
print("")
# Until the API is ready to directly import at deployment time,
# also start the import from here
if db_system.lifecycle_state == "ACTIVE":
util.import_from_bucket(
bucket_name=bucket_name,
db_system_id=new_db_system.id,
db_system_name=db_system_name,
object_name_prefix="",
admin_username=admin_username,
admin_password=admin_password,
private_key_file_path=private_key_file_path,
perform_cleanup=perform_cleanup_after_import,
compartment_id=compartment_id,
config=config,
interactive=False
)
else:
if return_object:
return new_db_system
else:
if new_db_system.lifecycle_state == "CREATING":
print(f"\nMySQL DB System {db_system_name} is being created.\n"
f"Use mds.ls() to check it's provisioning state.\n")
else:
print(f"\nThe creation of the MySQL DB System {db_system_name} "
"failed.\n")
except oci.exceptions.ServiceError as e:
if not interactive:
raise
print(f'ERROR: {e.message}. (Code: {e.code}; Status: {e.status})')
return
except (ValueError, oci.exceptions.ClientError) as e:
if not interactive:
raise
print(f'ERROR: {e}')
return | 4e801cf1752e527bc82d35f9e134f2fbb26a201b | 7,033 |
import json
import logging
import numpy as np
def load_keypoints2d_file(file_path, njoints=17):
"""load 2D keypoints from keypoint detection results.
Only one person is extracted from the results. If there are multiple
persons in the prediction results, we select the one with the highest
detection score.
Args:
file_path: the json file path.
        njoints: number of joints in the keypoint definition.
Returns:
A `np.array` with the shape of [njoints, 3].
"""
keypoint = array_nan((njoints, 3), dtype=np.float32)
det_score = 0.0
try:
with open(file_path, 'r') as f:
data = json.load(f)
except Exception as e: # pylint: disable=broad-except
logging.warning(e)
return keypoint, det_score
det_scores = np.array(data['detection_scores'])
keypoints = np.array(data['keypoints']).reshape((-1, njoints, 3))
# The detection results may contain zero person or multiple people.
if det_scores.shape[0] == 0:
# There is no person in this image. We set NaN to this frame.
return keypoint, det_score
else:
# There are multiple people (>=1) in this image. We select the one with
# the highest detection score.
idx = np.argmax(det_scores)
keypoint = keypoints[idx]
det_score = det_scores[idx]
return keypoint, det_score | 3cf5c8f2c236b3883e983c74e1ac23c78d256b0d | 7,034 |
def utf8_bytes(string):
""" Convert 'string' to bytes using UTF-8. """
return bytes(string, 'UTF-8') | 8e5423d2b53e8d5fbeb07017ccd328236ef8bea5 | 7,035 |
import jax
import jax.numpy as jnp
from jax import lax
def line_search(f, xk, pk, old_fval=None, old_old_fval=None, gfk=None, c1=1e-4,
                c2=0.9, maxiter=20):
"""Inexact line search that satisfies strong Wolfe conditions.
Algorithm 3.5 from Wright and Nocedal, 'Numerical Optimization', 1999, pg. 59-61
Args:
    f: function of the form f(x) where x is a flat ndarray and returns a real
      scalar. The function should be composed of operations with vjp defined.
    xk: initial guess.
    pk: direction to search in. Assumes the direction is a descent direction.
    old_fval, gfk: initial value and gradient of f at xk.
    old_old_fval: function value at the point preceding xk, used to estimate
      the initial step size.
maxiter: maximum number of iterations to search
c1, c2: Wolfe criteria constant, see ref.
Returns: LineSearchResults
"""
def restricted_func_and_grad(t):
phi, g = jax.value_and_grad(f)(xk + t * pk)
dphi = jnp.real(_dot(g, pk))
return phi, dphi, g
if old_fval is None or gfk is None:
phi_0, dphi_0, gfk = restricted_func_and_grad(0.)
else:
phi_0 = old_fval
dphi_0 = jnp.real(_dot(gfk, pk))
if old_old_fval is not None:
candidate_start_value = 1.01 * 2 * (phi_0 - old_old_fval) / dphi_0
start_value = jnp.where(candidate_start_value > 1, 1.0, candidate_start_value)
else:
start_value = 1
def wolfe_one(a_i, phi_i):
# actually negation of W1
return phi_i > phi_0 + c1 * a_i * dphi_0
def wolfe_two(dphi_i):
return jnp.abs(dphi_i) <= -c2 * dphi_0
state = _LineSearchState(
done=False,
failed=False,
# algorithm begins at 1 as per Wright and Nocedal, however Scipy has a
# bug and starts at 0. See https://github.com/scipy/scipy/issues/12157
i=1,
a_i1=0.,
phi_i1=phi_0,
dphi_i1=dphi_0,
nfev=1 if (old_fval is None or gfk is None) else 0,
ngev=1 if (old_fval is None or gfk is None) else 0,
a_star=0.,
phi_star=phi_0,
dphi_star=dphi_0,
g_star=gfk,
)
def body(state):
# no amax in this version, we just double as in scipy.
# unlike original algorithm we do our next choice at the start of this loop
a_i = jnp.where(state.i == 1, start_value, state.a_i1 * 2.)
phi_i, dphi_i, g_i = restricted_func_and_grad(a_i)
state = state._replace(nfev=state.nfev + 1,
ngev=state.ngev + 1)
star_to_zoom1 = wolfe_one(a_i, phi_i) | ((phi_i >= state.phi_i1) & (state.i > 1))
star_to_i = wolfe_two(dphi_i) & (~star_to_zoom1)
star_to_zoom2 = (dphi_i >= 0.) & (~star_to_zoom1) & (~star_to_i)
zoom1 = _zoom(restricted_func_and_grad,
wolfe_one,
wolfe_two,
state.a_i1,
state.phi_i1,
state.dphi_i1,
a_i,
phi_i,
dphi_i,
gfk,
~star_to_zoom1)
state = state._replace(nfev=state.nfev + zoom1.nfev,
ngev=state.ngev + zoom1.ngev)
zoom2 = _zoom(restricted_func_and_grad,
wolfe_one,
wolfe_two,
a_i,
phi_i,
dphi_i,
state.a_i1,
state.phi_i1,
state.dphi_i1,
gfk,
~star_to_zoom2)
state = state._replace(nfev=state.nfev + zoom2.nfev,
ngev=state.ngev + zoom2.ngev)
state = state._replace(
done=star_to_zoom1 | state.done,
failed=(star_to_zoom1 & zoom1.failed) | state.failed,
**_binary_replace(
star_to_zoom1,
state._asdict(),
zoom1._asdict(),
keys=['a_star', 'phi_star', 'dphi_star', 'g_star'],
),
)
state = state._replace(
done=star_to_i | state.done,
**_binary_replace(
star_to_i,
state._asdict(),
dict(
a_star=a_i,
phi_star=phi_i,
dphi_star=dphi_i,
g_star=g_i,
),
),
)
state = state._replace(
done=star_to_zoom2 | state.done,
failed=(star_to_zoom2 & zoom2.failed) | state.failed,
**_binary_replace(
star_to_zoom2,
state._asdict(),
zoom2._asdict(),
keys=['a_star', 'phi_star', 'dphi_star', 'g_star'],
),
)
state = state._replace(i=state.i + 1, a_i1=a_i, phi_i1=phi_i, dphi_i1=dphi_i)
return state
state = lax.while_loop(lambda state: (~state.done) & (state.i <= maxiter) & (~state.failed),
body,
state)
status = jnp.where(
state.failed,
jnp.array(1), # zoom failed
jnp.where(
state.i > maxiter,
jnp.array(3), # maxiter reached
jnp.array(0), # passed (should be)
),
)
# Step sizes which are too small causes the optimizer to get stuck with a
# direction of zero in <64 bit mode - avoid with a floor on minimum step size.
alpha_k = state.a_star
alpha_k = jnp.where((jnp.finfo(alpha_k).bits != 64)
& (jnp.abs(alpha_k) < 1e-8),
jnp.sign(alpha_k) * 1e-8,
alpha_k)
results = _LineSearchResults(
failed=state.failed | (~state.done),
nit=state.i - 1, # because iterations started at 1
nfev=state.nfev,
ngev=state.ngev,
k=state.i,
a_k=alpha_k,
f_k=state.phi_star,
g_k=state.g_star,
status=status,
)
return results | 787d5c1fa472f9cc2d59e517a3388f0538c4affd | 7,036 |
def _get_value(session_browser, field):
"""Get an input field's value."""
return session_browser.evaluate_script('$("#id_%s").val()' % field) | 7ed2d130b83af7e6fdb6cce99efb44846820585a | 7,037 |
def search_2d(arr, target):
"""
TODO same func as in adfgvx
"""
for row in range(len(arr)):
for col in range(len(arr)):
if arr[row][col] == target:
return row, col
raise ValueError | 8b6f9885175ddc766052aa10c9e57d8212ae385a | 7,038 |
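A small usage sketch for search_2d(): it returns the first (row, col) match scanning row by row and raises ValueError when the target is absent.

```python
# Locate a value in a small square grid.
grid = [["A", "B"],
        ["C", "D"]]
print(search_2d(grid, "C"))  # (1, 0)
```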
import logging
def scale_large_images_landmarks(images, landmarks):
""" scale images and landmarks up to maximal image size
:param list(ndarray) images: list of images
:param list(ndarray) landmarks: list of landmarks
:return tuple(list(ndarray),list(ndarray)): lists of images and landmarks
>>> scale_large_images_landmarks([np.zeros((8000, 500, 3), dtype=np.uint8)],
... [None, None]) # doctest: +ELLIPSIS
([array(...)], [None, None])
"""
if not images:
return images, landmarks
scale = estimate_scaling(images)
if scale < 1.:
logging.debug(
            'One or more images are larger than the recommended size for visualisation,'
            ' a resize with factor %f will be applied', scale
)
# using float16 as image raise TypeError: src data type = 23 is not supported
images = [
resize(img, None, fx=scale, fy=scale, interpolation=INTER_LINEAR) if img is not None else None for img in images
]
landmarks = [lnds * scale if lnds is not None else None for lnds in landmarks]
return images, landmarks | 63449e9281b5dcfcafb89e8a661494f99170f19d | 7,039 |
def home():
"""Post-login page."""
if flask.request.method == 'POST':
rooms = get_all_open_rooms()
name = "anon"
if flask.request.form['name'] != "":
name = flask.request.form['name']
player_id = flask_login.current_user.id
game_id = ""
if flask.request.form['submit'] == 'create':
game_id, error_message = create_room(player_id, name)
if game_id is None:
flask.flash(error_message)
return flask.render_template('home.html', user=flask_login.current_user)
else:
game_id = flask.request.form['secret-key']
added, error_message = add_player_to_room(game_id, player_id, name)
if not added:
flask.flash(error_message)
return flask.render_template('home.html', user=flask_login.current_user)
else:
# notify all players that a new one has joined
update_players(game_id)
return flask.redirect(flask.url_for('.game_page'))
else:
        # TODO: work out if noob or not - need DB field
return flask.render_template('home.html', user=flask_login.current_user, noob=True) | 3bac8a5adeeb3e22da9ef7f2ee840a02bc70b816 | 7,040 |
import numpy as np
def ik(T, tf_base) -> IKResult:
""" TODO add base frame correction
"""
Rbase = tf_base[:3, :3]
Ree = T[:3, :3]
Ree_rel = np.dot(Rbase.transpose(), Ree)
# ignore position
# n s a according to convention Siciliano
n = Ree_rel[:3, 0]
s = Ree_rel[:3, 1]
a = Ree_rel[:3, 2]
A = np.sqrt(a[0] ** 2 + a[1] ** 2)
# solution with theta2 in (0, pi)
t1_1 = np.arctan2(a[1], a[0])
t2_1 = np.arctan2(A, a[2])
t3_1 = np.arctan2(s[2], -n[2])
# solution with theta2 in (-pi, 0)
t1_2 = np.arctan2(-a[1], -a[0])
t2_2 = np.arctan2(-A, a[2])
t3_2 = np.arctan2(-s[2], n[2])
q_sol = np.zeros((2, 3))
q_sol[0, 0], q_sol[0, 1], q_sol[0, 2] = t1_1, t2_1, t3_1
q_sol[1, 0], q_sol[1, 1], q_sol[1, 2] = t1_2, t2_2, t3_2
return IKResult(True, q_sol) | 41cf7ba841397f0d26ff597952609aa0685afe09 | 7,041 |
def standarize_ms(datas, val_index, max=(2**32 - 1)):
    """
    Standardize milliseconds elapsed from Arduino readings.
    Note: Only accounts for one wrap-around of ms from the Arduino.
    datas:
        List of data readings
    val_index:
        Index of ms value in a reading data entry
    max:
        Max value of ms - since the Arduino will output
        a value that wraps around from the time it starts.
        For the correct value, see https://www.arduino.cc/en/Reference/Millis.
    """
    def _standarize_value(initial_value, reading):
        reading[val_index] = int(reading[val_index]) - initial_value
        if reading[val_index] <= 0:
            reading[val_index] += max
        return reading
    initial_value = int(datas[0][val_index])
    res = list(map(lambda x: _standarize_value(initial_value, x), datas))
    res[0][val_index] = 0
    return res | 84bf498ff3c88b3415433fa9d5be7b6865b3216b | 7,042 |
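An illustrative run against the corrected standarize_ms() above, using made-up readings whose element at index 1 holds the Arduino millis() value; readings are rebased in place so the first one becomes 0.

```python
# Hypothetical readings: [reading_id, millis].
readings = [[0, 1000], [1, 1500], [2, 2250]]
standarize_ms(readings, val_index=1)
print(readings)  # [[0, 0], [1, 500], [2, 1250]]
```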
def corr_bias(x_data, y_data, yerr, pdz1_x, pdz1_y, pdz2_x, pdz2_y):
"""
Given a correlation measurement and associated PDZs, generate a model and
fit as a bias to the measurement. Return:
1) the model [unbiased] (x and y float arrays)
2) best fit bias (float)
3) the bias PDF (x and y float arrays)
@params
x_data - The central angles of the correlation measurements
y_data - The values of the correlation measurements
yerr - The errorbars of the correlation measurements
pdz1_x - PDZ 1 redshift range to generate models from
pdz1_y - PDZ 1 probability values to generate models from
pdz2_x - PDZ 2 redshift range to generate models from
pdz2_y - PDZ 2 probability values to generate models from
pdz1_x and pdz2_x, pdz1_y and pdz2_y should be the same for an autocorrelation
@returns
xmod - the angular range associated with the generated model
ymod - the value of the model at each angle
best - The best fit bias value
(i.e. square this and multiply it by the base model for
the best fitting model)
xbias - The range of bias values tested
ybias - The probability associated with each bias value
chisq - The not reduced chi square value associated with the best
fit bias value
"""
xmod, ymod = model(pdz1_x, pdz1_y, pdz2_x, pdz2_y)
xbias, ybias, chisq, best = bias_fit(x_data, y_data, yerr, xmod, ymod)
return xmod, ymod, best, xbias, ybias, chisq | 255e1c5a67551deb19b91d247f5a913541d8f1da | 7,043 |
import numpy as np
from matplotlib.patches import Ellipse
from matplotlib import transforms
def confidence_ellipse(
x=None, y=None, cov=None, ax=None, n_std=3.0, facecolor="none", **kwargs
):
"""
Create a plot of the covariance confidence ellipse of `x` and `y`
Parameters
----------
x, y : array_like, shape (n, )
Input data.
cov : array_like, shape (2, 2)
covariance matrix. Mutually exclusive with input data.
ax : matplotlib.axes.Axes
The axes object to draw the ellipse into.
n_std : float
The number of standard deviations to determine the ellipse's radiuses.
Returns
-------
matplotlib.patches.Ellipse
Other parameters
----------------
kwargs : `~matplotlib.patches.Patch` properties
"""
    if x is None and y is None:
        if cov is None:
            raise ValueError("Either x and y, or cov must be provided")
    else:
        if x.size != y.size:
            raise ValueError("x and y must be the same size")
        cov = np.cov(x, y)
pearson = cov[0, 1] / np.sqrt(cov[0, 0] * cov[1, 1])
# Using a special case to obtain the eigenvalues of this
    # two-dimensional dataset.
ell_radius_x = np.sqrt(1 + pearson)
ell_radius_y = np.sqrt(1 - pearson)
ellipse = Ellipse(
(0, 0),
width=ell_radius_x * 2,
height=ell_radius_y * 2,
facecolor=facecolor,
**kwargs
)
    # Calculating the standard deviation of x from
    # the square root of the variance and multiplying
    # by the given number of standard deviations.
scale_x = np.sqrt(cov[0, 0]) * n_std
mean_x = np.mean(x)
    # calculating the standard deviation of y ...
scale_y = np.sqrt(cov[1, 1]) * n_std
mean_y = np.mean(y)
transf = (
transforms.Affine2D()
.rotate_deg(45)
.scale(scale_x, scale_y)
.translate(mean_x, mean_y)
)
ellipse.set_transform(transf + ax.transData)
return ax.add_patch(ellipse) | 3965012ccdd1f6b71af4f169b812c384446ed76d | 7,044 |
from pathlib import Path
from typing import Iterator
import itertools
import os
def lsR(root: Path) -> Iterator[Path]:
"""Recursive list a directory and return absolute path"""
return filter(lambda p: ".git" not in p.parts, itertools.chain.from_iterable(
map(
lambda lsdir: list(map(lambda f: Path(lsdir[0]) / f, lsdir[2])),
os.walk(root),
)
)) | 67771d5e305d30ac72aeaab16b72ad5a85fe1493 | 7,045 |
from typing import List
from dataclasses import MISSING, fields as dataclass_fields, is_dataclass
from attr import Attribute, Factory, NOTHING, fields as attrs_fields
def adapted_fields(type) -> List[Attribute]:
"""Return the attrs format of `fields()` for attrs and dataclasses."""
if is_dataclass(type):
return [
Attribute(
attr.name,
attr.default
if attr.default is not MISSING
else (
Factory(attr.default_factory)
if attr.default_factory is not MISSING
else NOTHING
),
None,
True,
None,
True,
attr.init,
True,
type=attr.type,
)
for attr in dataclass_fields(type)
]
else:
return attrs_fields(type) | cc6a799e06715cbd4e3219ea42aaeff2e4924613 | 7,046 |
# Note: get_args() below is assumed to be a module-local helper that parses
# command-line arguments (see the docstring); typing.get_args is not what is
# needed here.
def get_parms():
"""
Use get_args to get the args, and return a dictionary of the args ready for
use in pump software.
@see get_args()
:return: dict: parms
"""
parms = {}
args = get_args()
for name, val in vars(args).items():
if val is not None:
parms[name] = val
return parms | 6ebdbee656fd216e5d8c66025029aa2d58641831 | 7,047 |
import ftplib
import os
import re
def cloud_backup(backup_info: dict):
"""
Send latest backup to the cloud.
Parameters
----------
backup_info: dict
Dictionary containing information in regards to date of backup and batch number.
"""
session = ftplib.FTP_TLS("u301483.your-storagebox.de")
session.login(user="u301483", passwd="dI52PgdgGeB8js0v")
try:
folder_name = backup_info["folder_name"]
if folder_name == "0000-00-00":
for parquet_file in os.listdir("0000-00-00"):
path = f"{cwd}/0000-00-00/{parquet_file}"
file = open(path, "rb")
session.storbinary(f"STOR {folder_name}\\{parquet_file}", file)
file.close()
else:
path_to_date = f"{cwd}/{folder_name}"
for parquet_file in os.listdir(path_to_date):
priority = re.search(r"\d", parquet_file)
digit = int(priority.group())
if digit <= backup_info["batch"]:
path = f"{cwd}/{folder_name}/{parquet_file}"
file = open(path, "rb")
session.storbinary(f"STOR {folder_name}\\{parquet_file}", file)
file.close()
except TypeError:
pass
session.quit()
return "Backup completed" | 47e2b6de1430b4b784dc0cd486d105b3c1653b12 | 7,048 |
import numpy as np
from sklearn.utils import check_random_state  # assumed source of check_random_state
def make_led_sample(n_samples=200, irrelevant=0, random_state=None):
"""Generate random samples from the 7-segment problem.
Parameters
----------
n_samples : int, optional (default=200)
The number of samples to generate.
irrelevant : int, optional (default=0)
The number of irrelevant binary features to add.
Returns
-------
X, y
"""
random_state = check_random_state(random_state)
data = np.array([[0, 0, 1, 0, 0, 1, 0, 1],
[1, 0, 1, 1, 1, 0, 1, 2],
[1, 0, 1, 1, 0, 1, 1, 3],
[0, 1, 1, 1, 0, 1, 0, 4],
[1, 1, 0, 1, 0, 1, 1, 5],
[1, 1, 0, 1, 1, 1, 1, 6],
[1, 0, 1, 0, 0, 1, 0, 7],
[1, 1, 1, 1, 1, 1, 1, 8],
[1, 1, 1, 1, 0, 1, 1, 9],
[1, 1, 1, 0, 1, 1, 1, 0]])
data = data[random_state.randint(0, 10, n_samples)]
    X, y = np.array(data[:, :7], dtype=bool), data[:, 7]
if irrelevant > 0:
X = np.hstack((X, random_state.rand(n_samples, irrelevant) > 0.5))
return X, y | 7dab2595c0118ca2f08a99ded22047be164f1648 | 7,049 |
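A quick sketch of make_led_sample()'s output shape (assuming the NumPy and check_random_state imports added above are valid in this project): 7 boolean segment features plus the requested irrelevant ones.

```python
# Draw a few samples from the 7-segment problem with 2 noise features.
X, y = make_led_sample(n_samples=5, irrelevant=2, random_state=0)
print(X.shape, y.shape)  # (5, 9) (5,)
print(y)                 # digit labels between 0 and 9
```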
def handle_source_authorization_exception(e):
""" Error handler: the data source requires authorisation
This will be triggered when opening a private HDX dataset before
the user has supplied their authorisation token.
@param e: the exception being handled
"""
if e.message:
flask.flash(e.message)
# we're using flask.g.recipe_id to handle the case where a saved recipe
# points to a formerly-public dataset that has suddenly become private
# normally, it will be None (because there's no saved recipe yet)
recipe = recipes.Recipe(recipe_id=flask.g.recipe_id)
# add an extra parameter for the /data/save form to indicate that we
# want the user to provide an authorisation token
extras = {
'need_token': 'on'
}
# note whether the resource looked like it came from HDX
if e.is_ckan:
extras['is_ckan'] = 'on'
# redirect to the /data/save page to ask the user for a token
return flask.redirect(util.data_url_for('data_save', recipe=recipe, extras=extras), 302) | e2c736b301e229d61874bb3cfad13b86dc93e1d1 | 7,050 |
import numpy as np
def findcosmu(re0, rp0, sublat, latc, lon): # considers latc to be planetocentric latitudes, but sublat to be planetographic
    """Takes the equatorial and polar radius of Jupiter (re0, rp0 respectively), the sub-latitude of Jupiter, latitude and
    longitude (both in radians) to determine the "cos(mu)" of the photons. This effectively helps to identify where the limb
    of Jupiter occurs in the Chandra observations"""
rfactor = (re0/rp0)**2 # ratio of the equitorial radius and polar radius...
lat = np.arctan(np.tan(latc)*rfactor) # and coordinate transformation from planetocentric latitude -> planetographic latitude
ans = (rfactor * (np.cos(lon)*np.cos(sublat)*np.cos(lat)) + (np.sin(sublat)*np.sin(lat))) / np.sqrt(rfactor*np.cos(sublat)**2 \
+ np.sin(lat)**2) / np.sqrt(rfactor * np.cos(lat)**2 + np.sin(lat)**2) # to return the value(s) of cos(mu)
return ans | 677adffb6f00e9e1119a71a660ee81d2893d4ef1 | 7,051 |
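# Usage sketch with assumed Jupiter radii (equatorial ~71492 km, polar ~66854 km)
# and angles in radians; the disk centre gives cos(mu) = 1.
print(findcosmu(71492.0, 66854.0, 0.0, 0.0, 0.0))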
import numpy as N
def RMS_energy(frames):
"""Computes the RMS energy of frames"""
f = frames.flatten()
return N.sqrt(N.mean(f * f)) | 10d366e771f629c6efda2faf1f752363dca63b0a | 7,052 |
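# Usage sketch: RMS of two frames of audio-like samples.
frames = N.array([[1.0, -1.0], [2.0, -2.0]])
print(RMS_energy(frames))  # sqrt(mean([1, 1, 4, 4])) ~= 1.58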
import urllib
def is_blacklisted_url(url):
"""
    Return whether the URL is blacklisted or not.
    Checks the URL's netloc against the WHITELIST_URL and BLACKLIST_URLS rules.
:param url: url string
:return: True if URL is blacklisted, else False
"""
url = urllib.parse.urlparse(url).netloc
for method in WHITELIST_URL:
for whitelist_url in WHITELIST_URL[method]:
if method(url, whitelist_url):
return False
for method in BLACKLIST_URLS:
for blacklist_url in BLACKLIST_URLS[method]:
if method(url, blacklist_url):
return True
return False | 8a987c0bbce01d18da67b047aed0e680ce5fc661 | 7,053 |
# Assumed imports: euler2quat from transforms3d (returns w, x, y, z) and the ROS Quaternion message type.
from transforms3d.euler import euler2quat
from geometry_msgs.msg import Quaternion
def heading(yaw):
    """A helper function to generate quaternions from yaws."""
q = euler2quat(0.0, 0.0, yaw)
quat = Quaternion()
quat.w = q[0]
quat.x = q[1]
quat.y = q[2]
quat.z = q[3]
return quat | fcd05575257ef6cdc084cb2fde309aa48b5a2fb5 | 7,054 |
def check_login_required(view_func):
"""
A decorator that checks whether login is required on this installation
and, if so, checks if the user is logged in. If login is required and
the user is not logged in, they're redirected to the login link.
"""
def _check(*args, **kwargs):
siteconfig = SiteConfiguration.objects.get_current()
if siteconfig.get("auth_require_sitewide_login"):
return login_required(view_func)(*args, **kwargs)
else:
return view_func(*args, **kwargs)
return _check | 9f0b44f630a24649d87af0bd604a41b7b5b885de | 7,055 |
def Str(*args):
"""(s1, s2, ...) -> match s1 or s2 or ..."""
if len(args) == 1:
return Str1(args[0])
return Expression.Alt(tuple(map(Str, args))) | 41aece71a6a774db58028add5d60d8c9fed42dd3 | 7,056 |
import numpy as np
def image_noise_gaussian(image):
"""
Adds Gaussian noise to the provided image
"""
    float_img = image.astype(float)
gauss = np.random.normal(0.0, 4.0, (IMG_SIZE, IMG_SIZE, IMG_CHANNELS))
    gauss = gauss.reshape(IMG_SIZE, IMG_SIZE, IMG_CHANNELS).astype(float)
result = float_img + gauss
result = np.clip(result, 0, 255)
result = result.astype(np.uint8)
return result | 0e5f5a83f7017d48e083a35bcb22cdf50ebb1006 | 7,057 |
import torch as T  # assumed stand-in: the original aliases a torch-like tensor backend as T; `flatten` below comes from that backend
def argsort(x: T.FloatTensor, axis: int = None) -> T.LongTensor:
"""
Get the indices of a sorted tensor.
If axis=None this flattens x.
Args:
x: A tensor:
axis: The axis of interest.
Returns:
tensor (of ints): indices of sorted tensor
"""
if axis is None:
return flatten(x).sort()[1]
else:
return x.sort(dim=axis)[1] | 57e2e4d8c5a870c4ea382a02e19d0451dbe90704 | 7,058 |
import numpy as np
import pandas as pd
def dirPickledSize(obj,exclude=[]):
"""For each attribute of obj (excluding those specified and those that start with '__'),
compute the size using getPickledSize(obj) and return as a pandas Series of KBs"""
return pd.Series({o:getPickledSize(getattr(obj, o))/1024. for o in dir(obj) if not np.any([o[:2]=='__', o in exclude, getattr(obj, o) is None])}) | d27b404f8c637aa7dd230126d3dbe9112240112c | 7,059 |
from typing import Any
from flask import abort, render_template, session  # assumed Flask context; fetch_audit_log is an app-level helper
def audit_log() -> Any:
"""
List all events related to the connected member.
"""
if "member_id" not in session:
abort(404)
return render_template(
"audit_log.html",
full_audit_log=fetch_audit_log(session["member_id"]),
) | a5c95ac9c7e55212f8e308a9bf141468dc3a7626 | 7,060 |
import pandas as pd
def load_comparisonXL(method, evaluate="train", dropna=True):
"""Load comparison table."""
if evaluate == "test":
e = "['Test']"
elif evaluate == "in bag":
e = "['In Bag']"
elif evaluate == "out of bag":
e = "['Out of Bag']"
else:
e = "['Train']"
# Import methods
table = []
for i in method:
table.append(pd.read_excel(i + ".xlsx"))
# Concatenate table
df = pd.DataFrame()
for i in range(len(table)):
df = pd.concat([df, table[i].loc[table[i]['evaluate'] == e].T.squeeze()], axis=1, sort=False)
df = df.T.drop(columns="evaluate")
# Remove [ ] from string
for i in range(len(df)):
for j in range(len(df.T)):
if type(df.iloc[i, j]) is str:
df.iloc[i, j] = df.iloc[i, j][2: -2]
# Reset index and add methods column
method_name = []
for i in range(len(method)):
name_i = method[i].rsplit('/', 1)[1]
method_name.append(name_i)
df = df.reset_index()
df = pd.concat([pd.Series(method_name, name="method"), df], axis=1, sort=False)
df = df.drop("index", 1)
#df = df.set_index("method")
# drop columns with just nans
if dropna is True:
df = df.dropna(axis=1, how='all')
return df | 56ff4d8c74ec88fc8b2f245706b7cf039334a76f | 7,061 |
def verify_user_password(user: User, password: str) -> bool:
"""Verify User's password with the one that was given on login page."""
return pwd_context.verify(password, user.password) | 43b25118e5ef3b89622acd7aa3276a1b18352674 | 7,062 |
def __valid_ddb_response_q(response):
"""private function to validate a given DynamoDB query response."""
if 'ResponseMetadata' in response:
if 'HTTPStatusCode' in response['ResponseMetadata']:
if response['ResponseMetadata']['HTTPStatusCode'] == 200:
return True
return False | f4e71c4f5d058ba20013b3a405ffeff637e03ae8 | 7,063 |
def GetPipelineResultsPathInGCS(artifacts_path):
"""Gets a full Cloud Storage path to a pipeline results YAML file.
Args:
artifacts_path: string, the full Cloud Storage path to the folder containing
pipeline artifacts, e.g. 'gs://my-bucket/artifacts'.
Returns:
A string representing the full Cloud Storage path to the pipeline results
YAML file.
"""
return '{0}/results/results.yaml'.format(artifacts_path) | 83b7c15f00679ff201c9a8b155102f36bb8e685c | 7,064 |
from scipy.special import lpmn
def Pnm_p(n, m, x):
return lpmn(m, n, x)[1][-1, -1] | 027cb169263853ede6d29a6760da981d30ef950b | 7,065 |
import numpy as np
def _remove_empty_subspace(subspaces, n_clusters, m, P, centers, labels, scatter_matrices):
"""
Check if after rotation and rearranging the dimensionalities a empty subspaces occurs. Empty subspaces will be
removed for the next iteration. Therefore all necessary lists will be updated.
:param subspaces: number of subspaces
:param n_clusters:
:param m: list containing number of dimensionalities for each subspace
:param P: list containing projections for each subspace
:param centers: list containing the cluster centers for each subspace
:param labels: list containing cluster assignments for each subspace
:param scatter_matrices: list containing scatter matrices for each subspace
:return: subspaces, n_clusters, m, P, centers, labels, scatter_matrices
"""
if 0 in m:
np_m = np.array(m)
empty_spaces = np.where(np_m == 0)[0]
print(
"[NrKmeans] ATTENTION:\nSubspaces were lost! Number of lost subspaces:\n" + str(
len(empty_spaces)) + " out of " + str(
len(m)))
subspaces -= len(empty_spaces)
n_clusters = [x for i, x in enumerate(
n_clusters) if i not in empty_spaces]
m = [x for i, x in enumerate(m) if i not in empty_spaces]
P = [x for i, x in enumerate(P) if i not in empty_spaces]
centers = [x for i, x in enumerate(centers) if i not in empty_spaces]
labels = [x for i, x in enumerate(labels) if i not in empty_spaces]
scatter_matrices = [x for i, x in enumerate(
scatter_matrices) if i not in empty_spaces]
return subspaces, n_clusters, m, P, centers, labels, scatter_matrices | 473a509860b9708ee217f4f7b0a2718d1a3a7d7e | 7,066 |
import panflute as pf
def _get_citekeys_action(elem, doc):
"""
Panflute action to extract citationId from all Citations in the AST.
"""
if not isinstance(elem, pf.Citation):
return None
manuscript_citekeys = global_variables["manuscript_citekeys"]
manuscript_citekeys.append(elem.id)
return None | 74dec7a972f38c34040dc430b0c130b2a76784c2 | 7,067 |
import tensorflow as tf
def average_gradients(tower_grads):
"""Calculate the average gradient for each shared variable across all towers.
Note that this function provides a synchronization point across all towers.
Args:
tower_grads: List of lists of (gradient, variable) tuples. The outer list
is over individual gradients. The inner list is over the gradient
calculation for each tower.
Returns:
List of pairs of (gradient, variable) where the
gradient has been averaged across all towers.
"""
average_grads = []
for grad_and_vars in zip(*tower_grads):
# Note that each grad_and_vars looks like the following:
# ((grad0_gpu0, var0_gpu0), ... , (grad0_gpuN, var0_gpuN))
grads = []
for each_grad, _ in grad_and_vars:
# Add 0 dimension to the gradients to represent the tower.
expanded_g = tf.expand_dims(each_grad, 0)
# Append on a 'tower' dimension which we will average over below.
grads.append(expanded_g)
# Average over the 'tower' dimension.
grad = tf.concat(axis=0,values=grads)
grad = tf.reduce_mean(grad, 0)
# The variables are redundant because they are shared
# across towers. So we will just return the first tower's pointer to
# the Variable.
weights = grad_and_vars[0][1]
grad_and_var = (grad, weights)
average_grads.append(grad_and_var)
return average_grads | da85dee074f5bb15a13ea3d2c2fe105469c1ee90 | 7,068 |
import numpy as np
def compute_neighbours_probability_matrix(n_matrix, src, d_matrix, sigma_neigh):
"""Compute neighbours' probability matrix.
Parameters
-----------
n_matrix : :py:class:`~numpy.ndarray` of :py:class:`~int`, shape (n_verts, n_neigh_max)
The sets of neighbours.
src : :py:class:`~numpy.ndarray` of :py:class:`~float`, shape (n_verts, 3)
The coordinates of the points in the brain discretization.
d_matrix : :py:class:`~numpy.ndarray` of :py:class:`~float`, shape (n_verts x n_verts)
The Euclidean distance between the points in the
brain discretization.
sigma_neigh : :py:class:`~float`
The standard deviation of the Gaussian distribution that defines
the neighbours' probability.
Returns
--------
np_matrix : :py:class:`~numpy.ndarray` of :py:class:`~float`, shape (n_verts, n_neigh_max)
The neighbours' probability.
"""
np_matrix = np.zeros(n_matrix.shape, dtype=float)
for i in range(src.shape[0]):
n_neig = len(np.where(n_matrix[i] > -1)[0])
np_matrix[i, 0:n_neig] = \
np.exp(-d_matrix[i, n_matrix[i, 0:n_neig]] ** 2
/ (2 * sigma_neigh ** 2))
np_matrix[i] = np_matrix[i] / np.sum(np_matrix[i])
return np_matrix | 2651ad650697266d7e0db5fdc55e176334fc3cb8 | 7,069 |
import numpy as np
import astropy.units as u  # assumed: `u` is astropy.units, used below for u.pixel
def ar_cosmap(inmap):
"""
Get the cosine map and off-limb pixel map using WCS.
Generate a map of the solar disk that is 1 at disk center and goes radially outward as the cos(angle to LOS), which
is = 2 at 60 degrees from LOS.
Other outputs:
- rrdeg: gives degrees from disk center
- offlimb: map of 1=on-disk and 0=off-disk
"""
    ## Shrink the disk radius slightly (fudge factor) to get rid of limb effects
fudge=0.999
#
## Get helioprojective_coordinates
# Below is deprecated so commented out and updated
# xx, yy = wcs.convert_pixel_to_data(inmap.data.shape,
# [inmap.meta["CDELT1"], inmap.meta["CDELT2"]],
# [inmap.meta["CRPIX1"], inmap.meta["CRPIX2"]],
# [inmap.meta["CRVAL1"], inmap.meta["CRVAL2"]])
x, y = (np.meshgrid(*[np.arange(v.value) for v in inmap.dimensions]) * u.pixel)
hpc = inmap.pixel_to_world(x, y)#NEED TO CHECK RE WHAT ORIGIN TO USE, origin=1)
xx = hpc.Tx.value
yy = hpc.Ty.value
rr = ((xx**2.) + (yy**2.))**(0.5)
#
coscor = np.copy(rr)
rrdeg = np.arcsin(coscor / inmap.meta["RSUN_OBS"])
coscor = 1. / np.cos(rrdeg)
wgt = np.where(rr > (inmap.meta["RSUN_OBS"]*fudge))
coscor[wgt] = 1.
#
offlimb = np.copy(rr)
wgtrr = np.where(rr >= (inmap.meta["RSUN_OBS"]*fudge))
offlimb[wgtrr] = 0.
wltrr = np.where(rr < (inmap.meta["RSUN_OBS"]*fudge))
offlimb[wltrr] = 1.
#
return coscor, rrdeg, offlimb | 4365b0ef1134f117e5bc3396239cc1ba174f5009 | 7,070 |
def as_array(request: SubRequest) -> bool:
"""
Boolean fixture to support ExtensionDtype _from_sequence method testing.
"""
b = request.param
assert isinstance(b, bool)
return b | 7a8b627769b8955ad4162a30be5ddc9b0ee76723 | 7,071 |
def gram_matrix(x):
"""Create the gram matrix of x."""
b, c, h, w = x.shape
phi = x.view(b, c, h * w)
return phi.bmm(phi.transpose(1, 2)) / (c * h * w) | 11de97b67f3f8ecb7d7d009de16c1a5d153ab8ff | 7,072 |
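# Usage sketch (assumes torch): Gram matrix of a batch of feature maps,
# as used in style-transfer style losses.
import torch
features = torch.randn(2, 3, 8, 8)   # (batch, channels, height, width)
print(gram_matrix(features).shape)   # torch.Size([2, 3, 3])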
import os
import pandas as pd
from datetime import date
from pptx import Presentation
from pptx.util import Inches, Pt
def create_presentation(path):
"""Creates ppt report from files in the specified folder. """
report = Presentation()
#report = Presentation('test_data//templates//ppt_template.pptx')
#pic = slide.shapes.add_picture('hts_data//templates//company_logo.png', left = Inches(3), top = Inches(0.2))
slide = report.slides.add_slide(report.slide_layouts[6])
subtitle = slide.shapes.add_textbox(left = Inches(5.), top = Inches(3.5), width = Inches(3), height = Inches(0.5),).text_frame
p = subtitle.paragraphs[0]
run = p.add_run()
run.text = 'Technical Report\nGenerated on {:%m-%d-%Y}'.format(date.today())
font = run.font
font.size = Pt(18)
files_list = os.listdir(path)
for myfile in files_list:
if 'heatmap.png' in myfile:
slide = report.slides.add_slide(report.slide_layouts[6])
left = top = Inches(0.7)
height = Inches(6)
pic = slide.shapes.add_picture(path + '//' + myfile, left, top, width = Inches(5.8), height= Inches(4))
elif '.png' in myfile and 'heatmap.png' not in myfile:
slide = report.slides.add_slide(report.slide_layouts[6])
subtitle = slide.shapes.add_textbox(left = Inches(0.5), top = Inches(0.3), width = Inches(2), height = Inches(0.5)).text_frame
subtitle.text = myfile
left = top = Inches(0.7)
pic = slide.shapes.add_picture(path +'//' + myfile, left, top = Inches(0.8), height= Inches(6))
left = Inches(0.7)
elif 'csv' in myfile:
try:
table = pd.read_csv(path +'//' + myfile)
if table.shape[0]<30:
slide = report.slides.add_slide(report.slide_layouts[6])
subtitle = slide.shapes.add_textbox(left = Inches(0.5), top = Inches(0.3), width = Inches(2), height = Inches(0.5)).text_frame
subtitle.text = myfile
slide_table = df_to_table(table, slide, left = Inches(0.3), top = Inches(1), width = Inches(12.5), height = Inches(0.3))
left = Inches(0.7)
except Exception as e:
print(e)
return report | 3fdc2382b5e21bab54c0735314ab13aeb225cd0d | 7,073 |
def open_file(path, mode):
"""
Attempts to open file at path.
    Tries up to max_attempts times to work around intermittent permission errors on Windows
"""
max_attempts = 100
f = None
for _ in range(max_attempts):
try:
f = open(path, mode)
except PermissionError:
continue
break
return f | 9217a1b66b2bb30895fe445fa4a50b5da5466391 | 7,074 |
import requests
import json
def get_sts_token(current_refresh_token):
"""
Retrieves an authentication token.
:param current_refresh_token: Refresh token retrieved from a previous authentication, used to retrieve a
subsequent access token. If not provided (i.e. on the initial authentication), the password is used.
"""
url = 'https://{}:{}/{}'.format(opts.authHostname, opts.authPort, auth_path)
if not current_refresh_token: # First time through, send password
data = {'username': opts.user, 'password': opts.password, 'grant_type': 'password', 'takeExclusiveSignOnControl': True,
'scope': scope}
print("Sending authentication request with password to ", url, "...")
else: # Use the given refresh token
data = {'username': opts.user, 'refresh_token': current_refresh_token, 'grant_type': 'refresh_token',
'takeExclusiveSignOnControl': True}
print("Sending authentication request with refresh token to ", url, "...")
try:
r = requests.post(url,
headers={'Accept': 'application/json'},
data=data,
auth=(opts.clientid, client_secret),
verify=True)
except requests.exceptions.RequestException as e:
print('RDP-GW authentication exception failure:', e)
return None, None, None
if r.status_code != 200:
print('RDP-GW authentication result failure:', r.status_code, r.reason)
print('Text:', r.text)
if r.status_code in [401,400] and current_refresh_token:
# Refresh token may have expired. Try again using machinedID + password.
return get_sts_token(None)
return None, None, None
auth_json = r.json()
print("RDP-GW Authentication succeeded. RECEIVED:")
print(json.dumps(auth_json, sort_keys=True, indent=2, separators=(',', ':')))
return auth_json['access_token'], auth_json['refresh_token'], auth_json['expires_in'] | b41c6658a4eb218771d6e908411ba3b54e4e13f3 | 7,075 |
async def device_climate_fan(device_climate_mock):
"""Test thermostat with fan device."""
return await device_climate_mock(CLIMATE_FAN) | 1143adceacb610d18e1a26df4e24f715eb68917f | 7,076 |
def make_training_config(args):
""" Create training config by parsing args from command line and YAML config file, filling the rest with default values.
Args
args : Arguments parsed from command line.
Returns
config : Dictionary containing training configuration.
"""
# Parse the configuration file.
config = {}
if args.config:
config = parse_yaml(args.config)
config = set_defaults(config, default_training_config)
# Additional config; start from this so it can be overwritten by the other command line options.
if args.o:
config = parse_additional_options(config, args.o)
if args.backbone:
config['backbone']['name'] = args.backbone
if args.generator:
config['generator']['name'] = args.generator
# Backbone config.
if args.freeze_backbone:
config['backbone']['details']['freeze'] = args.freeze_backbone
if args.backbone_weights:
config['backbone']['details']['weights'] = args.backbone_weights
# Generator config.
if args.random_transform:
config['generator']['details']['transform_generator'] = 'random'
if args.random_visual_effect:
config['generator']['details']['visual_effect_generator'] = 'random'
if args.batch_size:
config['generator']['details']['batch_size'] = args.batch_size
if args.group_method:
config['generator']['details']['group_method'] = args.group_method
if args.shuffle_groups:
config['generator']['details']['shuffle_groups'] = args.shuffle_groups
if args.image_min_side:
config['generator']['details']['image_min_side'] = args.image_min_side
if args.image_max_side:
config['generator']['details']['image_max_side'] = args.image_max_side
# Train config.
if args.gpu:
config['train']['gpu'] = args.gpu
if args.epochs:
config['train']['epochs'] = args.epochs
if args.steps:
config['train']['steps_per_epoch'] = args.steps
if args.lr:
config['train']['lr'] = args.lr
if args.multiprocessing:
config['train']['use_multiprocessing'] = args.multiprocessing
if args.workers:
config['train']['workers'] = args.workers
if args.max_queue_size:
config['train']['max_queue_size'] = args.max_queue_size
if args.weights:
config['train']['weights'] = args.weights
return config | 1902e0999336249a7feda1f0aa415f7d148a16ee | 7,077 |
from typing import Tuple
import jax.numpy as jnp
def _crown_relu_relaxer(inp: Bound) -> Tuple[LinFun, LinFun]:
"""Obtain the parameters of a linear ReLU relaxation as in CROWN.
This relaxes the ReLU with the adaptive choice of lower bounds as described
for CROWN-ada in https://arxiv.org/abs/1811.00866.
Args:
inp: Input to the ReLU.
Returns:
lb_linfun, ub_linfun: Linear functions bounding the ReLU
"""
inp_lower, inp_upper = inp.lower, inp.upper
relu_on = (inp_lower >= 0.)
relu_amb = jnp.logical_and(inp_lower < 0., inp_upper >= 0.)
ub_slope = relu_on.astype(jnp.float32)
ub_slope += jnp.where(relu_amb,
inp_upper / jnp.maximum(inp_upper - inp_lower, 1e-12),
jnp.zeros_like(inp_lower))
ub_offset = jnp.where(relu_amb, - ub_slope * inp_lower,
jnp.zeros_like(inp_lower))
lb_slope = (ub_slope >= 0.5).astype(jnp.float32)
lb_offset = jnp.zeros_like(inp_lower)
return (eltwise_linfun_from_coeff(lb_slope, lb_offset),
eltwise_linfun_from_coeff(ub_slope, ub_offset)) | 7e43e973adb65089a2eb35665c219911fc409446 | 7,078 |
import torch
import time
def run_single_measurement(model_name, produce_model, run_model, teardown, inp, criterion, extra_params, use_dtr, use_profiling):
"""
This function initializes a model and performs
a single measurement of the model on the given input.
While it might seem most reasonable to initialize
the model outside of the loop, DTR's logs have shown
that certain constants in the model persist between loop iterations;
performing these actions in a separate *function scope* turned out to be the only
way to prevent having those constants hang around.
Returns a dict of measurements
"""
torch.cuda.reset_max_memory_allocated()
# resetting means the count should be reset to
# only what's in scope, meaning only the input
input_mem = torch.cuda.max_memory_allocated()
model = produce_model(extra_params=extra_params)
params = []
for m in model:
if hasattr(m, 'parameters'):
params.extend(m.parameters())
model_mem = torch.cuda.max_memory_allocated()
optimizer = torch.optim.SGD(model[0].parameters(), 1e-3, momentum=0.9, weight_decay=1e-4)
start = torch.cuda.Event(enable_timing=True)
end = torch.cuda.Event(enable_timing=True)
# start timing
torch.cuda.synchronize()
start_time = time.time()
if use_dtr:
torch.reset_profile()
start.record()
# with torch.autograd.profiler.profile(use_cuda=True) as prof:
run_model(criterion, *model, *inp, optimizer=optimizer)
end.record()
start_sync = time.time()
torch.cuda.synchronize()
end_sync = time.time()
end_time = time.time()
# end timing
if use_dtr:
# operators-only time, tracked by DTR
cuda_time = torch.compute_time()
base_compute_time = -1
remat_compute_time = -1
search_time = -1
cost_time = -1
if use_profiling:
base_compute_time = torch.base_compute_time()
remat_compute_time = torch.remat_compute_time()
search_time = torch.search_time()
cost_time = torch.cost_time()
torch.reset_profile()
total_mem = torch.cuda.max_memory_allocated()
teardown(*model)
torch.cuda.reset_max_memory_allocated()
del model
if use_dtr:
torch.toggle_log(False)
del params
batch_size = len(inp[0])
ips = batch_size / (end_time - start_time)
result = {
'time': end_time - start_time,
'sync_time': end_sync - start_sync,
'gpu_time': start.elapsed_time(end),
'input_mem': input_mem,
'model_mem': model_mem,
'total_mem': total_mem,
'base_compute_time': base_compute_time,
'remat_compute_time': remat_compute_time,
'search_time': search_time,
'cost_time': cost_time,
'batch_size': batch_size,
'ips': ips
}
if use_dtr:
result['cuda_time'] = cuda_time
else:
result['cuda_time'] = -1.0
return result | a3765a88ccb10b3f0322f11f8205ecfdb7f98f38 | 7,079 |
import numpy as np
from numpy import random as nrandom  # assumed: nrandom aliases numpy.random
def make_noise(fid, snr, decibels=True):
"""Given a synthetic FID, generate an array of normally distributed
complex noise with zero mean and a variance that abides by the desired
SNR.
Parameters
----------
fid : numpy.ndarray
Noiseless FID.
snr : float
The signal-to-noise ratio.
decibels : bool, default: True
If `True`, the snr is taken to be in units of decibels. If `False`,
        it is taken to be simply the ratio of the signal power and noise
        power.
    Returns
    -------
noise : numpy.ndarray
"""
components = [
(fid, 'fid', 'ndarray'),
(snr, 'snr', 'float'),
(decibels, 'decibels', 'bool'),
]
ArgumentChecker(components)
size = fid.size
shape = fid.shape
# Compute the variance of the noise
if decibels:
var = np.real((np.sum(np.abs(fid) ** 2)) / (size * (20 ** (snr / 10))))
else:
var = np.real((np.sum(np.abs(fid) ** 2)) / (2 * size * snr))
# Make a number of noise instances and check which two are closest
# to the desired variance.
# These two are then taken as the real and imaginary noise components
instances = []
var_discrepancies = []
for _ in range(100):
instance = nrandom.normal(loc=0, scale=np.sqrt(var), size=shape)
instances.append(instance)
var_discrepancies.append(np.abs(np.var(instances) - var))
# Determine which instance's variance is the closest to the desired
# variance
first, second, *_ = np.argpartition(var_discrepancies, 1)
# The noise is constructed from the two closest arrays in a variance-sense
# to the desired SNR
return instances[first] + 1j * instances[second] | 823c9fee2c1a696a38b6a27406f51a27185460c1 | 7,080 |
import numpy as np
def viterbi(prob_matrix):
""" find the most likely sequence of labels using the viterbi algorithm on prob_matrix """
TINY = 1e-6 # to avoid NaNs in logs
# if prob_matrix is 1D, make it 2D
if len(np.shape(prob_matrix)) == 1:
prob_matrix = [prob_matrix]
length = len(prob_matrix)
probs = np.zeros_like(prob_matrix)
backpt = np.ones_like(prob_matrix, dtype=np.int32) * -1
for i in [0,1,2,3,4]:
probs[0][i] = np.log(prob_matrix[0][i]+TINY)
# {B, M, E, S} <=== 0:begin, 1:middle, 2:end, 3:single
for t in range(1, length):
# E, S -> B | B, M -> M | B, M -> E | E, S -> S
previous_of = [[0,0], [3,4], [1,2], [1,2], [3,4]]
for i in range(5):
prevs = previous_of[i]
max_id = prevs[np.argmax([probs[t-1][prevs[0]], probs[t-1][prevs[1]]])]
backpt[t][i] = max_id
probs[t][i] = np.log(prob_matrix[t][i]+TINY) + probs[t-1][max_id]
seq = np.ones(length, 'int32') * -1
#print(probs[length-1])
seq[length-1] = np.argmax(probs[length-1])
#print(seq[length-1])
max_prob = probs[length-1][seq[length-1]]
for t in range(1, length):
seq[length-1-t] = backpt[length-t][seq[length-t]]
return seq | 50b28dcf7cedc75adb4a41cb9ccf2152af5f4b8f | 7,081 |
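# Usage sketch: most likely {B, M, E, S}-style label sequence for three timesteps
# of per-label probabilities (5 labels per step; rows need not be normalised).
probs = np.array([[0.1, 0.2, 0.1, 0.1, 0.5],
                  [0.1, 0.6, 0.1, 0.1, 0.1],
                  [0.1, 0.1, 0.6, 0.1, 0.1]])
print(viterbi(probs))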
import numpy as np
def slsn_constraint(parameters):
"""
Place constraints on the magnetar rotational energy being larger than the total output energy,
and the that nebula phase does not begin till at least a 100 days.
:param parameters: dictionary of parameters
:return: converted_parameters dictionary where the violated samples are thrown out
"""
converted_parameters = parameters.copy()
mej = parameters['mej'] * solar_mass
vej = parameters['vej'] * km_cgs
kappa = parameters['kappa']
mass_ns = parameters['mass_ns']
p0 = parameters['p0']
kinetic_energy = 0.5 * mej * vej**2
rotational_energy = 2.6e52 * (mass_ns/1.4)**(3./2.) * p0**(-2)
tnebula = np.sqrt(3 * kappa * mej / (4 * np.pi * vej ** 2)) / 86400
neutrino_energy = 1e51
total_energy = kinetic_energy + neutrino_energy
# ensure rotational energy is greater than total output energy
converted_parameters['erot_constraint'] = rotational_energy - total_energy
# ensure t_nebula is greater than 100 days
converted_parameters['t_nebula_min'] = tnebula - 100
return converted_parameters | 9fd4cc37c783aa1afdc816edbc88c45132fb4026 | 7,082 |
from qiskit import ClassicalRegister, QuantumCircuit, QuantumRegister
def grover_circuit(n,o,iter):
"""Grover Search Algorithm
:param n: Number of qubits (not including ancilla)
:param o: Oracle int to find
:return qc: Qiskit circuit
"""
def apply_hadamard(qc, qubits,a=None) -> None:
"""Apply a H-gate to 'qubits' in qc"""
for q in qubits:
qc.h(q)
if a is not None:
qc.h(a)
def initialize_bits(qc,qubits,a) -> None:
"Start qubits at 0 and ancilla bit at 1"
for q in qubits:
qc.reset(q)
qc.reset(a[0])
qc.x(a[0])
def apply_mean_circuit(qc, qubits) -> None:
"""Apply a H-gate to 'qubits' in qc"""
control_qubits = []
for q in qubits:
qc.h(q)
qc.x(q)
control_qubits.append(q)
cZ = control_qubits[-1]
control_qubits.pop()
qc.h(cZ)
qc.mcx(control_qubits,cZ)
qc.h(cZ)
for q in qubits:
qc.x(q)
qc.h(q)
def create_oracle(qc,qubit,ancilla,oracle,n) -> None:
"""Creates a quantum oracle."""
test_list = []
for q in qubit:
test_list.append(q)
_oracle_logic(qc, qubit, oracle,n)
qc.mcx(test_list,ancilla[0])
_oracle_logic(qc, qubit, oracle,n)
def _oracle_logic(qc, qubit, oracle,n) -> None:
if 0 <= oracle <= 2**len(qubit)-1:
bin_list = [int(i) for i in list('{0:0b}'.format(oracle))]
if len(bin_list) < n:
for _ in range(0,n-len(bin_list)):
bin_list.insert(0,0)
for i in range(0,len(bin_list)):
if bin_list[i] == 0:
qc.x(q[i])
else:
raise ValueError('Oracle must be between 0 and 2^n-1')
# print(f"Creating circuit with {n} qubits")
q = QuantumRegister(n, 'q')
a = QuantumRegister(1, 'a')
c = ClassicalRegister(n, 'c')
qc = QuantumCircuit(q,a,c)
i2b = "{0:b}".format(o)
# print(f"Oracle set to: {o} ({i2b})")
# print(" ")
initialize_bits(qc,q,a)
qc.barrier(q,a)
apply_hadamard(qc,q,a)
# print(f"Generating {iter} Grover module(s)")
# print("=====================================")
for _ in range(1,iter+1):
qc.barrier(q,a)
create_oracle(qc,q,a,o,n)
qc.barrier(q,a)
apply_mean_circuit(qc, q)
qc.barrier(q,a)
for i in range(0,len(q)):
qc.measure(q[i],c[len(q)-1-i])
return qc | fac61eda28a249e333dabd46c7d404603141c07c | 7,083 |
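# Usage sketch (assumes qiskit is installed): a 3-qubit Grover search for the
# marked state 5 (binary 101), using two Grover iterations.
qc = grover_circuit(3, 5, 2)
print(qc.num_qubits, qc.num_clbits)  # 4 qubits (incl. ancilla), 3 classical bits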
def reference_cluster(envs, in_path):
"""
Return set of all env in_paths referencing or
referenced by given in_path.
>>> cluster = sorted(reference_cluster([
... {'in_path': 'base', 'refs': []},
... {'in_path': 'test', 'refs': ['base']},
... {'in_path': 'local', 'refs': ['test']},
... ], 'test'))
>>> cluster == ['base', 'local', 'test']
True
"""
edges = [
set([env['in_path'], fix_reference_path(env['in_path'], ref)])
for env in envs
for ref in env['refs']
]
prev, cluster = set(), set([in_path])
while prev != cluster:
# While cluster grows
prev = set(cluster)
to_visit = []
for edge in edges:
if cluster & edge:
# Add adjacent nodes:
cluster |= edge
else:
# Leave only edges that are out
# of cluster for the next round:
to_visit.append(edge)
edges = to_visit
return cluster | 6398705dfb63c30de62b2eb900d88612e5144774 | 7,084 |
def scheme_apply(procedure, args, env):
"""Apply Scheme PROCEDURE to argument values ARGS in environment ENV."""
if isinstance(procedure, PrimitiveProcedure):
return apply_primitive(procedure, args, env)
elif isinstance(procedure, UserDefinedProcedure):
new_env = make_call_frame(procedure, args, env)
return eval_all(procedure.body, new_env)
else:
raise SchemeError("cannot call: {0}".format(str(procedure))) | 14879f29a5e8c3c5b7d4d41be35730eb66dbdc66 | 7,085 |
def _validate_num_clusters(num_clusters, initial_centers, num_rows):
"""
Validate the combination of the `num_clusters` and `initial_centers`
parameters in the Kmeans model create function. If the combination is
valid, determine and return the correct number of clusters.
Parameters
----------
num_clusters : int
Specified number of clusters.
initial_centers : SFrame
Specified initial cluster center locations, in SFrame form. If the
number of rows in this SFrame does not match `num_clusters`, there is a
problem.
num_rows : int
Number of rows in the input dataset.
Returns
-------
_num_clusters : int
The correct number of clusters to use going forward
"""
## Basic validation
if num_clusters is not None and not isinstance(num_clusters, int):
raise _ToolkitError("Parameter 'num_clusters' must be an integer.")
## Determine the correct number of clusters.
if initial_centers is None:
if num_clusters is None:
raise ValueError("Number of clusters cannot be determined from " +
"'num_clusters' or 'initial_centers'. You must " +
"specify one of these arguments.")
else:
_num_clusters = num_clusters
else:
num_centers = initial_centers.num_rows()
if num_clusters is None:
_num_clusters = num_centers
else:
if num_clusters != num_centers:
raise ValueError("The value of 'num_clusters' does not match " +
"the number of provided initial centers. " +
"Please provide only one of these arguments " +
"or ensure the values match.")
else:
_num_clusters = num_clusters
if _num_clusters > num_rows:
raise ValueError("The desired number of clusters exceeds the number " +
"of data points. Please set 'num_clusters' to be " +
"smaller than the number of data points.")
return _num_clusters | 67d0be234a97c33eb742c70e8d6bb30be4608ab2 | 7,086 |
import mimetypes
import base64 as b64
def urlinline(filename, mime=None):
"""
Load the file at "filename" and convert it into a data URI with the
given MIME type, or a guessed MIME type if no type is provided.
Base-64 encodes the data.
"""
infile = open(filename, 'rb')
text = infile.read()
infile.close()
    enc = b64.standard_b64encode(text).decode("ascii")  # decode so the data URI is a plain str under Python 3
if mime is None:
mime, _ = mimetypes.guess_type(filename)
mime = mime or DEFAULT_MIME_TYPE
ret = "data:%s;base64,%s" % (mime, enc)
return ret | 4b8035944a7a25d5b3ce3bc8a8fbd0a4dd424447 | 7,087 |
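# Usage sketch: embed a small text file as a base64 data URI (DEFAULT_MIME_TYPE is
# assumed to be a module constant, e.g. "application/octet-stream").
import tempfile
with tempfile.NamedTemporaryFile(suffix=".txt", delete=False) as tmp:
    tmp.write(b"hello")
print(urlinline(tmp.name))  # data:text/plain;base64,aGVsbG8=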
import json
def parse_matching_criteria(filters, filter_operator):
"""
build the filter criteria, if present
:param filters:field opr value[;]...
:param filter_operator: any|all
:return dictionary of parsed filter settings, True/False for "all"/"any" setting
"""
LOG.debug("%s %s", filters, filter_operator)
if filter_operator and filter_operator.strip().lower() not in ('all', 'any'):
raise ValueError("operator must be 'all' or 'any': {}".format(filter_operator))
match_operator_and = (filter_operator.strip().lower() == 'all') if filter_operator else True
# parse the filters and produce a tuple of (field, operator, value)
match_list = {}
if filters:
for filter_str in filters.split(';'):
m = REGEX_OPERATORS.match(filter_str.strip())
if not m or len(m.groups()) != 3:
raise ValueError("Unable to parse filter '{}'".format(filter_str))
match_field = m.group(1)
match_opr = m.group(2)
# correct mistyped comparison
if match_opr.strip() == '=':
match_opr = '=='
match_value = m.group(3)
# restore lists to actual lists
if match_value.startswith("["):
try:
match_value = json.loads(match_value.replace("'", '"')) # make sure correct json format
except Exception as err:
LOG.error(str(err))
pass
# determine if working with a string, boolean, or int
elif match_value in ["true", "True", "false", "False"]:
match_value = str_to_bool(match_value)
elif match_value == 'None':
match_value = None
else:
try:
match_value = int(match_value) # this will fail for numbers, which will be trapped
except:
pass
compare_tuple = (match_field, match_opr, match_value)
LOG.debug(compare_tuple)
match_list[match_field] = compare_tuple
return match_list, match_operator_and | 13bc51b84751671e913e43a614cda7edf9fe3734 | 7,088 |
def star_rating(new_rating=None, prev_rating=None):
"""
Generates the query to update the product's star ratings. Inc method is
from https://docs.mongodb.com/manual/reference/operator/update/inc/
"""
add_file = {
1: {"one_star": 1},
2: {"two_stars": 1},
3: {"three_stars": 1},
4: {"four_stars": 1},
5: {"five_stars": 1}
}
delete_file = {
1: {"one_star": -1},
2: {"two_stars": -1},
3: {"three_stars": -1},
4: {"four_stars": -1},
5: {"five_stars": -1}
}
if new_rating and prev_rating:
return {"$inc": {add_file[new_rating], delete_file[prev_rating]}}
elif new_rating:
return {"$inc": add_file[new_rating]}
else:
return {"$inc": delete_file[prev_rating]} | e50f8271dbbb8c2722729cce6a8f036c851c4e95 | 7,089 |
def check_encoder(value: EncoderArg) -> EncoderFactory:
"""Checks value and returns EncoderFactory object.
Returns:
d3rlpy.encoders.EncoderFactory: encoder factory object.
"""
if isinstance(value, EncoderFactory):
return value
if isinstance(value, str):
return create_encoder_factory(value)
raise ValueError("This argument must be str or EncoderFactory object.") | 5e23b483df8fbe190f1ac6ccf743bc783728adf8 | 7,090 |
import pathlib
def allowed_task_name(name: str) -> bool:
    Determine whether a task, which is a non-core-OSCAL activity/directory, is allowed.
args:
name: the task name which is assumed may take the form of a relative path for task/subtasks.
Returns:
Whether the task name is allowed or not allowed (interferes with assumed project directories such as catalogs).
"""
# Task must not use an OSCAL directory
# Task must not self-interfere with a project
pathed_name = pathlib.Path(name)
root_path = pathed_name.parts[0]
if root_path in const.MODEL_TYPE_TO_MODEL_DIR.values():
logger.error('Task name is the same as an OSCAL schema name.')
return False
elif root_path[0] == '.':
logger.error('Task name must not start with "."')
return False
elif pathed_name.suffix != '':
# Does it look like a file
        logger.error('task name must not look like a file path (e.g. contain a suffix)')
return False
return True | 231d7a98f5d6b7059f5517283ec3bed35264050e | 7,091 |
def get_ignored_classes(uppercase, lowercase, digit):
"""
get tuple of ignored classes based on selected classes
:param
uppercase: whether to keep uppercase classes
:param
lowercase: whether to keep lowercase classes
:param
digit: whether to keep digit classes
:return:
tuple of ignored classes
"""
# result placeholder
ignored = []
# add digit classes to the ignore list
if not digit:
ignored.append(dataset.get_classes('digit'))
# add uppercase classes to the ignore list
if not uppercase:
ignored.append(dataset.get_classes('uppercase'))
# add lowercase classes to the ignore list
if not lowercase:
ignored.append(dataset.get_classes('lowercase'))
# return tuple
return tuple(ignored) | 2a2380f4f984feb42ce1de912739fd395a8422bd | 7,092 |
import numpy as np
import torch
def unscaled_prediction_rmse(model, input_tensor, label_tensor, scalar, loading_length=0, return_loading_error=False,
device=None):
"""
Prediction RMSE.
:param model: model
:param input_tensor: input tensor
:param label_tensor: label tensor
:param scalar: scalar for transforming output data
:param loading_length: time length used for loading the NARX
:param return_loading_error: return the loading RMSE with the multi-step ahead RMSE
:param device: specified device to use (Default: None - select what is available)
:return: prediction rmse
"""
# Create Network on GPU/CPU
if device is None:
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
model.to(device)
# Training Data on GPU/CPU
input_tensor, label_tensor = input_tensor.to(device), label_tensor.to(device)
# Sort data for loading and k-step ahead predictions.
inputs, labels, itdl, otdl = init_tdl(model, input_tensor, label_tensor, device)
loading_labels, k_step_labels = labels[:, :loading_length, :], labels[:, loading_length:, :]
# Perform a k-step ahead prediction
k_step_outputs, loading_outputs = __multi_step_ahead_prediction(model, input_tensor, label_tensor,
loading_length, device)
if return_loading_error:
# Combine loading and multi-step predictions/labels
outputs = torch.cat([loading_outputs, k_step_outputs], dim=1)
labels = torch.cat([loading_labels, k_step_labels], dim=1)
else:
# Use the multi-step predictions/labels
outputs = k_step_outputs
labels = k_step_labels
labels = labels.cpu().data.numpy()
labels = labels.reshape((labels.shape[0], labels.shape[1]))
labels = (labels - scalar.min_[1]) / scalar.scale_[1]
outputs = outputs.cpu().data.numpy()
outputs = outputs.reshape((outputs.shape[0], outputs.shape[1]))
outputs = (outputs - scalar.min_[1]) / scalar.scale_[1]
error = labels - outputs
error = np.sqrt((np.power(error, 2)).mean(axis=0))
return error | ea7b0e2c2fd022cc7bcb466057feacf5a1fbaa00 | 7,093 |
import copy
def __copyList__(fromList, initialValues = None):
"""
Returns a copy of the provided list. Initial values must either be a single value, or
a list of exactly the same size as the provided list.
"""
if __isListType__(fromList) is False:
raise ValueError('The provided value to copy was not a list!')
fromList = copy.deepcopy(fromList)
if initialValues is not None:
initialValues = copy.deepcopy(initialValues)
if initialValues is None or __isNonStringIterableType__(initialValues) is False:
copySingleValue = True
elif __isNonStringIterableType__(initialValues) and len(initialValues) == 1 or __isListType__(initialValues) is False:
# Treat an initialValue object with 1 element the same as a non-iterable, so we could set every value to a list, or to a non-list value
copySingleValue = True
else:
if len(initialValues) != len(fromList):
raise ValueError('The initial values list must be the same size as the list to copy!')
else:
copySingleValue = False
returnList = fromList[:]
for itemIndex in range(len(returnList)):
if copySingleValue is True:
returnList[itemIndex] = initialValues
else:
returnList[itemIndex] = initialValues[itemIndex]
return returnList | 9f126a10795132b5d2ddaeef552c6e5abd8680ba | 7,094 |
import re
def build_or_pattern(patterns, escape=False):
"""Build a or pattern string from a list of possible patterns
"""
or_pattern = []
for pattern in patterns:
if not or_pattern:
or_pattern.append('(?:')
else:
or_pattern.append('|')
or_pattern.append('(?:%s)' % re.escape(pattern) if escape else pattern)
or_pattern.append(')')
return ''.join(or_pattern) | 225cc20504a85342694e14ea76b9bf3ed8b6d11b | 7,095 |
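# Usage sketch: build an alternation and use it inside a larger regex.
episode_words = build_or_pattern(["episode", "ep"], escape=True)
print(re.findall(episode_words + r"\s*(\d+)", "Episode 12, ep 3", flags=re.IGNORECASE))  # ['12', '3']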
from typing import Any, Tuple
import pandas as pd
def concatenate_and_process_data(
data_consent: pd.DataFrame,
data_noconsent: pd.DataFrame,
conversion_column: str = CONVERSION_COLUMN,
drop_columns: Tuple[Any, ...] = DROP_COLUMNS,
non_dummy_columns: Tuple[Any, ...] = NON_DUMMY_COLUMNS
) -> Tuple[pd.DataFrame, pd.DataFrame]:
"""Concatenates consent and no-consent data and preprocesses them.
Args:
data_consent: Dataframe of consent customers.
data_noconsent: Dataframe of no-consent customers.
conversion_column: Name of the conversion column in the data.
drop_columns: Names of columns that should be dropped from the data.
non_dummy_columns: Names of (categorical) columns that should be kept, but
not dummy-coded.
Raises:
ValueError: if concatenating consent and no-consent data doesn't
match the expected length.
Returns:
Processed dataframes for consent and no-consent customers.
"""
data_noconsent["consent"] = 0
data_consent["consent"] = 1
data_concat = pd.concat([data_noconsent, data_consent])
data_concat.reset_index(inplace=True, drop=True)
if len(data_concat) != (len(data_noconsent) + len(data_consent)):
raise ValueError(
"Length of concatenated data does not match sum of individual dataframes."
)
data_preprocessed = preprocess_data(
data=data_concat,
drop_columns=list(drop_columns),
non_dummy_columns=list(non_dummy_columns),
conversion_column=conversion_column)
data_noconsent_processed = data_preprocessed[data_preprocessed["consent"] ==
0]
data_consent_processed = data_preprocessed[data_preprocessed["consent"] == 1]
return data_consent_processed, data_noconsent_processed | 57c84f0b406750b40161bb7f5ed19c5f2cd509e8 | 7,096 |
import matplotlib.pyplot as plt
def plot(nRows=1, nCols=1, figSize=5):
"""
Generate a matplotlib plot and axis handle
Parameters
-----------------
nRows : An int, number of rows for subplotting
nCols : An int, number of columns for subplotting
figSize : Numeric or array (xFigSize, yFigSize). The size of each axis.
"""
if isinstance(figSize, (list, tuple)):
xFigSize, yFigSize = figSize
elif isinstance(figSize, (int, float)):
xFigSize = yFigSize = figSize
else:
raise Exception('figSize type {} not recognised'.format(type(figSize)))
fig, axs = plt.subplots(nRows, nCols, figsize=(nCols * xFigSize, nRows * yFigSize))
if nRows * nCols > 1:
axs = axs.ravel()
return fig, axs | a0ec25fa932933f717ef9a576d0f80d531865aad | 7,097 |
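# Usage sketch: a 2x2 grid of axes, each panel 3 inches on a side.
fig, axs = plot(nRows=2, nCols=2, figSize=3)
print(len(axs))  # 4 flattened axes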
import pandas as pd
def make_rate_data(grp, valuevars, query="none == 'All'", data=ob):
"""Filters, Groups, and Calculates Rates
Params:
grp [list]: A list detailing the names of the variables to group by.
valuevars [list]: A list detailing the names of the quantitative
variable summarise and calculate a rate for (as a function of
population).
query [string]: A query string used to subset the data prior to
aggregation.
data [pd.DataFrame]: The obesity dataset.
Returns:
[pd.DataFrame]: A pandas data frame containing the grouping variables
and rates for the value variables (carrying the same column name).
Cells where a rate could not be calculated due to missing information
are return as np.NaN.
"""
grp_plus = grp + ["none"]
ratedata = (
data.query(query)
.loc[:, grp + ["pop"] + valuevars]
.melt(id_vars=grp + ["pop"], var_name="variable", value_name="value")
.dropna()
.groupby(grp + ["variable"])[["pop", "value"]]
.sum()
.reset_index()
.assign(rate=lambda x: x["value"] / x["pop"])
.drop(columns=["value", "pop"])
.pivot(index=grp, columns="variable", values="rate")
.reset_index()
)
return ratedata | 8342d5b20f7020a97f283ce80b04b92b42476862 | 7,098 |
from pandas.testing import assert_frame_equal  # model classes, fixtures and COLS_TO_COMPARE are assumed project-level imports
def test_compare_sir_vs_seir(sir_data_wo_policy, seir_data, monkeypatch):
"""Checks if SEIR and SIR return same results if the code enforces
* alpha = gamma
* E = 0
* dI = dE
"""
x_sir, pars_sir = sir_data_wo_policy
x_seir, pars_seir = seir_data
pars_seir["alpha"] = pars_sir["gamma"] # will be done by hand
def mocked_seir_step(data, **pars):
data["exposed"] = 0
new_data = SEIRModel.simulation_step(data, **pars)
new_data["infected"] += new_data["exposed_new"]
return new_data
seir_model = SEIRModel()
monkeypatch.setattr(seir_model, "simulation_step", mocked_seir_step)
sir_model = SIRModel()
predictions_sir = sir_model.propagate_uncertainties(x_sir, pars_sir)
predictions_seir = seir_model.propagate_uncertainties(x_seir, pars_seir)
assert_frame_equal(
predictions_sir[COLS_TO_COMPARE], predictions_seir[COLS_TO_COMPARE],
) | d70b841b23af6883a14bb1c97f31f3e24ae7fd4d | 7,099 |