content (string, lengths 35 to 762k) | sha1 (string, length 40) | id (int64, 0 to 3.66M)
---|---|---|
from dash import html
import dash_bootstrap_components as dbc
def update_table1(is_open, signup, user_edit, user_remove):
"""update_table
"""
df = lookup_data()
# Add icons
df[_('Active')] = df[_('Active')].apply(lambda x:
html.I(className="fa fa-check") if x else None
)
df[_('Edit')] = df[_('UID')].apply(lambda x:
dbc.Button(
html.I(className="fa fa-user-edit"),
color='light', size='sm',
className='bg-transparent',
id={'type':'update-user', 'index':x},
)
)
df[_('Remove')] = df[_('UID')].apply(lambda x:
dbc.Button(
html.I(className="fa fa-trash-alt"),
color='light', size='sm',
id={'type':'remove-user', 'index':x},
)
)
# Return table
return dbc.Table.from_dataframe(df,
striped=True,
bordered=True,
hover=True,
responsive=True,
size='sm',
style={'textAlign':'center'}
)
|
c93ce73bfb1d4d74cf2d05e98e2689db908d3b2d
| 32,598 |
import flask
def create_app(*args, **kwargs):
    """
    Create flask app with all configuration set and predefined values.
    :return: Flask application
    """
app = flask.Flask(__name__)
app.register_blueprint(pps_blueprint)
app.jinja_env.auto_reload = True
app.config['TEMPLATES_AUTO_RELOAD'] = True
app.server = Server()
app.secret_key = 'No_good_key'
return app
|
84e283f7e7d5299e0a6f4f77c590e53eb5890df1
| 32,600 |
from typing import Optional
def _rebase_node(curr: Optional[SQLNode], pre: SQLNode) -> SQLNode:
"""shorthand for a common pattern"""
return pre if curr is None else curr.rebase(pre)
|
84e7fbb901009619d8183dc779a94f08404833d1
| 32,601 |
def proposal_list(request):
"""Retrieve and return a list of proposals, optionally
filtered by the given acceptance status.
Requires API Key.
URL: /<YEAR>/pycon_api/proposals/
To filter by proposal type, add a GET query param "type" with
a value of "talk", "tutorial", "lightning", or "poster", e.g.::
GET /<YEAR>/pycon_api/proposals/?type=tutorial
To filter by proposal status, add a GET query param "status" with
a value of "undecided", "rejected", "accepted", or "standby".
So if you wanted to filter by both type and status, you might use::
GET /<YEAR>/pycon_api/proposals/?type=tutorial&status=accepted
The return data, in JSON, looks like::
{
'code': 200,
'data': [<item>, <item>, ..., <item>]
}
where each <item> looks like::
{
'id': 13, # proposal key
'speakers': [<speaker>, <speaker>, ..., <speaker>],
'status': "undecided"|"accepted"|"rejected"|"standby"
'title': "Title of talk"
}
and a <speaker> looks like::
{
'name': "Speaker Name",
'email': "[email protected]"
}
"""
# What model should we be pulling from?
model = ProposalBase
proposal_type = request.GET.get('type', 'talk')
if proposal_type in PROPOSAL_TYPES:
try:
model = get_proposal_model_from_section_slug(proposal_type + 's')
except ValueError:
return ({ 'error': 'unrecognized proposal type' }, 400)
else:
return ({ 'error': 'unrecognized proposal type' }, 400)
# See if there is such a proposal
proposals = model.objects.select_related('result').order_by('pk')
proposals = proposals.filter(kind__slug=proposal_type)
# Don't look at unsubmitted proposals
proposals = proposals.exclude(submitted=False)
# Don't look at cancelled proposals.
proposals = proposals.exclude(cancelled=True)
# If specific proposal status is being requested, filter on that.
    desired_status = request.GET.get('status', None)
    if desired_status is not None:
        if desired_status == 'undecided':
            proposals = proposals.filter(Q(result__status=desired_status) |
                                         Q(result=None))
        else:
            proposals = proposals.filter(result__status=desired_status)
# We may be asking only for ungrouped talks; if so, limit to these.
ungrouped = request.GET.get('ungrouped', '').lower() in ('true', '1')
if ungrouped:
proposals = proposals.filter(thunderdome_group=None)
# If there's a limit parameter provided, limit to those objects.
if 'limit' in request.GET:
        proposals = proposals[0:int(request.GET['limit'])]
# Return the proposal data objects.
return [i.as_dict() for i in proposals]
|
dadb694c162c385dc6aba1ac8d9dfdfced117a5e
| 32,602 |
import numpy as np
def apply_mask(image, mask, color, alpha=0.5):
    """Apply the given mask to the image by alpha-blending `color` (an RGB
    triple with components in [0, 1]) into the masked pixels of each channel.
    """
for c in range(3):
image[:, :, c] = np.where(mask == 1,
image[:, :, c] *
(1 - alpha) + alpha * color[c] * 255,
image[:, :, c])
return image
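# Minimal usage sketch (assumed data): alpha-blend a red overlay into the masked
# region of a dummy 4x4 image with 0-255 pixel values.
demo_img = np.full((4, 4, 3), 200, dtype=np.float32)
demo_mask = np.zeros((4, 4), dtype=np.uint8)
demo_mask[1:3, 1:3] = 1
blended = apply_mask(demo_img, demo_mask, color=(1.0, 0.0, 0.0), alpha=0.5)
# masked pixels: red channel becomes 0.5 * 200 + 0.5 * 1.0 * 255 = 227.5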
|
b57c6606eb125ece0f00825a41c99702affd0a9e
| 32,603 |
import re
from unidecode import unidecode
def clean_str(string):
"""
Tokenization/string cleaning for all datasets except for SST.
Original taken from https://github.com/yoonkim/CNN_sentence/blob/master/process_data.py
"""
string = unidecode(string)
string = re.sub(r"[^A-Za-z0-9(),!?\'\`]", " ", string)
string = re.sub(r"\'s", " \'s", string)
string = re.sub(r"\'ve", " \'ve", string)
string = re.sub(r"n\'t", " n\'t", string)
string = re.sub(r"\'re", " \'re", string)
string = re.sub(r"\'d", " \'d", string)
string = re.sub(r"\'ll", " \'ll", string)
string = re.sub(r",", " , ", string)
string = re.sub(r"!", " ! ", string)
string = re.sub(r"\(", " \( ", string)
string = re.sub(r"\)", " \) ", string)
string = re.sub(r"\?", " \? ", string)
string = re.sub(r"\s{2,}", " ", string)
string = re.sub(r"\"", "", string)
return ' ' +string.strip().lower()+ ' '
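# Example behaviour: punctuation and contractions become separate, space-delimited
# tokens, e.g. clean_str("I can't believe it!") yields " i ca n't believe it ! "
# (padded with a leading and trailing space).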
|
56c55ef100aa2c612e5a84f061b82ac225be2bd0
| 32,604 |
import numpy as np
def dkm_ms_em_remote_check_stopping(w, wo, epsilon):
    """
    Stopping condition: return True while the summed coordinate-wise distance
    between the current and previous weights is still above epsilon.
    """
delta = np.sum([abs(w[i] - wo[i]) for i in range(len(w))])
# print("Delta", delta)
result = delta > epsilon
return result
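# Worked example: each weight moved by 0.01, so delta = 0.02 > epsilon = 0.001,
# meaning the caller should keep iterating.
assert dkm_ms_em_remote_check_stopping([0.11, 0.19], [0.10, 0.20], 0.001)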
|
c226ed6f6b7e3a383e6f59492ed1b30d1f8eec31
| 32,605 |
def parse_from_string(root_processor, xml_string):
"""
Parses the XML string using the processor starting from the root of the document.
:param xml_string: XML string to parse.
See also :func:`declxml.parse_from_file`
"""
if not _is_valid_root_processor(root_processor):
raise InvalidRootProcessor('Invalid root processor')
root = ET.fromstring(xml_string)
_xml_namespace_strip(root)
state = _ProcessorState()
return root_processor.parse_at_root(root, state)
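# Usage sketch, assuming declxml's documented processor API (hypothetical data):
# import declxml as xml
# author = xml.dictionary('author', [xml.string('name'), xml.integer('birth_year')])
# xml.parse_from_string(author, '<author><name>Ada</name><birth_year>1815</birth_year></author>')
# -> {'name': 'Ada', 'birth_year': 1815}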
|
015f89cb407bef4564cd2aa4de0f807c69cd2a15
| 32,606 |
def validate(args, limit_to=None):
"""Validate an input dictionary for Coastal Blue Carbon.
Args:
args (dict): The args dictionary.
limit_to=None (str or None): If a string key, only this args parameter
will be validated. If ``None``, all args parameters will be
validated.
Returns:
A list of tuples where tuple[0] is an iterable of keys that the error
message applies to and tuple[1] is the string validation warning.
"""
validation_warnings = validation.validate(
args, ARGS_SPEC['args'])
sufficient_keys = validation.get_sufficient_keys(args)
invalid_keys = validation.get_invalid_keys(validation_warnings)
if ("landcover_snapshot_csv" not in invalid_keys and
"landcover_snapshot_csv" in sufficient_keys):
snapshots = _extract_snapshots_from_table(
args['landcover_snapshot_csv'])
for snapshot_year, snapshot_raster_path in snapshots.items():
raster_error_message = validation.check_raster(
snapshot_raster_path)
if raster_error_message:
validation_warnings.append(
(['landcover_snapshot_csv'], (
f"Raster for snapshot {snapshot_year} could not "
f"be validated: {raster_error_message}")))
if ("analysis_year" not in invalid_keys
and "analysis_year" in sufficient_keys):
if max(set(snapshots.keys())) > int(args['analysis_year']):
validation_warnings.append(
(['analysis_year'], (
f"Analysis year {args['analysis_year']} must be >= "
f"the latest snapshot year ({max(snapshots.keys())})"
)))
return validation_warnings
|
57dd66464dca7d28974366e63755e84905c9c90d
| 32,607 |
def findBestK(x_train, y_train, params, R):
    """ Computes the mean cross-validation accuracy obtained for each of the
    values of the parameter K given in params, and plots those values.
    Args:
        x_train: training set
        y_train: label vector associated with the training set
        params: list of values of the parameter K to try
        R: diameter of the training data set
    Returns:
        scores: cross-validation accuracy values for each value of K provided
    """
    scores = []
    print("Searching for the best value of the parameter K")
    for i in range(len(params)):
        # Run cross-validation with K_FOLDS partitions to estimate the model's
        # accuracy and append the result to the scores vector
        scores.append(cross_val_rbf(x_train, y_train, K_FOLDS, params[i], R=R))
        print(params[i], ":", scores[i])
    params = np.array(params)
    scores = np.array(scores)
    plotScores(params, scores, 'lightcoral', 'Mean accuracy vs. K', 'Number of clusters K', log=True)
    return scores
|
d9b07ccbca0551928a6966ab0dd6b29965668373
| 32,608 |
import base64
from io import BytesIO
from typing import List
from pdf2image import convert_from_bytes
def pdf_to_img(pdf: str) -> List[str]:
"""
Takes a base64 encoded string representing a PDF and turns it into a list of base64 encoded strings
representing pages in the PDF
:param pdf:
:return: list of strings
"""
imgs = list()
decoded = b64string_to_bytes(pdf)
images = convert_from_bytes(decoded)
for img in images:
buffered = BytesIO()
img.save(buffered, format="PNG")
buffered.seek(0)
data_uri = base64.b64encode(buffered.read()).decode('ascii')
imgs.append(data_uri)
buffered.close()
img.close()
return imgs
|
cf04b206b24786116cc5a92550333ac256db391d
| 32,609 |
def is_ready():
"""Checks if IoT Inspector is ready to interface with the AR app."""
return OK_JSON
|
c6b860179e4a969e1069b405bfcbd76c1b95c1d4
| 32,610 |
from typing import List
from typing import Dict
from typing import Any
def get_indicators_command(client: Client, insight_category: list, insight_data_type: list, args: dict) -> List[Dict]:
"""Create indicators.
Arguments:
client {Client} -- Client derives from BaseClient.
insight_category {List[String]} -- List of SafeBreach insight category - using as filter.
insight_data_type {List[String]} -- List of data types - using as filter.
        args {dict} -- Command arguments (e.g. a 'limit' on the number of indicators).
    Returns:
        List[Dict] -- List of indicators built from SafeBreach insights
    """
limit: int = int(args.get('limit') or demisto.params().get('indicatorLimit'))
indicators: List[Dict] = []
count: int = 0
    # These variables are filled directly from the integration configuration or passed as arguments.
insight_category, insight_data_type = get_category_and_data_type_filters(args, insight_category,
insight_data_type)
# Convert category into insight id
insights_ids: Any = get_insights_ids_by_category(insight_category)
raw_insights: Any = client.get_insights().json()
# Filter insight by category
insights: Any = list([item for item in raw_insights if int(item.get('ruleId')) in insights_ids])
for insight in insights:
# Fetch remediation data for each insight
processed_data: List[Dict[str, Any]] = get_remediation_data_command(client,
{'insightId': insight.get('ruleId')}, False)
for item in processed_data:
# if the data type is not in the filter data types continue,
if INDICATOR_TYPE_SB_TO_DEMISTO_MAPPER.get(item['type']) not in insight_data_type:
continue
if not INDICATOR_TYPE_MAPPER.get(str(item['type'])) or item["value"] == 'N/A':
continue
if isinstance(item['type'], int):
demisto.info('Data type is int', item['type'], insight['ruleId'])
is_behaveioral = item['type'] not in ['Domain', 'FQDN/IP', 'SHA256', 'URI', 'Hash']
score_behavioral_reputation = DEMISTO_INDICATOR_REPUTATION.get(demisto.params().get('behavioralReputation'))
score_non_behavioral_reputation = DEMISTO_INDICATOR_REPUTATION.get(
demisto.params().get('nonBehavioralReputation'))
raw_json = {
'value': str(item["value"]),
'dataType': item['type'],
'insightId': insight.get('ruleId'),
'insightTime': insight.get('maxExecutionTime'),
}
mapping = {
'description': 'SafeBreach Insight - {0}'.format(insight['actionBasedTitle']),
item['type'].lower(): item["value"],
"safebreachinsightids": str(insight.get('ruleId')),
"safebreachseverity": insight.get('severity'),
"safebreachseverityscore": str(insight.get('severityScore')),
"safebreachisbehavioral": is_behaveioral,
"safebreachattackids": list(map(str, insight.get('attacks'))),
'tags': [
f"SafeBreachInsightId: {insight.get('ruleId')}",
]
}
mapping['tags'] = list((set(mapping['tags'])).union(set(client.tags)))
indicator = {
'value': str(item["value"]),
'type': INDICATOR_TYPE_MAPPER.get(str(item['type'])),
'rawJSON': raw_json,
'fields': mapping,
'score': score_behavioral_reputation if is_behaveioral else score_non_behavioral_reputation
}
if is_ip(item["value"]):
indicator['type'] = FeedIndicatorType.IP
count += 1
if count > limit:
return indicators
indicators.append(indicator)
return indicators
|
63a678da0abd76e56fceaad2a075e7d209eced00
| 32,611 |
from collections import defaultdict
import numpy as np
import pandas as pd
def engineer_features(df, training=True):
    """
    For any given day the target is the sum of the next 30 days of revenue.
    For that day we engineer several features that help predict the summed revenue.
    The 'training' flag trims rows that should not be used for training;
    when set to False all data are returned.
    """
## extract dates
dates = df['date'].values.copy()
dates = dates.astype('datetime64[D]')
## engineer some features
eng_features = defaultdict(list)
previous =[7, 14, 28, 70] #[7, 14, 21, 28, 35, 42, 49, 56, 63, 70]
y = np.zeros(dates.size)
for d,day in enumerate(dates):
## use windows in time back from a specific date
for num in previous:
current = np.datetime64(day, 'D')
prev = current - np.timedelta64(num, 'D')
mask = np.in1d(dates, np.arange(prev,current,dtype='datetime64[D]'))
eng_features["previous_{}".format(num)].append(df[mask]['revenue'].sum())
## get get the target revenue
plus_30 = current + np.timedelta64(30,'D')
mask = np.in1d(dates, np.arange(current,plus_30,dtype='datetime64[D]'))
y[d] = df[mask]['revenue'].sum()
## attempt to capture monthly trend with previous years data (if present)
start_date = current - np.timedelta64(365,'D')
stop_date = plus_30 - np.timedelta64(365,'D')
mask = np.in1d(dates, np.arange(start_date,stop_date,dtype='datetime64[D]'))
eng_features['previous_year'].append(df[mask]['revenue'].sum())
## add some non-revenue features
minus_30 = current - np.timedelta64(30,'D')
mask = np.in1d(dates, np.arange(minus_30,current,dtype='datetime64[D]'))
eng_features['recent_invoices'].append(df[mask]['unique_invoices'].mean())
eng_features['recent_views'].append(df[mask]['total_views'].mean())
X = pd.DataFrame(eng_features)
## combine features in to df and remove rows with all zeros
X.fillna(0,inplace=True)
mask = X.sum(axis=1)>0
X = X[mask]
y = y[mask]
dates = dates[mask]
X.reset_index(drop=True, inplace=True)
if training == True:
## remove the last 30 days (because the target is not reliable)
mask = np.arange(X.shape[0]) < np.arange(X.shape[0])[-30]
X = X[mask]
y = y[mask]
dates = dates[mask]
X.reset_index(drop=True, inplace=True)
return(X,y,dates)
|
b10b973e5ef7cbe9aa2d7c436747673d83c0d5f6
| 32,613 |
def inconsistent_target_program():
"""Returns a benchmark.Benchmark with an inconsistent target program."""
examples = [
benchmark.Example(
inputs=[
[10],
[20],
],
output=[40], # Should be 30.
),
]
constants = [0]
description = 'add elementwise'
target_program = 'tf.add(in1, in2)'
source = 'test'
return benchmark.Benchmark(examples=examples,
constants=constants,
description=description,
target_program=target_program,
source=source,
name='inconsistent_target_program')
|
c7cc9e6fa380e2a1e111e5dabd6e9412582bb825
| 32,614 |
def _slice_slice(outer, outer_len, inner, inner_len):
"""
slice a slice - we take advantage of Python 3 range's support
for indexing.
"""
assert(outer_len >= inner_len)
outer_rng = range(*outer.indices(outer_len))
rng = outer_rng[inner]
start, stop, step = rng.start, rng.stop, rng.step
if step < 0 and stop < 0:
stop = None
return slice(start, stop, step)
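# Worked example: composing slice(2, 12, 2) (indices 2, 4, 6, 8, 10 of a length-20
# sequence) with an inner slice(1, 4) selects original indices 4, 6 and 8.
composed = _slice_slice(slice(2, 12, 2), 20, slice(1, 4), 5)
assert list(range(20))[composed] == [4, 6, 8]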
|
4430ae752fbc9d7db6414418b18a0b8186f3d328
| 32,615 |
import numpy as np
def tp(a):
    """Transpose a 1d vector into a column vector."""
return a[np.newaxis].T
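# e.g. tp(np.array([1, 2, 3])).shape == (3, 1): a 1d vector becomes a column vector.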
|
86f357de5d1f080194867e826db5ff78c3926b92
| 32,616 |
def _parse_interval(value):
"""
Do some nasty try/except voodoo to get some sort of datetime
object(s) out of the string.
"""
try:
return sorted(aniso8601.parse_interval(value))
except ValueError:
try:
return aniso8601.parse_datetime(value), None
except ValueError:
return aniso8601.parse_date(value), None
|
d99151734cd6ba81919857482e56c627a4b3aa9b
| 32,617 |
def is_masquerading_as_non_audit_enrollment(user, course_key, course_masquerade=None):
"""
Return if the user is a staff member masquerading as a user
in _any_ enrollment track _except_ audit
"""
group_id = _get_masquerade_group_id(ENROLLMENT_TRACK_PARTITION_ID, user, course_key, course_masquerade)
audit_mode_id = settings.COURSE_ENROLLMENT_MODES.get(CourseMode.AUDIT, {}).get('id')
if group_id is not None:
if group_id != audit_mode_id:
return True
return False
|
cd337e3f44a9c618609d103c5c0c6617fc62d70b
| 32,618 |
import numpy as np
def holding_value_grouped_nb(holding_value, group_lens):
"""Get holding value series per group."""
check_group_lens(group_lens, holding_value.shape[1])
out = np.empty((holding_value.shape[0], len(group_lens)), dtype=np.float_)
from_col = 0
for group in range(len(group_lens)):
to_col = from_col + group_lens[group]
out[:, group] = np.sum(holding_value[:, from_col:to_col], axis=1)
from_col = to_col
return out
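# Worked example (expected output, assuming check_group_lens passes): three columns
# grouped with lengths (2, 1) yield two group columns, each a row-wise sum:
# holding_value_grouped_nb(np.array([[1., 2., 3.], [4., 5., 6.]]), np.array([2, 1]))
# -> [[3., 3.], [9., 6.]]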
|
c0b0e2f538e9849671c5388c82f822d755d24b61
| 32,619 |
def read_upload(up_file, data_model=None):
"""
take a file that should be ready for upload
using the data model, check that all required columns are full,
and that all numeric data is in fact numeric.
print out warnings for any validation problems
return True if there were no problems, otherwise return False
"""
print("-I- Running validation for your upload file")
## Read file
f = open(up_file)
lines = f.readlines()
f.close()
data = split_lines(lines)
data_dicts = get_dicts(data)
## initialize
invalid_data = {}
missing_data = {}
non_numeric = {}
bad_vocab = {}
bad_coords = {}
invalid_col_names = {}
missing_file_type = False
## make sure you have the data model
if not data_model:
data_model = get_data_model()
reqd_file_types = ['er_locations']
provided_file_types = set()
if not data_model:
return False, None
## Iterate through data
# each dictionary is one tab delimited line in a csv file
for dictionary in data_dicts:
for k, v in list(dictionary.items()):
if k == "file_type": # meta data
provided_file_types.add(v)
continue
file_type = dictionary['file_type']
# need to deal with pmag_criteria type file, too
item_type = file_type.split('_')[1][:-1]
if item_type == 'criteria':
item_name = dictionary.get('criteria_definition')
elif item_type == 'result':
item_name = dictionary.get('pmag_result_name', None)
elif item_type in ('specimen', 'sample', 'site', 'location'):
item_name = dictionary.get('er_' + item_type + '_name', None)
elif item_type == 'age':
# get the lowest level er_*_name column that is filled in
for dtype in ('specimen', 'sample', 'site', 'location'):
item_name = dictionary.get('er_' + dtype + '_name', None)
if item_name:
break
elif item_type == 'measurement':
exp_name = dictionary.get('magic_experiment_name')
meas_num = dictionary.get('measurement_number')
item_name = exp_name + '_' + str(meas_num)
else:
item_name = None
if file_type not in list(data_model.keys()):
continue
specific_data_model = data_model[file_type]
## Function for building problems list
def add_to_invalid_data(item_name, item_type, invalid_data,
validation, problem_type):
"""
correctly create or add to the dictionary of invalid values
"""
if item_name:
if item_type not in invalid_data:
invalid_data[item_type] = {}
if item_name not in invalid_data[item_type]:
invalid_data[item_type][item_name] = {}
if problem_type not in invalid_data[item_type][item_name]:
invalid_data[item_type][item_name][problem_type] = []
invalid_data[item_type][item_name][problem_type].append(validation)
## Validate for each problem type
# check if column header is in the data model
invalid_col_name = validate_for_recognized_column(k, v, specific_data_model)
if invalid_col_name:
if item_type not in list(invalid_col_names.keys()):
invalid_col_names[item_type] = set()
invalid_col_names[item_type].add(invalid_col_name)
# skip to next item, as additional validations won't work
# (key is not in the data model)
## new style
add_to_invalid_data(item_name, item_type, invalid_data,
invalid_col_name, 'invalid_col')
# skip to next item, as additional validations won't work
# (key is not in the data model)
continue
# make a list of missing, required data
missing_item = validate_for_presence(k, v, specific_data_model)
#print 'k, v', k, v
if missing_item:
if item_type not in list(missing_data.keys()):
missing_data[item_type] = set()
missing_data[item_type].add(missing_item)
if item_name:
# don't double count if a site is missing its parent location
if item_type == 'age' and missing_item == 'er_location_name':
pass
# ignore er_synthetic_name (data model is incorrect here)
if missing_item == 'er_synthetic_name':
pass
else:
add_to_invalid_data(item_name, item_type, invalid_data,
missing_item, 'missing_data')
# vocabulary problems
vocab_problem = validate_for_controlled_vocab(k, v, specific_data_model)
if vocab_problem:
if item_type not in list(bad_vocab.keys()):
bad_vocab[item_type] = set()
bad_vocab[item_type].add(vocab_problem)
add_to_invalid_data(item_name, item_type, invalid_data,
vocab_problem, 'vocab_problem')
# illegal coordinates
coord_problem = validate_for_coordinates(k, v, specific_data_model)
if coord_problem:
if item_type not in list(bad_coords.keys()):
bad_coords[item_type] = set()
bad_coords[item_type].add(coord_problem)
add_to_invalid_data(item_name, item_type, invalid_data,
coord_problem, 'coordinates')
# make a list of data that should be numeric, but aren't
number_fail = validate_for_numericality(k, v, specific_data_model)
if number_fail:
if item_type not in list(non_numeric.keys()):
non_numeric[item_type] = set()
non_numeric[item_type].add(number_fail)
add_to_invalid_data(item_name, item_type, invalid_data,
number_fail, 'number_fail')
## Print out all issues
for file_type, invalid_names in list(invalid_col_names.items()):
print("-W- In your {} file, you are using the following unrecognized columns: {}".format(file_type, ', '.join(invalid_names)))
for file_type, wrong_cols in list(non_numeric.items()):
print("-W- In your {} file, you must provide only valid numbers, in the following columns: {}".format(file_type, ', '.join(wrong_cols)))
for file_type, empty_cols in list(missing_data.items()):
print("-W- In your {} file, you are missing data in the following required columns: {}".format(file_type, ', '.join(empty_cols)))
for file_type in reqd_file_types:
if file_type not in provided_file_types:
print("-W- You have not provided a(n) {} type file, which is required data".format(file_type))
missing_file_type = True
for file_type, vocab_types in list(bad_vocab.items()):
print("-W- In your {} file, you are using an unrecognized value for these controlled vocabularies: {}".format(file_type, ', '.join(vocab_types)))
for file_type, coords in list(bad_coords.items()):
print("-W- In your {} file, you are using an illegal value for these columns: {}. (Latitude must be between -90 and +90)".format(file_type, ', '.join(coords)))
if any((invalid_col_names, non_numeric, missing_data, missing_file_type, bad_vocab, bad_coords)):
return False, invalid_data
else:
print("-I- validation was successful")
return True, None
|
b62339cc17aadd51f1a97cab3fe5158b5eaacf0b
| 32,620 |
import torch
from torch.autograd import Variable
from torch.utils.data import DataLoader, TensorDataset
def predict(network, X, batch_size, device, move_network=True):
""" predict batchwise """
# Build DataLoader
if move_network:
network = network.to(device)
y = torch.Tensor(X.size()[0])
data = DataLoader(TensorDataset(X, y), batch_size, False)
# Batch prediction
network.eval()
r, n = 0, X.size()[0]
for batch_data in data:
# Predict on batch
X_batch = Variable(batch_data[0]).to(device)
y_batch_pred = network(X_batch).detach().cpu()
# Infer prediction shape
if r == 0:
y_pred = torch.zeros((n,) + y_batch_pred.size()[1:])
# Add to prediction tensor
y_pred[r : min(n, r + batch_size)] = y_batch_pred
r += batch_size
return y_pred
|
e9f2c12e812950d73a4aec57c966f76f5c006785
| 32,621 |
import pandas as pd
import cea.schemas
def read_glossary_df(plugins):
"""Returns the glossary as a DataFrame, created from the schemas.yml file. NOTE: This is used by the GUI."""
global __glossary_df
if __glossary_df is None:
schemas = cea.schemas.schemas(plugins)
glossary_df = pd.DataFrame(columns=["SCRIPT", "LOCATOR_METHOD", "WORKSHEET", "VARIABLE",
"DESCRIPTION", "UNIT", "VALUES", "TYPE", "COLOR", "FILE_NAME"])
rows = []
for lm in schemas:
if lm == "get_database_standard_schedules_use":
# the schema for schedules is non-standard
continue
script = schemas[lm]["created_by"][0] if schemas[lm]["created_by"] else "-"
file_path = schemas[lm]["file_path"]
if schemas[lm]["file_type"] in {"xls", "xlsx"}:
for ws in schemas[lm]["schema"]: # ws: worksheet
for col in schemas[lm]["schema"][ws]["columns"]:
cd = schemas[lm]["schema"][ws]["columns"][col]
rows.append(glossary_row(script, file_path, col, lm, cd, worksheet=ws))
else:
for col in schemas[lm]["schema"]["columns"]:
cd = schemas[lm]["schema"]["columns"][col] # cd: column definition
rows.append(glossary_row(script, file_path, col, lm, cd, worksheet=""))
glossary_df = glossary_df.append(rows, ignore_index=True)
glossary_df['key'] = glossary_df['FILE_NAME'] + '!!!' + glossary_df['VARIABLE']
glossary_df = glossary_df.set_index(['key'])
glossary_df = glossary_df.sort_values(by=['LOCATOR_METHOD', 'FILE_NAME', 'VARIABLE'])
__glossary_df = glossary_df
return __glossary_df
|
246f27927cc70eb03be8e9b84423442ef5b884e2
| 32,623 |
from pathlib import Path
def extract_all(session_path, save=False, data=False):
"""
Extract all behaviour data from Bpod whithin the specified folder.
The timing information from FPGA is extracted in
:func:`~ibllib.io.extractors.ephys_fpga`
:param session_path: folder containing sessions
:type session_path: str or pathlib.Path
:param save: bool
:param data: raw Bpod data dictionary
:return: dictionary of trial related vectors (one row per trial)
"""
if not data:
data = raw.load_data(session_path)
feedbackType = get_feedbackType(session_path, save=save, data=data)
contrastLeft, contrastRight = get_contrastLR(
session_path, save=save, data=data)
probabilityLeft = get_probabilityLeft(session_path, save=save, data=data)
choice = get_choice(session_path, save=save, data=data)
rewardVolume = get_rewardVolume(session_path, save=save, data=data)
iti_dur = get_iti_duration(session_path, save=save, data=data)
go_cue_trig_times = get_goCueTrigger_times(session_path, save=save, data=data)
go_cue_times = get_goCueOnset_times(session_path, save=save, data=data)
intervals = get_intervals(session_path, save=save, data=data)
out = {'feedbackType': feedbackType,
'contrastLeft': contrastLeft,
'contrastRight': contrastRight,
'probabilityLeft': probabilityLeft,
'session_path': session_path,
'choice': choice,
'rewardVolume': rewardVolume,
'iti_dur': iti_dur,
'goCue_times': go_cue_times,
'goCueTrigger_times': go_cue_trig_times,
'intervals': intervals}
if save:
file_intervals = Path(session_path) / 'alf' / '_ibl_trials.intervals.npy'
file_intervals.rename(Path(session_path) / 'alf' / '_ibl_trials.intervalsBpod.npy')
return out
|
5ace8847251d3c4a971e5717495613179ead49c4
| 32,624 |
from nltk.tokenize import TweetTokenizer
from nltk.stem import WordNetLemmatizer
def tokenize(data):
""" Tokenization.
Tokenize and lemmatize the sentences; extract labels of tokens.
Parameters
----------
data : list of dict
each dict should have the following form:
{"sentence": str,
"sentence_id": str,
"annotations": [
{"ann_id": str
"text": str,
"start": int,
"end": int,
"label": str
}}
Returns
-------
list_tokens
list_labels
"""
tknzr = TweetTokenizer()
lemmatizer = WordNetLemmatizer()
list_tokens, list_labels = [], []
for idx in range(len(data)):
sample = data[idx]
sent = sample["sentence"]
tokens = tknzr.tokenize(sent)
lem_tokens = [lemmatizer.lemmatize(t) for t in tokens]
lem_tokens = ["".join([t if ord(t) < 128 else "*" for t in list(token)])
for token in lem_tokens]
idx_char = 0
labels = []
for t in tokens:
label = "Other"
while t != sent[idx_char:idx_char+len(t)]:
idx_char += 1
for ann in sample["annotations"]:
if (ann["start"] <= idx_char) and (idx_char+len(t) <= ann["end"]):
label = ann["label"]
idx_char += len(t)
labels.append(label)
list_tokens.append(lem_tokens)
list_labels.append(labels)
return list_tokens, list_labels
|
91a0db8ff0054f249f08ecb02f4fddeb17af40fe
| 32,625 |
from threading import Timer
def rotate_thread(thymio: Thymio, angle: float, verbose: bool = False, function=stop, args=None, kwargs=None):
"""
Rotates of the desired angle by using a timer on a parallel thread.
:param function: function to execute at the end of rotation, default stop
:param args: array of non-keyworded arguments of function
:param kwargs: set of keyworded arguments
:param thymio: the class to which the robot is referred to
:param angle: angle in radians by which we want to rotate, positive or negative
:param verbose: printing the speed in the terminal
:return: timer to check if it is still alive or not
"""
args_f = args if args is not None else [thymio]
kwargs_f = kwargs if kwargs is not None else {}
l_speed, r_speed, turn_time = rotate_time(angle)
# Printing the speeds if requested
if verbose:
# print("\t\t Rotate speed & time : ", l_speed, r_speed, turn_time)
print("\t\t Rotate of degrees : ", angle)
timer = Timer(interval=turn_time, function=function, args=args_f, kwargs=kwargs_f)
move(thymio, l_speed, r_speed)
timer.start()
return timer
|
d19ea0f601ff991a20e889b62481f46a0475573d
| 32,626 |
import bigflow.transform_impls.first
def first(pcollection, **options):
"""
取出PCollection中的第一个元素
Args:
pcollection (PCollection): 输入PCollection
**options: 可配置选项
Returns:
PObject: 取出的单个元素,以PObject给出
>>> from bigflow import transforms
>>> _p = _pipeline.parallelize([3, 7, 1, 3, 2, 8])
>>> transforms.first(_p).get()
3
"""
return bigflow.transform_impls.first.first(pcollection, **options)
|
993c6603c182bd67ee0c22418243c0ce9933ef37
| 32,627 |
import tensorflow as tf
def select_values_over_last_axis(values, indices):
"""
Auxiliary function to select logits corresponding to chosen tokens.
:param values: logits for all actions: float32[batch,tick,action]
:param indices: action ids int32[batch,tick]
:returns: values selected for the given actions: float[batch,tick]
"""
assert values.shape.ndims == 3 and indices.shape.ndims == 2
batch_size, seq_len = tf.shape(indices)[0], tf.shape(indices)[1]
batch_i = tf.tile(tf.range(0, batch_size)[:, None], [1, seq_len])
time_i = tf.tile(tf.range(0, seq_len)[None, :], [batch_size, 1])
indices_nd = tf.stack([batch_i, time_i, indices], axis=-1)
return tf.gather_nd(values, indices_nd)
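# Worked example: for logits of shape [batch=1, tick=2, action=3],
#   values  = [[[1., 2., 3.], [4., 5., 6.]]]
#   indices = [[0, 2]]
# the selected values are [[1., 6.]] (action 0 at tick 0, action 2 at tick 1).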
|
a116e9fb009a53f3da2e177a9cefd5382ba44906
| 32,628 |
def dump_adcs(adcs, drvname='ina219', interface=2):
"""Dump xml formatted INA219 adcs for servod.
Args:
adcs: array of adc elements. Each array element is a tuple consisting of:
slv: int representing the i2c slave address plus optional channel if ADC
(INA3221 only) has multiple channels. For example,
"0x40" : address 0x40 ... no channel
"0x40:1" : address 0x40, channel 1
name: string name of the power rail
        sense: float of sense resistor size in ohms
nom: float of nominal voltage of power rail.
mux: string name of i2c mux leg these ADC's live on
is_calib: boolean to determine if calibration is possible for this rail
drvname: string name of adc driver to enumerate for controlling the adc.
interface: interface index to handle low-level communication.
Returns:
string (large) of xml for the system config of these ADCs to eventually be
parsed by servod daemon ( servo/system_config.py )
"""
# Must match REG_IDX.keys() in servo/drv/ina2xx.py
regs = ['cfg', 'shv', 'busv', 'pwr', 'cur', 'cal']
if drvname == 'ina231':
regs.extend(['msken', 'alrt'])
elif drvname == 'ina3221':
regs = ['cfg', 'shv', 'busv', 'msken']
rsp = ""
for (slv, name, nom, sense, mux, is_calib) in adcs:
chan = ''
if drvname == 'ina3221':
(slv, chan_id) = slv.split(':')
chan = 'channel="%s"' % chan_id
rsp += (
'<control><name>%(name)s_mv</name>\n'
'<doc>Bus Voltage of %(name)s rail in millivolts on i2c_mux:%(mux)s</doc>\n'
'<params interface="%(interface)d" drv="%(drvname)s" slv="%(slv)s" %(chan)s'
' mux="%(mux)s" rsense="%(sense)s" type="get" subtype="millivolts"'
' nom="%(nom)s">\n</params></control>\n'
'<control><name>%(name)s_shuntmv</name>\n'
'<doc>Shunt Voltage of %(name)s rail in millivolts on i2c_mux:%(mux)s</doc>\n'
'<params interface="%(interface)d" drv="%(drvname)s" slv="%(slv)s" %(chan)s'
' mux="%(mux)s" rsense="%(sense)s" type="get" subtype="shuntmv"'
' nom="%(nom)s">\n</params></control>\n'
) % {'name':name, 'drvname':drvname, 'interface':interface, 'slv':slv,
'mux':mux, 'sense':sense, 'nom':nom, 'chan':chan}
    # In some instances we may not know the sense resistor size (rework) or other
    # custom factors may not allow for calibration, and thus reliable readings
    # of the current and power registers. This boolean determines which
    # controls should be enumerated based on the rails' input specification.
if is_calib:
rsp += (
'<control><name>%(name)s_ma</name>\n'
'<doc>Current of %(name)s rail in milliamps on i2c_mux:%(mux)s</doc>\n'
'<params interface="%(interface)d" drv="%(drvname)s" slv="%(slv)s" %(chan)s'
'rsense="%(sense)s" type="get" subtype="milliamps">\n'
'</params></control>\n'
'<control><name>%(name)s_mw</name>\n'
'<doc>Power of %(name)s rail in milliwatts on i2c_mux:%(mux)s</doc>\n'
'<params interface="%(interface)d" drv="%(drvname)s" slv="%(slv)s" %(chan)s'
' mux="%(mux)s" rsense="%(sense)s" type="get" subtype="milliwatts">\n'
'</params></control>\n') % {'name':name, 'drvname':drvname,
'interface':interface, 'slv':slv,
'mux':mux, 'sense':sense, 'nom':nom, 'chan':chan}
for reg in regs:
rsp += (
'<control><name>%(name)s_%(reg)s_reg</name>\n'
'<doc>Raw register value of %(reg)s on i2c_mux:%(mux)s</doc>'
'<params cmd="get" interface="%(interface)d"'
' drv="%(drvname)s" slv="%(slv)s" %(chan)s'
' subtype="readreg" reg="%(reg)s" mux="%(mux)s"'
' fmt="hex">\n</params>') % {'name':name, 'drvname':drvname,
'interface':interface, 'slv':slv,
'mux':mux, 'sense':sense,
'reg':reg, 'chan':chan}
if reg in ["cfg", "cal"]:
map_str = ""
if reg == "cal":
map_str = ' map="calibrate"'
rsp += (
'<params cmd="set" interface="%(interface)d"'
' drv="%(drvname)s" slv="%(slv)s" %(chan)s'
' subtype="writereg" reg="%(reg)s" mux="%(mux)s"'
' fmt="hex"%(map)s>\n</params></control>') % {'drvname':drvname,
'interface':interface,
'slv':slv, 'mux':mux,
'sense':sense,
'reg':reg,
'chan':chan,
'map':map_str}
else:
rsp += ('</control>')
return rsp
|
b9c0aec3e6098a5de28a467910f4861e8860d723
| 32,629 |
def list_launch_agents():
"""
Return an array of the files that are present in ~/Library/LaunchAgents,
/System/Library/LaunchAgents/ and /Library/LaunchAgents/
"""
files = list_system_launch_agents()
files += list_library_launch_agents()
files += list_homedir_launch_agents()
return files
|
f8a890064f9140b6f67c9be489b5b4a991255ebe
| 32,630 |
import numpy as np
def get_projection_point_dst(coords_src, M):
""" Gets the coordinate equivalent in surface projection space from original
view space
Args:
        coords_src: `numpy.ndarray` homogeneous coordinate in the original image space
        M: `numpy.ndarray` projection (homography) matrix
    Returns:
        coords_dst: `list` projected [x, y] coordinate in the surface projection space
    """
coords_dst = np.matmul(M, coords_src)
coords_dst = coords_dst / coords_dst[2]
coords_dst = [int(coords_dst[0]), int(coords_dst[1])]
return coords_dst
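# Worked example: with an identity transform the homogeneous point maps to itself:
# get_projection_point_dst(np.array([10.0, 20.0, 1.0]), np.eye(3)) -> [10, 20]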
|
b93f52727d4ca378c54094a89ce346e049bdf818
| 32,631 |
import numpy as np
import torch
def torchify(a):
"""Converts an array or a dict of numpy arrays to CPU tensors.
If you'd like CUDA tensors, follow the tensor-ification ``.cuda()`` ; the attribute delegation
built into :class:`~rebar.dotdict.dotdict` s will do the rest.
Floats get mapped to 32-bit PyTorch floats; ints get mapped to 32-bit PyTorch ints. This is usually what you want in
machine learning work.
"""
if hasattr(a, 'torchify'):
return a.torchify()
a = np.asarray(a)
if np.issubdtype(a.dtype, np.floating):
dtype = torch.float
elif np.issubdtype(a.dtype, np.integer):
dtype = torch.int
elif np.issubdtype(a.dtype, np.bool_):
dtype = torch.bool
else:
raise ValueError(f'Can\'t handle {type(a)}')
return torch.as_tensor(np.array(a), dtype=dtype)
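# e.g. torchify([1, 2, 3]).dtype == torch.int32 and torchify([1.0, 2.0]).dtype == torch.float32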
|
d91577b1e4e9c1d0f2a1703995400e51623f9abe
| 32,632 |
def rel_angle(vec_set1, vec_set2):
"""
Calculate the relative angle between two vector sets
Args:
vec_set1(array[array]): an array of two vectors
vec_set2(array[array]): second array of two vectors
"""
return vec_angle(vec_set2[0], vec_set2[1]) / vec_angle(
vec_set1[0], vec_set1[1]) - 1
|
bc18b1e8c2225eac8fcded45c07e8d0fcaaba5bb
| 32,633 |
import pandas as pd
def equalization(data, equilibrium_points):
    """
    Equalize the data.
    Parameters:
    data - table of data to be levelled. Type: pandas.DataFrame.
    equilibrium_points - equilibrium shift points. Each column needs its own
    equilibrium point, since each curve must have its own equalization line.
    Type: dict[str: float].
    Returns a table of equalized data. Type: pandas.DataFrame.
    """
    # Create a table for the equalized data
    equalized = pd.DataFrame(index=data.index)
    # Equalize each column
    for key in data.keys():
        # Get the equilibrium point for this column
        point = equilibrium_points[key]
        # Build the equalization line for the column
        eq_line = equalization_line(data, key, point)["equalization_line"]
        # Shift the column onto the equalization line
        equalized[key] = data[key] - eq_line
    return equalized
|
c3c5dd9dafe6dd8655216ef53033968700d80e93
| 32,634 |
import json
def from_config(config_file="./config.json"):
"""Run training from a config file
:param config_file: JSON file with arguments as per "training" CLI.
:return:
"""
with open(config_file, "r") as f:
config_string = f.read()
config = json.loads(config_string)
return main(**config)
|
31eb83d0c364a31fcf515d164ca306c6756c70c9
| 32,635 |
def load_anomalies_text(input_path):
"""Loads the Anomalies proto stored in text format in the input path.
Args:
input_path: File path from which to load the Anomalies proto.
Returns:
An Anomalies protocol buffer.
"""
anomalies = anomalies_pb2.Anomalies()
anomalies_text = file_io.read_file_to_string(input_path)
text_format.Parse(anomalies_text, anomalies)
return anomalies
|
c723faff2a5b622162f3d23041a3c13dd44d35bb
| 32,636 |
def is_isbn_or_key(q):
    """
    Decide whether the search keyword q is an ISBN or a plain search key.
    :param q: search query string
    :return: 'isbn' or 'key'
    """
    # isbn13: 13 digits (0-9); isbn10: 10 digits (0-9) separated by '-'
    isbn_or_key = 'key'
    if len(q) == 13 and q.isdigit():
        isbn_or_key = 'isbn'
    if '-' in q:
        short_q = q.replace('-', '')
        if len(short_q) == 10 and short_q.isdigit():
            isbn_or_key = 'isbn'
    return isbn_or_key
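# e.g. is_isbn_or_key('9787501524044') -> 'isbn'   (13 digits)
#      is_isbn_or_key('7-5015-2404-6') -> 'isbn'   (10 digits once '-' is removed)
#      is_isbn_or_key('python web')    -> 'key'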
|
cf35f13e8188741bd37715a1ed91cf40667cff40
| 32,638 |
def find_scripts(entry_points=False, suffix=''):
"""Find IPython's scripts.
if entry_points is True:
return setuptools entry_point-style definitions
else:
return file paths of plain scripts [default]
suffix is appended to script names if entry_points is True, so that the
Python 3 scripts get named "ipython3" etc.
"""
if entry_points:
console_scripts = [s % suffix for s in [
'ipython%s = IPython.frontend.terminal.ipapp:launch_new_instance',
'pycolor%s = IPython.utils.PyColorize:main',
'ipcontroller%s = IPython.parallel.apps.ipcontrollerapp:launch_new_instance',
'ipengine%s = IPython.parallel.apps.ipengineapp:launch_new_instance',
'iplogger%s = IPython.parallel.apps.iploggerapp:launch_new_instance',
'ipcluster%s = IPython.parallel.apps.ipclusterapp:launch_new_instance',
'iptest%s = IPython.testing.iptest:main',
'irunner%s = IPython.lib.irunner:main'
]]
gui_scripts = [s % suffix for s in [
'ipython%s-qtconsole = IPython.frontend.qt.console.qtconsoleapp:main',
]]
scripts = dict(console_scripts=console_scripts, gui_scripts=gui_scripts)
else:
parallel_scripts = pjoin('IPython','parallel','scripts')
main_scripts = pjoin('IPython','scripts')
scripts = [
pjoin(parallel_scripts, 'ipengine'),
pjoin(parallel_scripts, 'ipcontroller'),
pjoin(parallel_scripts, 'ipcluster'),
pjoin(parallel_scripts, 'iplogger'),
pjoin(main_scripts, 'ipython'),
pjoin(main_scripts, 'pycolor'),
pjoin(main_scripts, 'irunner'),
pjoin(main_scripts, 'iptest')
]
return scripts
|
3103915d245f753f04ccf0eab682cccd57629613
| 32,639 |
def get_upright_box(waymo_box):
"""Convert waymo box to upright box format and return the convered box."""
xmin = waymo_box.center_x - waymo_box.length / 2
# xmax = waymo_box.center_x+waymo_box.length/2
ymin = waymo_box.center_y - waymo_box.width / 2
# ymax = waymo_box.center_y+waymo_box.width/2
return [xmin, ymin, waymo_box.length, waymo_box.width]
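# Worked example using a simple stand-in for a Waymo label box (hypothetical values):
from types import SimpleNamespace
demo_box = SimpleNamespace(center_x=10.0, center_y=5.0, length=4.0, width=2.0)
# get_upright_box(demo_box) -> [8.0, 4.0, 4.0, 2.0], i.e. [xmin, ymin, length, width]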
|
2c301e3d60078ba416446dfe133fb9802c96f09c
| 32,640 |
def update_two_contribution_score(click_time_one, click_time_two):
"""
user cf user contribution score update v2
:param click_time_one: different user action time to the same item
:param click_time_two: time two
:return: contribution score
"""
delta_time = abs(click_time_two - click_time_one)
norm_num = 60 * 60 * 24
delta_time = delta_time / norm_num
return 1 / (1 + delta_time)
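# Worked example: two clicks exactly one day apart give a score of 1 / (1 + 1) = 0.5.
# update_two_contribution_score(0, 60 * 60 * 24) -> 0.5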
|
df959e4581f84be5e0dffd5c35ebe70b97a78383
| 32,642 |
def series_dropna(series, axis=0, inplace=False, how=None):
"""
Return a new Series with missing values removed.
See the :ref:`User Guide <missing_data>` for more on which values are
considered missing, and how to work with missing data.
Parameters
----------
axis : {0 or 'index'}, default 0
There is only one axis to drop values from.
inplace : bool, default False
If True, do operation inplace and return None.
how : str, optional
Not in use. Kept for compatibility.
Returns
-------
Series
Series with NA entries dropped from it.
See Also
--------
Series.isna: Indicate missing values.
Series.notna : Indicate existing (non-missing) values.
Series.fillna : Replace missing values.
DataFrame.dropna : Drop rows or columns which contain NA values.
Index.dropna : Drop missing indices.
Examples
--------
>>> import mars.dataframe as md
>>> ser = md.Series([1., 2., np.nan])
>>> ser.execute()
0 1.0
1 2.0
2 NaN
dtype: float64
Drop NA values from a Series.
>>> ser.dropna().execute()
0 1.0
1 2.0
dtype: float64
Keep the Series with valid entries in the same variable.
>>> ser.dropna(inplace=True)
>>> ser.execute()
0 1.0
1 2.0
dtype: float64
Empty strings are not considered NA values. ``None`` is considered an
NA value.
>>> ser = md.Series([np.NaN, 2, md.NaT, '', None, 'I stay'])
>>> ser.execute()
0 NaN
1 2
2 NaT
3
4 None
5 I stay
dtype: object
>>> ser.dropna().execute()
1 2
3
5 I stay
dtype: object
"""
axis = validate_axis(axis, series)
use_inf_as_na = options.dataframe.mode.use_inf_as_na
op = DataFrameDropNA(axis=axis, how=how, output_types=[OutputType.series],
use_inf_as_na=use_inf_as_na)
out_series = op(series)
if inplace:
series.data = out_series.data
else:
return out_series
|
994b9b8a6552f49a8533744a58091daa14557641
| 32,643 |
def partial_escape(xpath):
"""
Copied from http://stackoverflow.com/questions/275174/how-do-i-perform-html-decoding-encoding-using-python-django
but without replacing the single quote
"""
return mark_safe(force_unicode(xpath).replace('&', '&').replace('<', '<').replace('>', '>').replace('"', '"'))
|
5588b97ec57d2df2ed22ded28d9d8b98b8ed3851
| 32,644 |
def identify_regions(lat_lon, coordinates=False):
"""
Returns the region associated with the given lat/lon point.
Args:
lat_lon (:obj:`list` of :obj:`float`): latitude/longitude point to
access
coordinates (bool): optionally include a list of all registered
coordinates for the region (requires HSDS API calls)
Returns:
list: list of region names (`coordinates=False`)
list: list of region dicts with coordinate info (`coordinates=True`)
"""
_check_lat_lon(lat_lon)
wtk = _load_wtk()
lat, lon = lat_lon
regions = []
for region in wtk:
lat_range, lon_range = wtk[region]['lat_lon_range']
if lat >= lat_range[0] and lat <= lat_range[1]:
if lon >= lon_range[0] and lon <= lon_range[1]:
if coordinates:
# grab coordinates from most recent year
wtk_file = build_wtk_filepath(region, wtk[region]['year_range'][1])
with WindX(wtk_file, hsds=True) as f:
regions.append({region: f.coordinates})
else:
regions.append(region)
if len(regions) == 0:
raise ValueError('No region found for specified lat/lon point.')
return regions
|
4a32bb0afd8d5768325769e8b89b3cd2cf593b4f
| 32,645 |
def fix_text_note(text):
"""Wrap CHS document text."""
if not text:
return ""
else:
return """* CHS "Chicago Streets" Document:\n> «{}»""".format(text)
|
56cbcbad7e8b3eee6bb527a240ad96701c4eea2f
| 32,646 |
def GetCBSPLogFile():
"""
    Generate the CBSP base file name used for many things, including accessing *json* files.
"""
return Config.GetCBSPInstanceName()
|
10d0f141e3ee7f605fce22c9f971eb9f2eabebd2
| 32,647 |
from typing import Optional
def pretty_xml(document: 'Document',
declaration: Optional[str] = None,
encoding: Optional[str] = UTF8,
indent: int = 2) -> str:
"""Render the given :class:`~xml.dom.minidom.Document` `document` into a prettified string."""
kwargs = {
'encoding': encoding or UTF8,
'indent': ' ' * indent,
}
body = document.toprettyxml(**kwargs).decode(kwargs['encoding'])
# Remove blank lines automatically added by `toprettyxml()`.
lines = [line for line in body.splitlines() if line.strip()]
# xml.dom automatically adds the declaration, even if
# it is not present in the actual body. Remove it.
if len(lines) >= 1 and parse_declaration(lines[0]):
lines.pop(0)
if declaration:
lines.insert(0, declaration)
return '\n'.join(lines)
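# Usage sketch: prettify a minidom document, replacing the auto-added declaration.
from xml.dom.minidom import parseString
demo_doc = parseString('<root><item>1</item></root>')
pretty = pretty_xml(demo_doc, declaration='<?xml version="1.0" encoding="UTF-8"?>', indent=4)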
|
0f342831ccb69cf1b1b7a4f379364d8b8c046f41
| 32,648 |
import numpy as np
def cluster(image, R):
    """Split the image points up into a number of clusters.
    At first there are 10 clusters:
    1. The centres of the clusters are set to be equally spaced in angle,
       at a radius of 1
    2. Each point is looped through and assigned to the cluster with the
       closest centre
    3. The centre of each cluster is set to the mean position of all the
       points in the cluster. If a cluster has no points, its centre is set
       to (100, 100), effectively deleting the cluster
    4. Each point is then reassigned to the nearest cluster
Parameters
--------------
image:
a list of points, all of which are inside some image of the star
R:
radius of the star
"""
no_of_clusters = 10
centres = []
clusters = []
cluster1 = []
cluster2 = []
cluster3 = []
cluster4 = []
cluster5 = []
cluster6 = []
cluster7 = []
cluster8 = []
cluster9 = []
cluster10 = []
for i in range(no_of_clusters):
centres.append((np.cos(i * 2 * np.pi / no_of_clusters),
np.sin(i * 2 * np.pi / no_of_clusters)))
loops = 0
total = 0
while loops < 2:
changed = False
for point in image:
shortestsqdistance = 10000000
shortestindex = 0
for j in range(len(centres)):
distance = (point[0] - centres[j][0]) ** 2 + (
point[1] - centres[j][1]) ** 2
if distance < shortestsqdistance:
shortestsqdistance = distance
shortestindex = j
if shortestindex == 0:
cluster1.append(point)
elif shortestindex == 1:
cluster2.append(point)
elif shortestindex == 2:
cluster3.append(point)
elif shortestindex == 3:
cluster4.append(point)
elif shortestindex == 4:
cluster5.append(point)
elif shortestindex == 5:
cluster6.append(point)
elif shortestindex == 6:
cluster7.append(point)
elif shortestindex == 7:
cluster8.append(point)
elif shortestindex == 8:
cluster9.append(point)
elif shortestindex == 9:
cluster10.append(point)
cluster1 = np.array(cluster1)
cluster2 = np.array(cluster2)
cluster3 = np.array(cluster3)
cluster4 = np.array(cluster4)
cluster5 = np.array(cluster5)
cluster6 = np.array(cluster6)
cluster7 = np.array(cluster7)
cluster8 = np.array(cluster8)
cluster9 = np.array(cluster9)
cluster10 = np.array(cluster10)
clusters = []
clusters.append(cluster1)
clusters.append(cluster2)
clusters.append(cluster3)
clusters.append(cluster4)
clusters.append(cluster5)
clusters.append(cluster6)
clusters.append(cluster7)
clusters.append(cluster8)
clusters.append(cluster9)
clusters.append(cluster10)
centres = []
for k in range(no_of_clusters):
if len(clusters[k]) != 0:
centres.append((sum(clusters[k][:, 0]) / len(clusters[k]),
sum(clusters[k][:, 1]) / len(clusters[k])))
else:
centres.append((100, 100))
if loops < 1:
cluster1 = []
cluster2 = []
cluster3 = []
cluster4 = []
cluster5 = []
cluster6 = []
cluster7 = []
cluster8 = []
cluster9 = []
cluster10 = []
loops += 1
else:
if changed == False and total < 10:
"""Now if the maximum distance from a point in a cluster to the centre of the cluster is either greater than 3
standard deviations, or 25 R, then it creates a new cluster centre on the further point, and goes back through
the loop again.
"""
for k in range(no_of_clusters):
maxdistance = 0
maxindex = 0
totalsqdistance = 0
std = 0
if len(clusters[k]) > 1:
for i in range(len(clusters[k])):
distance = (centres[k][0] - clusters[k][i][
0]) ** 2 + (centres[k][1] - clusters[k][i][
1]) ** 2
totalsqdistance += distance
if np.sqrt(distance) > maxdistance:
maxdistance = np.sqrt(distance)
maxindex = i
std = np.sqrt(totalsqdistance / (len(clusters[k]) - 1))
if maxdistance > 3 * std:
done = False
for j in range(5):
if centres[j][0] == 100 and done == False:
centres[j] = clusters[k][maxindex]
done = True
cluster1 = []
cluster2 = []
cluster3 = []
cluster4 = []
cluster5 = []
cluster6 = []
cluster7 = []
cluster8 = []
cluster9 = []
cluster10 = []
loops -= 1
changed = True
if changed == False and total < 8:
"""Now if 2 clusters are too close together, it makes them one cluster assuming this to be 1 image,
and goes through the loop again
However, if an infinite loop is found where at cluster is continually broken up then merged
together (c.f paper), the total < 8 here, when compared with the total < 10 on the previous step
ensure that the algorithm errs on the side of splitting them up.
"""
for k in range(no_of_clusters):
for j in range(no_of_clusters - k - 1):
if np.sqrt((centres[k][0] - centres[j + k + 1][
0]) ** 2 + (centres[k][1] - centres[j + k + 1][
1]) ** 2) < 0.4 and (
centres[k][0] and centres[j + k + 1][
0]) != 100:
centres[k] = (100, 100)
cluster1 = []
cluster2 = []
cluster3 = []
cluster4 = []
cluster5 = []
cluster6 = []
cluster7 = []
cluster8 = []
cluster9 = []
cluster10 = []
loops -= 1
changed = True
loops += 1
total += 1
return (np.array(clusters), np.array(centres))
|
063c513e771874b78725726a84383c1dff06552c
| 32,649 |
import torch
def bbox_xyxy_to_cxcywh(bbox):
"""Convert bbox coordinates from (x1, y1, x2, y2) to (cx, cy, w, h).
Args:
bbox (Tensor): Shape (n, 4) for bboxes.
Returns:
Tensor: Converted bboxes.
"""
x1, y1, x2, y2 = bbox.split((1, 1, 1, 1), dim=-1)
bbox_new = [(x1 + x2) / 2, (y1 + y2) / 2, (x2 - x1), (y2 - y1)]
return torch.cat(bbox_new, dim=-1)
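# Worked example: the box (1, 1, 3, 5) has centre (2, 3), width 2 and height 4:
# bbox_xyxy_to_cxcywh(torch.tensor([[1., 1., 3., 5.]])) -> tensor([[2., 3., 2., 4.]])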
|
4d8a7c4147a8c604004215d6240de33e72608abd
| 32,651 |
def get_isolated_page(request: HttpRequest) -> bool:
"""Accept a GET param `?nav=no` to render an isolated, navless page."""
return request.GET.get("nav") == "no"
|
bdfde1929308915cd797b25e2003e1b78fc2e75a
| 32,652 |
import multiprocessing
def _fetch_cpu_count():
"""
Returns the number of available CPUs on machine in use.
Parameters:
-----------
None
Returns:
--------
multiprocessing.cpu_count()
Notes:
------
None
"""
return multiprocessing.cpu_count()
|
6c35446b706aa27b49bd678520b2378b1c7f8b90
| 32,655 |
from vtkmodules.vtkRenderingAnnotation import vtkAxesActor
def MakeAxesActor():
"""
Make an axis actor.
:return: The axis actor.
"""
axes = vtkAxesActor()
axes.SetShaftTypeToCylinder()
axes.SetXAxisLabelText('X')
axes.SetYAxisLabelText('Y')
axes.SetZAxisLabelText('Z')
axes.SetTotalLength(1.0, 1.0, 1.0)
axes.SetCylinderRadius(1.0 * axes.GetCylinderRadius())
axes.SetConeRadius(1.75 * axes.GetConeRadius())
axes.SetSphereRadius(1.0 * axes.GetSphereRadius())
axes.GetXAxisCaptionActor2D().GetTextActor().GetScaledTextProperty()
axes.GetXAxisCaptionActor2D().GetTextActor().SetTextScaleModeToNone()
axes.GetYAxisCaptionActor2D().GetTextActor().GetScaledTextProperty()
axes.GetYAxisCaptionActor2D().GetTextActor().SetTextScaleModeToNone()
axes.GetZAxisCaptionActor2D().GetTextActor().GetScaledTextProperty()
axes.GetZAxisCaptionActor2D().GetTextActor().SetTextScaleModeToNone()
return axes
|
295dfc3ed64c5b7d307ebc5e710007055954c15c
| 32,657 |
def bmi_stats(merged_df, out=None, include_min=True, include_mean=True, include_max=True,
include_std=True, include_mean_diff=True,
include_count=True, age_range=[2, 20], include_missing=False):
"""
Computes summary statistics for BMI. Clean values are for BMIs computed when both the height
and weight values are categorized by growthcleanr as "Include". Raw values are computed for
all observations. Information is provided by age and sex.
Parameters:
merged_df: (DataFrame) with bmi, rounded_age and sex columns
out: (ipywidgets.Output) to display the results, if provided
include_min: (Boolean) Whether to include the minimum value column
include_mean: (Boolean) Whether to include the mean value column
include_max: (Boolean) Whether to include the maximum value column
include_std: (Boolean) Whether to include the standard deviation column
include_mean_diff: (Boolean) Whether to include the difference between the raw and
clean mean value column
include_count: (Boolean) Whether to include the count column
age_range: (List) Two elements containing the minimum and maximum ages that should be
included in the statistics
include_missing: (Boolean) Whether to include the missing (0) heights and weights that impact
raw columns
Returns:
If out is None, it will return a DataFrame. If out is provided, results will be displayed
in the notebook.
"""
if include_missing:
age_filtered = merged_df[(merged_df.rounded_age >= age_range[0]) & (merged_df.rounded_age <= age_range[1])]
else:
age_filtered = merged_df[(merged_df.rounded_age >= age_range[0]) & (merged_df.rounded_age <= age_range[1]) & (merged_df.weight > 0) & (merged_df.height > 0)]
age_filtered['sex'] = age_filtered.sex.replace(0, 'M').replace(1, 'F')
agg_functions = []
formatters = {}
# if not include_missing:
# age_filtered = age_filtered
if include_min:
agg_functions.append('min')
formatters['min_clean'] = "{:.2f}".format
formatters['min_raw'] = "{:.2f}".format
if include_mean:
agg_functions.append('mean')
formatters['mean_clean'] = "{:.2f}".format
formatters['mean_raw'] = "{:.2f}".format
if include_max:
agg_functions.append('max')
formatters['max_clean'] = "{:.2f}".format
formatters['max_raw'] = "{:.2f}".format
if include_std:
agg_functions.append('std')
formatters['sd_clean'] = "{:.2f}".format
formatters['sd_raw'] = "{:.2f}".format
if include_count:
agg_functions.append('count')
clean_groups = age_filtered[age_filtered.include_both].groupby(['sex',
'rounded_age'])['bmi'].agg(agg_functions)
raw_groups = age_filtered.groupby(['sex', 'rounded_age'])['bmi'].agg(agg_functions)
merged_stats = clean_groups.merge(raw_groups, on=['sex', 'rounded_age'], suffixes=('_clean', '_raw'))
if include_mean & include_count & include_mean_diff:
merged_stats['count_diff'] = merged_stats['count_raw'] - merged_stats['count_clean']
if include_std:
merged_stats = merged_stats.rename(columns={'std_raw': 'sd_raw', 'std_clean': 'sd_clean'})
    if out is None:
return merged_stats
else:
out.clear_output()
out.append_display_data(Markdown("## Female"))
out.append_display_data(merged_stats.loc['F'].style.format(formatters))
out.append_display_data(Markdown("## Male"))
out.append_display_data(merged_stats.loc['M'].style.format(formatters))
|
2665fd7a4156451c55437a63626572cab32d9cda
| 32,659 |
async def system_health_info(hass):
"""Get info for the info page."""
remaining_requests = list(hass.data[DOMAIN].values())[0][
COORDINATOR
].accuweather.requests_remaining
return {
"can_reach_server": system_health.async_check_can_reach_url(hass, ENDPOINT),
"remaining_requests": remaining_requests,
}
|
8b63a669180af16839e2dc36af30c3c45e39dfdf
| 32,660 |
def predict_ankle_model(data):
"""Generate ankle model predictions for data.
Args:
data (dict): all data matrices/lists for a single subject.
Returns:
labels (dict): columns include 'probas' (from model) and 'true'
(ground truth). One row for each fold.
"""
RESULT_DIR = '../results/imus6_subjects7/sensors01_rankle/'\
'iteration0/'
data = selectFeats(data, ['ankle_r'])
test_dset = (data['X'], data['y'])
subject = str(int(data['subjectID']))
model = load_model_and_weights(subject, RESULT_DIR)
labels = make_predictions(model, test_dset)
return labels
|
e2ebea56855f6717f01d7962b00d6b8ee0df16ea
| 32,661 |
def cholesky_decomp(A):
"""
Function: int gsl_linalg_cholesky_decomp (gsl_matrix * A)
This function factorizes the positive-definite square matrix A into
the Cholesky decomposition A = L L^T. On output the diagonal and
lower triangular part of the input matrix A contain the matrix L.
The upper triangular part of the input matrix contains L^T, the diagonal
terms being identical for both L and L^T. If the matrix is not
positive-definite then the decomposition will fail, returning the
error code GSL_EDOM.
"""
An = array_typed_copy(A)
_gslwrap.gsl_linalg_cholesky_decomp(An)
return An
|
004669d55eb58df99e1724beab8453ce4140697f
| 32,662 |
def is_block_comment(line):
""" Entering/exiting a block comment """
line = line.strip()
if line == '"""' or (line.startswith('"""') and not line.endswith('"""')):
return True
if line == "'''" or (line.startswith("'''") and not line.endswith("'''")):
return True
return False
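# e.g. is_block_comment('"""Start of a multi-line docstring') -> True   (entering a block)
#      is_block_comment('"""one-line docstring"""')           -> False  (opens and closes on one line)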
|
ea38c248964eeaec1eab928182022de6ecccad69
| 32,663 |
import functools
import numpy as np
import tensorflow as tf
def generate_keras_segmentation_dual_transform(*layers):
"""Generates a `dual_transform` pipeline from Keras preprocessing layers.
This method takes in Keras preprocessing layers and generates a
transformation pipeline for the `dual_transform` argument in
*semantic segmentation* loaders, which applies the transform in the
same fashion to both the image and annotation.
This is due to the fact that TensorFlow has its operation-level
random states different than its module-level random state, so
the layers need to have their seeds manually set in order to work.
In essence, for each of the preprocessing layers passed, this
method conducts the following operations:
> def preprocessing_transform(image, annotation):
> layer = functools.partial(KerasPreprocessingLayer, **kwargs)
> seed = np.random.randint(BUFFER_SIZE) # up to sys.maxsize
> image = layer(image, seed = seed)
> annotation = layer(annotation, seed = seed)
> return image, annotation
It then repeats this transform for all of the preprocessing layers
passed, and returns a method which has this behavior wrapped into
it and can perform it when the preprocessing is actually conducted.
Parameters
----------
layers : Any
Either a Sequential model with preprocessing layers, or a
set of instantiated preprocessing layers.
    Returns
    -------
    Callable
        A function ``(image, annotation) -> (image, annotation)`` that applies
        each of the given preprocessing layers to both tensors with a shared
        random seed.
    """
if len(layers) == 1:
if isinstance(layers[0], tf.keras.Sequential):
layers = layers[0].layers
# These methods perform the behavior indicated in the
# code snippet above (for each of the layers given).
def _single_preprocessing_layer_base(layer_, build_dict):
def _internal(image, annotation, seed):
instantiated_layer = functools.partial(layer_, **build_dict)
seed_update = {}
if seed is not None:
seed_update['seed'] = seed
image = instantiated_layer(**seed_update)(image)
annotation = instantiated_layer(**seed_update)(annotation)
return image, annotation
return _internal
preprocessing_methods, use_seeds = [], []
for layer in layers:
config = layer.get_config()
if 'seed' in config:
config.pop('seed')
use_seeds.append(True)
else:
use_seeds.append(False)
preprocessing_methods.append(
_single_preprocessing_layer_base(layer.__class__, config))
def _execute_preprocessing(layers_, use_seeds_):
def _execute(image, annotation):
for p_layer, seed_ in zip(layers_, use_seeds_):
seed = np.random.randint(2147483647) if seed_ else None
image, annotation = p_layer(image, annotation, seed = seed)
return image, annotation
return _execute
return _execute_preprocessing(preprocessing_methods, use_seeds)
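# Hedged usage sketch: the layer choices below are illustrative and assume a
# TensorFlow version that exposes tf.keras.layers.RandomFlip / RandomRotation
# (TF >= 2.6); the shared seed makes both tensors receive the same transform.
dual_transform = generate_keras_segmentation_dual_transform(
    tf.keras.layers.RandomFlip("horizontal"),
    tf.keras.layers.RandomRotation(0.1),
)
images = tf.random.uniform((2, 64, 64, 3))       # batch of RGB images
annotations = tf.random.uniform((2, 64, 64, 1))  # matching (float) masks
aug_images, aug_annotations = dual_transform(images, annotations)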
|
6f10b4cf1ce8fc34aa71588682db99cbe6a01537
| 32,664 |
def get_before_deploy_steps():
"""Get pre-deploy steps and their associated model aliases.
Get a map of configuration steps to model aliases. If there are
configuration steps which are not mapped to a model alias then these are
    associated with the DEFAULT_MODEL_ALIAS.
eg if test.yaml contained:
before_deploy:
- conf.method1
- conf.method2
- model_alias1:
- conf.method3
then get_before_deploy_steps() would return:
{
'default_alias': ['conf.method1', 'conf.method2'],
'model_alias1': ['conf.method3']}
:returns: A dict mapping config steps to model aliases
:rtype: Dict[str, List[str]]
"""
return _concat_model_alias_maps(
get_charm_config().get('before_deploy', []))
|
3277f8827f204f358501afeb781d6e2934727327
| 32,665 |
def pre_ml_preprocessing(df, initial_to_drop, num_cols, target_var = None, num_cols_threshold = 0.9, low_var_threshold = 0.9):
"""Process data for machine learning preprocessing.
    Low-variance categorical features and highly correlated numerical features are dropped from the DataFrame. This process helps in dimensionality reduction.
Parameters
----------
df: DataFrame
DataFrame to process.
    initial_to_drop: list
        List of initial columns to drop.
    num_cols: list
        Numerical columns to check for high pairwise correlation.
    target_var: str
        Target variable to exclude from analysis.
        Default(value = None)
num_cols_threshold: float64
Threshold correlation value for numerical features.
Default(value = 0.9)
    low_var_threshold: float64
        Threshold on the normalized frequency of the most common value; columns above it are treated as low variance.
        Default(value = 0.9)
Returns
-------
DataFrame
"""
# check for valid dataframe
if isinstance(df, pd.DataFrame):
# extract dataframe columns
df_cols = df.columns.tolist()
# check if all columns to drop are in df_cols
membership = all(col in df_cols for col in initial_to_drop)
# if membership
if membership:
for col in initial_to_drop:
# drop col
print("Dropping: {}".format(col))
df.drop(col, axis=1, inplace=True)
else:
not_cols = []
for col in initial_to_drop:
if col not in df_cols:
not_cols.append(col)
raise utils.InvalidColumn(not_cols)
# drop high correlated features
# df = funcs.dim_redux(df, num_cols, threshold = num_cols_threshold)
# drop low variance features
# df = funcs.drop_low_var_cols(df, target_var = target_var, unique_val_threshold = low_var_threshold)
else:
raise utils.InvalidDataFrame(df)
return df
|
20325bc6d650cf9bccd06bf2547f36bc29e5d446
| 32,666 |
import json
def pool_status():
"""Fetch overall pool status."""
d = make_request('getpoolstatus')
return json.loads(d)
|
0793dd3c5bb07f670b84b905fb030e278d4a9cba
| 32,667 |
from typing import List
from typing import Dict
import requests
def get_airtable_data(url: str, token: str) -> List[Dict[str, str]]:
"""
Fetch all data from airtable.
    Returns a list of records, where each record is a dict like
{'my-key': 'George W. Bush', 'my-value': 'Male'}
"""
response = requests.request("GET", url, headers={"Authorization": f"Bearer {token}"})
records = response.json()["records"]
return [record["fields"] for record in records]
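# Hedged usage sketch; the base URL and token below are placeholders, not real
# Airtable credentials, so the call is left commented out:
AIRTABLE_URL = "https://api.airtable.com/v0/appXXXXXXXXXXXXXX/MyTable"  # hypothetical
AIRTABLE_TOKEN = "patXXXXXXXXXXXXXX"                                    # hypothetical
# records = get_airtable_data(AIRTABLE_URL, AIRTABLE_TOKEN)
# records -> [{'my-key': 'George W. Bush', 'my-value': 'Male'}, ...]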
|
f91326938a663ddedd8b4a10f796c7c36981da4e
| 32,668 |
def displayname(user):
"""Returns the best display name for the user"""
return user.first_name or user.email
|
aecebc897803a195b08cdfb46dbeb4c9df9ede09
| 32,669 |
from decimal import Decimal
def format_satoshis_plain(x, decimal_point = 8):
"""Display a satoshi amount scaled. Always uses a '.' as a decimal
point and has no thousands separator"""
scale_factor = pow(10, decimal_point)
    return "{:.{prec}f}".format(Decimal(x) / scale_factor, prec=decimal_point).rstrip('0').rstrip('.')
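# Usage sketch (illustrative amounts, default decimal_point=8):
assert format_satoshis_plain(123456789) == "1.23456789"
assert format_satoshis_plain(100000000) == "1"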
|
2afa53e820d080bef762ab78fd02f09b60fe355f
| 32,670 |
def get_aps(dispatcher, apic, tenant):
"""Display Application Profiles configured in Cisco ACI."""
if not apic:
dispatcher.prompt_from_menu("aci get-aps", "Select APIC Cluster", apic_choices)
return False
try:
aci_obj = NautobotPluginChatopsAci(**aci_creds[apic])
except KeyError:
dispatcher.send_markdown(
f"Sorry, there is no cluster configured with name {dispatcher.bold(apic)}", ephemeral=True
)
return False
if not tenant:
        logger.info(f"Getting list of tenants from APIC {aci_creds[apic]['base_uri']}")
try:
tenant_list = aci_obj.get_tenants()
except (RequestConnectError, RequestHTTPError) as e:
dispatcher.send_error(e)
logger.error(e)
return CommandStatusChoices.STATUS_FAILED
tenant_choices = [(tenant, tenant) for tenant in tenant_list]
tenant_choices.append(("all", "all"))
dispatcher.prompt_from_menu(f"aci get-aps {apic}", "Select Tenant", tenant_choices)
return False
send_wait_msg(dispatcher)
logger.info(f"Getting list of Application Profiles from APIC {aci_creds[apic]['base_uri']} in tenant {tenant}")
try:
ap_list = aci_obj.get_aps(tenant)
except (RequestConnectError, RequestHTTPError) as e:
dispatcher.send_error(e)
logger.error(e)
return CommandStatusChoices.STATUS_FAILED
table_fields = ["Tenant", "Application Profile"]
table_rows = [[ap["tenant"], ap["ap"]] for ap in ap_list]
send_logo(
dispatcher,
"get-aps",
f"get-aps {apic} {tenant}",
args=[("APIC", aci_creds[apic]["base_uri"]), ("Tenant", tenant)],
)
# TO-DO: add title argument
dispatcher.send_large_table(table_fields, table_rows, title="get-aps")
return CommandStatusChoices.STATUS_SUCCEEDED
|
de7ab1a4680edb735c7cf2e46098b52d88ecb865
| 32,671 |
def delete_dnt(id):
"""
Function deleting specific department by its id
:param id: id of the specific department an admin wants to delete
:return: redirects user to the departments page
"""
if session.get('user') and session.get('user')[0] == ADMIN:
data = f'?login={session["user"][0]}&password={session["user"][1]}&id={id}&page=True'
return redirect('/api/departments/del' + data)
|
12f87af2a657eb631aabfac7acb88ce2e5abf07d
| 32,672 |
def price(listing):
"""Score based on number of bedrooms."""
if listing.price is None:
return -4000
score = 0.0
bedrooms = 1.0 if listing.bedrooms is None else listing.bedrooms
if (bedrooms * 1000.0) > listing.price:
score += -1000 - ((bedrooms * 750.0) / listing.price - 1.0) * 1000
return -1.0 * listing.price + score
|
7087da4e4f9dedf03fdaed5661029f1be0703f0e
| 32,673 |
def entropy_to_mnemonic(entropy: bytes) -> str:
"""Convert entropy bytes to a BIP39 english mnemonic
Entropy can be 16, 20, 24, 28, 32, 36 or 40 bytes"""
try:
return wally.bip39_mnemonic_from_bytes(None, entropy)
except ValueError:
raise InvalidEntropy
|
1e4de2dfdde71a8ab241fae05385a6ce8c8a10f5
| 32,675 |
async def read_run(catalog_name: str, run_uid: str):
"""Summarize the run for the given uid."""
summary = databroker.run_summary(catalog_name, run_uid)
if summary is None:
raise HTTPException(status_code=404, detail="Not found")
return summary
|
cf36b8bffa034392c8943ea179e67f28d12102e3
| 32,676 |
import shlex
def shell_quote(*args: str) -> str:
"""
Takes command line arguments as positional args, and properly quotes each argument to make it safe to
pass on the command line. Outputs a string containing all passed arguments properly quoted.
Uses :func:`shlex.join` on Python 3.8+, and a for loop of :func:`shlex.quote` on older versions.
Example::
>>> print(shell_quote('echo', '"orange"'))
echo '"orange"'
"""
return shlex.join(args) if hasattr(shlex, 'join') else " ".join([shlex.quote(a) for a in args]).strip()
|
b2d0ecde5a2569e46676fed81ed3ae034fb8d36c
| 32,677 |
def is_resources_sufficient(order):
"""Returns True when order can be made, False if ingredients are insufficient."""
    check = [
        MENU[order][key] <= resources[key]
        for key in MENU[order]
    ]
    return False not in check[:-1]
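# Hedged sketch of the globals this helper expects; MENU and resources are not
# defined in this snippet, so the shapes below are assumptions. The [:-1] slice
# appears to skip a final non-ingredient entry such as "cost":
MENU = {"latte": {"water": 200, "milk": 150, "coffee": 24, "cost": 2.5}}  # hypothetical
resources = {"water": 300, "milk": 200, "coffee": 100, "cost": 0}         # hypothetical
assert is_resources_sufficient("latte") is True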
|
5454fca8ac7574e74c795d205a9355839f5320f2
| 32,678 |
import numpy as np
def mode(arr):
"""Return the mode, i.e. most common value, of NumPy array <arr>"""
uniques, counts = np.unique(arr, return_counts=True)
return uniques[np.argmax(counts)]
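# Usage sketch (illustrative input):
assert mode(np.array([1, 2, 2, 3, 2])) == 2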
|
abb8a79ff8ac4f28fe5e6e4491146c90ee680cd4
| 32,679 |
import torch
def normalized_state_to_tensor(state, building):
"""
Transforms a state dict to a pytorch tensor.
The function ensures the correct ordering of the elements according to the list building.global_state_variables.
It expects a **normalized** state as input.
"""
ten = [[ state[sval] for sval in building.global_state_variables ]]
return torch.tensor(ten)
|
4aea246f388f941290d2e4aeb6da16f91e210caa
| 32,680 |
from itertools import repeat, takewhile
def get_num_of_lines_in_file(filename):
    """Open the file and get the number of lines to use with tqdm as a progress bar."""
    with open(filename, "rb") as file_to_read:
        buffer_generator = takewhile(
            lambda x: x, (file_to_read.raw.read(1024 * 1024) for _ in repeat(None))
        )
        return sum(buf.count(b"\n") for buf in buffer_generator if buf)
|
00d4a042a904140645d7f1e9fe6d85b86031fc3b
| 32,681 |
def translate_lat_to_geos5_native(latitude):
"""
The source for this formula is in the MERRA2
Variable Details - File specifications for GEOS pdf file.
The Grid in the documentation has points from 1 to 361 and 1 to 576.
The MERRA-2 Portal uses 0 to 360 and 0 to 575.
    latitude: float
        Signed degrees (use +/- instead of N/S).
    Returns the 0-based latitude index on the GEOS-5 native grid.
    """
return ((latitude + 90) / 0.5)
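# Worked example: latitude 48.5 (N) maps to grid index (48.5 + 90) / 0.5 = 277.0
assert translate_lat_to_geos5_native(48.5) == 277.0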
|
b1fb1824bfefce3fd58ca7a26f9603b910779a61
| 32,682 |
def disabled_payments_notice(context, addon=None):
"""
If payments are disabled, we show a friendly message urging the developer
to make his/her app free.
"""
addon = context.get('addon', addon)
return {'request': context.get('request'), 'addon': addon}
|
b77dc41cf8ac656b2891f82328a04c1fe7aae49c
| 32,683 |
import time
def upload_fastq_collection_single(gi,history_id,fastq_single):
"""
Uploads given fastq files to the Galaxy history and builds a dataset collection.
:param gi: Galaxy instance.
    :param history_id: History id to upload into.
:param fastq_single: Single-end files to upload.
:return: The dataset collection id for the constructed dataset.
"""
single_elements=[]
if (upload_fastqs_as_links):
created_library=gi.libraries.create_library("SNVPhyl Library Dataset-"+str(time.time()))
single_elements=upload_fastqs_library_single(gi,history_id,created_library['id'],fastq_single)
else:
single_elements=upload_fastqs_single(gi,history_id,fastq_single)
# construct single collection
single_collection_name="single_datasets"
print("Building dataset collection named "+single_collection_name)
collection_response_single = gi.histories.create_dataset_collection(
history_id=history_id,
collection_description=dataset_collections.CollectionDescription(
name=single_collection_name,
type="list",
elements=single_elements
)
)
return collection_response_single['id']
|
ecf3c3be06e0f012609214dc810ebefe67968ab1
| 32,684 |
import torch
def get_world_size() -> int:
"""
Simple wrapper for correctly getting worldsize in both distributed
/ non-distributed settings
"""
return (
torch.distributed.get_world_size()
if torch.distributed.is_available() and torch.distributed.is_initialized()
else 1
)
|
0017278b422aa39ea0a8074c70a7df84885c7929
| 32,685 |
def filtraColunas(df: pd.DataFrame, colunas_interesse: list) -> pd.DataFrame:
"""
    Selects only the columns of interest from a DataFrame
    Parameters
    ----------
    df : pd.DataFrame
        DataFrame to filter
    colunas_interesse : list
        list with the headers of the columns of interest
    Returns
    -------
    DataFrame : pd.DataFrame
        DataFrame with the filtered columns
"""
df = df[colunas_interesse]
return df
|
bb2c2a0e5718e663750a420a6b70a7b9af182e4b
| 32,686 |
def PatSub(s, op, pat, replace_str):
"""Helper for ${x/pat/replace}."""
#log('PAT %r REPLACE %r', pat, replace_str)
regex, err = glob_.GlobToExtendedRegex(pat)
if err:
e_die("Can't convert glob to regex: %r", pat)
if regex is None: # Simple/fast path for fixed strings
if op.do_all:
return s.replace(pat, replace_str)
elif op.do_prefix:
if s.startswith(pat):
n = len(pat)
return replace_str + s[n:]
else:
return s
elif op.do_suffix:
if s.endswith(pat):
n = len(pat)
return s[:-n] + replace_str
else:
return s
else:
return s.replace(pat, replace_str, 1) # just the first one
else:
regex = '(%s)' % regex # make it a group
if op.do_all:
return _PatSubAll(s, regex, replace_str) # loop over matches
if op.do_prefix:
regex = '^' + regex
elif op.do_suffix:
regex = regex + '$'
m = libc.regex_first_group_match(regex, s, 0)
log('regex = %r, s = %r, match = %r', regex, s, m)
if m is None:
return s
start, end = m
return s[:start] + replace_str + s[end:]
|
d5272d9b16517ce4244573bad992031f78e40556
| 32,687 |
async def check_for_role(self, name):
"""
A function to check for the existence of a role.
"""
name = name.lower()
role = (
await GuildRoles
.query
.where(database.func.lower(GuildRoles.name) == name)
.gino
.scalar()
)
return role
|
c78b66660bcacf930a2945ccf90860b89799dfcc
| 32,688 |
def construct_futures_symbols(
symbol, start_year=2010, end_year=2014
):
"""
Constructs a list of futures contract codes
for a particular symbol and timeframe.
"""
futures = []
# March, June, September and
# December delivery codes
months = 'HMUZ'
for y in range(start_year, end_year+1):
for m in months:
futures.append("%s%s%s" % (symbol, m, y))
return futures
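# Usage sketch (illustrative symbol and years):
print(construct_futures_symbols("ES", 2010, 2011))
# -> ['ESH2010', 'ESM2010', 'ESU2010', 'ESZ2010',
#     'ESH2011', 'ESM2011', 'ESU2011', 'ESZ2011']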
|
2917a9eb008f5ab141576243bddf4769decfd1f3
| 32,689 |
def UpdateDescription(unused_ref, args, request):
"""Update description.
Args:
unused_ref: unused.
args: The argparse namespace.
request: The request to modify.
Returns:
The updated request.
"""
if args.IsSpecified('clear_description'):
request.group.description = ''
elif args.IsSpecified('description'):
request.group.description = args.description
return request
|
2c70200915cef809f91d2f0f303b622b41301cfa
| 32,690 |
def getAllChannels():
"""
    Return all valid channel names.
    :return: a list of channel names
"""
return ["Hoechst", 'ERSyto', 'ERSytoBleed', 'Ph_golgi', 'Mito']
|
d4fee111bad73f8476877a96645490aef37c07c9
| 32,691 |
def get_virtual_func_address(name, tinfo=None, offset=None):
"""
:param name: method name
:param tinfo: class tinfo
:param offset: virtual table offset
:return: address of the method
"""
address = idc.LocByName(name)
if address != idaapi.BADADDR:
return address
address = Cache.demangled_names.get(name, idaapi.BADADDR)
if address != idaapi.BADADDR:
return address + idaapi.get_imagebase()
if tinfo is None or offset is None:
return
offset *= 8
udt_member = idaapi.udt_member_t()
while tinfo.is_struct():
address = Cache.demangled_names.get(tinfo.dstr() + '::' + name, idaapi.BADADDR)
if address != idaapi.BADADDR:
return address + idaapi.get_imagebase()
udt_member.offset = offset
tinfo.find_udt_member(idaapi.STRMEM_OFFSET, udt_member)
tinfo = udt_member.type
offset = offset - udt_member.offset
|
b7466609b0119d1a110a72fdb2d012f3f40a43dc
| 32,692 |
from typing import Callable
from typing import Tuple
from typing import List
from typing import Dict
def coupler_netlist(
wg_width: float = 0.5,
gap: float = 0.236,
length: float = 20.007,
coupler_symmetric_factory: Callable = coupler_symmetric,
coupler_straight: Callable = coupler_straight,
layer: Tuple[int, int] = LAYER.WG,
layers_cladding: List[Tuple[int, int]] = [LAYER.WGCLAD],
cladding_offset: int = 3,
) -> Tuple[
Dict[str, Tuple[Component, str]],
List[Tuple[str, str, str, str]],
Dict[str, Tuple[str, str]],
]:
"""
    Coupler netlist: SBEND_L-CS-SBEND_R
    (a symmetric S-bend on each side of a straight coupling section).
"""
assert_on_1nm_grid(length)
assert_on_2nm_grid(gap)
_sbend = coupler_symmetric_factory(
gap=gap,
wg_width=wg_width,
layer=layer,
layers_cladding=layers_cladding,
cladding_offset=cladding_offset,
)
_cpl_straight = coupler_straight(
length=length,
gap=gap,
width=wg_width,
layer=layer,
layers_cladding=layers_cladding,
cladding_offset=cladding_offset,
)
components = {
"SBEND_L": (_sbend, "mirror_y"),
"SBEND_R": (_sbend, "None"),
"CS": (_cpl_straight, "None"),
}
connections = [("SBEND_L", "W0", "CS", "W0"), ("CS", "E0", "SBEND_R", "W0")]
ports_map = {
"W0": ("SBEND_L", "E0"),
"W1": ("SBEND_L", "E1"),
"E0": ("SBEND_R", "E0"),
"E1": ("SBEND_R", "E1"),
}
return components, connections, ports_map
|
d855b0ccfe435470f109b3c212083313ebe06ce1
| 32,693 |
import cupy as cp  # `cp` is assumed to be the usual CuPy alias
def accuracy(output, target, mask, inc_fix=False):
""" Calculate accuracy from output, target, and mask for the networks """
output = output.astype(cp.float32)
target = target.astype(cp.float32)
mask = mask.astype(cp.float32)
arg_output = cp.argmax(output, -1)
arg_target = cp.argmax(target, -1)
mask = mask if inc_fix else mask * (arg_target != 0)
acc = cp.sum(mask * (arg_output == arg_target), axis=(0,2))/cp.sum(mask, axis=(0,2))
return acc.astype(cp.float32)
|
c78be00f4c229f440987b1004878d6e944626dfd
| 32,694 |
def get_filterset_class(filterset_class, **meta):
"""Get the class to be used as the FilterSet"""
if filterset_class:
# If were given a FilterSet class, then set it up and
# return it
return setup_filterset(filterset_class)
return custom_filterset_factory(**meta)
|
5b6f87fd6b299e89272fcc3f15499369cc2e3f0e
| 32,695 |
def first_lower(string):
"""
Return a string with the first character uncapitalized.
Empty strings are supported. The original string is not changed.
"""
return string[:1].lower() + string[1:]
|
f3503902d18ffd5b5689c3738eb8c40e749d3e3e
| 32,696 |
import psycopg2
from psycopg2.extras import DictCursor
def read_db() -> list:
    """Read all data from the database."""
conn = psycopg2.connect(dbname=DB_NAME, user=DB_USERNAME, password=DB_PASS, host=DB_HOST)
cursor = conn.cursor(cursor_factory=DictCursor)
cursor.execute('SELECT * FROM posts')
res = cursor.fetchall()
conn.close()
return res
|
481fadc0ba86cdb36d007a924424ff073e0c844c
| 32,697 |
import numpy as np
def logit(p):
"""Logit function"""
p = np.atleast_1d(np.asfarray(p))
logit_p = np.zeros_like(p)
valid = (p > 0) & (p < 1)
if np.any(valid):
logit_p[valid] = np.log(p[valid] / (1 - p[valid]))
logit_p[p==0] = np.NINF
logit_p[p==1] = np.PINF
return logit_p
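# Usage sketch (illustrative probabilities):
print(logit([0.0, 0.5, 0.9, 1.0]))  # -> [-inf  0.  ~2.197  inf]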
|
a8d4ee374284721eeb04d2fd4409e28036a75f12
| 32,698 |
async def logout(request):
"""
Finish the session with auth, clear the cookie and stop the session being used again.
"""
session_id = request['session'].session_id
await finish_session(request, session_id, 'logout')
session = await get_session(request)
session.pop(str(session_id))
await request.app['redis'].setex(
dead_session_key(session_id), request.app['settings'].micro_session_duration + 60, b'1'
)
return json_response(status='ok')
|
0ff00ddeb34c39ef7611d8896298aeca7070df5d
| 32,699 |
def create_multiple_new_predictions(monkeypatch):
"""
Mock prediction model method to ensure no duplicate prediction in
predictions table.
"""
@classmethod
async def mockfunc_get_one_by_username(cls, username):
"""Return a user record from the users table."""
hashed_pwd = bcrypt.hash(API_USER_PASSWORD)
d = {"id": 1, "username": API_USER_NAME, "password_hash": hashed_pwd}
return d
@classmethod
async def mock_get_one_by_url(cls, url):
"""
Return None to indicate no pre-existing prediction with specified url
in the predictions table.
"""
return None
@classmethod
async def mock_create_new_record(cls, notes):
"""
Do nothing, to allow new prediction to be added to predictions table.
"""
pass
monkeypatch.setattr(
DBUser, "get_one_by_username", mockfunc_get_one_by_username
)
monkeypatch.setattr(DBPrediction, "get_one_by_url", mock_get_one_by_url)
monkeypatch.setattr(DBPrediction, "create", mock_create_new_record)
|
27608239ba1d1d5da0cf464d9ab1f752ec7057b6
| 32,700 |
def tidy_split(df, column, sep, keep=False):
"""
Split the values of a column and expand so the new DataFrame has one split
value per row. Filters rows where the column is missing.
Params
------
df : pandas.DataFrame
dataframe with the column to split and expand
column : str
the column to split and expand
sep : str
the string used to split the column's values
keep : bool
        whether to retain the presplit value as its own row
Returns
-------
pandas.DataFrame
Returns a dataframe with the same columns as `df`.
"""
indexes = list()
new_values = list()
df = df.dropna(subset=[column])
for i, presplit in enumerate(df[column].astype(str)):
values = presplit.split(sep)
if keep and len(values) > 1:
indexes.append(i)
new_values.append(presplit)
for value in values:
indexes.append(i)
new_values.append(value)
new_df = df.iloc[indexes, :].copy()
new_df[column] = new_values
return new_df
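# Hedged usage sketch with an illustrative DataFrame:
import pandas as pd
demo = pd.DataFrame({"id": [1, 2], "tags": ["a|b", "c"]})
print(tidy_split(demo, "tags", sep="|"))
#    id tags
# 0   1    a
# 0   1    b
# 1   2    c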
|
4e4138cf4f5fab924d4e9e792db5e1c954ee8032
| 32,701 |
import numpy as np
def greedyClustering(v_space, initial_pt_index, k, style):
"""
Generate `k` centers, starting with the `initial_pt_index`.
Parameters:
----------
v_space: 2D array.
The coordinate matrix of the initial geometry.
        Each row holds one vertex's coordinates, so the row index is the vertex's index.
initial_pt_index: Int.
The index of the initial point.
k: Int.
The number of centers aiming to generate.
style: String.
Indicate "last" or "mean" to choose the style of evaluation function.
"last": Calculate the farthest point by tracking the last generated center point.
Minimum distance threshold applied.
"mean": Calculate a point with the maximum average distance to all generated centers;
Calculate a point with the minimum distance variance of all generated centers;
Minimum distance threshold applied.
Returns:
----------
center_indices_list: List of int.
Containing the indices of all k centers.
"""
if style == "last":
center_indices_list = []
center_indices_list.append(initial_pt_index)
min_dist_thrshld = 0.01 # Unit: m. The radius of FM ball.
for j in range(k):
center_coord_temp = v_space[center_indices_list[j],:]
max_dist_temp = 0.0
new_center_index_temp = 0
for i in range(v_space.shape[0]):
if i in center_indices_list: continue
coord_temp = v_space[i,:]
dist_temp = np.linalg.norm(center_coord_temp.reshape(-1,3) - coord_temp.reshape(-1,3))
dist_list = []
for index in center_indices_list:
dist_temp_eachCenter = np.linalg.norm(coord_temp.reshape(-1,3) - v_space[index,:].reshape(-1,3))
dist_list.append(dist_temp_eachCenter)
min_dist_temp = np.min(dist_list)
if dist_temp > max_dist_temp and min_dist_temp >= min_dist_thrshld:
max_dist_temp = dist_temp
new_center_index_temp = i
if new_center_index_temp not in center_indices_list:
center_indices_list.append(new_center_index_temp)
return center_indices_list
elif style == "mean":
center_indices_list = []
center_indices_list.append(initial_pt_index)
min_dist_thrshld = 0.01 # Unit: m. The radius of FM ball.
while(True):
max_dist_thrshld = 0.0
new_center_index_temp = 0
for i in range(v_space.shape[0]):
if i in center_indices_list: continue
coord_temp = v_space[i,:]
dist_list = []
for index in center_indices_list:
dist_temp = np.linalg.norm(coord_temp.reshape(-1,3) - v_space[index,:].reshape(-1,3))
dist_list.append(dist_temp)
avg_dist_temp = np.mean(dist_list)
min_dist_temp = np.min(dist_list)
if avg_dist_temp > max_dist_thrshld and min_dist_temp >= min_dist_thrshld:
max_dist_thrshld = avg_dist_temp
new_center_index_temp = i
if new_center_index_temp not in center_indices_list:
center_indices_list.append(new_center_index_temp)
if len(center_indices_list) >= k: break
var_thrshld = 1e5
new_center_index_temp = 0
# ================= Picking only several points to calculate the distance variance (abandoned) ================= #
# picked_num_temp = int(np.ceil(len(center_indices_list)*0.3)) # Pick several center points to compute the distance variance.
# picked_indices_temp = generateFMIndices(picked_num_temp, len(center_indices_list))
# picked_indices_temp = [center_indices_list[i] for i in copy.deepcopy(picked_indices_temp)]
# ============================================================================================================== #
for i in range(v_space.shape[0]):
if i in center_indices_list: continue
coord_temp = v_space[i,:]
dist_list = []
# for index in picked_indices_temp: # Picking only several points to calculate the distance variance (abandoned).
for index in center_indices_list:
dist_temp = np.linalg.norm(coord_temp.reshape(-1,3) - v_space[index,:].reshape(-1,3))
dist_list.append(dist_temp)
var_dist_temp = np.var(dist_list)
min_dist_temp = np.min(dist_list)
if var_dist_temp < var_thrshld and min_dist_temp >= min_dist_thrshld:
var_thrshld = var_dist_temp
new_center_index_temp = i
if new_center_index_temp not in center_indices_list:
center_indices_list.append(new_center_index_temp)
if len(center_indices_list) >= k: break
return center_indices_list
else:
print("Wrong input of the style indicator. Will start training based on the optimal FM indices. ")
return []
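# Hedged usage sketch on synthetic geometry (the surrounding FM-index workflow
# is not part of this snippet):
rng = np.random.default_rng(0)
demo_space = rng.random((200, 3))  # 200 vertices in 3D, unit-cube coordinates
centers = greedyClustering(demo_space, initial_pt_index=0, k=5, style="last")
print(centers)  # indices of well-spread vertices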
|
ac90d1d6c461a969a2bcb71f3df3a822496b3a65
| 32,702 |
def s2_matrix(beta, gamma, device=None):
"""
Returns a new tensor corresponding to matrix formulation of the given input tensors representing
SO(3) group elements.
Args:
beta (`torch.FloatTensor`): beta attributes of group elements.
gamma (`torch.FloatTensor`): gamma attributes of group elements.
device (Device, optional): computation device. Defaults to None.
Returns:
(`torch.FloatTensor`): matrix representation of the group elements.
"""
R_beta_y = rotation_matrix(beta, "y", device)
R_gamma_z = rotation_matrix(gamma, "z", device)
return R_gamma_z @ R_beta_y
|
20d08d1b75f22bddfaf5295751f05d43ea7fe6bb
| 32,703 |
import threading
def cache(func):
"""Thread-safe caching."""
lock = threading.Lock()
results = {}
def wrapper(*args, **kwargs):
identifier = checksum(args, kwargs)
if identifier in results:
return results[identifier]
with lock:
if identifier in results:
return results[identifier]
result = func(*args, **kwargs)
results[identifier] = result
return result
return wrapper
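# Hedged usage sketch; `checksum` (used by the decorator) is defined elsewhere
# in the original module and is assumed to hash the call arguments:
@cache
def slow_square(x):
    return x * x
# slow_square(4) computes once; repeated calls with the same argument are served
# from the shared, lock-protected results dict.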
|
c17a6550ec91edcfcad6d898a1f81fa4b878757a
| 32,704 |
import platform
import subprocess as sp
def hide_console():
"""Startup-info for subprocess.Popen which hides the console on
Windows.
"""
if platform.system() != 'Windows':
return None
si = sp.STARTUPINFO()
si.dwFlags |= sp.STARTF_USESHOWWINDOW
si.wShowWindow = sp.SW_HIDE
return si
|
faf25cdf48ffddd2ae7185c457d1abe60a0ec181
| 32,706 |
from typing import Dict
from typing import Any
def merge_dicts(dict1: Dict[str, Any],
dict2: Dict[str, Any],
*dicts: Dict[str, Any]) -> Dict[str, Any]:
"""
Merge multiple dictionaries, producing a merged result without modifying
the arguments.
:param dict1: the first dictionary
:param dict2: the second dictionary
:param dicts: additional dictionaries
    :return: The merged dictionary. Keys in later dictionaries overwrite
        duplicate keys in earlier ones (dict2 over dict1, and so on).
"""
res = dict1.copy()
res.update(dict2)
for d in dicts:
res.update(d)
return res
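# Usage sketch (illustrative values):
merged = merge_dicts({"a": 1, "b": 2}, {"b": 3}, {"c": 4})
assert merged == {"a": 1, "b": 3, "c": 4}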
|
869399774cc07801e5fa95d9903e6a9f2dadfc25
| 32,707 |
import numpy as np
import jax.numpy as jnp
def decode(model, inputs):
"""Decode inputs."""
decoder_inputs = encode_onehot(np.array(['='])).squeeze()
decoder_inputs = jnp.tile(decoder_inputs, (inputs.shape[0], 1))
return model(
inputs, decoder_inputs, train=False, max_output_len=get_max_output_len())
|
632b57ab86b9dd670d3e7203197575130ded7057
| 32,708 |
from typing import Sequence
from typing import List
from itertools import combinations
def combinations_all(data: Sequence) -> List:
"""
Return all combinations of all length for given sequence
Args:
data: sequence to get combinations of
Returns:
List: all combinations
"""
comb = []
for r in range(1, len(data) + 1):
comb.extend(combinations(data, r=r))
return comb
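# Usage sketch (illustrative input):
assert combinations_all([1, 2, 3]) == [
    (1,), (2,), (3,), (1, 2), (1, 3), (2, 3), (1, 2, 3)
]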
|
7e0b31189a5afe3ac027a4c947aca08b3a2075ff
| 32,709 |