content (string, 35–762k chars) | sha1 (string, 40 chars) | id (int64, 0–3.66M) |
---|---|---|
def roundPrecision(number, precision=4):
""" Rounds the given floating point number to a certain precision, for output."""
return float(('{:.' + str(precision) + 'E}').format(number)) | 3bac0b54f1f8320c158ce0ddc14db7bbd092d2ff | 17,300 |
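A quick usage sketch for roundPrecision above (illustrative values, assuming the function is in scope); the format spec keeps `precision` significant digits via scientific notation before converting back to float.
print(roundPrecision(123456.789))       # -> 123460.0 (via '1.2346E+05')
print(roundPrecision(0.000123456, 2))   # -> 0.000123 (via '1.23E-04')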
import sys
import tempfile
from subprocess import Popen
def get_profile(host, cluster = False):
""" Download profile to temporary file and return tempfile handle. """
cmd = [GETPROF, host]
if cluster: cmd.insert(1, "-C")
if debug:
cmd.insert(1, "-D")
sys.stderr.write("%s: launching '%s'\n" % (CALL, " ".join(cmd)))
tempfh = tempfile.NamedTemporaryFile(prefix="tmp.%s." % CALL)
pipe = Popen(cmd, stdout=tempfh)
rc = pipe.wait()
if rc != 0: raise RuntimeError("'%s' returned exit status %d" % \
(" ".join(cmd), rc))
return tempfh | ba7bceb1b4047c811f5fc77e50b59c80aeae80bf | 17,301 |
def stringToNumbers(string, separators=[","], commentSymbol="#"):
    """ Return a list of strings and numbers split out of "string". Numbers will be converted into floats. Text after commentSymbol will be skipped.
    --- string: the string to be converted.
    --- separators: a list of additional separators other than whitespace to be used.
    --- commentSymbol: text after this symbol will be ignored.
    """
    if commentSymbol in string:
        string = string[:string.index(commentSymbol)].strip()  # take everything before the comment symbol, then strip
    splitted = [string]
    for separator in flatten(separators):
        splitted = FLI([x.split(separator) for x in splitted])
    splitted = FLI([x.split() for x in splitted])  # splitting on whitespace also drops empty strings
    if splitted == []:
        return []
    lineData = []
    for piece in splitted:
        if isFloat(piece):
            lineData.append(float(piece))
        else:
            lineData.append(piece)
    return lineData | dea1fb1d3257d00eaa637e1b80f23ad0e6475c38 | 17,302 |
import json
from django.http import HttpResponse
def friend_invitation_by_facebook_send_view(request): # friendInvitationByFacebookSend
    """
    Send friend invitations to the given Facebook recipients and return the result as JSON.
    :param request:
    :return:
    """
voter_device_id = get_voter_device_id(request) # We standardize how we take in the voter_device_id
recipients_facebook_id_array = request.GET.getlist('recipients_facebook_id_array[]', "")
recipients_facebook_name_array = request.GET.getlist('recipients_facebook_name_array[]', "")
facebook_request_id = request.GET.get('facebook_request_id', "")
results = friend_invitation_by_facebook_send_for_api(voter_device_id, recipients_facebook_id_array,
recipients_facebook_name_array, facebook_request_id)
json_data = {
'status': results['status'],
'success': results['success'],
'voter_device_id': voter_device_id,
'all_friends_facebook_link_created_results': results['all_friends_facebook_link_created_results'],
}
return HttpResponse(json.dumps(json_data), content_type='application/json') | 6aeb2852b9e299bc8ddd5d03fbff2d0200e5c4a0 | 17,303 |
import json
def inv_send_received(r, **attr):
"""
Confirm a Shipment has been Received
- called via POST from inv_send_rheader
- called via JSON method to reduce request overheads
"""
if r.http != "POST":
r.error(405, current.ERROR.BAD_METHOD,
next = URL(),
)
T = current.T
send_id = r.id
if not send_id:
r.error(405, "Can only confirm a single shipment.")
auth = current.auth
s3db = current.s3db
stable = s3db.inv_send
if not auth.s3_has_permission("update", stable,
record_id = send_id,
):
r.unauthorised()
db = current.db
tracktable = s3db.inv_track_item
db(stable.id == send_id).update(status = SHIP_STATUS_RECEIVED)
db(tracktable.send_id == send_id).update(status = TRACK_STATUS_ARRIVED)
if current.deployment_settings.get_inv_send_req():
rtable = s3db.inv_req
srtable = s3db.inv_send_req
reqs = db(srtable.send_id == send_id).select(srtable.req_id)
if reqs:
req_ids = [row.req_id for row in reqs]
# Get the full list of items in the request(s)
ritable = s3db.inv_req_item
for req_id in req_ids:
query = (ritable.req_id == req_id)
ritems = db(query).select(ritable.id,
ritable.item_pack_id,
ritable.quantity,
# Virtual Field
#ritable.pack_quantity,
)
# Get all Received Shipments in-system for this request
query = (stable.status == SHIP_STATUS_RECEIVED) & \
(tracktable.send_id == send_id) & \
(stable.id == srtable.send_id) & \
(srtable.req_id == req_id)
sitems = db(query).select(tracktable.item_pack_id,
tracktable.quantity,
# Virtual Field
#tracktable.pack_quantity,
)
fulfil_qty = {}
for item in sitems:
item_pack_id = item.item_pack_id
if item_pack_id in fulfil_qty:
fulfil_qty[item_pack_id] += (item.quantity * item.pack_quantity())
else:
fulfil_qty[item_pack_id] = (item.quantity * item.pack_quantity())
complete = False
for item in ritems:
if item.item_pack_id in fulfil_qty:
quantity_fulfil = fulfil_qty[item.item_pack_id]
db(ritable.id == item.id).update(quantity_fulfil = quantity_fulfil)
req_quantity = item.quantity * item.pack_quantity()
complete = quantity_fulfil >= req_quantity
# Update overall Request Status
if complete:
# REQ_STATUS_COMPLETE
db(rtable.id == req_id).update(fulfil_status = 2)
else:
# REQ_STATUS_PARTIAL
db(rtable.id == req_id).update(fulfil_status = 1)
message = T("Shipment received")
current.session.confirmation = message
current.response.headers["Content-Type"] = "application/json"
return json.dumps({"message": s3_str(message),
"tree": URL(args = [send_id, "track_item"]),
}, separators=SEPARATORS) | 645281e0e2023bd454021058e0c0ed79a61223b2 | 17,304 |
import numbers
def filter_table(table, filter_series, ignore=None):
"""
Filter a table based on a set of restrictions given in
Series of column name / filter parameter pairs. The column
names can have suffixes `_min` and `_max` to indicate
"less than" and "greater than" constraints.
Parameters
----------
table : pandas.DataFrame
Table to filter.
filter_series : pandas.Series
Series of column name / value pairs of filter constraints.
        Columns that end with '_max' will be used to create
        "less than" filters, columns that end with '_min' will be
used to create "greater than or equal to" filters.
A column with no suffix will be used to make an 'equal to' filter.
ignore : sequence of str, optional
List of column names that should not be used for filtering.
Returns
-------
filtered : pandas.DataFrame
"""
with log_start_finish('filter table', logger):
ignore = ignore if ignore else set()
filters = [_filterize(name, val)
for name, val in filter_series.iteritems()
if not (name in ignore or
(isinstance(val, numbers.Number) and
np.isnan(val)))]
return apply_filter_query(table, filters) | 5e5692c46e2dd207eca8d752912dff2b712cce18 | 17,305 |
def analogy_computation_2d(f_first_enc,
f_first_frame,
f_current_enc,
first_depth):
"""Implements the deep analogy computation."""
with tf.variable_scope('analogy_computation'):
frame_enc_diff = f_first_frame - f_first_enc
frame_enc_diff_enc = tf.layers.conv2d(
frame_enc_diff,
first_depth * 4,
3,
padding='same',
activation=tf.nn.relu,
strides=1)
f_current_enc_enc = tf.layers.conv2d(
f_current_enc,
first_depth * 4,
3,
padding='same',
activation=tf.nn.relu,
strides=1)
analogy = tf.concat([frame_enc_diff_enc, f_current_enc_enc], 3)
analogy = tf.layers.conv2d(
analogy,
first_depth * 4,
3,
padding='same',
activation=tf.nn.relu,
strides=1)
analogy = tf.contrib.layers.layer_norm(analogy)
analogy = tf.layers.conv2d(
analogy,
first_depth * 4,
3,
padding='same',
activation=tf.nn.relu,
strides=1)
return tf.layers.conv2d(
analogy,
first_depth * 4,
3,
padding='same',
activation=tf.nn.relu,
strides=1) | 376176b8c17cf9e2f9611943b1fb18da4359748d | 17,306 |
from bs4 import NavigableString
def format_to_TeX(elements):
"""returns BeautifulSoup elements in LaTeX.
"""
accum = []
for el in elements:
if isinstance(el, NavigableString):
accum.append(escape_LaTeX(el.string))
else:
accum.append(format_el(el))
return "".join(accum) | 2df2c4979fc65656b8ef7f4b514a9c4e036b3fa1 | 17,307 |
from collections import OrderedDict
def namedlist(typename, field_names, verbose=False, rename=False):
"""Returns a new subclass of list with named fields.
>>> Point = namedlist('Point', ['x', 'y'])
>>> Point.__doc__ # docstring for the new class
'Point(x, y)'
>>> p = Point(11, y=22) # instantiate with pos args or keywords
>>> p[0] + p[1] # indexable like a plain list
33
>>> x, y = p # unpack like a regular list
>>> x, y
(11, 22)
>>> p.x + p.y # fields also accessible by name
33
>>> d = p._asdict() # convert to a dictionary
>>> d['x']
11
>>> Point(**d) # convert from a dictionary
Point(x=11, y=22)
>>> p._replace(x=100) # _replace() is like str.replace() but targets named fields
Point(x=100, y=22)
"""
# Validate the field names. At the user's option, either generate an error
# message or automatically replace the field name with a valid name.
if isinstance(field_names, basestring):
field_names = field_names.replace(',', ' ').split()
field_names = [str(x) for x in field_names]
if rename:
seen = set()
for index, name in enumerate(field_names):
if (not all(c.isalnum() or c == '_' for c in name)
or _iskeyword(name)
or not name
or name[0].isdigit()
or name.startswith('_')
or name in seen):
field_names[index] = '_%d' % index
seen.add(name)
for name in [typename] + field_names:
if not all(c.isalnum() or c == '_' for c in name):
raise ValueError('Type names and field names can only contain '
'alphanumeric characters and underscores: %r'
% name)
if _iskeyword(name):
raise ValueError('Type names and field names cannot be a '
'keyword: %r' % name)
if name[0].isdigit():
raise ValueError('Type names and field names cannot start with '
'a number: %r' % name)
seen = set()
for name in field_names:
if name.startswith('_') and not rename:
raise ValueError('Field names cannot start with an underscore: '
'%r' % name)
if name in seen:
raise ValueError('Encountered duplicate field name: %r' % name)
seen.add(name)
# Fill-in the class template
fmt_kw = {'typename': typename}
fmt_kw['field_names'] = tuple(field_names)
fmt_kw['num_fields'] = len(field_names)
fmt_kw['arg_list'] = repr(tuple(field_names)).replace("'", "")[1:-1]
fmt_kw['repr_fmt'] = ', '.join(_repr_tmpl.format(name=name)
for name in field_names)
fmt_kw['field_defs'] = '\n'.join(_m_field_tmpl.format(index=index, name=name)
for index, name in enumerate(field_names))
class_definition = _namedlist_tmpl.format(**fmt_kw)
if verbose:
print(class_definition)
def _itemsetter(key):
def _itemsetter(obj, value):
obj[key] = value
return _itemsetter
# Execute the template string in a temporary namespace and support
# tracing utilities by setting a value for frame.f_globals['__name__']
namespace = dict(_itemgetter=_itemgetter,
_itemsetter=_itemsetter,
__name__='namedlist_%s' % typename,
OrderedDict=OrderedDict,
_property=property,
_list=list)
try:
exec_(class_definition, namespace)
except SyntaxError as e:
raise SyntaxError(e.message + ':\n' + class_definition)
result = namespace[typename]
# For pickling to work, the __module__ variable needs to be set to
# the frame where the named list is created. Bypass this step in
# environments where sys._getframe is not defined (Jython for
# example) or sys._getframe is not defined for arguments greater
# than 0 (IronPython).
try:
frame = _sys._getframe(1)
result.__module__ = frame.f_globals.get('__name__', '__main__')
except (AttributeError, ValueError):
pass
return result | 0d7e567e69c9d5c0038f4258b37f17ff1e6fb5b1 | 17,308 |
from typing import List
def has_good_frames(frames: List[MonitoredFrame]) -> bool:
"""
    Return True if any frame has a score larger than 3
"""
return any([frame.score and frame.score > 3 for frame in frames]) | 16e9e2bce53ae957254121438c2c4e4f8add2142 | 17,309 |
def updateHistory(conn, author, message_id, backer):
"""
Updates the history
Returns success
"""
c = conn.cursor()
c.execute(prepareQuery("INSERT INTO votes_history (user_id, message_id, backer) VALUES (?,?,?)"), (int(author), int(message_id), int(backer), ))
conn.commit()
return c.rowcount > 0 | 6e0f06ace0e3600c307fe3f5848da583c930bbe8 | 17,310 |
import six
def pyc_loads(data):
"""
Load a .pyc file from a bytestring.
Arguments:
data(bytes): The content of the .pyc file.
Returns:
PycFile: The parsed representation of the .pyc file.
"""
return pyc_load(six.BytesIO(data)) | 99b4b7d07d00a0c5098f1a3ded7c1929e2a4b231 | 17,311 |
import numpy
import matplotlib.pyplot as plt
def time_series_figure(time_series, polynomial, drift, snr):
""" Return a matplotlib figure containing the time series and its
polynomial model.
"""
figure = plt.figure()
plot = figure.add_subplot(111)
plot.grid()
plt.title("Drift: {0: .1f}% - SNR: {1: .1f}dB".format(
drift * 100, 10 * numpy.log10(snr)))
x = numpy.arange(2, 2 + len(time_series))
model = numpy.polyval(polynomial, x)
plot.plot(x, time_series, "k-")
plot.plot(x, model, "k-")
plot.axes.set_xlabel("Volume number")
plot.axes.set_ylabel("Intensity")
return figure | 132aaf22108999e75ec6ca797753724d3198b2c8 | 17,312 |
def with_metaclass(meta, *bases):
"""Create a base class with a metaclass."""
class metaclass(type):
def __new__(cls, name, this_bases, d):
return meta(name, bases, d)
return type.__new__(metaclass, 'temporary_class', (), {}) | 0f20f8414adf1d324fbe8541a27cad2219e87794 | 17,313 |
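A small, self-contained sketch of how with_metaclass is typically used; the Registry metaclass below is my own illustration, not from the source.
class Registry(type):
    """Illustrative metaclass that records every class it creates."""
    classes = []
    def __new__(mcs, name, bases, namespace):
        cls = super(Registry, mcs).__new__(mcs, name, bases, namespace)
        Registry.classes.append(cls)
        return cls
class Plugin(with_metaclass(Registry, object)):
    pass
assert type(Plugin) is Registry    # Plugin was actually built by Registry
assert Plugin in Registry.classes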
from uuid import UUID
from flask import jsonify, request
def log(cm_uuid: UUID):
"""
:GET: returns the most recent logs for the specified control module. accepts the following url parameters
- limit: the number of logs that should be returned
- offset: offset the number of logs that should be returned
- log_type: the type of log that should be returned
:POST: inserts log with data into the database
"""
if request.method == 'GET':
limit = 20
offset = 0
log_type = "%"
        # query args arrive as strings; cast so offset*limit below stays numeric
        if limit_arg := request.args.get('limit'):
            limit = int(limit_arg)
        if offset_arg := request.args.get('offset'):
            offset = int(offset_arg)
        if log_type_arg := request.args.get('log_type'):
            log_type = log_type_arg
logs = CMLog.query.filter_by(cm_uuid=cm_uuid, log_type=log_type)\
.order_by(CMLog.timestamp.desc())\
.limit(limit)\
.offset(offset*limit)\
.all()
returnval = dict()
returnval['cm_uuid'] = logs[0].cm_uuid
returnval['status'] = 'success'
returnval['data'] = []
for current_log in logs:
log_data = {
'id': current_log.id,
'log_type': current_log.log_type,
'timestamp': current_log.timestamp,
'data': current_log.data
}
returnval['data'].append(log_data)
return jsonify(returnval), 200
if request.method == 'POST':
if not request.is_json:
return jsonify({
"status": "error",
"message": "missing json"
}), 415
if not CMMeta.query.filter_by(uuid=cm_uuid).first():
return jsonify({
'status': 'error',
'message': 'invalid control module uuid'
}), 404
log_type = request.json.get('log_type')
data = request.json.get('data')
error = False
missing = None
if not log_type:
error = True
missing = "log_type"
if not data:
error = True
missing = "data"
if error:
return jsonify({
"status": "error",
"message": "missing " + missing
}), 422
if not CMLogTypes.query.filter_by(cm_uuid=cm_uuid, log_type=log_type).first():
CMLogTypes.create(cm_uuid, log_type)
return jsonify(CMLog.create(cm_uuid, log_type, request.json.get("data"))), 201 | fa129b78497f44a4781e5fa2103abbb232294a7a | 17,314 |
def RetrieveResiduesNumbers(ResiduesInfo):
"""Retrieve residue numbers."""
# Setup residue IDs sorted by residue numbers...
ResNumMap = {}
for ResName in ResiduesInfo["ResNames"]:
for ResNum in ResiduesInfo["ResNum"][ResName]:
ResNumMap[ResNum] = ResName
ResNumsList = []
if len(ResNumMap):
ResNumsList = sorted(ResNumMap, key = int)
return ResNumsList | e9f522af368a8a058792b26f9cf53b1114e241ef | 17,315 |
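An illustrative input for RetrieveResiduesNumbers; the dict layout is inferred from the code above, not taken from the original module.
ResiduesInfo = {"ResNames": ["GLY", "ALA"],
                "ResNum": {"GLY": ["3"], "ALA": ["1", "10"]}}
print(RetrieveResiduesNumbers(ResiduesInfo))   # ['1', '3', '10']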
from ftplib import FTP
def get_data_with_station(station_id):
    """
    Download the GHCN-Daily .dly file for the given station ID (string)
    and return it as a pandas DataFrame.
    """
    print("\nGETTING DATA FOR STATION: ", station_id)
ftp = FTP('ftp.ncdc.noaa.gov')
ftp.login()
ftp.cwd('pub/data/ghcn/daily/all')
ftp.retrbinary('RETR '+station_id+'.dly', open(station_id+'.dly', 'wb').write)
ftp.quit()
outfile=station_id+".dly"
dt = read_ghcn_data_file(filename=outfile)
dt = dt.rename_axis("DATE", axis="columns")
print('{} STATION DATA IS TAKEN'.format(station_id))
return dt | 093b6f7d88335e3ef591cedee7c362bf3b1468d6 | 17,316 |
def _canonicalize(path):
"""Makes all paths start at top left, and go clockwise first."""
# convert args to floats
path = [[x[0]] + list(map(float, x[1:])) for x in path]
# _canonicalize each subpath separately
new_substructures = []
for subpath in _separate_substructures(path):
leftmost_point, leftmost_idx = _get_leftmost_point(subpath)
reordered = ([['M', leftmost_point[0], leftmost_point[1]]] + subpath[leftmost_idx + 1:] + subpath[1:leftmost_idx + 1])
new_substructures.append((reordered, leftmost_point))
new_path = []
first_substructure_done = False
should_flip_cardinality = False
for sp, _ in sorted(new_substructures, key=lambda x: (x[1][1], x[1][0])):
if not first_substructure_done:
      # we're looking at the first substructure now, so we can determine whether we
      # will flip the cardinality of the whole icon or not
should_flip_cardinality = not _is_clockwise(sp)
first_substructure_done = True
if should_flip_cardinality:
sp = _make_clockwise(sp)
new_path.extend(sp)
# convert args to strs
path = [[x[0]] + list(map(str, x[1:])) for x in new_path]
return path | 3f5aa9a4ac75417935415b5dcc561a1057b465e5 | 17,317 |
from typing import Tuple
def extract_meta(src: bytes) -> Tuple[int, int]:
"""
Return a 2-tuple:
- the length of the decoded block
- the number of bytes that the length header occupied.
"""
v, n = uvarint(src)
if n <= 0 or v > 0xFFFFFFFF:
raise CorruptError
if v > 0x7FFFFFFF:
raise TooLargeError
return v, n | 4bb02fd1c8b9870b450fcbca790fa94870a82cf2 | 17,318 |
def metric_source_configuration_table(data_model, metric_key, source_key) -> str:
"""Return the metric source combination's configuration as Markdown table."""
configurations = data_model["sources"][source_key].get("configuration", {}).values()
relevant_configurations = [config for config in configurations if metric_key in config["metrics"]]
if not relevant_configurations:
return ""
markdown = markdown_table_header("Configuration", "Value")
for configuration in sorted(relevant_configurations, key=lambda config: str(config["name"])):
name = configuration["name"]
values = ", ".join(sorted(configuration["value"], key=lambda value: value.lower()))
markdown += markdown_table_row(name, values)
markdown += "\n"
return markdown | 718a69df60272b7cdfafdbfeff3136a1aac49707 | 17,319 |
import requests
import json
def search(keyword, limit=20):
"""
    Search the iTunes podcast directory for the given keyword.
Parameter:
keyword = A string containing the keyword to search.
limit: the maximum results to return,
The default is 20 results.
returns:
A JSON object.
"""
keyword = keyword.replace(' ', '+') # Replace white space with +.
# Set user agent.
user_agent = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/35.0.1916.47 Safari/537.36'
headers = {'User-Agent': user_agent}
# ITunes podcast search URL.
itunesurl = 'https://itunes.apple.com/search?term=%s&country=us&limit=%d&entity=podcast' % (keyword, limit)
req = requests.get(itunesurl, headers=headers)
return json.loads(req.text) | 922cd7dfaea30e7254c459588d28c33673281dac | 17,320 |
def job_checks(name: str):
"""
Check if the job has parameters
and ask to insert them printing
the default value
"""
p = job_parameters(name)
new_param = {}
if p:
ask = Confirm.ask(
f"Job [bold green] {name} [/bold green] has parameters, do you want to insert them?", default=True
)
if ask:
for k, v in p.items():
t = Prompt.ask(f"{k}", default=f"{v}")
new_param[k] = t
return new_param
else:
ask = Confirm.ask(
f"Job [bold green] {name} [/bold green] has no parameters, do you want to proceed?", default=True
)
if ask:
return new_param
else:
exit(0) | 2f64820ec6b180cc6c626fab0616774d7e9086b2 | 17,321 |
import re
def post():
"""Post new message"""
error = None
if request.method == 'POST'\
and request.form['message'] != '' and request.form['message'] is not None:
user_zid = session['logged_in']
post_message = request.form['message']
post_privacy = request.form['post_privacy']
# print('post_privacy: "{}"'.format(post_privacy))
cur_time_txt = time_date2txt()
db = get_db()
db.execute('INSERT INTO POST (zid, time, message, privacy) values (?, ?, ?, ?)',
[user_zid, cur_time_txt, post_message, post_privacy])
db.commit()
for m_zid in set(re.findall(r'z[0-9]{7}', post_message)):
m_user = get_user(zid=m_zid)
if m_user and m_user['email']:
email_subj = '{} Mentioned you in his post!!'.format(g.user['full_name'])
path = url_for('search', _external=True)+'?suggestion={}'.format(m_zid)
print(path)
email_body = 'Check the link to check the post: <a href="{0}">{0}</a>'.format(path)
send_email(m_user['email'], email_subj, email_body)
elif request.form['message'] == '' or request.form['message'] is None:
error = "Post cannot be empty"
return redirect(url_for('index', new_post_error=error)) | b59c5fb30d4b6ce499d0199fb794be38c5c2dfdf | 17,322 |
def data_context_connectivity_context_connectivity_serviceuuid_end_pointlocal_id_connection_end_pointtopology_uuidnode_uuidnode_edge_point_uuidconnection_end_point_uuid_get(uuid, local_id, topology_uuid, node_uuid, node_edge_point_uuid, connection_end_point_uuid): # noqa: E501
"""data_context_connectivity_context_connectivity_serviceuuid_end_pointlocal_id_connection_end_pointtopology_uuidnode_uuidnode_edge_point_uuidconnection_end_point_uuid_get
returns tapi.connectivity.ConnectionEndPointRef # noqa: E501
:param uuid: Id of connectivity-service
:type uuid: str
:param local_id: Id of end-point
:type local_id: str
:param topology_uuid: Id of connection-end-point
:type topology_uuid: str
:param node_uuid: Id of connection-end-point
:type node_uuid: str
:param node_edge_point_uuid: Id of connection-end-point
:type node_edge_point_uuid: str
:param connection_end_point_uuid: Id of connection-end-point
:type connection_end_point_uuid: str
:rtype: TapiConnectivityConnectionEndPointRef
"""
return 'do some magic!' | 24fe9a977542f52d8bc8cc765c63ce32882d9f76 | 17,323 |
import numpy as np
def softmax(logits):
"""Take the softmax over a set of logit scores.
Args:
logits (np.array): a 1D numpy array
Returns:
a 1D numpy array of probabilities, of the same shape.
"""
if not isinstance(logits, np.ndarray):
logits = np.array(logits) # 1D array
logits = logits - np.max(logits) # re-center
exp_logits = np.exp(logits)
probs = exp_logits / np.sum(exp_logits)
return probs | 7e1897748172e095ac58ce7111bed73caa4e2cb6 | 17,324 |
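A quick numeric check for softmax above (assuming numpy is imported as np and the function is in scope):
probs = softmax([1.0, 2.0, 3.0])
print(probs)          # approx. [0.09003057 0.24472847 0.66524096]
print(probs.sum())    # 1.0 up to floating-point error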
def _aggregate(df, variable, components=None, method=np.sum):
"""Internal implementation of the `aggregate` function"""
# list of variables require default components (no manual list)
if islistable(variable) and components is not None:
raise ValueError(
"Aggregating by list of variables does not support `components`!"
)
mapping = {}
msg = "Cannot aggregate variable '{}' because it has no components!"
# if single variable
if isstr(variable):
# default components to all variables one level below `variable`
components = components or df._variable_components(variable)
if not len(components):
logger.info(msg.format(variable))
return
for c in components:
mapping[c] = variable
# else, use all variables one level below `variable` as components
else:
for v in variable if islistable(variable) else [variable]:
_components = df._variable_components(v)
if not len(_components):
logger.info(msg.format(v))
continue
for c in _components:
mapping[c] = v
# rename all components to `variable` and aggregate
_df = df._data[df._apply_filters(variable=mapping.keys())]
_df.index = replace_index_values(_df, "variable", mapping)
return _group_and_agg(_df, [], method) | 25c36e6180aa5509ced7513c841eb9cc4450b41b | 17,325 |
import inspect
def get_attributes(klass):
"""Get all class attributes.
"""
attributes = list()
    for attr, value in inspect.getmembers(
            klass, lambda x: not inspect.isroutine(x)):
if not (attr.startswith("__") and attr.endswith("__")):
attributes.append(attr)
return attributes | 6a72db39a9982b6a4ad5462ff9a4695f9cca6ce0 | 17,326 |
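An illustrative call to get_attributes; the Config class below is hypothetical.
class Config:
    host = "localhost"
    port = 8080
    def connect(self):
        pass
print(get_attributes(Config))   # ['host', 'port']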
import io
def render(html):
"""Convert HTML to a PDF"""
output = io.BytesIO()
surface = cairo.PDFSurface(output, 595, 842)
ctx = cairo.Context(surface)
cffictx = cairocffi.Context._from_pointer(cairocffi.ffi.cast('cairo_t **', id(ctx) + object.__basicsize__)[0], incref=True)
html = etree.parse(io.StringIO(html), etree.HTMLParser())
for pdf in html.xpath("//img[substring(@src, string-length(@src) - 3)=\'.pdf\']"):
for prev in pdf.xpath("preceding-sibling::*"):
pdf.getparent().remove(prev)
pdfsrc = pdf.get("src")
pdf.getparent().remove(pdf)
section = deepcopy(html)
for nextpdf in section.xpath("//img[substring(@src, string-length(@src) - 3)=\'.pdf\']"):
for nextel in nextpdf.xpath("following-sibling::*"):
nextpdf.getparent().remove(nextel)
nextpdf.getparent().remove(nextpdf)
html_pages = weasyprint.HTML(tree=section).render().pages
surface.set_size(html_pages[0].width * 72 / 96.0, html_pages[0].height * 72 / 96.0)
if pdfsrc != "blank.pdf":
with weasyprint.default_url_fetcher(str(pdfsrc))['file_obj'] as fetch:
pdf_pages = Poppler.Document.new_from_stream(Gio.MemoryInputStream.new_from_bytes(GLib.Bytes.new_take(fetch.read())), -1, None, None)
else:
pdf_pages = None
for pageno in range(max(pdf_pages.get_n_pages() if pdf_pages else 0, len(html_pages))):
if pdf_pages and pageno < pdf_pages.get_n_pages():
pdf_pages.get_page(pageno).render_for_printing(ctx)
if pageno < len(html_pages):
html_pages[pageno].paint(cffictx, scale=72 / 96.0)
ctx.show_page()
surface.finish()
return output.getbuffer() | 9b0d4c252b7b7bf8dcdcab32e13cf183cef0312d | 17,327 |
import numpy as np
def computeFlowImage(u,v,logscale=True,scaledown=6,output=False):
"""
topleft is zero, u is horiz, v is vertical
red is 3 o'clock, yellow is 6, light blue is 9, blue/purple is 12
"""
colorwheel = makecolorwheel()
ncols = colorwheel.shape[0]
radius = np.sqrt(u**2 + v**2)
if output:
print("Maximum flow magnitude: %04f" % np.max(radius))
if logscale:
radius = np.log(radius + 1)
if output:
print("Maximum flow magnitude (after log): %0.4f" % np.max(radius))
radius = radius / scaledown
if output:
print("Maximum flow magnitude (after scaledown): %0.4f" % np.max(radius))
rot = np.arctan2(-v, -u) / np.pi
fk = (rot+1)/2 * (ncols-1) # -1~1 maped to 0~ncols
k0 = fk.astype(np.uint8) # 0, 1, 2, ..., ncols
k1 = k0+1
k1[k1 == ncols] = 0
f = fk - k0
ncolors = colorwheel.shape[1]
img = np.zeros(u.shape+(ncolors,))
for i in range(ncolors):
tmp = colorwheel[:,i]
col0 = tmp[k0]
col1 = tmp[k1]
col = (1-f)*col0 + f*col1
idx = radius <= 1
# increase saturation with radius
col[idx] = 1 - radius[idx]*(1-col[idx])
# out of range
col[~idx] *= 0.75
img[:,:,i] = np.floor(255*col).astype(np.uint8)
return img.astype(np.uint8) | 87690e34ae1509a63df982b68e35346be8b5d8dd | 17,328 |
def day_display(year, month, all_month_events, day):
"""
Returns the events that occur on the given day.
Works by getting all occurrences for the month, then drilling
down to only those occurring on the given day.
"""
# Get a dict with all of the events for the month
count = CountHandler(year, month, all_month_events).get_count()
pks = [x[1] for x in count[day]] # list of pks for events on given day
# List enables sorting.
# See the comments in EventMonthView in views.py for more info
day_events = list(Event.objects.filter(pk__in=pks).order_by(
'start_date').prefetch_related('cancellations'))
day_events.sort(key=lambda x: x.l_start_date.hour)
return day_events | e17df37bb8908a557b9cf1175c3567b460a35385 | 17,329 |
import math
def decimal_to_octal(num):
"""Convert a Decimal Number to an Octal Number."""
octal = 0
counter = 0
while num > 0:
remainder = num % 8
octal = octal + (remainder * math.pow(10, counter))
counter += 1
num = math.floor(num / 8) # basically /= 8 without remainder if any
# This formatting removes trailing '.0' from `octal`.
return "{0:g}".format(float(octal)) | e6bbc23a2235812c1e2298e8a0be8396c06b1c1f | 17,330 |
import pandas as pd
def get_formatted_dates(date_ranges):
    """Returns list of dates specified by date_ranges, formatted for Swiftly API use.
date_ranges is a list of dict, with each dict specifying a range of dates
in string format. sample dict for Tue/Wed/Thu in Sep/Oct:
{
"start_date": "09-01-2019",
"end_date": "10-31-2019",
"include_days": [0, 1, 1, 1, 0, 0, 0]
}
"""
final_date_list = []
for date_range in date_ranges:
timestamp_list = pd.bdate_range(
start=date_range["start_date"],
end=date_range["end_date"],
weekmask=date_range["include_days"],
freq="C"
).to_list()
final_date_list += [ts.strftime("%m-%d-%Y") for ts in timestamp_list]
return final_date_list | db20459ffacb8cb621acdf40b0bdcbc203787680 | 17,331 |
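A usage sketch for get_formatted_dates, reusing the docstring's Tue/Wed/Thu weekmask over a shorter range (assumes pandas is importable; the exact output follows the 2019 calendar):
ranges = [{"start_date": "09-01-2019",
           "end_date": "09-15-2019",
           "include_days": [0, 1, 1, 1, 0, 0, 0]}]   # Mon..Sun mask -> Tue/Wed/Thu
print(get_formatted_dates(ranges))
# e.g. ['09-03-2019', '09-04-2019', '09-05-2019', '09-10-2019', '09-11-2019', '09-12-2019']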
import os
def _get_config_path(config_path):
"""Find path to yaml config file
Args:
config_path: (str) Path to config.yaml file
Returns:
Path to config.yaml if specified else default config.yaml
Raises:
ValueError: If the config_path is not None but doesn't exist
"""
if config_path is None:
dirname = os.path.dirname(cli.__file__)
config_path = os.path.join(dirname, 'config/config.yaml')
if not os.path.exists(config_path):
raise ValueError("Config path {} does not exist!".format(config_path))
return config_path | 4353dca4c6b30bff3b90389fc7681baeb2da9052 | 17,332 |
import numpy as np
import xarray as xr
def read_img(img: str, no_data: float, mask: str = None, classif: str = None, segm: str = None) ->\
        xr.Dataset:
"""
Read image and mask, and return the corresponding xarray.DataSet
:param img: Path to the image
:type img: string
    :param no_data: no_data value in the image
:type no_data: float
:param mask: Path to the mask (optional): 0 value for valid pixels, !=0 value for invalid pixels
:type mask: string
:param classif: Path to the classif (optional)
:type classif: string
    :param segm: Path to the segmentation (optional)
:type segm: string
:return: xarray.DataSet containing the variables :
- im : 2D (row, col) xarray.DataArray float32
- msk : 2D (row, col) xarray.DataArray int16, with the convention defined in the configuration file
:rtype: xarray.DataSet
"""
img_ds = rasterio_open(img)
data = img_ds.read(1)
if np.isnan(no_data):
no_data_pixels = np.where(np.isnan(data))
else:
no_data_pixels = np.where(data == no_data)
# We accept nan values as no data on input image but to not disturb cost volume processing as stereo computation
# step,nan as no_data must be converted. We choose -9999 (can be another value). No_data position aren't erased
# because stored in 'msk'
if no_data_pixels[0].size != 0 and np.isnan(no_data):
data[no_data_pixels] = -9999
no_data = -9999
dataset = xr.Dataset({'im': (['row', 'col'], data.astype(np.float32))},
coords={'row': np.arange(data.shape[0]),
'col': np.arange(data.shape[1])})
# Add image conf to the image dataset
dataset.attrs = {'no_data_img': no_data,
'valid_pixels': 0, # arbitrary default value
'no_data_mask': 1} # arbitrary default value
if classif is not None:
input_classif = rasterio_open(classif).read(1)
dataset['classif'] = xr.DataArray(np.full((data.shape[0], data.shape[1]), 0).astype(np.int16),
dims=['row', 'col'])
dataset['classif'].data = input_classif
if segm is not None:
input_segm = rasterio_open(segm).read(1)
dataset['segm'] = xr.DataArray(np.full((data.shape[0], data.shape[1]), 0).astype(np.int16),
dims=['row', 'col'])
dataset['segm'].data = input_segm
# If there is no mask, and no data in the images, do not create the mask to minimize calculation time
if mask is None and no_data_pixels[0].size == 0:
return dataset
# Allocate the internal mask (!= input_mask)
# Mask convention:
# value : meaning
# dataset.attrs['valid_pixels'] : a valid pixel
# dataset.attrs['no_data_mask'] : a no_data_pixel
# other value : an invalid_pixel
dataset['msk'] = xr.DataArray(np.full((data.shape[0], data.shape[1]),
dataset.attrs['valid_pixels']).astype(np.int16), dims=['row', 'col'])
# Mask invalid pixels if needed
# convention: input_mask contains information to identify valid / invalid pixels.
# Value == 0 on input_mask represents a valid pixel
# Value != 0 on input_mask represents an invalid pixel
if mask is not None:
input_mask = rasterio_open(mask).read(1)
# Masks invalid pixels
# All pixels that are not valid_pixels, on the input mask, are considered as invalid pixels
dataset['msk'].data[np.where(input_mask > 0)] = dataset.attrs['valid_pixels'] + \
dataset.attrs['no_data_mask'] + 1
# Masks no_data pixels
# If a pixel is invalid due to the input mask, and it is also no_data, then the value of this pixel in the
# generated mask will be = no_data
dataset['msk'].data[no_data_pixels] = int(dataset.attrs['no_data_mask'])
return dataset | 2074269e47092313f1cb01dc81004b7ce9c8f411 | 17,333 |
def any_user(password=None, permissions=[], groups=[], **kwargs):
"""
Shortcut for creating Users
Permissions could be a list of permission names
If not specified, creates active, non superuser
and non staff user
"""
is_active = kwargs.pop('is_active', True)
is_superuser = kwargs.pop('is_superuser', False)
is_staff = kwargs.pop('is_staff', False)
user = any_model(User, is_active = is_active, is_superuser = is_superuser,
is_staff = is_staff, **kwargs)
for group_name in groups :
group = Group.objects.get(name=group_name)
user.groups.add(group)
for permission_name in permissions:
app_label, codename = permission_name.split('.')
permission = Permission.objects.get(
content_type__app_label=app_label,
codename=codename)
user.user_permissions.add(permission)
if password:
user.set_password(password)
user.save()
return user | 914bbb58b68aad9b19a77f2dec7ea1f0e91508bd | 17,334 |
import tensorflow as tf
def devices_to_use():
"""Returns the device objects for the accel. we are the most likely to use.
Returns:
List of logical devices of the accelerators we will use.
"""
if tf.config.list_logical_devices("TPU"):
devices = tf.config.list_logical_devices("TPU")
elif tf.config.list_logical_devices("GPU"):
devices = tf.config.list_logical_devices("GPU")
else:
devices = tf.config.list_logical_devices("CPU")
devices.sort()
return devices | aca8cbd28ff46e79655b47e34334c12406cc94e8 | 17,335 |
import numpy as np
def barcode_density(bars, length):
"""
calculates the barcode density (normalized average cycle lifetime)
of a barcode
"""
densities = np.zeros(len(bars))
nums = np.array([len(bars[i][1]) for i in range(len(bars))])
num_infs = np.zeros(len(bars))
for i in range(len(bars)):
tot = 0
intervals = bars[i][1]
for intr in intervals:
if np.isinf(intr[1]):
num_infs[i] += 1
tot += (length-intr[0])/(length-1)
else:
tot += (intr[1] - intr[0])/(length-1)
densities[i] = tot
normed_density = densities/nums
normed_density[np.isnan(normed_density)] = 0
return np.stack([densities, nums, normed_density, num_infs]) | 4b585338cef3fd8b8ca91f89a1ae0532450b6209 | 17,336 |
import argparse
def create_parser():
"""Creates the default argument parser.
Returns
-------
parser : ArgumentParser
"""
parser = argparse.ArgumentParser()
parser.add_argument('--version', action='version', version=__version__)
parser.add_argument('--config-file')
parser.add_argument('--update-config', action='store_true')
update_parser_arguments(parser)
parser.add_argument('command', default='paste', nargs='?',
choices=('init', 'paste', 'clipboard', 'screenshot'))
return parser | e2ad6357811596c75f78dc24176976434e35e379 | 17,337 |
def genRankSurvey(readername, candidates, binsize, shareWith=None):
"""
readername (str)
candidates (iterable)
binsize (int)
shareWith (str) optional
"""
# connect and craete survey
c = cornellQualtrics()
surveyname = "Ranking Survey for {}".format(readername)
surveyId = c.createSurvey(surveyname)
desc = (
u"This survey is for: {0}.\n\n"
u"Rank students into the top 50%-ile bins. "
u"Put exactly {1} students in each bin. "
u"All uncategorized students will automatically "
u"be placed in the bottom 50%-ile. Ordering within a bin "
u"does not matter.".format(readername, binsize)
)
choices = {}
for j, choice in enumerate(candidates):
choices[str(j + 1)] = {"Display": choice}
choiceOrder = list(range(1, len(choices) + 1))
questionDef = {
"QuestionText": desc,
"DefaultChoices": False,
"DataExportTag": "Q1",
"QuestionID": "QID1",
"QuestionType": "PGR",
"Selector": "DragAndDrop",
"SubSelector": "Columns",
"Configuration": {
"QuestionDescriptionOption": "UseText",
"Stack": False,
"StackItemsInGroups": False,
},
"QuestionDescription": desc,
"Choices": choices,
"ChoiceOrder": choiceOrder,
"Validation": {
"Settings": {
"ForceResponse": "ON",
"Type": "GroupChoiceRange",
"MinChoices": "{}".format(binsize),
"MaxChoices": "{}".format(binsize),
}
},
"GradingData": [],
"Language": [],
"NextChoiceId": len(choices) + 1,
"NextAnswerId": 6,
"Groups": ["Top 10%", "Top 20%", "Top 30%", "Top 40%", "Top 50%"],
"NumberOfGroups": 5,
"QuestionText_Unsafe": desc,
}
c.addSurveyQuestion(surveyId, questionDef)
if shareWith:
c.shareSurvey(surveyId, shareWith)
c.publishSurvey(surveyId)
c.activateSurvey(surveyId)
link = "https://cornell.qualtrics.com/jfe/form/%s" % surveyId
return link | e94f782389de86a8cbfb9c77aa078f004ac061c9 | 17,338 |
def _get_badge_status(
self_compat_res: dict,
google_compat_res: dict,
dependency_res: dict) -> BadgeStatus:
"""Get the badge status.
The badge status will determine the right hand text and the color of
the badge.
Args:
self_compat_res: a dict containing a package's self compatibility
status for py2 and py3. See _get_self_compatibility_dict().
google_compat_res: a dict containing a package's pair compatibility
status for py2 and py3. See _get_pair_compatibility_dict().
dependency_res: a dict containing a package's dependency status.
See _get_dependency_dict().
Returns:
The cumulative badge status.
"""
statuses = []
for pyver in ['py2', 'py3']:
statuses.append(self_compat_res[pyver]['status'])
statuses.append(google_compat_res[pyver]['status'])
statuses.append(dependency_res['status'])
return BadgeStatus.get_highest_status(statuses) | f367f75321c62a7c86b4ef26be446072e5eaca7c | 17,339 |
import yaml
def _get_yaml_as_string_from_mark(marker):
"""Gets yaml and converts to text"""
testids_mark_arg_no = len(marker.args)
if testids_mark_arg_no > 1:
raise TypeError(
'Incorrect number of arguments passed to'
' @pytest.mark.test_yaml, expected 1 and '
'received {}'.format(testids_mark_arg_no))
else:
        yaml_object = yaml.load(marker.args[0], Loader=yaml.FullLoader)
yaml_text_block = '\n---\n' \
+ yaml.dump(yaml_object, default_flow_style=False) \
+ '...'
indented_yaml_text_block = '\n '.join(yaml_text_block.split('\n'))
return indented_yaml_text_block | 034dab9c5380035d2303df7ea7243b84baff47a0 | 17,340 |
def combine_dicts(w_dict1, w_dict2, params, model):
"""
Combine two dictionaries:
"""
w_dict = w_dict1 + w_dict2
eps = params[0]
params[0] = 0
P_w = []
w_dict = md.remove_duplicates_w_dict(P_w,w_dict,params,model)
return w_dict | 21c2de003cca0165b5404431178450bf6e6c549c | 17,341 |
def geocode_mapping(row, aian_ranges, aian_areas, redefine_counties, strong_mcd_states):
"""
Maps an RDD row to a tuple with format (state, AIAN_bool, AIANNHCE, county, place/MCD, tract, block), where
place/MCD is the five digit MCD in MCD-strong states and 5 digit place otherwise
AIAN_bool is '1' if the block is inside the AIAN area and '0' otherwise.
:param row: An RDD row with format (state, AIANNHCE, county, place, MCD, tract, block)
:param aian_ranges: a dictionary with keys given by the AIAN type and values given by a tuple with two elements
        that indicate the starting and ending AIANNHCE values for the AIAN area category.
:param aian_areas: a specification of AIANNHCE code groups that should be used to define AIAN areas; see also
make_grfc_ids().
:param redefine_counties: specifies that counties inside of AIAN areas should be redefined as incorporated places or
MCDs "in_strong_MCDs", "everywhere", or "nowhere"
:param strong_mcd_states: a tuple of the state geoids that are strong MCD states
:return res: a tuple with format (state, AIAN_bool, AIANNHCE, county, place/MCD, tract, block)
"""
state, aiannhce, county, place, cousub, tract, block = row
county = '10' + county
is_strong_MCD = state in strong_mcd_states
# The following AIANNHCE values are not in the universe of possible AIANNHCE codes:
assert aiannhce not in [str(x) for x in range(4990, 5000)], "AIANNHCE codes cannot be between 4990 and 4999"
if aiannhce == '9999':
        # Not in any of the AIAN area categories:
aian = '0'
else:
        # Check if the AIAN area category is included in the user's specification of AIAN areas:
for aian_definition, aian_range in aian_ranges.items():
if aiannhce <= aian_range[1] and aiannhce >= aian_range[0]:
aian = '1' if aian_definition in aian_areas else '0'
# If the user wishes to bypass from the county geounit to the individual AIAN areas, do so here:
if aian_definition in aian_areas and ((redefine_counties == 'in_strong_MCDs' and is_strong_MCD) or redefine_counties == 'everywhere'):
county = '0' + aiannhce
break
# An alternative would be to remove the second condition in the next if statement to increase accuracy in MCDs:
if is_strong_MCD and aian == '0':
mcd_or_place = cousub
else:
mcd_or_place = place
das_aian_area_code = aiannhce if (aian == '1') else '9999'
return state, aian, county, mcd_or_place, tract, block, das_aian_area_code | 6d2dcd7aa5acb5bff71120d957f520d0eec79790 | 17,342 |
from typing import Dict
def get_stan_input(
scores: pd.DataFrame,
priors: Dict,
likelihood: bool,
) -> Dict:
"""Get an input to cmdstanpy.CmdStanModel.sample.
    :param scores: a pandas DataFrame of scores with "name" and "score" columns
    :param priors: a dict of prior parameters merged into the Stan input
    :param likelihood: whether the likelihood should be evaluated
"""
return {
**priors,
**{
"N": len(scores),
"N_skater": scores["name"].nunique(),
"N_grade": N_GRADE,
"skater": one_encode(scores["name"]).values,
"y": scores["score"].astype(int).add(6).values,
"N_test": len(scores),
"skater_test": one_encode(scores["name"]).values,
"y_test": scores["score"].astype(int).add(6).values,
"likelihood": int(likelihood),
},
} | d8e11401c1c86bb3306652f6f3b1aaebe47ef2d8 | 17,343 |
def get_mph(velocity):
"""
Returns
-------
convert m/s to miles per hour [mph].
"""
    velocity = velocity * 3600 / 1609.344  # 1 statute mile = 1609.344 m
return velocity | f4a1922712ef2d8cfeba5650f410405956a39c31 | 17,344 |
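A worked value for get_mph with the statute-mile constant above: 10 m/s * 3600 / 1609.344 ≈ 22.37 mph.
print(round(get_mph(10), 2))   # 22.37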
import json
def _load_jsonl(input_path) -> list:
"""
Read list of objects from a JSON lines file.
"""
data = []
with open(input_path, 'r', encoding='utf-8') as f:
for line in f:
            data.append(json.loads(line.rstrip('\r\n')))
print('[LoadJsonl] Loaded {} records from {}'.format(len(data), input_path))
return data | 2cd35ff8afa7c325688046165517746e2b120b77 | 17,345 |
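A self-contained round-trip sketch for _load_jsonl; the file name is illustrative.
with open("sample.jsonl", "w", encoding="utf-8") as f:
    f.write('{"id": 1}\n{"id": 2}\n')
records = _load_jsonl("sample.jsonl")   # prints "[LoadJsonl] Loaded 2 records from sample.jsonl"
assert records == [{"id": 1}, {"id": 2}]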
import types
import importlib
def reload(name: str) -> types.ModuleType:
"""
Finalize and reload a plugin and any plugins that (transitively) depend on it. We try to run all finalizers in
dependency order, and only load plugins that were successfully unloaded, and whose dependencies have been
successfully reloaded. If a plugin fails to initialize, we run any finalizers it managed to register, and the plugin
is not loaded. Any exceptions raised will be reraised together. Returns the module object of the requested plugin if
successful.
"""
reloads = deps.subgraph_paths_to(name)
logger.info("Reloading {} with dependencies: {}".format(name,
", ".join(dep for dep in reloads.topo_sort_fwd() if dep != name)))
unload_success = set()
reload_success = set()
unload_gen = reloads.topo_sort_fwd()
reload_gen = reloads.topo_sort_bck()
def cont_reload() -> None:
try:
for dep in reload_gen:
if dep == name:
continue
elif dep not in unload_success:
logger.info("Not reloading {} because it was not unloaded properly".format(name))
elif not all(m in reload_success
for m in reloads.edges_from(dep)):
logger.info("Not reloading {} because its dependencies were not reloaded properly".format(name))
else:
importlib.import_module(dep)
reload_success.add(dep)
except:
cont_reload()
raise
def cont_unload() -> types.ModuleType:
try:
for dep in unload_gen:
if dep == name:
continue
unsafe_unload(dep)
unload_success.add(dep)
except:
cont_unload()
raise
try:
unsafe_unload(name)
except:
cont_reload()
raise
try:
ret = importlib.import_module(name)
reload_success.add(name)
finally:
cont_reload()
return ret
return cont_unload() | ea00d2139b51e80239960f61c0dc91dfe45de7d9 | 17,346 |
import json
def load_from_config(config_path, **kwargs):
"""Load from a config file. Config options can still be overwritten with kwargs"""
with open(config_path, "r") as config_file:
config = json.load(config_file)
config.update(kwargs)
return TokenizationConfig(**config) | 66ea64a334b265ae216413a043044767da0fd61c | 17,347 |
import collections
def get_tecogan_monitors(monitor):
"""
Create monitors for displaying and storing TECOGAN losses.
"""
monitor_vgg_loss = MonitorSeries(
'vgg loss', monitor, interval=20)
monitor_pp_loss = MonitorSeries(
'ping pong', monitor, interval=20)
monitor_sum_layer_loss = MonitorSeries(
'd layer loss', monitor, interval=20)
monitor_adv_loss = MonitorSeries(
'adversarial loss', monitor, interval=20)
monitor_disc_loss = MonitorSeries(
'discriminator loss', monitor, interval=20)
monitor_tb = MonitorSeries(
'tb', monitor, interval=20)
Monitor_tecogan = collections.namedtuple('Monitor_tecogan',
['monitor_vgg_loss', 'monitor_pp_loss', 'monitor_sum_layer_loss',
'monitor_adv_loss', 'monitor_disc_loss', 'monitor_tb'])
return Monitor_tecogan(monitor_vgg_loss, monitor_pp_loss, monitor_sum_layer_loss, monitor_adv_loss, monitor_disc_loss, monitor_tb) | 472605e4ff7a0e487fd868a573fbecf5acd977ba | 17,348 |
import pandas as pd
import sklearn.utils
from sklearn.metrics.pairwise import cosine_similarity
def user_based_filtering_recommend(new_user,user_movies_ids,movies_num,n_neighbor,movies_ratings):
    """ This function returns recommended movies based on user-based filtering, using
    cosine similarity to find the users most similar to the new user.
    It returns movies_num movies taken from the top-ranked movies of the n_neighbor users
    who are the most similar to the new user"""
#pivot the dataframe
users_inDB = movies_ratings.pivot_table(index='userId', columns='movieId', values='rating')
list_id_movies = movies_ratings['movieId'].unique()
new_user_vector = pd.DataFrame(new_user, index=list_id_movies).T
#fill Nans with 3 rating
users_inDB = users_inDB.fillna(3.0)
new_user_vector_filled = new_user_vector.fillna(3.0)
#for cosine similarity we have to center the data in order to have a magnitude(0-1)
users_inDB = (users_inDB - 3.0)/2.0
new_user = (new_user_vector_filled - 3.0)/2.0
#label the new user that we want to recommend for:
new_user.index=['new_user']
#add the new use to the original df
users_matrix = pd.concat([users_inDB,new_user])
#calculate cosine similarity
users_similarity_matrix = cosine_similarity(users_matrix)
users_similarity_matrix = pd.DataFrame(users_similarity_matrix,index=users_matrix.index,columns=users_matrix.index)
#we get here (users_num*users_num) similarity matrix
#print(users_matrix_similarity)
# get the new user similarities row: except the last column value(similarity with himself=1)
new_user_similarity = users_similarity_matrix['new_user'].iloc[:-1]
# take the n_neighbors nearest users (N users who have the most similarity with the new user)
similar_users = new_user_similarity.nlargest(n_neighbor).index.values
#print(similar_users)
#we will get (movies_num*n_neighbor*2) movies to choose
recommended_movieIds = []
scores = []
for user in similar_users:
recommended_movieIds.extend(users_inDB.loc[user].nlargest(movies_num*2).index)
scores.extend(users_inDB.loc[user].nlargest(movies_num*2).values)
recommended_movies_dic = {'movie_id':recommended_movieIds,'score':scores}
recommended_movies_df = pd.DataFrame(recommended_movies_dic)
#print(recommended_movies_df)
#Shuffle the movies
recommended_movies_df = sklearn.utils.shuffle(recommended_movies_df)
#Order movies by score
recommended_movies_df = recommended_movies_df.sort_values(by='score',ascending=False)
recommended_movies_ids = recommended_movies_df['movie_id'].unique()
#get the final recommendation: retrn movies_num of movies which the user hasn't rated
top_recommended_movies = []
for movie_id in recommended_movies_ids:
if (movie_id not in user_movies_ids) and (len(top_recommended_movies) < movies_num) :
top_recommended_movies.append(movie_id)
#finally return the movies titles
top_recommended_movies = movieId_to_title(top_recommended_movies,movies_ratings)
return top_recommended_movies | 4fa86b9966024e0d89969566d85ccf0b0a44bfcc | 17,349 |
def query_ps_from_wcs(w):
"""Query PanStarrs for a wcs.
"""
nra,ndec = w.array_shape[1:]
dra,ddec = w.wcs.cdelt[:2]
c = wcs.utils.pixel_to_skycoord(nra/2.,ndec/2.,w)
ddeg = np.linalg.norm([dra*nra/2,ddec*ndec/2])
pd_table = query(c.ra.value,c.dec.value,ddeg)
# Crop sources to those in the cube limits
scat = wcs.utils.skycoord_to_pixel(
SkyCoord(pd_table['raMean'],pd_table['decMean'], unit="deg"),
w,
origin=0,
mode='all'
)
mask = (scat[0] < nra)*(scat[1] < ndec)*(scat[0] > 0)*(scat[1] > 0)
pd_table = pd_table[mask]
pd_table['x'] = scat[0][mask]
pd_table['y'] = scat[1][mask]
return pd_table | 806baf87722213ab021e1e3889322539069a3b55 | 17,350 |
import torch
def permute(x, in_shape='BCD', out_shape='BCD', **kw):
""" Permute the dimensions of a tensor.\n
- `x: Tensor`; The nd-tensor to be permuted.
- `in_shape: str`; The dimension shape of `x`. Can only have characters `'B'` or `'C'` or `'D'`,
which stand for Batch, Channel, or extra Dimensions. The default value `'BCD'` means
      the input tensor `x` should be at least 2-d with shape `(Batch, Channel, Dim0, Dim1, Dim2, ...)`,
where `Dim0, Dim1, Dim2 ...` stand for any number of extra dimensions.
- `out_shape: str or tuple or None`; The dimension shape of returned tensor. Default: `'BCD'`.
If a `str`, it is restricted to the same three characters `'B'`, `'C'` or `'D'` as the `in_shape`.
If a `tuple`, `in_shape` is ignored, and simply `x.permute(out_shape)` is returned.
If `None`, no permution will be performed.
- `return: Tensor`; Permuted nd-tensor. """
if (in_shape == out_shape) or (out_shape is None):
return x
if isinstance(out_shape, (list, tuple, torch.Size)):
return x.permute(*out_shape)
if isinstance(in_shape, str) and isinstance(out_shape, str) :
assert set(in_shape) == set(out_shape) <= {'B', 'C', 'D'}, 'In and out shapes must have save set of chars among B, C, and D.'
in_shape = in_shape.lower().replace('d', '...')
out_shape = out_shape.lower().replace('d', '...')
return torch.einsum(f'{in_shape}->{out_shape}', x)
return x | e74594df581c12891963e931999563374cd89c7d | 17,351 |
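A quick shape check for permute above (assumes PyTorch is available):
x = torch.zeros(2, 3, 4, 5)                    # (Batch, Channel, Dim0, Dim1)
print(permute(x, 'BCD', 'BDC').shape)          # torch.Size([2, 4, 5, 3])
print(permute(x, 'BCD', (0, 2, 3, 1)).shape)   # torch.Size([2, 4, 5, 3])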
import numpy as np
import matplotlib.pyplot as plt
def heatmap(data, row_labels, col_labels, ax=None,
cbar_kw={}, cbarlabel="", **kwargs):
"""
Create a heatmap from a numpy array and two lists of labels.
Parameters
----------
data
A 2D numpy array of shape (N, M).
row_labels
A list or array of length N with the labels for the rows.
col_labels
A list or array of length M with the labels for the columns.
ax
A `matplotlib.axes.Axes` instance to which the heatmap is plotted. If
not provided, use current axes or create a new one. Optional.
cbar_kw
A dictionary with arguments to `matplotlib.Figure.colorbar`. Optional.
cbarlabel
The label for the colorbar. Optional.
**kwargs
All other arguments are forwarded to `imshow`.
"""
if not ax:
ax = plt.gca()
# Plot the heatmap
im = ax.imshow(data, **kwargs)
# We want to show all ticks...
ax.set_xticks(np.arange(data.shape[1]))
ax.set_yticks(np.arange(data.shape[0]))
# ... and label them with the respective list entries.
ax.set_xticklabels(col_labels)
ax.set_yticklabels(row_labels)
# Let the horizontal axes labeling appear on top.
ax.tick_params(top=False, bottom=False,
labeltop=True, labelbottom=True)
# Rotate the tick labels and set their alignment.
plt.setp(ax.get_xticklabels(), rotation=0, ha="center",
rotation_mode="anchor")
# Turn spines off and create white grid.
ax.spines[:].set_visible(False)
ax.set_xticks(np.arange(data.shape[1]+1)-.5, minor=True)
ax.set_yticks(np.arange(data.shape[0]+1)-.5, minor=True)
ax.grid(which="minor", axis="x", color="black", linestyle='--', linewidth=1)
ax.grid(which="minor", axis="y", color="black", linestyle='-', linewidth=3)
ax.tick_params(which="minor", top=False, left=False)
return im | 51c60139f9f2668f8ba31859c036f48a3e8faf63 | 17,352 |
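A minimal, hypothetical call to heatmap above with random data (note the function only draws the image; the colorbar hinted at by cbar_kw/cbarlabel is added externally here):
data = np.random.rand(4, 6)
fig, ax = plt.subplots()
im = heatmap(data, ["r%d" % i for i in range(4)],
             ["c%d" % j for j in range(6)], ax=ax, cmap="viridis")
fig.colorbar(im, ax=ax, label="value")
plt.show()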
import zlib
import struct
def assert_is_normal_rpyc(f):
"""
Analyze the structure of a single rpyc file object for correctness.
Does not actually say anything about the _contents_ of that section, just that we were able
to slice it out of there.
    If successful, returns the uncompressed contents of the first storage slot.
"""
f.seek(0)
header = f.read(1024)
f.seek(0)
if header[:10] != "RENPY RPC2":
# either legacy, or someone messed with the header
# assuming legacy, see if this thing is a valid zlib blob
raw_data = f.read()
f.seek(0)
try:
uncompressed = zlib.decompress(raw_data)
except zlib.error:
raise ValueError("Did not find RENPY RPC2 header, but interpretation as legacy file failed")
return uncompressed
else:
        if len(header) < 46:
            # 10 bytes header + 4 * 9 bytes content table
            raise ValueError("File too short")
        a, b, c, d, e, _, g, h, i = struct.unpack("<IIIIIIIII", header[10: 46])  # 6th field unused; don't shadow the file object f
        # does the header format match default ren'py generated files?
        if not (a == 1 and b == 46 and d == 2 and (g, h, i) == (0, 0, 0) and b + c == e):
            raise ValueError("Header data is abnormal, did the format gain extra fields?")
        f.seek(b)
        raw_data = f.read(c)
        f.seek(0)
        if len(raw_data) != c:
            raise ValueError("Header data is incompatible with file length")
        try:
            uncompressed = zlib.decompress(raw_data)
        except zlib.error:
            raise ValueError("Slot 1 did not contain a zlib blob")
        if not uncompressed.endswith("."):
            raise ValueError("Slot 1 did not contain a simple pickle")
        return uncompressed | f7db901dd99b0ac9036d6569093068e8f6b3e675 | 17,353 |
import os
import json
def retrieve_s3_object_contents(s3_obj, bucket=os.environ["ARTIFACTS_BUCKET"]):
"""Retrieve S3 object contents."""
return json.loads(
s3.get_object(Bucket=bucket, Key=s3_obj)["Body"].read().decode("utf-8")
) | 33342158e327ef7d3f296b35939abb9336623060 | 17,354 |
def substract_li(cfg, data, lats, lons, future_exp):
"""Difference between historical and future fields."""
pathlist = data.get_path_list(short_name='pr', exp='historical')
ar_diff_rain = np.zeros((len(lats), len(lons), len(pathlist)))
mism_diff_rain = np.zeros(len(pathlist))
mwp_hist_rain = np.zeros(len(pathlist))
ar_hist_rain = np.zeros((len(lats), len(lons), len(pathlist)))
ar_diff_ua = np.zeros((len(lats), len(lons), len(pathlist)))
ar_diff_va = np.zeros((len(lats), len(lons), len(pathlist)))
datasets = []
for iii, dataset_path in enumerate(pathlist):
# Substract historical experiment from rcp85 experiment
datasets.append(data.get_info(n.DATASET, dataset_path))
ar_diff_rain[:, :, iii] = (data.get_data(short_name='pr',
exp=future_exp,
dataset=datasets[iii]) -
data.get_data(short_name='pr',
exp='historical',
dataset=datasets[iii])) / \
(data.get_data(short_name='ts',
exp=future_exp, dataset=datasets[iii]) -
data.get_data(short_name='ts',
exp='historical', dataset=datasets[iii]))
# ISM (60◦ –95◦ E, 10◦ –30◦ N)
mism_diff_rain[iii] = \
np.mean((ar_diff_rain[:,
get_latlon_index(lons, 60, 95),
iii])[get_latlon_index(lats, 10, 30), :])
ar_hist_rain[:, :, iii] = data.get_data(
short_name='pr', exp='historical', dataset=datasets[iii])
# Western pacific (140◦ E–170◦ W, 12◦ S–12◦ N)
mwp_hist_rain[iii] = \
np.mean((ar_hist_rain[:,
get_latlon_index(lons, 140, 170),
iii])[get_latlon_index(lats, -12, 12), :])
ar_diff_ua[:, :, iii] = (data.get_data(short_name='ua',
exp=future_exp,
dataset=datasets[iii]) -
data.get_data(short_name='ua',
exp='historical',
dataset=datasets[iii])) / \
(data.get_data(short_name='ts',
exp=future_exp, dataset=datasets[iii]) -
data.get_data(short_name='ts',
exp='historical', dataset=datasets[iii]))
ar_diff_va[:, :, iii] = (data.get_data(short_name='va',
exp=future_exp,
dataset=datasets[iii]) -
data.get_data(short_name='va',
exp='historical',
dataset=datasets[iii])) / \
(data.get_data(short_name='ts',
exp=future_exp, dataset=datasets[iii]) -
data.get_data(short_name='ts',
exp='historical', dataset=datasets[iii]))
plot_rain_and_wind(cfg, datasets[iii],
{'ar_diff_rain': ar_diff_rain[:, :, iii],
'ar_diff_ua': ar_diff_ua[:, :, iii],
'ar_diff_va': ar_diff_va[:, :, iii],
'lats': lats, 'lons': lons}, future_exp)
return {
"datasets": datasets,
"ar_diff_rain": ar_diff_rain,
"ar_diff_ua": ar_diff_ua,
"ar_diff_va": ar_diff_va,
"ar_hist_rain": ar_hist_rain,
"mism_diff_rain": mism_diff_rain,
"mwp_hist_rain": mwp_hist_rain
} | 40506221fbdf5a9b0e2174e0fe144958dd57c93b | 17,355 |
def identify_jobs_to_update(file_path, jobs):
"""identify jobs to update."""
name_map = {}
for job in jobs:
cluster = get_desired_cluster(file_path, job)
if cluster != job.get("cluster", ""):
name_map[job["name"]] = cluster
return name_map | be9b8bd38ed90c96ac185195a79a43ffbec5e7d5 | 17,356 |
def bootstrap_storage_bucket(project_id, bucket_name, google_credentials):
"""
Bootstrap the bucket used to store Terraform state for projects.
Args:
project_id:
The ID of the project to create the bucket in.
bucket_name:
The name of the bucket to create.
google_credentials:
The credentials authorizing the creation of the bucket.
Returns:
An object containing information about the bucket.
"""
print(f"Attempting to retrieve existing bucket: {bucket_name}'")
service = googleapiclient.discovery.build(
"storage", "v1", credentials=google_credentials
)
request = service.buckets().get(bucket=bucket_name)
try:
bucket = request.execute()
print("Bucket exists.\n")
return bucket
except googleapiclient.errors.HttpError as e:
if e.resp['status'] != '404':
raise
print("Bucket does not exist yet. Creating it...")
bucket_body = {
"name": bucket_name,
"versioning": {
"enabled": True,
},
}
request = service.buckets().insert(
body=bucket_body,
predefinedAcl="projectPrivate",
predefinedDefaultObjectAcl="projectPrivate",
project=project_id
)
bucket = request.execute()
print("Done.\n")
return bucket | acdd72fbcb160d5c6347f1f41b6661fcf28ebdc2 | 17,357 |
def ValidateBucketForCertificateAuthority(bucket_name):
"""Validates that a user-specified bucket can be used with a Private CA.
Args:
bucket_name: The name of the GCS bucket to validate.
Returns:
A BucketReference wrapping the given bucket name.
Raises:
InvalidArgumentException: when the given bucket can't be used with a CA.
"""
messages = storage_util.GetMessages()
client = storage_api.StorageClient(messages=messages)
try:
bucket = client.GetBucket(
bucket_name,
messages.StorageBucketsGetRequest.ProjectionValueValuesEnum.full)
if not _BucketAllowsPublicObjectReads(bucket):
# Show a warning but don't fail, since this could be intentional.
log.warning(
'The specified bucket does not publicly expose new objects by '
'default, so some clients may not be able to access the CA '
'certificate or CRLs. For more details, see '
'https://cloud.google.com/storage/docs/access-control/making-data-public'
)
return storage_util.BucketReference(bucket_name)
except storage_api.BucketNotFoundError:
raise exceptions.InvalidArgumentException(
'gcs-bucket', 'The given bucket does not exist.') | b28e501b7747f8a4d417b156c2e627d8ca524aee | 17,358 |
from torch.utils.data import DataLoader
def load_train_val(seq_len, batch_size, dataset="hollywood2"):
"""
This returns two dataloaders correponding to the train and validation sets. Each
iterator yields tensors of shape (N, 3, L, H, W) where N is the batch size, L is
the sequence length, and H and W are the height and width of the frame.
The batch size is always 1 in the validation set. The frames are always cropped
    to (160, 160) windows in the training set. The frames in the validation set are
not cropped if they are smaller than 360x480; otherwise, they are cropped so the
maximum returned size is 360x480.
"""
train = DataLoader(VideoDataset(
"%s/train" % dataset,
crop_size=(160, 160),
seq_len=seq_len,
), shuffle=True, num_workers=16, batch_size=batch_size, pin_memory=True)
val = DataLoader(VideoDataset(
"%s/val" % dataset,
crop_size=False,
seq_len=seq_len,
), shuffle=False, batch_size=1, pin_memory=True)
return train, val | 628a2c0db01b30c4736e482dbc81789afcbdc92a | 17,359 |
import os
def checkIfMeshId(projectPath, mesh, name, meshID):
"""Checks if exists another Object having the same name as the mesh
This function asks the user what to do.
If the object is not a mesh, gets all the children meshes
Args:
projectPath: a str with the path where the exported file will be
        mesh: a string that represents one of the meshes in the scene
        name: a str with the name of the mesh
        meshID: the new name if needed
    Returns:
        name: a str with the new value
        meshID: the (possibly updated) mesh ID
        abort: True if the user chooses to cancel the export
"""
abort = False
if os.access(projectPath + meshID + ".obj" , os.W_OK):
msg = "There is already an existing Go file [" + name + "] in the Go project folder.\n"
msg = msg + "Do you want to replace it?\n'Yes' to replace, 'No' to rename the mesh " + name + ", 'Cancel' to cancel export"
res = showDialog("Exporting mesh object " + name, msg, "yesNoCancel")
if (res == "ok"):
# replace existing Object
lx.out(' -> itemID=({0}): set GoMo tag to "{0}"'.format(mesh, name))
meshID = name
lx.eval('select.drop item')
lx.eval('select.item {0} set'.format(mesh))
            lx.eval('item.tag string GoMo "{0}"'.format(meshID))
if (res == "no"):
#rename the mesh to an unique Object
name = meshID
lx.eval('select.drop item')
lx.eval('select.item {0} set'.format(mesh))
lx.eval('item.name "{0}"'.format(meshID))
lx.eval('item.tag string GoMo "{0}"'.format(meshID))
if (res == "cancel"):
abort = True
return name, meshID, abort | 8e20683c59864b17c40291345beb21a7a54c5323 | 17,360 |
import yaml
def read_params_file(config_path: str) -> dict:
    """Read and parse the params.yaml file
    Args:
        config_path (str): path to the yaml config file
    Returns:
        dict: the parsed configuration
    """
with open(config_path) as yaml_file:
config = yaml.safe_load(yaml_file)
return config | b8a4bf0f70d1b4e2096ebd6d96568fc7ee757e16 | 17,361 |
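A minimal usage sketch for read_params_file above; the params.yaml contents and keys here are purely illustrative.
with open("params.yaml", "w") as yaml_file:
    yaml_file.write("train:\n  epochs: 10\n  lr: 0.001\n")
params = read_params_file("params.yaml")
print(params["train"]["epochs"])  # -> 10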
import re
def fullmatch(regex, string, flags=0):
"""Emulate python-3.4 re.fullmatch()."""
matched = re.match(regex, string, flags=flags)
if matched and matched.span()[1] == len(string):
return matched
return None | 72de0abe5c15dd17879b439562747c9093d517c5 | 17,362 |
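A quick sketch contrasting fullmatch above with plain re.match; the patterns are illustrative.
print(fullmatch(r"\d+", "123"))     # a match object: the whole string matches
print(fullmatch(r"\d+", "123abc"))  # None: re.match would still match "123" here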
import re
import bs4
def html2text(html: str) -> str:
    """ Change HTML to help the tokenizer and return text """
# Replace <br/> with PERIOD+NEW_LINE
html = re.sub(r'(\s*<br\s?\/\s*>)+', '. \n', html)
html = re.sub(r'<br\s?\/?>', '. \n', html)
html = re.sub(r'\s*(</?em>)\s*', r' \1 ', html)
html = re.sub(r'\s*(</?strong>)\s*', r' \1 ', html)
html = re.sub(r'\s*(</?b>)\s*', r' \1 ', html)
html = re.sub(r'\s*(</?i>)\s*', r' \1 ', html)
soup = bs4.BeautifulSoup(html, 'html5lib')
if REMOVE_TABLES:
for tag in soup('table'):
tag.extract()
if REMOVE_LISTS:
for tag in soup('ul'):
tag.extract()
divs_to_remove = '^(script|noscript|form|style|head|nav)$'
for tag in soup.find_all(re.compile(divs_to_remove)):
tag.extract()
if REMOVE_TIME_TAG:
for tag in soup('time'):
tag.extract()
if REMOVE_LISTS_OF_LINKS:
for ul in soup('ul'):
ul_can_be_removed_flags = []
for li in ul.find_all('li'):
can_be_removed = False
li_is_link = False
a_tags_in_li = li.find_all('a')
if len(a_tags_in_li) == 1:
li_is_link = True
if li_is_link and li.get_text().strip() == \
a_tags_in_li[0].get_text().strip():
can_be_removed = True
ul_can_be_removed_flags.append(li_is_link)
ul_can_be_removed_flags.append(can_be_removed)
if all(ul_can_be_removed_flags):
ul.extract()
# List of html-tags that we consider as BLOCK,
# so there are no sentences that begins in one and ends in another
pattern = re.compile('^(div|p|h1|h2|h3|h4|h5|code|blockquote)$')
for tag in soup.find_all(pattern):
if tag.name == 'div' and tag.find_all(pattern): # skip if has child
continue
tag_text = tag.get_text().strip()
if not tag_text:
continue
if tag_text[-1] == ':' and SKIP_FINALYZING_IF_ENDS_WITH_COLON:
continue
# Adding PERIOD in the end of text tag
if not tag_text[-1] in PARAGRAPH_LAST_CHAR_MUST_BE_IN:
# remove COLON in the end
new_tag = soup.new_tag('p')
if tag_text[-1] == ':':
tag_text = tag_text.rstrip(':')
new_tag.string = '. \n' + tag_text + '. '
tag.replace_with(new_tag)
text = soup.get_text()
# Remove possible period (side-effect) at the start
text = re.sub(r'^\s*\.', r'', text)
# Text..SPACE -> Text.SPACE
text = re.sub(r'([^\.])\.\. ', r'\1. ', text)
# Remove redundant punkt . \n . -> . TODO: optimize here
text = re.sub(r'\.(\s*\n\s*\.)+', r'. \n', text)
text = re.sub(r'\.\s*\n\.', r'. \n', text)
text = re.sub(r'\n\.', r'. ', text)
text = re.sub(r'\.\s\n\s*\.', r'. \n', text)
text = re.sub(r'\.\s*\.\s\n', r'. \n', text)
text = re.sub(r'\s+\.\s\n', r'. \n', text)
text = re.sub(r'\n\.\s*\n', r'\n', text)
return text | f410238595b760a14439486f15e13420f02db68b | 17,363 |
from typing import Any
from typing import Type
import inspect
def _is_class(module: Any, member: Type, clazz: Type) -> bool:
"""
Validates if a module member is a class and an instance of a CoreService.
:param module: module to validate for service
:param member: member to validate for service
:param clazz: clazz type to check for validation
:return: True if a valid service, False otherwise
"""
if not inspect.isclass(member):
return False
if not issubclass(member, clazz):
return False
if member.__module__ != module.__name__:
return False
return True | 5792fadcc93068fa8d7050de7d84ee2bbe1fb0f1 | 17,364 |
from functools import partial
def word_boundary(queries, count, degree, parallel=True, **kwargs):
"""
run augmentation on list of sentences
:param queries: sentences to augment
:type queries: list
:param count: number of output for each query
:type count: int
:param degree: degree of augmentation, takes value between 0 and 1
:type degree: float
:param parallel: run in parallel
:type parallel: bool
:param kwargs:
:return:
"""
if parallel:
function = partial(
__word_boundary__,
**kwargs,
degree=degree,
count=count,
)
return run_parallel(queries, function)
else:
return [
__word_boundary__(word, degree=degree, count=count, **kwargs)
for word in queries
] | 7ca4172d2900c773322d54380bde6780f2580597 | 17,365 |
def myFunction(objectIn):
"""What you are supposed to test."""
return objectIn.aMethodToMock() + 2 | 1907db338a05f2d798ccde63366d052404324e6f | 17,366 |
from configparser import ConfigParser
def read_config(filename, section):
""" Reads a section from a .ini file and returns a dict object
"""
parser = ConfigParser()
parser.read(filename)
dic = {}
if parser.has_section(section):
items = parser.items(section)
for item in items:
dic[item[0]] = item[1]
else:
raise Exception('{0} not found in the {1} file'.format(section, filename))
return dic | 3eb84afc13b0ad40bcaf434d4a38712cedb4502a | 17,367 |
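A hedged usage sketch for read_config above; the database.ini file and its section/keys are illustrative only.
with open("database.ini", "w") as ini_file:
    ini_file.write("[postgresql]\nhost = localhost\nport = 5432\n")
print(read_config("database.ini", "postgresql"))
# -> {'host': 'localhost', 'port': '5432'}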
def get_training_set_count(disc):
"""Returns the total number of training sets of a discipline and all its
child elements.
:param disc: Discipline instance
:type disc: models.Discipline
:return: sum of training sets
:rtype: int
"""
training_set_counter = 0
for child in disc.get_descendants(include_self=True):
training_set_counter += child.training_sets.count()
return training_set_counter | 9b28a9e51e04b559f05f1cc0255a6c65ca4a0980 | 17,368 |
import os
def search_dir(path, dir_name, type):
"""Search directory in certain path"""
target_path = ""
for item in os.listdir(path):
item_path = os.path.join(path, item)
if os.path.isdir(item_path):
if lambda_fun(dir_name, item, type):
target_path = item_path
break
else:
target_path = search_dir(item_path, dir_name, type)
if target_path != "":
break
return target_path | 27257c5243417119067066236d73263afc54ff37 | 17,369 |
def lazy_import(module_name, callback=None):
"""Returns a proxy module object that will lazily import the given module the first
time it is used.
Example usage::
# Lazy version of `import tensorflow as tf`
tf = lazy_import("tensorflow")
# Other commands
# Now the module is loaded
tf.__version__
Args:
module_name: the fully-qualified module name to import
callback (None): a callback function to call before importing the
module
Returns:
a proxy module object that will be lazily imported when first used
"""
return LazyModule(module_name, callback=callback) | bc94a18b4a8a2714d2cffd743de2a202ecb5af78 | 17,370 |
import jax
def _top_k(array, k):
"""Returns top k values and their indices along the last axis of the array.
This function serves the same purpose as jax.lax.top_k, but in a more XLA
friendly manner for TPUs:
(1) On TPUs, we use one-hot matrix multiplications to select the top k values.
This convoluted way of obtaining the top k values is generally faster on
TPUs.
(2) Otherwise, we fall back to jax.lax.top_k (and its underlying scatter op).
Args:
array: Source array.
k: Number of top values to select.
Returns:
- Top k values
- Associated top k indices.
"""
if _favor_one_hot_slices():
top_k_indices = jax.lax.top_k(array, k)[-1]
top_k_values = _take_along_axis(array, top_k_indices, axis=-1)
return top_k_values, top_k_indices
else:
return jax.lax.top_k(array, k) | 74c7c705b6b972d227c10146f0b5209f62c1d59f | 17,371 |
def time_to_accuracy(raw_metrics, tag, threshold):
"""Calculate the amount of time for accuracy to cross a given threshold.
Args:
raw_metrics: dict mapping TensorBoard tags to list of MetricPoint.
tag: string name of accuracy metric.
threshold: the desired model accuracy.
Returns:
    MetricPoint whose value is the elapsed time in seconds to reach the desired accuracy.
"""
values = raw_metrics.get(tag)
if not values:
raise ValueError('No values found for time to accuracy tag: {}. '
'Possible tags were: {}'.format(tag, raw_metrics.keys()))
# MetricPoints should be sorted by timestamp with earlier events first.
start_wall_time = values[0].wall_time
try:
end_wall_time = next(
v.wall_time for v in values
if v.metric_value >= threshold)
return MetricPoint(end_wall_time - start_wall_time, end_wall_time)
except StopIteration:
max_accuracy = max(v.metric_value for v in values)
raise ValueError(
'Accuracy metric `{}` was never high enough to satisfy the '
'`time_to_accuracy` settings from the config. Max accuracy: {}. '
        'Target accuracy: {}.'.format(tag, max_accuracy, threshold))
import numpy as np
def create_stencil(image_shape, smooth):
"""The stencil is a mask that will enable a smooth transition between blocks. blocks will be multiplied
by the stencil so that when they are blitted to the image, transition between them are smoothed out.
image 1: 1 1 1 1 1 1 1 , image 2: 2 2 2 2 2 2 2, stencil: .25 .75 1 1 1 .75 .25
image 1 * stencil: .25 .75 1 1 1 .75 .25
image 2 * stencil: .5 1.5 2 2 2 1.5 .5
adding them: .25 .75 1 1 1 1.25 1.75 2 2 2 1.5 .5
"""
stencil = np.ones(image_shape, dtype=np.float32)
# 2 * smooth because we need to blend the inside of the block with the outside of the other block
# for smooth = 4, i1; inside image 1, o1: outside image 1
# o1 o1 o1 o1 | i1 i1 i1 i1
# i1 i1 i1 i1 | o1 o1 o1 o1
factors = np.linspace(0, 1, 2*smooth+1, endpoint=False)[1:]
for i, f in enumerate(factors):
stencil[i, :, :] *= f
stencil[:, i, :] *= f
for i, f in enumerate(factors):
stencil[image_shape[0] - i - 1, :, :] *= f
stencil[:, image_shape[1] - i - 1, :] *= f
return stencil | 49aca2fb63ea6bef134c0872520fd203ce21bfef | 17,373 |
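A small sketch of the stencil produced by create_stencil above, using an illustrative 12x12 single-channel block.
stencil = create_stencil((12, 12, 1), smooth=2)
print(stencil[6, :, 0])  # middle row ramps 0.2 -> 1.0 -> 0.2 across the block
print(stencil[0, 0, 0])  # corner value is strongly attenuated (0.2 * 0.2)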
def a_m_to_P(a, m):
"""Compute the orbital period given the semi-major axis and total mass.
Parameters
----------
{a}
{m}
"""
return 2*np.pi * np.sqrt(a**3 / (G * m)) | 734332ff83c06830388ceeecd64315ee738756f1 | 17,374 |
def _async_attr_mapper(attr_name, val):
"""The `async` attribute works slightly different than the other bool
attributes. It can be set explicitly to `false` with no surrounding quotes
according to the spec."""
if val in [False, 'False']:
return ' {}=false'.format(attr_name)
elif val:
return ' {}'.format(attr_name)
else:
return '' | 79e72067b244d705df9aa09a78db656f0847938c | 17,375 |
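A few illustrative calls to _async_attr_mapper above, covering all three branches.
print(repr(_async_attr_mapper("async", True)))   # ' async'
print(repr(_async_attr_mapper("async", False)))  # ' async=false'
print(repr(_async_attr_mapper("async", None)))   # ''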
from inspect import isgenerator
from typing import Any
from typing import Type
import numpy as np
def wrap(val: Any) -> Value:
"""Wraps the given native `val` as Protobuf `Value` message.
Supports converting collection/array of primitives types to `Value` message:
* numpy array of primitives.
* list of primitives.
* generator of finite no. of primitives.
Generally, wrapping only supports wrapping of collection of primitives
if all primitives share the same native primitive types. However, some
native type mixing is allowed as supported by `np.asarray()`, although
doing so is not recommended.
If the given `val` is already a Protobuf `Value` message, returns `val` as is.
Args:
val: The native value to wrap as a protobuf message. The value should
be native primitive, array of primitives.
Returns:
Wrapped `Value` protobuf message.
Throws:
TypeError: If the given native value is not of a supported type.
"""
# return as is if val is already value protobuf
if isinstance(val, Value):
return val
# try to wrap value as primitive
try:
return wrap_primitive(val)
except TypeError:
pass
# check that we are not trying to convert None
if val is None:
raise TypeError("Wrapping None is Value proto is not supported")
# extract values from if generator
if isgenerator(val):
val = list(val)
# extract flatten list of primitive protos from collect of primitives
val_arr = np.asarray(val)
primitives = [wrap_primitive(v) for v in val_arr.flatten()]
# resolve element data type and build value proto
element_type = primitives[0].data_type.primitive
return Value(
data_type=Type(
array=Type.Array(
dimensions=val_arr.shape,
element_type=element_type,
)
),
array=Value.Array(values=[p.primitive for p in primitives]),
) | 9208a2afd7b256ec791044531b13fe8c8b9fa2c8 | 17,376 |
import geometry_msgs.msg
def to_transform_msg(transform):
"""Convert a `Transform` object to a Transform message."""
msg = geometry_msgs.msg.Transform()
msg.translation = to_vector3_msg(transform.translation)
msg.rotation = to_quat_msg(transform.rotation)
return msg | c471ec8dfed03caa9f7096ab3294589477cf6d39 | 17,377 |
def print_pos_neg(num):
"""Print if positive or negative in polarity level
>>> print_pos_neg(0.8)
'positive'
>>> print_pos_neg(-0.5)
'negative'
"""
if num > 0:
return "positive"
elif num == 0:
return "neutral"
else:
return "negative" | 414aa98f54a2f01af24d591ae47ec4f394adf682 | 17,378 |
def delete_volume_op(name: str, namespace: str):
"""
Creates a kfp.dsl.ContainerOp that deletes a volume (Kubernetes Resource).
Parameters
----------
name : str
namespace : str
Returns
-------
kfp.dsl.ContainerOp
"""
kind = "PersistentVolumeClaim"
return kubernetes_resource_delete_op(
name=f"vol-{name}",
kind=kind,
namespace=namespace,
) | d947905e01de29061895512fbfd1fbefb024110d | 17,379 |
def distal(combo):
""" Returns the distal subspecies from a combo
:param combo: int representation of origin combination
:return: int representation of the distal origin
>>> distal(combine(CAS, DOM)) == DOM
True
"""
return combo & _DISTAL_MASK | 163875c1b4b081027344a3bc1f05bd0cb60a58d8 | 17,380 |
def get_eval_dataset(files, ftDict, axes = [2], splits = None, one_hot = None, moments = None, **kwargs):
"""
Get the preprocessed evaluation dataset
Args:
files (list): list of tfrecords to be used for evaluation
Returns:
A tf.data.Dataset of evaluation data.
"""
dataset = get_dataset(files, ftDict, axes, splits, one_hot, moments, **kwargs)
dataset = dataset.batch(1)
return dataset | 73476bf1273923e77bf5f4e6d415191cf83023cc | 17,381 |
def getTopApSignals(slot_to_io):
""" HLS simulator requires that there is an ap_done at the top level """
# find which slot has the s_axi_control
for slot, io_list in slot_to_io.items():
if any('s_axi' in io[-1] for io in io_list):
# note the naming convention
ap_done_source = [f'{io[-1]}_in' for io in io_list if 'ap_done' in io[-1]]
ap_start_source = [f'{io[-1]}_out' for io in io_list if 'ap_start' in io[-1]]
top_ap_signals = []
top_ap_signals.append(f'wire ap_done = ' + ' & '.join(ap_done_source) + ';')
top_ap_signals.append('wire ap_idle = ap_done;')
top_ap_signals.append('wire ap_ready = ap_done;')
top_ap_signals.append(f'wire ap_start = {ap_start_source[0]};') # only need 1 ap_start
return top_ap_signals
    assert False, 'No slot contains an s_axi_control interface'
def get_line_style(image: Image = None) -> int:
"""
Get line style of the specified image.
The line style will be used when drawing lines or shape outlines.
:param image: the target image whose line style is to be gotten. None means it is the target image
(see set_target() and get_target())
:return: line style used by the specified image
"""
image = _get_target_image(image)
return image.get_line_style() | cc1b9285fbd3b168f40e66969e0a4b1ae9ee234a | 17,383 |
from shapely.geometry import Polygon
def make_polygon_for_earth(lat_bottom_left, lon_bottom_left, lat_top_right, lon_top_right):
"""
Divides the region into two separate regions (if needed) so as to handle the cases where the regions
cross the international date
:param lat_bottom_left: float (-90 to 90)
:param lon_bottom_left: float (-180 to 180)
:param lat_top_right: float (-90 to 90)
:param lon_top_right: float (-180 to 180)
:return:
------------ <-----(lon top right, lat top right)
| |
| |
| |
| |
------------
^
|
---- (lon bottom left, lat bottom left)
"""
focus_regions = []
# case where region starts around 180 longitude and then wraps around to -180 longitude (complete cylinder)
# international date line crossed
if lon_bottom_left > lon_top_right: # overlap of latitudes
# we need two polygons.
focus_region1 = Polygon([
[lon_bottom_left, lat_bottom_left],
[lon_bottom_left, lat_top_right],
[180, lat_top_right],
[180, lat_bottom_left]])
focus_region2 = Polygon([
[-180, lat_bottom_left],
[-180, lat_top_right],
[lon_top_right, lat_top_right],
[lon_top_right, lat_bottom_left]])
focus_regions = [focus_region1, focus_region2]
else: # international dateline not crossed
focus_region1 = Polygon([
[lon_bottom_left, lat_bottom_left],
[lon_bottom_left, lat_top_right],
[lon_top_right, lat_top_right],
[lon_top_right, lat_bottom_left]])
focus_regions = [focus_region1]
return focus_regions | 6f73cc35c11cd16eea0c80aa7921ff1680ee75b6 | 17,384 |
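A short sketch of make_polygon_for_earth above; the coordinates are illustrative.
# Region spanning the international date line (140E across to 170W) -> two polygons
print(len(make_polygon_for_earth(-12, 140, 12, -170)))  # -> 2
# Region fully inside one run of longitudes -> one polygon
print(len(make_polygon_for_earth(10, 60, 30, 95)))      # -> 1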
from collections import defaultdict
import torch
import os
def train_coral(s_dataloaders, t_dataloaders, val_dataloader, test_dataloader, metric_name, seed, **kwargs):
"""
:param s_dataloaders:
:param t_dataloaders:
:param kwargs:
:return:
"""
s_train_dataloader = s_dataloaders
t_train_dataloader = t_dataloaders
autoencoder = AE(input_dim=kwargs['input_dim'],
latent_dim=kwargs['latent_dim'],
hidden_dims=kwargs['encoder_hidden_dims'],
dop=kwargs['dop']).to(kwargs['device'])
encoder = autoencoder.encoder
target_decoder = MoMLP(input_dim=kwargs['latent_dim'],
output_dim=kwargs['output_dim'],
hidden_dims=kwargs['regressor_hidden_dims'],
out_fn=torch.nn.Sigmoid).to(kwargs['device'])
target_regressor = EncoderDecoder(encoder=encoder,
decoder=target_decoder).to(kwargs['device'])
train_history = defaultdict(list)
# ae_eval_train_history = defaultdict(list)
val_history = defaultdict(list)
s_target_regression_eval_train_history = defaultdict(list)
t_target_regression_eval_train_history = defaultdict(list)
target_regression_eval_val_history = defaultdict(list)
target_regression_eval_test_history = defaultdict(list)
model_optimizer = torch.optim.AdamW(target_regressor.parameters(), lr=kwargs['lr'])
for epoch in range(int(kwargs['train_num_epochs'])):
if epoch % 50 == 0:
print(f'Coral training epoch {epoch}')
for step, s_batch in enumerate(s_train_dataloader):
t_batch = next(iter(t_train_dataloader))
train_history = coral_train_step(model=target_regressor,
s_batch=s_batch,
t_batch=t_batch,
device=kwargs['device'],
optimizer=model_optimizer,
alpha=kwargs['alpha'],
history=train_history)
s_target_regression_eval_train_history = evaluate_target_regression_epoch(regressor=target_regressor,
dataloader=s_train_dataloader,
device=kwargs['device'],
history=s_target_regression_eval_train_history)
t_target_regression_eval_train_history = evaluate_target_regression_epoch(regressor=target_regressor,
dataloader=t_train_dataloader,
device=kwargs['device'],
history=t_target_regression_eval_train_history)
target_regression_eval_val_history = evaluate_target_regression_epoch(regressor=target_regressor,
dataloader=val_dataloader,
device=kwargs['device'],
history=target_regression_eval_val_history)
target_regression_eval_test_history = evaluate_target_regression_epoch(regressor=target_regressor,
dataloader=test_dataloader,
device=kwargs['device'],
history=target_regression_eval_test_history)
save_flag, stop_flag = model_save_check(history=target_regression_eval_val_history,
metric_name=metric_name,
tolerance_count=50)
if save_flag:
torch.save(target_regressor.state_dict(), os.path.join(kwargs['model_save_folder'], f'coral_regressor_{seed}.pt'))
if stop_flag:
break
target_regressor.load_state_dict(
torch.load(os.path.join(kwargs['model_save_folder'], f'coral_regressor_{seed}.pt')))
# evaluate_target_regression_epoch(regressor=target_regressor,
# dataloader=val_dataloader,
# device=kwargs['device'],
# history=None,
# seed=seed,
# output_folder=kwargs['model_save_folder'])
evaluate_target_regression_epoch(regressor=target_regressor,
dataloader=test_dataloader,
device=kwargs['device'],
history=None,
seed=seed,
output_folder=kwargs['model_save_folder'])
return target_regressor, (
train_history, s_target_regression_eval_train_history, t_target_regression_eval_train_history,
target_regression_eval_val_history, target_regression_eval_test_history) | d7ff43137f7f869f0104bf0e6ba15dcdcf909a2f | 17,385 |
import numpy as np
def first_nonzero_coordinate(data, start_point, end_point):
"""Coordinate of the first nonzero element between start and end points.
Parameters
----------
data : nD array, shape (N1, N2, ..., ND)
A data volume.
start_point : array, shape (D,)
The start coordinate to check.
end_point : array, shape (D,)
The end coordinate to check.
Returns
-------
coordinates : array of int, shape (D,)
The coordinates of the first nonzero element along the ray, or None.
"""
shape = np.asarray(data.shape)
length = np.linalg.norm(end_point - start_point)
length_int = np.round(length).astype(int)
coords = np.linspace(start_point, end_point, length_int + 1, endpoint=True)
clipped_coords = np.clip(np.round(coords), 0, shape - 1).astype(int)
nonzero = np.flatnonzero(data[tuple(clipped_coords.T)])
if len(nonzero) == 0:
return None
else:
return clipped_coords[nonzero[0]] | 5db67cf49c3638a80695fd76a1a16eeec992d725 | 17,386 |
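A minimal sketch of first_nonzero_coordinate above on a tiny volume; the values are illustrative.
vol = np.zeros((5, 5, 5))
vol[3, 3, 3] = 1.0
print(first_nonzero_coordinate(vol,
                               start_point=np.array([0.0, 0.0, 0.0]),
                               end_point=np.array([4.0, 4.0, 4.0])))  # -> [3 3 3]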
import numpy as np
def l1_distance(prediction, ground_truth):
"""L1 distance difference between two vectors."""
if prediction.shape != ground_truth.shape:
prediction, ground_truth = np.squeeze(prediction), np.squeeze(ground_truth)
min_length = min(prediction.size, ground_truth.size)
return np.abs(prediction[:min_length] - ground_truth[:min_length]) | aaf79b386efa5f1b8726adda8d8e7dc66a502e87 | 17,387 |
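A short sketch of l1_distance above with deliberately mismatched, illustrative shapes.
pred = np.array([1.0, 2.0, 3.0, 4.0])
truth = np.array([[1.5], [2.0], [2.0]])  # different shape: gets squeezed, then truncated
print(l1_distance(pred, truth))          # -> [0.5 0.  1. ]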
from typing import Match
import base64
def decode(match_id: str) -> Match:
"""Decode a match ID and return a Match.
>>> decode("QYkqASAAIAAA")
Match(cube_value=2, cube_holder=<Player.ZERO: 0>, player=<Player.ONE: 1>, crawford=False, game_state=<GameState.PLAYING: 1>, turn=<Player.ONE: 1>, double=False, resign=<Resign.NONE: 0>, dice=(5, 2), length=9, player_0_score=2, player_1_score=4)
"""
match_bytes: bytes = base64.b64decode(match_id)
match_key: str = "".join([format(b, "08b")[::-1] for b in match_bytes])
return Match(
cube_value=2 ** int(match_key[0:4][::-1], 2),
cube_holder=Player(int(match_key[4:6][::-1], 2)),
player=Player(int(match_key[6])),
crawford=bool(int(match_key[7])),
game_state=GameState(int(match_key[8:11][::-1], 2)),
turn=Player(int(match_key[11])),
double=bool(int(match_key[12])),
resign=Resign(int(match_key[13:15][::-1], 2)),
dice=(int(match_key[15:18][::-1], 2), int(match_key[18:21][::-1], 2)),
length=int(match_key[21:36][::-1], 2),
player_0_score=int(match_key[36:51][::-1], 2),
player_1_score=int(match_key[51:66][::-1], 2),
) | a48fae652650d03259fd003af16add381f2729f3 | 17,388 |
def _valid_proto_paths(transitive_proto_path):
"""Build a list of valid paths to build the --proto_path arguments for the ScalaPB protobuf compiler
In particular, the '.' path needs to be stripped out. This mirrors a fix in the java proto rules:
https://github.com/bazelbuild/bazel/commit/af3605862047f7b553b7d2c19fa645714ea19bcf
This is explained in this issue: https://github.com/bazelbuild/rules_scala/issues/687
"""
return depset([path for path in transitive_proto_path if path != "."]) | cb834a58fa091249f16d5cdfccf536229dacd3d0 | 17,389 |
import numpy as np
import tensorflow as tf
def update_stats_objecness(obj_stats, gt_bboxes, gt_labels, pred_bboxes, pred_labels, pred_scores, mask_eval=False,
affordance_stats=None, gt_masks=None, pred_masks=None, img_height=None, img_width=None, iou_thres=0.3):
"""
Updates statistics for object classification and affordance detection.
:param obj_stats: accumulated statistics for object classification
:param gt_bboxes: ground truth normalized bounding boxes (batch_size, num_gt_bboxes, 4)
:param gt_labels: ground truth labels for gt_boxes (batch_size, num_gt_bboxes)
:param pred_bboxes: predicted normalized bounding boxes (batch_size, num_pred_bboxes, 4)
:param pred_labels: predicted labels for pred_bboxes (batch_size, num_pred_bboxes)
:param pred_scores: predicted scores for pred_bboxes (batch_size, num_pred_bboxes)
:param mask_eval: True if there are predicted masks, False otherwise
:param affordance_stats: accumulated statistics for affordance evaluation
:param gt_masks: ground truth masks (batch_size, num_gt_bboxes, orig_mask_height, orig_mask_width)
:param pred_masks: predicted masks with prob for each pixel for each class (batch_size, num_pred_bboxes, train_mask_size, train_mask_size, num_affordance_classes)
:param img_height: image height
:param img_width: image width
:returns: jsons with updated statistics for object classification and affordance detection
"""
# create empty mask to accumulate masks for all bboxes in one single mask
final_gt_mask = np.zeros((img_height, img_width))
final_pred_mask = np.zeros((img_height, img_width))
# iou for each pred_bbox wrt each gt_box
iou_map, zero_iou = bbox_utils.generate_iou_map(pred_bboxes, gt_bboxes)
# update stats only if there are some iou that are not 0
if not zero_iou:
# take max iou for each pred_bbox and its corresponding gt_box indices
merged_iou_map = tf.reduce_max(iou_map, axis=-1)
max_indices_each_gt = tf.argmax(iou_map, axis=-1, output_type=tf.int32)
sorted_ids = tf.argsort(merged_iou_map, direction="DESCENDING")
# Add total of true labels for each class to stats
count_holder = tf.unique_with_counts(tf.reshape(gt_labels, (-1,)))
for i, gt_label in enumerate(count_holder[0]):
if gt_label == -1:
continue
# gt_label = int(gt_label)
if int(gt_label) > 0:
gt_label = 1
obj_stats[gt_label]["total"] += int(count_holder[2][i])
for batch_id, m in enumerate(merged_iou_map):
true_labels = []
for i, sorted_id in enumerate(sorted_ids[batch_id]):
pred_label = pred_labels[batch_id, sorted_id]
if pred_label == 0:
continue
iou = merged_iou_map[batch_id, sorted_id]
gt_id = max_indices_each_gt[batch_id, sorted_id]
gt_label = int(gt_labels[batch_id, gt_id])
pred_label = int(pred_label)
score = pred_scores[batch_id, sorted_id]
obj_stats[pred_label]["scores"].append(score)
obj_stats[pred_label]["tp"].append(0)
obj_stats[pred_label]["fp"].append(0)
if int(gt_label) > 0:
gt_label = 1
# correct detection
if iou >= iou_thres and pred_label == gt_label and gt_id not in true_labels:
obj_stats[pred_label]["tp"][-1] = 1
true_labels.append(gt_id)
if mask_eval:
final_gt_mask, final_pred_mask = update_final_masks(final_gt_mask, final_pred_mask, gt_bboxes[batch_id, gt_id],
gt_masks[batch_id, gt_id].numpy(), pred_masks[batch_id, sorted_id],
img_height, img_width)
else:
obj_stats[pred_label]["fp"][-1] = 1
if mask_eval:
affordance_stats = update_stats_affordances(affordance_stats, final_gt_mask, final_pred_mask)
return obj_stats, affordance_stats | c07d57921a6f3f3d2d97c9d84afb5dcbcb885ea6 | 17,390 |
from typing import Dict
from pathlib import Path
import inspect
import json
def load_schema(rel_path: str) -> Dict:
"""
Loads a schema from a relative path of the caller of this function.
:param rel_path: Relative path from the caller. e.g. ../schemas/schema.json
:return: Loaded schema as a `dict`.
"""
caller_path = Path((inspect.stack()[1])[1]).parent
fp = (caller_path / rel_path).resolve()
with open(fp, "r") as fh:
data = json.loads(fh.read())
return data | 297e0e01dd2f4af071ab99ebaf203ddb64525c89 | 17,391 |
def bquantize(x, nsd=3, abstol=eps, reltol=10 * eps):
"""Bidirectionally quantize a 1D vector ``x`` to ``nsd`` signed digits.
This method will terminate early if the error is less than the specified
tolerances.
The quantizer details are repeated here for the user's convenience:
The quantizer is ideal, producing integer outputs centered about zero.
Quantizers with an even number of levels are of the mid-rise type and
produce outputs which are odd integers. Quantizers with an odd number
of levels are of the mid-tread type and produce outputs which are even
integers.
.. image:: ../doc/_static/quantizer_model.png
:align: center
:alt: Quantizer model
**Parameters:**
x : array_like or sequence
the data to be quantized.
nsd : int, optional
The number of signed digits.
abstol and reltol : floats, optional
If not supplied, the absolute tolerance and the relative
tolerance default to ``eps`` and ``10*eps``, resp.
**Returns:**
y : list
List of objects described below.
``y`` is a list of instances with the same length as ``x`` and the
following attributes:
* ``y[i].val`` is the quantized value in floating-point form,
* ``y[i].csd`` is a 2-by-nsd (or less) matrix containing
the powers of two (first row) and their signs (second row).
.. seealso::
:func:`bunquantize`, :func:`ds_quantize`
"""
n = x.shape[0] if isinstance(x, np.ndarray) else len(x)
#q = np.zeros((2*n, nsd)) in the original source #rep?
y = [empty() for i in range(n)]
offset = -np.log2(0.75)
for i in range(n):
xp = x[i]
y[i].val = 0.
y[i].csd = np.zeros((2, 0), dtype='int16')
for _ in range(nsd):
error = np.abs(y[i].val - x[i])
if error <= abstol and error <= np.abs(x[i]) * reltol: # rep? in the orig: or
break
p = mfloor(np.log2(np.abs(xp)) + offset)
p2 = 2 ** p
sx = np.sign(xp)
xp = xp - sx * p2
y[i].val = y[i].val + sx * p2
addme = np.array((p, sx)).reshape((2, 1))
y[i].csd = np.concatenate((y[i].csd, addme), axis=1)
return y | 2a2e5fb71f3198099a07d84e9ad83ba6849b38d0 | 17,392 |
import zoneinfo
def timezone_keys(
*,
# allow_alias: bool = True,
# allow_deprecated: bool = True,
allow_prefix: bool = True,
) -> SearchStrategy[str]:
"""A strategy for :wikipedia:`IANA timezone names <List_of_tz_database_time_zones>`.
As well as timezone names like ``"UTC"``, ``"Australia/Sydney"``, or
``"America/New_York"``, this strategy can generate:
- Aliases such as ``"Antarctica/McMurdo"``, which links to ``"Pacific/Auckland"``.
- Deprecated names such as ``"Antarctica/South_Pole"``, which *also* links to
``"Pacific/Auckland"``. Note that most but
not all deprecated timezone names are also aliases.
- Timezone names with the ``"posix/"`` or ``"right/"`` prefixes, unless
``allow_prefix=False``.
These strings are provided separately from Tzinfo objects - such as ZoneInfo
instances from the timezones() strategy - to facilitate testing of timezone
logic without needing workarounds to access non-canonical names.
.. note::
The :mod:`python:zoneinfo` module is new in Python 3.9, so you will need
to install the :pypi:`backports.zoneinfo` module on earlier versions, and
the :pypi:`importlib_resources` backport on Python 3.6.
``pip install hypothesis[zoneinfo]`` will install these conditional
dependencies if and only if they are needed.
On Windows, you may need to access IANA timezone data via the :pypi:`tzdata`
package. For non-IANA timezones, such as Windows-native names or GNU TZ
strings, we recommend using :func:`~hypothesis.strategies.sampled_from` with
the :pypi:`dateutil` package, e.g. :meth:`dateutil:dateutil.tz.tzwin.list`.
"""
# check_type(bool, allow_alias, "allow_alias")
# check_type(bool, allow_deprecated, "allow_deprecated")
check_type(bool, allow_prefix, "allow_prefix")
if zoneinfo is None: # pragma: no cover
raise ModuleNotFoundError(
"The zoneinfo module is required, but could not be imported. "
"Run `pip install hypothesis[zoneinfo]` and try again."
)
available_timezones = ("UTC",) + tuple(sorted(zoneinfo.available_timezones()))
# TODO: filter out alias and deprecated names if disallowed
# When prefixes are allowed, we first choose a key and then flatmap to get our
# choice with one of the available prefixes. That in turn means that we need
# some logic to determine which prefixes are available for a given key:
def valid_key(key):
return key == "UTC" or _valid_key_cacheable(zoneinfo.TZPATH, key)
# TODO: work out how to place a higher priority on "weird" timezones
# For details see https://github.com/HypothesisWorks/hypothesis/issues/2414
strategy = sampled_from([key for key in available_timezones if valid_key(key)])
if not allow_prefix:
return strategy
def sample_with_prefixes(zone):
keys_with_prefixes = (zone, f"posix/{zone}", f"right/{zone}")
return sampled_from([key for key in keys_with_prefixes if valid_key(key)])
return strategy.flatmap(sample_with_prefixes) | 9faffd54419f82b412dd9114ecfc5b950c985039 | 17,393 |
from scipy.ndimage import generate_binary_structure, maximum_filter, minimum_filter
def seg_to_bdry(seg, connectivity=1):
"""Given a borderless segmentation, return the boundary map."""
strel = generate_binary_structure(seg.ndim, connectivity)
return maximum_filter(seg, footprint=strel) != \
minimum_filter(seg, footprint=strel) | dc4e66a7e6f86d2984a23a2e7a7297403502b51d | 17,394 |
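A tiny sketch of seg_to_bdry above on a two-label segmentation; the array is illustrative.
import numpy as np
seg = np.array([[1, 1, 2, 2],
                [1, 1, 2, 2]])
print(seg_to_bdry(seg).astype(int))
# -> [[0 1 1 0]
#     [0 1 1 0]]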
def depthwise_conv2d(x, filters, strides, padding, data_format="NHWC", dilations=1):
"""Computes a 2-D depthwise convolution given 4-D input x and filters arrays.
Parameters
----------
x
Input image *[batch_size,h,w,d]*.
filters
Convolution filters *[fh,fw,d]*.
strides
The stride of the sliding window for each dimension of input.
padding
SAME" or "VALID" indicating the algorithm, or list indicating the per-dimension
paddings.
data_format
NHWC" or "NCHW". Defaults to "NHWC".
dilations
The dilation factor for each dimension of input. (Default value = 1)
Returns
-------
ret
The result of the convolution operation.
"""
return _cur_framework(x).depthwise_conv2d(
x, filters, strides, padding, data_format, dilations
) | cc09b910d06b8fd9d1b5b00a80c6d376cf7f6005 | 17,395 |
def OUTA():
"""
The OUTA Operation
"""
control_signal = gen_control_signal_dict()
opcode_addr = gen_opcode_addr_component_dict()
mc_step_addr = gen_microcode_step_addr_component_dict()
input_sig_addr = gen_input_signal_addr_component_dict()
templates = []
# Step 2 - A -> OUT
addresses = rom_programmer.combine_address_components([
mc_step_addr[2],
opcode_addr["OUTA"]
])
data = rom_programmer.combine_data_components([
control_signal["A_OUT"],
control_signal["OUT_IN"]
])
templates.append(rom_programmer.DataTemplate(addresses, data))
# Step 3: Reset microcode step
addresses = rom_programmer.combine_address_components([
mc_step_addr[3],
opcode_addr["OUTA"]
])
data = rom_programmer.combine_data_components([
control_signal["STEP_COUNTER_RESET"]
])
templates.append(rom_programmer.DataTemplate(addresses, data))
return templates | 3ebd5e74005316d3925eaa553c112df8a61eaf90 | 17,396 |
import numpy as np
import networkx as nx
def incidence_matrices(G, V, E, faces, edge_to_idx):
"""
Returns incidence matrices B1 and B2
:param G: NetworkX DiGraph
:param V: list of nodes
:param E: list of edges
:param faces: list of faces in G
Returns B1 (|V| x |E|) and B2 (|E| x |faces|)
B1[i][j]: -1 if node i is tail of edge j, 1 if node i is head of edge j, else 0 (tail -> head) (smaller -> larger)
B2[i][j]: 1 if edge i appears sorted in face j, -1 if edge i appears reversed in face j, else 0; given faces with sorted node order
"""
B1 = np.array(nx.incidence_matrix(G, nodelist=V, edgelist=E, oriented=True).todense())
B2 = np.zeros([len(E),len(faces)])
for f_idx, face in enumerate(faces): # face is sorted
edges = [face[:-1], face[1:], [face[0], face[2]]]
e_idxs = [edge_to_idx[tuple(e)] for e in edges]
B2[e_idxs[:-1], f_idx] = 1
B2[e_idxs[-1], f_idx] = -1
return B1, B2 | 90a82132100bb6d2e867ee7460ad55c6891b9082 | 17,397 |
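A minimal sketch of incidence_matrices above on a single triangle; the naming follows the function's expectations (edges as sorted tuples, faces with sorted node order).
V = [0, 1, 2]
E = [(0, 1), (0, 2), (1, 2)]
faces = [[0, 1, 2]]
edge_to_idx = {e: i for i, e in enumerate(E)}
G = nx.DiGraph(E)
B1, B2 = incidence_matrices(G, V, E, faces, edge_to_idx)
print(B1.shape, B2.shape)  # -> (3, 3) (3, 1)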
def get_hosts_ram_total(nova, hosts):
"""Get total RAM (free+used) of hosts.
:param nova: A Nova client
:type nova: *
:param hosts: A set of hosts
:type hosts: list(str)
:return: A dictionary of (host, total_ram)
:rtype: dict(str: *)
"""
hosts_ram_total = dict() #dict of (host, total_ram)
for host in hosts:
data = nova.hosts.get(host)
hosts_ram_total[host] = data[0].memory_mb
return hosts_ram_total | b913f9274339ab3ab976a17a8d07e5fe130b447d | 17,398 |
import re
import unicodedata
def slugify(value):
"""
Normalizes string, converts to lowercase, removes non-ascii characters,
and converts spaces to hyphens. For use in urls and filenames
From Django's "django/template/defaultfilters.py".
"""
_slugify_strip_re = re.compile(r'[^\w\s-]')
_slugify_hyphenate_re = re.compile(r'[-\s]+')
    if not isinstance(value, str):
        value = str(value)
    value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore').decode('ascii')
    value = _slugify_strip_re.sub('', value).strip().lower()
return _slugify_hyphenate_re.sub('-', value) | 471a3205c84baa55573b780375999a7658031b89 | 17,399 |
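A couple of illustrative calls to slugify above.
print(slugify("Hello, World!"))        # -> hello-world
print(slugify("Crème Brûlée Recipe"))  # -> creme-brulee-recipe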