| content (string, 35-762k chars) | sha1 (string, 40 chars) | id (int64, 0-3.66M) |
|---|---|---|
def pending_mediated_transfer(app_chain, token_network_identifier, amount, identifier):
""" Nice to read shortcut to make a LockedTransfer where the secret is _not_ revealed.
    While the secret is not revealed, all apps will be synchronized, meaning
they are all going to receive the LockedTransfer message.
Returns:
The secret used to generate the LockedTransfer
"""
# pylint: disable=too-many-locals
if len(app_chain) < 2:
raise ValueError('Cannot make a LockedTransfer with less than two apps')
target = app_chain[-1].raiden.address
# Generate a secret
initiator_channel = views.get_channelstate_by_token_network_and_partner(
views.state_from_app(app_chain[0]),
token_network_identifier,
app_chain[1].raiden.address,
)
address = initiator_channel.identifier
nonce_int = channel.get_next_nonce(initiator_channel.our_state)
nonce_bytes = nonce_int.to_bytes(2, 'big')
secret = sha3(address + nonce_bytes)
initiator_app = app_chain[0]
init_initiator_statechange = initiator_init(
initiator_app.raiden,
identifier,
amount,
secret,
token_network_identifier,
target,
)
events = initiator_app.raiden.wal.log_and_dispatch(
init_initiator_statechange,
initiator_app.raiden.get_block_number(),
)
send_transfermessage = must_contain_entry(events, SendLockedTransfer, {})
transfermessage = LockedTransfer.from_event(send_transfermessage)
initiator_app.raiden.sign(transfermessage)
for mediator_app in app_chain[1:-1]:
mediator_init_statechange = mediator_init(mediator_app.raiden, transfermessage)
events = mediator_app.raiden.wal.log_and_dispatch(
mediator_init_statechange,
mediator_app.raiden.get_block_number(),
)
send_transfermessage = must_contain_entry(events, SendLockedTransfer, {})
transfermessage = LockedTransfer.from_event(send_transfermessage)
mediator_app.raiden.sign(transfermessage)
target_app = app_chain[-1]
mediator_init_statechange = target_init(transfermessage)
events = target_app.raiden.wal.log_and_dispatch(
mediator_init_statechange,
target_app.raiden.get_block_number(),
)
return secret
|
82ae40ffa45a759f1aac132c3edc221ebd11ae9e
| 20,600 |
def get_comments(post, sort_mode='hot', max_depth=5, max_breadth=5):
"""
Retrieves comments for a post.
:param post: The unique id of a Post from which Comments will be returned.
:type post: `str` or :ref:`Post`
:param str sort_mode: The order that the Posts will be sorted by. Options are: "top" (ranked by upvotes minus downvotes), "best" (similar to top, except that it uses a more complicated algorithm to have good posts jump to the top and stay there, and bad comments to work their way down, see http://blog.reddit.com/2009/10/reddits-new-comment-sorting-system.html), "hot" (similar to "top", but weighted by time so that recent, popular posts are put near the top), "new" (posts will be sorted by creation time).
:param int max_depth: The maximum depth that comments will be retrieved from (i.e., how many descendants from the topmost comment). To go down infinitely, use None.
:param int max_breadth: The maximum breadth that comments will be retrieved from (i.e., how many siblings from the topmost comment). Note that this breadth applies at every subtree - in effect, it is the branching factor. To get all siblings, use None.
:returns: list of Comment
"""
if sort_mode not in SORT_MODES:
raise RedditException("Unknown sort mode: {}".format(sort_mode))
if isinstance(post, Post):
post = post.id
elif not isinstance(post, str):
raise RedditException("The post parameter should be a String or a Post")
result = _get_comments_string(post, sort_mode, max_depth, max_breadth)
if result:
try:
json_result = _from_json(result)[1]['data']['children']
except ValueError:
raise RedditException("The response from the server didn't make any sense.")
if "error" in json_result:
raise RedditException("Error from Reddit: {}".format(json_result.get("error", "Unknown error.")))
if max_breadth is None:
return [Comment._from_json(r, post, max_depth=max_depth-1)
for r in json_result]
else:
return [Comment._from_json(r, post, max_depth=max_depth-1,
max_breadth=max_breadth)
for r in json_result[:max_breadth]]
else:
if _CONNECTED:
raise RedditException("No response from the server.")
else:
raise RedditException("No data was in the cache for this comment.")
|
333009358f622560135e7e239741613356387d55
| 20,601 |
def neighbor_json(json):
"""Read neighbor game from json"""
utils.check(
json['type'].split('.', 1)[0] == 'neighbor', 'incorrect type')
return _NeighborDeviationGame(
gamereader.loadj(json['model']),
num_neighbors=json.get('neighbors', json.get('devs', None)))
|
19891d59970610ad412fd4eb204477c96d1d82fd
| 20,602 |
def get_b16_config():
"""Returns the ViT-B/16 configuration."""
config = ml_collections.ConfigDict()
config.name = 'ViT-B_16'
config.half_precision = True
config.encoder = ml_collections.ConfigDict()
config.encoder.patches = ml_collections.ConfigDict({'size': (16, 16)})
config.encoder.hidden_size = 768
config.encoder.mlp_dim = 3072
config.encoder.num_heads = 12
config.encoder.num_layers = 12
config.encoder.attention_dropout_rate = 0.0
config.encoder.dropout_rate = 0.0
config.encoder.drop_path_rate = 0.0
config.decoder = ml_collections.ConfigDict()
config.decoder.hidden_size = 384
config.decoder.mlp_dim = 1536
config.decoder.num_heads = 6
config.decoder.num_layers = 4
config.decoder.attention_dropout_rate = 0.0
config.decoder.dropout_rate = 0.0
config.decoder.drop_path_rate = 0.0
config.decoder.out_dim = 768
return config
|
6afdb862bd07c21d569db65fbb1780492ff153f2
| 20,603 |
from typing import Container
def build_container_hierarchy(dct):
"""Create a hierarchy of Containers based on the contents of a nested dict.
There will always be a single top level scoping Container regardless of the
contents of dct.
"""
top = Container()
for key,val in dct.items():
if isinstance(val, dict): # it's a dict, so this is a Container
top.add(key, build_container_hierarchy(val))
else:
setattr(top, key, val)
return top
|
7fb629d7f570e5f77b381766b5c2d909d7c0d6c1
| 20,604 |
def occ_frac(stop_rec_range, bin_size_minutes, edge_bins=1):
"""
Computes fractional occupancy in inbin and outbin.
Parameters
----------
stop_rec_range: list consisting of [intime, outtime]
bin_size_minutes: bin size in minutes
edge_bins: 1=fractional, 2=whole bin
Returns
-------
[inbin frac, outbin frac] where each is a real number in [0.0,1.0]
"""
intime = stop_rec_range[0]
outtime = stop_rec_range[1]
bin_freq_str = '{}T'.format(int(bin_size_minutes))
indtbin = intime.floor(bin_freq_str)
outdtbin = outtime.floor(bin_freq_str)
# inbin occupancy
if edge_bins == 1:
right_edge = min(indtbin + timedelta(minutes=bin_size_minutes), outtime)
inbin_occ_secs = (right_edge - intime).total_seconds()
inbin_occ_frac = inbin_occ_secs / (bin_size_minutes * 60.0)
else:
inbin_occ_frac = 1.0
# outbin occupancy
if indtbin == outdtbin:
outbin_occ_frac = 0.0 # Use inbin_occ_frac
else:
if edge_bins == 1:
left_edge = max(outdtbin, intime)
outbin_occ_secs = (outtime - left_edge).total_seconds()
outbin_occ_frac = outbin_occ_secs / (bin_size_minutes * 60.0)
else:
outbin_occ_frac = 1.0
assert 1.0 >= inbin_occ_frac >= 0.0, \
"bad inbin_occ_frac={:.3f} in={} out={}".format(inbin_occ_frac,
intime, outtime)
assert 1.0 >= outbin_occ_frac >= 0.0, \
"bad outbin_occ_frac={:.3f} in={} out={}".format(outbin_occ_frac,
intime, outtime)
return [inbin_occ_frac, outbin_occ_frac]
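# A minimal usage sketch, assuming pandas Timestamps for intime/outtime (they must
# provide .floor()) and that `timedelta` comes from datetime as in the original module.
# With 60-minute bins, a 10:40-11:20 stay occupies 1/3 of the 10:00 bin and 1/3 of the 11:00 bin.
import pandas as pd
from datetime import timedelta

intime = pd.Timestamp('2024-01-01 10:40')
outtime = pd.Timestamp('2024-01-01 11:20')
print(occ_frac([intime, outtime], 60))  # approximately [0.333, 0.333]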
|
d3d93cd92386a98c865c61ad2b595786aa5d4837
| 20,605 |
def geomprogr_mesh(N=None, a=0, L=None, Delta0=None, ratio=None):
"""Compute a sequence of values according to a geometric progression.
Different options are possible with the input number of intervals in the
sequence N, the length of the first interval Delta0, the total length L
and the ratio of the sought geometric progression. Three of them are
requested in input to find a valid sequence. The sequence is drawn within
the points a and b."""
if list(locals().values()).count(None) > 1:
raise ValueError('Insufficient number of input data for a sequence')
if ratio is not None:
if (ratio < 0):
raise ValueError('negative ratio is not valid')
if L is not None:
if (L < 0):
raise ValueError('negative total length is not valid')
if Delta0 is not None:
if (Delta0 < 0):
raise ValueError('negative length of the 1st interval is not valid')
if N is not None:
if (N < 0):
raise ValueError('negative number of intervals is not valid')
if N is None:
if ratio < 1:
N = np.log(1 - L / Delta0 * (1 - ratio)) / np.log(ratio)
else:
N = np.log(1 + L / Delta0 * (ratio - 1)) / np.log(ratio)
elif L is None:
if ratio < 1:
L = Delta0 * (1 - ratio**N) / (1 - ratio)
else:
L = Delta0 * (ratio**N - 1) / (ratio - 1)
elif Delta0 is None:
if not np.isclose(ratio, 1):
Delta0 = L * (1 - ratio) / (1 - ratio**N)
else:
Delta0 = L / float(N)
elif ratio is None:
f = lambda q: q**N - L / Delta0 * q + L / Delta0 - 1
x = L / float(N)
if Delta0 > x:
ratio = brentq(f, 0, 1 - 1.e-6)
elif Delta0 < x:
ratio = brentq(f, 1 + 1.e-6, 20)
else:
ratio = 1
if np.isclose(ratio, 1):
r = np.linspace(0, L, N + 1)
else:
r = np.insert(np.full(N - 1, ratio), 0, 1)
r = np.cumprod(r) * Delta0
r = np.insert(np.cumsum(r), 0, 0)
return r + a
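# A minimal usage sketch, assuming numpy is imported as np as in the original module:
# build 5 intervals from 0 to 1 whose widths grow by a factor of 1.2.
import numpy as np

mesh = geomprogr_mesh(N=5, a=0.0, L=1.0, ratio=1.2)
print(mesh)           # 6 points from 0.0 up to 1.0
print(np.diff(mesh))  # successive interval widths, each ~1.2x the previous one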
|
3de67b8ee2d75b69638648316fcfad07dbabde3a
| 20,606 |
def list_subclasses(package, base_class):
"""
Dynamically import all modules in a package and scan for all subclasses of a base class.
`package`: the package to import
`base_class`: the base class to scan for subclasses
return: a dictionary of possible subclasses with class name as key and class type information as value
"""
import_modules(package)
subclasses = all_subclasses(base_class)
return dict(zip(map(lambda c: c.__name__, subclasses), subclasses))
|
e5570c30c89869b702c1c1015914540403be356f
| 20,607 |
def maxima_in_range(r, g_r, r_min, r_max):
"""Find the maxima in a range of r, g_r values"""
idx = np.where(np.logical_and(np.greater_equal(r, r_min), np.greater_equal(r_max, r)))
g_r_slice = g_r[idx]
g_r_max = g_r_slice[g_r_slice.argmax()]
idx_max, _ = find_nearest(g_r, g_r_max)
return r[idx_max], g_r[idx_max]
|
14a4e3dc65465dd2e515ac09fb74704a366368b4
| 20,608 |
def shared_fit_preprocessing(fit_class):
"""
Shared preprocessing to get X, y, class_order, and row_weights.
Used by _materialize method for both python and R fitting.
:param fit_class: PythonFit or RFit class
:return:
X: pd.DataFrame of features to use in fit
y: pd.Series of target to use in fit
class_order: array specifying class order, or None
row_weights: pd.Series of row weights, or None
"""
# read in data
if fit_class.input_filename.endswith(".mtx"):
colnames = None
if fit_class.sparse_column_file:
colnames = [column.strip() for column in open(fit_class.sparse_column_file).readlines()]
df = pd.DataFrame.sparse.from_spmatrix(mmread(fit_class.input_filename), columns=colnames)
else:
df = pd.read_csv(fit_class.input_filename)
# get num rows to use
if fit_class.num_rows == "ALL":
fit_class.num_rows = len(df)
else:
if fit_class.num_rows > len(df):
raise DrumCommonException(
"Requested number of rows greater than data length {} > {}".format(
fit_class.num_rows, len(df)
)
)
fit_class.num_rows = int(fit_class.num_rows)
# get target and features, resample and modify nrows if needed
if fit_class.target_filename or fit_class.target_name:
if fit_class.target_filename:
y_unsampled = pd.read_csv(fit_class.target_filename, index_col=False)
assert (
len(y_unsampled.columns) == 1
), "Your target dataset at path {} has {} columns named {}".format(
fit_class.target_filename, len(y_unsampled.columns), y_unsampled.columns
)
assert len(df) == len(
y_unsampled
), "Your input data has {} entries, but your target data has {} entries".format(
len(df), len(y_unsampled)
)
if y_unsampled.columns[0] in df.columns:
y_unsampled.columns = ["__target__"]
df = df.merge(y_unsampled, left_index=True, right_index=True)
assert len(y_unsampled.columns.values) == 1
fit_class.target_name = y_unsampled.columns.values[0]
df = df.dropna(subset=[fit_class.target_name])
X = df.drop(fit_class.target_name, axis=1).sample(fit_class.num_rows, random_state=1)
y = df[fit_class.target_name].sample(fit_class.num_rows, random_state=1)
else:
X = df.sample(fit_class.num_rows, random_state=1)
y = None
row_weights = extract_weights(X, fit_class)
class_order = extract_class_order(fit_class)
return X, y, class_order, row_weights
|
b87831540ba6fc4bc65fe0532e2af0574515c3a3
| 20,609 |
import json
def webhook():
"""
Triggers on each GET and POST request. Handles GET and POST requests using this function.
:return: Return status code acknowledge for the GET and POST request
"""
if request.method == 'POST':
data = request.get_json(force=True)
log(json.dumps(data)) # you may not want to log every incoming message in production, but it's good for testing
if data["object"] == "page":
for entry in data["entry"]:
for event in entry["messaging"]:
sender_id = event["sender"]["id"]
if 'message' in event and 'text' in event['message']:
message_text = event["message"]["text"]
if event.get("message").get("quick_reply"):
feedback_payload = event["message"]["quick_reply"]["payload"]
handle_message(feedback_payload, sender_id, message_type="feedback")
else:
handle_message(message_text, sender_id)
if 'postback' in event and 'payload' in event['postback']:
postback_payload = event['postback']['payload']
log(postback_payload)
handle_message(postback_payload, sender_id, message_type="feedback")
if event.get("delivery"):
pass
if event.get("optin"):
pass
return "ok", 200
elif request.method == 'GET': # Verification
if request.args.get("hub.verify_token") == VERIFY_TOKEN:
return request.args.get('hub.challenge'), 200
else:
return 'Error, wrong validation token', 403
|
0c9f39c1159990e6a84dc9ce0091078397a3b65e
| 20,610 |
def extract_winner(state: 'TicTacToeState') -> str:
"""
Return the winner of the game, or announce if the game resulted in a
tie.
"""
winner = 'No one'
tictactoe = TicTacToeGame(True)
tictactoe.current_state = state
if tictactoe.is_winner('O'):
winner = 'O'
elif tictactoe.is_winner('X'):
winner = 'X'
return winner
|
c92cef3bc3214923107871d5f044df16baf63401
| 20,611 |
def _prensor_value_fetch(prensor_tree: prensor.Prensor):
"""Fetch function for PrensorValue. See the document in session_lib."""
# pylint: disable=protected-access
type_spec = prensor_tree._type_spec
components = type_spec._to_components(prensor_tree)
def _construct_prensor_value(component_values):
return _prensor_value_from_type_spec_and_component_values(
type_spec, iter(component_values))
return components, _construct_prensor_value
|
ccea4a94fff5f17c6e650e1ac820ec6da1be023d
| 20,612 |
import subprocess
def start_workers_with_fabric():
""" testing spinning up workers using fabric """
tmp_file = open(settings.AUTOSCALE_TMP_FILE, 'w')
tmp_file.write('running')
tmp_file.close()
subprocess.call("/usr/local/bin/fab \
-f /opt/codebase/auto-scale/fabfile.py \
create_multiple_workers",
shell=True)
return True
|
8d6044c07ff0b92f0bd56d1793f0d7bae16d86dd
| 20,613 |
def request_validation_error(error):
"""Handles Value Errors from bad data"""
message = str(error)
app.logger.error(message)
return {
'status_code': status.HTTP_400_BAD_REQUEST,
'error': 'Bad Request',
'message': message
}, status.HTTP_400_BAD_REQUEST
|
1d5c779286d83d756e1d73201f1274dbec7cf84b
| 20,614 |
def all(request):
"""Handle places list page."""
places = Place.objects.all()
context = {'places': places}
return render(request, 'rental/list_place.html', context)
|
d978a4ec22004a1a863e57113639722eaf1f02cf
| 20,615 |
def get_key_by_value(dictionary, search_value):
"""
    Searches for a value in a dictionary and returns the key of the first occurrence.
:param dictionary: dictionary to search in
:param search_value: value to search for
"""
for key, value in dictionary.iteritems():
if value == search_value:
return ugettext(key)
|
febad38e70c973de23ce4e1a5702df92860a6c2e
| 20,616 |
def _subtract_ten(x):
"""Subtracts 10 from x using control flow ops.
This function is equivalent to "x - 10" but uses a tf.while_loop, in order
to test the use of functions that involve control flow ops.
Args:
x: A tensor of integral type.
Returns:
A tensor representing x - 10.
"""
def stop_condition(counter, x_minus_counter):
del x_minus_counter # unused
return tf.less(counter, 10)
def iteration(counter, x_minus_counter):
return tf.add(counter, 1), tf.add(x_minus_counter, -1)
initial_values = [tf.constant(0), x]
return tf.while_loop(stop_condition, iteration, initial_values)[1]
|
f2db402e5c98251dc93036be60f02eb88a4d13d9
| 20,617 |
def load_fortune_file(f: str) -> list:
"""
    Load fortunes from a file and return them as a list.
"""
saved = []
try:
with open(f, 'r') as datfile:
text = datfile.read()
for line in text.split('%'):
if len(line.strip()) > 0:
saved.append(line)
    except OSError:
        app.logger.warning('failed to process file: {}'.format(f))
    return saved
|
824ddb0bcb34abf597fb317d10fa3eeab99a292e
| 20,618 |
def maskStats(wins, last_win, mask, maxLen):
"""
    Return a three-element list: the first element is the total proportion of the window that is masked,
    the second element is a list of masked positions relative to the window (start=0, end=window length),
    and the third is the last window index before breaking, to expedite the next loop.
"""
chrom = wins[0].split(":")[0]
a = wins[1]
L = wins[2]
b = a + L
prop = [0.0,[],0]
try:
for i in range(last_win, len(mask[chrom])):
x, y = mask[chrom][i][0], mask[chrom][i][1]
if y < a:
continue
if b < x:
return prop
else: # i.e. [a--b] and [x--y] overlap
if a >= x and b <= y:
return [1.0, [[0,maxLen]], i]
elif a >= x and b > y:
win_prop = (y-a)/float(b-a)
prop[0] += win_prop
prop[1].append([0,int(win_prop * maxLen)])
prop[2] = i
elif b <= y and a < x:
win_prop = (b-x)/float(b-a)
prop[0] += win_prop
prop[1].append([int((1-win_prop)*maxLen),maxLen])
prop[2] = i
else:
win_prop = (y-x)/float(b-a)
prop[0] += win_prop
prop[1].append([int(((x-a)/float(b-a))*maxLen), int(((y-a)/float(b-a))*maxLen)])
prop[2] = i
return prop
except KeyError:
return prop
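# A minimal usage sketch with hypothetical inputs: a 100 bp window chr1:100-200 whose
# first half overlaps a masked interval [0, 150). The mask layout {chrom: [[start, end], ...]}
# is assumed from how the function indexes it.
wins = ["chr1:100-200", 100, 100]   # (label, window start a, window length L)
mask = {"chr1": [[0, 150]]}
print(maskStats(wins, 0, mask, maxLen=100))  # [0.5, [[0, 50]], 0]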
|
b5d75d2e86f1b21bf35cbc69d360cd1639c5527b
| 20,619 |
def dsoftmax(Z):
"""Given a (m,n) matrix, returns a (m,n,n) jacobian matrix"""
m,n=np.shape(Z)
softZ=(softmax(Z))
prodtensor=np.einsum("ij,ik->ijk",softZ,softZ)
diagtensor=np.einsum('ij,jk->ijk', softZ, np.eye(n, n))
return diagtensor-prodtensor
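# A quick numerical check of the Jacobian above. `softmax` is not shown in this snippet,
# so a row-wise softmax is assumed here; each row of the per-sample Jacobian sums to zero.
import numpy as np

def softmax(Z):
    e = np.exp(Z - Z.max(axis=1, keepdims=True))
    return e / e.sum(axis=1, keepdims=True)

Z = np.random.randn(2, 4)
J = dsoftmax(Z)
print(J.shape)                        # (2, 4, 4)
print(np.allclose(J.sum(axis=2), 0))  # True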
|
15296d493608dac1fc9843dd8a7d6eaaf29c4839
| 20,620 |
async def vbd_unplug(cluster_id: str, vbd_uuid: str):
"""Unplug from VBD"""
try:
session = create_session(
_id=cluster_id, get_xen_clusters=Settings.get_xen_clusters()
)
vbd: VBD = VBD.get_by_uuid(session=session, uuid=vbd_uuid)
if vbd is not None:
ret = dict(success=vbd.unplug())
else:
ret = dict(success=False)
session.xenapi.session.logout()
return ret
except Failure as xenapi_error:
raise HTTPException(
status_code=500, detail=xenapi_failure_jsonify(xenapi_error)
)
except Fault as xml_rpc_error:
raise HTTPException(
status_code=int(xml_rpc_error.faultCode),
detail=xml_rpc_error.faultString,
)
except RemoteDisconnected as rd_error:
raise HTTPException(status_code=500, detail=rd_error.strerror)
|
8b36c55354b35470bceb47ef212aa183be09fad4
| 20,621 |
def calculate_age(created, now):
"""
    Prepare a Docker CLI-like output of image age.
After researching `datetime`, `dateutil` and other libraries
I decided to do this manually to get as close as possible to
Docker CLI output.
`created` and `now` are both datetime.datetime objects.
"""
age = {}
rdelta = relativedelta.relativedelta(now, created)
difference = now - created
if rdelta.years > 0:
age['number'] = rdelta.years
age['unit'] = 'years'
elif rdelta.years == 0 and difference >= timedelta(days=60):
age['number'] = rdelta.months
age['unit'] = 'months'
elif rdelta.years == 0 and difference < timedelta(days=60) and difference >= timedelta(days=14):
days = 0
if rdelta.months == 1:
days = 30
days += rdelta.days
weeks = round(days / 7)
age['number'] = weeks
age['unit'] = 'weeks'
elif rdelta.years == 0 and difference < timedelta(days=14) and difference >= timedelta(days=1):
age['number'] = rdelta.days
age['unit'] = 'days'
elif rdelta.years == 0 and difference < timedelta(days=1) and rdelta.hours >= 1:
age['number'] = rdelta.hours
age['unit'] = 'hours'
elif rdelta.years == 0 and difference < timedelta(days=1) and rdelta.hours < 1 and rdelta.minutes > 0:
age['number'] = rdelta.minutes
age['unit'] = 'minutes'
elif rdelta.years == 0 and difference < timedelta(days=1) and rdelta.hours < 1 and rdelta.minutes <= 0 and rdelta.seconds > 0:
age['number'] = rdelta.seconds
age['unit'] = 'seconds'
elif rdelta.years == 0 and difference < timedelta(days=1) and rdelta.hours < 1 and rdelta.minutes <= 0 and rdelta.seconds <= 0:
age['number'] = 1
age['unit'] = 'second'
else:
raise DkrlsError(f'Encountered age of an image which this CLI can\'t handle: {rdelta}')
return age
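# A minimal usage sketch, assuming `relativedelta` comes from dateutil and `timedelta`
# from datetime as in the original module.
from datetime import datetime, timedelta
from dateutil import relativedelta

created = datetime(2021, 1, 1, 12, 0)
now = datetime(2021, 1, 4, 12, 0)
print(calculate_age(created, now))  # {'number': 3, 'unit': 'days'}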
|
f2b1a6fc643a78c9a2d3cdd0f497e05c3294eb03
| 20,622 |
def Maxout(x, num_unit):
"""
Maxout as in the paper `Maxout Networks <http://arxiv.org/abs/1302.4389>`_.
Args:
x (tf.Tensor): a NHWC or NC tensor. Channel has to be known.
num_unit (int): a int. Must be divisible by C.
Returns:
tf.Tensor: of shape NHW(C/num_unit) named ``output``.
"""
input_shape = x.get_shape().as_list()
ndim = len(input_shape)
assert ndim == 4 or ndim == 2
ch = input_shape[-1]
assert ch is not None and ch % num_unit == 0
if ndim == 4:
x = tf.reshape(x, [-1, input_shape[1], input_shape[2], ch / num_unit, num_unit])
else:
x = tf.reshape(x, [-1, ch / num_unit, num_unit])
return tf.reduce_max(x, ndim, name='output')
|
d10294d7ad180b47c4276e3bb0f43e7ac4a9fa3b
| 20,623 |
import re
def is_youtube_url(url: str) -> bool:
"""Checks if a string is a youtube url
Args:
url (str): youtube url
Returns:
bool: true of false
"""
match = re.match(r"^(https?\:\/\/)?(www\.youtube\.com|youtu\.be)\/.+$", url)
return bool(match)
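# Quick sanity check of the pattern above:
print(is_youtube_url("https://www.youtube.com/watch?v=dQw4w9WgXcQ"))  # True
print(is_youtube_url("https://example.com/video"))                    # False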
|
97536b8e7267fb5a72c68f242b3f5d6cbd1b9492
| 20,624 |
def time_nanosleep():
""" Delay for a number of seconds and nanoseconds"""
    raise NotImplementedError()
|
9ec91f2ef2656b5a481425dc65dc9f81a07386c2
| 20,625 |
import os
def get_regions(positions, genome_file, base=0, count=7):
"""Return a list of regions surrounding a position.
Will loop through each chromosome and search all positions in that
chromosome in one batch. Lookup is serial per chromosome.
Args:
positions (dict): Dictionary of {chrom->positons}
genome_file (str): Location of a genome fasta file or directory of
files. If directory, file names must be
<chrom_name>.fa[.gz]. Gzipped OK.
base (int): Either 0 or 1, base of positions in your list
count (int): Distance + and - the position to extract
Returns:
dict: {chrom->{postion->sequence}}
"""
# If genome file is a directory, use recursion! Because why not.
if os.path.isdir(genome_file):
chroms = positions.keys()
files = []
for chrom in chroms:
files.append(get_fasta_file(genome_file, chrom))
final = {}
for chrom, fl in zip(chroms, files):
final.update(
get_dinucleotides({chrom: positions[chrom]}, fl, base, count)
)
return final
done = []
results = {}
with open_zipped(genome_file) as fasta_file:
for chrom in seqio.parse(fasta_file, 'fasta'):
if chrom.id not in positions:
continue
else:
done.append(chrom.id)
results[chrom.id] = {}
for pos in positions[chrom.id]:
ps = pos-base # Correct base-1 positions here
region = seq(chrom[ps-count:ps+count+1])
results[chrom.id][pos] = region
if len(done) != len(positions.keys()):
print('The following chromosomes were not in files: {}'
.format([i for i in positions if i not in done]))
return results
|
4371e5a8eb51fd8303636147364e0ebc09865312
| 20,626 |
import jinja2
def render_series_fragment(site_config):
"""
Adds "other posts in this series" fragment to series posts.
"""
series_fragment = open("_includes/posts_in_series.html", "r").read()
for post_object in site_config["series_posts"]:
print("Generating 'Other posts in this series' fragment for " + post_object[1])
category, post_name, page_url = post_object
loader = jinja2.FileSystemLoader(searchpath="./")
template = jinja2.Environment(loader=loader)
rendered_series_text = template.from_string(series_fragment)
posts_to_show = site_config["categories"].get(category)
see_more_link = False
if len(posts_to_show) > 10:
see_more_link = True
category_slug = (
category.replace(" ", "-").lower().replace("(", "").replace(")", "")
)
rendered_series_text = rendered_series_text.render(
posts_in_series=posts_to_show[:10],
see_more_link=see_more_link,
site=site_config,
category_slug=category_slug,
page={"url": page_url},
)
year_month_date = "/".join(post_name.split("-")[:3]) + "/"
post_name = (
"-".join(post_name.split("-")[3:]).replace(".md", "").replace(".html", "")
)
with open(OUTPUT + year_month_date + post_name + "/index.html", "r") as file:
file_content = file.read()
file_content = file_content.replace(
"<!--- posts_in_series -->", rendered_series_text
)
with open(OUTPUT + year_month_date + post_name + "/index.html", "w") as file:
file.write(file_content)
return series_fragment
|
6cf947148af2978e926d51e9007684b9580d2cb0
| 20,627 |
import os
def create_barplot_orthologues_by_species(df, path, title, colormap, genes, species):
"""
The function creates a bar plot using seaborn.
:param df: pandas.DataFrame object
:param path: The CSV file path.
:param title: Title for the plot.
:param colormap: Colormap
:param genes: Ordered list of genes.
:param species: Ordered list of species.
:return:
"""
print("Creating barplot by species for {}".format(path))
output_path = os.path.dirname(path)
output = join_folder(output_path, "%s_barplot_byspecies.png" % title)
fig = plt.figure(figsize=(16, 10), dpi=180)
sns.barplot(x='Species', y='Orthologues', hue='Gene Name', data=df, order=species, hue_order=genes,
palette=colormap)
plt.ylabel("#Orthologues")
plt.xlabel("Species")
plt.ylim(0, )
# plt.suptitle(title, fontsize=16)
plt.yticks(fontsize=10)
plt.xticks(fontsize=10)
plt.savefig(output)
plt.close()
return output
|
0d1f23dba45095a85f87db0c5112c110e2ebcf0c
| 20,628 |
def get_class_by_name(name):
"""Gets a class object by its name, e.g. sklearn.linear_model.LogisticRegression"""
if name.startswith('cid.analytics'):
# We changed package names in March 2017. This preserves compatibility with old models.
name = name.replace('cid.analytics', 'analytics.core')
elif name.startswith('cid.'):
name = name.replace('cid.', 'analytics.')
module, class_name = name.rsplit('.', 1)
return getattr(import_module(module), class_name)
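# A minimal usage sketch, assuming `import_module` comes from importlib as in the
# original module; any dotted path outside the legacy cid.* namespaces works.
from importlib import import_module
import collections

cls = get_class_by_name('collections.OrderedDict')
print(cls is collections.OrderedDict)  # True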
|
bf52eb8472e63cbb453183b57c5275d592665fc9
| 20,629 |
import functools
def _single_optimize(
direction,
criterion,
criterion_kwargs,
params,
algorithm,
constraints,
algo_options,
derivative,
derivative_kwargs,
criterion_and_derivative,
criterion_and_derivative_kwargs,
numdiff_options,
logging,
log_options,
error_handling,
error_penalty,
cache_size,
scaling_options,
):
"""Minimize or maximize *criterion* using *algorithm* subject to *constraints*.
See the docstring of ``_optimize`` for an explanation of all arguments.
Returns:
dict: The optimization result.
"""
# store all arguments in a dictionary to save them in the database later
problem_data = {
"direction": direction,
# "criterion"-criterion,
"criterion_kwargs": criterion_kwargs,
"algorithm": algorithm,
"constraints": constraints,
"algo_options": algo_options,
# "derivative"-derivative,
"derivative_kwargs": derivative_kwargs,
# "criterion_and_derivative"-criterion_and_derivative,
"criterion_and_derivative_kwargs": criterion_and_derivative_kwargs,
"numdiff_options": numdiff_options,
"log_options": log_options,
"error_handling": error_handling,
"error_penalty": error_penalty,
"cache_size": int(cache_size),
}
# partial the kwargs into corresponding functions
criterion = functools.partial(criterion, **criterion_kwargs)
if derivative is not None:
derivative = functools.partial(derivative, **derivative_kwargs)
if criterion_and_derivative is not None:
criterion_and_derivative = functools.partial(
criterion_and_derivative, **criterion_and_derivative_kwargs
)
# process params and constraints
params = add_default_bounds_to_params(params)
for col in ["value", "lower_bound", "upper_bound"]:
params[col] = params[col].astype(float)
check_params_are_valid(params)
# calculate scaling factor and offset
if scaling_options not in (None, {}):
scaling_factor, scaling_offset = calculate_scaling_factor_and_offset(
params=params,
constraints=constraints,
criterion=criterion,
**scaling_options,
)
else:
scaling_factor, scaling_offset = None, None
# name and group column are needed in the dashboard but could lead to problems
# if present anywhere else
params_with_name_and_group = _add_name_and_group_columns_to_params(params)
problem_data["params"] = params_with_name_and_group
params_to_internal, params_from_internal = get_reparametrize_functions(
params=params,
constraints=constraints,
scaling_factor=scaling_factor,
scaling_offset=scaling_offset,
)
# get internal parameters and bounds
x = params_to_internal(params["value"].to_numpy())
lower_bounds, upper_bounds = get_internal_bounds(
params=params,
constraints=constraints,
scaling_factor=scaling_factor,
scaling_offset=scaling_offset,
)
# process algorithm and algo_options
if isinstance(algorithm, str):
algo_name = algorithm
else:
algo_name = getattr(algorithm, "name", "your algorithm")
if isinstance(algorithm, str):
try:
algorithm = AVAILABLE_ALGORITHMS[algorithm]
except KeyError:
proposed = propose_algorithms(algorithm, list(AVAILABLE_ALGORITHMS))
raise ValueError(
f"Invalid algorithm: {algorithm}. Did you mean {proposed}?"
) from None
algo_options = _adjust_options_to_algorithms(
algo_options, lower_bounds, upper_bounds, algorithm, algo_name
)
# get convert derivative
convert_derivative = get_derivative_conversion_function(
params=params,
constraints=constraints,
scaling_factor=scaling_factor,
scaling_offset=scaling_offset,
)
# do first function evaluation
first_eval = {
"internal_params": x,
"external_params": params,
"output": criterion(params),
}
# fill numdiff_options with defaults
numdiff_options = _fill_numdiff_options_with_defaults(
numdiff_options, lower_bounds, upper_bounds
)
# create and initialize the database
if not logging:
database = False
else:
database = _create_and_initialize_database(
logging, log_options, first_eval, problem_data
)
# set default error penalty
error_penalty = _fill_error_penalty_with_defaults(
error_penalty, first_eval, direction
)
# create cache
x_hash = hash_array(x)
cache = {x_hash: {"criterion": first_eval["output"]}}
# partial the internal_criterion_and_derivative_template
internal_criterion_and_derivative = functools.partial(
internal_criterion_and_derivative_template,
direction=direction,
criterion=criterion,
params=params,
reparametrize_from_internal=params_from_internal,
convert_derivative=convert_derivative,
derivative=derivative,
criterion_and_derivative=criterion_and_derivative,
numdiff_options=numdiff_options,
database=database,
database_path=logging,
log_options=log_options,
error_handling=error_handling,
error_penalty=error_penalty,
first_criterion_evaluation=first_eval,
cache=cache,
cache_size=cache_size,
)
res = algorithm(internal_criterion_and_derivative, x, **algo_options)
p = params.copy()
p["value"] = params_from_internal(res["solution_x"])
res["solution_params"] = p
if "solution_criterion" not in res:
res["solution_criterion"] = criterion(p)
if direction == "maximize":
res["solution_criterion"] = -res["solution_criterion"]
# in the long run we can get some of those from the database if logging was used.
optional_entries = [
"solution_derivative",
"solution_hessian",
"n_criterion_evaluations",
"n_derivative_evaluations",
"n_iterations",
"success",
"reached_convergence_criterion",
"message",
]
for entry in optional_entries:
res[entry] = res.get(entry, f"Not reported by {algo_name}")
if logging:
_log_final_status(res, database, logging, log_options)
return res
|
9f349f8e1124da3a2747b3880969a90e76aad52a
| 20,630 |
def item_len(item):
"""return length of the string format of item"""
return len(str(item))
|
7d68629a5c2ae664d267844fc90006a7f23df1ba
| 20,631 |
def get_progress_logger():
"""Returns the swift progress logger"""
return progress_logger
|
b1c0e8e206e2f051dcb97337dc51d4971fe0aa8b
| 20,632 |
import sys
import time
def retry(ExceptionToCheck, tries=4, delay=3, backoff=2, logger=None):
"""Retry calling the decorated function using an exponential backoff.
http://www.saltycrane.com/blog/2009/11/trying-out-retry-decorator-python/
original from: http://wiki.python.org/moin/PythonDecoratorLibrary#Retry
:param ExceptionToCheck: the exception to check. may be a tuple of
exceptions to check
:type ExceptionToCheck: Exception or tuple
:param tries: number of times to try (not retry) before giving up
:type tries: int
:param delay: initial delay between retries in seconds
:type delay: int
:param backoff: backoff multiplier e.g. value of 2 will double the delay
each retry
:type backoff: int
:param logger: logger to use. If None, print
:type logger: logging.Logger instance
"""
def deco_retry(f):
@wraps(f)
def f_retry(*args, **kwargs):
mtries, mdelay = tries, delay
while mtries > 1:
try:
return f(*args, **kwargs)
except ExceptionToCheck as e:
msg = "%s, Retrying in %d seconds..." % (str(e), mdelay)
if logger:
logger.warning(msg)
else:
sys.stderr.write(msg + '\n')
time.sleep(mdelay)
mtries -= 1
mdelay *= backoff
try:
return f(*args, **kwargs)
except ExceptionToCheck as e:
msg = "Failed last attempt %s, %s %s" % (str(e), str(args), str(kwargs))
if logger:
logger.warning(msg)
else:
sys.stderr.write(msg + "\n")
raise
return f_retry # true decorator
return deco_retry
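# A minimal usage sketch: the wrapped function fails twice, then succeeds on the third
# call; the delay doubles after each failure. (`wraps` comes from functools in the
# original module.)
from functools import wraps

calls = {"n": 0}

@retry(ValueError, tries=3, delay=0.01, backoff=2)
def flaky():
    calls["n"] += 1
    if calls["n"] < 3:
        raise ValueError("not yet")
    return "ok"

print(flaky())  # "ok" after two retries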
|
1ff65dbc0999f6ea129a8cf354edad54052b0156
| 20,633 |
def instantiate_me(spec2d_files, spectrograph, **kwargs):
"""
Instantiate the CoAdd2d subclass appropriate for the provided
spectrograph.
The class must be subclassed from Reduce. See :class:`Reduce` for
the description of the valid keyword arguments.
Args:
spectrograph
(:class:`pypeit.spectrographs.spectrograph.Spectrograph`):
The instrument used to collect the data to be reduced.
tslits_dict: dict
dictionary containing slit/order boundary information
tilts (np.ndarray):
Returns:
:class:`PypeIt`: One of the classes with :class:`PypeIt` as its
base.
"""
indx = [ c.__name__ == (spectrograph.pypeline + 'Coadd2d') for c in Coadd2d.__subclasses__() ]
if not np.any(indx):
msgs.error('Pipeline {0} is not defined!'.format(spectrograph.pypeline))
return Coadd2d.__subclasses__()[np.where(indx)[0][0]](spec2d_files, spectrograph, **kwargs)
|
f9961231ead7c3ece5757e5b18dc5620a3492a40
| 20,634 |
def quoteattr(s, table=ESCAPE_ATTR_TABLE):
"""Escape and quote an attribute value.
"""
for c, r in table:
if c in s:
s = s.replace(c, r)
return '"%s"' % s
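# A quick check with a hypothetical escape table (ESCAPE_ATTR_TABLE itself is defined
# elsewhere in the original module):
table = [('&', '&amp;'), ('<', '&lt;'), ('>', '&gt;'), ('"', '&quot;')]
print(quoteattr('a < "b" & c', table=table))  # "a &lt; &quot;b&quot; &amp; c"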
|
7af3e8ed6bfc0c23a957881ca41065d24cb288d5
| 20,635 |
def is_numeric(array):
"""Return False if any value in the array or list is not numeric
Note boolean values are taken as numeric"""
    for i in array:
        try:
            float(i)
        except ValueError:
            return False
    return True
|
2ab0bb3e6c35e859e54e435671b5525c6392f66c
| 20,636 |
def reductions_right(collection, callback=None, accumulator=None):
"""This method is like :func:`reductions` except that it iterates over
elements of a `collection` from right to left.
Args:
collection (list|dict): Collection to iterate over.
callback (mixed): Callback applied per iteration.
accumulator (mixed, optional): Initial value of aggregator. Default is
to use the result of the first iteration.
Returns:
list: Results of each reduction operation.
Example:
>>> reductions_right([1, 2, 3, 4], lambda total, x: total ** x)
[64, 4096, 4096]
Note:
The last element of the returned list would be the result of using
:func:`reduce_`.
.. versionadded:: 2.0.0
"""
return reductions(collection, callback, accumulator, from_right=True)
|
eba2de662a6386d609da8cf3011010ae822c0440
| 20,637 |
import math
def pelt_settling_time(margin=1, init=0, final=PELT_SCALE, window=PELT_WINDOW, half_life=PELT_HALF_LIFE, scale=PELT_SCALE):
"""
Compute an approximation of the PELT settling time.
:param margin: How close to the final value we want to get, in PELT units.
:type margin_pct: float
:param init: Initial PELT value.
:type init: float
:param final: Final PELT value.
:type final: float
:param window: PELT window in seconds.
:type window: float
:param half_life: PELT half life, in number of windows.
:type half_life: int
:param scale: PELT scale.
:type scale: float
.. note:: The PELT signal is approximated as a first order filter. This
does not take into account the averaging inside a window, but the
window is small enough in practice for that effect to be negligible.
"""
tau = _pelt_tau(half_life, window)
# Response of a first order low pass filter:
# y(t) = u(t) * (1 - exp(-t/tau))
# We want to find `t` such as the output y(t) is as close as we want from
# the input u(t):
# A * u(t) = u(t) * (1 - exp(-t/tau))
# A is how close from u(t) we want the output to get after a time `t`
# From which follows:
# A = (1 - exp(-t/tau))
# t = -tau * log(1-A)
# Since the equation we have is for a step response, i.e. from 0 to a final
# value
delta = abs(final - init)
# Since margin and delta are in the same unit, we don't have to normalize
# them to `scale` first.
relative_margin = (margin / delta)
A = 1 - relative_margin
settling_time = - tau * math.log(1 - A)
return settling_time
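# `_pelt_tau` and the PELT_* constants are defined elsewhere in the original module.
# A minimal sketch under the first-order-filter assumption stated in the docstring:
# a decay with the given half-life (in windows) has time constant
# tau = half_life * window / ln(2).
def _pelt_tau(half_life, window):
    return half_life * window / math.log(2)

# Hypothetical values: 1024 us window, half life of 32 windows, scale 1024.
# Settling from 0 to 1024 within a margin of 10.24 (1%) takes about 0.22 s.
print(pelt_settling_time(margin=10.24, init=0, final=1024,
                         window=1024e-6, half_life=32, scale=1024))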
|
c8d53d1132bc45278f2c127ed95ce10cfea0498b
| 20,638 |
import urllib
import sys
def get_file_content(url, comes_from=None):
"""Gets the content of a file; it may be a filename, file: URL, or
http: URL. Returns (location, content). Content is unicode."""
match = _scheme_re.search(url)
if match:
scheme = match.group(1).lower()
if (scheme == 'file' and comes_from
and comes_from.startswith('http')):
raise InstallationError(
'Requirements file %s references URL %s, which is local'
% (comes_from, url))
if scheme == 'file':
path = url.split(':', 1)[1]
path = path.replace('\\', '/')
match = _url_slash_drive_re.match(path)
if match:
path = match.group(1) + ':' + path.split('|', 1)[1]
path = urllib.unquote(path)
if path.startswith('/'):
path = '/' + path.lstrip('/')
url = path
else:
## FIXME: catch some errors
resp = urlopen(url)
encoding = get_http_message_param(resp.headers, 'charset', 'utf-8')
return geturl(resp), resp.read().decode(encoding)
try:
f = open(url)
content = f.read()
except IOError:
e = sys.exc_info()[1]
raise InstallationError('Could not open requirements file: %s' % str(e))
else:
f.close()
return url, content
|
7f87b75459219da800f5a665c69b4842735e42d9
| 20,639 |
def InstancesOverlap(instanceList,instance):
"""Returns True if instance contains a vertex that is contained in an instance of the given instanceList."""
for instance2 in instanceList:
if InstanceOverlap(instance,instance2):
return True
return False
|
634312b7e8d2ce4e36826410fcd1f6c3c06a40ce
| 20,640 |
def calc_qm_lea(p_zone_ref, temp_zone, temp_ext, u_wind_site, dict_props_nat_vent):
"""
Calculation of leakage infiltration and exfiltration air mass flow as a function of zone indoor reference pressure
:param p_zone_ref: zone reference pressure (Pa)
:param temp_zone: air temperature in ventilation zone (°C)
:param temp_ext: exterior air temperature (°C)
:param u_wind_site: wind velocity (m/s)
:param dict_props_nat_vent: dictionary containing natural ventilation properties of zone
:returns: - qm_lea_in : air mass flow rate into zone through leakages (kg/h)
- qm_lea_out : air mass flow rate out of zone through leakages (kg/h)
"""
# get default leakage paths from locals
coeff_lea_path = dict_props_nat_vent['coeff_lea_path']
height_lea_path = dict_props_nat_vent['height_lea_path']
# lookup wind pressure coefficients for leakage paths from locals
coeff_wind_pressure_path = dict_props_nat_vent['coeff_wind_pressure_path_lea']
# calculation of pressure difference at leakage path
delta_p_path = calc_delta_p_path(p_zone_ref, height_lea_path, temp_zone, coeff_wind_pressure_path, u_wind_site,
temp_ext)
# calculation of leakage air volume flow at path
qv_lea_path = calc_qv_lea_path(coeff_lea_path, delta_p_path)
# Eq. (65) in [1], infiltration is sum of air flows greater zero
qv_lea_in = qv_lea_path[np.where(qv_lea_path > 0)].sum()
# Eq. (66) in [1], exfiltration is sum of air flows smaller zero
qv_lea_out = qv_lea_path[np.where(qv_lea_path < 0)].sum()
# conversion to air mass flows according to 6.4.3.8 in [1]
# Eq. (67) in [1]
qm_lea_in = qv_lea_in * calc_rho_air(temp_ext)
# Eq. (68) in [1]
qm_lea_out = qv_lea_out * calc_rho_air(temp_zone)
return qm_lea_in, qm_lea_out
|
4d3f4789b3faedf68b9de3b3e6c8f17bcb478a51
| 20,641 |
async def ban(bon):
""" For .ban command, bans the replied/tagged person """
# Here laying the sanity check
chat = await bon.get_chat()
admin = chat.admin_rights
creator = chat.creator
# Well
if not (admin or creator):
return await bon.edit(NO_ADMIN)
user, reason = await get_user_from_event(bon)
if not user:
return
# Announce that we're going to whack the pest
    await bon.edit("**Banning...**")
try:
await bon.client(EditBannedRequest(bon.chat_id, user.id, BANNED_RIGHTS))
except BadRequestError:
return await bon.edit(NO_PERM)
# Helps ban group join spammers more easily
try:
reply = await bon.get_reply_message()
if reply:
await reply.delete()
except BadRequestError:
        return await bon.edit(
            "**I don't have rights to delete messages, but the user was banned!**"
        )
# Delete message and then tell that the command
# is done gracefully
# Shout out the ID, so that fedadmins can fban later
    if reason:
        await bon.edit(f"**{str(user.id)}** has been banned!\nReason: {reason}")
    else:
        await bon.edit(f"**{str(user.id)}** has been banned!")
# Announce to the logging group if we have banned the person
# successfully!
if BOTLOG:
await bon.client.send_message(
            BOTLOG_CHATID,
            "#BAN\n"
            f"USER: [{user.first_name}](tg://user?id={user.id})\n"
f"CHAT: {bon.chat.title}(`{bon.chat_id}`)",
)
|
f79f16c5e2722f576511a528f546a7f87f7e5236
| 20,642 |
def read_offset(rt_info):
"""
获取所有分区的offset
:param rt_info: rt的详细信息
:return: offset_msgs 和 offset_info
"""
rt_id = rt_info[RESULT_TABLE_ID]
task_config = get_task_base_conf_by_name(f"{HDFS}-table_{rt_id}")
if not task_config:
return {}
try:
partition_num = task_config[TASKS_MAX]
webhdfs_addr = _get_webhdfs_addr_by_rt(rt_info)
offset_dir = get_offset_dir(
webhdfs_addr, task_config[GROUP_ID], task_config[NAME], task_config[TOPICS_DIR], partition_num
)
offset_msgs = {}
if offset_dir:
for p in range(partition_num):
files = _get_hdfs_dir_files(webhdfs_addr, f"{offset_dir}/{p}")
offset = get_max_offset(files) if files else "-1"
topic_partition = f"table_{rt_id}-{p}"
offset_msgs[topic_partition] = offset
logger.info(f"rt {rt_id} get offset_msgs from hdfs offset dir: {offset_msgs}")
return offset_msgs
except Exception:
logger.warning(f"failed to get offset_msgs for rt {rt_id}", exc_info=True)
return {}
|
cc890301d4403a7815480ad0b414e16e26283fa7
| 20,643 |
def _CalculateElementMaxNCharge(mol,AtomicNum=6):
"""
#################################################################
**Internal used only**
Most negative charge on atom with atomic number equal to n
#################################################################
"""
Hmol=Chem.AddHs(mol)
GMCharge.ComputeGasteigerCharges(Hmol,iter_step)
res=[]
for atom in Hmol.GetAtoms():
if atom.GetAtomicNum()==AtomicNum:
res.append(float(atom.GetProp('_GasteigerCharge')))
if res==[]:
return 0
else:
return min(res)
|
f7bd9957c6e958f31cccc2bc20d6651baaf2f5fa
| 20,644 |
import os
def get_task_metrics_dir(
model="spatiotemporal_mean", submodel=None, gt_id="contest_tmp2m", horizon="34w",
target_dates=None
):
"""Returns the directory in which evaluation metrics for a given submodel
or model are stored
Args:
model: string model name
submodel: string submodel name or None; if None, returns metrics
directory associated with selected submodel or returns None if no
submodel selected
gt_id: contest_tmp2m or contest_precip
horizon: 34w or 56w
"""
if submodel is None:
submodel = get_selected_submodel_name(model=model, gt_id=gt_id, horizon=horizon,
target_dates=target_dates)
if submodel is None:
return None
return os.path.join(
"eval", "metrics", model, "submodel_forecasts", submodel, f"{gt_id}_{horizon}"
)
|
a8398d68864e560dfd472b6051e15b70d4c0daca
| 20,645 |
def check_stability(lambda0, W, mu, tau, dt_max):
"""Check if the model is stable for given parameter estimates."""
N, _ = W.shape
model = NetworkPoisson(N=N, dt_max=dt_max)
model.lamb = lambda0
model.W = W
model.mu = mu
model.tau = tau
return model.check_stability(return_value=True)
|
d417bdba0f236edf5f5c9e17c09e2d2a93bf2b4a
| 20,646 |
import re
def pid2id(pid):
"""convert pid to slurm jobid"""
with open('/proc/%s/cgroup' % pid) as f:
for line in f:
m = re.search('.*slurm\/uid_.*\/job_(\d+)\/.*', line)
if m:
return m.group(1)
return None
|
e7d0ee60d5a8930b8a6f761d5c27451a28b6ec2a
| 20,647 |
import platform
import os
def get_develop_directory():
"""
Return the develop directory
"""
if platform.system() == "Windows":
return os.path.dirname(os.path.realpath(__file__)) + "\\qibullet"
else:
return os.path.dirname(os.path.realpath(__file__)) + "/qibullet"
|
5654b3c5b2417e8429a3ec2ca310567a185d78a1
| 20,648 |
import copy
def multiaxis_scatterplot(xdata,
ydata,
*,
axes_loc,
xlabel='',
ylabel='',
title='',
num_cols=1,
num_rows=1,
saveas='mscatterplot',
**kwargs):
"""
Create a scatter plot with multiple axes.
:param xdata: list of arraylikes, passed on to the plotting functions for each axis (x-axis)
:param ydata: list of arraylikes, passed on to the plotting functions for each axis (y-axis)
:param axes_loc: list of tuples of two integers, location of each axis
:param xlabel: str or list of str, labels for the x axis
:param ylabel: str or list of str, labels for the y-axis
:param title: str or list of str, titles for the subplots
:param num_rows: int, how many rows of axis are created
:param num_cols: int, how many columns of axis are created
:param saveas: str filename of the saved file
Special Kwargs:
:param subplot_params: dict with integer keys, can contain all valid kwargs for :py:func:`multiple_scatterplots()`
with the integer key denoting to which subplot the changes are applied
:param axes_kwargs: dict with integer keys, additional arguments to pass on to `subplot2grid` for the creation
of each axis (e.g colspan, rowspan)
Other Kwargs will be passed on to all :py:func:`multiple_scatterplots()` calls
(If they are not overwritten by parameters in `subplot_params`).
"""
#convert parameters to list of parameters for subplots
subplot_params = kwargs.pop('subplot_params', {})
axes_kwargs = kwargs.pop('axes_kwargs', {})
param_list = [None] * len(axes_loc)
for indx, val in enumerate(param_list):
if indx in subplot_params:
param_list[indx] = subplot_params[indx]
else:
param_list[indx] = {}
if indx in axes_kwargs:
param_list[indx]['axes_kwargs'] = axes_kwargs[indx]
if not isinstance(xlabel, list):
param_list[indx]['xlabel'] = xlabel
else:
param_list[indx]['xlabel'] = xlabel[indx]
if not isinstance(ylabel, list):
param_list[indx]['ylabel'] = ylabel
else:
param_list[indx]['ylabel'] = ylabel[indx]
if not isinstance(title, list):
param_list[indx]['title'] = title
else:
param_list[indx]['title'] = title[indx]
general_keys = {'figure_kwargs', 'show', 'save_plots'}
general_info = {key: val for key, val in kwargs.items() if key in general_keys}
kwargs = {key: val for key, val in kwargs.items() if key not in general_keys}
plot_params.set_parameters(**general_info)
#figsize is automatically scaled with the shape of the plot
plot_shape = (num_cols, num_rows)
plot_params['figure_kwargs'] = {
'figsize': ([plot_shape[indx] * size for indx, size in enumerate(plot_params['figure_kwargs']['figsize'])])
}
plot_shape = tuple(reversed(plot_shape))
fig = plt.figure(**plot_params['figure_kwargs'])
axis = []
for indx, subplot_data in enumerate(zip(axes_loc, xdata, ydata, param_list)):
location, x, y, params = subplot_data
subplot_kwargs = copy.deepcopy(kwargs)
subplot_kwargs.update(params)
ax = plt.subplot2grid(plot_shape, location, fig=fig, **subplot_kwargs.pop('axes_kwargs', {}))
with NestedPlotParameters(plot_params):
ax = multiple_scatterplots(x, y, axis=ax, **subplot_kwargs, save_plots=False, show=False)
axis.append(ax)
plot_params.save_plot(saveas)
return axis
|
22d9aa3b0de496c498535b2b4bf663be429b8f48
| 20,649 |
import torch
import torch.nn.functional as F
def log1p_mse_loss(estimate: torch.Tensor, target: torch.Tensor,
reduce: str = 'sum'):
"""
Computes the log1p-mse loss between `x` and `y` as defined in [1], eq. 4.
The `reduction` only affects the speaker dimension; the time dimension is
always reduced by a mean operation as in [1]. It has the advantage of not
going to negative infinity in case of perfect reconstruction while keeping
the logarithmic nature.
The log1p-mse loss is defined as [1]:
.. math::
        L^{\\text{T-L1PMSE}} = \\log_{10} (1 + \\sum_t |x(t) - y(t)|^2)
Args:
estimate (... x T): The estimated signal
target (... x T, same as estimate): The target signal
reduce:
Returns:
The log1p-mse error between `estimate` and `target`
References:
[1] Thilo von Neumann, Christoph Boeddeker, Lukas Drude, Keisuke
Kinoshita, Marc Delcroix, Tomohiro Nakatani, and Reinhold
Haeb-Umbach. „Multi-talker ASR for an unknown number of sources:
Joint training of source counting, separation and ASR“.
http://arxiv.org/abs/2006.02786.
"""
# Use the PyTorch implementation for MSE, should be the fastest
return _reduce(
torch.log10(
            1 + F.mse_loss(estimate, target, reduction='none').mean(dim=-1)),
reduce=reduce
)
|
7c67a67dcf6f6d14bb712d5a92b54ea979f7a73c
| 20,650 |
def quaternion_inverse(quaternion: np.ndarray) -> np.ndarray:
"""Return inverse of quaternion."""
return quaternion_conjugate(quaternion) / np.dot(quaternion, quaternion)
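# `quaternion_conjugate` is not shown in this snippet; a minimal sketch assuming the
# (w, x, y, z) layout, where conjugation negates the vector part.
import numpy as np

def quaternion_conjugate(q: np.ndarray) -> np.ndarray:
    return np.array([q[0], -q[1], -q[2], -q[3]])

q = np.array([0.5, 0.5, 0.5, 0.5])    # a unit quaternion
print(quaternion_inverse(q))          # [ 0.5 -0.5 -0.5 -0.5]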
|
b71c5b544199b02a76362bc42db900b157ea80ec
| 20,651 |
def _make_indexable(iterable):
"""Ensure iterable supports indexing or convert to an indexable variant.
Convert sparse matrices to csr and other non-indexable iterable to arrays.
Let `None` and indexable objects (e.g. pandas dataframes) pass unchanged.
Parameters
----------
iterable : {list, dataframe, array, sparse} or None
Object to be converted to an indexable iterable.
"""
if issparse(iterable):
return mt.tensor(iterable)
elif hasattr(iterable, "iloc"):
if iterable.ndim == 1:
return md.Series(iterable)
else:
return md.DataFrame(iterable)
elif hasattr(iterable, "__getitem__"):
return mt.tensor(iterable)
elif iterable is None:
return iterable
return mt.tensor(iterable)
|
29d067826e0a863b06b1fb0295b12d57ecaea00d
| 20,652 |
def batchnorm_forward(x, gamma, beta, bn_param):
"""
Forward pass for batch normalization.
During training the sample mean and (uncorrected) sample variance are
computed from minibatch statistics and used to normalize the incoming data.
During training we also keep an exponentially decaying running mean of the mean
and variance of each feature, and these averages are used to normalize data
at test-time.
At each timestep we update the running averages for mean and variance using
an exponential decay based on the momentum parameter:
running_mean = momentum * running_mean + (1 - momentum) * sample_mean
running_var = momentum * running_var + (1 - momentum) * sample_var
Note that the batch normalization paper suggests a different test-time
behavior: they compute sample mean and variance for each feature using a
large number of training images rather than using a running average. For
this implementation we have chosen to use running averages instead since
they do not require an additional estimation step; the torch7 implementation
of batch normalization also uses running averages.
Input:
- x: Data of shape (N, D)
- gamma: Scale parameter of shape (D,)
- beta: Shift paremeter of shape (D,)
- bn_param: Dictionary with the following keys:
- mode: 'train' or 'test'; required
- eps: Constant for numeric stability
- momentum: Constant for running mean / variance.
- running_mean: Array of shape (D,) giving running mean of features
- running_var Array of shape (D,) giving running variance of features
Returns a tuple of:
- out: of shape (N, D)
- cache: A tuple of values needed in the backward pass
"""
mode = bn_param['mode']
eps = bn_param.get('eps', 1e-5)
momentum = bn_param.get('momentum', 0.9)
N, D = x.shape
running_mean = bn_param.get('running_mean', np.zeros(D, dtype=x.dtype))
running_var = bn_param.get('running_var', np.zeros(D, dtype=x.dtype))
out, cache = None, None
if mode == 'train':
# Forward pass
# Step 1 - shape of mu (D,)
mu = 1 / float(N) * np.sum(x, axis=0)
# Step 2 - shape of var (N,D)
xmu = x - mu
# Step 3 - shape of carre (N,D)
carre = xmu**2
# Step 4 - shape of var (D,)
var = 1 / float(N) * np.sum(carre, axis=0)
# Step 5 - Shape sqrtvar (D,)
sqrtvar = np.sqrt(var + eps)
# Step 6 - Shape invvar (D,)
invvar = 1. / sqrtvar
# Step 7 - Shape va2 (N,D)
va2 = xmu * invvar
# Step 8 - Shape va3 (N,D)
va3 = gamma * va2
# Step 9 - Shape out (N,D)
out = va3 + beta
running_mean = momentum * running_mean + (1.0 - momentum) * mu
running_var = momentum * running_var + (1.0 - momentum) * var
cache = (mu, xmu, carre, var, sqrtvar, invvar,
va2, va3, gamma, beta, x, bn_param)
elif mode == 'test':
mu = running_mean
var = running_var
xhat = (x - mu) / np.sqrt(var + eps)
out = gamma * xhat + beta
cache = (mu, var, gamma, beta, bn_param)
else:
raise ValueError('Invalid forward batchnorm mode "%s"' % mode)
# Store the updated running means back into bn_param
bn_param['running_mean'] = running_mean
bn_param['running_var'] = running_var
return out, cache
|
b36ea808c5865eb92a81464c3efe14ab9325d01e
| 20,653 |
def chunking():
"""
transforms dataframe of full texts into a list of chunked texts of 2000 tokens each
"""
word_list = []
chunk_list = []
text_chunks = []
# comma separating every word in a book
for entry in range(len(df)):
word_list.append(df.text[entry].split())
# create a chunk of 2000 words
for entry in word_list:
chunk_list.append(list(divide_chunks(entry, 2000)))
# flatten chunk list from a nested list to a list
text_chunks = [item for l in chunk_list for item in l]
    print("Texts have been divided into chunks of 2000 tokens each for easier preprocessing")
return(text_chunks)
|
66e1976b3bd9e88420fab370f1eee9053986bd56
| 20,654 |
def generate_random_string():
    """Create a random 8-character string of lowercase letters and digits."""
letters = ascii_lowercase + digits
return ''.join(choice(letters) for i in range(8))
|
027a9d50e2ff5b80b7344d35e492ace7c65366e8
| 20,655 |
def contains_message(response, message):
"""
Inspired by django's self.assertRaisesMessage
Useful for confirming the response contains the provided message,
"""
if len(response.context['messages']) != 1:
return False
full_message = str(list(response.context['messages'])[0])
return message in full_message
|
4afcdba84603b8b53095a52e769d0a8e3f7bbb17
| 20,656 |
def definition():
"""To be used by UI."""
sql = f"""
SELECT c.course_id,
c.curriculum_id,
cs.course_session_id,
description + ' year ' +CAST(session as varchar(2)) as description,
CASE WHEN conf.course_id IS NULL THEN 0 ELSE 1 END as linked,
0 as changed
FROM ({select_all_and_default(Course)}) as c
LEFT JOIN c_course_session cs ON cs.curriculum_id = c.curriculum_id
LEFT JOIN c_course_config conf ON conf.course_id = c.course_id
AND conf.course_session_id = cs.course_session_id"""
return sql
|
ac67783943604e0e83bd4ccfc2b704737e427edd
| 20,657 |
def exec_psql_cmd(command, host, port, db="template1", tuples_only=True):
"""
Sets up execution environment and runs the HAWQ queries
"""
src_cmd = "export PGPORT={0} && source {1}".format(port, hawq_constants.hawq_greenplum_path_file)
if tuples_only:
        cmd = src_cmd + " && psql -t -d {0} -c \\\\\\\"{1};\\\\\\\"".format(db, command)
else:
        cmd = src_cmd + " && psql -d {0} -c \\\\\\\"{1};\\\\\\\"".format(db, command)
retcode, out, err = exec_ssh_cmd(host, cmd)
if retcode:
Logger.error("SQL command executed failed: {0}\nReturncode: {1}\nStdout: {2}\nStderr: {3}".format(cmd, retcode, out, err))
raise Fail("SQL command executed failed.")
Logger.info("Output:\n{0}".format(out))
return retcode, out, err
|
453f0c2ef0dfdf2a5d03b22d4a6fbd03282dd72a
| 20,658 |
def carla_cityscapes_image_to_ndarray(image: carla.Image) -> np.ndarray: # pylint: disable=no-member
"""Returns a `NumPy` array from a `CARLA` semantic segmentation image.
Args:
image: The `CARLA` semantic segmented image.
Returns:
A `NumPy` array representation of the image.
"""
image.convert(carla.ColorConverter.CityScapesPalette) # pylint: disable=no-member
array = np.frombuffer(image.raw_data, dtype=np.dtype("uint8"))
array = array.astype(np.float32) / 255
array = np.reshape(array, (image.height, image.width, 4))
array = array[:, :, :3]
array = array[:, :, ::-1]
return array
|
f191d3f9700b281178f395726d649e90dfc57bb7
| 20,659 |
import re
def since(version):
"""A decorator that annotates a function to append the version
of skutil the function was added. This decorator is an adaptation of PySpark's.
Parameters
----------
version : str, float or int
The version the specified method was added to skutil.
Examples
--------
>>> @since('0.1.5')
... def some_fun():
... '''Some docstring'''
... return None
...
>>>
>>> some_fun.__doc__ # doctest: +SKIP
'Some docstring\n\n.. versionadded:: 0.1.5'
.. versionadded:: 0.1.5
"""
indent_p = re.compile(r'\n( +)')
def deco(f):
indents = indent_p.findall(f.__doc__)
indent = ' ' * (min(len(m) for m in indents) if indents else 0)
f.__doc__ = f.__doc__.rstrip() + "\n\n%s.. versionadded:: %s" % (indent, version)
return f
return deco
|
e6b29b5e4c67ba4a213b183a0b79a1f16a85d81c
| 20,660 |
def get_dMdU():
"""Compute dMdU"""
dMdC = form_nd_array("dMdC", [3,3,3,3,3])
dMdPsi = form_nd_array("dMdPsi", [3,3,3,3,3])
dMdGamma = form_nd_array("dMdGamma",[3,3,3,3,3,3])
dCdU = form_symb_dCdU()
dPsidU = form_symb_dPhidU()
dGammadU = form_symb_dGammadU()
dMdU = form_nd_array("dMdU",[3,3,3,8*12])
for I in range(3):
for J in range(3):
for K in range(3):
for L in range(8*12):
dMdU[I,J,K,L] = 0
for O in range(3):
for P in range(3):
dMdU[I,J,K,L] += dMdC[I,J,K,O,P]*dCdU[O,P,L] + dMdPsi[I,J,K,O,P]*dPsidU[O,P,L]
for Q in range(3):
dMdU[I,J,K,L] += dMdGamma[I,J,K,O,P,Q]*dGammadU[O,P,Q,L]
tmp = [get_matrix_form_TOT(dCdU)[:,:12], get_matrix_form_TOT(dPsidU)[:,:12],\
get_matrix_form_FOT(dGammadU)[:,:12], get_matrix_form_VOT(dMdC),\
get_matrix_form_VOT(dMdPsi), get_matrix_form_VIOT(dMdGamma)]
symb = ["dCdU","dPsidU","dGammadU","dMdC","dMdPsi","dMdGamma"]
[implementation_extract_matrix(t,s,"I","I") for t,s in zip(tmp,symb)]
implementation_print_matrix(get_matrix_form_FOT(dMdU)[:,:12],"dMdU","I","I")
return dMdU
|
55d6dedc5311c8a2a30c44569508bd7687400cb5
| 20,661 |
def get_group_value_ctx_nb(sc_oc):
"""Get group value from context.
Accepts `vectorbt.portfolio.enums.SegmentContext` and `vectorbt.portfolio.enums.OrderContext`.
Best called once from `segment_prep_func_nb`.
To set the valuation price, change `last_val_price` of the context in-place.
!!! note
Cash sharing must be enabled."""
if not sc_oc.cash_sharing:
raise ValueError("Cash sharing must be enabled")
return get_group_value_nb(
sc_oc.from_col,
sc_oc.to_col,
sc_oc.last_cash[sc_oc.group],
sc_oc.last_shares,
sc_oc.last_val_price
)
|
0646e7a26b36af42ee38196e0ee60e3684da2d16
| 20,662 |
import math
import numpy as np
import scipy.signal
import torch
def motion_blur_generate_kernel(radius, angle, sigma):
"""
Args:
radius
angle (float): Radians clockwise from the (x=1, y=0) vector. This
is how ImageMagick's -motion-blur filter accepts angles, as far
as I can tell.
>>> mb_1_0_inf_expected = torch.ones(3) / 3
>>> mb_1_0_inf = motion_blur_generate_kernel(1, 0, np.inf)[0]
>>> assert torch.all(torch.isclose(mb_1_0_inf[0], mb_1_0_inf_expected))
>>> g_3_1 = torch.from_numpy(scipy.signal.gaussian(5, 1)[2:]).float()
>>> g_3_1 /= g_3_1.sum()
>>> mb_1_0_1 = motion_blur_generate_kernel(1, 0, 1)[0]
>>> assert torch.all(mb_1_0_1[0] == g_3_1), (mb_1_0_1[0], g_3_1)
>>> assert torch.all(mb_1_0_1[1] == 0)
>>> assert torch.all(mb_1_0_1[2] == 0)
"""
# Make angles be counterclockwise from (x=1, y=0) vector to maintain sanity.
angle = 2 * np.pi - angle
# Make all angles lie in [0, 2*pi]
    if angle < 0:
        # add enough full turns to bring the angle back into [0, 2*pi)
        angle += math.ceil(-angle / (2 * np.pi)) * 2 * np.pi
if angle > 2 * np.pi:
angle = angle % (2 * np.pi)
size = 2 * radius + 1
kernel = torch.zeros((size, size))
# Gaussian centered at 0th element.
kernel_1d = scipy.signal.gaussian(size * 2 - 1, sigma)[size-1:]
direction_up = 0 <= angle <= np.pi
direction_right = (angle < np.pi / 2) or (angle > 3 / 2 * np.pi)
cy = size - 1 if direction_up else 0
cx = 0 if direction_right else size - 1
# dy is relative to matrix coordinates, so, e.g., angle of np.pi/4 should
# be a line going up => dy should be negative.
dx, dy = np.cos(angle).item(), -np.sin(angle).item()
for i in range(size):
# *o*ffset_*x*, *o*ffset_*y*
ox, oy = dx * i, dy * i
x = min(cx + round(ox), size)
y = min(cy + round(oy), size)
assert x >= 0, f'x={x} should be >= 0!'
assert y >= 0, f'y={y} should be >= 0!'
kernel[y, x] = kernel_1d[i]
kernel /= kernel.sum()
return kernel, cy, cx
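# Hedged usage sketch: applying the generated kernel to a single-channel float
# image with torch's functional conv2d; padding by `radius` preserves the size.
import torch.nn.functional as F

def apply_motion_blur(image, radius=3, angle=0.0, sigma=3.0):
    """Blur a (H, W) float tensor with a motion-blur kernel."""
    kernel, _, _ = motion_blur_generate_kernel(radius, angle, sigma)
    weight = kernel.unsqueeze(0).unsqueeze(0)   # (1, 1, k, k) conv filter
    batched = image.unsqueeze(0).unsqueeze(0)   # (1, 1, H, W) input
    blurred = F.conv2d(batched, weight, padding=radius)
    return blurred.squeeze(0).squeeze(0)

# Example (illustrative only): apply_motion_blur(torch.rand(64, 64), radius=3, angle=math.pi / 4)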
|
ff4e939d2ffbc91b6ef6af2ca11aceb1d32df594
| 20,663 |
def substitute_crypto_to_req(req):
"""Replace crypto requirements if customized."""
crypto_backend = get_crypto_req()
if crypto_backend is None:
return req
def is_not_crypto(r):
CRYPTO_LIBS = PYCRYPTO_DIST, "cryptography"
return not any(r.lower().startswith(c) for c in CRYPTO_LIBS)
return [r for r in req if is_not_crypto(r)] + [crypto_backend]
|
0e1836120f52981c3ff126038c0c74b9da94aa7f
| 20,664 |
def remove_att(doc_id, doc_rev, att_id, **kwargs):
"""Delete an attachment.
http://docs.couchdb.org/en/stable/api/document/attachments.html#delete--db-docid-attname
:param str doc_id: The attachment document.
:param str doc_rev: The document revision.
:param str att_id: The attachment to remove.
:param kwargs: (optional) Arguments that :meth:`requests.Session.request` takes.
:rtype: (str, str, dict)
"""
if ("params" not in kwargs) or (not isinstance(kwargs["params"], dict)):
kwargs["params"] = {}
path = urljoin(utils.encode_document_id(doc_id), utils.encode_attachment_id(att_id))
kwargs["params"]["rev"] = doc_rev
return "DELETE", path, kwargs
|
2b9361468baf4dc2e358b2fa2f4c43403556cd40
| 20,665 |
import urllib.parse
def read_file(file_path):
    """Read a file according to its URL scheme (local path or s3://)."""
    s3_scheme = 's3'
    scheme = urllib.parse.urlparse(file_path).scheme
    if scheme == s3_scheme:
        return read_s3_file(file_path)
    # No scheme (or anything other than s3) is treated as a local path.
    with open(file_path) as file_stream:
        return file_stream.read()
|
438c6286f5f29792fd7c99412bead96a11adc757
| 20,666 |
from typing import List
def build_command(codemodders_list: List) -> BaseCodemodCommand:
"""Build a custom command with the list of visitors."""
class CustomCommand(BaseCodemodCommand):
transformers = codemodders_list
return CustomCommand(CodemodContext())
|
5aed3c94c954a8e62c7cfb23f2b338e3a017d988
| 20,667 |
import math
import numpy as np
def default_gen_mat(dt: float, size: int) -> np.ndarray:
"""Default process matrix generator.
Parameters
----------
dt : float
Dimension variable difference.
size : int
Size of the process matrix, equals to number of rows and columns.
Returns
-------
np.ndarray
Process matrix.
"""
mat = np.identity(size)
for i in range(1, size):
        np.fill_diagonal(mat[:, i:], dt**i / math.factorial(i))
return mat
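# Hedged example: dt = 0.1 and size = 3 give the constant-acceleration style
# transition matrix familiar from Kalman filtering.
# default_gen_mat(0.1, 3) ->
#   [[1.0, 0.1, 0.005],
#    [0.0, 1.0, 0.1  ],
#    [0.0, 0.0, 1.0  ]]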
|
fc4c19b33dae27ec412a00d20b89c25c5bc8668c
| 20,668 |
def _PrepareListOfSources(spec, generator_flags, gyp_file):
"""Prepare list of sources and excluded sources.
Besides the sources specified directly in the spec, adds the gyp file so
that a change to it will cause a re-compile. Also adds appropriate sources
for actions and copies. Assumes later stage will un-exclude files which
have custom build steps attached.
  Arguments:
    spec: The target dictionary containing the properties of the target.
    generator_flags: Dictionary of generator-specific flags.
    gyp_file: The name of the gyp file.
Returns:
A pair of (list of sources, list of excluded sources).
The sources will be relative to the gyp file.
"""
sources = OrderedSet()
_AddNormalizedSources(sources, spec.get('sources', []))
excluded_sources = OrderedSet()
# Add in the gyp file.
if not generator_flags.get('standalone'):
sources.add(gyp_file)
# Add in 'action' inputs and outputs.
for a in spec.get('actions', []):
inputs = a['inputs']
inputs = [_NormalizedSource(i) for i in inputs]
# Add all inputs to sources and excluded sources.
inputs = OrderedSet(inputs)
sources.update(inputs)
if not spec.get('msvs_external_builder'):
excluded_sources.update(inputs)
if int(a.get('process_outputs_as_sources', False)):
_AddNormalizedSources(sources, a.get('outputs', []))
# Add in 'copies' inputs and outputs.
for cpy in spec.get('copies', []):
_AddNormalizedSources(sources, cpy.get('files', []))
return (sources, excluded_sources)
|
a6a0d0d6d7531b8e858c8ec0d0aedee320c20d8d
| 20,669 |
def striptag(tag):
"""
Get the short representation of a fully qualified tag
:param str tag: a (fully qualified or not) XML tag
"""
if tag.startswith('{'):
return tag.rsplit('}')[1]
return tag
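# Example of the short-tag behaviour:
# >>> striptag('{http://www.opengis.net/gml}Point')
# 'Point'
# >>> striptag('Point')
# 'Point'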
|
f0193e3f792122ba8278e599247439a91139e72b
| 20,670 |
from subprocess import Popen, PIPE
def dump_key(key):
    """ Convert key into printable form using openssl utility
    Used to compare keys which can be stored in different
    formats by different OpenSSL versions
    """
    return Popen(["openssl", "pkey", "-text", "-noout"], stdin=PIPE, stdout=PIPE).communicate(key)[0]
|
71dd28876c2fd3e28a4434b926b483cef3b104c2
| 20,671 |
def download_complete(root_data_path, domain_name, start_date, end_date):
"""
Check that all files have been downloaded and that they contain the data in
the expected date range
"""
missing_files = _find_missing_files(
root_data_path=root_data_path,
domain_name=domain_name,
start_date=start_date,
end_date=end_date,
)
return len(missing_files) == 0
|
3540632c5ec48fb0741b8173f926fd1cb5970333
| 20,672 |
from typing import Any
def get_test_string(actual: Any, rtol: float, atol: float) -> str:
"""
Args:
actual: The actual value that was produced, and that should be the desired value.
rtol: The relative tolerance of the comparisons in the assertion.
atol: The absolute tolerance of the comparisons in the assertion.
Returns:
A string of Python code that produces the desired value.
"""
return str(actual)
|
f017806bef4336bf187071436bd454d0ca980636
| 20,673 |
def is_type_resolved(_type):
"""Helper function that checks if type is already resolved."""
return _type in BASIC_TYPES or isinstance(_type, TypeDef)
|
9451a5dbf17aef1685122b881ede994d1a02b7a0
| 20,674 |
import types
def get_extension(media):
"""Gets the corresponding extension for any Telegram media."""
# Photos are always compressed as .jpg by Telegram
try:
get_input_photo(media)
return '.jpg'
except TypeError:
        # These cases are not handled by get_input_photo because they
        # cannot be converted into an input photo.
if isinstance(media, (types.UserProfilePhoto, types.ChatPhoto)):
return '.jpg'
# Documents will come with a mime type
if isinstance(media, types.MessageMediaDocument):
media = media.document
if isinstance(media, (
types.Document, types.WebDocument, types.WebDocumentNoProxy)):
if media.mime_type == 'application/octet-stream':
# Octet stream are just bytes, which have no default extension
return ''
else:
return guess_extension(media.mime_type) or ''
return ''
|
cb05f122fdf03df38c6d7b7904c7ec611f09c7a0
| 20,675 |
def process_input(df, col_group, col_t, col_death_rate, return_df=True):
"""
    Trim, filter, and add extra information to the data frame.
Args:
df (pd.DataFrame): Provided data frame.
col_group (str): Column name of group definition.
col_t (str): Column name of the independent variable.
col_death_rate (str): Name for column that contains the death rate.
return_df (bool, optional):
If True return the combined data frame, otherwise return the
            split dictionary.
Returns:
pd.DataFrame: processed data frame.
"""
assert col_group in df
assert col_t in df
assert col_death_rate in df
# trim down the data frame
df = df[[col_group, col_t, col_death_rate]].reset_index(drop=True)
df.sort_values([col_group, col_t], inplace=True)
df.columns = ['location', 'days', 'ascdr']
# check and filter and add more information
data = split_by_group(df, col_group='location')
for location, df_location in data.items():
assert df_location.shape[0] == df_location['days'].unique().size
df_location = filter_death_rate(df_location,
col_t='days',
col_death_rate='ascdr')
df_location['ln ascdr'] = np.log(df_location['ascdr'])
df_location['asddr'] = df_location['ascdr'].values - \
np.insert(df_location['ascdr'].values[:-1], 0, 0.0)
df_location['ln asddr'] = np.log(df_location['asddr'])
data.update({
location: df_location.copy()
})
if return_df:
return pd.concat(list(data.values()))
else:
return data
|
24b1e7274959c5b4befbd826c1a60ca700316b2f
| 20,676 |
def cross_entropy_loss():
"""
Returns an instance to compute Cross Entropy loss
"""
return tf.keras.losses.BinaryCrossentropy(from_logits=True)
|
5fcc673d9339bd4acb84d55fe0c316bc4cf802c4
| 20,677 |
def f(x):
"""
Surrogate function over the error metric to be optimized
"""
evaluation = run_quip(cutoff = float(x[:,0]), delta = float(x[:,1]), n_sparse = float(x[:,2]), nlmax = float(x[:,3]))
print("\nParam: {}, {}, {}, {} | MAE : {}, R2: {}".format(float(x[:,0]),float(x[:,1]),float(x[:,2]),float(x[:,3]) ,evaluation[0],evaluation[1]))
return evaluation[0]
|
0663b9eb2b717f547f57a8485739165414fbbdba
| 20,678 |
def equal(*vals):
"""Returns True if all arguments are equal"""
if len(vals) < 2:
return True
a = vals[0]
for b in vals[1:]:
if a != b:
return False
return True
|
dbd947016d2b84faaaa7fefa6f35975da0a1b5ec
| 20,679 |
import os
def chrome_options() -> Options:
"""Pass standard Chrome options to a test."""
options = Options()
executable_path = os.getenv("EXECUTABLE_PATH")
assert (
executable_path is not None
), "EXECUTABLE_PATH environment variable must be set"
logger.info(f"EXECUTABLE_PATH is {executable_path}")
options.binary_location = executable_path
options.add_argument("whitelisted-ips=''")
options.add_argument("disable-xss-auditor")
options.add_argument("disable-web-security")
options.add_argument("allow-running-insecure-content")
options.add_argument("no-sandbox")
options.add_argument("disable-setuid-sandbox")
options.add_argument("disable-popup-blocking")
options.add_argument("allow-elevated-browser")
options.add_argument("verbose")
return options
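# Hedged usage sketch: the returned Options object is meant to be handed to
# selenium's Chrome driver. The binary path below is purely illustrative.
#
# import os
# from selenium import webdriver
# os.environ["EXECUTABLE_PATH"] = "/usr/bin/chromium"   # hypothetical path
# driver = webdriver.Chrome(options=chrome_options())
# driver.get("https://example.com")
# driver.quit()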
|
1c1c32aec18cfea1c040045da401052953626b29
| 20,680 |
def exp(x: pd.Series) -> pd.Series:
"""
Exponential of series
:param x: timeseries
:return: exponential of each element
**Usage**
For each element in the series, :math:`X_t`, raise :math:`e` (Euler's number) to the power of :math:`X_t`.
Euler's number is the base of the natural logarithm, :math:`ln`.
:math:`R_t = e^{X_t}`
**Examples**
Raise :math:`e` to the power :math:`1`. Returns Euler's number, approximately 2.71828
>>> exp(1)
**See also**
:func:`log`
"""
return np.exp(x)
|
c4cb057be2dd988a152cc8f224d4bd4300f88263
| 20,681 |
import os
def level_location(level, cache_dir):
"""
Return the path where all tiles for `level` will be stored.
>>> level_location(2, '/tmp/cache')
'/tmp/cache/02'
"""
if isinstance(level, string_type):
return os.path.join(cache_dir, level)
else:
return os.path.join(cache_dir, "%02d" % level)
|
368fb5696001133ebdbaeb885b0408c7bb3715b0
| 20,682 |
def pil_paste_image(im, mask, start_point=(0, 0)):
"""
:param im:
:param mask:
:param start_point:
:return:
"""
out = Image.fromarray(im)
mask = Image.fromarray(mask)
out.paste(mask, start_point, mask)
return np.asarray(out)
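# Hedged usage sketch (assumes `from PIL import Image` and `import numpy as np`,
# which this snippet relies on but does not show):
# im = np.zeros((64, 64, 3), dtype=np.uint8)           # black RGB canvas
# mask = np.full((16, 16, 4), 255, dtype=np.uint8)     # opaque white RGBA patch
# out = pil_paste_image(im, mask, start_point=(10, 10))
# out.shape -> (64, 64, 3)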
|
b6393426aa5b7434e64cddc11ed598fca78a2b47
| 20,683 |
import requests
def service_northwind_v2(schema_northwind_v2):
"""https://services.odata.org/V2/Northwind/Northwind.svc/"""
return pyodata.v2.service.Service('http://not.resolvable.services.odata.org/V2/Northwind/Northwind.svc',
schema_northwind_v2, requests)
|
a7934ff032725589bb11aab9cd84c26d9f2845c3
| 20,684 |
from typing import Mapping
from typing import Any
import logging
import os
from absl import flags
def train_and_eval(
params: base_configs.ExperimentConfig,
strategy_override: tf.distribute.Strategy) -> Mapping[str, Any]:
"""Runs the train and eval path using compile/fit."""
logging.info('Running train and eval.')
# Note: for TPUs, strategy and scope should be created before the dataset
strategy = strategy_override or distribution_utils.get_distribution_strategy(
distribution_strategy=params.runtime.distribution_strategy,
all_reduce_alg=params.runtime.all_reduce_alg,
num_gpus=params.runtime.num_gpus,
tpu_address=params.runtime.tpu)
strategy_scope = distribution_utils.get_strategy_scope(strategy)
logging.info('Detected %d devices.',
strategy.num_replicas_in_sync if strategy else 1)
label_smoothing = params.model.loss.label_smoothing
one_hot = label_smoothing and label_smoothing > 0
builders = _get_dataset_builders(params, strategy, one_hot)
datasets = [builder.build() if builder else None for builder in builders]
# Unpack datasets and builders based on train/val/test splits
train_builder, validation_builder = builders # pylint: disable=unbalanced-tuple-unpacking
train_dataset, validation_dataset = datasets
train_epochs = params.train.epochs
train_steps = params.train.steps or train_builder.num_steps
validation_steps = params.evaluation.steps or validation_builder.num_steps
initialize(params, train_builder)
logging.info('Global batch size: %d', train_builder.global_batch_size)
with strategy_scope:
model_params = params.model.model_params.as_dict()
model = get_models()[params.model.name](**model_params)
if params.model.model_weights_path:
if os.path.isdir(params.model.model_weights_path):
checkpoint = tf.train.latest_checkpoint(params.model.model_weights_path)
else:
checkpoint = params.model.model_weights_path
logging.info('Load weights from %s', checkpoint)
model.load_weights(checkpoint)
if flags.FLAGS.mode == 'sensitivity_analysis' or flags.FLAGS.pruning_config_file:
if flags.FLAGS.mode == 'sensitivity_analysis':
if flags.FLAGS.pruning_config_file:
raise ValueError
layer = [
layer for layer in model.layers
if hasattr(layer, 'kernel') or hasattr(layer, 'depthwise_kernel')
][flags.FLAGS.sensitivity_layer_count]
layer_name = layer.name
weight_name = 'kernel' if hasattr(layer, 'kernel') else 'depthwise_kernel'
pruning_params = cprune_from_config.generate_sensitivity_config(
model_name=model.name,
layer_name=layer_name,
weight_name=weight_name,
granularity=flags.FLAGS.sensitivity_granularity,
gamma=flags.FLAGS.sensitivity_gamma,
respect_submatrix=flags.FLAGS.sensitivity_respect_submatrix,
two_over_four_chin=flags.FLAGS.sensitivity_two_over_four_chin)
else:
pruning_params = get_pruning()[params.model.name]
params_dict.override_params_dict(
pruning_params, flags.FLAGS.pruning_config_file, is_strict=False)
logging.info('Specified pruning params: %s', pp.pformat(pruning_params.as_dict()))
_pruning_params = cprune_from_config.predict_sparsity(model, pruning_params)
logging.info('Understood pruning params: %s', pp.pformat(_pruning_params))
model = cprune_from_config.cprune_from_config(model, pruning_params)
else:
weights_list = model.get_weights()
model = tf.keras.models.clone_model(model)
model.set_weights(weights_list)
models = [model]
if flags.FLAGS.mode == 'prune_physically':
smaller_model = cprune_from_config.prune_physically(model)
models.append(smaller_model)
for _model in models:
learning_rate = optimizer_factory.build_learning_rate(
params=params.model.learning_rate,
batch_size=train_builder.global_batch_size,
train_steps=train_steps)
optimizer = optimizer_factory.build_optimizer(
optimizer_name=params.model.optimizer.name,
base_learning_rate=learning_rate,
params=params.model.optimizer.as_dict())
metrics_map = _get_metrics(one_hot)
metrics = [metrics_map[metric] for metric in params.train.metrics]
if one_hot:
loss_obj = tf.keras.losses.CategoricalCrossentropy(
label_smoothing=params.model.loss.label_smoothing)
else:
loss_obj = tf.keras.losses.SparseCategoricalCrossentropy()
_model.compile(optimizer=optimizer,
loss=loss_obj,
metrics=metrics)
initial_epoch = 0
if params.train.resume_checkpoint:
initial_epoch = resume_from_checkpoint(model=model,
model_dir=params.model_dir,
train_steps=train_steps)
callbacks = None
if params.mode == 'train_and_eval':
serialize_config(params=params, model_dir=params.model_dir)
# TODO(dankondratyuk): callbacks significantly slow down training
model_pruning_config = None
if flags.FLAGS.pruning_config_file:
model_pruning_config = cprune_from_config._expand_model_pruning_config(
model, pruning_params
)
callbacks = custom_callbacks.get_callbacks(
model_checkpoint=params.train.callbacks.enable_checkpoint_and_export,
include_tensorboard=params.train.callbacks.enable_tensorboard,
time_history=params.train.callbacks.enable_time_history,
track_lr=params.train.tensorboard.track_lr,
model_pruning_config=model_pruning_config,
write_model_weights=params.train.tensorboard.write_model_weights,
batch_size=train_builder.global_batch_size,
log_steps=params.train.time_history.log_steps,
model_dir=params.model_dir)
if flags.FLAGS.pruning_config_file:
callbacks += [
cpruning_callbacks.UpdateCPruningStep(),
# cpruning_callbacks.CPruningSummaries(log_dir=params.model_dir),
]
if params.evaluation.skip_eval:
validation_kwargs = {}
else:
validation_kwargs = {
'validation_data': validation_dataset,
'validation_steps': validation_steps,
'validation_freq': params.evaluation.epochs_between_evals,
}
history = None
if params.mode == 'train_and_eval':
history = model.fit(
train_dataset,
epochs=train_epochs,
steps_per_epoch=train_steps,
initial_epoch=initial_epoch,
callbacks=callbacks,
verbose=flags.FLAGS.verbose,
**validation_kwargs)
elif params.mode == 'eval':
cprune.apply_cpruning_masks(model)
if flags.FLAGS.pruning_config_file:
_pruning_params = cprune_from_config.predict_sparsity(model, pruning_params)
logging.info('Pruning result: %s', pp.pformat(_pruning_params))
validation_output = None
if params.evaluation.eval_data == 'train':
eval_dataset = train_dataset
eval_steps = train_steps
elif params.evaluation.eval_data == 'validation':
eval_dataset = validation_dataset
eval_steps = validation_steps
if params.mode == 'sensitivity_analysis':
file_writer = tf.summary.create_file_writer(flags.FLAGS.model_dir + '/metrics')
file_writer.set_as_default()
cprune_registry.ConstraintRegistry.add_weight_constraint_pair(
'depthwise_kernel', 'depthwise_constraint')
for sparsity_x_16 in range(16):
cprune.apply_cpruning_masks(model, step=sparsity_x_16)
_validation_output = model.evaluate(
eval_dataset, steps=eval_steps, verbose=2, return_dict=True)
_validation_output = [_validation_output['loss'],
_validation_output['accuracy'],
_validation_output['top_5_accuracy']]
_stats = common.build_stats(history, _validation_output, callbacks)
prefix = 'pruning_sensitivity/' + layer_name + '/' + weight_name + '/'
for key, value in _stats.items():
tf.summary.scalar(prefix + key, data=value, step=sparsity_x_16)
_pruning_params = cprune_from_config.predict_sparsity(model, pruning_params)
sparsity = _pruning_params['pruning'][0]['pruning'][0]['current_sparsity']
tf.summary.scalar(prefix + 'sparsity', data=sparsity, step=sparsity_x_16)
elif flags.FLAGS.mode == 'prune_physically':
logging.info('Number of filters before and after physical pruning:')
for layer, new_layer in zip(model.layers, smaller_model.layers):
if type(layer) is tf.keras.layers.Conv2D:
logging.info(' {}, {}, {}'.format(layer.name, layer.filters, new_layer.filters))
if type(layer) is tf.keras.layers.Dense:
logging.info(' {}, {}, {}'.format(layer.name, layer.units, new_layer.units))
for i, _model in enumerate(models):
situation = 'before' if i == 0 else 'after'
logging.info('Model summary {} physical pruning:'.format(situation))
_model.summary(print_fn=logging.info)
_validation_output = _model.evaluate(
eval_dataset, steps=eval_steps, verbose=2, return_dict=True)
_validation_output = [_validation_output['loss'],
_validation_output['accuracy'],
_validation_output['top_5_accuracy']]
_stats = common.build_stats(history, _validation_output, callbacks)
logging.info('Evaluation {} physical pruning: {}'.format(situation, _stats))
postfix = '' if i == 0 else '_small'
export_path = os.path.join(flags.FLAGS.model_dir, 'saved_model' + postfix)
_model.save(export_path, include_optimizer=False)
elif not params.evaluation.skip_eval or params.mode == 'eval':
logging.info('Evaluate %s data', params.evaluation.eval_data)
validation_output = model.evaluate(
eval_dataset, steps=eval_steps, verbose=2, return_dict=True)
if validation_output:
validation_output = [validation_output['loss'],
validation_output['accuracy'],
validation_output['top_5_accuracy']]
# TODO(dankondratyuk): eval and save final test accuracy
stats = common.build_stats(history, validation_output, callbacks)
return stats
|
6e1e8a4bf8b6821277e541e1433ba27973aa59c4
| 20,685 |
def make_params(params, extra_params):
"""
Creates URL query params by combining arbitrary params
with params designated by keyword arguments and escapes
them to be compatible with HTTP request URI.
Raises an exception if there is a conflict between the
two ways to specify a query param.
"""
params = params or {}
wire_params = {
k: quote(escape(v), b",*[]:/-")
for k, v in (extra_params or {}).items()
if v is not None
}
if set(wire_params).intersection(set(params)):
raise ValueError("Conflict between keyword argument and 'params'")
for k, v in (params or {}).items():
if v is None:
continue
wire_params[k] = quote(escape(v), b",*[]:/-")
return wire_params
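# Hedged usage sketch (assuming the `escape` helper, which is not shown here,
# leaves plain ASCII strings unchanged): extras merge with explicit params, and
# a key present in both raises ValueError.
# make_params({"q": "shoes"}, {"page": "2"})  -> {"q": "shoes", "page": "2"}
# make_params({"q": "a"}, {"q": "b"})         -> ValueError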
|
f2df0c52675476c0420d40f5ef9053cd2a719194
| 20,686 |
def raw_tag(name, value):
"""Create a DMAP tag with raw data."""
return name.encode('utf-8') + \
len(value).to_bytes(4, byteorder='big') + \
value
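# Example: the tag name is followed by a 4-byte big-endian length and the raw payload.
# >>> raw_tag('mper', b'\x00\x01')
# b'mper\x00\x00\x00\x02\x00\x01'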
|
9f86a5a9ebc38fcfd31eb7d76ac8bb01618f6ca7
| 20,687 |
def get_command(tool_xml):
"""Get command XML element from supplied XML root."""
root = tool_xml.getroot()
commands = root.findall("command")
command = None
if len(commands) == 1:
command = commands[0]
return command
|
8d50b2675b3a6089b15b5380025ca7def9e4339e
| 20,688 |
from whoosh.reading import SegmentReader
def OPTIMIZE(writer, segments):
"""This policy merges all existing segments.
"""
for seg in segments:
reader = SegmentReader(writer.storage, writer.schema, seg)
writer.add_reader(reader)
reader.close()
return []
|
e5985641cbe724072f37158196cdaed0600b403e
| 20,689 |
def build_features_revenue_model_q2(
df_listings: pd.DataFrame, df_daily_revenue: pd.DataFrame
):
"""Builds the features to be used on the revenue modelling for
answer question 2.
Parameters
----------
df_listings : pd.DataFrame
Pandas dataframe with information about listings.
df_daily_revenue : pd.DataFrame
Pandas dataframe with information about daily revenue.
Returns
-------
pd.DataFrame
Returns the input pandas dataframe with the new features added.
"""
data = pd.merge(
df_daily_revenue,
df_listings[["Código", "Comissão"]],
left_on="listing",
right_on="Código",
how="left",
)
data["company_revenue"] = data["Comissão"] * data["revenue"]
data_revenue = (
data.groupby("date")
.agg(company_revenue=("company_revenue", "sum"))
.reset_index()
)
data_revenue = build_date_features(data_revenue, "date")
data = data_revenue.loc[data_revenue["company_revenue"].notna()]
X = data.drop(columns="company_revenue").astype(float)
y = data["company_revenue"]
return X, y
|
16658cbc76edf66cf718b008d5fba58414df1f8c
| 20,690 |
import gzip
import pickle
def load_data():
"""Return the MNIST data as a tuple containing the training data,
the validation data, and the test data.
The ``training_data`` is returned as a tuple with two entries.
The first entry contains the actual training images. This is a
numpy ndarray with 50,000 entries. Each entry is, in turn, a
numpy ndarray with 784 values, representing the 28 * 28 = 784
pixels in a single MNIST image.
The second entry in the ``training_data`` tuple is a numpy ndarray
containing 50,000 entries. Those entries are just the digit
values (0...9) for the corresponding images contained in the first
entry of the tuple.
The ``validation_data`` and ``test_data`` are similar, except
each contains only 10,000 images.
This is a nice data format, but for use in neural networks it's
helpful to modify the format of the ``training_data`` a little.
That's done in the wrapper function ``load_data_wrapper()``, see
below.
"""
f = gzip.open('../data/mnist.pkl.gz', 'rb')
    training_data, validation_data, test_data = pickle.load(f, encoding='latin1')
f.close()
return (training_data, validation_data, test_data)
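# The docstring above points to `load_data_wrapper()`, which is not part of this
# snippet; the following is only a sketch of the conventional wrapper that reshapes
# each image to a (784, 1) column vector and one-hot encodes the training labels.
import numpy as np

def load_data_wrapper_sketch():
    tr_d, va_d, te_d = load_data()
    training_inputs = [np.reshape(x, (784, 1)) for x in tr_d[0]]
    training_results = [vectorized_result_sketch(y) for y in tr_d[1]]
    return list(zip(training_inputs, training_results)), va_d, te_d

def vectorized_result_sketch(j):
    """Return a 10-dimensional unit vector with a 1.0 in the j-th position."""
    e = np.zeros((10, 1))
    e[j] = 1.0
    return e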
|
f021f1db4b0b22c6d89620f44db7e2578c516489
| 20,691 |
def find_by_id(cls, groupkey, objectid, raises=False):
"""A helper function to look up an object by id"""
ob = None
try:
ob = keyedcache.cache_get(groupkey, objectid)
except keyedcache.NotCachedError as e:
try:
ob = cls.objects.get(pk=objectid)
keyedcache.cache_set(e.key, value=ob)
except cls.DoesNotExist:
log.debug("No such %s: %s", groupkey, objectid)
if raises:
raise cls.DoesNotExist
return ob
|
c2ce6b2081411dd51ba4af231d9618f321c8f6fc
| 20,692 |
def get_elements(xmldoc, tag_name, attribute):
"""Returns a list of elements"""
l = []
for item in xmldoc.getElementsByTagName(tag_name) :
value = item.getAttribute(attribute)
l.append( repr( value ) )
return l
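# Usage example with the standard library's minidom parser:
from xml.dom import minidom

_doc = minidom.parseString('<root><item id="a"/><item id="b"/></root>')
assert get_elements(_doc, "item", "id") == ["'a'", "'b'"]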
|
2cda65802d0dc1ebbb7796f6a43fa9bacfbe852e
| 20,693 |
def test_algorithm(circuit, iterations=1000000):
"""
Tests a circuit by submitting it to both aer_simulator and PyLinalg.
"""
linalg = PyLinalg()
qlm_circ, _ = qiskit_to_qlm(circuit, sep_measures=True)
test_job = qlm_circ.to_job(nbshots=0, aggregate_data=False)
expected = linalg.submit(test_job)
qiskit_qpu = BackendToQPU(Aer.get_backend('aer_simulator'))
test_job.nbshots = iterations
result = qiskit_qpu.submit(test_job)
dist_calc = compare_results(expected, result, aggregate=False)
distance = analyze_distance(dist_calc)
print("Distance is {}".format(distance))
return distance
|
ac11f10f9b467ab08275d55515a15d6906076191
| 20,694 |
def return_list_of_sn_host():
""" Return potential SN host names
This includes:
- List of object names in SIMBAD that would correspond to extra-galactic object
- Unknown objects
- objects with failed crossmatch
In practice, this exclude galactic objects from SIMBAD.
"""
list_simbad_galaxies = [
"galaxy",
"Galaxy",
"EmG",
"Seyfert",
"Seyfert_1",
"Seyfert_2",
"BlueCompG",
"StarburstG",
"LSB_G",
"HII_G",
"High_z_G",
"GinPair",
"GinGroup",
"BClG",
"GinCl",
"PartofG",
]
keep_cds = \
["Unknown", "Candidate_SN*", "SN", "Transient", "Fail"] + \
list_simbad_galaxies
return keep_cds
|
c2a536fc4b742dc0e4a4c57a582174017d6e2877
| 20,695 |
import glob
import re
def folder2catalog(path, granule_trunk='', granule_extension='*', add_sf=False, client=None):
""" Reads a folder of granules into a STAREDataFrame catalog
:param path: Path of the folder containing granules
:type path: str
:param granule_trunk: Granule identifier (e.g. MOD09)
:type granule_trunk: str
:param granule_extension: Extension of the granule (e.g. hdf, nc, HDF5)
:type granule_extension: str
:param add_sf: toggle creating simple feature representation of the iFOVs
:type add_sf: bool
:param client:
:type client:
:return: catalog
:rtype: starepandas.STAREDataFrame
"""
term = '{path}/{granule_trunk}*.{ext}'.format(path=path, granule_trunk=granule_trunk, ext=granule_extension)
s3 = None
if path[0:5] != 's3://':
granule_paths = glob.glob(term)
else:
        granule_paths, s3 = starepandas.io.s3.s3_glob(path, r'.*\.{ext}$'.format(ext=granule_extension))
if not granule_paths:
print('no granules in folder')
return None
    pattern = r'.*[^_stare]\.(nc|hdf|HDF5)'
granule_paths = list(filter(re.compile(pattern).match, granule_paths))
df = starepandas.STAREDataFrame()
if client is None:
for granule_path in granule_paths:
if s3 is not None:
granule_url = 's3://{bucket_name}/{granule}'.format(bucket_name=s3[0]['bucket_name'],
granule=granule_path)
else:
granule_url = granule_path
row = make_row(granule_url, add_sf)
df = df.append(row, ignore_index=True)
else:
pass
# client=Client()
# client.close()
df.set_sids('stare_cover', inplace=True)
if add_sf:
df.set_geometry('geom', inplace=True)
return df
|
3d1d34a3b2e85ddbfb624289126f077d1668bab4
| 20,696 |
import time
def _check_satellite_low(xbee, is_on_hold):
"""
Check if satellites are low and set the is_on_hold flag.
Args:
xbee(xbee.Zigbee): the XBee communication interface.
is_on_hold(bool): a flag telling if the thread is already on hold.
Returns:
bool: True if low sats, False if cleared.
"""
if shared.status['thread_flag'] & shared.NSATS_TOO_LOW:
        if not is_on_hold:
            _log_and_broadcast(xbee, "IFO,%s low sats hold." % shared.AGENT_ID)
        time.sleep(0.5)
        return True
    else:
        return False
|
5ecfdc304a9f6aa5aa41335637f6e783a3643df1
| 20,697 |
import requests
def indexof(path):
"""Returns list of filenames parsed off "Index of" page"""
resp = requests.get(path)
return [a for a, b in file_index_re.findall(resp.text) if a == b]
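# `file_index_re` is defined elsewhere in the module and not shown here; a
# plausible definition for a typical Apache "Index of" page would be:
# import re
# file_index_re = re.compile(r'<a href="([^"]+)">([^<]+)</a>')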
|
38b165bfd4f3dbefedff21c7ac62fb57cd8f2d97
| 20,698 |
from typing import Optional
def get_oversight(xml: dict) -> Optional[OversightInfo]:
""" Get oversight """
if val := xml.get('oversight_info'):
return OversightInfo(
has_dmc=val.get('has_dmc', ''),
is_fda_regulated_drug=val.get('is_fda_regulated_drug', ''),
is_fda_regulated_device=val.get('is_fda_regulated_device', ''),
is_unapproved_device=val.get('is_unapproved_device', ''),
is_ppsd=val.get('is_ppsd', ''),
is_us_export=val.get('is_us_export', ''))
|
fc14da139eb350306175016a2b8d2d036d02b042
| 20,699 |