content (string, 35–762k chars) | sha1 (string, 40 chars) | id (int64, 0–3.66M)
---|---|---|
def _fit_seasonal_model_with_gibbs_sampling(observed_time_series,
seasonal_structure,
num_warmup_steps=50,
num_results=100,
seed=None):
"""Builds a seasonality-as-regression model and fits it by Gibbs sampling."""
with tf.name_scope('fit_seasonal_model_with_gibbs_sampling'):
observed_time_series = sts_util.canonicalize_observed_time_series_with_mask(
observed_time_series)
dtype = observed_time_series.time_series.dtype
design_matrix = seasonality_util.build_fixed_effects(
num_steps=ps.shape(observed_time_series.time_series)[-2],
seasonal_structure=seasonal_structure,
dtype=dtype)
# Default priors.
# pylint: disable=protected-access
one = tf.ones([], dtype=dtype)
level_variance_prior = tfd.InverseGamma(concentration=16,
scale=16. * 0.001**2 * one)
level_variance_prior._upper_bound = one
slope_variance_prior = tfd.InverseGamma(concentration=16,
scale=16. * 0.05**2 * one)
slope_variance_prior._upper_bound = 0.01 * one
observation_noise_variance_prior = tfd.InverseGamma(
concentration=0.05, scale=0.05 * one)
observation_noise_variance_prior._upper_bound = 1.2 * one
# pylint: enable=protected-access
model = gibbs_sampler.build_model_for_gibbs_fitting(
observed_time_series=observed_time_series,
design_matrix=design_matrix,
weights_prior=tfd.Normal(loc=0., scale=one),
level_variance_prior=level_variance_prior,
slope_variance_prior=slope_variance_prior,
observation_noise_variance_prior=observation_noise_variance_prior)
return [
model,
gibbs_sampler.fit_with_gibbs_sampling(model,
observed_time_series,
num_results=num_results,
num_warmup_steps=num_warmup_steps,
seed=seed)
] | c13d4df3eca25f1a53ed27cd94e5f2b4b102013c | 5,227 |
import pandas as pd
import numpy as np
def rm_standard_dev(var,window):
"""
Smoothed standard deviation
"""
print('\n\n-----------STARTED: Rolling std!\n\n')
rollingstd = np.empty((var.shape))
for ens in range(var.shape[0]):
for i in range(var.shape[2]):
for j in range(var.shape[3]):
series = pd.Series(var[ens,:,i,j])
rollingstd[ens,:,i,j] = series.rolling(window).std().to_numpy()
newdata = rollingstd[:,window:,:,:]
print('-----------COMPLETED: Rolling std!\n\n')
return newdata | d37cfa3c756f8fc062a28ac078e4e16557282951 | 5,230 |
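A minimal usage sketch for rm_standard_dev above, using a small random 4-D array (ensemble, time, lat, lon) chosen purely for illustration:
import numpy as np
var = np.random.rand(2, 120, 3, 4)          # hypothetical field: 2 ensembles, 120 time steps, 3x4 grid
smoothed = rm_standard_dev(var, window=12)
print(smoothed.shape)                        # (2, 108, 3, 4): the first `window` time steps are dropped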
def visualizeTimeSeriesCategorization(dataName, saveDir, numberOfLagsToDraw=3, autocorrelationBased=True):
"""Visualize time series classification.
Parameters:
dataName: str
Data name, e.g. "myData_1"
saveDir: str
Path of directories pointing to data storage
numberOfLagsToDraw: int, Default 3
First top-N lags (or frequencies) to draw
autocorrelationBased: boolean, Default True
Whether autocorrelation or frequency based
Returns:
None
Usage:
visualizeTimeSeriesCategorization('myData_1', '/dir1/dir2/')
"""
info = 'Autocorrelations' if autocorrelationBased else 'Periodograms'
def internal(className):
print('\n\n%s of Time Series:'%(className))
clusteringObject = dataStorage.read(saveDir + 'consolidatedGroupsSubgroups/' + dataName + '_%s_%s'%(className,info) + '_GroupsSubgroups')
if clusteringObject is None:
print('Clustering object not found')
return
print('Plotting Dendrogram with Heatmaps.')
visualizationFunctions.makeDendrogramHeatmapOfClusteringObject(clusteringObject, saveDir, dataName + '_%s_%sBased'%(className,info), AutocorrNotPeriodogr=autocorrelationBased)
return
for lag in range(1,numberOfLagsToDraw + 1):
internal('LAG%s'%(lag))
internal('SpikeMax')
internal('SpikeMin')
return None | b2fcac2179e3a689ee73e13519e2f4ad77c59037 | 5,232 |
from typing import Dict
def refund(payment_information: Dict, connection_params) -> Dict:
"""Refund a payment using the culqi client.
But it first checks whether the given payment instance is supported
by the gateway.
It then retrieves a `charge` transaction to obtain the
payment id to refund, and returns an error with a failed transaction
if there is no such transaction, or if an error
from culqi occurs during the refund."""
error = check_payment_supported(payment_information=payment_information)
response_has_errors = False
if error:
response = get_error_response(
payment_information.amount, error=error)
else:
setup_client(**connection_params)
try:
payload = format_culqui_payload(
payment_information, TransactionKind.REFUND)
response = culqipy.Refund.create(payload)
print(f"DATA::response::{response}")
# Fix: get specific errors
except Exception as error:
response_has_errors = True
response = get_error_response(
payment_information.amount, error=error)
if not response_has_errors:
if response.get('object', None) == 'error':
error = response.get('user_message', None)
if error is None:
error = response.get('merchant_message', None)
if error is None:
error = 'Unknown error!'
response = get_error_response(
payment_information.amount, error=error,
id=payment_information.token)
else:
clean_culqi_response(response)
return _generate_response(
payment_information=payment_information,
kind=TransactionKind.REFUND, data=response) | 75dff392c0748a1408eb801ad78ef65be988026c | 5,233 |
import numbers
import numpy as np
def check_random_state(seed):
"""Turn `seed` into a `np.random.RandomState` instance.
Parameters
----------
seed : {None, int, `numpy.random.Generator`,
`numpy.random.RandomState`}, optional
If `seed` is None (or `np.random`), the `numpy.random.RandomState`
singleton is used.
If `seed` is an int, a new ``Generator`` instance is used,
seeded with `seed`.
If `seed` is already a ``Generator`` or ``RandomState`` instance then
that instance is used.
Returns
-------
seed : {`numpy.random.Generator`, `numpy.random.RandomState`}
Random number generator.
"""
if seed is None or seed is np.random:
return np.random.mtrand._rand
if isinstance(seed, (numbers.Integral, np.integer)):
return np.random.default_rng(seed)
if isinstance(seed, (np.random.RandomState, np.random.Generator)):
return seed
raise ValueError(
"%r cannot be used to seed a numpy.random.RandomState" " instance" % seed
) | 57390806329776c77977a27e18e78fdad298fef9 | 5,236 |
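A short usage sketch for check_random_state above:
rng = check_random_state(42)            # seeded numpy Generator
same = check_random_state(rng) is rng   # an existing generator is passed through: True
print(rng.integers(0, 10), same)        # deterministic draw, True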
def power3_sum_2method():
"""
Input:
nothing, it has everything it needs.
Output:
sum: sum of all numbers that are powers of 3
and fit between 0 and the upper bound == 1000000
"""
k = 0
sum = 0
while True:
a = 3**k
k += 1
if a < 1000000:
sum += a
else:
break
return sum | b86bfaeb2418e183a78054d2a4b76c58d58be388 | 5,237 |
def bitwise_right_shift(rasters, extent_type="FirstOf", cellsize_type="FirstOf", astype=None):
"""
The BitwiseRightShift operation
The arguments for this function are as follows:
:param rasters: array of rasters. If a scalar is needed for the operation, the scalar can be a double or string
:param extent_type: one of "FirstOf", "IntersectionOf", "UnionOf", "LastOf"
:param cellsize_type: one of "FirstOf", "MinOf", "MaxOf", "MeanOf", "LastOf"
:param astype: output pixel type
:return: the output raster
"""
return local(rasters, 15, extent_type=extent_type, cellsize_type=cellsize_type, astype=astype) | 8d07a60a514466ee4aa0b15b0b442fb71b3347ed | 5,238 |
import re
def strip_comments(line):
"""Strips comments from a line and return None if the line is empty
or else the contents of line with leading and trailing spaces removed
and all other whitespace collapsed"""
commentIndex = line.find('//')
if commentIndex == -1:
commentIndex = len(line)
line = re.sub(r'\s+', ' ', line[:commentIndex].strip())
if line == '':
return None
else:
return line | 09579031294d7b5787c97fa81807fa5ecfe12329 | 5,239 |
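A few illustrative calls to strip_comments above:
print(strip_comments("  int x = 1;   // set x"))  # 'int x = 1;'
print(strip_comments("// only a comment"))        # None
print(strip_comments("a    b   c"))               # 'a b c'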
import logging
import numpy as np
def fft_pxscale(header,wave):
"""Compute conversion scale from telescope space to sky space.
Parameters
----------
ima : array
2D Telescope pupil model.
Returns
-------
fftscale : float
The frequency scale in sky space.
Example
-------
.. code-block:: python
fftscale = fft_pxscale(header, wave)
"""
#size of the image. This should be taken from the header.
gridsize = header['NAXIS1']
#pixel scale of the image. This should be taken from the header.
pxscale_mod = header['PIXSCALE'] #in meters/px
#1D FFT of the gridsize.
fft_freq=np.fft.fftfreq(gridsize,pxscale_mod)
#wavelength of the desired psf. This is an input from the user, wavelength in microns
wave = (getQuantity(wave,recognized_units=UNITS['WAVE']))
lam = wave.to(u.m) #in meters
#re-organizing the 1D FFT to match with the grid.
roll=np.floor(gridsize//2).astype("int")
freq = np.fft.fftshift(fft_freq)
##
## pxscale -> fftscale
fftscale=np.diff(freq)[0] ## cycles / mas per pixel in FFT image
mas2rad=np.deg2rad(1./3600000.) ## mas per rad
fftscale = fftscale/mas2rad * lam ## meters baseline per px in FFT image at a given wavelength
logging.info("Pixel scale in PSF image is: %g mas per pixel" % fftscale.value)
return fftscale.value | 6935bdefe96aec771704a79952cfc25ffb55e8bb | 5,240 |
def parse_git_submodules(gitmodules_data):
"""Parse a .gitmodules file to extract a { name -> url } map from it."""
result = {}
# NOTE: configparser.ConfigParser() doesn't seem to like the file
# (i.e. read_string() always returns None), so do the parsing
# manually here.
section_name = None
is_submodule_section = False
submodule_name = None
submodule_prefix = 'submodule "'
urls = {}
branches = {}
for line in gitmodules_data.splitlines():
if line.startswith('['):
section_name = line[1:-1]
is_submodule_section = section_name.startswith(submodule_prefix)
if is_submodule_section:
submodule_name = section_name[len(submodule_prefix):-1]
elif is_submodule_section:
key, _, value = line.strip().partition('=')
if not value:
continue
key = key.strip()
value = value.strip()
if key == 'url':
urls[submodule_name] = value
elif key == 'branch':
branches[submodule_name] = value
result = {}
for submodule, url in urls.items():
branch = branches.get(submodule)
if not branch:
branch = get_git_remote_ref(url, 'heads/master')
result[submodule] = '%s@%s' % (url, branch)
return result | 78d01ec70b68164189a2ea775c6084e256116d0a | 5,241 |
import pathlib
from collections import defaultdict
from typing import Dict
import json
def get_model_cases(dir_path: pathlib.Path) -> Dict[str, Dict[str, str]]:
"""
Returns the Zen model case for each test if it exists.
:param dir_path: The path to the directory containing the DIFFERENCES directory.
"""
model_cases = defaultdict(dict) # type: Dict[str, Dict[str, str]]
queries_dir = dir_path / QUERIES
expected_res_dir = dir_path / QUERY_RESPONSES
tag_dir = None
if queries_dir.exists() and queries_dir.is_dir():
tag_dir = queries_dir
elif expected_res_dir.exists() and expected_res_dir.is_dir():
tag_dir = expected_res_dir
if isinstance(tag_dir, pathlib.Path):
for queries_file in tag_dir.iterdir():
with open(queries_file, 'r') as qf_fp:
queries_info = json.load(qf_fp)
for qinfo in queries_info:
if "ZenResponseTag" in qinfo:
query_str = qinfo["Query"]["Name"] + ":" +\
qinfo["Query"]["Type"]
model_cases[queries_file.stem][query_str] = qinfo["ZenResponseTag"]
return model_cases | d35b4cf59cf9b99a6aeb9e05e0af3ee342b11f3b | 5,242 |
def _format_date(event):
"""Returns formated date json object for event"""
old_date = event["date"]
term = event["term"]
dates = old_date.split("-")
is_range = (len(dates) > 1)
if is_range:
start_date = dates[0]
end_date = dates[-1]
else:
start_date = dates[0]
end_date = dates[0]
new_start_date = _format_date_string(start_date, term)
new_end_date = _format_date_string(end_date, term)
date = {
"start_date": new_start_date,
"end_date": new_end_date,
"range": is_range,
}
return date | aa8bf9a41fe30b664920e895cdc31d6993a408b2 | 5,243 |
def fetch(bibcode, filename=None, replace=None):
"""
Attempt to fetch a PDF file from ADS. If successful, then
add it into the database. If the fetch succeeds but the bibcode is
not in the database, download the file to the current folder.
Parameters
----------
bibcode: String
ADS bibcode of entry to update.
filename: String
Filename to assign to the PDF file. If None, get from
guess_name() function.
replace: Bool
If True, enforce replacing a PDF regardless of a pre-existing one.
If None (default), only ask when fetched PDF comes from arxiv.
Returns
-------
filename: String
If successful, return the full path of the file name.
If not, return None.
"""
arxiv = False
print('Fetching PDF file from Journal website:')
req = request_ads(bibcode, source='journal')
if req is None:
return
if req.status_code != 200:
print('Fetching PDF file from ADS website:')
req = request_ads(bibcode, source='ads')
if req is None:
return
if req.status_code != 200:
print('Fetching PDF file from ArXiv website:')
req = request_ads(bibcode, source='arxiv')
arxiv = True
if replace is None:
replace = False
if req is None:
return
if replace is None:
replace = True
if req.status_code == 200:
if bm.find(bibcode=bibcode) is None:
if filename is None:
filename = f'{bibcode}.pdf'
with builtin_open(filename, 'wb') as f:
f.write(req.content)
print(f"Saved PDF to: '{filename}'.\n"
"(Note that BibTex entry is not in the Bibmanager database)")
else:
filename = set_pdf(
bibcode, bin_pdf=req.content, filename=filename, arxiv=arxiv,
replace=replace)
return filename
print('Could not fetch PDF from any source.') | 7d264df3f0eab896a9cb4858e7b19e2590d8142b | 5,244 |
def crop_multi(x, wrg, hrg, is_random=False, row_index=0, col_index=1):
"""Randomly or centrally crop multiple images.
Parameters
----------
x : list of numpy.array
List of images with dimension of [n_images, row, col, channel] (default).
others : args
See ``tl.prepro.crop``.
Returns
-------
numpy.array
A list of processed images.
"""
h, w = x[0].shape[row_index], x[0].shape[col_index]
if (h < hrg) or (w < wrg):
raise AssertionError("The size of cropping should smaller than or equal to the original image")
if is_random:
h_offset = int(np.random.uniform(0, h - hrg))
w_offset = int(np.random.uniform(0, w - wrg))
results = []
for data in x:
results.append(data[h_offset:hrg + h_offset, w_offset:wrg + w_offset])
return np.asarray(results)
else:
# central crop
h_offset = int(np.floor((h - hrg) / 2.))
w_offset = int(np.floor((w - wrg) / 2.))
results = []
for data in x:
results.append(data[h_offset:h - h_offset, w_offset:w - w_offset])
return np.asarray(results) | 61593029455a880d5309e8343cf4f6d1049f598f | 5,245 |
def value_loss_given_predictions(value_prediction,
rewards,
reward_mask,
gamma,
epsilon,
value_prediction_old=None):
"""Computes the value loss given the prediction of the value function.
Args:
value_prediction: np.ndarray of shape (B, RT+1, 1)
rewards: np.ndarray of shape (B, RT) of rewards.
reward_mask: np.ndarray of shape (B, RT), the mask over rewards.
gamma: float, discount factor.
epsilon: float, clip-fraction, used if value_prediction_old isn't None
value_prediction_old: np.ndarray of shape (B, RT+1, 1) of value predictions
using the old parameters. If provided, we incorporate this in the loss as
well. This is from the OpenAI baselines implementation.
Returns:
Pair (value_loss, summaries), where value_loss is the average L2 value loss,
averaged over instances where reward_mask is 1. Summaries is a dict of
summaries collected during value loss computation.
"""
B, RT = rewards.shape # pylint: disable=invalid-name
assert (B, RT) == reward_mask.shape
assert (B, RT + 1) == value_prediction.shape
value_prediction = value_prediction[:, :-1] * reward_mask # (B, RT)
r2g = rewards_to_go(rewards, reward_mask, gamma=gamma) # (B, RT)
loss = (value_prediction - r2g)**2
# From the baselines implementation.
if value_prediction_old is not None:
value_prediction_old = value_prediction_old[:, :-1] * reward_mask # (B, RT)
v_clipped = value_prediction_old + np.clip(
value_prediction - value_prediction_old, -epsilon, epsilon)
v_clipped_loss = (v_clipped - r2g)**2
loss = np.maximum(v_clipped_loss, loss)
# Take an average on only the points where mask != 0.
value_loss = np.sum(loss) / np.sum(reward_mask)
summaries = {
'value_loss': value_loss,
}
return (value_loss, summaries) | 5896dd57e1e9d05eb71e5b31aab4071b61d0fdbf | 5,246 |
def build_pkt(pkt):
"""Build and return a packet and eth type from a dict."""
def serialize(layers):
"""Concatenate packet layers and serialize."""
result = packet.Packet()
for layer in reversed(layers):
result.add_protocol(layer)
result.serialize()
return result
layers = []
assert 'eth_dst' in pkt and 'eth_src' in pkt
ethertype = None
if 'arp_source_ip' in pkt and 'arp_target_ip' in pkt:
ethertype = ether.ETH_TYPE_ARP
arp_code = pkt.get('arp_code', arp.ARP_REQUEST)
layers.append(arp.arp(
src_ip=pkt['arp_source_ip'],
dst_ip=pkt['arp_target_ip'],
opcode=arp_code))
elif 'ipv6_src' in pkt and 'ipv6_dst' in pkt:
ethertype = ether.ETH_TYPE_IPV6
if 'router_solicit_ip' in pkt:
layers.append(icmpv6.icmpv6(
type_=icmpv6.ND_ROUTER_SOLICIT))
elif 'neighbor_advert_ip' in pkt:
layers.append(icmpv6.icmpv6(
type_=icmpv6.ND_NEIGHBOR_ADVERT,
data=icmpv6.nd_neighbor(
dst=pkt['neighbor_advert_ip'],
option=icmpv6.nd_option_sla(hw_src=pkt['eth_src']))))
elif 'neighbor_solicit_ip' in pkt:
layers.append(icmpv6.icmpv6(
type_=icmpv6.ND_NEIGHBOR_SOLICIT,
data=icmpv6.nd_neighbor(
dst=pkt['neighbor_solicit_ip'],
option=icmpv6.nd_option_sla(hw_src=pkt['eth_src']))))
elif 'echo_request_data' in pkt:
layers.append(icmpv6.icmpv6(
type_=icmpv6.ICMPV6_ECHO_REQUEST,
data=icmpv6.echo(id_=1, seq=1, data=pkt['echo_request_data'])))
layers.append(ipv6.ipv6(
src=pkt['ipv6_src'],
dst=pkt['ipv6_dst'],
nxt=inet.IPPROTO_ICMPV6))
elif 'ipv4_src' in pkt and 'ipv4_dst' in pkt:
ethertype = ether.ETH_TYPE_IP
proto = inet.IPPROTO_IP
if 'echo_request_data' in pkt:
echo = icmp.echo(id_=1, seq=1, data=pkt['echo_request_data'])
layers.append(icmp.icmp(type_=icmp.ICMP_ECHO_REQUEST, data=echo))
proto = inet.IPPROTO_ICMP
net = ipv4.ipv4(src=pkt['ipv4_src'], dst=pkt['ipv4_dst'], proto=proto)
layers.append(net)
elif 'actor_system' in pkt and 'partner_system' in pkt:
ethertype = ether.ETH_TYPE_SLOW
layers.append(slow.lacp(
version=1,
actor_system=pkt['actor_system'],
actor_port=1,
partner_system=pkt['partner_system'],
partner_port=1,
actor_key=1,
partner_key=1,
actor_system_priority=65535,
partner_system_priority=1,
actor_port_priority=255,
partner_port_priority=255,
actor_state_defaulted=0,
partner_state_defaulted=0,
actor_state_expired=0,
partner_state_expired=0,
actor_state_timeout=1,
partner_state_timeout=1,
actor_state_collecting=1,
partner_state_collecting=1,
actor_state_distributing=1,
partner_state_distributing=1,
actor_state_aggregation=1,
partner_state_aggregation=1,
actor_state_synchronization=pkt['actor_state_synchronization'],
partner_state_synchronization=1,
actor_state_activity=0,
partner_state_activity=0))
elif 'chassis_id' in pkt and 'port_id' in pkt:
ethertype = ether.ETH_TYPE_LLDP
return valve_packet.lldp_beacon(
pkt['eth_src'], pkt['chassis_id'], str(pkt['port_id']), 1,
org_tlvs=pkt.get('org_tlvs', None),
system_name=pkt.get('system_name', None))
assert ethertype is not None, pkt
if 'vid' in pkt:
tpid = ether.ETH_TYPE_8021Q
layers.append(vlan.vlan(vid=pkt['vid'], ethertype=ethertype))
else:
tpid = ethertype
eth = ethernet.ethernet(
dst=pkt['eth_dst'],
src=pkt['eth_src'],
ethertype=tpid)
layers.append(eth)
result = serialize(layers)
return result | afd84446d3bb545b03b9d4c42d80f096b6665342 | 5,247 |
def make_file_prefix(run, component_name):
"""
Compose the component name and run number into a string prefix
to use with filenames.
"""
return "{}_{}".format(component_name, run) | 73ef37d75d9e187ee49ee058958c3b8701185585 | 5,248 |
from typing import Dict
from yaml import load, FullLoader  # assumed PyYAML, based on load(..., Loader=FullLoader) below
def initialize_lock_and_key_ciphers() -> Dict[str, VigenereCipher]:
"""[summary]
Returns:
Dict[VigenereCipher]: [description]"""
ciphers = {}
with open(CIPHER_RESOURCE, "r") as cipher_resource_file:
cipher_data = load(cipher_resource_file, Loader=FullLoader)
for cipher_key_name, cipher_keys in cipher_data.items():
ciphers[cipher_key_name] = VigenereCipher(key=cipher_keys['key'], alphabet=cipher_keys['alphabet'])
return ciphers | 1c0a27b36b4c0524b77dcb5c44a3bc840797b226 | 5,250 |
def add_service():
"""
Used to register a new service
"""
form = ServiceForm()
if form.validate_on_submit():
try:
srv = Services()
srv.populate_from_form(form)
srv.authentication.value = {"db":request.form.get('authdb'),"user":request.form.get('authuser'),"pswd":request.form.get("authpass")}
srv.save()
flash('Datele au fost adaugate!', category='alert-success')
return redirect(url_for('services.list_services'))
except Exception as err:
flash('Datele nu pot fi adaugate!', category='alert-danger')
return render_template('services/settings/add.html', pagetitle='Adauga serviciu', form=form) | 56ce52c293d42710a9d4d5ac57b21f5ba1c0c0ac | 5,251 |
def parse_resolution(resolution):
"""
return: width, height, resolution
"""
resolution = resolution.strip()
splits = resolution.split(',')
return int(splits[0]), int(splits[1]), int(splits[2]) | de937e440c4540d11cedd868e3f4a046baa99f22 | 5,253 |
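A short example of the parser above:
width, height, res = parse_resolution(" 1920,1080,72 ")
print(width, height, res)  # 1920 1080 72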
import inspect
def get_arguments(func):
"""Returns list of arguments this function has."""
if hasattr(func, '__code__'):
# Regular function.
return inspect.getargspec(func).args
elif hasattr(func, '__call__'):
# Callable object.
return get_arguments(func.__call__)
elif hasattr(func, 'func'):
# Partial function.
return get_arguments(func.func)
def _check(isamAppliance, name):
"""
Check if suffix exists
"""
ret_obj = get(isamAppliance)
check_value, warnings = False, ret_obj['warnings']
if warnings == []:
for suffix in ret_obj['data']:
if suffix['name'] == name:
logger.info("Suffix found in embedded ldap: " + name)
check_value = True
return check_value, suffix['id'], warnings
logger.info("Suffix *not* found in embedded ldap: " + name)
return check_value, None, warnings | be2a6226ebdccb92ec3361df79e50165a22d6981 | 5,257 |
def check_listening_address(address: str) -> bool:
"""Check entered ip address for validity."""
if address == 'localhost':
return True
return address in get_local_addresses() | eaa5cecfee4e8be2947150a537213f4159ee6baf | 5,258 |
import base64
def multibase_b64decode(data):
"""
Follow forge's base64 urlsafe encode convention to decode string
Args:
data(string): encoded string
Returns: bytes
Examples:
>>> multibase_b64decode('aGVsbG8')
b'hello'
"""
if isinstance(data, str):
data = data.encode()
return base64.urlsafe_b64decode(
(data + b'=' * (-len(data) % 4))) | fdbc0f937e33d7994737a3a515973598cac3debd | 5,259 |
from typing import List
def parse_ordering_params(param: List[str]) -> List[str]:
"""
Ignores the request to sort by "ord".
Returns a sorting order based on the params and includes "readable_id"
sorting in passed params if the sorting request contains title
otherwise, it returns the requested order.
"""
if "ord" in param:
order = []
elif "title" in param:
prefix = "-" if param[0] == "-" else ""
order = ["{prefix}coursepage__course__readable_id".format(prefix=prefix), param]
else:
order = [param]
return order | a6a5f4665515a292ad2367945a6b8407000d656a | 5,260 |
def file_senzing_rabbitmq():
"""#!/usr/bin/env bash
# --- Functions ---------------------------------------------------------------
function up {
echo -ne "\033[2K${CONTAINER_NAME} status: starting...\r"
mkdir -p ${RABBITMQ_DIR}
chmod 777 ${RABBITMQ_DIR}
if [ "${CONTAINER_VERSION}" == "latest" ]
then
${SENZING_SUDO} docker pull ${SENZING_DOCKER_REGISTRY_URL}/bitnami/rabbitmq:${CONTAINER_VERSION} >> ${CONTAINER_LOG} 2>&1
fi
${SENZING_SUDO} docker run \\
--detach \\
--env RABBITMQ_PASSWORD=${SENZING_RABBITMQ_PASSWORD} \\
--env RABBITMQ_USERNAME=${SENZING_RABBITMQ_USERNAME} \\
--interactive \\
--name ${CONTAINER_NAME} \\
--publish ${CONTAINER_PORT}:15672 \\
--publish ${SENZING_DOCKER_PORT_RABBITMQ}:5672 \\
--restart always \\
--tty \\
--volume ${RABBITMQ_DIR}:/bitnami \\
${SENZING_DOCKER_RUN_PARAMETERS_GLOBAL} \\
${SENZING_DOCKER_RUN_PARAMETERS_RABBITMQ} \\
${SENZING_NETWORK_PARAMETER} \\
${SENZING_PRIVILEGED_PARAMETER} \\
bitnami/rabbitmq:${CONTAINER_VERSION} \\
>> ${CONTAINER_LOG} 2>&1
COUNTER=0
COUNTER_NOTICE=5
TIME_STRING=".."
CONTAINER_STATUS="$( docker container inspect -f '{{.State.Status}}' ${CONTAINER_NAME})"
while [ "${CONTAINER_STATUS}" != "running" ]; do
COUNTER=$((${COUNTER}+1))
if [ "${COUNTER}" -eq "${COUNTER_NOTICE}" ]; then
echo -ne "\033[2K"
echo ""
echo "To see what is happening behind-the-scenes, view the log at"
echo "${CONTAINER_LOG}"
echo "and/or run 'docker logs ${CONTAINER_NAME}'"
echo ""
fi
TIME_STRING="${TIME_STRING}."
echo -ne "\033[2K${CONTAINER_NAME} status: ${CONTAINER_STATUS}${TIME_STRING}\r"
sleep 5
CONTAINER_STATUS="$( docker container inspect -f '{{.State.Status}}' ${CONTAINER_NAME})"
done
sleep 10
echo "${SENZING_HORIZONTAL_RULE}"
echo "${SENZING_HORIZONTAL_RULE:0:2} ${CONTAINER_NAME} running on http://${SENZING_DOCKER_HOST_IP_ADDR}:${CONTAINER_PORT}"
echo "${SENZING_HORIZONTAL_RULE:0:2} Username: ${SENZING_RABBITMQ_USERNAME} Password: ${SENZING_RABBITMQ_PASSWORD}"
echo "${SENZING_HORIZONTAL_RULE:0:2} Mount information: (Format: in container > on host)"
echo "${SENZING_HORIZONTAL_RULE:0:2} /bitnami > ${RABBITMQ_DIR}"
echo "${SENZING_HORIZONTAL_RULE:0:2} Logs:"
echo "${SENZING_HORIZONTAL_RULE:0:2} ${CONTAINER_LOG}"
echo "${SENZING_HORIZONTAL_RULE:0:2} and/or run 'docker logs ${CONTAINER_NAME}'"
echo "${SENZING_HORIZONTAL_RULE:0:2} For more information:"
echo "${SENZING_HORIZONTAL_RULE:0:2} ${SENZING_REFERENCE_URL}#senzing-rabbitmq"
echo "${SENZING_HORIZONTAL_RULE}"
}
function down {
${SENZING_SUDO} docker stop ${CONTAINER_NAME} >> ${CONTAINER_LOG} 2>&1
${SENZING_SUDO} docker rm ${CONTAINER_NAME} >> ${CONTAINER_LOG} 2>&1
}
function usage {
echo "usage: $0 [up | down | restart]"
echo "For more information:"
echo "${SENZING_REFERENCE_URL}#senzing-rabbitmq"
}
# --- Main --------------------------------------------------------------------
SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
source ${SCRIPT_DIR}/docker-environment-vars.sh
CONTAINER_LOG="${SENZING_LOG_RABBITMQ}"
CONTAINER_NAME="${SENZING_DOCKER_CONTAINER_NAME_RABBITMQ}"
CONTAINER_PORT="${SENZING_DOCKER_PORT_RABBITMQ_UI}"
CONTAINER_VERSION="${SENZING_DOCKER_IMAGE_VERSION_RABBITMQ}"
if [ "$1" == "up" ]; then
up
elif [ "$1" == "down" ]; then
down
elif [ "$1" == "restart" ]; then
down
up
else
usage
fi
"""
return 0 | 95396425074096d17561b20cd197e77f1d550476 | 5,261 |
def mse(predictions, targets):
"""Calculate MSE: (Mean squared error)
"""
return ((predictions - targets) ** 2).mean() | 79d87a3422d4d24201cae86ee861614c83f6770f | 5,262 |
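A quick worked example for mse above:
import numpy as np
predictions = np.array([2.5, 0.0, 2.0, 8.0])
targets = np.array([3.0, -0.5, 2.0, 7.0])
print(mse(predictions, targets))  # 0.375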
def export1d(hist):
"""Export a 1-dimensional `Hist` object to uproot
This allows one to write a coffea histogram into a ROOT file, via uproot.
Parameters
----------
hist : Hist
A 1-dimensional histogram object
Returns
-------
out
A ``uproot_methods.classes.TH1`` object
Examples
--------
Creating a coffea histogram, filling, and writing to a file::
import coffea, uproot, numpy
h = coffea.hist.Hist("Events", coffea.hist.Bin("var", "some variable", 20, 0, 1))
h.fill(var=numpy.random.normal(size=100))
fout = uproot.create('output.root')
fout['myhist'] = coffea.hist.export1d(h)
fout.close()
"""
if hist.dense_dim() != 1:
raise ValueError("export1d() can only support one dense dimension")
if hist.sparse_dim() != 0:
raise ValueError("export1d() expects zero sparse dimensions")
axis = hist.axes()[0]
sumw, sumw2 = hist.values(sumw2=True, overflow='all')[()]
edges = axis.edges(overflow='none')
out = TH1.__new__(TH1)
out._fXaxis = TAxis(len(edges) - 1, edges[0], edges[-1])
out._fXaxis._fName = axis.name
out._fXaxis._fTitle = axis.label
if not axis._uniform:
out._fXaxis._fXbins = edges.astype(">f8")
centers = (edges[:-1] + edges[1:]) / 2.0
out._fEntries = out._fTsumw = out._fTsumw2 = sumw[1:-1].sum()
out._fTsumwx = (sumw[1:-1] * centers).sum()
out._fTsumwx2 = (sumw[1:-1] * centers**2).sum()
out._fName = "histogram"
out._fTitle = hist.label
out._classname = b"TH1D"
out.extend(sumw.astype(">f8"))
out._fSumw2 = sumw2.astype(">f8")
return out | ffe09495a268c68d26f9861e6d732649f2f74497 | 5,263 |
def filter_words(w_map, emb_array, ck_filenames):
""" delete word in w_map but not in the current corpus """
vocab = set()
for filename in ck_filenames:
for line in open(filename, 'r'):
if not (line.isspace() or (len(line) > 10 and line[0:10] == '-DOCSTART-')):
line = line.rstrip('\n').split()
assert len(line) >= 3, 'wrong ck file format'
word = line[0]
vocab.add(word)
word = word.lower()
vocab.add(word)
new_w_map = {}
new_emb_array = []
for (word, idx) in w_map.items():
if word in vocab or word in ['<unk>', '<s>', '< >', '<\n>']:
assert word not in new_w_map
new_w_map[word] = len(new_emb_array)
new_emb_array.append(emb_array[idx])
print('filtered %d --> %d' % (len(emb_array), len(new_emb_array)))
return new_w_map, new_emb_array | efdef92093acf25c992dba86da25a4118ba728ec | 5,264 |
def get_cache_template(sources, grids, geopackage, table_name="tiles"):
"""
Returns the cache template which is "controlled" settings for the application.
The intent is to allow the user to configure certain things but impose specific behavior.
:param sources: A name for the source
:param grids: specific grid for the data source
:param geopackage: Location for the geopackage
:return: The dict template
"""
if sources == ["None"]:
sources = []
return {
"sources": sources,
"cache": {"type": "geopackage", "filename": str(geopackage), "table_name": table_name},
"grids": [grid for grid in grids if grid == "default"] or grids,
"format": "mixed",
"request_format": "image/png",
} | dc83a155d28e0b39f12a7dc7142b61a4bf27512b | 5,265 |
import datetime
def plotter(fdict):
""" Go """
pgconn = get_dbconn('coop')
ccursor = pgconn.cursor(cursor_factory=psycopg2.extras.DictCursor)
ctx = get_autoplot_context(fdict, get_description())
station = ctx['station']
lagmonths = ctx['lag']
months = ctx['months']
month = ctx['month']
highyears = [int(x) for x in ctx['year'].split(",")]
h = ctx['h']
wantmonth = month + lagmonths
yearoffset = 0
if month + lagmonths < 1:
wantmonth = 12 - (month + lagmonths)
yearoffset = 1
wanted = []
deltas = []
for m in range(month, month+months):
if m < 13:
wanted.append(m)
deltas.append(0)
else:
wanted.append(m-12)
deltas.append(-1)
table = "alldata_%s" % (station[:2],)
nt = network.Table("%sCLIMATE" % (station[:2],))
elnino = {}
ccursor.execute("""SELECT monthdate, soi_3m, anom_34 from elnino""")
for row in ccursor:
if row[0].month != wantmonth:
continue
elnino[row[0].year + yearoffset] = dict(soi_3m=row[1], anom_34=row[2])
ccursor.execute("""
SELECT year, month, sum(precip), avg((high+low)/2.)
from """ + table + """
where station = %s GROUP by year, month
""", (station, ))
yearly = {}
for row in ccursor:
(_year, _month, _precip, _temp) = row
if _month not in wanted:
continue
effectiveyear = _year + deltas[wanted.index(_month)]
nino = elnino.get(effectiveyear, {}).get('soi_3m', None)
if nino is None:
continue
data = yearly.setdefault(effectiveyear, dict(precip=0, temp=[],
nino=nino))
data['precip'] += _precip
data['temp'].append(float(_temp))
fig = plt.figure(figsize=(10, 6))
ax = plt.axes([0.1, 0.12, 0.5, 0.75])
msg = ("[%s] %s\n%s\n%s SOI (3 month average)"
) % (station, nt.sts[station]['name'], title(wanted),
datetime.date(2000, wantmonth, 1).strftime("%B"))
ax.set_title(msg)
cmap = plt.get_cmap("RdYlGn")
zdata = np.arange(-2.0, 2.1, 0.5)
norm = mpcolors.BoundaryNorm(zdata, cmap.N)
rows = []
xs = []
ys = []
for year in yearly:
x = yearly[year]['precip']
y = np.average(yearly[year]['temp'])
xs.append(x)
ys.append(y)
val = yearly[year]['nino']
c = cmap(norm([val])[0])
if h == 'hide' and val > -0.5 and val < 0.5:
ax.scatter(x, y, facecolor='#EEEEEE', edgecolor='#EEEEEE', s=30,
zorder=2, marker='s')
else:
ax.scatter(x, y, facecolor=c, edgecolor='k', s=60, zorder=3,
marker='o')
if year in highyears:
ax.text(x, y + 0.2, "%s" % (year, ), ha='center', va='bottom',
zorder=5)
rows.append(dict(year=year, precip=x, tmpf=y, soi3m=val))
ax.axhline(np.average(ys), lw=2, color='k', linestyle='-.', zorder=2)
ax.axvline(np.average(xs), lw=2, color='k', linestyle='-.', zorder=2)
sm = plt.cm.ScalarMappable(norm, cmap)
sm.set_array(zdata)
cb = plt.colorbar(sm, extend='both')
cb.set_label("<-- El Nino :: SOI :: La Nina -->")
ax.grid(True)
ax.set_xlim(left=-0.01)
ax.set_xlabel("Total Precipitation [inch], Avg: %.2f" % (np.average(xs),))
ax.set_ylabel((r"Average Temperature $^\circ$F, "
"Avg: %.1f") % (np.average(ys), ))
df = pd.DataFrame(rows)
ax2 = plt.axes([0.67, 0.6, 0.28, 0.35])
ax2.scatter(df['soi3m'].values, df['tmpf'].values)
ax2.set_xlabel("<-- El Nino :: SOI :: La Nina -->")
ax2.set_ylabel(r"Avg Temp $^\circ$F")
slp, intercept, r_value, _, _ = stats.linregress(df['soi3m'].values,
df['tmpf'].values)
y1 = -2.0 * slp + intercept
y2 = 2.0 * slp + intercept
ax2.plot([-2, 2], [y1, y2])
ax2.text(0.97, 0.9, "R$^2$=%.2f" % (r_value**2, ),
ha='right', transform=ax2.transAxes, bbox=dict(color='white'))
ax2.grid(True)
ax3 = plt.axes([0.67, 0.1, 0.28, 0.35])
ax3.scatter(df['soi3m'].values, df['precip'].values)
ax3.set_xlabel("<-- El Nino :: SOI :: La Nina -->")
ax3.set_ylabel("Total Precip [inch]")
slp, intercept, r_value, _, _ = stats.linregress(df['soi3m'].values,
df['precip'].values)
y1 = -2.0 * slp + intercept
y2 = 2.0 * slp + intercept
ax3.plot([-2, 2], [y1, y2])
ax3.text(0.97, 0.9, "R$^2$=%.2f" % (r_value**2, ),
ha='right', transform=ax3.transAxes, bbox=dict(color='white'))
ax3.grid(True)
return fig, df | 0f41a53336f2bf65805adaf83a8f3f17c006e161 | 5,266 |
def _action_spec():
"""Returns the action spec."""
paddle_action_spec = dm_env_rpc_pb2.TensorSpec(
dtype=dm_env_rpc_pb2.INT8, name=_ACTION_PADDLE)
tensor_spec_utils.set_bounds(
paddle_action_spec,
minimum=np.min(_VALID_ACTIONS),
maximum=np.max(_VALID_ACTIONS))
return {1: paddle_action_spec} | 130b7b2fe9f56d925d4ec1206eb3fb2752fee716 | 5,267 |
def stdin(sys_stdin):
"""
Imports standard input.
"""
inputs = [x.strip("[]\n") for x in sys_stdin]
a = [int(x) for x in inputs[0].split(",")]
x = int(inputs[1][0])
return a, x | 4c34e1bc80da31c6c7aff0d71a0c65f6fc01ed00 | 5,268 |
def _row_key(row):
"""
:param row: a normalized row from STATEMENT_METRICS_QUERY
:return: a tuple uniquely identifying this row
"""
return row['database_name'], row['user_name'], row['query_signature'], row['query_hash'], row['query_plan_hash'] | 2984e0e0b5fcc4e51a26af188e51fe65c52077a2 | 5,269 |
import pycurl
from io import BytesIO
def get (url, user_agent=UA, referrer=None):
"""Make a GET request of the url using pycurl and return the data
(which is None if unsuccessful)"""
data = None
databuffer = BytesIO()
curl = pycurl.Curl()
curl.setopt(pycurl.URL, url)
curl.setopt(pycurl.FOLLOWLOCATION, 1)
curl.setopt(pycurl.CONNECTTIMEOUT, 5)
curl.setopt(pycurl.TIMEOUT, 8)
curl.setopt(pycurl.WRITEFUNCTION, databuffer.write)
curl.setopt(pycurl.COOKIEFILE, '')
if user_agent:
curl.setopt(pycurl.USERAGENT, user_agent)
if referrer is not None:
curl.setopt(pycurl.REFERER, referrer)
try:
curl.perform()
data = databuffer.getvalue()
except Exception:
pass
curl.close()
return data | e18de239a598be249d81c2a15486a66af763bc85 | 5,270 |
from typing import List
from typing import Tuple
from typing import Any
def apply_filters(
stream: StreamMeta, filters: List[Tuple[str, str]], config: Any
) -> StreamMeta:
"""Apply enabled filters ordered by priority on item"""
filter_pool = get_filter_pool(filters, config)
for filter_instance in filter(
lambda x: x.enabled, sorted(filter_pool, key=lambda x: x.priority)
):
filter_instance.apply(stream)
return stream | 2ff50b5d31e84ba69afe694b4beb4116dbc5fc55 | 5,272 |
from functools import wraps
from threading import Thread
def threading_d(func):
"""
A decorator to run function in background on thread
Args:
func:``function``
Function with args
Return:
background_thread: ``Thread``
"""
@wraps(func)
def wrapper(*args, **kwags):
background_thread = Thread(target=func, args=(*args,))
background_thread.daemon = True
background_thread.start()
return background_thread
return wrapper | ff4d86ded189737d68d4cdc98c0e9ba9f1a28664 | 5,273 |
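A small usage sketch for the decorator above (names are illustrative); note that keyword arguments are ignored by the wrapper as written:
import time
@threading_d
def background_job(label):
    time.sleep(0.1)
    print("finished:", label)

t = background_job("demo")  # returns immediately with the started daemon Thread
t.join()                    # wait for the background work to complete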
import numpy as np
def create_anchors_3d_stride(
feature_size,
sizes=[1.6, 3.9, 1.56],
anchor_strides=[0.4, 0.4, 0.0],
anchor_offsets=[0.2, -39.8, -1.78],
rotations=[0, np.pi / 2],
velocities=[],
dtype=np.float32,
):
"""
Args:
feature_size: list [D, H, W](zyx)
sizes: [N, 3] list of list or array, size of anchors, xyz
Returns:
anchors: [*feature_size, num_sizes, num_rots, 7] tensor.
"""
# almost 2x faster than v1
x_stride, y_stride, z_stride = anchor_strides
x_offset, y_offset, z_offset = anchor_offsets
z_centers = np.arange(feature_size[0], dtype=dtype)
y_centers = np.arange(feature_size[1], dtype=dtype)
x_centers = np.arange(feature_size[2], dtype=dtype)
z_centers = z_centers * z_stride + z_offset
y_centers = y_centers * y_stride + y_offset
x_centers = x_centers * x_stride + x_offset
sizes = np.reshape(np.array(sizes, dtype=dtype), [-1, 3])
rotations = np.array(rotations, dtype=dtype)
velocities = np.array(velocities, dtype=dtype).reshape([-1, 2])
combines = np.hstack([sizes, velocities]).reshape([-1, 5])
rets = np.meshgrid(x_centers, y_centers, z_centers, rotations, indexing="ij")
tile_shape = [1] * 5
tile_shape[-2] = int(sizes.shape[0])
for i in range(len(rets)):
rets[i] = np.tile(rets[i][..., np.newaxis, :], tile_shape)
rets[i] = rets[i][..., np.newaxis] # for concat
# sizes = np.reshape(sizes, [1, 1, 1, -1, 1, 3])
combines = np.reshape(combines, [1, 1, 1, -1, 1, 5])
tile_size_shape = list(rets[0].shape)
tile_size_shape[3] = 1
# sizes = np.tile(sizes, tile_size_shape)
combines = np.tile(combines, tile_size_shape)
# rets.insert(3, sizes)
rets.insert(3, combines)
ret = np.concatenate(rets, axis=-1)
return np.transpose(ret, [2, 1, 0, 3, 4, 5]) | 6834d20f44196f5dad19d1917a673196334adf9f | 5,274 |
import hashlib
def sha1_file(filename):
"""
Return the hex string representation of the SHA1 checksum of the filename
"""
s = hashlib.sha1()
with open(filename, "rb") as f:
for line in f:
s.update(line)
return s.hexdigest() | b993ac9f025d69124962905f87b1968617bb33f5 | 5,275 |
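A quick usage sketch for sha1_file above, writing a throwaway file first:
with open("example.txt", "wb") as f:
    f.write(b"hello\n")
print(sha1_file("example.txt"))  # prints the 40-character hex SHA1 digest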
def read_from_file(file_path):
"""
Read a file and return a list with all the lines in the file
"""
file_in_list = []
with open(file_path, 'r') as f:
for line in f.readlines():
file_in_list.append(line)
return file_in_list | 5fef3a3f50528c1a9786451666ae7e43be282bf9 | 5,277 |
def count(predicate, iterable):
"""
Iterate over iterable, pass each value to the predicate and
return the number of values for which the predicate returns a truthy result.
@param predicate: Predicate function.
@param iterable: Iterable containing the elements to count.
@return: The number of true element.
"""
result = 0
for i in iterable:
if predicate(i):
result += 1
return result | 1a2d9a05203f32a6f1a8349b6e31d14cb1b82b71 | 5,278 |
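Two illustrative calls to count above:
print(count(lambda x: x % 2 == 0, range(10)))    # 5 even numbers
print(count(str.isdigit, ["a", "1", "22", ""]))  # 2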
def get_object_from_path(path):
"""
:param path:
dot seperated path. Assumes last item is the object and first part is module
path(str) -
example:
cls = get_object_from_path("a.module.somewhere.MyClass")
you can create a path like this:
class_path = "{0}.{1}".format(MyClass.__module__, MyClass.__name__)
"""
module_path, _, obj_name = path.rpartition(".")
module = __import__(module_path, globals(), locals(), [obj_name], 0)
obj = getattr(module, obj_name, None)
return obj | e722b040486288d53fe4a357d81ddec8dfc9820e | 5,279 |
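A usage sketch, using a standard-library path purely for illustration:
OrderedDict = get_object_from_path("collections.OrderedDict")
print(OrderedDict)  # <class 'collections.OrderedDict'>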
def _get_collection_memcache_key(collection_id, version=None):
"""Returns a memcache key for the collection.
Args:
collection_id: str. ID of the collection.
version: int. Schema version of the collection.
Returns:
str. The memcache key of the collection.
"""
if version:
return 'collection-version:%s:%s' % (collection_id, version)
else:
return 'collection:%s' % collection_id | cc054d726d1d2642701803a816e214eed4d9663d | 5,280 |
import numpy as np
def biKmeans(dataSet, k, distMeas=calcEuclideanDistance):
"""
Bisecting K-means algorithm
:param dataSet:
:param k:
:param distMeas:
:return:
"""
m = np.shape(dataSet)[0]
clusterAssment = np.mat(np.zeros((m, 2)))
centroid0 = np.mean(dataSet, axis=0).tolist()[0]
centList = [centroid0] # create a list with one centroid
for j in range(m): # calc initial Error
clusterAssment[j, 1] = distMeas(np.mat(centroid0), dataSet[j, :]) ** 2
while len(centList) < k:
lowestSSE = np.inf
for i in range(len(centList)):
# get the data points currently in cluster i
ptsInCurrCluster = dataSet[np.nonzero(clusterAssment[:, 0].A == i)[0], :]
centroidMat, splitClustAss = kMeans(ptsInCurrCluster, 2, distMeas)
sseSplit = sum(splitClustAss[:, 1]) # compare the SSE to the currrent minimum
sseNotSplit = sum(clusterAssment[np.nonzero(clusterAssment[:, 0].A != i)[0], 1])
print "sseSplit, and notSplit: ", sseSplit, sseNotSplit
if (sseSplit + sseNotSplit) < lowestSSE:
bestCentToSplit = i
bestNewCents = centroidMat
bestClustAss = splitClustAss.copy()
lowestSSE = sseSplit + sseNotSplit
bestClustAss[np.nonzero(bestClustAss[:, 0].A == 1)[0], 0] = len(centList) # change 1 to 3,4, or whatever
bestClustAss[np.nonzero(bestClustAss[:, 0].A == 0)[0], 0] = bestCentToSplit
print('the bestCentToSplit is: ', bestCentToSplit)
print('the len of bestClustAss is: ', len(bestClustAss))
centList[bestCentToSplit] = bestNewCents[0, :].tolist()[0] # replace a centroid with two best centroids
centList.append(bestNewCents[1, :].tolist()[0])
# reassign new clusters, and SSE
clusterAssment[np.nonzero(clusterAssment[:, 0].A == bestCentToSplit)[0], :] = bestClustAss
return np.mat(centList), clusterAssment | 1421dfa95c44e046bd7d729ad343e98eb83bbbcd | 5,281 |
def cleanup(args, repo):
"""Clean up undeployed pods."""
if args.keep < 0:
raise ValueError('negative keep: %d' % args.keep)
def _is_enabled_or_started(pod):
for instance in pod.iter_instances():
if scripts.systemctl_is_enabled(instance.unit_name):
return True
if scripts.systemctl_is_active(instance.unit_name):
return True
return False
for pod_dir_name in repo.get_pod_dir_names():
LOG.info('%s - cleanup', pod_dir_name)
all_pods = list(repo.iter_pods(pod_dir_name))
num_left = len(all_pods)
for pod in all_pods:
if num_left <= args.keep:
break
if _is_enabled_or_started(pod):
LOG.info('refuse to undeploy pod: %s', pod)
continue
_undeploy_pod(repo, pod)
num_left -= 1
return 0 | b015c1dbfeb3ad50218afaadfe198123ff2ab6df | 5,283 |
from typing import Dict
from tensorflow import keras  # assumed source of `keras` based on usage below
def optimizer_builder(
config: Dict):
"""
Instantiate an optimizer.
:param config:
:return:
"""
# --- argument checking
if not isinstance(config, dict):
raise ValueError("config must be a dictionary")
# --- read configuration
decay_rate = config["decay_rate"]
decay_steps = config["decay_steps"]
learning_rate = config["learning_rate"]
gradient_clipping_by_norm = config["gradient_clipping_by_norm"]
# --- set up schedule
lr_schedule = \
keras.optimizers.schedules.ExponentialDecay(
initial_learning_rate=learning_rate,
decay_steps=decay_steps,
decay_rate=decay_rate)
return \
keras.optimizers.RMSprop(
learning_rate=lr_schedule,
global_clipnorm=gradient_clipping_by_norm),\
lr_schedule | 2194408d74d4f03bc54371b98b12c8dbe85fb585 | 5,284 |
import numpy as np
def psi(z: float, a: float, b: float) -> float:
"""Penalty function with uniformly bounded derivative (Eq. 20)
Args:
z: Relative distance
a: Cohesion strength
b: Separation strength
"""
c = np.abs(a - b) / (2 * np.sqrt(a * b))
return ((a + b) / 2) * (np.sqrt(1 + (z + c) ** 2) - np.sqrt(1 + c ** 2)) + ((a - b) / 2) * z | df88e57d80a32d95367f30ce52af84308349387a | 5,285 |
def caselessSort(alist):
"""Return a sorted copy of a list. If there are only strings
in the list, it will not consider case.
"""
try:
return sorted(alist, key=lambda a: (a.lower(), a))
except TypeError:
return sorted(alist) | 7558a57e28255817c71846da84230ced49553bb6 | 5,286 |
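A brief illustration of the case-insensitive ordering above:
print(caselessSort(["banana", "Apple", "cherry"]))  # ['Apple', 'banana', 'cherry']
print(caselessSort(["a", "B", "b", "A"]))           # ['A', 'a', 'B', 'b']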
def EnableRing(serialPort):
""" Enable the ISU to listen for SBD Ring Alerts. When SBD Ring Alert indication is enabled, the 9602 asserts the RI line and issues the unsolicited result
code SBDRING when an SBD Ring Alert is received. """
# Enables Ring message to indicate there's a message to read.
Log("EnableRing()")
if not WriteAndCheck(serialPort, "AT+SBDMTA=1\r", "OK", 30):
Log("Issue enabling ring notifications.")
return False
Log("OK.")
return True | 7036610523802f659c7a69ae192f1009401a6ac3 | 5,287 |
# Imports inferred from usage below (os.path helpers and jinja2 classes).
from os.path import basename, dirname, expanduser
from jinja2 import Environment, FileSystemLoader, Template
from jinja2 import TemplateError, TemplateSyntaxError, Undefined as BaseUndefined
def render(template, **context):
"""Render the given template.
:param template: The template file name or string to render.
:param **context: Context keyword-arguments.
"""
class Undefined(BaseUndefined):
def _fail_with_undefined_error(self, *args, **kwargs):
try:
super(Undefined, self)._fail_with_undefined_error(*args,
**kwargs)
except Exception as error:
return "{{{{ {0} }}}}".format(error)
try:
try:
template_file = open(expanduser(template))
loader = FileSystemLoader(dirname(template_file.name))
environment = Environment(loader=loader, undefined=Undefined)
try:
template = environment.get_template(basename(
template_file.name))
except TemplateSyntaxError as error:
message = "Template {0}:{1}, {2}{3}".format(error.filename,
error.lineno, error.message[:1].lower(), error.message[1:])
exit(message)
except IOError:
try:
template = Template(template, undefined=Undefined)
except TemplateSyntaxError as error:
message = "Template \"{0}\" {1}{2}".format(template,
error.message[:1].lower(), error.message[1:])
exit(message)
except TemplateError as error:
message = "Template error: {0}".format(error.message)
exit(message)
return template.render(context) | 6680f163e1b89424e88b1a3046784083cdbb6520 | 5,288 |
def audio(src: str) -> str:
""" Insert audio tag
The tag is currently not supported by Nuance, please use `audio_player` kit:
docs/use_kits_and_actions.md
:param src:
:return:
"""
return f'<audio src="{src}"/>' | f9396d5f82eeca27089de41187fd7d5e967cc9cf | 5,290 |
import math
def PerpendicularDistanceToFinish(point_b_angle: float,
point_b: gps_pb2.Point) -> float:
"""
cos(B) = Adjacent / Hypotenuse
https://www.mathsisfun.com/algebra/trig-finding-side-right-triangle.html
"""
return math.cos(math.radians(point_b_angle)) * point_b.start_finish_distance | 3c18c323c625893ab474c48eb00d48da543956ba | 5,292 |
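A worked example for the helper above, using a minimal stand-in for a gps_pb2.Point (only the start_finish_distance field is read):
import math
class _Point:
    start_finish_distance = 100.0  # hypothetical distance to the finish line
# At 60 degrees, the perpendicular component is cos(60 deg) * 100, i.e. about 50.0.
print(PerpendicularDistanceToFinish(60.0, _Point()))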
import requests
import pandas as pd
from typing import List
def get_revolut_stocks() -> List[str]:
"""
Gets all tickers offered on Revolut trading platform.
Returns:
list(str)
"""
req = requests.get("https://globefunder.com/revolut-stocks-list/")
tickers = list(pd.read_html(req.content)[0]["Symbol"])
tickers = [ticker.replace(".", "-") for ticker in tickers]
return tickers | 3e7f41a04c653a954609cee618cbf89d962fef1d | 5,293 |
from typing import Tuple
def create_rankings(
a: Dataset, b: Dataset, n_samples: int = 100, unravel: bool = False, **kwargs: int
) -> Tuple[ndarray, ndarray]:
"""
Sample a dataset 'a' with 'n' negative samples given interactions in dataset 'a'
and 'b'.
Practically, this function allows you to generate evaluation data as described in
the work of He et al. [1]. The evaluation procedure assumes that the input datasets
'a' and 'b' have been generated with a leave 'n' out policy, such that dataset 'b'
corresponds to the 'training' dataset (i.e. dataset with 'left out' samples removed)
and 'a' corresponds to the 'test' dataset with 'n' for each user with
n_interactions > n. For each user in 'a', the function will return that user's 'n'
left-out interactions, plus 'n_samples' negative samples (items the user has not
interacted with in both the 'train' and 'test' datasets).
Parameters
----------
a: Dataset
The 'test' dataset (the dataset you wish to use for evaluation).
b: Dataset
The 'train' dataset (the dataset you wish to include for purposes of sampling
items the user has not interacted with -- negative samples).
n_samples: int
The total number of negative samples per user to generate. For example, if the
dataset 'a' was generated from a leave-one-out split, and n_samples=100, that
user would receive 101 samples.
unravel: bool
If 'True', the function will return two arrays, where the first element of the
first array corresponds to the user _vector_ (i.e. user ID + optional metadata),
the first element of the first array corresponds to an associated sampled item
vector(i.e. item ID + optional metadata).
Returns
-------
output: (ndarray, List[ndarray])
If 'unravel=False', the first element corresponds to an array of _ordered_ user
ids, the second the `n_samples+1`per-user samples.
If `unravel=True`, the first element corresponds to an array of _ordered_ user
vectors, the second to each individual item vector. See `unravel` argument and
`_unravel_ranked`, below. This function is provided for use when evaluating
Keras Models with the `predict` method.
References
----------
[1] He et al. https://dl.acm.org/doi/10.1145/3038912.3052569
"""
users, items, _ = a.to_components(
negative_samples=n_samples,
aux_matrix=b.interactions.tocsr(),
shuffle=False,
sampling_mode="absolute",
)
unique_users = unique(users)
sampled_users, sampled_items = (
users[len(unique_users) :],
items[len(unique_users) :],
)
_, grouped = groupby(sampled_users, sampled_items)
grouped = c_[grouped, items[: len(unique_users)]]
if unravel:
return _unravel_sampled(unique_users, grouped, a, **kwargs)
else:
return unique_users, grouped | 28282fc14d02b7f93d58d209d143a315e7b25422 | 5,295 |
def make_even(x):
"""Make number divisible by 2"""
if x % 2 != 0:
x -= 1
return x | 10129eb6abd718414d0ada53915672dcf4d7b5b6 | 5,296 |
def get_num_vehicles(session, query_filters):
"""Gets the total number of annotations."""
# pylint: disable-msg=E1101
num_vehicles_query = session.query(
func.count(Vehicle.id)) \
.join(Photo) \
.filter(Photo.test == True)
# pylint: enable-msg=E1101
for query_filter in query_filters:
num_vehicles_query = num_vehicles_query.filter(query_filter)
num_vehicles, = num_vehicles_query.one()
return num_vehicles | bf626edad29b136bb595dabb7e878649c08c0d84 | 5,297 |
def task_status_edit(request, status_id, response_format='html'):
"""TaskStatus edit"""
status = get_object_or_404(TaskStatus, pk=status_id)
if not request.user.profile.has_permission(status, mode='w'):
return user_denied(request, message="You don't have access to this Task Status")
if request.POST:
if 'cancel' not in request.POST:
form = TaskStatusForm(
request.user.profile, request.POST, instance=status)
if form.is_valid():
status = form.save()
return HttpResponseRedirect(reverse('projects_index_by_status', args=[status.id]))
else:
return HttpResponseRedirect(reverse('projects_index_by_status', args=[status.id]))
else:
form = TaskStatusForm(request.user.profile, instance=status)
context = _get_default_context(request)
context.update({'form': form,
'status': status})
return render_to_response('projects/status_edit', context,
context_instance=RequestContext(request), response_format=response_format) | 593384ab55bf889a1e87d7909e848a2dbacad68e | 5,298 |
import platform
def is_windows_system():
"""
| ##@Purpose: determine whether the system is Windows
| ##@Parameters: none
| ##@Returns: True or False
| ##@Logic:
| ##@Author: jhuang
| ##@Date:
"""
return 'Windows' in platform.system() | 6bfe296188b9dccf8338f0b2bbaaf146d9b22243 | 5,299 |
def seepage_from_unitary(U):
"""
Calculates leakage by summing over all input and output states in the
computational subspace.
L1 = 1 - (1/2) * sum_i sum_j |<phi_i|U|phi_j>|**2
"""
sump = 0
for i in range(2):
for j in range(2):
bra_i = qtp.tensor(qtp.ket([i], dim=[2]),
qtp.ket([2], dim=[3])).dag()
ket_j = qtp.tensor(qtp.ket([j], dim=[2]),
qtp.ket([2], dim=[3]))
p = np.abs((bra_i*U*ket_j).data[0, 0])**2
sump += p
sump /= 2 # divide by dimension of comp subspace
L1 = 1-sump
return L1 | 8bd4185a69d7280868871dc3c62bb10abac1579c | 5,300 |
def auto_get(*args):
"""
auto_get(type, lowEA, highEA) -> ea_t
Retrieve an address from queues regarding their priority. Returns
'BADADDR' if no addresses not lower than 'lowEA' and less than
'highEA' are found in the queues. Otherwise *type will have queue
type.
@param type (C++: atype_t *)
@param lowEA (C++: ea_t)
@param highEA (C++: ea_t)
"""
return _ida_auto.auto_get(*args) | 810ea49a414e3a044bc94cac4d780c7b624433a2 | 5,301 |
def isLineForUser(someLine=None, username=None):
"""determins if a raw output line is for a user"""
doesMatch = False
try:
doesMatch = utils.isLineForMatch(someLine, username)
except Exception as matchErr:
logs.log(str(type(matchErr)), "Error")
logs.log(str(matchErr), "Error")
logs.log(str((matchErr.args)), "Error")
matchErr = None
del matchErr
doesMatch = False
return doesMatch | dbf6b92976c8419fc3b9271eb87871c8c7cf6a1b | 5,302 |
def create_multipart_upload(s3_obj, bucketname, object_key):
"""
Initiates Multipart Upload
Args:
s3_obj (obj): MCG or OBC object
bucketname (str): Name of the bucket on which multipart upload to be initiated on
object_key (str): Unique object Identifier
Returns:
str : Multipart Upload-ID
"""
mpu = s3_obj.s3_client.create_multipart_upload(Bucket=bucketname, Key=object_key)
upload_id = mpu["UploadId"]
return upload_id | 375d7d04aefa0ef4f91a42e2478ae624057e1bee | 5,304 |
import sqlite3
def cn(DB):
"""Return the cursor and connection object."""
conn = sqlite3.connect(DB)
c = conn.cursor()
return (c,conn) | 76abbec283d45732213f8b94031242146cdb4ee0 | 5,305 |
def _build_category_tree(slug, reference=None, items=None):
"""
Builds a recursive tree with category relations as children.
"""
if items is None:
items = []
for key in reference:
category = reference[key]
if category["parent"] == slug:
children = _build_category_tree(category["nicename"],
reference=reference)
category["children"] = children
items.append(category)
return items | d06cb736b12025b862363a724b2497a71a8a8a30 | 5,306 |
import copy
def partially_matched_crossover(random, mom, dad, args):
"""Return the offspring of partially matched crossover on the candidates.
This function performs partially matched crossover (PMX). This type of
crossover assumes that candidates are composed of discrete values that
are permutations of a given set (typically integers). It produces offspring
that are themselves permutations of the set.
.. Arguments:
random -- the random number generator object
mom -- the first parent candidate
dad -- the second parent candidate
args -- a dictionary of keyword arguments
Optional keyword arguments in args:
- *crossover_rate* -- the rate at which crossover is performed
(default 1.0)
"""
crossover_rate = args.setdefault('crossover_rate', 1.0)
if random.random() < crossover_rate:
size = len(mom)
points = random.sample(range(size), 2)
x, y = min(points), max(points)
bro = copy.copy(dad)
bro[x:y+1] = mom[x:y+1]
sis = copy.copy(mom)
sis[x:y+1] = dad[x:y+1]
for parent, child in zip([dad, mom], [bro, sis]):
for i in range(x, y+1):
if parent[i] not in child[x:y+1]:
spot = i
while x <= spot <= y:
spot = parent.index(child[spot])
child[spot] = parent[i]
return [bro, sis]
else:
return [mom, dad] | b0d5132cf4ca14095f3d7c637cb50db3fe37d244 | 5,307 |
import re
def regex_trim(input, regex, replace=''):
"""
Trims or replaces the regex match in an input string.
input (string): the input string to search for matches
regex (string): regex to match
replace (string - optional): a string to replace any matches with. Defaults to trimming the match.
"""
return re.sub(regex, replace, input) | 169bfaa0d2bfd7a1f32c1e05a63b41993f82bf4b | 5,308 |
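Two illustrative calls to regex_trim above:
print(regex_trim("hello world", r"\s+world"))       # 'hello'
print(regex_trim("2024-01-02", r"-", replace="/"))  # '2024/01/02'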
def LoadAllSuitesOfProject(project_name):
"""Loads all of the suites of a project."""
project_key = db.Key.from_path(bite_project.BiteProject.kind(),
project_name)
return BiteSuite.all().ancestor(project_key) | 7a1a27229542ed364dddd2cc9a9cd3343c1d934d | 5,310 |
def calc_temps(start_date, end_date):
"""TMIN, TAVG, and TMAX for a list of dates.
Args:
start_date (string): A date string in the format %Y-%m-%d
end_date (string): A date string in the format %Y-%m-%d
Returns:
TMIN, TAVG, and TMAX
"""
return session.query(func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs)).\
filter(Measurement.date >= start_date).filter(Measurement.date <= end_date).all() | 567bc943ecfe34c0604427e0fa8cea11f10c7205 | 5,311 |
def train_model(ad, rsrc_loc, algo='IR', log_dir=None):
"""
Train a CellO model based on the genes of an
input dataset.
Parameters
----------
ad : AnnData object
Expression matrix of n cells by m genes
algo : String
The name of the algorithm used to train the model. 'IR'
trains a model using isotonic regression. 'CLR' trains
a model using cascaded logistic regression.
rsrc_loc: String
The location of the "resources" directory downloaded
via the ''
log_dir : String
Path to a directory in which to write logging information
Returns
-------
A trained CellO model
"""
_download_resources(rsrc_loc)
genes = ad.var.index
# Load the training data
r = load_training_data.load(UNITS, rsrc_loc)
og = r[0]
label_graph = r[1]
label_to_name = r[2]
the_exps = r[3]
exp_to_index = r[4]
exp_to_labels = r[5]
exp_to_tags = r[6]
exp_to_study = r[7]
study_to_exps = r[8]
exp_to_ms_labels = r[9]
X = r[10]
all_genes = r[11]
# Match genes in test data to those in training
# data
train_genes, gene_to_indices = _match_genes(
genes,
all_genes,
rsrc_loc,
log_dir=log_dir
)
# Take a subset of the columns for the training-genes. Note
# that if a given gene in the test set maps to multiple training
# genes, then we sum over the training genes.
X_train = []
for gene in train_genes:
indices = gene_to_indices[gene]
X_train.append(np.sum(X[:,indices], axis=1))
X_train = np.array(X_train).T
assert X_train.shape[1] == len(train_genes)
# Train the model on these genes
print('Training model...')
mod = model.train_model(
ALGO_TO_INTERNAL[algo],
ALGO_TO_PARAMS[algo],
X_train,
the_exps,
exp_to_labels,
label_graph,
item_to_group=exp_to_study,
features=train_genes,
preprocessor_names=PREPROCESSORS,
preprocessor_params=PREPROCESSOR_PARAMS
)
print('done.')
return mod | 46c7736a4f0127ec882d54075564fd447336a332 | 5,313 |
def ps_roi_max_align_2d(
x, rois, roi_indices, outsize,
spatial_scale, group_size, sampling_ratio=None
):
"""Position Sensitive Region of Interest (ROI) Max align function.
    This function computes a position-sensitive max value of the input spatial
    patch for the given regions of interest. Each ROI is split into
    :math:`(group\_size, group\_size)` regions, and a position-sensitive value
    in each region is computed.
Args:
x (~chainer.Variable): Input variable. The shape is expected to be
            4 dimensional: (n: batch, c: channel, h: height, w: width).
rois (array): Input roi. The shape is expected to
be :math:`(R, 4)`, and each datum is set as below:
(y_min, x_min, y_max, x_max). The dtype is :obj:`numpy.float32`.
roi_indices (array): Input roi indices. The shape is expected to
be :math:`(R, )`. The dtype is :obj:`numpy.int32`.
outsize ((int, int, int) or (int, int) or int): Expected output size
after pooled: (channel, height, width) or (height, width)
or outsize. ``outsize=o`` and ``outsize=(o, o)`` are equivalent.
Channel parameter is used to assert the input shape.
        spatial_scale (float): Scale by which the ROI coordinates are resized.
group_size (int): Position sensitive group size.
sampling_ratio ((int, int) or int): Sampling step for the alignment.
It must be an integer over :math:`1` or :obj:`None`, and the value
is automatically decided when :obj:`None` is passed. Use of
different ratio in height and width axis is also supported by
passing tuple of int as ``(sampling_ratio_h, sampling_ratio_w)``.
``sampling_ratio=s`` and ``sampling_ratio=(s, s)`` are equivalent.
Returns:
~chainer.Variable: Output variable.
See the original paper proposing PSROIPooling:
`R-FCN <https://arxiv.org/abs/1605.06409>`_.
See the original paper proposing ROIAlign:
`Mask R-CNN <https://arxiv.org/abs/1703.06870>`_.
"""
return PSROIMaxAlign2D(
outsize, spatial_scale,
group_size, sampling_ratio)(x, rois, roi_indices) | 394879a014d855dd71786cb0941a3aefd30b70b8 | 5,314 |
def received_date_date(soup):
"""
Find the received date in human readable form
"""
return utils.date_text(history_date(soup, date_type="received")) | 5fbba6129da8d6facc66f9ec21e9b6f45fcb399a | 5,315 |
import uuid
def get_cert_sha256_by_openssl(certraw: str) -> str:
"""calc the sha1 of a certificate, return openssl result str"""
res: str = None
tmpname = None
try:
tmpname = tmppath / f"{uuid.uuid1()}.crt"
while tmpname.exists():
tmpname = tmppath / f"{uuid.uuid1()}.crt"
tmpname.write_text(certraw, encoding="utf-8")
cmd = f"openssl x509 -in {tmpname} -fingerprint -noout -sha256"
res = exec_openssl(cmd)
except Exception as ex:
raise Exception(f"Parse ssl data error, err:{ex}")
finally:
if tmpname is not None:
tmpname.unlink()
return res | 9cd095bd8d2d710b1cf7e4a3155a0b4fc08587f5 | 5,317 |
def analytic_pi(x, c, w, h):
"""Analytic response function for an even pair of Lorentz distributions.
    Corresponds to
    .. math::
        \\Pi(x) = \\int_{-\\infty}^{\\infty}
        \\frac{\\omega^2}{\\omega^2+x^2}\\sigma(\\omega)\\, d\\omega
    where :math:`\\sigma(\\omega)` is :func:`~even_lorentzian`.
    Args:
        x (array): Matsubara frequencies at which the response function is evaluated
        c (float): Center of the distribution (+ or -)
        w (float): Width of the distribution (variance)
        h (float): Height/weight of the distribution (area under the curve)
Returns:
array: Values of the integral at imaginary `x`
"""
return 2*h*c/(c**2+(x+w)**2) | fc622e79a6692105c15e05ea353ba925b8378831 | 5,318 |
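# Quick numeric check of the closed form returned above (NumPy used only for the array input).
import numpy as np
x = np.array([0.0, 1.0, 2.0])
print(analytic_pi(x, c=1.0, w=0.5, h=1.0))  # elementwise 2*h*c / (c**2 + (x + w)**2)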
import numpy as np
def run(canvas):
    """ This function runs the rules of the game over all points and changes their status accordingly (in the same canvas).
@Args:
--
canvas : canvas of population to run the rules on.
@returns:
--
None
"""
canvas = np.array(canvas)
next_gen_canvas = np.array(create_canvas(canvas.shape[0]))
for r, row in enumerate(canvas):
for c, pt in enumerate(row):
# print(r-1,r+2,c-1,c+2)
next_gen_canvas[r][c] = __judge_point(
pt, canvas[r - 1: r + 2, c - 1: c + 2]
)
canvas = next_gen_canvas
del next_gen_canvas # cleaning memory as we move on.
return canvas.tolist() | 4a7ae84cf245755f51c3c7a5c22e646e452e6d7a | 5,319 |
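# Hedged usage sketch; `create_canvas` and `__judge_point` are module helpers
# not shown in this excerpt. A "blinker" oscillator placed away from the borders:
canvas = [[0] * 8 for _ in range(8)]
canvas[3][2:5] = [1, 1, 1]
canvas = run(canvas)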
def initialize_vocabulary(vocabulary_path):
"""Initialize vocabulary from file.
We assume the vocabulary is stored one-item-per-line, so a file:
dog
cat
will result in a vocabulary {"dog": 0, "cat": 1}, and this function will
also return the reversed-vocabulary ["dog", "cat"].
Args:
vocabulary_path: path to the file containing the vocabulary.
Returns:
a pair: the vocabulary (a dictionary mapping string to integers), and
the reversed vocabulary (a list, which reverses the vocabulary mapping).
Raises:
ValueError: if the provided vocabulary_path does not exist.
"""
if tf.gfile.Exists(vocabulary_path):
rev_vocab = []
with tf.gfile.GFile(vocabulary_path, mode="rb") as f:
rev_vocab.extend(f.readlines())
rev_vocab = [line.strip() for line in rev_vocab]
vocab = dict([(x, y) for (y, x) in enumerate(rev_vocab)])
return vocab, rev_vocab
else:
    raise ValueError("Vocabulary file %s not found." % vocabulary_path)
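# Hedged usage sketch mirroring the docstring example (TF 1.x tf.gfile API assumed
# by the function above; the keys come back as bytes because of mode="rb").
with open("/tmp/vocab.txt", "wb") as f:
    f.write(b"dog\ncat\n")
vocab, rev_vocab = initialize_vocabulary("/tmp/vocab.txt")
# vocab -> {b'dog': 0, b'cat': 1}; rev_vocab -> [b'dog', b'cat']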
def ravel_space(space):
"""
Convert the space into a Discrete space.
"""
dims = _nested_dim_helper(space)
return Discrete(dims[0]) | baa04d3dd16e1c797bbdb83ff5f42474e77c57b4 | 5,321 |
def _add_col(dataframe, metadata, col_limits, families, weights, random_state):
"""Add a new column to the end of the dataframe by sampling a distribution
from ``families`` according to the column limits and distribution weights
and sampling the required number of values from that distribution."""
nrows, ncols = dataframe.shape
if isinstance(col_limits[1], tuple):
family_counts = get_family_counts(metadata, families)
while len(dataframe.columns) != ncols + 1:
family = random_state.choice(families, p=weights)
idx = families.index(family)
if family_counts[family] < col_limits[1][idx]:
pdf = family.make_instance(random_state)
dataframe[ncols] = pdf.sample(nrows, random_state)
metadata.append(pdf)
dataframe = _rename(dataframe)
return dataframe, metadata
family = random_state.choice(families, p=weights)
pdf = family.make_instance(random_state)
dataframe[ncols] = pdf.sample(nrows, random_state)
metadata.append(pdf)
dataframe = _rename(dataframe)
return dataframe, metadata | b0a711a132f78188cc8b40fe3fb907d022aaa37a | 5,322 |
import struct
def read_and_decrypt_mylogin_cnf(f):
"""Read and decrypt the contents of .mylogin.cnf.
This decryption algorithm mimics the code in MySQL's
mysql_config_editor.cc.
The login key is 20-bytes of random non-printable ASCII.
It is written to the actual login path file. It is used
to generate the real key used in the AES cipher.
:param f: an I/O object opened in binary mode
:return: the decrypted login path file
:rtype: io.BytesIO or None
"""
# Number of bytes used to store the length of ciphertext.
MAX_CIPHER_STORE_LEN = 4
LOGIN_KEY_LEN = 20
# Move past the unused buffer.
buf = f.read(4)
if not buf or len(buf) != 4:
logger.error('Login path file is blank or incomplete.')
return None
# Read the login key.
key = f.read(LOGIN_KEY_LEN)
# Generate the real key.
rkey = [0] * 16
for i in range(LOGIN_KEY_LEN):
try:
rkey[i % 16] ^= ord(key[i:i+1])
except TypeError:
# ord() was unable to get the value of the byte.
logger.error('Unable to generate login path AES key.')
return None
rkey = struct.pack('16B', *rkey)
# Create a decryptor object using the key.
decryptor = _get_decryptor(rkey)
# Create a bytes buffer to hold the plaintext.
plaintext = BytesIO()
while True:
# Read the length of the ciphertext.
len_buf = f.read(MAX_CIPHER_STORE_LEN)
if len(len_buf) < MAX_CIPHER_STORE_LEN:
break
cipher_len, = struct.unpack("<i", len_buf)
# Read cipher_len bytes from the file and decrypt.
cipher = f.read(cipher_len)
plain = _remove_pad(decryptor.update(cipher))
if plain is False:
continue
plaintext.write(plain)
if plaintext.tell() == 0:
logger.error('No data successfully decrypted from login path file.')
return None
plaintext.seek(0)
return plaintext | 84b0831a139db80e0a8d48c7fbaacaef377c93e9 | 5,323 |
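# Hedged usage sketch (the path assumes the default ~/.mylogin.cnf location on Linux/macOS).
import os
with open(os.path.expanduser("~/.mylogin.cnf"), "rb") as f:
    plaintext = read_and_decrypt_mylogin_cnf(f)
if plaintext is not None:
    print(plaintext.read().decode("utf-8", errors="replace"))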
def list_files(tag=None, inst_id=None, data_path=None, format_str=None,
supported_tags=None, file_cadence=dt.timedelta(days=1),
two_digit_year_break=None, delimiter=None, file_type=None):
"""Return a Pandas Series of every file for chosen Instrument data.
Parameters
----------
tag : string or NoneType
Denotes type of file to load. Accepted types are <tag strings>.
(default=None)
inst_id : string or NoneType
Specifies the satellite ID for a constellation. Not used.
(default=None)
data_path : string or NoneType
Path to data directory. If None is specified, the value previously
set in Instrument.files.data_path is used. (default=None)
format_str : string or NoneType
User specified file format. If None is specified, the default
formats associated with the supplied tags are used. (default=None)
supported_tags : dict or NoneType
        keys are inst_id, each containing a dict keyed by tag,
        where the values are file format template strings. (default=None)
file_cadence : dt.timedelta or pds.DateOffset
        pysat assumes a daily file cadence, but some instrument data files
        contain longer periods of time. This parameter allows the specification
of regular file cadences greater than or equal to a day (e.g., weekly,
monthly, or yearly). (default=dt.timedelta(days=1))
two_digit_year_break : int or NoneType
If filenames only store two digits for the year, then '1900' will be
added for years >= two_digit_year_break and '2000' will be added for
years < two_digit_year_break. If None, then four-digit years are
assumed. (default=None)
delimiter : string or NoneType
Delimiter string upon which files will be split (e.g., '.'). If None,
filenames will be parsed presuming a fixed width format. (default=None)
file_type : str or NoneType
        File format for Madrigal data. Load routines currently accept 'hdf5',
'simple', and 'netCDF4', but any of the Madrigal options may be used
here. If None, will look for all known file types. (default=None)
Returns
-------
out : pds.Series
A pandas Series containing the verified available files
"""
# Initialize the transitional variables
list_file_types = file_types.keys() if file_type is None else [file_type]
sup_tags = {inst_id: {tag: supported_tags[inst_id][tag]}}
out_series = list()
# Cycle through each requested file type, loading the requested files
for ftype in list_file_types:
if supported_tags[inst_id][tag].find('{file_type}') > 0:
sup_tags[inst_id][tag] = supported_tags[inst_id][tag].format(
file_type=file_types[ftype])
out_series.append(pysat.instruments.methods.general.list_files(
tag=tag, inst_id=inst_id, data_path=data_path,
format_str=format_str, supported_tags=sup_tags,
file_cadence=file_cadence,
two_digit_year_break=two_digit_year_break, delimiter=delimiter))
# Combine the file lists, ensuring the files are correctly ordered
if len(out_series) == 0:
out = pds.Series(dtype=str)
elif len(out_series) == 1:
out = out_series[0]
else:
out = pds.concat(out_series).sort_index()
return out | e8dd3c25c953fb6d8ccef6cbef4fd31c579826fd | 5,324 |
def is_on_top(bb1, bb2):
""" For obj 1 to be on top of obj 2:
- obj1 must be above obj 2
- the bottom of obj 1 must be close to the top of obj 2
"""
bb1_min, _ = bb1
_, bb2_max = bb2
x1,y1,z1 = bb1_min
x2,y2,z2 = bb2_max
return z1 < z2 + ONTOP_EPSILON and is_above(bb1, bb2) | 666b7a424fc1b8769cc436c07981ae134e6241a9 | 5,325 |
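# Hedged sketch; boxes are ((xmin, ymin, zmin), (xmax, ymax, zmax)) tuples, and
# ONTOP_EPSILON / is_above come from the surrounding module (not shown here).
cup = ((0.0, 0.0, 0.79), (0.1, 0.1, 0.89))
table = ((-0.5, -0.5, 0.0), (0.5, 0.5, 0.79))
print(is_on_top(cup, table))  # True when the cup bottom touches the table top and is_above holds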
def prepare_definitions(defs, prefix=None):
"""
    prepares definitions from a dictionary
    Given a dictionary of definitions in key-value pairs, this builds them
    into a definition list. For example, if the dictionary contains a key ``foo``
    with a value ``bar``, the returned definitions will be a list with the value
    ``['foo=bar']``. If a key contains a value of ``None``, the key will be
ignored and will not be part of the final definition list. If a ``prefix``
value is provided, each definition entry will be prefixed with the provided
value.
Args:
defs: the arguments to process
prefix (optional): prefix value to prefix each definition
Returns:
list of arguments
"""
final = []
if defs:
for key, val in defs.items():
if val is None:
continue
if prefix:
key = prefix + key
if val:
final.append('{}={}'.format(key, val))
else:
final.append(key)
return final | ddc6d14cc18f8afba766efee65ab365df1d226c2 | 5,326 |
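# Usage sketch for prepare_definitions (plain Python, no external dependencies).
print(prepare_definitions({'foo': 'bar', 'baz': None, 'VERBOSE': ''}))
# -> ['foo=bar', 'VERBOSE']
print(prepare_definitions({'foo': 'bar'}, prefix='-D'))
# -> ['-Dfoo=bar']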
def load_training_data(mapsize=512, grfized=False, exclude_fid=False,
dense_grid=False, random_split=False,
from_files=False):
"""Load data for different training scenarios."""
if not grfized and (not dense_grid) and (not random_split):
        # the default data to load
X_train, X_test, y_train, y_test = load_sparse_grid(imsize=mapsize,
from_files=from_files)
elif grfized:
# equivalent gaussian random filed maps
assert not from_files
X_train, X_test, y_train, y_test = load_grf_sparse_grid()
elif dense_grid:
assert not from_files
# data with additional points around a cosmology
X_train, X_test, y_train, y_test = load_dense_grid(imsize=mapsize)
elif random_split:
# random train and test split
X_train, X_test, y_train, y_test = load_randomsplit_grid(
imsize=mapsize, from_files=from_files)
    # always predict new_idf; why not, it takes no time
    # anyway we will not use it with the experiments
fn = '../../data/columbia_data_fiducial_new_idf_pix'+str(mapsize)+'.npy'
X_new_idf = np.load(fn)
y_new_idf = np.ones((len(y_test),2))
y_new_idf[:,0], y_new_idf[:,1] = 0.309, 0.816
if exclude_fid: # exclude fiducial cosmo params if asked for
idx = (y_train[:,0] == 0.309) & (y_train[:,1] == 0.816)
X_train, y_train = X_train[~idx], y_train[~idx]
return X_train, X_test, X_new_idf, y_train, y_test, y_new_idf | 2e2ddf93311c315f070c91af8bcc1a0df5e94343 | 5,327 |
import xarray as xr
def concat_features(args, feature_dim_name='feature'):
"""Concatenate Xs along a set of feature dimensions
Parameters
----------
args : iterable
list of tuples of the form (dims, DataArray) where dims is a tuple of dimensions that will be considered feature dimensions
Returns
-------
stacked : xr.DataArray
The output where the data has been stacked along the feature_dim_name
"""
indexes = []
arrays = []
for dims, xarr in args:
stacked_arr = xarr.stack(**{feature_dim_name: dims})
indexes.append(stacked_arr.indexes[feature_dim_name])
arrays.append(stacked_arr)
index = concat_multi_indexes(indexes)
return xr.concat(arrays, dim=index) | d7b44931a5b8a626ca81e5260566c60379d64fc2 | 5,328 |
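# Hedged usage sketch; `concat_multi_indexes` is an external helper, so this only
# illustrates the expected call shape and output dimensions.
import numpy as np
import xarray as xr
a = xr.DataArray(np.zeros((4, 3, 2)), dims=['sample', 'x', 'y'])
b = xr.DataArray(np.ones((4, 5)), dims=['sample', 'z'])
features = concat_features([(('x', 'y'), a), (('z',), b)])
# expected result: a DataArray of shape (sample: 4, feature: 3*2 + 5)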
def _inspect_mixin(
self, geoctx=None, format="pyarrow", file=None, timeout=30, client=None, **params
):
"""
Quickly compute this proxy object using a low-latency, lower-reliability backend.
Inspect is meant for getting simple computations out of Workflows, primarily for interactive use.
It's quicker but less resilient, won't be retried if it fails, and has no progress updates.
If you have a larger computation (longer than ~30sec), or you want to be sure the computation will succeed,
use `~.compute` instead. `~.compute` creates a `.Job`, which runs asynchronously, will be retried if it fails,
and stores its results for later retrieval.
Parameters
----------
geoctx: `.scenes.geocontext.GeoContext`, `~.workflows.types.geospatial.GeoContext`, or None
The GeoContext parameter under which to run the computation.
Almost all computations will require a `~.workflows.types.geospatial.GeoContext`,
but for operations that only involve non-geospatial types,
this parameter is optional.
format: str or dict, default "pyarrow"
The serialization format for the result.
See the `formats
<https://docs.descarteslabs.com/descarteslabs/workflows/docs/formats.html#output-formats>`_
documentation for more information.
If "pyarrow" (the default), returns an appropriate Python object, otherwise returns raw bytes.
file: path or file-like object, optional
If specified, writes results to the path or file instead of returning them.
timeout: int, optional, default 30
The number of seconds to wait for the result.
Raises `~descarteslabs.workflows.models.JobTimeoutError` if the timeout passes.
client: `.workflows.inspect.InspectClient`, optional
Allows you to use a specific InspectClient instance with non-default
auth and parameters
**params: Proxytype
Parameters under which to run the computation.
Returns
-------
result: Python object or bytes
When ``format="pyarrow"`` (the default), returns an appropriate Python object representing
the result, either as a plain Python type, or object from `descarteslabs.workflows.result_types`.
For other formats, returns raw bytes. Consider using `file` in that case to save the results to a file.
"""
if geoctx is not None:
params["geoctx"] = GeoContext._promote(geoctx)
if client is None:
client = _get_global_inspect_client()
return client.inspect(self, format=format, file=file, timeout=timeout, **params) | fd66a1728ca99806dc2c5056cf4a612ca7cac79b | 5,329 |
def list_dvs(service_instance):
"""
Returns a list of distributed virtual switches associated with a given service instance.
service_instance
The Service Instance Object from which to obtain distributed virtual switches.
"""
return utils_common.list_objects(service_instance, vim.DistributedVirtualSwitch) | 2223fe68c13868bea2884b292318e21cb1c2b99c | 5,330 |
from typing import Optional
def gdb_cli_args(request: FixtureRequest) -> Optional[str]:
"""
Enable parametrization for the same cli option
"""
return getattr(request, 'param', None) or request.config.getoption('gdb_cli_args', None) | 635c5cfd397fe286003add99e094778f835a88d9 | 5,331 |
from datetime import datetime
def coerce_rfc_3339_date(input_date):
"""This function returns true if its argument is a valid RFC 3339 date."""
if input_date:
return datetime.datetime.strptime(input_date, "%Y-%m-%dT%H:%M:%SZ")
return False | 83cc2c32b74ad896d79db1a91f4a0fd88b26731e | 5,332 |
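# Usage sketch (standard library only).
print(coerce_rfc_3339_date("2021-03-01T12:30:00Z"))  # -> 2021-03-01 12:30:00
print(coerce_rfc_3339_date(""))                      # -> False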
def extract_job_url(job):
"""
parse the job data and extract the str for the URL of the job posted
params:
job str: html str representation from bs4
returns:
url str: relative URL path of the job ad
"""
return job.a["href"] | 7517badcc2814e641c04a8f880353d897d434b7f | 5,333 |
import sh
def commit(experiment_name, time):
"""
Try to commit repo exactly as it is when starting the experiment for reproducibility.
"""
try:
sh.git.commit('-a',
m='"auto commit tracked files for new experiment: {} on {}"'.format(experiment_name, time),
allow_empty=True
)
commit_hash = sh.git('rev-parse', 'HEAD').strip()
return commit_hash
except:
return '<Unable to commit>' | a5a75cad77d605ef60905e8b36c6df9913b7bd3c | 5,334 |
def weighted_loss(class_weights):
"""
    Create a weighted loss function. Penalise the misclassification
    of higher-usage classes more heavily (using the supplied class weights).
"""
weight_values = list(class_weights.values())
def weighted_binary_crossentropy(y_true, y_pred):
# add another dimension to compute dot product
expanded_weights = K.expand_dims(weight_values, axis=-1)
return K.dot(K.binary_crossentropy(y_true, y_pred), expanded_weights)
return weighted_binary_crossentropy | 804a643dff3916f376545a9f481edc418ebf5d8e | 5,335 |
def delete_cluster(resource_root, name):
"""
Delete a cluster by name
@param resource_root: The root Resource object.
@param name: Cluster name
@return: The deleted ApiCluster object
"""
resp = resource_root.delete("%s/%s" % (CLUSTERS_PATH, name))
return ApiCluster.from_json_dict(resp, resource_root) | 2ed12d7f927d6579cbea81765b353f0eecae8f4a | 5,336 |
import numpy as np
import pandas as pd
from scipy.stats import norm
def mk_test(x, alpha = 0.05):
"""This perform the MK (Mann-Kendall) test to check if there is any trend present in
data or not
Args:
x: a vector of data
alpha: significance level
Returns:
trend: tells the trend (increasing, decreasing or no trend)
h: True (if trend is present) or False (if trend is absence)
        p: p value of the significance test
z: normalized test statistics
Examples::
>>> x = np.random.rand(100)
>>> trend = mk_test(x,0.05)
>>> print(trend.trend)
increasing
Credit: http://pydoc.net/Python/ambhas/0.4.0/ambhas.stats/
"""
n = len(x)
ta = n*(n-1)/2
# calculate S
s = 0
    for k in range(n-1):
        for j in range(k+1,n):
s += np.sign(x[j] - x[k])
# calculate the unique data
unique_x = np.unique(x)
g = len(unique_x)
# calculate the var(s)
if n == g: # there is no tie
var_s = (n*(n-1)*(2*n+5))/18
else: # there are some ties in data
tp = np.zeros(unique_x.shape)
        for i in range(len(unique_x)):
tp[i] = sum(unique_x[i] == x)
var_s = (n*(n-1)*(2*n+5) - np.sum(tp*(tp-1)*(2*tp+5)))/18
if s>0:
z = (s - 1)/np.sqrt(var_s)
elif s == 0:
z = 0
elif s<0:
z = (s + 1)/np.sqrt(var_s)
else:
z = 0
# calculate the p_value
p = 2*(1- norm.cdf(abs(z))) # two tail test
h = abs(z) > norm.ppf(1-alpha/2)
if (z<0) and h:
trend = 'decreasing'
elif (z>0) and h:
trend = 'increasing'
else:
trend = 'no trend'
return pd.Series({'trend':trend, 'varS':round(var_s,3), 'p':round(p,3), 'z':round(z,3), 's':round(s,3), 'n':n, 'ta':ta}) | 8586c7ee5cf71ea79db9f57ebc6cc77d942962f7 | 5,337 |
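# Quick demonstration on a synthetic upward-trending series (seeded for repeatability).
x_demo = np.arange(100) + np.random.RandomState(0).normal(scale=5.0, size=100)
result = mk_test(x_demo, alpha=0.05)
print(result.trend, result.p)  # an 'increasing' trend is expected here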
from typing import List
def convert_event_to_boxes(event: Event) -> List[EventBox]:
"""Takes in an event and converts this into a list of boxes that when combined completely cover the time allocated
to this event. Usually, this list will contain a single EventBox as many events start and end on the same day, but
any events split across multiple day boundaries will be split into multiple boxes."""
start_date = event.start_datetime.date()
end_date = event.end_datetime.date()
start_time_float = time_to_float(event.start_datetime.time())
end_time_float = time_to_float(event.end_datetime.time())
days_spanned = (end_date - start_date).days + 1
boxes = []
if days_spanned == 1:
boxes.append(EventBox(0, start_time_float, end_time_float))
else:
boxes.append(EventBox(0, start_time_float, 24.0))
for i in range(max(0, days_spanned - 2)):
boxes.append(EventBox(i + 1, 0.0, 24.0))
boxes.append(EventBox(days_spanned - 1, 0.0, end_time_float))
return boxes | c8f93fb2480792540e9052dd79c654e835021030 | 5,338 |
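# Hedged usage sketch; Event, EventBox and time_to_float live elsewhere in the
# surrounding module, so a simple stand-in object is used for the event here.
from datetime import datetime
from types import SimpleNamespace
event = SimpleNamespace(start_datetime=datetime(2021, 6, 7, 22, 0),
                        end_datetime=datetime(2021, 6, 9, 1, 0))
for box in convert_event_to_boxes(event):
    print(box)  # should yield boxes for (day 0, 22-24h), (day 1, 0-24h), (day 2, 0-1h)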
import copy
import numpy as np
def _reduce_consecutive_layers(conv_defs, start_id, end_id, multiplier=0.5):
"""Reduce the outputs of consecutive layers with multiplier.
Args:
conv_defs: Mobilenet conv_defs.
start_id: 0-based index of the starting conv_def to be reduced.
end_id: 0-based index of the last conv_def to be reduced.
multiplier: The multiplier by which to reduce the conv_defs.
Returns:
Mobilenet conv_defs where the output sizes from layers [start_id, end_id],
inclusive, are reduced by multiplier.
Raises:
ValueError if any layer to be reduced does not have the 'num_outputs'
attribute.
"""
defs = copy.deepcopy(conv_defs)
for d in defs['spec'][start_id:end_id+1]:
d.params.update({
        'num_outputs': int(np.round(d.params['num_outputs'] * multiplier))
})
return defs | ffcfad4956f72cf91aeaa5e795e9568d0808417f | 5,339 |
def ajax_save_content(request):
""" Save front end edited content """
site = get_current_site(request)
content_name = request.POST['content_name']
cms_content = CmsContent.objects.get(site=site, name=content_name)
cms_content.content = request.POST['content']
cms_content.save()
return HttpResponse('SUCCESS') | f99bcfaa7ff5773870ed6ba76bfb0cc97fab248b | 5,340 |
def add_regional_group_costs(ws, data_sheet):
"""
"""
ws.sheet_properties.tabColor = "92D050"
    # Hide gridlines so the sheet background appears white
ws.sheet_view.showGridLines = False
#Set blue and red border strips
set_cell_color(ws, 'A1:AZ1', "004C97")
set_cell_color(ws, 'A2:AZ2', "C00000")
ws = bar_chart(ws, "Estimates!$C$64:$C$71", "Estimates!$B$65:$B$71", "Total Cost by Region", 'Cost ($Bn)', "B4")
ws = bar_chart(ws, "Estimates!$F$64:$F$71", "Estimates!$E$65:$E$71", "Mean Annual 10-Year GDP by Region",'GDP ($Bn)', "L4")
ws = bar_chart(ws, "Estimates!$I$64:$I$71", "Estimates!$H$65:$H$71", "Initial Investment by Region",'Cost ($Bn)', "B20")
ws = bar_chart(ws, "Estimates!$C$75:$C$82", "Estimates!$B$76:$B$82", "GDP Share by Region",'Percent of GDP (%)', "L20")
return ws | a325b33b705819be81725e6fb6c4f277c6add097 | 5,341 |
def random_data(num):
""" will return json random float, hex, int and a random password
{0: {
'float': 186.66541583209647,
'hex': '43435c553c722359e386804f6b28d2c2ee3754456c38f5e7e68f',
'int': 851482763158959204,
'password': '5AJ]-02X0J'
}
}"""
data = {}
count = 0
while count < num:
data.update(
{
count: {
"hex": random_hex(),
"int": randint(1, 10**18),
"float": uniform(0.1, 10**3.01),
"password": randPwStr()
}
}
)
count += 1
return data | 108ebabe8b156218a452cbade729dc09356d2d0b | 5,343 |
def denied(request):
"""Authentication failed and user was denied."""
return render(request, 'djangosaml2/denied.html') | 9341e694163de3d8cd63d448ac39294003046dac | 5,344 |
import time
def monday_of_week(year, week):
"""
Returns a datetime for the monday of the given week of the given year.
"""
str_time = time.strptime('{0} {1} 1'.format(year, week), '%Y %W %w')
date = timezone.datetime(year=str_time.tm_year, month=str_time.tm_mon,
day=str_time.tm_mday, tzinfo=timezone.utc)
if timezone.datetime(year, 1, 4).isoweekday() > 4:
# ISO 8601 where week 1 is the first week that has at least 4 days in
# the current year
date -= timezone.timedelta(days=7)
return date | 886c16df011f86eaa95254e360062d5530e05512 | 5,346 |
from typing import Mapping
from typing import Any
from typing import Dict
def object_meta(metadata: Metadata) -> Mapping[str, Any]:
"""
Return a minimal representation of an ObjectMeta with the supplied information.
Spec: https://github.com/argoproj/argo-workflows/blob/v3.0.4/docs/fields.md#objectmeta
"""
meta: Dict[str, Any] = {}
if metadata.generate_name_from_prefix:
meta["generateName"] = metadata.name
else:
meta["name"] = metadata.name
if metadata.annotations:
meta["annotations"] = metadata.annotations
if metadata.labels:
meta["labels"] = metadata.labels
if metadata.namespace:
meta["namespace"] = metadata.namespace
return meta | fc4d30954f9c61c90511fbfc00b403017f41f6c9 | 5,347 |
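# Hedged usage sketch; a stand-in namespace replaces the real Metadata class,
# which is defined elsewhere in the surrounding package.
from types import SimpleNamespace
meta = SimpleNamespace(name="train-", generate_name_from_prefix=True,
                       annotations=None, labels={"app": "demo"}, namespace="argo")
print(object_meta(meta))
# -> {'generateName': 'train-', 'labels': {'app': 'demo'}, 'namespace': 'argo'}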