content (string, 35–762k) | sha1 (string, 40) | id (int64, 0–3.66M) |
---|---|---|
import numpy as np
import pandas as pd

def backcasting(
    predictor,
    windows,
    curves,
    distance="RMS",
    columns=("cases", "deaths"),
    min_series=14,
    step=1,
):
    """
    Perform a backcasting performance analysis of the given model. For the sake
    of this method, the model is just a function that receives an epidemic curve
    dataframe and a list of time windows and returns the forecasts for cases and
    deaths for the specified times.
    """
    # as_seq and st (streamlit) are assumed to come from the surrounding module.
    windows = np.array(as_seq(windows))
    min_window = windows.min(initial=len(curves))

    def distance(x, y):
        # Relative absolute error; this local helper shadows the unused
        # ``distance`` argument.
        return (x - y).dropna().abs() / x

    results = []
    for k in range(min_window, len(curves) - min_series, step):
        data = curves.iloc[:-k]
        prediction = predictor(data, windows)
        results.append(distance(curves, prediction))
        st.write(results[-1])
    return pd.concat(results, axis=0) | 0e0eafc06ab6ab4578be1b299fc70ae88796a72d | 3,648,812 |
from typing import Dict
from typing import Callable
from typing import List
from typing import TypeVar

K = TypeVar("K")
V = TypeVar("V")

def find_keys(d: Dict[K, V], predicate: Callable[[V], bool]) -> List[K]:
    """Find keys where values match predicate."""
    return [k for k, v in d.items() if predicate(v)] | 68febd42bcd65ff52a786e4941dd5abf7d6a36ee | 3,648,813 |
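# Illustrative usage of find_keys; the dictionary and threshold below are made
# up for the example and are not part of the original snippet:
prices = {"apple": 1.2, "banana": 0.5, "cherry": 3.0}
cheap = find_keys(prices, lambda v: v < 1.0)
print(cheap)  # ['banana']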
def get_maya_property_name(prop, ignore_channel=False):
"""
Given a property, return a reasonable Maya name to use for it.
If ignore_channel is True, return the property for the whole vector, eg. return
'.translate' instead of '.translateX'.
This doesn't create or query anything. It just generates a name to use elsewhere.
"""
prop_parts = prop.path.split('/')
# Get the property key, without any channel suffixes attached.
prop_key = prop_parts[0]
mapping = {
'translation': 'translate',
'rotation': 'rotate',
'scale': 'scale',
}
maya_key = None
if prop_key in mapping:
prop_key = mapping[prop_key]
if prop.path.count('/') == 1 and not ignore_channel:
# If we've been given a single channel, eg. rotation/x, return it.
assert len(prop_parts) == 2, prop_parts
assert prop_parts[1] in ('x', 'y', 'z'), prop_parts
return '%s%s' % (prop_key, prop_parts[1].upper())
else:
# Otherwise, return the vector itself.
return prop_key | 591a49f054db3936d5a345919a2c69491b6f345e | 3,648,814 |
from keras.layers import BatchNormalization, Concatenate, Dense, Dropout, LSTM
from keras.models import Model
def model_deepFlavourReference_test(Inputs,nclasses,dropoutRate=0.1,momentum=0.6):
"""
reference 1x1 convolutional model for 'deepFlavour'
with recurrent layers and batch normalisation
    standard dropout rate is 0.1
should be trained for flavour prediction first. afterwards, all layers can be fixed
that do not include 'regression' and the training can be repeated focusing on the regression part
(check function fixLayersContaining with invert=True)
"""
globalvars = BatchNormalization(momentum=momentum,name='globals_input_batchnorm') (Inputs[0])
cpf = BatchNormalization(momentum=momentum,name='cpf_input_batchnorm') (Inputs[1])
npf = BatchNormalization(momentum=momentum,name='npf_input_batchnorm') (Inputs[2])
vtx = BatchNormalization(momentum=momentum,name='vtx_input_batchnorm') (Inputs[3])
cpf,npf,vtx = block_deepFlavourConvolutions(charged=cpf,
neutrals=npf,
vertices=vtx,
dropoutRate=dropoutRate,
active=True,
batchnorm=True, batchmomentum=momentum)
#
cpf = LSTM(150,go_backwards=True,implementation=2, name='cpf_lstm')(cpf)
cpf=BatchNormalization(momentum=momentum,name='cpflstm_batchnorm')(cpf)
cpf = Dropout(dropoutRate)(cpf)
npf = LSTM(50,go_backwards=True,implementation=2, name='npf_lstm')(npf)
npf=BatchNormalization(momentum=momentum,name='npflstm_batchnorm')(npf)
npf = Dropout(dropoutRate)(npf)
vtx = LSTM(50,go_backwards=True,implementation=2, name='vtx_lstm')(vtx)
vtx=BatchNormalization(momentum=momentum,name='vtxlstm_batchnorm')(vtx)
vtx = Dropout(dropoutRate)(vtx)
x = Concatenate()( [globalvars,cpf,npf,vtx ])
x = block_deepFlavourDense(x,dropoutRate,active=True,batchnorm=True,batchmomentum=momentum)
flavour_pred=Dense(nclasses, activation='softmax',kernel_initializer='lecun_uniform',name='ID_pred')(x)
predictions = [flavour_pred]
model = Model(inputs=Inputs, outputs=predictions)
return model | f92f977a5570e647bf394d450bd5a5dea918aeba | 3,648,815 |
import pathlib
def load_spyrelet_class(spyrelet_name, cfg):
"""Load a spyrelet class from a file (whose location is defined in cfg)"""
# discover spyrelet file and class
spyrelet_path_str, _ = get_config_param(cfg, [CONFIG_SPYRELETS_KEY, spyrelet_name, CONFIG_SPYRELETS_FILE_KEY])
spyrelet_class_name, spyrelet_cfg_path_str = get_config_param(cfg, [CONFIG_SPYRELETS_KEY, spyrelet_name, CONFIG_SPYRELETS_CLASS_KEY])
# resolve the spyrelet file location
# if the path isn't absolute resolve it relative to the config file
spyrelet_path = pathlib.Path(spyrelet_path_str)
if not spyrelet_path.is_absolute():
spyrelet_path = pathlib.Path(spyrelet_cfg_path_str).parent / spyrelet_path
spyrelet_path = spyrelet_path.resolve()
if not spyrelet_path.is_file():
raise SpyreletLoadError(None, f'spyrelet [{spyrelet_name}] file [{spyrelet_path}] doesn\'t exist')
return load_class_from_file(spyrelet_path, spyrelet_class_name) | 877c8a626e7abe3e41146475dc030966c0b9f41e | 3,648,816 |
def see_documentation():
"""
This function redirects to the api documentation
"""
return jsonify({
'@context': responses.CONTEXT,
'rdfs:comment': 'See http://www.conceptnet.io for more information about ConceptNet, and http://api.conceptnet.io/docs for the API documentation.'
}) | 46de921c855797b1b7d231a4cb88c57026ece947 | 3,648,817 |
from django.shortcuts import render
def jhtml_render(request, file_type=None,json_file_url=None, html_template=None, json_render_dict=None, json_render_func=None, file_path=None, url_name=None, app_name=None):
"""
:param request:
:param file_type: json/temp_json
:param json_file_url:
    :param html_template: path to the template file, not including the 'templates' prefix
    :param render_var_dict_str: dict of variables used for rendering
:return:
"""
path = request.path
print(path)
if file_type=='temp_json':
try:
json_file_url = reverse(url_name+'_tjson', current_app=app_name)
except Exception as e:
print('ERROR: no json file url found:', file_path)
render_dict = {'json_to_render_file_url': json_file_url}
if json_render_dict is not None:
render_dict.update(json_render_dict)
if json_render_func is not None:
render_dict.update(json_render_func(request, json_render_dict))
page_name = html_template
if (page_name is not None) and len(page_name) > 0:
page_name = page_name # settings.BASE_DIR /
else:
page_name = 'html/index_for_json.html'
    if json_file_url and len(json_file_url) > 3:
if json_file_url[-4:] == 'html':
page_name = json_file_url
#static amis json file render
##if re_one.file_type=='temp_json':
return render(request, page_name, render_dict) | b5d61d69a2c27d883aad60953c7366c6724b905e | 3,648,819 |
def prefix_sums(A):
"""
    This function calculates the sums of elements in a given slice (a contiguous segment of the array).
    Its main idea uses prefix sums, which
    are defined as the consecutive totals of the first 0, 1, 2, ..., n elements of an array.
Args:
A: an array represents number of mushrooms growing on the
consecutive spots along a road.
Returns:
an array contains the consecutive sums of the first n elements of an array A
To use:
>> A=[2,3,7,5,1,3,9]
>> print(prefix_sums(A))
[0, 2, 5, 12, 17, 18, 21, 30]
Time Complexity: O(n)
"""
n = len(A)
P = [0] * (n + 1)
for k in range(1, n + 1):
P[k] = P[k - 1] + A[k - 1]
return P | d61e49eb4a973f7718ccef864d8e09adf0e09ce2 | 3,648,823 |
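# The typical use of the prefix-sum array above is a constant-time slice sum:
# sum(A[x..y]) == P[y + 1] - P[x]. A small illustration (values made up):
A = [2, 3, 7, 5, 1, 3, 9]
P = prefix_sums(A)

def slice_sum(P, x, y):
    # Sum of A[x..y] (inclusive) read off the prefix-sum array in O(1).
    return P[y + 1] - P[x]

print(slice_sum(P, 2, 4))  # 7 + 5 + 1 = 13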
from run4it.api.scripts import script_import_polar_exercices as script_func
def polar_import():
"""Import data from Polar and save as workouts"""
return script_func('polar_import') | 6a7075184e5c44a3092670fffc94360ef9a363c4 | 3,648,824 |
def dijkstra(G, Gextra, source, target_set, required_datarate, max_path_latency):
"""
:returns a successful path from source to a target from target_set with lowest path length
"""
q = DynamicPriorityQueue()
q.put((source, 0.0), priority=0.0)
marked = set()
parents = {source: None}
while not q.empty():
path_length, (current_node, current_path_latency) = q.pop()
marked.add(current_node)
if current_node in target_set:
return _compute_path(parents, current_node)
for neighbor in G.neighbors_iter(current_node):
if neighbor not in marked:
edata = G.edge[current_node][neighbor]
new_path_latency = current_path_latency + edata["l_cap"]
if (required_datarate <= Gextra.get_edge(current_node, neighbor).b_rem and
new_path_latency <= max_path_latency):
new_path_length = path_length + 1
if not config.USE_HOP_PATH_LENGTH:
new_path_length = new_path_latency
if q.put_or_decrease((neighbor, new_path_latency), other_priority=new_path_length):
parents[neighbor] = current_node
return None | 6a8ff88b7a56308e099d3f9e50c8645c3281a68e | 3,648,825 |
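# The function above relies on a _compute_path helper that is not shown here.
# A minimal sketch of how such a helper could rebuild the path from the
# `parents` map (an assumption, not the original implementation):
def _compute_path(parents, target):
    # Walk back from the target through the parents map and reverse the trail.
    path = []
    node = target
    while node is not None:
        path.append(node)
        node = parents[node]
    return list(reversed(path))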
def build_single_class_dataset(name, class_ind=0, **dataset_params):
"""
wrapper for the base skeletor dataset loader `build_dataset`
this will take in the same arguments, but the loader will only iterate
over examples of the given class
I'm just going to overwrite standard cifar loading data for now
"""
trainloader, testloader = build_dataset(name, **dataset_params)
def _filter(loader, mode='train'):
dataset = loader.dataset
assert name in ['cifar10', 'svhn'],\
'we only support cifar and svhn right now'
if name == 'cifar10':
data_attr = mode + '_data' # e.g. train imgs in dataset.train_data
label_attr = mode + '_labels'
else:
data_attr = 'data'
label_attr = 'labels'
data = getattr(dataset, data_attr)
targets = np.array(getattr(dataset, label_attr))
class_inds = np.where(targets == int(class_ind))
data, targets = data[class_inds], targets[class_inds]
setattr(dataset, data_attr, data)
setattr(dataset, label_attr, targets)
return loader
return _filter(trainloader, mode='train'), _filter(testloader, mode='test') | c8d05ecc1292562e846bc62724a224c20746037a | 3,648,826 |
def gamma_trace(t):
"""
trace of a single line of gamma matrices
Examples
========
>>> from sympy.physics.hep.gamma_matrices import GammaMatrix as G, \
gamma_trace, LorentzIndex
>>> from sympy.tensor.tensor import tensor_indices, tensorhead
>>> p, q = tensorhead('p, q', [LorentzIndex], [[1]])
>>> i0,i1,i2,i3,i4,i5 = tensor_indices('i0:6', LorentzIndex)
>>> ps = p(i0)*G(-i0)
>>> qs = q(i0)*G(-i0)
>>> gamma_trace(G(i0)*G(i1))
4*metric(i0, i1)
>>> gamma_trace(ps*ps) - 4*p(i0)*p(-i0)
0
>>> gamma_trace(ps*qs + ps*ps) - 4*p(i0)*p(-i0) - 4*p(i0)*q(-i0)
0
"""
if isinstance(t, TensAdd):
res = TensAdd(*[_trace_single_line(x) for x in t.args])
return res
t = _simplify_single_line(t)
res = _trace_single_line(t)
return res | 8eb5bf4ba1f1d0e170a88a7b798b65273db8c1fd | 3,648,827 |
import copy
def preprocess(comment):
"""Pre-Process the comment"""
copy_comment = copy.deepcopy(comment)
# Replacing link
final_comment = replace_link(copy_comment)
nftokens = get_nf_tokens(comment)
return final_comment, nftokens | f7286d5ca3e668b70385cd72485bb81eb8f9eec1 | 3,648,828 |
def voc_label_indices(colormap, colormap2label):
"""Map a RGB color to a label."""
colormap = colormap.astype('int32')
idx = ((colormap[:, :, 0] * 256 + colormap[:, :, 1]) * 256
+ colormap[:, :, 2])
return colormap2label[idx] | 481eccab328da13c4a49b2cf69d8e0e1cf1e48ab | 3,648,829 |
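# `colormap2label` is built elsewhere; a common way to construct such a lookup
# table is sketched below. The two non-background colors are only an
# illustrative subset of the 21-class Pascal VOC palette.
import numpy as np

VOC_COLORMAP = [[0, 0, 0], [128, 0, 0], [0, 128, 0]]

def build_colormap2label(colormap=VOC_COLORMAP):
    # One slot per possible 24-bit RGB value, holding the class index.
    colormap2label = np.zeros(256 ** 3, dtype=np.int64)
    for label, (r, g, b) in enumerate(colormap):
        colormap2label[(r * 256 + g) * 256 + b] = label
    return colormap2label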
def make_noisy_linear(w=1, std=1):
"""Factory for linear function <w,x> perturbed by gaussian noise N(0,std^2)"""
@Oracle
def noisy_linear(x):
return np.dot(x, w) + np.random.normal(scale=std)
return noisy_linear | 80ec4a37dbbe6dc837707fa9a6e93e27d8dea9b9 | 3,648,830 |
def distance(turtle, x, y=None):
"""Return the distance from the turtle to (x,y) in turtle step units.
Arguments:
turtle -- the turtle
x -- a number or a pair/vector of numbers or a turtle instance
    y -- a number or None
call: distance(turtle, x, y) # two coordinates
--or: distance(turtle, (x, y)) # a pair (tuple) of coordinates
--or: distance(turtle, vec) # e.g. as returned by pos(turtle)
--or: distance(turtle, mypen) # where mypen is another turtle
Example:
>>> pos(turtle)
(0.00, 0.00)
>>> distance(turtle, 30,40)
50.0
>>> pen = Turtle()
>>> pen.forward(77)
>>> distance(turtle, pen)
77.0
"""
if type(turtle) != _turtle.Turtle:
raise(TypeError("turtle argument to distance is not a valid turtle"))
return turtle.distance(x, y) | f09b320c2b07374bebd2fd8c16084e7bf676523d | 3,648,831 |
from copy import copy
def asy_ts(gp, anc_data):
""" Returns a recommendation via TS in the asyuential setting. """
anc_data = copy(anc_data)
# Always use a random optimiser with a vectorised sampler for TS.
if anc_data.acq_opt_method != 'rand':
anc_data.acq_opt_method = 'rand'
anc_data.max_evals = 4 * anc_data.max_evals
gp_sample = _get_gp_sampler_for_parallel_strategy(gp, anc_data)
return _maximise_acquisition(gp_sample, anc_data, vectorised=True) | 1514263314cd92b053bfcd655872a03785b47af0 | 3,648,832 |
import re
def checkParams(opts):
"""
检查模块名是否符合命名规则
检查目录是否存在
"""
res = {}
for opt, arg in opts:
if opt in ('--name'):
if re.match('^[a-zA-Z_][a-zA-Z0-9_]*$', arg):
res['name'] = arg
else:
return res
elif opt in ('--dir'):
res['dir'] = arg;
elif opt in ('--type'):
res['type'] = arg
else:
print("Unknown option " + arg)
res['dir'] = res['dir'] + res['name'] + '/'
return res | 5b8306a1c9805786e4a98509dcea3af59ffd04d1 | 3,648,833 |
def nms(bboxes, iou_threshold, sigma=0.3, method='nms'):
"""
Note: soft-nms, https://arxiv.org/pdf/1704.04503.pdf
https://github.com/bharatsingh430/soft-nms
"""
best_bboxes = []
while len(bboxes) > 0:
max_ind = np.argmax(bboxes[:, 4])
best_bbox = bboxes[max_ind]
best_bboxes.append(list(best_bbox))
bboxes = np.concatenate([bboxes[: max_ind], bboxes[max_ind + 1:]])
iou = bboxes_iou(best_bbox[np.newaxis, :4], bboxes[:, :4])
weight = np.ones((len(iou),), dtype=np.float32)
assert method in ['nms', 'soft-nms']
if method == 'nms':
iou_mask = iou > iou_threshold
weight[iou_mask] = 0.0
if method == 'soft-nms':
weight = np.exp(-(1.0 * iou ** 2 / sigma))
bboxes[:, 4] = bboxes[:, 4] * weight
score_mask = bboxes[:, 4] > 0.
bboxes = bboxes[score_mask]
return best_bboxes | 10f3f65bd00599aa77f2d832754febfeeed7ca55 | 3,648,834 |
def smart_cast(value):
"""Intelligently cast the given value to a Python data type.
:param value: The value to be cast.
:type value: str
"""
# Handle integers first because is_bool() may interpret 0s and 1s as booleans.
if is_integer(value, cast=True):
return int(value)
elif is_float(value):
return float(value)
elif is_bool(value):
return to_bool(value)
else:
return value | 73676278e8c8bf54536fd3c9982cad7f6064cb75 | 3,648,835 |
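# smart_cast calls is_integer, is_float, is_bool and to_bool, which live
# elsewhere. Minimal stand-ins consistent with how they are used above
# (assumptions, not the original implementations):
def is_integer(value, cast=False):
    # With cast=True, strings such as "42" also count as integers.
    if isinstance(value, bool):
        return False
    if isinstance(value, int):
        return True
    if cast and isinstance(value, str):
        try:
            int(value)
            return True
        except ValueError:
            return False
    return False

def is_float(value):
    try:
        float(value)
        return True
    except (TypeError, ValueError):
        return False

def is_bool(value):
    return str(value).lower() in ("true", "false", "yes", "no", "0", "1")

def to_bool(value):
    return str(value).lower() in ("true", "yes", "1")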
from typing import List
from typing import Dict
import math
def find_host_biz_relations(bk_host_ids: List[int]) -> Dict:
"""
    Query the topology relations that the given hosts belong to
    :param bk_host_ids: list of host IDs, e.g. [1, 2, 3]
    :return: topology relations of the hosts
[
{
"bk_biz_id": 3,
"bk_host_id": 3,
"bk_module_id": 59,
"bk_set_id": 11,
"bk_supplier_account": "0"
}
]
"""
    # CMDB limits the number of records per query, so split into batches and request them concurrently
param_list = [
{"bk_host_id": bk_host_ids[count * constants.QUERY_CMDB_LIMIT : (count + 1) * constants.QUERY_CMDB_LIMIT]}
for count in range(math.ceil(len(bk_host_ids) / constants.QUERY_CMDB_LIMIT))
]
host_biz_relations = request_multi_thread(client_v2.cc.find_host_biz_relations, param_list, get_data=lambda x: x)
return host_biz_relations | 9cd9891a97b5ad3db88a0e8a631775b1dc8c24c7 | 3,648,837 |
def atom_to_atom_line(atom):
"""Takes an atomium atom and turns it into a .cif ATOM record.
:param Atom atom: the atom to read.
:rtype: ``str``"""
name = get_atom_name(atom)
res_num, res_insert = split_residue_id(atom)
return "ATOM {} {} {} . {} {} . {} {} {} {} {} 1 {} {} {} {} {} {} 1".format(
atom.id, atom.element, name, atom.het._name if atom.het else "?",
atom.het._internal_id if atom.het and isinstance(
atom.het, Ligand
) else atom.chain._internal_id if atom.chain else ".",
res_num, res_insert, atom.location[0], atom.location[1], atom.location[2],
atom.bvalue, atom.charge,
res_num, atom.het._name if atom.het else "?",
atom.chain.id if atom.chain else ".", name
) | 30e9f9191947b23dffd9e3f6d63f697de325e5f0 | 3,648,838 |
from .....main import _get_bot
from typing import Union
from typing import Optional
async def edit_chat_invite_link(
token: str = TOKEN_VALIDATION,
chat_id: Union[int, str] = Query(..., description='Unique identifier for the target chat or username of the target channel (in the format @channelusername)'),
invite_link: str = Query(..., description='The invite link to edit'),
name: Optional[str] = Query(None, description='Invite link name; 0-32 characters'),
expire_date: Optional[int] = Query(None, description='Point in time (Unix timestamp) when the link will expire'),
member_limit: Optional[int] = Query(None, description='Maximum number of users that can be members of the chat simultaneously after joining the chat via this invite link; 1-99999'),
creates_join_request: Optional[bool] = Query(None, description="True, if users joining the chat via the link need to be approved by chat administrators. If True, member_limit can't be specified"),
) -> JSONableResponse:
"""
Use this method to edit a non-primary invite link created by the bot. The bot must be an administrator in the chat for this to work and must have the appropriate administrator rights. Returns the edited invite link as a ChatInviteLink object.
https://core.telegram.org/bots/api#editchatinvitelink
"""
bot = await _get_bot(token)
try:
entity = await get_entity(bot, chat_id)
except BotMethodInvalidError:
assert isinstance(chat_id, int) or (isinstance(chat_id, str) and len(chat_id) > 0 and chat_id[0] == '@')
entity = chat_id
except ValueError:
raise HTTPException(404, detail="chat not found?")
# end try
result = await bot.edit_chat_invite_link(
entity=entity,
invite_link=invite_link,
name=name,
expire_date=expire_date,
member_limit=member_limit,
creates_join_request=creates_join_request,
)
data = await to_web_api(result, bot)
return r_success(data.to_array()) | 7c83316e0e86eb223b40ed9bf69126d79a4651b4 | 3,648,839 |
def post_live_migrate_at_source(adapter, host_uuid, instance, vif):
"""Performs the post live migrate on the source host.
:param adapter: The pypowervm adapter.
:param host_uuid: The host UUID for the PowerVM API.
:param instance: The nova instance object.
:param vif: The virtual interface of the instance. This may be
called network_info in other portions of the code.
"""
vif_drv = _build_vif_driver(adapter, host_uuid, instance, vif)
return vif_drv.post_live_migrate_at_source(vif) | 0a4165abe0373a96b2b222d4eaa9316649d607b2 | 3,648,840 |
import re
def conv2date(dtstr,tstart=None):
"""Convert epoch string or time interval to matplotlib date"""
    # the input may be a time interval (e.g. "+30d") rather than a date string
    m = re.search(r"([\+\-])([0-9]+)([dm])", dtstr)
if m:
if m.group(3) == "m":
dt=30.5*float(m.group(2)) #scale with average days per month
elif m.group(3) == "d":
dt=float(m.group(2))
if m.group(1) == "+":
fac=1
else:
fac=-1
if not tstart:
tstart=0 #Compute timedeltas only
dout=tstart+fac*dt
else:
dout=datestr2num(dtstr)
return dout | b848f45c04bf9ef77fa3af395afb992f6302fb4f | 3,648,841 |
def resnet18(pretrained=False, **kwargs):
"""Constructs a ResNet-18 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(MaskedBasicblock, [2, 2, 2, 2], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))
        print('ResNet-18 Use pretrained model for initialization')
return model | 94e339a390723e7dbdec4d95b7f4bb3600faae1f | 3,648,842 |
import pytz
def weather(api_token, city, start, end):
"""
Returns an hourly report of cloud cover, wind and temperature data for the
given city. The report is always in full days. Timestamps are in UTC.
Start and end dates are interpreted as UTC.
"""
a = Astral()
city = a[city]
# hour=0 would give us the previous day. Dark Sky always returns full days so
# we can just make one request per day from start to end, always at midday.
d = start.replace(hour=12, tzinfo=pytz.UTC)
dfs = []
for i in range(_num_days(start, end)):
weather = _raw_weather(api_token, city.latitude, city.longitude, d)
df = _as_dataframe(weather, d)
dfs.append(df)
d = d + timedelta(days=1)
return _tidy(pd.concat(dfs)) | 2d8457cc8388613825dad54686988194eed85b2b | 3,648,844 |
from skimage.transform import iradon
def skimage_radon_back_projector(sinogram, geometry, range, out=None):
"""Calculate forward projection using skimage.
Parameters
----------
sinogram : `DiscreteLpElement`
Sinogram (projections) to backproject.
geometry : `Geometry`
The projection geometry to use.
range : `DiscreteLp`
range of this projection (volume space).
out : ``range`` element, optional
An element in range that the result should be written to.
Returns
-------
    out : ``range`` element
        Back-projection (volume) given by the sinogram.
"""
# Lazy import due to significant import time
theta = skimage_theta(geometry)
skimage_range = skimage_sinogram_space(geometry, range, sinogram.space)
skimage_sinogram = skimage_range.element()
skimage_sinogram.sampling(clamped_interpolation(range, sinogram))
if out is None:
out = range.element()
else:
# Only do asserts here since these are backend functions
assert out in range
# Rotate back from (rows, cols) to (x, y)
backproj = iradon(skimage_sinogram.asarray().T, theta,
output_size=range.shape[0], filter=None, circle=False)
out[:] = np.rot90(backproj, -1)
# Empirically determined value, gives correct scaling
scaling_factor = 4.0 * float(geometry.motion_params.length) / (2 * np.pi)
# Correct in case of non-weighted spaces
proj_extent = float(sinogram.space.partition.extent.prod())
proj_size = float(sinogram.space.partition.size)
proj_weighting = proj_extent / proj_size
scaling_factor *= (sinogram.space.weighting.const /
proj_weighting)
scaling_factor /= (range.weighting.const /
range.cell_volume)
# Correctly scale the output
out *= scaling_factor
return out | 8158569eca46907091bfbca6aba57cd2a6afa6bf | 3,648,845 |
def get_segment_hosts(master_port):
"""
"""
gparray = GpArray.initFromCatalog( dbconn.DbURL(port=master_port), utility=True )
segments = GpArray.getSegmentsByHostName( gparray.getDbList() )
return segments.keys() | 565921e4b7d46ec357666d50dee7dcdb7127759e | 3,648,846 |
from typing import List
from typing import Dict
from typing import Any
def get_saved_albums(sp: Spotify) -> List[Dict[str, Any]]:
"""Returns the list of albums saved in user library"""
albums = [] # type: List[Dict[str, Any]]
results = sp.current_user_saved_albums(limit=50)
albums.extend(results["items"])
while results["next"]:
results = sp.next(results)
albums.extend(results["items"])
return albums | 525074d9f957b71c0b355d3d343e088d29792363 | 3,648,847 |
def createMergerCatalog(hd_obj, obj_conditions, cosmo, time_since_merger=1):
"""
Function to create Major Merger (MM) catalog
@hd_obj :: header file for the object of interest
@obj_conditions :: prior conditions to define the object sample
@cosmo :: cosmology used in the notebook (Flat Lambda CDM)
    @time_since_merger :: int, selects objects whose last major merger happened < x Gyr ago
"""
# converting the time since merger into scale factor
merger_z = z_at_value(cosmo.lookback_time, time_since_merger*u.Gyr)
merger_scale = 1/(1+merger_z)
# defining the merger condition
merger_condition = (hd_obj['HALO_scale_of_last_MM']>merger_scale)
downsample = obj_conditions & merger_condition
return hd_obj[downsample], downsample | ee0ac59fe1a8fa9a40a934caa32ff53cd171f3dc | 3,648,848 |
from typing import Union
from typing import Dict
from typing import List
from typing import Any
import json
def make_response(code: int, body: Union[Dict, List]) -> Dict[str, Any]:
"""Build a response.
Args:
code: HTTP response code.
body: Python dictionary or list to jsonify.
Returns:
Response object compatible with AWS Lambda Proxy Integration
"""
return {
"statusCode": code,
"headers": {
"Content-Type": "application/json",
"Access-Control-Allow-Origin": "*",
"Access-Control-Allow-Credentials": "true",
},
"body": json.dumps(body, default=json_custom),
} | bae0a8720085bdf3734724b00df8d856e362602a | 3,648,850 |
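# `json_custom` (the default= hook above) is not included in the snippet; a
# plausible stand-in that serialises datetime objects, which is an assumption
# about its purpose rather than the original code:
import datetime
import json

def json_custom(obj):
    # Fallback serialiser for values json.dumps() cannot handle natively.
    if isinstance(obj, (datetime.datetime, datetime.date)):
        return obj.isoformat()
    raise TypeError(f"Object of type {type(obj).__name__} is not JSON serializable")

# Example: make_response(200, {"updated": datetime.datetime.utcnow()})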
def sql2dict(queryset):
"""Return a SQL alchemy style query result into a list of dicts.
Args:
queryset (object): The SQL alchemy result.
Returns:
result (list): The converted query set.
"""
if queryset is None:
return []
return [record.__dict__ for record in queryset] | c55fa18773142cca591aac8ed6bdc37657569961 | 3,648,851 |
from typing import OrderedDict
import itertools
def build_DNN(input_dim, hidden_dim, num_hidden, embedding_dim=1, vocab_size=20,output_dim=1 ,activation_func=nn.Sigmoid):
""" Function that automates the generation of a DNN by providing a template for
pytorch's nn.Sequential class
Parameters
----------
input_dim : int
Number of dimensions of input vector
hidden_dim : int
Number of dimensions for each hidden layer
num_hidden : int
Number of hidden layers to construct
output_dim : int, default=1
Number of output (label) dimensions
activation_func : nn.Function
Activation function applied to all but the penultimate layer
    Returns
    -------
    nn.Module
        The feedforward network as a PyTorch model
"""
embed = OrderedDict([("Embedding", nn.Embedding(vocab_size,embedding_dim))])
input = OrderedDict([("Input", nn.Linear(input_dim,hidden_dim)),("Sig1", activation_func())])
hidden_structure = [[('Hidden{}'.format(i), nn.Linear(hidden_dim,hidden_dim)),
('Sig{}'.format(i+1), nn.Sigmoid())] for i in range(1,num_hidden+1)]
hiddens = OrderedDict(list(itertools.chain.from_iterable(hidden_structure)))
output = OrderedDict([("Output", nn.Linear(hidden_dim,output_dim))])
return nn.Sequential(OrderedDict(**embed, **input, **hiddens, **output)) | 5b7476b20aacb0d6b0f78da6f97f9a1d3262d43c | 3,648,852 |
def float_to_bin(x, m_digits:int):
"""
    Convert a number x in range [0,1) to a binary string truncated to length m_digits
arguments:
x: float
m_digits: integer
return:
x_bin: string
The decimal representation of digits AFTER '0.'
Ex:
Input 0.75 has binary representation 0.11
Then this function would return '11'
"""
if x < 0 or x >= 1:
raise ValueError("x must be in interval [0,1)")
x_round = round(x * 2**m_digits)
# print(x_round)
# print(2**m_digits)
if x_round == 2**m_digits:
x_round = 0
x_raw = bin(x_round)
x_bin = x_raw[2:].zfill(m_digits)
return x_bin | f95e72d9449b66681575b230f6c858e8b3833cc2 | 3,648,853 |
from typing import Callable
from typing import List
def apply(func: Callable, args: List):
"""Call `func` expanding `args`.
Example:
        >>> def add(a, b):
        ...     return a + b
>>> apply(add, [1, 2])
3
"""
return func(*args) | f866087d07c7c036b405f8d97ba993f12c392d76 | 3,648,854 |
def random_energy_model_create(db: Session) -> EnergyModelCreate:
"""
Generate a random energy model create request.
"""
dataset = fixed_existing_dataset(db)
component_1 = fixed_existing_energy_source(db)
return EnergyModelCreate(name=f"EnergyModel-{dataset.id}-" + random_lower_string(),
ref_dataset=dataset.id,
description="EnergyModel description",
parameters=[
EnergyModelParameterCreate(component=component_1.component.name,
attribute=EnergyModelParameterAttribute.yearly_limit,
operation=EnergyModelParameterOperation.set,
value=366.6),
]
) | db5ac3decf6094bef271005732fd9b78a3870be3 | 3,648,855 |
def _indices_3d(f, y, x, py, px, t, nt, interp=True):
"""Compute time and space indices of parametric line in ``f`` function
Parameters
----------
f : :obj:`func`
Function computing values of parametric line for stacking
y : :obj:`np.ndarray`
Slow spatial axis (must be symmetrical around 0 and with sampling 1)
x : :obj:`np.ndarray`
Fast spatial axis (must be symmetrical around 0 and with sampling 1)
py : :obj:`float`
Slowness/curvature in slow axis
px : :obj:`float`
Slowness/curvature in fast axis
t : :obj:`int`
Time sample (time axis is assumed to have sampling 1)
nt : :obj:`int`
        Size of the time axis
interp : :obj:`bool`, optional
Apply linear interpolation (``True``) or nearest interpolation
(``False``) during stacking/spreading along parametric curve
Returns
-------
sscan : :obj:`np.ndarray`
Spatial indices
tscan : :obj:`np.ndarray`
Time indices
dtscan : :obj:`np.ndarray`
Decimal time variations for interpolation
"""
tdecscan = f(y, x, t, py, px)
if not interp:
sscan = (tdecscan >= 0) & (tdecscan < nt)
else:
sscan = (tdecscan >= 0) & (tdecscan < nt - 1)
    tscan = tdecscan[sscan].astype(int)
if interp:
dtscan = tdecscan[sscan] - tscan
else:
dtscan = None
return sscan, tscan, dtscan | 43a1f8761fb4e2ad32225ebf9e96f0aa2cdf0afd | 3,648,856 |
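# A hedged example of the kind of parametric function expected by _indices_3d:
# a linear moveout t + px*x + py*y evaluated over a flattened spatial grid.
# The grid sizes and slownesses below are made-up illustration values.
import numpy as np

def linear_moveout(y, x, t, py, px):
    # Travel time along a plane-wave trajectory for every spatial sample.
    return t + px * x + py * y

ny, nx, nt = 5, 7, 50
yaxis = np.arange(ny) - ny // 2   # symmetric around 0, sampling 1
xaxis = np.arange(nx) - nx // 2
Y, X = np.meshgrid(yaxis, xaxis, indexing="ij")
sscan, tscan, dtscan = _indices_3d(linear_moveout, Y.ravel(), X.ravel(),
                                   py=0.5, px=1.0, t=10, nt=nt)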
def indicators_listing(request,option=None):
"""
Generate Indicator Listing template.
:param request: Django request object (Required)
:type request: :class:`django.http.HttpRequest`
:param option: Whether or not we should generate a CSV (yes if option is "csv")
:type option: str
:returns: :class:`django.http.HttpResponse`
"""
if option == "csv":
return generate_indicator_csv(request)
return generate_indicator_jtable(request, option) | 772ec90af7b104b4a9712742064d3aba758aab6f | 3,648,857 |
def parse_sensor(csv):
"""
Ideally, the output from the sensors would be standardized and a simple
list to dict conversion would be possible. However, there are differences
between the sensors that need to be accommodated.
"""
lst = csv.split(";")
sensor = lst[SENSOR_QUANTITY]
if sensor in SENSORS:
result = SENSORS[sensor](lst)
else:
result = parse_generic_sensor(lst)
return result | 6673e12403090d130f0ac5590097794ae8f191aa | 3,648,858 |
import datetime
def samiljeol(year=None):
"""
:parm year: int
:return: Independence Movement Day of Korea
"""
year = year if year else _year
return datetime.date(int(year), 3, 1) | 6ae717e12aa3dc5bd1d273e240294d2bc6a294ff | 3,648,859 |
def get_entries(xml_file):
"""Get every entry from a given XML file: the words, their roots
and their definitions.
"""
tree = get_tree(xml_file)
# each <drv> is one entry
entries = []
for drv_node in tree.iter('drv'):
node_words = get_words_from_kap(drv_node.find('kap'))
root = get_word_root(drv_node)
try:
definitions = get_all_definitions(drv_node)
except AssertionError:
print "Error whilst processing %s: %r" % (xml_file, node_words)
raise
for word in node_words:
entries.append(Entry(word, root, definitions))
return entries | f9647cf79be68afa03908433890e1abbff9284bf | 3,648,860 |
def comoving_radial_distance(cosmo, a, status):
"""comoving_radial_distance(cosmology cosmo, double a, int * status) -> double"""
return _ccllib.comoving_radial_distance(cosmo, a, status) | 72066b4b51a7728608d52c920bade33ecef0b920 | 3,648,861 |
import dateutil.parser
def make_legacy_date(date_str):
"""
Converts a date from the UTC format (used in api v3) to the form in api v2.
:param date_str:
:return:
"""
date_obj = dateutil.parser.parse(date_str)
try:
return date_obj.strftime('%Y%m%d')
    except Exception:
return None | 5a2ed526c7bd0dae5a73a55c93d14ec158a0e6df | 3,648,862 |
import torch
def l2_mat(b1, b2):
"""b1 has size B x M x D, b2 has size b2 B x N x D, res has size P x M x N
Args:
b1:
b2:
Returns:
"""
b1_norm = b1.pow(2).sum(dim=-1, keepdim=True)
b2_norm = b2.pow(2).sum(dim=-1, keepdim=True)
res = torch.addmm(b2_norm.transpose(-2, -1), b1, b2.transpose(-2, -1),
alpha=-2).add_(b1_norm)
# mask = 1.0 - torch.ones(res.shape[0]).diag().to(res.device)
res = res.clamp_min_(torch.finfo(torch.float32).eps).sqrt_()
# res = res * mask
return res | ad254c2c11dccab5dd97c7e72ef3b00c7b6143fb | 3,648,863 |
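# A quick sanity check of l2_mat against torch.cdist. Although the docstring
# speaks of B x M x D batches, torch.addmm operates on 2-D matrices, so the
# check below uses unbatched M x D / N x D inputs (illustrative only).
import torch

b1 = torch.randn(4, 3)
b2 = torch.randn(5, 3)
assert torch.allclose(l2_mat(b1, b2), torch.cdist(b1, b2), atol=1e-5)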
def take_rich(frame, n, offset=0, columns=None):
"""
A take operation which also returns the schema, offset and count of the data.
Not part of the "public" API, but used by other operations like inspect
"""
if n is None:
data = frame.collect(columns)
else:
data = frame.take(n, offset, columns)
schema = frame.schema if not columns else sparktk.frame.schema.get_schema_for_columns(frame.schema, columns)
return TakeRichResult(data=data, n=n, offset=offset, schema=schema) | de3514d64a74addae76628c37f679693ba68550b | 3,648,865 |
def default_name(class_or_fn):
"""Default name for a class or function.
This is the naming function by default for registries expecting classes or
functions.
Args:
class_or_fn: class or function to be named.
Returns:
Default name for registration.
"""
return camelcase_to_snakecase(class_or_fn.__name__) | 1ed04a87916ae5d0fa9f1173d5fb9f97c26b32e9 | 3,648,866 |
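# camelcase_to_snakecase is defined elsewhere in the registry module; a common
# regex-based stand-in (an assumption, not the original helper):
import re

def camelcase_to_snakecase(name):
    # "MyFancyLayer" -> "my_fancy_layer"
    s1 = re.sub(r"(.)([A-Z][a-z]+)", r"\1_\2", name)
    return re.sub(r"([a-z0-9])([A-Z])", r"\1_\2", s1).lower()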
def get_ip_result_by_input_method(
set_input_method,
module_input_method,
var_ip_selector,
username,
bk_biz_id,
bk_supplier_account,
filter_set,
filter_service_template,
produce_method,
var_module_name="",
):
"""
    @summary: get IPs according to the input method
    @param var_module_name: module attribute name
    @param set_input_method: tag code for the set (cluster) input method
    @param module_input_method: tag code for the module input method
    @param var_ip_selector: form data
    @param username: username
    @param bk_biz_id: business ID
    @param bk_supplier_account: supplier account
    @param filter_set: set (cluster) filter
    @param filter_service_template: service template (module) filter
    @param produce_method: input method
    @return: comma-separated IP string
"""
produce_method = "var_ip_{}_value".format(produce_method)
select_method = var_ip_selector[produce_method]
    # get the full list of sets
set_list = get_set_list(username, bk_biz_id, bk_supplier_account)
    # unless "select all" is chosen, fetch only the selected sets
if ALL_SELECTED_STR not in select_method[set_input_method]:
selected_set_names = select_method[set_input_method]
        # get the selected sets by their names
set_list = get_list_by_selected_names(selected_set_names, set_list)
    # get the full list of service templates
service_template_list = get_service_template_list(username, bk_biz_id, bk_supplier_account)
    # unless "select all" is chosen, fetch only the selected service templates
if ALL_SELECTED_STR not in select_method[module_input_method]:
selected_service_template_names = select_method[module_input_method]
        # get the service template list from the selected or entered template names
service_template_list = get_service_template_list_by_names(
selected_service_template_names, service_template_list
)
    # get the module IDs of the idle-machine modules based on the input
service_template_list.extend(
get_biz_inner_module_list(
var_ip_selector,
username,
bk_biz_id,
bk_supplier_account,
produce_method,
set_input_method=set_input_method,
module_input_method=module_input_method,
)
)
    # get the list of module IDs
module_ids = get_module_id_list(
bk_biz_id, username, set_list, service_template_list, filter_set, filter_service_template, bk_supplier_account
)
if not var_module_name or var_module_name == "ip":
        # get the IPs from the module ID list and return them
data = get_ip_list_by_module_id(username, bk_biz_id, bk_supplier_account, module_ids)
else:
        # get module info by the module attribute name
kwargs = {"bk_ids": module_ids, "fields": var_module_name.split(",")}
data = [module_attr[var_module_name] for module_attr in get_module_list(username, bk_biz_id, kwargs=kwargs)]
return data | aa12179a5706f213894962579e5d0be30209f14e | 3,648,868 |
def _single_style_loss(a, g):
""" Calculate the style loss at a certain layer
Inputs:
a is the feature representation of the real image
g is the feature representation of the generated image
Output:
the style loss at a certain layer (which is E_l in the paper)
"""
N = a.shape[3] # number of filters
M = a.shape[1] * a.shape[2] # height times width of the feature map
A = _gram_matrix(a, N, M)
G = _gram_matrix(g, N, M)
return tf.reduce_sum((G - A) ** 2 / ((2 * N * M) ** 2)) | f19d8fcfc467d4760a44d2cdb872791cc2ad2ffe | 3,648,871 |
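# _gram_matrix is assumed from the surrounding module; a minimal TensorFlow
# sketch consistent with how it is called above (feature map reshaped to
# (M, N) before taking F^T F):
import tensorflow as tf

def _gram_matrix(f, N, M):
    # Flatten the spatial dimensions and compute the filter-by-filter Gram matrix.
    F = tf.reshape(f, (M, N))
    return tf.matmul(tf.transpose(F), F)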
def hyp_dist_o(x):
"""
Computes hyperbolic distance between x and the origin.
"""
x_norm = x.norm(dim=-1, p=2, keepdim=True)
return 2 * arctanh(x_norm) | 8864d8625798a8b41e2dd645cfe11e8d73d6d9d3 | 3,648,872 |
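# `arctanh` here is a small helper rather than torch.atanh; a typical
# numerically clamped version, included as an assumption:
import torch

def arctanh(x, eps=1e-5):
    # Clamp into the open interval (-1, 1) to avoid infinities at the boundary.
    x = x.clamp(-1 + eps, 1 - eps)
    return 0.5 * torch.log((1 + x) / (1 - x))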
def check_image(url):
"""A little wrapper for the :func:`get_image_info` function.
If the image doesn't match the ``flaskbb_config`` settings it will
return a tuple with a the first value is the custom error message and
the second value ``False`` for not passing the check.
If the check is successful, it will return ``None`` for the error message
and ``True`` for the passed check.
:param url: The image url to be checked.
"""
img_info = get_image_info(url)
error = None
if not img_info["content-type"] in flaskbb_config["AVATAR_TYPES"]:
error = "Image type is not allowed. Allowed types are: {}".format(
", ".join(flaskbb_config["AVATAR_TYPES"])
)
return error, False
if img_info["width"] > flaskbb_config["AVATAR_WIDTH"]:
error = "Image is too wide! {}px width is allowed.".format(
flaskbb_config["AVATAR_WIDTH"]
)
return error, False
if img_info["height"] > flaskbb_config["AVATAR_HEIGHT"]:
error = "Image is too high! {}px height is allowed.".format(
flaskbb_config["AVATAR_HEIGHT"]
)
return error, False
if img_info["size"] > flaskbb_config["AVATAR_SIZE"]:
error = "Image is too big! {}kb are allowed.".format(
flaskbb_config["AVATAR_SIZE"]
)
return error, False
return error, True | d0587dc987a079d49eb9a863d5203908acab41c4 | 3,648,873 |
def preprocess(dataset_file_path, len_bound, num_examples = None, reverse = False):
"""
It reads the required files, creates input output pairs.
"""
min_sentence_length = len_bound[0]
max_sentence_length = len_bound[1]
lines = open(str(dataset_file_path), encoding='utf-8', errors = 'ignore').read().strip().split('\n')
if num_examples is not None:
lines = lines[:num_examples] # This takes only some lines
input_lang = []
output_lang = []
seen = set()
for line in lines:
        _line = line.split('\t')  # separate the input line from the output line
if (len(_line[0].split(" "))>min_sentence_length and len(_line[0].split(" "))<max_sentence_length
and len(_line[1].split(" "))>min_sentence_length and len(_line[1].split(" "))<max_sentence_length):
inp = clean_text(_line[0])
if inp in seen:
continue
seen.add(inp)
input_lang.append(inp)
output_lang.append(clean_text(_line[1]))
    assert len(input_lang) == len(output_lang)  # sanity check: inputs and outputs must align
print("Read %s sentence pairs" % len(input_lang))
if reverse:
return (input_lang, output_lang)
else:
return (output_lang, input_lang) | 5849c1957ccab997bcf835bce2fec71b0a93cd6d | 3,648,874 |
def read_transcriptome(transcriptome):
"""
Parse transcriptome as a dictionary.
"""
result_dict = {}
for sequence in SeqIO.parse(transcriptome, 'fasta'):
result_dict[sequence.name] = sequence.seq
return result_dict | 008df223435de465cd6f36978305ca95bb15b270 | 3,648,875 |
from sympy import limit, symbols

# x and y are assumed to be sympy symbols defined elsewhere in the module;
# X, Y and eps are the perturbation symbols used below.
X, Y, eps = symbols("X Y eps")
def magnus(w, n):
"""
The 'Magnus' map
"""
expr = w.subs(x,1+eps*X).subs(y,1+eps*Y) - 1
return limit(expr / eps**n, eps, 0) | 7faf1935b9348f41e6968b7da5fa59576ad874a5 | 3,648,876 |
def translate_node_coordinates(wn, offset_x, offset_y):
"""
Translate node coordinates
Parameters
-----------
wn: wntr WaterNetworkModel
A WaterNetworkModel object
    offset_x: float
Translation in the x direction, in meters
offset_y: float
Translation in the y direction, in meters
Returns
--------
A WaterNetworkModel object with updated node coordinates
"""
wn2 = _deepcopy_wn(wn)
for name, node in wn2.nodes():
pos = node.coordinates
node.coordinates = (pos[0]+offset_x, pos[1]+offset_y)
return wn2 | da886a624b9038296d47ffe85a04e62f71f49def | 3,648,878 |
def get_demo_board():
"""Get a demo board"""
demo_board_id = 1
query = Board.query.filter(Board.id == demo_board_id)
query = query.options(joinedload(Board.tasks)).options(raiseload('*'))
board = query.one()
return BoardDetailsSchema().dump(board).data | 69b20a6c7446dc3813ec8d8c454a7a35443bf103 | 3,648,879 |
def cool_KI(n, T):
"""
Returns Koyama & Inutsuka (2002) cooling function
"""
return 2e-19*n*n*(np.exp(-1.184e5/(T + 1e3)) +
1.4e-9*T**0.5*np.exp(-92.0/T)) | 707b9e8d42e4d1b7db069c05b3b74e3f0b37f2e6 | 3,648,880 |
def main(args):
"""
main entry point for the manifest CLI
"""
if len(args) < 2:
return usage("Command expected")
command = args[1]
rest = args[2:]
if "create".startswith(command):
return cli_create(rest)
elif "query".startswith(command):
return cli_query(rest)
elif "verify".startswith(command):
return cli_verify(rest)
else:
return usage("Unknown command: %s" % command) | b89e68c6ef98722a55ff15e8473dec8c8437bf8d | 3,648,881 |
def compute_correlations(states):
"""compute_correlations.
Calculate the average correlation of spin 0 and every other spin.
Parameters
----------
states : list of states.
``len(states)`` must be >= 1!
Returns
-------
correlations : list of floats.
"""
return [
sum(s[0] * s[i] for s in states) / len(states)
for i in range(len(states[0]))
] | 471949aa63a3d65b262fb9dad1c77d160a3f5ac7 | 3,648,882 |
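# A tiny illustrative call with three +/-1 spin configurations (values made up):
states = [[1, 1, -1], [1, -1, -1], [-1, -1, 1]]
print(compute_correlations(states))  # [1.0, 0.3333..., -1.0]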
from typing import Sequence
from typing import Any
def parse_sample_str(elems: Sequence[Any]) -> AOList[str]:
""" Choose n floats from a distribution.
Examples:
>>> c = parse_sample_str([4, ["choose", ["one", "two"]]])
>>> c
Sample(4, ChooseS([StrConst('one'), StrConst('two')]))
"""
str_func = "sample"
check_n_params(["n", "dist"], elems, str_func)
n = check_true_int_param(0, elems, "n", str_func)
this_dist = check_str_param(1, elems, "dist", str_func)
return dist.Sample(n, this_dist) | 5996a3b0ed072d4a7a00d7e01cc74efdc65aa8ee | 3,648,883 |
def htlc(TMPL_RCV,
TMPL_OWN,
TMPL_FEE,
TMPL_HASHIMG,
TMPL_HASHFN,
TMPL_TIMEOUT):
"""This contract implements a "hash time lock".
The contract will approve transactions spending algos from itself under two circumstances:
- If an argument arg_0 is passed to the script such that TMPL_HASHFN(arg_0) is equal to TMPL_HASHIMG,
then funds may be closed out to TMPL_RCV.
- If txn.FirstValid is greater than TMPL_TIMEOUT, then funds may be closed out to TMPL_OWN.
The idea is that by knowing the preimage to TMPL_HASHIMG, funds may be released to
TMPL_RCV (Scenario 1). Alternatively, after some timeout round TMPL_TIMEOUT,
funds may be closed back to their original owner, TMPL_OWN (Scenario 2).
Note that Scenario 1 may occur up until Scenario 2 occurs, even if TMPL_TIMEOUT has already passed.
Parameters:
TMPL_RCV: the address to send funds to when the preimage is supplied
TMPL_HASHFN: the specific hash function (sha256 or keccak256) to use (sha256 in this example)
TMPL_HASHIMG: the image of the hash function for which knowing the preimage under TMPL_HASHFN will release funds
TMPL_TIMEOUT: the round after which funds may be closed out to TMPL_OWN
TMPL_OWN: the address to refund funds to on timeout
TMPL_FEE: maximum fee of any transactions approved by this contract """
# First, check that the fee of this transaction is less than or equal to TMPL_FEE
fee_check = Txn.fee() < Int(TMPL_FEE)
# Next, check that this is a payment transaction.
pay_check = Txn.type_enum() == TxnType.Payment
# Next, check that the Receiver field for this transaction is empty
# Because this contract can approve transactions that close out its entire balance,
# it should never have a receiver.
rec_field_check = Txn.receiver() == Global.zero_address()
# Next, check that the Amount of algos transferred is 0. This is for the same reason as
# above: we only allow transactions that close out this account completely, which
# having a non-zero-address CloseRemainderTo will handle for us.
amount_check = Txn.amount() == Int(0)
# Always verify that the RekeyTo property of any transaction is set to the ZeroAddress
    # unless the contract is specifically involved in a rekeying operation.
rekey_check = Txn.rekey_to() == Global.zero_address()
# fold all the above checks into a single boolean.
common_checks = And(
fee_check,
pay_check,
rec_field_check,
amount_check,
rekey_check
)
# Payout scenarios : At this point in the execution, there is one boolean variable on the
# stack that must be true in order for the transaction to be valid. The checks we have done
    # above apply to any transaction that may be approved by this script. We will now check if we
    # are in one of the two payment scenarios described in the functionality section.
# Scenario 1: Hash preimage has been revealed
# First, check that the CloseRemainderTo field is set to be the TMPL_RCV address.
recv_field_check = Txn.close_remainder_to() == TMPL_RCV
# Next, we will check that arg_0 is the correct preimage for TMPL_HASHIMG under TMPL_HASHFN.
preimage_check = TMPL_HASHFN(Arg(0)) == Bytes("base64", TMPL_HASHIMG)
#Fold the "Scenario 1" checks into a single boolean.
scenario_1 = And(recv_field_check, preimage_check)
# Scenario 2: Contract has timed out
# First, check that the CloseRemainderTo field is set to be the TMPL_OWN address
# (presumably initialized to be the original owner of the funds).
owner_field_check = Txn.close_remainder_to() == TMPL_OWN
# Next, check that this transaction has only occurred after the TMPL_TIMEOUT round.
timeout_check = Txn.first_valid() > Int(TMPL_TIMEOUT)
#Fold the "Scenario 2" checks into a single boolean.
scenario_2 = And(owner_field_check, timeout_check)
# At this point in the program's execution, the stack has three values. At the base of the
# stack is a boolean holding the results of the initial transaction validity checks.
# This is followed by two booleans indicating the results of the scenario 1 and 2 checks.
# We want to approve this transaction if we are in scenario 1 or 2.
# So we logically OR the results of those checks together.
# Finally, we logically AND the scenario checks with the initial checks.
# At this point, the stack contains just one value: a boolean indicating
# whether or not it has been approved by this contract.
return And(Or(scenario_1, scenario_2), common_checks) | 9288458b228dabc1663901e03011feaa8ff9765c | 3,648,884 |
def parse(*args, is_flag=False, **kwargs):
"""alias of parser.parse"""
return _parser.parse(*args, is_flag=is_flag, **kwargs) | f40499277a12bd6e492e43fd7e4328124ac59814 | 3,648,885 |
def oauth_callback():
"""
return: str
"""
auth = tweepy.OAuthHandler(env.TWITTER_API_KEY, env.TWITTER_API_SECRET)
try:
auth.request_token = session['REQUEST_TOKEN']
verifier = request.args.get('oauth_verifier')
auth.get_access_token(verifier)
session['AUTH_TOKEN'],session['AUTH_TOKEN_SECRET'] = auth.access_token, auth.access_token_secret
redirect_url = '/share'
except Exception:
redirect_url = '/'
return redirect_url | a15d7c88c97b23a3ce625e363882fff3197c55b5 | 3,648,886 |
from typing import Tuple
from typing import List
import random
def generate_random_instance(n_instants: int, cost_dim: int, items_per_instant: int = 1) -> \
Tuple[List[List[float]], List[List[List[float]]], float, float]:
"""Generates random values, costs and capacity for a Packing Problem instance.
    Instances generated here may not respect guarantee constraints.
Parameters
----------
n_instants : int
Number of instants to be generated.
cost_dim : int
Dimension of the cost vectors to be generated.
items_per_instant : int
Number of items that should be available in each instant.
Returns
-------
values : list of list of float
A list containing, for each instant, a list with that instant item's values.
costs : list of list of list of float
A list containing, for each instant, a list with that instant item's cost vectors.
cap : float
A random problem capacity.
e : float
The best theorical epsilon for the generated problem.
"""
assert items_per_instant > 0
assert cost_dim > 0
values: List[List[float]] = _get_random_values(n_instants, items_per_instant)
costs: List[List[List[float]]] = _get_random_costs(n_instants, items_per_instant, cost_dim)
cap = random.random() * n_instants/2
e = sqrt(log(cost_dim, 2)/cap)
return values.copy(), deepcopy(costs), cap, e | 57ccf4cd5410d2358c434d94beb9bfbb0ca04820 | 3,648,887 |
def recommend_tags_questions(professional_id, threshold=0.01, top=5):
""" Recommends tags for an professional depending on answered questions.
:param professional_id: ID of the professional
:param threshold: Minimum percentage of questions with the tags.
:param top: Top N recommended tags (default: 5)
:return top_tags: DataFrame with the top tags and how many answered questions have these.
"""
professional_tags = get_user_tags(professional_id)
professional = professionals[professionals['professionals_id'] == professional_id]
professional_questions = answers[answers['answers_author_id'] == professional_id]['answers_question_id']
top_tags = tag_questions[tag_questions['tag_questions_question_id'].isin(professional_questions)]
top_tags = pd.merge(top_tags, tags, left_on='tag_questions_tag_id', right_on='tags_tag_id')
top_tags = top_tags[~top_tags['tags_tag_name'].isin(professional_tags)]
top_tags = top_tags.groupby('tags_tag_name').size()/len(professional_questions.index)
top_tags = top_tags[top_tags > threshold]
top_tags = top_tags.sort_values(ascending=False).head(top)
return top_tags | 1b4bc6d37569d4794294028036e59437f66dc552 | 3,648,888 |
from .tools import make_simulationtable
from .model import reservoirs
def simulationtable(request):
"""
called when the simulation page starts to get used
"""
# convert to the right name syntax so you can get the COM ids from the database
selected_reservoir = request.body.decode("utf-8")
    reservoir_dict = reservoirs()
    for reservoir in reservoir_dict:
        if reservoir_dict[reservoir] == selected_reservoir:
            selected_reservoir = reservoir
break
return JsonResponse(make_simulationtable(selected_reservoir)) | eaa60d02ee095d5efcc6a4f458bd4bb6745675d0 | 3,648,889 |
from datetime import datetime
def get_rate_limits(response):
"""Returns a list of rate limit information from a given response's headers."""
periods = response.headers['X-RateLimit-Period']
if not periods:
return []
rate_limits = []
periods = periods.split(',')
limits = response.headers['X-RateLimit-Limit'].split(',')
remaining = response.headers['X-RateLimit-Remaining'].split(',')
reset = response.headers['X-RateLimit-Reset'].split(',')
for idx, period in enumerate(periods):
rate_limit = {}
limit_period = get_readable_time_string(period)
rate_limit["period"] = limit_period
rate_limit["period_seconds"] = period
rate_limit["request_limit"] = limits[idx]
rate_limit["requests_remaining"] = remaining[idx]
reset_datetime = get_datetime_from_timestamp(reset[idx])
rate_limit["reset"] = reset_datetime
right_now = datetime.now()
if (reset_datetime is not None) and (right_now < reset_datetime):
# add 1 second because of rounding
seconds_remaining = (reset_datetime - right_now).seconds + 1
else:
seconds_remaining = 0
rate_limit["reset_in_seconds"] = seconds_remaining
rate_limit["time_to_reset"] = get_readable_time_string(seconds_remaining)
rate_limits.append(rate_limit)
return rate_limits | eed6504d712e91110763e28f400dab5faf9300a1 | 3,648,890 |
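# get_readable_time_string and get_datetime_from_timestamp are not shown in
# the snippet; plausible stand-ins inferred from how they are called above
# (assumptions, not the original helpers):
from datetime import datetime

def get_datetime_from_timestamp(ts):
    # Header values arrive as strings; empty or zero means "unknown".
    try:
        ts = int(ts)
    except (TypeError, ValueError):
        return None
    return datetime.fromtimestamp(ts) if ts else None

def get_readable_time_string(seconds):
    # e.g. 3700 -> "1 hour 1 minute 40 seconds"
    seconds = int(seconds)
    minutes, seconds = divmod(seconds, 60)
    hours, minutes = divmod(minutes, 60)
    parts = []
    if hours:
        parts.append("%d hour%s" % (hours, "" if hours == 1 else "s"))
    if minutes:
        parts.append("%d minute%s" % (minutes, "" if minutes == 1 else "s"))
    if seconds or not parts:
        parts.append("%d second%s" % (seconds, "" if seconds == 1 else "s"))
    return " ".join(parts)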
import numpy
def plot_breakdown_percents(runs, event_labels=[],
title=None, colors=None):
"""
Plots a bar chart with the percent of the total wall-time of all events for
multiple runs.
Parameters
----------
runs: Run object or list of Run objects
The list of runs to display on the figure.
event_labels: string or list of strings, optional
Names of the events to display on the figure;
default: [].
title: string, optional
Title of the figure;
default: None.
colors: iterator, optional
Colors to use;
default: None.
Returns
-------
fig: Matplotlib Figure object
The figure.
ax: Matplotlib Axes object
Single or array of axes.
"""
if not isinstance(runs, (list, tuple)):
runs = [runs]
if not isinstance(event_labels, (list, tuple)):
event_labels = [event_labels]
fig, ax = pyplot.subplots(figsize=(8.0, 6.0))
ax.yaxis.grid(zorder=0)
ax.set_ylabel('% of wall-time', fontsize=16)
indices = numpy.arange(len(runs))
bar_width = 0.5
bar_offsets = numpy.zeros(len(runs))
for label in event_labels:
if colors:
color = next(colors)
else:
color = next(ax._get_lines.prop_cycler)['color']
percents = []
for run in runs:
if label in run.events.keys():
percents.append(run.events[label]['percent'])
else:
percents.append(0.0)
ax.bar(indices, percents, bar_width,
label=label,
bottom=bar_offsets,
color=color,
linewidth=0,
zorder=0)
bar_offsets += percents
ax.legend(bbox_to_anchor=(1.0, 1.0), frameon=False)
ax.set_xticks(indices + 0.25 * bar_width)
ax.set_xticklabels([run.label for run in runs], rotation=0, fontsize=16)
    ax.set_yticks([0.0, 25.0, 50.0, 75.0, 100.0])
    ax.set_yticklabels(['0', '25', '50', '75', '100'])
ax.set_xlim(indices[0] - 0.5, indices[-1] + 1.0)
ax.set_ylim(0.0, 100.0)
    if title:
        ax.set_title(title)
return fig, ax | 788c0c466223a2e2aaa695c616fdfc649248b963 | 3,648,891 |
def gen3_file(mock_gen3_auth):
"""
Mock Gen3File with auth
"""
return Gen3File(endpoint=mock_gen3_auth.endpoint, auth_provider=mock_gen3_auth) | ee2af5d8b89c02e205101e0fe56dc58025d72e38 | 3,648,892 |
def rhs_of_rule(rule):
""" This function takes a grammatical rule, and returns its RHS """
return rule[0] | 004b99ac97c50f7b33cc798997463a28c3ae9a6f | 3,648,893 |
from typing import Union
from typing import Optional
from typing import Any
def flow_duration_curve(
x: Union[np.ndarray, pd.Series],
log: bool = True,
plot: bool = True,
non_exceeding:bool = True,
ax: Optional[Union[SubplotBase, Any]] = None,
**kwargs
) -> Union[np.ndarray, Figure]:
"""Calculate a flow duration curve
Calculate flow duration curve from the discharge measurements. The
function can either return a ``matplotlib`` plot or return the ordered (
non)-exceeding probabilities of the observations. These values can then
be used in any external plotting environment.
In case x.ndim > 1, the function will be called iteratively along axis 0.
Parameters
----------
x : numpy.ndarray, pandas.Series
        Series of (preferably) discharge measurements
log : bool, default=True
if `True` plot on loglog axis, ignored when plot is `False`
plot : bool, default=True
if `False` plotting will be suppressed and the resulting array will
be returned
non_exceeding : bool, default=True
if `True` use non-exceeding probabilities
ax : matplotlib.AxesSubplot | bokeh.Figure , default=None
if not None, will plot into that AxesSubplot or Figure instance.
.. note::
If you pass an object, be sure to set the correct plotting
backend first.
kwargs : kwargs,
will be passed to the ``matplotlib.pyplot.plot`` function
Returns
-------
matplotlib.Figure :
if `plot` was `True`
numpy.ndarray :
if `plot was `False`
Notes
-----
The probabilities are calculated using the Weibull empirical probability.
Following [1]_, this probability can be calculated as:
    .. math:: p = m / (n + 1)
    where `m` is the rank of an observation in the ordered time series and
    `n` is the total number of observations. The increase by one prevents 0%
and 100% probabilities.
References
----------
.. [1] Sloto, R. a., & Crouse, M. Y. (1996). Hysep: a computer program
for streamflow hydrograph separation and analysis. U.S. Geological
Survey Water-Resources Investigations Report, 96(4040), 54.
"""
# omit the Series index
if isinstance(x, pd.Series):
x = x.values
# if x has more than one dimension call this func recursive along axis=0
if x.ndim > 1:
# check if plot was None, then iterate along axis=0
if not plot:
return np.apply_along_axis(flow_duration_curve, 0, x, non_exceeding=non_exceeding, plot=False)
else:
# plot, if ax is None, create
if ax is None:
fig, ax = plt.subplots(1,1)
last_ax = list(map(lambda x: flow_duration_curve(x, log=log, non_exceeding=non_exceeding, ax=ax), x.T))[-1]
return last_ax
# calculate the ranks
ranks = rankdata(x, method='average')
# calculate weibull pdf
N = x.size
# calculate probabilities
    p = np.fromiter(map(lambda r: r / (N + 1), ranks), dtype=float)
# create sorting index
if non_exceeding:
index = np.argsort(p)
else:
index = np.argsort(p)[::-1]
if not plot:
return p[index]
else:
pfunc = plot_function_loader('flow_duration_curve')
fig = pfunc(func_args=dict(
x=x[index],
y=p[index],
non_exceeding=non_exceeding,
log=log,
figure=ax),
plot_args=kwargs
)
return fig | 3bec0159553a814ac4c68b198a29bf3075f6d202 | 3,648,894 |
def get_fields(filters):
"""
Return sql fields ready to be used on query
"""
fields = (
("(SELECT p.posting_date FROM `tabPurchase Invoice` p Join `tabPurchase Invoice Item` i On p.name = i.parent WHERE i.item_code = `tabItem`.item_code And p.docstatus = 1 limit 1) as pinv_date"),
("CONCAT(`tabItem`._default_supplier, ' - ', `tabAddress`.city, ', ', `tabAddress`.state) as location"),
("Item", "vim_number"),
("Item", "make"),
("Item", "model"),
("Item", "bl"),
("Item", "item_type"),
("Item", "booking_no"),
("Item", "container_no"),
("Item", "part_type"),
("Item", "year"),
("Item", "exterior_color"),
("Item", "status"),
("Delivery Checklist", "status", "vehicle_release"),
("Sales Invoice Item", "item_code"),
("Sales Invoice Item", "vim_number", "cont_vim"),
("Sales Invoice Item", "item_name"),
# ("Sales Invoice", "due_date", "due_date"),
("""(SELECT SUM(b.grand_total) FROM `tabSales Invoice` as b WHERE b.is_return = 1 and b.docstatus = 1 and b.return_against = `tabSales Invoice`.name ) as credit_note"""),
("""0 as gst_total"""),
("""0 as pst_total"""),
("""0 as g_gst_total"""),
# ("""SUM( IF(`tabSales Taxes and Charges`.tax_type = 'GST', `tabSales Taxes and Charges`.tax_amount, 0) ) as gst_total"""),
# ("""SUM( IF(`tabSales Taxes and Charges`.tax_type = 'PST', `tabSales Taxes and Charges`.tax_amount, 0 ) ) as pst_total"""),
# ("""SUM( IF(`tabSales Taxes and Charges`.tax_type = 'GST', `tabSales Taxes and Charges`.g_tax, 0 ) ) as g_gst_total"""),
("Sales Invoice", "company"),
("Sales Invoice", "is_return"),
("Sales Invoice", "posting_date", "sinv_date"),
("Sales Invoice", "customer"),
("Sales Invoice", "invoice_type"),
("Sales Invoice", "net_total"),
("Sales Invoice", "currency"),
("Sales Invoice", "base_grand_total"),
("Sales Invoice", "grand_total"),
("Sales Invoice", "name", "sinv_name"),
("Sales Invoice", "outstanding_amount"),
("Sales Invoice", "total_g", "gprice"),
("Payment Entry", "posting_date", "p_posting_date"),
("Payment Entry", "mode_of_payment"),
("Payment Entry Reference", "parent", "payment_entry"),
("Payment Entry Reference", "allocated_amount", "breakdown"),
("`viewPayment and Refunds`.paid_amount"),
("`viewPayment and Refunds`.refund_amount"),
("(SELECT `view_vehicle_g_cost`.purchase_cost + COALESCE(view_vehicle_g_cost.net_lcv, 0) from `view_vehicle_g_cost` where `view_vehicle_g_cost`.item_code = `tabItem`.item_code) as net_cost")
)
sql_fields = []
for args in fields:
sql_field = get_field(args)
sql_fields.append(sql_field)
# frappe.errprint(", ".join(sql_fields))
return ", ".join(sql_fields) | 592d7c051e3af4cb510e43caa774054976f68865 | 3,648,895 |
import pandas as pd
from collections import Counter
def count_POS_tag(df_pos):
"""Count how often each POS tag occurs
Args:
df_pos ([dataframe]): dataframe, where the entries are list of tuples (token, POS tag)
Returns:
df_pos_stats ([dataframe]): dataframe containing POS tag statistics
"""
# POS tag list
tag_lst = ['CC', 'CD', 'DT', 'EX', 'FW', 'IN', 'JJ', 'JJR', 'JJS', 'LS', 'MD', 'NN', 'NNS', 'NNP', 'NNPS',
'PDT', 'POS', 'PRP', 'PRP$', 'RB', 'RBR', 'RBS', 'RP', 'SYM', 'TO', 'UH', 'VB', 'VBD', 'VBG',
'VBN', 'VBP', 'VBZ', 'WDT', 'WP', 'WP$', 'WRB', '$', "''", '(', ')', ',', '.', ':', '``']
# init dataframe
df_pos_stats = pd.DataFrame(0, index=range(len(df_pos)), columns=tag_lst)
# count POS tag
for index, pos in enumerate(df_pos):
count_dict = Counter(tag for _, tag in pos)
for tag, count in count_dict.items():
if tag in tag_lst:
df_pos_stats.loc[index, tag] = count
return df_pos_stats | a9ac14f34c020b78b02d6ae629cbddcdde39af8d | 3,648,896 |
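# Minimal usage sketch: each entry is a list of (token, POS tag) tuples, e.g. as
# returned by nltk.pos_tag; tags outside the whitelist above are silently ignored.
import pandas as pd
sample = pd.Series([
    [('The', 'DT'), ('dog', 'NN'), ('barks', 'VBZ')],
    [('Run', 'VB'), ('!', '.')],
])
stats = count_POS_tag(sample)
assert stats.loc[0, 'NN'] == 1 and stats.loc[1, 'VB'] == 1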
import json
def catch_all(path):
"""
Gets dummy message.
"""
return json.dumps({
'message': 'no one was here',
'ms': get_epochtime_ms()
}) | b93190b546705c1115c1612e4bd79210ab0d8f85 | 3,648,897 |
import typing
from typing import List
from itertools import accumulate
from functools import reduce
def dimensions_to_space_time_index(dims, t_idx = (), t_len = (), s_idx = (), s_len = (),
next_idx_valid = 0, invalid = False,
min_port_width = 0, max_port_width = 0, total_time = 0,
first_call = True) -> typing.Tuple[List[SpaceTimeIndex], int]:
"""
    Convert a space-time Type to a flat list of SpaceTimeIndex objects with the s and t values along with the flat_idx.
    This is a recursive function. The parameters other than dims describe the status of the current call.
    The values are needed to compute the flat t, s, and flat_idx of each inner value.
    :param dims: The type, its space and time dimensions
    :param t_idx: The indices within each of the parent calls that are TSeqs
    :param t_len: The lengths of each of the parent calls' TSeqs
    :param s_idx: The indices within each of the parent calls that are SSeqs
    :param s_len: The lengths of each of the parent calls' SSeqs
    :param next_idx_valid: The next flat_idx to use for valids
    :param invalid: Whether this call is in an invalid part of a type. Any invalid parent makes all the children
    invalid
    :param min_port_width: The minimum width of this type and the other (output or input).
    This is used when adding padding at the end of the top call.
    :param max_port_width: The maximum width of this type and the other (output or input).
    This is used when adding padding at the end of the top call.
    :param total_time: The total time required by this type.
    This is used when adding padding at the end of the top call.
    :param first_call: Whether this is the top, non-recursive call to this function
    :return: A tuple of (list of SpaceTimeIndex, next valid flat index)
"""
if type(dims) == ST_SSeq or type(dims) == ST_SSeq_Tuple:
nested_result = []
for s in range(dims.n):
(res, next_idx_valid) = \
dimensions_to_space_time_index(dims.t, t_idx, t_len,
tuple([s]) + s_idx, tuple([dims.n]) + s_len,
next_idx_valid, invalid, 0, 0, 0, False)
nested_result += [res]
result = flatten(nested_result), next_idx_valid
elif type(dims) == ST_TSeq:
nested_result = []
for t in range(dims.n + dims.i):
(res, next_idx_valid) = \
dimensions_to_space_time_index(dims.t, tuple([t]) + t_idx, tuple([dims.n + dims.i]) + t_len,
s_idx, s_len, next_idx_valid,
invalid or (t >= dims.n), 0, 0, 0, False)
nested_result += [res]
result = flatten(nested_result), next_idx_valid
else:
# track how much time each t_idx indicates due to nested index structure
# drop the last value because each t_idx time is the product of all
# time dimensions inside of it. No t_idx contains last dimension
time_per_t_len = list(accumulate([1] + list(t_len), lambda x,y : x*y))[:-1]
t_idx_with_time_per_len = zip(time_per_t_len, list(t_idx))
time_per_t_idx = list(map(lambda x: x[0]*x[1], t_idx_with_time_per_len))
t = reduce(lambda x,y: x+y, [0] + time_per_t_idx)
# do same computation for space
time_per_s_len = list(accumulate([1] + list(s_len), lambda x,y : x*y))
s_idx_with_time_per_len = zip(time_per_s_len, list(s_idx))
time_per_s_idx = list(map(lambda x: x[0]*x[1], s_idx_with_time_per_len))
s = reduce(lambda x,y: x+y, [0] + time_per_s_idx)
if invalid:
result = [SpaceTimeIndex(FlatIndex(True, (t, s)), s, t)], next_idx_valid
else:
next_idx_valid += 1
result = [SpaceTimeIndex(FlatIndex(False, next_idx_valid - 1), s, t)], next_idx_valid
if first_call:
padded_result = pad_space_dimension_with_invalids(result[0], min_port_width, max_port_width, total_time)
return fix_invalid_indexes(padded_result), result[1]
else:
return result | bdb24e237ba99288be98112db0f09d6782193594 | 3,648,899 |
from io import StringIO
from xml.sax.saxutils import XMLGenerator
def unparse(input_dict, output=None, encoding='utf-8', **kwargs):
"""Emit an XML document for the given `input_dict` (reverse of `parse`).
The resulting XML document is returned as a string, but if `output` (a
file-like object) is specified, it is written there instead.
Dictionary keys prefixed with `attr_prefix` (default=`'@'`) are interpreted
as XML node attributes, whereas keys equal to `cdata_key`
(default=`'#text'`) are treated as character data.
The `pretty` parameter (default=`False`) enables pretty-printing. In this
mode, lines are terminated with `'\n'` and indented with `'\t'`, but this
can be customized with the `newl` and `indent` parameters.
"""
((key, value),) = input_dict.items()
must_return = False
    if output is None:
output = StringIO()
must_return = True
content_handler = XMLGenerator(output, encoding)
content_handler.startDocument()
_emit(key, value, content_handler, **kwargs)
content_handler.endDocument()
if must_return:
value = output.getvalue()
try: # pragma no cover
value = value.decode(encoding)
except AttributeError: # pragma no cover
pass
return value | 31cd6225144fcd1296105a66d2318c6d1a22bcca | 3,648,900 |
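# The function above mirrors xmltodict.unparse (it relies on xmltodict's private
# _emit helper), so the behaviour is easiest to demonstrate with the library itself.
# The exact XML declaration below reflects xmltodict's defaults.
import xmltodict
doc = {'root': {'@attr': 'value', '#text': 'hello'}}
xml = xmltodict.unparse(doc)
# xml is roughly: <?xml version="1.0" encoding="utf-8"?> followed by <root attr="value">hello</root>
assert xmltodict.parse(xml) == doc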
def to_bin(val):
"""
Receive int and return a string in binary. Padded by 32 bits considering 2's complement for negative values
"""
COMMON_DIGITS = 32
val_str = "{:b}".format(val) # Count '-' in negative case
padded_len = len(val_str) + ((COMMON_DIGITS - (len(val_str) % COMMON_DIGITS)) % COMMON_DIGITS)
if val < 0:
val_2_complement = val & ((1 << padded_len) - 1)
final_val_str = "{:b}".format(val_2_complement)
else:
final_val_str = "0" * (padded_len - len(val_str)) + val_str
return(final_val_str) | 819d1c0a9d387f6ad1635f0fe0e2ab98b3ca17b0 | 3,648,903 |
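# Quick self-check of the function above: positive values are zero-padded to a
# multiple of 32 bits, negative values are rendered in two's complement.
assert to_bin(5) == '0' * 29 + '101'
assert to_bin(-1) == '1' * 32
assert len(to_bin(2 ** 40)) == 64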
def PSingle (refLamb2, lamb2, qflux, qsigma, uflux, usigma, err, nterm=2):
""" Fit RM, EVPA0 to Q, U flux measurements
Also does error analysis
Returns array of fitter parameters, errors for each and Chi Squares of fit
refLamb2 = Reference lambda^2 for fit (m^2)
lamb2 = Array of lambda^2 for fit (m^2)
qflux = Array of Q fluxes (Jy) same dim as lamb2
qsigma = Array of Q errors (Jy) same dim as lamb2
uflux = Array of U fluxes (Jy) same dim as lamb2
usigma = Array of U errors (Jy) same dim as lamb2
err = Obit error stack
nterm = Number of coefficients to fit (1 or 2)
"""
################################################################
#
nlamb2 = len(lamb2)
ret = Obit.RMFitSingle(nlamb2, nterm, refLamb2, lamb2,
qflux, qsigma, uflux, usigma, err.me)
OErr.printErr(err)
OErr.printErrMsg(err,"Fitting failed")
return ret
# end PSingle | 29c3fd75203317265cccc804b1114b5436fd12bc | 3,648,904 |
import numpy as np
from typing import List
def exec_waveform_function(wf_func: str, t: np.ndarray, pulse_info: dict) -> np.ndarray:
"""
Returns the result of the pulse's waveform function.
If the wf_func is defined outside quantify-scheduler then the
wf_func is dynamically loaded and executed using
:func:`~quantify_scheduler.helpers.waveforms.exec_custom_waveform_function`.
Parameters
----------
wf_func
The custom waveform function path.
t
The linear timespace.
pulse_info
The dictionary containing pulse information.
Returns
-------
:
Returns the computed waveform.
"""
whitelist: List[str] = ["square", "ramp", "soft_square", "drag"]
fn_name: str = wf_func.split(".")[-1]
waveform: np.ndarray = []
if wf_func.startswith("quantify_scheduler.waveforms") and fn_name in whitelist:
if fn_name == "square":
waveform = waveforms.square(t=t, amp=pulse_info["amp"])
elif fn_name == "ramp":
waveform = waveforms.ramp(t=t, amp=pulse_info["amp"])
elif fn_name == "soft_square":
waveform = waveforms.soft_square(t=t, amp=pulse_info["amp"])
elif fn_name == "drag":
waveform = waveforms.drag(
t=t,
G_amp=pulse_info["G_amp"],
D_amp=pulse_info["D_amp"],
duration=pulse_info["duration"],
nr_sigma=pulse_info["nr_sigma"],
phase=pulse_info["phase"],
)
else:
waveform = exec_custom_waveform_function(wf_func, t, pulse_info)
return waveform | 29c44de1cc94f6d63e41fccbbef5c23b67870b4d | 3,648,905 |
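# Hypothetical call, assuming the surrounding module imports
# quantify_scheduler.waveforms as the dispatch above requires: build a short
# square pulse. The pulse_info keys follow the whitelist branches above.
import numpy as np
t = np.linspace(0, 16e-9, 4)
wave = exec_waveform_function("quantify_scheduler.waveforms.square", t, {"amp": 0.5})
# wave should be an array of 0.5's with the same length as t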
def generate_straight_pipeline():
""" Simple linear pipeline """
node_scaling = PrimaryNode('scaling')
node_ridge = SecondaryNode('ridge', nodes_from=[node_scaling])
node_linear = SecondaryNode('linear', nodes_from=[node_ridge])
pipeline = Pipeline(node_linear)
return pipeline | 2ef1d8137aeb100f6216d6a853fe22953758faf3 | 3,648,906 |
def get_socialnetwork_image_path(instance, filename):
"""
Builds a dynamic path for SocialNetwork images. This method takes an
instance an builds the path like the next pattern:
/simplesite/socialnetwork/PAGE_SLUG/slugified-path.ext
"""
return '{0}/{1}/{2}/{3}'.format(instance._meta.app_label,
str(instance._meta.model_name),
str(instance.slug),
get_slugified_file_name(filename)
) | b54e53f0c2a79b3b4e6d4d496d6a85264fffcef1 | 3,648,907 |
def openReadBytesFile(path: str):
"""
    Open a binary file in read-only mode
    :param path: path to the file
    :return: file object (IO)
"""
return openFile(path, "rb") | 72fd2be5264a27a2c5c328cb7a8a4e818d799447 | 3,648,908 |
import datetime
def diff_time(a: datetime.time, b: datetime.time):
"""
a-b in seconds
"""
return 3600 * (a.hour -b.hour) + 60*(a.minute-b.minute) + (a.second-b.second) + (a.microsecond-b.microsecond)/1000000 | e0557d3d3e1e9e1184d7ea7a84665813e7d32760 | 3,648,909 |
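# Small sanity check; self-contained so it does not depend on how the module
# imports datetime above.
import datetime as _dt
assert diff_time(_dt.time(12, 30, 15), _dt.time(12, 29, 45)) == 30.0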
def _create_pseudo_names(tensors, prefix):
"""Creates pseudo {input | output} names for subclassed Models.
Warning: this function should only be used to define default
    names for `Metrics` and `SavedModel`. No other use cases should
rely on a `Model`'s input or output names.
Example with dict:
`{'a': [x1, x2], 'b': x3}` becomes:
`['a_1', 'a_2', 'b']`
Example with list:
`[x, y]` becomes:
`['output_1', 'output_2']`
Args:
tensors: `Model`'s outputs or inputs.
prefix: 'output_' for outputs, 'input_' for inputs.
Returns:
Flattened list of pseudo names.
"""
def one_index(ele):
# Start with "output_1" instead of "output_0".
if isinstance(ele, int):
return ele + 1
return ele
flat_paths = list(nest.yield_flat_paths(tensors))
flat_paths = nest.map_structure(one_index, flat_paths)
names = []
for path in flat_paths:
if not path:
name = prefix + '1' # Single output.
else:
name = '_'.join(str(p) for p in path)
if isinstance(path[0], int):
name = prefix + name
names.append(name)
return names | 5e4ee64026e9eaa8aa70dab85d8dcf0ad0b6d89f | 3,648,910 |
import sqlite3
import numpy as np
def search_for_breakpoint(db_name, ids):
"""
    Function will retrieve the ID of the last calculated grid node to continue an interrupted grid calculation.
:param db_name: str;
:param ids: numpy.array; list of grid node ids to calculate in this batch
    :return: int; grid node from which to start the calculation
"""
conn = sqlite3.connect(db_name, detect_types=sqlite3.PARSE_DECLTYPES)
conn.row_factory = lambda cursor, row: row[0]
cursor = conn.cursor()
sql = f"SELECT last_index FROM auxiliary"
last_idx = np.array(cursor.execute(sql).fetchall())
if last_idx.size == 0:
return 0
elif last_idx[0] in ids:
return np.where(last_idx[0] == ids)[0][0]
else:
raise ValueError('IDs of already calculated objects do not correspond to the generated ID. Breakpoint cannot '
'be generated.')
conn.close() | 3354fcd505de9aefae5f0b4448e1ada7eab0a092 | 3,648,911 |
def rgetattr(obj, attr):
"""
Get named attribute from an object, i.e. getattr(obj, 'a.a') is
equivalent to ``obj.a.a''.
- obj: object
- attr: attribute name(s)
>>> class A: pass
>>> a = A()
>>> a.a = A()
>>> a.a.a = 1
>>> rgetattr(a, 'a.a')
1
>>> rgetattr(a, 'a.c')
Traceback (most recent call last):
...
AttributeError: 'A' object has no attribute 'c'
"""
attrs = attr.split(".")
obj = getattr(obj, attrs[0])
for name in attrs[1:]:
obj = getattr(obj, name)
return obj | 5fb58634c4ba910d0a20753c04addf667614a07f | 3,648,913 |
def lambda1_plus_lambda2(lambda1, lambda2):
"""Return the sum of the primary objects tidal deformability and the
secondary objects tidal deformability
"""
return lambda1 + lambda2 | 4ac3ef51bb66861b06b16cec564f0773c7692775 | 3,648,914 |
def create_cut_sht(stockOutline,array,features,partSpacing,margin):
""" """
numParts = len(array)
basePlanes = generate_base_planes_from_array(array)
targetPlanes = create_cut_sht_targets(stockOutline,array,margin,partSpacing)
if targetPlanes == None:
return None
else:
# converts GH branch to python list for a set of features
features = [item for item in features.Branches]
cut_sht = []
for i in range(numParts):
objects = [array[i]]
for item in features[i]:
objects.append(item)
cutPart = reorient_objects(objects,basePlanes[i],targetPlanes[i])
cut_sht.append(cutPart)
return cut_sht | 12d8f56a7b38b06cd89d86fdbf0096f5c8d6e869 | 3,648,916 |
def unicode_is_ascii(u_string):
"""Determine if unicode string only contains ASCII characters.
:param str u_string: unicode string to check. Must be unicode
and not Python 2 `str`.
:rtype: bool
"""
assert isinstance(u_string, str)
try:
u_string.encode('ascii')
return True
except UnicodeEncodeError:
return False | 2a742c7334d68fe0bf6b546fb79bf00a338355f9 | 3,648,917 |
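# Two quick examples of the ASCII check above.
assert unicode_is_ascii('hello') is True
assert unicode_is_ascii('héllo') is False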
def duplicate_item(api_key: str, board_id: str, item_id: str, *args, **kwargs):
"""Duplicate an item.
Parameters
api_key : `str`
The monday.com v2 API user key.
board_id : `str`
The board's unique identifier.
item_id : `str`
The item's unique identifier.
args : `tuple`
The list of item return fields.
kwargs : `dict`
Optional arguments for item.
Returns
data : `dict`
A monday.com item in dictionary form.
Return Fields
assets : `list[moncli.entities.Asset]`
The item's assets/files.
board : `moncli.entities.Board`
The board that contains this item.
column_values : `list[moncli.entities.ColumnValue]`
The item's column values.
created_at : `str`
The item's create date.
creator : `moncli.entities.User`
The item's creator.
creator_id : `str`
The item's unique identifier.
group : `moncli.entities.Group`
The group that contains this item.
id : `str`
The item's unique identifier.
name : `str`
The item's name.
state : `str`
The board's state (all / active / archived / deleted)
subscriber : `moncli.entities.User`
The pulse's subscribers.
updated_at : `str`
The item's last update date.
updates : `moncli.entities.Update`
The item's updates.
Optional Arguments
with_updates : `bool`
Duplicate with the item's updates.
"""
kwargs = {
'board_id': gql.IntValue(board_id),
'item_id': gql.IntValue(item_id)
}
return execute_query(api_key, query_name=DUPLICATE_ITEM, operation_type=gql.OperationType.MUTATION, fields=args, arguments=kwargs) | 9e24952a2443b4bcf40d2ae5e3e9d65b8485fece | 3,648,918 |
def compute_loss(retriever_logits, retriever_correct, reader_logits,
reader_correct):
"""Compute loss."""
# []
retriever_loss = marginal_log_loss(retriever_logits, retriever_correct)
# []
reader_loss = marginal_log_loss(
tf.reshape(reader_logits, [-1]), tf.reshape(reader_correct, [-1]))
# []
any_retrieved_correct = tf.reduce_any(retriever_correct)
any_reader_correct = tf.reduce_any(reader_correct)
retriever_loss *= tf.cast(any_retrieved_correct, tf.float32)
reader_loss *= tf.cast(any_reader_correct, tf.float32)
loss = retriever_loss + reader_loss
tf.summary.scalar("num_read_correct",
tf.reduce_sum(tf.cast(reader_correct, tf.int32)))
tf.summary.scalar("reader_loss", tf.reduce_mean(reader_loss))
tf.summary.scalar("retrieval_loss", tf.reduce_mean(retriever_loss))
# []
loss = tf.reduce_mean(loss)
return loss | 2576191d23a303e9d045cb7c8bbeccbd49b22b43 | 3,648,920 |
def index() -> render_template:
"""
The main part of the code that is ran when the user visits the address.
Parameters:
covid_data: This is a dictionary of the data returned from the API request.
local_last7days_cases: The number of local cases in the last 7 days.
national_last7days_cases: The number of national cases in the last 7 days.
current_hospital_cases: The number of current hospital cases.
total_deaths: The number of total deaths in The UK.
news: A list of all the news.
update_name: The name of the scheduled update.
update_interval: The time the event will take place.
repeat: Whether the update will repeat.
updating_covid: Whether the update will update the covid data.
updating_news: Whether the update will update the news.
news_to_delete: The title of the news that is to be deleted.
update_to_delete: The title of the update that is to be deleted.
Returns:
A rendered template with the data.
"""
s.run(blocking=False) # stops the scheduler from blocking the server from running
covid_data = covid_API_request()
(local_last7days_cases,
national_last7days_cases,
current_hospital_cases,
total_deaths) = process_covid_data(covid_data)
news = update_news()
update_name = request.args.get("two")
if update_name: # checks if an update has been scheduled
update_interval = request.args.get("update")
repeat = request.args.get("repeat")
updating_covid = request.args.get("covid-data")
updating_news = request.args.get("news")
schedule_covid_updates(update_interval, update_name, repeat, updating_covid, updating_news)
if request.args.get("notif"): # checks if news has been deleted
news_to_delete = request.args.get("notif")
delete_news(news_to_delete)
if request.args.get("update_item"): # checks if an update has been deleted
update_to_delete = request.args.get("update_item")
delete_update(update_to_delete, True)
return render_template('index.html',
title=(title),
news_articles=news,
updates=update,
location=(city),
local_7day_infections=(local_last7days_cases),
nation_location=("United Kingdom"),
national_7day_infections=(national_last7days_cases),
hospital_cases=(f"Hospital Cases: {current_hospital_cases}"),
deaths_total=(f"Total Deaths: {total_deaths}")) | d9357f29c9329c901e8389497435ead319841242 | 3,648,922 |
import numpy as np
def r(x):
"""
Cartesian radius of a point 'x' in 3D space
Parameters
----------
x : (3,) array_like
1D vector containing the (x, y, z) coordinates of a point.
Returns
-------
r : float
Radius of point 'x' relative to origin of coordinate system
"""
return np.sqrt((x[0]**2) + (x[1]**2) + (x[2]**2)) | 3729f91a6671c17bc9fda7eebb9809d316a0d714 | 3,648,923 |
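# Sanity check: a classic 3-4-5 right triangle in the x-y plane.
assert r([3.0, 4.0, 0.0]) == 5.0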
def solve(*args):
"""
Crunch the numbers; solve the problem.
solve(IM A, IM b) -> IM
solve(DM A, DM b) -> DM
solve(SX A, SX b) -> SX
solve(MX A, MX b) -> MX
solve(IM A, IM b, str lsolver, dict opts) -> IM
solve(DM A, DM b, str lsolver, dict opts) -> DM
solve(SX A, SX b, str lsolver, dict opts) -> SX
solve(MX A, MX b, str lsolver, dict opts) -> MX
"""
return _casadi.solve(*args) | 8866fba2efa51e7117d1d39fd7d2b7a259209c66 | 3,648,924 |
def NonNegativeInteger(num):
"""
Ensures that the number is non negative
"""
if num < 0:
raise SmiNetValidationError("A non-negative integer is required")
return num | dc5241e8dd7dbd07c5887c35a790ec4eab2593f0 | 3,648,925 |
import numpy as np
def to_cartesian(r, ang):
"""Returns the cartesian coordinates of a polar point."""
x = r * np.cos(ang)
y = r * np.sin(ang)
return x, y | bc4e2e21c42b31a7a45185e58fb20a7b4a4b52e4 | 3,648,926 |
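# A point at radius 2 and angle pi/2 maps to (0, 2) up to floating-point error.
import numpy as np
cx, cy = to_cartesian(2.0, np.pi / 2)
assert abs(cx) < 1e-9 and abs(cy - 2.0) < 1e-9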
def _get_filtered_partially_learnt_topic_summaries(
topic_summaries, topic_ids):
"""Returns a list of summaries of the partially learnt topic ids and the ids
of topics that are no longer present.
Args:
topic_summaries: list(TopicSummary). The list of topic
summary domain objects to be filtered.
topic_ids: list(str). The ids of the topic corresponding to
the topic summary domain objects.
Returns:
tuple. A 2-tuple whose elements are as follows:
- list(TopicSummary). A filtered list with the summary domain
objects of the partially_learnt topics.
- list(str). The ids of the topics that are no longer present.
"""
nonexistent_partially_learnt_topic_ids = []
filtered_partially_learnt_topic_summaries = []
topic_rights = topic_fetchers.get_multi_topic_rights(topic_ids)
for index, topic_summary in enumerate(topic_summaries):
if topic_summary is None:
nonexistent_partially_learnt_topic_ids.append(topic_ids[index])
else:
topic_id = topic_summary.id
if not topic_rights[index].topic_is_published:
nonexistent_partially_learnt_topic_ids.append(topic_id)
else:
filtered_partially_learnt_topic_summaries.append(topic_summary)
return (
filtered_partially_learnt_topic_summaries,
nonexistent_partially_learnt_topic_ids) | c977966381dea0b1b91e904c3f9ec4823d26b006 | 3,648,927 |
import numpy as np
import matplotlib.pyplot as plt
def build_bar_chart_with_two_bars_per_label(series1, series2, series1_label, series2_label, series1_labels,
series2_labels,
title, x_axis_label, y_axis_label, output_file_name):
"""
This function builds a bar chart that has two bars per label.
:param series1: a list of values containing the data for the first series
:param series2: a list of values containing the data for the second series
:param series1_label: a label to be shown in the legend for the first series
:param series2_label: a label to be shown in the legend for the second series
:param series1_labels: a list of labels for the first series
:param series2_labels: a list of labels for the second series
:param title: string value of the title of the bar chart
:param x_axis_label: the label to show on the x axis
:param y_axis_label: the label to show on the y axis
:param output_file_name: the name and path of the file where the figure is to be exported to
:return: string path of the image that has been saved of the figure
"""
index_series1 = np.arange(len(series1_labels))
index_series2 = np.arange(len(series2_labels))
fig, ax = plt.subplots()
ax.bar(x=index_series1 - 0.4, height=series1, width=0.4, bottom=0, align='center', label=series1_label)
ax.bar(x=index_series2, height=series2, width=0.4, bottom=0, align='center', label=series2_label)
ax.set_xlabel(x_axis_label, fontsize=10)
ax.set_ylabel(y_axis_label, fontsize=10)
ax.set_xticks(index_series1)
ax.set_xticklabels(series1_labels, fontsize=10, rotation=30)
ax.set_title(title)
ax.legend(loc='upper right', frameon=True)
plt.show()
# fig.savefig(output_file_name, dpi=300, bbox_inches='tight')
# return '../{}'.format(output_file_name)
return "{}".format(write_to_image_file(fig, output_file_name, False, 300)) | f43c4e525f0a2dd07b815883753974e1aa2e08cf | 3,648,928 |
def calculateDescent():
"""
Calculate descent timestep
"""
global descentTime
global tod
descentTime = myEndTime
line = len(originalTrajectory)
for segment in reversed(originalTrajectory):
flInit = int(segment[SEGMENT_LEVEL_INIT])
flEnd = int(segment[SEGMENT_LEVEL_END])
status = segment[STATUS]
if flInit == flEnd and status == '2':
stop=True
for i in range(1,4):
flInitAux = int(originalTrajectory[line-i][SEGMENT_LEVEL_INIT])
flEndAux = int(originalTrajectory[line-i][SEGMENT_LEVEL_END])
statAux = originalTrajectory[line-i][STATUS]
if flInitAux == flEndAux and statAux == '2': pass
else: stop = False; break
if stop: break
else: descentTime-= TIME_STEP
line-=1
tod = {}
tod['LAT'] = originalTrajectory[line][SEGMENT_LAT_INIT]
tod['LON'] = originalTrajectory[line][SEGMENT_LON_INIT]
tod['ALT'] = originalTrajectory[line][SEGMENT_LEVEL_INIT]
logger(myLogFile,rankMsg,LOG_STD,'Descending starts at time '+str(descentTime)+' [s]')
return line | 11c04e49ef63f7f51cb874bf19cd245c40f0f6f4 | 3,648,929 |
def update_tutorial(request,pk):
"""View function for updating tutorial """
tutorial = get_object_or_404(Tutorial, pk=pk)
form = TutorialForm(request.POST or None, request.FILES or None, instance=tutorial)
if form.is_valid():
form.save()
messages.success(request=request, message="Congratulations! Tutorial has been updated.")
return redirect(to="dashboard")
context={
"form":form,
}
return render(request=request, context=context, template_name="dashboard/dashboard_addtutorialseries.html") | 06fe827f26537fe376e79bc95d2ab04f879b971a | 3,648,930 |