content (string, 35–762k chars) | sha1 (string, 40 chars) | id (int64, 0–3.66M)
---|---|---|
def create_zone_ajax(request):
"""
    This view tries to create a new zone and returns a JSON response with either
    'success': True or 'success': False and some errors.
"""
qd = request.POST.copy()
# See if the domain exists.
# Fail if it already exists or if it's under a delegated domain.
root_domain = qd.get('root_domain', None)
primary = qd.get('soa_primary', None)
contact = qd.get('soa_contact', None)
# Find all the NS entries
nss = []
    number_re = re.compile(r'nameserver_(\d+)')
# parse nameserver bits from POST request.
# compile some tuples that look like:
# (<server_fqdn>, <ttl>, [<view_name>,..])
    for k, server in request.POST.items():
if k.startswith('nameserver_'):
n = number_re.search(k)
if not n:
continue
ns_number = n.groups()[0]
views = []
if qd.get('private_view_{0}'.format(ns_number), 'off') == 'on':
views.append('private')
if qd.get('public_view_{0}'.format(ns_number), 'off') == 'on':
views.append('public')
ttl = qd.get('ttl_{0}'.format(ns_number))
if ttl and ttl.isdigit():
ttl = int(ttl)
else:
ttl = None
nss.append(
(server, ttl, views)
)
try:
with transaction.commit_on_success():
domain = _create_zone(root_domain, primary, contact, nss)
    except (ValueError, ValidationError) as e:
return HttpResponse(json.dumps({
'success': False, 'error': str(e)
}), status=400)
return HttpResponse(json.dumps({
'success': True,
'success_url': '/en-US/core/search/#q=zone=:{0}'.format(
domain.name
)
})) | 05a0e8a3edc38bd2821057c131760b2c06fc452c | 14,200 |
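As a sketch of the form fields this view expects (field names are taken from the parsing code above; the concrete values are only illustrative), a request creating a zone with one nameserver served in both views could carry a payload like the following.
# Hypothetical POST payload for create_zone_ajax; keys mirror what the view reads above.
example_post = {
    'root_domain': 'example.com',
    'soa_primary': 'ns1.example.com',
    'soa_contact': 'hostmaster.example.com',
    'nameserver_1': 'ns1.example.com',   # picked up because it starts with 'nameserver_'
    'ttl_1': '3600',                     # parsed to int 3600; missing or non-digit -> None
    'private_view_1': 'on',              # adds 'private' to the views list
    'public_view_1': 'on',               # adds 'public' to the views list
}
# The loop above would build: nss == [('ns1.example.com', 3600, ['private', 'public'])]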
def main_plot():
"""The view for rendering the scatter chart"""
img = get_main_image()
return send_file(img, mimetype='image/png', cache_timeout=0) | a285cd3bc9a54b96d4fa52d9cc8b13c1bd070cd2 | 14,201 |
def Ising2dT(beta = 0.4, h = 0, isSym = False):
"""
    T = Ising2dT(beta, h).
    -------------------------
    Set up the initial tensor for the 2d classical Ising model on a square lattice.
    Arguments: beta is the dimensionless coupling beta * J = J / kT, and h is the
    dimensionless field beta * h = h / kT, where J and h are the conventional
    coupling constants.
    Return: a rank-4 tensor T[i,j,k,l]. Each index of the tensor represents a
    physical classical spin, and the tensor T represents the Boltzmann weight
    for the interactions on one plaquette.
"""
pars = {"model":"ising", "dtype":"float64",
"J":1, "H":h, "beta":beta, "symmetry_tensors":isSym}
T0 = get_initial_tensor(pars)
return T0 | 416fabb06a1e8aa0f57456d22c2a89fc4da869c6 | 14,202 |
def __grid_count(self):
"""Get number of grids in the case"""
try:
return self.__case_stub.GetGridCount(self.__request()).count
except grpc.RpcError as exception:
if exception.code() == grpc.StatusCode.NOT_FOUND:
return 0
return 0 | e6237a092b4714d787eb9d145f4e972deeaafb69 | 14,203 |
from os import path
import yaml
def load_fixtures():
"""Loads data from tests/fixtures into the connected database"""
db.database_proxy.create_tables([StorageGroup, StorageNode])
# Check we're starting from a clean slate
assert StorageGroup.select().count() == 0
assert StorageNode.select().count() == 0
with open(path.join(tests_path, "fixtures/storage.yml")) as f:
fixtures = yaml.safe_load(f)
StorageGroup.insert_many(fixtures["groups"]).execute()
groups = dict(StorageGroup.select(StorageGroup.name, StorageGroup.id).tuples())
# fixup foreign keys for the nodes
for node in fixtures["nodes"]:
node["group"] = groups[node["group"]]
# bulk load the nodes
StorageNode.insert_many(fixtures["nodes"]).execute()
nodes = dict(StorageNode.select(StorageNode.name, StorageNode.id).tuples())
return {"groups": groups, "nodes": nodes} | d80b074db266d86eb93d10cdc78d0023bedce858 | 14,204 |
import math
def timer(method):
"""
Decorator to time a function.
:param method: Method to time.
:type method: function
"""
def wrapper(*args, **kwargs):
"""
Start clock, do function with args, print rounded elapsed time.
"""
starttime = compat.perf_clock()
        result = method(*args, **kwargs)
endtime = compat.perf_clock() - starttime
endtime_proper = math.ceil(endtime * 100) / 100 # rounding
mins, secs = divmod(endtime_proper, 60)
hrs, mins = divmod(mins, 60)
print("COMPLETED IN {0:02d}:{1:02d}:{2:02d}".format(int(hrs), int(mins), int(secs)))
return wrapper | 10105d77a32ce62500bdb86c7fbb772f03b4eff9 | 14,205 |
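A minimal usage sketch, assuming a `compat` module that exposes `perf_clock` (e.g. as an alias of `time.perf_counter`); because the wrapper now returns the wrapped function's result, the decorated call passes the value through.
# Hypothetical usage of the timer decorator.
@timer
def crunch(n):
    return sum(i * i for i in range(n))

value = crunch(10_000_000)  # prints e.g. "COMPLETED IN 00:00:01"; elapsed time varies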
def plot_map_from_nc(path_nc, out_path, var_name, xaxis_min=0.0, xaxis_max=1.1, xaxis_step=0.1,
annotate_date=False, yr=0, date=-1, xlabel='', title='', tme_name='time', show_plot=False,
any_time_data=True, format='%.2f', land_bg=True, cmap=plt.cm.RdBu, grid=False, fill_mask=False):
"""
Plot var_name variable from netCDF file
\b
Args:
path_nc: Name of netCDF file including path
out_path: Output directory path + file name
var_name: Name of variable in netCDF file to plot on map
Returns:
Nothing, side-effect: save an image
"""
logger.info('Plotting ' + var_name + ' in ' + path_nc)
# Read netCDF file and get time dimension
nc = util.open_or_die(path_nc, 'r', format='NETCDF4')
lon = nc.variables['lon'][:]
lat = nc.variables['lat'][:]
if any_time_data:
ts = nc.variables[tme_name][:] # time-series
if date == -1: # Plot either the last year {len(ts)-1} or whatever year the user wants
plot_yr = len(ts) - 1
else:
plot_yr = date - ts[0]
# Draw empty basemap
m = Basemap(projection='robin', resolution='c', lat_0=0, lon_0=0)
# m.drawcoastlines()
# m.drawcountries()
# Find x,y of map projection grid.
lons, lats = np.meshgrid(lon, lat)
x, y = m(lons, lats)
if fill_mask:
nc_vars = np.ma.filled(nc.variables[var_name], fill_value=np.nan)
else:
nc_vars = np.array(nc.variables[var_name])
# Plot
# Get data for the last year from the netCDF file array
if any_time_data:
mask_data = maskoceans(lons, lats, nc_vars[int(plot_yr), :, :])
else:
mask_data = maskoceans(lons, lats, nc_vars[:, :])
m.etopo()
if land_bg:
m.drawlsmask(land_color='white', ocean_color='none', lakes=True) # land_color = (0, 0, 0, 0) for transparent
else:
m.drawlsmask(land_color=(0, 0, 0, 0), ocean_color='none', lakes=True)
cs = m.contourf(x, y, mask_data, np.arange(xaxis_min, xaxis_max, xaxis_step), cmap=cmap)
if annotate_date:
plt.annotate(str(yr), xy=(0.45, 0.1), xycoords='axes fraction', size=20)
if grid:
# where labels intersect = [left, right, top, bottom]
m.drawmeridians(np.arange(-180, 180, 60), labels=[0,0,1,0], labelstyle='+/-', linewidth=0.5)
m.drawparallels([-40, 0, 40], labels=[1, 0, 0, 0], labelstyle='+/-', linewidth=0.5)
# Add colorbar
cb = m.colorbar(cs, "bottom", size="3%", pad='2%', extend='both', drawedges=False, spacing='proportional',
format=format)
cb.set_label(xlabel)
plt.title(title, y=1.08)
plt.tight_layout()
if not show_plot:
plt.savefig(out_path, dpi=constants.DPI)
plt.close()
else:
plt.show()
nc.close()
return out_path | 7e688fd8e5baae173afc711f47633b3037b03e7d | 14,206 |
def f5_add_policy_method_command(client: Client, policy_md5: str, new_method_name: str,
act_as_method: str) -> CommandResults:
"""
Add allowed method to a certain policy.
Args:
client (Client): f5 client.
policy_md5 (str): MD5 hash of the policy.
new_method_name (str): Display name of the new method.
act_as_method(str): functionality of the new method. default is GET.
"""
result = client.add_policy_method(policy_md5, new_method_name, act_as_method)
outputs, headers = build_output(OBJECT_FIELDS, result)
readable_output = tableToMarkdown('f5 data for adding policy methods:', outputs, headers,
removeNull=True)
command_results = CommandResults(
outputs_prefix='f5.PolicyMethods',
outputs_key_field='id',
readable_output=readable_output,
outputs=remove_empty_elements(outputs),
raw_response=result
)
return command_results | 0b7297de004913eeeb5dd962a6cd62fee6f3458a | 14,207 |
from typing import Union
from datetime import date, datetime, timedelta
def shortdate(value: Union[datetime, date]) -> str:
"""Render a date in short form (deprecated for lack of i18n support)."""
dt: Union[datetime, date]
utc_now: Union[datetime, date]
if isinstance(value, datetime):
tz = get_timezone()
if value.tzinfo is None:
dt = utc.localize(value).astimezone(tz)
else:
dt = value.astimezone(tz)
utc_now = request_timestamp().astimezone(tz)
else:
dt = value
utc_now = request_timestamp().date()
if dt > (
utc_now
- timedelta(days=int(current_app.config.get('SHORTDATE_THRESHOLD_DAYS', 0)))
):
return dt.strftime('%e %b')
else:
# The string replace hack is to deal with inconsistencies in the underlying
# implementation of strftime. See https://bugs.python.org/issue8304
return str(dt.strftime("%e %b '%y")).replace("'", "’") | 445448701e1f16d481f5558284d81c6ed2fd8283 | 14,208 |
def test_set(sc,
idfModel,
numFeatures,
test_file = "data/test_clean.csv"
):
"""
Input :
IDF model obtained in the training phase
number of retained features in the tweet-term structure
Output :
normalized tweet-term format test set
"""
test_text = sc.textFile(test_file)
test_df = test_text.map(lambda x : (0,x)).toDF(["nothing" , "sentence"])
tokenizer_test = Tokenizer(inputCol="sentence", outputCol="words")
wordsData_test = tokenizer_test.transform(test_df)
hashingTF_test = HashingTF(inputCol="words", outputCol="rawFeatures", numFeatures=numFeatures)
featurizedData_test = hashingTF_test.transform(wordsData_test)
rescaledData_test = idfModel.transform(featurizedData_test)
rescaled_test_df = rescaledData_test.select("features")
return rescaled_test_df | 7e68e536f3f40761e7885d784c392b2e9a6ca428 | 14,209 |
def is_object_based_ckpt(ckpt_path: str) -> bool:
"""Returns true if `ckpt_path` points to an object-based checkpoint."""
var_names = [var[0] for var in tf.train.list_variables(ckpt_path)]
return '_CHECKPOINTABLE_OBJECT_GRAPH' in var_names | 043069aae83845be44a3248ce0b95096e86d4b8f | 14,210 |
def search(todos, **kwargs):
"""Return a list of todos that matches the provided filters.
    It takes the exact same parameters as the :class:`todotxtio.Todo` object
    constructor, and returns a list of :class:`todotxtio.Todo` objects as well.
    Every criterion defaults to ``None``, which means that it is ignored.
    A todo is returned in the results list only if all of the criteria match.
    Once a todo has been added to the results list, it is never checked again.
:param list todos: List of todos to test
:param dict kwargs: Dictionary of tests
:rtype: list
"""
results = []
for todo in todos:
matches = []
for key, value in kwargs.items():
if key == 'or':
or_matches = []
for k, v in value.items():
or_matches += [search_todo(todo, k, v)]
matches.append(any(match is True for match in or_matches))
else:
matches += [search_todo(todo, key, value)]
if matches and all(match is True for match in matches):
results.append(todo)
return results | dd1b41f756a0d477dab552c7be7f7a4e7114e0dd | 14,211 |
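A usage sketch, assuming `Todo` objects carrying the usual todotxtio attributes (e.g. `priority`, `completed`, `projects`) and the module's `search_todo` helper; the attribute names here are assumptions, not taken from the snippet above.
# Hypothetical calls; 'or' must be passed via ** because it is a Python keyword.
urgent_or_done = search(todos, **{
    'or': {'priority': 'A', 'completed': True},  # matches if either criterion holds
})
open_work_items = search(todos, completed=False, projects=['work'])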
import torch_fidelity
import matplotlib.pyplot as plt
from datetime import datetime
import os
import tqdm
import shutil
def evaluate_prettiness(sampler=None,
folder=None,
input_2='cifar10-train',
n=50000,
batch_size=1000,
clean_afterwards=False,
fid=False,
isc=False,
kid=False):
"""Evaluate a generative model in terms of IS, FID, or KID.
    At least one of `sampler` or `folder` must be present.
    Args:
        sampler (callable, optional): A callable that returns a batch of samples when called with `batch_size`.
folder (str, optional): Path to the folder that contains all the images.
input_2 (str, optional): Name of registered dataset or a path to a folder.
n (int, optional): Number of samples to take.
batch_size (int, optional): Number of samples in each batch.
clean_afterwards (bool, optional): Clean the local cache if True.
Returns:
A dictionary of metric values.
"""
if sampler is None and folder is None:
raise ValueError(f"model and folder cannot both be none")
if folder is None:
        now = datetime.now().strftime("%d:%m:%Y-%H:%M:%S")
folder = os.path.join(os.path.expanduser("~"), 'evaluate_prettiness', f'{now}')
os.makedirs(folder, exist_ok=True)
idx = 0
for _ in tqdm.tqdm(range(n // batch_size), desc='spawn samples'):
batch = sampler(batch_size=batch_size).detach().cpu().numpy()
if batch.shape[1] == 3:
batch = batch.transpose((0, 2, 3, 1))
for img in batch:
img_path = os.path.join(folder, f'{idx:06d}.png')
plt.imsave(img_path, img)
idx += 1
stats = torch_fidelity.calculate_metrics(folder, input_2, isc=isc, fid=fid, kid=kid)
if clean_afterwards:
shutil.rmtree(folder)
return stats | 101f6e0ae8d4bf9be2930aa6dc6e39dd1796f946 | 14,212 |
from typing import Any
def gather(first_step: str = PATH, *, filename: str = FILE, stamp: bool = True) -> dict[str, dict[str, Any]]:
"""Walk the steps on the path to read the trees of configuration."""
user = USER if filename == FILE else filename.split('.')[0]
trees = [(where, tree) for where, tree in walk_the_path(first_step, filename=filename) if tree is not None]
return {f'{user}_{steps:{PAD}}': dict(tree, **{LABEL: where}) if stamp else dict(tree) for steps, (where, tree) in enumerate(reversed(trees))} | c290b7bffcf3cb2b022ab1e4bbef68e6ebf4da3c | 14,213 |
def _extractKernelVersion(kernel):
"""
Extract version string from raw kernel binary.
@param bytes kernel Raw kernel binary.
@return string Version string if found.
"""
try:
versionOffset = kernel.index(b'Linux version')
for i in range(versionOffset, versionOffset+1024):
if kernel[i]==0x00:
return kernel[versionOffset:i]
return None
    except (ValueError, IndexError):
        # bytes.index raises ValueError if the 'Linux version' marker is absent
        return None
def get_vdw_rad(atomic_num):
"""Function to get the user defined atomic radius"""
atomic_rad_dict = {6: 1.7, 7: 1.55, 8: 1.52, 9: 1.47}
if atomic_num in atomic_rad_dict:
return atomic_rad_dict[atomic_num]
else:
return float(Chem.GetPeriodicTable().GetRvdw(atomic_num)) | 98bd3e346afce37458c4ab1ea298e50af1121c21 | 14,215 |
def input_literal(term, prompt):
"""Get console input of literal values and structures."""
while True:
input_string = read_line(term, prompt)
if input_string:
break
return eval_literal(input_string) | 93611e823a59bc61002cc80b481525ac5c91354e | 14,216 |
from typing import List
from typing import Union
from typing import Mapping
from typing import Pattern
def _parse_string(
value_expr: str, target_expr: str, ref_parts: List[str],
a_type: Union[mapry.String, mapry.Path],
pattern_uids: Mapping[Pattern[str], int],
auto_id: mapry.go.generate.AutoID) -> str:
"""
Generate the code to parse a string.
The code parses the JSONable ``value_expr`` into the ``target_expr``.
:param value_expr: Go expression of the value
:param target_expr: Go expression of where to store the parsed value
:param ref_parts: Go expression of reference path segments to the value
:param a_type: mapry definition of the value type
:param pattern_uids: uniquely identified patterns
:param auto_id: generator of unique identifiers
:return: generated code
"""
uid = auto_id.next_identifier()
return _PARSE_STRING_TPL.render(
uid=uid,
value_expr=value_expr,
ref_parts=ref_parts,
target_expr=target_expr,
a_type=a_type,
pattern_uids=pattern_uids).rstrip("\n") | bcb653ea8d02ea88569d67fedd5d1e83893a1519 | 14,217 |
from datetime import datetime, timedelta
def get_dots_case_json(casedoc, anchor_date=None):
"""
Return JSON-ready array of the DOTS block for given patient.
Pulling properties from PATIENT document.
Patient document trumps casedoc in this use case.
"""
if anchor_date is None:
anchor_date = datetime.now(tz=timezone(settings.TIME_ZONE))
enddate = anchor_date
ret = {
'regimens': [
# non art is 0
int(getattr(casedoc, CASE_NONART_REGIMEN_PROP, None) or 0),
# art is 1
int(getattr(casedoc, CASE_ART_REGIMEN_PROP, None) or 0),
],
'regimen_labels': [
list(casedoc.nonart_labels),
list(casedoc.art_labels)
],
'days': [],
# dmyung - hack to have query_observations timezone
# be relative specific to the eastern seaboard
'anchor': anchor_date.strftime("%d %b %Y"),
}
observations = query_observations(
casedoc._id, enddate-timedelta(days=DOT_DAYS_INTERVAL), enddate)
for delta in range(DOT_DAYS_INTERVAL):
obs_date = enddate - timedelta(days=delta)
day_arr = filter_obs_for_day(obs_date.date(), observations)
day_data = DOTDay.merge_from_observations(day_arr)
ret['days'].append(day_data.to_case_json(casedoc, ret['regimen_labels']))
ret['days'].reverse()
return ret | 4f9e6febcdc7e66f855411d601b69b4aad6955f3 | 14,218 |
import os
def rel_path(path, parent_path):
"""Return path relative to parent_path."""
# Use realpath to avoid issues with symlinked dirs (see gh-7707)
pd = os.path.realpath(os.path.abspath(parent_path))
apath = os.path.realpath(os.path.abspath(path))
if len(apath) < len(pd):
return path
if apath == pd:
return ''
if pd == apath[:len(pd)]:
assert apath[len(pd)] in [os.sep], repr((path, apath[len(pd)]))
path = apath[len(pd)+1:]
return path | 8424aa6393778234f71858a816b7375b845c42b2 | 14,219 |
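For example, on a POSIX filesystem with no symlinks involved:
# Worked examples for rel_path:
rel_path('/srv/project/data/file.txt', '/srv/project')   # -> 'data/file.txt'
rel_path('/srv/project', '/srv/project/data/file.txt')   # -> '/srv/project' (shorter than parent, returned unchanged)
rel_path('/srv/project', '/srv/project')                 # -> ''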
import time
def sleeping_func(arg, secs=10, result_queue=None):
"""This methods illustrates how the workers can be used."""
time.sleep(secs)
if result_queue is not None:
result_queue.put(arg)
else:
return arg | c15dfac46f9b47fcc82ff539116ecc683a593b9c | 14,220 |
import sys
def suffix(s):
"""Add '3' suffix to programs for Python 3."""
if sys.version_info[0] == 3:
s = s + '3'
return s | 0ba1495032e57553adf97d7aa49a85e110c1acf0 | 14,221 |
from pymongo import MongoClient
def make_coll(db_auth, db_user, db_pass, mongo_server_ip='127.0.0.1'):
"""
    Function to establish a connection to a local MongoDB instance.
    Parameters
    ----------
    db_auth: String.
        MongoDB database that should be used for user authentication.
    db_user: String.
        Username for MongoDB authentication.
    db_pass: String.
        Password for MongoDB authentication.
    mongo_server_ip: String.
        IP address of the MongoDB server. Defaults to '127.0.0.1'.
Returns
-------
collection: pymongo.collection.Collection.
Collection within MongoDB that holds the scraped news stories.
"""
connection = MongoClient(mongo_server_ip)
if db_auth:
connection[db_auth].authenticate(db_user, db_pass)
db = connection.event_scrape
collection = db['stories']
return collection | eb4297e76c5c0a4bf344430eba26d4ed6e68128c | 14,222 |
def sms_send(recipient):
"""
Attempt to send SMS message using Twilio's API.
If this fails, use the Summit API to send the SMS message.
"""
body = request.get_data()
try:
message = send_sms_through_provider('Twilio', recipient, body)
except TwilioRestException:
message = send_sms_through_provider('Summit', recipient, body)
return jsonify({
message.id_key: getattr(message, message.id_key),
'from': message.from_,
'to': message.to,
'body': message.body,
}) | 15f6049af35970ccbefc3e75ba726281ed2d3329 | 14,223 |
from typing import Dict
from typing import Optional
def cat_to_sub_cat(
dp: Image, categories_dict_names_as_key: Dict[str, str], cat_to_sub_cat_dict: Optional[Dict[str, str]] = None
) -> Image:
"""
Replace some category with its affiliated sub category of CategoryAnnotations. Suppose your category name is 'foo'
and comes along with sub_category_annotations 'foo_1' and 'foo_2' then this adapter will replace 'foo' with
'foo_1' or 'foo_2', respectively.
:param dp: Image datapoint
:param categories_dict_names_as_key: A dict of all possible categories and their ids
:param cat_to_sub_cat_dict: e.g. {"foo": "sub_cat_1", "bak":"sub_cat_2"}
:return: Image with updated Annotations
"""
if cat_to_sub_cat_dict is None:
return dp
categories_dict = categories_dict_names_as_key
for ann in dp.get_annotation_iter(category_names=list(cat_to_sub_cat_dict.keys())):
sub_cat_type = cat_to_sub_cat_dict.get(ann.category_name, "")
sub_cat = ann.get_sub_category(sub_cat_type)
if sub_cat:
ann.category_name = sub_cat.category_name
ann.category_id = categories_dict[ann.category_name]
return dp | f2c7dbb95e1a47e4a6775db3857a5f37c9c6b5a8 | 14,224 |
def index_to_str(idx):
"""
Generates a string representation from an index array.
:param idx: The NumPy boolean index array.
:return: The string representation of the array.
"""
num_chars = int(idx.shape[0] / 6 + 0.5)
s = ""
for i in range(num_chars):
b = i * 6
six = idx[b:b+6]
c = 0
for j in range(six.shape[0]):
c = c * 2 + int(six[j])
s = s + chr(c+32)
return s | 7f7d49ca31bd70e5f19addaa4913a2cf14382e2d | 14,225 |
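As a worked example, each group of 6 boolean entries is packed into one printable character (offset by 32):
import numpy as np

# [1,0,0,0,0,1] -> binary 100001 = 33 -> chr(33 + 32) == 'A'
idx = np.array([True, False, False, False, False, True])
assert index_to_str(idx) == 'A'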
def ArclinkStatusLine_ClassName():
"""ArclinkStatusLine_ClassName() -> char const *"""
return _DataModel.ArclinkStatusLine_ClassName() | 4589b3c8bae93b28f5c17b8d432813ac504e58e6 | 14,226 |
import re
def build_sfdisk_partition_line(table_type, dev_path, size, details):
"""Build sfdisk partition line using passed details, returns str."""
line = f'{dev_path} : size={size}'
dest_type = ''
source_filesystem = str(details.get('fstype', '')).upper()
source_table_type = ''
source_type = details.get('parttype', '')
# Set dest type
if re.match(r'^0x\w+$', source_type):
# Both source and dest are MBR
source_table_type = 'MBR'
if table_type == 'MBR':
dest_type = source_type.replace('0x', '').lower()
elif re.match(r'^\w{8}-\w{4}-\w{4}-\w{4}-\w{12}$', source_type):
# Source is a GPT type
source_table_type = 'GPT'
if table_type == 'GPT':
dest_type = source_type.upper()
if not dest_type:
# Assuming changing table types, set based on FS
if source_filesystem in cfg.ddrescue.PARTITION_TYPES.get(table_type, {}):
dest_type = cfg.ddrescue.PARTITION_TYPES[table_type][source_filesystem]
line += f', type={dest_type}'
# Safety Check
if not dest_type:
std.print_error(f'Failed to determine partition type for: {dev_path}')
raise std.GenericAbort()
# Add extra details
if details.get('partlabel', ''):
line += f', name="{details["partlabel"]}"'
if details.get('partuuid', '') and source_table_type == table_type:
# Only add UUID if source/dest table types match
line += f', uuid={details["partuuid"].upper()}'
# Done
return line | 8ef87f9c4db06382d5788ab846ae5b8cf1c7d2f4 | 14,227 |
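A sketch of the call, with `details` mimicking lsblk/blkid output; the mapping used when the table type changes comes from `cfg.ddrescue.PARTITION_TYPES`, so only the same-table-type case is shown here.
# Hypothetical source partition details (MBR NTFS partition), staying on an MBR table:
details = {'fstype': 'ntfs', 'parttype': '0x7', 'partlabel': '', 'partuuid': ''}
line = build_sfdisk_partition_line('MBR', '/dev/sda1', '1048576', details)
# -> '/dev/sda1 : size=1048576, type=7'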
import argparse
def parse_args() -> argparse.Namespace:
"""Parse user command line arguments."""
parser = argparse.ArgumentParser(
description='compare annotations in xml format between different image label sets')
parser.add_argument('--verbose', action='store_true')
parser.add_argument('--prune', action='store_true')
parser.add_argument('--check', choices=['relaxed', 'normal', 'strict'], default='normal')
parser.add_argument('--data', required=False, help='xml and image directories', nargs='+')
parser.add_argument('--out', required=False, help='output directory')
    return parser.parse_args()
def get_allocation_window(allocation,
default_start_date=_get_zero_date_utc(),
default_end_date=_get_current_date_utc()):
"""
    Returns a tuple containing the allocation window's start and end dates
"""
if not allocation.start_date:
window_start_date = default_start_date
else:
window_start_date = allocation.start_date
if not allocation.end_date:
window_end_date = default_end_date
else:
window_end_date = allocation.end_date
return window_start_date, window_end_date | 7367eb11eac50829de27315155b934297f6bc684 | 14,229 |
def get_instances_in_service(group, region: str):
"""Get set of instance IDs with ELB "InService" state"""
instances_in_service = set()
# TODO: handle auto scaling groups without any ELB
lb_names = group["LoadBalancerNames"]
if lb_names:
# check ELB status
elb = BotoClientProxy("elb", region)
for lb_name in lb_names:
result = elb.describe_instance_health(LoadBalancerName=lb_name)
for instance in result["InstanceStates"]:
if instance["State"] == "InService":
instances_in_service.add(instance["InstanceId"])
else:
# just use ASG LifecycleState
group = get_auto_scaling_group(
BotoClientProxy("autoscaling", region), group["AutoScalingGroupName"]
)
for instance in group["Instances"]:
if instance["LifecycleState"] == "InService":
instances_in_service.add(instance["InstanceId"])
return instances_in_service | dad36c288c3bacc146343c713238c4eaff41ca9a | 14,230 |
def IDFromUID(s,code=''):
""" Create an ID object from the given string UID.
This can raise an Error in case the string does not map to a
valid UID. code is used in the verification process if given.
"""
id = _EmptyClass()
id.__class__ = ID
id.set_uid(s,code)
return id | 5e37d90313517e11bc914fb57320406653da3e3a | 14,231 |
def ordered_pair_accuracy(labels, predictions, weights=None, name=None):
"""Computes the percentage of correctedly ordered pair.
For any pair of examples, we compare their orders determined by `labels` and
`predictions`. They are correctly ordered if the two orders are compatible.
That is, labels l_i > l_j and predictions s_i > s_j and the weight for this
pair is the weight from the l_i.
Args:
labels: A `Tensor` of the same shape as `predictions`.
predictions: A `Tensor` with shape [batch_size, list_size]. Each value is
the ranking score of the corresponding example.
weights: A `Tensor` of the same shape of predictions or [batch_size, 1]. The
former case is per-example and the latter case is per-list.
name: A string used as the name for this metric.
Returns:
A metric for the accuracy or ordered pairs.
"""
with ops.name_scope(name, 'ordered_pair_accuracy',
(labels, predictions, weights)):
clean_labels, predictions, weights, _ = _prepare_and_validate_params(
labels, predictions, weights)
label_valid = math_ops.equal(clean_labels, labels)
valid_pair = math_ops.logical_and(
array_ops.expand_dims(label_valid, 2),
array_ops.expand_dims(label_valid, 1))
pair_label_diff = array_ops.expand_dims(
clean_labels, 2) - array_ops.expand_dims(clean_labels, 1)
pair_pred_diff = array_ops.expand_dims(
predictions, 2) - array_ops.expand_dims(predictions, 1)
# Correct pairs are represented twice in the above pair difference tensors.
# We only take one copy for each pair.
correct_pairs = math_ops.to_float(pair_label_diff > 0) * math_ops.to_float(
pair_pred_diff > 0)
pair_weights = math_ops.to_float(
pair_label_diff > 0) * array_ops.expand_dims(
weights, 2) * math_ops.to_float(valid_pair)
return math_ops.reduce_mean(correct_pairs * pair_weights) | 5e6c5e0bc480822149a04b5efaffe2474d1a8394 | 14,232 |
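For intuition, here is a plain-Python sketch of the same pair counting (unweighted and without the validity masking), not the TF implementation above.
# Unweighted toy version of ordered-pair accuracy for a single list.
def toy_ordered_pair_accuracy(labels, scores):
    pairs = [(i, j) for i in range(len(labels)) for j in range(len(labels))
             if labels[i] > labels[j]]                       # pairs ordered by label
    correct = sum(scores[i] > scores[j] for i, j in pairs)   # pairs the scores also order correctly
    return correct / len(pairs)

# The labels rank the 3 items as 0 > 2 > 1; the scores invert only the (0, 2) pair.
assert abs(toy_ordered_pair_accuracy([2, 0, 1], [0.1, -1.0, 0.5]) - 2 / 3) < 1e-9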
def samp(*args, **kwargs):
"""
The HTML <samp> element is an element intended to identify sample
output from a computer program. It is usually displayed in the
browser's default monotype font (such as Lucida Console).
"""
return el('samp', *args, **kwargs) | eaf9e69413b3ccafc1f0fed9549efb89b7fb5715 | 14,233 |
import os
def get_fsuae_dir():
"""Get FS-UAE dir"""
user_home_dir = os.path.expanduser('~')
directories = [os.path.join(user_home_dir, _f) for _f in os.listdir(user_home_dir) \
if os.path.isdir(os.path.join(user_home_dir, _f))]
for directory in directories:
fsuae_dir = os.path.join(directory, 'FS-UAE')
fsuae_config_dir = os.path.join(fsuae_dir, 'Configurations')
if os.path.isdir(fsuae_config_dir):
return fsuae_dir
return None | b3cbcea6449c4a8836304bc0cb68f1db502f7a8e | 14,234 |
from typing import List
import os
import urllib.parse as urlparse
def _generate_sections_of_url(url: str) -> 'List[str]':
"""Generate Sections of a URL's path
:param url: The URL you wish to split
:type url: str
:return: A list of url paths
:rtype: List[str]
"""
path = urlparse.urlsplit(url).path
sections = []
temp = ""
while (path != '/'):
temp = os.path.split(path)
if temp[0] == '':
break
path = temp[0]
# Insert at the beginning to keep the proper url order
sections.insert(0, temp[1])
return sections | 06569a7ec2dd459f89f685c5baf4ae0cccacad25 | 14,235 |
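For example:
# Splitting the path of a URL into its segments:
_generate_sections_of_url('https://example.com/docs/api/v2')   # -> ['docs', 'api', 'v2']
_generate_sections_of_url('https://example.com/')              # -> []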
import getpass
import xml.dom
import xml.dom.minidom
import base64
def get_ucs_cco_image_list(username=None, password=None, mdf_id_list=None,
proxy=None):
"""
Gets the list of images available on CCO
Args:
username (str): username to connect to image server
password (str): password to connect to image server
mdf_id_list (list): list of mdf id
proxy (str): proxy used for connection
Returns:
List of UcsCcoImage objects
Example:
image_list = get_ucs_cco_image_list("username", "password")
"""
if username is None:
        username = input("Username: ")
if password is None:
password = getpass.getpass()
ucs_mdf_ids = (283612660, 283853163, 283862063)
url = "https://www.cisco.com/cgi-bin/front.x/ida/locator/locator.pl"
ida_xml_query_header = 'input_xml=<?xml version="1.0" encoding="UTF-8"?>' \
'<locator>' \
'<input>'
ida_xml_query_mdf_id = '<mdfConcept id="%s" name=""/>'
ida_xml_query_footer = '</input></locator>'
# create input_xml string to post as
# data to the respective url via post method
input_xml = ""
input_xml += ida_xml_query_header
if not mdf_id_list:
for mdf_id in ucs_mdf_ids:
input_xml += ida_xml_query_mdf_id % mdf_id
else:
for mdf_id in mdf_id_list:
input_xml += ida_xml_query_mdf_id % mdf_id
input_xml += ida_xml_query_footer
log.debug(input_xml)
# base64encode for Authorization header
credential = base64.b64encode((username + ":" + password).encode()).decode(
'utf-8')
log.debug(credential)
# send request to server
driver = UcsDriver(proxy)
driver.add_header("Authorization", "Basic %s" % credential)
ida_xml_response = driver.post(uri=url,
data=input_xml.encode(),
dump_xml=True,
read=True)
if not ida_xml_response:
raise UcsValidationException("No Response from <%s>" % url)
doc = xml.dom.minidom.parseString(ida_xml_response)
image_node_list = doc.getElementsByTagName("image")
if not image_node_list:
raise UcsValidationException("No Images Found")
# Serialize image nodes in objects
cco_image_list = []
for image_node in image_node_list:
# print image_node.toxml()
image = UcsCcoImage()
image.network_credential = credential
property_node_list = [child_node for child_node in
image_node.childNodes if
child_node.nodeType == child_node.ELEMENT_NODE
and child_node.localName == "property"]
for property_node in property_node_list:
if not property_node.hasAttribute("name"):
continue
if property_node.getAttribute(
"name") == _UcsCcoImageList.IDAC_TAG_VERSION:
image.version = property_node.getAttribute("value")
continue
if property_node.getAttribute(
"name") == _UcsCcoImageList.IDAC_TAG_IMAGE_NAME:
image.image_name = property_node.getAttribute("value")
continue
if property_node.getAttribute(
"name") == _UcsCcoImageList.IDAC_TAG_URL:
image.url = property_node.getAttribute("value")
continue
if property_node.getAttribute(
"name") == _UcsCcoImageList.IDAC_TAG_IP_URL:
image.ip_url = property_node.getAttribute("value")
continue
if property_node.getAttribute(
"name") == _UcsCcoImageList.IDAC_TAG_SIZE:
image.size = int(property_node.getAttribute("value"))
continue
if property_node.getAttribute(
"name") == _UcsCcoImageList.IDAC_TAG_CHECKSUM:
if property_node.getAttribute("type") == "md5":
image.checksum_md5 = property_node.getAttribute("value")
continue
if property_node.getAttribute(
"name") == _UcsCcoImageList.IDAC_TAG_FILE_DESCRIPTION:
image.file_description = property_node.getAttribute("value")
continue
cco_image_list.append(image)
return cco_image_list | faa11a9f6f8e6d0c8fed3dee3d69e1cce25f49b5 | 14,236 |
def knownTypes():
"""Returns all known resource types"""
    return list(loader.typeToExtension.keys())+['WorldModel','MultiPath','Point','Rotation','Matrix3','ContactPoint']
def args_for_blocking_web_whatsapp_com_http():
""" Returns arguments for blocking web.whatsapp.com over http """
return ["-iptables-reset-keyword", "Host: web.whatsapp.com"] | a15a8ebc087467ec1a8e6817366f93df7b0a181b | 14,238 |
def zeta_vector():
"""The :func:`zeta` vector.
:func:`zeta_vector` returns :math:`\zeta` parameters calculated
by formula (5) on page 17 in `the technical paper`_, which is
.. math::
\\bf \zeta= W^{-1}(p-\mu)
"""
return np.linalg.inv(W_matrix()) @ (m_vector() - mu_vector()) | 7650ad5fb443344e82f6e4bd9fd2cba697e7f768 | 14,239 |
def get_fourier_col_name(k, col_name, function_name="sin", seas_name=None):
"""Returns column name corresponding to a particular fourier term, as returned by fourier_series_fcn
:param k: int
fourier term
:param col_name: str
column in the dataframe used to generate fourier series
:param function_name: str
sin or cos
    :param seas_name: str
appended to new column names added for fourier terms
:return: str
column name in DataFrame returned by fourier_series_fcn
"""
# patsy doesn't allow "." in formula term. Replace "." with "_" rather than quoting "Q()" all fourier terms
name = f"{function_name}{k:.0f}_{col_name}"
if seas_name is not None:
name = f"{name}_{seas_name}"
return name | 5c15b52728d0333c9c7df59030d6ead66473c823 | 14,240 |
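For example:
get_fourier_col_name(1, 'tod', function_name='sin', seas_name='daily')  # -> 'sin1_tod_daily'
get_fourier_col_name(3, 'tow', function_name='cos')                     # -> 'cos3_tow'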
import uuid
def unique_filename():
"""Creates a UUID-based unique filename"""
return str(uuid.uuid1()) | ee0d9090a4c5f8a6f0ddef2d670f7beb845a4114 | 14,241 |
import mdtraj
import tempfile
def _create_trajectory(molecule):
"""Create an `mdtraj` topology from a molecule object.
Parameters
----------
molecule: openff.toolkit.topology.Molecule
        The molecule to convert into an `mdtraj` topology.
Returns
-------
mdtraj.Trajectory
The created trajectory.
"""
# Check whether the molecule has a configuration defined, and if not,
# define one.
if molecule.n_conformers <= 0:
molecule.generate_conformers(n_conformers=1)
# We need to save out the molecule and then reload it as the toolkit
# will not always save the atoms in the same order that they are
# present in the molecule object.
with tempfile.NamedTemporaryFile(suffix=".pdb") as file:
molecule.to_file(file.name, "PDB")
# Load the pdb into an mdtraj object.
mdtraj_trajectory = mdtraj.load_pdb(file.name)
# Change the assigned residue name (sometimes molecules are assigned
# an amino acid residue name even if that molecule is not an amino acid,
# e.g. C(CO)N is not Gly) and save the altered object as a pdb.
for residue in mdtraj_trajectory.topology.residues:
_generate_residue_name(residue, molecule.to_smiles())
return mdtraj_trajectory | de9e2a94d266dbdc3201ff74cb2bd27e939850d1 | 14,242 |
def preprocess(image):
"""Load and preprocess image."""
# Create the array of the right shape to feed into the keras model
data = []
size = (96, 96)
image = ImageOps.fit(image, size, Image.ANTIALIAS)
image = np.asarray(image)
x = preprocess_input(image)
data.append(x)
data = np.array(data)
return data | d59eb9e10f6d69e6a1cdcc0d25230f6bd35947d1 | 14,243 |
import torch
def move_to(obj, device):
"""Credit: https://discuss.pytorch.org/t/pytorch-tensor-to-device-for-a-list-of-dict/66283
Arguments:
obj {dict, list} -- Object to be moved to device
device {torch.device} -- Device that object will be moved to
Raises:
TypeError: object is of type that is not implemented to process
Returns:
type(obj) -- same object but moved to specified device
"""
if torch.is_tensor(obj):
return obj.to(device)
elif isinstance(obj, dict):
res = {k: move_to(v, device) for k, v in obj.items()}
return res
elif isinstance(obj, list):
return [move_to(v, device) for v in obj]
elif isinstance(obj, tuple):
return tuple(move_to(list(obj), device))
else:
raise TypeError("Invalid type for move_to") | 97abd322f292fe605a06e8235ecb353ed9a01bf8 | 14,244 |
import numpy as np
def split(C, dims, axis=1):
"""
Splits the columns or rows of C.
    Suppose C = [X_1, X_2, ..., X_B] is an (n x sum_b d_b) matrix.
Returns a list of the constituent matrices as a list.
Parameters
----------
C: array-like, shape (n, sum_b d_b)
        The concatenated block matrix.
dims: list of ints
The dimensions of each matrix i.e. [d_1, ..., d_B]
axis: int [0, 1]
        Which axis to split (1 means columns, 0 means rows)
Output
------
blocks: list of array-like
[X_1, X_2, ..., X_B]
"""
idxs = np.append([0], np.cumsum(dims))
blocks = []
if axis == 1:
assert idxs[-1] == C.shape[1]
for b in range(len(dims)):
blocks.append(C[:, idxs[b]:idxs[b + 1]])
elif axis == 0:
for b in range(len(dims)):
blocks.append(C[idxs[b]:idxs[b + 1], :])
else:
raise ValueError('axis must be either 0 or 1')
return blocks | 2fd55cdde7bc5315f2a78236775c1f36aa8714fd | 14,245 |
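For example, splitting a concatenated block matrix back into its two blocks:
import numpy as np

C = np.hstack([np.ones((4, 2)), np.zeros((4, 3))])  # shape (4, 5)
X1, X2 = split(C, dims=[2, 3], axis=1)
assert X1.shape == (4, 2) and X2.shape == (4, 3)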
def build_binary_value(char_str, bits, alphabet) -> str:
"""
This method converts a string char_str into binary, using n bits per
    character, looking up character indices in the supplied alphabet, or using ASCII when bits=7.
This is almost the inverse method to build_string in the decompress module.
:param char_str: string.
:param bits: number of bits per character.
:param alphabet: Alphabet.
:return: binary value.
"""
if bits == 7:
indices = [ord(char_) for char_ in char_str]
else:
indices = [alphabet.index(char_) for char_ in char_str]
binary_char_list = ["{0:b}".format(index).zfill(bits) for index in indices]
return ''.join(binary_char_list) | 50830dd5cfa3f5428b0946e7382220f9b5ff1915 | 14,246 |
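As a worked example with the ASCII branch (bits=7, so the alphabet argument is not consulted):
# ord('H') == 72 -> '1001000', ord('i') == 105 -> '1101001'
assert build_binary_value('Hi', 7, alphabet=None) == '10010001101001'
# With a custom 2-symbol alphabet and 1 bit per character:
assert build_binary_value('ba', 1, alphabet=['a', 'b']) == '10'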
def computeAnswer(inputData):
"""Compute the answer to the task, from the input data."""
# Do some calculations on the inputData
answer = str(int(inputData) * 2)
# EDIT ME (remove this line once done)
return answer | 3bf90dc1c05ca422ffda70d8a053eb76f6dcc66b | 14,247 |
import re
import itertools
import collections
import numpy as np
import pandas as pd
def label_schema_matching(
df, endpoint=DBpedia, uri_data_model=False, to_lowercase=True, remove_prefixes=True,
remove_punctuation=True, prefix_threshold=1, progress=True, caching=True):
"""A schema matching method by checking for attribute -- rdfs:label between
links.
Args:
df (pd.DataFrame): The dataframe where matching attributes are supposed
to be found.
endpoint (Endpoint, optional): SPARQL Endpoint to be queried. Defaults
to DBpedia.
uri_data_model (bool, optional): If enabled, the URI is directly
queried instead of a SPARQL endpoint. Defaults to False.
to_lowercase (bool, optional): Converts queried strings to lowercase.
Defaults to True.
remove_prefixes (bool, optional): Removes prefices of queried strings.
Defaults to True.
remove_punctuation (bool, optional): Removes punctuation from queried
strings. Defaults to True.
prefix_threshold (int, optional): The number of occurences after which
a prefix is considered "common". Defaults to 1.
progress (bool, optional): If True, progress bars will be shown to
inform the user about the progress made by the process (if
"uri_data_model" = True). Defaults to True.
caching (bool, optional): Turn result-caching for queries issued during
the execution on or off. Defaults to True.
Returns:
pd.DataFrame: Two columns with matching links and a third column with the overlapped label.
"""
matches = pd.DataFrame(
columns=["uri_1", "uri_2", "same_label"])
# Get URIs from the column names
cat_cols = [col for col in df.columns if re.findall("https*:", col)]
cat_cols_stripped = [re.sub(r"^.*http://", "http://", col)
for col in cat_cols]
# transform attributes to sparql values list form
values = "(<"+pd.Series(cat_cols_stripped).str.cat(sep=">) (<")+">) "
if uri_data_model:
# Query these URIs for the label
query = "SELECT ?value ?o WHERE {VALUES (?value) {(<**URI**>)} ?value rdfs:label ?o. FILTER (lang(?o) = 'en') }"
labels = uri_querier(pd.DataFrame(cat_cols_stripped),
0, query, progress = progress, caching=caching).drop_duplicates().set_index("value")
else:
query = "SELECT ?value ?o WHERE {VALUES (?value) {" + values + \
"} ?value rdfs:label ?o. FILTER (lang(?o) = 'en') }"
# query the equivalent classes/properties
labels = endpoint_wrapper(query, endpoint, caching=caching).reset_index(drop=True)
if labels.empty:
return matches
# Get common prefixes
common_prefixes = get_common_prefixes(labels, prefix_threshold)
# Clean the results (i.e. the labels)
labels["o"] = labels["o"].apply(lambda x: clean_string(
x, common_prefixes, to_lowercase, remove_prefixes, remove_punctuation))
# Create a dictionary
if labels.index.name == "value":
labels.reset_index(inplace=True)
labels_dict = labels.set_index("value").T.to_dict("list")
#check if there are no matches
tmp = set()
for v in labels_dict.values():
tmp.update(v)
if len(labels_dict) == len(tmp):
combinations = list(itertools.combinations(cat_cols_stripped,2))
combinations_sorted = [sorted(x) for x in combinations]
matches = pd.DataFrame(combinations_sorted, columns=["uri_1", "uri_2"])
matches["same_label"] = 0
return matches
else:
# Combine the uris that have the same labels into a DataFrame
new_labels_dict = collections.defaultdict(list)
for key, values in labels_dict.items():
for i in values:
new_labels_dict[i].append(key)
df_labels = pd.DataFrame(
list(new_labels_dict.values()), columns=["uri_1", "uri_2"])
#df_labels["same_label"] = pd.DataFrame(list(new_labels_dict.keys()))
df_labels.dropna(inplace=True)
# restrict the order of uris in one row
for _, row in df_labels.iterrows():
new_match = {"uri_1": min(row["uri_1"], row["uri_2"]),
"uri_2": max(row["uri_1"], row["uri_2"]), "same_label": 1}
matches = matches.append(new_match, ignore_index=True)
        # Get back the uris that are not queried by rdfs:label and turn df into dict
no_label = pd.DataFrame({"value": [
x for x in cat_cols_stripped if x not in list(labels["value"])], "o": np.nan})
labels = labels.append(no_label, ignore_index=True)
full_labels_dict = labels.set_index("value").T.to_dict("list")
# Create all unique combinations from the URIs, order them alphabetically and turn them into a DataFrame
combinations = list(itertools.combinations(full_labels_dict.keys(), 2))
combinations_sorted = [sorted(x) for x in combinations]
result = pd.DataFrame(combinations_sorted, columns=["uri_1", "uri_2"])
# merged with the non_matched combinations and drop duplicates
for _, row in result.iterrows():
new_match = {"uri_1": min(row["uri_1"], row["uri_2"]),
"uri_2": max(row["uri_1"], row["uri_2"]),
"same_label": 0}
matches = matches.append(new_match, ignore_index=True)
matches.drop_duplicates(
subset=["uri_1", "uri_2"], inplace=True, ignore_index=True)
return matches | 0577c29206da3c6528b85a4868a6f4db12450122 | 14,248 |
def create_stats_table(stats, yaxes):
""" Create data table with median statistics
Parameters
----------
stats : :obj:`list`
List of lists containing data stats for each iterations from
:func:`ragavi.ragavi.stats_display`
yaxes : :obj:`list`
Contains y-axes for the current plot
Returns
-------
Bokeh column layout containing data table with stats
"""
# number of y-axes
n_ys = len(yaxes)
# number of fields, spws and corrs
n_items = len(stats) // n_ys
stats = np.array(stats)
d_stats = dict(
spw=stats[:n_items, 0],
field=stats[:n_items, 1],
corr=stats[:n_items, 2],
)
# get the data in a more useable format
datas = stats[:, 3].reshape(-1, n_items).T
for y in range(n_ys):
d_stats[yaxes[y]] = datas[:, y]
source = ColumnDataSource(data=d_stats)
cols = "spw field corr".split() + yaxes
columns = [TableColumn(field=x, title=x.capitalize()) for x in cols]
dtab = DataTable(source=source, columns=columns,
fit_columns=True, height=150,
max_height=180, max_width=600,
sizing_mode="stretch_width")
t_title = Div(text="Median Statistics")
logger.debug("Stats table generated")
return column([t_title, dtab], sizing_mode="stretch_both") | 5ace2e38c265049b9d082c2f042b0e4b0edf5ad7 | 14,249 |
from datetime import datetime, timedelta
def get_last_month_date_dmy() -> str:
"""Returns last month date (dd/mm/yyyy for calls report)."""
return (datetime.now() - timedelta(30)).date().strftime("%d/%m/%Y") | b1dc2066c30797195a8e5e03b994d0374c0b5a2f | 14,250 |
def irange(start, end):
"""Inclusive range from start to end (vs. Python insanity.)
irange(1,5) -> 1, 2, 3, 4, 5"""
return range( start, end + 1 ) | 91d4c270b1d9304b4ee82c0cb16aee5d518db3d5 | 14,251 |
import os
import re
def _make_rel_url_path(src, dst):
"""src is a file or dir which wants to adress dst relatively, calculate
the appropriate path to get from here to there."""
srcdir = os.path.abspath(src + "/..")
dst = os.path.abspath(dst)
# For future reference, I hate doing dir munging with string operations
# with a fiery passion, but pragmatism won out over making a lib.. .
common = os.path.commonprefix((srcdir, dst))
reldst = dst[len(common):]
srcdir = srcdir[len(common):]
newpath = re.sub(""".*?[/\\\]|.+$""", "../", srcdir) or "./"
newpath = newpath + reldst
newpath = newpath.replace("\\", "/")
newpath = newpath.replace("//", "/")
return newpath | 39c6d5b4ec42b61d13fe3229f869bc6e1b823ec3 | 14,252 |
def get_required_params(request, expected_params: list, type: str = 'POST') -> dict:
"""Gets the list of params from request, or returns None if ANY is missing.
:param request: The Request
:type request: flask.Request
:param expected_params: The list of expected parameters
:type expected_params: list
:param type: The request type, defaults to POST, can be GET to get query params.
:type type: str
:return: Dictorinary with parameters as keys and values as values
:rtype: dict
"""
res = {}
for param in expected_params:
if type == 'POST':
val = request.form.get(param)
elif type == 'GET':
val = request.args.get(param)
else:
val = None
if not val:
return None
res[param] = val
return res | 2d0b2970464877ed74ecf3bfe0d45325ce3fafe4 | 14,253 |
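A usage sketch inside a Flask test request context; the route and field names are illustrative only.
from flask import Flask, request

app = Flask(__name__)
with app.test_request_context('/login', method='POST', data={'user': 'alice', 'password': 's3cret'}):
    assert get_required_params(request, ['user', 'password']) == {'user': 'alice', 'password': 's3cret'}
    assert get_required_params(request, ['user', 'token']) is None  # 'token' missing -> None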
import tempfile
import os
def md_to_notebook(text):
"""Convert a Markdown text to a Jupyter notebook, using Pandoc"""
tmp_file = tempfile.NamedTemporaryFile(delete=False)
tmp_file.write(text.encode('utf-8'))
tmp_file.close()
pandoc(u'--from markdown --to ipynb -s --atx-headers --wrap=preserve --preserve-tabs', tmp_file.name, tmp_file.name)
with open(tmp_file.name, encoding='utf-8') as opened_file:
notebook = ipynb_reads(opened_file.read(), as_version=4)
os.unlink(tmp_file.name)
return notebook | 91bde8d0a145f50cefbf9e73f021e03612d6d89f | 14,254 |
def bot_send(msg, bot_id, broadcast):
"""
Send a message to a telegram user or group specified on chat_id
chat_id must be a number!
bot_id == bot_username
"""
if broadcast == True:
bot = telegram.Bot(token=config[bot_id]["bot_api_token"])
bot.sendMessage(chat_id=config[bot_id]["group_chat_id"], text=msg)
else:
print(msg)
return None | f5647d489c6c4873a031a7a11f9112164881c2e7 | 14,255 |
import glob
import os
import json
def dataset_source_xnat(bids_dir):
"""
Method to check if the data was downloaded from xnat
:param bids_dir: BIDS Directory
:return: True or False
"""
    dataset_description_file = glob.glob(bids_dir + "/**/dataset_description.json", recursive=True)
    if not dataset_description_file or not os.path.exists(dataset_description_file[0]):
return False
else:
with open(dataset_description_file[0], 'r') as f:
json_contents = json.load(f)
if 'DatasetDOI' not in json_contents:
return False
elif not json_contents['DatasetDOI'].endswith('xnat'):
return False
return True | f0970308a14f5c4f2b152891c115428be666d3f9 | 14,256 |
import random
def split_dataset(dataset, num_train=1200):
"""
Split the dataset into a training and test set.
Args:
dataset: an iterable of Characters.
Returns:
A tuple (train, test) of Character sequences.
"""
all_data = list(dataset)
random.shuffle(all_data)
return all_data[:num_train], all_data[num_train:] | 140a9926ff5dc70e1a2b3ec9887111595c030355 | 14,257 |
def get_constant():
"""
Keep learning rate constant
"""
def update(lr, epoch):
return lr
return update | 1b68c67202c1c22c1aa6a6d532796e2bba0b42ee | 14,258 |
def spatial_pack_nhwc(data, kernel, stride, padding, in_bits, weight_bits,
pack_dtype, out_dtype, dorefa=False):
""" Compute convolution with pack on spatial axes. """
assert data.shape[0].value == 1, "spatial pack convolution only support batch size=1"
data_q = bitpack(data, in_bits, pack_axis=3, bit_axis=4, pack_type=pack_dtype)
kernel_q = bitpack(kernel, weight_bits, pack_axis=2, bit_axis=4, pack_type=pack_dtype)
_, H, W, CI, IB = data_q.shape
KH, KW, _, CO, KB = kernel_q.shape
HPAD, WPAD, _, _ = get_pad_tuple(padding, kernel)
if isinstance(stride, (tuple, list)):
HSTR, WSTR = stride
else:
HSTR, WSTR = stride, stride
HCAT, WCAT = KH-1, KW-1
wkl = _get_workload(data, kernel, stride, padding, out_dtype, "NHWC")
sch = _get_schedule(wkl, "NHWC")
VH = sch.vh
VW = sch.vw
VC = sch.vc
PAD_H = H + 2*HPAD
PAD_W = W + 2*WPAD
OH = (H + 2*HPAD - KH) // HSTR + 1
OW = (W + 2*WPAD - KW) // WSTR + 1
dvshape = (1, PAD_H//(VH*HSTR), PAD_W//(VW*WSTR), VH*HSTR+HCAT, VW*WSTR+WCAT, CI, IB)
kvshape = (CO, KH, KW, CI, VC, KB)
ovshape = (1, OH, OW, CO, VH, VW, VC)
oshape = (1, OH, OW, CO)
if (HPAD != 0 and WPAD != 0):
data_pad = pad(data_q, (0, HPAD, WPAD, 0, 0), name="data_pad")
else:
data_pad = data_q
data_vec = tvm.compute(dvshape, lambda n, h, w, vh, vw, ci, b: \
data_pad[n][h*VH*HSTR+vh][w*VW*WSTR+vw][ci][b], name='data_vec')
kernel_vec = tvm.compute(kvshape, lambda co, dh, dw, ci, vc, b: \
kernel_q[dh][dw][ci][co*VC+vc][b], name='kernel_vec')
ci = tvm.reduce_axis((0, CI), name='ci')
dh = tvm.reduce_axis((0, KH), name='dh')
dw = tvm.reduce_axis((0, KW), name='dw')
b1 = tvm.reduce_axis((0, IB), name='ib')
b2 = tvm.reduce_axis((0, KB), name='kb')
def _conv(n, h, w, co, vh, vw, vc):
b1b2 = (b1+b2).astype(out_dtype)
if dorefa:
return tvm.sum(
(tvm.popcount(data_vec[n, h, w, vh*HSTR+dh, vw*WSTR+dw, ci, b1].astype(out_dtype) &
kernel_vec[co, dh, dw, ci, vc, b2].astype(out_dtype)) -
tvm.popcount(data_vec[n, h, w, vh*HSTR+dh, vw*WSTR+dw, ci, b1].astype(out_dtype) &
~kernel_vec[co, dh, dw, ci, vc, b2]).astype(out_dtype)) << b1b2,
axis=[dh, dw, ci, b1, b2])
return tvm.sum(tvm.popcount(
data_vec[n, h, w, vh*HSTR+dh, vw*WSTR+dw, ci, b1] &
kernel_vec[co, dh, dw, ci, vc, b2]).astype(out_dtype) << b1b2,
axis=[dh, dw, ci, b1, b2])
conv = tvm.compute(ovshape, _conv, name='conv')
return tvm.compute(oshape, lambda n, h, w, co:
conv[n][h//VH][w//VW][co//VC][h%VH][w%VW][co%VC],
name='output_unpack', tag='spatial_bitserial_conv_nhwc') | 9d2527fb9878cc759e5cad0d1df4057cd852bc9f | 14,259 |
import pickle
def load_random_tt_distribution(numAgents, r, pu, samples):
"""
Load a file with a population of random turn-taking values, assuming that it exists
Parameters:
* numAgents -- the desired number of probabilistic agents to include
* r -- the turn-taking resolution
* pu -- the probability that a bit in each usage attempt sequence will be 1
* samples -- the number of random turn-taking values to generate
See Section 3 of:
Raffensperger, P. A., Webb, R. Y., Bones, P. J., and McInnes, A. I. (2012).
A simple metric for turn-taking in emergent communication.
Adaptive Behavior, 20(2):104-116.
"""
filename = get_tt_distribution_filename(numAgents, r, pu, samples)
    # Pickled data must be read in binary mode; the context manager closes the file.
    with open(filename, 'rb') as f:
        return pickle.load(f)
from typing import ContextManager
def fail_after(seconds: float) -> ContextManager[CancelScope]:
"""
Create a cancel scope with the given timeout, and raises an error if it is actually
cancelled.
This function and move_on_after() are similar in that both create a cancel scope
with a given timeout, and if the timeout expires then both will cause CancelledError
to be raised within the scope. The difference is that when the CancelledError
exception reaches move_on_after(), it’s caught and discarded. When it reaches
fail_after(), then it’s caught and TimeoutError is raised in its place.
"""
return fail_at(get_running_loop().time() + seconds) | 917fe4d7d0a599caa855210bd86bb0b57263e71c | 14,261 |
from typing import Union
from typing import Set
from typing import Dict
import copy
def _get_dataset_names_mapping(
names: Union[str, Set[str], Dict[str, str]] = None
) -> Dict[str, str]:
"""Take a name or a collection of dataset names
and turn it into a mapping from the old dataset names to the provided ones if necessary.
Args:
names: A dataset name or collection of dataset names.
When str or Set[str] is provided, the listed names will stay
the same as they are named in the provided pipeline.
When Dict[str, str] is provided, current names will be
mapped to new names in the resultant pipeline.
Returns:
A dictionary that maps the old dataset names to the provided ones.
Examples:
>>> _get_dataset_names_mapping("dataset_name")
{"dataset_name": "dataset_name"} # a str name will stay the same
>>> _get_dataset_names_mapping(set(["ds_1", "ds_2"]))
{"ds_1": "ds_1", "ds_2": "ds_2"} # a Set[str] of names will stay the same
>>> _get_dataset_names_mapping({"ds_1": "new_ds_1_name"})
{"ds_1": "new_ds_1_name"} # a Dict[str, str] of names will map key to value
"""
if names is None:
return {}
if isinstance(names, str):
return {names: names}
if isinstance(names, dict):
return copy.deepcopy(names)
return {item: item for item in names} | df271cb4cd102eb3731e12b8d92fd4cca8ef8145 | 14,262 |
import json
def _json_keyify(args):
""" converts arguments into a deterministic key used for memoizing """
args = tuple(sorted(args.items(), key=lambda e: e[0]))
return json.dumps(args) | 2800a9a0db0cf8d51efbcbeda2c023172f6662f5 | 14,263 |
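For example, two keyword-argument dicts that differ only in insertion order produce the same key:
assert _json_keyify({'b': 1, 'a': 2}) == _json_keyify({'a': 2, 'b': 1}) == '[["a", 2], ["b", 1]]'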
def tgsegsm_vect(time_in, data_in):
"""
Transform data from GSE to GSM.
Parameters
----------
time_in: list of float
Time array.
data_in: list of float
xgse, ygse, zgse cartesian GSE coordinates.
Returns
-------
xgsm: list of float
Cartesian GSM coordinates.
ygsm: list of float
Cartesian GSM coordinates.
zgsm: list of float
Cartesian GSM coordinates.
"""
xgsm, ygsm, zgsm = 0, 0, 0
d = np.array(data_in)
xgse, ygse, zgse = d[:, 0], d[:, 1], d[:, 2]
gd1, gd2, gd3 = cdipdir_vect(time_in)
gst, slong, sra, sdec, obliq = csundir_vect(time_in)
gs1 = np.cos(sra) * np.cos(sdec)
gs2 = np.sin(sra) * np.cos(sdec)
gs3 = np.sin(sdec)
sgst = np.sin(gst)
cgst = np.cos(gst)
ge1 = 0.0
ge2 = -np.sin(obliq)
ge3 = np.cos(obliq)
gm1 = gd1 * cgst - gd2 * sgst
gm2 = gd1 * sgst + gd2 * cgst
gm3 = gd3
gmgs1 = gm2 * gs3 - gm3 * gs2
gmgs2 = gm3 * gs1 - gm1 * gs3
gmgs3 = gm1 * gs2 - gm2 * gs1
rgmgs = np.sqrt(gmgs1**2 + gmgs2**2 + gmgs3**2)
cdze = (ge1 * gm1 + ge2 * gm2 + ge3 * gm3)/rgmgs
sdze = (ge1 * gmgs1 + ge2 * gmgs2 + ge3 * gmgs3)/rgmgs
xgsm = xgse
ygsm = cdze * ygse + sdze * zgse
zgsm = -sdze * ygse + cdze * zgse
return xgsm, ygsm, zgsm | 1c1809c722ae84e2d7bd467f78e9cefddb7cf884 | 14,264 |
def choose_a_pick_naive(numbers_left):
"""
Choose any larger number
:param numbers_left:
:return:
"""
if numbers_left[0] > numbers_left[-1]:
return 0, numbers_left[0]
elif numbers_left[-1] > numbers_left[0]:
return -1, numbers_left[-1]
else:
return 0, numbers_left[0] | 70405a4ad9d1ee1afbec93bea13d7eab3068b42e | 14,265 |
import os
def get_group_names(exp_path, uniquechannel ='Ki_t', fname="trajectoriesDat.csv"):
"""Similar to get_grp_list, but uses trajectoriesDat column names"""
if "_combined" in exp_path:
pattern = 'trajectoriesDat_region'
grp_suffix = 'EMS'
files = os.listdir(exp_path)
trajectories = [x.replace(f'{pattern}_','') for x in files if pattern in x]
grp_numbers = [int(x.replace('.csv','')) for x in trajectories]
grp_numbers.sort()
grp_list = [f'{grp_suffix}-{str(x)}' for x in grp_numbers if x > 0]
if 0 in grp_numbers:
grp_list = grp_list + ['All']
else:
trajectories_cols = pd.read_csv(os.path.join(exp_path, fname), index_col=0,
nrows=0).columns.tolist()
cols = [col for col in trajectories_cols if uniquechannel in col]
if len(cols) != 0:
grp_list = [col.replace(f'{uniquechannel}_', '') for col in cols]
grp_suffix = grp_list[0][:3]
grp_numbers = [int(grp.replace('EMS-', '')) for grp in grp_list]
if len(cols) > 1:
grp_list = grp_list + ['All']
grp_numbers = grp_numbers + [0]
else:
grp_list = None
grp_suffix=None
grp_numbers = None
return grp_list, grp_suffix, grp_numbers | c26bfc7d3769119a4efe92012bd556959e567ecd | 14,266 |
import os
import json
def serializer_roundtrip(serializer, obj):
"""Serializes an object to a file, then deserializes it and returns the result"""
@with_temporary_directory
def helper(tmp_dir, serializer, obj):
"""Helper function: takes care of creating and deleting the temp directory for the output"""
path = os.path.join(tmp_dir, 'out.txt')
with open(path, 'w') as f:
try:
json.dump(serializer.serialize(obj), f)
except ValueError as e:
print("test_serialization.serializer_roundtrip - invalid serialization:")
print(str(serializer.serialize(obj)))
raise e
with open(path, 'r') as f:
return serializer.deserialize(json.load(f))
return helper(serializer, obj) | db4a0d0bd0e2920debb026873f75e2b900b154bd | 14,267 |
def _Run(args, holder, target_https_proxy_arg, release_track):
"""Issues requests necessary to import target HTTPS proxies."""
client = holder.client
resources = holder.resources
target_https_proxy_ref = target_https_proxy_arg.ResolveAsResource(
args,
holder.resources,
default_scope=compute_scope.ScopeEnum.GLOBAL,
scope_lister=compute_flags.GetDefaultScopeLister(client))
data = console_io.ReadFromFileOrStdin(args.source or '-', binary=False)
try:
target_https_proxy = export_util.Import(
message_type=client.messages.TargetHttpsProxy,
stream=data,
schema_path=_GetSchemaPath(release_track))
except yaml_validator.ValidationError as e:
raise compute_exceptions.ValidationError(str(e))
# Get existing target HTTPS proxy.
try:
old_target_https_proxy = target_https_proxies_utils.SendGetRequest(
client, target_https_proxy_ref)
except apitools_exceptions.HttpError as error:
if error.status_code != 404:
raise error
# Target HTTPS proxy does not exist, create a new one.
return _SendInsertRequest(client, resources, target_https_proxy_ref,
target_https_proxy)
if old_target_https_proxy == target_https_proxy:
return
console_io.PromptContinue(
message=('Target Https Proxy [{0}] will be overwritten.').format(
target_https_proxy_ref.Name()),
cancel_on_no=True)
# Populate id and fingerprint fields. These two fields are manually
# removed from the schema files.
target_https_proxy.id = old_target_https_proxy.id
if hasattr(old_target_https_proxy, 'fingerprint'):
target_https_proxy.fingerprint = old_target_https_proxy.fingerprint
# Unspecified fields are assumed to be cleared.
cleared_fields = []
if target_https_proxy.description is None:
cleared_fields.append('description')
if target_https_proxy.serverTlsPolicy is None:
cleared_fields.append('serverTlsPolicy')
if target_https_proxy.authorizationPolicy is None:
cleared_fields.append('authorizationPolicy')
if hasattr(target_https_proxy,
'certificateMap') and target_https_proxy.certificateMap is None:
cleared_fields.append('certificateMap')
if hasattr(target_https_proxy,
'httpFilters') and not target_https_proxy.httpFilters:
cleared_fields.append('httpFilters')
if target_https_proxy.proxyBind is None:
cleared_fields.append('proxyBind')
if target_https_proxy.quicOverride is None:
cleared_fields.append('quicOverride')
if not target_https_proxy.sslCertificates:
cleared_fields.append('sslCertificates')
if target_https_proxy.sslPolicy is None:
cleared_fields.append('sslPolicy')
if target_https_proxy.urlMap is None:
cleared_fields.append('urlMap')
with client.apitools_client.IncludeFields(cleared_fields):
return _SendPatchRequest(client, resources, target_https_proxy_ref,
target_https_proxy) | 26163d575701045e126ec52ae9adbb24fb98a54a | 14,268 |
def get_mono_cell(locus_file, TotalSNPs, TotalBi_SNPs_used):
"""Determine value to add to [0,0] cell"""
TotalBP, Loci_count = totalbp(locus_file)
return int((TotalBi_SNPs_used * TotalBP) / TotalSNPs) - TotalBi_SNPs_used, \
TotalBP, Loci_count | b6890f4a5129eb0892c6af9f5385dba98612776f | 14,269 |
def remove_bad_particles(st, min_rad='calc', max_rad='calc', min_edge_dist=2.0,
check_rad_cutoff=[3.5, 15], check_outside_im=True,
tries=50, im_change_frac=0.2, **kwargs):
"""
Removes improperly-featured particles from the state, based on a
combination of particle size and the change in error on removal.
Parameters
-----------
st : :class:`peri.states.State`
The state to remove bad particles from.
min_rad : Float, optional
All particles with radius below min_rad are automatically deleted.
Set to 'calc' to make it the median rad - 25* radius std.
Default is 'calc'.
max_rad : Float, optional
All particles with radius above max_rad are automatically deleted.
Set to 'calc' to make it the median rad + 15* radius std.
Default is 'calc'.
min_edge_dist : Float, optional
All particles within min_edge_dist of the (padded) image
edges are automatically deleted. Default is 2.0
check_rad_cutoff : 2-element list of floats, optional
Particles with radii < check_rad_cutoff[0] or > check_rad_cutoff[1]
are checked if they should be deleted. Set to 'calc' to make it the
median rad +- 3.5 * radius std. Default is [3.5, 15].
check_outside_im : Bool, optional
If True, checks if particles located outside the unpadded image
should be deleted. Default is True.
tries : Int, optional
The maximum number of particles with radii < check_rad_cutoff
to try to remove. Checks in increasing order of radius size.
Default is 50.
    im_change_frac : Float, optional
Number between 0 and 1. If removing a particle decreases the
error by less than im_change_frac*the change in the image, then
the particle is deleted. Default is 0.2
Returns
-------
removed: Int
The cumulative number of particles removed.
"""
is_near_im_edge = lambda pos, pad: (((pos + st.pad) < pad) | (pos >
np.array(st.ishape.shape) + st.pad - pad)).any(axis=1)
# returns True if the position is within 'pad' of the _outer_ image edge
removed = 0
attempts = 0
n_tot_part = st.obj_get_positions().shape[0]
    q10 = int(0.1 * n_tot_part)  # 10th percentile
r_sig = np.sort(st.obj_get_radii())[q10:-q10].std()
r_med = np.median(st.obj_get_radii())
if max_rad == 'calc':
max_rad = r_med + 15*r_sig
if min_rad == 'calc':
min_rad = r_med - 25*r_sig
if check_rad_cutoff == 'calc':
check_rad_cutoff = [r_med - 7.5*r_sig, r_med + 7.5*r_sig]
# 1. Automatic deletion:
rad_wrong_size = np.nonzero(
(st.obj_get_radii() < min_rad) | (st.obj_get_radii() > max_rad))[0]
near_im_edge = np.nonzero(is_near_im_edge(st.obj_get_positions(),
min_edge_dist - st.pad))[0]
delete_inds = np.unique(np.append(rad_wrong_size, near_im_edge)).tolist()
delete_poses = st.obj_get_positions()[delete_inds].tolist()
message = ('-'*27 + 'SUBTRACTING' + '-'*28 +
'\n Z\t Y\t X\t R\t|\t ERR0\t\t ERR1')
with log.noformat():
CLOG.info(message)
for pos in delete_poses:
ind = st.obj_closest_particle(pos)
old_err = st.error
p, r = st.obj_remove_particle(ind)
p = p[0]
r = r[0]
part_msg = '%2.2f\t%3.2f\t%3.2f\t%3.2f\t|\t%4.3f \t%4.3f' % (
tuple(p) + (r,) + (old_err, st.error))
with log.noformat():
CLOG.info(part_msg)
removed += 1
# 2. Conditional deletion:
check_rad_inds = np.nonzero((st.obj_get_radii() < check_rad_cutoff[0]) |
(st.obj_get_radii() > check_rad_cutoff[1]))[0]
if check_outside_im:
check_edge_inds = np.nonzero(
is_near_im_edge(st.obj_get_positions(), st.pad))[0]
check_inds = np.unique(np.append(check_rad_inds, check_edge_inds))
else:
check_inds = check_rad_inds
check_inds = check_inds[np.argsort(st.obj_get_radii()[check_inds])]
tries = np.min([tries, check_inds.size])
check_poses = st.obj_get_positions()[check_inds[:tries]].copy()
for pos in check_poses:
old_err = st.error
ind = st.obj_closest_particle(pos)
killed, p, r = check_remove_particle(
st, ind, im_change_frac=im_change_frac)
if killed:
removed += 1
check_inds[check_inds > ind] -= 1 # cleaning up indices....
delete_poses.append(pos)
part_msg = '%2.2f\t%3.2f\t%3.2f\t%3.2f\t|\t%4.3f \t%4.3f' % (
p + r + (old_err, st.error))
with log.noformat():
CLOG.info(part_msg)
return removed, delete_poses | 09d767cc2513b542a99f8a846c866a1a8902ebf5 | 14,270 |
def _pyside_import_module(moduleName):
""" The import for PySide
"""
pyside = __import__('PySide', globals(), locals(), [moduleName], -1)
return getattr(pyside, moduleName) | 7b3b18214d12322e230c78678f3ef4fdc1717f10 | 14,271 |
import os
def get_models(args):
"""
:param args: argparse.Namespace
commandline arguments
:return: dict of BaseReport
"""
models = dict()
if os.path.isfile(args.cm_input):
models[args.cm_input] = CheckmarxReport
if os.path.isfile(args.sn_input):
models[args.sn_input] = SnykReport
return models | effbf1c87a0325470204ea82db078ee108993399 | 14,272 |
import unicodedata
def sanitize_str(value: str) -> str:
"""Removes Unicode control (Cc) characters EXCEPT for tabs (\t), newlines (\n only), line separators (U+2028) and paragraph separators (U+2029)."""
return "".join(ch for ch in value if unicodedata.category(ch) != 'Cc' and ch not in {'\t', '\n', '\u2028', '\u2029'}) | 5b5eae2b377a834e377a8bf7bcd7cefc2278c2f7 | 14,273 |
def get_tariff_estimated(reporter,
partner='000',
product='all',
year=world_trade_data.defaults.DEFAULT_YEAR,
name_or_id='name'):
"""Tariffs (estimated)"""
return _get_data(reporter, partner, product, year,
datatype='aveestimated', datasource='trn', name_or_id=name_or_id) | d4fd81e640d014bf52725a1274d0f3a2c0eebeba | 14,274 |
def task_result_api_view(request, taskid):
"""
Get task `state` and `result` from API endpoint.
    Use case: you want to provide some user with async feedback about
    the status of some task.
Example:
# urls.py
urlpatterns = [
url(r'^api/task/result/(.+)/', task_result_api_view),
...
]
# some_views.py
context = {}
# ...
async_request = some_important_task.delay(...)
# ...
context['async_task_id'] = str(async_request.id)
Now we can check the state and result form Front-end side.
"""
result = AsyncResult(taskid)
response = {'task-id': taskid, 'state': result.state}
response.update({'result': _safe_result(result.result)})
return JsonResponse(response) | 94e46b3282a1f69e16a8979906b118d8684e1799 | 14,275 |
def get_horizon_coordinates(fp_pointings_spherical):
"""
It converts from spherical to Horizon coordinates, with the conventions:
    Altitude = np.pi / 2 - zenith angle (theta)
Azimuth = 2 * np.pi - phi
Parameters
----------
fp_pointings_spherical : numpy array of shape (..., 2), radians
They are the spherical coordinates (theta, phi) that will be converted.
Returns
-------
out : numpy array of shape (..., ), numpy array of shape (..., )
"""
Alt = np.pi/2 - fp_pointings_spherical[..., 0] #rad
Az = 2 * np.pi - fp_pointings_spherical[..., 1] #rad
return Alt, Az | 7fbc11fe6195129d9c18c0161fe59fab6e31a29c | 14,276 |
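# Usage sketch: assumes get_horizon_coordinates from the snippet above is in scope.
# A zenith angle of pi/2 maps to the horizon (Alt = 0), and phi = pi maps to Az = pi.
import numpy as np
sph = np.array([[np.pi / 2, np.pi]])  # (theta, phi) in radians
alt, az = get_horizon_coordinates(sph)
assert np.allclose(alt, 0.0) and np.allclose(az, np.pi)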
import json
from numpy import array, where
from numpy.random import randint
def indexed_chunking_random_test(f_list=indexed_chunking_f_list,
x=None,
return_debug_info=False,
verbose=0):
"""made it so you can just run a function (several times) to test, but if you want to see print outs use verbose=1,
and if you want to get a bunch of variables that will then allow you to diagnose things,
specify return_debug_info=True"""
if x is None:
x = randint(10, 1000)
if isinstance(x, int):
n_pts = x
x = sorted(randint(1, 100000, n_pts))
assert sorted(x) == x, "x is not sorted!"
kwargs = random_kwargs_for_list(x)
if verbose:
print(("x: {} elements. min: {}, max: {}".format(len(x), x[0], x[-1])))
t = {k: v for k, v in kwargs.items() if k != 'key'}
if verbose:
print(("kwargs: {}\n".format(json.dumps(t, indent=2))))
b = list(f_list[0](iter(x), **kwargs))
bb = None
all_good = True
idx_where_different = array([])
for i, f in enumerate(f_list[1:], 1):
bb = list(f(iter(x), **kwargs))
all_good = True
if len(b) != len(bb):
all_good &= False
if verbose:
print(("{}: Not the same length! Base had {} elements, comp has {}".format(
i, len(b), len(bb))))
idx_where_different = where([x[0] != x[1] for x in zip(b, bb)])[0]
if len(idx_where_different) > 0:
all_good &= False
if verbose:
print(("{} values where different".format(len(idx_where_different))))
if not all_good:
if verbose:
print("STOPPING HERE: Check the variables for diagnosis")
break
print("")
if all_good:
if verbose:
print("All good!")
if return_debug_info:
return all_good, idx_where_different, x, b, bb, kwargs
else:
return all_good | b15bc0cf51c6a5c9fc7de0a72af621441afcef1e | 14,277 |
from typing import Any
from typing import Type as TypeOf  # assumption: TypeOf aliases typing.Type in the original module
def patch_object_type() -> None:
"""
Patches `graphene.ObjectType` to make it indexable at runttime. This is necessary for it be
generic at typechecking time.
"""
# Lazily import graphene as it is actually an expensive thing to do and we don't want to slow down things at
# type-checking time.
from graphene import ObjectType # pylint: disable=import-outside-toplevel
ObjectTypeMetaclass = type(ObjectType)
def __getitem__(cls: TypeOf[TypeOf[ObjectType]], _: Any) -> TypeOf[TypeOf[ObjectType]]:
return cls
ObjectTypeMetaclass.__getitem__ = __getitem__ | 4ed77870c9df03d072b55bc3a919c59d3e761f38 | 14,278 |
import os
def GetFiles(dir, dirname):
"""Given a directory and the dirname of the directory, recursively
traverse the directory and return a list of tuples containing
(filename, relative filename), where 'relative filename' is
generated using GetZipPath.
"""
files = []
for (dirpath, dirnames, filenames) in os.walk(dir, True):
# skip emacs backup files
files.extend([os.path.join(dirpath, f) for f in filenames
if not f.endswith("~")])
# skip CVS and .svn dirs
# funky slice assignment here
dirnames[:] = [d for d in dirnames if d != 'CVS' and d != '.svn']
return [(f, GetZipPath(f, dir, dirname)) for f in files] | 0b012cd0234f051dd68f9d6858dec5e33bdfd18c | 14,279 |
import time
def date_format(time_obj=time, fmt='%Y-%m-%d %H:%M:%S') -> str:
"""
    Convert a time to a formatted string.
:param time_obj:
:param fmt:
:return:
"""
_tm = time_obj.time()
_t = time.localtime(_tm)
return time.strftime(fmt, _t) | 0a614763b040587b80743ffacfff6bbb0a6c7365 | 14,280 |
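# Usage sketch: assumes date_format from the snippet above is in scope.
# Format the current local time with the default pattern, then date-only.
print(date_format())                # e.g. '2024-01-31 12:34:56'
print(date_format(fmt='%Y-%m-%d'))  # e.g. '2024-01-31'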
from typing import Optional
def clean_pin_cite(pin_cite: Optional[str]) -> Optional[str]:
"""Strip spaces and commas from pin_cite, if it is not None."""
if pin_cite is None:
return pin_cite
return pin_cite.strip(", ") | 9c495fcc4f1cf192c1358f50fef569c4d6b36290 | 14,281 |
def instrument_code_to_name(rwc_instrument_code):
"""Use the rwc_instrument_map.json to convert an rwc_instrument_code
to its instrument name.
Parameters
----------
rwc_instrument_code : str
Two character instrument code
Returns
-------
instrument_name : str
Full instrument name, if it exists, else None
"""
code = RWC_INSTRUMENT_MAP.get(rwc_instrument_code, None)
return code if code else None | 9059bb69b86e5c8e326b5c51a745e61c15c41389 | 14,282 |
def record_time(ad, fallback_to_launch=True):
"""
RecordTime falls back to launch time as last-resort and for jobs in the queue
For Completed/Removed/Error jobs, try to update it:
- to CompletionDate if present
- else to EnteredCurrentStatus if present
- else fall back to launch time
"""
if ad["JobStatus"] in [3, 4, 6]:
if ad.get("CompletionDate", 0) > 0:
return ad["CompletionDate"]
elif ad.get("EnteredCurrentStatus", 0) > 0:
return ad["EnteredCurrentStatus"]
if fallback_to_launch:
return _LAUNCH_TIME
return 0 | 517eb369f9d04048bce87b4301761f2b3b629303 | 14,283 |
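# Usage sketch: assumes record_time from the snippet above is in scope
# (the ads here are plain dicts standing in for ClassAds).
completed_ad = {"JobStatus": 4, "CompletionDate": 1700000000, "EnteredCurrentStatus": 1699999000}
queued_ad = {"JobStatus": 1}
assert record_time(completed_ad) == 1700000000                # completed: CompletionDate wins
assert record_time(queued_ad, fallback_to_launch=False) == 0  # queued: no fallback requested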
def getTeamCompatibility(mentor, team):
"""
Gets a "compatibility score" between a mentor and a team (used as the weight in the later optimization problem)
Uses the functions defined above to compute different aspects of the score
"""
score = 0
# find value from overlapping availabilities
# value may differ depending on transportation type used, so try them all
bestOverlap = -noOverlapCost # baseline to beat is no overlap at all
for transitType in range(numTypesTransit):
# check if this transit type is better than previous best and update if needed
bestOverlap = max(bestOverlap, getSingleOverlapValue(mentor, team, transitType))
score += bestOverlap
# find value from team type matches
score += getTeamTypeValue(mentor, team)
# find value from team requests / requirements
score += getTeamRequestedValue(mentor, team)
return score | a9cd0c65b4419051045706852c3d64baff787e4f | 14,284 |
def mean_edges(graph, feat, weight=None):
"""Averages all the values of edge field :attr:`feat` in :attr:`graph`,
optionally multiplies the field by a scalar edge field :attr:`weight`.
Parameters
----------
graph : DGLGraph
The graph.
feat : str
The feature field.
weight : optional, str
        The weight field. If None, no weighting will be performed;
        otherwise, each edge feature in field :attr:`feat` is weighted by
        field :attr:`weight` when calculating the mean. The weight feature
        associated with :attr:`graph` should be a tensor of shape
        ``[graph.number_of_edges(), 1]``.
Returns
-------
tensor
The averaged tensor.
Notes
-----
Return a stacked tensor with an extra first dimension whose size equals
batch size of the input graph.
The i-th row of the stacked tensor contains the readout result of
the i-th graph in the batched graph. If a graph has no edges,
a zero tensor with the same shape is returned at the corresponding row.
Examples
--------
>>> import dgl
>>> import torch as th
Create two :class:`~dgl.DGLGraph` objects and initialize their
edge features.
>>> g1 = dgl.DGLGraph() # Graph 1
>>> g1.add_nodes(2)
>>> g1.add_edges([0, 1], [1, 0])
>>> g1.edata['h'] = th.tensor([[1.], [2.]])
>>> g1.edata['w'] = th.tensor([[3.], [6.]])
>>> g2 = dgl.DGLGraph() # Graph 2
>>> g2.add_nodes(3)
>>> g2.add_edges([0, 1, 2], [1, 2, 0])
>>> g2.edata['h'] = th.tensor([[1.], [2.], [3.]])
Average over edge attribute :attr:`h` without weighting for each graph in a
batched graph.
>>> bg = dgl.batch([g1, g2], edge_attrs='h')
>>> dgl.mean_edges(bg, 'h')
tensor([[1.5000], # (1 + 2) / 2
[2.0000]]) # (1 + 2 + 3) / 3
Sum edge attribute :attr:`h` with normalized weight from edge attribute :attr:`w`
for a single graph.
>>> dgl.mean_edges(g1, 'h', 'w') # h1 * (w1 / (w1 + w2)) + h2 * (w2 / (w1 + w2))
tensor([[1.6667]]) # 1 * (3 / (3 + 6)) + 2 * (6 / (3 + 6))
See Also
--------
sum_nodes
mean_nodes
sum_edges
"""
return _mean_on(graph, 'edges', feat, weight) | 8219a4f543fe0903e3a9b313fd3cc142435da788 | 14,285 |
from typing import Optional
async def update_config_file(config: ConfigDTO, reboot_processor: Optional[bool] = True):
"""
Overwrites the configuration used by the processor.
"""
config_dict = map_to_file_format(config)
success = update_config(config_dict, reboot_processor)
if not success:
return handle_response(config_dict, success)
return map_config(extract_config(), "") | 165c2f59056ce0d71b237897e7379f517b158dc5 | 14,286 |
import requests
def integration_session(scope="session"):
"""
creates a Session object which will persist over the entire test run ("session").
http connections will be reused (higher performance, less resource usage)
Returns a Session object
"""
s = requests.sessions.Session()
s.headers.update(test_headers)
return s | c002b6d7875be41355990efe0bb10712661f50fe | 14,287 |
import json
def get_json_dump(json_object, indent=4, sort_keys=False):
""" Short handle to get a pretty printed str from a JSON object. """
return json.dumps(json_object, indent=indent, sort_keys=sort_keys) | 505548cdf972ef891b7bcc3bcd7be3347769faec | 14,288 |
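# Usage sketch: assumes get_json_dump from the snippet above is in scope.
print(get_json_dump({"b": 1, "a": [2, 3]}, indent=2, sort_keys=True))
# {
#   "a": [
#     2,
#     3
#   ],
#   "b": 1
# }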
from sympy import Poly, S, sympify
def number_of_real_roots(f, *gens, **args):
"""Returns the number of distinct real roots of `f` in `(inf, sup]`.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> from sympy.polys.polyroots import number_of_real_roots
>>> f = Poly(x**2 - 1, x)
Count real roots in the (-oo, oo) interval:
>>> number_of_real_roots(f)
2
Count real roots in the (0, 2) interval:
>>> number_of_real_roots(f, inf=0, sup=2)
1
Count real roots in the (2, oo) interval:
>>> number_of_real_roots(f, inf=2)
0
References
==========
.. [Davenport88] J.H. Davenport, Y. Siret, E. Tournier, Computer
Algebra Systems and Algorithms for Algebraic Computation,
Academic Press, London, 1988, pp. 124-128
"""
def sign_changes(seq):
count = 0
        for i in range(1, len(seq)):
if (seq[i-1] < 0 and seq[i] >= 0) or \
(seq[i-1] > 0 and seq[i] <= 0):
count += 1
return count
F = Poly(f, *gens, **args)
if not F.is_Poly:
return 0
if F.is_multivariate:
raise ValueError('multivariate polynomials not supported')
if F.degree() < 1:
return 0
inf = args.get('inf', None)
if inf is not None:
inf = sympify(inf)
if not inf.is_number:
raise ValueError("Not a number: %s" % inf)
elif abs(inf) is S.Infinity:
inf = None
sup = args.get('sup', None)
if sup is not None:
sup = sympify(sup)
if not sup.is_number:
raise ValueError("Not a number: %s" % sup)
elif abs(sup) is S.Infinity:
sup = None
sturm = F.sturm()
if inf is None:
signs_inf = sign_changes([ s.LC()*(-1)**s.degree() for s in sturm ])
else:
signs_inf = sign_changes([ s.eval(inf) for s in sturm ])
if sup is None:
signs_sup = sign_changes([ s.LC() for s in sturm ])
else:
signs_sup = sign_changes([ s.eval(sup) for s in sturm ])
return abs(signs_inf - signs_sup) | d0ed0923aba4e5749a5b5baf267914ea29800c6f | 14,289 |
def heap_sort(arr: list):
"""
Heap sorting a list. Big-O: O(n log n).
@see https://www.geeksforgeeks.org/heap-sort/
"""
def heapify(sub: list, rdx: int, siz: int):
"""
Heapifying range between rdx and size ([rdx:siz]).
@param sub: a slice of list.
@param rdx: root/parent index to start.
@param siz: size of heap.
"""
largest = ndx = rdx # assuming the root is the largest
while ndx < siz:
l_index = 2 * ndx + 1 # child index at left = 2*i + 1
r_index = 2 * ndx + 2 # child index at right = 2*i + 2
# reset largest index if left child exists and is greater than root.
if l_index < siz and sub[ndx] < sub[l_index]:
largest = l_index
# check if right child is greater than the value at the largest index.
if r_index < siz and sub[largest] < sub[r_index]:
largest = r_index
# change root, if needed
if largest != ndx:
sub[ndx], sub[largest] = sub[largest], sub[ndx] # swap
ndx = largest # heapify the root.
continue
return
pass
n = len(arr)
# build a max heap.
parent = n // 2 - 1 # the last parent (that can have children)
for i in range(parent, -1, -1):
heapify(arr, i, n)
# extract elements one by one.
for i in range(n-1, 0, -1):
arr[i], arr[0] = arr[0], arr[i] # swap
heapify(arr, 0, i)
return arr | 9b53f3027804cab16c9850d4858377f49afe7bbf | 14,290 |
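# Usage sketch: assumes heap_sort from the snippet above is in scope.
# The list is sorted in place and also returned.
data = [9, 4, 7, 1, 3, 8]
assert heap_sort(data) == [1, 3, 4, 7, 8, 9]
assert data == [1, 3, 4, 7, 8, 9]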
def find_max_path(triangle):
"""
Find maximum-sum path from top of triangle to bottom
"""
# Start by copying the values
sums = [[x for x in row] for row in triangle]
# Efficient algorithm: start at the bottom and work our way up, computing max sums
for reverse_index, row in enumerate(reversed(sums)):
if reverse_index == 0:
# Easy: max value for subpaths from last row is cell value itself
continue
# Now we need to take sum of each cell and max of two subpaths
row_below = sums[-reverse_index]
for col_index, col in enumerate(row):
left = row_below[col_index]
right = row_below[col_index + 1]
row[col_index] = col + max(left, right)
return sums[0][0] | 1eb0afd076c455e67eacc867d04020ae82c68936 | 14,291 |
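# Usage sketch: assumes find_max_path from the snippet above is in scope.
# Classic small example: the best top-to-bottom path is 3 + 7 + 4 + 9 = 23.
triangle = [
    [3],
    [7, 4],
    [2, 4, 6],
    [8, 5, 9, 3],
]
assert find_max_path(triangle) == 23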
def plot_partregress(results, exog_idx=None, xnames=None, grid=None, fig=None):
"""Plot partial regression for a set of regressors.
Parameters
----------
results : results instance
A regression model results instance
exog_idx : None or list of int
(column) indices of the exog used in the plot, default is all.
xnames : None or list of strings
Names for the numbers given in exog_idx. Default is
results.model.exog_names.
grid : None or tuple of int (nrows, ncols)
If grid is given, then it is used for the arrangement of the subplots.
If grid is None, then ncol is one, if there are only 2 subplots, and
the number of columns is two otherwise.
fig : Matplotlib figure instance, optional
If given, this figure is simply returned. Otherwise a new figure is
created.
Returns
-------
fig : Matplotlib figure instance
If `fig` is None, the created figure. Otherwise `fig` itself.
Notes
-----
A subplot is created for each explanatory variable given by exog_idx.
The partial regression plot shows the relationship between the response
and the given explanatory variable after removing the effect of all other
explanatory variables in exog.
See Also
--------
plot_partregress_ax : Plot partial regression for a single regressor.
plot_ccpr
References
----------
See http://www.itl.nist.gov/div898/software/dataplot/refman1/auxillar/partregr.htm
"""
fig = utils.create_mpl_fig(fig)
#maybe add option for using wendog, wexog instead
y = results.model.endog
exog = results.model.exog
k_vars = exog.shape[1]
#this function doesn't make sense if k_vars=1
if xnames is None:
exog_idx = range(k_vars)
xnames = results.model.exog_names
else:
exog_idx = []
for name in xnames:
exog_idx.append(results.model.exog_names.index(name))
if not grid is None:
nrows, ncols = grid
else:
if len(exog_idx) > 2:
nrows = int(np.ceil(len(exog_idx)/2.))
ncols = 2
title_fontsize = 'small'
else:
nrows = len(exog_idx)
ncols = 1
title_fontsize = None
for i,idx in enumerate(exog_idx):
        others = list(range(k_vars))
others.pop(idx)
exog_others = exog[:, others]
ax = fig.add_subplot(nrows, ncols, i+1)
plot_partregress_ax(y, exog[:, idx], exog_others, ax=ax,
varname=xnames[i])
return fig | c858b08b732bcd4b325c548ba59bed76316b5551 | 14,292 |
def ufloats_overlap_range(ufloats, vmin, vmax):
"""Return whether the +/- 1 sigma range overlaps the value range."""
vals = []
sigmas = []
for val in ufloats:
if isinstance(val, float):
vals.append(val)
sigmas.append(0)
else:
vals.append(val.nominal_value)
sigmas.append(val.std_dev)
vals = np.array(vals)
sigmas = np.array(sigmas)
    # an interval [val - sigma, val + sigma] overlaps [vmin, vmax] only if its lower
    # edge is below vmax and its upper edge is above vmin
    return ((vals - sigmas <= vmax) & (vals + sigmas >= vmin)).all()
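# Usage sketch: assumes ufloats_overlap_range from the snippet above is in scope;
# plain floats are treated as having zero standard deviation.
assert ufloats_overlap_range([1.0, 2.0], vmin=0.5, vmax=2.5)
assert not ufloats_overlap_range([1.0, 5.0], vmin=0.5, vmax=2.5)  # 5.0 lies entirely above the range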
from typing import Tuple
import multiprocessing
import itertools
import numpy as np
import networkx as nx
def exact_qaoa_values_on_grid(
graph: nx.Graph,
xlim: Tuple[float, float] = (0, np.pi / 2),
ylim: Tuple[float, float] = (-np.pi / 4, np.pi / 4),
x_grid_num: int = 20,
y_grid_num: int = 20,
num_processors: int = 1,
dtype=np.complex128):
"""Compute exact p=1 QAOA values on a grid.
Args:
graph: The graph representing the Hamiltonian.
xlim: The range of values for gamma.
ylim: The range of values for beta.
        x_grid_num: The number of grid points for gamma.
        y_grid_num: The number of grid points for beta. The total number of
            points evaluated will be x_grid_num * y_grid_num.
        num_processors: The number of worker processes used for the evaluation.
Returns:
A 2-dimensional Numpy array containing the QAOA values.
The rows index the betas and the columns index the gammas.
"""
a, b = xlim
c, d = ylim
gammas = np.linspace(a, b, x_grid_num)
betas = np.linspace(c, d, y_grid_num)
HamC = create_ZZ_HamC(graph, dtype=dtype)
N = graph.number_of_nodes()
with multiprocessing.Pool(num_processors) as pool:
vals = pool.starmap(_ising_qaoa_expectation,
[(N, HamC, x, True, dtype)
for x in itertools.product(gammas, betas)])
return np.reshape(np.array(vals), (x_grid_num, y_grid_num)).T | 1ac1f93a9716e687983c3c557f5ee19cea8afb2d | 14,294 |
def typecheck_eq(expr, ctxt=[]):
"""(par (A) (= A A Bool :chainable))
(par (A) (distinct A A Bool :pairwise))
"""
typ = typecheck_expr(expr.subterms[0], ctxt)
for term in expr.subterms[1:]:
t = typecheck_expr(term, ctxt)
if t != typ:
if not (is_subtype(t, typ) or is_subtype(typ, t)):
raise TypeCheckError(expr, term, typ, t)
return BOOLEAN_TYPE | 78cbf7b3510b30adde74d03a9f0168fdbfbc6bab | 14,295 |
def precision(x, for_sum=False):
"""
    This function returns the precision of a given datatype using a comparable numpy array
"""
if not for_sum:
return np.finfo(x.dtype).eps
else:
return np.finfo(x.dtype).eps * x.size | c8d634638c0c8ce43c024d9c342e71adae6534bc | 14,296 |
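# Usage sketch: assumes precision from the snippet above is in scope.
import numpy as np
x = np.ones(1000, dtype=np.float32)
print(precision(x))                 # machine epsilon for float32, ~1.19e-07
print(precision(x, for_sum=True))   # epsilon scaled by the array size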
def parse_line(line, line_count, retries):
"""Coordinate retrieval of scientific name or taxonomy ID.
Read line from input file, calling functions as appropriate to retrieve
scientific name or taxonomy ID.
:param line: str, line from input file
:line_count: number of line in input file - enable tracking if error occurs
:param retries: parser argument, maximum number of retries excepted if network error encountered
Return list of genus, species and taxonomy ID """
line_data = []
# For taxonomy ID retrieve scientific name
if line.startswith("NCBI:txid"):
gs_name = get_genus_species_name(line[9:], line_count, retries)
line_data = gs_name.split(" ", 1)
line_data.append(line)
# For scientific name retrieve taxonomy ID
else:
tax_id = get_tax_id(line, line_count, retries)
line_data = line.split()
line_data.append(tax_id)
return line_data | 895ae24672221fe78654f4c2796a419640c19d42 | 14,297 |
def prop_rotate(old_image, theta, **kwargs):
"""Rotate and shift an image via interpolation (bilinear by default)
Parameters
----------
old_image : numpy ndarray
Image to be rotated
theta : float
Angle to rotate image in degrees counter-clockwise
Returns
-------
new_image : numpy ndarray
Returns rotated & shifted image with the same dimensions as the input image
    Other Parameters
    ----------------
XC, YC : float
Center of rotation in image pixels; (0,0) is at center of first pixel;
if not specified, the center of the image is assumed to be the center
of rotation
XSHIFT, YSHIFT : float
Amount to shift rotated image in pixels
MISSING : float
Value to set extrapolated pixels.
"""
if old_image.dtype == np.dtype("complex128") or old_image.dtype == np.dtype("complex64"):
is_complex = 1
else:
is_complex = 0
new_image = np.copy(old_image)
if proper.use_cubic_conv:
n = old_image.shape[0]
if not "XC" in kwargs:
XC = int(n / 2)
if not "YC" in kwargs:
YC = int(n / 2)
if not "XSHIFT" in kwargs:
xshift = 0.
if not "YSHIFT" in kwargs:
yshift = 0.
if not "MISSING" in kwargs:
missing = 0.
t = -theta * np.pi / 180.
x0 = np.arange(n, dtype = np.float64) - XC - xshift
for j in range(n):
y0 = j - YC - yshift
xi = x0 * np.cos(t) - y0 * np.sin(t) + YC
yi = x0 * np.sin(t) + y0 * np.cos(t) + XC
new_image[j,:] = proper.prop_cubic_conv(old_image, xi, yi, GRID = False)
else:
theta = -1. * theta
if is_complex:
new_image.real = rotate(old_image.real, theta, reshape = False, prefilter = True)
new_image.imag = rotate(old_image.imag, theta, reshape = False, prefilter = True)
else:
new_image = rotate(old_image, theta, reshape = False, prefilter = False)
return new_image | b7c94899aba6dc5507bba1f1231954740dfbae1e | 14,298 |
from typing import Dict
from typing import Any
from typing import Text
import tensorflow as tf
def append_tf_example(data: Dict[Text, Any],
schema: Dict[Text, Any]) -> tf.train.Example:
"""Add tf example to row"""
feature = {}
for key, value in data.items():
data_type = schema[key]
value = CONVERTER_MAPPING[data_type](value)
if data_type == DataType.INT:
feature[key] = tf.train.Feature(
int64_list=tf.train.Int64List(value=value))
elif data_type == DataType.FLOAT:
feature[key] = tf.train.Feature(
float_list=tf.train.FloatList(value=value))
elif data_type == DataType.BYTES:
feature[key] = tf.train.Feature(
bytes_list=tf.train.BytesList(value=value))
else:
feature[key] = tf.train.Feature(
bytes_list=tf.train.BytesList(value=value))
tf_example = tf.train.Example(features=tf.train.Features(feature=feature))
return tf_example | 15fb71794c4e87923197927d80597a8f0e960690 | 14,299 |