content | sha1 | id
---|---|---|
from types import SimpleNamespace
def voucher_and_partial_matches_with_coupons(voucher_and_partial_matches):
"""
Returns a voucher with partial matching CourseRuns and valid coupons
"""
context = voucher_and_partial_matches
products = [
ProductFactory(content_object=course_run)
for course_run in context.partial_matches
]
coupon_eligibility_list = [
CouponEligibilityFactory(product=product) for product in products
]
payment_versions = [
CouponPaymentVersionFactory(amount=1, company=context.company)
for _ in coupon_eligibility_list
]
    coupon_versions = [
        CouponVersionFactory(
            coupon=eligibility.coupon,
            payment_version=payment_version,
        )
        for eligibility, payment_version in zip(coupon_eligibility_list, payment_versions)
    ]
return SimpleNamespace(
**vars(voucher_and_partial_matches),
products=products,
coupon_eligibility_list=coupon_eligibility_list,
coupon_versions=coupon_versions,
payment_versions=payment_versions,
) | 4f9e5732b0f3863504dec2aeef1309c0c24abc77 | 14,100 |
import numpy as np
import batman
def one_transit(t=np.linspace(0,27,19440),
per=1., rp=0.1, t0=1., a=15., inc=87., ecc=0.,
w=90., limb_dark ='nonlinear', u=[0.5,0.1,0.1,-0.1]):
"""
~Simulates a one-sector long TESS light curve with injected planet transits per input parameters.~
Requires: batman; numpy
Args: t =times at which to calculate light curve, default is one TESS sector;
per =orbital period;
rp =planet radius (in units of stellar radii);
t0 =time of inferior conjunction);
a =semi-major axis (in units of stellar radii);
inc =orbital inclination (in degrees);
ecc =eccentricity;
w =longitude of periastron (in degrees);
limb_dark =limb darkening model;
u =limb darkening coefficients [u1, u2, u3, u4];
outputs: flux array =light curve with one injected transit at per, for use right before sim_lc to get TESS lc
"""
    #### maybe should make params its own fcn and split this fcn into 2....
    params = batman.TransitParams()
    params.t0 = t0
    params.per = per
    params.rp = rp
    params.a = a
    params.inc = inc
    params.ecc = ecc
    params.w = w
    params.limb_dark = limb_dark
    params.u = u
m = batman.TransitModel(params, t) #initializes model
flux = m.light_curve(params) #calculates light curve
return flux, m, params | 4bb9a59e307cdab7554c10ae952279588c47bd94 | 14,101 |
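# Illustrative usage of one_transit (a sketch, not from the original source):
# assumes the batman-package, numpy and matplotlib are installed; the planet
# parameters below are arbitrary example values.
import numpy as np
import matplotlib.pyplot as plt

flux, model, params = one_transit(per=3.5, rp=0.08, t0=1.2)
t = np.linspace(0, 27, 19440)  # same default time grid as one_transit
plt.plot(t, flux, lw=0.5)
plt.xlabel("Time [days]")
plt.ylabel("Relative flux")
plt.show()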
import os
import re
def scrape_md_file(md_path):
"""
    Return the Python code blocks and URLs found in the markdown file at md_path.
Parameters
----------
md_path : str
path to md file to scrape
Returns
-------
python_examples : List[str]
The list of Python scripts included in the provided file.
    urls : List[str]
        The list of URLs included in the provided file.
    """
    # check the markdown file exists at that path
if not os.path.isfile(md_path):
return [], []
with open(md_path, 'r') as f:
readme_content = f.read()
pythons = re.findall('```python(.*?)```', readme_content, flags=re.DOTALL)
urls = re.findall('http[s]?://(?:[0-9a-zA-Z]|[-/.%:_])+', readme_content)
return pythons, urls | afac5538a469dafb06dfd2df40a28be5284b61be | 14,102 |
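# Small usage sketch for scrape_md_file (hypothetical file name):
scripts, urls = scrape_md_file("README.md")
print(f"found {len(scripts)} python blocks and {len(urls)} links")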
def activate(request: Request) -> dict:
"""View to activate user after clicking email link.
:param request: Pyramid request.
:return: Context to be used by the renderer.
"""
code = request.matchdict.get('code', None)
registration_service = get_registration_service(request)
return registration_service.activate_by_email(code) | ccc543ff740d3c7ebbe7e0404c0ef6a7fc310866 | 14,103 |
import numpy as np
from scipy import stats as scipy_stats
def _create_eval_metrics_fn(
dataset_name, is_regression_task
):
"""Creates a function that computes task-relevant metrics.
Args:
dataset_name: TFDS name of dataset.
is_regression_task: If true, includes Spearman's rank correlation
coefficient computation in metric function; otherwise, defaults to
accuracy computation.
Returns:
Relevant metric function.
"""
def get_accuracy(guess, gold):
return (guess == gold).mean()
def get_mcc(guess, gold):
tp = ((guess == 1) & (gold == 1)).sum()
tn = ((guess == 0) & (gold == 0)).sum()
fp = ((guess == 1) & (gold == 0)).sum()
fn = ((guess == 0) & (gold == 1)).sum()
mcc_denom = np.sqrt((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn))
mcc = (tp * tn - fp * fn) / (mcc_denom + 1e-6)
return mcc
def get_f1(guess, gold):
tp = ((guess == 1) & (gold == 1)).sum()
fp = ((guess == 1) & (gold == 0)).sum()
fn = ((guess == 0) & (gold == 1)).sum()
f1 = (2 * tp) / (2 * tp + fp + fn + 1e-6)
return f1
def get_f1_accuracy_mean(guess, gold):
return (get_f1(guess, gold) + get_accuracy(guess, gold)) / 2.0
def get_spearmanr(x, y):
return scipy_stats.spearmanr(x, y).correlation
eval_metrics = {}
if is_regression_task:
eval_metrics["spearmanr"] = get_spearmanr
else:
eval_metrics["accuracy"] = get_accuracy
if dataset_name == "glue/cola":
eval_metrics["mcc"] = get_mcc
elif dataset_name in ("glue/mrpc", "glue/qqp"):
eval_metrics["f1_accuracy_mean"] = get_f1_accuracy_mean
def metrics_fn(stats):
res = {}
for name, fn in eval_metrics.items():
res[name] = fn(stats["prediction"], stats["label"])
return res
return metrics_fn | 732baaf729739d7150f09185233efaa873045605 | 14,104 |
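# Illustrative usage of _create_eval_metrics_fn on a tiny CoLA-style batch
# (the predictions and labels are made up for the example):
metrics_fn = _create_eval_metrics_fn("glue/cola", is_regression_task=False)
stats = {"prediction": np.array([1, 0, 1, 1]), "label": np.array([1, 0, 0, 1])}
print(metrics_fn(stats))  # {'accuracy': 0.75, 'mcc': ~0.58}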
import numpy as np
def brighter(rgb):
"""
Make the color (rgb-tuple) a tad brighter.
"""
_rgb = tuple([ int(np.sqrt(a/255) * 255) for a in rgb ])
return _rgb | f1d6ba4deea3896ce6754d622913b7f2d2af91e4 | 14,105 |
def delete_workspace_config(namespace, workspace, cnamespace, config):
"""Delete method configuration in workspace.
Args:
namespace (str): project to which workspace belongs
workspace (str): Workspace name
        cnamespace (str): Method configuration namespace
        config (str): Method configuration name
Swagger:
https://api.firecloud.org/#!/Method_Configurations/deleteWorkspaceMethodConfig
"""
uri = "workspaces/{0}/{1}/method_configs/{2}/{3}".format(namespace,
workspace, cnamespace, config)
return __delete(uri) | e86106fc6eabc0f1ae703e31abe3283e9df3e31b | 14,106 |
import math
def test_filled_transparent_graphs_2():
""" Two functions with transparend grid over them """
coordinate_system = cartesius.CoordinateSystem()
coordinate_system.add(
charts.Function(
math.sin,
start = -4,
end = 5,
step = 0.02,
fill_color = (0, 0, 255),
transparency_mask = 100))
coordinate_system.add(
charts.Function(
math.cos,
start = -4,
end = 5,
step = 0.02,
fill_color = (200, 255, 200),
transparency_mask = 100))
coordinate_system.add(elements.Grid(1, 1, transparency_mask=140))
return coordinate_system.draw(300, 200), coordinate_system.draw(300, 200, antialiasing=True) | 9cc51358b2e92a869ea318ac8d18f3b9ea988012 | 14,107 |
import os.path as op
def get_shader_code(name):
""" Returns the shader as a string """
fname = op.join( op.dirname(__file__), name )
if op.exists( fname ):
with open(fname) as f:
return f.read() | bdd21d6c36b5e71608d48ecdce32adb79bb58428 | 14,108 |
import torch
import torch.nn.functional as F
def compute_translation_error(pred_pose, gt_pose, reduction="mean"):
"""
Computes the error (meters) in translation components of pose prediction.
Inputs:
pred_pose - (bs, 3) --- (x, y, theta)
gt_pose - (bs, 3) --- (x, y, theta)
Note: x, y must be in meters.
"""
error = torch.sqrt(
F.mse_loss(pred_pose[:, :2], gt_pose[:, :2], reduction=reduction)
)
return error | e1e0863c37a3c42e3081d5b21f529172315ccb66 | 14,109 |
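# Illustrative check of compute_translation_error (not from the original source):
# two poses 3 m apart along x give sqrt((3^2 + 0^2) / 2) ~= 2.12, i.e. the RMSE
# over the (x, y) components rather than the Euclidean distance.
pred = torch.tensor([[0.0, 0.0, 0.0]])
gt = torch.tensor([[3.0, 0.0, 0.0]])
print(compute_translation_error(pred, gt))  # tensor(2.1213)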
def get_base_snippet_action_menu_items(model):
"""
Retrieve the global list of menu items for the snippet action menu,
which may then be customised on a per-request basis
"""
menu_items = [
SaveMenuItem(order=0),
DeleteMenuItem(order=10),
]
for hook in hooks.get_hooks('register_snippet_action_menu_item'):
action_menu_item = hook(model)
if action_menu_item:
menu_items.append(action_menu_item)
return menu_items | d741097c3e75764578e3f1aa6cc33cb194a40b42 | 14,110 |
def assign_file(package, source):
"""Initializes package output class.
Parameters
----------
package : :obj:`str`
Name of the package that generated the trajectory file.
source : :obj:`str`
Path to the trajectory file.
Returns
-------
    An instance of the parser class corresponding to the given package.
"""
if package.lower() == 'gamess':
return GAMESS(source)
else:
raise ValueError(f'{package} is not supported.') | 70f01dc69ef87738fd87b6c321787d7159a85e3a | 14,111 |
import logging
import transaction
def _magpie_register_services_with_db_session(services_dict, db_session, push_to_phoenix=False,
force_update=False, update_getcapabilities_permissions=False):
# type: (ServicesSettings, Session, bool, bool, bool) -> bool
"""
Registration procedure of :term:`Services` from ``providers`` section using pre-established database session.
.. seealso::
:func:`magpie_register_services_from_config`
"""
db_session.begin(subtransactions=True)
existing_services_names = [n[0] for n in db_session.query(models.Service.resource_name)]
magpie_anonymous_user = get_constant("MAGPIE_ANONYMOUS_USER")
anonymous_user = UserService.by_user_name(magpie_anonymous_user, db_session=db_session)
for svc_name, svc_values in services_dict.items():
svc_new_url = svc_values["url"]
svc_type = svc_values["type"]
svc_config = svc_values.get("configuration")
svc_sync_type = svc_values.get("sync_type")
if force_update and svc_name in existing_services_names:
svc = models.Service.by_service_name(svc_name, db_session=db_session)
if svc.url == svc_new_url:
print_log("Service URL already properly set [{url}] ({svc})"
.format(url=svc.url, svc=svc_name), logger=LOGGER)
else:
print_log("Service URL update [{url_old}] => [{url_new}] ({svc})"
.format(url_old=svc.url, url_new=svc_new_url, svc=svc_name), logger=LOGGER)
svc.url = svc_new_url
svc.sync_type = svc_sync_type
svc.configuration = svc_config
elif not force_update and svc_name in existing_services_names:
print_log("Skipping service [{svc}] (conflict)" .format(svc=svc_name), logger=LOGGER)
else:
print_log("Adding service [{svc}]".format(svc=svc_name), logger=LOGGER)
svc = models.Service(
resource_name=svc_name,
resource_type=models.Service.resource_type_name,
url=svc_new_url,
type=svc_type,
configuration=svc_config,
sync_type=svc_sync_type
)
db_session.add(svc)
getcap_perm = Permission.GET_CAPABILITIES
if update_getcapabilities_permissions and anonymous_user is None:
print_log("Cannot update 'getcapabilities' permission of non existing anonymous user",
level=logging.WARN, logger=LOGGER)
elif update_getcapabilities_permissions and getcap_perm in SERVICE_TYPE_DICT[svc_type].permissions:
svc = db_session.query(models.Service.resource_id).filter_by(resource_name=svc_name).first()
svc_perm_getcapabilities = UserResourcePermissionService.by_resource_user_and_perm(
user_id=anonymous_user.id,
perm_name=getcap_perm.value,
resource_id=svc.resource_id,
db_session=db_session
)
if svc_perm_getcapabilities is None:
print_log("Adding '{}' permission to anonymous user.".format(getcap_perm.value), logger=LOGGER)
svc_perm_getcapabilities = models.UserResourcePermission(
user_id=anonymous_user.id,
perm_name=getcap_perm.value,
resource_id=svc.resource_id
)
db_session.add(svc_perm_getcapabilities)
transaction.commit()
if push_to_phoenix:
return _phoenix_update_services(services_dict)
return True | b2f96f213f1ab84e7be56788a1b4dad6d93dbe16 | 14,112 |
def journal(client):
"""
Fetch journal entries which reference a member.
"""
client.require_auth()
with (yield from nwdb.connection()) as conn:
cursor = yield from conn.cursor()
yield from cursor.execute("""
select A.tx_id, A.wallet_id, A.debit, A.credit, B.currency_id, C.narrative
from journal A
inner join wallet B on B.id = A.wallet_id
inner join wallet_transaction C on C.id = A.tx_id
where B.member_id = %s
order by C.created
""", [client.session["member_id"]])
rs = yield from cursor.fetchall()
return [dict(i) for i in rs] | 06eada531634f25ba076114b3858eae0a75b1807 | 14,113 |
def triplet_to_rrggbb(rgbtuple):
"""Converts a (red, green, blue) tuple to #rrggbb."""
hexname = _tripdict.get(rgbtuple)
if hexname is None:
hexname = '#%02x%02x%02x' % rgbtuple
_tripdict[rgbtuple] = hexname
return hexname | 9ba66d9aadb8385726178b32d69e14adfe380229 | 14,114 |
import math
import numpy as np
def stab_cholesky(M):
""" A numerically stable version of the Cholesky decomposition.
Used in the GLE implementation. Since many of the matrices used in this
algorithm have very large and very small numbers in at once, to handle a
wide range of frequencies, a naive algorithm can end up having to calculate
the square root of a negative number, which breaks the algorithm. This is
due to numerical precision errors turning a very tiny positive eigenvalue
into a tiny negative value.
Instead of this, an LDU decomposition is used, and any small negative numbers
in the diagonal D matrix are assumed to be due to numerical precision errors,
and so are replaced with zero.
Args:
M: The matrix to be decomposed.
"""
n = M.shape[1]
D = np.zeros(n,float)
L = np.zeros(M.shape,float)
for i in range(n):
L[i,i] = 1.
for j in range(i):
L[i,j] = M[i,j]
for k in range(j):
L[i,j] -= L[i,k]*L[j,k]*D[k]
if (not D[j] == 0.0):
L[i,j] = L[i,j]/D[j]
D[i] = M[i,i]
for k in range(i):
D[i] -= L[i,k]*L[i,k]*D[k]
S = np.zeros(M.shape,float)
for i in range(n):
if (D[i]>0):
D[i] = math.sqrt(D[i])
else:
warning("Zeroing negative element in stab-cholesky decomposition: " + str(D[i]), verbosity.low)
D[i] = 0
for j in range(i+1):
S[i,j] += L[i,j]*D[j]
return S | 73f2989bb77513090b8ccbcf99b5f31a3aab9115 | 14,115 |
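# Minimal sanity check for stab_cholesky (illustrative, not from the original
# source): for a well-conditioned SPD matrix the result matches the ordinary
# Cholesky factor, so S @ S.T reconstructs M.
M = np.array([[4.0, 2.0], [2.0, 3.0]])
S = stab_cholesky(M)
print(np.allclose(S @ S.T, M))                 # True
print(np.allclose(S, np.linalg.cholesky(M)))   # True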
from datetime import datetime, timedelta
import json
def prodNeventsTrend(request):
"""
The view presents historical trend of nevents in different states for various processing types
Default time window - 1 week
"""
valid, response= initRequest(request)
defaultdays = 7
equery = {}
if 'days' in request.session['requestParams'] and request.session['requestParams']['days']:
try:
days = int(request.session['requestParams']['days'])
except:
days = defaultdays
starttime = datetime.now() - timedelta(days=days)
endtime = datetime.now()
request.session['requestParams']['days'] = days
else:
starttime = datetime.now() - timedelta(days=defaultdays)
endtime = datetime.now()
request.session['requestParams']['days'] = defaultdays
equery['timestamp__range'] = [starttime, endtime]
if 'processingtype' in request.session['requestParams'] and request.session['requestParams']['processingtype']:
if '|' not in request.session['requestParams']['processingtype']:
equery['processingtype'] = request.session['requestParams']['processingtype']
else:
pts = request.session['requestParams']['processingtype'].split('|')
equery['processingtype__in'] = pts
events = ProdNeventsHistory.objects.filter(**equery).values()
timeline = set([ev['timestamp'] for ev in events])
timelinestr = [datetime.strftime(ts, defaultDatetimeFormat) for ts in timeline]
if 'view' in request.session['requestParams'] and request.session['requestParams']['view'] and request.session['requestParams']['view'] == 'separated':
view = request.session['requestParams']['view']
else:
view = 'joint'
plot_data = []
if view == 'joint':
ev_states = ['running', 'waiting']
data = {}
for es in ev_states:
data[es] = {}
for ts in timelinestr:
data[es][ts] = 0
for ev in events:
for es in ev_states:
data[es][datetime.strftime(ev['timestamp'], defaultDatetimeFormat)] += ev['nevents' + str(es)]
else:
processingtypes = set([ev['processingtype'] for ev in events])
ev_states = ['running', 'waiting']
lines = []
for prtype in processingtypes:
for evst in ev_states:
lines.append(str(prtype + '_' + evst))
if len(processingtypes) > 1:
lines.append('total_running')
lines.append('total_waiting')
data = {}
for l in lines:
data[l] = {}
for ts in timelinestr:
data[l][ts] = 0
for ev in events:
for l in lines:
if ev['processingtype'] in l:
data[l][datetime.strftime(ev['timestamp'], defaultDatetimeFormat)] += ev['nevents' + str(l.split('_')[1])]
if l.startswith('total'):
data[l][datetime.strftime(ev['timestamp'], defaultDatetimeFormat)] += ev['nevents' + str(l.split('_')[1])]
for key, value in data.items():
newDict = {'state': key, 'values':[]}
for ts, nevents in value.items():
newDict['values'].append({'timestamp': ts, 'nevents':nevents})
newDict['values'] = sorted(newDict['values'], key=lambda k: k['timestamp'])
plot_data.append(newDict)
if (('HTTP_ACCEPT' in request.META) and (request.META.get('HTTP_ACCEPT') in ('text/json', 'application/json'))) or (
'json' in request.session['requestParams']):
plot_data_list = [['timestamp'],]
plot_data_list[0].extend([point['timestamp'] for point in plot_data[0]['values']])
for i, line in enumerate(plot_data):
plot_data_list.append([line['state']])
plot_data_list[i+1].extend([point['nevents'] for point in plot_data[i]['values']])
dump = json.dumps(plot_data_list, cls=DateEncoder)
return HttpResponse(dump, content_type='application/json')
else:
data = {
'request': request,
'viewParams': request.session['viewParams'],
'requestParams': request.session['requestParams'],
'built': datetime.now().strftime("%H:%M:%S"),
'plotData': json.dumps(plot_data)
}
response = render_to_response('prodNeventsTrend.html', data, content_type='text/html')
setCacheEntry(request, "prodNeventsTrend", json.dumps(data, cls=DateEncoder), 60 * 20)
patch_response_headers(response, cache_timeout=request.session['max_age_minutes'] * 60)
return response | 55b7fcbf352e98b01ce4146ff5b5984c86c435d3 | 14,116 |
def create_storage_policy_zios(session, cloud_name, zios_id, policy_name, drive_type, drive_quantity,
policy_type_id, description=None, return_type=None, **kwargs):
"""
Creates a new policy to ZIOS.
:type session: zadarapy.session.Session
:param session: A valid zadarapy.session.Session object. Required.
:type cloud_name: str
:param cloud_name: The cloud 'name' as returned by get_all_clouds. For
example: 'zadaralab01'. Required.
:type zios_id: int
:param zios_id: The ZIOS 'id' value as returned by get_all_zios_objects. Required.
:type policy_name: str
:param policy_name: Policy name. Required
:type drive_type: str
:param drive_type: Drive type internal name. Required
:type drive_quantity: int
:param drive_quantity: Number of drives to add. Required.
:type policy_type_id: int
:param policy_type_id: Storage policy type id. Required.
:type description: str
:param description: Policy description
:type return_type: str
:param return_type: If this is set to the string 'json', this function
will return a JSON string. Otherwise, it will return a Python
dictionary. Optional (will return a Python dictionary by default).
:rtype: dict, str
:returns: A dictionary or JSON data set as a string depending on
return_type parameter.
"""
zios_id = verify_zios_id(zios_id)
cloud_name = verify_cloud_name(cloud_name)
drive_type = verify_field(drive_type, 'drive_type')
drive_quantity = verify_capacity(drive_quantity, 'drive_quantity')
policy_type_id = verify_capacity(policy_type_id, 'policy_type_id')
body_values = {"name":policy_name, "drive_type":drive_type,
"drive_quantity":drive_quantity, "policy_type_id":policy_type_id}
if description is not None:
body_values["description"] = description
path = "/api/clouds/{0}/zioses/{1}/policy.json".format(cloud_name, zios_id)
return session.post_api(path=path, body=body_values, return_type=return_type, **kwargs) | 16cec8686df1fb634064e75478364285dcfc3c1d | 14,117 |
import numpy as np
import networkx as nx
def format_tooltips(G, **kwargs):
""" Annotate G, format tooltips.
"""
# node data = [(n, {...}), ...]
node_data = {}
if isinstance(G, nx.Graph):
node_data = G.nodes(True)
elif 'nodes' in G:
node_data = [(d["id"], d) for d in G['nodes']]
# unique ids
member_uids = np.sort(np.unique([
__ for n,d in node_data for __ in d['members']
]))
# array of tooltips
node_tooltips = []
for n,d in node_data:
# progress
print("Formatting tooltip... NodeID:", n)
member_ids = d['members']
# member images
images = d['image'][member_ids]
images = [IMG_HTML.format(src=_) for _ in images]
# format tooltip for node
node_tooltip = NODE_HTML.format(
node_id=n, node_name=d['name'],
node_size=len(member_ids),
data_size=len(member_uids),
images=images
)
# add to array
node_tooltips.append(node_tooltip)
# make numpy array
return np.array(node_tooltips) | cfbfc3012dffce017110288847f3bcefa4612645 | 14,118 |
import os
import glob
import shutil
def copy_files(extension, source, target=None):
"""Copy matching files from source to target.
Scan the ``source`` folder and copy any file that end with
the given ``extension`` to the ``target`` folder.
Both ``source`` and ``target`` are expected to be either a ``str`` or a
list or tuple of strings to be joined using ``os.path.join``.
    ``source`` will be interpreted as a path relative to the ``atm`` root
code folder, and ``target`` will be interpreted as a path relative to
the user's current working directory.
If ``target`` is ``None``, ``source`` will be used, and if the ``target``
directory does not exist, it will be created.
Args:
extension (str):
File extension to copy.
        source (str or iterable):
Source directory.
        target (str or iterable or None):
Target directory. Defaults to ``None``.
Returns:
dict:
Dictionary containing the file names without extension as keys
and the new paths as values.
"""
if isinstance(source, (list, tuple)):
source = os.path.join(*source)
if isinstance(target, (list, tuple)):
target = os.path.join(*target)
elif target is None:
target = source
source_dir = os.path.join(os.path.dirname(__file__), source)
target_dir = os.path.join(os.getcwd(), target)
if not os.path.exists(target_dir):
os.makedirs(target_dir)
file_paths = dict()
for source_file in glob.glob(os.path.join(source_dir, '*.' + extension)):
file_name = os.path.basename(source_file)
target_file = os.path.join(target_dir, file_name)
print('Generating file {}'.format(target_file))
shutil.copy(source_file, target_file)
file_paths[file_name[:-(len(extension) + 1)]] = target_file
return file_paths | 5b6ae6a908448487206612e7686e573c266bc287 | 14,119 |
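# Hypothetical usage of copy_files (the 'demos' folder name is only an example):
# copy every *.json file shipped under the package's demos/ folder into ./demos,
# creating the target directory if needed.
paths = copy_files('json', 'demos')
print(paths)  # {'some_demo': '/current/dir/demos/some_demo.json', ...}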
def add_centroid_frags(fragList, atmList):
"""Add centroid to each fragment."""
for frag in fragList:
atoms = [atmList[i] for i in frag['ids']]
frag['cx'], frag['cy'], frag['cz'] = centroid_atmList(atoms)
return fragList | 1f050fcf0b60a7bb62d6d5be844b9a895e91fc7f | 14,120 |
from typing import Optional
import os
import logging
import sys
def load_mat(path: str, mat: str, fid: str, size: Optional[int] = None, overwrite: Optional[bool] = False, loop: Optional[int] = 0) -> np.ndarray:
"""Get the raw data for one individual file.
If the file does not exist in the specified path then tries to download it
from Google Drive.
"""
filepath = os.path.join(path, mat)
if os.path.exists(filepath) and not overwrite:
if filepath.endswith('.mat'):
try:
return loadmat(filepath)
except ValueError:
try:
return tables.open_file(filepath, driver="H5FD_CORE")
except:
pass
# logging.warning('Corrupt database!!\n, overwriting...')
# return load_mat(path, mat, fid, size, overwrite=True)
elif filepath.endswith('.edf'):
try:
return mne.io.read_raw_edf(filepath)
except:
pass
elif filepath.endswith('.npy'):
try:
return np.load(filepath)
except:
pass
elif filepath.endswith('.bdf'):
try:
return mne.io.read_raw_bdf(filepath)
except:
pass
elif filepath.endswith('.gdf'):
try:
return mne.io.read_raw_gdf(filepath)
except:
pass
if loop > 2:
logging.warning(
'Several unsuccessful attempts, the data access quota could be compromised.')
logging.warning(
'Many read and write tasks over Google Drive databases could block the background access system almost 24 hours.')
sys.exit()
if drive_mounted():
logging.warning('Corrupt database!!')
return
else:
logging.warning('Corrupt database!!\noverwriting...')
return load_mat(path, mat, fid, size, overwrite=True, loop=loop + 1)
else:
logging.warning('Database not found!')
logging.warning('downloading...')
if drive_mounted():
logging.warning('Write on the shared drive has been disabled.')
logging.warning(
f'The directory name is optional for Google Drive mounted environment')
sys.exit()
os.makedirs(path, exist_ok=True)
gdd.download_file_from_google_drive(file_id=fid,
dest_path=filepath,
unzip=False,
overwrite=overwrite,
size=size)
return load_mat(path, mat, fid, size, loop=loop + 1) | 2824a3c6b40db2b7f47e4d33d92da5dc27dc702b | 14,121 |
import numpy as np
from scipy.ndimage import sobel
def _apply_sobel(img_matrix):
"""
Input: img_matrix(height, width) with type float32
Convolves the image with sobel mask and returns the magnitude
"""
dx = sobel(img_matrix, 1)
dy = sobel(img_matrix, 0)
grad_mag = np.hypot(dx, dy) # Calculates sqrt(dx^2 + dy^2)
grad_mag *= 255 / grad_mag.max() # Normalize the gradient magnitudes
return grad_mag | 5c297cf822e1d5cba092070ecb52f57b1dbe720b | 14,122 |
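# Tiny illustrative check for _apply_sobel: a vertical step edge produces its
# largest gradient magnitudes along the edge columns.
img = np.zeros((5, 5), dtype=np.float32)
img[:, 2:] = 255.0
print(_apply_sobel(img))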
def isDeleted(doc_ref):
"""
Checks if document is logically deleted, i.e. has a deleted timestamp.
Returns: boolean
"""
return exists(doc_ref) and 'ts_deleted' in get_doc(doc_ref) | 0c7357357edfc645c771acbe40730cb4668fe13e | 14,123 |
from typing import Optional
def sys_wait_for_event(
mask: int, k: Optional[Key], m: Optional[Mouse], flush: bool
) -> int:
"""Wait for an event then return.
If flush is True then the buffer will be cleared before waiting. Otherwise
    each available event will be returned in the order they're received.
Args:
mask (int): :any:`Event types` to wait for.
k (Optional[Key]): A tcod.Key instance which might be updated with
an event. Can be None.
m (Optional[Mouse]): A tcod.Mouse instance which might be updated
with an event. Can be None.
flush (bool): Clear the event buffer before waiting.
.. deprecated:: 9.3
Use the :any:`tcod.event.wait` function to wait for events.
"""
return int(
lib.TCOD_sys_wait_for_event(
mask,
k.key_p if k else ffi.NULL,
m.mouse_p if m else ffi.NULL,
flush,
)
) | 4c0ba8f8b49f0f0dc837739afb46f667785b8a8c | 14,124 |
import flask
def get_test():
"""
Return test data.
"""
context = {}
context['test'] = 'this is a test message'
return flask.jsonify(**context) | 01f99a070a61414461d9a407574591f715ca5c63 | 14,125 |
def num_poisson_events(rate, period, rng=None):
"""
Returns the number of events that have occurred in a Poisson
process of ``rate`` over ``period``.
"""
if rng is None:
rng = GLOBAL_RNG
events = 0
while period > 0:
time_to_next = rng.expovariate(1.0/rate)
if time_to_next <= period:
events = events + 1
period = period - time_to_next
return events | 0f2378040bcf6193507bd15cb01c9e753e5c5235 | 14,126 |
import fnmatch
def findmatch(members,classprefix):
"""Find match for class member."""
lst = [n for (n,c) in members]
return fnmatch.filter(lst,classprefix) | 05038eb4796161f4cc64674248473c01fd4b13aa | 14,127 |
def is_narcissistic(number):
"""Must return True if number is narcissistic"""
return sum([pow(int(x), len(str(number))) for x in str(number)]) == number | b94486d4df52b7108a1c431286e7e86c799abf58 | 14,128 |
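# Quick illustrative checks: 153 = 1**3 + 5**3 + 3**3 is narcissistic, 154 is not.
print(is_narcissistic(153))  # True
print(is_narcissistic(154))  # False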
import matplotlib.pyplot as plt
def Plot1DFields(r,h,phi_n_bar,g_s,g_b):
"""
Generates a nice plot of the 1D fields with 2 axes and a legend.
Note: The sizing works well in a jupyter notebook
but probably should be adjusted for a paper.
"""
fig,ax1 = plt.subplots(figsize=(6.7,4))
fig.subplots_adjust(right=0.8)
ax2 = ax1.twinx()
p1, = ax1.plot(r,h,'C0-',label=r'$h$')
p2, = ax2.plot(r,phi_n_bar,'C1-',label=r'$\bar{\phi}_n$')
p3, = ax2.plot(r,g_s,'C2-',label=r'$g_s$')
p4, = ax2.plot(r,g_b,'C3-',label=r'$g_b$')
ax1.set_xlabel(r'$r$',labelpad=0)
ax1.set_ylabel(r'$h$',rotation=0,labelpad=10)
ax1.set_xlim(r[0],r[-1])
ax2.set_ylabel('$\\bar{\\phi}_n$\n$g_s$\n$g_b$',rotation=0,labelpad=12,va='center')
ax2.set_ylim(-0.05,1.05)
lines = [p1,p2,p3,p4]
ax1.legend(lines,[l.get_label() for l in lines],loc='center left',bbox_to_anchor=(1.16,0.54))
return fig,[ax1,ax2] | 3e48dc6745e49ca36a2d9d1ade8b684ac24c3c25 | 14,129 |
def get_yesterday():
"""
    :return: yesterday's date as a ``datetime.date``
"""
return _get_passed_one_day_from_now(days=1).date() | 99201bd9cde9fdf442d17a6f1c285523e3b867cc | 14,130 |
def classroom_mc():
"""
Corresponds to the 2nd line of Table 4 in https://doi.org/10.1101/2021.10.14.21264988
"""
concentration_mc = mc.ConcentrationModel(
room=models.Room(volume=160, inside_temp=models.PiecewiseConstant((0., 24.), (293,)), humidity=0.3),
ventilation=models.MultipleVentilation(
ventilations=(
models.SlidingWindow(
active=models.PeriodicInterval(period=120, duration=120),
outside_temp=TorontoTemperatures['Dec'],
window_height=1.6,
opening_length=0.2,
),
models.AirChange(active=models.PeriodicInterval(period=120, duration=120), air_exch=0.25),
)
),
infected=mc.InfectedPopulation(
number=1,
presence=models.SpecificInterval(((0, 2), (2.5, 4), (5, 7), (7.5, 9))),
virus=virus_distributions['SARS_CoV_2_ALPHA'],
mask=models.Mask.types["No mask"],
activity=activity_distributions['Light activity'],
expiration=build_expiration('Speaking'),
host_immunity=0.,
),
evaporation_factor=0.3,
)
return mc.ExposureModel(
concentration_model=concentration_mc,
short_range=(),
exposed=mc.Population(
number=19,
presence=models.SpecificInterval(((0, 2), (2.5, 4), (5, 7), (7.5, 9))),
activity=activity_distributions['Seated'],
mask=models.Mask.types["No mask"],
host_immunity=0.,
),
) | c272bc1de9b5b76eb55aa5b8d6dfbe42d2c95e66 | 14,131 |
import linecache
import ast
def smart_eval(stmt, _globals, _locals, filename=None, *, ast_transformer=None):
"""
Automatically exec/eval stmt.
Returns the result if eval, or NoResult if it was an exec. Or raises if
the stmt is a syntax error or raises an exception. If stmt is multiple
statements ending in an expression, the statements are exec-ed and the
final expression is eval-ed and returned as the result.
filename should be the filename used for compiling the statement. If
given, stmt will be saved to the Python linecache, so that it appears in
tracebacks. Otherwise, a default filename is used and it isn't saved to the
linecache. To work properly, "fake" filenames should start with < and end
with >, and be unique for each stmt.
Note that classes defined with this will have their module set to
'__main__'. To change this, set _globals['__name__'] to the desired
module.
To transform the ast before compiling it, pass in an ast_transformer
function. It should take in an ast and return a new ast.
Examples:
>>> g = l = {}
>>> smart_eval('1 + 1', g, l)
2
>>> smart_eval('a = 1 + 1', g, l)
<class 'mypython.mypython.NoResult'>
>>> g['a']
2
>>> smart_eval('a = 1 + 1; a', g, l)
2
"""
if filename:
if filename != "<stdin>":
# (size, mtime, lines, fullname)
linecache.cache[filename] = (len(stmt), None, stmt.splitlines(keepends=True), filename)
else:
filename = mypython_file()
p = ast.parse(stmt)
if ast_transformer:
p = ast_transformer(p)
expr = None
res = NoResult
if p.body and isinstance(p.body[-1], ast.Expr):
expr = p.body.pop()
code = compile(p, filename, 'exec')
exec(code, _globals, _locals)
if expr:
code = compile(ast.Expression(expr.value), filename, 'eval')
res = eval(code, _globals, _locals)
return res | d314e1a2f5536304f302ca7c79875a894275b171 | 14,132 |
from typing import Optional
from typing import Dict
from typing import Any
import pathlib
import os
import tomli
def parse_toml(path_string: Optional[str]) -> Dict[str, Any]:
"""Parse toml"""
if not path_string:
path = pathlib.Path(os.getcwd())
else:
path = pathlib.Path(path_string)
toml_path = path / "pyproject.toml"
if not toml_path.exists():
return {}
with open(toml_path, encoding="utf8") as handle:
pyproject_toml = tomli.loads(handle.read())
config = pyproject_toml.get("tool", {}).get("pydoc_fork", {})
loose_matching = {
k.replace("--", "").replace("-", "_"): v for k, v in config.items()
}
return loose_matching | 7adffb1dd2bd73c0fa6c2ef85d9a5e80582b95a8 | 14,133 |
def mtxv(m1, vin):
"""
Multiplies the transpose of a 3x3 matrix
on the left with a vector on the right.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/mtxv_c.html
:param m1: 3x3 double precision matrix.
:type m1: 3x3-Element Array of floats
:param vin: 3-dimensional double precision vector.
:type vin: 3-Element Array of floats
:return: 3-dimensional double precision vector.
:rtype: 3-Element Array of floats
"""
m1 = stypes.toDoubleMatrix(m1)
vin = stypes.toDoubleVector(vin)
vout = stypes.emptyDoubleVector(3)
libspice.mtxv_c(m1, vin, vout)
return stypes.cVectorToPython(vout) | 5602f4c399983b10e0d701b53026ceabc2af35cd | 14,134 |
def cmip_recipe_basics(func):
"""A decorator for starting a cmip recipe
"""
def parse_and_run(*args, **kwargs):
set_verbose(_logger, kwargs.get('verbose'))
opts = parse_recipe_options(kwargs.get('options'), add_cmip_collection_args_to_parser)
# Recipe is run.
returnval = func(*args, **kwargs)
return returnval
return parse_and_run | 3411b68180d878802379a413524f9a3db185a654 | 14,135 |
def optimize_concrete_function(
concrete_function: function.ConcreteFunction,
strip_control_dependencies: bool) -> wrap_function.WrappedFunction:
"""Returns optimized function with same signature as `concrete_function`."""
wrapped_fn = wrap_function.WrappedFunction(
concrete_function.graph,
variable_holder=wrap_function.VariableHolder(share_variables=True))
fetches = concrete_function.structured_outputs
if strip_control_dependencies:
flat_outputs, _ = tf2_utils.strip_and_get_tensors_and_control_dependencies(
tf.nest.flatten(fetches, expand_composites=True))
fetches = tf.nest.pack_sequence_as(
concrete_function.structured_outputs,
flat_outputs,
expand_composites=True)
result = wrapped_fn.prune(
feeds=concrete_function.inputs,
fetches=fetches,
input_signature=concrete_function.structured_input_signature)
# TODO(b/163329414): Remove once `prune` retains shape information for all
# components.
for original_out, pruned_out in zip(concrete_function.outputs,
result.outputs):
pruned_out.set_shape(original_out.get_shape())
return result | 3176b544dd6fc2305a0e66a9da0cc25d7dc11658 | 14,136 |
def serialize_cupcake(cupcake):
"""Serialize a cupcake SQLAlchemy obj to dictionary."""
return {
"id": cupcake.id,
"flavor": cupcake.flavor,
"size": cupcake.size,
"rating": cupcake.rating,
"image": cupcake.image,
} | 35fa140cf8b6527984002e28be1f102ee6c71a1b | 14,137 |
import tensorflow as tf
def compute_accuracy(labels, logits):
"""Compute accuracy for a single batch of data, given the precomputed logits
and expected labels. The returned accuracy is normalized by the batch size.
"""
current_batch_size = tf.cast(labels.shape[0], tf.float32)
# logits is the percent chance; this gives the category for each.
predictions = tf.argmax(logits, axis=1)
# return the average number of items equal to their label.
return tf.reduce_sum(tf.cast(tf.equal(labels, predictions),
tf.float32)) / current_batch_size | 2e53fc01053a5caafa2cdd976715dd31d3d43b0f | 14,138 |
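# Illustrative check of compute_accuracy on a hand-made batch (3 of the 4
# argmax predictions match their labels, so the result is 0.75):
labels = tf.constant([1, 0, 2, 1], dtype=tf.int64)
logits = tf.constant([[0.1, 0.8, 0.1],
                      [0.7, 0.2, 0.1],
                      [0.2, 0.3, 0.5],
                      [0.6, 0.3, 0.1]])
print(compute_accuracy(labels, logits))  # tf.Tensor(0.75, ...)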
import torch
def get_data(generic_iterator):
"""Code to get minibatch from data iterator
Inputs:
- generic_iterator; iterator for dataset
Outputs:
- data; minibatch of data from iterator
"""
data = next(generic_iterator)
if torch.cuda.is_available():
data = data.cuda()
return data | 364151694fb452279691986f5533e182a8b905f3 | 14,139 |
from torchvision import transforms as T
def aug_transform(crop, base_transform, cfg, extra_t=[]):
""" augmentation transform generated from config """
return T.Compose(
[
T.RandomApply(
[T.ColorJitter(cfg.cj0, cfg.cj1, cfg.cj2, cfg.cj3)], p=cfg.cj_p
),
T.RandomGrayscale(p=cfg.gs_p),
T.RandomResizedCrop(
crop,
scale=(cfg.crop_s0, cfg.crop_s1),
ratio=(cfg.crop_r0, cfg.crop_r1),
interpolation=3,
),
T.RandomHorizontalFlip(p=cfg.hf_p),
*extra_t,
base_transform(),
]
) | 4d8ac62e4ad550f563d9adb237db8853a0c7d36a | 14,140 |
import numpy as np
def _check_definition_contains_or(definition_dict, key, values):
    """Return True if ``definition_dict[key]`` contains any of the given values."""
out = False
for value in values:
if (np.array(list(definition_dict[key])) == value).any():
out = True
break
return out | bb15bdbe50476ea46425be20e0c35229352ba03f | 14,141 |
from threading import Thread
def concurrent_map(func, data):
"""
    Similar to the builtin function map(), but spawns a thread for each argument
    and applies `func` concurrently.
Note: unlike map(), we cannot take an iterable argument. `data` should be an
indexable sequence.
WARNING : this function doesn't limit the number of threads at the same time
"""
N = len(data)
result = [None] * N
# wrapper to dispose the result in the right slot
def task_wrapper(i):
result[i] = func(data[i])
threads = [Thread(target=task_wrapper, args=(i,)) for i in range(N)]
for t in threads:
t.start()
for t in threads:
t.join()
return result | d88c66af120f9a4408bf6e1d61c08f2fdcf81acd | 14,142 |
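# Illustrative usage of concurrent_map (not from the original source): each call
# sleeps briefly, so the four calls overlap instead of running back to back.
import time

def slow_square(x):
    time.sleep(0.1)
    return x * x

print(concurrent_map(slow_square, [1, 2, 3, 4]))  # [1, 4, 9, 16]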
import matplotlib.pyplot as plt
def star_hexagon(xy, radius=5, **kwargs):
"""
|\
c | \ b
|__\
a
"""
x,y = xy
r = radius
a = 1/4*r
b = a*2
c = a*3**(1/2)
return plt.Polygon(xy=(
(x, y-2*c), (x+a, y-c), (x+a+b, y-c),
(x+b, y), (x+a+b, y+c), (x+a, y+c),
(x, y+2*c), (x-a, y+c), (x-a-b, y+c),
(x-b, y), (x-a-b, y-c), (x-a, y-c),
), closed=True, **kwargs) | 62f09f26e98723764d03a678634cbb00f051e105 | 14,143 |
def calibrate(leveled_arcs, sat_biases, stn_biases):
"""
    Applies satellite and receiver station biases to the leveled arcs and returns the calibrated arcs.
"""
calibrated_arcs = []
for arc in leveled_arcs:
if arc.sat[0] == 'G':
sat_bias = sat_biases['GPS'][int(arc.sat[1:])][0] * NS_TO_TECU
stn_bias = stn_biases['GPS'][arc.stn.upper()][0] * NS_TO_TECU
elif arc.sat[0] == 'R':
sat_bias = sat_biases['GLONASS'][int(arc.sat[1:])][0] * NS_TO_TECU
stn_bias = stn_biases['GLONASS'][arc.stn.upper()][0] * NS_TO_TECU
else:
raise ValueError('Satellite bias for {} not found'.format(arc.sat))
data_map = {'gps_time': arc.gps_time.values,
'az': arc.az.values,
'el': arc.el.values,
'satx': arc.satx.values,
'saty': arc.saty.values,
'satz': arc.satz.values,
'sobs': arc.L_I + sat_bias + stn_bias,
'sprn': arc.P_I + sat_bias + stn_bias}
calibrated_arc = CalibratedArc(data_map)
calibrated_arc.xyz = arc.xyz
calibrated_arc.llh = arc.llh
calibrated_arc.stn = arc.stn
calibrated_arc.recv_type = arc.recv_type
calibrated_arc.sat = arc.sat
calibrated_arc.L = arc.L
calibrated_arc.L_scatter = arc.L_scatter
calibrated_arc.sat_bias = sat_bias
calibrated_arc.stn_bias = stn_bias
calibrated_arcs.append(calibrated_arc)
return calibrated_arcs | 63065e0000ebaa48b71d3f9ed9814277b6bf63ed | 14,144 |
import numpy as np
def negSamplingCostAndGradient(predicted, target, outputVectors, dataset, K=10):
"""
Implements the negative sampling cost function and gradients for word2vec
:param predicted: ndarray, the predicted (center) word vector(v_c)
:param target: integer, the index of the target word
:param outputVectors: 2D ndarray, output word vectors (as rows)
:param dataset: an interface into the dataset
:param K: integer, no of negative samples
:return:
cost: cost function for negative sampling
gradPred: gradient with respect to predicted (input / center) word vector
grad: gradient with respect to output word vectors
"""
grad = np.zeros(outputVectors.shape)
gradPred = np.zeros(predicted.shape)
indices = [target]
    for k in range(K):
newidx = dataset.sampleTokenIdx()
while newidx == target:
newidx = dataset.sampleTokenIdx()
indices += [newidx]
    labels = np.array([1] + [-1 for k in range(K)]).reshape(-1, 1)
vecs = outputVectors[indices, :]
t = sigmoid(vecs.dot(predicted.T) * labels)
cost = -np.sum(np.log(t))
delta = labels * (t - 1)
gradPred = delta.reshape((1, K + 1)).dot(vecs).flatten()
gradtemp = delta.dot(predicted)
    for k in range(K + 1):
grad[indices[k]] += gradtemp[k, :]
return cost, gradPred, grad | 4e1fbf082d97b1a4c7b5b5f9ee722c54fc993712 | 14,145 |
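# For reference, a sketch of the standard negative-sampling objective that the
# cost above corresponds to (sigma = sigmoid, v_c = predicted center vector,
# u_o = target output vector, u_k = the K sampled negative output vectors):
#
#   J = -log sigma(u_o . v_c) - sum_k log sigma(-u_k . v_c)
#
# which matches labels = [1, -1, ..., -1] and cost = -sum(log(t)) in the code.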
from datetime import datetime
import pytz
def isotime(timestamp):
"""ISO 8601 formatted date in UTC from unix timestamp"""
return datetime.fromtimestamp(timestamp, pytz.utc).isoformat() | f6a922d75a186e26f158edc585691e31bf430b01 | 14,146 |
import numpy as np
def initializeSeam():
"""
This function defines the seams of a baseball. It is
    based, to a large extent, on the work from
http://www.darenscotwilson.com/spec/bbseam/bbseam.html
"""
    n = 109  # number of points we're calculating on the seam line
alpha = np.linspace(0,np.pi*2,n)
x = np.zeros(len(alpha))
y = np.zeros(len(alpha))
z = np.zeros(len(alpha))
R = (2 + 15/16.)/2
for i in range(len(alpha)-1):
x[i] = ((1/13)*R*((9*np.cos(alpha[i]) - 4*np.cos(3*alpha[i]))))
y[i] = ((1/13)*R*((9*np.sin(alpha[i]) + 4*np.sin(3*alpha[i]))))
z[i] = ((12/13)*R*np.cos(2*alpha[i]))
return x,y,z | 43c7e968ecd98595c46e679676f23cbb07d28bb3 | 14,147 |
def check_model_consistency(model, grounding_dict, pos_labels):
"""Check that serialized model is consistent with associated json files.
"""
groundings = {grounding for grounding_map in grounding_dict.values()
for grounding in grounding_map.values()}
model_labels = set(model.estimator.named_steps['logit'].classes_)
consistent_labels = groundings <= model_labels
shortforms = set(grounding_dict.keys())
model_shortforms = set(model.shortforms)
consistent_shortforms = shortforms == model_shortforms
    consistent_pos_labels = set(pos_labels) <= model_labels
return consistent_labels and consistent_shortforms and \
consistent_pos_labels | b5d1beda0be5ceccec158839c61c1d79349596ef | 14,148 |
import datetime as dt
def get_submission_info(tile_grid, collections, tile_indices,
period_start, period_end, period_freq):
""" Return information about tracked order submissions
"""
return {
'submitted': dt.datetime.today().isoformat(),
'collections': collections,
'tile_grid': tile_grid.to_dict(),
'tile_indices': list(tile_indices),
'period_start': period_start.isoformat(),
'period_end': period_end.isoformat(),
'period_freq': period_freq
} | 990740ef15760fd5514598772d496db47a436786 | 14,149 |
def load_obj(path):
"""Load an object from a Python file.
path is relative to the data dir. The file is executed and the obj
local is returned.
"""
localdict = {}
with open(_DATADIR / path) as file:
exec(file.read(), localdict, localdict)
return localdict['obj'] | 8c44141e58d0aa1402f6d5244857fbe3d07ddc84 | 14,150 |
import numpy as np
def dp_policy_evaluation(env, pi, v=None, gamma=1, tol=1e-3, iter_max=100,
verbose=True):
"""Evaluates state-value function by performing iterative policy evaluation
via Bellman expectation equation (in-place)
Based on Sutton/Barto, Reinforcement Learning, 2nd ed. p. 75
Args:
env: Environment
pi: Policy
v: Initial value function or None
gamma: Discount factor
tol: Tolerance to stop iteration
iter_max: Maximum iteration count
Returns:
v: State-value function
"""
if v is None:
v = np.zeros(env.observation_space.n)
for i_iter in range(iter_max):
if verbose:
print("\r> DP Policy evaluation: Iteration {}/{}".format(
i_iter+1, iter_max), end="")
delta = 0
for state in range(env.observation_space.n):
v_new = 0
for action in range(env.action_space.n):
for (prob,state2,reward,done) in env.P[state][action]:
v_new += pi[state][action] * prob * (
reward + gamma*v[state2]
)
delta = max(delta, np.abs(v_new-v[state]))
v[state] = v_new
if delta < tol:
break
if verbose:
print()
return v | e920f48f6b37f9815b077b18e02b0403b78f2ce7 | 14,151 |
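# Hypothetical usage sketch for dp_policy_evaluation with a Gym-style discrete
# environment (assumes `gym` is installed; FrozenLake-v1 exposes the transition
# table `P` used above -- you may need `.unwrapped` depending on the gym version):
import gym

env = gym.make("FrozenLake-v1").unwrapped
n_s, n_a = env.observation_space.n, env.action_space.n
pi = np.full((n_s, n_a), 1.0 / n_a)   # uniform random policy
v = dp_policy_evaluation(env, pi, gamma=0.99, tol=1e-6)
print(v.reshape(4, 4))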
def gpst2utc(tgps, leaps_=-18):
""" calculate UTC-time from gps-time """
tutc = timeadd(tgps, leaps_)
return tutc | a1bf6aa583ae1827cce572f809831b3396bdd91b | 14,152 |
def create_shell(username, session_id, key):
"""Instantiates a CapturingSocket and SwiftShell and hooks them up.
After you call this, the returned CapturingSocket should capture all
IPython display messages.
"""
socket = CapturingSocket()
session = Session(username=username, session=session_id, key=key)
shell = SwiftShell.instance()
shell.display_pub.session = session
shell.display_pub.pub_socket = socket
return [socket, shell] | 13af90ea2497211c75d66fbff334ee95ede678b8 | 14,153 |
def _get_index_sort_str(env, name):
"""
Returns a string by which an object with the given name shall be sorted in
indices.
"""
ignored_prefixes = env.config.cmake_index_common_prefix
for prefix in ignored_prefixes:
if name.startswith(prefix) and name != prefix:
return name[len(prefix):]
return name | cdf7a509ef8f49ff15cac779e37f0bc5ab98c613 | 14,154 |
import datetime
def utcnow():
"""Gets current time.
:returns: current time from utc
:rtype: :py:obj:`datetime.datetime`
"""
return datetime.datetime.utcnow() | a85b4e28b0cbc087f3c0bb641e896958ea267c3f | 14,155 |
def elem2full(elem: str) -> str:
"""Retrieves full element name for short element name."""
for element_name, element_ids, element_short in PERIODIC_TABLE:
if elem == element_short:
print(element_name)
return element_name
else:
raise ValueError(f"Index {elem} does not match any element.") | 2c78531dc21722cc504182abec469eabdfeec862 | 14,156 |
import os
def fixture_path(relapath=''):
""":return: absolute path into the fixture directory
:param relapath: relative path into the fixtures directory, or ''
to obtain the fixture directory itself"""
return os.path.join(os.path.dirname(__file__), 'fixtures', relapath) | 4630d61c08c52570a9cb15522bd10cea5f82dbbd | 14,157 |
from random import SystemRandom
def create_random_totp_secret(secret_length: int = 72) -> bytes:
"""
Generate a random TOTP secret
:param int secret_length: How long should the secret be?
:rtype: bytes
:returns: A random secret
"""
random = SystemRandom()
return bytes(random.getrandbits(8) for _ in range(secret_length)) | 6ecaf035212e5e4e2d8e71856c05ea15407fdb19 | 14,158 |
def _get_roles_can_update(community_id):
"""Get the full list of roles that current identity can update."""
return _filter_roles("members_update", {"user", "group"}, community_id) | 7701cc425a83212dbd5ffd039a629b06a17fcb83 | 14,159 |
import tvm
def register_external_compiler(op_name, fexternal=None, level=10):
"""Register the external compiler for an op.
Parameters
----------
op_name : str
The name of the operator.
fexternal : function (attrs: Attrs, args: List[Expr], compiler: str)
-> new_expr: Expr
The function for wrapping a call expr with compiler_begin and
compiler_end.
level : int
The priority level
"""
return tvm.ir.register_op_attr(op_name, "FTVMExternalCompiler", fexternal, level) | 0d41fce383407af3d8a60d1886950424b89ee18b | 14,160 |
import tensorflow as tf
def kl_divergence_from_logits_bm(logits_a, logits_b):
"""Gets KL divergence from logits parameterizing categorical distributions.
Args:
logits_a: A tensor of logits parameterizing the first distribution.
logits_b: A tensor of logits parameterizing the second distribution.
Returns:
The (batch_size,) shaped tensor of KL divergences.
"""
beta_coeff = 1
alphas = tf.exp(logits_a)
betas = tf.exp(logits_b)
a_zero = tf.reduce_sum(alphas, -1)
loss1 = tf.lgamma(a_zero) - tf.reduce_sum(tf.lgamma(alphas), -1)
loss2 = tf.reduce_sum(
(alphas - betas) * (tf.digamma(alphas) - tf.digamma(tf.expand_dims(a_zero, -1))), -1)
kl_loss = loss1 + loss2
return kl_loss | 8078fcbd4c4c58bed888ba5b45e99783799bde42 | 14,161 |
import logging
def if_stopped_or_playing(speaker, action, args, soco_function, use_local_speaker_list):
"""Perform the action only if the speaker is currently in the desired playback state"""
state = speaker.get_current_transport_info()["current_transport_state"]
logging.info(
"Condition: '{}': Speaker '{}' is in state '{}'".format(
action, speaker.player_name, state
)
)
if (state != "PLAYING" and action == "if_playing") or (
state == "PLAYING" and action == "if_stopped"
):
logging.info("Action suppressed")
return True
action = args[0]
args = args[1:]
logging.info(
"Action invoked: '{} {} {}'".format(speaker.player_name, action, " ".join(args))
)
return process_action(
speaker, action, args, use_local_speaker_list=use_local_speaker_list
) | 7daa5bd040e6753ce1e39807071e0911a8dd3182 | 14,162 |
def compute_src_graph(hive_holder, common_table):
""" computes just the src part of the full version graph.
Side effect: updates requirements of blocks to actually point to real dep versions
"""
graph = BlockVersionGraph()
versions = hive_holder.versions
    graph.add_nodes(versions.values())
references = References()
for block_holder in hive_holder.block_holders:
dep_table = block_holder.requirements
base_version = versions[block_holder.block_name]
for target_bcn in block_holder.external_targets():
target_block_name = target_bcn.block_name
if target_block_name in versions:
other_version = versions[target_block_name]
else:
other_version = common_table[target_block_name]
references[other_version].add(target_bcn.cell_name)
graph.add_edge(base_version, other_version)
dep_table.add_version(other_version)
return graph, references | 55d150e583e93c0d5b25490543738f79ba23fe64 | 14,163 |
def get_uv(seed=0, nrm=False, vector=False):
"""Dataset with random univariate data
Parameters
----------
seed : None | int
Seed the numpy random state before generating random data.
nrm : bool
Add a nested random-effects variable (default False).
vector : bool
Add a 3d vector variable as ``ds['v']`` (default ``False``).
"""
if seed is not None:
np.random.seed(seed)
ds = permute([('A', ('a1', 'a2')),
('B', ('b1', 'b2')),
('rm', ['s%03i' % i for i in range(20)])])
ds['rm'].random = True
ds['intvar'] = Var(np.random.randint(5, 15, 80))
ds['intvar'][:20] += 3
ds['fltvar'] = Var(np.random.normal(0, 1, 80))
ds['fltvar'][:40] += 1.
ds['fltvar2'] = Var(np.random.normal(0, 1, 80))
ds['fltvar2'][40:] += ds['fltvar'][40:].x
ds['index'] = Var(np.repeat([True, False], 40))
if nrm:
ds['nrm'] = Factor(['s%03i' % i for i in range(40)], tile=2, random=True)
if vector:
x = np.random.normal(0, 1, (80, 3))
x[:40] += [.3, .3, .3]
ds['v'] = NDVar(x, (Case, Space('RAS')))
return ds | 1394ae09705aa01f9309399ab8f1b7fcff04e010 | 14,164 |
from typing import Iterable
def private_names_for(cls, names):
"""
Returns:
        Iterable of private names using private_name_for()"""
if not isinstance(names, Iterable):
        raise TypeError('names must be an iterable')
return (private_name_for(item, cls) for item in names) | 606afdcfd8eed1e288df71a79f50a37037d84139 | 14,165 |
import os
def find_vcs_root(location="", dirs=(".git", ".hg", ".svn"), default=None) -> str:
"""Return current repository root directory."""
if not location:
location = os.getcwd()
prev, location = None, os.path.abspath(location)
while prev != location:
if any(os.path.isdir(os.path.join(location, d)) for d in dirs):
return location
prev, location = location, os.path.abspath(os.path.join(location, os.pardir))
return default | bb8c7525085a46bcb8822e7fcd1b9d1875b819b0 | 14,166 |
import numpy as np
import scipy.linalg as la
def invert_trimat(A, lower=False, right_inv=False, return_logdet=False, return_inv=False):
"""Inversion of triangular matrices.
Returns lambda function f that multiplies the inverse of A times a vector.
Args:
A: Triangular matrix.
lower: if True A is lower triangular, else A is upper triangular.
right_inv: If False, f(v)=A^{-1}v; if True f(v)=v' A^{-1}
return_logdet: If True, it also returns the log determinant of A.
return_inv: If True, it also returns A^{-1}
Returns:
Lambda function that multiplies A^{-1} times vector.
Log determinant of A
A^{-1}
"""
if right_inv:
fh=lambda x: la.solve_triangular(A.T, x.T, lower=not(lower)).T
else:
fh=lambda x: la.solve_triangular(A, x, lower=lower)
if return_logdet or return_inv:
r = [fh]
else:
r = fh
if return_logdet:
logdet=np.sum(np.log(np.diag(A)))
r.append(logdet)
if return_inv:
invA=fh(np.eye(A.shape[0]))
r.append(invA)
return r | ad449fe1718136e64a6896e74fbb8ee7a3cefcec | 14,167 |
def category_input_field_delete(request, structure_slug,
category_slug, module_id,
field_id, structure):
"""
Deletes a field from a category input module
:type structure_slug: String
:type category_slug: String
:type module_id: Integer
:type field_id: Integer
:type structure: OrganizationalStructure (from @is_manager)
:param structure_slug: structure slug
:param category_slug: category slug
:param module_id: input module id
:param field_id: module field id
:param structure: structure object (from @is_manager)
:return: redirect
"""
category = get_object_or_404(TicketCategory,
organizational_structure=structure,
slug=category_slug)
module = get_object_or_404(TicketCategoryModule,
pk=module_id,
ticket_category=category)
if not module.can_be_deleted():
# log action
logger.error('[{}] manager of structure {}'
' {} tried to delete a field'
' from module {} of category {}'.format(timezone.localtime(),
structure,
request.user,
module,
category))
messages.add_message(request, messages.ERROR,
_("Impossibile eliminare il modulo {}."
" Ci sono delle richieste collegate").format(module))
else:
field = get_object_or_404(TicketCategoryInputList,
pk=field_id,
category_module=module)
# log action
logger.info('[{}] manager of structure {}'
' {} deleted the field {}'
' from module {} of category {}'.format(timezone.localtime(),
structure,
request.user,
field,
module,
category))
field.delete()
messages.add_message(request, messages.SUCCESS,
_("Campo {} eliminato con successo").format(field.name))
return redirect('uni_ticket:manager_category_input_module',
structure_slug=structure_slug,
category_slug=category_slug,
module_id=module_id) | f5319ea5b992529e7b8d484b1dc6c0be621f9955 | 14,168 |
import pandas as pd
def cat_to_num(att_df):
"""
Changes categorical variables in a dataframe to numerical
"""
att_df_encode = att_df.copy(deep=True)
for att in att_df_encode.columns:
if att_df_encode[att].dtype != float:
att_df_encode[att] = pd.Categorical(att_df_encode[att])
att_df_encode[att] = att_df_encode[att].cat.codes
return att_df_encode | dbd57022ddd99d8ea4936da45d8c2cbe09078b81 | 14,169 |
async def handle_get(request):
"""Handle GET request, can be display at http://localhost:8080"""
text = (f'Server is running at {request.url}.\n'
f'Try `curl -X POST --data "text=test" {request.url}example`\n')
return web.Response(text=text) | 2398870ab6479d8db3517b89e0177cab674156b0 | 14,170 |
def values_target(size: tuple, value: float, cuda: bool) -> Variable:
""" returns tensor filled with value of given size """
result = Variable(full(size=size, fill_value=value))
if cuda:
result = result.cuda()
return result | be9db2c08fbac00e1f8d10b859da8422e7331901 | 14,171 |
def get_new_perpendicular_point_with_custom_distance_to_every_line_segment(
line_segments: np.ndarray, distance_from_the_line: np.ndarray
):
"""
:param line_segments: array of shape [number_of_line_segments, 2, 2]
:param distance_from_the_line: how far the new point to create from the reference
:return:
"""
return new_perpendicular_point_to_line_segment(
line_segments, distance_from_the_line
) | 3ebb94b9fa4b7f28e655a3e9a4fe93ec40276dff | 14,172 |
import requests
def tmdb_find_movie(movie: str, tmdb_api_token: str):
"""
Search the tmdb api for movies by title
Args:
movie (str): the title of a movie
tmdb_api_token (str): your tmdb v3 api token
Returns:
dict
"""
url = 'https://api.themoviedb.org/3/search/movie?'
params = {'query': movie, 'language': 'en-US', 'api_key': tmdb_api_token, }
return requests.get(url, params).json() | ea676fbb91f451b20ce4cd2f7258240ace3925b3 | 14,173 |
def is_missing_artifact_error(err: WandbError):
"""
Check if a specific W&B error is caused by a 404 on the artifact we're looking for.
"""
# This is brittle, but at least we have a test for it.
return "does not contain artifact" in err.message | 023bdab0b3a2914272a1087a5c42ba81ec064548 | 14,174 |
def create_reforecast_valid_times(start_year=2000):
"""Inits from year 2000 to 2019 for the same days as in 2020."""
reforecasts_inits = []
inits_2020 = create_forecast_valid_times().forecast_time.to_index()
for year in range(start_year, reforecast_end_year + 1):
# dates_year = pd.date_range(start=f"{year}-01-02", end=f"{year}-12-31", freq="7D")
dates_year = pd.DatetimeIndex([i.strftime("%Y%m%d").replace("2020", str(year)) for i in inits_2020])
dates_year = xr.DataArray(
dates_year,
dims="forecast_time",
coords={"forecast_time": dates_year},
)
reforecasts_inits.append(dates_year)
reforecasts_inits = xr.concat(reforecasts_inits, dim="forecast_time")
reforecast_valid_times = create_valid_time_from_forecast_time_and_lead_time(reforecasts_inits, leads)
reforecast_valid_times = (
reforecast_valid_times.rename("test").assign_coords(valid_time=reforecast_valid_times).to_dataset()
)
reforecast_valid_times = xr.ones_like(reforecast_valid_times).astype("float32")
return reforecast_valid_times | 0ea34549aef8b8b4551534560ab29c9580f9f1ca | 14,175 |
def _checkerror(fulloutput):
"""
Function to check the full output for known strings and plausible fixes to the error.
Future: add items to `edict` where the key is a unique string contained in the offending
    output, and the data is the recommended solution to resolve the problem
"""
edict = {'multiply': ('NOTE: you might(?) need to clean the `tmp/` folder!'),
'already defined': ('NOTE: you probably (might?) need to clean the `tmp/` folder!'),
'unresolved externals': ('NOTE: consider recompiling the linked libraries to'
'have the correct name mangling for cl.exe:'
'ifort: /names:lowercase /assume:underscore '),
"KeyError: 'void'": ('There may be an issue with public/private function '
'definitions or a missing variable definition in the last '
'function listed above. For the first error consider using '
'the parameter `functiondict` or checking to ensure all '
'module functions are public... For the second error, check '
'that all of the parameters in the subroutine are defined'),
"No such file or directory": ('There may be a space in the path to one of the '
'source code or library folders'),
"LINK : fatal error LNK1104: cannot open file": ('The pyd is currently in use, '
'restart any kernels using it !')
}
# iterate through the keys in the error dictionary and see if the key is in the full output
extramessage = ''
for error_key in edict.keys():
if error_key in fulloutput:
extramessage = edict[error_key]
return extramessage | 5312beff6f998d197a3822e04e60d47716520f50 | 14,176 |
from xml.etree.ElementTree import SubElement
def create_pre_process_block(net, ref_layer_name, means, scales=None):
"""
Generates the pre-process block for the IR XML
Args:
net: root XML element
ref_layer_name: name of the layer where it is referenced to
means: tuple of values
scales: tuple of values
Returns:
pre-process XML element
"""
pre_process = SubElement(net, 'pre-process')
pre_process.set('reference-layer-name', ref_layer_name)
for idx in range(len(means)):
channel_xml = SubElement(pre_process, 'channel')
channel_xml.set('id', str(idx))
mean_xml = SubElement(channel_xml, 'mean')
mean_xml.set('value', str(means[idx]))
if scales:
scale_xml = SubElement(channel_xml, 'scale')
scale_xml.set('value', str(scales[idx]))
return pre_process | 54013ec9d06cf7eff9b0af18d1655a5455a894be | 14,177 |
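# Usage sketch for `create_pre_process_block` above, assuming the `SubElement` it relies on
# is the standard `xml.etree.ElementTree.SubElement`.
from xml.etree.ElementTree import Element, SubElement, tostring

net = Element('net')
create_pre_process_block(net, ref_layer_name='data', means=(104.0, 117.0, 123.0), scales=(1.0, 1.0, 1.0))
print(tostring(net, encoding='unicode'))
# roughly: <net><pre-process reference-layer-name="data"><channel id="0"><mean value="104.0" />...</net>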
def GetSystemFaultsFromState(state, spot_wrapper):
"""Maps system fault data from robot state proto to ROS SystemFaultState message
Args:
        state: Robot State proto
spot_wrapper: A SpotWrapper object
Returns:
SystemFaultState message
"""
system_fault_state_msg = SystemFaultState()
system_fault_state_msg.faults = getSystemFaults(state.system_fault_state.faults, spot_wrapper)
system_fault_state_msg.historical_faults = getSystemFaults(state.system_fault_state.historical_faults, spot_wrapper)
return system_fault_state_msg | cda2d0bbe3ee3ca02724828d9f0f882695c3e0b0 | 14,178 |
def findAnEven(L):
"""
:Assumes L is a list of integers:
:Returns the first even number in L:
:Raises ValueError if L does not contain an even number:
"""
for num in L:
if num % 2 == 0:
return num
raise ValueError | 93f7854bd376d52df40b23d21bfde784db124106 | 14,179 |
def get_points(wire):
"""
get all points (including starting point), where the wire bends
>>> get_points(["R75","D30","R83","U83","L12","D49","R71","U7","L72"])
[((0, 0), (75, 0)), ((75, 0), (75, -30)), ((75, -30), (158, -30)), ((158, -30), (158, 53)), ((158, 53), (146, 53)), ((146, 53), (146, 4)), ((146, 4), (217, 4)), ((217, 4), (217, 11)), ((217, 11), (145, 11))]
>>> get_points(["U62","R66","U55","R34","D71","R55","D58","R83"])
[((0, 0), (0, 62)), ((0, 62), (66, 62)), ((66, 62), (66, 117)), ((66, 117), (100, 117)), ((100, 117), (100, 46)), ((100, 46), (155, 46)), ((155, 46), (155, -12)), ((155, -12), (238, -12))]
>>> get_points(["R98","U47","R26","D63","R33","U87","L62","D20","R33","U53","R51"])
[((0, 0), (98, 0)), ((98, 0), (98, 47)), ((98, 47), (124, 47)), ((124, 47), (124, -16)), ((124, -16), (157, -16)), ((157, -16), (157, 71)), ((157, 71), (95, 71)), ((95, 71), (95, 51)), ((95, 51), (128, 51)), ((128, 51), (128, 104)), ((128, 104), (179, 104))]
>>> get_points(["U98","R91","D20","R16","D67","R40","U7","R15","U6","R7"])
[((0, 0), (0, 98)), ((0, 98), (91, 98)), ((91, 98), (91, 78)), ((91, 78), (107, 78)), ((107, 78), (107, 11)), ((107, 11), (147, 11)), ((147, 11), (147, 18)), ((147, 18), (162, 18)), ((162, 18), (162, 24)), ((162, 24), (169, 24))]
"""
starting_point = (0, 0)
result = []
for part in wire:
end_point = get_end_point(starting_point, part)
result.append((starting_point, end_point))
starting_point = end_point
return result | 8f0e7bad7500b8113d6ce601c6f2af472192774f | 14,180 |
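# `get_points` above relies on a `get_end_point` helper that is not shown. A minimal sketch
# consistent with the doctests (R/L move along x, U/D move along y, with D decreasing y):
def get_end_point(start, part):
    direction, distance = part[0], int(part[1:])
    x, y = start
    if direction == "R":
        return (x + distance, y)
    if direction == "L":
        return (x - distance, y)
    if direction == "U":
        return (x, y + distance)
    if direction == "D":
        return (x, y - distance)
    raise ValueError("unknown direction: " + direction)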
def getcutscheckerboard(rho):
"""
:param rho:
:return: cell centers and values along horizontal, vertical, diag cut
"""
ny, nx = rho.shape
assert nx == ny
n = ny
horizontal = rho[6 * n // 7, :]
vertical = rho[:, n // 7]
if np.abs(horizontal[0]) < 1e-15:
horizontal = horizontal[2:-2]
if np.abs(vertical[0]) < 1e-15:
vertical = vertical[2:-2]
diag = [rho[i, i] for i in range(n)]
if np.abs(diag[0]) < 1e-15:
diag = diag[2:-2]
edges = np.linspace(0, 7, len(horizontal) + 1)
centers = (edges[1:] + edges[:-1]) / 2
return centers, horizontal, vertical, diag | 31d95160d1b34b50e616a346e04d5b6567886677 | 14,181 |
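# Usage sketch for `getcutscheckerboard` above, assuming numpy is imported as `np` as the
# function body expects. Any square field with non-zero edge values keeps the full cuts.
import numpy as np

rho = np.arange(1.0, 140 * 140 + 1).reshape(140, 140)
centers, horizontal, vertical, diag = getcutscheckerboard(rho)
print(centers.shape, horizontal.shape, vertical.shape, len(diag))  # (140,) (140,) (140,) 140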
def errorString(node, error):
"""
Format error messages for node errors returned by checkLinkoStructure.
inputs:
node - the node for the error.
error - a (backset, foreset) tuple, where backset is the set of
missing backlinks and foreset is the set of missing forelinks.
returns: string
string - the error string message.
"""
back, fore = error[0], error[1]
if len(back) == 0:
back = 'None'
if len(fore) == 0:
fore = 'None'
return ('Node {0}: missing backlinks {1},'
' missing forelinks {2}').format(node, back, fore) | df87b7838ed84fe4e6b95002357f616c96d04ad0 | 14,182 |
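# Usage sketch for `errorString` above: backlinks 2 and 5 are missing, no forelinks are missing.
missing = ({2, 5}, set())
print(errorString(7, missing))
# Node 7: missing backlinks {2, 5}, missing forelinks None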
def deep_update(target, source):
"""
    Deep merge two dicts: nested dicts are merged recursively, keys missing from target
    are copied from source, and existing non-dict values in target are preserved.
"""
if isinstance(source, dict):
for key, item in source.items():
if key in target:
target[key] = deep_update(target[key], item)
else:
target[key] = source[key]
return target | 5db0c6fa31f3d4408a359d90dbf6e50dfdc12cdc | 14,183 |
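# Usage sketch for `deep_update` above. Note that when a key holds a non-dict value in both
# dicts, the value already in `target` is kept; only missing keys are filled in from `source`.
target = {"server": {"host": "localhost", "port": 8080}, "debug": False}
source = {"server": {"port": 9090, "tls": True}, "extra": 1}
print(deep_update(target, source))
# {'server': {'host': 'localhost', 'port': 8080, 'tls': True}, 'debug': False, 'extra': 1}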
import hashlib
def md5_hash_file(path):
"""
Return a md5 hashdigest for a file or None if path could not be read.
"""
hasher = hashlib.md5()
try:
with open(path, 'rb') as afile:
buf = afile.read()
hasher.update(buf)
return hasher.hexdigest()
except IOError:
# This may happen if path has been deleted
return None | 514cafcffa0ae56d54f43508ece642d25b4be442 | 14,184 |
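# Usage sketch for `md5_hash_file` above.
import tempfile

with tempfile.NamedTemporaryFile(delete=False, suffix=".txt") as f:
    f.write(b"hello world\n")
    path = f.name

print(md5_hash_file(path))                 # hex digest of the file contents
print(md5_hash_file("/no/such/file.txt"))  # None, because the path cannot be read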
def Constant(value):
"""
Produce an object suitable for use as a source in the 'connect' function that
evaluates to the given 'value'
:param value: Constant value to provide to a connected target
:return: Output instance port of an instance of a Block that produces the given constant when evaluated
"""
global _constantCounter
blockName = "Constant" + str(_constantCounter)
constBlock = defineBlock(blockName)
defineOutputs(constBlock, "out")
defineBlockOutputBehaviour(constBlock.out, lambda: value)
setMetaData(constBlock.out, "Sensation-Producing", False)
inst = createInstance(blockName, "constant" + str(_constantCounter))
_constantCounter += 1
return inst.out | 1763d657e3396286516e6669e57b7ee297463b14 | 14,185 |
def _Backward3a_T_Ps(P, s):
"""Backward equation for region 3a, T=f(P,s)
Parameters
----------
P : float
Pressure [MPa]
s : float
Specific entropy [kJ/kgK]
Returns
-------
T : float
Temperature [K]
References
----------
IAPWS, Revised Supplementary Release on Backward Equations for the
Functions T(p,h), v(p,h) and T(p,s), v(p,s) for Region 3 of the IAPWS
Industrial Formulation 1997 for the Thermodynamic Properties of Water and
Steam, http://www.iapws.org/relguide/Supp-Tv%28ph,ps%293-2014.pdf, Eq 6
Examples
--------
>>> _Backward3a_T_Ps(20,3.8)
628.2959869
>>> _Backward3a_T_Ps(100,4)
705.6880237
"""
I = [-12, -12, -10, -10, -10, -10, -8, -8, -8, -8, -6, -6, -6, -5, -5, -5,
-4, -4, -4, -2, -2, -1, -1, 0, 0, 0, 1, 2, 2, 3, 8, 8, 10]
J = [28, 32, 4, 10, 12, 14, 5, 7, 8, 28, 2, 6, 32, 0, 14, 32, 6, 10, 36, 1,
4, 1, 6, 0, 1, 4, 0, 0, 3, 2, 0, 1, 2]
n = [0.150042008263875e10, -0.159397258480424e12, 0.502181140217975e-3,
-0.672057767855466e2, 0.145058545404456e4, -0.823889534888890e4,
-0.154852214233853, 0.112305046746695e2, -0.297000213482822e2,
0.438565132635495e11, 0.137837838635464e-2, -0.297478527157462e1,
0.971777947349413e13, -0.571527767052398e-4, 0.288307949778420e5,
-0.744428289262703e14, 0.128017324848921e2, -0.368275545889071e3,
0.664768904779177e16, 0.449359251958880e-1, -0.422897836099655e1,
-0.240614376434179, -0.474341365254924e1, 0.724093999126110,
0.923874349695897, 0.399043655281015e1, 0.384066651868009e-1,
-0.359344365571848e-2, -0.735196448821653, 0.188367048396131,
0.141064266818704e-3, -0.257418501496337e-2, 0.123220024851555e-2]
Pr = P/100
sigma = s/4.4
suma = 0
for i, j, ni in zip(I, J, n):
suma += ni * (Pr+0.240)**i * (sigma-0.703)**j
return 760*suma | cb0b9b55106cf771e95505c00043e5772faaef40 | 14,186 |
import re
def expandvars(s):
"""Expand environment variables of form %var%.
Unknown variables are left unchanged.
"""
global _env_rx
if '%' not in s:
return s
if _env_rx is None:
_env_rx = re.compile(r'%([^|<>=^%]+)%')
return _env_rx.sub(_substenv, s) | ede7861831ea9d9e74422eb3a92a13ba4d1937f2 | 14,187 |
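# `expandvars` above assumes a module-level `_env_rx = None` cache and a `_substenv` callback
# that are not shown. A minimal sketch that leaves unknown variables unchanged, as the
# docstring promises:
import os

_env_rx = None

def _substenv(match):
    # Replace %NAME% with its environment value, or keep the original text if it is unknown.
    return os.environ.get(match.group(1), match.group(0))

os.environ["MYTOOL_HOME"] = r"C:\tools\mytool"
print(expandvars(r"%MYTOOL_HOME%\bin;%UNDEFINED%"))  # C:\tools\mytool\bin;%UNDEFINED%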
def make_map_counts(events, ref_geom, pointing, offset_max):
"""Build a WcsNDMap (space - energy) with events from an EventList.
The energy of the events is used for the non-spatial axis.
Parameters
----------
events : `~gammapy.data.EventList`
Event list
ref_geom : `~gammapy.maps.WcsGeom`
Reference WcsGeom object used to define geometry (space - energy)
offset_max : `~astropy.coordinates.Angle`
Maximum field of view offset.
Returns
-------
cntmap : `~gammapy.maps.WcsNDMap`
Count cube (3D) in true energy bins
"""
count_map = WcsNDMap(ref_geom)
fill_map_counts(count_map, events)
# Compute and apply FOV offset mask
offset_map = make_separation_map(ref_geom, pointing)
offset_mask = offset_map.data >= offset_max
count_map.data[:, offset_mask] = 0
return count_map | 7a22340c8f3909d6ca559361290a4608a0321de1 | 14,188 |
def stats_aggregate():
""" RESTful CRUD Controller """
return crud_controller() | 4a8439139257f39e0d2a34b576e9a9bd98cded5c | 14,189 |
def format_dB(num):
"""
Returns a human readable string of dB. The value is divided
by 10 to get first decimal digit
"""
num /= 10
return f'{num:3.1f} {"dB"}' | 13d6313834333ee2ea432cf08470b6ce1efe1ad6 | 14,190 |
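# Usage sketch for `format_dB` above; the argument is in tenths of a dB.
print(format_dB(314))  # '31.4 dB'
print(format_dB(-25))  # '-2.5 dB'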
def _check_index_dtype(k):
"""
Check the dtype of the index.
Parameters
----------
k: slice or array_like
Index into an array
Examples
--------
>>> _check_index_dtype(0)
dtype('int64')
>>> _check_index_dtype(np.datetime64(0, 'ms'))
dtype('<M8[ms]')
>>> _check_index_dtype(slice(5, 8))
dtype('int64')
"""
if not isinstance(k, slice):
if hasattr(k, "__len__") and len(k) == 0:
return np.intp
return np.asarray(k).dtype
arr = [v for v in (k.start, k.stop, k.step) if v is not None]
return _check_index_dtype(arr) | f9f7bac24f7ceba57978d7e1aed7c4e052c79f35 | 14,191 |
def _wrapper_for_precessing_snr(args):
"""Wrapper function for _precessing_snr for a pool of workers
Parameters
----------
args: tuple
All args passed to _precessing_snr
"""
return _precessing_snr(*args) | 4d64d7e658ecfeed6206abd0827f37805c3ecd0c | 14,192 |
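# The wrapper above exists so that argument tuples can be fed to `multiprocessing.Pool.map`,
# which passes a single positional argument to the worker. `_precessing_snr` below is a
# stand-in used only to illustrate the pattern; the real signature lives elsewhere.
from multiprocessing import Pool

def _precessing_snr(mass_1, mass_2, spin_magnitude):
    return mass_1 + mass_2 + spin_magnitude

if __name__ == "__main__":
    arg_tuples = [(36.0, 29.0, 0.3), (10.0, 8.0, 0.1)]
    with Pool(processes=2) as pool:
        snrs = pool.map(_wrapper_for_precessing_snr, arg_tuples)
    print(snrs)  # [65.3, 18.1]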
def readpcr(path):
"""
    Parse a FullProf-style .pcr input file into a nested dictionary. Only for multipattern formats.
"""
with open(path) as file:
lines = file.readlines()
# Strip comments
lines = [line for line in lines if not line.startswith("!")]
pcr = {} # main dictionary
line = 0 # line reference
##### start read
# pcr name
pcr['name'] = lines[line].strip()
line = line + 1
# patterns
pcr['patterns'] = {}
patt_1 = lines[line].split()
npatt = int(patt_1[1])
pcr['patterns']["npatt"] = npatt
for n in range(npatt):
        pcr['patterns'][n] = {"is_refined": bool(int(patt_1[n + 2]))}
line = line + 1
# pattern weights
weights = list(map(float, lines[line].split()[1:]))
for n in range(npatt):
pcr['patterns'][n]['weight'] = weights[n]
line = line + 1
# global flags
flags = list(map(int, lines[line].split()))
pcr["phases"] = {"nphases": flags[0]}
pcr["fl_divergence"] = flags[1]
pcr["fl_refl_reorder"] = flags[2]
pcr["fl_single_crystal_job"] = flags[3]
pcr["fl_optimisations"] = flags[4]
pcr["fl_automatic_refine"] = flags[5]
line = line + 1
# pattern flags
for n in range(npatt):
pattflags = {}
flags = list(map(int, lines[line].split()[0:14]))
pattflags["jobtype"] = flags[0]
pattflags["profile_type"] = flags[1]
pattflags["background_type"] = flags[2]
pattflags["excluded_regions"] = flags[3]
pattflags["scatterfactor_userdef"] = flags[4]
pattflags["preferred_orientation_type"] = flags[5]
pattflags["refine_weighting_type"] = flags[6]
pattflags["lorentz_polar_corr"] = flags[7]
pattflags["resolution_function_type"] = flags[8]
pattflags["reduction_factor"] = flags[9]
pattflags["scattering_unit"] = flags[10]
pattflags["intensity_corr"] = flags[11]
pattflags["anm"] = flags[12]
pattflags["int"] = flags[13]
        pcr['patterns'][n]["flags"] = pattflags
line = line + 1
# pattern names
for n in range(npatt):
pcr['patterns'][n]["filename"] = lines[line].strip()
line = line + 1
# output flags
flags = list(map(int, lines[line].split()))
pcr["out_correlation_matrix"] = flags[0]
pcr["out_update_pcr"] = flags[1]
pcr["out_nli"] = flags[2]
pcr["out_sym_file"] = flags[3]
pcr["out_rpa"] = flags[4]
pcr["out_reduced_verbose"] = flags[5]
line = line + 1
# output pattern flags
for n in range(npatt):
pattflags = {}
flags = list(map(int, lines[line].split()[0:11]))
pattflags["out_integrated"] = flags[0]
pattflags["out_ppl"] = flags[1]
pattflags["out_ioc"] = flags[2]
pattflags["out_ls1"] = flags[3]
pattflags["out_ls2"] = flags[4]
pattflags["out_ls3"] = flags[5]
pattflags["out_prf"] = flags[6]
pattflags["out_ins"] = flags[7]
pattflags["out_hkl"] = flags[8]
pattflags["out_fou"] = flags[9]
pattflags["out_ana"] = flags[10]
pcr['patterns'][n]['output'] = pattflags
line = line + 1
# experiment pattern flags
for n in range(npatt):
expatt = {}
flags = list(map(float, lines[line].split()))
expatt["lmd_1"] = flags[0]
expatt["lmd_2"] = flags[1]
expatt["lmd_ratio"] = flags[2]
expatt["background_start"] = flags[3]
expatt["prf_cutoff"] = flags[4]
expatt["monocrh_polarization_corr"] = flags[5]
expatt["absorp_corr"] = flags[6]
expatt["asymetry_corr_lim"] = flags[7]
expatt["polarization_factor"] = flags[8]
expatt["2nd-muR"] = flags[9]
pcr['patterns'][n]["flags"].update(expatt)
line = line + 1
# refinement flags
flags = lines[line].split()
pcr["ref_cycles"] = int(flags[0])
pcr["ref_convergence"] = float(flags[1])
pcr["ref_r_atomic"] = float(flags[2])
pcr["ref_r_anisotropic"] = float(flags[3])
pcr["ref_r_profile"] = float(flags[4])
pcr["ref_r_global"] = float(flags[5])
line = line + 1
# refinement pattern
for n in range(npatt):
refpatt = {}
flags = list(map(float, lines[line].split()))
refpatt["theta_min"] = flags[0]
        refpatt["step"] = flags[1]
refpatt["theta_max"] = flags[2]
refpatt["incident_angle"] = flags[3]
refpatt["max_beam_angle"] = flags[4]
pcr['patterns'][n]["flags"].update(refpatt)
line = line + 1
# excluded regions
for n in range(npatt):
excluded = pcr['patterns'][n]["flags"]['excluded_regions']
if excluded != 0:
ranges = []
for _ in range(excluded):
ranges.append(tuple(map(float, lines[line].split())))
line = line + 1
pcr['patterns'][n]["excluded"] = ranges
else:
line = line + 1
# refined parameters
nrefined = int(lines[line].split()[0])
line = line + 1
# data setup per pattern type
for n in range(npatt):
# powder data setup
scattering_unit = pcr['patterns'][n]["flags"]['scattering_unit']
if scattering_unit == 0:
flags = list(map(float, lines[line].split()))
expatt["zero_point"] = flags[0]
expatt["zero_point_code"] = flags[1]
expatt["systematic_shift_cos"] = flags[2]
expatt["systematic_shift_cos_code"] = flags[3]
expatt["systematic_shift_sin"] = flags[4]
expatt["systematic_shift_sin_code"] = flags[5]
expatt["wavelength"] = flags[6]
expatt["wavelength_code"] = flags[7]
more = bool(flags[8])
if more:
# microadsorption (not implemented)
line = line + 1
pcr['patterns'][n]["flags"].update(expatt)
elif scattering_unit == 1:
raise NotImplementedError
elif scattering_unit == 2:
raise NotImplementedError
line = line + 1
# background coefficients
background_type = pcr['patterns'][n]["flags"]['background_type']
if background_type == 0:
pcr['patterns'][n]['background_poly'] = list(map(float, lines[line].split()))
line = line + 1
pcr['patterns'][n]['background_code'] = list(map(float, lines[line].split()))
else:
raise NotImplementedError
line = line + 1
# start phase reading
nphases = pcr["phases"]["nphases"]
for ph in range(nphases):
phase = {}
# read name
phase["name"] = lines[line].strip()
line = line + 1
# read codes
phcodes = lines[line].split()
phase["natoms"] = int(phcodes[0])
phase["n_constraints_distance"] = int(phcodes[1])
phase["n_constraints_angle"] = int(phcodes[2]) # TODO can be n_constraints_magmoment
phase["job_type"] = int(phcodes[3])
phase["symmetry_reading_mode"] = int(phcodes[4])
phase["size_strain_mode"] = int(phcodes[5])
phase["n_usedef_parameters"] = int(phcodes[6])
phase["weight_coeff"] = float(phcodes[7])
phase["n_propagation_vectors"] = int(phcodes[8])
line = line + 1
more = int(phcodes[9])
if more:
raise NotImplementedError
# read contribution
        contributes = [bool(int(x)) for x in lines[line].split()]
phase["pattern"] = {}
for n in range(npatt):
phase["pattern"][n] = {'contributes': contributes[n]}
line = line + 1
# specific pattern parameters
if any(contributes):
for n in range(npatt):
params_1 = list(map(int, lines[line].split()))
line = line + 1
params_2 = list(map(float, lines[line].split()))
line = line + 1
phase["pattern"][n]["reflexions"] = params_1[0]
phase["pattern"][n]["profile_type"] = params_1[1]
phase["pattern"][n]["job_type"] = params_1[2]
phase["pattern"][n]["Nsp_Ref"] = params_1[3]
phase["pattern"][n]["Ph_Shift"] = params_1[4]
phase["pattern"][n]["preferred_orientation_d1"] = params_2[0]
phase["pattern"][n]["preferred_orientation_d2"] = params_2[1]
phase["pattern"][n]["preferred_orientation_d3"] = params_2[2]
phase["pattern"][n]["brindley_coeff"] = params_2[3]
phase["pattern"][n]["reflx_int_data_weight"] = params_2[4]
phase["pattern"][n]["reflx_int_exclusion"] = params_2[5]
phase["pattern"][n]["reflx_chi2_weight"] = params_2[6]
else:
line = line + 1
# spacegroup
phase["spacegroup"] = lines[line][0:21].strip()
line = line + 1
# atoms
natoms = phase["natoms"]
atoms = {}
for n in range(natoms):
atom_flags = lines[line].split()
line = line + 1
atom_codes = lines[line].split()
line = line + 1
atoms[n] = {}
atoms[n]["label"] = atom_flags[0]
atoms[n]["type"] = atom_flags[1]
atoms[n]["x"] = float(atom_flags[2])
atoms[n]["y"] = float(atom_flags[3])
atoms[n]["z"] = float(atom_flags[4])
atoms[n]["biso"] = float(atom_flags[5])
atoms[n]["occ"] = float(atom_flags[6])
atoms[n]["symmetry_subs_in"] = int(atom_flags[7])
atoms[n]["symmetry_subs_fin"] = int(atom_flags[8])
atoms[n]["isotropic_type"] = int(atom_flags[9])
atoms[n]["specie"] = int(atom_flags[10])
atoms[n]["x_code"] = atom_codes[0]
atoms[n]["y_code"] = atom_codes[1]
atoms[n]["z_code"] = atom_codes[2]
atoms[n]["biso_code"] = atom_codes[3]
atoms[n]["occ_code"] = atom_codes[4]
phase["atoms"] = atoms
# profile parameters
for n in range(npatt):
profile_1 = lines[line].split()
line = line + 1
profile_1_codes = list(map(float, lines[line].split()))
line = line + 1
profile_2 = list(map(float, lines[line].split()))
line = line + 1
profile_2_codes = list(map(float, lines[line].split()))
line = line + 1
phase['pattern'][n]['scale'] = float(profile_1[0])
phase['pattern'][n]['shape'] = float(profile_1[1])
phase['pattern'][n]['biso_overall'] = float(profile_1[2])
phase['pattern'][n]['strain_param1'] = float(profile_1[3])
phase['pattern'][n]['strain_param2'] = float(profile_1[4])
phase['pattern'][n]['strain_param3'] = float(profile_1[5])
phase['pattern'][n]['strain_model'] = int(profile_1[6])
phase['pattern'][n]['halfwidth_U'] = profile_2[0]
phase['pattern'][n]['halfwidth_V'] = profile_2[1]
phase['pattern'][n]['halfwidth_W'] = profile_2[2]
phase['pattern'][n]['lorrenzian_strain_X'] = profile_2[3]
phase['pattern'][n]['lorrenzian_strain_Y'] = profile_2[4]
phase['pattern'][n]['gaussian_particle_size'] = profile_2[5]
phase['pattern'][n]['lorenzian_particle_size'] = profile_2[6]
cell = list(map(float, lines[line].split()))
line = line + 1
cell_codes = list(map(float, lines[line].split()))
line = line + 1
phase["cell"] = {}
phase['cell']['a'] = cell[0]
phase['cell']['b'] = cell[1]
phase['cell']['c'] = cell[2]
phase['cell']['alpha'] = cell[3]
phase['cell']['beta'] = cell[4]
phase['cell']['gamma'] = cell[5]
orientation = list(map(float, lines[line].split()))
line = line + 1
orientation_codes = list(map(float, lines[line].split()))
line = line + 1
            phase['pattern'][n]['orientation_param1'] = orientation[0]
            phase['pattern'][n]['orientation_param2'] = orientation[1]
            phase['pattern'][n]['assymetry_param1'] = orientation[2]
            phase['pattern'][n]['assymetry_param2'] = orientation[3]
            phase['pattern'][n]['assymetry_param3'] = orientation[4]
            phase['pattern'][n]['assymetry_param4'] = orientation[5]
pcr["phases"][ph] = phase
# pattern to plot
pcr["plot_pattern"] = list(map(float, lines[line].split()))
return pcr | b3f612752b064de5f8ac77b6d5d77baf9aff3400 | 14,193 |
import logging

import connexion
def put_file_store(store_name, store, block_on_existing=None, user=None): # noqa: E501
"""Create/update store
# noqa: E501
:param store_name: Name of the store
:type store_name: str
:param store: Store information
:type store: dict | bytes
:rtype: FileStore
"""
if connexion.request.is_json:
store = SwaggerFileStore.from_dict(connexion.request.get_json()) # noqa: E501
if store_name != store.name:
return Error(code=400, message="URL and body names don't match"), 400
session = Database.get_session()
try:
# Check the store
q = session.query(FileStore).filter(FileStore.name == store_name) # type: Query
# Create new store or use existing
model = None
if q.first():
# Existing store
if block_on_existing:
return Error(code=1000, message="Already exists."), 400
model = q.first()
else:
model = FileStore()
session.add(model)
model.from_swagger_model(store, user=user)
session.commit()
q = session.query(FileStore).filter(FileStore.uid == model.uid)
return q.first().to_swagger_model(user=user), 200
except Exception as e:
logging.exception("File store put failed")
session.rollback()
return Error(code=500, message="Exception occurred"), 500 | 2e799c7fc2f394c925562c8600b83a1149586ad2 | 14,194 |
from typing import Dict
from typing import Set
from typing import Tuple
def assign_sections(region_table: RegionTable, sections: Dict[str, int]):
"""Assign memory sections.
This is a packing problem and therefore reasonably complex.
A simplistic algorithm is used here which may not always be optimal if user
assigned addresses are used for some sections.
"""
used_space: Set[Tuple[int, int]] = set()
def in_used_space(start, end):
return start > 0xfff or end > 0xfff or any(
map(lambda x: (start >= x[0] and start <= x[1]) or (end >= x[0] and end <= x[1]), used_space))
def find_free_space(size):
for _, end in used_space:
start_to_try = end + 1
end_to_try = end + size
if not in_used_space(start_to_try, end_to_try):
return start_to_try, end_to_try
raise AssemblyError("ran out of free space")
for name, item in region_table.items():
if in_used_space(item.start, item.end):
raise AssemblyError("region {} assigned in used space, memory is likely full".format(name))
used_space.add((item.start, item.end))
for section_name, section_size in sections.items():
section_start, section_end = find_free_space(section_size)
used_space.add((section_start, section_end))
region_table[section_name] = Region(type="user", start=section_start, end=section_end, count=0) | 6ad4af4c67be9e9d07a4464bfc1d3c529a5afd4b | 14,195 |
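# Usage sketch for `assign_sections` above. `Region`, `RegionTable` and `AssemblyError` come
# from elsewhere in the assembler; minimal stand-ins are defined here only for illustration.
from dataclasses import dataclass
from typing import Dict

@dataclass
class Region:
    type: str
    start: int
    end: int
    count: int

class AssemblyError(Exception):
    pass

RegionTable = Dict[str, Region]

regions: RegionTable = {"zero_page": Region(type="fixed", start=0x000, end=0x0FF, count=0)}
assign_sections(regions, {"data": 64, "bss": 32})
print(hex(regions["data"].start), hex(regions["data"].end))  # 0x100 0x13f
print(hex(regions["bss"].start), hex(regions["bss"].end))    # 0x140 0x15f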
def humanize_arrow_date( date ):
"""
Date is internal UTC ISO format string.
Output should be "today", "yesterday", "in 5 days", etc.
Arrow will try to humanize down to the minute, so we
need to catch 'today' as a special case.
"""
try:
then = arrow.get(date).to('local')
now = arrow.utcnow().to('local')
if then.date() == now.date():
human = "Today"
else:
human = then.humanize(now)
if human == "in a day":
human = "Tomorrow"
except:
human = date
return human | 511e5f7a85a5906d78ed9b252076b1f0e8ea02d9 | 14,196 |
def getCourseTeeHoles(request, courseId, courseTeeId):
"""
Getter function for list of courses and tees
"""
resultList = list(Tee.objects.filter(course_tee_id=courseTeeId).values('id', 'yardage', 'par', 'handicap', 'hole__id', 'hole__name', 'hole__number'))
return JsonResponse({'data' : resultList}) | 7abac65449503d2309cf8cae49b7e555488333f9 | 14,197 |
import logging
import tarfile
from pathlib import Path
from tempfile import TemporaryDirectory

import numpy as np
from tqdm import tqdm
def simulate(SNOwGLoBESdir, tarball_path, detector_input="all", verbose=False):
"""Takes as input the neutrino flux files and configures and runs the supernova script inside SNOwGLoBES, which outputs calculated event rates expected for a given (set of) detector(s). These event rates are given as a function of the neutrino energy and time, for each interaction channel.
Parameters
----------
SNOwGLoBESdir : str
Path to directory where SNOwGLoBES is installed.
tarball_path : str
Path of compressed .tar file produced e.g. by ``generate_time_series()`` or ``generate_fluence()``.
detector_input : str
Name of detector. If ``"all"``, will use all detectors supported by SNOwGLoBES.
verbose : bool
Whether to generate verbose output, e.g. for debugging.
"""
sng = SNOwGLoBES(SNOwGLoBESdir)
if detector_input == 'all':
detector_input = list(sng.detectors)
detector_input.remove('d2O')
elif isinstance(detector_input,str):
detector_input = [detector_input]
result = {}
#Extracts data from tarfile and sets up lists of paths and fluxfilenames for later use
with TemporaryDirectory(prefix='snowglobes') as tempdir:
with tarfile.open(tarball_path) as tar:
tar.extractall(tempdir)
flux_files = list(Path(tempdir).glob('*.dat'))
if len(detector_input)>0:
detector_input = tqdm(detector_input, desc='Detectors', leave=False)
for det in detector_input:
res=sng.run(flux_files, det)
result[det]=dict(zip((f.stem for f in flux_files),res))
# save result to file for re-use in collate()
cache_file = tarball_path[:tarball_path.rfind('.tar')] + '.npy'
logging.info(f'Saving simulation results to {cache_file}')
np.save(cache_file, result)
return result | fad0b3e54f1b3908f4efef6cc5ae0729756e14c9 | 14,198 |
import argparse
def args():
"""
Argument Parsing Handler:
-m <path_to_keras> :
Path to keras model
-o <model_output> :
Path to directory that will store pb model
"""
parser = argparse.ArgumentParser()
parser.add_argument("-m", "--path_to_keras", type=str,
help="Path to keras model.", default='')
parser.add_argument("-o", "--model_output", type=str,
help="Path to directory that will store pb model.", default='')
return parser.parse_args() | 6d2ccb8c871af5e4c9616690c80b566c49424130 | 14,199 |