content (string, 35 to 762k chars) | sha1 (string, 40 chars) | id (int64, 0 to 3.66M) |
---|---|---|
import MySQLdb as mysql  # assumed driver; any MySQLdb-compatible module (e.g. pymysql) imported as `mysql` fits this call signature
def create_connection(host, username, password):
""" create a database connection to the SQLite database
specified by db_file
:return: Connection object or None
"""
try:
conn = mysql.connect(host=host, # your host, usually db-guenette_neutrinos.rc.fas.harvard.edu
user=username, # your username
passwd=password, # your password
db='guenette_neutrinos') # name of the data base
# autocommit=False) # Prevent automatic commits
return conn
except mysql.Error as e:
print(e)
return None | 09c540115ce788d1f5fd09d789327ac6951cb9a2 | 7,600 |
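# A minimal usage sketch for create_connection, assuming `mysql` is a MySQLdb-compatible
# driver imported under that name; the host and credentials below are placeholders.
conn = create_connection(host="db.example.com", username="reader", password="secret")
if conn is not None:
    cur = conn.cursor()
    cur.execute("SELECT 1")   # simple connectivity check
    print(cur.fetchone())
    conn.close()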
def Dadjust(profile_ref, profile_sim, diffsys, ph, pp=True, deltaD=None, r=0.02):
"""
Adjust diffusion coefficient fitting function by comparing simulated
profile against reference profile. The purpose is to let simulated
diffusion profile be similar to reference profile.
Parameters
----------
profile_ref : DiffProfile
Reference diffusion profile
profile_sim : DiffProfile
Simulated diffusion profile
diffsys : DiffSystem
Diffusion system
ph : int
Phase # to be adjusted, 0 <= ph <= diffsys.Np-1
Xp : 1d-array
Reference composition to adjust their corresponding diffusivities.
If provided, spline function Dfunc must be determined by [Xp, Dp]
alone, where Dp = exp(Dfunc(Xp)).
pp : bool, optional
Point Mode (True) or Phase Mode (False). Point Mode
adjusts each Dp at Xp by itself. In Phase Mode, all Dp are
adjusted by the same rate, i.e. the diffusivity curve shape won't
change.
deltaD: float, optional
Only useful at Phase Mode. deltaD gives the rate to change
diffusion coefficients DC. DC = DC * 10^deltaD
r : float, optional
Only useful at Phase Mode, default = 0.02, 0 < r < 1. r gives the
range to calculate the concentration gradient around X, [X-r, X+r].
"""
dref, Xref, Ifref = profile_ref.dis, profile_ref.X, profile_ref.If
dsim, Xsim, Ifsim = profile_sim.dis, profile_sim.X, profile_sim.If
if ph >= diffsys.Np:
raise ValueError('Incorrect phase #, 0 <= ph <= %i' % (diffsys.Np-1))
if pp and 'Xspl' not in dir(diffsys):
raise ValueError('diffsys must have Xspl properties in per-point mode')
Dfunc, Xr, Np = diffsys.Dfunc[ph], diffsys.Xr[ph], diffsys.Np
rate = 1
# If there is phase consumed, increase adjustment rate
if len(Ifref) != len(Ifsim):
print('Phase consumed found, increase adjustment rate')
rate = 2
if Xr[1] > Xr[0]:
idref = np.where((Xref >= Xr[0]) & (Xref <= Xr[1]))[0]
idsim = np.where((Xsim >= Xr[0]) & (Xsim <= Xr[1]))[0]
else:
idref = np.where((Xref <= Xr[0]) & (Xref >= Xr[1]))[0]
idsim = np.where((Xsim <= Xr[0]) & (Xsim >= Xr[1]))[0]
if 'Xspl' in dir(diffsys):
Xp = diffsys.Xspl[ph]
else:
Xp = np.linspace(Xr[0], Xr[1], 30)
Dp = np.exp(splev(Xp, Dfunc))
# If this is a consumed phase, increase DC by a factor of 2 or 10^deltaD
if len(idsim) == 0:
if deltaD is None:
return Dfunc_spl(Xp, Dp*2)
else:
return Dfunc_spl(Xp, Dp*10**deltaD)
dref, Xref = dref[idref], Xref[idref]
dsim, Xsim = dsim[idsim], Xsim[idsim]
# Per phase adjustment
if not pp:
if deltaD is not None:
return Dfunc_spl(Xp, Dp*10**deltaD)
# Calculate deltaD by phase width
# For the first or last phase, data close to the end limits are not considered
fdis_ref = disfunc(dref, Xref)
fdis_sim = disfunc(dsim, Xsim)
X1, X2 = Xr[0], Xr[1]
if ph == 0:
X1 = Xr[0]*0.9 + Xr[1]*0.1
if ph == Np-1:
X2 = Xr[0]*0.1 + Xr[1]*0.9
ref = splev([X1, X2], fdis_ref)
sim = splev([X1, X2], fdis_sim)
wref = ref[1]-ref[0]
wsim = sim[1]-sim[0]
Dp *= np.sqrt(wref/wsim)
return Dfunc_spl(Xp, Dp)
# Point Mode adjustment
for i in range(len(Xp)):
# X1, X2 is the lower, upper bound to collect profile data
# X1, X2 cannot exceed phase bound Xr
if Xr[0] < Xr[1]:
X1, X2 = max(Xp[i]-r, Xr[0]), min(Xp[i]+r, Xr[1])
else:
X1, X2 = max(Xp[i]-r, Xr[1]), min(Xp[i]+r, Xr[0])
# Calculate the gradient inside [X1, X2] by linear fitting
fdis_ref = disfunc(dref, Xref)
fdis_sim = disfunc(dsim, Xsim)
Xf = np.linspace(X1, X2, 10)
pref = np.polyfit(splev(Xf, fdis_ref), Xf, 1)[0]
psim = np.polyfit(splev(Xf, fdis_sim), Xf, 1)[0]
# Adjust DC by gradient difference
Dp[i] *= (psim/pref)**rate
return Dfunc_spl(Xp, Dp) | d8b13e8d785a31219197936a9bd7b5d275f23351 | 7,601 |
def setup_test():
"""setup test"""
def create_test_tables(db):
"""create test tables"""
db("""
create table if not exists person (
id integer PRIMARY KEY AUTOINCREMENT,
name varchar(100),
age integer,
kids integer,
salary decimal(10,2),
birthdate date
)
""")
def delete_test_tables(db):
"""drop test tables"""
db('drop table if exists person')
db = zoom.database.database('sqlite3', ':memory:')
delete_test_tables(db)
create_test_tables(db)
return db | 539ca396ba3098e79ec5064ccde7245d91106ef2 | 7,602 |
def compute_levenshtein_blocks(seq1, seq2, max_complexity=1e8):
"""Compute the Levenshtein blocks of insertion, deletion, replacement.
"""
# TODO: better method for dealing with long sequences?
l1, l2 = len(seq1), len(seq2)
if l1 * l2 > max_complexity:
return [("change", (0, l1), (0, l2))]
def block_format(op, s1, e1, s2, e2):
if op == "delete":
return (op, (s1, e1 + 1), (s2, e2))
if op == "insert":
return (op, (s1, e1), (s2, e2 + 1))
else:
return (op, (s1, e1 + 1), (s2, e2 + 1))
edits = Levenshtein.editops(seq1, seq2)
if len(edits) == 0:
return []
bop, s1, s2 = edits[0]
e1, e2 = s1, s2
blocks = []
for (op, _e1, _e2) in edits[1:]:
continuity = any(
[
all([op == "delete", _e1 == e1 + 1, e2 == _e2]),
all([op == "insert", _e1 == e1, _e2 == e2 + 1]),
all([op == "replace", _e1 == e1 + 1, _e2 == e2 + 1]),
]
)
if op == bop and continuity:
e1, e2 = _e1, _e2
else:
blocks.append(block_format(bop, s1, e1, s2, e2))
bop, s1, s2 = op, _e1, _e2
e1, e2 = s1, s2
blocks.append(block_format(bop, s1, e1, s2, e2))
return blocks | 78b12b0cdffdf42403ace31d33d30c1e0a0e23d4 | 7,603 |
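# Usage sketch for compute_levenshtein_blocks, assuming `Levenshtein` is the
# python-Levenshtein package (its editops(a, b) yields (op, src_pos, dst_pos)
# tuples as consumed above); the exact blocks depend on that edit script.
blocks = compute_levenshtein_blocks("kitten", "sitting")
for op, (s1, e1), (s2, e2) in blocks:
    print(op, repr("kitten"[s1:e1]), "->", repr("sitting"[s2:e2]))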
def mapdict_values(function, dic):
"""
Apply a function to a dictionary's values,
creating a new dictionary with the same keys
and new values created by applying the function
to the old ones.
:param function: A function that takes the dictionary value as argument
:param dic: A dictionary
:return: A new dictionary with the same keys and transformed values
Example:
>>> dic1 = { 'a' : 10, 'b' : 20, 'c' : 30 }
>>> mapdict_values(lambda x: x*2, dic1)
{'a': 20, 'b': 40, 'c': 60}
>>> dic1
{'a': 10, 'b': 20, 'c': 30}
"""
return dict(map(lambda x: (x[0], function(x[1])), dic.items())) | 03abbe7d7ec32d70ad0d4729913037f2199e977c | 7,604 |
from typing import Optional
async def callback(
request: Request,
code: Optional[str] = None,
error: Optional[str] = Query(None),
db: AsyncSession = Depends(get_db),
):
"""
Complete the OAuth2 login flow
"""
client = get_discord_client()
with start_span(op="oauth"):
with start_span(op="oauth.authorization_token"):
# Get the authorization token
if code:
token = await client.authorize_access_token(request)
else:
return RedirectResponse(URL("/login").include_query_params(error=error))
with start_span(op="oauth.user_info"):
# Get the user's info
client.token = token
user_info = await client.userinfo(token=token)
user_id = int(user_info.get("id"))
with start_span(op="permissions"):
with start_span(op="permissions.access"):
# Get the user's role ids
roles = list(map(lambda r: r.id, await get_user_roles(user_id)))
# Determine if the user has panel access
if (await CONFIG.panel_access_role()) not in roles:
return RedirectResponse("/login?error=unauthorized")
with start_span(op="permissions.admin"):
# Get all the user's guilds
async with ClientSession() as session:
async with session.get(
"https://discord.com/api/v8/users/@me/guilds",
headers={"Authorization": f"Bearer {token['access_token']}"},
) as response:
guilds = await response.json()
# Determine if the user has admin access
is_owner = any(
map(
lambda g: g.get("id") == str(SETTINGS.discord_guild_id)
and g.get("owner"),
guilds,
)
)
is_admin = (await CONFIG.management_role()) in roles or is_owner
# Save the user's info to the database
user = User(
id=user_id,
username=user_info["username"],
avatar=user_info["picture"],
is_admin=is_admin,
)
# Insert and ignore failures
try:
db.add(user)
await db.commit()
except IntegrityError:
pass
# Store the info in the session
request.session["logged_in"] = True
request.session["user"] = dict(user_info)
request.session["is_admin"] = is_admin
request.session["expiration"] = dict(token).get("expires_at")
return RedirectResponse("/login/complete") | f7d76c385360f6d2113cd7fb470344c1e7c96027 | 7,605 |
def align_centroids(config, ref):
"""Align centroids"""
diff_centroids = np.round(ref.mean(axis=0) - config.mean(axis=0))
# diff_centroids = np.round(diff_centroids).astype(int)
config = config + diff_centroids
return config | cd579a911cb4ae59aa274836de156620305e592a | 7,606 |
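# Small numpy example for align_centroids: the configuration is shifted so that
# its (rounded) centroid coincides with the reference centroid.
import numpy as np
ref = np.array([[10.0, 10.0], [12.0, 14.0]])   # centroid (11, 12)
config = np.array([[0.0, 0.0], [2.0, 4.0]])    # centroid (1, 2)
print(align_centroids(config, ref))            # every row shifted by (+10, +10)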
def _make_headers_df(headers_response):
"""
Parses the headers portion of the watson response and creates the header dataframe.
:param headers_response: the ``row_header`` or ``column_header`` array as returned
from the Watson response,
:return: the completed header dataframe
"""
headers_df = util.make_dataframe(headers_response)
headers_df = headers_df[
["text", "column_index_begin", "column_index_end", "row_index_begin", "row_index_end", "cell_id",
"text_normalized"]]
return headers_df | 621d46da0de2056ac98747a51f2ac2cbfdd52e5e | 7,607 |
def getMemInfo() -> CmdOutput:
"""Returns the RAM size in bytes.
Returns:
CmdOutput: The output of the command, as a `CmdOutput` instance containing
`stdout` and `stderr` as attributes.
"""
return runCommand(exe_args=ExeArgs("wmic", ["memorychip", "get", "capacity"])) | c57312d83182349e0847d0eb49606c401a3a0d27 | 7,608 |
def svn_swig_py_make_editor(*args):
"""svn_swig_py_make_editor(PyObject * py_editor, apr_pool_t pool)"""
return _delta.svn_swig_py_make_editor(*args) | 2041342a1bef3ea0addb004e1bd4539c58445c66 | 7,609 |
def register_confirm(request, activation_key):
"""finish confirmation and active the account
Args:
request: the http request
activation_key: the activation key
Returns:
Http redirect to successful page
"""
user_safety = get_object_or_404(UserSafety, activation_key=activation_key)
if user_safety.user.is_confirmed:
return HttpResponseRedirect('/home/project')
if user_safety.key_expires < timezone.now():
return render_to_response('accounts/confirmExpires.html')
user = user_safety.user
user.is_confirmed = True
user.save()
return render_to_response('accounts/confirmed.html') | c677f246ff3088d58912bc136f1d2461f58ba10b | 7,610 |
def get_best_z_index(classifications):
"""Get optimal z index based on quality classifications
Ties are broken using the index nearest to the center of the sequence
of all possible z indexes
"""
nz = len(classifications)
best_score = np.min(classifications)
top_z = np.argwhere(np.array(classifications) == best_score).ravel()
return top_z[np.argmin(np.abs(top_z - (nz // 2)))] | 90b10dda47c071a3989a9de87061694270e67d69 | 7,611 |
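# Worked example for get_best_z_index: the best (lowest) score 1 occurs at
# z indexes 1 and 2, and index 2 is closer to the sequence centre (5 // 2 == 2).
print(get_best_z_index([3, 1, 1, 2, 4]))  # -> 2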
import glob
def mean_z_available():
"""docstring for mean_z_available"""
if glob.glob("annual_mean_z.nc"):
return True
return False | d53f8dc6fe540e8f74fd00760d1c810e510e53b8 | 7,612 |
import time
def wait_for_url(monitor_url, status_code=None, timeout=None):
"""Blocks until the URL is availale"""
if not timeout:
timeout = URL_TIMEOUT
end_time = time.time() + timeout
while (end_time - time.time()) > 0:
if is_url(monitor_url, status_code):
return True
time.sleep(1)
LOG.error('URL %s could not be reached after %s seconds',
monitor_url, timeout)
return False | 7d7ca1fd51d4415c58ab3928bd163401fb548b9a | 7,613 |
import requests
import io
import tarfile
def sources_from_arxiv(eprint):
"""
Download sources on arXiv for a given preprint.
:param eprint: The arXiv id (e.g. ``1401.2910`` or ``1401.2910v1``).
:returns: A ``TarFile`` object of the sources of the arXiv preprint.
"""
r = requests.get("http://arxiv.org/e-print/%s" % (eprint,))
file_object = io.BytesIO(r.content)
return tarfile.open(fileobj=file_object) | b26c46009b23c5a107d6303b567ab97492f91ad9 | 7,614 |
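# Usage sketch for sources_from_arxiv; note this performs a live HTTP request to
# arxiv.org, and the preprint id is the one quoted in the docstring above.
tar = sources_from_arxiv("1401.2910")
print(tar.getnames())  # list the files contained in the source tarball
tar.close()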
import subprocess
import os
import xml.etree.ElementTree as ET
def nvidia_smi_gpu_memused(): # pragma: no cover
"""Returns the GPU memory used by the process.
(tested locally; cannot be tested on Travis CI because no GPU is available)
Returns
-------
int
[MiB]
"""
# if theano.config.device=='cpu': return -2
try:
xml = subprocess.Popen(['nvidia-smi', '-q', '-x'], stdout=subprocess.PIPE).communicate()[0]
root = ET.fromstring(xml)
for gpu in root.findall('gpu'):
for proc in gpu.find('processes').findall('process_info'):
if int(proc.find('pid').text) == os.getpid():
return int(proc.find('used_memory').text.split(' ')[0])
except:
return -1
return -1 | 147fb1c537e00ca88f567a4ab0701bafd8624e0d | 7,615 |
def render():
"""
This method renders the HTML page, including the online status and the 30 most recent database entries.
:return:
"""
online = isonline()
return render_template("index.html", news=News.query.order_by(News.id.desc()).limit(30), online=online) | 4b0584d33fb84f05afbbcfe016d7428c4f75a4d3 | 7,616 |
import aiohttp
async def execute_request(url):
"""Method to execute a http request asynchronously
"""
async with aiohttp.ClientSession() as session:
json = await fetch(session, url)
return json | 1845fed4acce963a0bc1bb780cdea16ba9dec394 | 7,617 |
from typing import List
def game_over(remaining_words: List[str]) -> bool:
"""Return True iff remaining_words is empty.
>>> game_over(['dan', 'paul'])
False
>>> game_over([])
True
"""
return remaining_words == [] | 8d29ef06bd5d60082646cef00f77bbabfbac32eb | 7,618 |
import csv
def read_manifest(instream):
"""Read manifest file into a dictionary
Parameters
----------
instream : readable file like object
"""
reader = csv.reader(instream, delimiter="\t")
header = None
metadata = {}
for row in reader:
if header is None:
header = row
else:
metadata[row[0]] = row[1]
return metadata | afa6c2bb0a9d81267b1d930026a229be924a1994 | 7,619 |
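# Quick read_manifest check with an in-memory TSV: the first row is treated as
# the header and every later row becomes a key/value pair.
import io
manifest = io.StringIO("name\tvalue\nsample\tS1\nlane\t3\n")
print(read_manifest(manifest))  # {'sample': 'S1', 'lane': '3'}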
def get_backbone_from_model(model:Model, key_chain:list) -> nn.Cell:
"""Obtain the backbone from a wrapped mindspore Model using the
key chain provided.
Args:
model(Model): A Model instance with wrapped network and loss.
key_chain(list[str]): the keys, in the right order, according
to which we can get the backbone.
Returns:
The desired backbone(nn.Cell)."""
network = model.train_network
# if network is a WithLossCell
if getattr(model, '_loss_fn') is None:
assert hasattr(network, '_net')
network = getattr(network, '_net')
for key in key_chain:
assert hasattr(network, key), f'network has no attr named {key}'
network = getattr(network, key)
return network | 0ddabf30c50e9d58ce18b0010107d92f8518b9bc | 7,620 |
def dv_upper_lower_bound(f):
"""
Donsker-Varadhan lower bound, but upper bounded by using log outside.
Similar to MINE, but did not involve the term for moving averages.
"""
first_term = f.diag().mean()
second_term = logmeanexp_nodiag(f)
return first_term - second_term | a7f9a3910a934f836204c5c47d9139be31860ec1 | 7,621 |
def create_training_files_for_document(
file_name,
key_field_names,
ground_truth_df,
ocr_data,
pass_number):
"""
Create the ocr.json file and the label file for a document
:param file_name: just the document name.ext
:param key_field_names: names of the key fields to extract
:param ground_truth_df: ground truth values to locate in the form
:param ocr_data: previously OCR'd form
:param pass_number: are we processing word level or both word and line level
"""
extraction_file_name = file_name[:-4] + '.ocr.json'
# Now we go and reverse search the form for the Ground Truth values
key_field_data = find_anchor_keys_in_form(
df_gt=ground_truth_df,
filename=extraction_file_name,
data=ocr_data,
anchor_keys=key_field_names,
pass_number=pass_number)
print(f"key_field_data {len(key_field_data)} {key_field_data} {file_name}")
label_file, unique_fields_extracted = create_label_file(
file_name,
key_field_names,
key_field_data[extraction_file_name]
)
return ocr_data, label_file, unique_fields_extracted | 4832e28904f2c950ceb5526eaa8ab61568c55a8c | 7,622 |
def incoming(ui, repo, source="default", **opts):
"""show new changesets found in source
Show new changesets found in the specified path/URL or the default
pull location. These are the changesets that would have been pulled
if a pull at the time you issued this command.
See pull for valid source format details.
.. container:: verbose
With -B/--bookmarks, the result of bookmark comparison between
local and remote repositories is displayed. With -v/--verbose,
status is also displayed for each bookmark like below::
BM1 01234567890a added
BM2 1234567890ab advanced
BM3 234567890abc diverged
BM4 34567890abcd changed
The action taken locally when pulling depends on the
status of each bookmark:
:``added``: pull will create it
:``advanced``: pull will update it
:``diverged``: pull will create a divergent bookmark
:``changed``: result depends on remote changesets
From the point of view of pulling behavior, bookmark
existing only in the remote repository are treated as ``added``,
even if it is in fact locally deleted.
.. container:: verbose
For remote repository, using --bundle avoids downloading the
changesets twice if the incoming is followed by a pull.
Examples:
- show incoming changes with patches and full description::
hg incoming -vp
- show incoming changes excluding merges, store a bundle::
hg in -vpM --bundle incoming.hg
hg pull incoming.hg
- briefly list changes inside a bundle::
hg in changes.hg -T "{desc|firstline}\\n"
Returns 0 if there are incoming changes, 1 otherwise.
"""
if opts.get('graph'):
cmdutil.checkunsupportedgraphflags([], opts)
def display(other, chlist, displayer):
revdag = cmdutil.graphrevs(other, chlist, opts)
showparents = [ctx.node() for ctx in repo[None].parents()]
cmdutil.displaygraph(ui, revdag, displayer, showparents,
graphmod.asciiedges)
hg._incoming(display, lambda: 1, ui, repo, source, opts, buffered=True)
return 0
if opts.get('bundle') and opts.get('subrepos'):
raise util.Abort(_('cannot combine --bundle and --subrepos'))
if opts.get('bookmarks'):
source, branches = hg.parseurl(ui.expandpath(source),
opts.get('branch'))
other = hg.peer(repo, opts, source)
if 'bookmarks' not in other.listkeys('namespaces'):
ui.warn(_("remote doesn't support bookmarks\n"))
return 0
ui.status(_('comparing with %s\n') % util.hidepassword(source))
return bookmarks.incoming(ui, repo, other)
repo._subtoppath = ui.expandpath(source)
try:
return hg.incoming(ui, repo, source, opts)
finally:
del repo._subtoppath | 9bf41cdc4de5c82634fae038940951ad738fd636 | 7,623 |
import time
def timeout(limit=5):
"""
Timeout
This decorator raises a TimeoutError when the wrapped function's
execution time exceeds the given limit (checked after the call returns).
"""
@decorator
def _timeout(func, *args, **kwargs):
start = time.time()
result = func(*args, **kwargs)
duration = time.time() - start
if duration > limit:
msg = f"Function {func.__name__} exceeded timeout limit ({limit} seconds)"
raise TimeoutError(msg)
return result
return _timeout | c68fee9530512ce1603ec7976f4f1278205b1f92 | 7,624 |
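# Usage sketch for the timeout decorator, assuming `decorator` comes from the
# third-party `decorator` package (it turns _timeout into a signature-preserving
# wrapper). The limit is checked after the call returns, so a slow call still
# runs to completion before TimeoutError is raised.
@timeout(limit=1)
def slow():
    time.sleep(2)
    return "done"

# slow()  # would raise TimeoutError once the two-second sleep finishes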
from typing import Union
def OIII4363_flux_limit(combine_flux_file: str, verbose: bool = False,
log: Logger = log_stdout()) -> \
Union[None, np.ndarray]:
"""
Determine 3-sigma limit on [OIII]4363 based on H-gamma measurements
:param combine_flux_file: Filename of ASCII file containing emission-line
flux measurements
:param verbose: Write verbose message to stdout. Default: file only
:param log: logging.Logger object
:return: Array containing 3-sigma flux limit
"""
log_verbose(log, "starting ...", verbose=verbose)
try:
combine_fits = asc.read(combine_flux_file)
except FileNotFoundError:
log.warning(f"File not found! {combine_flux_file}")
return
Hgamma = combine_fits['HGAMMA_Flux_Gaussian'].data
Hgamma_SN = combine_fits['HGAMMA_S/N'].data
flux_limit = (Hgamma / Hgamma_SN) * 3
log_verbose(log, "finished.", verbose=verbose)
return flux_limit | 109f887693df16661d7766840b0026f7e9bca82d | 7,625 |
import numpy
def convert_units_co2(ds,old_data,old_units,new_units):
"""
Purpose:
General purpose routine to convert from one set of CO2 concentration units
to another.
Conversions supported are:
umol/m2/s to gC/m2 (per time step)
gC/m2 (per time step) to umol/m2/s
mg/m3 to umol/mol
mgCO2/m3 to umol/mol
umol/mol to mg/m3
mg/m2/s to umol/m2/s
mgCO2/m2/s to umol/m2/s
Usage:
new_data = qcutils.convert_units_co2(ds,old_data,old_units,new_units)
where ds is a data structure
old_data (numpy array) is the data to be converted
old_units (string) is the old units
new_units (string) is the new units
Author: PRI
Date: January 2016
"""
ts = int(ds.globalattributes["time_step"])
if old_units=="umol/m2/s" and new_units=="gC/m2":
new_data = old_data*12.01*ts*60/1E6
elif old_units=="gC/m2" and new_units=="umol/m2/s":
new_data = old_data*1E6/(12.01*ts*60)
elif old_units in ["mg/m3","mgCO2/m3"] and new_units=="umol/mol":
Ta,f,a = GetSeriesasMA(ds,"Ta")
ps,f,a = GetSeriesasMA(ds,"ps")
new_data = mf.co2_ppmfrommgpm3(old_data,Ta,ps)
elif old_units=="umol/mol" and new_units in ["mg/m3","mgCO2/m3"]:
Ta,f,a = GetSeriesasMA(ds,"Ta")
ps,f,a = GetSeriesasMA(ds,"ps")
new_data = mf.co2_mgpm3fromppm(old_data,Ta,ps)
elif old_units in ["mg/m2/s","mgCO2/m2/s"] and new_units=="umol/m2/s":
new_data = mf.Fc_umolpm2psfrommgpm2ps(old_data)
else:
msg = " Unrecognised conversion from "+old_units+" to "+new_units
log.error(msg)
new_data = numpy.ma.array(old_data,copy=True,mask=True)
return new_data | 38ce2987bfa4c5505fe64779ce752617862138fd | 7,626 |
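# Worked example for the umol/m2/s -> gC/m2 branch, assuming a 30 minute time
# step (ts = 30): new_data = old_data * 12.01 * 30 * 60 / 1E6, so a flux of
# 10 umol/m2/s becomes 10 * 12.01 * 1800 / 1E6 = 0.21618 gC/m2 per time step.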
def query_urlhaus(session, provided_ioc, ioc_type):
""" """
uri_dir = ioc_type
if ioc_type in ["md5_hash", "sha256_hash"]:
uri_dir = "payload"
api = "https://urlhaus-api.abuse.ch/v1/{}/"
resp = session.post(api.format(uri_dir), timeout=180, data={ioc_type: provided_ioc})
ioc_dicts = []
if resp.status_code == 200 and resp.text != "":
resp_content = resp.json()
if ioc_type == "host":
if "urls" not in resp_content.keys() or len(resp_content["urls"]) == 0:
ioc_dicts.append({"no data": provided_ioc})
return ioc_dicts
for url in resp_content["urls"]:
ioc_dict = {
"provided_ioc": provided_ioc,
"host": resp_content.get("host", None),
"firstseen (host)": resp_content.get("firstseen", None),
"urlhaus_reference (host)": resp_content.get("urlhaus_reference", None),
"url": url.get("url", None),
"url_status": url.get("url_status", None),
"date_added (url)": url.get("date_added", None),
"urlhaus_reference (url)": url.get("urlhaus_reference", None)
}
if url["tags"] != None:
ioc_dict.update({
"tags (url)": ",".join(url.get("tags", None))
})
ioc_dicts.append(ioc_dict)
elif ioc_type == "url":
if "payloads" not in resp_content.keys() or len(resp_content["payloads"]) == 0:
ioc_dicts.append({"invalid": provided_ioc})
return ioc_dicts
for payload in resp_content["payloads"]:
ioc_dict = {
"provided_ioc": provided_ioc,
"host": resp_content.get("host", None),
"url": resp_content.get("url", None),
"url_status": resp_content.get("url_status", None),
"date_added (url)": resp_content.get("date_added", None),
"urlhaus_reference (url)": resp_content.get("urlhaus_reference", None),
"filename (payload)": payload.get("filename", None),
"content_type (payload)": payload.get("content_type", None),
"response_size (payload)": payload.get("response_size", None),
"md5_hash (payload)": payload.get("response_md5", None),
"sha256_hash (payload)": payload.get("response_sha256", None),
"firstseen (payload)": payload.get("firstseen", None),
"signature (payload)": payload.get("signature", None)
}
if resp_content["tags"] != None:
ioc_dict.update({
"tags (url)": ",".join(resp_content.get("tags", None))
})
if payload["virustotal"] != None:
ioc_dict.update({
"vt_result (payload)": payload["virustotal"].get("result", None),
"vt_link (payload)": payload["virustotal"].get("link", None)
})
ioc_dicts.append(ioc_dict)
elif ioc_type in ["md5_hash", "sha256_hash"]:
if len(resp_content["urls"]) == 0:
ioc_dicts.append({"invalid": provided_ioc})
return ioc_dicts
for url in resp_content["urls"]:
ioc_dict = {
"provided_ioc": provided_ioc,
"content_type (payload)": resp_content.get("content_type", None),
"file_size (payload)": resp_content.get("file_size", None),
"md5_hash (payload)": resp_content.get("md5_hash", None),
"sha256_hash (payload)": resp_content.get("sha256_hash", None),
"firstseen (payload)": resp_content.get("firstseen", None),
"lastseen (payload)": resp_content.get("lastseen", None),
"signature (payload)": resp_content.get("signature", None),
"url": url.get("url", None),
"url_status": url.get("url_status", None),
"filename (url)": url.get("filename", None),
"firstseen (url)": url.get("firstseen", None),
"lastseen (url)": url.get("lastseen", None),
"urlhaus_reference (url)": url.get("urlhaus_reference", None)
}
if resp_content["virustotal"] != None:
ioc_dict.update({
"vt_result (payload)": resp_content["virustotal"].get("result", None),
"vt_link (payload)": resp_content["virustotal"].get("link", None)
})
ioc_dicts.append(ioc_dict)
return ioc_dicts
return [{"invalid": provided_ioc}] | 171bff1e9b1bfdf8ac6b91a4bbbd7226f80c8c4c | 7,627 |
def arrow_to_json(data):
"""
Convert an arrow FileBuffer into a row-wise json format.
Go via pandas (To be revisited!!)
"""
reader = pa.ipc.open_file(data)
try:
frame = reader.read_pandas()
return frame.to_json(orient='records')
except:
raise DataStoreException("Unable to convert to JSON") | d49ee49b7071d0b857feeb878c99ce65e82460e9 | 7,628 |
import pathlib
def get_wmc_pathname(subject_id, bundle_string):
"""Generate a valid pathname of a WMC file given subject_id and
bundle_string (to resolve ACT vs noACT).
The WMC file contains the bundle labels for each streamline of the
corresponding tractogram.
"""
global datadir
ACT_string = 'ACT'
if bundle_string in noACT_list:
ACT_string = 'noACT'
try:
pathname = next(pathlib.Path(f'{datadir}/sub-{subject_id}/').glob(f'dt-neuro-wmc.tag-{ACT_string}.id-*/classification.mat'))
return pathname
except StopIteration:
print('WMC file not available!')
raise FileNotFoundError | fcc570e3e59b99b94de95dc4f15c1fee2fe0f0f2 | 7,629 |
def _union_polygons(polygons, precision = 1e-4, max_points = 4000):
""" Performs the union of all polygons within a PolygonSet or list of
polygons.
Parameters
----------
polygons : PolygonSet or list of polygons
A set containing the input polygons.
precision : float
Desired precision for rounding vertex coordinates.
max_points : int
The maximum number of vertices within the resulting polygon.
Returns
-------
unioned : polygon
The result of the union of all the polygons within the input
PolygonSet.
"""
polygons = _merge_floating_point_errors(polygons, tol = precision/1000)
unioned = gdspy.boolean(polygons, [], operation = 'or',
precision = precision, max_points = max_points)
return unioned | f6951a67a2ed4099b5321b98517810de43024036 | 7,630 |
from typing import Callable, Optional, TypeVar
T = TypeVar("T")
def parse_or_none(
field: str,
field_name: str,
none_value: str,
fn: Callable[[str, str], T],
) -> Optional[T]:
""" If the value is the same as the none value, will return None.
Otherwise will attempt to run the fn with field and field name as the
first and 2nd arguments.
"""
if field == none_value:
return None
try:
val = fn(field, field_name)
except LineParseError as e:
msg = e.message + (
f"\nThe value may also be '{none_value}', which will be"
"interpreted as None."
)
raise LineParseError(msg)
return val | 4a0c2d8ec819fe6b8a9a24a60f54c62cb83e68ac | 7,631 |
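# Minimal parse_or_none sketch; the parser below is a stand-in callable that
# ignores the field name, and "NA" plays the role of the none value.
parse_int = lambda field, field_name: int(field)
print(parse_or_none("42", "age", "NA", parse_int))  # -> 42
print(parse_or_none("NA", "age", "NA", parse_int))  # -> None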
def get_lattice_parameter(elements, concentrations, default_title):
"""Finds the lattice parameters for the provided atomic species using Vagars law.
:arg elements: A dictionary of elements in the system and their concentrations.
:arg title: The default system title.
:arg concentrations: The concentrations of each element.
"""
if elements == None:
lat_param = 1.0
title = default_title
else:
if len(elements) != len(concentrations):
raise ValueError("You have provided {} element names when {} elements are present "
"in the system. Please provide the correct number of elements."
.format(len(elements),len(concentrations)))
else:
title = ""
lat_param = 0
for i in range(len(elements)):
lat_param += concentrations[i]*all_elements[elements[i]]
if concentrations[i] > 0:
title += " {} ".format(elements[i])
lat_param = float(lat_param) / sum(concentrations)
title = "{0} {1}\n".format(default_title.strip(),title)
return lat_param, title | 34e914e38b8c4d25d9ed5fd09f435d7358f99a99 | 7,632 |
import string
def tokenize(text):
"""
Tokenizes, normalizes, and lemmatizes a given text.
Input:
text: text string
Output:
- array of lemmatized and normalized tokens
"""
def is_noun(tag):
return tag in ['NN', 'NNS', 'NNP', 'NNPS']
def is_verb(tag):
return tag in ['VB', 'VBD', 'VBG', 'VBN', 'VBP', 'VBZ']
def is_adverb(tag):
return tag in ['RB', 'RBR', 'RBS']
def is_adjective(tag):
return tag in ['JJ', 'JJR', 'JJS']
def penn_to_wn(tag):
if is_adjective(tag):
return wn.ADJ
elif is_noun(tag):
return wn.NOUN
elif is_adverb(tag):
return wn.ADV
elif is_verb(tag):
return wn.VERB
return wn.NOUN
tokens = word_tokenize(text.lower()) # split words into tokens and turn them into lower case
tokens = [w for w in tokens if (w not in stopwords.words("english") and w not in string.punctuation)] # remove stopwords and punctuation
tagged_words = pos_tag(tokens) #tag the tokens
lemmed = [WordNetLemmatizer().lemmatize(w.lower(), pos=penn_to_wn(tag)) for (w,tag) in tagged_words] #lemmatize the tagged words
if len(lemmed) == 0: #no lemmatized word should have zero length
return ["error"]
return lemmed | 672af73d594c7a134226f4ae9a265f19b14ced34 | 7,633 |
def bandpass_filterbank(bands, fs=1.0, order=8, output="sos"):
"""
Create a bank of Butterworth bandpass filters
Parameters
----------
bands: array_like, shape == (n, 2)
The list of bands ``[[flo1, fup1], [flo2, fup2], ...]``
fs: float, optional
Sampling frequency (default 1.)
order: int, optional
The order of the IIR filters (default: 8)
output: {'ba', 'zpk', 'sos'}
Type of output: numerator/denominator ('ba'), pole-zero ('zpk'), or
second-order sections ('sos'). Default is 'sos'.
Returns
-------
b, a : ndarray, ndarray
Numerator (b) and denominator (a) polynomials of the IIR filter. Only
returned if output='ba'.
z, p, k : ndarray, ndarray, float
Zeros, poles, and system gain of the IIR filter transfer function. Only
returned if output='zpk'.
sos : ndarray
Second-order sections representation of the IIR filter. Only returned
if output=='sos'.
"""
filters = []
nyquist = fs / 2.0
for band in bands:
# reject bands that start at or above the Nyquist frequency
if band[0] >= nyquist:
raise ValueError("Bands should be below Nyquist frequency")
# Truncate the highest band to Nyquist frequency
norm_band = np.minimum(0.99, np.array(band) / nyquist)
# Compute coefficients
coeffs = butter(order / 2, norm_band, "bandpass", output=output)
filters.append(coeffs)
return filters | 4cbe3acb30a0f08d39e28b46db520fdac420010d | 7,634 |
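# Usage sketch for bandpass_filterbank with scipy available: build two bands at
# fs = 16 kHz and apply the first second-order-sections filter to white noise.
import numpy as np
from scipy.signal import sosfilt
sos_filters = bandpass_filterbank([[250, 500], [500, 1000]], fs=16000, order=8)
x = np.random.randn(16000)
y = sosfilt(sos_filters[0], x)  # signal band-limited to roughly 250-500 Hz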
def get_couch_client(https: bool = False,
host: str = 'localhost',
port: int = 5984,
request_adapter: BaseHttpClient = HttpxCouchClient,
**kwargs) -> CouchClient:
"""
Initialize CouchClient
Parameters
----------
https: bool = False
Schema type. Use https if value is True
host: str = 'localhost'
CouchDB host
port: int = 5984
CouchDB port
request_adapter: BaseHttpClient = HttpxCouchClient
Http client adapter
Returns
-------
CouchClient
CouchDB API realisation
"""
schema = 'http'
if https:
schema += 's'
url = f'{schema}://{host}'
if port:
url += f':{port}'
http_client = request_adapter.get_client(url, **kwargs)
return CouchClient(http_client=http_client) | db242556c11debc9dff57929182d3e6932ef13d1 | 7,635 |
def compute_rmse(loss_mse):
"""
Computes the root mean squared error.
Args:
loss_mse: numeric value of the mean squared error loss
Returns:
loss_rmse: numeric value of the root mean squared error loss
"""
return np.sqrt(2 * loss_mse) | a81024cd402c00b0d6f3bfaccc089695fb5f4e0a | 7,636 |
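# Worked example: with the 1/2-scaled MSE convention used here, a loss of
# mse = 0.08 gives rmse = sqrt(2 * 0.08) = 0.4.
print(compute_rmse(0.08))  # -> 0.4000...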
def __detect_geometric_decomposition(pet: PETGraphX, root: CUNode) -> bool:
"""Detects geometric decomposition pattern
:param pet: PET graph
:param root: root node
:return: true if GD pattern was discovered
"""
for child in pet.subtree_of_type(root, NodeType.LOOP):
if not (child.reduction or child.do_all):
return False
for child in pet.direct_children_of_type(root, NodeType.FUNC):
for child2 in pet.direct_children_of_type(child, NodeType.LOOP):
if not (child2.reduction or child2.do_all):
return False
return True | 27d90b6ced48a0db081d9881e39600d641855343 | 7,637 |
def add_two_frags_together(fragList, atm_list, frag1_id, frag2_id):
"""Combine two fragments in fragList."""
new_id = min(frag1_id, frag2_id)
other_id = max(frag1_id, frag2_id)
new_fragList = fragList[:new_id] # copy up to the combined one
new_frag = { # combined frag
'ids': fragList[frag1_id]['ids'] + fragList[frag2_id]['ids'],
'syms': fragList[frag1_id]['syms'] + fragList[frag2_id]['syms'],
'grp': new_id,
'chrg': fragList[frag1_id]['chrg'] + fragList[frag2_id]['chrg'],
'mult': fragList[frag1_id]['mult'] + fragList[frag2_id]['mult'] - 1,
'name': fragList[new_id]['name'],
}
new_frag = add_centroids([new_frag], atm_list)
new_fragList.extend(new_frag) # add new frag
# add up to removed frag
new_fragList.extend(fragList[new_id+1:other_id])
# change rest of values
for i in range(other_id+1,len(fragList)):
fragList[i]['grp'] = i-1
fragList[i]['name'] = f"frag{i-1}"
new_fragList.append(fragList[i])
for i in range(len(new_fragList)):
if i != new_fragList[i]["grp"]:
print(i, "does not")
return new_fragList, new_id | 9c226883d6c021e151c51889017f56ea6a4cba3a | 7,638 |
from typing import Tuple
import tqdm
from sys import stdout
import tarfile
def load_dataset(
file: str,
out_dir: str = "/tmp",
download: bool = True,
url: str = None,
labels: str = "labels",
verbose: int = 2,
) -> Tuple[ndarray, ndarray, ndarray, ndarray]:
"""Load Dataset from storage or cloud h5 format
Args:
file (str): File name or file path if local (tar gzipped, file extension not necessary)
out_dir (str, optional): Location to save the dataset (or open if local). Defaults to '/tmp'.
download (bool, optional): Whether to download from repo.
If false, 'file' should be the path to the tar file. Defaults to 'True'.
url (str, optional): URL of cloud storage pointing to file. Defaults to None.
labels (str, optional): Key of labels in hdf5 file
verbose (int, optional): Verbosity level: 2 is most, 0 is none. Defaults to 2.
Returns:
Tuple[ndarray, ndarray, ndarray, ndarray]: X, y train, X, y test
"""
file += ".tar.gz" if not file.endswith(".tar.gz") else ""
location = join(out_dir, file)
url = (
url if url else f"https://storage.gorchilov.net/datasets/{file.split('/')[-1]}"
)
# get from cloud
if not exists(location) and download:
res = get(url, allow_redirects=True, stream=True)
with open(location, "wb") as f:
if verbose == 2 and "Content-Length" in head(url).headers:
filesize = int(head(url).headers["Content-Length"])
with tqdm(
unit="B",
unit_scale=True,
unit_divisor=1024,
total=filesize * 1024,
file=stdout,
desc=file,
) as progress:
for chunk in res.iter_content(chunk_size=1024):
datasize = f.write(chunk)
progress.update(datasize)
else:
f.write(res.content)
if verbose > 0:
print("Finished downloading file")
# open tarball
tar = tarfile.open(location, "r:gz")
# get filenames from tarball
files = list(filter(lambda x: x.name[0] != ".", tar.getmembers()))
train_filename = join(
out_dir, next(filter(lambda x: "train" in x.name, files)).name,
)
test_filename = join(out_dir, next(filter(lambda x: "test" in x.name, files)).name)
# extract files if not already
if not exists(train_filename) or not exists(test_filename):
tar.extractall(path=out_dir)
if verbose > 0:
print("Extracted tarball")
tar.close()
train_file = File(train_filename, mode="r")
test_file = File(test_filename, mode="r")
X_train = train_file["data"][:]
y_train = train_file[labels][:]
train_file.close()
X_test = test_file["data"][:]
y_test = test_file[labels][:]
test_file.close()
return (X_train, y_train, X_test, y_test) | 17ad37b04c5dc14f0575d07cf677d6335058bf8b | 7,639 |
def concatenate(arrays, axis=0):
"""
Joins a sequence of tensors along an existing axis.
Args:
arrays: Union[Tensor, tuple(Tensor), list(Tensor)], a tensor or a list
of tensors to be concatenated.
axis (int, optional): The axis along which the tensors will be joined,
if axis is None, tensors are flattened before use. Default is 0.
Returns:
Tensor, a tensor concatenated from a tensor or a list of tensors.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import mindspore.numpy as np
>>> x1 = np.ones((1,2,3))
>>> x2 = np.ones((1,2,1))
>>> x = np.concatenate((x1, x2), axis=-1)
>>> print(x.shape)
(1, 2, 4)
"""
array_type = F.typeof(arrays)
if _check_is_tensor(array_type):
# if the input is a single tensor
# if only one tensor is provided, it is treated as a tuple along the
# first dimension. For example, a tensor of shape (3,4,5) will be treated
# as: tuple(tensor_1(4,5), tensor_2(4,5), tensor_3(4,5))
if axis is None:
return ravel(arrays)
arr_shape = F.shape(arrays)
_check_axes_range((axis,), len(arr_shape))
# move axis 0 to the designated position, while keeping other axes' relative
# positions unchanged
new_axes, new_shape = _move_axes_for_concatenate(arr_shape, axis)
arrays = transpose(arrays, new_axes)
arrays = reshape(arrays, new_shape)
return arrays
flattened_arrays = ()
if axis is None:
for arr in arrays:
flattened_arrays += (ravel(arr),)
axis = -1
return P.Concat(axis)(flattened_arrays)
arr_shape = F.shape(arrays[0])
_check_axes_range((axis,), len(arr_shape))
# if only one tensor in the tuple/list, return the tensor itself
if len(arrays) == 1:
return arrays[0]
return P.Concat(axis)(arrays) | a85db3673d3a50d76374b809b583a8ca5325d4c3 | 7,640 |
import json
def get_answer():
"""
get answer
"""
# logger
M_LOG.info("get_answer")
# exist answer in dict ?
if "answer" in gdct_data:
# convert to JSON
l_json = json.dumps(gdct_data["answer"])
M_LOG.debug("Requested answer: %s", str(l_json))
# remove answer from dict
del gdct_data["answer"]
# return ok
return flask.Response(l_json, status=200, mimetype="application/json")
M_LOG.debug("No answer yet...")
# return error
return flask.Response(status=503) | 7f4b97b2a470d491326cdd341475bc15987fa299 | 7,641 |
def default_todo_data():
"""Словарь с данными пользователя поумолчанию"""
return {"title": "Молоко", "description": "Купить молоко в Ашане 200 литров", "created_datetime": "2041-08-12T00:00:00.000Z"} | 2cd873f266d758c8d8510418fd173f35f4b366ab | 7,642 |
def generate_new_key(access_key, secret_key, user_to_rotate):
"""generates a new key pair and returns the access key and secret key"""
LOGGER.info("Begin generate new key")
iam_client = boto3.client('iam', aws_access_key_id=access_key, aws_secret_access_key=secret_key)
resp = iam_client.create_access_key(UserName=user_to_rotate)
LOGGER.debug(resp)
LOGGER.info("End generate new key")
return resp['AccessKey']['AccessKeyId'].strip(), resp['AccessKey']['SecretAccessKey'].strip() | 41fff2da39408661f329d2e1f47488dce8d76652 | 7,643 |
def withCHID(fcn):
"""decorator to ensure that first argument to a function is a Channel
ID, ``chid``. The test performed is very weak, as any ctypes long or
python int will pass, but it is useful enough to catch most accidental
errors before they would cause a crash of the CA library.
"""
# It may be worth making a chid class (which could hold connection
# data of _cache) that could be tested here. For now, that
# seems slightly 'not low-level' for this module.
def wrapper(*args, **kwds):
"withCHID wrapper"
if len(args)>0:
chid = args[0]
args = list(args)
if isinstance(chid, int):
args[0] = chid = dbr.chid_t(args[0])
if not isinstance(chid, dbr.chid_t):
msg = "%s: not a valid chid %s %s args %s kwargs %s!" % (
(fcn.__name__, chid, type(chid), args, kwds))
raise ChannelAccessException(msg)
return fcn(*args, **kwds)
wrapper.__doc__ = fcn.__doc__
wrapper.__name__ = fcn.__name__
wrapper.__dict__.update(fcn.__dict__)
return wrapper | 98ac8fdc812a8e9b7706e1932db662819e830597 | 7,644 |
import inspect
def current_user_get():
"""ユーザー情報取得 user info get
Returns:
Response: HTTP Respose
"""
app_name = multi_lang.get_text("EP020-0001", "ユーザー情報:")
exec_stat = multi_lang.get_text("EP020-0017", "取得")
error_detail = ""
try:
globals.logger.debug('#' * 50)
globals.logger.debug('CALL {}'.format(inspect.currentframe().f_code.co_name))
globals.logger.debug('#' * 50)
ret_user = user_get()
return jsonify({"result": "200", "info": ret_user}), 200
except common.UserException as e:
return common.server_error_to_message(e, app_name + exec_stat, error_detail)
except Exception as e:
return common.server_error_to_message(e, app_name + exec_stat, error_detail) | f2bf2c81b176e985973dad9e5841ecd1af599a48 | 7,645 |
def asin(a: Dual) -> Dual:
"""inverse of sine or arcsine of the dual number a, using math.asin(x)"""
if abs(a.value) >= 1:
raise ValueError('Arcsin cannot be evaluated at {}.'.format(a.value))
value = np.arcsin(a.value)
ders = dict()
for k,v in a.ders.items():
ders[k] = 1/(np.sqrt(1-a.value**2))*v
return Dual(value, ders) | 6b15e737ae5beb69f8963aa752d7fba761dce56f | 7,646 |
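# Sketch of asin on a dual number, assuming the Dual class used above is
# constructed as Dual(value, ders) with a dict of partial derivatives.
x = Dual(0.5, {"x": 1.0})
y = asin(x)
print(y.value)      # arcsin(0.5) ~= 0.5236
print(y.ders["x"])  # 1 / sqrt(1 - 0.25) ~= 1.1547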
def hydrotopeQ(cover,hydrotopemap):
"""Get mean values of the cover map for the hydrotopes"""
grass.message(('Get mean hydrotope values for %s' %cover))
tbl = grass.read_command('r.univar', map=cover, zones=hydrotopemap,
flags='gt').split('\n')[:-1] # :-1 because the last element is empty (trailing line break)
tbl = [tuple(l.split('|')) for l in tbl]
tbl = np.array(tbl[1:], dtype=list(zip(tbl[0],['S250']*len(tbl[0]))))
tbl = np.array(list(zip(tbl['zone'],tbl['mean'])), dtype=[('cat',np.int64),('mean',np.float64)])
return tbl[np.isfinite(tbl['mean'])] | 371dc496a4bb2e33fc382dddaea66e83aa613abc | 7,647 |
import re
def convert_to_seconds(duration_str):
"""
return duration in seconds
"""
seconds = 0
if re.match(r"[0-9]+$", duration_str):
seconds = int(duration_str)
elif re.match(r"[0-9]+s$", duration_str):
seconds = int(duration_str[:-1])
elif re.match(r"[0-9]+m$", duration_str):
seconds = 60 * int(duration_str[:-1])
elif re.match(r"[0-9]+h$", duration_str):
seconds = 3600 * int(duration_str[:-1])
elif re.match(r"[0-9]+d$", duration_str):
seconds = 86400 * int(duration_str[:-1])
return seconds | 222905e6089510c6f204c6ea710572a5b2132d28 | 7,648 |
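# A few convert_to_seconds examples covering each suffix branch:
for s in ["90", "45s", "2m", "1h", "1d"]:
    print(s, "->", convert_to_seconds(s))  # 90, 45, 120, 3600, 86400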
def get_chunk_n_rows(row_bytes: int,
working_memory: Num,
max_n_rows: int = None) -> int:
"""Calculates how many rows can be processed within working_memory
Parameters
----------
row_bytes : int
The expected number of bytes of memory that will be consumed
during the processing of each row.
working_memory : int or float, optional
The number of rows to fit inside this number of MiB will be returned.
max_n_rows : int, optional
The maximum return value.
Returns
-------
int or the value of n_samples
Warns
-----
Issues a UserWarning if ``row_bytes`` exceeds ``working_memory`` MiB.
"""
chunk_n_rows = int(working_memory * (2 ** 20) // row_bytes)
if max_n_rows is not None:
chunk_n_rows = min(chunk_n_rows, max_n_rows)
if chunk_n_rows < 1:
# Could not adhere to working_memory config.
chunk_n_rows = 1
return chunk_n_rows | b7c2ab10c59edb6c2541e31264b28e06266d2fc3 | 7,649 |
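# Worked example: with 8000 bytes per row and a 64 MiB working-memory budget,
# 64 * 2**20 // 8000 = 8388 rows fit in one chunk.
print(get_chunk_n_rows(row_bytes=8000, working_memory=64))  # -> 8388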
import re
def elasticsearch_ispartial_log(line):
"""
>>> line1 = ' [2018-04-03T00:22:38,048][DEBUG][o.e.c.u.c.QueueResizingEsThreadPoolExecutor] [search17/search]: there were [2000] tasks in [809ms], avg task time [28.4micros], EWMA task execution [790nanos], [35165.36 tasks/s], optimal queue is [35165], current capacity [1000]'
>>> line2 = ' org.elasticsearch.ResourceAlreadyExistsException: index [media_corpus_refresh/6_3sRAMsRr2r63J6gbOjQw] already exists'
>>> line3 = ' at org.elasticsearch.cluster.metadata.MetaDataCreateIndexService.validateIndexName(MetaDataCreateIndexService.java:151) ~[elasticsearch-6.2.0.jar:6.2.0]'
>>> elasticsearch_ispartial_log(line1)
False
>>> elasticsearch_ispartial_log(line2)
True
>>> elasticsearch_ispartial_log(line3)
True
"""
match_result = []
for p in LOG_BEGIN_PATTERN:
if re.match(p, line) != None:
return False
return True | 7263be4a74f1a92a6347393f62d9b67300d30c30 | 7,650 |
def find_signal_analysis(prior, sparsity, sigma_data):
"""
Generates a signal using an analytic prior.
Works only with square and overcomplete full-rank priors.
"""
N, L = prior.shape
k = np.sum(np.random.random(L) > (1 - sparsity))
V = np.zeros(shape=(L, L - k))
while np.linalg.matrix_rank(V) != L - k:
s = np.random.permutation(N)
V = prior[s[:L - k]]
x = np.random.normal(scale=sigma_data, size=(L))
x /= np.linalg.norm(x)
x -= np.linalg.pinv(V) @ V @ x
return x | 49a7c26b6bc934d3588ae25c99eb62e0b544616f | 7,651 |
from typing import List
import asyncio
import requests
def download_images(sorted_urls) -> List:
"""Download images and convert to list of PIL images
Once in an array of PIL.images we can easily convert this to a PDF.
:param sorted_urls: List of sorted URLs for split financial disclosure
:return: image_list
"""
async def main(urls):
image_list = []
loop = asyncio.get_event_loop()
futures = [loop.run_in_executor(None, requests.get, url) for url in urls]
for response in await asyncio.gather(*futures):
image_list.append(response.content)
return image_list
loop = asyncio.get_event_loop()
image_list = loop.run_until_complete(main(sorted_urls))
return image_list | 3efde31975c7912e16ab2d990417c2aa753ca5bf | 7,652 |
def get_molecules(struct,
bonds_kw={"mult":1.20, "skin":0.0, "update":False},
ret="idx"):
"""
Returns the index of atoms belonging to each molecule in the Structure.
"""
bonds = struct.get_bonds(**bonds_kw)
## Build connectivity matrix
graph = np.zeros((struct.geometry.shape[0],struct.geometry.shape[0]))
for atom_idx,bonded_idx_list in enumerate(bonds):
for bond_idx in bonded_idx_list:
graph[atom_idx][bonded_idx_list] = 1
graph = csr_matrix(graph)
n_components, component_list = connected_components(graph)
molecule_idx_list = [np.where(component_list == x)[0]
for x in range(n_components)]
if ret == "idx":
return molecule_idx_list
elif ret == "struct":
## Returns list of structures
geo = struct.get_geo_array()
ele = struct.geometry["element"]
molecule_struct_list = []
for idx,entry in enumerate(molecule_idx_list):
mol_geo = geo[entry]
mol_ele = ele[entry]
mol = Structure.from_geo(mol_geo,mol_ele)
mol.struct_id = "{}_molecule_{}".format(struct.struct_id,
idx)
molecule_struct_list.append(mol)
return molecule_struct_list
else:
## Returns list of structures
geo = struct.get_geo_array()
ele = struct.geometry["element"]
molecule_struct_dict = {}
for idx,entry in enumerate(molecule_idx_list):
mol_geo = geo[entry]
mol_ele = ele[entry]
mol = Structure.from_geo(mol_geo,mol_ele)
mol.struct_id = "{}_molecule_{}".format(struct.struct_id,
idx)
molecule_struct_dict[mol.struct_id] = mol
return molecule_struct_dict | 99b67f95114ddd6c712c8fe63a0713a914b8888f | 7,653 |
def cdivs(a,b,c,d,e,f,al1,al2,al3,x11,x21,x22,x23,x31,x32,x33):
"""Finds the c divides conditions for the symmetry preserving HNFs.
Args:
a (int): a from the HNF.
b (int): b from the HNF.
c (int): c from the HNF.
d (int): d from the HNF.
e (int): e from the HNF.
f (int): f from the HNF.
al1 (numpy.array): array of alpha1 values from write up.
al2 (numpy.array): array of alpha2 values from write up.
al3 (numpy.array): array of alpha3 values from write up.
x11 (numpy.array): array of pg values for x(1,1) spot.
x21 (numpy.array): array of pg values for x(2,1) spot.
x22 (numpy.array): array of pg values for x(2,2) spot.
x23 (numpy.array): array of pg values for x(2,3) spot.
x31 (numpy.array): array of pg values for x(3,1) spot.
x32 (numpy.array): array of pg values for x(3,2) spot.
x33 (numpy.array): array of pg values for x(3,3) spot.
Returns:
HNFs (list of lists): The symmetry preserving HNFs.
"""
HNFs = []
if np.allclose(x23,0):
if b == None:
# find the b values, d and e still unknown
if not np.allclose(al3, 0):
N=0
at = al3[np.nonzero(al3)]
val = np.unique(N*c/at)
while any(abs(val) <c):
for v in val:
if v < c and v >= 0 and np.allclose(v%1,0):
b = v
c1 = a*x21 + b*(x22-al1-x11)
c2 =(-b*al2)
if np.allclose(c1%c,0) and np.allclose(c2%c,0):
be1 = c1/c
be2 =c2/c
tHNFs = fdivs(a,b,c,d,e,f,al1,al2,be1,be2,x11,x22,x31,x32,x33)
for t in tHNFs:
HNFs.append(t)
N += 1
val = np.unique(N*c/at)
elif not np.allclose(al2,0):
N=0
at = al2[np.nonzero(al2)]
val = np.unique(N*c/at)
while any(abs(val) <c):
for v in val:
if v < c and v>=0 and np.allclose(v%1,0):
b = v
c1 = a*x21 + b*(x22-al1-x11)
c3 =(-b*al3)
if np.allclose(c1%c,0) and np.allclose(c3%c,0):
be1 = c1/c
be2 =-b*al2/c
tHNFs = fdivs(a,b,c,d,e,f,al1,al2,be1,be2,x11,x22,x31,x32,x33)
for t in tHNFs:
HNFs.append(t)
N += 1
val = np.unique(N*c/at)
else:
if not np.allclose((x22-x11-al1),0):
N=0
xt = (x22-x11-al1)
xt = xt[np.nonzero(xt)]
val = np.unique(np.reshape(np.outer(N*c-a*x21,1/xt),len(x21)*len(xt)))
while any(abs(val) <c):
for v in val:
if v < c and v>=0 and np.allclose(v%1,0):
b = v
c2 = -b*al2
c3 =(-b*al3)
if np.allclose(c2%c,0) and np.allclose(c3%c,0):
be1 = (a*x21+b*(x22-x11-al1))/c
be2 =-b*al2/c
tHNFs = fdivs(a,b,c,d,e,f,al1,al2,be1,be2,x11,x22,x31,x32,x33)
for t in tHNFs:
HNFs.append(t)
N += 1
xt = (x22-x11-al1)
xt = xt[np.nonzero(xt)]
val = np.unique(np.reshape(np.outer(N*c-a*x21,1/xt),len(x21)*len(xt)))
else:
c1 = a*x21
c2 = 0
c3 = 0
if np.allclose(c1%c,0) and np.allclose(c2%c,0) and np.allclose(c3%c,0):
be1 = c1/c
be2 = c2/c
tHNFs = fdivs(a,b,c,d,e,f,al1,al2,be1,be2,x11,x22,x31,x32,x33)
for t in tHNFs:
HNFs.append(t)
else:
c1 = a*x21 + b*(x22-al1-x11)
c2 = (-b*al2)
c3 = (-b*al3)
if np.allclose(c1%c,0) and np.allclose(c2%c,0) and np.allclose(c3%c,0):
be1 = c1/c
be2 = c2/c
tHNFs = fdivs(a,b,c,d,e,f,al1,al2,be1,be2,x11,x22,x31,x32,x33)
for t in tHNFs:
HNFs.append(t)
else:
if np.allclose(al3,0):
if np.allclose((f*x23)%c,0):
if b == None and e == None and d == None:
if np.allclose(al1,0) and np.allclose(al2,0) and np.allclose(al3,0):
N = 0
xt = x23[np.nonzero(x23)]
val = np.unique(N*c/xt)
while any(abs(val)<f):
for v in val:
if v <f and v>=0 and np.allclose(v%1,0):
e = v
for b in range(c):
N2 =0
xt = x23[np.nonzero(x23)]
val2 = np.unique(np.reshape(np.outer((N2*c-a*x21-b*(x22-x11)),1/xt),len(x22)*len(xt)))
while any(abs(val2)<f):
for v2 in val2:
if v2 <f and v2>=0 and np.allclose(v2%1,0):
d = v2
be1 = (a*x21+b*(x22-x11)+d*x23)/c
be2 = e*x23/c
tHNFs = fdivs(a,b,c,d,e,f,al1,al2,be1,be2,x11,x22,x31,x32,x33)
for t in tHNFs:
HNFs.append(t)
N2 += 1
xt = x23[np.nonzero(x23)]
val2 = np.unique(np.reshape(np.outer((N2*c-a*x21-b*(x22-x11)),1/xt),len(x22)*len(xt)))
N += 1
val = np.unique(N*c/xt)
elif not np.allclose(al3,0):
N = max(np.round(f*x23/c))
at = al3[np.nonzero(al3)]
val = np.unique(np.reshape(np.outer(-N*c+f*x23,1/at),len(x23)*len(al3)))
while any(abs(val) < c):
for v in val:
if v < c and v>=0 and np.allclose(v%1,0):
b = v
N2 = min(np.round(-b*al2/c))
xt = x23[np.nonzero(x23)]
val2 = np.unique(np.reshape(np.outer(N2*c+b*al2,1/xt),len(xt)*len(al2)))
while any(abs(val2)<f):
for v2 in val2:
if v2 <f and v2>=0 and np.allclose(v2%1,0):
e = v2
N3 = min(np.round((a*x21+b*(x22-x11-al1))/c))
xt = x23[np.nonzero(x23)]
val3 = np.unique(np.reshape(np.outer(N3*c-a*x21-b*(x22-x11-al1),1/xt),len(xt)*len(x22)))
while any(abs(val2)<f):
for v3 in val3:
if v3 <f and v3>=0 and np.allclose(v3%1,0):
d = v3
be1 = (a*x21+b*(x22-x11-al1)+d*x23)/c
be2 = (e*x32-b*al2)/c
tHNFs = fdivs(a,b,c,d,e,f,al1,al2,be1,be2,x11,x22,x31,x32,x33)
for t in tHNFs:
HNFs.append(t)
N3 += 1
xt = x23[np.nonzero(x23)]
val3 = np.unique(np.reshape(np.outer(N3*c-a*x21-b*(x22-x11-al1),1/xt),len(xt)*len(x22)))
N2 += 1
xt = x23[np.nonzero(x23)]
val2 = np.unique(np.reshape(np.outer(N2*c+b*al2,1/xt),len(x22)*len(xt)))
N -= 1
at = al3[np.nonzero(al3)]
val = np.unique(np.reshape(np.outer(-N*c+f*x23,1/at),len(x23)*len(at)))
else:
for b in range(c):
N2 = min(np.round(-b*al2/c))
xt = x23[np.nonzero(x23)]
val2 = np.unique(np.reshape(np.outer(N2*c+b*al2,1/xt),len(xt)*len(al2)))
while any(abs(val2)<f):
for v2 in val2:
if v2 <f and v2 >= 0 and np.allclose(v2%1,0):
e = v2
N3 = min(np.round((a*x21+b*(x22-x11-al1))/c))
xt = x23[np.nonzero(x23)]
val3 = np.unique(np.reshape(np.outer(N3*c-a*x21-b*(x22-x11-al1),1/xt),len(x22)*len(xt)))
while any(abs(val2)<f):
for v3 in val3:
if v3 <f and v3 >= 0 and np.allclose(v3%1,0):
d = v3
be1 = (a*x21+b*(x22-x11-al1)+d*x23)/c
be2 = (e*x32-b*al2)/c
tHNFs = fdivs(a,b,c,d,e,f,al1,al2,be1,be2,x11,x22,x31,x32,x33)
for t in tHNFs:
HNFs.append(t)
N3 += 1
xt = x23[np.nonzero(x23)]
val3 = np.unique(np.reshape(np.outer(N3*c-a*x21-b*(x22-x11-al1),1/xt),len(xt)*len(x22)))
N2 += 1
xt = x23[np.nonzero(x23)]
val2 = np.unique(np.reshape(np.outer(N2*c+b*al2,1/xt),len(al2)*len(xt)))
elif b == None:
if not np.allclose(al3,0):
N = max(np.round(f*x23/c))
at = al3[np.nonzero(al3)]
val = np.unique(np.reshape(np.outer(-N*c+f*x23,1/at),len(x23)*len(at)))
while any(abs(val) < c):
for v in val:
if v < c and v>= 0 and np.allclose(v%1,0):
b = v
c1 = a*x21+b*(x22-x11-al1)+d*x23
c2 = -b*al2+e*x23
if np.allclose(c1%c,0) and np.allclose(c2%c,0):
be1 = c1/c
be2 = c2/c
tHNFs = fdivs(a,b,c,d,e,f,al1,al2,be1,be2,x11,x22,x31,x32,x33)
for t in tHNFs:
HNFs.append(t)
N -= 1
at = al3[np.nonzero(al3)]
val = np.unique(np.reshape(np.outer(-N*c+f*x23,1/at),len(x23)*len(at)))
elif not np.allclose(al2,0):
N = max(np.round(e*x23/c))
at = al2[np.nonzero(al2)]
val = np.unique(np.reshape(np.outer(-N*c+e*x23,1/at),len(x23)*len(at)))
while any(abs(val) < c):
for v in val:
if v < c and v>= 0 and np.allclose(v%1,0):
b = v
c1 = a*x21+b*(x22-x11-al1)+d*x23
c2 = -b*al2+e*x23
if np.allclose(c1%c,0) and np.allclose(c2%c,0):
be1 = c1/c
be2 = c2/c
tHNFs = fdivs(a,b,c,d,e,f,al1,al2,be1,be2,x11,x22,x31,x32,x33)
for t in tHNFs:
HNFs.append(t)
N -= 1
at = al2[np.nonzero(al2)]
val = np.unique(np.reshape(np.outer(-N*c+e*x23,1/at),len(x23)*len(at)))
else:
if not np.allclose((x22-x11-al1),0):
N = min(np.round((a*x21-d*x23)/c))
xt = (x22-x11-al1)
xt = xt[np.nonzero(xt)]
val = np.unique(np.reshape(np.outer(N*c-a*x21-d*x23,1/xt),len(x23)*len(xt)))
while any(abs(val) < c):
for v in val:
if v < c and v>=0 and np.allclose(v%1,0):
b = v
c1 = a*x21+b*(x22-x11-al1)+d*x23
c2 = -b*al2+e*x23
if np.allclose(c1%c,0) and np.allclose(c2%c,0):
be1 = c1/c
be2 = c2/c
tHNFs = fdivs(a,b,c,d,e,f,al1,al2,be1,be2,x11,x22,x31,x32,x33)
for t in tHNFs:
HNFs.append(t)
N += 1
xt = (x22-x11-al1)
xt = xt[np.nonzero(xt)]
val = np.unique(np.reshape(np.outer(N*c-a*x21-d*x23,1/xt),len(x23)*len(xt)))
else:
c1 = a*x21+d*x23
c2 = e*x23
c3 = f*x23
if np.allclose(c1%c,0) and np.allclose(c2%c,0) and np.allclose(c3%c,0):
tHNFs = fdivs(a,b,c,d,e,f,al1,al2,be1,be2,x11,x22,x31,x32,x33)
for t in tHNFs:
HNFs.append(t)
elif d == None and e == None:
N2 = min(np.round(-b*al2/c))
xt = x23[np.nonzero(x23)]
val2 = np.unique(np.reshape(np.outer(N2*c+b*al2,1/xt),len(xt)*len(al2)))
while any(abs(val2)<f):
for v2 in val2:
if v2 <f and v2>=0 and np.allclose(v2%1,0):
e = v2
N3 = min(np.round((a*x21+b*(x22-x11-al1))/c))
xt = x23[np.nonzero(x23)]
val3 = np.unique(np.reshape(np.outer(N3*c-a*x21-b*(x22-x11-al1),1/xt),len(x22)*len(xt)))
while any(abs(val3)<f):
for v3 in val3:
if v3 <f and v3>=0 and np.allclose(v3%1,0):
d = v3
be1 = (a*x21+b*(x22-x11-al1)+d*x23)/c
be2 = (e*x32-b*al2)/c
tHNFs = fdivs(a,b,c,d,e,f,al1,al2,be1,be2,x11,x22,x31,x32,x33)
for t in tHNFs:
HNFs.append(t)
N3 += 1
xt = x23[np.nonzero(x23)]
val3 = np.unique(np.reshape(np.outer(N3*c-a*x21-b*(x22-x11-al1),1/xt),len(x22)*len(xt)))
N2 += 1
xt = x23[np.nonzero(x23)]
val2 = np.unique(np.reshape(np.outer(N2*c+b*al2,1/xt),len(xt)*len(al2)))
else:
c1 = a*x21+b*(x22-al1-x11)+d*x23
c2 = -b*al2+e*x23
c3 = -b*al3+f*x23
if np.allclose(c1%c,0) and np.allclose(c2%c,0) and np.allclose(c3%c,0):
be1 = c1/c
be2 = c2/c
tHNFs = fdivs(a,b,c,d,e,f,al1,al2,be1,be2,x11,x22,x31,x32,x33)
for t in tHNFs:
HNFs.append(t)
# else:
# print("f: ",f)
# print("c: ",c)
# print("x32: ",x32)
# print("failed f*x32/c")
else:
if b==None and d==None and e==None:
N = max(np.round(f*x23/c))
at = al3[np.nonzero(al3)]
val = np.unique(np.reshape(np.outer(-N*c+f*x23,1/at),len(x23)*len(at)))
while any(abs(val) < c):
for v in val:
if v < c and v>= 0 and np.allclose(v%1,0):
b = v
N2 = min(np.round(-b*al2/c))
xt = x23[np.nonzero(x23)]
val2 = np.unique(np.reshape(np.outer(N2*c+b*al2,1/xt),len(xt)*len(al2)))
while any(abs(val2)<f):
for v2 in val2:
if v2 <f and v2>=0 and np.allclose(v2%1,0):
e = v2
N3 = min(np.round((a*x21+b*(x22-x11-al1))/c))
xt = x23[np.nonzero(x23)]
val3 = np.unique(np.reshape(np.outer(N3*c-a*x21-b*(x22-x11-al1),1/xt),len(x22)*len(xt)))
while any(abs(val3)<f):
for v3 in val3:
if v3 <f and v3>=0 and np.allclose(v3%1,0):
d = v3
c1 = a*x21+b*(x22-x11-al1)+d*x23
c2 = -b*al2+e*x23
if np.allclose(c1%c,0) and np.allclose(c2%c,0):
be1 = c1/c
be2 = c2/c
tHNFs = fdivs(a,b,c,d,e,f,al1,al2,be1,be2,x11,x22,x31,x32,x33)
for t in tHNFs:
HNFs.append(t)
N3 += 1
xt = x23[np.nonzero(x23)]
val3 = np.unique(np.reshape(np.outer(N3*c-a*x21-b*(x22-x11-al1),1/xt),len(x22)*len(xt)))
N2 += 1
xt = x23[np.nonzero(x23)]
val2 = np.unique(np.reshape(np.outer(N2*c+b*al2,1/xt),len(xt)*len(al2)))
N -= 1
at = al3[np.nonzero(al3)]
val = np.unique(np.reshape(np.outer(-N*c+f*x23,1/at),len(x23)*len(at)))
elif b==None:
N = max(np.round(f*x23/c))
at = al3[np.nonzero(al3)]
val = np.unique(np.reshape(np.outer(-N*c+f*x23,1/at),len(x23)*len(at)))
while any(abs(val) < c):
for v in val:
if v < c and v>= 0 and np.allclose(v%1,0):
b = v
c1 = a*x21+b*(x22-x11-al1)+d*x23
c2 = -b*al2+e*x23
if np.allclose(c1%c,0) and np.allclose(c2%c,0):
be1 = c1/c
be2 = c2/c
tHNFs = fdivs(a,b,c,d,e,f,al1,al2,be1,be2,x11,x22,x31,x32,x33)
for t in tHNFs:
HNFs.append(t)
N -= 1
at = al3[np.nonzero(al3)]
val = np.unique(np.reshape(np.outer(-N*c+f*x23,1/at),len(x23)*len(at)))
            elif d is None and e is None:
N2 = min(np.round(-b*al2/c))
xt = x23[np.nonzero(x23)]
val2 = np.unique(np.reshape(np.outer(N2*c+b*al2,1/xt),len(xt)*len(al2)))
while any(abs(val2)<f):
for v2 in val2:
if v2 <f and v2>=0 and np.allclose(v2%1,0):
e = v2
N3 = min(np.round((a*x21+b*(x22-x11-al1))/c))
xt = x23[np.nonzero(x23)]
val3 = np.unique(np.reshape(np.outer(N3*c-a*x21-b*(x22-x11-al1),1/xt),len(x22)*len(xt)))
while any(abs(val3)<f):
for v3 in val3:
if v3 <f and v3>=0 and np.allclose(v3%1,0):
d = v3
c1 = a*x21+b*(x22-x11-al1)+d*x23
c2 = -b*al2+e*x23
if np.allclose(c1%c,0) and np.allclose(c2%c,0):
be1 = c1/c
be2 = c2/c
tHNFs = fdivs(a,b,c,d,e,f,al1,al2,be1,be2,x11,x22,x31,x32,x33)
for t in tHNFs:
HNFs.append(t)
N3 += 1
xt = x23[np.nonzero(x23)]
val3 = np.unique(np.reshape(np.outer(N3*c-a*x21-b*(x22-x11-al1),1/xt),len(x22)*len(xt)))
N2 += 1
xt = x23[np.nonzero(x23)]
val2 = np.unique(np.reshape(np.outer(N2*c+b*al2,1/xt),len(xt)*len(al2)))
            else:
                # b, d and e are all set on this path; c1/c2 are formed as in the branch
                # above (an assumption -- they were used here without being defined).
                c1 = a*x21+b*(x22-x11-al1)+d*x23
                c2 = -b*al2+e*x23
                be1 = c1/c
                be2 = c2/c
tHNFs = fdivs(a,b,c,d,e,f,al1,al2,be1,be2,x11,x22,x31,x32,x33)
for t in tHNFs:
HNFs.append(t)
return HNFs | 20a0044050964c5705f3bce2297f2724d6f12f71 | 7,654 |
def numeric_field_list(model_class):
"""Return a list of field names for every numeric field in the class."""
def is_numeric(type):
return type in [BigIntegerField, DecimalField, FloatField, IntegerField,
PositiveIntegerField, PositiveSmallIntegerField,
SmallIntegerField]
fields = []
for (field, type) in field_list(model_class):
if is_numeric(type):
fields += [field]
return fields | a501c2a7bc87f7cdea8945a946937f72cc0576a9 | 7,655 |
import tokenize
def _get_lambda_source_code(lambda_fn, src):
"""Attempt to find the source code of the ``lambda_fn`` within the string ``src``."""
def gen_lambdas():
def gen():
yield src + "\n"
g = gen()
step = 0
tokens = []
for tok in tokenize.generate_tokens(getattr(g, "next", getattr(g, "__next__", None))):
if step == 0:
if tok[0] == tokenize.NAME and tok[1] == "lambda":
step = 1
tokens = [tok]
level = 0
elif step == 1:
if tok[0] == tokenize.NAME:
tokens.append(tok)
step = 2
else:
step = 0
elif step == 2:
if tok[0] == tokenize.OP and tok[1] == ":":
tokens.append(tok)
step = 3
else:
step = 0
elif step == 3:
if level == 0 and (tok[0] == tokenize.OP and tok[1] in ",)" or tok[0] == tokenize.ENDMARKER):
yield tokenize.untokenize(tokens).strip()
step = 0
else:
tokens.append(tok)
if tok[0] == tokenize.OP:
if tok[1] in "[({": level += 1
if tok[1] in "])}": level -= 1
assert not tokens
actual_code = lambda_fn.__code__.co_code
for lambda_src in gen_lambdas():
try:
fn = eval(lambda_src, globals(), locals())
if fn.__code__.co_code == actual_code:
return lambda_src.split(":", 1)[1].strip()
except Exception:
pass
return "<lambda>" | 5192a299bf88c9fdc070fae28e585cda3a09aadc | 7,656 |
import requests
import json
def retrieve_keycloak_public_key_and_algorithm(token_kid: str, oidc_server_url: str) -> (str, str):
""" Retrieve the public key for the token from keycloak
:param token_kid: The user token
:param oidc_server_url: Url of the server to authorize with
:return: keycloak public key and algorithm
"""
handle = f'{oidc_server_url}/protocol/openid-connect/certs'
logger.info(f'Getting public key for the kid={token_kid} from the keycloak...')
r = requests.get(handle)
if r.status_code != 200:
error = "Could not get certificates from the keycloak. " \
"Reason: [{}]: {}".format(r.status_code, r.text)
logger.error(error)
raise ValueError(error)
try:
json_response = r.json()
except Exception:
error = "Could not retrieve the public key. " \
"Got unexpected response: '{}'".format(r.text)
logger.error(error)
raise ValueError(error)
try:
matching_key = next((item for item in json_response.get('keys') if item['kid'] == token_kid), None)
matching_key_json = json.dumps(matching_key)
public_key = RSAAlgorithm.from_jwk(matching_key_json)
except Exception as e:
error = f'Invalid public key!. Reason: {e}'
logger.error(error)
raise ValueError(error)
logger.info(f'The public key for the kid={token_kid} has been fetched.')
return matching_key.get('alg'), public_key | 87e706b56c63b991e1524b5d6ffcec86d6a9bc67 | 7,657 |
def read_conformations(filename, version="default", sep="\t", comment="#",
encoding=None, mode="rb", **kw_args):
"""
Extract conformation information.
Parameters
----------
filename: str
Relative or absolute path to file that contains the RegulonDB information.
Returns
-------
"""
kw_args["mode"] = mode
kw_args["encoding"] = encoding
conformations = list()
with open_file(filename, **kw_args) as (file_h, ext):
iter_rowset = FILE_PARSERS.get(ext, iter_rowset_flat_file)
for row in iter_rowset(file_h):
tf_id = row["transcription_factor_id"]
try:
t_factor = elem.TranscriptionFactor[tf_id, version]
except KeyError:
LOGGER.warn("unknown transcription factor %s", tf_id)
LOGGER.warn("Please parse transcription factor information before"\
" parsing conformations.")
continue
conf = elem.Conformation(
unique_id=row["conformation_id"],
name_space=version,
tf=t_factor,
state=row["final_state"],
interaction=row["interaction_type"],
conformation_type=row.get("conformation_type", None), # version dependent
apo_holo=row.get("apo_holo_conformation", None) # version dependent
)
t_factor.conformations.add(conf)
conformations.append(conf)
return conformations | 3588ee68a8a498dbfb1f85d65a8eff65b5ff5ed1 | 7,658 |
def maskRipple(inRpl, outFile, mask):
"""maskRipple(inRpl, outFile, mask)
Sets the individual data items to zero based on the specified mask. If mask.getRGB(c,r)>0 /
then copy the contents at(c,r) of inRpl to outFile.rpl. Otherwise the contents of outFile /
is set to all zeros."""
outRpl = "%s.rpl" % outFile
outRaw = "%s.raw" % outFile
    depth = inRpl.getDepth()
    ty = inRpl.getDataType()
    res = ept.RippleFile(inRpl.getColumns(), inRpl.getRows(), inRpl.getDepth(), inRpl.getDataType(), inRpl.getDataSize(), ept.RippleFile.DONT_CARE_ENDIAN, outRpl, outRaw)
    zero = [0] * depth
    for c in xrange(0, inRpl.getColumns()):
        for r in xrange(0, inRpl.getRows()):
            inRpl.setPosition(c, r)
            res.setPosition(c, r)
            if mask.getRGB(c, r) > 0:
                if ty == inRpl.FLOAT:
                    res.write(inRpl.readDouble(depth))
                else:
                    res.write(inRpl.readInt(depth))
            else:
                # Per the docstring, masked-out positions are written out as zeros.
                res.write(zero)
    return res | 65d5464e9de469cf45b47991ed838a79c587d965 | 7,659
def GetCurrentScene() -> Scene:
"""
Returns current scene. Raises SpykeException
if current scene is not set.
"""
if not _currentScene:
raise SpykeException("No scene is set current.")
return _currentScene | 82a065e4cbd0aa4b326d53b3360aac52a99ac682 | 7,660 |
import argparse
import sys
import os
def Login():
"""Performs interactive login and caches authentication token.
Returns:
non-zero value on error.
"""
ConfirmUserAgreedToS()
parser = argparse.ArgumentParser()
parser.add_argument('--browser', action='store_true',
help=('Use browser to get goma OAuth2 token.'))
options = parser.parse_args(sys.argv[2:])
config = GomaOAuth2Config()
config.update(DefaultOAuth2Config())
func = GetAuthorizationCodeViaCommandLine
if options.browser:
func = GetAuthorizationCodeViaBrowser
config['refresh_token'] = GetRefreshToken(func, config)
err = VerifyRefreshToken(config)
if err:
sys.stderr.write(err + '\n')
return 1
config.Save()
flags = configFlags(config)
for k in flags:
if k not in os.environ:
os.environ[k] = flags[k]
if not CheckPing():
return 1
return 0 | 7b305746fa128bcdce834ee036e5e231f5f72223 | 7,661 |
def timeago(seconds=0, accuracy=4, format=0, lang="en", short_name=False):
"""Translate seconds into human-readable.
:param seconds: seconds (float/int).
:param accuracy: 4 by default (units[:accuracy]), determine the length of elements.
:param format: index of [led, literal, dict].
:param lang: en or cn.
    :param short_name: use abbreviated unit names (units are day, hour, minute, second, ms).
>>> timeago(93245732.0032424, 5)
'1079 days, 05:35:32,003'
>>> timeago(93245732.0032424, 4, 1)
'1079 days 5 hours 35 minutes 32 seconds'
>>> timeago(-389, 4, 1)
'-6 minutes 29 seconds 0 ms'
"""
assert format in [0, 1,
2], ValueError("format arg should be one of 0, 1, 2")
negative = "-" if seconds < 0 else ""
is_en = lang == "en"
seconds = abs(seconds)
if is_en:
if short_name:
units = ("day", "hr", "min", "sec", "ms")
else:
units = ("day", "hour", "minute", "second", "ms")
elif lang == "cn":
if short_name:
units = (u"日", u"时", u"分", u"秒", u"毫秒")
else:
units = (u"天", u"小时", u"分钟", u"秒", u"毫秒")
times = split_seconds(seconds)
if format == 2:
return dict(zip(units, times))
day, hour, minute, second, ms = times
if format == 0:
day_str = ("%d %s%s, " %
(day, units[0], "s" if day > 1 and is_en else "")
if day else "")
mid_str = ":".join(("%02d" % i for i in (hour, minute, second)))
if accuracy > 4:
mid_str += ",%03d" % ms
return negative + day_str + mid_str
elif format == 1:
if seconds:
# find longest valid fields index (non-zero for head and tail)
for index, item in enumerate(times):
if item != 0:
head_index = index
break
for index, item in enumerate(reversed(times)):
if item != 0:
tail_index = len(times) - index
break
result_str = [
"%d %s%s" %
(num, unit, "s" if is_en and num > 1 and unit != "ms" else "")
for num, unit in zip(times, units)
][head_index:tail_index][:accuracy]
result_str = " ".join(result_str)
else:
result_str = "0 %s" % units[-1]
return negative + result_str | b6a5858c3f5c5291b03654d076eb3f1e835f78c0 | 7,662 |
def generate_headline(ids=None):
"""Generate and return an awesome headline.
Args:
ids:
Iterable of five IDs (intro, adjective, prefix, suffix, action).
Optional. If this is ``None``, random values are fetched from the
database.
Returns:
Tuple of parts and permalink (intro, adjective, prefix, suffix, action,
permalink)
"""
print('[schlagzeilengenerator] Generating a headline...')
# Correct endings
adjective_endings = {
'm': 'r',
'f': '',
's': 's',
'p': '',
}
# Get random database entries
if ids is not None:
d_intro = get_by_id('intro', ids[0])
d_adjective = get_by_id('adjective', ids[1])
d_prefix = get_by_id('prefix', ids[2])
d_suffix = get_by_id('suffix', ids[3])
d_action = get_by_id('action', ids[4])
else:
d_intro = get_random('intro')
d_adjective = get_random('adjective')
d_prefix = get_random('prefix')
d_suffix = get_random('suffix')
d_action = get_random('action')
ids = (d_intro['id'], d_adjective['id'], d_prefix['id'], d_suffix['id'], d_action['id'])
# Get data from dictionaries
case = d_suffix['case']
intro = d_intro['text']
adjective = d_adjective['text'] + adjective_endings[case]
prefix = d_prefix['text']
suffix = d_suffix['text']
if case == 'p':
action = '%s %s' % (d_action['action_p'], d_action['text'])
else:
action = '%s %s' % (d_action['action_s'], d_action['text'])
# Build permalink
permalink = b64encode(b','.join(str(i).encode('ascii') for i in ids))
return intro, adjective, prefix, suffix, action.strip(), permalink | 09fda0075b036ea51972b2f124733de9f34671fc | 7,663 |
import webbrowser
def open_in_browser(path):
"""
Open directory in web browser.
"""
return webbrowser.open(path) | 41328b2b478f0bd69695da1868c412188e494d08 | 7,664 |
def lstm_cell_forward(xt, a_prev, c_prev, parameters):
"""
Implement a single forward step of the LSTM-cell as described in Figure (4)
Arguments:
xt -- your input data at timestep "t", numpy array of shape (n_x, m).
a_prev -- Hidden state at timestep "t-1", numpy array of shape (n_a, m)
c_prev -- Memory state at timestep "t-1", numpy array of shape (n_a, m)
parameters -- python dictionary containing:
Wf -- Weight matrix of the forget gate, numpy array of shape (n_a, n_a + n_x)
bf -- Bias of the forget gate, numpy array of shape (n_a, 1)
Wi -- Weight matrix of the update gate, numpy array of shape (n_a, n_a + n_x)
bi -- Bias of the update gate, numpy array of shape (n_a, 1)
Wc -- Weight matrix of the first "tanh", numpy array of shape (n_a, n_a + n_x)
bc -- Bias of the first "tanh", numpy array of shape (n_a, 1)
Wo -- Weight matrix of the output gate, numpy array of shape (n_a, n_a + n_x)
bo -- Bias of the output gate, numpy array of shape (n_a, 1)
Wy -- Weight matrix relating the hidden-state to the output, numpy array of shape (n_y, n_a)
by -- Bias relating the hidden-state to the output, numpy array of shape (n_y, 1)
Returns:
a_next -- next hidden state, of shape (n_a, m)
c_next -- next memory state, of shape (n_a, m)
yt_pred -- prediction at timestep "t", numpy array of shape (n_y, m)
cache -- tuple of values needed for the backward pass, contains (a_next, c_next, a_prev, c_prev, xt, parameters)
Note: ft/it/ot stand for the forget/update/output gates, cct stands for the candidate value (c tilde),
c stands for the cell state (memory)
"""
# Retrieve parameters from "parameters"
Wf = parameters["Wf"] # forget gate weight
bf = parameters["bf"]
Wi = parameters["Wi"] # update gate weight (notice the variable name)
bi = parameters["bi"] # (notice the variable name)
Wc = parameters["Wc"] # candidate value weight
bc = parameters["bc"]
Wo = parameters["Wo"] # output gate weight
bo = parameters["bo"]
Wy = parameters["Wy"] # prediction weight
by = parameters["by"]
# Retrieve dimensions from shapes of xt and Wy
n_x, m = xt.shape
n_y, n_a = Wy.shape
### START CODE HERE ###
# Concatenate a_prev and xt (≈1 line)
concat = np.concatenate((a_prev,xt),axis=0)
# Compute values for ft (forget gate), it (update gate),
# cct (candidate value), c_next (cell state),
# ot (output gate), a_next (hidden state) (≈6 lines)
ft = sigmoid(np.dot(Wf,concat)+bf) # forget gate
it = sigmoid(np.dot(Wi,concat)+bi) # update gate
cct = np.tanh(np.dot(Wc,concat)+bc) # candidate value
c_next = ft*c_prev+it*cct # cell state
ot = sigmoid(np.dot(Wo,concat)+bo) # output gate
a_next = ot*np.tanh(c_next) # hidden state
# Compute prediction of the LSTM cell (≈1 line)
yt_pred = softmax(np.dot(Wy,a_next)+by)
### END CODE HERE ###
# store values needed for backward propagation in cache
cache = (a_next, c_next, a_prev, c_prev, ft, it, cct, ot, xt, parameters)
return a_next, c_next, yt_pred, cache | 9d1ae3ea6da9de6827b5ecd9f8871ee8aae26d30 | 7,665 |
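# A minimal shape check for lstm_cell_forward. This is a sketch: it assumes numpy is
# available as np and that the sigmoid/softmax helpers referenced in the function body
# are defined elsewhere in the original module.
import numpy as np
np.random.seed(1)
n_x, n_a, n_y, m = 3, 5, 2, 10
xt = np.random.randn(n_x, m)
a_prev = np.random.randn(n_a, m)
c_prev = np.random.randn(n_a, m)
parameters = {
    "Wf": np.random.randn(n_a, n_a + n_x), "bf": np.random.randn(n_a, 1),
    "Wi": np.random.randn(n_a, n_a + n_x), "bi": np.random.randn(n_a, 1),
    "Wc": np.random.randn(n_a, n_a + n_x), "bc": np.random.randn(n_a, 1),
    "Wo": np.random.randn(n_a, n_a + n_x), "bo": np.random.randn(n_a, 1),
    "Wy": np.random.randn(n_y, n_a), "by": np.random.randn(n_y, 1),
}
a_next, c_next, yt_pred, _ = lstm_cell_forward(xt, a_prev, c_prev, parameters)
print(a_next.shape, c_next.shape, yt_pred.shape)  # (5, 10) (5, 10) (2, 10)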
def encode_letter(letter):
"""
This will encode a tetromino letter as a small integer
"""
value = None
if letter == 'i':
value = 0
elif letter == 'j':
value = 1
elif letter == 'l':
value = 2
elif letter == 'o':
value = 3
elif letter == 's':
value = 4
elif letter == 't':
value = 5
elif letter == 'z':
value = 6
return value | 6c72c4c9e44c93d045296ab1f49c7783f2b4fc59 | 7,666 |
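# encode_letter usage: the seven tetromino letters map to 0-6, anything else to None.
print([encode_letter(ch) for ch in "ijlostz"])  # [0, 1, 2, 3, 4, 5, 6]
print(encode_letter("x"))  # None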
async def register_log_event(
registration: LogEventRegistration, db: Session = Depends(get_db)
):
"""
Log event registration handler.
:param db:
:param registration: Registration object
:return: None
"""
reg_id = str(uuid4())
# Generate message for registration topic
msg = LogEventRegistrationMessage(
to_address=registration.address,
keyword=registration.keyword,
position=registration.position,
)
# Produce message for registration topic
producer.produce(
topic=settings.REGISTRATIONS_TOPIC,
key=string_serializer(reg_id, key_context),
value=json_serializer(msg.dict(), value_context),
callback=acked,
)
retry_count = 0
while True:
if retry_count >= settings.MAX_CONFIRM_WAIT:
raise HTTPException(
500, "Registration not confirmed. Try again. (NOINSERT)"
)
try:
# Query the DB to check if insert was done correctly
row = crud.get_event_registration_by_id_no_404(db, reg_id)
if row:
break
else:
retry_count += 1
sleep(1)
        except Exception:
retry_count += 1
sleep(1)
# Check if query returned correct result
if (
not row.to_address == registration.address
and not row.keyword == registration.keyword
and not row.position == registration.position
):
raise HTTPException(500, "Registration not confirmed. Try again. (NOMATCH)")
return {"reg_id": reg_id, "status": "registered"} | 62b84b9efa88512634d9c7a050e7c61ff06ba71a | 7,667 |
def cvAbsDiffS(*args):
"""cvAbsDiffS(CvArr src, CvArr dst, CvScalar value)"""
return _cv.cvAbsDiffS(*args) | b888683d1522c46c9dc7738a18b80f56efe975d3 | 7,668 |
from . import views # this must be placed here, after the app is created
def create_template_app(**kwargs):
"""Create a template Flask app"""
app = create_app(**kwargs)
app.register_blueprints()
return app | fbbb0018cd4da6897842f658ba3baf207e5614cc | 7,669 |
def mse(predict, actual):
"""
Examples(rounded for precision):
>>> actual = [1,2,3];predict = [1,4,3]
>>> np.around(mse(predict,actual),decimals = 2)
1.33
>>> actual = [1,1,1];predict = [1,1,1]
>>> mse(predict,actual)
0.0
"""
predict = np.array(predict)
actual = np.array(actual)
difference = predict - actual
square_diff = np.square(difference)
score = square_diff.mean()
return score | c42ee6d5531d40f727c41463f938c9c8f4ec6e84 | 7,670 |
import random
def make_demo_measurements(num_measurements, extra_tags=frozenset()):
"""Make a measurement object."""
return [
make_flexural_test_measurement(
my_id=__random_my_id(),
deflection=random.random(),
extra_tags=extra_tags
) for _ in range(num_measurements)
] | 10c452936e889a8553afd1a9a570e34abae73470 | 7,671 |
from re import S
def _nrc_coron_rescale(self, res, coord_vals, coord_frame, siaf_ap=None, sp=None):
"""
Function for better scaling of NIRCam coronagraphic output for sources
that overlap the image masks.
"""
if coord_vals is None:
return res
nfield = np.size(coord_vals[0])
psf_sum = _nrc_coron_psf_sums(self, coord_vals, coord_frame, siaf_ap=siaf_ap)
if psf_sum is None:
return res
# Scale by countrate of observed spectrum
if (sp is not None) and (not isinstance(sp, list)):
nspec = 1
obs = S.Observation(sp, self.bandpass, binset=self.bandpass.wave)
sp_counts = obs.countrate()
elif (sp is not None) and (isinstance(sp, list)):
nspec = len(sp)
if nspec==1:
obs = S.Observation(sp[0], self.bandpass, binset=self.bandpass.wave)
sp_counts = obs.countrate()
else:
sp_counts = []
for i, sp_norm in enumerate(sp):
obs = S.Observation(sp_norm, self.bandpass, binset=self.bandpass.wave)
sp_counts.append(obs.countrate())
sp_counts = np.array(sp_counts)
else:
nspec = 0
sp_counts = 1
if nspec>1 and nspec!=nfield:
_log.warn("Number of spectra should be 1 or equal number of field points")
# Scale by count rate
psf_sum *= sp_counts
# Re-scale PSF by total sums
if isinstance(res, fits.HDUList):
for i, hdu in enumerate(res):
hdu.data *= (psf_sum[i] / hdu.data.sum())
elif nfield==1:
res *= (psf_sum[0] / res.sum())
else:
for i, data in enumerate(res):
data *= (psf_sum[i] / data.sum())
return res | 3b4e8596177e126955c7665333dd1305603f4e66 | 7,672 |
from .serializers import DocumentRelationSerializer
def re_list(request):
""" Returns the available relation tasks for a specific user
Accessed through a JSON API endpoint
"""
cmd_str = ""
with open('mark2cure/api/commands/get-relations.sql', 'r') as f:
cmd_str = f.read()
# Start the DB Connection
c = connection.cursor()
c.execute('SET @user_work_max = {rel_work_size};'.format(rel_work_size=20))
c.execute('SET @k_max = {completions};'.format(completions=settings.ENTITY_RECOGNITION_K))
c.execute('SET @user_id = {user_id};'.format(user_id=request.user.pk))
c.execute('SET @rel_ann_content_type_id = 56;')
c.execute(cmd_str)
queryset = [{'id': x[0],
'document_id': x[1],
'title': x[2],
'total_document_relationships': x[3],
'user_document_relationships': x[4],
'community_answered': x[5],
'community_completed': x[6],
'community_progress': x[7],
'user_completed': x[8],
'user_progress': x[9],
'user_answered': x[10],
'user_view_completed': x[11]} for x in c.fetchall()]
# Close the connection
c.close()
# Prevent documents from being shown that have since been completed
# by the community before the requqest.user could complete everything
for idx, item in enumerate(queryset):
if int(item['user_document_relationships']) <= 0:
document = get_object_or_404(Document, pk=item['id'])
first_section = document.section_set.first()
view = View.objects.filter(task_type='re', section=first_section, user=request.user).last()
# print(' - X:', document, first_section, view)
# (TODO) Why is there no View on these sections?
if view:
Point.objects.create(user=request.user,
amount=settings.RELATION_DOC_POINTS,
content_type=ContentType.objects.get_for_model(view),
object_id=view.id)
view.completed = True
view.save()
del queryset[idx]
serializer = DocumentRelationSerializer(queryset, many=True)
return Response(serializer.data) | 458b9fc9e784144e96ef8b4203e8b7b868c3350a | 7,673 |
import os
import time
def run_tc(discover):
"""
BeautifulReport模块实现测试报告
:param discover: 测试套件
:return:
"""
if not os.path.exists(path_conf.REPORT_PATH):
os.makedirs(path_conf.REPORT_PATH)
fileName = path_conf.PROJECT_NAME + '_' + time.strftime('%Y-%m-%d %H_%M_%S') + '.html'
try:
result = BeautifulReport(discover)
# theme四种用法:theme_default theme_cyan theme_candy theme_memories
result.report(filename=fileName,
description=path_conf.PROJECT_NAME + '_testreport',
report_dir=path_conf.REPORT_PATH,
theme='theme_cyan')
except Exception as e:
log.exception('Failed to generate test report')
raise e
else:
log.info('Test report generated successfully [%s]' % fileName)
return fileName | da601470bf901905f5ca43e1b6bdeec5d330eafa | 7,674 |
def csv_to_blob_ref(csv_str, # type: str
blob_service, # type: BlockBlobService
blob_container, # type: str
blob_name, # type: str
blob_path_prefix=None, # type: str
charset=None # type: str
):
# type: (...) -> AzmlBlobTable
"""
Uploads the provided CSV to the selected Blob Storage service, and returns a reference to the created blob in
case of success.
:param csv_str:
:param blob_service: the BlockBlobService to use, defining the connection string
:param blob_container: the name of the blob storage container to use. This is the "root folder" in azure blob
storage wording.
:param blob_name: the "file name" of the blob, ending with .csv or not (in which case the .csv suffix will be
appended)
:param blob_path_prefix: an optional folder prefix that will be used to store your blob inside the container.
For example "path/to/my/"
:param charset:
:return:
"""
# setup the charset used for file encoding
if charset is None:
charset = 'utf-8'
elif charset != 'utf-8':
print("Warning: blobs can be written in any charset but currently only utf-8 blobs may be read back into "
"DataFrames. We recommend setting charset to None or utf-8 ")
# validate inputs (the only one that is not validated below)
validate('csv_str', csv_str, instance_of=str)
# 1- first create the references in order to check all params are ok
blob_reference, blob_full_name = create_blob_ref(blob_service=blob_service, blob_container=blob_container,
blob_path_prefix=blob_path_prefix, blob_name=blob_name)
# -- push blob
blob_stream = BytesIO(csv_str.encode(encoding=charset))
# noinspection PyTypeChecker
blob_service.create_blob_from_stream(blob_container, blob_full_name, blob_stream,
content_settings=ContentSettings(content_type='text.csv',
content_encoding=charset))
# (For old method with temporary files: see git history)
return blob_reference | c0df47839e963a5401204bcd422c7f78a94efc87 | 7,675 |
def col_rev_reduce(matrix, col, return_ops=False):
"""
Reduces a column into reduced echelon form by transforming all numbers above the pivot position into 0's
:param matrix: list of lists of equal length containing numbers
:param col: index of column
:param return_ops: performed operations are returned
:return: list of lists of equal length containing numbers
"""
ops = []
pivot_row = 0 # Defaults to top row
# Find pivot row of the column
for row in range(len(matrix)-1, -1, -1):
if matrix[row][col] != 0:
pivot_row = row
break
# Transform all numbers above the pivot to 0
if matrix[pivot_row][col] != 0 and matrix[pivot_row][col] != 1:
factor = 1 / matrix[pivot_row][col]
matrix = row_multiply(matrix, pivot_row, factor)
ops.append(['multiplication', pivot_row, factor])
if pivot_row != 0:
for row in range(pivot_row):
if matrix[row][col] != 0:
factor = matrix[row][col] / matrix[pivot_row][col]
matrix = row_subtract(matrix, pivot_row, row, factor)
ops.append(['subtract', pivot_row, row, factor])
if return_ops:
return matrix, ops
else:
return matrix | ab97078f0c92537532673d3dba3cb399d932342e | 7,676 |
from typing import Dict
import math
def calculate_correlations(tetra_z: Dict[str, Dict[str, float]]) -> pd.DataFrame:
"""Return dataframe of Pearson correlation coefficients.
:param tetra_z: dict, Z-scores, keyed by sequence ID
Calculates Pearson correlation coefficient from Z scores for each
tetranucleotide. This is done longhand here, which is fast enough,
but for robustness we might want to do something else... (TODO).
Note that we report a correlation by this method, rather than a
percentage identity.
"""
orgs = sorted(tetra_z.keys())
correlations = pd.DataFrame(index=orgs, columns=orgs, dtype=float).fillna(1.0)
for idx, org1 in enumerate(orgs[:-1]):
for org2 in orgs[idx + 1 :]:
if not sorted(tetra_z[org1].keys()) == sorted(tetra_z[org2].keys()):
raise AssertionError()
tets = sorted(tetra_z[org1].keys())
zscores = [
[tetra_z[org1][t] for t in tets],
[tetra_z[org2][t] for t in tets],
]
zmeans = [sum(zscore) / len(zscore) for zscore in zscores]
zdiffs = [
[z - zmeans[0] for z in zscores[0]],
[z - zmeans[1] for z in zscores[1]],
]
diffprods = sum(
[zdiffs[0][i] * zdiffs[1][i] for i in range(len(zdiffs[0]))]
)
zdiffs2 = [sum([z * z for z in zdiffs[0]]), sum([z * z for z in zdiffs[1]])]
correlations[org1][org2] = diffprods / math.sqrt(zdiffs2[0] * zdiffs2[1])
correlations[org2][org1] = correlations[org1][org2]
return correlations | 8acc745fb35f41b38ba186e8f367ea895191f894 | 7,677 |
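# Small sanity check for calculate_correlations. The function above uses pandas as `pd`
# without showing the import, so it is imported here; the two Z-score profiles are
# proportional, so the off-diagonal correlations come out as 1.0.
import pandas as pd
z = {"orgA": {"AAAA": 1.0, "AAAC": -1.0}, "orgB": {"AAAA": 2.0, "AAAC": -2.0}}
print(calculate_correlations(z))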
def rowfuncbynumber(tup,othertable, number):
"""tup is the tuple of row labels for the current row. By default it is passed back unmodified.
You can supply your own rowfunc to transform it when the tables being merged do not have the
same structure or if you want to prevent the merging of certain rows. Note that the tuple
starts with "row" or "column", so the first normally visible element is tup[1].
othertable is True if the function was called while processing the "other" table
and False if processing the main table.
number is the row number of the table. If the join is just by position, you can use
this function to align the tables even if the labels are not unique
to use this function, specify ROWFUNCTION=SPSSINC_MERGE_TABLES.rowfuncbynumber"""
if debug:
print(("row:", (othertable and "other:" or "main:"), number, tup))
tup = (str(number),)
return tup | bd64c52489a9786b4c1de2c9ee7c95643b2a5fe9 | 7,678 |
import os
def save_book_metadata() -> FlaskResponse:
"""
XHR request. Update the information about a book.
Raises:
404: if the user is not admin or the request is not POST.
"""
if not is_admin():
abort(404) # pragma: no cover
if not (
all(
x in request.form
for x in [
"input-book-id", # hidden field
"input-book-url", # hidden field
"input-book-filename", # hidden field, this is the current file before update
"input-title",
"input-description",
"input-period",
"input-status",
"input-status",
"input-crowdfunding-goal",
"input-access-level",
]
)
and (all(x in request.files for x in ["input-book-file", "input-thumbnail"]))
):
return basic_json(False, "Bad request, missing data!")
book_id = int(request.form["input-book-id"])
book_url = secure_filename(escape(request.form["input-book-url"]))
book_filename = secure_filename(escape(request.form["input-book-filename"]))
title = escape(request.form["input-title"].strip())
if not title:
return basic_json(False, "Missing title!")
description_md = escape(request.form["input-description"].strip())
if not description_md:
return basic_json(False, "Missing description!")
description_html = markdown.Markdown(
extensions=current_app.config["MD_EXT"]
).convert(description_md)
period = escape(request.form["input-period"].strip())
status = escape(request.form["input-status"]).lower()
if status not in ["released", "crowdfunding"]:
status = "draft" # reset unknown or empty status to 'draft'
try:
crowdfunding_goal = float(request.form["input-crowdfunding-goal"])
except (ValueError, TypeError):
crowdfunding_goal = 0
if status == "crowdfunding" and crowdfunding_goal <= 0:
return basic_json(False, "Crowdfunding goal required or change status!")
access_level = int(request.form["input-access-level"])
if not check_access_level_range(access_level):
return basic_json(False, "Invalid access level!")
book_dir_path = os.path.join(current_app.config["SHELF_FOLDER"], book_url)
file = request.files["input-book-file"]
new_book = file.filename != ""
if new_book:
if not file_extension(file.filename, "book"):
return basic_json(False, "Wrong book file extension!")
new_book_filename = secure_filename(
file.filename.rsplit("/", 1)[1] if "/" in file.filename else file.filename
)
if book_filename != new_book_filename: # replace the old file with the new one
old_path_file = os.path.join(book_dir_path, book_filename)
if os.path.isfile(old_path_file):
os.remove(old_path_file)
book_filename = new_book_filename
file.save(os.path.join(book_dir_path, new_book_filename))
thumbnail = request.files["input-thumbnail"]
new_thumbnail = thumbnail.filename != ""
if new_thumbnail:
thumbnail_ext = file_extension(thumbnail.filename, "any")
if thumbnail_ext != "jpg":
return basic_json(
False, "Thumbnail extension must be jpg!"
) # changes had been done if new_book anyway!
thumbnail_path = os.path.join(book_dir_path, "card.jpg")
thumbnail.save(thumbnail_path)
preview_card = preview_image(thumbnail_path).decode()
cursor = mysql.cursor()
cursor.execute(
"""UPDATE shelf SET file_name='{file_name}', title='{title}',
period='{period}', description_md='{description_md}',
description_html='{description_html}', access_level={access_level},
date_modified=CURRENT_TIMESTAMP,status='{status}',
crowdfunding_goal={crowdfunding_goal},
preview_card={preview_card}
WHERE book_id={book_id}""".format(
file_name=book_filename,
title=title,
period=period,
description_md=description_md,
description_html=description_html,
access_level=access_level,
status=status,
crowdfunding_goal=crowdfunding_goal if crowdfunding_goal > 0 else "NULL",
book_id=book_id,
preview_card="'" + preview_card + "'" if new_thumbnail else "preview_card",
)
)
mysql.commit()
return basic_json(True, "Changes saved!") | df92df8dbc817df1778b5d25c71f6d66a46463b5 | 7,679 |
def query_category_members(category, language='en', limit=100):
"""
action=query,prop=categories
Returns all the members of a category up to the specified limit
"""
url = api_url % (language)
query_args = {
'action': 'query',
'list': 'categorymembers',
'cmtitle': category,
'format': 'json',
'cmlimit': min(limit, 500)
}
members = []
while True:
json = _run_query(query_args, language)
for member in json['query']['categorymembers']:
members.append(member['title'])
if 'query-continue' in json and len(members) <= limit:
continue_item = json['query-continue']['categorymembers']['cmcontinue']
query_args['cmcontinue'] = continue_item
else:
break
return members[0:limit] | 4a09d73cce237152405031004e967192ad3f8929 | 7,680 |
from typing import List
def _tokenize_text(text: str, language: str) -> List[str]:
"""Splits text into individual words using the correct method for the given language.
Args:
text: Text to be split.
language: The configured language code.
Returns:
The text tokenized into a list of words.
"""
if language == constants.LANGUAGE_CODE_JA:
return _split_words_in_japanese(text)
else:
return text.split() | 284f1a7625de149b7f97ce51dcf88110ebae02b0 | 7,681 |
def ml64_sort_order(c):
"""
Sort function for measure contents.
Items are sorted by time and then, for equal times, in this order:
* Patch Change
* Tempo
* Notes and rests
"""
if isinstance(c, chirp.Note):
return (c.start_time, 10)
elif isinstance(c, Rest):
return (c.start_time, 10)
elif isinstance(c, MeasureMarker):
return (c.start_time, 1)
elif isinstance(c, TempoEvent):
return (c.start_time, 3)
elif isinstance(c, ProgramEvent):
return (c.start_time, 2)
else:
return (c.start_time, 5) | 752a68796a12835661cfce5b2cfe5cba3ad5d7ef | 7,682 |
def binary_logistic_loss_grad(linear_o, y):
"""Derivative of the binary_logistic_loss w.r.t. the linear output"""
# Sometimes denom overflows, but it's OK, since if it's very large, it would
# be set to INF and the output correctly takes the value of 0.
# TODO: Fix overflow warnings.
denom = 1 + np.exp(y.flatten() * linear_o.flatten())
return -y / (denom * linear_o.size) | 8c7aabfedafbd08f0e82b5d8a01837a70ab314ac | 7,683 |
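# Quick numeric check for binary_logistic_loss_grad (numpy assumed available as np,
# as in the function body above). Labels are +/-1 and linear_o holds the raw scores.
import numpy as np
linear_o = np.array([0.5, -1.0, 2.0])
y = np.array([1.0, -1.0, 1.0])
print(binary_logistic_loss_grad(linear_o, y))  # one gradient entry per sample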
def electron_mass_MeVc2():
"""The rest mass of the electron in MeV/c**2
https://en.wikipedia.org/wiki/Electron
"""
return 0.5109989461 | 4496ddcc35a0aa6528cc19e47233f5a81626fefe | 7,684 |
def opensearch_plugin(request):
"""Render an OpenSearch Plugin."""
host = "%s://%s" % ("https" if request.is_secure() else "http", request.get_host())
# Use `render_to_response` here instead of `render` because `render`
# includes the request in the context of the response. Requests
# often include the session, which can include pickable things.
# `render_to_respones` doesn't include the request in the context.
return render_to_response(
"search/plugin.html",
{"host": host, "locale": request.LANGUAGE_CODE},
content_type="application/opensearchdescription+xml",
) | 5df7e8a8bb89ff5e83b51f1bc4b634db9dea6930 | 7,685 |
from datetime import datetime
def serialize_time(output_value: datetime.time) -> str:
""" Serializes an internal value to include in a response. """
return output_value.isoformat() | 81fc648eaf27efc47531f9895a9523aa5f012cf6 | 7,686 |
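# serialize_time usage: a datetime.time value is emitted in ISO-8601 form.
from datetime import time as _time
print(serialize_time(_time(13, 45, 30)))  # "13:45:30"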
from . import persist
def find_posix_python(version):
"""Find the nearest version of python and return its path."""
if version:
# Try the exact requested version first
path = find_executable('python' + version)
persist.debug('find_posix_python: python{} => {}'.format(version, path))
# If that fails, try the major version
if not path:
path = find_executable('python' + version[0])
persist.debug('find_posix_python: python{} => {}'.format(version[0], path))
# If the major version failed, see if the default is available
if not path:
path = find_executable('python')
persist.debug('find_posix_python: python =>', path)
else:
path = find_executable('python')
persist.debug('find_posix_python: python =>', path)
return path | c3bbfa6d4dba5321242ada0f73acccd701bc9796 | 7,687 |
def decode_binary(state_int):
"""
Decode binary representation into the list view
:param state_int: integer representing the field
:return: list of GAME_COLS lists
"""
assert isinstance(state_int, int)
bits = int_to_bits(state_int, bits=GAME_COLS*GAME_ROWS + GAME_COLS*BITS_IN_LEN)
res = []
len_bits = bits[GAME_COLS*GAME_ROWS:]
for col in range(GAME_COLS):
vals = bits[col*GAME_ROWS:(col+1)*GAME_ROWS]
lens = bits_to_int(len_bits[col*BITS_IN_LEN:(col+1)*BITS_IN_LEN])
if lens > 0:
vals = vals[:-lens]
res.append(vals)
return res | a2dd5462031eeb82d9e3b59565d41b2b06e8e2d8 | 7,688 |
def clean_features(vgsales):
"""
This function cleans up some of the dataset's features. The dataset is
quite messy as many values are missing from both categorical and numerical
features. Many of these features are difficult to impute in a reasonable
manner.
<class 'pandas.core.frame.DataFrame'>
Index: 16719 entries, Wii Sports to Winning Post 8 2016
Data columns (total 9 columns):
Platform 16719 non-null category
Release 16450 non-null Int64
Genre 16717 non-null category
Publisher 16665 non-null category
Sales 16719 non-null float64
Metacritic 8137 non-null float64
Metacritic_N 8137 non-null Int64
Developer 10096 non-null category
ESRB 9950 non-null category
dtypes: Int64(2), category(5), float64(2)
memory usage: 1.5+ MB
Some of the hardest features to impute (genre or platform, for example)
don't have many nulls. Others, like the review averages, can be imputed.
:param path: A path to a Video_Games_Sales_as_at_22_Dec_2016.csv compatible
dataset.
"""
# A few of the release years are set to 2020 or other years past 2016.
# Just setting them to 2016 here. They're not a lot of them anyway.
vgsales.Release.loc[vgsales.Release > 2016] = 2016
# =============================================================================
# https://en.wikipedia.org/wiki/Entertainment_Software_Rating_Board
#
# The ESRB feature will be converted to an ordinal variable for machine
# learning during preprocessing later. Thus, we organize them here and
# add an NA for missing values.
# =============================================================================
esrb_ordinal = ["NA", "RP", "EC", "E", "E10+", "T", "M", "AO"]
vgsales.ESRB.cat.set_categories(esrb_ordinal, True, False, True)
return vgsales | ffcae20af436d4012381c4933c841c3689fbbca0 | 7,689 |
def get_object(proposition):
"""[75]
Returns the object of a given proposition
"""
return proposition[2][0] | dc9d5fe007bb66ee92cddd964bb29b897a561c8c | 7,690 |
from hmac import compare_digest, new as hmac
def __verify_hmac(data: bytes, ohmac: bytes, key: bytes) -> bool:
"""
This function verifies that a provided HMAC matches a computed HMAC for
the data given a key.
Args:
data: the data to HMAC and verify
ohmac: the original HMAC, normally appended to the data
key: the key to HMAC with for verification
Returns:
a boolean value denoting whether or not the HMAC's match
"""
return compare_digest(ohmac, hmac(key, data, HMAC_HS).digest()) | 6381bb70e35cccafdb3dcafc2428ca5ca850364a | 7,691 |
def create_block_option_from_template(text: str, value: str):
"""Helper function which generates the option block for modals / views"""
return {"text": {"type": "plain_text", "text": str(text), "emoji": True}, "value": str(value)} | 23f0cf455e659eddeca0b4eda732995feeac6341 | 7,692 |
from typing import Any
import json
def get_token_payload(token: str) -> Any:
"""Extract the payload from the token.
Args:
token (str):
A JWT token containing the session_id and other data.
Returns:
dict
"""
decoded = json.loads(_base64_decode(token.split('.')[0]))
del decoded['session_id']
return decoded | 1b9b03f8e9db6940cc44725025c1ed2ccf751e89 | 7,693 |
import numpy as np
import torch
def create_mock_target(number_of_nodes, number_of_classes):
"""
Creating a mock target vector.
"""
return torch.LongTensor([np.random.randint(0, number_of_classes-1) for node in range(number_of_nodes)]) | e226d9e7d1944b0736952d5952e8ef3438a1e54b | 7,694 |
def initFindAndFit(parameters):
"""
Initialize and return a SplinerFISTAFinderFitter object.
"""
# Create spline object.
spline_fn = splineToPSF.loadSpline(parameters.getAttr("spline"))
# Create peak finder.
finder = SplinerFISTAPeakFinder(parameters = parameters,
psf_object = spline_fn)
# Create cubicFitC.CSplineFit object.
mfitter = findPeaksStd.initFitter(finder, parameters, spline_fn)
# Create peak fitter.
fitter = fitting.PeakFitterArbitraryPSF(mfitter = mfitter,
parameters = parameters)
# Specify which properties we want from the analysis.
properties = ["background", "error", "height", "sum", "x", "y", "z"]
return fitting.PeakFinderFitter(peak_finder = finder,
peak_fitter = fitter,
properties = properties) | 6f045b664157437fb33ab3804b84fe1c7d1deb4e | 7,695 |
def UpdateDatabase(asset, images, status):
"""Update the database entries of the given asset with the given data."""
return {'asset': asset} | 1d7d42355410be7481e706e47d7810755974dadc | 7,696 |
def get_max_word_length(days: dict, keys: list) -> int:
"""
    Finds the length of the longest word.
"""
max_word_len = 0
for key in keys:
if days.get(key):
for _, data in days.get(key).items():
value = data.split(" ")
for word in value:
if len(word) > max_word_len:
max_word_len = len(word)
return int(max_word_len) | 8a98c7384839f10fdfa713c535b3bf7765416b4c | 7,697 |
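# Example call for get_max_word_length, using a made-up `days` mapping of the shape the
# function expects: {key: {label: "some text", ...}, ...}.
days = {
    "monday": {1: "short words here", 2: "unquestionably verbose"},
    "tuesday": {1: "tiny"},
}
print(get_max_word_length(days, ["monday", "tuesday"]))  # 14 ("unquestionably")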
def rateCBuf(co2: float, par: float, params: dict,
rates: dict, states: dict) -> float:
"""
Rate of increase of carbohydrates in the buffer
During the light period, carbohydrates produced by
photosynthesis are stored in the buffer and, whenever
carbohydrates are available in the buffer, carbohydrates flow
to the plant organs. This carbohydrate flow stops when the
buffer approaches its lower limit. When the buffer approaches
its upper limit, further carbohydrates cannot be stored and
photosynthesis will be inhibited.
Parameters
----------
co2 : float
Carbon dioxide concentration on air [μmol {CO2} mol-1 {air}]
par : float
Photosynthetic active radiation [μmol {photons} m-2 s-1]
params : dict
Parameters saved as model constants
rates : dict
Auxiliary variable including rates and
flows for the different fruit development stages
states : dict
State variables of the model
Returns
-------
float
Rate of accumulation of carbohydrates in the buffer [mg m-2 s-1]
"""
# These rates relate to the carbs available in the buffer by the maximum
# value available for the buffer. So in theory even if all of them
# are maximum, they would be compatible. However, the buffer is not always
# in the maximum. So they could reach their potential and demand more
# carbs than are available in the buffer.
# If there are not enough, there is the inhibition phenomena, but right
# now they don't seem compatible, as there is growth without
# enough carbs because of the different treatment of the first fruit
# stage.
rates["MCBufLeaf"] = mcBufOrg(organ="leaf", params=params, states=states)
rates["MCBufFruit"] = mcBufOrg(organ="fruit", params=params, states=states)
rates["MCBufStem"] = mcBufOrg(organ="stem", params=params, states=states)
co2_st = co2Stom(co2=co2, params=params)
# Photosynthesis Rate
mcAirBuf_ = mcAirBuf(co2=co2_st, par=par, params=params, states=states)
# Growth respiration
mcBufAir_ = mcBufAir(params=params, states=states)
cBuf_ = (mcAirBuf_ - rates["MCBufLeaf"] - rates["MCBufFruit"] - rates["MCBufStem"] -
mcBufAir_)
return cBuf_ | 33a6c5fcc6d9d1a0641d197dffa1ee5fd6afd038 | 7,698 |
import hashlib
import json
def get_config_tag(config):
"""Get configuration tag.
Whenever configuration changes making the intermediate representation
incompatible the tag value will change as well.
"""
# Configuration attributes that affect representation value
config_attributes = dict(frame_sampling=config.proc.frame_sampling)
sha256 = hashlib.sha256()
sha256.update(json.dumps(config_attributes).encode("utf-8"))
return sha256.hexdigest()[:40] | 2cab6e9473822d0176e878114ceb3fda94d1e0f7 | 7,699 |
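# Usage sketch for get_config_tag. The real config object is not shown in the snippet,
# so a stand-in with the same attribute path (config.proc.frame_sampling) is built here.
from types import SimpleNamespace
cfg = SimpleNamespace(proc=SimpleNamespace(frame_sampling=2))
tag = get_config_tag(cfg)
print(len(tag), tag[:8])  # 40 hex characters; the value changes when frame_sampling changes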