Dataset preview schema (one row per source file):

| column | dtype | values |
|---|---|---|
| hexsha | string | lengths 40 to 40 |
| size | int64 | 1 to 1.03M |
| ext | string | 10 classes |
| lang | string | 1 value |
| max_stars_repo_path | string | lengths 3 to 239 |
| max_stars_repo_name | string | lengths 5 to 130 |
| max_stars_repo_head_hexsha | string | lengths 40 to 78 |
| max_stars_repo_licenses | sequence | lengths 1 to 10 |
| max_stars_count | int64 | 1 to 191k, nullable |
| max_stars_repo_stars_event_min_datetime | string | lengths 24 to 24, nullable |
| max_stars_repo_stars_event_max_datetime | string | lengths 24 to 24, nullable |
| max_issues_repo_path | string | lengths 3 to 239 |
| max_issues_repo_name | string | lengths 5 to 130 |
| max_issues_repo_head_hexsha | string | lengths 40 to 78 |
| max_issues_repo_licenses | sequence | lengths 1 to 10 |
| max_issues_count | int64 | 1 to 67k, nullable |
| max_issues_repo_issues_event_min_datetime | string | lengths 24 to 24, nullable |
| max_issues_repo_issues_event_max_datetime | string | lengths 24 to 24, nullable |
| max_forks_repo_path | string | lengths 3 to 239 |
| max_forks_repo_name | string | lengths 5 to 130 |
| max_forks_repo_head_hexsha | string | lengths 40 to 78 |
| max_forks_repo_licenses | sequence | lengths 1 to 10 |
| max_forks_count | int64 | 1 to 105k, nullable |
| max_forks_repo_forks_event_min_datetime | string | lengths 24 to 24, nullable |
| max_forks_repo_forks_event_max_datetime | string | lengths 24 to 24, nullable |
| content | string | lengths 1 to 1.03M |
| avg_line_length | float64 | 1 to 958k |
| max_line_length | int64 | 1 to 1.03M |
| alphanum_fraction | float64 | 0 to 1 |
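The rows below follow this schema. As a minimal, illustrative sketch (not part of the dump: the file name `sample.parquet` is a placeholder, and it assumes the rows are available as a Parquet export), the columns can be inspected with pandas:

```python
# Illustrative only: inspect rows that follow the schema above.
# "sample.parquet" is a hypothetical file name, not part of this dump.
import pandas as pd

df = pd.read_parquet("sample.parquet")
print(df.dtypes)                                              # matches the schema table
print(df[["max_stars_repo_name", "max_stars_repo_path", "size"]].head())
print(df.loc[0, "content"][:200])                             # start of one source file
```
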
794781bd6e364a332a2613163d1cb55f1e20423f | 1,838 | py | Python | fast_jtnn/vocab.py | croningp/JTNN-VAE | bc5562dabe30118fa10be3b53395a504f8d78173 | [
"MIT"
] | stars: 2 (2021-11-12T11:54:54.000Z to 2022-03-28T18:05:10.000Z) | issues: null | forks: null
import copy
import rdkit.Chem as Chem
def get_slots(smiles):
mol = Chem.MolFromSmiles(smiles)
return [
(atom.GetSymbol(), atom.GetFormalCharge(), atom.GetTotalNumHs())
for atom in mol.GetAtoms()
]
class Vocab(object):
benzynes = [
"C1=CC=CC=C1",
"C1=CC=NC=C1",
"C1=CC=NN=C1",
"C1=CN=CC=N1",
"C1=CN=CN=C1",
"C1=CN=NC=N1",
"C1=CN=NN=C1",
"C1=NC=NC=N1",
"C1=NN=CN=N1",
]
penzynes = [
"C1=C[NH]C=C1",
"C1=C[NH]C=N1",
"C1=C[NH]N=C1",
"C1=C[NH]N=N1",
"C1=COC=C1",
"C1=COC=N1",
"C1=CON=C1",
"C1=CSC=C1",
"C1=CSC=N1",
"C1=CSN=C1",
"C1=CSN=N1",
"C1=NN=C[NH]1",
"C1=NN=CO1",
"C1=NN=CS1",
"C1=N[NH]C=N1",
"C1=N[NH]N=C1",
"C1=N[NH]N=N1",
"C1=NN=N[NH]1",
"C1=NN=NS1",
"C1=NOC=N1",
"C1=NON=C1",
"C1=NSC=N1",
"C1=NSN=C1",
]
def __init__(self, smiles_list):
self.vocab = smiles_list
self.vmap = {x: i for i, x in enumerate(self.vocab)}
self.slots = [get_slots(smiles) for smiles in self.vocab]
Vocab.benzynes = [
s
for s in smiles_list
if s.count("=") >= 2 and Chem.MolFromSmiles(s).GetNumAtoms() == 6
] + ["C1=CCNCC1"]
Vocab.penzynes = [
s
for s in smiles_list
if s.count("=") >= 2 and Chem.MolFromSmiles(s).GetNumAtoms() == 5
] + ["C1=NCCN1", "C1=NNCC1"]
def get_index(self, smiles):
return self.vmap[smiles]
def get_smiles(self, idx):
return self.vocab[idx]
def get_slots(self, idx):
return copy.deepcopy(self.slots[idx])
def size(self):
return len(self.vocab)
avg_line_length: 23.564103 | max_line_length: 77 | alphanum_fraction: 0.476061
794781c1a01e5a86b741d3a1d0e4b5d36c2234c0 | 5,434 | py | Python | lib/fpn/proposal_assignments/proposal_assignments_postnms.py | zxydi1992/neural-motifs | d37a441c612b51c0085d364a3c6a58c2309164d4 | [
"MIT"
] | stars: null | issues: null | forks: null
# --------------------------------------------------------
# Goal: assign ROIs to targets
# --------------------------------------------------------
import numpy as np
import numpy.random as npr
from .proposal_assignments_rel import _sel_rels
from nmotif.lib.fpn.box_utils import bbox_overlaps
from nmotif.lib.pytorch_misc import to_variable
import torch
@to_variable
def proposal_assignments_postnms(
rois, gt_boxes, gt_classes, gt_rels, nms_inds, image_offset, fg_thresh=0.5,
max_objs=100, max_rels=100, rand_val=0.01):
"""
Assign object detection proposals to ground-truth targets. Produces proposal
classification labels and bounding-box regression targets.
    :param rois: [img_ind, x1, y1, x2, y2]
    :param gt_boxes: [num_boxes, 4] array of [x0, y0, x1, y1]
    :param gt_classes: [num_boxes, 2] array of [img_ind, class]
    :param gt_rels: [num_boxes, 4] array of [img_ind, box_0, box_1, rel type]
    :param fg_thresh: overlap threshold for a ROI to be considered foreground (if >= fg_thresh)
:return:
rois: [num_rois, 5]
labels: [num_rois] array of labels
rel_labels: [num_rels, 4] (img ind, box0 ind, box1ind, rel type)
"""
pred_inds_np = rois[:, 0].cpu().numpy().astype(np.int64)
pred_boxes_np = rois[:, 1:].cpu().numpy()
nms_inds_np = nms_inds.cpu().numpy()
sup_inds_np = np.setdiff1d(np.arange(pred_boxes_np.shape[0]), nms_inds_np)
# split into chosen and suppressed
chosen_inds_np = pred_inds_np[nms_inds_np]
chosen_boxes_np = pred_boxes_np[nms_inds_np]
suppre_inds_np = pred_inds_np[sup_inds_np]
suppre_boxes_np = pred_boxes_np[sup_inds_np]
gt_boxes_np = gt_boxes.cpu().numpy()
gt_classes_np = gt_classes.cpu().numpy()
gt_rels_np = gt_rels.cpu().numpy()
gt_classes_np[:, 0] -= image_offset
gt_rels_np[:, 0] -= image_offset
num_im = gt_classes_np[:, 0].max()+1
rois = []
obj_labels = []
rel_labels = []
num_box_seen = 0
for im_ind in range(num_im):
chosen_ind = np.where(chosen_inds_np == im_ind)[0]
suppre_ind = np.where(suppre_inds_np == im_ind)[0]
gt_ind = np.where(gt_classes_np[:, 0] == im_ind)[0]
gt_boxes_i = gt_boxes_np[gt_ind]
gt_classes_i = gt_classes_np[gt_ind, 1]
gt_rels_i = gt_rels_np[gt_rels_np[:, 0] == im_ind, 1:]
# Get IOUs between chosen and GT boxes and if needed we'll add more in
chosen_boxes_i = chosen_boxes_np[chosen_ind]
suppre_boxes_i = suppre_boxes_np[suppre_ind]
n_chosen = chosen_boxes_i.shape[0]
n_suppre = suppre_boxes_i.shape[0]
n_gt_box = gt_boxes_i.shape[0]
# add a teensy bit of random noise because some GT boxes might be duplicated, etc.
pred_boxes_i = np.concatenate((chosen_boxes_i, suppre_boxes_i, gt_boxes_i), 0)
ious = bbox_overlaps(pred_boxes_i, gt_boxes_i) + rand_val*(
np.random.rand(pred_boxes_i.shape[0], gt_boxes_i.shape[0])-0.5)
# Let's say that a box can only be assigned ONCE for now because we've already done
# the NMS and stuff.
is_hit = ious > fg_thresh
obj_assignments_i = is_hit.argmax(1)
obj_assignments_i[~is_hit.any(1)] = -1
vals, first_occurance_ind = np.unique(obj_assignments_i, return_index=True)
obj_assignments_i[np.setdiff1d(
np.arange(obj_assignments_i.shape[0]), first_occurance_ind)] = -1
extra_to_add = np.where(obj_assignments_i[n_chosen:] != -1)[0] + n_chosen
# Add them in somewhere at random
num_inds_to_have = min(max_objs, n_chosen + extra_to_add.shape[0])
boxes_i = np.zeros((num_inds_to_have, 4), dtype=np.float32)
labels_i = np.zeros(num_inds_to_have, dtype=np.int64)
inds_from_nms = np.sort(np.random.choice(num_inds_to_have, size=n_chosen, replace=False))
inds_from_elsewhere = np.setdiff1d(np.arange(num_inds_to_have), inds_from_nms)
boxes_i[inds_from_nms] = chosen_boxes_i
labels_i[inds_from_nms] = gt_classes_i[obj_assignments_i[:n_chosen]]
boxes_i[inds_from_elsewhere] = pred_boxes_i[extra_to_add]
labels_i[inds_from_elsewhere] = gt_classes_i[obj_assignments_i[extra_to_add]]
# Now, we do the relationships. same as for rle
all_rels_i = _sel_rels(bbox_overlaps(boxes_i, gt_boxes_i),
boxes_i,
labels_i,
gt_classes_i,
gt_rels_i,
fg_thresh=fg_thresh,
fg_rels_per_image=100)
all_rels_i[:,0:2] += num_box_seen
rois.append(np.column_stack((
im_ind * np.ones(boxes_i.shape[0], dtype=np.float32),
boxes_i,
)))
obj_labels.append(labels_i)
rel_labels.append(np.column_stack((
im_ind*np.ones(all_rels_i.shape[0], dtype=np.int64),
all_rels_i,
)))
num_box_seen += boxes_i.size
    # note: the old `async=` kwarg is a reserved word on Python >= 3.7; newer PyTorch spells it `non_blocking=`
    rois = torch.FloatTensor(np.concatenate(rois, 0)).cuda(gt_boxes.get_device(), non_blocking=True)
    labels = torch.LongTensor(np.concatenate(obj_labels, 0)).cuda(gt_boxes.get_device(), non_blocking=True)
    rel_labels = torch.LongTensor(np.concatenate(rel_labels, 0)).cuda(gt_boxes.get_device(),
                                                                      non_blocking=True)
return rois, labels, rel_labels
avg_line_length: 40.552239 | max_line_length: 100 | alphanum_fraction: 0.635996
79478292166fc524440b421b0697ed7f1c90223f | 15,520 | py | Python | common/waf.py | cillianodonnell/rtems-docs | deb9c29b79fccd03790d35190996a348f64d5b61 | [
"BSD-2-Clause"
] | stars: null | issues: null | forks: null
import sys, os, re
from waflib.Build import BuildContext
import latex
sphinx_min_version = (1, 3)
def build_date():
import datetime
now = datetime.date.today()
m = now.strftime('%B')
y = now.strftime('%Y')
    if now.day in (11, 12, 13):
        s = 'th'
    elif now.day % 10 == 1:
        s = 'st'
    elif now.day % 10 == 2:
        s = 'nd'
    elif now.day % 10 == 3:
        s = 'rd'
    else:
        s = 'th'
d = '%2d%s' % (now.day, s)
return '%s %s %s' % (d, m, y)
def version_cmdline(ctx):
return '-Drelease="%s" -Dversion="%s"' % (ctx.env.VERSION, ctx.env.VERSION)
def sphinx_cmdline(ctx, build_type, conf_dir, doctrees, source_dir, output_dir):
rule = "${BIN_SPHINX_BUILD} %s -b %s -c %s %s -d %s %s %s" % \
(sphinx_verbose(ctx), build_type, conf_dir, version_cmdline(ctx),
doctrees, source_dir, output_dir)
return rule
def cmd_spell(ctx):
from waflib import Options
from sys import argv
from subprocess import call
Options.commands = None # stop warnings about knowing commands.
if not ctx.env.BIN_ASPELL:
ctx.fatal("'aspell' is required please install and re-run configure.")
if len(argv) < 3:
ctx.fatal("Please supply at least one file name")
files = argv[2:]
path = ctx.path.parent.abspath()
# XXX: add error checking eg check if file exists.
for file in files:
cmd = ctx.env.BIN_ASPELL + \
["-c",
"--personal=%s/common/spell/dict/rtems" % path,
"--extra-dicts=%s/common/spell/en_GB-ise-w_accents.multi" % path,
file]
print("running:", cmd)
call(cmd)
def cmd_linkcheck(ctx):
conf_dir = ctx.path.get_src()
source_dir = ctx.path.get_src()
buildtype = 'linkcheck'
build_dir, output_node, output_dir, doctrees = build_dir_setup(ctx, buildtype)
rule = sphinx_cmdline(ctx, buildtype, conf_dir, doctrees, source_dir, output_dir)
ctx(
rule = rule,
cwd = ctx.path.abspath(),
source = ctx.path.ant_glob('**/*.rst'),
target = "linkcheck/output.txt"
)
class spell(BuildContext):
__doc__ = "Check spelling. Supply a list of files or a glob (*.rst)"
cmd = 'spell'
fun = 'cmd_spell'
class linkcheck(BuildContext):
__doc__ = "Check all external URL references."
cmd = 'linkcheck'
fun = 'cmd_linkcheck'
def check_sphinx_version(ctx, minver):
version = ctx.cmd_and_log(ctx.env.BIN_SPHINX_BUILD +
['--version']).split(" ")[-1:][0].strip()
try:
ver = tuple(map(int, re.split('[\D]', version)))
except:
ctx.fatal("Sphinx version cannot be checked: %s" % version)
if ver < minver:
ctx.fatal("Sphinx version is too old: %s" % ".".join(map(str, ver)))
return ver
def sphinx_verbose(ctx):
return ' '.join(ctx.env.SPHINX_VERBOSE)
def is_top_build(ctx):
from_top = False
if ctx.env['BUILD_FROM_TOP'] and ctx.env['BUILD_FROM_TOP'] == 'yes':
from_top = True
return from_top
def build_dir_setup(ctx, buildtype):
where = buildtype
if is_top_build(ctx):
where = os.path.join(ctx.path.name, where)
bnode = ctx.bldnode.find_node(where)
if bnode is None:
ctx.bldnode.make_node(where).mkdir()
build_dir = ctx.path.get_bld().relpath()
output_node = ctx.path.get_bld().make_node(buildtype)
output_dir = output_node.abspath()
doctrees = os.path.join(os.path.dirname(output_dir), 'doctrees', buildtype)
return build_dir, output_node, output_dir, doctrees
def pdf_resources(ctx, buildtype):
packages_base = ctx.path.parent.find_dir('common/latex')
if packages_base is None:
ctx.fatal('Latex package directory not found')
base = packages_base.path_from(ctx.path)
fnode = ctx.path.get_bld().make_node(buildtype)
fnode.mkdir()
local_packages = latex.local_packages()
if local_packages is not None:
srcs = [os.path.join(base, p) for p in local_packages]
ctx(
features = "subst",
is_copy = True,
source = srcs,
target = [fnode.make_node(p) for p in local_packages]
)
ctx(
features = "subst",
is_copy = True,
source = os.path.join(base, ctx.env.RTEMSEXTRAFONTS),
target = fnode.make_node('rtemsextrafonts.sty')
)
def html_resources(ctx, buildtype):
for dir_name in ["_static", "_templates"]:
files = ctx.path.parent.find_node("common").ant_glob("%s/*" % dir_name)
fnode = ctx.path.get_bld().make_node(os.path.join(buildtype, dir_name))
fnode.mkdir() # dirs
ctx(
features = "subst",
is_copy = True,
source = files,
target = [fnode.make_node(x.name) for x in files]
)
# copy images
# ctx.path.get_bld().make_node("images").mkdir()
# files = ctx.path.parent.ant_glob("images/**")
# ctx(
# features = "subst",
# is_copy = True,
# source = files,
# target = [x.srcpath().replace("../", "") for x in files]
# )
def check_sphinx_extension(ctx, extension):
def run_sphinx(bld):
rst_node = bld.srcnode.make_node('testbuild/contents.rst')
rst_node.parent.mkdir()
rst_node.write('.. COMMENT test sphinx\n')
bld(rule = bld.kw['rule'], source = rst_node)
ctx.start_msg("Checking for '%s'" % (extension))
try:
ctx.run_build(fragment = 'xx',
rule = "${BIN_SPHINX_BUILD} -b html -D extensions=%s -C . out" % (extension),
build_fun = run_sphinx,
env = ctx.env)
except ctx.errors.ConfigurationError:
ctx.end_msg('not found (see README.txt)', 'RED')
ctx.fatal('The configuration failed')
ctx.end_msg('found')
def cmd_configure(ctx):
check_sphinx = not ctx.env.BIN_SPHINX_BUILD
if check_sphinx:
ctx.msg('Checking version', ctx.env.VERSION)
ctx.find_program("sphinx-build", var="BIN_SPHINX_BUILD", mandatory = True)
ctx.find_program("aspell", var = "BIN_ASPELL", mandatory = False)
ctx.start_msg("Checking if Sphinx is at least %s.%s" % sphinx_min_version)
ver = check_sphinx_version(ctx, sphinx_min_version)
ctx.end_msg("yes (%s)" % ".".join(map(str, ver)))
ctx.start_msg("Checking Sphinx Verbose ")
if 'SPHINX_VERBOSE' not in ctx.env:
ctx.env.append_value('SPHINX_VERBOSE', ctx.options.sphinx_verbose)
level = sphinx_verbose(ctx)
if level == '-Q':
level = 'quiet'
ctx.end_msg(level)
#
# Check extensions.
#
check_sphinx_extension(ctx, 'sphinx.ext.autodoc')
check_sphinx_extension(ctx, 'sphinx.ext.coverage')
check_sphinx_extension(ctx, 'sphinx.ext.doctest')
check_sphinx_extension(ctx, 'sphinx.ext.graphviz')
check_sphinx_extension(ctx, 'sphinx.ext.intersphinx')
check_sphinx_extension(ctx, 'sphinx.ext.mathjax')
check_sphinx_extension(ctx, 'sphinxcontrib.bibtex')
#
# Optional builds.
#
ctx.env.BUILD_PDF = 'no'
if ctx.options.pdf:
check_tex = not ctx.env.PDFLATEX
if check_tex:
ctx.load('tex')
if not ctx.env.PDFLATEX or not ctx.env.MAKEINDEX:
ctx.fatal('The programs pdflatex and makeindex are required for PDF output')
if 'PDFLATEXFLAGS' not in ctx.env or \
'-shell-escape' not in ctx.env['PDFLATEXFLAGS']:
ctx.env.append_value('PDFLATEXFLAGS', '-shell-escape')
latex.configure_tests(ctx)
ctx.env.BUILD_PDF = 'yes'
    ctx.env.BUILD_SINGLEHTML = 'no'
if ctx.options.singlehtml:
check_inliner = not ctx.env.BIN_INLINER
if check_inliner:
ctx.env.BUILD_SINGLEHTML = 'yes'
ctx.find_program("inliner", var = "BIN_INLINER", mandatory = False)
if not ctx.env.BIN_INLINER:
                ctx.fatal("Node inliner is required; install it with 'npm install -g inliner' " +
"(https://github.com/remy/inliner)")
def doc_pdf(ctx, source_dir, conf_dir):
buildtype = 'latex'
build_dir, output_node, output_dir, doctrees = build_dir_setup(ctx, buildtype)
pdf_resources(ctx, buildtype)
rule = sphinx_cmdline(ctx, buildtype, conf_dir, doctrees, source_dir, output_dir)
ctx(
rule = rule,
cwd = ctx.path,
source = ctx.path.ant_glob('**/*.rst'),
target = ctx.path.find_or_declare("%s/%s.tex" % (buildtype,
ctx.path.name))
)
ctx(
features = 'tex',
cwd = output_dir,
type = 'pdflatex',
source = "%s/%s.tex" % (buildtype, ctx.path.name),
prompt = 0
)
ctx.install_files('${PREFIX}',
'%s/%s.pdf' % (buildtype, ctx.path.name),
cwd = output_node,
quiet = True)
def doc_singlehtml(ctx, source_dir, conf_dir):
#
# Use a run command to handle stdout and stderr output from inliner. Using
# a standard rule in the build context locks up.
#
def run(task):
src = task.inputs[0].abspath()
tgt = task.outputs[0].abspath()
cmd = '%s %s' % (task.env.BIN_INLINER[0], src)
so = open(tgt, 'w')
se = open(tgt + '.err', 'w')
r = task.exec_command(cmd, stdout = so, stderr = se)
so.close()
se.close()
#
# The inliner does not handle internal href's correctly and places the
# input's file name in the href. Strip these.
#
with open(tgt, 'r') as i:
before = i.read()
after = before.replace('index.html', '')
i.close()
with open(tgt, 'w') as o:
o.write(after)
o.close()
return r
buildtype = 'singlehtml'
build_dir, output_node, output_dir, doctrees = build_dir_setup(ctx, buildtype)
html_resources(ctx, buildtype)
rule = sphinx_cmdline(ctx, buildtype, conf_dir, doctrees, source_dir, output_dir)
ctx(
rule = rule,
cwd = ctx.path,
source = ctx.path.ant_glob('**/*.rst'),
target = ctx.path.find_or_declare("%s/index.html" % (buildtype)),
install_path = None
)
ctx(
rule = run,
inliner = ctx.env.BIN_INLINER,
source = "%s/index.html" % buildtype,
target = "%s/%s.html" % (buildtype, ctx.path.name),
install_path = '${PREFIX}'
)
def doc_html(ctx, conf_dir, source_dir):
buildtype = 'html'
build_dir, output_node, output_dir, doctrees = build_dir_setup(ctx, buildtype)
html_resources(ctx, buildtype)
rule = sphinx_cmdline(ctx, buildtype, conf_dir, doctrees, source_dir, output_dir)
ctx(
rule = rule,
cwd = ctx.path,
source = ctx.path.ant_glob('**/*.rst'),
target = ctx.path.find_or_declare('%s/index.html' % buildtype),
install_path = None
)
ctx.install_files('${PREFIX}/%s' % (ctx.path.name),
output_node.ant_glob('**/*', quiet = True),
cwd = output_node,
relative_trick = True,
quiet = True)
def cmd_build(ctx):
conf_dir = ctx.path.get_src()
source_dir = ctx.path.get_src()
if ctx.env.BUILD_PDF == 'yes':
doc_pdf(ctx, source_dir, conf_dir)
if ctx.env.BUILD_SINGLEHTML == 'yes':
doc_singlehtml(ctx, source_dir, conf_dir)
doc_html(ctx, source_dir, conf_dir)
def cmd_options(ctx):
ctx.add_option('--disable-extra-fonts',
action = 'store_true',
default = False,
help = "Disable building with extra fonts for better quality (lower quality).")
ctx.add_option('--sphinx-verbose',
action = 'store',
default = "-Q",
help = "Sphinx verbose.")
ctx.add_option('--pdf',
action = 'store_true',
default = False,
help = "Build PDF.")
ctx.add_option('--singlehtml',
action = 'store_true',
default = False,
help = "Build Single HTML file, requires Node Inliner")
def cmd_options_path(ctx):
cmd_options(ctx)
ctx.add_option('--rtems-path-py',
type = 'string',
help = "Full path to py/ in RTEMS source repository.")
def cmd_configure_path(ctx):
if not ctx.options.rtems_path_py:
ctx.fatal("--rtems-path-py is required")
ctx.env.RTEMS_PATH = ctx.options.rtems_path_py
cmd_configure(ctx)
def xml_catalogue(ctx, building):
#
    # The following is a hack to find the top_dir because the task does not
    # provide a reference to top_dir like a build context does.
#
top_dir = ctx.get_cwd().find_node('..')
#
# Read the conf.py files in each directory to gather the doc details.
#
catalogue = {}
sp = sys.path[:]
for doc in building:
sys.path.insert(0, top_dir.find_node(doc).abspath())
#
# Import using the imp API so the module is reloaded for us.
#
import imp
mf = imp.find_module('conf')
try:
bconf = imp.load_module('bconf', mf[0], mf[1], mf[2])
finally:
mf[0].close()
sys.path = sp[:]
catalogue[doc] = {
'title': bconf.project,
'version': str(ctx.env.VERSION),
'release': str(ctx.env.VERSION),
'pdf': bconf.latex_documents[0][1].replace('.tex', '.pdf'),
'html': '%s/index.html' % (doc),
'singlehtml': '%s.html' % (doc)
}
bconf = None
import xml.dom.minidom as xml
cat = xml.Document()
root = cat.createElement('rtems-docs')
root.setAttribute('date', build_date())
cat.appendChild(root)
heading = cat.createElement('catalogue')
text = cat.createTextNode(str(ctx.env.VERSION))
heading.appendChild(text)
root.appendChild(heading)
builds = ['html']
if ctx.env.BUILD_PDF == 'yes':
builds += ['pdf']
if ctx.env.BUILD_SINGLEHTML == 'yes':
builds += ['singlehtml']
for d in building:
doc = cat.createElement('doc')
name = cat.createElement('name')
text = cat.createTextNode(d)
name.appendChild(text)
title = cat.createElement('title')
text = cat.createTextNode(catalogue[d]['title'])
title.appendChild(text)
release = cat.createElement('release')
text = cat.createTextNode(catalogue[d]['release'])
release.appendChild(text)
version = cat.createElement('version')
text = cat.createTextNode(catalogue[d]['version'])
version.appendChild(text)
doc.appendChild(name)
doc.appendChild(title)
doc.appendChild(release)
doc.appendChild(version)
for b in builds:
output = cat.createElement(b)
text = cat.createTextNode(catalogue[d][b])
output.appendChild(text)
doc.appendChild(output)
root.appendChild(doc)
catnode = ctx.get_cwd().make_node('catalogue.xml')
catnode.write(cat.toprettyxml(indent = ' ' * 2, newl = os.linesep))
cat.unlink()
avg_line_length: 34.565702 | max_line_length: 99 | alphanum_fraction: 0.579188
7947842c374785da9c1b39fdf777608f286863a2 | 1,720 | py | Python | mro.py | iansantana00/Python-Course | 43852aa64c93099342ab4765b0fe8729a959449e | [
"MIT"
] | stars: 2 (2022-01-13T15:55:58.000Z to 2022-02-11T23:18:34.000Z) | issues: null | forks: null
"""
OOP - MRO - Method Resolution Order
The method resolution order is the order in which methods are looked up and executed (which one runs first).
In Python, we can check the method resolution order (MRO) in 3 ways:
- Via the class attribute __mro__
- Via the mro() method
- Via help
>>> from mro import Pinguim
Eu sou Tux do mar!
>>> Pinguim.__mro__
(<class 'mro.Pinguim'>, <class 'mro.Aquatico'>, <class 'mro.Terrestre'>, <class 'mro.Animal'>, <class 'object'>)
# METHOD RESOLUTION ORDER
help(Pinguim)
Method resolution order:
| Pinguim
| Aquatico
| Terrestre
| Animal
| builtins.object
"""
class Animal:
def __init__(self, nome):
self.__nome = nome
def cumprimentar(self):
return f'Eu sou {self.__nome}'
class Aquatico(Animal):
def __init__(self, nome):
super().__init__(nome)
def nadar(self):
return f'{self._Animal__nome} está nadando.'
def cumprimentar(self):
return f'Eu sou {self._Animal__nome} do mar!'
class Terrestre(Animal):
def __init__(self, nome):
super().__init__(nome)
def andar(self):
return f'{self._Animal__nome} está andando.'
def cumprimentar(self):
return f'Eu sou {self._Animal__nome} da terra!'
class Pinguim(Aquatico, Terrestre):  # Changing the inheritance order changes which method is executed
def __init__(self, nome):
super().__init__(nome)
# Testing
tux = Pinguim('Tux')
print(tux.cumprimentar())  # Which one? "Eu sou Tux da terra!" or "Eu sou Tux do mar!" - decided by the MRO
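# Added for illustration (standard introspection, matching the module docstring above):
# print(Pinguim.__mro__)    # (Pinguim, Aquatico, Terrestre, Animal, object)
# print(Pinguim.mro())      # the same order, returned as a list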
"""
class Pinguim(Aquatico, Terrestre):
Eu sou Tux do mar!
"""
avg_line_length: 24.927536 | max_line_length: 113 | alphanum_fraction: 0.630233
7947843d5aeb05d5a727d5e604625f136101ef54 | 4,052 | py | Python | tests/unittests/algo/test_dehb.py | dendisuhubdy/evolve | 54e0aa05b8df5f7833186b32462629ddb9198eea | [
"BSD-3-Clause"
] | stars: 1 (2017-09-07T06:20:39.000Z to 2017-09-07T06:20:39.000Z) | issues: null | forks: null
"""Perform integration tests for `orion.algo.dehb`."""
import itertools
from typing import ClassVar
import numpy
import pytest
from orion.algo.dehb.dehb import IMPORT_ERROR, UnsupportedConfiguration
from orion.core.utils import backward, format_trials
from orion.testing.algo import BaseAlgoTests, TestPhase, last_phase_only
if IMPORT_ERROR:
pytest.skip("skipping DEHB tests", allow_module_level=True)
# These are the total number of suggestions that the algorithm will make
# for each "phase" (including previous ones).
# The maximum number is 32 and then it will be done and stop suggesting mode.
COUNTS = [8 + 4 * 3, 4 + 2 + 4]
COUNTS = numpy.cumsum(COUNTS)
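# As written above: [8 + 4 * 3, 4 + 2 + 4] == [20, 10], so the cumulative totals are [20, 30].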
class TestDEHB(BaseAlgoTests):
"""Test suite for algorithm DEHB"""
algo_name = "dehb"
config = {
"seed": 1234,
# Because this is so random
# Add other arguments for your algorithm to pass test_configuration
"mutation_factor": 0.65,
"crossover_prob": 0.45,
"mutation_strategy": "rand2dir",
"crossover_strategy": "exp",
"boundary_fix_type": "clip",
"min_clip": None,
"max_clip": None,
}
space = {"x": "uniform(0, 1)", "y": "uniform(0, 1)", "f": "fidelity(1, 10, base=2)"}
phases: ClassVar[list[TestPhase]] = [
TestPhase("random", 0, "space.sample"),
*[
TestPhase(f"rung{i}", budget - 1, "space.sample")
for i, budget in enumerate(COUNTS)
],
]
def test_config_mut_strategy_isnot_valid(self):
with pytest.raises(UnsupportedConfiguration):
self.create_algo(config=dict(mutation_strategy="123"))
def test_config_cross_strategy_isnot_valid(self):
with pytest.raises(UnsupportedConfiguration):
self.create_algo(config=dict(crossover_strategy="123"))
def test_config_fix_mode_isnot_valid(self):
with pytest.raises(UnsupportedConfiguration):
self.create_algo(config=dict(boundary_fix_type="123"))
def test_missing_fidelity(self):
with pytest.raises(RuntimeError):
space = self.create_space(dict(x="uniform(0, 1)"))
self.create_algo(space=space)
def test_suggest_n(self):
algo = self.create_algo()
num = algo.n_observed
trials = algo.suggest(3)
assert len(trials) == 3
@pytest.mark.xfail
def test_is_done_cardinality(self):
"""Fails because of https://github.com/Epistimio/orion/issues/598"""
space_config = self.update_space(
{
"x": "uniform(0, 4, discrete=True)",
"y": "choices(['a', 'b', 'c'])",
"z": "loguniform(1, 6, discrete=True)",
}
)
space = self.create_space(space_config)
assert space.cardinality == 5 * 3 * 6
algo = self.create_algo(space=space)
i = 0
for i, (x, y, z) in enumerate(itertools.product(range(5), "abc", range(1, 7))):
assert not algo.is_done
n = algo.n_suggested
backward.algo_observe(
algo,
[format_trials.tuple_to_trial([1, x, y, z], space)],
[dict(objective=i)],
)
assert algo.n_suggested == n + 1
assert i + 1 == space.cardinality
assert algo.is_done
@pytest.mark.skip(reason="See https://github.com/Epistimio/orion/issues/599")
def test_optimize_branin(self):
pass
@last_phase_only
def test_is_done_max_trials(self, phase: TestPhase):
# pylint: disable=invalid-name
MAX_TRIALS = 10
algo = self.create_algo()
algo.algorithm.max_trials = MAX_TRIALS
objective = 0
while not algo.is_done:
trials = algo.suggest(1)
assert trials is not None
if trials:
self.observe_trials(trials, algo, objective)
objective += len(trials)
# Hyperband should ignore max trials.
assert algo.n_observed > MAX_TRIALS
assert algo.is_done
avg_line_length: 32.416 | max_line_length: 88 | alphanum_fraction: 0.615005
79478534c3a23764194edd32e9c9d6af845542a4 | 2,537 | py | Python | tests/expm_multiply_parallel_batch_test.py | Alehud/QuSpin | c72d5fb2b2e9cd9a37d6917bba0337faf3b6c201 | [
"BSD-3-Clause"
] | stars: 195 (2016-10-24T18:05:31.000Z to 2022-03-29T10:11:56.000Z) | issues: 303 (2016-10-25T20:08:11.000Z to 2022-03-31T16:52:09.000Z) | forks: 54 (2017-01-03T18:47:52.000Z to 2022-03-16T06:54:33.000Z)
from __future__ import print_function, division
import sys,os
qspin_path = os.path.join(os.getcwd(),"../")
sys.path.insert(0,qspin_path)
#print(os.environ["OMP_NUM_THREADS"])
from quspin.basis import spin_basis_1d
from quspin.operators import hamiltonian
from quspin.tools.evolution import expm_multiply_parallel
from scipy.sparse.linalg import expm_multiply
from scipy.sparse import random,eye
import numpy as np
def test_imag_time(L=20,seed=0):
np.random.seed(seed)
basis = spin_basis_1d(L,m=0,kblock=0,pblock=1,zblock=1)
J = [[1.0,i,(i+1)%L] for i in range(L)]
static = [["xx",J],["yy",J],["zz",J]]
H = hamiltonian(static,[],basis=basis,dtype=np.float64)
(E,),psi_gs = H.eigsh(k=1,which="SA")
psi_gs = psi_gs.ravel()
A = -(H.tocsr() - E*eye(H.Ns,format="csr",dtype=np.float64))
U = expm_multiply_parallel(A)
v1 = np.random.normal(0,1,size=(H.Ns,10))
v1 /= np.linalg.norm(v1,axis=0)
v2 = v1.copy()
for i in range(100):
v2 = U.dot(v2)
v2 /= np.linalg.norm(v2)
v1 = expm_multiply(A,v1)
v1 /= np.linalg.norm(v1)
if(np.all(np.abs(H.expt_value(v2)-E) < 1e-15)):
break #
i += 1
np.testing.assert_allclose(v1,v2,rtol=0,atol=5e-15,err_msg='imaginary time test failed, seed {:d}'.format(seed) )
def test_ramdom_matrix(N=3500,ntest=10,seed=0):
np.random.seed(seed)
i = 0
while(i<ntest):
print("testing random matrix {}".format(i+1))
A = (random(N,N,density=np.log(N)/N) + 1j*random(N,N,density=np.log(N)/N))
A = A.tocsr()
v = np.random.normal(0,1,size=(N,10)) + 1j * np.random.normal(0,1,size=(N,10))
v /= np.linalg.norm(v)
v1 = expm_multiply(A,v)
v2 = expm_multiply_parallel(A).dot(v)
np.testing.assert_allclose(v1,v2,rtol=0,atol=5e-15,err_msg='random matrix test failed, seed {:d}'.format(seed) )
i += 1
def test_ramdom_int_matrix(N=3500,ntest=10,seed=0):
np.random.seed(seed)
i = 0
while(i<ntest):
print("testing random integer matrix {}".format(i+1))
data_rvs = lambda n:np.random.randint(-100,100,size=n,dtype=np.int8)
A = random(N,N,density=np.log(N)/N,data_rvs=data_rvs,dtype=np.int8)
A = A.tocsr()
v = np.random.normal(0,1,size=(N,10)) + 1j * np.random.normal(0,1,size=(N,10))
v /= np.linalg.norm(v)
v1 = expm_multiply(-0.01j*A,v)
v2 = expm_multiply_parallel(A,a=-0.01j,dtype=np.complex128).dot(v)
np.testing.assert_allclose(v1,v2,rtol=0,atol=5e-15,err_msg='random matrix test failed, seed {:d}'.format(seed) )
i += 1
test_imag_time()
test_ramdom_matrix()
test_ramdom_int_matrix()
print("expm_multiply_parallel tests passed!")
avg_line_length: 25.887755 | max_line_length: 114 | alphanum_fraction: 0.68309
7947853a24627ba241a9c008bdc34b702479dce4 | 3,662 | py | Python | main.py | Ahaan123/DiscordBrawlMemer | 8e1fc95d1d6fa409935be7159971166adf511482 | [
"MIT"
] | stars: null | issues: null | forks: null
import praw
import discord
from discord.ext import commands
import random
import os
from WebServer import keep_alive
brawl_bot = commands.Bot(command_prefix='brawl ')
TOKEN = "TOKEN IS HIDDEN"
class meme_post:
url = ''
title = ''
meme_link = ''
permalink = ''
def __init__(self, post_url, post_title, meme_urlx, p):
self.url = post_url
self.title = post_title
self.meme_link = meme_urlx
self.permalink = p
@brawl_bot.event
async def on_ready():
print('Brawl Memer bot be ready though')
client_id = 'Hidden'
client_secret = 'Hidden'
user_agent = 'Hidden'
username = 'Hidden'
password = 'Hidden'
reddit = praw.Reddit(client_id = client_id, client_secret=client_secret, user_agent=user_agent, username=username, password=password)
subred = reddit.subreddit('brawlstars')
brawl_bot.remove_command('help')
@brawl_bot.command()
async def meme(ctx):
posts = get_memes()
post = random.choice(posts)
print(post.permalink)
embed = discord.Embed(
title=post.title,
colour = discord.Colour.blue(),
url = 'https://www.reddit.com'+str(post.permalink)
)
embed.set_footer(text='Thanks to r/brawlstars for the memes!')
embed.set_image(url=post.meme_link)
await ctx.send(embed=embed)
@brawl_bot.command()
async def videomeme(ctx):
meme_link_list = get_video_memes()
random_meme_link = random.choice(meme_link_list)
await ctx.send(random_meme_link)
def get_memes():
posts = []
for post in subred.search('flair:"Humor"', limit=50):
#length-4 to length-1
meme_url = str(post.url)
extension = meme_url[len(meme_url)-4:len(meme_url)]
if (extension == '.jpg' or extension == '.png'):
post = meme_post(post_url=post.url, post_title = post.title, meme_urlx=meme_url, p=post.permalink)
posts.append(post)
return posts
def get_video_memes():
video_meme_list = []
for post in subred.search('flair:"Humor"', limit=200):
#length-4 to length-1
meme_url = str(post.url)
extension = meme_url[len(meme_url)-4:len(meme_url)]
if (extension != '.jpg' and extension != '.png'):
video_meme_list.append(meme_url)
return video_meme_list
@brawl_bot.command()
async def info(ctx):
embed = discord.Embed(
colour=0xf1c40f,
title = "**BrawlMemer | Info**"
)
embed.add_field(name="**BrawlMemer | Version**", value="v1.1", inline=False)
embed.add_field(name="**BrawlMemer | Developer**", value="Ahaan Pandya", inline=False)
embed.add_field(name="**Disclaimer**", value="This material is unofficial and is not endorsed by Supercell. For more information see Supercell's Fan Content Policy: www.supercell.com/fan-content-policy.")
await ctx.send(embed=embed)
@brawl_bot.command()
async def help(ctx):
auth = ctx.message.author
embed = discord.Embed(
title='**COMMANDS HELP**',
colour = 0xf1c40f
)
embed.add_field(name=":laughing:`brawl meme`", value="Displays a Brawl Stars meme (image)")
embed.add_field(name=":video_camera:`brawl videomeme`", value="Displays a Brawl Stars meme (video)")
embed.add_field(name=":information_source:`brawl info`", value="Shows you information about the Bot")
embed.add_field(name=":question:`brawl help`", value="Gives Command Help")
    embed.set_thumbnail(url='https://upload.wikimedia.org/wikipedia/en/thumb/1/18/Brawl_Stars_logo.png/220px-Brawl_Stars_logo.png')  # discord.py Embed provides set_thumbnail, not add_thumbnail
new_embed = discord.Embed(
colour = 0xf1c40f,
description = "**A DM containing the help message has been sent to you!**"
)
await ctx.send(embed=new_embed)
await auth.send(embed=embed)
keep_alive()
brawl_bot.run(TOKEN)
avg_line_length: 34.87619 | max_line_length: 208 | alphanum_fraction: 0.696068
79478541aca3277b0546828737dff256632955d4 | 2,190 | py | Python | Machine Learning Summer School 2019 (Moscow, Russia)/tutorials/bayesian_deep_learning/mlss2019bdl/bdl/bernoulli.py | xuedong/rlss2019 | d7468c2fcf269d8afd6fb0f44993aa9797867944 | [
"MIT"
] | stars: null | issues: 3 (2018-12-18T16:36:52.000Z to 2019-01-29T18:34:55.000Z) | forks: null
import torch
import torch.nn.functional as F
from torch.nn import Linear, Conv2d
from .base import FreezableWeight, PenalizedWeight
class DropoutLinear(Linear, FreezableWeight):
"""Linear layer with dropout on inputs."""
def __init__(self, in_features, out_features, bias=True, p=0.5):
super().__init__(in_features, out_features, bias=bias)
self.p = p
def forward(self, input):
if self.is_frozen():
return F.linear(input, self.frozen_weight, self.bias)
return super().forward(F.dropout(input, self.p, True))
def freeze(self):
# let's draw the new weight
with torch.no_grad():
prob = torch.full_like(self.weight[:1, :], 1 - self.p)
feature_mask = torch.bernoulli(prob) / prob
frozen_weight = self.weight * feature_mask
# and store it
self.register_buffer("frozen_weight", frozen_weight)
class DropoutConv2d(Conv2d, FreezableWeight):
"""2d Convolutional layer with dropout on input features."""
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
padding=0, dilation=1, groups=1, bias=True, padding_mode='zeros',
p=0.5):
super().__init__(in_channels, out_channels, kernel_size, stride=stride,
padding=padding, dilation=dilation, groups=groups,
bias=bias, padding_mode=padding_mode)
self.p = p
def forward(self, input):
"""Apply feature dropout and then forward pass through the convolution."""
if self.is_frozen():
return F.conv2d(input, self.frozen_weight, self.bias, self.stride,
self.padding, self.dilation, self.groups)
return super().forward(F.dropout2d(input, self.p, True))
def freeze(self):
"""Sample the weight from the parameter distribution and freeze it."""
prob = torch.full_like(self.weight[:1, :, :1, :1], 1 - self.p)
feature_mask = torch.bernoulli(prob) / prob
with torch.no_grad():
frozen_weight = self.weight * feature_mask
self.register_buffer("frozen_weight", frozen_weight)
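# Illustrative usage sketch (added comment, not part of the original module);
# it assumes the FreezableWeight base class provides the is_frozen()/freeze() hooks used above:
#   layer = DropoutConv2d(3, 8, kernel_size=3, p=0.5)
#   layer.freeze()                           # sample one thinned weight and cache it
#   out = layer(torch.randn(1, 3, 32, 32))   # deterministic pass with the frozen weight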
avg_line_length: 34.21875 | max_line_length: 82 | alphanum_fraction: 0.634247
7947882f7992a2e92a5534fbedad35e26f9dd60d | 377 | py | Python | authz/controller/apiv1/user.py | amnshzd/authz | 63a60bd185bd8119b0aac48fa5d0e19754b8d03b | [
"Apache-2.0"
] | stars: null | issues: null | forks: null
from authz.util.jsonify import jsonify
class UserController:
def creat_user():
return jsonify(status=501)
def get_user_list():
return jsonify(status=501)
def get_user(user_id):
return jsonify(status=501)
def update_user(user_id):
return jsonify(status=501)
def delete_user(user_id):
return jsonify(status=501)
avg_line_length: 18.85 | max_line_length: 38 | alphanum_fraction: 0.665782
79478909d76eebc38332834eb6c300d07d848a6b | 145 | py | Python | TIDALDL-PY/tidal_dl/__main__.py | luuray/Tidal-Media-Downloader | de1ea10c93fdb45732b252fa010fde57dfeafab6 | [
"Apache-2.0"
] | stars: null | issues: null | forks: null
import sys
sys.path.append('./')
import tidal_dl
if __name__ == '__main__':
# tidal_dl.debug()
tidal_dl.main(sys.argv)
avg_line_length: 13.181818 | max_line_length: 28 | alphanum_fraction: 0.593103
7947893cbdc395fab757201b2cba1182029ba7f1 | 51 | py | Python | sample.py | santosh4b6/python_sample_pkg_files | 6a1a997508478693a29f6ad36c4150cbfc84032b | [
"MIT"
] | stars: null | issues: null | forks: null
def test_print():
print('[INFO] Hello World') | 12.75 | 31 | 0.627451 |
794789b762fc74c10f4c24a1c94a95dea6619a26 | 4,507 | py | Python | HashTables/main.py | maksbrz184/AaDS | 489f3e88af2ff7d263a761f2f7e0ee29f6e91962 | [
"MIT"
] | null | null | null | HashTables/main.py | maksbrz184/AaDS | 489f3e88af2ff7d263a761f2f7e0ee29f6e91962 | [
"MIT"
] | null | null | null | HashTables/main.py | maksbrz184/AaDS | 489f3e88af2ff7d263a761f2f7e0ee29f6e91962 | [
"MIT"
] | null | null | null | from HashTables import hashTables
from Sources import fileDriver
import random
htl = hashTables.HashTableLinear(1000000, 20)
htd = hashTables.HashTableDouble(1000000)
nums = fileDriver.getIntArrayPath("../Sources/set_of_1050000_random_numbers.txt")
searched = fileDriver.getIntArrayPath("../Sources/set_of_1050000_random_numbers_for_search_miss.txt")
for i in range(500000):
htl.insert(nums[i])
htd.insert(nums[i])
for i in range(50000):
htl.search(nums[random.randint(0, 500000 - 1)])
htd.search(nums[random.randint(0, 500000 - 1)])
print("------50% fill factor------\n\n")
print("--------Search hit--------")
print("Linear:\t", htl.sHit / ((htl.size / 10) * 0.5))
print("Double:\t", htd.sHit / ((htd.size / 10) * 0.5))
print("--------Search miss-------")
print("Linear:\t", htl.sMiss / ((htl.size / 10) * 0.5))
print("Double:\t", htd.sMiss / ((htd.size / 10) * 0.5))
htl.reset()
htd.reset()
for i in range(50000):
htl.search(searched[random.randint(0, 500000 - 1)])
htd.search(searched[random.randint(0, 500000 - 1)])
print("--------Search hit--------")
print("Linear:\t", htl.sHit / ((htl.size / 10) * 0.5))
print("Double:\t", htd.sHit / ((htd.size / 10) * 0.5))
print("--------Search miss-------")
print("Linear:\t", htl.sMiss / ((htl.size / 10) * 0.5))
print("Double:\t", htd.sMiss / ((htd.size / 10) * 0.5))
print("\n\n\n\n")
htl.reset()
htd.reset()
for i in range(500000, 700000):
htl.insert(nums[i])
htd.insert(nums[i])
for i in range(70000):
htl.search(nums[random.randint(0, 700000 - 1)])
htd.search(nums[random.randint(0, 700000 - 1)])
print("------70% fill factor------\n\n")
print("--------Search hit--------")
print("Linear:\t", htl.sHit / ((htl.size / 10) * 0.7))
print("Double:\t", htd.sHit / ((htd.size / 10) * 0.7))
print("--------Search miss-------")
print("Linear:\t", htl.sMiss / ((htl.size / 10) * 0.7))
print("Double:\t", htd.sMiss / ((htd.size / 10) * 0.7))
htl.reset()
htd.reset()
for i in range(70000):
htl.search(searched[random.randint(0, 700000 - 1)])
htd.search(searched[random.randint(0, 700000 - 1)])
print("--------Search hit--------")
print("Linear:\t", htl.sHit / ((htl.size / 10) * 0.7))
print("Double:\t", htd.sHit / ((htd.size / 10) * 0.7))
print("--------Search miss-------")
print("Linear:\t", htl.sMiss / ((htl.size / 10) * 0.7))
print("Double:\t", htd.sMiss / ((htd.size / 10) * 0.7))
print("\n\n\n\n")
htl.reset()
htd.reset()
for i in range(700000, 800000):
htl.insert(nums[i])
htd.insert(nums[i])
for i in range(80000):
htl.search(nums[random.randint(0, 800000 - 1)])
htd.search(nums[random.randint(0, 800000 - 1)])
print("------80% fill factor------\n\n")
print("--------Search hit--------")
print("Linear:\t", htl.sHit / ((htl.size / 10) * 0.8))
print("Double:\t", htd.sHit / ((htd.size / 10) * 0.8))
print("--------Search miss-------")
print("Linear:\t", htl.sMiss / ((htl.size / 10) * 0.8))
print("Double:\t", htd.sMiss / ((htd.size / 10) * 0.8))
htl.reset()
htd.reset()
for i in range(80000):
htl.search(searched[random.randint(0, 800000 - 1)])
htd.search(searched[random.randint(0, 800000 - 1)])
print("--------Search hit--------")
print("Linear:\t", htl.sHit / ((htl.size / 10) * 0.8))
print("Double:\t", htd.sHit / ((htd.size / 10) * 0.8))
print("--------Search miss-------")
print("Linear:\t", htl.sMiss / ((htl.size / 10) * 0.8))
print("Double:\t", htd.sMiss / ((htd.size / 10) * 0.8))
print("\n\n\n\n")
htl.reset()
htd.reset()
for i in range(800000, 900000):
htl.insert(nums[i])
htd.insert(nums[i])
for i in range(90000):
htl.search(nums[random.randint(0, 900000 - 1)])
htd.search(nums[random.randint(0, 900000 - 1)])
print("------90% fill factor------\n\n")
print("--------Search hit--------")
print("Linear:\t", htl.sHit / ((htl.size / 10) * 0.9))
print("Double:\t", htd.sHit / ((htd.size / 10) * 0.9))
print("--------Search miss-------")
print("Linear:\t", htl.sMiss / ((htl.size / 10) * 0.9))
print("Double:\t", htd.sMiss / ((htd.size / 10) * 0.9))
htl.reset()
htd.reset()
for i in range(90000):
htl.search(searched[random.randint(0, 900000 - 1)])
htd.search(searched[random.randint(0, 900000 - 1)])
print("--------Search hit--------")
print("Linear:\t", htl.sHit / ((htl.size / 10) * 0.9))
print("Double:\t", htd.sHit / ((htd.size / 10) * 0.9))
print("--------Search miss-------")
print("Linear:\t", htl.sMiss / ((htl.size / 10) * 0.9))
print("Double:\t", htd.sMiss / ((htd.size / 10) * 0.9))
print("\n\n\n\n")
print("Done")
avg_line_length: 31.298611 | max_line_length: 101 | alphanum_fraction: 0.584868
79478b69efd3f77fddf6f9e9e8c7d1a8459ede93 | 4,465 | py | Python | mysql-utilities-1.6.0/mysql/utilities/common/charsets.py | bopopescu/mysql-dbcompare | 1e912fd87282be3b3bed48487e6beb0ecb1de339 | [
"Apache-2.0"
] | stars: 2 (2018-03-20T07:42:58.000Z to 2018-03-20T07:43:49.000Z) | issues: null | forks: 1 (2020-07-23T23:07:08.000Z to 2020-07-23T23:07:08.000Z)
#
# Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
"""
This module contains the charset_info class designed to read character set
and collation information from /share/charsets/index.xml.
"""
import sys
from mysql.utilities.common.format import print_list
_CHARSET_INDEXES = ID, CHARACTER_SET_NAME, COLLATION_NAME, MAXLEN, IS_DEFAULT \
= range(0, 5)
_CHARSET_QUERY = """
SELECT CL.ID,CL.CHARACTER_SET_NAME,CL.COLLATION_NAME,CS.MAXLEN, CL.IS_DEFAULT
FROM INFORMATION_SCHEMA.CHARACTER_SETS CS, INFORMATION_SCHEMA.COLLATIONS CL
WHERE CS.CHARACTER_SET_NAME=CL.CHARACTER_SET_NAME ORDER BY CHARACTER_SET_NAME
"""
class CharsetInfo(object):
"""
Read character set information for lookup. Methods include:
    - get_name(id) : get the character set name for a collation id
    - get_collation(id) : get the collation name for a collation id
    - get_name_by_collation(name) : given a collation name, find the charset name
    - get_default_collation(id) : get the default collation for a character set id
    - get_maxlen(id) : get the maximum byte length for a character set id
    - print_charsets() : print the character set map
"""
def __init__(self, options=None):
"""Constructor
options[in] array of general options
"""
if options is None:
options = {}
self.verbosity = options.get("verbosity", 0)
self.format = options.get("format", "grid")
self.server = options.get("server", None)
self.charset_map = None
if self.server:
self.charset_map = self.server.exec_query(_CHARSET_QUERY)
def print_charsets(self):
"""Print the character set list
"""
print_list(sys.stdout, self.format,
["id", "character_set_name", "collation_name",
"maxlen", "is_default"],
self.charset_map)
print len(self.charset_map), "rows in set."
def get_name(self, chr_id):
"""Get the character set name for the given id
chr_id[in] id for character set (as read from .frm file)
Returns string - character set name or None if not found.
"""
for cs in self.charset_map:
if int(chr_id) == int(cs[ID]):
return cs[CHARACTER_SET_NAME]
return None
def get_collation(self, col_id):
"""Get the collation name for the given id
col_id[in] id for collation (as read from .frm file)
Returns string - collation name or None if not found.
"""
for cs in self.charset_map:
if int(col_id) == int(cs[ID]):
return cs[COLLATION_NAME]
return None
def get_name_by_collation(self, colname):
"""Get the character set name for the given collation
colname[in] collation name
Returns string - character set name or None if not found.
"""
for cs in self.charset_map:
if cs[COLLATION_NAME] == colname:
return cs[CHARACTER_SET_NAME]
return None
def get_default_collation(self, col_id):
"""Get the default collation for the character set
col_id[in] id for collation (as read from .frm file)
Returns tuple - (default collation id, name) or None if not found.
"""
# Exception for utf8
if col_id == 83:
return "utf8_bin"
for cs in self.charset_map:
if int(cs[ID]) == int(col_id) and cs[IS_DEFAULT].upper() == "YES":
return cs[COLLATION_NAME]
return None
def get_maxlen(self, col_id):
"""Get the maximum length for the character set
col_id[in] id for collation (as read from .frm file)
Returns int - max length or 1 if not found.
"""
for cs in self.charset_map:
if int(cs[ID]) == int(col_id):
return int(cs[MAXLEN])
return int(1)
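# Illustrative usage sketch (added comment, not part of the original module);
# it assumes a connected mysql.utilities Server object passed in via options:
#   info = CharsetInfo({"server": server})
#   name = info.get_name(some_id)               # charset name for a numeric id
#   coll = info.get_default_collation(some_id)  # default collation for that charset id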
avg_line_length: 33.074074 | max_line_length: 79 | alphanum_fraction: 0.640538
79478b79e1316730a919e66c127e5edb5e138549 | 1,929 | py | Python | simfempy/examples/solverstokes.py | beckerrh/fempy | dd7214ea7f6d81a5200fcb4a91f07a5cd3322e9e | [
"MIT"
] | stars: null | issues: 3 (2018-12-18T16:36:52.000Z to 2019-01-29T18:34:55.000Z) | forks: null
assert __name__ == '__main__'
# in shell
import os, sys
simfempypath = os.path.abspath(os.path.join(__file__, os.path.pardir, os.path.pardir, os.path.pardir, os.path.pardir,'simfempy'))
sys.path.insert(0,simfempypath)
from simfempy.applications.stokes import Stokes
from simfempy.meshes.simplexmesh import SimplexMesh
from simfempy.tools.comparemethods import CompareMethods
from simfempy.examples import incompflow
#----------------------------------------------------------------#
def test(testcase, **kwargs):
mu = kwargs.pop("mu", 1)
testcasefct = eval(f"incompflow.{testcase}")
mesh, data = testcasefct(mu=mu)
def createMesh(h): return SimplexMesh(testcasefct(h=h)[0])
applicationargs = {'problemdata': data}
# applicationargs['scalels'] = True
paramsdict = {'mu@scal_glob': [1]}
paramsdict['linearsolver'] = ['pyamg_gmres@full@100@0', 'pyamg_fgmres@full@100@0', 'scipy_gcrotmk@full@20@0']
paramsdict['linearsolver'] = ['pyamg_gmres@full@100@0']
paramsdict['solver_v'] = ['pyamg@aggregation@none@gauss_seidel@1@0']
# paramsdict['solver_p'] = ['scale', 'diag@pyamg@aggregation@none@gauss_seidel@1@0', 'schur@pyamg_cg@@3@0', 'schur_scale@pyamg_cg@@3@0', 'schur_diag@pyamg_cg@@3@0']
paramsdict['solver_p'] = ['diag@pyamg@aggregation@cg@gauss_seidel@6@0', 'schur|diag@pyamg_cg@@6@0']
niter = kwargs.pop('niter', 3)
comp = CompareMethods(niter=niter, createMesh=createMesh, paramsdict=paramsdict, application=Stokes, applicationargs=applicationargs, **kwargs)
return comp.compare()
#================================================================#
if __name__ == '__main__':
# test(testcase='poiseuille2d', niter=6)
# test(testcase='poiseuille3d', niter=5)
test(testcase='backwardFacingStep2d', niter=6)
# test(testcase='backwardFacingStep3d', niter=4)
# test(niter=4, exactsolution=[["x**2-y+z**2","-2*x*y*z+x**2","x**2-y**2+z"],"x*y+x*z"])
avg_line_length: 49.461538 | max_line_length: 168 | alphanum_fraction: 0.660446
79478bc46ec6852442ca3e7d0a214a79d0c9d16f | 150 | py | Python | majavahbot/api/__init__.py | supertassu/MajavahBot | 79313fc1b40d7adcd2161f5f3d31c6563856c451 | [
"MIT"
] | stars: 2 (2021-02-01T07:52:06.000Z to 2022-01-17T19:05:32.000Z) | issues: null | forks: null
from majavahbot.api.mediawiki import MediawikiApi, get_mediawiki_api
from majavahbot.api.database import ReplicaDatabase, TaskDatabase, task_database
avg_line_length: 50 | max_line_length: 80 | alphanum_fraction: 0.88
79478cc6960226acef5ad900e8fb63afc53c1890 | 1,520 | py | Python | auth.py | raminaji/Hackathon | cebf037ad42ba85579eb3b287a1b9bc58144f1e4 | [
"Apache-2.0"
] | stars: null | issues: null | forks: null
import firebase_admin
import json
from firebase_admin import db
cred_obj = firebase_admin.credentials.Certificate('hackathon-6c0c9-firebase-adminsdk-mvfav-92e3c52dc8.json')
default_app = firebase_admin.initialize_app(cred_obj, {
'databaseURL':"https://hackathon-6c0c9-default-rtdb.firebaseio.com/"
})
ref = db.reference("/")
data =ref.get()
users=list(data.keys())
usernames = users
passwords = []
for i in users:
passwords.append(data[i]["password"])
def signup_u(gui_new_username_input):
global user_input_u
user_input_u = gui_new_username_input
while user_input_u in usernames:
error_msg = ("Error. The Username is already in use.")
return error_msg
#user_input_u = input("Username: ")
return True
def signup_p(gui_new_password_input):
user_input_p = gui_new_password_input
ref.update({user_input_u:{"password":user_input_p}})
def sign_ins_u(sign_in_un):
global user_input_u
print("\nSign in. ")
user_input_u = sign_in_un
while user_input_u not in usernames:
print("Error. The Username is incorrect. Please try again.")
user_input_u = sign_in_un
return False
return 'All Good'
def sign_ins_p(gui_sign_ins_password):
user_input_p = gui_sign_ins_password
for i in range(len(usernames)):
if user_input_u == usernames[i]:
while user_input_p != passwords[i]:
print("Error. The Password is incorrect. Please try again.")
user_input_p = input("Password: ")
print("Logged in!")
avg_line_length: 33.043478 | max_line_length: 108 | alphanum_fraction: 0.714474
79478d605341bfb2f64cfbd3b735ab2fdbefe65c | 4,353 | py | Python | filebrowser/templatetags/fb_versions.py | bastiaanraa/django-filebrowser | de8cceb58f1b9f0f41e927a81cc3c3b8deee7108 | [
"BSD-3-Clause"
] | stars: 522 (2015-01-04T05:26:10.000Z to 2022-03-29T20:09:24.000Z) | issues: 144 (2015-01-06T11:52:27.000Z to 2022-03-22T19:07:15.000Z) | forks: 221 (2015-01-13T07:20:32.000Z to 2022-02-23T10:58:31.000Z)
# coding: utf-8
from django.conf import settings
from django.core.files import File
from django.template import Library, Node, Variable, VariableDoesNotExist, TemplateSyntaxError
from filebrowser.settings import VERSIONS, PLACEHOLDER, SHOW_PLACEHOLDER, FORCE_PLACEHOLDER
from filebrowser.base import FileObject
from filebrowser.sites import get_default_site
register = Library()
class VersionNode(Node):
def __init__(self, src, suffix, var_name):
self.src = src
self.suffix = suffix
self.var_name = var_name
def render(self, context):
try:
version_suffix = self.suffix.resolve(context)
source = self.src.resolve(context)
except VariableDoesNotExist:
if self.var_name:
return None
return ""
if version_suffix not in VERSIONS:
return "" # FIXME: should this throw an error?
if isinstance(source, FileObject):
source = source.path
elif isinstance(source, File):
source = source.name
else: # string
source = source
site = context.get('filebrowser_site', get_default_site())
if FORCE_PLACEHOLDER or (SHOW_PLACEHOLDER and not site.storage.isfile(source)):
source = PLACEHOLDER
fileobject = FileObject(source, site=site)
try:
version = fileobject.version_generate(version_suffix)
if self.var_name:
context[self.var_name] = version
else:
return version.url
except Exception:
if self.var_name:
context[self.var_name] = ""
if getattr(settings, 'TEMPLATE_DEBUG', True):
raise
return ""
def version(parser, token):
"""
Displaying a version of an existing Image according to the predefined VERSIONS settings (see filebrowser settings).
{% version fileobject version_suffix %}
Use {% version fileobject 'medium' %} in order to
display the medium-size version of an image.
version_suffix can be a string or a variable. if version_suffix is a string, use quotes.
Return a context variable 'var_name' with the FileObject
{% version fileobject version_suffix as var_name %}
Use {% version fileobject 'medium' as version_medium %} in order to
retrieve the medium version of an image stored in a variable version_medium.
version_suffix can be a string or a variable. If version_suffix is a string, use quotes.
"""
bits = token.split_contents()
if len(bits) != 3 and len(bits) != 5:
raise TemplateSyntaxError("'version' tag takes 2 or 4 arguments")
if len(bits) == 5 and bits[3] != 'as':
raise TemplateSyntaxError("second argument to 'version' tag must be 'as'")
if len(bits) == 3:
return VersionNode(parser.compile_filter(bits[1]), parser.compile_filter(bits[2]), None)
if len(bits) == 5:
return VersionNode(parser.compile_filter(bits[1]), parser.compile_filter(bits[2]), bits[4])
class VersionSettingNode(Node):
def __init__(self, version_suffix):
if (version_suffix[0] == version_suffix[-1] and version_suffix[0] in ('"', "'")):
self.version_suffix = version_suffix[1:-1]
else:
self.version_suffix = None
self.version_suffix_var = Variable(version_suffix)
def render(self, context):
if self.version_suffix:
version_suffix = self.version_suffix
else:
try:
version_suffix = self.version_suffix_var.resolve(context)
except VariableDoesNotExist:
return None
context['version_setting'] = VERSIONS[version_suffix]
return ''
def version_setting(parser, token):
"""
Get Information about a version setting.
"""
try:
tag, version_suffix = token.split_contents()
except:
raise TemplateSyntaxError("%s tag requires 1 argument" % token.contents.split()[0])
if (version_suffix[0] == version_suffix[-1] and version_suffix[0] in ('"', "'")) and version_suffix.lower()[1:-1] not in VERSIONS:
raise TemplateSyntaxError("%s tag received bad version_suffix %s" % (tag, version_suffix))
return VersionSettingNode(version_suffix)
register.tag(version)
register.tag(version_setting)
avg_line_length: 36.889831 | max_line_length: 134 | alphanum_fraction: 0.652883
79478d8951e50055e1645ec2a52d602e35756c5e | 571 | py | Python | examples/first_run.py | SvenAke/pyadb | 6585a00fd3f8511dd3b4224fc0c84bb71694dfab | [
"BSD-2-Clause"
] | null | null | null | examples/first_run.py | SvenAke/pyadb | 6585a00fd3f8511dd3b4224fc0c84bb71694dfab | [
"BSD-2-Clause"
] | null | null | null | examples/first_run.py | SvenAke/pyadb | 6585a00fd3f8511dd3b4224fc0c84bb71694dfab | [
"BSD-2-Clause"
] | null | null | null | #!/usr/bin/env python
#
# Very basic PyADB example
#
try:
import sys
from pyadb import ADB
except ImportError,e:
# should never be reached
print "[f] Required module missing. %s" % e.args[0]
sys.exit(-1)
def main():
# creates the ADB object
adb = ADB()
# IMPORTANT: You should supply the absolute path to ADB binary
if adb.set_adb_path('/home/chema/.android-sdks/platform-tools/adb') is True:
print "Version: %s" % adb.get_version()
else:
print "Check ADB binary path"
if __name__ == "__main__":
main()
| 22.84 | 80 | 0.635727 |
79478e2097e23a4ae28a670c1b5e4ac0be1bae5b | 9,591 | py | Python | get_data/hue_polling_to_db.py | damok6/apartmentMonitor | 686ce7748c91407f4110ceda508391f40e2086f5 | [
"MIT"
] | null | null | null | get_data/hue_polling_to_db.py | damok6/apartmentMonitor | 686ce7748c91407f4110ceda508391f40e2086f5 | [
"MIT"
] | null | null | null | get_data/hue_polling_to_db.py | damok6/apartmentMonitor | 686ce7748c91407f4110ceda508391f40e2086f5 | [
"MIT"
] | 1 | 2021-02-14T19:58:52.000Z | 2021-02-14T19:58:52.000Z | import time
import json
import sys
import os
import csv
import urllib2
import sqlite3
import requests
csv.field_size_limit(sys.maxsize)
if "HUE_IP_ADDRESS" in os.environ:
HUE_IP_ADDRESS = os.environ["HUE_IP_ADDRESS"]
else:
HUE_IP_ADDRESS = "set_ip_address_here" # If you don't want to set in environment variables
if "HUE_API_KEY" in os.environ:
HUE_API_KEY = os.environ["HUE_API_KEY"]
else:
HUE_API_KEY = "set_key_here" # If you don't want to set in environment variables
# If the INFLUX_URL is specified and not blank, then log to influx_db:
if 'INFLUX_URL' in os.environ and len(os.environ['INFLUX_URL']):
influx_url = os.environ['INFLUX_URL']
# Create the database:
resp = requests.post(url='{}/query'.format(influx_url),
data={'q':'CREATE DATABASE hue_data'})
print(resp.text)
else:
influx_url = None
DB = "../hue_data.db"
DB_TABLE = "hue_results"
DB_TABLE_KNMI_CACHE = "knmi_cache"
OUT_FILE = "../hue_results.csv"
HUE_API_LOCATION = "http://{}/api/".format(HUE_IP_ADDRESS)
INTERVAL = 10 #seconds between polls
WRITE_FILE = False
print("Polling API Location: {}".format(HUE_API_LOCATION))
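# Example environment setup before running this script (all values below are
# placeholders, not real credentials):
#
#     export HUE_IP_ADDRESS=192.168.1.50
#     export HUE_API_KEY=abcdef0123456789
#     export INFLUX_URL=http://localhost:8086    # optional; leave unset to skip InfluxDB logging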
def initialize_db():
""" When not available, creates Database and table.
Otherwise, does nothing.
"""
# Set up DB connection
con = sqlite3.connect(DB)
cur = con.cursor()
# Create table (if not exists)
try:
cur.execute("""
CREATE TABLE {0} (
un UNIQUE,
polling_timestamp,
device_name,
device_type,
device_modelid,
device_manufacturer,
device_swversion,
device_uid,
value,
value_timestamp
);
""".format(DB_TABLE))
except:
pass
# Create table (if not exists)
try:
cur.execute("""
CREATE TABLE {0} (
polling_timestamp
);
""".format(DB_TABLE_KNMI_CACHE))
except:
pass
con.close()
def write_db(results):
""" Writes list of CSV lines (results) to database
"""
if influx_url is not None:
log_to_influx_db(results, influx_url)
# Set up DB connection
con = sqlite3.connect(DB)
cur = con.cursor()
time_string = time.strftime("%Y-%m-%d %H:%M:%S", time.gmtime())
# Write to DB
for line in results:
# print('line', line)
try:
split_line = line.split(';')
un = "{0}{1}".format(split_line[0],split_line[7])
insert_data = ','.join(split_line)
#un = "{0}{1}".format(insert_data[0],insert_data[7])
insert_vals = "{},{},{}".format(un, time_string, insert_data)
insert_vals = ','.join(["'{}'".format(val) for val in insert_vals.split(',')])
# print(un)
# print(insert_vals)
query_str = "INSERT OR IGNORE INTO {0} VALUES({1})".format(DB_TABLE, insert_vals)
# print(query_str)
cur.execute(query_str)
except:
print "WARNING: Failed writing line to DB; '{0}'".format(line)
con.commit()
con.close()
def log_to_influx_db(results, influx_url):
influx_log_str = """"""
for line in results:
print(line)
split_line = line.split(';')
value_str = split_line[6]
if value_str == 'True':
value_str = True
elif value_str == 'False' or value_str == '':
value_str = False
value = float(value_str)
influx_log_str+=('{},device_name={},device_type={} value={}\n'.format(
split_line[1],
split_line[0].replace(' ','_'),
split_line[1],
value))
print(influx_log_str)
resp = requests.post(url='{}/write?db=hue_data'.format(influx_url),
data=influx_log_str,
headers={'Content-Type': 'application/octet-stream'})
print(resp.text)
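# For reference, each line-protocol row assembled above looks roughly like this
# (measurement = device type, tags = sanitised name and type, one float field;
# the concrete values are invented):
#
#     ZLLTemperature,device_name=Hallway_sensor,device_type=ZLLTemperature value=2150.0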
def retrieve_data(request_string):
""" Question Hue API with request_string
"""
try:
#print("{0}{1}/{2}".format(HUE_API_LOCATION, HUE_API_KEY, request_string))
result = urllib2.urlopen("{0}{1}/{2}".format(HUE_API_LOCATION, HUE_API_KEY, request_string)).read()
result_json = json.loads(result)
return result_json
except:
print "Network unreachable. Retrying on next iteration..."
return {}
def write_file(file, lines):
""" Write given lines to given file
"""
time_string = time.strftime("%Y-%m-%d %H:%M:%S", time.gmtime())
for line in lines:
try:
with open(file, "a") as f:
f.write("{0};{1}\n".format(time_string,line))
#print line
except:
print "WARNING: Failed writing line to file; '{0}'".format(line)
def retrieve_knmi_weather_parent():
""" Parent of KNMI polling to make sure only once every 5 minutes is being polled.
In any other situation we will use the last known value
"""
    # Only poll KNMI again when the cached poll timestamp is stale; otherwise reuse the last known value.
con = sqlite3.connect(DB)
cur = con.cursor()
query = """
SELECT
MAX(polling_timestamp)
FROM {0};
""".format(DB_TABLE_KNMI_CACHE)
# Execute query
cur.execute(query)
rows = cur.fetchall()
# Parse age
latest_time = "1970-01-01 01:00:00"
for row in rows:
latest_time = row[0]
print(latest_time)
    if latest_time is None or time.strptime(latest_time, "%Y-%m-%d %H:%M:%S") < time.gmtime(time.time() - 900):
        # No cached poll yet, or the cached poll is older than 900 s:
        # save the new poll time and retrieve fresh data.
        try:
            time_string = time.strftime("%Y-%m-%d %H:%M:%S", time.gmtime())
            cur.execute("INSERT OR IGNORE INTO {0} VALUES(?)".format(DB_TABLE_KNMI_CACHE), (time_string,))
            con.commit()
        except:
            print "WARNING: Failed writing time to KNMI DB Cache"
        con.close()
        # Retrieve
        return retrieve_knmi_weather()
    else:
        con.close()
        return False
def retrieve_knmi_weather():
""" Retrieve current weather in Voorschoten from KNMI website
"""
results = []
try:
# retrieve KNMI HTML
url = "http://www.knmi.nl/nederland-nu/weer/waarnemingen"
response = urllib2.urlopen(url)
html = response.read()
# Cut out part containing the info we need
        part = html.split("<td class=\"\">Voorschoten</td>")[1]
part = part.split("</tr>")[0]
parts = part.split("<td class=\"\">")
rotterdam_temperature = parts[1].replace("</td>","")
rotterdam_humidity = parts[2].replace("</td>","")
rotterdam_wind_speed = parts[4].replace("</td>","")
rotterdam_wind_direction = parts[3].replace("</td>","")
rotterdam_visibility = parts[5].replace("</td>","")
rotterdam_air_pressure = parts[6].replace("</td>","")
# Add results in correct format
time_string = time.strftime("%Y-%m-%d %H:%M:%S", time.gmtime())
results.append("{0};{1};{2};{3};{4};{5};{6};{7}".format(
"KNMI_ROTTERDAM_TEMPERATURE",
"Temperature",
"None",
"KNMI",
"1.0",
"KNMI_RDAM_T_0",
rotterdam_temperature,
time_string
))
results.append("{0};{1};{2};{3};{4};{5};{6};{7}".format(
"KNMI_ROTTERDAM_HUMIDITY",
"Humidity",
"None",
"KNMI",
"1.0",
"KNMI_RDAM_H_0",
rotterdam_humidity,
time_string
))
results.append("{0};{1};{2};{3};{4};{5};{6};{7}".format(
"KNMI_ROTTERDAM_WIND_SPEED",
"Wind speed (m/s)",
"None",
"KNMI",
"1.0",
"KNMI_RDAM_WS_0",
rotterdam_wind_speed,
time_string
))
results.append("{0};{1};{2};{3};{4};{5};{6};{7}".format(
"KNMI_ROTTERDAM_WIND_DIRECTION",
"Wind direction",
"None",
"KNMI",
"1.0",
"KNMI_RDAM_WD_0",
rotterdam_wind_direction,
time_string
))
results.append("{0};{1};{2};{3};{4};{5};{6};{7}".format(
"KNMI_ROTTERDAM_VISIBILITY",
"Visibility (m)",
"None",
"KNMI",
"1.0",
"KNMI_RDAM_V_0",
rotterdam_visibility,
time_string
))
results.append("{0};{1};{2};{3};{4};{5};{6};{7}".format(
"KNMI_ROTTERDAM_PRESSURE",
"Air pressure (hPa)",
"None",
"KNMI",
"1.0",
"KNMI_RDAM_P_0",
rotterdam_air_pressure,
time_string
))
except:
print "Failed retrieving KNMI data"
return results
def parse_results(result):
""" Parse results from Hue API into one CSV line per Hue measurement.
Returns list of CSV lines
"""
results_parsed = []
for device in result:
try:
current = result[device]
device_data = "{0};{1};{2};{3};{4};{5}".format(
current["name"],
current["type"],
current["modelid"],
current["manufacturername"],
current["swversion"],
current["uniqueid"])
device_specific = ";"
if current["type"] == "Daylight":
device_specific = "{0};{1}".format(
current["state"]["daylight"],
current["state"]["lastupdated"].replace("T"," "))
if current["type"] == "ZLLTemperature":
device_specific = "{0};{1}".format(
current["state"]["temperature"],
current["state"]["lastupdated"].replace("T"," "))
if current["type"] == "ZLLPresence":
device_specific = "{0};{1}".format(
current["state"]["presence"],
current["state"]["lastupdated"].replace("T"," "))
if current["type"] == "ZLLLightLevel":
device_specific = "{0};{1}".format(
current["state"]["lightlevel"],
current["state"]["lastupdated"].replace("T"," "))
if current["type"] == "CLIPGenericStatus":
device_specific = "{0};{1}".format(
current["state"]["status"],
current["state"]["lastupdated"].replace("T"," "))
# device_config = json.dumps(current["config"])
device_line = "{0};{1}".format(device_data, device_specific)
results_parsed.append(device_line)
except Exception as e:
print "Device with invalid JSON contents found. Error: {0}".format(e)
return results_parsed
initialize_db()
# Main loop
while True:
# Retrieve Hue data
result = retrieve_data("sensors")
# Parse data
result_parsed = parse_results(result)
print(result_parsed)
# Retrieve and add KNMI data
knmi = retrieve_knmi_weather_parent()
if knmi is not False:
result_parsed = result_parsed + knmi
# Write to CSV
if WRITE_FILE:
write_file(OUT_FILE, result_parsed)
# Write to DB
write_db(result_parsed)
# Finished
    print "Wrote results for {0} devices. Continuing...".format(len(result_parsed))
# Sleep, continue
time.sleep(INTERVAL)
| 25.644385 | 101 | 0.653738 |
7947919d149f556fa9f1b74da89d9cfa2c7b906c | 8,174 | py | Python | keras/layers/advanced_activations.py | asanoboy/keras | e467ee5a1a00afdfa1cb7f5508fdbfd2c5eab1e5 | [
"MIT"
] | 1 | 2020-05-18T03:08:48.000Z | 2020-05-18T03:08:48.000Z | keras/layers/advanced_activations.py | asanoboy/keras | e467ee5a1a00afdfa1cb7f5508fdbfd2c5eab1e5 | [
"MIT"
] | null | null | null | keras/layers/advanced_activations.py | asanoboy/keras | e467ee5a1a00afdfa1cb7f5508fdbfd2c5eab1e5 | [
"MIT"
] | 1 | 2019-11-19T12:13:27.000Z | 2019-11-19T12:13:27.000Z | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from .. import activations
from .. import initializers
from .. import regularizers
from .. import constraints
from ..engine import Layer
from ..engine import InputSpec
from .. import backend as K
from ..legacy import interfaces
class LeakyReLU(Layer):
"""Leaky version of a Rectified Linear Unit.
It allows a small gradient when the unit is not active:
`f(x) = alpha * x for x < 0`,
`f(x) = x for x >= 0`.
# Input shape
Arbitrary. Use the keyword argument `input_shape`
(tuple of integers, does not include the samples axis)
when using this layer as the first layer in a model.
# Output shape
Same shape as the input.
# Arguments
alpha: float >= 0. Negative slope coefficient.
# References
- [Rectifier Nonlinearities Improve Neural Network Acoustic Models](https://web.stanford.edu/~awni/papers/relu_hybrid_icml2013_final.pdf)
"""
def __init__(self, alpha=0.3, **kwargs):
super(LeakyReLU, self).__init__(**kwargs)
self.supports_masking = True
self.alpha = K.cast_to_floatx(alpha)
def call(self, inputs):
return K.relu(inputs, alpha=self.alpha)
def get_config(self):
config = {'alpha': float(self.alpha)}
base_config = super(LeakyReLU, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
class PReLU(Layer):
"""Parametric Rectified Linear Unit.
It follows:
`f(x) = alpha * x for x < 0`,
`f(x) = x for x >= 0`,
where `alpha` is a learned array with the same shape as x.
# Input shape
Arbitrary. Use the keyword argument `input_shape`
(tuple of integers, does not include the samples axis)
when using this layer as the first layer in a model.
# Output shape
Same shape as the input.
# Arguments
alpha_initializer: initializer function for the weights.
alpha_regularizer: regularizer for the weights.
alpha_constraint: constraint for the weights.
shared_axes: the axes along which to share learnable
parameters for the activation function.
For example, if the incoming feature maps
are from a 2D convolution
with output shape `(batch, height, width, channels)`,
and you wish to share parameters across space
so that each filter only has one set of parameters,
set `shared_axes=[1, 2]`.
# References
- [Delving Deep into Rectifiers: Surpassing Human-Level Performance on ImageNet Classification](https://arxiv.org/abs/1502.01852)
"""
@interfaces.legacy_prelu_support
def __init__(self, alpha_initializer='zeros',
alpha_regularizer=None,
alpha_constraint=None,
shared_axes=None,
**kwargs):
super(PReLU, self).__init__(**kwargs)
self.supports_masking = True
self.alpha_initializer = initializers.get(alpha_initializer)
self.alpha_regularizer = regularizers.get(alpha_regularizer)
self.alpha_constraint = constraints.get(alpha_constraint)
if shared_axes is None:
self.shared_axes = None
elif not isinstance(shared_axes, (list, tuple)):
self.shared_axes = [shared_axes]
else:
self.shared_axes = list(shared_axes)
def build(self, input_shape):
param_shape = list(input_shape[1:])
self.param_broadcast = [False] * len(param_shape)
if self.shared_axes is not None:
for i in self.shared_axes:
param_shape[i - 1] = 1
self.param_broadcast[i - 1] = True
self.alpha = self.add_weight(shape=param_shape,
name='alpha',
initializer=self.alpha_initializer,
regularizer=self.alpha_regularizer,
constraint=self.alpha_constraint)
# Set input spec
axes = {}
if self.shared_axes:
for i in range(1, len(input_shape)):
if i not in self.shared_axes:
axes[i] = input_shape[i]
self.input_spec = InputSpec(ndim=len(input_shape), axes=axes)
self.built = True
def call(self, inputs, mask=None):
pos = K.relu(inputs)
if K.backend() == 'theano':
neg = (K.pattern_broadcast(self.alpha, self.param_broadcast) *
(inputs - K.abs(inputs)) * 0.5)
else:
neg = -self.alpha * K.relu(-inputs)
return pos + neg
def get_config(self):
config = {
'alpha_initializer': initializers.serialize(self.alpha_initializer),
'alpha_regularizer': regularizers.serialize(self.alpha_regularizer),
'alpha_constraint': constraints.serialize(self.alpha_constraint),
'shared_axes': self.shared_axes
}
base_config = super(PReLU, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
class ELU(Layer):
"""Exponential Linear Unit.
It follows:
`f(x) = alpha * (exp(x) - 1.) for x < 0`,
`f(x) = x for x >= 0`.
# Input shape
Arbitrary. Use the keyword argument `input_shape`
(tuple of integers, does not include the samples axis)
when using this layer as the first layer in a model.
# Output shape
Same shape as the input.
# Arguments
alpha: scale for the negative factor.
# References
- [Fast and Accurate Deep Network Learning by Exponential Linear Units (ELUs)](https://arxiv.org/abs/1511.07289v1)
"""
def __init__(self, alpha=1.0, **kwargs):
super(ELU, self).__init__(**kwargs)
self.supports_masking = True
self.alpha = K.cast_to_floatx(alpha)
def call(self, inputs):
return K.elu(inputs, self.alpha)
def get_config(self):
config = {'alpha': float(self.alpha)}
base_config = super(ELU, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
class ThresholdedReLU(Layer):
"""Thresholded Rectified Linear Unit.
It follows:
`f(x) = x for x > theta`,
`f(x) = 0 otherwise`.
# Input shape
Arbitrary. Use the keyword argument `input_shape`
(tuple of integers, does not include the samples axis)
when using this layer as the first layer in a model.
# Output shape
Same shape as the input.
# Arguments
theta: float >= 0. Threshold location of activation.
# References
- [Zero-Bias Autoencoders and the Benefits of Co-Adapting Features](http://arxiv.org/abs/1402.3337)
"""
def __init__(self, theta=1.0, **kwargs):
super(ThresholdedReLU, self).__init__(**kwargs)
self.supports_masking = True
self.theta = K.cast_to_floatx(theta)
def call(self, inputs, mask=None):
return inputs * K.cast(K.greater(inputs, self.theta), K.floatx())
def get_config(self):
config = {'theta': float(self.theta)}
base_config = super(ThresholdedReLU, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
class Softmax(Layer):
"""Softmax activation function.
# Input shape
Arbitrary. Use the keyword argument `input_shape`
(tuple of integers, does not include the samples axis)
when using this layer as the first layer in a model.
# Output shape
Same shape as the input.
# Arguments
axis: Integer, axis along which the softmax normalization is applied.
"""
def __init__(self, axis=-1, **kwargs):
super(Softmax, self).__init__(**kwargs)
self.supports_masking = True
self.axis = axis
def call(self, inputs):
return activations.softmax(inputs, axis=self.axis)
def get_config(self):
config = {'axis': self.axis}
base_config = super(Softmax, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
| 33.917012 | 145 | 0.620382 |
7947928f668501c6d000d5fd14fb597fb9090a4e | 6,296 | py | Python | doc/source/conf.py | 1990chs/pymapdl-reader | 6fa369c9019ac87f13b9a505d66e6399f36f1a66 | [
"MIT"
] | null | null | null | doc/source/conf.py | 1990chs/pymapdl-reader | 6fa369c9019ac87f13b9a505d66e6399f36f1a66 | [
"MIT"
] | null | null | null | doc/source/conf.py | 1990chs/pymapdl-reader | 6fa369c9019ac87f13b9a505d66e6399f36f1a66 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import warnings
import os
import pyvista
import numpy as np
from ansys.mapdl import reader as pymapdl_reader
# -- pyvista configuration ---------------------------------------------------
# Manage errors
pyvista.set_error_output_file('errors.txt')
# Ensure that offscreen rendering is used for docs generation
pyvista.OFF_SCREEN = True
# Preferred plotting style for documentation
# pyvista.set_plot_theme('document')
pyvista.rcParams['window_size'] = np.array([1024, 768]) * 2
# Save figures in specified directory
pyvista.FIGURE_PATH = os.path.join(os.path.abspath('./images/'), 'auto-generated/')
if not os.path.exists(pyvista.FIGURE_PATH):
os.makedirs(pyvista.FIGURE_PATH)
pyvista.BUILDING_GALLERY = True
# suppress annoying matplotlib bug
warnings.filterwarnings(
"ignore",
category=UserWarning,
message='Matplotlib is currently using agg, which is a non-GUI backend, so cannot show the figure.',
)
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.autodoc',
'sphinx.ext.napoleon',
'sphinx.ext.doctest',
'sphinx.ext.autosummary',
'notfound.extension',
'sphinx_copybutton',
'sphinx_gallery.gen_gallery',
'sphinx.ext.extlinks',
'sphinx.ext.coverage',
]
# extensions = ['sphinx.ext.autodoc',
# 'sphinx_gallery.gen_gallery',
# 'sphinx.ext.napoleon']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'PyMAPDL Legacy Reader'
copyright = '(c) 2021 ANSYS, Inc. All rights reserved'
author = 'ANSYS Inc.'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
release = version = pymapdl_reader.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# Copy button customization ---------------------------------------------------
# exclude traditional Python prompts from the copied code
copybutton_prompt_text = ">>> "
# -- Sphinx Gallery Options
from sphinx_gallery.sorting import FileNameSortKey
sphinx_gallery_conf = {
'pypandoc': True, # convert rst to md for ipynb
# path to your examples scripts
"examples_dirs": [
"../../examples/",
],
# path where to save gallery generated examples
"gallery_dirs": ["examples"],
# Patter to search for example files
"filename_pattern": r"\.py",
# Remove the "Download all examples" button from the top level gallery
# "download_all_examples": False,
# Sort gallery example by file name instead of number of lines (default)
"within_subsection_order": FileNameSortKey,
# directory where function granular galleries are stored
"backreferences_dir": None,
# Modules for which function level galleries are created. In
"doc_module": "ansys.mapdl.reader",
"image_scrapers": (pymapdl_reader._get_sg_image_scraper(), 'matplotlib'),
"thumbnail_size": (350, 350),
'first_notebook_cell': ("%matplotlib inline\n"
"from pyvista import set_plot_theme\n"
"set_plot_theme('document')")
}
# -- Options for HTML output -------------------------------------------------
html_theme = 'pyansys_sphinx_theme'
html_logo = 'https://docs.pyansys.com/_static/pyansys-logo-black-cropped.png'
html_theme_options = {
"github_url": "https://github.com/pyansys/PyMAPDL",
}
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'pymapdlreaderdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'pymapdl_reader.tex', u'PyMAPDL Legacy Reader Documentation',
u'ANSYS Open Source Developers', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'pymapdl_reader', u'PyMAPDL Legacy Reader Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'pymapdl-reader', u'PyMAPDL Reader Documentation',
author, 'pymapdl-reader', 'PyMAPDL binary file reader.',
'Miscellaneous'),
]
| 31.323383 | 104 | 0.658196 |
7947946e2d520cd6e42df84172077b14940808f5 | 12,923 | py | Python | refinement/networks/deeplab_xception.py | Guliisgreat/PReMVOS | b177e4d7c2aab40f93e181a8282ca72c5d789cb9 | [
"MIT"
] | 11 | 2019-01-20T06:41:32.000Z | 2021-03-13T13:34:07.000Z | refinement/networks/deeplab_xception.py | Guliisgreat/PReMVOS | b177e4d7c2aab40f93e181a8282ca72c5d789cb9 | [
"MIT"
] | null | null | null | refinement/networks/deeplab_xception.py | Guliisgreat/PReMVOS | b177e4d7c2aab40f93e181a8282ca72c5d789cb9 | [
"MIT"
] | 1 | 2019-04-14T07:19:49.000Z | 2019-04-14T07:19:49.000Z | import math
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.model_zoo as model_zoo
class SeparableConv2d(nn.Module):
def __init__(self, inplanes, planes, kernel_size=3, stride=1, padding=0, dilation=1, bias=False):
super(SeparableConv2d, self).__init__()
self.conv1 = nn.Conv2d(inplanes, inplanes, kernel_size, stride, padding, dilation,
groups=inplanes, bias=bias)
self.pointwise = nn.Conv2d(inplanes, planes, 1, 1, 0, 1, 1, bias=bias)
def forward(self, x):
x = self.conv1(x)
x = self.pointwise(x)
return x
class Block(nn.Module):
def __init__(self, inplanes, planes, reps, stride=1, start_with_relu=True, grow_first=True):
super(Block, self).__init__()
if planes != inplanes or stride != 1:
self.skip = nn.Conv2d(inplanes, planes, 1, stride=stride, bias=False)
self.skipbn = nn.BatchNorm2d(planes)
else:
self.skip = None
self.relu = nn.ReLU(inplace=True)
rep = []
filters = inplanes
if grow_first:
rep.append(self.relu)
rep.append(SeparableConv2d(inplanes, planes, 3, stride=1, padding=1, bias=False))
rep.append(nn.BatchNorm2d(planes))
filters = planes
for i in range(reps - 1):
rep.append(self.relu)
rep.append(SeparableConv2d(filters, filters, 3, stride=1, padding=1, bias=False))
rep.append(nn.BatchNorm2d(filters))
if not grow_first:
rep.append(self.relu)
rep.append(SeparableConv2d(inplanes, planes, 3, stride=1, padding=1, bias=False))
rep.append(nn.BatchNorm2d(planes))
if not start_with_relu:
rep = rep[1:]
if stride != 1:
rep.append(SeparableConv2d(planes, planes, 3, stride=2, padding=1))
self.rep = nn.Sequential(*rep)
def forward(self, inp):
x = self.rep(inp)
if self.skip is not None:
skip = self.skip(inp)
skip = self.skipbn(skip)
else:
skip = inp
x += skip
return x
class Xception(nn.Module):
"""
Modified Alighed Xception
"""
def __init__(self, inplanes=3, pretrained=False):
super(Xception, self).__init__()
# Entry flow
self.conv1 = nn.Conv2d(inplanes, 32, 3, stride=2, bias=False)
self.bn1 = nn.BatchNorm2d(32)
self.relu = nn.ReLU(inplace=True)
self.conv2 = nn.Conv2d(32, 64, 3, bias=False)
self.bn2 = nn.BatchNorm2d(64)
self.block1 = Block(64, 128, reps=2, stride=2, start_with_relu=False)
self.block2 = Block(128, 256, reps=2, stride=2, start_with_relu=True, grow_first=True)
self.block3 = Block(256, 728, reps=2, stride=2, start_with_relu=True, grow_first=True)
# Middle flow
self.block4 = Block(728, 728, reps=3, stride=1, start_with_relu=True, grow_first=True)
self.block5 = Block(728, 728, reps=3, stride=1, start_with_relu=True, grow_first=True)
self.block6 = Block(728, 728, reps=3, stride=1, start_with_relu=True, grow_first=True)
self.block7 = Block(728, 728, reps=3, stride=1, start_with_relu=True, grow_first=True)
self.block8 = Block(728, 728, reps=3, stride=1, start_with_relu=True, grow_first=True)
self.block9 = Block(728, 728, reps=3, stride=1, start_with_relu=True, grow_first=True)
self.block10 = Block(728, 728, reps=3, stride=1, start_with_relu=True, grow_first=True)
self.block11 = Block(728, 728, reps=3, stride=1, start_with_relu=True, grow_first=True)
self.block12 = Block(728, 728, reps=3, stride=1, start_with_relu=True, grow_first=True)
self.block13 = Block(728, 728, reps=3, stride=1, start_with_relu=True, grow_first=True)
self.block14 = Block(728, 728, reps=3, stride=1, start_with_relu=True, grow_first=True)
self.block15 = Block(728, 728, reps=3, stride=1, start_with_relu=True, grow_first=True)
self.block16 = Block(728, 728, reps=3, stride=1, start_with_relu=True, grow_first=True)
self.block17 = Block(728, 728, reps=3, stride=1, start_with_relu=True, grow_first=True)
self.block18 = Block(728, 728, reps=3, stride=1, start_with_relu=True, grow_first=True)
self.block19 = Block(728, 728, reps=3, stride=1, start_with_relu=True, grow_first=True)
# Exit flow
self.block20 = Block(728, 1024, reps=2, stride=2, start_with_relu=True, grow_first=False)
self.conv3 = SeparableConv2d(1024, 1536, 3, stride=1, padding=1)
self.bn3 = nn.BatchNorm2d(1536)
self.conv4 = SeparableConv2d(1536, 1536, 3, stride=1, padding=1)
self.bn4 = nn.BatchNorm2d(1536)
self.conv5 = SeparableConv2d(1536, 2048, 3, stride=1, padding=1)
self.bn5 = nn.BatchNorm2d(2048)
# init weights
self._init_weight()
if pretrained:
self._load_xception_pretrained()
def forward(self, x):
# Entry flow
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.conv2(x)
x = self.bn2(x)
x = self.relu(x)
x = self.block1(x)
x = self.block2(x)
low_level_feat = x
x = self.block3(x)
# Middle flow
x = self.block4(x)
x = self.block5(x)
x = self.block6(x)
x = self.block7(x)
x = self.block8(x)
x = self.block9(x)
x = self.block10(x)
x = self.block11(x)
x = self.block12(x)
x = self.block13(x)
x = self.block14(x)
x = self.block15(x)
x = self.block16(x)
x = self.block17(x)
x = self.block18(x)
x = self.block19(x)
# Exit flow
x = self.block20(x)
x = self.conv3(x)
x = self.bn3(x)
x = self.relu(x)
x = self.conv4(x)
x = self.bn4(x)
x = self.relu(x)
x = self.conv5(x)
x = self.bn5(x)
x = self.relu(x)
return x, low_level_feat
def _init_weight(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
# n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
# m.weight.data.normal_(0, math.sqrt(2. / n))
torch.nn.init.kaiming_normal_(m.weight)
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def _load_xception_pretrained(self):
pretrain_dict = model_zoo.load_url('http://data.lip6.fr/cadene/pretrainedmodels/xception-b5690688.pth')
model_dict = {}
state_dict = self.state_dict()
for k, v in pretrain_dict.items():
if k in state_dict:
# if 'bn' in k:
# continue
if 'pointwise' in k:
v = v.unsqueeze(-1).unsqueeze(-1)
if k.startswith('block12'):
model_dict[k.replace('block12', 'block20')] = v
elif k.startswith('block11'):
model_dict[k.replace('block11', 'block12')] = v
model_dict[k.replace('block11', 'block13')] = v
model_dict[k.replace('block11', 'block14')] = v
model_dict[k.replace('block11', 'block15')] = v
model_dict[k.replace('block11', 'block16')] = v
model_dict[k.replace('block11', 'block17')] = v
model_dict[k.replace('block11', 'block18')] = v
model_dict[k.replace('block11', 'block19')] = v
elif k.startswith('conv3'):
model_dict[k] = v
elif k.startswith('bn3'):
model_dict[k] = v
model_dict[k.replace('bn3', 'bn4')] = v
elif k.startswith('conv4'):
model_dict[k.replace('conv4', 'conv5')] = v
elif k.startswith('bn4'):
model_dict[k.replace('bn4', 'bn5')] = v
else:
model_dict[k] = v
state_dict.update(model_dict)
self.load_state_dict(state_dict)
class ASPP_module(nn.Module):
def __init__(self, inplanes, planes, rate):
super(ASPP_module, self).__init__()
self.atrous_convolution = nn.Conv2d(inplanes, planes, kernel_size=3,
stride=1, padding=rate, dilation=rate)
self.batch_norm = nn.BatchNorm2d(planes)
self._init_weight()
def forward(self, x):
x = self.atrous_convolution(x)
x = self.batch_norm(x)
return x
def _init_weight(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
# n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
# m.weight.data.normal_(0, math.sqrt(2. / n))
torch.nn.init.kaiming_normal_(m.weight)
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
class DeepLabv3_plus(nn.Module):
def __init__(self, nInputChannels=3, n_classes=21, pretrained=False, _print=True):
if _print:
print("Constructing DeepLabv3+ model...")
print("Number of classes: {}".format(n_classes))
print("Number of Input Channels: {}".format(nInputChannels))
super(DeepLabv3_plus, self).__init__()
# Atrous Conv
self.xception_features = Xception(nInputChannels, pretrained=pretrained)
# ASPP
rates = [1, 6, 12, 18]
self.aspp1 = ASPP_module(2048, 256, rate=rates[0])
self.aspp2 = ASPP_module(2048, 256, rate=rates[1])
self.aspp3 = ASPP_module(2048, 256, rate=rates[2])
self.aspp4 = ASPP_module(2048, 256, rate=rates[3])
self.global_avg_pool = nn.Sequential(nn.AdaptiveAvgPool2d((1, 1)),
nn.Conv2d(2048, 256, 1, stride=1))
self.conv1 = nn.Conv2d(1280, 256, 1)
self.bn1 = nn.BatchNorm2d(256)
# adopt [1x1, 48] for channel reduction.
self.conv2 = nn.Conv2d(256, 48, 1)
self.bn2 = nn.BatchNorm2d(48)
self.last_conv = nn.Sequential(nn.Conv2d(304, 256, kernel_size=3, stride=1, padding=1),
nn.BatchNorm2d(256),
nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1),
nn.BatchNorm2d(256),
nn.Conv2d(256, n_classes, kernel_size=1, stride=1))
# self.freeze_bn()
def forward(self, x):
x, low_level_features = self.xception_features(x)
x1 = self.aspp1(x)
x2 = self.aspp2(x)
x3 = self.aspp3(x)
x4 = self.aspp4(x)
x5 = self.global_avg_pool(x)
x5 = F.upsample(x5, size=x4.size()[2:], mode='bilinear', align_corners=True)
x = torch.cat((x1, x2, x3, x4, x5), dim=1)
x = self.conv1(x)
x = self.bn1(x)
x = F.upsample(x, scale_factor=4, mode='bilinear', align_corners=True)
low_level_features = self.conv2(low_level_features)
low_level_features = self.bn2(low_level_features)
x = torch.cat((x, low_level_features), dim=1)
x = self.last_conv(x)
x = F.upsample(x, scale_factor=4, mode='bilinear', align_corners=True)
return x
def freeze_bn(self):
for m in self.modules():
if isinstance(m, nn.BatchNorm2d):
m.eval()
def get_1x_lr_params(model):
"""
This generator returns all the parameters of the net except for
the last classification layer. Note that for each batchnorm layer,
requires_grad is set to False in deeplab_resnet.py, therefore this function does not return
any batchnorm parameter
"""
b = [model.xception_features, model.global_avg_pool, model.bn1, model.bn2]
for i in range(len(b)):
for k in b[i].parameters():
if k.requires_grad:
yield k
def get_10x_lr_params(model):
"""
This generator returns all the parameters for the last layer of the net,
which does the classification of pixel into classes
"""
b = [model.aspp1, model.aspp2, model.aspp3, model.aspp4, model.conv1, model.conv2, model.last_conv]
for j in range(len(b)):
for k in b[j].parameters():
if k.requires_grad:
yield k
if __name__ == "__main__":
model = DeepLabv3_plus(nInputChannels=3, n_classes=21, pretrained=True, _print=True).cuda()
image = torch.randn(1, 3, 513, 513).cuda()
# According to paper, encoder output stride is 16,
# Therefore, final output size is 256 (16*16).
with torch.no_grad():
output = model.forward(image)
print(output.size())
| 36.609065 | 111 | 0.576259 |
79479631cc2e02a85900e5333906b14171228f07 | 1,691 | py | Python | data/covering_grammar/lib/make_test_file.py | wannaphong/wikipron | fefd89a9ecc62ac62abd91486049e5162e94e8a5 | [
"Apache-2.0"
] | 111 | 2019-08-11T05:57:50.000Z | 2022-03-22T03:32:33.000Z | data/covering_grammar/lib/make_test_file.py | wannaphong/wikipron | fefd89a9ecc62ac62abd91486049e5162e94e8a5 | [
"Apache-2.0"
] | 256 | 2019-08-10T20:14:35.000Z | 2021-05-09T16:35:06.000Z | data/covering_grammar/lib/make_test_file.py | wannaphong/wikipron | fefd89a9ecc62ac62abd91486049e5162e94e8a5 | [
"Apache-2.0"
] | 47 | 2019-08-10T01:47:13.000Z | 2021-04-24T15:19:41.000Z | #!/usr/bin/env python
"""Makes test file.
Using gold data and the model output, this script creates a three-column TSV
file in which each row contains a word, its gold pronunciation, and the
predicted pronunciation, assuming that the input files have the words listed
in the same order."""
import argparse
import contextlib
import logging
from data.scrape.lib.codes import LOGGING_PATH
def main(args: argparse.Namespace) -> None:
with contextlib.ExitStack() as stack:
gf = stack.enter_context(open(args.gold, "r"))
pf = stack.enter_context(open(args.pred, "r"))
wf = stack.enter_context(open(args.out, "w"))
for lineno, (g_line, p_line) in enumerate(zip(gf, pf), 1):
g_word, g_pron = g_line.rstrip().split("\t", 2)
p_word, p_pron = p_line.rstrip().split("\t", 2)
# Ensures the gold data and predictions have the same words.
if g_word != p_word:
logging.error("%s != %s (line %d)", g_word, p_word, lineno)
exit(1)
print(f"{g_word}\t{g_pron}\t{p_pron}", file=wf)
if __name__ == "__main__":
logging.basicConfig(
format="%(filename)s %(levelname)s: %(message)s",
handlers=[
logging.FileHandler(LOGGING_PATH, mode="a"),
logging.StreamHandler(),
],
level="INFO",
)
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument(
"gold", help="TSV with words and correct pronunciations"
)
parser.add_argument(
"pred", help="TSV with words and predicted pronunciations"
)
parser.add_argument("out", help="output file")
main(parser.parse_args())
| 34.510204 | 76 | 0.635719 |
794796aeaf424d20ac58c40c6051118ca28ce3e3 | 3,825 | py | Python | imageprocessing/imageprocessing_pb2.py | LukasMaly/grpc-playground | fc67a9b4e47cc7a18954ad66023c771328edb428 | [
"MIT"
] | null | null | null | imageprocessing/imageprocessing_pb2.py | LukasMaly/grpc-playground | fc67a9b4e47cc7a18954ad66023c771328edb428 | [
"MIT"
] | null | null | null | imageprocessing/imageprocessing_pb2.py | LukasMaly/grpc-playground | fc67a9b4e47cc7a18954ad66023c771328edb428 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: imageprocessing.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='imageprocessing.proto',
package='imageprocessing',
syntax='proto3',
serialized_options=None,
serialized_pb=_b('\n\x15imageprocessing.proto\x12\x0fimageprocessing\"F\n\x05Image\x12\x0e\n\x06height\x18\x01 \x01(\x05\x12\r\n\x05width\x18\x02 \x01(\x05\x12\x10\n\x08\x63hannels\x18\x03 \x01(\x05\x12\x0c\n\x04\x64\x61ta\x18\x04 \x01(\x0c\x32R\n\x0fImageProcessing\x12?\n\x0bToGrayscale\x12\x16.imageprocessing.Image\x1a\x16.imageprocessing.Image\"\x00\x62\x06proto3')
)
_IMAGE = _descriptor.Descriptor(
name='Image',
full_name='imageprocessing.Image',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='height', full_name='imageprocessing.Image.height', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='width', full_name='imageprocessing.Image.width', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='channels', full_name='imageprocessing.Image.channels', index=2,
number=3, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='data', full_name='imageprocessing.Image.data', index=3,
number=4, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=42,
serialized_end=112,
)
DESCRIPTOR.message_types_by_name['Image'] = _IMAGE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
Image = _reflection.GeneratedProtocolMessageType('Image', (_message.Message,), {
'DESCRIPTOR' : _IMAGE,
'__module__' : 'imageprocessing_pb2'
# @@protoc_insertion_point(class_scope:imageprocessing.Image)
})
_sym_db.RegisterMessage(Image)
_IMAGEPROCESSING = _descriptor.ServiceDescriptor(
name='ImageProcessing',
full_name='imageprocessing.ImageProcessing',
file=DESCRIPTOR,
index=0,
serialized_options=None,
serialized_start=114,
serialized_end=196,
methods=[
_descriptor.MethodDescriptor(
name='ToGrayscale',
full_name='imageprocessing.ImageProcessing.ToGrayscale',
index=0,
containing_service=None,
input_type=_IMAGE,
output_type=_IMAGE,
serialized_options=None,
),
])
_sym_db.RegisterServiceDescriptor(_IMAGEPROCESSING)
DESCRIPTOR.services_by_name['ImageProcessing'] = _IMAGEPROCESSING
# @@protoc_insertion_point(module_scope)
| 32.974138 | 372 | 0.750065 |
7947971ea12ed25a9a272c9f9d8e064174cb8624 | 11,520 | py | Python | py_rpb/gd2js.py | refugee-phrasebook/py_rpb | 678e6658ecb3088e1a5f535c9c82d1dc96f3d0dc | [
"MIT"
] | 2 | 2015-09-19T11:17:51.000Z | 2016-01-27T15:16:13.000Z | py_rpb/gd2js.py | refugee-phrasebook/py_rpb | 678e6658ecb3088e1a5f535c9c82d1dc96f3d0dc | [
"MIT"
] | null | null | null | py_rpb/gd2js.py | refugee-phrasebook/py_rpb | 678e6658ecb3088e1a5f535c9c82d1dc96f3d0dc | [
"MIT"
] | null | null | null | import scrapesheet as sc
import sys
import string
import sampa2x
from iso6393 import iso6393dic
import json
from data import domaind
#provide a commandline version to scrape google doc and output to either tsv or js
#tabulate from original class was not working at the time of writing
class lexeme():
def __init__(self,ID,label,orth,iso,sampa,latn,ipa,domain):
self.ID = ID
self.label = label
self.iso6393code = iso
self.orthographic = orth
self.transcriptions = sampa2x.getPhoneticStrings(sampa,ipa=ipa)
self.domain = domain
if latn:
self.transcriptions['Latin'] = latn
else:
self.transcriptions['Latin'] = ''
def normalizename(s):
h = str(hash(s))[:3]
n = ''.join([c for c in s if c in string.ascii_letters])[:10]
result = "%s_%s"%(n,h)
return result
def outputtsv(normname,pairs):
filename = 'data_%s_.tsv' % normname
print(filename)
out = open(filename, 'w')
for p in pairs:
out.write("\t".join(p))
out.write("\n")
out.close()
def lg2js(normname,pairs):
t = """{
"target":"%s",
"source":"%s",
"phonetic":"%s",
"domain":""
}"""
result = """ '%s':function (){
return [""" % normname
result += '\n,\n'.join([t%p for p in pairs])
result +=""" ]
}"""
return result
def getpairs(records, lg, target=0):
    """
    return (target, translation) pairs for language column lg, skipping empty translations
    """
    return [(r[target], r[lg]) for r in records if r[lg] != '']
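# e.g. (illustrative record): getpairs([['hello', '', 'hallo']], 2) -> [('hello', 'hallo')]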
def gettuples(records,languages):
"""
return a tuple of the ID and all renderings in all selected languages
"""
languages = [0,3]+languages
#print(languages)
return [records[l] for l in languages]
def s2tsv(s,languages,typ=''):
firstlanguage = languages[0]
#print(firstlanguage)
#print(s.records[0])
#print(s.records[0][firstlanguage])
name = s.records[0][languages[0]]
normname = normalizename(name)
#print(languages)
#print([s.records[0][i] for i in languages])
print(normname)
#pairs = getpairs(s.records[1:], lg)
records = s.records[1:]
tuples = [gettuples(r,languages) for r in records]
fn = 'tsv/data_%s_%s.tsv'%(typ,normname)
#print(fn)
out = open(fn,'w')
for t in tuples:
out.write("\t".join(t))
out.write("\n")
out.close()
def s2js(s,languages,typ='',target=0):
jss = []
for lg in languages:
print(lg)
lg = int(lg)
name = s.records[0][lg]
normname = normalizename(name)
print("%s --> %s"%(name,normname))
pairs = getpairs(s.records[1:], lg,target=target)
lgjs = lg2js(normname, pairs)
jss.append(lgjs)
t = "var lgs={\n%s\n}"% '\n,\n'.join(jss)
targetlg = normalizename(s.records[0][target])
fn = '%sdata_%s.js'%(typ,targetlg)
print(fn)
out = open(fn,'w')
out.write(t)
out.close()
def s2lxm(s):#only for new short-style sheets
result = []
records = s.records
iso6393code = records[0][2]
#identify interesting colums
sampacol = False
latncol = False
ipacol = False
for i,x in enumerate(records[0]):
y = x.strip()
if y.endswith('-Qaas'):
if y.startswith(iso6393code):
sampacol = i
else:
raise ValueError
continue
if y.endswith('-Qaai'):
if y.startswith(iso6393code):
ipacol = i
else:
raise ValueError
continue
if y.endswith('-Latn'):
if y.startswith(iso6393code):
latncol = i
else:
raise ValueError
#extract information from rows
for record in s.records[1:]:
ID, label = record[:2]
sampa = False
ipa = False
latn = False
if sampacol:
sampa = record[sampacol]
if latncol:
latn = record[latncol]
if ipacol:
            ipa = record[ipacol]
orth = record[2]
#print(sampa, latn, orth, ID, label,iso6393code)
domain = getDomain(ID)
lxm = lexeme(ID,label,orth,iso6393code,sampa,latn,ipa,domain) # check for overwriting
result.append(lxm)
#print(len(result))
return result
def getDomain(ID):
try:
return domaind[ID]
except KeyError:
print (ID,"not in domaind")
def addtodico(d,lx):
ID = lx.ID
#assure the lexeme is in the dict
if d.get(ID) == None:
d[ID] = {'label':lx.label,
'domain':lx.domain,
'ID':lx.ID,
'lgs':{'eng':{'orthographic':lx.label,
'transcriptions':{'IPA':'',
'SAMPA':'',
'cyrtrans':'',
'arabtrans':''
}
}
}
}
#print(lx.label)
#add new information
if d[ID]['lgs'].get(lx.iso6393code) != None:
print("%s already present in %s, skipping %s"%(ID,lx.iso6393code,lx.orthographic))
return d
d[ID]['lgs'][lx.iso6393code] = {'orthographic':lx.orthographic,
'transcriptions':lx.transcriptions,
'label':iso6393dic[lx.iso6393code]['label']
}
return d
if __name__ == '__main__':
#usage : gd2js.py 1 3 8 12 14 17 19 22 24
languages = [int(i) for i in sys.argv[1:]]
#print(languages)
sheets = [('German','https://docs.google.com/spreadsheets/d/1Hu1ikg7AM_OJzbWSwSIzljYTOor4fKLXiUBYSXPm1ks/pubhtml'),
('Serbo-Croatian','https://docs.google.com/spreadsheets/d/1wweXwpEpHWrFcM46YZ-SVi-gLfywrUyj1wjEf19DWQE/pubhtml'),
('Albanian','https://docs.google.com/spreadsheets/d/1B9OXDIV4nDUekqpwILbAK6eHIAIP5UePgpYbOWRsTvY/pubhtml'),
('Urdu','https://docs.google.com/spreadsheets/d/1oCRZRBOn6sl8ufJ12OF1gPR_OLj598H45OENqkFfF7U/pubhtml'),
('Amharic','https://docs.google.com/spreadsheets/d/1ni8FOoW4Nqa1drwVCEoKMh4NqAn5ySSezFL-Mvo0hiY/pubhtml'),
('Somali','https://docs.google.com/spreadsheets/d/1SLCVAYupSfjpvMwKiid0Z4JexbJhQsZX19yAzhPIcx0/pubhtml'),
('Slovenian','https://docs.google.com/spreadsheets/d/1fFUM1Vv3EwYKZDFmYDxNTBPTEZAGG4fIWVuLJ3JFOS8/pubhtml'),
('Spanish','https://docs.google.com/spreadsheets/d/1YPoON25ikaxl47e03rIvP000EWV1Jjb69uYfcKykyVw/pubhtml'),
('Tigrinya','https://docs.google.com/spreadsheets/d/1xirVHOFdzJnAk2zHdcyL1qk59R0O9xIKYCpuDOzRCVk/pubhtml'),
('Arabic','https://docs.google.com/spreadsheets/d/1OgfvT0-Fu1i7o4voo6hGyijl5PuX3Ao7vaaD290yN3c/pubhtml'),
('Tamil','https://docs.google.com/spreadsheets/d/1U5zN3Z8ndAsP-rgAIUYdZ5byStv-MXjiCHPQjmiUYPI/pubhtml'),
('Vietnamese','https://docs.google.com/spreadsheets/d/1nAHfWgRkPl8v3bn2cZdlDzs_z1gTAEsvJDPMftfBozQ/pubhtml'),
('Turkish','https://docs.google.com/spreadsheets/d/1TxSFbmaWGjbg0jQCDTEQQRtUREMr5FaoDhfQIVpqGuM/pubhtml'),
('Armenian','https://docs.google.com/spreadsheets/d/17GNmqw7p70yeCCgxhX-0B12p4Pr-5Dn3SGEPqVj8SMQ/pubhtml'),
('Bangla','https://docs.google.com/spreadsheets/d/14T7_M75eTfuVq3sv90NuAXNL4ANa_S_FiW9UeYzCwzY/pubhtml'),
('Bulgarian','https://docs.google.com/spreadsheets/d/1_IVb4dau9W4Ks2EXeirmKvJcpiv-g5wGvtpuvKWXx2o/pubhtml'),
('Catalan','https://docs.google.com/spreadsheets/d/1CofyH0zQK5EqreiQHOCmKZ_JBu5IdfeIEvZZEGK0lrQ/pubhtml'),
('Czech','https://docs.google.com/spreadsheets/d/1pvYWmnD1gG-6EJDZjfm_OSRCmmzI0rojRl-WBjjsvkg/pubhtml'),
('Dari','https://docs.google.com/spreadsheets/d/1_DZXAK6qVd3qRFKH-xodl8JnUPGnX8_y_tRuYUM128k/pubhtml'),
('Dutch','https://docs.google.com/spreadsheets/d/1OhE1xpgofuivQDtcWvDeng4XxpyDFteExKwiC-k57wE/pubhtml'),
('Slovak','https://docs.google.com/spreadsheets/d/1dOiR8Uicz59p5CvzXHtT3M672R0Iw3ADzFcZANE27pA/pubhtml'),
('Russian','https://docs.google.com/spreadsheets/d/1WptrC8MhzEDpBma86wyyz2CvVhXyUKIaIaFklkMcC80/pubhtml'),
('Romanian','https://docs.google.com/spreadsheets/d/1ashnd-ZtcyrFEj0fYAl5ksaImSqWyahgHkbZnD_YqMA/pubhtml'),
('Portuguese','https://docs.google.com/spreadsheets/d/1QKIgFbW-R9Zr6fzTuQs-grOPvGp0DcNj1FfkY_bbPqA/pubhtml'),
('Filipino','https://docs.google.com/spreadsheets/d/1_5C3GEZbr34X9nLUADEaCs63Rz3TDkOE4e1DwFMsmcs/pubhtml'),
('Farsi','https://docs.google.com/spreadsheets/d/1S8KfVhmT6oDmJuxL0QFQ9q3j4cnegUWWXOSTUv6r7gY/pubhtml'),
('Finnish','https://docs.google.com/spreadsheets/d/1VS0gHUD5sHqoQPI65CCbhCykhC5Bb0jx3GXfWbAmwBQ/pubhtml'),
('French','https://docs.google.com/spreadsheets/d/1wSR5_gLCMNdGDOLlKuKel35_oaKrzrX5z6pgrlB_T0k/pubhtml'),
('Polish','https://docs.google.com/spreadsheets/d/1lNixeQDE3IaGV1-KwGd0QMxDawpj8B2AcRMLnkRXE7I/pubhtml'),
('Pashto','https://docs.google.com/spreadsheets/d/1Wz4il9CygqlZW1m7l7DDfXQpqQ-Unk7zmavBO5r5kGI/pubhtml'),
('Macedonian','https://docs.google.com/spreadsheets/d/1kEcuVFHCkt5kUE2dV2jff4UZZBLIZ2mUMVlue4ICQtM/pubhtml'),
('Lithuanian','https://docs.google.com/spreadsheets/d/1ozMIw30k-r8DzANLR66QWHWR7rdbkiJi_PfjU2zgIVE/pubhtml'),
('Greek','https://docs.google.com/spreadsheets/d/1L2QEC-TpWDEhUfQERConudQO12kx54zEy8poesFmo1c/pubhtml'),
('Sorani','https://docs.google.com/spreadsheets/d/1eFm_HeVZYmibwUElJ88wroTANMMFBLVkF9b4G5w4Ksk/pubhtml'),
('Hungarian','https://docs.google.com/spreadsheets/d/1fHcCEKf7utsT6L_LY7iDaSMKpSkwLSbqTKD96Bi1Bvw/pubhtml'),
('Icelandic','https://docs.google.com/spreadsheets/d/1mfVsGJqVp9iJ0rLXsqqZ5EDdgMdAbKh4mY7D7m8zQHA/pubhtml'),
('Kurmanji','https://docs.google.com/spreadsheets/d/1mfVsGJqVp9iJ0rLXsqqZ5EDdgMdAbKh4mY7D7m8zQHA/pubhtml'),
('Italian','https://docs.google.com/spreadsheets/d/1sTtzVugGrOL3ZplTRejSr5G2UAcv7JSEqYDiISzZbJM/pubhtml'),
('Swedish','https://docs.google.com/spreadsheets/d/1v4LtKee6U1booU92P0UOUrrL4W9nWaiyzx4g-9v20gI/pubhtml'),
('Norwegian','https://docs.google.com/spreadsheets/d/1Nb2EOiFuyYmIVIB5MtUINyflQMOIu8OklgVgWS5zG2w/pubhtml'),
('Danish','https://docs.google.com/spreadsheets/d/1Cd8H5-wle6ea32alCPdoSIzrbTu_Il48zGzq8XokV3o/pubhtml')
]
#accumulate lexemes
lexemes = []
for typ, sheet_uri in sheets:
print(typ)
s = sc.SheetScraper(sheet_uri)
s.fetch()
s.select_columns(languages)
#s2tsv(s,languages,typ=sh)
#s2js(s,languages,typ=sh)
lexemes += s2lxm(s)
fulldico = {}
#store lexemes in dictionary
for lx in lexemes:
fulldico = addtodico(fulldico,lx)
jd = json.dumps(fulldico,indent=4)
out = open('lexemes.json','w')
out.write(jd)
out.close()
#'short':'https://docs.google.com/spreadsheets/d/10Ch8eIACzROPYql5aztkG3_VvdCdkDInnVVK7QPK2E0/pubhtml#gid=418287843&single=true',
#'long':'https://docs.google.com/spreadsheets/d/1IpkETNzRzletRpLEeLUKAldB2j_O8UJVn1zM_sYg56Y/pubhtml#gid=0',
#'longcopy': 'https://docs.google.com/spreadsheets/d/1bBesmfse2EcK0n_DpgEM5uGd4EwNkxZW8waRLPSPb4Y/pubhtml?gid=0&single=true'
#('medical','https://docs.google.com/spreadsheets/d/1wjmRrkN9WVB4KIeKBy8wDDJ8E51Mh2-JxIBy2KNMFRQ/pubhtml')
#'legal':'https://docs.google.com/spreadsheets/d/1D7jo-tAyQkmfYvVyT27nZ93ZkyFcZg2vEvf4OMbXJ_c/pubhtml#gid=0',
| 42.666667 | 139 | 0.644531 |
794798aff045dc6b478e07c5df7e4769f202116a | 383 | py | Python | src/art/asgi.py | Denis-Gerashchenko/artsite | b7289dffa5bf49c41886355b3f6698639f8035cc | [
"MIT"
] | null | null | null | src/art/asgi.py | Denis-Gerashchenko/artsite | b7289dffa5bf49c41886355b3f6698639f8035cc | [
"MIT"
] | null | null | null | src/art/asgi.py | Denis-Gerashchenko/artsite | b7289dffa5bf49c41886355b3f6698639f8035cc | [
"MIT"
] | null | null | null | """
ASGI config for art project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'art.settings')
application = get_asgi_application()
| 22.529412 | 78 | 0.780679 |
794798faeaad88c7f8c2cd98c936a01b69379c30 | 173 | py | Python | teerace/api/v1/helpers.py | chaosk/teerace | 16110c739eb12009d8e70fb3c3bfe8ed87112cfa | [
"BSD-3-Clause"
] | 3 | 2016-06-10T03:45:19.000Z | 2018-09-27T15:32:17.000Z | teerace/api/v1/helpers.py | teerace/web | 16110c739eb12009d8e70fb3c3bfe8ed87112cfa | [
"BSD-3-Clause"
] | 26 | 2018-12-31T12:54:27.000Z | 2019-04-08T02:34:44.000Z | teerace/api/v1/helpers.py | teerace/web | 16110c739eb12009d8e70fb3c3bfe8ed87112cfa | [
"BSD-3-Clause"
] | 3 | 2017-01-08T14:59:18.000Z | 2018-11-21T14:59:03.000Z | def get_filtered_checkpoints(checkpoints):
try:
return ";".join([v for v in checkpoints.split(";") if float(v) != 0.0])
except ValueError:
return ""
| 28.833333 | 79 | 0.612717 |
794798fe34d3356fab12bcb429c5aa3a0ef82719 | 446 | py | Python | tests/verifiers_tests/bab_tests.py | nathzi1505/DNNV | 16c6e6ecb681ce66196f9274d4a43eede8686319 | [
"MIT"
] | 33 | 2019-12-13T18:54:52.000Z | 2021-11-16T06:29:29.000Z | tests/verifiers_tests/bab_tests.py | nathzi1505/DNNV | 16c6e6ecb681ce66196f9274d4a43eede8686319 | [
"MIT"
] | 28 | 2020-01-30T14:06:03.000Z | 2022-01-27T01:07:37.000Z | tests/verifiers_tests/bab_tests.py | nathzi1505/DNNV | 16c6e6ecb681ce66196f9274d4a43eede8686319 | [
"MIT"
] | 14 | 2020-04-08T01:57:00.000Z | 2021-11-26T09:35:02.000Z | import os
import unittest
from tests.verifiers_tests.utils import VerifierTests
from tests.utils import network_artifact_dir, property_artifact_dir
from dnnv.verifiers.bab import BaB
@unittest.skipIf(not BaB.is_installed(), "BaB is not installed")
class BabVerifierTests(VerifierTests, unittest.TestCase):
def initialize(self):
self.verifier = BaB
self.is_complete = False
if __name__ == "__main__":
unittest.main()
| 23.473684 | 67 | 0.764574 |
794799aeb51c13238794011f0587b6531b1adf26 | 3,108 | py | Python | labs/lab-06/plot_words_4.py | elihschiff/oss-repo-template | 3bb03a350be0e4b1c662a37948a6ac00cd94dcef | [
"MIT"
] | null | null | null | labs/lab-06/plot_words_4.py | elihschiff/oss-repo-template | 3bb03a350be0e4b1c662a37948a6ac00cd94dcef | [
"MIT"
] | null | null | null | labs/lab-06/plot_words_4.py | elihschiff/oss-repo-template | 3bb03a350be0e4b1c662a37948a6ac00cd94dcef | [
"MIT"
] | null | null | null | """
=====
Words
=====
Words/Ladder Graph
------------------
Generate an undirected graph over the 5757 5-letter words in the
datafile `words_dat.txt.gz`. Two words are connected by an edge
if they differ in one letter, resulting in 14,135 edges. This example
is described in Section 1.1 in Knuth's book (see [1]_ and [2]_).
References
----------
.. [1] Donald E. Knuth,
"The Stanford GraphBase: A Platform for Combinatorial Computing",
ACM Press, New York, 1993.
.. [2] http://www-cs-faculty.stanford.edu/~knuth/sgb.html
"""
# Authors: Aric Hagberg ([email protected]),
# Brendt Wohlberg,
# [email protected]
# Copyright (C) 2004-2019 by
# Aric Hagberg <[email protected]>
# Dan Schult <[email protected]>
# Pieter Swart <[email protected]>
# All rights reserved.
# BSD license.
import gzip
from string import ascii_lowercase as lowercase
import networkx as nx
#-------------------------------------------------------------------
# The Words/Ladder graph of Section 1.1
#-------------------------------------------------------------------
def generate_graph(words):
G = nx.Graph(name="words")
lookup = dict((c, lowercase.index(c)) for c in lowercase)
def edit_distance_one(word):
for i in range(len(word)):
left, c, right = word[0:i], word[i], word[i + 1:]
j = lookup[c] # lowercase.index(c)
for cc in lowercase[j + 1:]:
                for k in range(len(left + right) + 1):
                    new_word = list(left + right)
                    new_word.insert(k, cc)
                    yield ''.join(new_word)
candgen = ((word, cand) for word in sorted(words)
for cand in edit_distance_one(word) if cand in words)
G.add_nodes_from(words)
for word, cand in candgen:
G.add_edge(word, cand)
return G
def words_graph():
"""Return the words example graph from the Stanford GraphBase"""
fh = gzip.open('words_dat.txt.gz', 'r')
words = set()
for line in fh.readlines():
line = line.decode()
if line.startswith('*'):
continue
w = str(line[0:5])
words.add(w)
return generate_graph(words)
if __name__ == '__main__':
G = words_graph()
print("Loaded words_dat.txt containing 5757 five-letter English words.")
print("Two words are connected if they differ in one letter.")
print("Graph has %d nodes with %d edges"
% (nx.number_of_nodes(G), nx.number_of_edges(G)))
print("%d connected components" % nx.number_connected_components(G))
for (source, target) in [('chaos', 'order'),
('nodes', 'graph'),
('moron', 'smart'),
('flies', 'swims'),
('mango', 'peach'),
('pound', 'marks')]:
print("Shortest path between %s and %s is" % (source, target))
try:
sp = nx.shortest_path(G, source, target)
for n in sp:
print(n)
except nx.NetworkXNoPath:
print("None")
| 32.375 | 76 | 0.549871 |
79479a45120a92fc1b120227c512b8e07f2e3fd6 | 667 | py | Python | Python/7. calculator.py | mohitdhaundiyal/laravel_emtechtalks | a8274aa2d77f57e2c8e2686a023f7bdcf01db4ca | [
"MIT"
] | null | null | null | Python/7. calculator.py | mohitdhaundiyal/laravel_emtechtalks | a8274aa2d77f57e2c8e2686a023f7bdcf01db4ca | [
"MIT"
] | 4 | 2021-02-02T21:27:12.000Z | 2022-02-27T09:14:45.000Z | Python/7. calculator.py | mohitdhaundiyal/laravel_emtechtalks | a8274aa2d77f57e2c8e2686a023f7bdcf01db4ca | [
"MIT"
] | null | null | null | # Simple menu-driven calculator.
def add(num1, num2):
    print(num1 + num2)
def sub(num1, num2):
    print(num1 - num2)
def product(num1, num2):
    print(num1 * num2)
def divide(num1, num2):
    if num2 == 0:
        print('cannot divide by zero')
    else:
        print(num1 / num2)
print('select operation')
print('1, Add')
print('2, Sub')
print('3, Product')
print('4, Divide')
opp = int(input())
print('enter two numbers')
number1 = float(input('first number: '))
number2 = float(input('second number: '))
if opp == 1:
    add(number1, number2)
elif opp == 2:
    sub(number1, number2)
elif opp == 3:
    product(number1, number2)
elif opp == 4:
    divide(number1, number2)
else:
    print('unknown operation')
| 18.527778 | 34 | 0.622189 |
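A possible refinement of the calculator above is to replace the if/elif chain with a dispatch table, so the menu text and the arithmetic live in one mapping. This is an alternative sketch, not part of the original file; the operator module is standard library.
# Dispatch-table variant of the menu calculator (illustrative sketch).
import operator
OPERATIONS = {
    1: ("Add", operator.add),
    2: ("Sub", operator.sub),
    3: ("Product", operator.mul),
    4: ("Divide", operator.truediv),
}
print("select operation")
for key, (name, _) in OPERATIONS.items():
    print(f"{key}, {name}")
choice = int(input())
a = float(input("first number: "))
b = float(input("second number: "))
name, func = OPERATIONS[choice]
print(func(a, b))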
79479bbfd02ea5354ef7e5d26f55fbf77009af0f | 14,721 | py | Python | MdlUtilities.py | Ivanfdezr/CentralSoftware | 8681fedd4814dc60deb527a370411350b40c994c | [
"MIT"
] | null | null | null | MdlUtilities.py | Ivanfdezr/CentralSoftware | 8681fedd4814dc60deb527a370411350b40c994c | [
"MIT"
] | 44 | 2021-02-10T23:58:28.000Z | 2021-12-14T02:38:21.000Z | MdlUtilities.py | Ivanfdezr/CentralSoftware | 8681fedd4814dc60deb527a370411350b40c994c | [
"MIT"
] | null | null | null | import numpy as np
#from numpy import array
import re
import copy
import dbUtils
import matplotlib.tri as mpltri
import sys, inspect
gravitationalAcceleration = 32.17405*12 #in/s²
configurations = { ('b', None,None): {'nest':[['A']],'label':'=\n|\nA\n|\n=','PLfactor':0.05},
('r', None,None): {'nest':[['A']],'label':'=\n/\nA\n/\n=','PLfactor':1},
('b', 'b', None): {'nest':[['A','B']],'label':'=\nA\n|\nB\n=','PLfactor':1},
('b', 'r', None): {'nest':[['A'],['B']],'label':'=\n|\nA\n|\n=\n/\nB\n/\n=','PLfactor':1.5},
('r', 'b', None): {'nest':[['A'],['B']],'label':'=\n/\nA\n/\n=\n|\nB\n|\n=','PLfactor':1.5},
('r', 'r', None): {'nest':[['A'],['B']],'label':'=\n/\nA\n/\n=\n/\nB\n/\n=','PLfactor':2},
('b', None,'b' ): {'nest':[['A'],[],['C']],'label':'=\n|\nA\n|\n=\n|\n|\n|\n=\n|\nC\n|\n=','PLfactor':2},
('b', None,'r' ): {'nest':[['A'],[],['C']],'label':'=\n|\nA\n|\n=\n|\n|\n|\n=\n/\nC\n/\n=','PLfactor':2.5},
('r', None,'b' ): {'nest':[['A'],[],['C']],'label':'=\n/\nA\n/\n=\n|\n|\n|\n=\n|\nC\n|\n=','PLfactor':2.5},
('r', None,'r' ): {'nest':[['A'],[],['C']],'label':'=\n/\nA\n/\n=\n|\n|\n|\n=\n/\nC\n/\n=','PLfactor':3},
('b', 'b', 'b' ): {'nest':[['A','B','C']],'label':'=\nA\nB\nC\n=','PLfactor':1},
('b', 'b', 'r' ): {'nest':[['A','B'],['C']],'label':'=\nA\n|\nB\n=\n/\nC\n/\n=','PLfactor':2},
('r', 'b', 'b' ): {'nest':[['A'],['B','C']],'label':'=\n/\nA\n/\n=\nB\n|\nC\n=','PLfactor':2},
('b', 'r', 'b' ): {'nest':[['A'],['B'],['C']],'label':'=\n|\nA\n|\n=\n/\nB\n/\n=\n|\nC\n|\n=','PLfactor':2},
('b', 'r', 'r' ): {'nest':[['A'],['B'],['C']],'label':'=\n|\nA\n|\n=\n/\nB\n/\n=\n/\nC\n/\n=','PLfactor':2.5},
('r', 'r', 'b' ): {'nest':[['A'],['B'],['C']],'label':'=\n/\nA\n/\n=\n/\nB\n/\n=\n|\nC\n|\n=','PLfactor':2.5},
('r', 'b', 'r' ): {'nest':[['A'],['B'],['C']],'label':'=\n/\nA\n/\n=\n|\nB\n|\n=\n/\nC\n/\n=','PLfactor':3},
('r', 'r', 'r' ): {'nest':[['A'],['B'],['C']],'label':'=\n/\nA\n/\n=\n/\nB\n/\n=\n/\nC\n/\n=','PLfactor':3} }
def __repr__(self):
if len(self)==0:
return '[]'
elif len(self)==1:
return '[' + str(self[0]) +']'
else:
return '[' + str(self[0]) +', ... '+ str(self[-1]) + ']'
np.set_string_function(__repr__)
array = lambda L: np.array(L)
def get_decimalPointPattern():
return '(([\-\+]?\d*\.?\d+)|([\-\+]?\d+\.?\d*))'
def get_decimalPointWithThousandsCommaPattern():
return '(([\-\+]?\d{1,3}(\,\d{3})*\.\d*)|([\-\+]?\d*\.?\d+)|([\-\+]?\d+\.?\d*))'
def get_decimalCommaPattern():
return '(([\-\+]?\d{1,3}(\.\d{3})*\,\d*)|([\-\+]?\d*\,?\d+)|([\-\+]?\d+\,?\d*))'
def get_decimalFloatPointFunction():
def text2float(text):
items = re.split(',',text)
text = ''.join(items)
return float(text)
return text2float
def get_decimalFloatCommaFunction():
def text2float(text):
items = re.split(',',text)
assert(len(items)==2)
tridigs = re.split('\.',items[0])
items[0] = ''.join(tridigs)
text = '.'.join(items)
return float(text)
return text2float
def np_dot( u,v ):
return np.sum(u*v,axis=1,keepdims=True)
def np_cross( u,v ):
return np.cross(u,v,axis=1)
def np_norm( v ):
norm = np.linalg.norm(v,axis=1)
norm = norm.reshape(-1,1)
return v/norm
def calculate_buoyancyFactor( OD, ID, ρs, ρe, ρi ):
doverDsq = (ID/OD)**2
return ( (1-ρe/ρs)-doverDsq*(1-ρi/ρs) )/( 1-doverDsq )
def render_circle( center, radius, n=120, mode='all', xscale=1, yscale=1 ):
if mode=='all':
θ = np.linspace(0,np.pi*2,n)
θ += np.pi/2 #- np.pi/20
elif mode=='top':
θ = np.linspace(0,np.pi,n)
elif mode=='bottom':
θ = np.linspace(np.pi,np.pi*2,n)
elif mode=='right':
θ = np.linspace(-np.pi/2,np.pi*2,n)
elif mode=='left':
θ = np.linspace(np.pi/2,np.pi*3/2,n)
x = radius*np.cos(θ)*xscale
y = radius*np.sin(θ)*yscale
x += center[0]
y += center[1]
return np.array([x,y])
def RodriguesRotationFormula( v, u, θ ):
# Equation Reference:
# https://en.wikipedia.org/wiki/Rodrigues%27_rotation_formula
return v*np.cos(θ) + np_cross( u,v )*np.sin(θ) + u*np_dot( u,v )*(1-np.cos(θ))
def render_wellbore( fields, radius, n=12 ):
x = np.array( fields.EW )
y = np.array( fields.NS )
z = np.array( fields.TVD )
c = 1.0#np.array( fields.MD )
"""
max_EW = max( fields.EW )
min_EW = min( fields.EW )
max_NS = max( fields.NS )
min_NS = min( fields.NS )
max_TVD = max( fields.TVD )
min_TVD = min( fields.TVD )
ΔEW = max_EW - min_EW
ΔNS = max_NS - min_NS
ΔTVD = max_TVD - min_TVD
if ΔEW>ΔNS:
zfactor = ΔEW/ΔTVD
else:
zfactor = ΔNS/ΔTVD
zfactor=1
z *= zfactor
"""
S = np.array([x,y,z])
S = S.T
U = S[2:] - S[:-2]
U = np.append([U[0]], U, axis=0)
U = np.append(U, [U[-1]], axis=0)
U = np_norm(U)
P = np.array([[1,0,0]])
V = np_cross(U,P)
V = radius*np_norm(V)
l = len(V)
R = [ V+S ]
φ = 2*np.pi/n
for i in range(n):
V = RodriguesRotationFormula( V, U, φ )
R.append( V+S )
R = np.array(R)
R = R.reshape(-1,3)
n+=1
#nl = n*l
#triangles = []
X,Y,Z = R.T
X = X.reshape(n,l)
Y = Y.reshape(n,l)
Z = Z.reshape(n,l)
C = c*np.ones((n,l))
return X,Y,Z,C#triangles #,Z/zfactor
def make_cleanAverage( X ):
if len(X)>0:
a = np.average(X)
for i in range(10):
W = np.exp(-np.abs(X-a))
a = np.average(X, weights=W)
return a
else:
return None
def isNoneEntry( entry ):
return entry=='' and hasattr(entry,'unit')
def isSomething( value ):
return value!='' and value!=None and value!=False
def unitConvert_value( value, originUnit, targetUnit ):
query = """select u.factorToReferenceUnit, u.offsetToReferenceUnit from units u
where u.representation = '{origin}' """.format(origin=originUnit)
items_origin = dbUtils.execute_query(query)
query = """select u.factorToReferenceUnit, u.offsetToReferenceUnit from units u
where u.representation = '{target}' """.format(target=targetUnit)
items_target = dbUtils.execute_query(query)
factor_origin = float(items_origin[0][0])
factor_target = float(items_target[0][0])
offset_origin = float(items_origin[0][1])
offset_target = float(items_target[0][1])
value = physicalValue( factor_origin/factor_target * value + (offset_origin - offset_target)/factor_target, targetUnit )
return value
def referenceUnitConvert_value( value, unit ):
query = """select u.factorToReferenceUnit, u.offsetToReferenceUnit, u.referenceUnit from units u
where u.representation = '{unit}' """.format(unit=unit)
items = dbUtils.execute_query(query)
factor = float(items[0][0])
offset = float(items[0][1])
referenceUnit = items[0][2]
value = physicalValue( factor*value+offset, referenceUnit )
return value
def inverseReferenceUnitConvert_value( value, unit ):
query = """select u.factorToReferenceUnit, u.offsetToReferenceUnit from units u
where u.representation = '{unit}' """.format(unit=unit)
items = dbUtils.execute_query(query)
factor = float(items[0][0])
offset = float(items[0][1])
value = physicalValue( (value-offset)/factor, unit )
return value
def create_physicalValue_and_appendTo_field(value, field, unit=None ):
if unit=='referenceUnit':
value = physicalValue( value, field.referenceUnit )
elif unit==None:
value = physicalValue( value, field.unit )
else:
value = physicalValue( value, unit )
field.append( value )
def xfloat( expression ):
if isinstance(expression, float) or isinstance(expression, np.float32) or isinstance(expression, np.float64):
value = __float__( expression )
return value
else:
if expression=='' or expression==None:
raise ValueError
items = re.split('[ ]+',str(expression))
value = __float__( eval( '+'.join(items) ) )
value.fraction = expression
return value
def physicalValue(value, unit):
if isinstance(value, int) or isinstance(value, np.int32) or isinstance(value, np.int64):
entry = __int__(value)
elif isinstance(value, float) or isinstance(value, np.float32) or isinstance(value, np.float64):
entry = __float__(value)
elif isinstance(value, str):
entry = __str__(value)
elif isinstance(value, type(None)):
entry = __str__('')
entry.unit = unit
#entry.repr = lambda: str(entry._repr_)+' '+entry._repr_.unit
return entry
class LogicalError( Exception ): pass
class __int__(int): pass
class __float__(float): pass
class __str__(str): pass
class FieldList( list ):
def __init__(self):
super().__init__()
def append(self, field):
field.pos = len(self)
setattr(self, str(field.abbreviation), field)
super().append(field)
def insert_data(self, data):
for field in self:
try:
try:
field.append(data[field.abbreviation])
except AttributeError:
value = physicalValue(data[field.abbreviation],field.unit)
field.append(value)
except KeyError:
value = physicalValue(None,field.unit)
field.append(value)
def extract_data_from_row(self, row, representation=False):
data = {}
for field in self:
if representation:
data[field.abbreviation] = field[row]._repr_
else:
data[field.abbreviation] = field[row]
return data
def extract_fields_from_row(self, row):
fields = FieldList()
"""
for field in self:
newfield = Field(field.id)
newfield.append(field[row])
fields.append(newfield)
"""
for field in self:
newfield = copy.deepcopy( field )
newfield.clear()
newfield.append(field[row])
fields.append(newfield)
return fields
def clear_content(self):
for field in self:
field.clear()
def referenceUnitConvert_fields(self):
for field in self:
field.referenceUnitConvert()
def inverseReferenceUnitConvert_fields(self):
for field in self:
field.inverseReferenceUnitConvert()
class Field( list ):
def __init__(self, fieldID, altBg=False, altTx=False, altFg=False, mandatory=False, substitutefieldID=None):
super().__init__()
self.pos = None
self.id = fieldID
self.mandatory = mandatory
self._altFg_ = altFg
if substitutefieldID:
query = """ select f.abbreviation from fields f where f.fieldID = '{fieldID}' """.format(fieldID=substitutefieldID)
self.substitute = dbUtils.execute_query(query)[0][0]
else:
self.substitute = None
query = """ select f.description, f.representation, f.dataType, f.precision,
f.backgroundColor, f.altBackgroundColor, f.textColor, f.altTextColor, f.flag, f.altFlag, f.abbreviation
from fields f where f.fieldID = '{fieldID}' """.format(fieldID=fieldID)
items = dbUtils.execute_query(query)[0]
nom_i,alt_i = (5,4) if altBg else (4,5)
nom_j,alt_j = (7,6) if altTx else (6,7)
nom_k,alt_k = (9,8) if altFg else (8,9)
self.description = items[0]
self.representation = items[1]
self.dataType = eval(items[2])
self.backgroundColor = np.array([ int(items[nom_i][:2],16), int(items[nom_i][2:4],16), int(items[nom_i][4:],16) ])
self.altBackgroundColor = np.array([ int(items[alt_i][:2],16), int(items[alt_i][2:4],16), int(items[alt_i][4:],16) ])
self.textColor = np.array([ int(items[nom_j][:2],16), int(items[nom_j][2:4],16), int(items[nom_j][4:],16) ])
self.altTextColor = np.array([ int(items[alt_j][:2],16), int(items[alt_j][2:4],16), int(items[alt_j][4:],16) ])
self.flag = int(items[nom_k])
self.altFlag = int(items[alt_k])
self.abbreviation = items[10]
try:
self.precision = int(items[3])
except (TypeError, ValueError):
self.precision = None
try:
query = """ select u.representation from units u, work_units qu, fields f
where u.unitID=qu.unitID and qu.parameterID=f.parameterID and f.fieldID='{fieldID}' """.format(fieldID=fieldID)
self.unit = dbUtils.execute_query(query)[0][0]
self.set_unit(self.unit)
except IndexError:
self.headerName = self.representation
self.unit = None
self.factorToReferenceUnit = None
self.offsetToReferenceUnit = None
self.referenceUnit = None
def __repr__(self):
return __repr__(self)
def set_abbreviation(self, newAbbreviation):
self.abbreviation = newAbbreviation
def set_representation(self, newRepresentation):
self.representation = newRepresentation
if self.unit:
self.headerName = newRepresentation + ' ['+self.unit+']'
else:
self.headerName = newRepresentation
def set_unit(self, newUnit):
self.headerName = self.representation + ' ['+newUnit+']'
query = """select u.factorToReferenceUnit, u.offsetToReferenceUnit, u.referenceUnit from units u
where u.representation = '{unit}' """.format(unit=newUnit)
items = dbUtils.execute_query(query)
self.unit = newUnit
self.factorToReferenceUnit = float(items[0][0])
self.offsetToReferenceUnit = float(items[0][1])
self.referenceUnit = items[0][2]
def append(self, newValue):
if isNoneEntry(newValue) or newValue==None:
value = physicalValue(None, self.unit)
value._repr_ = physicalValue(None, self.unit)
else:
unit = newValue.unit
value = self.dataType(newValue)
value = physicalValue(value, unit)
value._repr_ = newValue
super().append(value)
def put(self, pos, newValue):
if isNoneEntry(newValue) or newValue==None:
value = physicalValue(None, self.unit)
value._repr_ = physicalValue(None, self.unit)
else:
unit = newValue.unit
value = self.dataType(newValue)
value = physicalValue(value, unit)
value._repr_ = newValue
try:
self[pos] = value
except IndexError:
super().append(value)
def insert(self, pos, newValue):
if isNoneEntry(newValue) or newValue==None:
value = physicalValue(None, self.unit)
value._repr_ = physicalValue(None, self.unit)
else:
unit = newValue.unit
value = self.dataType(newValue)
value = physicalValue(value, unit)
value._repr_ = newValue
super().insert(pos, value)
def referenceUnitConvert(self):
for i,value in enumerate(self):
if isNoneEntry(value):
newValue = physicalValue( None, self.referenceUnit )
self[i] = newValue
else:
if value.unit==self.referenceUnit:
newValue = value
elif value.unit==self.unit:
newValue = physicalValue( self.factorToReferenceUnit*value + self.offsetToReferenceUnit, self.referenceUnit )
else:
raise(ValueError)
self[i] = newValue
return self
def inverseReferenceUnitConvert(self):
for i,value in enumerate(self):
if isNoneEntry(value):
newValue = physicalValue( None, self.unit )
self[i] = newValue
else:
if value.unit==self.unit:
newValue = value
elif value.unit==self.referenceUnit:
newValue = physicalValue( (value-self.offsetToReferenceUnit)/self.factorToReferenceUnit, self.unit )
else:
raise(ValueError)
self[i] = newValue
        return self
| 27.110497 | 121 | 0.629305 |
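Two of the numeric helpers in MdlUtilities above can be checked without the rest of the module, which imports dbUtils and expects a database connection. The sketch below restates the Rodrigues rotation and the buoyancy factor standalone, keeping the same (n, 3) row-vector layout used by np_dot/np_cross; the sample densities are illustrative values, not taken from the original code.
# Standalone check of two MdlUtilities helpers (same row-vector conventions).
import numpy as np
def np_dot(u, v):
    return np.sum(u * v, axis=1, keepdims=True)
def np_cross(u, v):
    return np.cross(u, v, axis=1)
def rodrigues(v, u, theta):
    # Rotate row vectors v about the unit axis u by angle theta.
    return v * np.cos(theta) + np_cross(u, v) * np.sin(theta) + u * np_dot(u, v) * (1 - np.cos(theta))
v = np.array([[1.0, 0.0, 0.0]])
z = np.array([[0.0, 0.0, 1.0]])
print(rodrigues(v, z, np.pi / 2))  # ~[[0, 1, 0]]: the x-axis rotated 90 degrees about z
def buoyancy_factor(OD, ID, rho_s, rho_e, rho_i):
    d_over_D_sq = (ID / OD) ** 2
    return ((1 - rho_e / rho_s) - d_over_D_sq * (1 - rho_i / rho_s)) / (1 - d_over_D_sq)
# With the same fluid inside and outside, this reduces to 1 - rho_fluid/rho_steel.
# Example densities (illustrative, any consistent units): steel ~65.4, mud 10.
print(buoyancy_factor(OD=5.5, ID=4.892, rho_s=65.4, rho_e=10.0, rho_i=10.0))  # ~0.847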
79479c6db8d9d180e99668269ab391ccd39e32ea | 14,759 | py | Python | pandas/core/indexers.py | nofarm3/pandas | c5b4272ed1e7d71266e06660ce9970527711fd55 | [
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 1 | 2021-01-14T15:16:34.000Z | 2021-01-14T15:16:34.000Z | pandas/core/indexers.py | gershonc/pandas | 963cf2b5abf4e1ee99a7f6b9031ad485804c5dff | [
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | null | null | null | pandas/core/indexers.py | gershonc/pandas | 963cf2b5abf4e1ee99a7f6b9031ad485804c5dff | [
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | null | null | null | """
Low-dependency indexing utilities.
"""
import warnings
import numpy as np
from pandas._typing import Any, AnyArrayLike, ArrayLike
from pandas.core.dtypes.common import (
is_array_like,
is_bool_dtype,
is_extension_array_dtype,
is_integer,
is_integer_dtype,
is_list_like,
)
from pandas.core.dtypes.generic import ABCIndex, ABCSeries
# -----------------------------------------------------------
# Indexer Identification
def is_valid_positional_slice(slc: slice) -> bool:
"""
Check if a slice object can be interpreted as a positional indexer.
Parameters
----------
slc : slice
Returns
-------
bool
Notes
-----
A valid positional slice may also be interpreted as a label-based slice
depending on the index being sliced.
"""
def is_int_or_none(val):
return val is None or is_integer(val)
return (
is_int_or_none(slc.start)
and is_int_or_none(slc.stop)
and is_int_or_none(slc.step)
)
def is_list_like_indexer(key) -> bool:
"""
Check if we have a list-like indexer that is *not* a NamedTuple.
Parameters
----------
key : object
Returns
-------
bool
"""
# allow a list_like, but exclude NamedTuples which can be indexers
return is_list_like(key) and not (isinstance(key, tuple) and type(key) is not tuple)
def is_scalar_indexer(indexer, ndim: int) -> bool:
"""
Return True if we are all scalar indexers.
Parameters
----------
indexer : object
ndim : int
Number of dimensions in the object being indexed.
Returns
-------
bool
"""
if ndim == 1 and is_integer(indexer):
# GH37748: allow indexer to be an integer for Series
return True
if isinstance(indexer, tuple):
if len(indexer) == ndim:
return all(
is_integer(x) or (isinstance(x, np.ndarray) and x.ndim == len(x) == 1)
for x in indexer
)
return False
def is_empty_indexer(indexer, arr_value: np.ndarray) -> bool:
"""
Check if we have an empty indexer.
Parameters
----------
indexer : object
arr_value : np.ndarray
Returns
-------
bool
"""
if is_list_like(indexer) and not len(indexer):
return True
if arr_value.ndim == 1:
if not isinstance(indexer, tuple):
indexer = (indexer,)
return any(isinstance(idx, np.ndarray) and len(idx) == 0 for idx in indexer)
return False
# -----------------------------------------------------------
# Indexer Validation
def check_setitem_lengths(indexer, value, values) -> bool:
"""
Validate that value and indexer are the same length.
An special-case is allowed for when the indexer is a boolean array
and the number of true values equals the length of ``value``. In
this case, no exception is raised.
Parameters
----------
indexer : sequence
Key for the setitem.
value : array-like
Value for the setitem.
values : array-like
Values being set into.
Returns
-------
bool
Whether this is an empty listlike setting which is a no-op.
Raises
------
ValueError
When the indexer is an ndarray or list and the lengths don't match.
"""
no_op = False
if isinstance(indexer, (np.ndarray, list)):
# We can ignore other listlikes because they are either
# a) not necessarily 1-D indexers, e.g. tuple
# b) boolean indexers e.g. BoolArray
if is_list_like(value):
if len(indexer) != len(value):
# boolean with truth values == len of the value is ok too
if not (
isinstance(indexer, np.ndarray)
and indexer.dtype == np.bool_
and len(indexer[indexer]) == len(value)
):
raise ValueError(
"cannot set using a list-like indexer "
"with a different length than the value"
)
if not len(indexer):
no_op = True
elif isinstance(indexer, slice):
if is_list_like(value):
if len(value) != length_of_indexer(indexer, values):
raise ValueError(
"cannot set using a slice indexer with a "
"different length than the value"
)
if not len(value):
no_op = True
return no_op
def validate_indices(indices: np.ndarray, n: int) -> None:
"""
Perform bounds-checking for an indexer.
-1 is allowed for indicating missing values.
Parameters
----------
indices : ndarray
n : int
Length of the array being indexed.
Raises
------
ValueError
Examples
--------
>>> validate_indices([1, 2], 3)
# OK
>>> validate_indices([1, -2], 3)
ValueError
>>> validate_indices([1, 2, 3], 3)
IndexError
>>> validate_indices([-1, -1], 0)
# OK
>>> validate_indices([0, 1], 0)
IndexError
"""
if len(indices):
min_idx = indices.min()
if min_idx < -1:
msg = f"'indices' contains values less than allowed ({min_idx} < -1)"
raise ValueError(msg)
max_idx = indices.max()
if max_idx >= n:
raise IndexError("indices are out-of-bounds")
# -----------------------------------------------------------
# Indexer Conversion
def maybe_convert_indices(indices, n: int):
"""
Attempt to convert indices into valid, positive indices.
If we have negative indices, translate to positive here.
If we have indices that are out-of-bounds, raise an IndexError.
Parameters
----------
indices : array-like
Array of indices that we are to convert.
n : int
Number of elements in the array that we are indexing.
Returns
-------
array-like
An array-like of positive indices that correspond to the ones
that were passed in initially to this function.
Raises
------
IndexError
One of the converted indices either exceeded the number of,
elements (specified by `n`), or was still negative.
"""
if isinstance(indices, list):
indices = np.array(indices)
if len(indices) == 0:
# If `indices` is empty, np.array will return a float,
# and will cause indexing errors.
return np.empty(0, dtype=np.intp)
mask = indices < 0
if mask.any():
indices = indices.copy()
indices[mask] += n
mask = (indices >= n) | (indices < 0)
if mask.any():
raise IndexError("indices are out-of-bounds")
return indices
# -----------------------------------------------------------
# Unsorted
def is_exact_shape_match(target: ArrayLike, value: ArrayLike) -> bool:
"""
Is setting this value into this target overwriting the entire column?
Parameters
----------
target : np.ndarray or ExtensionArray
value : np.ndarray or ExtensionArray
Returns
-------
bool
"""
return (
len(value.shape) > 0
and len(target.shape) > 0
and value.shape[0] == target.shape[0]
and value.size == target.size
)
def length_of_indexer(indexer, target=None) -> int:
"""
Return the expected length of target[indexer]
Returns
-------
int
"""
if target is not None and isinstance(indexer, slice):
target_len = len(target)
start = indexer.start
stop = indexer.stop
step = indexer.step
if start is None:
start = 0
elif start < 0:
start += target_len
if stop is None or stop > target_len:
stop = target_len
elif stop < 0:
stop += target_len
if step is None:
step = 1
elif step < 0:
start, stop = stop + 1, start + 1
step = -step
return (stop - start + step - 1) // step
elif isinstance(indexer, (ABCSeries, ABCIndex, np.ndarray, list)):
if isinstance(indexer, list):
indexer = np.array(indexer)
if indexer.dtype == bool:
# GH#25774
return indexer.sum()
return len(indexer)
elif not is_list_like_indexer(indexer):
return 1
raise AssertionError("cannot find the length of the indexer")
def deprecate_ndim_indexing(result, stacklevel=3):
"""
Helper function to raise the deprecation warning for multi-dimensional
indexing on 1D Series/Index.
GH#27125 indexer like idx[:, None] expands dim, but we cannot do that
and keep an index, so we currently return ndarray, which is deprecated
(Deprecation GH#30588).
"""
if np.ndim(result) > 1:
warnings.warn(
"Support for multi-dimensional indexing (e.g. `obj[:, None]`) "
"is deprecated and will be removed in a future "
"version. Convert to a numpy array before indexing instead.",
FutureWarning,
stacklevel=stacklevel,
)
def unpack_1tuple(tup):
"""
If we have a length-1 tuple/list that contains a slice, unpack to just
the slice.
Notes
-----
The list case is deprecated.
"""
if len(tup) == 1 and isinstance(tup[0], slice):
# if we don't have a MultiIndex, we may still be able to handle
# a 1-tuple. see test_1tuple_without_multiindex
if isinstance(tup, list):
# GH#31299
warnings.warn(
"Indexing with a single-item list containing a "
"slice is deprecated and will raise in a future "
"version. Pass a tuple instead.",
FutureWarning,
stacklevel=3,
)
return tup[0]
return tup
# -----------------------------------------------------------
# Public indexer validation
def check_array_indexer(array: AnyArrayLike, indexer: Any) -> Any:
"""
Check if `indexer` is a valid array indexer for `array`.
For a boolean mask, `array` and `indexer` are checked to have the same
length. The dtype is validated, and if it is an integer or boolean
ExtensionArray, it is checked if there are missing values present, and
it is converted to the appropriate numpy array. Other dtypes will raise
an error.
Non-array indexers (integer, slice, Ellipsis, tuples, ..) are passed
through as is.
.. versionadded:: 1.0.0
Parameters
----------
array : array-like
The array that is being indexed (only used for the length).
indexer : array-like or list-like
The array-like that's used to index. List-like input that is not yet
a numpy array or an ExtensionArray is converted to one. Other input
types are passed through as is.
Returns
-------
numpy.ndarray
The validated indexer as a numpy array that can be used to index.
Raises
------
IndexError
When the lengths don't match.
ValueError
When `indexer` cannot be converted to a numpy ndarray to index
(e.g. presence of missing values).
See Also
--------
api.types.is_bool_dtype : Check if `key` is of boolean dtype.
Examples
--------
When checking a boolean mask, a boolean ndarray is returned when the
arguments are all valid.
>>> mask = pd.array([True, False])
>>> arr = pd.array([1, 2])
>>> pd.api.indexers.check_array_indexer(arr, mask)
array([ True, False])
An IndexError is raised when the lengths don't match.
>>> mask = pd.array([True, False, True])
>>> pd.api.indexers.check_array_indexer(arr, mask)
Traceback (most recent call last):
...
IndexError: Boolean index has wrong length: 3 instead of 2.
NA values in a boolean array are treated as False.
>>> mask = pd.array([True, pd.NA])
>>> pd.api.indexers.check_array_indexer(arr, mask)
array([ True, False])
A numpy boolean mask will get passed through (if the length is correct):
>>> mask = np.array([True, False])
>>> pd.api.indexers.check_array_indexer(arr, mask)
array([ True, False])
Similarly for integer indexers, an integer ndarray is returned when it is
a valid indexer, otherwise an error is (for integer indexers, a matching
length is not required):
>>> indexer = pd.array([0, 2], dtype="Int64")
>>> arr = pd.array([1, 2, 3])
>>> pd.api.indexers.check_array_indexer(arr, indexer)
array([0, 2])
>>> indexer = pd.array([0, pd.NA], dtype="Int64")
>>> pd.api.indexers.check_array_indexer(arr, indexer)
Traceback (most recent call last):
...
ValueError: Cannot index with an integer indexer containing NA values
For non-integer/boolean dtypes, an appropriate error is raised:
>>> indexer = np.array([0., 2.], dtype="float64")
>>> pd.api.indexers.check_array_indexer(arr, indexer)
Traceback (most recent call last):
...
IndexError: arrays used as indices must be of integer or boolean type
"""
from pandas.core.construction import array as pd_array
# whatever is not an array-like is returned as-is (possible valid array
# indexers that are not array-like: integer, slice, Ellipsis, None)
# In this context, tuples are not considered as array-like, as they have
# a specific meaning in indexing (multi-dimensional indexing)
if is_list_like(indexer):
if isinstance(indexer, tuple):
return indexer
else:
return indexer
# convert list-likes to array
if not is_array_like(indexer):
indexer = pd_array(indexer)
if len(indexer) == 0:
# empty list is converted to float array by pd.array
indexer = np.array([], dtype=np.intp)
dtype = indexer.dtype
if is_bool_dtype(dtype):
if is_extension_array_dtype(dtype):
indexer = indexer.to_numpy(dtype=bool, na_value=False)
else:
indexer = np.asarray(indexer, dtype=bool)
# GH26658
if len(indexer) != len(array):
raise IndexError(
f"Boolean index has wrong length: "
f"{len(indexer)} instead of {len(array)}"
)
elif is_integer_dtype(dtype):
try:
indexer = np.asarray(indexer, dtype=np.intp)
except ValueError as err:
raise ValueError(
"Cannot index with an integer indexer containing NA values"
) from err
else:
raise IndexError("arrays used as indices must be of integer or boolean type")
return indexer
| 28.328215 | 88 | 0.587574 |
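Of the helpers above, check_array_indexer is the one exposed publicly (as pandas.api.indexers.check_array_indexer, per its own docstring). A short sketch of the boolean-mask path, which is the case most callers hit; the array values are arbitrary.
# Sketch of the public entry point documented above; values are arbitrary.
import pandas as pd
arr = pd.array([10, 20, 30])
mask = pd.array([True, pd.NA, False])  # nullable boolean mask with a missing value
# NA in the mask is treated as False and the result is a plain bool ndarray.
validated = pd.api.indexers.check_array_indexer(arr, mask)
print(validated)       # roughly: [ True False False]
print(arr[validated])  # an Int64 extension array containing just 10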
79479cd3da1396b399deaf518e3bf37d21005725 | 2,474 | py | Python | videomaker/interface/menu/menu.py | TheTimebike/VideoMaker-Studio | 289fd3b9f3c27d298ee94b1e79415a20ec084c4c | [
"MIT"
] | 4 | 2019-03-03T12:10:20.000Z | 2021-09-05T12:30:25.000Z | videomaker/interface/menu/menu.py | TheTimebike/VideoMaker-Studio | 289fd3b9f3c27d298ee94b1e79415a20ec084c4c | [
"MIT"
] | null | null | null | videomaker/interface/menu/menu.py | TheTimebike/VideoMaker-Studio | 289fd3b9f3c27d298ee94b1e79415a20ec084c4c | [
"MIT"
] | 2 | 2019-03-27T23:40:34.000Z | 2019-10-07T11:09:32.000Z | from tkinter import *
from videomaker.functions.startThread import startThread
from videomaker.functions.clearSelection import clearSelections
from videomaker.functions.deleteOldClips import deleteOldClips
from videomaker.functions.addTheme import addNewTheme
from videomaker.functions.redirect import *
from videomaker.interface.newthemewindow.newThemeWindow import initWindow
from videomaker.functions.addPreset import addPreset
from videomaker.functions.savePreset import savePreset
def initMenubar(focus):
focus.menuBar = Menu(focus.master)
focus.master.config(menu=focus.menuBar)
focus.menuDropdownStudio = Menu(focus.menuBar)
focus.menuDropdownView = Menu(focus.menuBar)
focus.menuDropdownDebug = Menu(focus.menuBar)
focus.menuDropdownHelp = Menu(focus.menuBar)
focus.menuDropdownStartFromFile = Menu(focus.menuBar)
focus.menuDropdownStudio.add_command(label="Start", command= lambda: startThread(focus))
focus.menuDropdownStudio.add_command(label="Clear Boxes", command=lambda: clearSelections(focus))
focus.menuDropdownStudio.add_command(label="Remove Old Clips", command=deleteOldClips)
focus.menuDropdownStudio.add_command(label="Quit", command=quitProgram)
focus.menuDropdownView.add_command(label="Design A New Theme", command=lambda: initWindow(focus))
addNewTheme(focus)
focus.loggingModeBool = BooleanVar()
focus.loggingModeBool.set("false")
focus.menuDropdownDebug.add_checkbutton(label="Logging Mode", onvalue=True, offvalue=False, variable=focus.loggingModeBool)
focus.menuDropdownHelp.add_command(label="Source Code", command=redirectToSourceCode)
focus.menuDropdownHelp.add_command(label="File Issue", command=redirectToGithubIssue)
focus.menuDropdownHelp.add_command(label="Contact The Creator", command=redirectToRedditMessage)
focus.menuDropdownHelp.add_command(label="How To Find Reddit Tokens?", command=redirectToRedditTokens)
focus.menuDropdownStartFromFile.add_command(label="Save Current Settings", command= lambda: savePreset(focus))
addPreset(focus)
focus.menuBar.add_cascade(label="VideoMaker Studio", menu=focus.menuDropdownStudio)
focus.menuBar.add_cascade(label="Presets", menu=focus.menuDropdownStartFromFile)
focus.menuBar.add_cascade(label="View", menu=focus.menuDropdownView)
focus.menuBar.add_cascade(label="Debug", menu=focus.menuDropdownDebug)
    focus.menuBar.add_cascade(label="Help", menu=focus.menuDropdownHelp)
| 54.977778 | 127 | 0.806386 |
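initMenubar above only runs inside the VideoMaker application, since it needs the focus object and its callbacks. For reference, a stripped-down Tkinter sketch of the same pattern (a menubar with one cascade, a command and a checkbutton) using only the standard library; the labels and the quit command are placeholders, not from the project.
# Minimal Tkinter menubar following the same wiring pattern as initMenubar.
import tkinter as tk
root = tk.Tk()
menubar = tk.Menu(root)
root.config(menu=menubar)
app_menu = tk.Menu(menubar)
app_menu.add_command(label="Quit", command=root.destroy)
logging_on = tk.BooleanVar(value=False)
app_menu.add_checkbutton(label="Logging Mode", onvalue=True, offvalue=False,
                         variable=logging_on)
menubar.add_cascade(label="App", menu=app_menu)
root.mainloop()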
79479d7c8c056c0421467bc30ff31b0444fcbcaf | 3,565 | py | Python | venv/Lib/site-packages/pyrogram/raw/types/messages/peer_dialogs.py | D1ne2021/jjhhhjj | a090da30983b3ef276dfe4cef2ded4526f36002a | [
"MIT"
] | 2 | 2021-12-13T07:09:55.000Z | 2022-01-12T12:15:20.000Z | venv/Lib/site-packages/pyrogram/raw/types/messages/peer_dialogs.py | hoangkiet1906/Botcie_ver1 | c133b915edde06dac690a7dc6ca160f6792fc4c8 | [
"MIT"
] | null | null | null | venv/Lib/site-packages/pyrogram/raw/types/messages/peer_dialogs.py | hoangkiet1906/Botcie_ver1 | c133b915edde06dac690a7dc6ca160f6792fc4c8 | [
"MIT"
] | null | null | null | # Pyrogram - Telegram MTProto API Client Library for Python
# Copyright (C) 2017-2021 Dan <https://github.com/delivrance>
#
# This file is part of Pyrogram.
#
# Pyrogram is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Pyrogram is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Pyrogram. If not, see <http://www.gnu.org/licenses/>.
from io import BytesIO
from pyrogram.raw.core.primitives import Int, Long, Int128, Int256, Bool, Bytes, String, Double, Vector
from pyrogram.raw.core import TLObject
from pyrogram import raw
from typing import List, Union, Any
# # # # # # # # # # # # # # # # # # # # # # # #
# !!! WARNING !!! #
# This is a generated file! #
# All changes made in this file will be lost! #
# # # # # # # # # # # # # # # # # # # # # # # #
class PeerDialogs(TLObject): # type: ignore
"""This object is a constructor of the base type :obj:`~pyrogram.raw.base.messages.PeerDialogs`.
Details:
- Layer: ``126``
- ID: ``0x3371c354``
Parameters:
dialogs: List of :obj:`Dialog <pyrogram.raw.base.Dialog>`
messages: List of :obj:`Message <pyrogram.raw.base.Message>`
chats: List of :obj:`Chat <pyrogram.raw.base.Chat>`
users: List of :obj:`User <pyrogram.raw.base.User>`
state: :obj:`updates.State <pyrogram.raw.base.updates.State>`
See Also:
This object can be returned by 2 methods:
.. hlist::
:columns: 2
- :obj:`messages.GetPeerDialogs <pyrogram.raw.functions.messages.GetPeerDialogs>`
- :obj:`messages.GetPinnedDialogs <pyrogram.raw.functions.messages.GetPinnedDialogs>`
"""
__slots__: List[str] = ["dialogs", "messages", "chats", "users", "state"]
ID = 0x3371c354
QUALNAME = "types.messages.PeerDialogs"
def __init__(self, *, dialogs: List["raw.base.Dialog"], messages: List["raw.base.Message"], chats: List["raw.base.Chat"], users: List["raw.base.User"], state: "raw.base.updates.State") -> None:
self.dialogs = dialogs # Vector<Dialog>
self.messages = messages # Vector<Message>
self.chats = chats # Vector<Chat>
self.users = users # Vector<User>
self.state = state # updates.State
@staticmethod
def read(data: BytesIO, *args: Any) -> "PeerDialogs":
# No flags
dialogs = TLObject.read(data)
messages = TLObject.read(data)
chats = TLObject.read(data)
users = TLObject.read(data)
state = TLObject.read(data)
return PeerDialogs(dialogs=dialogs, messages=messages, chats=chats, users=users, state=state)
def write(self) -> bytes:
data = BytesIO()
data.write(Int(self.ID, False))
# No flags
data.write(Vector(self.dialogs))
data.write(Vector(self.messages))
data.write(Vector(self.chats))
data.write(Vector(self.users))
data.write(self.state.write())
return data.getvalue()
| 34.95098 | 197 | 0.617952 |
79479d98145a6e5beb5d4b291d7b4365562ab001 | 1,524 | py | Python | debug_toolbar/panels/templates/views.py | zborboa-google/django-debug-toolbar | 2b1d98b0e098518171211fe98e447449f73dadf8 | [
"BSD-3-Clause"
] | 2 | 2020-02-14T18:10:16.000Z | 2020-05-17T08:16:54.000Z | debug_toolbar/panels/templates/views.py | zborboa-google/django-debug-toolbar | 2b1d98b0e098518171211fe98e447449f73dadf8 | [
"BSD-3-Clause"
] | null | null | null | debug_toolbar/panels/templates/views.py | zborboa-google/django-debug-toolbar | 2b1d98b0e098518171211fe98e447449f73dadf8 | [
"BSD-3-Clause"
] | null | null | null | from __future__ import absolute_import, unicode_literals
from django.http import HttpResponseBadRequest
from django.conf import settings
from django.shortcuts import render
from django.template import TemplateDoesNotExist
from django.template.loader import find_template_loader
from django.utils.safestring import mark_safe
def template_source(request):
"""
Return the source of a template, syntax-highlighted by Pygments if
it's available.
"""
template_name = request.GET.get('template', None)
if template_name is None:
return HttpResponseBadRequest('"template" key is required')
loaders = []
for loader_name in settings.TEMPLATE_LOADERS:
loader = find_template_loader(loader_name)
if loader is not None:
loaders.append(loader)
for loader in loaders:
try:
source, display_name = loader.load_template_source(template_name)
break
except TemplateDoesNotExist:
source = "Template Does Not Exist: %s" % (template_name,)
try:
from pygments import highlight
from pygments.lexers import HtmlDjangoLexer
from pygments.formatters import HtmlFormatter
source = highlight(source, HtmlDjangoLexer(), HtmlFormatter())
source = mark_safe(source)
source.pygmentized = True
except ImportError:
pass
return render(request, 'debug_toolbar/panels/template_source.html', {
'source': source,
'template_name': template_name
})
| 32.425532 | 77 | 0.703412 |
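The Pygments branch in template_source above is the only piece that can be tried outside Django. A small standalone sketch, assuming Pygments is installed; the template string is an arbitrary example.
# Stand-alone version of the highlighting step used in template_source.
from pygments import highlight
from pygments.lexers import HtmlDjangoLexer
from pygments.formatters import HtmlFormatter
source = "{% for item in items %}<li>{{ item }}</li>{% endfor %}"
print(highlight(source, HtmlDjangoLexer(), HtmlFormatter()))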
7947a1149a505848a245086effbeed4650a7ef68 | 3,078 | py | Python | tatsu/semantics.py | smarty-timmi/TatSu | f09240959b51782cc3c38a9059b32f212077c802 | [
"BSD-2-Clause"
] | 1 | 2021-02-25T10:44:10.000Z | 2021-02-25T10:44:10.000Z | tatsu/semantics.py | smarty-timmi/TatSu | f09240959b51782cc3c38a9059b32f212077c802 | [
"BSD-2-Clause"
] | null | null | null | tatsu/semantics.py | smarty-timmi/TatSu | f09240959b51782cc3c38a9059b32f212077c802 | [
"BSD-2-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import generator_stop
import builtins
from tatsu.util import simplify_list
from tatsu.exceptions import SemanticError
from tatsu.objectmodel import Node
from tatsu.objectmodel import BASE_CLASS_TOKEN
from tatsu.synth import synthesize
class ASTSemantics(object):
def group(self, ast, *args):
return simplify_list(ast)
def element(self, ast, *args):
return simplify_list(ast)
def sequence(self, ast, *args):
return simplify_list(ast)
def choice(self, ast, *args):
if len(ast) == 1:
return simplify_list(ast[0])
return ast
class ModelBuilderSemantics(object):
""" Intended as a semantic action for parsing, a ModelBuilderSemantics creates
nodes using the class name given as first parameter to a grammar
rule, and synthesizes the class/type if it's not known.
"""
def __init__(self, context=None, base_type=Node, types=None):
self.ctx = context
self.base_type = base_type
self.constructors = dict()
for t in types or ():
self._register_constructor(t)
def _register_constructor(self, constructor):
self.constructors[constructor.__name__] = constructor
return constructor
def _find_existing_constructor(self, typename):
constructor = builtins
for name in typename.split('.'):
try:
context = vars(constructor)
except Exception as e:
raise SemanticError(
'Could not find constructor for %s (%s): %s'
% (typename, type(constructor).__name__, str(e))
)
if name in context:
constructor = context[name]
else:
constructor = None
break
return constructor
def _get_constructor(self, typename, base):
typename = str(typename) # cannot be unicode in Python 2.7
if typename in self.constructors:
return self.constructors[typename]
constructor = self._find_existing_constructor(typename)
if not constructor:
constructor = synthesize(typename, base)
return self._register_constructor(constructor)
def _default(self, ast, *args, **kwargs):
if not args:
return ast
typespec = args[0].split(BASE_CLASS_TOKEN)
typename = typespec[0]
bases = typespec[-1:0:-1]
base = self.base_type
for base_ in bases:
base = self._get_constructor(base_, base)
constructor = self._get_constructor(typename, base)
try:
if type(constructor) is type and issubclass(constructor, Node):
return constructor(*args[1:], ast=ast, ctx=self.ctx, **kwargs)
else:
return constructor(ast, *args[1:], **kwargs)
except Exception as e:
raise SemanticError(
'Could not call constructor for %s: %s'
% (typename, str(e))
)
| 30.78 | 82 | 0.607862 |
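ModelBuilderSemantics above is meant to be handed to a TatSu parser so that rules annotated with a class name produce Node instances of a synthesized type. The sketch below follows that documented pattern; the grammar, rule and class names are invented for illustration, and the exact grammar syntax should be checked against the installed TatSu version.
# Hedged sketch: driving ModelBuilderSemantics from a small TatSu grammar.
import tatsu
from tatsu.semantics import ModelBuilderSemantics
GRAMMAR = r"""
    start::Pair = left:word '=' right:word $ ;
    word = /\w+/ ;
"""
model = tatsu.compile(GRAMMAR)
node = model.parse("x = y", semantics=ModelBuilderSemantics())
print(type(node).__name__)    # expected: Pair (a synthesized Node subclass)
print(node.left, node.right)  # expected: x y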
7947a237df4aa77dc886c2970f7abc8545e64f25 | 16,063 | gyp | Python | node.gyp | adityaanupindi/node | f755ecf484a9789525746475b924ddf2b3f316d0 | [
"Artistic-2.0"
] | 1 | 2019-06-27T13:01:40.000Z | 2019-06-27T13:01:40.000Z | node.gyp | hafeez-syed/node | 5b230007adba91163a2f49dbdd9a16d5834fd322 | [
"Artistic-2.0"
] | null | null | null | node.gyp | hafeez-syed/node | 5b230007adba91163a2f49dbdd9a16d5834fd322 | [
"Artistic-2.0"
] | 1 | 2021-03-15T17:23:59.000Z | 2021-03-15T17:23:59.000Z | {
'variables': {
'v8_use_snapshot%': 'true',
# Turn off -Werror in V8
# See http://codereview.chromium.org/8159015
'werror': '',
'node_use_dtrace%': 'false',
'node_use_etw%': 'false',
'node_use_perfctr%': 'false',
'node_has_winsdk%': 'false',
'node_shared_v8%': 'false',
'node_shared_zlib%': 'false',
'node_shared_http_parser%': 'false',
'node_shared_cares%': 'false',
'node_shared_libuv%': 'false',
'node_use_openssl%': 'true',
'node_use_systemtap%': 'false',
'node_shared_openssl%': 'false',
'node_use_mdb%': 'false',
'library_files': [
'src/node.js',
'lib/_debugger.js',
'lib/_linklist.js',
'lib/assert.js',
'lib/buffer.js',
'lib/child_process.js',
'lib/console.js',
'lib/constants.js',
'lib/crypto.js',
'lib/cluster.js',
'lib/dgram.js',
'lib/dns.js',
'lib/domain.js',
'lib/events.js',
'lib/freelist.js',
'lib/fs.js',
'lib/http.js',
'lib/_http_agent.js',
'lib/_http_client.js',
'lib/_http_common.js',
'lib/_http_incoming.js',
'lib/_http_outgoing.js',
'lib/_http_server.js',
'lib/https.js',
'lib/module.js',
'lib/net.js',
'lib/os.js',
'lib/path.js',
'lib/punycode.js',
'lib/querystring.js',
'lib/readline.js',
'lib/repl.js',
'lib/smalloc.js',
'lib/stream.js',
'lib/_stream_readable.js',
'lib/_stream_writable.js',
'lib/_stream_duplex.js',
'lib/_stream_transform.js',
'lib/_stream_passthrough.js',
'lib/string_decoder.js',
'lib/sys.js',
'lib/timers.js',
'lib/tls.js',
'lib/_tls_legacy.js',
'lib/_tls_wrap.js',
'lib/tty.js',
'lib/url.js',
'lib/util.js',
'lib/vm.js',
'lib/zlib.js',
],
},
'targets': [
{
'target_name': 'node',
'type': 'executable',
'dependencies': [
'node_js2c#host',
],
'include_dirs': [
'src',
'tools/msvs/genfiles',
'deps/uv/src/ares',
'<(SHARED_INTERMEDIATE_DIR)' # for node_natives.h
],
'sources': [
'src/fs_event_wrap.cc',
'src/cares_wrap.cc',
'src/handle_wrap.cc',
'src/node.cc',
'src/node_buffer.cc',
'src/node_constants.cc',
'src/node_contextify.cc',
'src/node_extensions.cc',
'src/node_file.cc',
'src/node_http_parser.cc',
'src/node_javascript.cc',
'src/node_main.cc',
'src/node_os.cc',
'src/node_stat_watcher.cc',
'src/node_watchdog.cc',
'src/node_zlib.cc',
'src/pipe_wrap.cc',
'src/signal_wrap.cc',
'src/smalloc.cc',
'src/string_bytes.cc',
'src/stream_wrap.cc',
'src/tcp_wrap.cc',
'src/timer_wrap.cc',
'src/tty_wrap.cc',
'src/process_wrap.cc',
'src/udp_wrap.cc',
'src/uv.cc',
# headers to make for a more pleasant IDE experience
'src/env.h',
'src/env-inl.h',
'src/handle_wrap.h',
'src/node.h',
'src/node_buffer.h',
'src/node_constants.h',
'src/node_contextify.h',
'src/node_extensions.h',
'src/node_file.h',
'src/node_http_parser.h',
'src/node_internals.h',
'src/node_javascript.h',
'src/node_root_certs.h',
'src/node_version.h',
'src/node_watchdog.h',
'src/node_wrap.h',
'src/pipe_wrap.h',
'src/queue.h',
'src/smalloc.h',
'src/tty_wrap.h',
'src/tcp_wrap.h',
'src/udp_wrap.h',
'src/req_wrap.h',
'src/string_bytes.h',
'src/stream_wrap.h',
'src/tree.h',
'src/util.h',
'src/util-inl.h',
'src/weak-object.h',
'src/weak-object-inl.h',
'deps/http_parser/http_parser.h',
'<(SHARED_INTERMEDIATE_DIR)/node_natives.h',
# javascript files to make for an even more pleasant IDE experience
'<@(library_files)',
# node.gyp is added to the project by default.
'common.gypi',
],
'defines': [
'NODE_WANT_INTERNALS=1',
'ARCH="<(target_arch)"',
'PLATFORM="<(OS)"',
'NODE_TAG="<(node_tag)"',
],
'conditions': [
[ 'node_use_openssl=="true"', {
'defines': [ 'HAVE_OPENSSL=1' ],
'sources': [
'src/node_crypto.cc',
'src/node_crypto_bio.cc',
'src/node_crypto_clienthello.cc',
'src/node_crypto.h',
'src/node_crypto_bio.h',
'src/node_crypto_clienthello.h',
'src/tls_wrap.cc',
'src/tls_wrap.h'
],
'conditions': [
[ 'node_shared_openssl=="false"', {
'dependencies': [ './deps/openssl/openssl.gyp:openssl' ],
}]]
}, {
'defines': [ 'HAVE_OPENSSL=0' ]
}],
[ 'node_use_dtrace=="true"', {
'defines': [ 'HAVE_DTRACE=1' ],
'dependencies': [ 'node_dtrace_header' ],
'include_dirs': [ '<(SHARED_INTERMEDIATE_DIR)' ],
#
# DTrace is supported on solaris, mac, and bsd. There are three
# object files associated with DTrace support, but they're not all
# used all the time:
#
# node_dtrace.o all configurations
# node_dtrace_ustack.o not supported on OS X
# node_dtrace_provider.o All except OS X. "dtrace -G" is not
# used on OS X.
#
# Note that node_dtrace_provider.cc and node_dtrace_ustack.cc do not
# actually exist. They're listed here to trick GYP into linking the
# corresponding object files into the final "node" executable. These
# object files are generated by "dtrace -G" using custom actions
# below, and the GYP-generated Makefiles will properly build them when
# needed.
#
'sources': [
'src/node_dtrace.cc',
],
'conditions': [ [
'OS!="mac"', {
'sources': [
'src/node_dtrace_ustack.cc',
'src/node_dtrace_provider.cc',
]
}
] ]
} ],
[ 'node_use_mdb=="true"', {
'dependencies': [ 'node_mdb' ],
'include_dirs': [ '<(SHARED_INTERMEDIATE_DIR)' ],
'sources': [
'src/node_mdb.cc',
],
} ],
[ 'node_use_systemtap=="true"', {
'defines': [ 'HAVE_SYSTEMTAP=1', 'STAP_SDT_V1=1' ],
'sources': [
'src/node_dtrace.cc',
],
} ],
[ 'node_use_etw=="true"', {
'defines': [ 'HAVE_ETW=1' ],
'dependencies': [ 'node_etw' ],
'sources': [
'src/node_win32_etw_provider.h',
'src/node_win32_etw_provider-inl.h',
'src/node_win32_etw_provider.cc',
'src/node_dtrace.cc',
'tools/msvs/genfiles/node_etw_provider.h',
'tools/msvs/genfiles/node_etw_provider.rc',
]
} ],
[ 'node_use_perfctr=="true"', {
'defines': [ 'HAVE_PERFCTR=1' ],
'dependencies': [ 'node_perfctr' ],
'sources': [
'src/node_win32_perfctr_provider.h',
'src/node_win32_perfctr_provider.cc',
'src/node_counters.cc',
'src/node_counters.h',
'tools/msvs/genfiles/node_perfctr_provider.rc',
]
} ],
[ 'v8_postmortem_support=="true"', {
'dependencies': [ 'deps/v8/tools/gyp/v8.gyp:postmortem-metadata' ],
}],
[ 'node_shared_v8=="false"', {
'sources': [
'deps/v8/include/v8.h',
'deps/v8/include/v8-debug.h',
],
'dependencies': [ 'deps/v8/tools/gyp/v8.gyp:v8' ],
}],
[ 'node_shared_zlib=="false"', {
'dependencies': [ 'deps/zlib/zlib.gyp:zlib' ],
}],
[ 'node_shared_http_parser=="false"', {
'dependencies': [ 'deps/http_parser/http_parser.gyp:http_parser' ],
}],
[ 'node_shared_cares=="false"', {
'dependencies': [ 'deps/cares/cares.gyp:cares' ],
}],
[ 'node_shared_libuv=="false"', {
'dependencies': [ 'deps/uv/uv.gyp:libuv' ],
}],
[ 'OS=="win"', {
'sources': [
'src/res/node.rc',
],
'defines': [
'FD_SETSIZE=1024',
# we need to use node's preferred "win32" rather than gyp's preferred "win"
'PLATFORM="win32"',
'_UNICODE=1',
],
'libraries': [ '-lpsapi.lib' ]
}, { # POSIX
'defines': [ '__POSIX__' ],
}],
[ 'OS=="mac"', {
'defines!': [
'PLATFORM="mac"',
],
'defines': [
# we need to use node's preferred "darwin" rather than gyp's preferred "mac"
'PLATFORM="darwin"',
],
}],
[ 'OS=="freebsd"', {
'libraries': [
'-lutil',
'-lkvm',
],
}],
[ 'OS=="solaris"', {
'libraries': [
'-lkstat',
'-lumem',
],
'defines!': [
'PLATFORM="solaris"',
],
'defines': [
# we need to use node's preferred "sunos"
# rather than gyp's preferred "solaris"
'PLATFORM="sunos"',
],
}],
],
'msvs_settings': {
'VCLinkerTool': {
'SubSystem': 1, # /subsystem:console
},
},
},
# generate ETW header and resource files
{
'target_name': 'node_etw',
'type': 'none',
'conditions': [
[ 'node_use_etw=="true" and node_has_winsdk=="true"', {
'actions': [
{
'action_name': 'node_etw',
'inputs': [ 'src/res/node_etw_provider.man' ],
'outputs': [
'tools/msvs/genfiles/node_etw_provider.rc',
'tools/msvs/genfiles/node_etw_provider.h',
'tools/msvs/genfiles/node_etw_providerTEMP.BIN',
],
'action': [ 'mc <@(_inputs) -h tools/msvs/genfiles -r tools/msvs/genfiles' ]
}
]
} ]
]
},
# generate perf counter header and resource files
{
'target_name': 'node_perfctr',
'type': 'none',
'conditions': [
[ 'node_use_perfctr=="true" and node_has_winsdk=="true"', {
'actions': [
{
'action_name': 'node_perfctr_man',
'inputs': [ 'src/res/node_perfctr_provider.man' ],
'outputs': [
'tools/msvs/genfiles/node_perfctr_provider.h',
'tools/msvs/genfiles/node_perfctr_provider.rc',
'tools/msvs/genfiles/MSG00001.BIN',
],
'action': [ 'ctrpp <@(_inputs) '
'-o tools/msvs/genfiles/node_perfctr_provider.h '
'-rc tools/msvs/genfiles/node_perfctr_provider.rc'
]
},
],
} ]
]
},
{
'target_name': 'node_js2c',
'type': 'none',
'toolsets': ['host'],
'actions': [
{
'action_name': 'node_js2c',
'inputs': [
'<@(library_files)',
'./config.gypi',
],
'outputs': [
'<(SHARED_INTERMEDIATE_DIR)/node_natives.h',
],
'conditions': [
[ 'node_use_dtrace=="false"'
' and node_use_etw=="false"'
' and node_use_systemtap=="false"',
{
'inputs': ['src/notrace_macros.py']
}],
[ 'node_use_perfctr=="false"', {
'inputs': [ 'src/perfctr_macros.py' ]
}]
],
'action': [
'<(python)',
'tools/js2c.py',
'<@(_outputs)',
'<@(_inputs)',
],
},
],
}, # end node_js2c
{
'target_name': 'node_dtrace_header',
'type': 'none',
'conditions': [
[ 'node_use_dtrace=="true" or node_use_systemtap=="true"', {
'actions': [
{
'action_name': 'node_dtrace_header',
'inputs': [ 'src/node_provider.d' ],
'outputs': [ '<(SHARED_INTERMEDIATE_DIR)/node_provider.h' ],
'action': [ 'dtrace', '-h', '-xnolibs', '-s', '<@(_inputs)',
'-o', '<@(_outputs)' ]
}
]
} ]
]
},
{
'target_name': 'node_mdb',
'type': 'none',
'conditions': [
[ 'node_use_mdb=="true"',
{
'dependencies': [ 'deps/mdb_v8/mdb_v8.gyp:mdb_v8' ],
'actions': [
{
'action_name': 'node_mdb',
'inputs': [ '<(PRODUCT_DIR)/obj.target/deps/mdb_v8/mdb_v8.so' ],
'outputs': [ '<(PRODUCT_DIR)/obj.target/node/src/node_mdb.o' ],
'conditions': [
[ 'target_arch=="ia32"', {
'action': [ 'elfwrap', '-o', '<@(_outputs)', '<@(_inputs)' ],
} ],
[ 'target_arch=="x64"', {
'action': [ 'elfwrap', '-64', '-o', '<@(_outputs)', '<@(_inputs)' ],
} ],
],
},
],
},
],
],
},
{
'target_name': 'node_dtrace_provider',
'type': 'none',
'conditions': [
[ 'node_use_dtrace=="true" and OS!="mac"', {
'actions': [
{
'action_name': 'node_dtrace_provider_o',
'inputs': [
'<(PRODUCT_DIR)/obj.target/libuv/deps/uv/src/unix/core.o',
'<(PRODUCT_DIR)/obj.target/node/src/node_dtrace.o',
],
'outputs': [
'<(PRODUCT_DIR)/obj.target/node/src/node_dtrace_provider.o'
],
'action': [ 'dtrace', '-G', '-xnolibs', '-s', 'src/node_provider.d',
'-s', 'deps/uv/src/unix/uv-dtrace.d', '<@(_inputs)',
'-o', '<@(_outputs)' ]
}
]
} ]
]
},
{
'target_name': 'node_dtrace_ustack',
'type': 'none',
'conditions': [
[ 'node_use_dtrace=="true" and OS!="mac"', {
'actions': [
{
'action_name': 'node_dtrace_ustack_constants',
'inputs': [
'<(PRODUCT_DIR)/obj.target/deps/v8/tools/gyp/libv8_base.<(target_arch).a'
],
'outputs': [
'<(SHARED_INTERMEDIATE_DIR)/v8constants.h'
],
'action': [
'tools/genv8constants.py',
'<@(_outputs)',
'<@(_inputs)'
]
},
{
'action_name': 'node_dtrace_ustack',
'inputs': [
'src/v8ustack.d',
'<(SHARED_INTERMEDIATE_DIR)/v8constants.h'
],
'outputs': [
'<(PRODUCT_DIR)/obj.target/node/src/node_dtrace_ustack.o'
],
'conditions': [
[ 'target_arch=="ia32"', {
'action': [
'dtrace', '-32', '-I<(SHARED_INTERMEDIATE_DIR)', '-Isrc',
'-C', '-G', '-s', 'src/v8ustack.d', '-o', '<@(_outputs)',
]
} ],
[ 'target_arch=="x64"', {
'action': [
'dtrace', '-64', '-I<(SHARED_INTERMEDIATE_DIR)', '-Isrc',
'-C', '-G', '-s', 'src/v8ustack.d', '-o', '<@(_outputs)',
]
} ],
]
}
]
} ],
]
}
] # end targets
}
| 30.59619 | 90 | 0.455768 |
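Since a .gyp file is Python-literal syntax plus '#' comments, its structure can be poked at without the GYP toolchain by stripping the comments and evaluating the dictionary. The sketch below does that against a small inline excerpt rather than the full file; GYP's real loader additionally expands variables, conditions and includes, so this is only a convenience for inspection.
# Inspect a gyp-style dictionary by stripping comments and evaluating it.
# Illustrative only; the snippet is a cut-down excerpt, not the full node.gyp.
import re
GYP_SNIPPET = """
{
  'targets': [
    { 'target_name': 'node', 'type': 'executable' },      # main binary
    { 'target_name': 'node_js2c', 'type': 'none' },
  ],
}
"""
def load_gyp(text):
    text = re.sub(r"#.*", "", text)                  # drop comments (naive: ignores '#' inside strings)
    return eval(text.strip(), {"__builtins__": {}})  # trusted input only
data = load_gyp(GYP_SNIPPET)
print([t["target_name"] for t in data["targets"]])   # ['node', 'node_js2c']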
7947a3e831c263e802bdcb0294b6349a04698cb7 | 17,756 | py | Python | backend/flask_oauth.py | yuvatia/isREAL-ui | 5e8cf36009e08a595550e4abd569a2382518f948 | [
"MIT"
] | null | null | null | backend/flask_oauth.py | yuvatia/isREAL-ui | 5e8cf36009e08a595550e4abd569a2382518f948 | [
"MIT"
] | null | null | null | backend/flask_oauth.py | yuvatia/isREAL-ui | 5e8cf36009e08a595550e4abd569a2382518f948 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Fixed compatibility issues with Python 3.x
"""
flask_oauth
~~~~~~~~~~~
Implements basic OAuth support for Flask.
:copyright: (c) 2010 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import httplib2
from functools import wraps
from urllib.parse import urljoin
from flask import request, session, json, redirect, Response
from werkzeug import url_decode, url_encode, url_quote, \
parse_options_header, Headers
import oauth2
_etree = None
def get_etree():
"""Return an elementtree implementation. Prefers lxml"""
global _etree
if _etree is None:
try:
from lxml import etree as _etree
except ImportError:
try:
from xml.etree import cElementTree as _etree
except ImportError:
try:
from xml.etree import ElementTree as _etree
except ImportError:
raise TypeError('lxml or etree not found')
return _etree
def parse_response(resp, content, strict=False):
ct, options = parse_options_header(resp['content-type'])
if ct in ('application/json', 'text/javascript'):
return json.loads(content)
elif ct in ('application/xml', 'text/xml'):
# technically, text/xml is ascii based but because many
# implementations get that wrong and utf-8 is a superset
        # of ascii anyway, there is not much harm in assuming
# utf-8 here
charset = options.get('charset', 'utf-8')
return get_etree().fromstring(content.decode(charset))
elif ct != 'application/x-www-form-urlencoded':
if strict:
return content
charset = options.get('charset', 'utf-8')
return url_decode(content, charset=charset).to_dict()
def add_query(url, args):
if not args:
return url
return url + ('?' in url and '&' or '?') + url_encode(args)
def encode_request_data(data, format):
if format is None:
return data, None
elif format == 'json':
return json.dumps(data or {}), 'application/json'
elif format == 'urlencoded':
return url_encode(data or {}), 'application/x-www-form-urlencoded'
raise TypeError('Unknown format %r' % format)
class OAuthResponse(object):
"""Contains the response sent back from an OAuth protected remote
application.
"""
def __init__(self, resp, content):
#: a :class:`~werkzeug.Headers` object with the response headers
#: the application sent.
self.headers = Headers(resp)
#: the raw, unencoded content from the server
self.raw_data = content
#: the parsed content from the server
self.data = parse_response(resp, content, strict=True)
@property
def status(self):
"""The status code of the response."""
return self.headers.get('status', type=int)
class OAuthClient(oauth2.Client):
def request_new_token(self, uri, callback=None, params={}):
if callback is not None:
params['oauth_callback'] = callback
req = oauth2.Request.from_consumer_and_token(
self.consumer, token=self.token,
http_method='POST', http_url=uri, parameters=params,
is_form_encoded=True)
req.sign_request(self.method, self.consumer, self.token)
body = req.to_postdata()
headers = {
'Content-Type': 'application/x-www-form-urlencoded',
'Content-Length': str(len(body))
}
return httplib2.Http.request(self, uri, method='POST',
body=body, headers=headers)
class OAuthException(RuntimeError):
"""Raised if authorization fails for some reason."""
message = None
type = None
def __init__(self, message, type=None, data=None):
#: A helpful error message for debugging
self.message = message
#: A unique type for this exception if available.
self.type = type
#: If available, the parsed data from the remote API that can be
        #: used to pinpoint the error.
self.data = data
def __str__(self):
        return self.message
def __unicode__(self):
return self.message
class OAuth(object):
"""Registry for remote applications. In the future this will also
be the central class for OAuth provider functionality.
"""
def __init__(self):
self.remote_apps = {}
def remote_app(self, name, register=True, **kwargs):
"""Registers a new remote applicaton. If `param` register is
set to `False` the application is not registered in the
:attr:`remote_apps` dictionary. The keyword arguments are
forwarded to the :class:`OAuthRemoteApp` consturctor.
"""
app = OAuthRemoteApp(self, name, **kwargs)
if register:
assert name not in self.remote_apps, \
'application already registered'
self.remote_apps[name] = app
return app
class OAuthRemoteApp(object):
"""Represents a remote application.
:param oauth: the associated :class:`OAuth` object.
:param name: then name of the remote application
:param request_token_url: the URL for requesting new tokens
:param access_token_url: the URL for token exchange
:param authorize_url: the URL for authorization
:param consumer_key: the application specific consumer key
:param consumer_secret: the application specific consumer secret
:param request_token_params: an optional dictionary of parameters
to forward to the request token URL
or authorize URL depending on oauth
version.
:param access_token_params: an option diction of parameters to forward to
the access token URL
:param access_token_method: the HTTP method that should be used
for the access_token_url. Defaults
to ``'GET'``.
"""
def __init__(self, oauth, name, base_url,
request_token_url,
access_token_url, authorize_url,
consumer_key, consumer_secret,
request_token_params=None,
access_token_params=None,
access_token_method='GET'):
self.oauth = oauth
#: the `base_url` all URLs are joined with.
self.base_url = base_url
self.name = name
self.request_token_url = request_token_url
self.access_token_url = access_token_url
self.authorize_url = authorize_url
self.consumer_key = consumer_key
self.consumer_secret = consumer_secret
self.tokengetter_func = None
self.request_token_params = request_token_params or {}
self.access_token_params = access_token_params or {}
self.access_token_method = access_token_method
self._consumer = oauth2.Consumer(self.consumer_key,
self.consumer_secret)
self._client = OAuthClient(self._consumer)
def status_okay(self, resp):
"""Given request data, checks if the status is okay."""
try:
return int(resp['status']) in (200, 201)
except ValueError:
return False
def get(self, *args, **kwargs):
"""Sends a ``GET`` request. Accepts the same parameters as
:meth:`request`.
"""
kwargs['method'] = 'GET'
return self.request(*args, **kwargs)
def post(self, *args, **kwargs):
"""Sends a ``POST`` request. Accepts the same parameters as
:meth:`request`.
"""
kwargs['method'] = 'POST'
return self.request(*args, **kwargs)
def put(self, *args, **kwargs):
"""Sends a ``PUT`` request. Accepts the same parameters as
:meth:`request`.
"""
kwargs['method'] = 'PUT'
return self.request(*args, **kwargs)
def delete(self, *args, **kwargs):
"""Sends a ``DELETE`` request. Accepts the same parameters as
:meth:`request`.
"""
kwargs['method'] = 'DELETE'
return self.request(*args, **kwargs)
def make_client(self, token=None):
"""Creates a new `oauth2` Client object with the token attached.
Usually you don't have to do that but use the :meth:`request`
method instead.
"""
return oauth2.Client(self._consumer, self.get_request_token(token))
def request(self, url, data="", headers=None, format='urlencoded',
method='GET', content_type=None, token=None):
"""Sends a request to the remote server with OAuth tokens attached.
The `url` is joined with :attr:`base_url` if the URL is relative.
.. versionadded:: 0.12
added the `token` parameter.
:param url: where to send the request to
:param data: the data to be sent to the server. If the request method
is ``GET`` the data is appended to the URL as query
parameters, otherwise encoded to `format` if the format
is given. If a `content_type` is provided instead, the
data must be a string encoded for the given content
type and used as request body.
:param headers: an optional dictionary of headers.
:param format: the format for the `data`. Can be `urlencoded` for
URL encoded data or `json` for JSON.
:param method: the HTTP request method to use.
:param content_type: an optional content type. If a content type is
                         provided, the data is passed as is and the
`format` parameter is ignored.
:param token: an optional token to pass to tokengetter. Use this if you
want to support sending requests using multiple tokens.
If you set this to anything not None, `tokengetter_func`
will receive the given token as an argument, in which case
the tokengetter should return the `(token, secret)` tuple
for the given token.
:return: an :class:`OAuthResponse` object.
"""
headers = dict(headers or {})
client = self.make_client(token)
url = self.expand_url(url)
if method == 'GET':
assert format == 'urlencoded'
if data:
url = add_query(url, data)
data = ""
else:
if content_type is None:
data, content_type = encode_request_data(data, format)
if content_type is not None:
headers['Content-Type'] = content_type
return OAuthResponse(*client.request(url, method=method,
body=data or '',
headers=headers))
def expand_url(self, url):
return urljoin(self.base_url, url)
def generate_request_token(self, callback=None):
if callback is not None:
callback = urljoin(request.url, callback)
resp, content = self._client.request_new_token(
self.expand_url(self.request_token_url), callback,
self.request_token_params)
if not self.status_okay(resp):
raise OAuthException('Failed to generate request token',
type='token_generation_failed')
data = parse_response(resp, content)
if data is None:
raise OAuthException('Invalid token response from ' + self.name,
type='token_generation_failed')
tup = (data['oauth_token'], data['oauth_token_secret'])
session[self.name + '_oauthtok'] = tup
return tup
def get_request_token(self, token=None):
assert self.tokengetter_func is not None, 'missing tokengetter function'
# Don't pass the token if the token is None to support old
# tokengetter functions.
rv = self.tokengetter_func(*(token and (token,) or ()))
if rv is None:
rv = session.get(self.name + '_oauthtok')
if rv is None:
raise OAuthException('No token available', type='token_missing')
return oauth2.Token(*rv)
def free_request_token(self):
session.pop(self.name + '_oauthtok', None)
session.pop(self.name + '_oauthredir', None)
def authorize(self, callback=None):
"""Returns a redirect response to the remote authorization URL with
the signed callback given. The callback must be `None` in which
case the application will most likely switch to PIN based authentication
        or use a remotely stored callback URL.  Alternatively it's a URL
on the system that has to be decorated as :meth:`authorized_handler`.
"""
if self.request_token_url:
token = self.generate_request_token(callback)[0]
url = '%s?oauth_token=%s' % (self.expand_url(self.authorize_url),
url_quote(token))
else:
            assert callback is not None, 'Callback is required for OAuth2'
# This is for things like facebook's oauth. Since we need the
# callback for the access_token_url we need to keep it in the
# session.
params = dict(self.request_token_params)
params['redirect_uri'] = callback
params['client_id'] = self.consumer_key
session[self.name + '_oauthredir'] = callback
url = add_query(self.expand_url(self.authorize_url), params)
return redirect(url)
def tokengetter(self, f):
"""Registers a function as tokengetter. The tokengetter has to return
a tuple of ``(token, secret)`` with the user's token and token secret.
If the data is unavailable, the function must return `None`.
If the `token` parameter is passed to the request function it's
forwarded to the tokengetter function::
@oauth.tokengetter
def get_token(token='user'):
if token == 'user':
return find_the_user_token()
elif token == 'app':
return find_the_app_token()
raise RuntimeError('invalid token')
"""
self.tokengetter_func = f
return f
def handle_oauth1_response(self):
"""Handles an oauth1 authorization response. The return value of
this method is forwarded as first argument to the handling view
function.
"""
client = self.make_client()
resp, content = client.request('%s?oauth_verifier=%s' % (
self.expand_url(self.access_token_url),
request.args['oauth_verifier']
), self.access_token_method)
data = parse_response(resp, content)
if not self.status_okay(resp):
raise OAuthException('Invalid response from ' + self.name,
type='invalid_response', data=data)
return data
def handle_oauth2_response(self):
"""Handles an oauth2 authorization response. The return value of
this method is forwarded as first argument to the handling view
function.
"""
remote_args = {
'code': request.args.get('code'),
'client_id': self.consumer_key,
'client_secret': self.consumer_secret,
'redirect_uri': session.get(self.name + '_oauthredir')
}
remote_args.update(self.access_token_params)
if self.access_token_method == 'POST':
resp, content = self._client.request(self.expand_url(self.access_token_url),
self.access_token_method,
url_encode(remote_args))
elif self.access_token_method == 'GET':
url = add_query(self.expand_url(self.access_token_url), remote_args)
resp, content = self._client.request(url, self.access_token_method)
else:
raise OAuthException('Unsupported access_token_method: ' +
self.access_token_method)
data = parse_response(resp, content)
if not self.status_okay(resp):
raise OAuthException('Invalid response from ' + self.name,
type='invalid_response', data=data)
return data
def handle_unknown_response(self):
"""Called if an unknown response came back from the server. This
usually indicates a denied response. The default implementation
just returns `None`.
"""
return None
def authorized_handler(self, f):
"""Injects additional authorization functionality into the function.
The function will be passed the response object as first argument
if the request was allowed, or `None` if access was denied. When the
authorized handler is called, the temporary issued tokens are already
destroyed.
"""
@wraps(f)
def decorated(*args, **kwargs):
if 'oauth_verifier' in request.args:
data = self.handle_oauth1_response()
elif 'code' in request.args:
data = self.handle_oauth2_response()
else:
data = self.handle_unknown_response()
self.free_request_token()
return f(*((data,) + args), **kwargs)
return decorated
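# Usage sketch (illustrative only, not part of the original API docs): wires a
# hypothetical OAuth1 provider into a Flask app.  The URLs, consumer key and
# secret are placeholders; `session` and `redirect` are this module's imports.
def _example_remote_app(app):
    oauth = OAuth()
    remote = oauth.remote_app('example',
        base_url='https://api.example.com/1/',
        request_token_url='https://api.example.com/oauth/request_token',
        access_token_url='https://api.example.com/oauth/access_token',
        authorize_url='https://api.example.com/oauth/authorize',
        consumer_key='<consumer key>',
        consumer_secret='<consumer secret>')
    @remote.tokengetter
    def get_token(token=None):
        # A real application would load this from its own session or database.
        return session.get('example_token')
    @app.route('/login')
    def login():
        # Sends the user to the provider; the callback route below receives
        # the signed response once the user authorizes the application.
        return remote.authorize(callback='/oauth-authorized')
    @app.route('/oauth-authorized')
    @remote.authorized_handler
    def oauth_authorized(resp):
        if resp is None:
            return 'Access denied'
        session['example_token'] = (resp['oauth_token'],
                                    resp['oauth_token_secret'])
        return redirect('/')
    return remote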
| 40.724771 | 88 | 0.601205 |
7947a4d8c50dac13b0160f95cc7b1dac1c4e5bc9 | 6,669 | py | Python | bin/utils/plot_utils.py | hillmich/quantum-benchmarks-1 | e7ab97e004f638d8681b5ee9cbbe662d64bc3378 | ["MIT"] | 79 | 2019-07-03T01:54:30.000Z | 2021-04-19T12:28:08.000Z | bin/utils/plot_utils.py | hillmich/quantum-benchmarks-1 | e7ab97e004f638d8681b5ee9cbbe662d64bc3378 | ["MIT"] | 28 | 2019-07-16T21:03:49.000Z | 2021-02-14T14:59:45.000Z | bin/utils/plot_utils.py | hillmich/quantum-benchmarks-1 | e7ab97e004f638d8681b5ee9cbbe662d64bc3378 | ["MIT"] | 21 | 2019-07-04T05:21:53.000Z | 2021-02-22T18:59:47.000Z |
from typing import List
import pandas as pd
import os
import json
ROOT_PATH = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..'))
IMAGE_PATH = os.path.join(ROOT_PATH, 'images')
COLOR = {
'yao': 'tab:red',
'yao (cuda)': 'tab:orange',
'yao x 1000': 'tab:blue',
'yao x 64': 'tab:blue',
'qiskit': 'tab:green',
'qiskit (cuda)': 'tab:gray',
'projectq': 'tab:blue',
'cirq': 'tab:cyan',
'quest': 'tab:olive',
'qulacs': 'tab:brown',
'qulacs (cuda)': 'tab:pink',
'pennylane': 'tab:purple',
'jkq-ddsim': 'darkblue'
}
def image_path(name):
if not os.path.isdir(IMAGE_PATH):
os.makedirs(IMAGE_PATH, exist_ok=True)
return os.path.join(IMAGE_PATH, name)
def find_json(name):
"""find the first matchable json benchmark file.
"""
benchmark_dir = os.path.join(ROOT_PATH, 'data')
sub_dirs = [f.path for f in os.scandir(benchmark_dir) if f.is_dir()]
if not sub_dirs:
raise FileNotFoundError('Did not find any directory with in data/')
elif len(sub_dirs) > 1:
print('WARNING: Found more than one suitable subdir. Arbitrarily choose {}'.format(sub_dirs[0]))
benchmark_path = os.path.join(benchmark_dir, sub_dirs[0])
file_stack = []
for each in os.listdir(benchmark_path):
if name in each:
file_stack.append(each)
return os.path.join(benchmark_path, file_stack[-1])
def wash_benchmark_data(name, labels):
"""process benchmark data, append `inf` to the data if there is no such data (it means
timeout during benchmarking usually). Then return a Pandas.DataFrame object.
"""
with open(find_json(name)) as f:
data = json.load(f)
cols = [each['params']['nqubits'] for each in data['benchmarks'] if each['group'] == labels[0]]
dd = {'nqubits': cols}
for lb in labels:
time_data = [each['stats']['min']*1e9 for each in data['benchmarks'] if each['group'] == lb]
        if len(time_data) != len(cols):
time_data.extend([float('inf') for _ in range(len(cols) - len(time_data))])
dd[lb] = time_data
return pd.DataFrame(data=dd)
def wash_google_benchmark_data(name: str, labels: List[str]) -> pd.DataFrame:
print(f'{name} {labels}')
with open(os.path.join(ROOT_PATH, 'data', f'{name}.json')) as f:
data = json.load(f)
# If the first series of benchmarks does not have enough data, the following commented line yields the wrong list
# cols = [int(each['name'].split('/')[1]) for each in data['benchmarks'] if each['label'] == labels[0]]
# It might be better to explicitly set the range and have it as parameter for parsing?
cols = list(range(4, 26)) # TODO: move to parameter list?
dd = {'nqubits': cols}
for lb in labels:
time_data = [each['cpu_time'] for each in data['benchmarks'] if each['label'] == lb]
        if len(time_data) != len(cols):
time_data.extend([float('inf') for _ in range(len(cols) - len(time_data))])
dd[lb] = time_data
return pd.DataFrame(data=dd)
def parse_data(packages, labels=['X', 'H', 'T', 'CNOT', 'Toffoli']):
"""parse benchmark data of `packages` of `labels`.
"""
gate_data = {}
for each_package in packages:
if each_package == 'yao':
if len(labels) == 1 and 'QCBM' in labels:
pd_data = pd.read_csv(os.path.join(ROOT_PATH, 'data', 'yao_qcbm.csv'))
gate_data[each_package] = pd_data[['nqubits', 'QCBM']]
gate_data['yao (cuda)'] = pd_data[['nqubits', 'QCBM_cuda']].rename(columns={'QCBM_cuda' : 'QCBM'})
elif len(labels) == 1 and 'QCBM (batch)' in labels:
pd_data = pd.read_csv(os.path.join(ROOT_PATH, 'data', 'yao_qcbm_batch.csv'))
gate_data['yao'] = pd_data[['nqubits', 'QCBM_batch']].rename(columns={'QCBM_batch' : 'QCBM (batch)'})
gate_data['yao (cuda)'] = pd_data[['nqubits', 'QCBM_cuda_batch']].rename(columns={'QCBM_cuda_batch' : 'QCBM (batch)'})
else:
gate_data[each_package] = pd.read_csv(os.path.join(ROOT_PATH, 'data', 'yao.csv'))
elif each_package == 'qulacs':
if len(labels) == 1 and 'QCBM' in labels:
gate_data['qulacs'] = wash_benchmark_data(each_package, ['QCBM'])
gate_data['qulacs (cuda)'] = wash_benchmark_data(each_package, ['QCBM (cuda)']).rename(columns={'QCBM (cuda)': 'QCBM'})
else:
gate_data[each_package] = wash_benchmark_data(each_package, labels)
elif each_package == 'qiskit':
if len(labels) == 1 and 'QCBM' in labels:
gate_data['qiskit'] = wash_benchmark_data(each_package, ['QCBM'])
gate_data['qiskit (cuda)'] = wash_benchmark_data(each_package, ['QCBM (cuda)']).rename(columns={'QCBM (cuda)': 'QCBM'})
else:
gate_data[each_package] = wash_benchmark_data(each_package, labels)
elif each_package == 'jkq-ddsim':
gate_data[each_package] = wash_google_benchmark_data(each_package, labels)
else:
gate_data[each_package] = wash_benchmark_data(each_package, labels)
return gate_data
def plot_absolute(ax, data : dict, gate):
ls, labels = [], []
for k in data:
d = data[k]
if k == 'yao':
ls.append(ax.semilogy(d["nqubits"], d[gate], '-o', markersize=4, color=COLOR[k]))
elif k == 'yao (cuda)':
ls.append(ax.semilogy(d["nqubits"], d[gate], '-o', markersize=4, color=COLOR[k]))
else:
ls.append(ax.semilogy(d["nqubits"], d[gate], '-o', markersize=4, color=COLOR[k]))
if k == 'quest':
labels.append('pyquest-cffi')
else:
labels.append(k)
ax.set_xlabel("nqubits", size=16)
ax.set_ylabel("ns", size=16)
return ls, labels
def plot_relative(ax, data: dict, gate, to='yao', log=True):
ls, labels = [], []
d_yao = data[to]
for k in data:
if k == to:
continue
else:
d = data[k]
if log:
ls.append(ax.semilogy(d["nqubits"], d[gate]/d_yao[gate], '-o', markersize=4, color=COLOR[k]))
else:
ls.append(ax.plot(d["nqubits"], d[gate]/d_yao[gate], '-o', markersize=4, color=COLOR[k]))
if k == 'quest':
labels.append('pyquest-cffi')
else:
labels.append(k)
ax.axhline(y=1, linestyle='--')
labels.append(to)
ax.set_xlabel("nqubits", size=16)
ax.set_ylabel("relative time ({} = 1)".format(to), size=16)
return ls, labels
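# Usage sketch (illustrative only): the packages and gate label are examples;
# matplotlib is assumed to be available and the benchmark files are expected
# under data/ as described in the helpers above.
def _example_absolute_plot():
    import matplotlib.pyplot as plt
    fig, ax = plt.subplots(figsize=(8, 5))
    data = parse_data(['qiskit', 'cirq'], labels=['X', 'H'])
    lines, labels = plot_absolute(ax, data, 'H')
    ax.legend([line[0] for line in lines], labels)
    fig.savefig(image_path('H_absolute.png'))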
| 40.174699 | 135 | 0.591993 |
7947a5f1f2b566b255cb7870440226807cb0cdf7 | 15,102 | py | Python | applications/zcomx/modules/rss.py | zcomx/zco.mx | 70a7372af5787c2e4dea14b25bab0bbb2b959881 | ["BSD-3-Clause"] | null | null | null | applications/zcomx/modules/rss.py | zcomx/zco.mx | 70a7372af5787c2e4dea14b25bab0bbb2b959881 | ["BSD-3-Clause"] | null | null | null | applications/zcomx/modules/rss.py | zcomx/zco.mx | 70a7372af5787c2e4dea14b25bab0bbb2b959881 | ["BSD-3-Clause"] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Classes and functions related to rss feeds.
"""
import datetime
import os
import gluon.contrib.rss2 as rss2
from gluon import *
from applications.zcomx.modules.activity_logs import ActivityLog
from applications.zcomx.modules.book_pages import \
BookPage, \
AbridgedBookPageNumbers
from applications.zcomx.modules.books import \
Book, \
formatted_name as book_formatted_name, \
get_page, \
page_url
from applications.zcomx.modules.creators import \
Creator, \
url as creator_url
from applications.zcomx.modules.images import ImageDescriptor
from applications.zcomx.modules.zco import \
SITE_NAME, \
Zco
LOG = current.app.logger
MINIMUM_AGE_TO_LOG_IN_SECONDS = 4 * 60 * 60 # 4 hours
class BaseRSSChannel(object):
"""Class representing a BaseRSSChannel"""
max_entry_age_in_days = 7
def __init__(self, record=None):
"""Initializer
Args:
record: Record sublcass instance
"""
self.record = record
def description(self):
"""Return the description for the channel.
Returns:
string, channel description.
"""
raise NotImplementedError()
def entries(self):
"""Return of list of feed entries.
Returns:
list of dicts.
"""
db = current.app.db
items = []
query = self.filter_query()
rows = db(query).select(
db.activity_log.id,
left=[
db.book.on(db.book.id == db.activity_log.book_id),
],
orderby=~db.activity_log.time_stamp,
)
for r in rows:
activity_log = ActivityLog.from_id(r.id)
try:
entry = activity_log_as_rss_entry(activity_log).feed_item()
except LookupError as err:
# This may happen if a book deletion is in progress
# LOG.error(err)
pass # This is producing too much noise
else:
items.append(entry)
return items
def feed(self):
"""Return a feed for the channel."""
return dict(
title=self.title(),
link=self.link(),
description=self.description(),
created_on=datetime.datetime.now(),
image=self.image(),
entries=self.entries(),
)
def filter_query(self):
"""Define a query to filter activity_log records to include in feed.
Return
gluon.pydal.objects Query instance.
"""
db = current.app.db
now = datetime.datetime.now()
min_time_stamp = now - \
datetime.timedelta(days=self.max_entry_age_in_days)
return db.activity_log.time_stamp > min_time_stamp
def image(self):
"""Return the RSS image.
Returns
rss2.Image instance.
"""
# R0201: *Method could be a function*
# pylint: disable=R0201
# From RSS spec: (Note, in practice the image <title> and <link>
# should have the same value as the channel's <title> and <link>.
return rss2.Image(
URL(c='static', f='images/zco.mx-logo-small.png', host=True),
self.title(),
self.link(),
)
def link(self):
"""Return the link for the channel.
Returns:
string, channel link.
"""
raise NotImplementedError()
def title(self):
"""Return the title for the channel.
Returns:
string, channel title.
"""
raise NotImplementedError()
class AllRSSChannel(BaseRSSChannel):
"""Class representing a RSS channel for all zco.mx activity."""
def description(self):
return 'Recent activity on {s}.'.format(s=SITE_NAME)
def link(self):
return URL(host=True, **Zco().all_rss_url)
def title(self):
return SITE_NAME
class BookRSSChannel(BaseRSSChannel):
"""Class representing a book RSS channel"""
max_entry_age_in_days = 30
def __init__(self, record=None):
"""Initializer
Args:
record: Book instance
"""
super().__init__(record=record)
self.book = record
self.creator = Creator.from_id(self.book.creator_id)
def description(self):
return 'Recent activity of {b} by {c} on {s}.'.format(
b=book_formatted_name(self.book, include_publication_year=False),
c=self.creator.name,
s=SITE_NAME
)
def filter_query(self):
db = current.app.db
return super().filter_query() & \
(db.activity_log.book_id == self.book.id)
def link(self):
try:
first_page = get_page(self.book, page_no='first')
except LookupError:
return URL(**Zco().all_rss_url)
return page_url(first_page, extension=False, host=True)
def title(self):
return '{s}: {b} by {c}'.format(
s=SITE_NAME,
b=book_formatted_name(self.book, include_publication_year=False),
c=self.creator.name,
)
class CartoonistRSSChannel(BaseRSSChannel):
"""Class representing a cartoonist RSS channel"""
max_entry_age_in_days = 30
def __init__(self, record=None):
"""Initializer
Args:
record: Creator instance
"""
super().__init__(record=record)
self.creator = record
def description(self):
return 'Recent activity of {c} on {s}.'.format(
c=self.creator.name,
s=SITE_NAME
)
def filter_query(self):
db = current.app.db
return super().filter_query() & \
(db.book.creator_id == self.creator.id)
def link(self):
return creator_url(self.creator, extension=False, host=True)
def title(self):
return '{s}: {c}'.format(
s=SITE_NAME,
c=self.creator.name,
)
class BaseRSSEntry(object):
"""Class representing a BaseRSSEntry"""
def __init__(self, book_page_ids, time_stamp, activity_log_id):
"""Initializer
Args:
book_page_ids: list of integers, ids of book_page records
time_stamp: datetime.datetime instance representing the time the
activity took place.
activity_log_id: integer, id of activity_log record the entry
is about.
"""
self.book_page_ids = book_page_ids
self.time_stamp = time_stamp
self.activity_log_id = activity_log_id
if not book_page_ids:
raise LookupError('No book page ids provided')
self.first_page = self.first_of_pages()
if not self.first_page:
raise LookupError('First page not found within: {e}'.format(
e=self.book_page_ids))
self.book = Book.from_id(self.first_page.book_id)
self.creator = Creator.from_id(self.book.creator_id)
def created_on(self):
"""Return the created_on value for the entry.
Returns:
string, entry created_on value.
"""
return self.time_stamp
def description(self):
"""Return the description for the entry.
Returns:
string, entry description.
"""
return self.description_fmt().format(
b=book_formatted_name(self.book, include_publication_year=False),
c=self.creator.name,
d=datetime.datetime.strftime(self.time_stamp, '%b %d, %Y')
)
def description_fmt(self):
"""Return a format string with suitable convertion flags for
the description.
Returns:
string
"""
raise NotImplementedError()
def enclosure(self):
"""Return the enclosure for the entry.
Returns
rss2.Enclosure instance.
"""
url = URL(
c='images',
f='download',
args=self.first_page.image,
vars={'size': 'web'},
host=SITE_NAME,
scheme='http', # RSS validation suggests this
)
length = ImageDescriptor(
self.first_page.upload_image().fullname(size='web')
).size_bytes()
_, extension = os.path.splitext(self.first_page.image)
mime_type = 'image/{ext}'.format(ext=extension.lstrip('.'))
if mime_type == 'image/jpg':
mime_type = 'image/jpeg'
return rss2.Enclosure(url, length, mime_type)
def feed_item(self):
"""Return a dict representing an RSS feed item.
Returns:
dict
"""
return dict(
title=self.title(),
link=self.link(),
description=self.description(),
enclosure=self.enclosure(),
guid=self.guid(),
created_on=self.created_on(),
)
def first_of_pages(self):
"""Return a Row instance representing the book_page record that
is the first of the pages with activity. 'first' is the one with
the minimum page_no value.
Returns:
Row instance representing a book_page record.
"""
db = current.app.db
rows = db(db.book_page.id.belongs(self.book_page_ids)).select(
db.book_page.id,
orderby=db.book_page.page_no,
limitby=(0, 1),
)
if not rows:
return
return BookPage.from_id(rows[0].id)
def guid(self):
"""Return a guid for the entry.
Returns:
string, entry guid.
"""
fmt = '{site}-{rid:09d}'
unique_guid = fmt.format(
site=SITE_NAME,
rid=self.activity_log_id
).replace('.', '')
return rss2.Guid(str(unique_guid), isPermaLink=False)
def link(self):
"""Return the link for the entry.
Returns:
string, entry link.
"""
if not self.first_page:
return
return page_url(self.first_page, extension=False, host=True)
def title(self):
"""Return the title for the entry.
Returns:
string, entry title.
"""
pages = [BookPage.from_id(x) for x in self.book_page_ids]
return "'{b}' {p} by {c}".format(
b=book_formatted_name(self.book, include_publication_year=False),
p=' '.join(AbridgedBookPageNumbers(pages).numbers()),
c=self.creator.name,
)
class CompletedRSSEntry(BaseRSSEntry):
"""Class representing a 'completed' RSS entry"""
def description_fmt(self):
return "Posted: {d} - The book '{b}' by {c} has been set as completed."
class PageAddedRSSEntry(BaseRSSEntry):
"""Class representing a 'page added' RSS entry"""
def description_fmt(self):
if len(self.book_page_ids) > 1:
# line-too-long (C0301): *Line too long (%%s/%%s)*
# pylint: disable=C0301
return "Posted: {d} - Several pages were added to the book '{b}' by {c}."
else:
return "Posted: {d} - A page was added to the book '{b}' by {c}."
class RSS2WithAtom(rss2.RSS2):
"""Class representing the main RSS class with an atom namespace"""
rss_attrs = dict(
rss2.RSS2.rss_attrs,
**{'xmlns:atom': 'http://www.w3.org/2005/Atom'}
)
def publish_extensions(self, handler):
# protected-access (W0212): *Access to a protected member
# pylint: disable=W0212
rss2._element(
handler,
'atom:link',
None,
{
'href': self.link,
'rel': 'self',
'type': 'application/rss+xml',
}
)
def activity_log_as_rss_entry(activity_log):
"""Factory to create a BaseRSSEntry subclass instance from an activity_log
record.
Args:
activity_log: ActivityLog instance
Returns:
BaseRSSEntry subclass instance.
"""
if not activity_log.book_page_ids:
raise LookupError('activity_log has no book page ids, id {i}'.format(
i=activity_log.id))
book_page_ids = activity_log.verified_book_page_ids()
if not book_page_ids:
fmt = 'activity_log has no verifiable book page ids, id {i}'
raise LookupError(fmt.format(i=activity_log.id))
entry_class = entry_class_from_action(activity_log.action)
return entry_class(
book_page_ids,
activity_log.time_stamp,
activity_log.id
)
def channel_from_type(channel_type, record_id=None):
"""Factory for returning a RSSChannel instance from args.
Args:
channel_type: string, one of 'all', 'book', 'creator'
record_id: integer, id of record
"""
if not channel_type:
raise SyntaxError('Invalid rss feed channel: {c}'.format(
c=channel_type))
if channel_type == 'all':
return AllRSSChannel()
if channel_type == 'creator':
return CartoonistRSSChannel(Creator.from_id(record_id))
if channel_type == 'book':
return BookRSSChannel(Book.from_id(record_id))
raise SyntaxError('Invalid rss feed channel: {c}'.format(
c=channel_type))
def entry_class_from_action(action):
"""Return the appropriate RSS Entry class for the action."""
if action == 'completed':
return CompletedRSSEntry
elif action == 'page added':
return PageAddedRSSEntry
else:
raise LookupError('Invalid RSS entry action: {a}'.format(a=action))
def rss_serializer_with_image(feed):
"""RSS serializer adapted from gluon/serializers def rss().
Customizations:
Replace rss2.RSS2 with RSS2WithAtom
rss2.RSS2(..., image=...)
rss2.RSSItem(..., guid=...)
rss2.RSSItem(..., enclosure=...)
"""
if 'entries' not in feed and 'items' in feed:
feed['entries'] = feed['items']
def _safestr(obj, key, default=''):
"""Encode string for safety."""
if key not in obj:
return default
as_bytes = obj[key]
if isinstance(obj[key], str):
as_bytes = obj[key].encode(encoding='utf-8', errors='replace')
return as_bytes.decode('utf-8')
now = datetime.datetime.now()
rss = RSS2WithAtom(
title=_safestr(feed, 'title'),
link=_safestr(feed, 'link'),
description=_safestr(feed, 'description'),
lastBuildDate=feed.get('created_on', now),
image=feed.get('image', None), # <--- customization
items=[
rss2.RSSItem(
title=_safestr(entry, 'title', '(notitle)'),
link=_safestr(entry, 'link'),
description=_safestr(entry, 'description'),
enclosure=entry.get('enclosure', None),
guid=entry.get('guid', None),
pubDate=entry.get('created_on', now)
) for entry in feed.get('entries', [])
]
)
return rss.to_xml(encoding='utf-8')
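# Usage sketch (illustrative only): inside a request context where
# current.app.db and the activity_log/book tables are available, the sitewide
# feed can be built and serialized like this.
def _example_all_feed_xml():
    channel = channel_from_type('all')
    return rss_serializer_with_image(channel.feed())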
| 28.765714 | 85 | 0.581777 |
7947a6a71ac5717d7356d64a84136e857e90a80b | 661 | py | Python | messages/HostReserveLinkRequestMessage.py | zadjii/nebula | 50c4ec019c9f7eb15fe105a6c53a8a12880e281c | ["MIT"] | 2 | 2020-04-15T11:20:59.000Z | 2021-05-12T13:01:36.000Z | messages/HostReserveLinkRequestMessage.py | zadjii/nebula | 50c4ec019c9f7eb15fe105a6c53a8a12880e281c | ["MIT"] | 1 | 2018-06-05T04:48:56.000Z | 2018-06-05T04:48:56.000Z | messages/HostReserveLinkRequestMessage.py | zadjii/nebula | 50c4ec019c9f7eb15fe105a6c53a8a12880e281c | ["MIT"] | 1 | 2018-08-15T06:45:46.000Z | 2018-08-15T06:45:46.000Z |
# last generated 2018-04-08 02:13:50.681000
from messages import BaseMessage
from msg_codes import HOST_RESERVE_LINK_REQUEST as HOST_RESERVE_LINK_REQUEST
__author__ = 'Mike'
class HostReserveLinkRequestMessage(BaseMessage):
def __init__(self, cloud_uname=None, cname=None):
super(HostReserveLinkRequestMessage, self).__init__()
self.type = HOST_RESERVE_LINK_REQUEST
self.cloud_uname = cloud_uname
self.cname = cname
@staticmethod
def deserialize(json_dict):
msg = HostReserveLinkRequestMessage()
msg.cloud_uname = json_dict['cloud_uname']
msg.cname = json_dict['cname']
return msg
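# Usage sketch (illustrative values only): a deserialize round-trip.
def _example_round_trip():
    msg = HostReserveLinkRequestMessage(cloud_uname='alice', cname='my-cloud')
    copy = HostReserveLinkRequestMessage.deserialize(
        {'cloud_uname': msg.cloud_uname, 'cname': msg.cname})
    return copy.type == HOST_RESERVE_LINK_REQUEST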
| 31.47619 | 76 | 0.729198 |
7947a6c85ef347aad4ade698693d0d1fb4b525f5 | 1,824 | py | Python | neoload/commands/project.py | stephanemartin/neoload-cli | aa128aad9a446e94d5700e8a25b674397d633e1a | ["BSD-2-Clause"] | 9 | 2020-06-01T14:28:37.000Z | 2022-03-06T23:21:09.000Z | neoload/commands/project.py | stephanemartin/neoload-cli | aa128aad9a446e94d5700e8a25b674397d633e1a | ["BSD-2-Clause"] | 97 | 2019-12-06T23:52:19.000Z | 2022-02-11T14:22:07.000Z | neoload/commands/project.py | stephanemartin/neoload-cli | aa128aad9a446e94d5700e8a25b674397d633e1a | ["BSD-2-Clause"] | 23 | 2020-03-24T18:38:58.000Z | 2022-03-04T16:09:23.000Z |
import os
import click
from commands import test_settings
from neoload_cli_lib import user_data, tools, rest_crud, neoLoad_project
@click.command()
@click.argument("command", required=True, type=click.Choice(['up', 'upload', 'meta']))
@click.option("--path", "-p", type=click.Path(exists=True), default=os.getcwd(),
help="path of project folder, zip or yml file. . is default value")
@click.option("--save", "-s", type=click.Path(exists=False),
help="Path to a (non-existent) file ending in .zip to preserve what was uploaded")
@click.argument("name_or_id", type=str, required=False)
def cli(command, name_or_id, path, save):
"""Upload and list scenario from settings"""
rest_crud.set_current_command()
if not name_or_id or name_or_id == "cur":
name_or_id = user_data.get_meta(test_settings.meta_key)
if not tools.is_id(name_or_id):
name_or_id = test_settings.__resolver.resolve_name(name_or_id)
if command[:2] == "up":
upload(path, name_or_id, save)
elif command == "meta":
meta_data(name_or_id)
user_data.set_meta(test_settings.meta_key, name_or_id)
rest_crud.set_current_sub_command(command)
#TODO: pre-validate with 'neoload validate' functionality, but..
#TODO: provide a --skip-validation option
#TODO: spider through all YAML (as-code files)
#TODO: fix validate to recurse through all includes; create unique file list map (avoid recursive references)
def upload(path, settings_id, save):
neoLoad_project.upload_project(path, get_endpoint(settings_id), save)
def meta_data(setting_id):
neoLoad_project.display_project(rest_crud.get_from_file_storage(get_endpoint(setting_id)))
def get_endpoint(settings_id: str):
return rest_crud.base_endpoint_with_workspace() + '/tests/' + settings_id + "/project"
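# Usage sketch (assumed invocation; the settings name and path are placeholders):
#   neoload project upload --path ./my_project MyTestSettings
#   neoload project meta cur
# "cur" resolves to the test settings previously stored in the user meta data.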
| 38 | 109 | 0.730263 |
7947a7be11854c0f8b78750d60895fd7e04c28c4 | 3,882 | py | Python | util/debug/ai_performance.py | littlebee/shelly-bot | e25f2759bf1c7ac61bacbe70221910184e49beba | ["MIT"] | null | null | null | util/debug/ai_performance.py | littlebee/shelly-bot | e25f2759bf1c7ac61bacbe70221910184e49beba | ["MIT"] | null | null | null | util/debug/ai_performance.py | littlebee/shelly-bot | e25f2759bf1c7ac61bacbe70221910184e49beba | ["MIT"] | null | null | null |
#!/usr/bin/env python3
import os
import time
import cv2
import pickle
import face_recognition
import numpy
# last n seconds to use for fps calc
FPS_WINDOW = 60
# if true, use face_recognition.face_distance to determine known faces
USE_FACE_DISTANCE = os.getenv('USE_FACE_DISTANCE') == '1' or False
if USE_FACE_DISTANCE:
print('Using face_distance to determine known faces')
class FpsStats(object):
def __init__(self):
self.start()
# can call start after init, or pause and start for more accuracy
def start(self):
self.started_at = time.time()
self.total_frames = 0
self.floating_frames_count = 0
self.floating_started_at = time.time()
self.last_floating_fps = 0
def increment(self):
self.total_frames += 1
self.floating_frames_count += 1
fps_time = time.time() - self.floating_started_at
if fps_time > FPS_WINDOW:
self.last_floating_fps = self.floating_frames_count / fps_time
self.floating_started_at = time.time()
self.floating_frames_count = 0
print(f"fps: {self.last_floating_fps}")
def stats(self):
now = time.time()
total_time = now - self.started_at
return {
"totalFramesRead": self.total_frames,
"totalTime": total_time,
"overallFps": self.total_frames / total_time,
"fpsStartedAt": self.floating_started_at,
"floatingFps": self.last_floating_fps
}
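# Usage sketch (no camera required): count a few synthetic frames and read the
# aggregate statistics back.
def _example_fps_stats(num_frames=5):
    counter = FpsStats()
    for _ in range(num_frames):
        counter.increment()
    return counter.stats()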
print('initializing VideoCapture')
camera = cv2.VideoCapture(0) # , apiPreference=cv2.CAP_V4L2)
if not camera.isOpened():
raise RuntimeError('Could not start camera.')
camera.set(cv2.CAP_PROP_FRAME_WIDTH, 640)
camera.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)
encodings_data = pickle.loads(
open("data/encodings.pickle", "rb").read(), encoding='latin1')
print(f"loaded {len(encodings_data['encodings'])} encodings")
fps_stats = FpsStats()
try:
while True:
_, img = camera.read()
if img is None:
            print(
                "The camera did not return any data; please check that the camera is working.")
            print(
                "Use the command 'raspistill -t 1000 -o image.jpg' to check whether the camera works correctly.")
break
fps_stats.increment()
new_faces = face_recognition.face_locations(img)
num_new_faces = len(new_faces)
print(f"found {num_new_faces} faces")
names = []
if num_new_faces > 0:
encodings = face_recognition.face_encodings(img, new_faces)
for encoding in encodings:
if USE_FACE_DISTANCE:
face_distances = face_recognition.face_distance(
encodings_data["encodings"], encoding)
best_match_index = numpy.argmin(face_distances)
if face_distances[best_match_index] < 0.65:
names.append(encodings_data["names"][best_match_index])
else:
matches = face_recognition.compare_faces(
encodings_data["encodings"], encoding)
# check to see if we have found a match
if True in matches:
matched_indexes = [
i for (i, b) in enumerate(matches) if b]
counts = {}
for i in matched_indexes:
name = encodings_data["names"][i]
counts[name] = counts.get(name, 0) + 1
# determine the recognized face with the largest number of votes
names.append(max(counts, key=counts.get))
print(f"recognized {len(names)} faces")
except KeyboardInterrupt:
pass
print('')
print(fps_stats.stats())
| 33.465517 | 120 | 0.60304 |
7947a8202bafd8b4f43e5d6245dda8a1a1f4bbcc | 1,837 | py | Python | chatbot/test/test_chatbot.py | sachibalata/testix | 8c1acf839c00753609a809ca4ab5942956d29656 | ["MIT"] | 3 | 2016-09-24T16:56:48.000Z | 2020-01-08T14:15:19.000Z | chatbot/test/test_chatbot.py | haarcuba/testix | 9cd7975a4b0baf6cadfab8d9405f6d19defe8372 | ["MIT"] | 32 | 2019-05-03T20:08:47.000Z | 2022-02-19T13:53:26.000Z | chatbot/test/test_chatbot.py | sachibalata/testix | 8c1acf839c00753609a809ca4ab5942956d29656 | ["MIT"] | 1 | 2021-08-23T10:30:51.000Z | 2021-08-23T10:30:51.000Z |
import pytest
import socket
from testix.frequentlyused import *
from testix import patch_module
from chatbot import chatbot
class TestChatbot:
@pytest.fixture(autouse=True)
def globals_patch(self, patch_module):
patch_module( chatbot, 'responder' )
def construct(self):
with Scenario() as s:
s.responder.Responder() >> Fake( 'responder_' )
self.tested = chatbot.Chatbot( Fake( 'sock' ) )
def test_construction(self):
self.construct()
def test_request_response_loop(self):
self.construct()
class EndTestException(Exception): pass
with Scenario() as s:
for i in range(10):
s.sock.recv(4096) >> f'request {i}'
s.responder_.process(f'request {i}') >> f'response {i}'
s.sock.send(f'response {i}')
s.sock.recv(4096) >> Throwing(EndTestException)
with pytest.raises(EndTestException):
self.tested.go()
def test_request_response_loop_survives_a_recv_exception(self):
self.construct()
class EndTestException(Exception): pass
with Scenario() as s:
for i in range(10):
s.sock.recv(4096) >> f'request {i}'
s.responder_.process(f'request {i}') >> f'response {i}'
s.sock.send(f'response {i}')
s.sock.recv(4096) >> Throwing(socket.error)
for i in range(10):
s.sock.recv(4096) >> f'request {i}'
s.responder_.process(f'request {i}') >> f'response {i}'
s.sock.send(f'response {i}')
s.sock.recv(4096) >> Throwing(EndTestException)
with pytest.raises(EndTestException):
self.tested.go()
| 35.326923 | 74 | 0.556342 |
7947a8f5f125053e393efc42423c6eddd0a955c0 | 1,723 | py | Python | setup.py | hart-seg-reg/amsaf | e2c5aaf3fd0a367f7b607b68f716e810f6896ed7 | ["MIT"] | null | null | null | setup.py | hart-seg-reg/amsaf | e2c5aaf3fd0a367f7b607b68f716e810f6896ed7 | ["MIT"] | null | null | null | setup.py | hart-seg-reg/amsaf | e2c5aaf3fd0a367f7b607b68f716e810f6896ed7 | ["MIT"] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""The setup script."""
from setuptools import setup, find_packages
with open('README.rst') as readme_file:
readme = readme_file.read()
with open('HISTORY.rst') as history_file:
history = history_file.read()
requirements = [
'Click>=6.0',
'scipy',
'scikit-learn',
'numpy',
'SimpleITK',
'click'
]
setup_requirements = [
'pytest-runner',
'scipy',
'scikit-learn',
'numpy',
'SimpleITK',
'click'
# TODO(hart-seg-reg): put setup requirements (distutils extensions, etc.) here
]
test_requirements = [
'pytest',
'scipy',
'scikit-learn',
'numpy',
'SimpleITK',
'click'
]
setup(
name='amsaf',
version='0.1.1',
description="The HART Lab's tools for registration-based segmentation",
long_description=readme + '\n\n' + history,
author="Laura Hallock",
author_email='[email protected]',
url='https://github.com/hart-seg-reg/amsaf',
packages=find_packages(include=['amsaf']),
entry_points={
'console_scripts': [
'amsaf=amsaf.cli:main'
]
},
include_package_data=True,
install_requires=requirements,
license="MIT license",
zip_safe=False,
keywords='amsaf',
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
"Programming Language :: Python :: 2",
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7'
],
test_suite='tests',
tests_require=test_requirements,
setup_requires=setup_requirements,
)
| 23.283784 | 82 | 0.615206 |
7947a9037d638cc702a7a29dd6256906caf4f6fc | 43,705 | py | Python | turbulenz_tools/tools/vmath.py | turbulenz/turbulenz_tools | 36e4a15d4fd9cdc0adf0ea365e2e09013565d6fa | ["MIT"] | 12 | 2015-01-26T16:15:28.000Z | 2021-12-11T08:51:41.000Z | turbulenz_tools/tools/vmath.py | turbulenz/turbulenz_tools | 36e4a15d4fd9cdc0adf0ea365e2e09013565d6fa | ["MIT"] | null | null | null | turbulenz_tools/tools/vmath.py | turbulenz/turbulenz_tools | 36e4a15d4fd9cdc0adf0ea365e2e09013565d6fa | ["MIT"] | 6 | 2015-02-23T13:39:54.000Z | 2021-09-30T10:04:22.000Z |
#!/usr/bin/python
# Copyright (c) 2009-2014 Turbulenz Limited
"""
Collection of math functions.
"""
import math
__version__ = '1.0.0'
# pylint: disable=C0302,C0111,R0914,R0913
# C0111 - Missing docstring
# C0302 - Too many lines in module
# R0914 - Too many local variables
# R0913 - Too many arguments
#######################################################################################################################
PRECISION = 1e-6
def tidy(m, tolerance=PRECISION):
def __tidy(x, tolerance):
if abs(x) < tolerance:
return 0
return x
return tuple([__tidy(x, tolerance) for x in m])
#######################################################################################################################
def select(m, a, b):
if m:
return a
return b
def rcp(a):
if a != 0.0:
return 1 / a
return 0.0
def iszero(a, tolerance=PRECISION):
return abs(a) < tolerance
#######################################################################################################################
def v2equal(a, b, tolerance=PRECISION):
(a0, a1) = a
(b0, b1) = b
return abs(a0 - b0) <= tolerance and abs(a1 - b1) <= tolerance
#######################################################################################################################
V3ZERO = (0.0, 0.0, 0.0)
V3HALF = (0.5, 0.5, 0.5)
V3ONE = (1.0, 1.0, 1.0)
V3TWO = (2.0, 2.0, 2.0)
V3XAXIS = (1.0, 0.0, 0.0)
V3YAXIS = (0.0, 1.0, 0.0)
V3ZAXIS = (0.0, 0.0, 1.0)
#######################################################################################################################
def v3create(a, b, c):
return (a, b, c)
def v3neg(a):
(a0, a1, a2) = a
return (-a0, -a1, -a2)
def v3add(a, b):
(a0, a1, a2) = a
(b0, b1, b2) = b
return ((a0 + b0), (a1 + b1), (a2 + b2))
def v3add3(a, b, c):
(a0, a1, a2) = a
(b0, b1, b2) = b
(c0, c1, c2) = c
return ((a0 + b0 + c0), (a1 + b1 + c1), (a2 + b2 + c2))
def v3add4(a, b, c, d):
(a0, a1, a2) = a
(b0, b1, b2) = b
(c0, c1, c2) = c
(d0, d1, d2) = d
return ((a0 + b0 + c0 + d0), (a1 + b1 + c1 + d1), (a2 + b2 + c2 + d2))
def v3sub(a, b):
(a0, a1, a2) = a
(b0, b1, b2) = b
return ((a0 - b0), (a1 - b1), (a2 - b2))
def v3mul(a, b):
(a0, a1, a2) = a
(b0, b1, b2) = b
return ((a0 * b0), (a1 * b1), (a2 * b2))
def v3madd(a, b, c):
(a0, a1, a2) = a
(b0, b1, b2) = b
(c0, c1, c2) = c
return (((a0 * b0) + c0), ((a1 * b1) + c1), ((a2 * b2) + c2))
def v3dot(a, b):
(a0, a1, a2) = a
(b0, b1, b2) = b
return (a0 * b0) + (a1 * b1) + (a2 * b2)
def v3cross(a, b):
(a0, a1, a2) = a
(b0, b1, b2) = b
return (a1 * b2) - (a2 * b1), (a2 * b0) - (a0 * b2), (a0 * b1) - (a1 * b0)
def v3lengthsq(a):
(a0, a1, a2) = a
return (a0 * a0) + (a1 * a1) + (a2 * a2)
def v3length(a):
(a0, a1, a2) = a
return math.sqrt((a0 * a0) + (a1 * a1) + (a2 * a2))
def v3distancesq(a, b):
return v3lengthsq(v3sub(a, b))
def v3recp(a):
(a0, a1, a2) = a
return rcp(a0), rcp(a1), rcp(a2)
def v3normalize(a):
(a0, a1, a2) = a
lsq = ((a0 * a0) + (a1 * a1) + (a2 * a2))
if lsq > 0.0:
lr = 1.0 / math.sqrt(lsq)
return (a0 * lr), (a1 * lr), (a2 * lr)
return V3ZERO
def v3abs(a):
(a0, a1, a2) = a
return abs(a0), abs(a1), abs(a2)
def v3max(a, b):
(a0, a1, a2) = a
(b0, b1, b2) = b
return max(a0, b0), max(a1, b1), max(a2, b2)
def v3max3(a, b, c):
(a0, a1, a2) = a
(b0, b1, b2) = b
(c0, c1, c2) = c
return max(max(a0, b0), c0), max(max(a1, b1), c1), max(max(a2, b2), c2)
def v3max4(a, b, c, d):
(a0, a1, a2) = a
(b0, b1, b2) = b
(c0, c1, c2) = c
(d0, d1, d2) = d
return max(max(a0, b0), max(c0, d0)), max(max(a1, b1), max(c1, d1)), max(max(a2, b2), max(c2, d2))
def v3min(a, b):
(a0, a1, a2) = a
(b0, b1, b2) = b
return min(a0, b0), min(a1, b1), min(a2, b2)
def v3min3(a, b, c):
(a0, a1, a2) = a
(b0, b1, b2) = b
(c0, c1, c2) = c
return min(min(a0, b0), c0), min(min(a1, b1), c1), min(min(a2, b2), c2)
def v3min4(a, b, c, d):
(a0, a1, a2) = a
(b0, b1, b2) = b
(c0, c1, c2) = c
(d0, d1, d2) = d
return min(min(a0, b0), min(c0, d0)), min(min(a1, b1), min(c1, d1)), min(min(a2, b2), min(c2, d2))
def v3equal(a, b, tolerance=PRECISION):
(a0, a1, a2) = a
(b0, b1, b2) = b
return abs(a0 - b0) <= tolerance and abs(a1 - b1) <= tolerance and abs(a2 - b2) <= tolerance
def v3mulm33(a, m):
(a0, a1, a2) = a
return v3add3( v3muls(m33right(m), a0),
v3muls(m33up(m), a1),
v3muls(m33at(m), a2) )
def v3mequal(a, b):
(a0, a1, a2) = a
(b0, b1, b2) = b
return (abs(a0 - b0) <= PRECISION), (abs(a1 - b1) <= PRECISION), (abs(a2 - b2) <= PRECISION)
def v3mless(a, b):
(a0, a1, a2) = a
(b0, b1, b2) = b
return (a0 < b0), (a1 < b1), (a2 < b2)
def v3mgreater(a, b):
(a0, a1, a2) = a
(b0, b1, b2) = b
return (a0 > b0), (a1 > b1), (a2 > b2)
def v3mgreatereq(a, b):
(a0, a1, a2) = a
(b0, b1, b2) = b
return (a0 >= b0), (a1 >= b1), (a2 >= b2)
def v3mnot(a):
(a0, a1, a2) = a
return not a0, not a1, not a2
def v3mor(a, b):
(a0, a1, a2) = a
(b0, b1, b2) = b
return (a0 or b0), (a1 or b1), (a2 or b2)
def v3mand(a, b):
(a0, a1, a2) = a
(b0, b1, b2) = b
return (a0 and b0), (a1 and b1), (a2 and b2)
def v3select(m, a, b):
(m0, m1, m2) = m
(a0, a1, a2) = a
(b0, b1, b2) = b
return select(m0, a0, b0), select(m1, a1, b1), select(m2, a2, b2)
def v3creates(a):
return a, a, a
def v3maxs(a, b):
(a0, a1, a2) = a
return max(a0, b), max(a1, b), max(a2, b)
def v3mins(a, b):
(a0, a1, a2) = a
return min(a0, b), min(a1, b), min(a2, b)
def v3adds(a, b):
(a0, a1, a2) = a
return (a0 + b), (a1 + b), (a2 + b)
def v3subs(a, b):
(a0, a1, a2) = a
return (a0 - b), (a1 - b), (a2 - b)
def v3muls(a, b):
(a0, a1, a2) = a
if b == 0:
return V3ZERO
return (a0 * b), (a1 * b), (a2 * b)
def v3equals(a, b):
(a0, a1, a2) = a
return abs(a0 - b) <= PRECISION and abs(a1 - b) <= PRECISION and abs(a2 - b) <= PRECISION
def v3equalsm(a, b):
(a0, a1, a2) = a
return (abs(a0 - b) <= PRECISION), (abs(a1 - b) <= PRECISION), (abs(a2 - b) <= PRECISION)
def v3lesssm(a, b):
(a0, a1, a2) = a
    return (a0 < b), (a1 < b), (a2 < b)
def v3greatersm(a, b):
(a0, a1, a2) = a
return (a0 > b), (a1 > b), (a2 > b)
def v3greatereqsm(a, b):
(a0, a1, a2) = a
return (a0 >= b), (a1 >= b), (a2 >= b)
def v3lerp(a, b, t):
(a0, a1, a2) = a
(b0, b1, b2) = b
return (a0 + (b0 - a0) * t), (a1 + (b1 - a1) * t), (a2 + (b2 - a2) * t)
def v3is_zero(a, tolerance=PRECISION):
return abs(v3lengthsq(a)) < (tolerance * tolerance)
def v3is_similar(a, b, tolerance=PRECISION):
return v3dot(a, b) > tolerance
def v3is_within_tolerance(a, b, tolerance):
"""The tolerance must be defined as the square of the cosine angle tolerated. Returns True is 'a' is zero."""
if v3is_zero(a): # Should we test b is_zero as well?
return True
dot = v3dot(a, b)
if dot < 0:
return False
if (dot * dot) < (v3lengthsq(a) * v3lengthsq(b) * tolerance):
return False
return True
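# Usage sketch: the tolerance argument is the squared cosine of the accepted
# angle, so a 10 degree cone test looks like this (expected to return True).
def _example_within_tolerance():
    cos_tol = math.cos(math.radians(10.0))
    return v3is_within_tolerance(V3XAXIS, (1.0, 0.05, 0.0), cos_tol * cos_tol)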
def v3unitcube_clamp(a):
(a0, a1, a2) = a
if a0 > 1.0:
a0 = 1.0
elif a0 < -1.0:
a0 = -1.0
if a1 > 1.0:
a1 = 1.0
elif a1 < -1.0:
a1 = -1.0
if a2 > 1.0:
a2 = 1.0
elif a2 < -1.0:
        a2 = -1.0
return a0, a1, a2
#######################################################################################################################
def v3s_min_max(points):
(min_x, min_y, min_z) = points[0]
(max_x, max_y, max_z) = points[0]
for (x, y, z) in points:
min_x = min(x, min_x)
min_y = min(y, min_y)
min_z = min(z, min_z)
max_x = max(x, max_x)
max_y = max(y, max_y)
max_z = max(z, max_z)
return (min_x, min_y, min_z), (max_x, max_y, max_z)
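# Usage sketch: axis-aligned bounds of a small illustrative point cloud.
def _example_bounds():
    points = [(0.0, 1.0, -2.0), (3.0, -1.0, 0.5), (-0.5, 2.0, 0.0)]
    return v3s_min_max(points)  # ((-0.5, -1.0, -2.0), (3.0, 2.0, 0.5))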
#######################################################################################################################
V4ZERO = (0.0, 0.0, 0.0, 0.0)
V4HALF = (0.5, 0.5, 0.5, 0.5)
V4ONE = (1.0, 1.0, 1.0, 1.0)
V4TWO = (2.0, 2.0, 2.0, 2.0)
#######################################################################################################################
def v4create(a, b, c, d):
return a, b, c, d
def v4neg(a):
(a0, a1, a2, a3) = a
return -a0, -a1, -a2, -a3
def v4add(a, b):
(a0, a1, a2, a3) = a
(b0, b1, b2, b3) = b
return ((a0 + b0), (a1 + b1), (a2 + b2), (a3 + b3))
def v4add3(a, b, c):
(a0, a1, a2, a3) = a
(b0, b1, b2, b3) = b
(c0, c1, c2, c3) = c
return ((a0 + b0 + c0), (a1 + b1 + c1), (a2 + b2 + c2), (a3 + b3 + c3))
def v4add4(a, b, c, d):
(a0, a1, a2, a3) = a
(b0, b1, b2, b3) = b
(c0, c1, c2, c3) = c
(d0, d1, d2, d3) = d
return ((a0 + b0 + c0 + d0), (a1 + b1 + c1 + d1), (a2 + b2 + c2 + d2), (a3 + b3 + c3 + d3))
def v4sub(a, b):
(a0, a1, a2, a3) = a
(b0, b1, b2, b3) = b
return ((a0 - b0), (a1 - b1), (a2 - b2), (a3 - b3))
def v4mul(a, b):
(a0, a1, a2, a3) = a
(b0, b1, b2, b3) = b
return ((a0 * b0), (a1 * b1), (a2 * b2), (a3 * b3))
def v4madd(a, b, c):
(a0, a1, a2, a3) = a
(b0, b1, b2, b3) = b
(c0, c1, c2, c3) = c
return (((a0 * b0) + c0), ((a1 * b1) + c1), ((a2 * b2) + c2), ((a3 * b3) + c3))
def v4dot(a, b):
(a0, a1, a2, a3) = a
(b0, b1, b2, b3) = b
return (a0 * b0) + (a1 * b1) + (a2 * b2) + (a3 * b3)
def v4lengthsq(a):
(a0, a1, a2, a3) = a
return (a0 * a0) + (a1 * a1) + (a2 * a2) + (a3 * a3)
def v4length(a):
(a0, a1, a2, a3) = a
return math.sqrt((a0 * a0) + (a1 * a1) + (a2 * a2) + (a3 * a3))
def v4recp(a):
(a0, a1, a2, a3) = a
return (rcp(a0), rcp(a1), rcp(a2), rcp(a3))
def v4normalize(a):
(a0, a1, a2, a3) = a
lsq = ((a0 * a0) + (a1 * a1) + (a2 * a2) + (a3 * a3))
if lsq > 0.0:
lr = 1.0 / math.sqrt(lsq)
return ((a0 * lr), (a1 * lr), (a2 * lr), (a3 * lr))
return V4ZERO
def v4abs(a):
(a0, a1, a2, a3) = a
return (abs(a0), abs(a1), abs(a2), abs(a3))
def v4max(a, b):
(a0, a1, a2, a3) = a
(b0, b1, b2, b3) = b
return (max(a0, b0), max(a1, b1), max(a2, b2), max(a3, b3))
def v4max3(a, b, c):
(a0, a1, a2, a3) = a
(b0, b1, b2, b3) = b
(c0, c1, c2, c3) = c
return (max(max(a0, b0), c0),
max(max(a1, b1), c1),
max(max(a2, b2), c2),
max(max(a3, b3), c3))
def v4max4(a, b, c, d):
(a0, a1, a2, a3) = a
(b0, b1, b2, b3) = b
(c0, c1, c2, c3) = c
(d0, d1, d2, d3) = d
return (max(max(a0, b0), max(c0, d0)),
max(max(a1, b1), max(c1, d1)),
max(max(a2, b2), max(c2, d2)),
max(max(a3, b3), max(c3, d3)))
def v4min(a, b):
(a0, a1, a2, a3) = a
(b0, b1, b2, b3) = b
return (min(a0, b0), min(a1, b1), min(a2, b2), min(a3, b3))
def v4min3(a, b, c):
(a0, a1, a2, a3) = a
(b0, b1, b2, b3) = b
(c0, c1, c2, c3) = c
return (min(min(a0, b0), c0),
min(min(a1, b1), c1),
min(min(a2, b2), c2),
min(min(a3, b3), c3))
def v4min4(a, b, c, d):
(a0, a1, a2, a3) = a
(b0, b1, b2, b3) = b
(c0, c1, c2, c3) = c
(d0, d1, d2, d3) = d
return (min(min(a0, b0), min(c0, d0)),
min(min(a1, b1), min(c1, d1)),
min(min(a2, b2), min(c2, d2)),
min(min(a3, b3), min(c3, d3)))
def v4equal(a, b):
(a0, a1, a2, a3) = a
(b0, b1, b2, b3) = b
return (abs(a0 - b0) <= PRECISION and
abs(a1 - b1) <= PRECISION and
abs(a2 - b2) <= PRECISION and
abs(a3 - b3) <= PRECISION)
def v4mulm44(v, m):
(v0, v1, v2, v3) = v
return v4add4(v4muls(m44right(m), v0),
v4muls(m44up(m), v1),
v4muls(m44at(m), v2),
v4muls(m44pos(m), v3))
def v4mequal(a, b):
(a0, a1, a2, a3) = a
(b0, b1, b2, b3) = b
return ((abs(a0 - b0) <= PRECISION),
(abs(a1 - b1) <= PRECISION),
(abs(a2 - b2) <= PRECISION),
(abs(a3 - b3) <= PRECISION))
def v4mless(a, b):
(a0, a1, a2, a3) = a
(b0, b1, b2, b3) = b
return ((a0 < b0), (a1 < b1), (a2 < b2), (a3 < b3))
def v4mgreater(a, b):
(a0, a1, a2, a3) = a
(b0, b1, b2, b3) = b
return ((a0 > b0), (a1 > b1), (a2 > b2), (a3 > b3))
def v4mgreatereq(a, b):
(a0, a1, a2, a3) = a
(b0, b1, b2, b3) = b
return ((a0 >= b0), (a1 >= b1), (a2 >= b2), (a3 >= b3))
def v4mnot(a):
(a0, a1, a2, a3) = a
return ( not a0, not a1, not a2, not a3)
def v4mor(a, b):
(a0, a1, a2, a3) = a
(b0, b1, b2, b3) = b
return ((a0 or b0), (a1 or b1), (a2 or b2), (a3 or b3))
def v4mand(a, b):
(a0, a1, a2, a3) = a
(b0, b1, b2, b3) = b
return ((a0 and b0), (a1 and b1), (a2 and b2), (a3 and b3))
def v4many(m):
(m0, m1, m2, m3) = m
return m0 or m1 or m2 or m3
def v4mall(m):
(m0, m1, m2, m3) = m
return m0 and m1 and m2 and m3
def v4select(m, a, b):
(m0, m1, m2, m3) = m
(a0, a1, a2, a3) = a
(b0, b1, b2, b3) = b
return (select(m0, a0, b0), select(m1, a1, b1), select(m2, a2, b2), select(m3, a3, b3))
def v4creates(a):
return (a, a, a, a)
def v4maxs(a, b):
(a0, a1, a2, a3) = a
return (max(a0, b), max(a1, b), max(a2, b), max(a3, b))
def v4mins(a, b):
(a0, a1, a2, a3) = a
return (min(a0, b), min(a1, b), min(a2, b), min(a3, b))
def v4adds(a, b):
(a0, a1, a2, a3) = a
return ((a0 + b), (a1 + b), (a2 + b), (a3 + b))
def v4subs(a, b):
(a0, a1, a2, a3) = a
return ((a0 - b), (a1 - b), (a2 - b), (a3 - b))
def v4muls(a, b):
if b == 0:
return V4ZERO
else:
(a0, a1, a2, a3) = a
return ((a0 * b), (a1 * b), (a2 * b), (a3 * b))
def v4equals(a, b):
(a0, a1, a2, a3) = a
return (abs(a0 - b) <= PRECISION and
abs(a1 - b) <= PRECISION and
abs(a2 - b) <= PRECISION and
abs(a3 - b) <= PRECISION)
def v4equalsm(a, b):
(a0, a1, a2, a3) = a
return ((abs(a0 - b) <= PRECISION),
(abs(a1 - b) <= PRECISION),
(abs(a2 - b) <= PRECISION),
(abs(a3 - b) <= PRECISION))
def v4lesssm(a, b):
(a0, a1, a2, a3) = a
return ((a0 < b), (a1 < b), (a2 < b), (a3 < b))
def v4greatersm(a, b):
(a0, a1, a2, a3) = a
return ((a0 > b), (a1 > b), (a2 > b), (a3 > b))
def v4greatereqsm(a, b):
(a0, a1, a2, a3) = a
return ((a0 >= b), (a1 >= b), (a2 >= b), (a3 >= b))
def v4lerp(a, b, t):
(a0, a1, a2, a3) = a
(b0, b1, b2, b3) = b
return ((a0 + (b0 - a0) * t), (a1 + (b1 - a1) * t), (a2 + (b2 - a2) * t), (a3 + (b3 - a3) * t))
#######################################################################################################################
M33IDENTITY = (1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0)
M43IDENTITY = (1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0)
M44IDENTITY = (1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0)
#######################################################################################################################
def m33(r0, r1, r2, u0, u1, u2, a0, a1, a2):
return (r0, r1, r2, u0, u1, u2, a0, a1, a2)
def m33create(r, u, a):
(r0, r1, r2) = r
(u0, u1, u2) = u
(a0, a1, a2) = a
return (r0, r1, r2, u0, u1, u2, a0, a1, a2)
def m33is_identity(m):
(m0, m1, m2, m3, m4, m5, m6, m7, m8) = m
return (m0 == 1 and m1 == 0 and m2 == 0 and
m3 == 0 and m4 == 1 and m5 == 0 and
m6 == 0 and m7 == 0 and m8 == 1)
def m33from_axis_rotation(axis, angle):
s = math.sin(angle)
c = math.cos(angle)
t = 1.0 - c
(axisX, axisY, axisZ) = axis
tx = t * axisX
ty = t * axisY
tz = t * axisZ
sx = s * axisX
sy = s * axisY
sz = s * axisZ
return (tx * axisX + c, tx * axisY + sz, tx * axisZ - sy,
ty * axisX - sz, ty * axisY + c, ty * axisZ + sx,
tz * axisX + sy, tz * axisY - sx, tz * axisZ + c)
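# Usage sketch: a quarter turn about the Z axis maps the X axis onto the Y
# axis (within PRECISION).
def _example_axis_rotation():
    m = m33from_axis_rotation(V3ZAXIS, math.pi * 0.5)
    return v3equal(v3mulm33(V3XAXIS, m), V3YAXIS)  # expected True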
def m33right(m):
return m[:3]
def m33up(m):
return m[3:6]
def m33at(m):
return m[6:]
def m33setright(m, v):
(_, _, _, m3, m4, m5, m6, m7, m8) = m
(v0, v1, v2) = v
return (v0, v1, v2, m3, m4, m5, m6, m7, m8)
def m33setup(m, v):
(m0, m1, m2, _, _, _, m6, m7, m8) = m
(v0, v1, v2) = v
return (m0, m1, m2, v0, v1, v2, m6, m7, m8)
def m33setat(m, v):
(m0, m1, m2, m3, m4, m5, _, _, _) = m
(v0, v1, v2) = v
return (m0, m1, m2, m3, m4, m5, v0, v1, v2)
def m33transpose(m):
(m0, m1, m2, m3, m4, m5, m6, m7, m8) = m
return (m0, m3, m6, m1, m4, m7, m2, m5, m8)
def m33determinant(m):
(m0, m1, m2, m3, m4, m5, m6, m7, m8) = m
return m0 * (m4 * m8 - m5 * m7) + m1 * (m5 * m6 - m3 * m8) + m2 * (m3 * m7 - m4 * m6)
def m33inverse(m):
det = m33determinant(m)
if det == 0.0:
return ( )
else:
(m0, m1, m2, m3, m4, m5, m6, m7, m8) = m
detrecp = 1.0 / det
return (((m4 * m8 + m5 * (-m7)) * detrecp),
((m7 * m2 + m8 * (-m1)) * detrecp),
((m1 * m5 - m2 * m4) * detrecp),
((m5 * m6 + m3 * (-m8)) * detrecp),
((m8 * m0 + m6 * (-m2)) * detrecp),
((m3 * m2 - m0 * m5) * detrecp),
((m3 * m7 + m4 * (-m6)) * detrecp),
((m6 * m1 + m7 * (-m0)) * detrecp),
((m0 * m4 - m3 * m1) * detrecp))
def m33inversetranspose(m):
(m0, m1, m2, m3, m4, m5, m6, m7, m8) = m
det = (m0 * (m4 * m8 - m5 * m7) +
m1 * (m5 * m6 - m3 * m8) +
m2 * (m3 * m7 - m4 * m6))
if det == 0.0:
return ( )
else:
detrecp = 1.0 / det
r0 = ((m4 * m8 + m5 * (-m7)) * detrecp)
r1 = ((m7 * m2 + m8 * (-m1)) * detrecp)
r2 = ((m1 * m5 - m2 * m4) * detrecp)
r3 = ((m5 * m6 + m3 * (-m8)) * detrecp)
r4 = ((m8 * m0 + m6 * (-m2)) * detrecp)
r5 = ((m3 * m2 - m0 * m5) * detrecp)
r6 = ((m3 * m7 + m4 * (-m6)) * detrecp)
r7 = ((m6 * m1 + m7 * (-m0)) * detrecp)
r8 = ((m0 * m4 - m3 * m1) * detrecp)
return (r0, r3, r6,
r1, r4, r7,
r2, r5, r8)
def m33mul(a, b):
(a0, a1, a2, a3, a4, a5, a6, a7, a8) = a
(b0, b1, b2, b3, b4, b5, b6, b7, b8) = b
return ( (b0 * a0 + b3 * a1 + b6 * a2),
(b1 * a0 + b4 * a1 + b7 * a2),
(b2 * a0 + b5 * a1 + b8 * a2),
(b0 * a3 + b3 * a4 + b6 * a5),
(b1 * a3 + b4 * a4 + b7 * a5),
(b2 * a3 + b5 * a4 + b8 * a5),
(b0 * a6 + b3 * a7 + b6 * a8),
(b1 * a6 + b4 * a7 + b7 * a8),
(b2 * a6 + b5 * a7 + b8 * a8) )
def m33mulm43(a, b):
(a0, a1, a2, a3, a4, a5, a6, a7, a8) = a
(b0, b1, b2, b3, b4, b5, b6, b7, b8, b9, b10, b11) = b
return ( (b0 * a0 + b3 * a1 + b6 * a2),
(b1 * a0 + b4 * a1 + b7 * a2),
(b2 * a0 + b5 * a1 + b8 * a2),
(b0 * a3 + b3 * a4 + b6 * a5),
(b1 * a3 + b4 * a4 + b7 * a5),
(b2 * a3 + b5 * a4 + b8 * a5),
(b0 * a6 + b3 * a7 + b6 * a8),
(b1 * a6 + b4 * a7 + b7 * a8),
(b2 * a6 + b5 * a7 + b8 * a8),
b9, b10, b11 )
def m33mulm44(a, b):
(a0, a1, a2, a3, a4, a5, a6, a7, a8) = a
(b0, b1, b2, b3, b4, b5, b6, b7, b8, b9, b10, b11, b12, b13, b14, b15) = b
return ( (b0 * a0 + b4 * a1 + b8 * a2),
(b1 * a0 + b5 * a1 + b9 * a2),
(b2 * a0 + b6 * a1 + b10 * a2),
(b3 * a0 + b7 * a1 + b11 * a2),
(b0 * a3 + b4 * a4 + b8 * a5),
(b1 * a3 + b5 * a4 + b9 * a5),
(b2 * a3 + b6 * a4 + b10 * a5),
(b3 * a3 + b7 * a4 + b11 * a5),
(b0 * a6 + b4 * a7 + b8 * a8),
(b1 * a6 + b5 * a7 + b9 * a8),
(b2 * a6 + b6 * a7 + b10 * a8),
(b3 * a6 + b7 * a7 + b11 * a8),
b12, b13, b14, b15 )
def m33adds(m, s):
return tuple([ m[n] + s for n in range(9) ])
def m33subs(m, s):
return tuple([ m[n] - s for n in range(9) ])
def m33muls(m, s):
return tuple([ m[n] * s for n in range(9) ])
#######################################################################################################################
def m43(r0, r1, r2, u0, u1, u2, a0, a1, a2, p0, p1, p2):
return (r0, r1, r2, u0, u1, u2, a0, a1, a2, p0, p1, p2)
def m43create(r, u, a, p):
(r0, r1, r2) = r
(u0, u1, u2) = u
(a0, a1, a2) = a
(p0, p1, p2) = p
return (r0, r1, r2, u0, u1, u2, a0, a1, a2, p0, p1, p2)
def m43from_m44(m):
return m43create(m[0:3], m[4:7], m[8:11], m[12:15])
def m43is_identity(m):
(m0, m1, m2, m3, m4, m5, m6, m7, m8, m9, m10, m11) = m
return (m0 == 1 and m1 == 0 and m2 == 0 and
m3 == 0 and m4 == 1 and m5 == 0 and
m6 == 0 and m7 == 0 and m8 == 1 and
m9 == 0 and m10 == 0 and m11 == 0)
def m43from_axis_rotation(axis, angle):
s = math.sin(angle)
c = math.cos(angle)
t = 1.0 - c
(axisX, axisY, axisZ) = axis
tx = t * axisX
ty = t * axisY
tz = t * axisZ
sx = s * axisX
sy = s * axisY
sz = s * axisZ
return (tx * axisX + c,
tx * axisY + sz,
tx * axisZ - sy,
ty * axisX - sz,
ty * axisY + c,
ty * axisZ + sx,
tz * axisX + sy,
tz * axisY - sx,
tz * axisZ + c,
0.0,
0.0,
0.0)
def m43right(m):
return m[:3]
def m43up(m):
return m[3:6]
def m43at(m):
return m[6:9]
def m43pos(m):
return m[9:]
def m43setright(m, v):
(_, _, _, m3, m4, m5, m6, m7, m8, m9, m10, m11) = m
(v0, v1, v2) = v
return (v0, v1, v2, m3, m4, m5, m6, m7, m8, m9, m10, m11)
def m43setup(m, v):
(m0, m1, m2, _, _, _, m6, m7, m8, m9, m10, m11) = m
(v0, v1, v2) = v
return (m0, m1, m2, v0, v1, v2, m6, m7, m8, m9, m10, m11)
def m43setat(m, v):
(m0, m1, m2, m3, m4, m5, _, _, _, m9, m10, m11) = m
(v0, v1, v2) = v
return (m0, m1, m2, m3, m4, m5, v0, v1, v2, m9, m10, m11)
def m43setpos(m, v):
(m0, m1, m2, m3, m4, m5, m6, m7, m8, _, _, _) = m
(v0, v1, v2) = v
return (m0, m1, m2, m3, m4, m5, m6, m7, m8, v0, v1, v2)
def m43translate(m, v):
(m0, m1, m2, m3, m4, m5, m6, m7, m8, m9, m10, m11) = m
(v0, v1, v2) = v
return (m0, m1, m2, m3, m4, m5, m6, m7, m8, m9 + v0, m10 + v1, m11 + v2)
def m43inverse_orthonormal(m):
(m0, m1, m2, m3, m4, m5, m6, m7, m8, px, py, pz) = m
return ( m0, m3, m6,
m1, m4, m7,
m2, m5, m8,
-((px * m0) + (py * m1) + (pz * m2)),
-((px * m3) + (py * m4) + (pz * m5)),
-((px * m6) + (py * m7) + (pz * m8)) )
def m43ortho_normalize(m):
right = m43right(m)
up = m43up(m)
at = m43at(m)
pos = m43pos(m)
innerX = v3length(right)
innerY = v3length(up)
innerZ = v3length(at)
right = v3normalize(right)
up = v3normalize(up)
at = v3normalize(at)
if innerX > 0.0:
if innerY > 0.0:
if innerZ > 0.0:
outerX = abs(v3dot(up, at))
outerY = abs(v3dot(at, right))
outerZ = abs(v3dot(right, up))
if outerX < outerY:
if outerX < outerZ:
vpU = up
vpV = at
vpW = right
else:
vpU = right
vpV = up
vpW = at
else:
if outerY < outerZ:
vpU = at
vpV = right
vpW = up
else:
vpU = right
vpV = up
vpW = at
else:
vpU = right
vpV = up
vpW = at
else:
vpU = at
vpV = right
vpW = up
else:
vpU = up
vpV = at
vpW = right
vpW = v3normalize(v3cross(vpV, vpU))
vpV = v3normalize(v3cross(vpU, vpW))
return m43create(right, up, at, pos)
def m43determinant(m):
(m0, m1, m2, m3, m4, m5, m6, m7, m8, _m9, _m10, _m11) = m
return (m0 * (m4 * m8 - m5 * m7) +
m1 * (m5 * m6 - m3 * m8) +
m2 * (m3 * m7 - m4 * m6))
def m43inverse(m):
det = m43determinant(m)
if det == 0.0:
return ( )
else:
(m0, m1, m2, m3, m4, m5, m6, m7, m8, m9, m10, m11) = m
detrecp = 1.0 / det
return (((m4 * m8 + m5 * (-m7)) * detrecp),
((m7 * m2 + m8 * (-m1)) * detrecp),
((m1 * m5 - m2 * m4) * detrecp),
((m5 * m6 + m3 * (-m8)) * detrecp),
((m8 * m0 + m6 * (-m2)) * detrecp),
((m3 * m2 - m0 * m5) * detrecp),
((m3 * m7 + m4 * (-m6)) * detrecp),
((m6 * m1 + m7 * (-m0)) * detrecp),
((m0 * m4 - m3 * m1) * detrecp),
((m3 * (m10 * m8 - m7 * m11) + m4 * (m6 * m11 - m9 * m8) + m5 * (m9 * m7 - m6 * m10)) * detrecp),
((m6 * (m2 * m10 - m1 * m11) + m7 * (m0 * m11 - m9 * m2) + m8 * (m9 * m1 - m0 * m10)) * detrecp),
((m9 * (m2 * m4 - m1 * m5) + m10 * (m0 * m5 - m3 * m2) + m11 * (m3 * m1 - m0 * m4)) * detrecp))
def m43transformn(m, v):
(v0, v1, v2) = v
(m0, m1, m2, m3, m4, m5, m6, m7, m8, _m9, _m10, _m11) = m
return ( (m0 * v0 + m3 * v1 + m6 * v2),
(m1 * v0 + m4 * v1 + m7 * v2),
(m2 * v0 + m5 * v1 + m8 * v2) )
def m43transformp(m, v):
(v0, v1, v2) = v
(m0, m1, m2, m3, m4, m5, m6, m7, m8, m9, m10, m11) = m
return ( (m0 * v0 + m3 * v1 + m6 * v2 + m9),
(m1 * v0 + m4 * v1 + m7 * v2 + m10),
(m2 * v0 + m5 * v1 + m8 * v2 + m11) )
def m43mul(a, b):
(a0, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11) = a
(b0, b1, b2, b3, b4, b5, b6, b7, b8, b9, b10, b11) = b
return ( (b0 * a0 + b3 * a1 + b6 * a2),
(b1 * a0 + b4 * a1 + b7 * a2),
(b2 * a0 + b5 * a1 + b8 * a2),
(b0 * a3 + b3 * a4 + b6 * a5),
(b1 * a3 + b4 * a4 + b7 * a5),
(b2 * a3 + b5 * a4 + b8 * a5),
(b0 * a6 + b3 * a7 + b6 * a8),
(b1 * a6 + b4 * a7 + b7 * a8),
(b2 * a6 + b5 * a7 + b8 * a8),
(b0 * a9 + b3 * a10 + b6 * a11 + b9),
(b1 * a9 + b4 * a10 + b7 * a11 + b10),
(b2 * a9 + b5 * a10 + b8 * a11 + b11) )
def m43mulm44(a, b):
(a0, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11) = a
(b0, b1, b2, b3, b4, b5, b6, b7, b8, b9, b10, b11, b12, b13, b14, b15) = b
return ( (b0 * a0 + b4 * a1 + b8 * a2),
(b1 * a0 + b5 * a1 + b9 * a2),
(b2 * a0 + b6 * a1 + b10 * a2),
(b3 * a0 + b7 * a1 + b11 * a2),
(b0 * a3 + b4 * a4 + b8 * a5),
(b1 * a3 + b5 * a4 + b9 * a5),
(b2 * a3 + b6 * a4 + b10 * a5),
(b3 * a3 + b7 * a4 + b11 * a5),
(b0 * a6 + b4 * a7 + b8 * a8),
(b1 * a6 + b5 * a7 + b9 * a8),
(b2 * a6 + b6 * a7 + b10 * a8),
(b3 * a6 + b7 * a7 + b11 * a8),
(b0 * a9 + b4 * a10 + b8 * a11 + b12),
(b1 * a9 + b5 * a10 + b9 * a11 + b13),
(b2 * a9 + b6 * a10 + b10 * a11 + b14),
(b3 * a9 + b7 * a10 + b11 * a11 + b15) )
def m43transpose(m):
(m0, m1, m2, m3, m4, m5, m6, m7, m8, m9, m10, m11) = m
return (m0, m3, m6, m9,
m1, m4, m7, m10,
m2, m5, m8, m11)
def m43adds(m, s):
return tuple([ m[n] + s for n in range(12) ])
def m43subs(m, s):
return tuple([ m[n] - s for n in range(12) ])
def m43muls(m, s):
return tuple([ m[n] * s for n in range(12) ])
#######################################################################################################################
def m44(r0, r1, r2, r3,
u0, u1, u2, u3,
a0, a1, a2, a3,
p0, p1, p2, p3):
return (r0, r1, r2, r3,
u0, u1, u2, u3,
a0, a1, a2, a3,
p0, p1, p2, p3)
def m44create(r, u, a, p):
(r0, r1, r2, r3) = r
(u0, u1, u2, u3) = u
(a0, a1, a2, a3) = a
(p0, p1, p2, p3) = p
return (r0, r1, r2, r3,
u0, u1, u2, u3,
a0, a1, a2, a3,
p0, p1, p2, p3)
def m44is_identity(m):
(m0, m1, m2, m3, m4, m5, m6, m7, m8, m9, m10, m11, m12, m13, m14, m15) = m
return (m0 == 1 and m1 == 0 and m2 == 0 and m3 == 0 and
m4 == 0 and m5 == 1 and m6 == 0 and m7 == 0 and
m8 == 0 and m9 == 0 and m10 == 1 and m11 == 0 and
m12 == 0 and m13 == 0 and m14 == 0 and m15 == 1)
def m44right(m):
return m[:4]
def m44up(m):
return m[4:8]
def m44at(m):
return m[8:12]
def m44pos(m):
return m[12:]
def m44setright(m, v):
(_, _, _, _, m4, m5, m6, m7, m8, m9, m10, m11, m12, m13, m14, m15) = m
(v0, v1, v2, v3) = v
return (v0, v1, v2, v3, m4, m5, m6, m7, m8, m9, m10, m11, m12, m13, m14, m15)
def m44setup(m, v):
(m0, m1, m2, m3, _, _, _, _, m8, m9, m10, m11, m12, m13, m14, m15) = m
(v0, v1, v2, v3) = v
return (m0, m1, m2, m3, v0, v1, v2, v3, m8, m9, m10, m11, m12, m13, m14, m15)
def m44setat(m, v):
(m0, m1, m2, m3, m4, m5, m6, m7, _, _, _, _, m12, m13, m14, m15) = m
(v0, v1, v2, v3) = v
return (m0, m1, m2, m3, m4, m5, m6, m7, v0, v1, v2, v3, m12, m13, m14, m15)
def m44setpos(m, v):
(m0, m1, m2, m3, m4, m5, m6, m7, m8, m9, m10, m11, _, _, _, _) = m
(v0, v1, v2, v3) = v
return (m0, m1, m2, m3, m4, m5, m6, m7, m8, m9, m10, m11, v0, v1, v2, v3)
def m44translate(m, v):
(m0, m1, m2, m3, m4, m5, m6, m7, m8, m9, m10, m11, m12, m13, m14, m15) = m
(v0, v1, v2, v3) = v
return (m0, m1, m2, m3, m4, m5, m6, m7, m8, m9, m10, m11, m12 + v0, m13 + v1, m14 + v2, m15 + v3)
def m44transformn(m, v):
(v0, v1, v2) = v
return v4add3(v4muls(m44right(m), v0),
v4muls(m44up(m), v1),
v4muls(m44at(m), v2))
def m44transformp(m, v):
(v0, v1, v2) = v
return v4add4(v4muls(m44right(m), v0),
v4muls(m44up(m), v1),
v4muls(m44at(m), v2),
m44pos(m))
def m44mul(a, b):
return m44create(v4mulm44(m44right(a), b),
v4mulm44(m44up(a), b),
v4mulm44(m44at(a), b),
v4mulm44(m44pos(a), b))
def m44transpose(m):
(m0, m1, m2, m3, m4, m5, m6, m7, m8, m9, m10, m11, m12, m13, m14, m15) = m
return (m0, m4, m8, m12,
m1, m5, m9, m13,
m2, m6, m10, m14,
m3, m7, m11, m15)
def m44adds(m, s):
return tuple([ m[n] + s for n in range(16) ])
def m44subs(m, s):
return tuple([ m[n] - s for n in range(16) ])
def m44muls(m, s):
return tuple([ m[n] * s for n in range(16) ])
#######################################################################################################################
def is_visible_box(center, halfDimensions, vpm):
(c0, c1, c2) = center
(h0, h1, h2) = halfDimensions
(m0, m1, m2, m3, m4, m5, m6, m7, m8, m9, m10, m11, m12, m13, m14, m15) = vpm
i0 = (m0 * h0)
i1 = (m1 * h0)
i2 = (m2 * h0)
i3 = (m3 * h0)
j0 = (m4 * h1)
j1 = (m5 * h1)
j2 = (m6 * h1)
j3 = (m7 * h1)
k0 = (m8 * h2)
k1 = (m9 * h2)
k2 = (m10 * h2)
k3 = (m11 * h2)
t0 = (m0 * c0 + m4 * c1 + m8 * c2 + m12)
t1 = (m1 * c0 + m5 * c1 + m9 * c2 + m13)
t2 = (m2 * c0 + m6 * c1 + m10 * c2 + m14)
t3 = (m3 * c0 + m7 * c1 + m11 * c2 + m15)
return not (((t0 - t3) > (abs(i0 - i3) + abs(j0 - j3) + abs(k0 - k3))) or
((t0 + t3) < -(abs(i0 + i3) + abs(j0 + j3) + abs(k0 + k3))) or
((t1 - t3) > (abs(i1 - i3) + abs(j1 - j3) + abs(k1 - k3))) or
((t1 + t3) < -(abs(i1 + i3) + abs(j1 + j3) + abs(k1 + k3))) or
((t2 - t3) > (abs(i2 - i3) + abs(j2 - j3) + abs(k2 - k3))) or
((t2 + t3) < -(abs(i2 + i3) + abs(j2 + j3) + abs(k2 + k3))) or
#((t3 - t3) > (abs(i3 - i3) + abs(j3 - j3) + abs(k3 - k3))) or
((t3 + t3) < -(abs(i3 + i3) + abs(j3 + j3) + abs(k3 + k3))))
def is_visible_box_origin(halfDimensions, vpm):
(h0, h1, h2) = halfDimensions
(m0, m1, m2, m3, m4, m5, m6, m7, m8, m9, m10, m11, m12, m13, m14, m15) = vpm
i0 = (m0 * h0)
i1 = (m1 * h0)
i2 = (m2 * h0)
i3 = (m3 * h0)
j0 = (m4 * h1)
j1 = (m5 * h1)
j2 = (m6 * h1)
j3 = (m7 * h1)
k0 = (m8 * h2)
k1 = (m9 * h2)
k2 = (m10 * h2)
k3 = (m11 * h2)
t0 = m12
t1 = m13
t2 = m14
t3 = m15
return not (((t0 - t3) > (abs(i0 - i3) + abs(j0 - j3) + abs(k0 - k3))) or
((t0 + t3) < -(abs(i0 + i3) + abs(j0 + j3) + abs(k0 + k3))) or
((t1 - t3) > (abs(i1 - i3) + abs(j1 - j3) + abs(k1 - k3))) or
((t1 + t3) < -(abs(i1 + i3) + abs(j1 + j3) + abs(k1 + k3))) or
((t2 - t3) > (abs(i2 - i3) + abs(j2 - j3) + abs(k2 - k3))) or
((t2 + t3) < -(abs(i2 + i3) + abs(j2 + j3) + abs(k2 + k3))) or
#((t3 - t3) > (abs(i3 - i3) + abs(j3 - j3) + abs(k3 - k3))) or
((t3 + t3) < -(abs(i3 + i3) + abs(j3 + j3) + abs(k3 + k3))))
def is_visible_sphere(center, radius, vpm):
(c0, c1, c2) = center
(m0, m1, m2, m3, m4, m5, m6, m7, m8, m9, m10, m11, m12, m13, m14, m15) = vpm
i0 = m0
i1 = m1
i2 = m2
i3 = m3
j0 = m4
j1 = m5
j2 = m6
j3 = m7
k0 = m8
k1 = m9
k2 = m10
k3 = m11
t0 = (m0 * c0 + m4 * c1 + m8 * c2 + m12)
t1 = (m1 * c0 + m5 * c1 + m9 * c2 + m13)
t2 = (m2 * c0 + m6 * c1 + m10 * c2 + m14)
t3 = (m3 * c0 + m7 * c1 + m11 * c2 + m15)
nradius = -radius
return not (((t0 - t3) > radius * (abs(i0 - i3) + abs(j0 - j3) + abs(k0 - k3))) or
((t0 + t3) < nradius * (abs(i0 + i3) + abs(j0 + j3) + abs(k0 + k3))) or
((t1 - t3) > radius * (abs(i1 - i3) + abs(j1 - j3) + abs(k1 - k3))) or
((t1 + t3) < nradius * (abs(i1 + i3) + abs(j1 + j3) + abs(k1 + k3))) or
((t2 - t3) > radius * (abs(i2 - i3) + abs(j2 - j3) + abs(k2 - k3))) or
((t2 + t3) < nradius * (abs(i2 + i3) + abs(j2 + j3) + abs(k2 + k3))) or
#((t3 - t3) > radius * (abs(i3 - i3) + abs(j3 - j3) + abs(k3 - k3))) or
((t3 + t3) < nradius * (abs(i3 + i3) + abs(j3 + j3) + abs(k3 + k3))))
def is_visible_sphere_origin(radius, vpm):
(m0, m1, m2, m3, m4, m5, m6, m7, m8, m9, m10, m11, m12, m13, m14, m15) = vpm
i0 = m0
i1 = m1
i2 = m2
i3 = m3
j0 = m4
j1 = m5
j2 = m6
j3 = m7
k0 = m8
k1 = m9
k2 = m10
k3 = m11
t0 = m12
t1 = m13
t2 = m14
t3 = m15
nradius = -radius
return not (((t0 - t3) > radius * (abs(i0 - i3) + abs(j0 - j3) + abs(k0 - k3))) or
((t0 + t3) < nradius * (abs(i0 + i3) + abs(j0 + j3) + abs(k0 + k3))) or
((t1 - t3) > radius * (abs(i1 - i3) + abs(j1 - j3) + abs(k1 - k3))) or
((t1 + t3) < nradius * (abs(i1 + i3) + abs(j1 + j3) + abs(k1 + k3))) or
((t2 - t3) > radius * (abs(i2 - i3) + abs(j2 - j3) + abs(k2 - k3))) or
((t2 + t3) < nradius * (abs(i2 + i3) + abs(j2 + j3) + abs(k2 + k3))) or
#((t3 - t3) > radius * (abs(i3 - i3) + abs(j3 - j3) + abs(k3 - k3))) or
((t3 + t3) < nradius * (abs(i3 + i3) + abs(j3 + j3) + abs(k3 + k3))))
def is_visible_sphere_unit(vpm):
(m0, m1, m2, m3, m4, m5, m6, m7, m8, m9, m10, m11, m12, m13, m14, m15) = vpm
i0 = m0
i1 = m1
i2 = m2
i3 = m3
j0 = m4
j1 = m5
j2 = m6
j3 = m7
k0 = m8
k1 = m9
k2 = m10
k3 = m11
t0 = m12
t1 = m13
t2 = m14
t3 = m15
return not (((t0 - t3) > (abs(i0 - i3) + abs(j0 - j3) + abs(k0 - k3))) or
((t0 + t3) < -(abs(i0 + i3) + abs(j0 + j3) + abs(k0 + k3))) or
((t1 - t3) > (abs(i1 - i3) + abs(j1 - j3) + abs(k1 - k3))) or
((t1 + t3) < -(abs(i1 + i3) + abs(j1 + j3) + abs(k1 + k3))) or
((t2 - t3) > (abs(i2 - i3) + abs(j2 - j3) + abs(k2 - k3))) or
((t2 + t3) < -(abs(i2 + i3) + abs(j2 + j3) + abs(k2 + k3))) or
#((t3 - t3) > (abs(i3 - i3) + abs(j3 - j3) + abs(k3 - k3))) or
((t3 + t3) < -(abs(i3 + i3) + abs(j3 + j3) + abs(k3 + k3))))
def transform_box(center, halfExtents, matrix):
(m0, m1, m2, m3, m4, m5, m6, m7, m8, m9, m10, m11) = matrix
(c0, c1, c2) = center
(h0, h1, h2) = halfExtents
return { center : ((m0 * c0 + m3 * c1 + m6 * c2 + m9),
(m1 * c0 + m4 * c1 + m7 * c2 + m10),
(m2 * c0 + m5 * c1 + m8 * c2 + m11)),
halfExtents : ((abs(m0) * h0 + abs(m3) * h1 + abs(m6) * h2),
(abs(m1) * h0 + abs(m4) * h1 + abs(m7) * h2),
(abs(m2) * h0 + abs(m5) * h1 + abs(m8) * h2)) }
def plane_normalize(plane):
(a, b, c, d) = plane
lsq = ((a * a) + (b * b) + (c * c))
if lsq > 0.0:
lr = 1.0 / math.sqrt(lsq)
return ((a * lr), (b * lr), (c * lr), (d * lr))
return V4ZERO
#######################################################################################################################
def quat(qx, qy, qz, qw):
return (qx, qy, qz, qw)
def quatis_similar(q1, q2):
# this compares for similar rotations not raw data
(_, _, _, w1) = q1
(_, _, _, w2) = q2
if w1 * w2 < 0.0:
# quaternions in opposing hemispheres, negate one
q1 = v4mul((-1, -1, -1, -1), q1)
mag_sqrd = v4lengthsq(v4sub(q1, q2))
epsilon_sqrd = (PRECISION * PRECISION)
return mag_sqrd < epsilon_sqrd
def quatlength(q):
return v4length(q)
def quatdot(q1, q2):
return v4dot(q1, q2)
# Note quaternion multiplication is the opposite way around from our matrix multiplication
def quatmul(q1, q2):
(v2, w2) = (q1[:3], q1[3])
(v1, w1) = (q2[:3], q2[3])
imag = v3add3(v3muls(v2, w1), v3muls(v1, w2), v3cross(v2, v1))
real = (w1 * w2) - v3dot(v1, v2)
(i0, i1, i2) = imag
return (i0, i1, i2, real)
def quatnormalize(q):
norme = math.sqrt(quatdot(q, q))
if norme == 0.0:
return V4ZERO
else:
recip = 1.0 / norme
return v4muls(q, recip)
def quatconjugate(q):
(x, y, z, w) = q
return (-x, -y, -z, w)
def quatlerp(q1, q2, t):
if v4dot(q1, q2) > 0.0:
return v4add(v4muls(v4sub(q2, q1), t), q1)
else:
return v4add(v4muls(v4sub(q2, q1), -t), q1)
def quatslerp(q1, q2, t):
cosom = quatdot(q1, q2)
if cosom < 0.0:
q1 = v4muls(q1, -1.0)
cosom = -cosom
if cosom > math.cos(math.pi / 180.0): # use a lerp for angles <= 1 degree
return quatnormalize(quatlerp(q1, q2, t))
omega = math.acos(cosom)
sin_omega = math.sin(omega)
q1 = v4muls(q1, math.sin((1.0-t)*omega)/sin_omega)
return v4add(q1, v4muls(q2, math.sin(t*omega)/sin_omega))
def quatfrom_axis_rotation(axis, angle):
omega = 0.5 * angle
s = math.sin(omega)
c = math.cos(omega)
(a0, a1, a2) = axis
q = (a0 * s, a1 * s, a2 * s, c)
return quatnormalize(q)
def quatto_axis_rotation(q):
angle = math.acos(q[3]) * 2.0
sin_sqrd = 1.0 - q[3] * q[3]
if sin_sqrd < PRECISION:
# we can return any axis
return ( (1.0, 0.0, 0.0), angle )
else:
scale = 1.0 / math.sqrt(sin_sqrd)
axis = v3muls(q[:3], scale)
return ( axis, angle )
def quattransformv(q, v):
(qx, qy, qz, qw) = q
qimaginary = (qx, qy, qz)
s = (qw * qw) - v3dot(qimaginary, qimaginary)
r = v3muls(v, s)
s = v3dot(qimaginary, v)
r = v3add(r, v3muls(qimaginary, s + s))
r = v3add(r, v3muls(v3cross(qimaginary, v), qw + qw))
return r
def quatto_m43(q):
"""Convert a quaternion to a matrix43."""
(q0, q1, q2, q3) = q
xx = 2.0 * q0 * q0
yy = 2.0 * q1 * q1
zz = 2.0 * q2 * q2
xy = 2.0 * q0 * q1
zw = 2.0 * q2 * q3
xz = 2.0 * q0 * q2
yw = 2.0 * q1 * q3
yz = 2.0 * q1 * q2
xw = 2.0 * q0 * q3
return m43(1.0 - yy - zz, xy - zw, xz + yw,
xy + zw, 1.0 - xx - zz, yz - xw,
xz - yw, yz + xw, 1.0 - xx - yy,
0.0, 0.0, 0.0)
def quatfrom_m33(m):
"""Convert the top of an m33 matrix into a quaternion."""
(m0, m1, m2, m3, m4, m5, m6, m7, m8) = m
trace = m0 + m4 + m8 + 1
if trace > PRECISION:
w = math.sqrt(trace) / 2
x = (m5 - m7) / (4*w)
y = (m6 - m2) / (4*w)
z = (m1 - m3) / (4*w)
else:
if (m0 > m4) and (m0 > m8):
s = math.sqrt( 1.0 + m0 - m4 - m8 ) * 2 # S=4*qx
w = (m5 - m7) / s
x = 0.25 * s
y = (m3 + m1) / s
z = (m6 + m2) / s
elif m4 > m8:
s = math.sqrt( 1.0 + m4 - m0 - m8 ) * 2 # S=4*qy
w = (m6 - m2) / s
x = (m3 + m1) / s
y = 0.25 * s
z = (m7 + m5) / s
else:
s = math.sqrt( 1.0 + m8 - m0 - m4 ) * 2 # S=4*qz
w = (m1 - m3) / s
x = (m6 + m2) / s
y = (m7 + m5) / s
z = 0.25 * s
return quatnormalize((-x, -y, -z, w))
def quatfrom_m43(m):
""" Convert the top of an m33 matrix into a quaternion."""
(m0, m1, m2, m3, m4, m5, m6, m7, m8, _, _, _) = m
trace = m0 + m4 + m8 + 1
if trace > PRECISION:
w = math.sqrt(trace) / 2
x = (m5 - m7) / (4*w)
y = (m6 - m2) / (4*w)
z = (m1 - m3) / (4*w)
else:
if (m0 > m4) and (m0 > m8):
s = math.sqrt( 1.0 + m0 - m4 - m8 ) * 2 # S=4*qx
w = (m5 - m7) / s
x = 0.25 * s
y = (m3 + m1) / s
z = (m6 + m2) / s
elif m4 > m8:
s = math.sqrt( 1.0 + m4 - m0 - m8 ) * 2 # S=4*qy
w = (m6 - m2) / s
x = (m3 + m1) / s
y = 0.25 * s
z = (m7 + m5) / s
else:
s = math.sqrt( 1.0 + m8 - m0 - m4 ) * 2 # S=4*qz
w = (m1 - m3) / s
x = (m6 + m2) / s
y = (m7 + m5) / s
z = 0.25 * s
return quatnormalize((-x, -y, -z, w))
def quatpos(qx, qy, qz, qw, px, py, pz):
return ( (qx, qy, qz, qw), (px, py, pz) )
def quatpostransformn(qp, n):
(q, _) = qp
return quattransformv(q, n)
def quatpostransformp(qp, p):
(q, v) = qp
rotated_p = quattransformv(q, p)
return v3add(rotated_p, v)
# Note quaternion multiplication is the opposite way around from our matrix multiplication
def quatposmul(qp1, qp2):
(q1, _) = qp1
(q2, v2) = qp2
qr = quatmul(q1, q2)
pr = quatpostransformp(qp1, v2)
return (qr, pr)
def quat_from_qx_qy_qz(qx, qy, qz):
"""Calculate the w field of a quaternion."""
qw = 1.0 - ((qx * qx) + (qy * qy) + (qz * qz))
if qw < 0.0:
qw = 0.0
else:
qw = -math.sqrt(qw)
return (qx, qy, qz, qw)
#######################################################################################################################
| 29.510466 | 119 | 0.41533 |
7947a9d7ee0e8b5967a3445a80aaee4fab32fa3b | 7,309 | py | Python | OSSER/commands/DnsReconCommand.py | Selora/OSST | f89dbf2d9baa75dfbabad93cbf72be2f01bc76bd | [
"BSD-2-Clause"
] | null | null | null | OSSER/commands/DnsReconCommand.py | Selora/OSST | f89dbf2d9baa75dfbabad93cbf72be2f01bc76bd | [
"BSD-2-Clause"
] | null | null | null | OSSER/commands/DnsReconCommand.py | Selora/OSST | f89dbf2d9baa75dfbabad93cbf72be2f01bc76bd | [
"BSD-2-Clause"
] | null | null | null | import sys
from typing import Iterable
import OSSER.core.helpers as helpers
from OSSER.commands.AbstractCommand import AbstractCommand
from OSSER.commands.DnsQueryCommand import DnsQueryCommand
from OSSER.modules.DnsQuery import DnsQuery
class DnsReconCommand(AbstractCommand):
"""
This class is used to perform a two-way lookup of "IP <-> FQDN" until the results are stabilized:
tl;dr:
dig -x ip_list >> fqdn_list
dig -t A fqdn_list >> ip_list
dig -x ip_list >> fqdn_list
(. . .)
-> Stops when results are stable (No new IP or FQDN from an iteration)
Moar complex explanation:
Tx: task that could be in parallel
T1: Do a forward query (A record) for all FQDN in scope
T2: Do a reverse query (PTR record) for all IPs in scope
After T1 and T2 are finished,
While ( T1.resulting_ips != T2.queried_ips OR
T1.queried_fqdns are not a superset of T2.resulting_fqdns )
T2 += T1.resulting_ips
T1 += T2.resulting_fqdns
Restart T1,T2
That way, if a PTR query yields a new fqdn, we feed it to an A query, and vice-versa
Note: The "while" is performed with recursion and composite pattern, see self.execute()
If it's not stable after an iteration, this command will create a new command as a child
This would catch
-misconfigured PTR records (ex. IP changed, but PTR still points to old IP)
-IPs not deemed in scope (but that should be)
-FQDNs not deemed in scope (but that should be)
"""
class Args(AbstractCommand.AbstractArgs):
def __init__(self, ip_addresses: Iterable[str], fully_qualified_domain_names: Iterable[str]):
self.ip_addresses = set(ip_addresses)
# Getting all zones as well: admin.test.com -> (admin.test.com, test.com)
# The sum part flattens the list (each call returns a list of possible domains,
# otherwise it's a list of lists)
self.fully_qualified_domain_names = set(sum([helpers.expand_fqdn(x)
for x in fully_qualified_domain_names], []))
def __init__(self,
dns_query_module_args: DnsQuery.Args = None,
command_args: Args = None):
super().__init__()
self.command_args = command_args
self.dns_query_module_args = dns_query_module_args
# Generate sub-commands for every IPs and FQDNs
for ip in self.command_args.ip_addresses:
ip_reverse_query = DnsQueryCommand(dns_query_module_args=self.dns_query_module_args,
command_args=DnsQueryCommand.Args(record_type='PTR', dns_query=ip))
self.add(ip_reverse_query)
for fqdn in self.command_args.fully_qualified_domain_names:
dns_query = DnsQueryCommand(dns_query_module_args=self.dns_query_module_args,
command_args=DnsQueryCommand.Args(record_type='A', dns_query=fqdn))
self.add(dns_query)
@AbstractCommand.composite_command
def execute(self):
"""
This one is a recursive mindfuck, but it's awesome!
First, we execute all the children ourselves.
If the condition to stop is not met, we build a new composite command, add it to the children, and return.
The decorator will take care of executing the "new" children we just appended.
That way, we have complete traceability over what commands were executed in order to find something.
Ex. google.com -> 8.8.8.8 was discovered in the third pass.
We can find that by getting its parents.
(Which IP led to google.com previously?)
:return:
"""
# print("Executing the {} children of {}".format(len([x for x in self.children() if not x.executed]), self))
for child in self.children():
child.execute()
# Get all the FQDN-> IPs
ip_results = set([res.address for cmd in self.children()
if cmd.command_args.record_type == 'A' # Get all fqdn -> ip cmd
and cmd.results # If results are not empty
for res in cmd.results])
# The [:-1] is to skip the last '.' and get a usable fqdn (test.fqdn.com.)
fqdn_results = [res.to_text()[:-1] for cmd in self.children()
if cmd.command_args.record_type == 'PTR'
and cmd.results
for res in cmd.results]
# Get all possible new FQDN
# 'sum' is to flatten list of list (results of expand_fqdn is list)
fqdn_results = set(sum([helpers.expand_fqdn(x) for x in fqdn_results], []))
# Getting all previously queried IPs using command args
previous_ip = set([cmd.command_args.dns_query for cmd in self.children()
if cmd.command_args.record_type == 'PTR'])
# Getting all previously queried FQDNs using command args
previous_fqdn = set([cmd.command_args.dns_query for cmd in self.children()
if cmd.command_args.record_type == 'A'])
if ip_results != previous_ip or not \
previous_fqdn.issuperset(fqdn_results):
new_command = DnsReconCommand(
command_args=DnsReconCommand.Args(ip_addresses=ip_results.difference(previous_ip),
fully_qualified_domain_names=fqdn_results.difference(previous_fqdn)),
dns_query_module_args=self.dns_query_module_args)
# Adding already executed children so we can keep a track of previous commands run
for c in self.children():
new_command.add(c)
self.add(new_command)
@property
def results(self):
"""
Get a dict mapping each queried name/IP to its DNS answers by crawling depth-first through the children
:return:
"""
results = {}
for c in self.children():
# We know the only two types of commands in this composite are either
# another DnsReconCommand or some DnsQueryCommand
# This is sketchy...
if type(c) is DnsReconCommand:
results.update(c.results)
elif c.results:
results[c.command_args.dns_query] = c.results
return results
def print_children(command: AbstractCommand):
"""
Depth-first print (print leaves up to the top)
"""
for c in command.children():
print_children(c)
print(command, command.command_args)
def main(args):
command_args = DnsReconCommand.Args(ip_addresses=args.ip_addresses,
fully_qualified_domain_names=args.fully_qualified_domain_names)
dns_args = DnsQuery.Args()
cmd = DnsReconCommand(dns_query_module_args=dns_args, command_args=command_args)
cmd.execute()
if cmd.executed:
print("Executed successfully!")
print("Results:")
print(cmd.results)
print_children(cmd)
run_args = [
]
if __name__ == "__main__":
args = lambda: None
#args.record_type = sys.argv[1]
#args.dns_query = sys.argv[2]
args.ip_addresses = run_args[0]
args.fully_qualified_domain_names = run_args[1]
main(args)
| 38.267016 | 123 | 0.621973 |
7947a9e4f1ddda6d633ea4df0d167332095e2a53 | 989 | py | Python | publiapp_api/migrations/0010_auto_20200925_0300.py | KevinPercy/PubliAppAPI | 262fe66eaf2ac1d895681b0e611f6d7633e2353e | [
"MIT"
] | null | null | null | publiapp_api/migrations/0010_auto_20200925_0300.py | KevinPercy/PubliAppAPI | 262fe66eaf2ac1d895681b0e611f6d7633e2353e | [
"MIT"
] | 6 | 2020-06-30T02:38:05.000Z | 2021-09-22T19:21:35.000Z | publiapp_api/migrations/0010_auto_20200925_0300.py | KevinPercy/PubliAppAPI | 262fe66eaf2ac1d895681b0e611f6d7633e2353e | [
"MIT"
] | null | null | null | # Generated by Django 3.0.7 on 2020-09-25 03:00
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('publiapp_api', '0009_auto_20200922_0303'),
]
operations = [
migrations.CreateModel(
name='Ubigeo',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('codigo_ubigeo', models.CharField(max_length=6)),
('departamento', models.CharField(max_length=50)),
('provincia', models.CharField(max_length=50)),
('distrito', models.CharField(max_length=50)),
],
),
migrations.AlterField(
model_name='precio',
name='anuncio',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='precios', to='publiapp_api.Anuncio'),
),
]
| 32.966667 | 132 | 0.600607 |
7947aaf20b4092c63a32f342ac9c102e75de7e63 | 11,848 | py | Python | venv/lib/python3.6/site-packages/ansible_collections/fortinet/fortios/plugins/modules/fortios_webfilter_ips_urlfilter_cache_setting.py | usegalaxy-no/usegalaxy | 75dad095769fe918eb39677f2c887e681a747f3a | [
"MIT"
] | 1 | 2020-01-22T13:11:23.000Z | 2020-01-22T13:11:23.000Z | venv/lib/python3.6/site-packages/ansible_collections/fortinet/fortios/plugins/modules/fortios_webfilter_ips_urlfilter_cache_setting.py | usegalaxy-no/usegalaxy | 75dad095769fe918eb39677f2c887e681a747f3a | [
"MIT"
] | 12 | 2020-02-21T07:24:52.000Z | 2020-04-14T09:54:32.000Z | venv/lib/python3.6/site-packages/ansible_collections/fortinet/fortios/plugins/modules/fortios_webfilter_ips_urlfilter_cache_setting.py | usegalaxy-no/usegalaxy | 75dad095769fe918eb39677f2c887e681a747f3a | [
"MIT"
] | null | null | null | #!/usr/bin/python
from __future__ import (absolute_import, division, print_function)
# Copyright 2019-2020 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fortios_webfilter_ips_urlfilter_cache_setting
short_description: Configure IPS URL filter cache settings in Fortinet's FortiOS and FortiGate.
description:
- This module is able to configure a FortiGate or FortiOS (FOS) device by allowing the
user to set and modify webfilter feature and ips_urlfilter_cache_setting category.
Examples include all parameters and values need to be adjusted to datasources before usage.
Tested with FOS v6.0.0
version_added: "2.10"
author:
- Link Zheng (@chillancezen)
- Jie Xue (@JieX19)
- Hongbin Lu (@fgtdev-hblu)
- Frank Shen (@frankshen01)
- Miguel Angel Munoz (@mamunozgonzalez)
- Nicolas Thomas (@thomnico)
notes:
- Legacy fortiosapi has been deprecated, httpapi is the preferred way to run playbooks
requirements:
- ansible>=2.9.0
options:
access_token:
description:
- Token-based authentication.
Generated from GUI of Fortigate.
type: str
required: false
enable_log:
description:
- Enable/Disable logging for task.
type: bool
required: false
default: false
vdom:
description:
- Virtual domain, among those defined previously. A vdom is a
virtual instance of the FortiGate that can be configured and
used as a different unit.
type: str
default: root
member_path:
type: str
description:
- Member attribute path to operate on.
- Delimited by a slash character if there are more than one attribute.
- Parameter marked with member_path is legitimate for doing member operation.
member_state:
type: str
description:
- Add or delete a member under specified attribute path.
- When member_state is specified, the state option is ignored.
choices:
- present
- absent
webfilter_ips_urlfilter_cache_setting:
description:
- Configure IPS URL filter cache settings.
default: null
type: dict
suboptions:
dns_retry_interval:
description:
- Retry interval. Refresh DNS faster than TTL to capture multiple IPs for hosts. 0 means use DNS server"s TTL only.
type: int
extended_ttl:
description:
- Extend time to live beyond reported by DNS. 0 means use DNS server"s TTL
type: int
'''
EXAMPLES = '''
- collections:
- fortinet.fortios
connection: httpapi
hosts: fortigate01
vars:
ansible_httpapi_port: 443
ansible_httpapi_use_ssl: true
ansible_httpapi_validate_certs: false
vdom: root
tasks:
- name: fortios_webfilter_ips_urlfilter_cache_setting
fortios_webfilter_ips_urlfilter_cache_setting:
vdom: root
webfilter_ips_urlfilter_cache_setting:
dns_retry_interval: 0
extended_ttl: 0
'''
RETURN = '''
build:
description: Build number of the fortigate image
returned: always
type: str
sample: '1547'
http_method:
description: Last method used to provision the content into FortiGate
returned: always
type: str
sample: 'PUT'
http_status:
description: Last result given by FortiGate on last operation applied
returned: always
type: str
sample: "200"
mkey:
description: Master key (id) used in the last call to FortiGate
returned: success
type: str
sample: "id"
name:
description: Name of the table used to fulfill the request
returned: always
type: str
sample: "urlfilter"
path:
description: Path of the table used to fulfill the request
returned: always
type: str
sample: "webfilter"
revision:
description: Internal revision number
returned: always
type: str
sample: "17.0.2.10658"
serial:
description: Serial number of the unit
returned: always
type: str
sample: "FGVMEVYYQT3AB5352"
status:
description: Indication of the operation's result
returned: always
type: str
sample: "success"
vdom:
description: Virtual domain used
returned: always
type: str
sample: "root"
version:
description: Version of the FortiGate
returned: always
type: str
sample: "v5.6.3"
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
from ansible_collections.fortinet.fortios.plugins.module_utils.fortios.fortios import FortiOSHandler
from ansible_collections.fortinet.fortios.plugins.module_utils.fortios.fortios import check_legacy_fortiosapi
from ansible_collections.fortinet.fortios.plugins.module_utils.fortios.fortios import schema_to_module_spec
from ansible_collections.fortinet.fortios.plugins.module_utils.fortios.fortios import check_schema_versioning
from ansible_collections.fortinet.fortios.plugins.module_utils.fortimanager.common import FAIL_SOCKET_MSG
def filter_webfilter_ips_urlfilter_cache_setting_data(json):
option_list = ['dns_retry_interval', 'extended_ttl']
dictionary = {}
for attribute in option_list:
if attribute in json and json[attribute] is not None:
dictionary[attribute] = json[attribute]
return dictionary
def underscore_to_hyphen(data):
if isinstance(data, list):
for i, elem in enumerate(data):
data[i] = underscore_to_hyphen(elem)
elif isinstance(data, dict):
new_data = {}
for k, v in data.items():
new_data[k.replace('_', '-')] = underscore_to_hyphen(v)
data = new_data
return data
def webfilter_ips_urlfilter_cache_setting(data, fos):
vdom = data['vdom']
webfilter_ips_urlfilter_cache_setting_data = data['webfilter_ips_urlfilter_cache_setting']
filtered_data = underscore_to_hyphen(filter_webfilter_ips_urlfilter_cache_setting_data(webfilter_ips_urlfilter_cache_setting_data))
return fos.set('webfilter',
'ips-urlfilter-cache-setting',
data=filtered_data,
vdom=vdom)
def is_successful_status(resp):
return 'status' in resp and resp['status'] == 'success' or \
'http_status' in resp and resp['http_status'] == 200 or \
'http_method' in resp and resp['http_method'] == "DELETE" and resp['http_status'] == 404
def fortios_webfilter(data, fos):
fos.do_member_operation('webfilter_ips_urlfilter_cache_setting')
if data['webfilter_ips_urlfilter_cache_setting']:
resp = webfilter_ips_urlfilter_cache_setting(data, fos)
else:
fos._module.fail_json(msg='missing task body: %s' % ('webfilter_ips_urlfilter_cache_setting'))
return not is_successful_status(resp), \
is_successful_status(resp) and \
(resp['revision_changed'] if 'revision_changed' in resp else True), \
resp
versioned_schema = {
"type": "dict",
"children": {
"dns_retry_interval": {
"type": "integer",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"extended_ttl": {
"type": "integer",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
},
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
def main():
module_spec = schema_to_module_spec(versioned_schema)
mkeyname = None
fields = {
"access_token": {"required": False, "type": "str", "no_log": True},
"enable_log": {"required": False, "type": bool},
"vdom": {"required": False, "type": "str", "default": "root"},
"member_path": {"required": False, "type": "str"},
"member_state": {
"type": "str",
"required": False,
"choices": ["present", "absent"]
},
"webfilter_ips_urlfilter_cache_setting": {
"required": False, "type": "dict", "default": None,
"options": {
}
}
}
for attribute_name in module_spec['options']:
fields["webfilter_ips_urlfilter_cache_setting"]['options'][attribute_name] = module_spec['options'][attribute_name]
if mkeyname and mkeyname == attribute_name:
fields["webfilter_ips_urlfilter_cache_setting"]['options'][attribute_name]['required'] = True
check_legacy_fortiosapi()
module = AnsibleModule(argument_spec=fields,
supports_check_mode=False)
versions_check_result = None
if module._socket_path:
connection = Connection(module._socket_path)
if 'access_token' in module.params:
connection.set_option('access_token', module.params['access_token'])
if 'enable_log' in module.params:
connection.set_option('enable_log', module.params['enable_log'])
else:
connection.set_option('enable_log', False)
fos = FortiOSHandler(connection, module, mkeyname)
versions_check_result = check_schema_versioning(fos, versioned_schema, "webfilter_ips_urlfilter_cache_setting")
is_error, has_changed, result = fortios_webfilter(module.params, fos)
else:
module.fail_json(**FAIL_SOCKET_MSG)
if versions_check_result and versions_check_result['matched'] is False:
module.warn("Ansible has detected version mismatch between FortOS system and your playbook, see more details by specifying option -vvv")
if not is_error:
if versions_check_result and versions_check_result['matched'] is False:
module.exit_json(changed=has_changed, version_check_warning=versions_check_result, meta=result)
else:
module.exit_json(changed=has_changed, meta=result)
else:
if versions_check_result and versions_check_result['matched'] is False:
module.fail_json(msg="Error in repo", version_check_warning=versions_check_result, meta=result)
else:
module.fail_json(msg="Error in repo", meta=result)
if __name__ == '__main__':
main()
| 33.280899 | 144 | 0.64188 |
7947ac1482ad4f136f54b3921d258e07ee149d09 | 2,295 | py | Python | app/base/models.py | asad70/reddit-analysis | 32a6c7ceaa314bdc9c723cebe0413c422ae4b414 | [
"MIT"
] | null | null | null | app/base/models.py | asad70/reddit-analysis | 32a6c7ceaa314bdc9c723cebe0413c422ae4b414 | [
"MIT"
] | null | null | null | app/base/models.py | asad70/reddit-analysis | 32a6c7ceaa314bdc9c723cebe0413c422ae4b414 | [
"MIT"
] | null | null | null | # -*- encoding: utf-8 -*-
"""
Copyright (c) 2019 - present AppSeed.us
"""
from flask_login import UserMixin
from sqlalchemy import Binary, Column, Integer, String
from sqlalchemy.sql.sqltypes import Date, PickleType
import time
from app import db, login_manager
import datetime
from app.base.util import hash_pass
class User(db.Model, UserMixin):
__tablename__ = 'User'
id = Column(Integer, primary_key=True)
username = Column(String, unique=True)
email = Column(String, unique=True)
password = Column(Binary)
def __init__(self, **kwargs):
for property, value in kwargs.items():
# depending on whether value is an iterable or not, we must
# unpack it's value (when **kwargs is request.form, some values
# will be a 1-element list)
if hasattr(value, '__iter__') and not isinstance(value, str):
# the ,= unpack of a singleton fails PEP8 (travis flake8 test)
value = value[0]
if property == 'password':
value = hash_pass( value ) # we need bytes here (not plain str)
setattr(self, property, value)
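# Illustrative use (presumably how the registration route calls it):
#   User(**request.form)
# where each form value may arrive as a 1-element list and the password
# is hashed before being stored.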
def __repr__(self):
return str(self.username)
@login_manager.user_loader
def user_loader(id):
return User.query.filter_by(id=id).first()
@login_manager.request_loader
def request_loader(request):
username = request.form.get('username')
user = User.query.filter_by(username=username).first()
return user if user else None
class Picks(db.Model):
__tablename__ = 'Picks'
id = Column(Integer, primary_key=True)
pick = Column(PickleType)
direction = Column(String, unique=True)
bearish = Column(Integer)
neutral = Column(Integer)
bullish = Column(Integer)
total = Column(Integer)
time = Column(Date, default=datetime.datetime.utcnow)  # pass the callable so the date is evaluated per insert, not at import time
#def __init__(self, **kwargs):
# for property, value in kwargs.items():
# if hasattr(value, '__iter__') and not isinstance(value, str):
# value = value[0]
# if property == 'time':
# value = time.ctime() # we need bytes here (not plain str)
# setattr(self, property, value)
def __repr__(self):
return "class picks id is " + str(self.id) | 31.875 | 79 | 0.639651 |
7947ac891ebb152e8dacf521c57570a7fb08d4cc | 1,772 | py | Python | hyppo/independence/tests/test_utils.py | bstraus1/hyppo | cf0ebed8fdb23078447a06ceb4eed4aca92a287c | [
"Apache-2.0"
] | 116 | 2020-02-28T10:29:22.000Z | 2022-03-22T12:19:39.000Z | hyppo/independence/tests/test_utils.py | bstraus1/hyppo | cf0ebed8fdb23078447a06ceb4eed4aca92a287c | [
"Apache-2.0"
] | 253 | 2020-02-17T16:18:56.000Z | 2022-03-30T16:55:02.000Z | hyppo/independence/tests/test_utils.py | bstraus1/hyppo | cf0ebed8fdb23078447a06ceb4eed4aca92a287c | [
"Apache-2.0"
] | 27 | 2020-03-02T21:07:41.000Z | 2022-03-08T08:33:23.000Z | import numpy as np
import pytest
from numpy.testing import assert_raises
from sklearn.ensemble import RandomForestRegressor
from ...tools.common import _check_kernmat
from .._utils import _CheckInputs, sim_matrix
class TestErrorWarn:
"""Tests errors and warnings."""
def test_error_notndarray(self):
# raises error if x or y is not a ndarray
x = np.arange(20)
y = [5] * 20
assert_raises(TypeError, _CheckInputs(x, y))
assert_raises(TypeError, _CheckInputs(y, x))
def test_error_shape(self):
# raises error if number of samples different (n)
x = np.arange(100).reshape(25, 4)
y = x.reshape(10, 10)
assert_raises(ValueError, _CheckInputs(x, y))
def test_error_lowsamples(self):
# raises error if samples are low (< 3)
x = np.arange(3)
y = np.arange(3)
assert_raises(ValueError, _CheckInputs(x, y))
def test_error_nans(self):
# raises error if inputs contain NaNs
x = np.arange(20, dtype=float)
x[0] = np.nan
assert_raises(ValueError, _CheckInputs(x, x))
y = np.arange(20)
assert_raises(ValueError, _CheckInputs(x, y))
@pytest.mark.parametrize(
"reps", [-1, "1"] # reps is negative # reps is not integer
)
def test_error_reps(self, reps):
# raises error if reps is negative
x = np.arange(20)
assert_raises(ValueError, _CheckInputs(x, x, reps=reps))
class TestHelper:
def test_simmat(self):
# raises error if x or y is not a ndarray
clf = RandomForestRegressor()
x = np.arange(20).reshape(-1, 1)
y = np.arange(5, 25)
clf.fit(x, y)
kernx = sim_matrix(clf, x)
_check_kernmat(kernx, kernx)
| 30.033898 | 68 | 0.62754 |
7947acd6adc1366fd08219acb60188e0c62e123a | 1,327 | py | Python | module2-sql-for-analysis/module2.py | BoWarburton/DS-Unit-3-Sprint-2-SQL-and-Databases | 847fa627f74133e529ac47014167f34915f8df66 | [
"MIT"
] | null | null | null | module2-sql-for-analysis/module2.py | BoWarburton/DS-Unit-3-Sprint-2-SQL-and-Databases | 847fa627f74133e529ac47014167f34915f8df66 | [
"MIT"
] | null | null | null | module2-sql-for-analysis/module2.py | BoWarburton/DS-Unit-3-Sprint-2-SQL-and-Databases | 847fa627f74133e529ac47014167f34915f8df66 | [
"MIT"
] | null | null | null | #!/usr/env/bin Python
import sqlite3
import psycopg2
dbname = 'elvrajlo'
user = 'elvrajlo'
password = 'Mq4dGr1-4InqYz1NxinWY93fKQ0YMj1'
host = "otto.db.elephantsql.com"
pg_conn = psycopg2.connect(dbname=dbname, user=user, password=password, host=host)
pg_curs = pg_conn.cursor()
create_test_table_statement = """
CREATE TABLE IF NOT EXISTS test_table (
id SERIAL PRIMARY KEY,
name varchar(40) NOT NULL,
data JSONB
);
"""
create_character_table = """
CREATE TABLE IF NOT EXISTS charactercreator_character (
character_id SERIAL PRIMARY KEY,
name VARCHAR(30),
level INT,
exp INT,
hp INT,
strength INT,
intelligence INT,
dexterity INT,
wisdom INT
);
"""
show_tables = """
SELECT
*
FROM
pg_catalog.pg_tables
WHERE
schemaname != 'pg_catalog'
AND
schemaname != 'information_schema';
"""
pg_curs.execute(create_character_table)
pg_conn.commit()
# pg_curs.execute(show_tables)
# print(pg_curs.fetchall())
sl_conn = sqlite3.connect('rpg_db.sqlite3')
sl_curs = sl_conn.cursor()
get_characters = 'SELECT * FROM charactercreator_character;'
characters = sl_curs.execute(get_characters).fetchall()
print(len(characters))
characters_insert = """
INSERT INTO charactercreator_character
(name, level, exp, hp, strength, intelligence, dexterity, wisdom)
VALUES """ + str(characters[0][1:]) + ";"
| 21.063492 | 82 | 0.738508 |
7947ad2294abc24ed39d73013f66bbd4e9912428 | 50,294 | py | Python | language/question_answering/bert_joint/run_nq.py | naveenjafer/language | efc5183855a7aeecac3e81fe12ce60fc824f8ca7 | [
"Apache-2.0"
] | 2 | 2020-09-30T11:52:51.000Z | 2020-09-30T12:07:41.000Z | language/question_answering/bert_joint/run_nq.py | naveenjafer/language | efc5183855a7aeecac3e81fe12ce60fc824f8ca7 | [
"Apache-2.0"
] | null | null | null | language/question_answering/bert_joint/run_nq.py | naveenjafer/language | efc5183855a7aeecac3e81fe12ce60fc824f8ca7 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""BERT-joint baseline for NQ v1.0."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import gzip
import json
import os
import random
import re
import enum
from bert import modeling
from bert import optimization
from bert import tokenization
import numpy as np
import tensorflow.compat.v1 as tf
from tensorflow.contrib import cluster_resolver as contrib_cluster_resolver
from tensorflow.contrib import tpu as contrib_tpu
flags = tf.flags
FLAGS = flags.FLAGS
flags.DEFINE_string(
"bert_config_file", None,
"The config json file corresponding to the pre-trained BERT model. "
"This specifies the model architecture.")
flags.DEFINE_string("vocab_file", None,
"The vocabulary file that the BERT model was trained on.")
flags.DEFINE_string(
"output_dir", None,
"The output directory where the model checkpoints will be written.")
flags.DEFINE_string("train_precomputed_file", None,
"Precomputed tf records for training.")
flags.DEFINE_integer("train_num_precomputed", None,
"Number of precomputed tf records for training.")
flags.DEFINE_string(
"predict_file", None,
"NQ json for predictions. E.g., dev-v1.1.jsonl.gz or test-v1.1.jsonl.gz")
flags.DEFINE_string(
"output_prediction_file", None,
"Where to print predictions in NQ prediction format, to be passed to"
"natural_questions.nq_eval.")
flags.DEFINE_string(
"init_checkpoint", None,
"Initial checkpoint (usually from a pre-trained BERT model).")
flags.DEFINE_bool(
"do_lower_case", True,
"Whether to lower case the input text. Should be True for uncased "
"models and False for cased models.")
flags.DEFINE_integer(
"max_seq_length", 384,
"The maximum total input sequence length after WordPiece tokenization. "
"Sequences longer than this will be truncated, and sequences shorter "
"than this will be padded.")
flags.DEFINE_integer(
"doc_stride", 128,
"When splitting up a long document into chunks, how much stride to "
"take between chunks.")
flags.DEFINE_integer(
"max_query_length", 64,
"The maximum number of tokens for the question. Questions longer than "
"this will be truncated to this length.")
flags.DEFINE_bool("do_train", False, "Whether to run training.")
flags.DEFINE_bool("do_predict", False, "Whether to run eval on the dev set.")
flags.DEFINE_integer("train_batch_size", 32, "Total batch size for training.")
flags.DEFINE_integer("predict_batch_size", 8,
"Total batch size for predictions.")
flags.DEFINE_float("learning_rate", 5e-5, "The initial learning rate for Adam.")
flags.DEFINE_float("num_train_epochs", 3.0,
"Total number of training epochs to perform.")
flags.DEFINE_float(
"warmup_proportion", 0.1,
"Proportion of training to perform linear learning rate warmup for. "
"E.g., 0.1 = 10% of training.")
flags.DEFINE_integer("save_checkpoints_steps", 1000,
"How often to save the model checkpoint.")
flags.DEFINE_integer("iterations_per_loop", 1000,
"How many steps to make in each estimator call.")
flags.DEFINE_integer(
"n_best_size", 20,
"The total number of n-best predictions to generate in the "
"nbest_predictions.json output file.")
flags.DEFINE_integer(
"max_answer_length", 30,
"The maximum length of an answer that can be generated. This is needed "
"because the start and end predictions are not conditioned on one another.")
flags.DEFINE_float(
"include_unknowns", -1.0,
"If positive, probability of including answers of type `UNKNOWN`.")
flags.DEFINE_bool("use_tpu", False, "Whether to use TPU or GPU/CPU.")
tf.flags.DEFINE_string(
"tpu_name", None,
"The Cloud TPU to use for training. This should be either the name "
"used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 "
"url.")
tf.flags.DEFINE_string(
"tpu_zone", None,
"[Optional] GCE zone where the Cloud TPU is located in. If not "
"specified, we will attempt to automatically detect the GCE project from "
"metadata.")
tf.flags.DEFINE_string(
"gcp_project", None,
"[Optional] Project name for the Cloud TPU-enabled project. If not "
"specified, we will attempt to automatically detect the GCE project from "
"metadata.")
tf.flags.DEFINE_string("master", None, "[Optional] TensorFlow master URL.")
flags.DEFINE_integer(
"num_tpu_cores", 8,
"Only used if `use_tpu` is True. Total number of TPU cores to use.")
flags.DEFINE_bool(
"verbose_logging", False,
"If true, all of the warnings related to data processing will be printed. "
"A number of warnings are expected for a normal NQ evaluation.")
flags.DEFINE_boolean(
"skip_nested_contexts", True,
"Completely ignore context that are not top level nodes in the page.")
flags.DEFINE_integer("task_id", 0,
"Train and dev shard to read from and write to.")
flags.DEFINE_integer("max_contexts", 48,
"Maximum number of contexts to output for an example.")
flags.DEFINE_integer(
"max_position", 50,
"Maximum context position for which to generate special tokens.")
TextSpan = collections.namedtuple("TextSpan", "token_positions text")
class AnswerType(enum.IntEnum):
"""Type of NQ answer."""
UNKNOWN = 0
YES = 1
NO = 2
SHORT = 3
LONG = 4
class Answer(collections.namedtuple("Answer", ["type", "text", "offset"])):
"""Answer record.
An Answer contains the type of the answer and possibly the text (for
long) as well as the offset (for extractive).
"""
def __new__(cls, type_, text=None, offset=None):
return super(Answer, cls).__new__(cls, type_, text, offset)
class NqExample(object):
"""A single training/test example."""
def __init__(self,
example_id,
qas_id,
questions,
doc_tokens,
doc_tokens_map=None,
answer=None,
start_position=None,
end_position=None):
self.example_id = example_id
self.qas_id = qas_id
self.questions = questions
self.doc_tokens = doc_tokens
self.doc_tokens_map = doc_tokens_map
self.answer = answer
self.start_position = start_position
self.end_position = end_position
def has_long_answer(a):
return (a["long_answer"]["start_token"] >= 0 and
a["long_answer"]["end_token"] >= 0)
def should_skip_context(e, idx):
if (FLAGS.skip_nested_contexts and
not e["long_answer_candidates"][idx]["top_level"]):
return True
elif not get_candidate_text(e, idx).text.strip():
# Skip empty contexts.
return True
else:
return False
def get_first_annotation(e):
"""Returns the first short or long answer in the example.
Args:
e: (dict) annotated example.
Returns:
annotation: (dict) selected annotation
annotated_idx: (int) index of the first annotated candidate.
annotated_sa: (tuple) char offset of the start and end token
of the short answer. The end token is exclusive.
"""
positive_annotations = sorted(
[a for a in e["annotations"] if has_long_answer(a)],
key=lambda a: a["long_answer"]["candidate_index"])
for a in positive_annotations:
if a["short_answers"]:
idx = a["long_answer"]["candidate_index"]
start_token = a["short_answers"][0]["start_token"]
end_token = a["short_answers"][-1]["end_token"]
return a, idx, (token_to_char_offset(e, idx, start_token),
token_to_char_offset(e, idx, end_token) - 1)
for a in positive_annotations:
idx = a["long_answer"]["candidate_index"]
return a, idx, (-1, -1)
return None, -1, (-1, -1)
def get_text_span(example, span):
"""Returns the text in the example's document in the given token span."""
token_positions = []
tokens = []
for i in range(span["start_token"], span["end_token"]):
t = example["document_tokens"][i]
if not t["html_token"]:
token_positions.append(i)
token = t["token"].replace(" ", "")
tokens.append(token)
return TextSpan(token_positions, " ".join(tokens))
def token_to_char_offset(e, candidate_idx, token_idx):
"""Converts a token index to the char offset within the candidate."""
c = e["long_answer_candidates"][candidate_idx]
char_offset = 0
for i in range(c["start_token"], token_idx):
t = e["document_tokens"][i]
if not t["html_token"]:
token = t["token"].replace(" ", "")
char_offset += len(token) + 1
return char_offset
def get_candidate_type(e, idx):
"""Returns the candidate's type: Table, Paragraph, List or Other."""
c = e["long_answer_candidates"][idx]
first_token = e["document_tokens"][c["start_token"]]["token"]
if first_token == "<Table>":
return "Table"
elif first_token == "<P>":
return "Paragraph"
elif first_token in ("<Ul>", "<Dl>", "<Ol>"):
return "List"
elif first_token in ("<Tr>", "<Li>", "<Dd>", "<Dt>"):
return "Other"
else:
tf.logging.warning("Unknoww candidate type found: %s", first_token)
return "Other"
def add_candidate_types_and_positions(e):
"""Adds type and position info to each candidate in the document."""
counts = collections.defaultdict(int)
for idx, c in candidates_iter(e):
context_type = get_candidate_type(e, idx)
if counts[context_type] < FLAGS.max_position:
counts[context_type] += 1
c["type_and_position"] = "[%s=%d]" % (context_type, counts[context_type])
def get_candidate_type_and_position(e, idx):
"""Returns type and position info for the candidate at the given index."""
if idx == -1:
return "[NoLongAnswer]"
else:
return e["long_answer_candidates"][idx]["type_and_position"]
def get_candidate_text(e, idx):
"""Returns a text representation of the candidate at the given index."""
# No candidate at this index.
if idx < 0 or idx >= len(e["long_answer_candidates"]):
return TextSpan([], "")
# This returns an actual candidate.
return get_text_span(e, e["long_answer_candidates"][idx])
def candidates_iter(e):
"""Yield's the candidates that should not be skipped in an example."""
for idx, c in enumerate(e["long_answer_candidates"]):
if should_skip_context(e, idx):
continue
yield idx, c
def create_example_from_jsonl(line):
"""Creates an NQ example from a given line of JSON."""
e = json.loads(line, object_pairs_hook=collections.OrderedDict)
add_candidate_types_and_positions(e)
annotation, annotated_idx, annotated_sa = get_first_annotation(e)
# annotated_idx: index of the first annotated context, -1 if null.
# annotated_sa: short answer start and end char offsets, (-1, -1) if null.
question = {"input_text": e["question_text"]}
answer = {
"candidate_id": annotated_idx,
"span_text": "",
"span_start": -1,
"span_end": -1,
"input_text": "long",
}
# Yes/no answers are added in the input text.
if annotation is not None:
assert annotation["yes_no_answer"] in ("YES", "NO", "NONE")
if annotation["yes_no_answer"] in ("YES", "NO"):
answer["input_text"] = annotation["yes_no_answer"].lower()
# Add a short answer if one was found.
if annotated_sa != (-1, -1):
answer["input_text"] = "short"
span_text = get_candidate_text(e, annotated_idx).text
answer["span_text"] = span_text[annotated_sa[0]:annotated_sa[1]]
answer["span_start"] = annotated_sa[0]
answer["span_end"] = annotated_sa[1]
expected_answer_text = get_text_span(
e, {
"start_token": annotation["short_answers"][0]["start_token"],
"end_token": annotation["short_answers"][-1]["end_token"],
}).text
assert expected_answer_text == answer["span_text"], (expected_answer_text,
answer["span_text"])
# Add a long answer if one was found.
elif annotation and annotation["long_answer"]["candidate_index"] >= 0:
answer["span_text"] = get_candidate_text(e, annotated_idx).text
answer["span_start"] = 0
answer["span_end"] = len(answer["span_text"])
context_idxs = [-1]
context_list = [{"id": -1, "type": get_candidate_type_and_position(e, -1)}]
context_list[-1]["text_map"], context_list[-1]["text"] = (
get_candidate_text(e, -1))
for idx, _ in candidates_iter(e):
context = {"id": idx, "type": get_candidate_type_and_position(e, idx)}
context["text_map"], context["text"] = get_candidate_text(e, idx)
context_idxs.append(idx)
context_list.append(context)
if len(context_list) >= FLAGS.max_contexts:
break
# Assemble example.
example = {
"name": e["document_title"],
"id": str(e["example_id"]),
"questions": [question],
"answers": [answer],
"has_correct_context": annotated_idx in context_idxs
}
single_map = []
single_context = []
offset = 0
for context in context_list:
single_map.extend([-1, -1])
single_context.append("[ContextId=%d] %s" %
(context["id"], context["type"]))
offset += len(single_context[-1]) + 1
if context["id"] == annotated_idx:
answer["span_start"] += offset
answer["span_end"] += offset
# Many contexts are empty once the HTML tags have been stripped, so we
# want to skip those.
if context["text"]:
single_map.extend(context["text_map"])
single_context.append(context["text"])
offset += len(single_context[-1]) + 1
example["contexts"] = " ".join(single_context)
example["contexts_map"] = single_map
if annotated_idx in context_idxs:
expected = example["contexts"][answer["span_start"]:answer["span_end"]]
# This is a sanity check to ensure that the calculated start and end
# indices match the reported span text. If this assert fails, it is likely
# a bug in the data preparation code above.
assert expected == answer["span_text"], (expected, answer["span_text"])
return example
def make_nq_answer(contexts, answer):
"""Makes an Answer object following NQ conventions.
Args:
contexts: string containing the context
answer: dictionary with `span_start` and `input_text` fields
Returns:
an Answer object. If the Answer type is YES or NO or LONG, the text
of the answer is the long answer. If the answer type is UNKNOWN, the text of
the answer is empty.
"""
start = answer["span_start"]
end = answer["span_end"]
input_text = answer["input_text"]
if (answer["candidate_id"] == -1 or start >= len(contexts) or
end > len(contexts)):
answer_type = AnswerType.UNKNOWN
start = 0
end = 1
elif input_text.lower() == "yes":
answer_type = AnswerType.YES
elif input_text.lower() == "no":
answer_type = AnswerType.NO
elif input_text.lower() == "long":
answer_type = AnswerType.LONG
else:
answer_type = AnswerType.SHORT
return Answer(answer_type, text=contexts[start:end], offset=start)
def read_nq_entry(entry, is_training):
"""Converts a NQ entry into a list of NqExamples."""
def is_whitespace(c):
return c in " \t\r\n" or ord(c) == 0x202F
examples = []
contexts_id = entry["id"]
contexts = entry["contexts"]
doc_tokens = []
char_to_word_offset = []
prev_is_whitespace = True
for c in contexts:
if is_whitespace(c):
prev_is_whitespace = True
else:
if prev_is_whitespace:
doc_tokens.append(c)
else:
doc_tokens[-1] += c
prev_is_whitespace = False
char_to_word_offset.append(len(doc_tokens) - 1)
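# Illustrative example (added for clarity; the input string is hypothetical):
# for contexts = "a bc" this loop yields doc_tokens = ["a", "bc"] and
# char_to_word_offset = [0, 0, 1, 1], i.e. every character (including the
# whitespace) maps to the index of the word token it belongs to or follows.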
questions = []
for i, question in enumerate(entry["questions"]):
qas_id = "{}".format(contexts_id)
question_text = question["input_text"]
start_position = None
end_position = None
answer = None
if is_training:
answer_dict = entry["answers"][i]
answer = make_nq_answer(contexts, answer_dict)
# For now, only handle extractive, yes, and no.
if answer is None or answer.offset is None:
continue
start_position = char_to_word_offset[answer.offset]
end_position = char_to_word_offset[answer.offset + len(answer.text) - 1]
# Only add answers where the text can be exactly recovered from the
# document. If this CAN'T happen it's likely due to weird Unicode
# stuff so we will just skip the example.
#
# Note that this means for training mode, every example is NOT
# guaranteed to be preserved.
actual_text = " ".join(doc_tokens[start_position:(end_position + 1)])
cleaned_answer_text = " ".join(
tokenization.whitespace_tokenize(answer.text))
if actual_text.find(cleaned_answer_text) == -1:
tf.logging.warning("Could not find answer: '%s' vs. '%s'", actual_text,
cleaned_answer_text)
continue
questions.append(question_text)
example = NqExample(
example_id=int(contexts_id),
qas_id=qas_id,
questions=questions[:],
doc_tokens=doc_tokens,
doc_tokens_map=entry.get("contexts_map", None),
answer=answer,
start_position=start_position,
end_position=end_position)
examples.append(example)
return examples
def convert_examples_to_features(examples, tokenizer, is_training, output_fn):
"""Converts a list of NqExamples into InputFeatures."""
num_spans_to_ids = collections.defaultdict(list)
for example in examples:
example_index = example.example_id
features = convert_single_example(example, tokenizer, is_training)
num_spans_to_ids[len(features)].append(example.qas_id)
for feature in features:
feature.example_index = example_index
feature.unique_id = feature.example_index + feature.doc_span_index
output_fn(feature)
return num_spans_to_ids
def convert_single_example(example, tokenizer, is_training):
"""Converts a single NqExample into a list of InputFeatures."""
tok_to_orig_index = []
orig_to_tok_index = []
all_doc_tokens = []
features = []
for (i, token) in enumerate(example.doc_tokens):
orig_to_tok_index.append(len(all_doc_tokens))
sub_tokens = tokenize(tokenizer, token)
tok_to_orig_index.extend([i] * len(sub_tokens))
all_doc_tokens.extend(sub_tokens)
# `tok_to_orig_index` maps wordpiece indices to indices of whitespace
# tokenized word tokens in the contexts. The word tokens might themselves
# correspond to word tokens in a larger document, with the mapping given
# by `doc_tokens_map`.
if example.doc_tokens_map:
tok_to_orig_index = [
example.doc_tokens_map[index] for index in tok_to_orig_index
]
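# Illustrative example (added for clarity; the wordpiece splits are
# hypothetical): if example.doc_tokens = ["hello", "worlds"] and the
# tokenizer splits "worlds" into ["world", "##s"], then all_doc_tokens =
# ["hello", "world", "##s"], orig_to_tok_index = [0, 1] and
# tok_to_orig_index = [0, 1, 1] (before the optional doc_tokens_map remap).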
# QUERY
query_tokens = []
query_tokens.append("[Q]")
query_tokens.extend(tokenize(tokenizer, example.questions[-1]))
if len(query_tokens) > FLAGS.max_query_length:
query_tokens = query_tokens[-FLAGS.max_query_length:]
# ANSWER
tok_start_position = 0
tok_end_position = 0
if is_training:
tok_start_position = orig_to_tok_index[example.start_position]
if example.end_position < len(example.doc_tokens) - 1:
tok_end_position = orig_to_tok_index[example.end_position + 1] - 1
else:
tok_end_position = len(all_doc_tokens) - 1
# The -3 accounts for [CLS], [SEP] and [SEP]
max_tokens_for_doc = FLAGS.max_seq_length - len(query_tokens) - 3
# We can have documents that are longer than the maximum sequence length.
# To deal with this we do a sliding window approach, where we take chunks
# of up to our max length with a stride of `doc_stride`.
_DocSpan = collections.namedtuple( # pylint: disable=invalid-name
"DocSpan", ["start", "length"])
doc_spans = []
start_offset = 0
while start_offset < len(all_doc_tokens):
length = len(all_doc_tokens) - start_offset
length = min(length, max_tokens_for_doc)
doc_spans.append(_DocSpan(start=start_offset, length=length))
if start_offset + length == len(all_doc_tokens):
break
start_offset += min(length, FLAGS.doc_stride)
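# Worked example (added for clarity; numbers are illustrative): with
# len(all_doc_tokens) = 300, max_tokens_for_doc = 200 and doc_stride = 128,
# the loop produces DocSpan(start=0, length=200) and
# DocSpan(start=128, length=172); tokens 128-199 appear in both spans, which
# is the intended overlap of the sliding window.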
for (doc_span_index, doc_span) in enumerate(doc_spans):
tokens = []
token_to_orig_map = {}
segment_ids = []
tokens.append("[CLS]")
segment_ids.append(0)
tokens.extend(query_tokens)
segment_ids.extend([0] * len(query_tokens))
tokens.append("[SEP]")
segment_ids.append(0)
for i in range(doc_span.length):
split_token_index = doc_span.start + i
token_to_orig_map[len(tokens)] = tok_to_orig_index[split_token_index]
tokens.append(all_doc_tokens[split_token_index])
segment_ids.append(1)
tokens.append("[SEP]")
segment_ids.append(1)
assert len(tokens) == len(segment_ids)
input_ids = tokenizer.convert_tokens_to_ids(tokens)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
input_mask = [1] * len(input_ids)
# Zero-pad up to the sequence length.
padding = [0] * (FLAGS.max_seq_length - len(input_ids))
input_ids.extend(padding)
input_mask.extend(padding)
segment_ids.extend(padding)
assert len(input_ids) == FLAGS.max_seq_length
assert len(input_mask) == FLAGS.max_seq_length
assert len(segment_ids) == FLAGS.max_seq_length
start_position = None
end_position = None
answer_type = None
answer_text = ""
if is_training:
doc_start = doc_span.start
doc_end = doc_span.start + doc_span.length - 1
# For training, if our document chunk does not contain an annotation
# we throw it out, since there is nothing to predict.
contains_an_annotation = (
tok_start_position >= doc_start and tok_end_position <= doc_end)
if ((not contains_an_annotation) or
example.answer.type == AnswerType.UNKNOWN):
# If an example has unknown answer type or does not contain the answer
# span, then we only include it with probability --include_unknowns.
# When we include an example with unknown answer type, we set the first
# token of the passage to be the annotated short span.
if (FLAGS.include_unknowns < 0 or
random.random() > FLAGS.include_unknowns):
continue
start_position = 0
end_position = 0
answer_type = AnswerType.UNKNOWN
else:
doc_offset = len(query_tokens) + 2
start_position = tok_start_position - doc_start + doc_offset
end_position = tok_end_position - doc_start + doc_offset
answer_type = example.answer.type
answer_text = " ".join(tokens[start_position:(end_position + 1)])
feature = InputFeatures(
unique_id=-1,
example_index=-1,
doc_span_index=doc_span_index,
token_to_orig_map=token_to_orig_map,
input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids,
start_position=start_position,
end_position=end_position,
answer_text=answer_text,
answer_type=answer_type)
features.append(feature)
return features
# A special token in NQ is made of non-space chars enclosed in square brackets.
_SPECIAL_TOKENS_RE = re.compile(r"^\[[^ ]*\]$", re.UNICODE)
def tokenize(tokenizer, text, apply_basic_tokenization=False):
"""Tokenizes text, optionally looking up special tokens separately.
Args:
tokenizer: a tokenizer from bert.tokenization.FullTokenizer
text: text to tokenize
apply_basic_tokenization: If True, apply the basic tokenization. If False,
apply the full tokenization (basic + wordpiece).
Returns:
tokenized text.
A special token is any text enclosed in square brackets that contains no
spaces, so we separate those out and look them up in the vocabulary before
doing the actual tokenization.
"""
tokenize_fn = tokenizer.tokenize
if apply_basic_tokenization:
tokenize_fn = tokenizer.basic_tokenizer.tokenize
tokens = []
for token in text.split(" "):
if _SPECIAL_TOKENS_RE.match(token):
if token in tokenizer.vocab:
tokens.append(token)
else:
tokens.append(tokenizer.wordpiece_tokenizer.unk_token)
else:
tokens.extend(tokenize_fn(token))
return tokens
class CreateTFExampleFn(object):
"""Functor for creating NQ tf.Examples."""
def __init__(self, is_training):
self.is_training = is_training
self.tokenizer = tokenization.FullTokenizer(
vocab_file=FLAGS.vocab_file, do_lower_case=FLAGS.do_lower_case)
def process(self, example):
"""Coverts an NQ example in a list of serialized tf examples."""
nq_examples = read_nq_entry(example, self.is_training)
input_features = []
for nq_example in nq_examples:
input_features.extend(
convert_single_example(nq_example, self.tokenizer, self.is_training))
for input_feature in input_features:
input_feature.example_index = int(example["id"])
input_feature.unique_id = (
input_feature.example_index + input_feature.doc_span_index)
def create_int_feature(values):
return tf.train.Feature(
int64_list=tf.train.Int64List(value=list(values)))
features = collections.OrderedDict()
features["unique_ids"] = create_int_feature([input_feature.unique_id])
features["input_ids"] = create_int_feature(input_feature.input_ids)
features["input_mask"] = create_int_feature(input_feature.input_mask)
features["segment_ids"] = create_int_feature(input_feature.segment_ids)
if self.is_training:
features["start_positions"] = create_int_feature(
[input_feature.start_position])
features["end_positions"] = create_int_feature(
[input_feature.end_position])
features["answer_types"] = create_int_feature(
[input_feature.answer_type])
else:
token_map = [-1] * len(input_feature.input_ids)
for k, v in input_feature.token_to_orig_map.items():
token_map[k] = v
features["token_map"] = create_int_feature(token_map)
yield tf.train.Example(features=tf.train.Features(
feature=features)).SerializeToString()
class InputFeatures(object):
"""A single set of features of data."""
def __init__(self,
unique_id,
example_index,
doc_span_index,
token_to_orig_map,
input_ids,
input_mask,
segment_ids,
start_position=None,
end_position=None,
answer_text="",
answer_type=AnswerType.SHORT):
self.unique_id = unique_id
self.example_index = example_index
self.doc_span_index = doc_span_index
self.token_to_orig_map = token_to_orig_map
self.input_ids = input_ids
self.input_mask = input_mask
self.segment_ids = segment_ids
self.start_position = start_position
self.end_position = end_position
self.answer_text = answer_text
self.answer_type = answer_type
def read_nq_examples(input_file, is_training):
"""Read a NQ json file into a list of NqExample."""
input_paths = tf.gfile.Glob(input_file)
input_data = []
def _open(path):
if path.endswith(".gz"):
return gzip.GzipFile(fileobj=tf.gfile.Open(path, "rb"))
else:
return tf.gfile.Open(path, "r")
for path in input_paths:
tf.logging.info("Reading: %s", path)
with _open(path) as input_file:
for line in input_file:
input_data.append(create_example_from_jsonl(line))
examples = []
for entry in input_data:
examples.extend(read_nq_entry(entry, is_training))
return examples
def create_model(bert_config, is_training, input_ids, input_mask, segment_ids,
use_one_hot_embeddings):
"""Creates a classification model."""
model = modeling.BertModel(
config=bert_config,
is_training=is_training,
input_ids=input_ids,
input_mask=input_mask,
token_type_ids=segment_ids,
use_one_hot_embeddings=use_one_hot_embeddings)
# Get the logits for the start and end predictions.
final_hidden = model.get_sequence_output()
final_hidden_shape = modeling.get_shape_list(final_hidden, expected_rank=3)
batch_size = final_hidden_shape[0]
seq_length = final_hidden_shape[1]
hidden_size = final_hidden_shape[2]
output_weights = tf.get_variable(
"cls/nq/output_weights", [2, hidden_size],
initializer=tf.truncated_normal_initializer(stddev=0.02))
output_bias = tf.get_variable(
"cls/nq/output_bias", [2], initializer=tf.zeros_initializer())
final_hidden_matrix = tf.reshape(final_hidden,
[batch_size * seq_length, hidden_size])
logits = tf.matmul(final_hidden_matrix, output_weights, transpose_b=True)
logits = tf.nn.bias_add(logits, output_bias)
logits = tf.reshape(logits, [batch_size, seq_length, 2])
logits = tf.transpose(logits, [2, 0, 1])
unstacked_logits = tf.unstack(logits, axis=0)
(start_logits, end_logits) = (unstacked_logits[0], unstacked_logits[1])
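# Shape summary (comment added for clarity): final_hidden is
# [batch, seq, hidden]; after the [2, hidden] projection and reshape the
# logits are [batch, seq, 2], transposed to [2, batch, seq] and unstacked so
# that start_logits and end_logits are each [batch, seq].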
# Get the logits for the answer type prediction.
answer_type_output_layer = model.get_pooled_output()
answer_type_hidden_size = answer_type_output_layer.shape[-1].value
num_answer_types = 5 # YES, NO, UNKNOWN, SHORT, LONG
answer_type_output_weights = tf.get_variable(
"answer_type_output_weights", [num_answer_types, answer_type_hidden_size],
initializer=tf.truncated_normal_initializer(stddev=0.02))
answer_type_output_bias = tf.get_variable(
"answer_type_output_bias", [num_answer_types],
initializer=tf.zeros_initializer())
answer_type_logits = tf.matmul(
answer_type_output_layer, answer_type_output_weights, transpose_b=True)
answer_type_logits = tf.nn.bias_add(answer_type_logits,
answer_type_output_bias)
return (start_logits, end_logits, answer_type_logits)
def model_fn_builder(bert_config, init_checkpoint, learning_rate,
num_train_steps, num_warmup_steps, use_tpu,
use_one_hot_embeddings):
"""Returns `model_fn` closure for TPUEstimator."""
def model_fn(features, labels, mode, params): # pylint: disable=unused-argument
"""The `model_fn` for TPUEstimator."""
tf.logging.info("*** Features ***")
for name in sorted(features.keys()):
tf.logging.info(" name = %s, shape = %s" % (name, features[name].shape))
unique_ids = features["unique_ids"]
input_ids = features["input_ids"]
input_mask = features["input_mask"]
segment_ids = features["segment_ids"]
is_training = (mode == tf.estimator.ModeKeys.TRAIN)
(start_logits, end_logits, answer_type_logits) = create_model(
bert_config=bert_config,
is_training=is_training,
input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids,
use_one_hot_embeddings=use_one_hot_embeddings)
tvars = tf.trainable_variables()
initialized_variable_names = {}
scaffold_fn = None
if init_checkpoint:
(assignment_map, initialized_variable_names
) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)
if use_tpu:
def tpu_scaffold():
tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
return tf.train.Scaffold()
scaffold_fn = tpu_scaffold
else:
tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
tf.logging.info("**** Trainable Variables ****")
for var in tvars:
init_string = ""
if var.name in initialized_variable_names:
init_string = ", *INIT_FROM_CKPT*"
tf.logging.info(" name = %s, shape = %s%s", var.name, var.shape,
init_string)
output_spec = None
if mode == tf.estimator.ModeKeys.TRAIN:
seq_length = modeling.get_shape_list(input_ids)[1]
# Computes the loss for positions.
def compute_loss(logits, positions):
one_hot_positions = tf.one_hot(
positions, depth=seq_length, dtype=tf.float32)
log_probs = tf.nn.log_softmax(logits, axis=-1)
loss = -tf.reduce_mean(
tf.reduce_sum(one_hot_positions * log_probs, axis=-1))
return loss
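# Note (added for clarity): this is standard softmax cross-entropy over the
# sequence positions -- the one-hot mask selects the log-probability of the
# gold position and the result is averaged over the batch.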
# Computes the loss for labels.
def compute_label_loss(logits, labels):
one_hot_labels = tf.one_hot(
labels, depth=len(AnswerType), dtype=tf.float32)
log_probs = tf.nn.log_softmax(logits, axis=-1)
loss = -tf.reduce_mean(
tf.reduce_sum(one_hot_labels * log_probs, axis=-1))
return loss
start_positions = features["start_positions"]
end_positions = features["end_positions"]
answer_types = features["answer_types"]
start_loss = compute_loss(start_logits, start_positions)
end_loss = compute_loss(end_logits, end_positions)
answer_type_loss = compute_label_loss(answer_type_logits, answer_types)
total_loss = (start_loss + end_loss + answer_type_loss) / 3.0
train_op = optimization.create_optimizer(total_loss, learning_rate,
num_train_steps,
num_warmup_steps, use_tpu)
output_spec = contrib_tpu.TPUEstimatorSpec(
mode=mode,
loss=total_loss,
train_op=train_op,
scaffold_fn=scaffold_fn)
elif mode == tf.estimator.ModeKeys.PREDICT:
predictions = {
"unique_ids": unique_ids,
"start_logits": start_logits,
"end_logits": end_logits,
"answer_type_logits": answer_type_logits,
}
output_spec = contrib_tpu.TPUEstimatorSpec(
mode=mode, predictions=predictions, scaffold_fn=scaffold_fn)
else:
raise ValueError("Only TRAIN and PREDICT modes are supported: %s" %
(mode))
return output_spec
return model_fn
def input_fn_builder(input_file, seq_length, is_training, drop_remainder):
"""Creates an `input_fn` closure to be passed to TPUEstimator."""
name_to_features = {
"unique_ids": tf.FixedLenFeature([], tf.int64),
"input_ids": tf.FixedLenFeature([seq_length], tf.int64),
"input_mask": tf.FixedLenFeature([seq_length], tf.int64),
"segment_ids": tf.FixedLenFeature([seq_length], tf.int64),
}
if is_training:
name_to_features["start_positions"] = tf.FixedLenFeature([], tf.int64)
name_to_features["end_positions"] = tf.FixedLenFeature([], tf.int64)
name_to_features["answer_types"] = tf.FixedLenFeature([], tf.int64)
def _decode_record(record, name_to_features):
"""Decodes a record to a TensorFlow example."""
example = tf.parse_single_example(record, name_to_features)
# tf.Example only supports tf.int64, but the TPU only supports tf.int32.
# So cast all int64 to int32.
for name in list(example.keys()):
t = example[name]
if t.dtype == tf.int64:
t = tf.to_int32(t)
example[name] = t
return example
def input_fn(params):
"""The actual input function."""
batch_size = params["batch_size"]
# For training, we want a lot of parallel reading and shuffling.
# For eval, we want no shuffling and parallel reading doesn't matter.
d = tf.data.TFRecordDataset(input_file)
if is_training:
d = d.repeat()
d = d.shuffle(buffer_size=100)
d = d.apply(
tf.data.experimental.map_and_batch(
lambda record: _decode_record(record, name_to_features),
batch_size=batch_size,
drop_remainder=drop_remainder))
return d
return input_fn
RawResult = collections.namedtuple(
"RawResult",
["unique_id", "start_logits", "end_logits", "answer_type_logits"])
class FeatureWriter(object):
"""Writes InputFeature to TF example file."""
def __init__(self, filename, is_training):
self.filename = filename
self.is_training = is_training
self.num_features = 0
self._writer = tf.python_io.TFRecordWriter(filename)
def process_feature(self, feature):
"""Write a InputFeature to the TFRecordWriter as a tf.train.Example."""
self.num_features += 1
def create_int_feature(values):
feature = tf.train.Feature(
int64_list=tf.train.Int64List(value=list(values)))
return feature
features = collections.OrderedDict()
features["unique_ids"] = create_int_feature([feature.unique_id])
features["input_ids"] = create_int_feature(feature.input_ids)
features["input_mask"] = create_int_feature(feature.input_mask)
features["segment_ids"] = create_int_feature(feature.segment_ids)
if self.is_training:
features["start_positions"] = create_int_feature([feature.start_position])
features["end_positions"] = create_int_feature([feature.end_position])
features["answer_types"] = create_int_feature([feature.answer_type])
else:
token_map = [-1] * len(feature.input_ids)
for k, v in feature.token_to_orig_map.items():
token_map[k] = v
features["token_map"] = create_int_feature(token_map)
tf_example = tf.train.Example(features=tf.train.Features(feature=features))
self._writer.write(tf_example.SerializeToString())
def close(self):
self._writer.close()
Span = collections.namedtuple("Span", ["start_token_idx", "end_token_idx"])
class EvalExample(object):
"""Eval data available for a single example."""
def __init__(self, example_id, candidates):
self.example_id = example_id
self.candidates = candidates
self.results = {}
self.features = {}
class ScoreSummary(object):
def __init__(self):
self.predicted_label = None
self.short_span_score = None
self.cls_token_score = None
self.answer_type_logits = None
def read_candidates_from_one_split(input_path):
"""Read candidates from a single jsonl file."""
candidates_dict = {}
with gzip.GzipFile(fileobj=tf.gfile.Open(input_path, "rb")) as input_file:
tf.logging.info("Reading examples from: %s", input_path)
for line in input_file:
e = json.loads(line)
candidates_dict[e["example_id"]] = e["long_answer_candidates"]
return candidates_dict
def read_candidates(input_pattern):
"""Read candidates with real multiple processes."""
input_paths = tf.gfile.Glob(input_pattern)
final_dict = {}
for input_path in input_paths:
final_dict.update(read_candidates_from_one_split(input_path))
return final_dict
def get_best_indexes(logits, n_best_size):
"""Get the n-best logits from a list."""
index_and_score = sorted(
enumerate(logits[1:], 1), key=lambda x: x[1], reverse=True)
best_indexes = []
for i in range(len(index_and_score)):
if i >= n_best_size:
break
best_indexes.append(index_and_score[i][0])
return best_indexes
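# Worked example (added for clarity; scores are illustrative): for
# logits = [9.0, 5.2, 3.1, 4.7] and n_best_size = 2 the function returns
# [1, 3]; index 0 (the [CLS] position) is deliberately skipped because the
# enumeration starts at logits[1:].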
def compute_predictions(example):
"""Converts an example into an NQEval object for evaluation."""
predictions = []
n_best_size = 10
max_answer_length = 30
for unique_id, result in example.results.items():
if unique_id not in example.features:
raise ValueError("No feature found with unique_id:", unique_id)
token_map = example.features[unique_id]["token_map"].int64_list.value
start_indexes = get_best_indexes(result["start_logits"], n_best_size)
end_indexes = get_best_indexes(result["end_logits"], n_best_size)
for start_index in start_indexes:
for end_index in end_indexes:
if end_index < start_index:
continue
if token_map[start_index] == -1:
continue
if token_map[end_index] == -1:
continue
length = end_index - start_index + 1
if length > max_answer_length:
continue
summary = ScoreSummary()
summary.short_span_score = (
result["start_logits"][start_index] +
result["end_logits"][end_index])
summary.cls_token_score = (
result["start_logits"][0] + result["end_logits"][0])
summary.answer_type_logits = result["answer_type_logits"]
start_span = token_map[start_index]
end_span = token_map[end_index] + 1
# Span logits minus the cls logits seems to be close to the best.
score = summary.short_span_score - summary.cls_token_score
predictions.append((score, summary, start_span, end_span))
# Default empty prediction.
score = -10000.0
short_span = Span(-1, -1)
long_span = Span(-1, -1)
summary = ScoreSummary()
if predictions:
score, summary, start_span, end_span = sorted(predictions, reverse=True)[0]
short_span = Span(start_span, end_span)
for c in example.candidates:
start = short_span.start_token_idx
end = short_span.end_token_idx
if c["top_level"] and c["start_token"] <= start and c["end_token"] >= end:
long_span = Span(c["start_token"], c["end_token"])
break
summary.predicted_label = {
"example_id": example.example_id,
"long_answer": {
"start_token": long_span.start_token_idx,
"end_token": long_span.end_token_idx,
"start_byte": -1,
"end_byte": -1
},
"long_answer_score": score,
"short_answers": [{
"start_token": short_span.start_token_idx,
"end_token": short_span.end_token_idx,
"start_byte": -1,
"end_byte": -1
}],
"short_answers_score": score,
"yes_no_answer": "NONE"
}
return summary
def compute_pred_dict(candidates_dict, dev_features, raw_results):
"""Computes official answer key from raw logits."""
raw_results_by_id = [(int(res["unique_id"] + 1), res) for res in raw_results]
# Cast example id to int32 for each example, similarly to the raw results.
sess = tf.Session()
all_candidates = candidates_dict.items()
example_ids = tf.to_int32(np.array([int(k) for k, _ in all_candidates
])).eval(session=sess)
examples_by_id = zip(example_ids, all_candidates)
# Cast unique_id also to int32 for features.
feature_ids = []
features = []
for f in dev_features:
feature_ids.append(f.features.feature["unique_ids"].int64_list.value[0] + 1)
features.append(f.features.feature)
feature_ids = tf.to_int32(np.array(feature_ids)).eval(session=sess)
features_by_id = zip(feature_ids, features)
# Join examples with features and raw results.
examples = []
merged = sorted(
list(examples_by_id) + raw_results_by_id + list(features_by_id),
key=lambda x: x[0])
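# Note (added for clarity): this merge relies on every feature/result id
# being example_index + doc_span_index + 1 (doc_span_index >= 0), which is
# strictly greater than the example id, so after sorting the EvalExample
# entry always precedes its features and results and examples[-1] below
# refers to the right example.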
for idx, datum in merged:
if isinstance(datum, tuple):
examples.append(EvalExample(datum[0], datum[1]))
elif "token_map" in datum:
examples[-1].features[idx] = datum
else:
examples[-1].results[idx] = datum
# Construct prediction objects.
tf.logging.info("Computing predictions...")
summary_dict = {}
nq_pred_dict = {}
for e in examples:
summary = compute_predictions(e)
summary_dict[e.example_id] = summary
nq_pred_dict[e.example_id] = summary.predicted_label
if len(nq_pred_dict) % 100 == 0:
tf.logging.info("Examples processed: %d", len(nq_pred_dict))
tf.logging.info("Done computing predictions.")
return nq_pred_dict
def validate_flags_or_throw(bert_config):
"""Validate the input FLAGS or throw an exception."""
if not FLAGS.do_train and not FLAGS.do_predict:
raise ValueError("At least one of `{do_train,do_predict}` must be True.")
if FLAGS.do_train:
if not FLAGS.train_precomputed_file:
raise ValueError("If `do_train` is True, then `train_precomputed_file` "
"must be specified.")
if not FLAGS.train_num_precomputed:
raise ValueError("If `do_train` is True, then `train_num_precomputed` "
"must be specified.")
if FLAGS.do_predict:
if not FLAGS.predict_file:
raise ValueError(
"If `do_predict` is True, then `predict_file` must be specified.")
if FLAGS.max_seq_length > bert_config.max_position_embeddings:
raise ValueError(
"Cannot use sequence length %d because the BERT model "
"was only trained up to sequence length %d" %
(FLAGS.max_seq_length, bert_config.max_position_embeddings))
if FLAGS.max_seq_length <= FLAGS.max_query_length + 3:
raise ValueError(
"The max_seq_length (%d) must be greater than max_query_length "
"(%d) + 3" % (FLAGS.max_seq_length, FLAGS.max_query_length))
def main(_):
tf.logging.set_verbosity(tf.logging.INFO)
bert_config = modeling.BertConfig.from_json_file(FLAGS.bert_config_file)
validate_flags_or_throw(bert_config)
tf.gfile.MakeDirs(FLAGS.output_dir)
tokenizer = tokenization.FullTokenizer(
vocab_file=FLAGS.vocab_file, do_lower_case=FLAGS.do_lower_case)
tpu_cluster_resolver = None
if FLAGS.use_tpu and FLAGS.tpu_name:
tpu_cluster_resolver = contrib_cluster_resolver.TPUClusterResolver(
FLAGS.tpu_name, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project)
is_per_host = contrib_tpu.InputPipelineConfig.PER_HOST_V2
run_config = contrib_tpu.RunConfig(
cluster=tpu_cluster_resolver,
master=FLAGS.master,
model_dir=FLAGS.output_dir,
save_checkpoints_steps=FLAGS.save_checkpoints_steps,
tpu_config=contrib_tpu.TPUConfig(
iterations_per_loop=FLAGS.iterations_per_loop,
num_shards=FLAGS.num_tpu_cores,
per_host_input_for_training=is_per_host))
num_train_steps = None
num_warmup_steps = None
if FLAGS.do_train:
num_train_features = FLAGS.train_num_precomputed
num_train_steps = int(num_train_features / FLAGS.train_batch_size *
FLAGS.num_train_epochs)
num_warmup_steps = int(num_train_steps * FLAGS.warmup_proportion)
model_fn = model_fn_builder(
bert_config=bert_config,
init_checkpoint=FLAGS.init_checkpoint,
learning_rate=FLAGS.learning_rate,
num_train_steps=num_train_steps,
num_warmup_steps=num_warmup_steps,
use_tpu=FLAGS.use_tpu,
use_one_hot_embeddings=FLAGS.use_tpu)
# If TPU is not available, this falls back to normal Estimator on CPU or GPU.
estimator = contrib_tpu.TPUEstimator(
use_tpu=FLAGS.use_tpu,
model_fn=model_fn,
config=run_config,
train_batch_size=FLAGS.train_batch_size,
predict_batch_size=FLAGS.predict_batch_size)
if FLAGS.do_train:
tf.logging.info("***** Running training on precomputed features *****")
tf.logging.info(" Num split examples = %d", num_train_features)
tf.logging.info(" Batch size = %d", FLAGS.train_batch_size)
tf.logging.info(" Num steps = %d", num_train_steps)
train_filenames = tf.gfile.Glob(FLAGS.train_precomputed_file)
train_input_fn = input_fn_builder(
input_file=train_filenames,
seq_length=FLAGS.max_seq_length,
is_training=True,
drop_remainder=True)
estimator.train(input_fn=train_input_fn, max_steps=num_train_steps)
if FLAGS.do_predict:
if not FLAGS.output_prediction_file:
raise ValueError(
"--output_prediction_file must be defined in predict mode.")
eval_examples = read_nq_examples(
input_file=FLAGS.predict_file, is_training=False)
eval_writer = FeatureWriter(
filename=os.path.join(FLAGS.output_dir, "eval.tf_record"),
is_training=False)
eval_features = []
def append_feature(feature):
eval_features.append(feature)
eval_writer.process_feature(feature)
num_spans_to_ids = convert_examples_to_features(
examples=eval_examples,
tokenizer=tokenizer,
is_training=False,
output_fn=append_feature)
eval_writer.close()
eval_filename = eval_writer.filename
tf.logging.info("***** Running predictions *****")
tf.logging.info(" Num orig examples = %d", len(eval_examples))
tf.logging.info(" Num split examples = %d", len(eval_features))
tf.logging.info(" Batch size = %d", FLAGS.predict_batch_size)
for spans, ids in num_spans_to_ids.items():
tf.logging.info(" Num split into %d = %d", spans, len(ids))
predict_input_fn = input_fn_builder(
input_file=eval_filename,
seq_length=FLAGS.max_seq_length,
is_training=False,
drop_remainder=False)
# If running eval on the TPU, you will need to specify the number of steps.
all_results = []
for result in estimator.predict(
predict_input_fn, yield_single_examples=True):
if len(all_results) % 1000 == 0:
tf.logging.info("Processing example: %d" % (len(all_results)))
unique_id = int(result["unique_ids"])
start_logits = [float(x) for x in result["start_logits"].flat]
end_logits = [float(x) for x in result["end_logits"].flat]
answer_type_logits = [float(x) for x in result["answer_type_logits"].flat]
all_results.append(
RawResult(
unique_id=unique_id,
start_logits=start_logits,
end_logits=end_logits,
answer_type_logits=answer_type_logits))
candidates_dict = read_candidates(FLAGS.predict_file)
eval_features = [
tf.train.Example.FromString(r)
for r in tf.python_io.tf_record_iterator(eval_filename)
]
nq_pred_dict = compute_pred_dict(candidates_dict, eval_features,
[r._asdict() for r in all_results])
predictions_json = {"predictions": list(nq_pred_dict.values())}
with tf.gfile.Open(FLAGS.output_prediction_file, "w") as f:
json.dump(predictions_json, f, indent=4)
if __name__ == "__main__":
flags.mark_flag_as_required("vocab_file")
flags.mark_flag_as_required("bert_config_file")
flags.mark_flag_as_required("output_dir")
tf.app.run()
| 34.805536 | 82 | 0.687259 |
7947ad480c49cb2d2324e5b8b2114f6acc815258 | 6,068 | py | Python | server.py | uggla/lab_droplet | 5fb6388e16aa84215f7329b5972a8a5b49ff9308 | ["Apache-2.0"] | null | null | null | server.py | uggla/lab_droplet | 5fb6388e16aa84215f7329b5972a8a5b49ff9308 | ["Apache-2.0"] | null | null | null | server.py | uggla/lab_droplet | 5fb6388e16aa84215f7329b5972a8a5b49ff9308 | ["Apache-2.0"] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
import re
import json
import jinja2
from flask import Flask
from flask import jsonify
from flask import request
from flask import send_from_directory
from flask_sockets import Sockets
from api_exception import InvalidUsage
from flask_httpauth import HTTPBasicAuth
from flask_restful import abort
from pprint import pprint
import droplet
class Reservation(object):
def __init__(self):
self.file = "reservation.json"
self.data = None
# read json file
try:
with open(self.file, "r") as json_data:
self.data = json.load(json_data)
json_data.close()
except IOError:
self.data = {}
self.save()
def save(self):
with open(self.file, "w") as json_data:
json_data.write(json.dumps(self.data))
json_data.close()
def reserve(self, uuid, owner):
self.data.update({uuid: owner})
self.save()
def get(self, uuid):
try:
return self.data[uuid]
except KeyError:
return ""
def release(self, uuid):
try:
del self.data[uuid]
self.save()
except KeyError:
pass
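# Usage sketch (comment added for clarity; the uuid/owner values are
# hypothetical):
# resa = Reservation()                    # loads or creates reservation.json
# resa.reserve("droplet-uuid", "alice")   # persists {"droplet-uuid": "alice"}
# resa.get("droplet-uuid")                # -> "alice" (unknown uuids return "")
# resa.release("droplet-uuid")            # removes the entry; missing keys are ignored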
ENV = ("DIGITALOCEAN_TOKEN", "PUBKEY", "LABUSER", "LABPASSWD")
for item in ENV:
VAR = os.getenv(item)
if not VAR:
print("Please set {} environment variable.".format(item))
sys.exit(1)
app = Flask(__name__)
sockets = Sockets(app)
auth = HTTPBasicAuth()
# Initialize reservation
resa = Reservation()
users = {os.getenv("LABUSER"): os.getenv("LABPASSWD")}
@auth.get_password
def get_pw(username):
if username in users:
return users.get(username)
return None
@app.errorhandler(InvalidUsage)
def handle_invalid_usage(error):
response = jsonify(error.to_dict())
response.status_code = error.status_code
return response
@app.route("/")
@auth.login_required
def index():
html = render_template("user.html")
return html
@app.route("/create_droplet", methods=["POST"])
@auth.login_required
def create_droplet():
if (
not request.form["InputAccount"]
or not request.form["InputPubSSH"]
or not re.match(r"^ssh-rsa .+$", request.form["InputPubSSH"])
or not re.match(r"\w{4,}", request.form["InputAccount"])
): # noqa
abort(400, message="Bad request")
pprint(request.form["InputAccount"])
pprint(request.form["InputPubSSH"])
pprint(request.form["InputDistro"])
pprint(request.form["InputFlavor"])
vmid = new_droplet(
request.form["InputAccount"],
request.form["InputPubSSH"],
request.form["InputDistro"],
request.form["InputFlavor"],
)
html = render_template("droplet.html", vmid)
return html
def new_droplet(account, pubssh, distro, flavor):
ANIMALS_LIST_URL = "https://raw.githubusercontent.com/hzlzh/Domain-Name-List/master/Animal-words.txt" # noqa
ADJECTIVES_LIST_URL = "https://raw.githubusercontent.com/gef756/namegen/master/adjectives.txt" # noqa
TAG = ["docker", account]
REGION = "ams3" # Amsterdam 3
if distro == "centos":
IMAGE = "centos-7-x64"
elif distro == "ubuntu":
IMAGE = "ubuntu-16-04-x64"
elif distro == "docker":
IMAGE = "docker-18-04"
if flavor == "1GB":
DROPLET_SIZE = "s-1vcpu-1gb"
elif flavor == "4GB":
DROPLET_SIZE = "s-2vcpu-4gb"
TOKEN = os.getenv("DIGITALOCEAN_TOKEN")
userkey = droplet.digitalocean.SSHKey(token=TOKEN)
userkey.load_by_pub_key(pubssh)
if not userkey.id:
userkey = droplet.digitalocean.SSHKey(
token=TOKEN, name=account, public_key=pubssh
)
userkey.create()
# admkeys = droplet.get_ssh_keys(TOKEN)
admkey = droplet.digitalocean.SSHKey(token=TOKEN)
admkey.load_by_pub_key(os.getenv("PUBKEY"))
keys = [userkey, admkey]
vm = droplet.digitalocean.Droplet(
token=TOKEN,
name=droplet.generate_random_name(
ADJECTIVES_LIST_URL, ANIMALS_LIST_URL
),
region=REGION,
image=IMAGE,
size_slug=DROPLET_SIZE,
tags=TAG,
ssh_keys=keys,
)
vm.create()
return vm.id
def add_headers(response):
response.headers.add("Access-Control-Allow-Origin", "*")
response.headers.add(
"Access-Control-Allow-Headers", "Content-Type,Authorization"
)
@sockets.route("/vmstatus")
def vmstatus(ws):
TOKEN = os.getenv("DIGITALOCEAN_TOKEN")
data = json.loads(ws.receive())
pprint(data)
vm = droplet.get_droplet(TOKEN, data["vmid"])
droplet.wait_completion(vm)
# Reload status
vm = droplet.get_droplet(TOKEN, data["vmid"])
pprint(vm)
print("Droplet id: {}".format(vm.id))
print("Droplet name: {}".format(vm.name))
print("Droplet ip: {}".format(vm.ip_address))
data = {"vmid": vm.id, "vmname": vm.name, "vmip": vm.ip_address}
ws.send(json.dumps(data))
@app.route("/css/<path>")
def send_css(path):
return send_from_directory("templates/css", path)
@app.route("/img/<path>")
def send_img(path):
return send_from_directory("templates/img", path)
def render_template(template, values=None):
# Initialize Template system (jinja2)
templates_path = "templates"
jinja2_env = jinja2.Environment(
loader=jinja2.FileSystemLoader(templates_path)
)
try:
template = jinja2_env.get_template(template)
except jinja2.exceptions.TemplateNotFound as e:
print(
'Template "{}" not found in {}.'.format(
e.message, jinja2_env.loader.searchpath[0]
)
)
if values is None:
data = template.render()
else:
data = template.render(r=values)
return data
if __name__ == "__main__":
from gevent import pywsgi
from geventwebsocket.handler import WebSocketHandler
server = pywsgi.WSGIServer(("", 5000), app, handler_class=WebSocketHandler)
server.serve_forever()
| 26.268398 | 113 | 0.638266 |
7947aec7efec0a0d025e7807c9197c5cb3012bae | 960 | py | Python | stipo/places/api/serializers.py | azaleas/stipo | 5bd7954f89b46f359f9dfc45a8230e7b5e154986 | [
"MIT"
] | null | null | null | stipo/places/api/serializers.py | azaleas/stipo | 5bd7954f89b46f359f9dfc45a8230e7b5e154986 | [
"MIT"
] | null | null | null | stipo/places/api/serializers.py | azaleas/stipo | 5bd7954f89b46f359f9dfc45a8230e7b5e154986 | [
"MIT"
] | null | null | null | from rest_framework import serializers
from ..models import Attend, Facility, YelpToken
class AttendSerializer(serializers.ModelSerializer):
is_going = serializers.BooleanField(required=True)
class Meta:
model = Attend
fields = ['id', 'attender', 'facility',
'is_going', 'created_date']
class AttendSerializerReadOnly(serializers.ModelSerializer):
class Meta:
model = Attend
fields = ['attender', 'is_going', 'created_date']
class FacilitySerializer(serializers.ModelSerializer):
attends = AttendSerializerReadOnly(many=True, read_only=True)
class Meta:
model = Facility
fields = ['id', 'name', 'location', 'rating', 'url', 'image_url', 'attends']
read_only_fields = ['name', 'rating', 'url', 'image_url', 'attends']
class YelpTokenSerializer(serializers.ModelSerializer):
class Meta:
model = YelpToken
fields = ['token', 'updated_date'] | 28.235294 | 84 | 0.672917 |
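# Usage sketch (comment added for clarity; field values are hypothetical):
# FacilitySerializer(facility).data would yield a dict with the declared
# fields, e.g. {"id": 1, "name": "...", "location": "...", "rating": 4.5,
# "url": "...", "image_url": "...", "attends": [...]}, where "attends" is
# rendered read-only through AttendSerializerReadOnly.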
7947afa66a5b96471f9cb306c009f5cbcb9945b4 | 4,498 | py | Python | src/microprobe/target/uarch/element_type.py | TheArni/microprobe | 46d17a9744b943bb448fc5e2872f3521084d8bec | [
"Apache-2.0"
] | 13 | 2018-09-06T05:16:08.000Z | 2022-03-07T23:03:46.000Z | src/microprobe/target/uarch/element_type.py | TheArni/microprobe | 46d17a9744b943bb448fc5e2872f3521084d8bec | [
"Apache-2.0"
] | 24 | 2018-07-10T01:56:10.000Z | 2022-02-22T22:38:25.000Z | src/microprobe/target/uarch/element_type.py | TheArni/microprobe | 46d17a9744b943bb448fc5e2872f3521084d8bec | [
"Apache-2.0"
] | 12 | 2018-09-06T13:58:24.000Z | 2022-01-27T21:15:39.000Z | # Copyright 2011-2021 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""":mod:`microprobe.target.uarch.element_type` module
"""
# Futures
from __future__ import absolute_import
# Built-in modules
import abc
import os
# Third party modules
import six
# Own modules
from microprobe.property import PropertyHolder, import_properties
from microprobe.utils.logger import get_logger
from microprobe.utils.yaml import read_yaml
# Constants
SCHEMA = os.path.join(
os.path.dirname(os.path.abspath(__file__)), "schemas", "element_type.yaml"
)
LOG = get_logger(__name__)
__all__ = [
"import_definition", "MicroarchitectureElementType",
"GenericMicroarchitectureElementType"
]
# Functions
def import_definition(cls, filenames, dummy):
"""
:param cls:
:type cls:
:param filenames:
:type filenames:
:param dummy:
:type dummy:
"""
LOG.info("Start importing element type definitions")
element_types = {}
for filename in filenames:
element_type_data = read_yaml(filename, SCHEMA)
if element_type_data is None:
continue
for elem in element_type_data:
name = elem["Name"]
descr = elem.get("Description", "No description")
element_type = cls(name, descr)
element_types[name] = element_type
LOG.debug(element_type)
for filename in filenames:
import_properties(filename, element_types)
LOG.info("End importing element type definitions")
return element_types
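# Sketch of the expected YAML input (added for clarity; the entry below is
# illustrative, the authoritative schema is schemas/element_type.yaml):
# - Name: L1D
#   Description: Level-1 data cache
# Each entry becomes a cls(name, description) instance keyed by its Name.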
# Classes
class MicroarchitectureElementType(
six.with_metaclass(
abc.ABCMeta,
PropertyHolder)):
"""Abstract class to represent a microarchitecture element type."""
@abc.abstractmethod
def __init__(self):
"""Create a microarchitecture element type.
:return: MicroarchitectureElementType instance
:rtype: :class:`~.MicroarchitectureElementType`
"""
pass
@abc.abstractproperty
def name(self):
"""Microarchitecture element type name (:class:`~.str`)."""
raise NotImplementedError
@abc.abstractproperty
def description(self):
"""Microarchitecture element type description (:class:`~.str`)."""
raise NotImplementedError
@abc.abstractmethod
def __str__(self):
"""Return the string representation of this element type
(:class:`~.str`)."""
raise NotImplementedError
class GenericMicroarchitectureElementType(
six.with_metaclass(
abc.ABCMeta,
MicroarchitectureElementType)):
"""Class to represent a generic microarchitecture element type."""
def __init__(self, name, description):
"""Create a generic microarchitecture element type.
:param name: Microarchitecture element type name
:type name: :class:`~.str`
:param description: Microarchitecture element type description
:type description: :class:`~.str`
:return: GenericMicroarchitectureElementType instance
:rtype: :class:`~.GenericMicroarchitectureElementType`
"""
super(GenericMicroarchitectureElementType, self).__init__()
self._name = name
self._description = description
@property
def name(self):
"""Microarchitecture element type name (:class:`~.str`)."""
return self._name
@property
def description(self):
"""Microarchitecture element type description (:class:`~.str`)."""
return self._description
def __str__(self):
"""Return the string representation of this element type
(:class:`~.str`)."""
return "%s('%s','%s')" % (
self.__class__.__name__, self.name, self.description
)
def __lt__(self, other):
assert isinstance(other, MicroarchitectureElementType)
name_cmp = self.name != other.name
if name_cmp:
return self.name < other.name
return self.description < other.description
| 27.595092 | 78 | 0.675634 |
7947afef3cf96d4bc16959ac0d0584721aa15001 | 3,713 | py | Python | fbpcs/pcf/tests/test_private_computation_framework.py | HaipengGuan/fbpcs | 97829881b741fc84702250ae15d232787eac1946 | [
"MIT"
] | null | null | null | fbpcs/pcf/tests/test_private_computation_framework.py | HaipengGuan/fbpcs | 97829881b741fc84702250ae15d232787eac1946 | [
"MIT"
] | null | null | null | fbpcs/pcf/tests/test_private_computation_framework.py | HaipengGuan/fbpcs | 97829881b741fc84702250ae15d232787eac1946 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import shutil
from fbpcs.pcf.games import ConversionLift
from fbpcs.pcf.mpc.tests.utils import MPCTestCase
from fbpcs.pcf.private_computation_framework import PrivateComputationFramework
from fbpcs.pcf.structs import Role, Status
from fbpcs.pcf.tests.async_utils import wait
from fbpcs.pcf.tests.utils import DummyGame, DummyMPCFramework, DummyPlayer
TEST_RUN_TIMEOUT = 5678
TEST_SLEEP_SECONDS = 0
class TestPrivateComputationFramework(MPCTestCase):
def setUp(self):
os.environ["RUN_TIMEOUT"] = str(TEST_RUN_TIMEOUT)
self.game = DummyGame
self.player = DummyPlayer.build(Role.PUBLISHER)
self.other_players = [DummyPlayer.build(Role.PARTNER)]
num_files = 2
_, self.input_files = zip(
*[
self._make_input_csv(
game=ConversionLift, role=Role.PUBLISHER, num_records=10
)
for i in range(num_files)
]
)
self.output_files = self.input_files
self.tempdirs = [f.parent for f in self.input_files]
self.pcf = PrivateComputationFramework(
game=self.game,
input_files=self.input_files,
output_files=self.output_files,
player=self.player,
other_players=self.other_players,
mpc_cls=DummyMPCFramework,
partner_sleep_seconds=TEST_SLEEP_SECONDS,
)
self.pcf_partner = PrivateComputationFramework(
game=self.game,
input_files=self.input_files,
output_files=self.output_files,
player=DummyPlayer.build(Role.PARTNER),
other_players=[DummyPlayer.build(Role.PUBLISHER)],
mpc_cls=DummyMPCFramework,
partner_sleep_seconds=TEST_SLEEP_SECONDS,
)
def tearDown(self):
for tempdir in self.tempdirs:
shutil.rmtree(tempdir)
def test_gen_frameworks(self):
for i, fw in enumerate(self.pcf.mpc_frameworks):
self.assertTrue(isinstance(fw, DummyMPCFramework))
self.assertEqual(self.game, fw.game)
self.assertEqual(self.input_files[i], fw.input_file)
self.assertEqual(self.player, fw.player)
self.assertEqual(self.other_players, fw.other_players)
self.assertEqual(TEST_RUN_TIMEOUT, fw.run_timeout)
def test_prepare_input(self):
for status in Status:
for fw in self.pcf.mpc_frameworks:
fw.build(prepare_input=status)
self.assertEqual(status, wait(self.pcf.prepare_input()))
def test_run_mpc(self):
expected_1 = {"key1": 1.0, "key2": 2.5, "key3": 99.9}
expected_2 = {"key1": 9.0, "key2": 10.5, "key3": 199.9}
self.assertEqual(2, len(self.pcf.mpc_frameworks))
self.pcf.mpc_frameworks[0].build(run_mpc=expected_1.copy())
self.pcf.mpc_frameworks[1].build(run_mpc=expected_2.copy())
self.assertEqual(expected_1, wait(self.pcf.run_mpc())[0])
self.assertEqual(expected_2, wait(self.pcf.run_mpc())[1])
# Test on partner player too because it has a different logic in run_mpc
self.assertEqual(2, len(self.pcf_partner.mpc_frameworks))
self.pcf_partner.mpc_frameworks[0].build(run_mpc=expected_1.copy())
self.pcf_partner.mpc_frameworks[1].build(run_mpc=expected_2.copy())
self.assertEqual(expected_1, wait(self.pcf_partner.run_mpc())[0])
self.assertEqual(expected_2, wait(self.pcf_partner.run_mpc())[1])
| 39.084211 | 80 | 0.664422 |
7947affc69202c7d42a47bf495d1c1474f11f712 | 3,625 | py | Python | tests/test_results.py | LukasBeiske/aict-tools | ccf61c051c58040cf4b676180ae7184021d1b81b | [
"MIT"
] | null | null | null | tests/test_results.py | LukasBeiske/aict-tools | ccf61c051c58040cf4b676180ae7184021d1b81b | [
"MIT"
] | null | null | null | tests/test_results.py | LukasBeiske/aict-tools | ccf61c051c58040cf4b676180ae7184021d1b81b | [
"MIT"
] | null | null | null | import tempfile
import os
from click.testing import CliRunner
import shutil
import pandas as pd
import numpy as np
def test_energy_regression_results():
from aict_tools.scripts.train_energy_regressor import main as train
from aict_tools.io import read_telescope_data
from aict_tools.apply import predict_energy
import joblib
from aict_tools.configuration import AICTConfig
configuration_path = 'examples/full_config.yaml'
with tempfile.TemporaryDirectory(prefix='aict_tools_test_') as d:
data_path = os.path.join(d, 'gamma.hdf5')
model_path = os.path.join(d, 'test.pkl')
shutil.copy('examples/gamma.hdf5', data_path)
runner = CliRunner()
result = runner.invoke(
train,
[
configuration_path,
data_path,
os.path.join(d, 'test.hdf5'),
model_path,
]
)
assert result.exit_code == 0
config = AICTConfig.from_yaml(configuration_path)
model_config = config.energy
model = joblib.load(model_path)
df = read_telescope_data(
data_path, config, model_config.columns_to_read_apply,
feature_generation_config=model_config.feature_generation
)
energy_prediction = predict_energy(
df[model_config.features],
model,
log_target=model_config.log_target,
)
expectation = pd.read_csv('tests/expected_results.csv')
np.testing.assert_array_almost_equal(energy_prediction, expectation['energy_prediction'])
def test_seperation_results():
from aict_tools.scripts.train_separation_model import main as train
from aict_tools.io import read_telescope_data
from aict_tools.apply import predict_separator
import joblib
from aict_tools.configuration import AICTConfig
configuration_path = 'examples/full_config.yaml'
expectation = pd.read_csv('tests/expected_results.csv')
with tempfile.TemporaryDirectory(prefix='aict_tools_test_') as d:
gamma_path = os.path.join(d, 'gamma.hdf5')
proton_path = os.path.join(d, 'proton.hdf5')
model_path = os.path.join(d, 'test.pkl')
shutil.copy('examples/gamma.hdf5', gamma_path)
shutil.copy('examples/proton.hdf5', proton_path)
runner = CliRunner()
result = runner.invoke(
train,
[
configuration_path,
gamma_path,
proton_path,
os.path.join(d, 'test.hdf5'),
model_path,
]
)
assert result.exit_code == 0
config = AICTConfig.from_yaml(configuration_path)
model_config = config.energy
model = joblib.load(model_path)
df = read_telescope_data(
proton_path, config, model_config.columns_to_read_apply,
feature_generation_config=model_config.feature_generation
)
protons_prediction = predict_separator(
df[model_config.features],
model,
)
df = read_telescope_data(
gamma_path, config, model_config.columns_to_read_apply,
feature_generation_config=model_config.feature_generation
)
gammas_prediction = predict_separator(
df[model_config.features],
model,
)
np.testing.assert_array_almost_equal(protons_prediction, expectation['separator_prediction_on_protons'])
np.testing.assert_array_almost_equal(gammas_prediction, expectation['separator_prediction_on_gammas'])
| 30.982906 | 112 | 0.653793 |
7947b08f3df25d39e9cef29a211cbc3d215311b4 | 550 | py | Python | longclaw/orders/migrations/0004_auto_20190307_1708.py | YokoTheSlayer/longclaw | 83801feb24ca6f0dfe0fb7a2ef2e9614e5b75611 | [
"MIT"
] | null | null | null | longclaw/orders/migrations/0004_auto_20190307_1708.py | YokoTheSlayer/longclaw | 83801feb24ca6f0dfe0fb7a2ef2e9614e5b75611 | [
"MIT"
] | null | null | null | longclaw/orders/migrations/0004_auto_20190307_1708.py | YokoTheSlayer/longclaw | 83801feb24ca6f0dfe0fb7a2ef2e9614e5b75611 | [
"MIT"
] | null | null | null | # Generated by Django 2.1.7 on 2019-03-07 14:08
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('orders', '0003_auto_20190307_1704'),
]
operations = [
migrations.RemoveField(
model_name='order',
name='billing_address',
),
migrations.RemoveField(
model_name='order',
name='ip_address',
),
migrations.RemoveField(
model_name='order',
name='shipping_rate',
),
]
| 21.153846 | 47 | 0.552727 |
7947b107f4176fe4ee7ca4ee6a9c3241fc1dfc4a | 27,069 | py | Python | Lib/site-packages/pygments/lexers/freefem.py | edupyter/EDUPYTER38 | 396183cea72987506f1ef647c0272a2577c56218 | [
"bzip2-1.0.6"
] | 1 | 2021-12-14T21:23:25.000Z | 2021-12-14T21:23:25.000Z | Lib/site-packages/pygments/lexers/freefem.py | edupyter/EDUPYTER38 | 396183cea72987506f1ef647c0272a2577c56218 | [
"bzip2-1.0.6"
] | null | null | null | Lib/site-packages/pygments/lexers/freefem.py | edupyter/EDUPYTER38 | 396183cea72987506f1ef647c0272a2577c56218 | [
"bzip2-1.0.6"
] | null | null | null | """
pygments.lexers.freefem
~~~~~~~~~~~~~~~~~~~~~~~
Lexer for FreeFem++ language.
:copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.lexer import RegexLexer, include, bygroups, inherit, words, \
default
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation
from pygments.lexers.c_cpp import CLexer, CppLexer
from pygments.lexers import _mql_builtins
__all__ = ['FreeFemLexer']
class FreeFemLexer(CppLexer):
"""
For FreeFem++ source.
This is an extension of the CppLexer, as the FreeFem Language is a superset
of C++.
.. versionadded:: 2.4
"""
name = 'Freefem'
url = 'https://freefem.org/'
aliases = ['freefem']
filenames = ['*.edp']
mimetypes = ['text/x-freefem']
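# Usage sketch (comment added for clarity; the FreeFem snippet is
# illustrative):
# from pygments import highlight
# from pygments.formatters import HtmlFormatter
# highlight('mesh Th = square(10, 10);', FreeFemLexer(), HtmlFormatter())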
# Language operators
operators = {'+', '-', '*', '.*', '/', './', '%', '^', '^-1', ':', '\''}
# types
types = {'bool', 'border', 'complex', 'dmatrix', 'fespace', 'func', 'gslspline',
'ifstream', 'int', 'macro', 'matrix', 'mesh', 'mesh3', 'mpiComm',
'mpiGroup', 'mpiRequest', 'NewMacro', 'EndMacro', 'ofstream', 'Pmmap',
'problem', 'Psemaphore', 'real', 'solve', 'string', 'varf'}
# finite element spaces
fespaces = {'BDM1', 'BDM1Ortho', 'Edge03d', 'Edge13d', 'Edge23d', 'FEQF', 'HCT',
'P0', 'P03d', 'P0Edge', 'P1', 'P13d', 'P1b', 'P1b3d', 'P1bl', 'P1bl3d',
'P1dc', 'P1Edge', 'P1nc', 'P2', 'P23d', 'P2b', 'P2BR', 'P2dc', 'P2Edge',
'P2h', 'P2Morley', 'P2pnc', 'P3', 'P3dc', 'P3Edge', 'P4', 'P4dc',
'P4Edge', 'P5Edge', 'RT0', 'RT03d', 'RT0Ortho', 'RT1', 'RT1Ortho',
'RT2', 'RT2Ortho'}
# preprocessor
preprocessor = {'ENDIFMACRO', 'include', 'IFMACRO', 'load'}
# Language keywords
keywords = {
'adj',
'append',
'area',
'ARGV',
'be',
'binary',
'BoundaryEdge',
'bordermeasure',
'CG',
'Cholesky',
'cin',
'cout',
'Crout',
'default',
'diag',
'edgeOrientation',
'endl',
'false',
'ffind',
'FILE',
'find',
'fixed',
'flush',
'GMRES',
'good',
'hTriangle',
'im',
'imax',
'imin',
'InternalEdge',
'l1',
'l2',
'label',
'lenEdge',
'length',
'LINE',
'linfty',
'LU',
'm',
'max',
'measure',
'min',
'mpiAnySource',
'mpiBAND',
'mpiBXOR',
'mpiCommWorld',
'mpiLAND',
'mpiLOR',
'mpiLXOR',
'mpiMAX',
'mpiMIN',
'mpiPROD',
'mpirank',
'mpisize',
'mpiSUM',
'mpiUndefined',
'n',
'N',
'nbe',
'ndof',
'ndofK',
'noshowbase',
'noshowpos',
'notaregion',
'nt',
'nTonEdge',
'nuEdge',
'nuTriangle',
'nv',
'P',
'pi',
'precision',
'qf1pE',
'qf1pElump',
'qf1pT',
'qf1pTlump',
'qfV1',
'qfV1lump',
'qf2pE',
'qf2pT',
'qf2pT4P1',
'qfV2',
'qf3pE',
'qf4pE',
'qf5pE',
'qf5pT',
'qfV5',
'qf7pT',
'qf9pT',
'qfnbpE',
'quantile',
're',
'region',
'rfind',
'scientific',
'searchMethod',
'setw',
'showbase',
'showpos',
'sparsesolver',
'sum',
'tellp',
'true',
'UMFPACK',
'unused',
'whoinElement',
'verbosity',
'version',
'volume',
'x',
'y',
'z'
}
# Language shipped functions and class ( )
functions = {
'abs',
'acos',
'acosh',
'adaptmesh',
'adj',
'AffineCG',
'AffineGMRES',
'arg',
'asin',
'asinh',
'assert',
'atan',
'atan2',
'atanh',
'atof',
'atoi',
'BFGS',
'broadcast',
'buildlayers',
'buildmesh',
'ceil',
'chi',
'complexEigenValue',
'copysign',
'change',
'checkmovemesh',
'clock',
'cmaes',
'conj',
'convect',
'cos',
'cosh',
'cube',
'd',
'dd',
'dfft',
'diffnp',
'diffpos',
'dimKrylov',
'dist',
'dumptable',
'dx',
'dxx',
'dxy',
'dxz',
'dy',
'dyx',
'dyy',
'dyz',
'dz',
'dzx',
'dzy',
'dzz',
'EigenValue',
'emptymesh',
'erf',
'erfc',
'exec',
'exit',
'exp',
'fdim',
'floor',
'fmax',
'fmin',
'fmod',
'freeyams',
'getARGV',
'getline',
'gmshload',
'gmshload3',
'gslcdfugaussianP',
'gslcdfugaussianQ',
'gslcdfugaussianPinv',
'gslcdfugaussianQinv',
'gslcdfgaussianP',
'gslcdfgaussianQ',
'gslcdfgaussianPinv',
'gslcdfgaussianQinv',
'gslcdfgammaP',
'gslcdfgammaQ',
'gslcdfgammaPinv',
'gslcdfgammaQinv',
'gslcdfcauchyP',
'gslcdfcauchyQ',
'gslcdfcauchyPinv',
'gslcdfcauchyQinv',
'gslcdflaplaceP',
'gslcdflaplaceQ',
'gslcdflaplacePinv',
'gslcdflaplaceQinv',
'gslcdfrayleighP',
'gslcdfrayleighQ',
'gslcdfrayleighPinv',
'gslcdfrayleighQinv',
'gslcdfchisqP',
'gslcdfchisqQ',
'gslcdfchisqPinv',
'gslcdfchisqQinv',
'gslcdfexponentialP',
'gslcdfexponentialQ',
'gslcdfexponentialPinv',
'gslcdfexponentialQinv',
'gslcdfexppowP',
'gslcdfexppowQ',
'gslcdftdistP',
'gslcdftdistQ',
'gslcdftdistPinv',
'gslcdftdistQinv',
'gslcdffdistP',
'gslcdffdistQ',
'gslcdffdistPinv',
'gslcdffdistQinv',
'gslcdfbetaP',
'gslcdfbetaQ',
'gslcdfbetaPinv',
'gslcdfbetaQinv',
'gslcdfflatP',
'gslcdfflatQ',
'gslcdfflatPinv',
'gslcdfflatQinv',
'gslcdflognormalP',
'gslcdflognormalQ',
'gslcdflognormalPinv',
'gslcdflognormalQinv',
'gslcdfgumbel1P',
'gslcdfgumbel1Q',
'gslcdfgumbel1Pinv',
'gslcdfgumbel1Qinv',
'gslcdfgumbel2P',
'gslcdfgumbel2Q',
'gslcdfgumbel2Pinv',
'gslcdfgumbel2Qinv',
'gslcdfweibullP',
'gslcdfweibullQ',
'gslcdfweibullPinv',
'gslcdfweibullQinv',
'gslcdfparetoP',
'gslcdfparetoQ',
'gslcdfparetoPinv',
'gslcdfparetoQinv',
'gslcdflogisticP',
'gslcdflogisticQ',
'gslcdflogisticPinv',
'gslcdflogisticQinv',
'gslcdfbinomialP',
'gslcdfbinomialQ',
'gslcdfpoissonP',
'gslcdfpoissonQ',
'gslcdfgeometricP',
'gslcdfgeometricQ',
'gslcdfnegativebinomialP',
'gslcdfnegativebinomialQ',
'gslcdfpascalP',
'gslcdfpascalQ',
'gslinterpakima',
'gslinterpakimaperiodic',
'gslinterpcsplineperiodic',
'gslinterpcspline',
'gslinterpsteffen',
'gslinterplinear',
'gslinterppolynomial',
'gslranbernoullipdf',
'gslranbeta',
'gslranbetapdf',
'gslranbinomialpdf',
'gslranexponential',
'gslranexponentialpdf',
'gslranexppow',
'gslranexppowpdf',
'gslrancauchy',
'gslrancauchypdf',
'gslranchisq',
'gslranchisqpdf',
'gslranerlang',
'gslranerlangpdf',
'gslranfdist',
'gslranfdistpdf',
'gslranflat',
'gslranflatpdf',
'gslrangamma',
'gslrangammaint',
'gslrangammapdf',
'gslrangammamt',
'gslrangammaknuth',
'gslrangaussian',
'gslrangaussianratiomethod',
'gslrangaussianziggurat',
'gslrangaussianpdf',
'gslranugaussian',
'gslranugaussianratiomethod',
'gslranugaussianpdf',
'gslrangaussiantail',
'gslrangaussiantailpdf',
'gslranugaussiantail',
'gslranugaussiantailpdf',
'gslranlandau',
'gslranlandaupdf',
'gslrangeometricpdf',
'gslrangumbel1',
'gslrangumbel1pdf',
'gslrangumbel2',
'gslrangumbel2pdf',
'gslranlogistic',
'gslranlogisticpdf',
'gslranlognormal',
'gslranlognormalpdf',
'gslranlogarithmicpdf',
'gslrannegativebinomialpdf',
'gslranpascalpdf',
'gslranpareto',
'gslranparetopdf',
'gslranpoissonpdf',
'gslranrayleigh',
'gslranrayleighpdf',
'gslranrayleightail',
'gslranrayleightailpdf',
'gslrantdist',
'gslrantdistpdf',
'gslranlaplace',
'gslranlaplacepdf',
'gslranlevy',
'gslranweibull',
'gslranweibullpdf',
'gslsfairyAi',
'gslsfairyBi',
'gslsfairyAiscaled',
'gslsfairyBiscaled',
'gslsfairyAideriv',
'gslsfairyBideriv',
'gslsfairyAiderivscaled',
'gslsfairyBiderivscaled',
'gslsfairyzeroAi',
'gslsfairyzeroBi',
'gslsfairyzeroAideriv',
'gslsfairyzeroBideriv',
'gslsfbesselJ0',
'gslsfbesselJ1',
'gslsfbesselJn',
'gslsfbesselY0',
'gslsfbesselY1',
'gslsfbesselYn',
'gslsfbesselI0',
'gslsfbesselI1',
'gslsfbesselIn',
'gslsfbesselI0scaled',
'gslsfbesselI1scaled',
'gslsfbesselInscaled',
'gslsfbesselK0',
'gslsfbesselK1',
'gslsfbesselKn',
'gslsfbesselK0scaled',
'gslsfbesselK1scaled',
'gslsfbesselKnscaled',
'gslsfbesselj0',
'gslsfbesselj1',
'gslsfbesselj2',
'gslsfbesseljl',
'gslsfbessely0',
'gslsfbessely1',
'gslsfbessely2',
'gslsfbesselyl',
'gslsfbesseli0scaled',
'gslsfbesseli1scaled',
'gslsfbesseli2scaled',
'gslsfbesselilscaled',
'gslsfbesselk0scaled',
'gslsfbesselk1scaled',
'gslsfbesselk2scaled',
'gslsfbesselklscaled',
'gslsfbesselJnu',
'gslsfbesselYnu',
'gslsfbesselInuscaled',
'gslsfbesselInu',
'gslsfbesselKnuscaled',
'gslsfbesselKnu',
'gslsfbessellnKnu',
'gslsfbesselzeroJ0',
'gslsfbesselzeroJ1',
'gslsfbesselzeroJnu',
'gslsfclausen',
'gslsfhydrogenicR1',
'gslsfdawson',
'gslsfdebye1',
'gslsfdebye2',
'gslsfdebye3',
'gslsfdebye4',
'gslsfdebye5',
'gslsfdebye6',
'gslsfdilog',
'gslsfmultiply',
'gslsfellintKcomp',
'gslsfellintEcomp',
'gslsfellintPcomp',
'gslsfellintDcomp',
'gslsfellintF',
'gslsfellintE',
'gslsfellintRC',
'gslsferfc',
'gslsflogerfc',
'gslsferf',
'gslsferfZ',
'gslsferfQ',
'gslsfhazard',
'gslsfexp',
'gslsfexpmult',
'gslsfexpm1',
'gslsfexprel',
'gslsfexprel2',
'gslsfexpreln',
'gslsfexpintE1',
'gslsfexpintE2',
'gslsfexpintEn',
'gslsfexpintE1scaled',
'gslsfexpintE2scaled',
'gslsfexpintEnscaled',
'gslsfexpintEi',
'gslsfexpintEiscaled',
'gslsfShi',
'gslsfChi',
'gslsfexpint3',
'gslsfSi',
'gslsfCi',
'gslsfatanint',
'gslsffermidiracm1',
'gslsffermidirac0',
'gslsffermidirac1',
'gslsffermidirac2',
'gslsffermidiracint',
'gslsffermidiracmhalf',
'gslsffermidirachalf',
'gslsffermidirac3half',
'gslsffermidiracinc0',
'gslsflngamma',
'gslsfgamma',
'gslsfgammastar',
'gslsfgammainv',
'gslsftaylorcoeff',
'gslsffact',
'gslsfdoublefact',
'gslsflnfact',
'gslsflndoublefact',
'gslsflnchoose',
'gslsfchoose',
'gslsflnpoch',
'gslsfpoch',
'gslsfpochrel',
'gslsfgammaincQ',
'gslsfgammaincP',
'gslsfgammainc',
'gslsflnbeta',
'gslsfbeta',
'gslsfbetainc',
'gslsfgegenpoly1',
'gslsfgegenpoly2',
'gslsfgegenpoly3',
'gslsfgegenpolyn',
'gslsfhyperg0F1',
'gslsfhyperg1F1int',
'gslsfhyperg1F1',
'gslsfhypergUint',
'gslsfhypergU',
'gslsfhyperg2F0',
'gslsflaguerre1',
'gslsflaguerre2',
'gslsflaguerre3',
'gslsflaguerren',
'gslsflambertW0',
'gslsflambertWm1',
'gslsflegendrePl',
'gslsflegendreP1',
'gslsflegendreP2',
'gslsflegendreP3',
'gslsflegendreQ0',
'gslsflegendreQ1',
'gslsflegendreQl',
'gslsflegendrePlm',
'gslsflegendresphPlm',
'gslsflegendrearraysize',
'gslsfconicalPhalf',
'gslsfconicalPmhalf',
'gslsfconicalP0',
'gslsfconicalP1',
'gslsfconicalPsphreg',
'gslsfconicalPcylreg',
'gslsflegendreH3d0',
'gslsflegendreH3d1',
'gslsflegendreH3d',
'gslsflog',
'gslsflogabs',
'gslsflog1plusx',
'gslsflog1plusxmx',
'gslsfpowint',
'gslsfpsiint',
'gslsfpsi',
'gslsfpsi1piy',
'gslsfpsi1int',
'gslsfpsi1',
'gslsfpsin',
'gslsfsynchrotron1',
'gslsfsynchrotron2',
'gslsftransport2',
'gslsftransport3',
'gslsftransport4',
'gslsftransport5',
'gslsfsin',
'gslsfcos',
'gslsfhypot',
'gslsfsinc',
'gslsflnsinh',
'gslsflncosh',
'gslsfanglerestrictsymm',
'gslsfanglerestrictpos',
'gslsfzetaint',
'gslsfzeta',
'gslsfzetam1',
'gslsfzetam1int',
'gslsfhzeta',
'gslsfetaint',
'gslsfeta',
'imag',
'int1d',
'int2d',
'int3d',
'intalledges',
'intallfaces',
'interpolate',
'invdiff',
'invdiffnp',
'invdiffpos',
'Isend',
'isInf',
'isNaN',
'isoline',
'Irecv',
'j0',
'j1',
'jn',
'jump',
'lgamma',
'LinearCG',
'LinearGMRES',
'log',
'log10',
'lrint',
'lround',
'max',
'mean',
'medit',
'min',
'mmg3d',
'movemesh',
'movemesh23',
'mpiAlltoall',
'mpiAlltoallv',
'mpiAllgather',
'mpiAllgatherv',
'mpiAllReduce',
'mpiBarrier',
'mpiGather',
'mpiGatherv',
'mpiRank',
'mpiReduce',
'mpiScatter',
'mpiScatterv',
'mpiSize',
'mpiWait',
'mpiWaitAny',
'mpiWtick',
'mpiWtime',
'mshmet',
'NaN',
'NLCG',
'on',
'plot',
'polar',
'Post',
'pow',
'processor',
'processorblock',
'projection',
'randinit',
'randint31',
'randint32',
'random',
'randreal1',
'randreal2',
'randreal3',
'randres53',
'Read',
'readmesh',
'readmesh3',
'Recv',
'rint',
'round',
'savemesh',
'savesol',
'savevtk',
'seekg',
'Sent',
'set',
'sign',
'signbit',
'sin',
'sinh',
'sort',
'splitComm',
'splitmesh',
'sqrt',
'square',
'srandom',
'srandomdev',
'Stringification',
'swap',
'system',
'tan',
'tanh',
'tellg',
'tetg',
'tetgconvexhull',
'tetgreconstruction',
'tetgtransfo',
'tgamma',
'triangulate',
'trunc',
'Wait',
'Write',
'y0',
'y1',
'yn'
}
# function parameters
parameters = {
'A',
'A1',
'abserror',
'absolute',
'aniso',
'aspectratio',
'B',
'B1',
'bb',
'beginend',
'bin',
'boundary',
'bw',
'close',
'cmm',
'coef',
'composante',
'cutoff',
'datafilename',
'dataname',
'dim',
'distmax',
'displacement',
'doptions',
'dparams',
'eps',
'err',
'errg',
'facemerge',
'facetcl',
'factorize',
'file',
'fill',
'fixedborder',
'flabel',
'flags',
'floatmesh',
'floatsol',
'fregion',
'gradation',
'grey',
'hmax',
'hmin',
'holelist',
'hsv',
'init',
'inquire',
'inside',
'IsMetric',
'iso',
'ivalue',
'keepbackvertices',
'label',
'labeldown',
'labelmid',
'labelup',
'levelset',
'loptions',
'lparams',
'maxit',
'maxsubdiv',
'meditff',
'mem',
'memory',
'metric',
'mode',
'nbarrow',
'nbiso',
'nbiter',
'nbjacoby',
'nboffacetcl',
'nbofholes',
'nbofregions',
'nbregul',
'nbsmooth',
'nbvx',
'ncv',
'nev',
'nomeshgeneration',
'normalization',
'omega',
'op',
'optimize',
'option',
'options',
'order',
'orientation',
'periodic',
'power',
'precon',
'prev',
'ps',
'ptmerge',
'qfe',
'qforder',
'qft',
'qfV',
'ratio',
'rawvector',
'reffacelow',
'reffacemid',
'reffaceup',
'refnum',
'reftet',
'reftri',
'region',
'regionlist',
'renumv',
'rescaling',
'ridgeangle',
'save',
'sigma',
'sizeofvolume',
'smoothing',
'solver',
'sparams',
'split',
'splitin2',
'splitpbedge',
'stop',
'strategy',
'swap',
'switch',
'sym',
't',
'tgv',
'thetamax',
'tol',
'tolpivot',
'tolpivotsym',
'transfo',
'U2Vc',
'value',
'varrow',
'vector',
'veps',
'viso',
'wait',
'width',
'withsurfacemesh',
'WindowIndex',
'which',
'zbound'
}
# deprecated
deprecated = {'fixeborder'}
# do not highlight
suppress_highlight = {
'alignof',
'asm',
'constexpr',
'decltype',
'div',
'double',
'grad',
'mutable',
'namespace',
'noexcept',
'restrict',
'static_assert',
'template',
'this',
'thread_local',
'typeid',
'typename',
'using'
}
def get_tokens_unprocessed(self, text):
for index, token, value in CppLexer.get_tokens_unprocessed(self, text):
if value in self.operators:
yield index, Operator, value
elif value in self.types:
yield index, Keyword.Type, value
elif value in self.fespaces:
yield index, Name.Class, value
elif value in self.preprocessor:
yield index, Comment.Preproc, value
elif value in self.keywords:
yield index, Keyword.Reserved, value
elif value in self.functions:
yield index, Name.Function, value
elif value in self.parameters:
yield index, Keyword.Pseudo, value
elif value in self.suppress_highlight:
yield index, Name, value
else:
yield index, token, value
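# Usage sketch added for illustration (not part of the original lexer): this method
# only re-tags tokens emitted by CppLexer, so the enclosing lexer class (defined
# earlier in this module; its name is assumed here) is used like any other Pygments
# lexer, for example:
#   from pygments import highlight
#   from pygments.formatters import HtmlFormatter
#   html = highlight(freefem_source_code, TheFreeFemLexerDefinedAbove(), HtmlFormatter())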
| 30.110122 | 88 | 0.357642 |
7947b1a18064d729d8bb20fef64e3d3d0aa71ded | 858 | py | Python | lib/surface/ml/video/__init__.py | bopopescu/Google-Cloud-SDK-1 | c4683bacb2f6192d8a816932e438a0493085469b | ["Apache-2.0"] | null | null | null | lib/surface/ml/video/__init__.py | bopopescu/Google-Cloud-SDK-1 | c4683bacb2f6192d8a816932e438a0493085469b | ["Apache-2.0"] | null | null | null | lib/surface/ml/video/__init__.py | bopopescu/Google-Cloud-SDK-1 | c4683bacb2f6192d8a816932e438a0493085469b | ["Apache-2.0"] | 1 | 2020-07-24T20:13:29.000Z | 2020-07-24T20:13:29.000Z |
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command group for ml video."""
from googlecloudsdk.calliope import base
@base.ReleaseTracks(
base.ReleaseTrack.GA, base.ReleaseTrack.BETA, base.ReleaseTrack.ALPHA)
class VideoIntelligence(base.Group):
"""Cloud ML Video-Intelligence command groups."""
| 37.304348 | 74 | 0.765734 |
7947b1f2bf9ce7d7673aee1d67def43404705041 | 1,902 | py | Python | Model/nets.py | zigoom/DrawAndPainting_Pytorch | 690de052f1ef1f9b5dc4012a003e87200d6ba892 | ["Apache-2.0"] | 9 | 2019-05-29T02:29:05.000Z | 2021-07-24T08:07:31.000Z | Model/nets.py | zigoom/DrawAndPainting_Pytorch | 690de052f1ef1f9b5dc4012a003e87200d6ba892 | ["Apache-2.0"] | 1 | 2019-06-07T23:58:35.000Z | 2019-06-07T23:58:35.000Z | Model/nets.py | zigoom/DrawAndPainting_Pytorch | 690de052f1ef1f9b5dc4012a003e87200d6ba892 | ["Apache-2.0"] | 7 | 2020-04-15T06:49:19.000Z | 2021-04-29T09:52:08.000Z |
import torchvision.models as models
import torch.nn as nn
import torch.nn.functional as F
def resnet18(numclasses, pretrained=False):
model = models.resnet18(pretrained)
conv1_out_channels = model.conv1.out_channels
model.conv1 = nn.Conv2d(1, conv1_out_channels, kernel_size=3,
stride=1, padding=1, bias=False)
model.maxpool = nn.MaxPool2d(kernel_size=2)
fc_features = model.fc.in_features
model.fc = nn.Linear(fc_features, numclasses)
return model
def resnet34(numclasses, pretrained=False):
model = models.resnet34(pretrained)
conv1_out_channels = model.conv1.out_channels
model.conv1 = nn.Conv2d(1, conv1_out_channels, kernel_size=3,
stride=1, padding=1, bias=False)
model.maxpool = nn.MaxPool2d(kernel_size=2)
fc_features = model.fc.in_features
model.fc = nn.Linear(fc_features, numclasses)
return model
class ConvNet(nn.Module):
def __init__(self, numclasses):
"""
args
"""
super(ConvNet, self).__init__()
self.layer1 = nn.Sequential(
nn.Conv2d(1, 64, 3, 1, 1),
nn.ReLU(),
nn.MaxPool2d(kernel_size=2)
)
self.layer2 = nn.Sequential(
nn.Conv2d(64, 256, 3, 1, 1),
nn.ReLU(),
nn.MaxPool2d(kernel_size=2)
)
self.layer3 = nn.Sequential(
nn.Conv2d(256, 512, 3, 1, 1),
nn.ReLU(),
nn.MaxPool2d(kernel_size=2)
)
self.fc = nn.Sequential(
nn.Linear(512*3*3, 512),
nn.Linear(512, numclasses)
)
def forward(self, net):
net = self.layer1(net)
net = self.layer2(net)
net = self.layer3(net)
net = net.view(net.size(0), -1)
net = self.fc(net)
return net
def convnet(numclasses):
return ConvNet(numclasses)
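# Usage sketch added for illustration (not part of the original module). The
# 512*3*3 flatten size in ConvNet implies a single-channel 28x28 input
# (28 -> 14 -> 7 -> 3 after three 2x2 max-pools); that input size is assumed here.
if __name__ == '__main__':
    import torch
    model = convnet(numclasses=10)
    dummy = torch.randn(1, 1, 28, 28)  # one grayscale 28x28 sketch
    logits = model(dummy)
    print(logits.shape)  # torch.Size([1, 10])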
| 29.261538 | 65 | 0.59306 |
7947b2003e60d0eabdcadcd1bba4277a7bdd8720 | 3,710 | py | Python | microsoft/testsuites/xdp/common.py | srveniga/lisa | 0b5bcf028ed4211d79ff90b9f915981c426baab4 | ["MIT"] | null | null | null | microsoft/testsuites/xdp/common.py | srveniga/lisa | 0b5bcf028ed4211d79ff90b9f915981c426baab4 | ["MIT"] | null | null | null | microsoft/testsuites/xdp/common.py | srveniga/lisa | 0b5bcf028ed4211d79ff90b9f915981c426baab4 | ["MIT"] | null | null | null |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import re
from typing import List, Pattern
from lisa import Logger, Node, SkippedException, UnsupportedDistroException
from lisa.nic import NicInfo
from lisa.tools import Echo, Ethtool, Mount
from microsoft.testsuites.xdp.xdpdump import XdpDump
_rx_drop_patterns = [
# rx_queue_0_xdp_drop
re.compile(r"^rx_queue_\d+_xdp_drop$"),
# rx_xdp_drop
re.compile(r"^rx_xdp_drop$"),
]
_tx_forwarded_patterns = [
# rx_xdp_tx
re.compile(r"^rx_xdp_tx$"),
# rx_xdp_tx_xmit
re.compile(r"^rx_xdp_tx_xmit$"),
]
_huge_page_disks = {"/mnt/huge": "", "/mnt/huge1g": "pagesize=1G"}
def get_xdpdump(node: Node) -> XdpDump:
try:
xdpdump = node.tools[XdpDump]
except UnsupportedDistroException as identifier:
raise SkippedException(identifier)
return xdpdump
def get_forwarded_count(
node: Node, nic: NicInfo, previous_count: int, log: Logger
) -> int:
return _aggregate_count(
node=node,
nic=nic,
previous_count=previous_count,
log=log,
counter_name="xdp forwarded",
patterns=_tx_forwarded_patterns,
)
def get_dropped_count(
node: Node, nic: NicInfo, previous_count: int, log: Logger
) -> int:
return _aggregate_count(
node=node,
nic=nic,
previous_count=previous_count,
log=log,
counter_name="xdp droppped",
patterns=_rx_drop_patterns,
)
def set_hugepage(node: Node) -> None:
mount = node.tools[Mount]
for point, options in _huge_page_disks.items():
mount.mount(disk_name="nodev", point=point, type="hugetlbfs", options=options)
echo = node.tools[Echo]
echo.write_to_file(
"4096",
node.get_pure_path(
"/sys/devices/system/node/node0/hugepages/hugepages-2048kB/nr_hugepages"
),
sudo=True,
)
echo.write_to_file(
"1",
node.get_pure_path(
"/sys/devices/system/node/node0/hugepages/hugepages-1048576kB/nr_hugepages"
),
sudo=True,
)
def remove_hugepage(node: Node) -> None:
echo = node.tools[Echo]
echo.write_to_file(
"0",
node.get_pure_path(
"/sys/devices/system/node/node0/hugepages/hugepages-2048kB/nr_hugepages"
),
sudo=True,
)
echo.write_to_file(
"0",
node.get_pure_path(
"/sys/devices/system/node/node0/hugepages/hugepages-1048576kB/nr_hugepages"
),
sudo=True,
)
mount = node.tools[Mount]
for point in _huge_page_disks:
mount.umount(disk_name="nodev", point=point, type="hugetlbfs", erase=False)
pure_path = node.get_pure_path(point)
node.execute(f"rm -rf {pure_path}", sudo=True)
def _aggregate_count(
node: Node,
nic: NicInfo,
previous_count: int,
log: Logger,
counter_name: str,
patterns: List[Pattern[str]],
) -> int:
ethtool = node.tools[Ethtool]
nic_names = [nic.upper, nic.lower]
# aggregate the xdp counter across the synthetic and vf nics
new_count = -previous_count
for nic_name in nic_names:
# there may be no vf nic
if not nic_name:
continue
stats = ethtool.get_device_statistics(interface=nic_name, force_run=True)
# the counter names and matching patterns differ between the synthetic and vf nics
for pattern in patterns:
items = {key: value for key, value in stats.items() if pattern.match(key)}
if items:
log.debug(f"found {counter_name} stats: {items}")
new_count += sum(value for value in items.values())
log.debug(f"{counter_name} count: {new_count}")
return new_count
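# Illustrative check added for clarity (not part of the original helper): the
# patterns above are intended to match per-queue and aggregate XDP drop counters
# as reported by ethtool, e.g. "rx_queue_0_xdp_drop" or "rx_xdp_drop".
if __name__ == "__main__":
    sample_stats = {"rx_queue_0_xdp_drop": 3, "rx_xdp_drop": 2, "rx_packets": 100}
    matched = {
        key: value
        for key, value in sample_stats.items()
        if any(pattern.match(key) for pattern in _rx_drop_patterns)
    }
    print(matched)  # {'rx_queue_0_xdp_drop': 3, 'rx_xdp_drop': 2}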
| 27.686567 | 87 | 0.642857 |
7947b2126fd1c9546c0d37e608e7873bc913c89b | 1,005 | py | Python | login/migrations/0003_auto_20170407_1749.py | aotella/calcoff_backend | 7f25ff494ea2e1e4119fe7450d805c986fa77f0c | ["Apache-2.0", "MIT"] | null | null | null | login/migrations/0003_auto_20170407_1749.py | aotella/calcoff_backend | 7f25ff494ea2e1e4119fe7450d805c986fa77f0c | ["Apache-2.0", "MIT"] | null | null | null | login/migrations/0003_auto_20170407_1749.py | aotella/calcoff_backend | 7f25ff494ea2e1e4119fe7450d805c986fa77f0c | ["Apache-2.0", "MIT"] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2017-04-07 17:49
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('login', '0002_auto_20170404_0714'),
]
operations = [
migrations.CreateModel(
name='UserStat',
fields=[
('Uid', models.AutoField(primary_key=True, serialize=False)),
('NumGamePlayed', models.IntegerField(blank=True, null=True)),
('TotalScore', models.FloatField(default=0.0, null=True)),
],
),
migrations.AlterModelOptions(
name='usermodel',
options={'ordering': ['UserScore']},
),
migrations.AddField(
model_name='userstat',
name='UserName',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='login.UserModel'),
),
]
| 29.558824 | 103 | 0.587065 |
7947b31a1071a0c2f371350c67208e8296e3a0ed | 10,381 | py | Python | ai_covid_19/transformers/nlp.py | nestauk/ai_covid_19 | 4367b1c7a110094f090167dbe02a1c47ce27e0e9 | ["MIT"] | null | null | null | ai_covid_19/transformers/nlp.py | nestauk/ai_covid_19 | 4367b1c7a110094f090167dbe02a1c47ce27e0e9 | ["MIT"] | 14 | 2020-06-12T12:02:29.000Z | 2020-06-28T12:45:14.000Z | ai_covid_19/transformers/nlp.py | nestauk/ai_covid_19 | 4367b1c7a110094f090167dbe02a1c47ce27e0e9 | ["MIT"] | null | null | null |
#Various functions and utilities that we use to work with text
import re
import string
from string import punctuation
from string import digits
import pandas as pd
import numpy as np
import nltk  # needed for nltk.download() and nltk.sent_tokenize() below
from nltk.corpus import stopwords
from sklearn.feature_extraction.text import TfidfVectorizer
from gensim import corpora, models
from nltk.stem import *
nltk.download("stopwords", quiet=True)
nltk.download("punkt", quiet=True)
stop_words = set(
stopwords.words("english") + list(string.punctuation) + ["\\n"] + ["quot"]
)
regex_str = [
r"http[s]?://(?:[a-z]|[0-9]|[$-_@.&+]|" r"[!*\(\),](?:%[0-9a-f][0-9a-f]))+",
r"(?:\w+-\w+){2}",
r"(?:\w+-\w+)",
r"(?:\\\+n+)",
r"(?:@[\w_]+)",
r"<[^>]+>",
r"(?:\w+'\w)",
r"(?:[\w_]+)",
r"(?:\S)",
]
# Create the tokenizer which will be case insensitive and will ignore space.
tokens_re = re.compile(r"(" + "|".join(regex_str) + ")", re.VERBOSE | re.IGNORECASE)
stemmer = PorterStemmer()
def tokenize_document(text, remove_stops=False):
"""Preprocess a whole raw document.
Args:
text (str): Raw string of text.
remove_stops (bool): Flag to remove english stopwords
Return:
List of token lists, one per sentence in the document
"""
return [
clean_and_tokenize(sentence, remove_stops)
for sentence in nltk.sent_tokenize(text)
]
def clean_and_tokenize(text, remove_stops):
"""Preprocess a raw string/sentence of text.
Args:
text (str): Raw string of text.
remove_stops (bool): Flag to remove english stopwords
Return:
tokens (list, str): Preprocessed tokens.
"""
tokens = tokens_re.findall(text)
_tokens = [t.lower() for t in tokens]
filtered_tokens = [
token.replace("-", "_")
for token in _tokens
if not (remove_stops and len(token) <= 2)
and (not remove_stops or token not in stop_words)
and not any(x in token for x in string.digits)
and any(x in token for x in string.ascii_lowercase)
]
return filtered_tokens
def tfidf_vectors(data, max_features):
"""Transforms text to tfidf vectors.
Args:
data (pandas.Series)
Returns:
(`scipy.sparse`): Sparse TFIDF matrix.
"""
vectorizer = TfidfVectorizer(
stop_words="english", analyzer="word", max_features=max_features
)
return vectorizer.fit_transform(data)
#Characters to drop
drop_characters = re.sub('-','',punctuation)+digits
#Stopwords
from nltk.corpus import stopwords
stop = stopwords.words('english')  # NLTK's stopword fileid is lowercase
#Stem functions
from nltk.stem import *
stemmer = PorterStemmer()
def clean_tokenise(string,drop_characters=drop_characters,stopwords=stop_words):
'''
Takes a string and cleans (makes lowercase and removes stopwords)
'''
#Lowercase
str_low = string.lower()
#Remove symbols and numbers
str_letters = re.sub('[{drop}]'.format(drop=drop_characters),'',str_low)
#Remove stopwords
clean = [x for x in str_letters.split(' ') if (x not in stop) & (x!='')]
return(clean)
class CleanTokenize():
'''
This class takes a list of strings and returns a tokenised, clean list of token lists ready
to be processed with the LdaPipeline
It has a clean method to remove symbols and stopwords
It has a bigram method to detect collocated words
It has a stem method to stem words
'''
def __init__(self,corpus):
'''
Takes a corpus (list where each element is a string)
'''
#Store
self.corpus = corpus
def clean(self,drop=drop_characters,stopwords=stop):
'''
Removes strings and stopwords,
'''
cleaned = [clean_tokenise(doc,drop_characters=drop,stopwords=stop) for doc in self.corpus]
self.tokenised = cleaned
return(self)
def stem(self):
'''
Optional: stems words
'''
#Stems each word in each tokenised sentence
stemmed = [[stemmer.stem(word) for word in sentence] for sentence in self.tokenised]
self.tokenised = stemmed
return(self)
def bigram(self,threshold=10):
'''
Optional Create bigrams.
'''
#Colocation detector trained on the data
phrases = models.Phrases(self.tokenised,threshold=threshold)
bigram = models.phrases.Phraser(phrases)
self.tokenised = bigram[self.tokenised]
return(self)
def salient_words_per_category(token_df,corpus_freqs,thres=100,top_words=50):
'''
Create a list of salient terms in a df (salient terms normalised by corpus frequency).
Args:
tokens (list or series) a list where every element is a tokenised abstract
corpus_freqs (df) are the frequencies of terms in the whole corpus
thres (int) is the number of occurrences of a term in the subcorpus
top_words (int) is the number of salient words to output
'''
subcorpus_freqs = flatten_freq(token_df,freq=True)
merged= pd.concat([pd.DataFrame(subcorpus_freqs),corpus_freqs],axis=1,sort=True)
merged['salience'] = (merged.iloc[:,0]/merged.iloc[:,1])
results = merged.loc[merged.iloc[:,0]>thres].sort_values('salience',ascending=False).iloc[:top_words]
results.columns = ['sub_corpus','corpus','salience']
return results
def get_term_salience(df,sel_var,sel_term,corpus_freqs,thres=100,top_words=50):
'''
Returns a list of salient terms per SDG
Args:
df (df) is a df of interest
sel_var (str) is the variable we use to select
sel_term (str) is the term we use to select
corpus_freqs (df) is a df with corpus frequencies
thres (int) is the min number of word occurrences
top_words (int) is the number of words to report
'''
rel_corp = df.loc[df[sel_var]==sel_term].drop_duplicates('project_id')['tokenised_abstract']
salient_rel = salient_words_per_category(list(rel_corp),corpus_freqs,thres,top_words)
salient_rel.rename(columns={'sub_corpus':f'{str(sel_term)}_freq','corpus':'all_freq',
'salience':f'{str(sel_term)}_salience'},inplace=True)
return(salient_rel)
class LdaPipeline():
'''
This class processes lists of keywords.
How does it work?
-It is initialised with a list where every element is a collection of keywords
-It has a method to filter keywords removing those that appear less than a set number of times
-It has a method to process the filtered df into an object that gensim can work with
-It has a method to train the LDA model with the right parameters
-It has a method to predict the topics in a corpus
'''
def __init__(self,corpus):
'''
Takes the list of terms
'''
#Store the corpus
self.tokenised = corpus
def filter(self,minimum=5):
'''
Removes keywords that appear less than 5 times.
'''
#Load
tokenised = self.tokenised
#Count tokens
token_counts = pd.Series([x for el in tokenised for x in el]).value_counts()
#Tokens to keep
keep = token_counts.index[token_counts>minimum]
#Filter
tokenised_filtered = [[x for x in el if x in keep] for el in tokenised]
#Store
self.tokenised = tokenised_filtered
self.empty_groups = np.sum([len(x)==0 for x in tokenised_filtered])
return(self)
def clean(self):
'''
Remove symbols and numbers
'''
def process(self):
'''
This creates the bag of words we use in the gensim analysis
'''
#Load the list of keywords
tokenised = self.tokenised
#Create the dictionary
dictionary = corpora.Dictionary(tokenised)
#Create the Bag of words. This converts keywords into ids
corpus = [dictionary.doc2bow(x) for x in tokenised]
self.corpus = corpus
self.dictionary = dictionary
return(self)
def tfidf(self):
'''
This is optional: We extract the term-frequency inverse document frequency of the words in
the corpus. The idea is to identify those keywords that are more salient in a document by normalising over
their frequency in the whole corpus
'''
#Load the corpus
corpus = self.corpus
#Fit a TFIDF model on the data
tfidf = models.TfidfModel(corpus)
#Transform the corpus and save it
self.corpus = tfidf[corpus]
return(self)
def fit_lda(self,num_topics=20,passes=5,iterations=75,random_state=1803):
'''
This fits the LDA model taking a set of keyword arguments.
#Number of passes, iterations and random state for reproducibility. We will have to consider
reproducibility eventually.
'''
#Load the corpus
corpus = self.corpus
#Train the LDA model with the parameters we supplied
lda = models.LdaModel(corpus,id2word=self.dictionary,
num_topics=num_topics,passes=passes,iterations=iterations,random_state=random_state)
#Save the outputs
self.lda_model = lda
self.lda_topics = lda.show_topics(num_topics=num_topics)
return(self)
def predict_topics(self):
'''
This predicts the topic mix for every observation in the corpus
'''
#Load the attributes we will be working with
lda = self.lda_model
corpus = self.corpus
#Now we create a df
predicted = lda[corpus]
#Convert this into a dataframe
predicted_df = pd.concat([pd.DataFrame({x[0]:x[1] for x in topics},
index=[num]) for num,topics in enumerate(predicted)]).fillna(0)
self.predicted_df = predicted_df
return(self)
| 28.916435 | 114 | 0.608516 |
7947b3279bbde19392540a8c219832e8c871ccf3 | 11,017 | py | Python | src/gamesbyexample/hexapawn.py | asweigart/gamesbyexample | 222bfc3b15ade1cf3bde158ba72a8b7a969ccc5a | [
"Python-2.0"
] | 83 | 2021-03-01T09:16:07.000Z | 2022-01-04T20:52:42.000Z | src/gamesbyexample/hexapawn.py | asweigart/gamesbyexample | 222bfc3b15ade1cf3bde158ba72a8b7a969ccc5a | [
"Python-2.0"
] | null | null | null | src/gamesbyexample/hexapawn.py | asweigart/gamesbyexample | 222bfc3b15ade1cf3bde158ba72a8b7a969ccc5a | [
"Python-2.0"
] | 9 | 2021-03-01T09:16:18.000Z | 2021-11-21T12:22:03.000Z | """Hexapawn, by Al Sweigart [email protected]
A pawn-only chess variant where you must try to move one of your
pawns to the opposite end of the board. You also win if you block
your opponent from making a move. The original Hexapawn had a 3x3
board with six pawns, but this program lets you use boards of any
size. Based on Martin Gardner's puzzle.
More info at: https://en.wikipedia.org/wiki/Hexapawn
Tags: extra-large, game, two-player, board game"""
__version__ = 0
import sys
# Set up the constants:
X_PLAYER = 'x'
O_PLAYER = 'o'
EMPTY_SPACE = ' '
WIDTH = 'width'
HEIGHT = 'height'
def main():
print("""Hexapawn, by Al Sweigart [email protected]
A pawn-only chess variant where you must try to move one of your
pawns to the opposite end of the board. You also win if you block
your opponent from making a move.
Pawns can advance one space at a time (if they are not blocked by
an opponent's pawn), and can capture pawns that are diagonally
in front of them.
""")
width, height = askForBoardSize()
board = getNewBoard(width, height)
turn = O_PLAYER
while True: # Main game loop.
displayBoard(board)
# Check if the player is blocked and can't make any moves:
validMoves = getValidMoves(turn, board)
if len(validMoves) == 0:
print(turn.upper(), 'is blocked and cannot move!')
if turn == X_PLAYER:
print('O has won!')
elif turn == O_PLAYER:
print('X has won!')
print('Thanks for playing!')
sys.exit()
# Carry out the player's move:
doPlayerMove(turn, board)
if checkIfPlayerReachedEnd(turn, board):
displayBoard(board)
print(turn.upper(), 'has won!')
print('Thanks for playing!')
sys.exit()
if turn == X_PLAYER:
turn = O_PLAYER
elif turn == O_PLAYER:
turn = X_PLAYER
def askForBoardSize():
"""Returns a (width, height) tuple of the board dimensions the
player has requested."""
for dimension in [WIDTH, HEIGHT]:
while True: # Keep looping until the user enters a valid size.
print('Enter the board', dimension, ' (3 to 26) to play on:')
response = input('> ')
if response.isdecimal() and (3 <= int(response) <= 26):
if dimension == WIDTH:
width = int(response)
elif dimension == HEIGHT:
height = int(response)
break # The user has entered a valid size.
print('Please enter a number between 3 and 26.')
# Display a warning if the user chooses a size larger than 8.
if width > 8 or height > 8:
print('WARNING: You may have to resize the terminal window to')
print('view a board this big.')
return (width, height)
def getNewBoard(width, height):
"""Return a new dictionary that represents the board. The keys are
(x, y) tuples and the values are X_PLAYER, O_PLAYER, or EMPTY_SPACE.
There is also 'width' and 'height' keys with values of the board's
dimensions."""
board = {WIDTH: width, HEIGHT: height}
# Set up the X player's pieces at the top:
for i in range(width):
board[(i, 0)] = X_PLAYER
# Set up the O player's pieces at the bottom:
for i in range(width):
board[(i, height - 1)] = O_PLAYER
# Set up the rest of the spaces as blank:
for x in range(width):
for y in range(1, height - 1):
board[(x, y)] = EMPTY_SPACE
return board
def getNthLetter(nth):
"""Returns the "nth" letter, where nth is an integer. The 0th letter
is 'A', the 1st letter is 'B', the 2nd letter is 'C', and so on."""
return chr(nth + 65) # The ASCII value of 'A' is 65.
def getNumberForNthLetter(letter):
"""Returns the integer form of a letter. The integer of 'A' is 0,
the integer of 'B' is 1, the integer of 'C' is 2, and so on."""
return ord(letter) - 65 # The ASCII value of 'A' is 65.
def displayBoard(board):
"""Display the board on the screen."""
# Print the letter labels across the top:
print(' ', end='') # Print the indentation for the letter labels.
for x in range(board[WIDTH]):
print(' ', getNthLetter(x), ' ', sep='', end='')
print() # Print a newline.
for y in range(board[HEIGHT]):
# Print the horizontal border:
print(' ', end='') # Print the indentation.
for x in range(board[WIDTH]):
print('+---', end='')
print('+')
# Print the number labels on the left side:
print(str(y + 1).rjust(2) + ' ', end='')
# Print the board spaces:
for x in range(board[WIDTH]):
print('| ' + board[(x, y)] + ' ', end='')
print('|', str(y + 1).ljust(2))
# Print the last horizontal border at the very bottom:
print(' ', end='') # Print the indentation.
for x in range(board[WIDTH]):
print('+---', end='')
print('+')
# Print the letter labels across the bottom:
print(' ', end='') # Print the indentation for the letter labels.
for x in range(board[WIDTH]):
print(' ', chr(x + 65), ' ', sep='', end='')
print() # Print a newline.
def doPlayerMove(player, board):
"""Ask the player for a move and carry it out on the board."""
validMoves = getValidMoves(player, board)
print('It is player ' + player.upper() + '\'s turn.')
print('Select which pawn you want to move:', ' '.join(validMoves))
print('(Or enter QUIT to quit.)')
while True: # Keep looping until the player enters a valid move.
selectedPawn = input('> ').upper()
if selectedPawn == 'QUIT':
print('Thanks for playing!')
sys.exit()
if selectedPawn in validMoves:
break # The user entered a valid move, so break.
print('That is not a valid move.')
# Figure out which moves the selected pawn can make:
x = getNumberForNthLetter(selectedPawn[0])
y = int(selectedPawn[1]) - 1
possibleMoves = []
if pieceCanCaptureLeft(player, x, y, board):
possibleMoves.append('L')
if pieceCanAdvance(player, x, y, board):
possibleMoves.append('A')
if pieceCanCaptureRight(player, x, y, board):
possibleMoves.append('R')
if len(possibleMoves) != 1:
# There are multiple possible moves, so ask the player which
# move they want to make:
print('Enter the move this pawn will make:')
if 'L' in possibleMoves:
print('(L)eft Capture ', end='')
if 'A' in possibleMoves:
print('(A)dvance Forward ', end='')
if 'R' in possibleMoves:
print('(R)ight Capture', end='')
print()
while True: # Ask until the player until enters a valid move.
move = input('> ').upper()
if move in possibleMoves:
break
print('Enter which move this pawn will take.')
elif len(possibleMoves) == 1:
# There's only one possible move, so automatically select it.
move = possibleMoves[0]
# Carry out this pawn's move:
board[(x, y)] = EMPTY_SPACE
if move == 'A':
if player == X_PLAYER:
board[(x, y + 1)] = X_PLAYER
elif player == O_PLAYER:
board[(x, y - 1)] = O_PLAYER
elif move == 'L':
if player == X_PLAYER:
board[(x - 1, y + 1)] = X_PLAYER
elif player == O_PLAYER:
board[(x - 1, y - 1)] = O_PLAYER
elif move == 'R':
if player == X_PLAYER:
board[(x + 1, y + 1)] = X_PLAYER
elif player == O_PLAYER:
board[(x + 1, y - 1)] = O_PLAYER
def getValidMoves(player, board):
"""Return a list of board space labels that have a player piece
that can make a move."""
validMoves = []
for x in range(board[WIDTH]):
for y in range(board[HEIGHT]):
if board[(x, y)] == player:
if (pieceCanAdvance(player, x, y, board) or
pieceCanCaptureLeft(player, x, y, board) or
pieceCanCaptureRight(player, x, y, board)):
validMoves.append(getNthLetter(x) + str(y + 1))
return validMoves
def pieceCanAdvance(player, x, y, board):
"""Return True if the player's piece at (x, y) on the board can
move forward. Otherwise return False."""
if player == X_PLAYER: # X's "forward" is the space below.
if (x, y + 1) in board and board[(x, y + 1)] == EMPTY_SPACE:
return True # Piece can move forward.
elif player == O_PLAYER: # O's "forward" is the space above.
if (x, y - 1) in board and board[(x, y - 1)] == EMPTY_SPACE:
return True # Piece can move forward.
return False # Piece cannot move forward.
def pieceCanCaptureLeft(player, x, y, board):
"""Return True if the player's piece at (x, y) on the board can
capture the piece forward and left. Otherwise return False."""
# Can this piece capture an opponent's piece?
if player == X_PLAYER: # X's "forward" is the space below.
# Check diagonally forward and left:
if (x - 1, y + 1) in board and board[(x - 1, y + 1)] == O_PLAYER:
return True
elif player == O_PLAYER: # O's "forward" is the space above.
# Check diagonally forward and left:
if (x - 1, y - 1) in board and board[(x - 1, y - 1)] == X_PLAYER:
return True
return False # This piece cannot capture.
def pieceCanCaptureRight(player, x, y, board):
"""Return True if the player's piece at (x, y) on the board can
capture the piece forward and right. Otherwise return False."""
# Can this piece capture an opponent's piece?
if player == X_PLAYER: # X's "forward" is the space below.
# Check diagonally forward and right:
if (x + 1, y + 1) in board and board[(x + 1, y + 1)] == O_PLAYER:
return True
elif player == O_PLAYER: # O's "forward" is the space above.
# Check diagonally forward and right:
if (x + 1, y - 1) in board and board[(x + 1, y - 1)] == X_PLAYER:
return True
return False # This piece cannot capture.
def checkIfPlayerReachedEnd(player, board):
"""Return True if the player has reached the opposite end of the
board and won. Otherwise return False."""
if player == X_PLAYER:
# Check if X has any pieces on the bottom row:
for x in range(board['width']):
if board[(x, board['height'] - 1)] == X_PLAYER:
return True
return False
elif player == O_PLAYER:
# Check if O has any pieces on the top row:
for x in range(board['width']):
if board[(x, 0)] == O_PLAYER:
return True
return False
# If this program was run (instead of imported), run the game:
if __name__ == '__main__':
main()
| 36.359736 | 73 | 0.588 |
7947b32ee3a7379b2c8e7ebf71723c71392cd4b3 | 70 | py | Python | gpu_test.py | fangchenplus/CarND-Behavioral-Cloning-P3 | 06664d942674d978fe3346e3c168df63ba071f9d | ["MIT"] | null | null | null | gpu_test.py | fangchenplus/CarND-Behavioral-Cloning-P3 | 06664d942674d978fe3346e3c168df63ba071f9d | ["MIT"] | null | null | null | gpu_test.py | fangchenplus/CarND-Behavioral-Cloning-P3 | 06664d942674d978fe3346e3c168df63ba071f9d | ["MIT"] | null | null | null |
import tensorflow as tf
tf.config.experimental.list_physical_devices()
| 35 | 46 | 0.871429 |
7947b3bf08972c42264adde426fb94f43a50092a | 11,245 | py | Python | rf2settings/app_settings.py | tappi287/rf2_video_settings | 6ae73c63f48e6d515a9efb653f236dea0494d9f1 | ["MIT"] | 8 | 2020-12-09T17:34:40.000Z | 2022-02-21T10:15:09.000Z | rf2settings/app_settings.py | tappi287/rf2_video_settings | 6ae73c63f48e6d515a9efb653f236dea0494d9f1 | ["MIT"] | 11 | 2021-02-27T00:21:47.000Z | 2022-02-25T14:41:56.000Z | rf2settings/app_settings.py | tappi287/rf2_video_settings | 6ae73c63f48e6d515a9efb653f236dea0494d9f1 | ["MIT"] | 2 | 2021-06-28T21:11:53.000Z | 2022-02-06T17:20:18.000Z |
import json
import logging
from pathlib import Path, WindowsPath
from shutil import copyfile
from typing import Iterator, Union, Dict
from .globals import get_settings_dir, SETTINGS_FILE_NAME, SETTINGS_CONTENT_FILE_NAME, get_default_presets_dir, \
get_present_mon_bin
from .preset.preset_base import PRESET_TYPES
from .preset.presets_dir import PresetDir, get_user_presets_dir
from .rfactor import RfactorPlayer, RfactorLocation
from .utils import JsonRepr
class AppSettings(JsonRepr):
backup_created = False
needs_admin = False
selected_presets: Dict[str, str] = dict()
replay_preset = str()
rf_overwrite_location = ''
last_rf_version = str()
user_presets_dir = str()
deleted_defaults = list() # Default Presets the user deleted
server_favourites = list()
server_browser: dict = {'filter_fav': False, 'filter_empty': False, 'filter_pwd': False, 'filter_version': False,
'filter_text': '', 'store_pwd': False}
benchmark_settings = dict()
headlight_settings = dict()
headlight_controller_assignments = dict()
headlight_rf_key = 'DIK_H'
server_passwords = dict()
apply_webui_settings = False
# -----------------------------------
# -- Won't be saved to file:
skip_keys = ['first_load_complete', 'session_selection', 'replay_playing',
'present_mon_bin', 'present_mon_result_dir',
'content_selected', 'content_keys', 'content_urls', 'content', 'content_saved']
present_mon_bin: Path = get_present_mon_bin()
present_mon_result_dir: Path = get_user_presets_dir() / 'benchmark_results'
first_load_complete = False
replay_playing = False
content = dict()
content_keys = ['series', 'tracks', 'cars']
content_urls = ['/rest/race/series', '/rest/race/track', '/rest/race/car']
content_selected = dict() # Content Selection will be saved to preset but transferred to greenlets via this var
session_selection = dict() # Session Selection will be saved to preset but transferred to greenlets via this var
content_saved = False
def __init__(self):
self.backup_created = AppSettings.backup_created
self.selected_presets = AppSettings.selected_presets
self.user_presets_dir = AppSettings.user_presets_dir
@staticmethod
def update_user_presets_dir(user_presets_dir: Union[str, Path]) -> bool:
user_presets_dir = Path(user_presets_dir)
user_presets_dir_str = str(WindowsPath(user_presets_dir))
try:
if user_presets_dir.exists():
logging.info('Updating User Presets Dir: %s', user_presets_dir_str)
PresetDir.value = user_presets_dir_str
AppSettings.user_presets_dir = user_presets_dir_str
AppSettings.save()
AppSettings.copy_default_presets()
else:
logging.error('Selected Presets Directory does not exist: %s', user_presets_dir.as_posix())
return False
except Exception as e:
logging.error('Error accessing path: %s', e)
return False
return True
@staticmethod
def create_backup(rf: RfactorPlayer):
result = False
files = (rf.player_file, rf.controller_file, rf.ini_file)
has_permission_error = False
for org in files:
if not org.is_file():
continue
bak = org.with_suffix('.original')
if AppSettings.backup_created and bak.exists():
result = True
continue
try:
copyfile(org, bak)
result = True
except Exception as e:
if type(e) is PermissionError:
has_permission_error = True
logging.fatal('Could not back-up file: %s %s', org.as_posix(), e)
result = False
if has_permission_error:
logging.error('Accessing rf2 files requires Admin rights!')
AppSettings.needs_admin = True
AppSettings.backup_created = result
AppSettings.save()
return result
@staticmethod
def restore_backup(rf: RfactorPlayer):
result = False
files = (rf.player_file, rf.controller_file, rf.ini_file)
has_permission_error = False
for org in files:
if not org.is_file():
continue
bak = org.with_suffix('.original')
if not bak.exists():
logging.fatal('Could not locate BackUp file: %s', bak.as_posix())
continue
try:
# Delete current file
org.unlink()
# Create original file
copyfile(bak, org)
result = True
except Exception as e:
if type(e) is PermissionError:
has_permission_error = True
logging.fatal('Could not restore file: %s %s', org.as_posix(), e)
result = False
if has_permission_error:
logging.error('Accessing rf2 files requires Admin rights!')
AppSettings.needs_admin = True
AppSettings.save()
return result
@staticmethod
def iterate_default_presets() -> Iterator[Path]:
for file in get_default_presets_dir().glob('*.json'):
yield file
@classmethod
def copy_default_presets(cls) -> bool:
result = False
for file in cls.iterate_default_presets():
dst = get_user_presets_dir() / file.name
# -- Remove existing legacy default presets
cls.delete_legacy_default_presets(dst)
if dst.exists() or file.stem in cls.deleted_defaults:
continue
try:
logging.info('Creating default preset: %s', file.name)
copyfile(file, dst.with_name(file.name))
result = True
except Exception as e:
logging.error('Could not copy default preset: %s', e)
result = False
return result
@staticmethod
def delete_current_settings_presets():
""" Delete 'Current_Settings__Nickname' Presets so we can handle changed Usernames """
for file in Path(AppSettings.user_presets_dir).glob('*.json'):
for prefix in (p.prefix for p in PRESET_TYPES.values()):
name = f'{prefix}_Current_Settings__'
if file.stem.startswith(name):
try:
file.unlink(missing_ok=True)
except Exception as e:
logging.error('Error deleting current settings preset: %s', e)
@staticmethod
def delete_legacy_default_presets(dst: Path):
""" Wipe pre 0.7.8 default Presets without prefixes """
folder = dst.parent
for prefix in (p.prefix for p in PRESET_TYPES.values()):
legacy_preset_name = dst.name.removeprefix(f'{prefix}_')
legacy_file = folder / legacy_preset_name
if legacy_file.exists() and legacy_file != dst:
logging.info('Deleting legacy preset: %s', legacy_file)
legacy_file.unlink(missing_ok=True)
@staticmethod
def _get_settings_file() -> Path:
return get_settings_dir() / SETTINGS_FILE_NAME
@staticmethod
def _get_settings_content_file() -> Path:
return get_settings_dir() / SETTINGS_CONTENT_FILE_NAME
@classmethod
def save_content(cls):
file = cls._get_settings_content_file()
try:
with open(file.as_posix(), 'w') as f:
f.write(json.dumps(cls.content))
except Exception as e:
logging.error('Could not save content! %s', e)
return False
cls.content_saved = True
return True
@classmethod
def save(cls, save_content: bool = False):
# -- Save 'content' in separate file
if cls.content and not cls.content_saved:
cls.save_content()
file = cls._get_settings_content_file() if save_content else cls._get_settings_file()
try:
with open(file.as_posix(), 'w') as f:
if not save_content:
# -- Save Settings
# noinspection PyTypeChecker
f.write(json.dumps(cls.to_js_object(cls)))
else:
# -- Save Content
f.write(json.dumps(cls.content))
except Exception as e:
logging.error('Could not save application settings! %s', e)
return False
return True
@classmethod
def _first_load(cls):
if not cls.first_load_complete:
# -- Reset Content data if rFactor version changed
version = RfactorPlayer(only_version=True).version.replace('\n', '')
logging.debug('Compared last known rF version %s with current version %s', cls.last_rf_version, version)
if version != cls.last_rf_version:
cls.last_rf_version = version
content_data_file = cls._get_settings_content_file()
if content_data_file.exists():
logging.info('Found differing rFactor version. Deleting content data.')
content_data_file.unlink()
# -- Save updated version
cls.save()
else:
cls.load_content()
cls.first_load_complete = True
@classmethod
def load_content(cls) -> bool:
file = cls._get_settings_content_file()
try:
if file.exists():
with open(file.as_posix(), 'r') as f:
cls.content = json.loads(f.read())
except Exception as e:
logging.error('Could not load content list! %s', e)
return False
return True
@classmethod
def load(cls) -> bool:
file = cls._get_settings_file()
try:
if file.exists():
with open(file.as_posix(), 'r') as f:
# -- Load Settings
# noinspection PyTypeChecker
cls.from_js_dict(cls, json.loads(f.read()))
except Exception as e:
logging.error('Could not load application settings! %s', e)
return False
# -- Setup custom user preset dir if set --
PresetDir.value = AppSettings.user_presets_dir
# -- Overwrite rf2 location if overwrite location set
if cls.rf_overwrite_location and cls.rf_overwrite_location not in ('.', '..', '../modules'):
RfactorLocation.overwrite_location(cls.rf_overwrite_location)
# -- Operations on first load
cls._first_load()
return True
@classmethod
def update_webui_settings(cls, rf: RfactorPlayer):
# -- Update WebUi Session Settings for next run
if rf.webui_session_settings:
cls.session_selection = rf.webui_session_settings
# -- Update WebUi Content Selection Settings for next run
if rf.webui_content_selection:
cls.content_selected = rf.webui_content_selection
| 35.698413 | 117 | 0.601334 |
7947b45b7b951a9404bdb37be9e7e5724f21040e | 3,339 | py | Python | data/p2DJ/New/program/qiskit/simulator/startQiskit332.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | ["BSD-3-Clause"] | null | null | null | data/p2DJ/New/program/qiskit/simulator/startQiskit332.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | ["BSD-3-Clause"] | null | null | null | data/p2DJ/New/program/qiskit/simulator/startQiskit332.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | ["BSD-3-Clause"] | null | null | null |
# qubit number=2
# total number=19
import cirq
import qiskit
from qiskit import IBMQ
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2,floor, sqrt, pi
import numpy as np
import networkx as nx
def build_oracle(n: int, f) -> QuantumCircuit:
# implement the oracle O_f^\pm
# NOTE: use U1 gate (P gate) with \lambda = 180 ==> CZ gate
# or multi_control_Z_gate (issue #127)
controls = QuantumRegister(n, "ofc")
target = QuantumRegister(1, "oft")
oracle = QuantumCircuit(controls, target, name="Of")
for i in range(2 ** n):
rep = np.binary_repr(i, n)
if f(rep) == "1":
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
oracle.mct(controls, target[0], None, mode='noancilla')
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
# oracle.barrier()
# oracle.draw('mpl', filename='circuit/deutsch-oracle.png')
return oracle
def make_circuit(n:int,f) -> QuantumCircuit:
# circuit begin
input_qubit = QuantumRegister(n, "qc")
target = QuantumRegister(1, "qt")
prog = QuantumCircuit(input_qubit, target)
# inverse last one (can be omitted if using O_f^\pm)
prog.x(target)
# apply H to get superposition
for i in range(n):
prog.h(input_qubit[i])
prog.h(input_qubit[1]) # number=1
prog.h(target)
prog.barrier()
# apply oracle O_f
oracle = build_oracle(n, f)
prog.append(
oracle.to_gate(),
[input_qubit[i] for i in range(n)] + [target])
# apply H back (QFT on Z_2^n)
for i in range(n):
prog.h(input_qubit[i])
prog.barrier()
# measure
#for i in range(n):
# prog.measure(input_qubit[i], classicals[i])
prog.y(input_qubit[1]) # number=2
prog.y(input_qubit[1]) # number=4
prog.y(input_qubit[1]) # number=3
prog.rx(2.0860175219836226,input_qubit[1]) # number=7
prog.cx(input_qubit[1],input_qubit[0]) # number=16
prog.x(input_qubit[0]) # number=17
prog.cx(input_qubit[1],input_qubit[0]) # number=18
prog.x(input_qubit[0]) # number=6
prog.h(input_qubit[0]) # number=10
prog.cz(input_qubit[1],input_qubit[0]) # number=11
prog.h(input_qubit[0]) # number=12
prog.h(input_qubit[0]) # number=13
prog.cz(input_qubit[1],input_qubit[0]) # number=14
prog.h(input_qubit[0]) # number=15
# circuit end
return prog
if __name__ == '__main__':
n = 2
f = lambda rep: rep[-1]
# f = lambda rep: "1" if rep[0:2] == "01" or rep[0:2] == "10" else "0"
# f = lambda rep: "0"
prog = make_circuit(n, f)
sample_shot =2800
backend = BasicAer.get_backend('qasm_simulator')
circuit1 = transpile(prog,FakeVigo())
circuit1.x(qubit=3)
circuit1.x(qubit=3)
circuit1.measure_all()
prog = circuit1
info = execute(prog, backend=backend, shots=sample_shot).result().get_counts()
writefile = open("../data/startQiskit332.csv","w")
print(info,file=writefile)
print("results end", file=writefile)
print(circuit1.depth(),file=writefile)
print(circuit1,file=writefile)
writefile.close()
| 29.034783 | 82 | 0.627134 |
7947b658367cacd53f1d800ebdf48de1f7d790da | 455 | py | Python | asyncmy/structs.py | kijk2869/asyncmy | 02e5daa901898dc134bfa5ef56183ca6e7665242 | ["Apache-2.0"] | 82 | 2021-02-08T03:38:44.000Z | 2022-03-23T02:14:36.000Z | asyncmy/structs.py | kijk2869/asyncmy | 02e5daa901898dc134bfa5ef56183ca6e7665242 | ["Apache-2.0"] | 32 | 2021-03-16T07:30:35.000Z | 2022-03-31T22:38:00.000Z | asyncmy/structs.py | kijk2869/asyncmy | 02e5daa901898dc134bfa5ef56183ca6e7665242 | ["Apache-2.0"] | 11 | 2021-03-16T08:54:46.000Z | 2022-03-31T13:24:13.000Z |
import struct
h = struct.Struct("<h")
I = struct.Struct("<I") # noqa
H = struct.Struct("<H")
Q = struct.Struct("<Q")
i = struct.Struct("<i")
B = struct.Struct("!B")
B_ = struct.Struct("B")
HBB = struct.Struct("<HBB")
iB = struct.Struct("<iB")
IIB = struct.Struct("<IIB")
iIB23s = struct.Struct("<iIB23s")
BHHB = struct.Struct("<BHHB")
HB = struct.Struct("<HB")
xHIBHBxx = struct.Struct("<xHIBHBxx")
xhh = struct.Struct("<xhh")
HH = struct.Struct("<HH")
| 23.947368 | 37 | 0.635165 |
7947b6f5fb10230306669facd7f77d4901ad9892 | 6,674 | py | Python | networkx-d3-v2/lib/gdata/Crypto/PublicKey/DSA.py | suraj-testing2/Clock_Websites | 0e65331da40cfd3766f1bde17f0a9c7ff6666dea | ["Apache-2.0"] | 2,293 | 2015-01-02T12:46:10.000Z | 2022-03-29T09:45:43.000Z | networkx-d3-v2/lib/gdata/Crypto/PublicKey/DSA.py | suraj-testing2/Clock_Websites | 0e65331da40cfd3766f1bde17f0a9c7ff6666dea | ["Apache-2.0"] | 315 | 2015-05-31T11:55:46.000Z | 2022-01-12T08:36:37.000Z | networkx-d3-v2/lib/gdata/Crypto/PublicKey/DSA.py | suraj-testing2/Clock_Websites | 0e65331da40cfd3766f1bde17f0a9c7ff6666dea | ["Apache-2.0"] | 1,033 | 2015-01-04T07:48:40.000Z | 2022-03-24T09:34:37.000Z |
#
# DSA.py : Digital Signature Algorithm
#
# Part of the Python Cryptography Toolkit
#
# Distribute and use freely; there are no restrictions on further
# dissemination and usage except those imposed by the laws of your
# country of residence. This software is provided "as is" without
# warranty of fitness for use or suitability for any purpose, express
# or implied. Use at your own risk or not at all.
#
__revision__ = "$Id: DSA.py,v 1.16 2004/05/06 12:52:54 akuchling Exp $"
from Crypto.PublicKey.pubkey import *
from Crypto.Util import number
from Crypto.Util.number import bytes_to_long, long_to_bytes
from Crypto.Hash import SHA
try:
from Crypto.PublicKey import _fastmath
except ImportError:
_fastmath = None
class error (Exception):
pass
def generateQ(randfunc):
S=randfunc(20)
hash1=SHA.new(S).digest()
hash2=SHA.new(long_to_bytes(bytes_to_long(S)+1)).digest()
q = bignum(0)
for i in range(0,20):
c=ord(hash1[i])^ord(hash2[i])
if i==0:
c=c | 128
if i==19:
c= c | 1
q=q*256+c
while (not isPrime(q)):
q=q+2
if pow(2,159L) < q < pow(2,160L):
return S, q
raise error, 'Bad q value generated'
def generate(bits, randfunc, progress_func=None):
"""generate(bits:int, randfunc:callable, progress_func:callable)
Generate a DSA key of length 'bits', using 'randfunc' to get
random data and 'progress_func', if present, to display
the progress of the key generation.
"""
if bits<160:
raise error, 'Key length <160 bits'
obj=DSAobj()
# Generate string S and prime q
if progress_func:
progress_func('p,q\n')
while (1):
S, obj.q = generateQ(randfunc)
n=(bits-1)/160
C, N, V = 0, 2, {}
b=(obj.q >> 5) & 15
powb=pow(bignum(2), b)
powL1=pow(bignum(2), bits-1)
while C<4096:
for k in range(0, n+1):
V[k]=bytes_to_long(SHA.new(S+str(N)+str(k)).digest())
W=V[n] % powb
for k in range(n-1, -1, -1):
W=(W<<160L)+V[k]
X=W+powL1
p=X-(X%(2*obj.q)-1)
if powL1<=p and isPrime(p):
break
C, N = C+1, N+n+1
if C<4096:
break
if progress_func:
progress_func('4096 multiples failed\n')
obj.p = p
power=(p-1)/obj.q
if progress_func:
progress_func('h,g\n')
while (1):
h=bytes_to_long(randfunc(bits)) % (p-1)
g=pow(h, power, p)
if 1<h<p-1 and g>1:
break
obj.g=g
if progress_func:
progress_func('x,y\n')
while (1):
x=bytes_to_long(randfunc(20))
if 0 < x < obj.q:
break
obj.x, obj.y = x, pow(g, x, p)
return obj
def construct(tuple):
"""construct(tuple:(long,long,long,long)|(long,long,long,long,long)):DSAobj
Construct a DSA object from a 4- or 5-tuple of numbers.
"""
obj=DSAobj()
if len(tuple) not in [4,5]:
raise error, 'argument for construct() wrong length'
for i in range(len(tuple)):
field = obj.keydata[i]
setattr(obj, field, tuple[i])
return obj
class DSAobj(pubkey):
keydata=['y', 'g', 'p', 'q', 'x']
def _encrypt(self, s, Kstr):
raise error, 'DSA algorithm cannot encrypt data'
def _decrypt(self, s):
raise error, 'DSA algorithm cannot decrypt data'
def _sign(self, M, K):
if (K<2 or self.q<=K):
raise error, 'K is not between 2 and q'
r=pow(self.g, K, self.p) % self.q
s=(inverse(K, self.q)*(M+self.x*r)) % self.q
return (r,s)
def _verify(self, M, sig):
r, s = sig
if r<=0 or r>=self.q or s<=0 or s>=self.q:
return 0
w=inverse(s, self.q)
u1, u2 = (M*w) % self.q, (r*w) % self.q
v1 = pow(self.g, u1, self.p)
v2 = pow(self.y, u2, self.p)
v = ((v1*v2) % self.p)
v = v % self.q
if v==r:
return 1
return 0
def size(self):
"Return the maximum number of bits that can be handled by this key."
return number.size(self.p) - 1
def has_private(self):
"""Return a Boolean denoting whether the object contains
private components."""
if hasattr(self, 'x'):
return 1
else:
return 0
def can_sign(self):
"""Return a Boolean value recording whether this algorithm can generate signatures."""
return 1
def can_encrypt(self):
"""Return a Boolean value recording whether this algorithm can encrypt data."""
return 0
def publickey(self):
"""Return a new key object containing only the public information."""
return construct((self.y, self.g, self.p, self.q))
object=DSAobj
generate_py = generate
construct_py = construct
class DSAobj_c(pubkey):
keydata = ['y', 'g', 'p', 'q', 'x']
def __init__(self, key):
self.key = key
def __getattr__(self, attr):
if attr in self.keydata:
return getattr(self.key, attr)
else:
if self.__dict__.has_key(attr):
self.__dict__[attr]
else:
raise AttributeError, '%s instance has no attribute %s' % (self.__class__, attr)
def __getstate__(self):
d = {}
for k in self.keydata:
if hasattr(self.key, k):
d[k]=getattr(self.key, k)
return d
def __setstate__(self, state):
y,g,p,q = state['y'], state['g'], state['p'], state['q']
if not state.has_key('x'):
self.key = _fastmath.dsa_construct(y,g,p,q)
else:
x = state['x']
self.key = _fastmath.dsa_construct(y,g,p,q,x)
def _sign(self, M, K):
return self.key._sign(M, K)
def _verify(self, M, (r, s)):
return self.key._verify(M, r, s)
def size(self):
return self.key.size()
def has_private(self):
return self.key.has_private()
def publickey(self):
return construct_c((self.key.y, self.key.g, self.key.p, self.key.q))
def can_sign(self):
return 1
def can_encrypt(self):
return 0
def generate_c(bits, randfunc, progress_func=None):
obj = generate_py(bits, randfunc, progress_func)
y,g,p,q,x = obj.y, obj.g, obj.p, obj.q, obj.x
return construct_c((y,g,p,q,x))
def construct_c(tuple):
key = apply(_fastmath.dsa_construct, tuple)
return DSAobj_c(key)
if _fastmath:
#print "using C version of DSA"
generate = generate_c
construct = construct_c
error = _fastmath.error
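# Usage sketch added for illustration (not part of the original module). This is
# legacy Python 2 code from the old PyCrypto toolkit bundled with gdata; a typical
# flow (Python 2; names outside this module are assumptions) was:
#   from Crypto.PublicKey import DSA
#   key = DSA.generate(512, randfunc)   # randfunc: any callable returning N random bytes
#   signature = key.sign(digest, k)     # k: fresh random secret with 0 < k < q
#   assert key.verify(digest, signature)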
| 27.924686 | 96 | 0.570722 |
7947b7bf595fbf238505e34b1da412454050320c | 15,180 | py | Python | library/juniper_junos_pmtud.py | janul/ansible-junos-stdlib | 5c7ff7c880fffe741b96cc6705acaca0e617e1a1 | ["Apache-2.0", "BSD-3-Clause"] | 3 | 2015-10-01T21:55:22.000Z | 2016-06-21T13:45:16.000Z | library/juniper_junos_pmtud.py | janul/ansible-junos-stdlib | 5c7ff7c880fffe741b96cc6705acaca0e617e1a1 | ["Apache-2.0", "BSD-3-Clause"] | null | null | null | library/juniper_junos_pmtud.py | janul/ansible-junos-stdlib | 5c7ff7c880fffe741b96cc6705acaca0e617e1a1 | ["Apache-2.0", "BSD-3-Clause"] | 1 | 2018-02-17T09:49:15.000Z | 2018-02-17T09:49:15.000Z |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 1999-2018, Juniper Networks Inc.
# 2017, Martin Komon
#
# All rights reserved.
#
# License: Apache 2.0
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of the Juniper Networks nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY Juniper Networks, Inc. ''AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL Juniper Networks, Inc. BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
from __future__ import absolute_import, division, print_function
ANSIBLE_METADATA = {'metadata_version': '1.1',
'supported_by': 'community',
'status': ['stableinterface']}
DOCUMENTATION = '''
---
extends_documentation_fragment:
- juniper_junos_common.connection_documentation
- juniper_junos_common.logging_documentation
module: juniper_junos_pmtud
version_added: "2.0.0" # of Juniper.junos role
author:
- Martin Komon (@mkomon)
- Juniper Networks - Stacy Smith (@stacywsmith)
short_description: Perform path MTU discovery from a Junos device to a
destination
description:
- Determine the maximum IP MTU supported along a path from a Junos device to
a user-specified destination by performing path MTU discovery (PMTUD) using
the ping command. The reported MTU will be between min_test_size and
I(max_size) where I(min_test_size) = (I(max_size) - I(max_range) + 1).
If the actual path MTU is greater than I(max_size), then I(max_size) will
be reported. If the actual path MTU is less than I(min_test_size), then a
failure will be reported.
options:
dest:
description:
- The IPv4 address, or hostname if DNS is configured on the Junos device,
used as the destination of the PMTUD.
required: true
default: none
type: str
aliases:
- dest_ip
- dest_host
- destination
- destination_ip
- destination_host
interface:
description:
- The source interface from which the the PMTUD is performed. If not
specified, the default Junos algorithm for determining the source
interface is used.
required: false
default: none
type: str
max_range:
description:
- The maximum range of MTU values, in bytes, which will be searched
when performing path MTU discovery. This value must be C(0) or
a power of 2 (2^n) between C(2) and C(65536). The minimum IPv4 MTU
value attempted when performing path MTU discovery is
I(min_test_size) = (I(max_size) - I(max_range) + 1)
required: false
default: 512
type: int
max_size:
description:
- The maximum IPv4 MTU, in bytes, to attempt when performing path MTU
discovery.
- The value returned for I(inet_mtu) will be no more
than this value even if the path actually supports a higher MTU.
- This value must be between 68 and 65496.
required: false
default: 1500
type: int
routing_instance:
description:
- Name of the source routing instance from which the ping is
originated.
- If not specified, the default routing instance is used.
required: false
default: none
type: str
source:
description:
- The IPv4 address, or hostname if DNS is configured on the Junos device,
used as the source address of the PMTUD. If not specified, the Junos
default algorithm for determining the source address is used.
required: false
default: none
type: str
aliases:
- source_ip
- source_host
- src
- src_ip
- src_host
'''
EXAMPLES = '''
---
- name: Examples of juniper_junos_mtud
hosts: junos-all
connection: local
gather_facts: no
roles:
- Juniper.junos
tasks:
- name: Perform PMTUD to 192.68.1.1 with default parameters.
juniper_junos_pmtud:
dest: "192.68.1.1"
- name: Perform PMTUD to 192.68.1.1. Register response.
juniper_junos_pmtud:
dest: "192.68.1.1"
register: response
- name: Print the discovered MTU.
debug:
var: response.inet_mtu
- name: Perform PMTUD to 192.68.1.1. Search all possible MTU values.
juniper_junos_pmtud:
dest: "192.68.1.1"
max_size: 65496
max_range: 65536
register: response
- name: Print the discovered MTU.
debug:
var: response.inet_mtu
- name: Perform PMTUD to 192.68.1.1. Source from ge-0/0/0.0 interface.
juniper_junos_pmtud:
dest: "192.68.1.1"
interface: "ge-0/0/0.0"
register: response
- name: Print the discovered MTU.
debug:
var: response.inet_mtu
- name: Perform PMTUD to 192.68.1.1. Source from 192.168.1.2.
juniper_junos_pmtud:
dest: "192.68.1.1"
source: "192.168.1.2"
register: response
- name: Print the discovered MTU.
debug:
var: response.inet_mtu
- name: Perform PMTUD to 192.68.1.1. Source from the red routing-instance.
juniper_junos_pmtud:
dest: "192.68.1.1"
routing_instance: "red"
register: response
- name: Print the discovered MTU.
debug:
var: response.inet_mtu
'''
RETURN = '''
changed:
description:
- Indicates if the device's state has changed. Since this module
doesn't change the operational or configuration state of the
device, the value is always set to C(false).
returned: when PMTUD successfully executed.
type: bool
failed:
description:
- Indicates if the task failed.
returned: always
type: bool
host:
description:
- The destination IP/host of the PMTUD as specified by the I(dest)
option.
- Keys I(dest) and I(dest_ip) are also returned for backwards
compatibility.
returned: when PMTUD successfully executed.
type: str
inet_mtu:
description:
- The IPv4 path MTU size in bytes to the I(dest). This is the lesser of
I(max_size) and the actual path MTU to I(dest). If the actual path
MTU is less than I(min_test_size), then a failure is reported. Where
I(min_test_size) = (I(max_size) - I(max_range) + 1)
returned: when PMTUD successfully executed.
type: str
interface:
description:
- The source interface of the PMTUD as specified by the I(interface)
option.
returned: when the I(interface) option was specified.
type: str
routing_instance:
description:
- The routing-instance from which the PMTUD was performed as specified by
the I(routing_instance) option.
returned: when the I(routing_instance) option was specified.
type: str
source:
description:
- The source IP/host of the PMTUD as specified by the I(source)
option.
- Key I(source_ip) is also returned for backwards compatibility.
returned: when the I(source) option was specified.
type: str
warnings:
description:
- A list of warning strings, if any, produced from the ping.
returned: when warnings are present
type: list
'''
"""From Ansible 2.1, Ansible uses Ansiballz framework for assembling modules
But custom module_utils directory is supported from Ansible 2.3
Reference for the issue: https://groups.google.com/forum/#!topic/ansible-project/J8FL7Z1J1Mw """
# Ansiballz packages module_utils into ansible.module_utils
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils import juniper_junos_common
def main():
# Constants for MTU size
INET_MIN_MTU_SIZE = 68 # As prescribed by RFC 791, Section 3.2 -
# Fragmentation and Reassembly.
INET_MAX_MTU_SIZE = 65496 # Size of inet header's total length field is
# 16 bits. Therefore max inet packet size is 2^16
# or 65536, but Junos only supports max IP size
# of 65496 for the ping command in order to
                              # accommodate a (potentially) maximum sized IP
# header.
# Constants for the size of headers
INET_HEADER_SIZE = 20
ICMP_HEADER_SIZE = 8
INET_AND_ICMP_HEADER_SIZE = INET_HEADER_SIZE + ICMP_HEADER_SIZE
# Choices for max_size
MAX_SIZE_CHOICES = [0] + list(map(lambda x: 2 ** x, range(1, 17)))
# Create the module instance.
junos_module = juniper_junos_common.JuniperJunosModule(
argument_spec=dict(
dest=dict(type='str',
required=True,
aliases=['dest_ip', 'dest_host', 'destination',
'destination_ip', 'destination_host'],
default=None),
max_size=dict(type='int',
required=False,
default=1500),
max_range=dict(type='int',
required=False,
choices=MAX_SIZE_CHOICES,
default=512),
source=dict(type='str',
required=False,
aliases=['source_ip', 'source_host', 'src',
'src_ip', 'src_host'],
default=None),
interface=dict(type='str',
required=False,
default=None),
routing_instance=dict(type='str',
required=False,
default=None),
),
# Since this module doesn't change the device's configuration, there is
# no additional work required to support check mode. It's inherently
# supported.
supports_check_mode=True
)
# We're going to be using params a lot
params = junos_module.params
# max_size must be between INET_MIN_MTU_SIZE and INET_MAX_MTU_SIZE
if (params['max_size'] < INET_MIN_MTU_SIZE or
params['max_size'] > INET_MAX_MTU_SIZE):
junos_module.fail_json(msg='The value of the max_size option(%d) '
'must be between %d and %d.' %
(params['max_size'], INET_MIN_MTU_SIZE,
INET_MAX_MTU_SIZE))
# Initialize ping parameters.
ping_params = {'host': params.get('dest'),
'count': '3',
'rapid': True,
'inet': True,
'do_not_fragment': True}
# Add optional ping parameters
o_ping_params = {}
if params['source'] is not None:
o_ping_params['source'] = params['source']
if params['interface'] is not None:
o_ping_params['interface'] = params['interface']
if params['routing_instance'] is not None:
o_ping_params['routing_instance'] = params['routing_instance']
ping_params.update(o_ping_params)
# Set initial results values. Assume failure until we know it's success.
results = {'changed': False,
'failed': True,
'inet_mtu': 0,
'host': params.get('dest')}
# Results should include all the o_ping_params.
for key in o_ping_params:
results[key] = ping_params.get(key)
# Add aliases for backwards compatibility
results.update({'dest': ping_params.get('host'),
'dest_ip': ping_params.get('host'),
'source_ip': ping_params.get('source')})
# Execute a minimally-sized ping just to verify basic connectivity.
junos_module.logger.debug("Verifying basic connectivity.")
ping_params['size'] = str(INET_MIN_MTU_SIZE -
INET_AND_ICMP_HEADER_SIZE)
results_for_minimal = dict(results)
results_for_minimal = junos_module.ping(ping_params,
acceptable_percent_loss=100,
results=results_for_minimal)
if int(results_for_minimal.get('packet_loss', 100)) == 100:
results['msg'] = "Basic connectivity to %s failed." % (results['host'])
junos_module.exit_json(**results)
# Initialize test_size and step
test_size = params['max_size']
step = params['max_range']
min_test_size = test_size - (params['max_range'] - 1)
if min_test_size < INET_MIN_MTU_SIZE:
min_test_size = INET_MIN_MTU_SIZE
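    # Illustrative walk-through (editor note, not from the original module): with
    # the defaults max_size=1500 and max_range=512, probing starts at 1500 bytes
    # and moves in halving steps (256, 128, ...) within [989, 1500], converging on
    # the largest size whose do-not-fragment ping still succeeds.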
while True:
if test_size < INET_MIN_MTU_SIZE:
test_size = INET_MIN_MTU_SIZE
if test_size > params['max_size']:
test_size = params['max_size']
junos_module.logger.debug("Probing with size: %d", test_size)
step = step // 2 if step >= 2 else 0
ping_params['size'] = str(test_size - INET_AND_ICMP_HEADER_SIZE)
current_results = dict(results)
current_results = junos_module.ping(ping_params,
acceptable_percent_loss=100,
results=current_results)
loss = int(current_results.get('packet_loss', 100))
if loss < 100 and test_size == params['max_size']:
# ping success with max test_size, save and break
results['failed'] = False
results['inet_mtu'] = test_size
break
elif loss < 100:
# ping success, increase test_size
results['failed'] = False
results['inet_mtu'] = test_size
test_size += step
else:
# ping fail, lower size
test_size -= step
if step < 1:
break
if results.get('inet_mtu', 0) == 0:
junos_module.fail_json(msg='The MTU of the path to %s is less than '
'the minimum tested size(%d). Try '
'decreasing max_size(%d) or increasing '
'max_range(%d).' % (results['host'],
min_test_size,
params['max_size'],
params['max_range']),
**results)
# Return results.
junos_module.exit_json(**results)
if __name__ == '__main__':
main()
| 36.934307 | 96 | 0.627866 |
7947b7c2f6b143e49d4d083d5bdaffad71c7bd2c | 7,084 | py | Python | lib/python3.6/site-packages/statsmodels/sandbox/pca.py | KshitizSharmaV/Quant_Platform_Python | d784aa0604d8de5ba5ca0c3a171e3556c0cd6b39 | ["BSD-3-Clause"] | 1 | 2020-05-09T08:42:52.000Z | 2020-05-09T08:42:52.000Z | statsmodels/sandbox/pca.py | yanzhenxiong/statsmodels | e56c4046ff8807c3c16d6a9293b5cb5dfe6f0cd0 | ["BSD-3-Clause"] | null | null | null | statsmodels/sandbox/pca.py | yanzhenxiong/statsmodels | e56c4046ff8807c3c16d6a9293b5cb5dfe6f0cd0 | ["BSD-3-Clause"] | 1 | 2020-05-09T08:42:58.000Z | 2020-05-09T08:42:58.000Z |
#Copyright (c) 2008 Erik Tollerud ([email protected])
from statsmodels.compat.python import zip
import numpy as np
class Pca(object):
"""
A basic class for Principal Component Analysis (PCA).
p is the number of dimensions, while N is the number of data points
"""
_colors=('r','g','b','c','y','m','k') #defaults
def __calc(self):
A = self.A
M=A-np.mean(A,axis=0)
N=M/np.std(M,axis=0)
self.M = M
self.N = N
self._eig = None
def __init__(self,data,names=None):
"""
p X N matrix input
"""
A = np.array(data).T
n,p = A.shape
self.n,self.p = n,p
if p > n:
from warnings import warn
warn('p > n - intentional?', RuntimeWarning)
self.A = A
self._origA=A.copy()
self.__calc()
self._colors= np.tile(self._colors,int((p-1)/len(self._colors))+1)[:p]
if names is not None and len(names) != p:
raise ValueError('names must match data dimension')
self.names = None if names is None else tuple([str(x) for x in names])
def getCovarianceMatrix(self):
"""
returns the covariance matrix for the dataset
"""
return np.cov(self.N.T)
def getEigensystem(self):
"""
returns a tuple of (eigenvalues,eigenvectors) for the data set.
"""
if self._eig is None:
res = np.linalg.eig(self.getCovarianceMatrix())
sorti=np.argsort(res[0])[::-1]
res=(res[0][sorti],res[1][:,sorti])
self._eig=res
return self._eig
def getEigenvalues(self):
return self.getEigensystem()[0]
def getEigenvectors(self):
return self.getEigensystem()[1]
def getEnergies(self):
"""
"energies" are just normalized eigenvectors
"""
v=self.getEigenvalues()
return v/np.sum(v)
def plot2d(self,ix=0,iy=1,clf=True):
"""
        Generates a 2-dimensional plot of the data set and principal components
using matplotlib.
ix specifies which p-dimension to put on the x-axis of the plot
and iy specifies which to put on the y-axis (0-indexed)
"""
import matplotlib.pyplot as plt
x,y=self.N[:,ix],self.N[:,iy]
if clf:
plt.clf()
plt.scatter(x,y)
vals,evs=self.getEigensystem()
#evx,evy=evs[:,ix],evs[:,iy]
xl,xu=plt.xlim()
yl,yu=plt.ylim()
dx,dy=(xu-xl),(yu-yl)
for val,vec,c in zip(vals,evs.T,self._colors):
plt.arrow(0,0,val*vec[ix],val*vec[iy],head_width=0.05*(dx*dy/4)**0.5,fc=c,ec=c)
#plt.arrow(0,0,vals[ix]*evs[ix,ix],vals[ix]*evs[iy,ix],head_width=0.05*(dx*dy/4)**0.5,fc='g',ec='g')
#plt.arrow(0,0,vals[iy]*evs[ix,iy],vals[iy]*evs[iy,iy],head_width=0.05*(dx*dy/4)**0.5,fc='r',ec='r')
if self.names is not None:
plt.xlabel('$'+self.names[ix]+'/\\sigma$')
plt.ylabel('$'+self.names[iy]+'/\\sigma$')
def plot3d(self,ix=0,iy=1,iz=2,clf=True):
"""
        Generates a 3-dimensional plot of the data set and principal components
using mayavi.
ix, iy, and iz specify which of the input p-dimensions to place on each of
the x,y,z axes, respectively (0-indexed).
"""
import enthought.mayavi.mlab as M
if clf:
M.clf()
z3=np.zeros(3)
v=(self.getEigenvectors()*self.getEigenvalues())
M.quiver3d(z3,z3,z3,v[ix],v[iy],v[iz],scale_factor=5)
M.points3d(self.N[:,ix],self.N[:,iy],self.N[:,iz],scale_factor=0.3)
if self.names:
M.axes(xlabel=self.names[ix]+'/sigma',ylabel=self.names[iy]+'/sigma',zlabel=self.names[iz]+'/sigma')
else:
M.axes()
def sigclip(self,sigs):
"""
clips out all data points that are more than a certain number
of standard deviations from the mean.
sigs can be either a single value or a length-p sequence that
specifies the number of standard deviations along each of the
p dimensions.
"""
if np.isscalar(sigs):
sigs=sigs*np.ones(self.N.shape[1])
sigs = sigs*np.std(self.N,axis=1)
n = self.N.shape[0]
m = np.all(np.abs(self.N) < sigs,axis=1)
self.A=self.A[m]
self.__calc()
return n-sum(m)
def reset(self):
self.A = self._origA.copy()
self.__calc()
def project(self,vals=None,enthresh=None,nPCs=None,cumen=None):
"""
projects the normalized values onto the components
enthresh, nPCs, and cumen determine how many PCs to use
if vals is None, the normalized data vectors are the values to project.
Otherwise, it should be convertable to a p x N array
returns n,p(>threshold) dimension array
"""
nonnones = sum([e is not None for e in (enthresh, nPCs, cumen)])
if nonnones == 0:
m = slice(None)
elif nonnones > 1:
raise ValueError("can't specify more than one threshold")
else:
if enthresh is not None:
                m = self.getEnergies() > enthresh
elif nPCs is not None:
m = slice(None,nPCs)
elif cumen is not None:
                m = np.cumsum(self.getEnergies()) < cumen
else:
raise RuntimeError('Should be unreachable')
if vals is None:
vals = self.N.T
else:
vals = np.array(vals,copy=False)
if self.N.T.shape[0] != vals.shape[0]:
raise ValueError("shape for vals doesn't match")
proj = np.matrix(self.getEigenvectors()).T*vals
return proj[m].T
def deproject(self,A,normed=True):
"""
input is an n X q array, where q <= p
output is p X n
"""
A=np.atleast_2d(A)
n,q = A.shape
p = self.A.shape[1]
if q > p :
raise ValueError("q > p")
evinv=np.linalg.inv(np.matrix(self.getEigenvectors()).T)
zs = np.zeros((n,p))
zs[:,:q]=A
proj = evinv*zs.T
if normed:
return np.array(proj.T).T
else:
mns=np.mean(self.A,axis=0)
sds=np.std(self.M,axis=0)
return (np.array(proj.T)*sds+mns).T
def subtractPC(self,pc,vals=None):
"""
pc can be a scalar or any sequence of pc indecies
if vals is None, the source data is self.A, else whatever is in vals
(which must be p x m)
"""
if vals is None:
vals = self.A
else:
vals = vals.T
if vals.shape[1]!= self.A.shape[1]:
raise ValueError("vals don't have the correct number of components")
pcs=self.project()
zpcs=np.zeros_like(pcs)
zpcs[:,pc]=pcs[:,pc]
upc=self.deproject(zpcs,False)
A = vals.T-upc
B = A.T*np.std(self.M,axis=0)
return B+np.mean(self.A,axis=0)
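# --- Illustrative usage sketch (editor addition, not part of the original
# statsmodels source). It only exercises the public methods defined above.
if __name__ == "__main__":
    np.random.seed(0)
    demo = np.random.randn(3, 100)  # Pca expects a p x N matrix
    pca = Pca(demo, names=['x', 'y', 'z'])
    print("energies (normalized eigenvalues):", pca.getEnergies())
    print("projection onto 2 PCs has shape:", np.asarray(pca.project(nPCs=2)).shape)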
| 31.207048 | 112 | 0.547149 |
7947ba368134e90b18c9e3ebc80b8cd2b310bbbd | 1,375 | py | Python | dnm_cohorts/de_novos/mcrae_nature.py | jeremymcrae/dnm_cohorts | e968357797d2d370b44904129c32c2e74b36b903 | ["MIT"] | 1 | 2020-12-10T05:17:21.000Z | 2020-12-10T05:17:21.000Z | dnm_cohorts/de_novos/mcrae_nature.py | jeremymcrae/dnm_cohorts | e968357797d2d370b44904129c32c2e74b36b903 | ["MIT"] | null | null | null | dnm_cohorts/de_novos/mcrae_nature.py | jeremymcrae/dnm_cohorts | e968357797d2d370b44904129c32c2e74b36b903 | ["MIT"] | null | null | null |
import logging
import pandas
from dnm_cohorts.de_novo import DeNovo
url = 'https://static-content.springer.com/esm/art%3A10.1038%2Fnature21062/MediaObjects/41586_2017_BFnature21062_MOESM34_ESM.xlsx'
async def mcrae_nature_de_novos(result):
""" load de novo mutations from McRae et al Nature 2017
These de novos are loaded from Supplementary Table 1 from
McRae et al Nature 2017 542:433-438
doi: 10.1038/nature21062
Returns:
dataframe of de novo mutations
"""
logging.info('getting McRae et al Nature 2017 de novos')
data = pandas.read_excel(url, sheet_name='Supplementary Table 1')
data['person_id'] = data['Individual ID']
data['chrom'] = data['Chromosome'].astype(str)
data['pos'] = data['Position (GRCh37)']
data['ref'] = data['Reference allele']
data['alt'] = data['Alternate allele']
data['person_id'] += '|DDD'
data['study'] = '10.1038/nature21062'
qual, status = data['PP(DNM)'], data['Status']
quality = qual.isnull() | (qual > 0.00781) | (status == 'validated')
data['confidence'] = quality.map({True: 'high', False: 'low'})
vars = set()
for i, row in data.iterrows():
var = DeNovo(row.person_id, row.chrom, row.pos, row.ref, row.alt,
row.study, row.confidence, 'grch37')
vars.add(var)
result.append(vars)
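# Illustrative note (editor addition): the coroutine above does not await anything
# itself; it simply appends a set of DeNovo objects to the shared ``result`` list,
# e.g. ``asyncio.run(mcrae_nature_de_novos(my_results_list))`` in a hypothetical caller.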
| 31.976744 | 130 | 0.648 |
7947ba5ccef07479a0c1a96188c72f3047b56e6f | 2,768 | py | Python | tests/components/recorder/test_migrate.py | itewk/home-assistant | 769cf19052f8c9ef374d8ba8ae7705ccc7bf4cf4 | ["Apache-2.0"] | 23 | 2017-11-15T21:03:53.000Z | 2021-03-29T21:33:48.000Z | tests/components/recorder/test_migrate.py | itewk/home-assistant | 769cf19052f8c9ef374d8ba8ae7705ccc7bf4cf4 | ["Apache-2.0"] | 9 | 2022-01-27T06:32:10.000Z | 2022-03-31T07:07:51.000Z | tests/components/recorder/test_migrate.py | itewk/home-assistant | 769cf19052f8c9ef374d8ba8ae7705ccc7bf4cf4 | ["Apache-2.0"] | 10 | 2018-01-01T00:12:51.000Z | 2021-12-21T23:08:05.000Z |
"""The tests for the Recorder component."""
# pylint: disable=protected-access
from unittest.mock import call, patch
import pytest
from sqlalchemy import create_engine
from sqlalchemy.pool import StaticPool
from homeassistant.bootstrap import async_setup_component
from homeassistant.components.recorder import const, migration, models
from tests.components.recorder import models_original
def create_engine_test(*args, **kwargs):
"""Test version of create_engine that initializes with old schema.
This simulates an existing db with the old schema.
"""
engine = create_engine(*args, **kwargs)
models_original.Base.metadata.create_all(engine)
return engine
async def test_schema_update_calls(hass):
"""Test that schema migrations occur in correct order."""
with patch(
"homeassistant.components.recorder.create_engine", new=create_engine_test
), patch("homeassistant.components.recorder.migration._apply_update") as update:
await async_setup_component(
hass, "recorder", {"recorder": {"db_url": "sqlite://"}}
)
await hass.async_block_till_done()
update.assert_has_calls(
[
call(hass.data[const.DATA_INSTANCE].engine, version + 1, 0)
for version in range(0, models.SCHEMA_VERSION)
]
)
async def test_schema_migrate(hass):
"""Test the full schema migration logic.
We're just testing that the logic can execute successfully here without
throwing exceptions. Maintaining a set of assertions based on schema
inspection could quickly become quite cumbersome.
"""
with patch("sqlalchemy.create_engine", new=create_engine_test), patch(
"homeassistant.components.recorder.Recorder._setup_run"
) as setup_run:
await async_setup_component(
hass, "recorder", {"recorder": {"db_url": "sqlite://"}}
)
await hass.async_block_till_done()
assert setup_run.called
def test_invalid_update():
"""Test that an invalid new version raises an exception."""
with pytest.raises(ValueError):
migration._apply_update(None, -1, 0)
def test_forgiving_add_column():
"""Test that add column will continue if column exists."""
engine = create_engine("sqlite://", poolclass=StaticPool)
engine.execute("CREATE TABLE hello (id int)")
migration._add_columns(engine, "hello", ["context_id CHARACTER(36)"])
migration._add_columns(engine, "hello", ["context_id CHARACTER(36)"])
def test_forgiving_add_index():
"""Test that add index will continue if index exists."""
engine = create_engine("sqlite://", poolclass=StaticPool)
models.Base.metadata.create_all(engine)
migration._create_index(engine, "states", "ix_states_context_id")
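# Editor note (illustrative): the two "forgiving" tests above depend on the helpers
# not raising when the column or index already exists, which is what lets schema
# migrations be re-run safely against an already-upgraded database.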
| 35.037975 | 84 | 0.714595 |
7947ba7599980a6f39df83cb589751e2a9273537 | 7,437 | py | Python | homeassistant/components/apcupsd/sensor.py | zcmosz/home-assistant | 58b4efe880478f8c66f0bf53e957268667301220 | [
"Apache-2.0"
] | 1 | 2021-01-10T05:35:53.000Z | 2021-01-10T05:35:53.000Z | homeassistant/components/apcupsd/sensor.py | zcmosz/home-assistant | 58b4efe880478f8c66f0bf53e957268667301220 | [
"Apache-2.0"
] | null | null | null | homeassistant/components/apcupsd/sensor.py | zcmosz/home-assistant | 58b4efe880478f8c66f0bf53e957268667301220 | [
"Apache-2.0"
] | 1 | 2020-02-24T16:17:42.000Z | 2020-02-24T16:17:42.000Z | """Support for APCUPSd sensors."""
import logging
from apcaccess.status import ALL_UNITS
import voluptuous as vol
from homeassistant.components import apcupsd
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (
CONF_RESOURCES,
POWER_WATT,
TEMP_CELSIUS,
TIME_MINUTES,
TIME_SECONDS,
)
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
_LOGGER = logging.getLogger(__name__)
SENSOR_PREFIX = "UPS "
SENSOR_TYPES = {
"alarmdel": ["Alarm Delay", "", "mdi:alarm"],
"ambtemp": ["Ambient Temperature", "", "mdi:thermometer"],
"apc": ["Status Data", "", "mdi:information-outline"],
"apcmodel": ["Model", "", "mdi:information-outline"],
"badbatts": ["Bad Batteries", "", "mdi:information-outline"],
"battdate": ["Battery Replaced", "", "mdi:calendar-clock"],
"battstat": ["Battery Status", "", "mdi:information-outline"],
"battv": ["Battery Voltage", "V", "mdi:flash"],
"bcharge": ["Battery", "%", "mdi:battery"],
"cable": ["Cable Type", "", "mdi:ethernet-cable"],
"cumonbatt": ["Total Time on Battery", "", "mdi:timer"],
"date": ["Status Date", "", "mdi:calendar-clock"],
"dipsw": ["Dip Switch Settings", "", "mdi:information-outline"],
"dlowbatt": ["Low Battery Signal", "", "mdi:clock-alert"],
"driver": ["Driver", "", "mdi:information-outline"],
"dshutd": ["Shutdown Delay", "", "mdi:timer"],
"dwake": ["Wake Delay", "", "mdi:timer"],
"endapc": ["Date and Time", "", "mdi:calendar-clock"],
"extbatts": ["External Batteries", "", "mdi:information-outline"],
"firmware": ["Firmware Version", "", "mdi:information-outline"],
"hitrans": ["Transfer High", "V", "mdi:flash"],
"hostname": ["Hostname", "", "mdi:information-outline"],
"humidity": ["Ambient Humidity", "%", "mdi:water-percent"],
"itemp": ["Internal Temperature", TEMP_CELSIUS, "mdi:thermometer"],
"lastxfer": ["Last Transfer", "", "mdi:transfer"],
"linefail": ["Input Voltage Status", "", "mdi:information-outline"],
"linefreq": ["Line Frequency", "Hz", "mdi:information-outline"],
"linev": ["Input Voltage", "V", "mdi:flash"],
"loadpct": ["Load", "%", "mdi:gauge"],
"loadapnt": ["Load Apparent Power", "%", "mdi:gauge"],
"lotrans": ["Transfer Low", "V", "mdi:flash"],
"mandate": ["Manufacture Date", "", "mdi:calendar"],
"masterupd": ["Master Update", "", "mdi:information-outline"],
"maxlinev": ["Input Voltage High", "V", "mdi:flash"],
"maxtime": ["Battery Timeout", "", "mdi:timer-off"],
"mbattchg": ["Battery Shutdown", "%", "mdi:battery-alert"],
"minlinev": ["Input Voltage Low", "V", "mdi:flash"],
"mintimel": ["Shutdown Time", "", "mdi:timer"],
"model": ["Model", "", "mdi:information-outline"],
"nombattv": ["Battery Nominal Voltage", "V", "mdi:flash"],
"nominv": ["Nominal Input Voltage", "V", "mdi:flash"],
"nomoutv": ["Nominal Output Voltage", "V", "mdi:flash"],
"nompower": ["Nominal Output Power", POWER_WATT, "mdi:flash"],
"nomapnt": ["Nominal Apparent Power", "VA", "mdi:flash"],
"numxfers": ["Transfer Count", "", "mdi:counter"],
"outcurnt": ["Output Current", "A", "mdi:flash"],
"outputv": ["Output Voltage", "V", "mdi:flash"],
"reg1": ["Register 1 Fault", "", "mdi:information-outline"],
"reg2": ["Register 2 Fault", "", "mdi:information-outline"],
"reg3": ["Register 3 Fault", "", "mdi:information-outline"],
"retpct": ["Restore Requirement", "%", "mdi:battery-alert"],
"selftest": ["Last Self Test", "", "mdi:calendar-clock"],
"sense": ["Sensitivity", "", "mdi:information-outline"],
"serialno": ["Serial Number", "", "mdi:information-outline"],
"starttime": ["Startup Time", "", "mdi:calendar-clock"],
"statflag": ["Status Flag", "", "mdi:information-outline"],
"status": ["Status", "", "mdi:information-outline"],
"stesti": ["Self Test Interval", "", "mdi:information-outline"],
"timeleft": ["Time Left", "", "mdi:clock-alert"],
"tonbatt": ["Time on Battery", "", "mdi:timer"],
"upsmode": ["Mode", "", "mdi:information-outline"],
"upsname": ["Name", "", "mdi:information-outline"],
"version": ["Daemon Info", "", "mdi:information-outline"],
"xoffbat": ["Transfer from Battery", "", "mdi:transfer"],
"xoffbatt": ["Transfer from Battery", "", "mdi:transfer"],
"xonbatt": ["Transfer to Battery", "", "mdi:transfer"],
}
SPECIFIC_UNITS = {"ITEMP": TEMP_CELSIUS}
INFERRED_UNITS = {
" Minutes": TIME_MINUTES,
" Seconds": TIME_SECONDS,
" Percent": "%",
" Volts": "V",
" Ampere": "A",
" Volt-Ampere": "VA",
" Watts": POWER_WATT,
" Hz": "Hz",
" C": TEMP_CELSIUS,
" Percent Load Capacity": "%",
}
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_RESOURCES, default=[]): vol.All(
cv.ensure_list, [vol.In(SENSOR_TYPES)]
)
}
)
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the APCUPSd sensors."""
entities = []
for resource in config[CONF_RESOURCES]:
sensor_type = resource.lower()
if sensor_type not in SENSOR_TYPES:
SENSOR_TYPES[sensor_type] = [
sensor_type.title(),
"",
"mdi:information-outline",
]
if sensor_type.upper() not in apcupsd.DATA.status:
_LOGGER.warning(
"Sensor type: %s does not appear in the APCUPSd status output",
sensor_type,
)
entities.append(APCUPSdSensor(apcupsd.DATA, sensor_type))
add_entities(entities, True)
def infer_unit(value):
"""If the value ends with any of the units from ALL_UNITS.
Split the unit off the end of the value and return the value, unit tuple
pair. Else return the original value and None as the unit.
"""
for unit in ALL_UNITS:
if value.endswith(unit):
return value[: -len(unit)], INFERRED_UNITS.get(unit, unit.strip())
return value, None
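# Illustrative examples (editor addition): infer_unit("34.2 Seconds") returns
# ("34.2", TIME_SECONDS), while infer_unit("ONLINE") returns ("ONLINE", None)
# because no known unit suffix matches.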
class APCUPSdSensor(Entity):
"""Representation of a sensor entity for APCUPSd status values."""
def __init__(self, data, sensor_type):
"""Initialize the sensor."""
self._data = data
self.type = sensor_type
self._name = SENSOR_PREFIX + SENSOR_TYPES[sensor_type][0]
self._unit = SENSOR_TYPES[sensor_type][1]
self._inferred_unit = None
self._state = None
@property
def name(self):
"""Return the name of the UPS sensor."""
return self._name
@property
def icon(self):
"""Icon to use in the frontend, if any."""
return SENSOR_TYPES[self.type][2]
@property
def state(self):
"""Return true if the UPS is online, else False."""
return self._state
@property
def unit_of_measurement(self):
"""Return the unit of measurement of this entity, if any."""
if not self._unit:
return self._inferred_unit
return self._unit
def update(self):
"""Get the latest status and use it to update our sensor state."""
if self.type.upper() not in self._data.status:
self._state = None
self._inferred_unit = None
else:
self._state, self._inferred_unit = infer_unit(
self._data.status[self.type.upper()]
)
| 38.138462 | 79 | 0.603738 |
7947baf4d75b9926705793127085229eca7bbc38 | 1,636 | py | Python | python_modules/libraries/dagster-azure/dagster_azure_tests/adls2_tests/test_object_store.py | sd2k/dagster | d15542e2be374a0c35f2b1623c1fff98f002c605 | [
"Apache-2.0"
] | 1 | 2021-05-13T08:42:14.000Z | 2021-05-13T08:42:14.000Z | python_modules/libraries/dagster-azure/dagster_azure_tests/adls2_tests/test_object_store.py | sd2k/dagster | d15542e2be374a0c35f2b1623c1fff98f002c605 | [
"Apache-2.0"
] | null | null | null | python_modules/libraries/dagster-azure/dagster_azure_tests/adls2_tests/test_object_store.py | sd2k/dagster | d15542e2be374a0c35f2b1623c1fff98f002c605 | [
"Apache-2.0"
] | null | null | null | from dagster_azure.adls2 import ADLS2ObjectStore, FakeADLS2ServiceClient
from dagster_azure.blob import FakeBlobServiceClient
from dagster.core.storage.object_store import DEFAULT_SERIALIZATION_STRATEGY
def test_adls2_object_store(
storage_account, credential, file_system, caplog
): # pylint: disable=too-many-function-args
adls2_fake_client = FakeADLS2ServiceClient(storage_account, credential)
blob_fake_client = FakeBlobServiceClient(storage_account, credential)
key = "foo"
# Uses mock ADLS2 client
adls2_obj_store = ADLS2ObjectStore(
file_system, adls2_client=adls2_fake_client, blob_client=blob_fake_client
)
res = adls2_obj_store.set_object(key, True, DEFAULT_SERIALIZATION_STRATEGY)
assert res.key == "abfss://{fs}@{account}.dfs.core.windows.net/{key}".format(
fs=file_system, account=storage_account, key=key
)
adls2_obj_store.set_object(key, True, DEFAULT_SERIALIZATION_STRATEGY)
assert "Removing existing ADLS2 key" in caplog.text
assert adls2_obj_store.has_object(key)
assert adls2_obj_store.get_object(key, DEFAULT_SERIALIZATION_STRATEGY).obj is True
# Harder to test this since it requires a fake synchronised Blob client,
# since cp_object uses blob APIs to communicate...
# adls2_obj_store.cp_object(key, 'bar')
# assert adls2_obj_store.has_object('bar')
adls2_obj_store.rm_object(key)
assert not adls2_obj_store.has_object(key)
assert adls2_obj_store.uri_for_key(
key
) == "abfss://{fs}@{account}.dfs.core.windows.net/{key}".format(
fs=file_system, account=storage_account, key=key
)
| 38.952381 | 86 | 0.75978 |
7947bb0d11fb366733d12a5ccd730fa1fb04d588 | 565 | py | Python | Minimizing height of the maximum difference of array/minmax.py | sparkingdark/450cracker | 7aa62d0321e30998ced9a7a1c586e4c6395e1dcd | [
"Unlicense"
] | null | null | null | Minimizing height of the maximum difference of array/minmax.py | sparkingdark/450cracker | 7aa62d0321e30998ced9a7a1c586e4c6395e1dcd | [
"Unlicense"
] | null | null | null | Minimizing height of the maximum difference of array/minmax.py | sparkingdark/450cracker | 7aa62d0321e30998ced9a7a1c586e4c6395e1dcd | [
"Unlicense"
] | null | null | null | def min_max_diff(arr,n,k):
    """
    Return the minimum possible difference between the largest and smallest
    tower heights after every height in ``arr`` (length ``n``) is either
    raised or lowered by ``k``.
    """
    if n == 1:
        return 0
    arr.sort()
    # Worst case: the extremes stay as far apart as they already are.
    ans = arr[n - 1] - arr[0]
    # Candidate new minimum (smallest + k) and maximum (largest - k).
    small = arr[0] + k
    big = arr[n - 1] - k
    if small > big:
        small, big = big, small
    for i in range(1, n - 1):
        subtract = arr[i] - k
        add = arr[i] + k
        # If this tower can be kept inside the current [small, big] window,
        # it does not change the answer.
        if subtract >= small or add <= big:
            continue
        # Otherwise pick whichever choice widens the window the least.
        if big - subtract <= add - small:
            small = subtract
        else:
            big = add
    return min(ans, big - small)
if __name__ == "__main__":
print(min_max_diff([1,2,3,4,22,21,1],7,10))
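    # Editor note: for this sample the sorted heights are [1, 1, 2, 3, 4, 21, 22];
    # raising the small towers by k=10 and lowering the tall ones brings every
    # height into the range 11..14, so the call should print 3.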
| 17.121212 | 70 | 0.481416 |
7947bb4b1af359cd46f63a4be04019e30c84a3c3 | 12,064 | py | Python | sdk/python/pulumi_azure_native/recoveryservices/v20180710/replication_migration_item.py | sebtelko/pulumi-azure-native | 711ec021b5c73da05611c56c8a35adb0ce3244e4 | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/recoveryservices/v20180710/replication_migration_item.py | sebtelko/pulumi-azure-native | 711ec021b5c73da05611c56c8a35adb0ce3244e4 | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/recoveryservices/v20180710/replication_migration_item.py | sebtelko/pulumi-azure-native | 711ec021b5c73da05611c56c8a35adb0ce3244e4 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['ReplicationMigrationItemArgs', 'ReplicationMigrationItem']
@pulumi.input_type
class ReplicationMigrationItemArgs:
def __init__(__self__, *,
fabric_name: pulumi.Input[str],
properties: pulumi.Input['EnableMigrationInputPropertiesArgs'],
protection_container_name: pulumi.Input[str],
resource_group_name: pulumi.Input[str],
resource_name: pulumi.Input[str],
migration_item_name: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a ReplicationMigrationItem resource.
:param pulumi.Input[str] fabric_name: Fabric name.
:param pulumi.Input['EnableMigrationInputPropertiesArgs'] properties: Enable migration input properties.
:param pulumi.Input[str] protection_container_name: Protection container name.
:param pulumi.Input[str] resource_group_name: The name of the resource group where the recovery services vault is present.
:param pulumi.Input[str] resource_name: The name of the recovery services vault.
:param pulumi.Input[str] migration_item_name: Migration item name.
"""
pulumi.set(__self__, "fabric_name", fabric_name)
pulumi.set(__self__, "properties", properties)
pulumi.set(__self__, "protection_container_name", protection_container_name)
pulumi.set(__self__, "resource_group_name", resource_group_name)
pulumi.set(__self__, "resource_name", resource_name)
if migration_item_name is not None:
pulumi.set(__self__, "migration_item_name", migration_item_name)
@property
@pulumi.getter(name="fabricName")
def fabric_name(self) -> pulumi.Input[str]:
"""
Fabric name.
"""
return pulumi.get(self, "fabric_name")
@fabric_name.setter
def fabric_name(self, value: pulumi.Input[str]):
pulumi.set(self, "fabric_name", value)
@property
@pulumi.getter
def properties(self) -> pulumi.Input['EnableMigrationInputPropertiesArgs']:
"""
Enable migration input properties.
"""
return pulumi.get(self, "properties")
@properties.setter
def properties(self, value: pulumi.Input['EnableMigrationInputPropertiesArgs']):
pulumi.set(self, "properties", value)
@property
@pulumi.getter(name="protectionContainerName")
def protection_container_name(self) -> pulumi.Input[str]:
"""
Protection container name.
"""
return pulumi.get(self, "protection_container_name")
@protection_container_name.setter
def protection_container_name(self, value: pulumi.Input[str]):
pulumi.set(self, "protection_container_name", value)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Input[str]:
"""
The name of the resource group where the recovery services vault is present.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter(name="resourceName")
def resource_name(self) -> pulumi.Input[str]:
"""
The name of the recovery services vault.
"""
return pulumi.get(self, "resource_name")
@resource_name.setter
def resource_name(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_name", value)
@property
@pulumi.getter(name="migrationItemName")
def migration_item_name(self) -> Optional[pulumi.Input[str]]:
"""
Migration item name.
"""
return pulumi.get(self, "migration_item_name")
@migration_item_name.setter
def migration_item_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "migration_item_name", value)
class ReplicationMigrationItem(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
fabric_name: Optional[pulumi.Input[str]] = None,
migration_item_name: Optional[pulumi.Input[str]] = None,
properties: Optional[pulumi.Input[pulumi.InputType['EnableMigrationInputPropertiesArgs']]] = None,
protection_container_name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
resource_name_: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
Migration item.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] fabric_name: Fabric name.
:param pulumi.Input[str] migration_item_name: Migration item name.
:param pulumi.Input[pulumi.InputType['EnableMigrationInputPropertiesArgs']] properties: Enable migration input properties.
:param pulumi.Input[str] protection_container_name: Protection container name.
:param pulumi.Input[str] resource_group_name: The name of the resource group where the recovery services vault is present.
:param pulumi.Input[str] resource_name_: The name of the recovery services vault.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: ReplicationMigrationItemArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Migration item.
:param str resource_name: The name of the resource.
:param ReplicationMigrationItemArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(ReplicationMigrationItemArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
fabric_name: Optional[pulumi.Input[str]] = None,
migration_item_name: Optional[pulumi.Input[str]] = None,
properties: Optional[pulumi.Input[pulumi.InputType['EnableMigrationInputPropertiesArgs']]] = None,
protection_container_name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
resource_name_: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = ReplicationMigrationItemArgs.__new__(ReplicationMigrationItemArgs)
if fabric_name is None and not opts.urn:
raise TypeError("Missing required property 'fabric_name'")
__props__.__dict__["fabric_name"] = fabric_name
__props__.__dict__["migration_item_name"] = migration_item_name
if properties is None and not opts.urn:
raise TypeError("Missing required property 'properties'")
__props__.__dict__["properties"] = properties
if protection_container_name is None and not opts.urn:
raise TypeError("Missing required property 'protection_container_name'")
__props__.__dict__["protection_container_name"] = protection_container_name
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__.__dict__["resource_group_name"] = resource_group_name
if resource_name_ is None and not opts.urn:
raise TypeError("Missing required property 'resource_name_'")
__props__.__dict__["resource_name"] = resource_name_
__props__.__dict__["location"] = None
__props__.__dict__["name"] = None
__props__.__dict__["type"] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:recoveryservices/v20180710:ReplicationMigrationItem"), pulumi.Alias(type_="azure-native:recoveryservices:ReplicationMigrationItem"), pulumi.Alias(type_="azure-nextgen:recoveryservices:ReplicationMigrationItem"), pulumi.Alias(type_="azure-native:recoveryservices/v20180110:ReplicationMigrationItem"), pulumi.Alias(type_="azure-nextgen:recoveryservices/v20180110:ReplicationMigrationItem"), pulumi.Alias(type_="azure-native:recoveryservices/v20210210:ReplicationMigrationItem"), pulumi.Alias(type_="azure-nextgen:recoveryservices/v20210210:ReplicationMigrationItem"), pulumi.Alias(type_="azure-native:recoveryservices/v20210301:ReplicationMigrationItem"), pulumi.Alias(type_="azure-nextgen:recoveryservices/v20210301:ReplicationMigrationItem")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(ReplicationMigrationItem, __self__).__init__(
'azure-native:recoveryservices/v20180710:ReplicationMigrationItem',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'ReplicationMigrationItem':
"""
Get an existing ReplicationMigrationItem resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = ReplicationMigrationItemArgs.__new__(ReplicationMigrationItemArgs)
__props__.__dict__["location"] = None
__props__.__dict__["name"] = None
__props__.__dict__["properties"] = None
__props__.__dict__["type"] = None
return ReplicationMigrationItem(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def location(self) -> pulumi.Output[Optional[str]]:
"""
Resource Location
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Resource Name
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def properties(self) -> pulumi.Output['outputs.MigrationItemPropertiesResponse']:
"""
The migration item properties.
"""
return pulumi.get(self, "properties")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
Resource Type
"""
return pulumi.get(self, "type")
| 46.045802 | 830 | 0.672994 |
7947bcefd76bec5a4a2d0b2e086501f4c8be42b6 | 4,898 | py | Python | nextdl/extractor/r7.py | devenu85/nextdl | 0b458f556e2e0be80cb94bd9a9b1405ad2e9182d | ["MIT"] | 1 | 2021-12-19T13:55:20.000Z | 2021-12-19T13:55:20.000Z | nextdl/extractor/r7.py | devenu85/nextdl | 0b458f556e2e0be80cb94bd9a9b1405ad2e9182d | ["MIT"] | null | null | null | nextdl/extractor/r7.py | devenu85/nextdl | 0b458f556e2e0be80cb94bd9a9b1405ad2e9182d | ["MIT"] | null | null | null |
# coding: utf-8
from __future__ import unicode_literals
from ..utils import int_or_none
from .common import InfoExtractor
class R7IE(InfoExtractor):
_VALID_URL = r"""(?x)
https?://
(?:
(?:[a-zA-Z]+)\.r7\.com(?:/[^/]+)+/idmedia/|
noticias\.r7\.com(?:/[^/]+)+/[^/]+-|
player\.r7\.com/video/i/
)
(?P<id>[\da-f]{24})
"""
_TESTS = [
{
"url": "http://videos.r7.com/policiais-humilham-suspeito-a-beira-da-morte-morre-com-dignidade-/idmedia/54e7050b0cf2ff57e0279389.html",
"md5": "403c4e393617e8e8ddc748978ee8efde",
"info_dict": {
"id": "54e7050b0cf2ff57e0279389",
"ext": "mp4",
"title": 'Policiais humilham suspeito à beira da morte: "Morre com dignidade"',
"description": "md5:01812008664be76a6479aa58ec865b72",
"thumbnail": r"re:^https?://.*\.jpg$",
"duration": 98,
"like_count": int,
"view_count": int,
},
},
{
"url": "http://esportes.r7.com/videos/cigano-manda-recado-aos-fas/idmedia/4e176727b51a048ee6646a1b.html",
"only_matching": True,
},
{
"url": "http://noticias.r7.com/record-news/video/representante-do-instituto-sou-da-paz-fala-sobre-fim-do-estatuto-do-desarmamento-5480fc580cf2285b117f438d/",
"only_matching": True,
},
{
"url": "http://player.r7.com/video/i/54e7050b0cf2ff57e0279389?play=true&video=http://vsh.r7.com/54e7050b0cf2ff57e0279389/ER7_RE_BG_MORTE_JOVENS_570kbps_2015-02-2009f17818-cc82-4c8f-86dc-89a66934e633-ATOS_copy.mp4&linkCallback=http://videos.r7.com/policiais-humilham-suspeito-a-beira-da-morte-morre-com-dignidade-/idmedia/54e7050b0cf2ff57e0279389.html&thumbnail=http://vtb.r7.com/ER7_RE_BG_MORTE_JOVENS_570kbps_2015-02-2009f17818-cc82-4c8f-86dc-89a66934e633-thumb.jpg&idCategory=192&share=true&layout=full&full=true",
"only_matching": True,
},
]
def _real_extract(self, url):
video_id = self._match_id(url)
video = self._download_json(
"http://player-api.r7.com/video/i/%s" % video_id, video_id
)
title = video["title"]
formats = []
media_url_hls = video.get("media_url_hls")
if media_url_hls:
formats.extend(
self._extract_m3u8_formats(
media_url_hls,
video_id,
"mp4",
entry_protocol="m3u8_native",
m3u8_id="hls",
fatal=False,
)
)
media_url = video.get("media_url")
if media_url:
f = {
"url": media_url,
"format_id": "http",
}
# m3u8 format always matches the http format, let's copy metadata from
# one to another
m3u8_formats = list(filter(lambda f: f.get("vcodec") != "none", formats))
if len(m3u8_formats) == 1:
f_copy = m3u8_formats[0].copy()
f_copy.update(f)
f_copy["protocol"] = "http"
f = f_copy
formats.append(f)
self._sort_formats(formats)
description = video.get("description")
thumbnail = video.get("thumb")
duration = int_or_none(video.get("media_duration"))
like_count = int_or_none(video.get("likes"))
view_count = int_or_none(video.get("views"))
return {
"id": video_id,
"title": title,
"description": description,
"thumbnail": thumbnail,
"duration": duration,
"like_count": like_count,
"view_count": view_count,
"formats": formats,
}
class R7ArticleIE(InfoExtractor):
_VALID_URL = r"https?://(?:[a-zA-Z]+)\.r7\.com/(?:[^/]+/)+[^/?#&]+-(?P<id>\d+)"
_TEST = {
"url": "http://tv.r7.com/record-play/balanco-geral/videos/policiais-humilham-suspeito-a-beira-da-morte-morre-com-dignidade-16102015",
"only_matching": True,
}
@classmethod
def suitable(cls, url):
return False if R7IE.suitable(url) else super(R7ArticleIE, cls).suitable(url)
def _real_extract(self, url):
display_id = self._match_id(url)
webpage = self._download_webpage(url, display_id)
video_id = self._search_regex(
r'<div[^>]+(?:id=["\']player-|class=["\']embed["\'][^>]+id=["\'])([\da-f]{24})',
webpage,
"video id",
)
return self.url_result(
"http://player.r7.com/video/i/%s" % video_id, R7IE.ie_key()
)
| 37.968992 | 528 | 0.535321 |
7947bd40e22d9e4b37ccabdbcd493cfa926a33a7 | 154 | py | Python | 001146StepikPyBegin/Stepik001146PyBeginсh07p06st10CODE06_20210122.py | SafonovMikhail/python_000577 | 739f764e80f1ca354386f00b8e9db1df8c96531d | [
"Apache-2.0"
] | null | null | null | 001146StepikPyBegin/Stepik001146PyBeginсh07p06st10CODE06_20210122.py | SafonovMikhail/python_000577 | 739f764e80f1ca354386f00b8e9db1df8c96531d | [
"Apache-2.0"
] | null | null | null | 001146StepikPyBegin/Stepik001146PyBeginсh07p06st10CODE06_20210122.py | SafonovMikhail/python_000577 | 739f764e80f1ca354386f00b8e9db1df8c96531d | [
"Apache-2.0"
] | null | null | null | '''
Будет ли выполнен блок кода else, в приведенном ниже фрагменте кода?
'''
n = 0
while n < 10:
n += 2
print(n)
else:
print('Цикл завершен.') | 17.111111 | 68 | 0.616883 |
7947bef84ff372a56373786d113a400945a3f635 | 1,237 | py | Python | indra/tests/test_rlimsp.py | zebulon2/indra | 7727ddcab52ad8012eb6592635bfa114e904bd48 | [
"BSD-2-Clause"
] | 136 | 2016-02-11T22:06:37.000Z | 2022-03-31T17:26:20.000Z | indra/tests/test_rlimsp.py | zebulon2/indra | 7727ddcab52ad8012eb6592635bfa114e904bd48 | [
"BSD-2-Clause"
] | 748 | 2016-02-03T16:27:56.000Z | 2022-03-09T14:27:54.000Z | indra/tests/test_rlimsp.py | zebulon2/indra | 7727ddcab52ad8012eb6592635bfa114e904bd48 | [
"BSD-2-Clause"
] | 56 | 2015-08-28T14:03:44.000Z | 2022-02-04T06:15:55.000Z | import os
import unittest
from indra.sources import rlimsp
@unittest.skip('RLIMS-P webservice is down')
def test_simple_usage():
rp = rlimsp.process_from_webservice('PMC3717945')
stmts = rp.statements
assert len(stmts) == 33, len(stmts)
for s in stmts:
assert len(s.evidence) == 1, "Wrong amount of evidence."
ev = s.evidence[0]
assert ev.annotations, "Missing annotations."
assert 'agents' in ev.annotations.keys()
assert 'trigger' in ev.annotations.keys()
@unittest.skip('RLIMS-P webservice is down')
def test_ungrounded_endpoint_with_pmids():
pmid_list = ['16403219', '22258404', '16961925', '22096607']
stmts = []
for pmid in pmid_list:
rp = rlimsp.process_from_webservice(pmid, id_type='pmid')
assert len(rp.statements) > 10, len(rp.statements)
stmts.extend(rp.statements)
assert len(stmts) == 394, len(stmts)
return
def test_tyrosine_grounding():
here = os.path.dirname(os.path.abspath(__file__))
fname = os.path.join(here, 'rlimsp_site.json')
rp = rlimsp.process_from_json_file(fname)
assert len(rp.statements) == 1
stmt = rp.statements[0]
assert stmt.residue == 'Y'
assert stmt.position == '705'
| 32.552632 | 65 | 0.674212 |
7947c1b19c4f586a75a08e6b43de40904814495f | 10,443 | py | Python | plugins/modules/wiring_validate.py | realnirmal/dellemc.os10 | 5f6d7d969fad78824c35a6853e06a3cae15e4e2d | ["Apache-2.0"] | 1 | 2020-01-22T13:11:23.000Z | 2020-01-22T13:11:23.000Z | plugins/modules/wiring_validate.py | realnirmal/dellemc.os10 | 5f6d7d969fad78824c35a6853e06a3cae15e4e2d | ["Apache-2.0"] | 12 | 2020-02-21T07:24:52.000Z | 2020-04-14T09:54:32.000Z | plugins/modules/wiring_validate.py | realnirmal/dellemc.os10 | 5f6d7d969fad78824c35a6853e06a3cae15e4e2d | ["Apache-2.0"] | 3 | 2020-03-16T06:48:29.000Z | 2020-04-30T06:24:41.000Z |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__copyright__ = "(c) 2020 Dell Inc. or its subsidiaries. All rights reserved."
__metaclass__ = type
DOCUMENTATION = '''
module: wiring_validate
author: "Senthil Kumar Ganesan (@skg-net)"
short_description: Validate the wiring based on the planned wiring details
description:
- Get the wiring info using lldp output and show system network summary.
options:
show_lldp_neighbors_list:
description:
- show lldp neighbor output
type: 'list'
required: True
show_system_network_summary:
description:
- show system network summary output
type: 'list'
required: True
planned_neighbors:
description:
- planned neighbours input from group_var to compare actual
type: 'list'
required: True
'''
EXAMPLES = '''
Copy below YAML into a playbook (e.g. play.yml) and run as follows:
#$ ansible-playbook -i inv play.yml
name: show system Configuration
hosts: localhost
connection: local
gather_facts: False
tasks:
- name: "Get Dell EMC OS10 Show lldp"
os10_command:
commands:
- command: "show lldp neighbors"
provider: "{{ hostvars[item].cli }}"
with_items: "{{ groups['all'] }}"
register: show_lldp
- local_action: copy content={{ show_lldp }} dest=show
- set_fact:
output_lldp: "{{ output_lldp|default([])+ [{'host': item.invocation.module_args.provider.host, 'inv_name': item.item,
'stdout_show_lldp': item.stdout}] }}"
loop: "{{ show_lldp.results }}"
- debug: var=output_lldp
- name: "Get Dell EMC OS10 Show system"
import_role:
name: os10_fabric_summary
register: show_system_network_summary
- debug: var=show_system_network_summary
- name: call lib to process
wiring_validate:
show_lldp_neighbors_list: "{{ output_lldp }}"
show_system_network_summary: "{{ show_system_network_summary.msg.results }}"
planned_neighbors: "{{ intended_neighbors }}"
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
from collections import OrderedDict
import re
import traceback
class WiringValidation(object):
def __init__(self):
self.module = AnsibleModule(argument_spec=self.get_fields())
self.show_lldp_neighbors_list = self.module.params['show_lldp_neighbors_list']
self.show_system_network_summary = self.module.params['show_system_network_summary']
self.planned_neighbors = self.module.params['planned_neighbors']
self.exit_msg = OrderedDict()
def get_fields(self):
spec_fields = {
'show_lldp_neighbors_list': {
'type': 'list',
'required': True
},
'show_system_network_summary': {
'type': 'list',
'required': True
},
'planned_neighbors': {
'type': 'list',
'required': True
}
}
return spec_fields
# get switch inv name from mac
def get_switch_inv_name_from_mac(self, mac):
inv_name = None
for show_system in self.show_system_network_summary:
if (str.lower(show_system["node-mac"])) == (str.lower(mac)):
inv_name = show_system.get("inv_name")
break
return inv_name
# get service tag for switch
def get_service_tag_and_mac(self):
svc_tag_mac = {}
for show_system in self.show_system_network_summary:
temp_dict = {}
temp_dict["svc-tag"] = show_system.get("service-tag")
temp_dict["node-mac"] = show_system.get("node-mac")
if bool(temp_dict):
svc_tag_mac[show_system["inv_name"]] = temp_dict
return svc_tag_mac
    # form actual neighbors per network with help of lldp output and show
    # system output
def get_actual_neigbor(self, lldp_list):
final_out = list()
for lldp in lldp_list:
# check whether lldp output mac match with system summary mac and
# collect port and host info
source_switch = lldp["inv_name"]
lldp_mac = lldp["rem_mac"]
for index, rem_mac in enumerate(lldp_mac):
final_dict = {}
final_dict["source_switch"] = source_switch
final_dict["source_port"] = lldp["loc_port"][index]
final_dict["dest_port"] = lldp["rem_port"][index]
dest_switch = self.get_switch_inv_name_from_mac(rem_mac)
if dest_switch is not None:
final_dict["dest_switch"] = dest_switch
else:
final_dict["dest_switch"] = "unknown"
final_out.append(final_dict)
return final_out
def parse_lldp_output(self):
nbr_list = list()
for item in self.show_lldp_neighbors_list:
out_dict = {}
loc_port = list()
rem_port = list()
rem_mac = list()
out_dict["host"] = item.get("host")
out_dict["inv_name"] = item.get("inv_name")
show_lldp_output = item.get("stdout_show_lldp")
if show_lldp_output is not None:
output = str(show_lldp_output[0])
lldp_regexp = r"(\S+)\s+(\S+)\s+(\S+)\s+(\S+)"
lines = output.splitlines()
for line in lines:
if "Loc PortID" in line:
continue
match = re.match(lldp_regexp, line)
if match:
val = match.groups()
loc_port.append(val[0])
rem_port.append(val[2])
rem_mac.append(val[3])
out_dict["loc_port"] = loc_port
out_dict["rem_port"] = rem_port
out_dict["rem_mac"] = rem_mac
if bool(out_dict):
nbr_list.append(out_dict)
return nbr_list
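    # Illustrative note (editor addition): each parsed "show lldp neighbors" row is
    # expected to look like
    #   ethernet1/1/1    OS10-LEAF2    ethernet1/1/5    aa:bb:cc:dd:ee:ff
    # from which lldp_regexp captures the local port, remote host, remote port and
    # remote chassis MAC; only groups 0, 2 and 3 are kept above.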
def perform_action(self):
try:
lldp_list = self.parse_lldp_output()
actual_nbr = self.get_actual_neigbor(lldp_list)
svc_tag_mac = self.get_service_tag_and_mac()
# Validate the planned neighbors with actual neighbors
mismatch_list = list()
for planned_neighbors in self.planned_neighbors:
bflag = False
if planned_neighbors not in actual_nbr:
for actual_neighbors in actual_nbr:
if (actual_neighbors["source_switch"] == planned_neighbors["source_switch"]
and actual_neighbors["source_port"] == planned_neighbors["source_port"]):
if (actual_neighbors["dest_switch"] !=
planned_neighbors["dest_switch"]):
bflag = True
if (actual_neighbors["dest_switch"]
!= "unknown"):
reason = (
"Destination switch is not an expected value, "
"expected switch: {0},port: {1}; actual switch: {2}(svc-tag:{3}, node_mac:{4}), port: {5}" .format(
planned_neighbors["dest_switch"],
planned_neighbors["dest_port"],
actual_neighbors["dest_switch"],
svc_tag_mac.get(
actual_neighbors["dest_switch"]).get("svc-tag"),
svc_tag_mac.get(
actual_neighbors["dest_switch"]).get("node-mac"),
actual_neighbors["dest_port"]))
else:
reason = (
"Destination switch is not an expected value, "
"expected switch: {0},port: {1}; actual switch: {2}, port: {3}" .format(
planned_neighbors["dest_switch"],
planned_neighbors["dest_port"],
actual_neighbors["dest_switch"],
actual_neighbors["dest_port"]))
planned_neighbors["reason"] = reason
planned_neighbors["error_type"] = "link-mismatch"
break
if(actual_neighbors["dest_port"] != planned_neighbors["dest_port"]):
bflag = True
reason = (
"Destination switch port is not an expected value, "
"expected port: {0} actual port: {1}" .format(
planned_neighbors["dest_port"],
actual_neighbors["dest_port"]))
planned_neighbors["reason"] = reason
planned_neighbors["error_type"] = "link-mismatch"
break
if not bflag:
reason = "link is not found for source switch: {0},port: {1}".format(
planned_neighbors["source_switch"], planned_neighbors["source_port"])
planned_neighbors["reason"] = reason
planned_neighbors["error_type"] = "link-missing"
mismatch_list.append(planned_neighbors)
self.exit_msg.update({"results": mismatch_list})
self.module.exit_json(changed=False, msg=self.exit_msg)
except Exception as e:
self.module.fail_json(
msg=to_native(e),
exception=traceback.format_exc())
def main():
module_instance = WiringValidation()
module_instance.perform_action()
if __name__ == '__main__':
main()
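# Illustrative note (added; not part of the original module): the regex used in
# parse_lldp_output assumes `show lldp neighbors` rows of the form
# "<loc_port>  <rem_host>  <rem_port>  <rem_mac>". The port names and MAC below
# are made-up sample values:
#
#     >>> import re
#     >>> line = "ethernet1/1/1  leaf-2  ethernet1/1/3  aa:bb:cc:dd:ee:ff"
#     >>> re.match(r"(\S+)\s+(\S+)\s+(\S+)\s+(\S+)", line).groups()
#     ('ethernet1/1/1', 'leaf-2', 'ethernet1/1/3', 'aa:bb:cc:dd:ee:ff')
#
# Groups 0, 2 and 3 feed loc_port, rem_port and rem_mac in the parser above.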
| 42.279352 | 139 | 0.53385 |
7947c1b2634a0800781b94e29392cccb39e18d78 | 2,447 | py | Python | pydoof/helpers.py | EnriqueSoria/pydoof | e5a2b7129e6c18e92b69501946be35cd386fcb47 | ["MIT"] | null | null | null | pydoof/helpers.py | EnriqueSoria/pydoof | e5a2b7129e6c18e92b69501946be35cd386fcb47 | ["MIT"] | null | null | null | pydoof/helpers.py | EnriqueSoria/pydoof | e5a2b7129e6c18e92b69501946be35cd386fcb47 | ["MIT"] | null | null | null |
"""
Collection of functions to assist PyDoof modules.
"""
from collections.abc import Iterable
from datetime import date
from enum import Enum
from typing import Any, List
def parse_query_params(params):
"""
    Parses a query-parameters dictionary into its proper parameter schema.
    Each key-value pair of the dictionary represents a parameter and its value. The
    function parses each key-value pair based on the value type.
* Parses dates into a string following the "YYYYMMDD" format.
* Parses dictionaries like `parameter: {key: value}` into parameter
`parameter[key]: value`.
* Parses lists like `parameter: [val0, val1]` into parameter
`parameter[]: [val0, val1]`.
    * Excludes parameters whose value is `None`.
"""
query_params = {}
for param, value in params.items():
query_params.update(
_parse_param(param, value)
)
return query_params
def _has_dicts(values: List[Any]):
# Could be possible to check only the first element.
# Is used on facets and sort, for example.
return any(isinstance(value, dict) for value in values)
def _parse_param(param: str, value: Any):
query_params = {}
if isinstance(value, date):
query_params[param] = value.strftime("%Y%m%d")
elif isinstance(value, dict):
for k, v in value.items():
query_params.update(
_parse_param(f'{param}[{k}]', v)
)
elif isinstance(value, Enum):
query_params[param] = value.value
elif not isinstance(value, str) and isinstance(value, Iterable):
if _has_dicts(value):
params = (_parse_param(f'{param}[{i}]', v)
for i, v in enumerate(value))
else:
params = (_parse_param(f'{param}[]', v) for v in value)
query_params.update(
_dicts_appends(params)
)
elif isinstance(value, bool):
query_params[param] = str(value).lower()
elif value is not None:
query_params[param] = value
return query_params
def _dicts_appends(dicts):
dict_join = {}
for dict_ in dicts:
for key, value in dict_.items():
if key in dict_join:
try:
dict_join[key].append(value)
except AttributeError:
dict_join[key] = [dict_join[key], value]
else:
dict_join[key] = value
return dict_join
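# Minimal usage sketch (added for illustration; not part of the original
# module). The sample values below are made up, but the expected output
# follows directly from the parsing rules implemented above.
if __name__ == "__main__":
    sample = {
        "query": "shoes",
        "filter": {"color": "red"},
        "published": date(2020, 1, 31),
        "exclude": None,
        "skip_auto_filters": True,
        "type": ["product", "article"],
    }
    # Expected result:
    # {'query': 'shoes', 'filter[color]': 'red', 'published': '20200131',
    #  'skip_auto_filters': 'true', 'type[]': ['product', 'article']}
    print(parse_query_params(sample))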
| 30.974684 | 78 | 0.616673 |
7947c1dbc09f433e39f61b171aefe7d0e3bca890 | 263 | py | Python | The fizzbuzz.py | Nadeemk07/The-FizzBuzz-Game | e63688d63c3c9c3f38b21070ce8b0d0269c1a8f8 | ["MIT"] | null | null | null | The fizzbuzz.py | Nadeemk07/The-FizzBuzz-Game | e63688d63c3c9c3f38b21070ce8b0d0269c1a8f8 | ["MIT"] | null | null | null | The fizzbuzz.py | Nadeemk07/The-FizzBuzz-Game | e63688d63c3c9c3f38b21070ce8b0d0269c1a8f8 | ["MIT"] | null | null | null |
# the code starts here
for number in range(1, 101):
if number % 3 == 0 and number % 5 == 0:
print("Fizz Buzz")
elif number % 3 == 0:
print("fizz")
elif number % 5 == 0:
print("buzz")
else:
print(number)
| 21.916667 | 44 | 0.486692 |
7947c2e034e6a77465faa03ebd6d37be73f54105 | 569 | py | Python | pytube/__init__.py | programwithrey/pytube | 8883b154c6b4f42a0d9e920dada501ef13ff5a1c | ["MIT-0"] | 5 | 2021-01-26T08:39:47.000Z | 2021-01-30T00:34:43.000Z | pytube/__init__.py | programwithrey/pytube | 8883b154c6b4f42a0d9e920dada501ef13ff5a1c | ["MIT-0"] | 2 | 2021-04-06T18:35:52.000Z | 2021-06-02T03:55:13.000Z | pytube/__init__.py | programwithrey/pytube | 8883b154c6b4f42a0d9e920dada501ef13ff5a1c | ["MIT-0"] | null | null | null |
# -*- coding: utf-8 -*-
# flake8: noqa: F401
# noreorder
"""
Pytube: a very serious Python library for downloading YouTube Videos.
"""
__title__ = "pytube"
__author__ = "Nick Ficano, Harold Martin"
__license__ = "MIT License"
__copyright__ = "Copyright 2019 Nick Ficano"
__js__ = None
__js_url__ = None
from pytube.version import __version__
from pytube.streams import Stream
from pytube.captions import Caption
from pytube.query import CaptionQuery
from pytube.query import StreamQuery
from pytube.__main__ import YouTube
from pytube.contrib.playlist import Playlist
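# Illustrative usage sketch (added; not part of the original package). It
# assumes the standard pytube API re-exported above and a placeholder URL:
#
#     from pytube import YouTube
#     yt = YouTube("https://www.youtube.com/watch?v=<video-id>")
#     yt.streams.get_highest_resolution().download()
#
# Stream, Caption, StreamQuery and Playlist are re-exported here so the same
# import style works for those objects as well.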
| 27.095238 | 69 | 0.787346 |
7947c2f3dc1e553f54c9dd62754073e1ec9a98cd | 10,451 | py | Python | pytorch_widedeep/models/text/stacked_attentive_rnn.py | TangleSpace/pytorch-widedeep | ccc55a15c1b3205ffc8c054abc5cd25cba9ccdff | ["MIT"] | null | null | null | pytorch_widedeep/models/text/stacked_attentive_rnn.py | TangleSpace/pytorch-widedeep | ccc55a15c1b3205ffc8c054abc5cd25cba9ccdff | ["MIT"] | null | null | null | pytorch_widedeep/models/text/stacked_attentive_rnn.py | TangleSpace/pytorch-widedeep | ccc55a15c1b3205ffc8c054abc5cd25cba9ccdff | ["MIT"] | null | null | null |
import warnings
import numpy as np
import torch
from torch import nn
from pytorch_widedeep.wdtypes import * # noqa: F403
from pytorch_widedeep.models.text._encoders import ContextAttentionEncoder
from pytorch_widedeep.models.tabular.mlp._layers import MLP
class StackedAttentiveRNN(nn.Module):
r"""Text classifier/regressor comprised by a stack of blocks:
``[RNN + Attention]``. This can be used as the ``deeptext`` component of a
Wide & Deep model or independently by itself.
In addition, there is the option to add a Fully Connected (FC) set of
    dense layers on top of the attention blocks
Parameters
----------
vocab_size: int
Number of words in the vocabulary
embed_dim: int, Optional, default = None
        Dimension of the word embeddings if non-pretrained word vectors are
used
embed_matrix: np.ndarray, Optional, default = None
Pretrained word embeddings
embed_trainable: bool, default = True
Boolean indicating if the pretrained embeddings are trainable
rnn_type: str, default = 'lstm'
String indicating the type of RNN to use. One of 'lstm' or 'gru'
hidden_dim: int, default = 64
Hidden dim of the RNN
    bidirectional: bool, default = False
        Boolean indicating whether the stacked RNNs are bidirectional
padding_idx: int, default = 1
index of the padding token in the padded-tokenised sequences. The
``TextPreprocessor`` class within this library uses ``fastai``'s
tokenizer where the token index 0 is reserved for the `'unknown'`
word token. Therefore, the default value is set to 1.
n_blocks: int, default = 3
Number of attention blocks. Each block is comprised by an RNN and a
Context Attention Encoder
    attn_concatenate: bool, default = False
        Boolean indicating if the input to the attention mechanism will be the
        output of the RNN concatenated with the last hidden state, or simply
        the output of the RNN
attn_dropout: float, default = 0.1
Internal dropout for the attention mechanism
with_addnorm: bool, default = False
Boolean indicating if the output of each block will be added to the
input and normalised
head_hidden_dims: List, Optional, default = None
List with the sizes of the dense layers in the head e.g: [128, 64]
head_activation: str, default = "relu"
Activation function for the dense layers in the head. Currently
`'tanh'`, `'relu'`, `'leaky_relu'` and `'gelu'` are supported
head_dropout: float, Optional, default = None
Dropout of the dense layers in the head
head_batchnorm: bool, default = False
Boolean indicating whether or not to include batch normalization in
the dense layers that form the `'rnn_mlp'`
head_batchnorm_last: bool, default = False
Boolean indicating whether or not to apply batch normalization to the
last of the dense layers in the head
head_linear_first: bool, default = False
Boolean indicating whether the order of the operations in the dense
layer. If ``True: [LIN -> ACT -> BN -> DP]``. If ``False: [BN -> DP ->
LIN -> ACT]``
Attributes
----------
word_embed: ``nn.Module``
word embedding matrix
rnn: ``nn.Module``
Stack of RNNs
rnn_mlp: ``nn.Sequential``
        Stack of dense layers on top of the RNN. This will only exist if
        ``head_hidden_dims`` is not ``None``
output_dim: int
The output dimension of the model. This is a required attribute
        necessary to build the ``WideDeep`` class
Example
--------
>>> import torch
>>> from pytorch_widedeep.models import StackedAttentiveRNN
>>> X_text = torch.cat((torch.zeros([5,1]), torch.empty(5, 4).random_(1,4)), axis=1)
>>> model = StackedAttentiveRNN(vocab_size=4, hidden_dim=4, padding_idx=0, embed_dim=4)
>>> out = model(X_text)
"""
def __init__(
self,
vocab_size: int,
embed_dim: Optional[int] = None,
embed_matrix: Optional[np.ndarray] = None,
embed_trainable: bool = True,
rnn_type: str = "lstm",
hidden_dim: int = 64,
bidirectional: bool = False,
padding_idx: int = 1,
n_blocks: int = 3,
attn_concatenate: bool = False,
attn_dropout: float = 0.1,
with_addnorm: bool = False,
head_hidden_dims: Optional[List[int]] = None,
head_activation: str = "relu",
head_dropout: Optional[float] = None,
head_batchnorm: bool = False,
head_batchnorm_last: bool = False,
head_linear_first: bool = False,
):
super(StackedAttentiveRNN, self).__init__()
if (
embed_dim is not None
and embed_matrix is not None
and not embed_dim == embed_matrix.shape[1]
):
warnings.warn(
"the input embedding dimension {} and the dimension of the "
"pretrained embeddings {} do not match. The pretrained embeddings "
"dimension ({}) will be used".format(
embed_dim, embed_matrix.shape[1], embed_matrix.shape[1]
),
UserWarning,
)
if rnn_type.lower() not in ["lstm", "gru"]:
raise ValueError(
f"'rnn_type' must be 'lstm' or 'gru', got {rnn_type} instead"
)
self.vocab_size = vocab_size
self.embed_trainable = embed_trainable
self.embed_dim = embed_dim
self.rnn_type = rnn_type
self.hidden_dim = hidden_dim
self.bidirectional = bidirectional
self.padding_idx = padding_idx
self.n_blocks = n_blocks
self.attn_concatenate = attn_concatenate
self.attn_dropout = attn_dropout
self.with_addnorm = with_addnorm
self.head_hidden_dims = head_hidden_dims
self.head_activation = head_activation
self.head_dropout = head_dropout
self.head_batchnorm = head_batchnorm
self.head_batchnorm_last = head_batchnorm_last
self.head_linear_first = head_linear_first
# Embeddings
self.word_embed, self.embed_dim = self._set_embeddings(embed_matrix)
        # Linear Projection: if embed_dim is different from the input of the
# attention blocks we add a linear projection
if bidirectional and attn_concatenate:
attn_input_dim = hidden_dim * 4
elif bidirectional or attn_concatenate:
attn_input_dim = hidden_dim * 2
else:
attn_input_dim = hidden_dim
self.output_dim = attn_input_dim
if attn_input_dim != self.embed_dim:
self.embed_proj: Union[nn.Linear, nn.Identity] = nn.Linear(
self.embed_dim, attn_input_dim
)
else:
self.embed_proj = nn.Identity()
# RNN
rnn_params = {
"input_size": attn_input_dim,
"hidden_size": hidden_dim,
"bidirectional": bidirectional,
"batch_first": True,
}
if self.rnn_type.lower() == "lstm":
self.rnn: Union[nn.LSTM, nn.GRU] = nn.LSTM(**rnn_params)
elif self.rnn_type.lower() == "gru":
self.rnn = nn.GRU(**rnn_params)
# FC-Head (Mlp)
self.attention_blks = nn.ModuleList()
for i in range(n_blocks):
self.attention_blks.append(
ContextAttentionEncoder(
self.rnn,
attn_input_dim,
attn_dropout,
attn_concatenate,
with_addnorm=with_addnorm if i != n_blocks - 1 else False,
sum_along_seq=i == n_blocks - 1,
)
)
# Mlp
if self.head_hidden_dims is not None:
head_hidden_dims = [self.output_dim] + head_hidden_dims
self.rnn_mlp: Union[MLP, nn.Identity] = MLP(
head_hidden_dims,
head_activation,
head_dropout,
head_batchnorm,
head_batchnorm_last,
head_linear_first,
)
self.output_dim = head_hidden_dims[-1]
else:
# simple hack to add readability in the forward pass
self.rnn_mlp = nn.Identity()
def forward(self, X: Tensor) -> Tensor: # type: ignore
x = self.embed_proj(self.word_embed(X.long()))
h = nn.init.zeros_(
torch.Tensor(2 if self.bidirectional else 1, X.shape[0], self.hidden_dim)
).to(x.device)
if self.rnn_type == "lstm":
c = nn.init.zeros_(
torch.Tensor(
2 if self.bidirectional else 1, X.shape[0], self.hidden_dim
)
).to(x.device)
else:
c = None
for blk in self.attention_blks:
x, h, c = blk(x, h, c)
return self.rnn_mlp(x)
@property
def attention_weights(self) -> List:
r"""List with the attention weights
The shape of the attention weights is:
:math:`(N, S)`
Where *N* is the batch size and *S* is the length of the sequence
"""
return [blk.attn.attn_weights for blk in self.attention_blks]
def _set_embeddings(
self, embed_matrix: Union[Any, np.ndarray]
) -> Tuple[nn.Module, int]:
if isinstance(embed_matrix, np.ndarray):
assert (
embed_matrix.dtype == "float32"
), "'embed_matrix' must be of dtype 'float32', got dtype '{}'".format(
str(embed_matrix.dtype)
)
word_embed = nn.Embedding(
self.vocab_size, embed_matrix.shape[1], padding_idx=self.padding_idx
)
if self.embed_trainable:
word_embed.weight = nn.Parameter(
torch.tensor(embed_matrix), requires_grad=True
)
else:
word_embed.weight = nn.Parameter(
torch.tensor(embed_matrix), requires_grad=False
)
embed_dim = embed_matrix.shape[1]
else:
word_embed = nn.Embedding(
self.vocab_size, self.embed_dim, padding_idx=self.padding_idx
)
embed_dim = self.embed_dim
return word_embed, embed_dim
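# Minimal standalone sketch (added for illustration; it mirrors the docstring
# example above and is not part of the original module).
if __name__ == "__main__":
    _X_text = torch.cat((torch.zeros([5, 1]), torch.empty(5, 4).random_(1, 4)), axis=1)
    _model = StackedAttentiveRNN(vocab_size=4, hidden_dim=4, padding_idx=0, embed_dim=4)
    _out = _model(_X_text)
    # The final attention block sums along the sequence, so the encoder returns
    # one vector per sequence: _out has shape (batch_size, hidden_dim) == (5, 4).
    print(_out.shape)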
| 37.458781 | 91 | 0.6031 |
7947c3ad6deb6d12b751307d2fa7af5bef1809e0 | 6,289 | py | Python | homeassistant/components/tradfri/config_flow.py | tbarbette/core | 8e58c3aa7bc8d2c2b09b6bd329daa1c092d52d3c | ["Apache-2.0"] | 6 | 2016-11-25T06:36:27.000Z | 2021-11-16T11:20:23.000Z | homeassistant/components/tradfri/config_flow.py | tbarbette/core | 8e58c3aa7bc8d2c2b09b6bd329daa1c092d52d3c | ["Apache-2.0"] | 58 | 2020-08-03T07:33:02.000Z | 2022-03-31T06:02:05.000Z | homeassistant/components/tradfri/config_flow.py | tbarbette/core | 8e58c3aa7bc8d2c2b09b6bd329daa1c092d52d3c | ["Apache-2.0"] | 14 | 2018-08-19T16:28:26.000Z | 2021-09-02T18:26:53.000Z |
"""Config flow for Tradfri."""
import asyncio
from uuid import uuid4
import async_timeout
from pytradfri import Gateway, RequestError
from pytradfri.api.aiocoap_api import APIFactory
import voluptuous as vol
from homeassistant import config_entries
from .const import (
CONF_GATEWAY_ID,
CONF_HOST,
CONF_IDENTITY,
CONF_IMPORT_GROUPS,
CONF_KEY,
KEY_SECURITY_CODE,
)
class AuthError(Exception):
"""Exception if authentication occurs."""
def __init__(self, code):
"""Initialize exception."""
super().__init__()
self.code = code
@config_entries.HANDLERS.register("tradfri")
class FlowHandler(config_entries.ConfigFlow):
"""Handle a config flow."""
VERSION = 1
def __init__(self):
"""Initialize flow."""
self._host = None
self._import_groups = False
async def async_step_user(self, user_input=None):
"""Handle a flow initialized by the user."""
return await self.async_step_auth()
async def async_step_auth(self, user_input=None):
"""Handle the authentication with a gateway."""
errors = {}
if user_input is not None:
host = user_input.get(CONF_HOST, self._host)
try:
auth = await authenticate(
self.hass, host, user_input[KEY_SECURITY_CODE]
)
# We don't ask for import group anymore as group state
# is not reliable, don't want to show that to the user.
# But we still allow specifying import group via config yaml.
auth[CONF_IMPORT_GROUPS] = self._import_groups
return await self._entry_from_data(auth)
except AuthError as err:
if err.code == "invalid_security_code":
errors[KEY_SECURITY_CODE] = err.code
else:
errors["base"] = err.code
else:
user_input = {}
fields = {}
if self._host is None:
fields[vol.Required(CONF_HOST, default=user_input.get(CONF_HOST))] = str
fields[
vol.Required(KEY_SECURITY_CODE, default=user_input.get(KEY_SECURITY_CODE))
] = str
return self.async_show_form(
step_id="auth", data_schema=vol.Schema(fields), errors=errors
)
async def async_step_homekit(self, discovery_info):
"""Handle homekit discovery."""
await self.async_set_unique_id(discovery_info["properties"]["id"])
self._abort_if_unique_id_configured({CONF_HOST: discovery_info["host"]})
host = discovery_info["host"]
for entry in self._async_current_entries():
if entry.data.get(CONF_HOST) != host:
continue
# Backwards compat, we update old entries
if not entry.unique_id:
self.hass.config_entries.async_update_entry(
entry, unique_id=discovery_info["properties"]["id"]
)
return self.async_abort(reason="already_configured")
self._host = host
return await self.async_step_auth()
async def async_step_import(self, user_input):
"""Import a config entry."""
for entry in self._async_current_entries():
if entry.data.get(CONF_HOST) == user_input["host"]:
return self.async_abort(reason="already_configured")
# Happens if user has host directly in configuration.yaml
if "key" not in user_input:
self._host = user_input["host"]
self._import_groups = user_input[CONF_IMPORT_GROUPS]
return await self.async_step_auth()
try:
data = await get_gateway_info(
self.hass,
user_input["host"],
# Old config format had a fixed identity
user_input.get("identity", "homeassistant"),
user_input["key"],
)
data[CONF_IMPORT_GROUPS] = user_input[CONF_IMPORT_GROUPS]
return await self._entry_from_data(data)
except AuthError:
# If we fail to connect, just pass it on to discovery
self._host = user_input["host"]
return await self.async_step_auth()
async def _entry_from_data(self, data):
"""Create an entry from data."""
host = data[CONF_HOST]
gateway_id = data[CONF_GATEWAY_ID]
same_hub_entries = [
entry.entry_id
for entry in self._async_current_entries()
if entry.data.get(CONF_GATEWAY_ID) == gateway_id
or entry.data.get(CONF_HOST) == host
]
if same_hub_entries:
await asyncio.wait(
[
self.hass.config_entries.async_remove(entry_id)
for entry_id in same_hub_entries
]
)
return self.async_create_entry(title=host, data=data)
async def authenticate(hass, host, security_code):
"""Authenticate with a Tradfri hub."""
identity = uuid4().hex
api_factory = await APIFactory.init(host, psk_id=identity)
try:
with async_timeout.timeout(5):
key = await api_factory.generate_psk(security_code)
except RequestError as err:
raise AuthError("invalid_security_code") from err
except asyncio.TimeoutError as err:
raise AuthError("timeout") from err
finally:
await api_factory.shutdown()
return await get_gateway_info(hass, host, identity, key)
async def get_gateway_info(hass, host, identity, key):
"""Return info for the gateway."""
try:
factory = await APIFactory.init(host, psk_id=identity, psk=key)
api = factory.request
gateway = Gateway()
gateway_info_result = await api(gateway.get_gateway_info())
await factory.shutdown()
except (OSError, RequestError) as err:
# We're also catching OSError as PyTradfri doesn't catch that one yet
# Upstream PR: https://github.com/ggravlingen/pytradfri/pull/189
raise AuthError("cannot_connect") from err
return {
CONF_HOST: host,
CONF_IDENTITY: identity,
CONF_KEY: key,
CONF_GATEWAY_ID: gateway_info_result.id,
}
| 31.288557 | 86 | 0.613929 |
7947c408025eacd4581048e9f5f63fb1130a354a | 1,766 | py | Python | pygame_demos2/world.py | chivitc1/pygamedemo | e1515ed9b94171a9e61a92ecec9da8cdcbe0efde | ["MIT"] | null | null | null | pygame_demos2/world.py | chivitc1/pygamedemo | e1515ed9b94171a9e61a92ecec9da8cdcbe0efde | ["MIT"] | null | null | null | pygame_demos2/world.py | chivitc1/pygamedemo | e1515ed9b94171a9e61a92ecec9da8cdcbe0efde | ["MIT"] | null | null | null |
import pygame
from pygame_demos2.vector2 import Vector2
class World:
SCR_SIZE = SRC_W, SRC_H = 640, 480
BACKGROUND_COLOR = (255, 255, 255)
NEST_POSITION = (SRC_W//2, SRC_H//2)
NEST_SIZE = 80.0
def __init__(self):
self.entities = {}
self.last_entity_id = 0
# Draw the nest (a circle) on the background
self.background = pygame.surface.Surface(self.SCR_SIZE).convert()
self.background.fill(self.BACKGROUND_COLOR)
pygame.draw.circle(self.background, (200, 255, 200), self.NEST_POSITION, int(self.NEST_SIZE))
def add_entity(self, entity):
self.entities[self.last_entity_id] = entity
entity.id = self.last_entity_id
self.last_entity_id += 1
def remove_entity(self, entity):
del self.entities[entity.id]
def get(self, entity_id):
if entity_id in self.entities:
return self.entities[entity_id]
else:
return None
def process(self, time_passed):
time_passed_seconds = time_passed / 1000.0
for entity in list(self.entities.values()):
entity.process(time_passed_seconds)
def render(self, surface):
# Draw the background and all the entities
surface.blit(self.background, (0, 0))
for entity in list(self.entities.values()):
entity.render(surface)
def get_close_entity(self, name, location, e_range=1000):
# Find an entity within range of a location
location = Vector2(*location)
for entity in self.entities.values():
if entity.name == name:
distance = location.get_distance_to(entity.location)
if distance < e_range:
return entity
return None
| 32.109091 | 101 | 0.63137 |
7947c42ec231c4d4528e5bfa6744c0b7c559e0aa | 1,985 | py | Python | models/sem_ch_graph_conv.py | fullmoonhalf/SemGCN | ce1dce98f8b7cc600ba7e733d17d71192c24b596 | [
"Apache-2.0"
] | 384 | 2019-06-21T18:27:20.000Z | 2022-03-29T06:22:46.000Z | models/sem_ch_graph_conv.py | fullmoonhalf/SemGCN | ce1dce98f8b7cc600ba7e733d17d71192c24b596 | [
"Apache-2.0"
] | 52 | 2019-07-12T12:39:16.000Z | 2021-12-18T15:18:45.000Z | models/sem_ch_graph_conv.py | fullmoonhalf/SemGCN | ce1dce98f8b7cc600ba7e733d17d71192c24b596 | [
"Apache-2.0"
] | 82 | 2019-06-26T07:51:52.000Z | 2022-02-27T10:40:27.000Z | from __future__ import absolute_import, division
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
class SemCHGraphConv(nn.Module):
"""
Semantic channel-wise graph convolution layer
"""
def __init__(self, in_features, out_features, adj, bias=True):
super(SemCHGraphConv, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.W = nn.Parameter(torch.zeros(size=(2, in_features, out_features), dtype=torch.float))
nn.init.xavier_uniform_(self.W.data, gain=1.414)
self.adj = adj.unsqueeze(0).repeat(out_features, 1, 1)
self.m = (self.adj > 0)
self.e = nn.Parameter(torch.zeros(out_features, len(self.m[0].nonzero()), dtype=torch.float))
nn.init.constant_(self.e.data, 1)
if bias:
self.bias = nn.Parameter(torch.zeros(out_features, dtype=torch.float))
stdv = 1. / math.sqrt(self.W.size(1))
self.bias.data.uniform_(-stdv, stdv)
else:
self.register_parameter('bias', None)
def forward(self, input):
h0 = torch.matmul(input, self.W[0]).unsqueeze(1).transpose(1, 3) # B * C * J * 1
h1 = torch.matmul(input, self.W[1]).unsqueeze(1).transpose(1, 3) # B * C * J * 1
adj = -9e15 * torch.ones_like(self.adj).to(input.device) # C * J * J
adj[self.m] = self.e.view(-1)
adj = F.softmax(adj, dim=2)
E = torch.eye(adj.size(1), dtype=torch.float).to(input.device)
E = E.unsqueeze(0).repeat(self.out_features, 1, 1) # C * J * J
output = torch.matmul(adj * E, h0) + torch.matmul(adj * (1 - E), h1)
output = output.transpose(1, 3).squeeze(1)
if self.bias is not None:
return output + self.bias.view(1, 1, -1)
else:
return output
def __repr__(self):
return self.__class__.__name__ + ' (' + str(self.in_features) + ' -> ' + str(self.out_features) + ')'
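# Minimal standalone sketch (added for illustration; not part of the original
# module). The 3-joint chain adjacency below is a made-up example.
if __name__ == "__main__":
    _adj = torch.tensor([[1.0, 1.0, 0.0],
                         [1.0, 1.0, 1.0],
                         [0.0, 1.0, 1.0]])
    _layer = SemCHGraphConv(in_features=2, out_features=4, adj=_adj)
    _out = _layer(torch.randn(8, 3, 2))  # (batch, joints, in_features)
    print(_out.shape)  # expected: torch.Size([8, 3, 4])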
| 36.759259 | 109 | 0.606045 |
7947c59429e8044841971e29c9e8dee145f96c9b | 87 | py | Python | src/hedm/metrics.py | csm-adapt/hedm | 33a4983d914ce8873d23ca2fcae14c0d6e162e0d | ["MIT"] | null | null | null | src/hedm/metrics.py | csm-adapt/hedm | 33a4983d914ce8873d23ca2fcae14c0d6e162e0d | ["MIT"] | null | null | null | src/hedm/metrics.py | csm-adapt/hedm | 33a4983d914ce8873d23ca2fcae14c0d6e162e0d | ["MIT"] | null | null | null |
__author__ = "Branden Kappes"
__package__ = "hedm"
import numpy as np
def probable
| 9.666667 | 29 | 0.735632 |
7947c6bf7263e49c5a263f3d283dc112a25f64cb | 8,723 | py | Python | contrib/linearize/linearize-data.py | ashleyholman/bitcoin | 5121c68657fb90baa50b709adcc48348c4876783 | [
"MIT"
] | 1,422 | 2016-03-22T19:43:18.000Z | 2021-07-23T02:20:42.000Z | contrib/linearize/linearize-data.py | ashleyholman/bitcoin | 5121c68657fb90baa50b709adcc48348c4876783 | [
"MIT"
] | 91 | 2016-10-13T19:32:42.000Z | 2020-12-13T09:00:23.000Z | contrib/linearize/linearize-data.py | ashleyholman/bitcoin | 5121c68657fb90baa50b709adcc48348c4876783 | [
"MIT"
] | 73 | 2016-10-05T02:40:28.000Z | 2022-02-01T17:55:40.000Z | #!/usr/bin/python
#
# linearize-data.py: Construct a linear, no-fork version of the chain.
#
# Copyright (c) 2013-2014 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
from __future__ import print_function, division
import json
import struct
import re
import os
import base64
import httplib
import sys
import hashlib
import datetime
import time
from collections import namedtuple
settings = {}
def uint32(x):
return x & 0xffffffffL
def bytereverse(x):
return uint32(( ((x) << 24) | (((x) << 8) & 0x00ff0000) |
(((x) >> 8) & 0x0000ff00) | ((x) >> 24) ))
def bufreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
word = struct.unpack('@I', in_buf[i:i+4])[0]
out_words.append(struct.pack('@I', bytereverse(word)))
return ''.join(out_words)
def wordreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
out_words.append(in_buf[i:i+4])
out_words.reverse()
return ''.join(out_words)
def calc_hdr_hash(blk_hdr):
hash1 = hashlib.sha256()
hash1.update(blk_hdr)
hash1_o = hash1.digest()
hash2 = hashlib.sha256()
hash2.update(hash1_o)
hash2_o = hash2.digest()
return hash2_o
def calc_hash_str(blk_hdr):
hash = calc_hdr_hash(blk_hdr)
hash = bufreverse(hash)
hash = wordreverse(hash)
hash_str = hash.encode('hex')
return hash_str
def get_blk_dt(blk_hdr):
members = struct.unpack("<I", blk_hdr[68:68+4])
nTime = members[0]
dt = datetime.datetime.fromtimestamp(nTime)
dt_ym = datetime.datetime(dt.year, dt.month, 1)
return (dt_ym, nTime)
def get_block_hashes(settings):
blkindex = []
f = open(settings['hashlist'], "r")
for line in f:
line = line.rstrip()
blkindex.append(line)
print("Read " + str(len(blkindex)) + " hashes")
return blkindex
def mkblockmap(blkindex):
blkmap = {}
for height,hash in enumerate(blkindex):
blkmap[hash] = height
return blkmap
# Block header and extent on disk
BlockExtent = namedtuple('BlockExtent', ['fn', 'offset', 'inhdr', 'blkhdr', 'size'])
class BlockDataCopier:
def __init__(self, settings, blkindex, blkmap):
self.settings = settings
self.blkindex = blkindex
self.blkmap = blkmap
self.inFn = 0
self.inF = None
self.outFn = 0
self.outsz = 0
self.outF = None
self.outFname = None
self.blkCountIn = 0
self.blkCountOut = 0
self.lastDate = datetime.datetime(2000, 1, 1)
self.highTS = 1408893517 - 315360000
self.timestampSplit = False
self.fileOutput = True
self.setFileTime = False
self.maxOutSz = settings['max_out_sz']
if 'output' in settings:
self.fileOutput = False
if settings['file_timestamp'] != 0:
self.setFileTime = True
if settings['split_timestamp'] != 0:
self.timestampSplit = True
# Extents and cache for out-of-order blocks
self.blockExtents = {}
self.outOfOrderData = {}
self.outOfOrderSize = 0 # running total size for items in outOfOrderData
def writeBlock(self, inhdr, blk_hdr, rawblock):
        if not self.fileOutput and ((self.outsz + len(inhdr) + len(blk_hdr) + len(rawblock)) > self.maxOutSz):
self.outF.close()
if self.setFileTime:
                os.utime(self.outFname, (int(time.time()), self.highTS))
self.outF = None
self.outFname = None
            self.outFn = self.outFn + 1
self.outsz = 0
(blkDate, blkTS) = get_blk_dt(blk_hdr)
if self.timestampSplit and (blkDate > self.lastDate):
print("New month " + blkDate.strftime("%Y-%m") + " @ " + hash_str)
lastDate = blkDate
if outF:
outF.close()
if setFileTime:
os.utime(outFname, (int(time.time()), highTS))
self.outF = None
self.outFname = None
self.outFn = self.outFn + 1
self.outsz = 0
if not self.outF:
if self.fileOutput:
outFname = self.settings['output_file']
else:
outFname = "%s/blk%05d.dat" % (self.settings['output'], outFn)
print("Output file " + outFname)
self.outF = open(outFname, "wb")
self.outF.write(inhdr)
self.outF.write(blk_hdr)
self.outF.write(rawblock)
self.outsz = self.outsz + len(inhdr) + len(blk_hdr) + len(rawblock)
self.blkCountOut = self.blkCountOut + 1
if blkTS > self.highTS:
self.highTS = blkTS
if (self.blkCountOut % 1000) == 0:
print('%i blocks scanned, %i blocks written (of %i, %.1f%% complete)' %
(self.blkCountIn, self.blkCountOut, len(self.blkindex), 100.0 * self.blkCountOut / len(self.blkindex)))
def inFileName(self, fn):
return "%s/blk%05d.dat" % (self.settings['input'], fn)
def fetchBlock(self, extent):
'''Fetch block contents from disk given extents'''
with open(self.inFileName(extent.fn), "rb") as f:
f.seek(extent.offset)
return f.read(extent.size)
def copyOneBlock(self):
'''Find the next block to be written in the input, and copy it to the output.'''
extent = self.blockExtents.pop(self.blkCountOut)
if self.blkCountOut in self.outOfOrderData:
# If the data is cached, use it from memory and remove from the cache
rawblock = self.outOfOrderData.pop(self.blkCountOut)
self.outOfOrderSize -= len(rawblock)
else: # Otherwise look up data on disk
rawblock = self.fetchBlock(extent)
self.writeBlock(extent.inhdr, extent.blkhdr, rawblock)
def run(self):
while self.blkCountOut < len(self.blkindex):
if not self.inF:
fname = self.inFileName(self.inFn)
print("Input file " + fname)
try:
self.inF = open(fname, "rb")
except IOError:
print("Premature end of block data")
return
inhdr = self.inF.read(8)
if (not inhdr or (inhdr[0] == "\0")):
self.inF.close()
self.inF = None
self.inFn = self.inFn + 1
continue
inMagic = inhdr[:4]
if (inMagic != self.settings['netmagic']):
print("Invalid magic: " + inMagic.encode('hex'))
return
inLenLE = inhdr[4:]
su = struct.unpack("<I", inLenLE)
inLen = su[0] - 80 # length without header
blk_hdr = self.inF.read(80)
inExtent = BlockExtent(self.inFn, self.inF.tell(), inhdr, blk_hdr, inLen)
hash_str = calc_hash_str(blk_hdr)
            if hash_str not in self.blkmap:
print("Skipping unknown block " + hash_str)
self.inF.seek(inLen, os.SEEK_CUR)
continue
blkHeight = self.blkmap[hash_str]
self.blkCountIn += 1
if self.blkCountOut == blkHeight:
# If in-order block, just copy
rawblock = self.inF.read(inLen)
self.writeBlock(inhdr, blk_hdr, rawblock)
# See if we can catch up to prior out-of-order blocks
while self.blkCountOut in self.blockExtents:
self.copyOneBlock()
else: # If out-of-order, skip over block data for now
self.blockExtents[blkHeight] = inExtent
if self.outOfOrderSize < self.settings['out_of_order_cache_sz']:
# If there is space in the cache, read the data
# Reading the data in file sequence instead of seeking and fetching it later is preferred,
# but we don't want to fill up memory
self.outOfOrderData[blkHeight] = self.inF.read(inLen)
self.outOfOrderSize += inLen
else: # If no space in cache, seek forward
self.inF.seek(inLen, os.SEEK_CUR)
print("Done (%i blocks written)" % (self.blkCountOut))
if __name__ == '__main__':
if len(sys.argv) != 2:
print("Usage: linearize-data.py CONFIG-FILE")
sys.exit(1)
f = open(sys.argv[1])
for line in f:
# skip comment lines
m = re.search('^\s*#', line)
if m:
continue
# parse key=value lines
m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
if m is None:
continue
settings[m.group(1)] = m.group(2)
f.close()
if 'netmagic' not in settings:
settings['netmagic'] = 'f9beb4d9'
if 'genesis' not in settings:
settings['genesis'] = '000000000019d6689c085ae165831e934ff763ae46a2a6c172b3f1b60a8ce26f'
if 'input' not in settings:
settings['input'] = 'input'
if 'hashlist' not in settings:
settings['hashlist'] = 'hashlist.txt'
if 'file_timestamp' not in settings:
settings['file_timestamp'] = 0
if 'split_timestamp' not in settings:
settings['split_timestamp'] = 0
if 'max_out_sz' not in settings:
settings['max_out_sz'] = 1000L * 1000 * 1000
if 'out_of_order_cache_sz' not in settings:
settings['out_of_order_cache_sz'] = 100 * 1000 * 1000
settings['max_out_sz'] = long(settings['max_out_sz'])
settings['split_timestamp'] = int(settings['split_timestamp'])
settings['file_timestamp'] = int(settings['file_timestamp'])
settings['netmagic'] = settings['netmagic'].decode('hex')
settings['out_of_order_cache_sz'] = int(settings['out_of_order_cache_sz'])
if 'output_file' not in settings and 'output' not in settings:
print("Missing output file / directory")
sys.exit(1)
blkindex = get_block_hashes(settings)
blkmap = mkblockmap(blkindex)
if not settings['genesis'] in blkmap:
print("Genesis block not found in hashlist")
else:
BlockDataCopier(settings, blkindex, blkmap).run()
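# Example CONFIG-FILE (added for illustration; the paths and sizes below are
# placeholders, only the keys read above are meaningful):
#
#   netmagic=f9beb4d9
#   genesis=000000000019d6689c085ae165831e934ff763ae46a2a6c172b3f1b60a8ce26f
#   input=/path/to/blocks
#   hashlist=hashlist.txt
#   output_file=bootstrap.dat
#   max_out_sz=1000000000
#   split_timestamp=0
#   file_timestamp=0
#   out_of_order_cache_sz=100000000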
| 28.884106 | 108 | 0.687722 |
7947c6c4e743987d9f5ccc07ea3954b48f01c2ba | 1,000 | py | Python | tests/test_plugins.py | kevincarrogan/mistune | 5baf7bd007b6da8c9e751bed2a4f3e54824fba52 | ["BSD-3-Clause"] | null | null | null | tests/test_plugins.py | kevincarrogan/mistune | 5baf7bd007b6da8c9e751bed2a4f3e54824fba52 | ["BSD-3-Clause"] | null | null | null | tests/test_plugins.py | kevincarrogan/mistune | 5baf7bd007b6da8c9e751bed2a4f3e54824fba52 | ["BSD-3-Clause"] | null | null | null |
from mistune import Markdown, AstRenderer, HTMLRenderer, plugins
from tests import BaseTestCase, fixtures
def load_plugin(plugin_name, ast=False):
_plugin = getattr(plugins, "plugin_{}".format(plugin_name))
class TestPlugin(BaseTestCase):
md = Markdown(renderer=HTMLRenderer(escape=False), plugins=[_plugin])
def test_ast_renderer(self):
md = Markdown(renderer=AstRenderer(), plugins=[_plugin])
data = fixtures.load_json(plugin_name + ".json")
self.assertEqual(md(data["text"]), data["tokens"])
if ast:
test_ast_renderer.__doc__ = "Run {} ast renderer".format(plugin_name)
setattr(TestPlugin, "test_ast_renderer", test_ast_renderer)
TestPlugin.load_fixtures(plugin_name + ".txt")
globals()["TestPlugin_" + plugin_name] = TestPlugin
load_plugin("url")
load_plugin("strikethrough")
load_plugin("footnotes", True)
load_plugin("table", True)
load_plugin("task_lists", True)
load_plugin("def_list", True)
load_plugin("abbr")
| 32.258065 | 77 | 0.72 |
7947c724860f4913e26b3c7f95b3de922e0f9551 | 5,669 | py | Python | mmdetection/third_party/text_perceptron/mmdet/models/detectors/text_perceptron_det.py | chengzhanzhan/DAVAR-Lab-OCR | 79776915c616731698d452d935e7b599b1ce46f0 | [
"Apache-2.0"
] | 4 | 2021-07-08T03:08:16.000Z | 2022-03-20T02:53:29.000Z | mmdetection/third_party/text_perceptron/mmdet/models/detectors/text_perceptron_det.py | chengzhanzhan/DAVAR-Lab-OCR | 79776915c616731698d452d935e7b599b1ce46f0 | [
"Apache-2.0"
] | null | null | null | mmdetection/third_party/text_perceptron/mmdet/models/detectors/text_perceptron_det.py | chengzhanzhan/DAVAR-Lab-OCR | 79776915c616731698d452d935e7b599b1ce46f0 | [
"Apache-2.0"
] | null | null | null | """
##################################################################################################
# Copyright Info : Copyright (c) Davar Lab @ Hikvision Research Institute. All rights reserved.
# Filename : text_perceptron_det.py
# Abstract : the main pipeline definition of tp_det model
# Current Version: 1.0.0
# Author : Liang Qiao
# Date : 2020-05-31
# Modified Date : 2020-11-25
# Modified by : inusheng
# Comments : Code and comment standardized
####################################################################################################
"""
import os
import time
import importlib
import torch.nn as nn
from mmdet.models.detectors.base import BaseDetector
from mmdet.models import builder, build_roi_extractor
from mmdet.models.registry import DETECTORS
@DETECTORS.register_module
class TextPerceptronDet(BaseDetector):
"""
Description:
Text Perceptron Detector model structure
Properties:
backbone: network backbone (e.g. ResNet)
neck: network neck (e.g., FPN)
mask_head: head for loss calculation (e.g., TPHead)
train_cfg: related parameters for training
test_cfg: related parameters for test
pretrained: pretrained model
Note: def of backbone, neck, ... are terms used in standard mmdetection framework.
You are recommended to be familiar with mmdetection by searching for some quick-run tutorials.
"""
def __init__(self,
backbone,
neck=None,
mask_head=None,
shape_transform_module=None,
train_cfg=None,
test_cfg=None,
pretrained=None):
super().__init__()
self.backbone = builder.build_backbone(backbone)
if neck is not None:
self.neck = builder.build_neck(neck)
self.mask_head = builder.build_head(mask_head)
if shape_transform_module is not None:
self.shape_transform_module = build_roi_extractor(shape_transform_module)
else:
self.shape_transform_module = None
self.train_cfg = train_cfg
self.test_cfg = test_cfg
self.init_weights(pretrained=pretrained)
def init_weights(self, pretrained=None):
"""
Description:
network parameters initialization
Args:
pretrained:pretrained model
"""
super().init_weights(pretrained)
self.backbone.init_weights(pretrained=pretrained)
if self.with_neck:
if isinstance(self.neck, nn.Sequential):
for module in self.neck:
module.init_weights()
else:
self.neck.init_weights()
self.mask_head.init_weights()
def extract_feat(self, img):
"""
Description:
feature extraction, mainly including backbone part and neck part
Args:
img: input images
Returns:
x: output feature maps through feature extractor
"""
x = self.backbone(img)
if self.with_neck:
x = self.neck(x)
return x
def forward_train(self,
img,
img_metas,
gt_masks,
**kwargs
):
"""
Description:
forward pass and loss computing (this forward function is used for training)
Arguments:
img (np.ndarray): input images
img_metas(dict) : image meta-info
gt_masks(np.ndarray): ground-truth label for training
Returns:
losses(dict): losses for training data
"""
losses = dict()
x = self.extract_feat(img)
# compute features through mask_head
mask_pred = self.mask_head(x)
# get ground-truth label
mask_targets = self.mask_head.get_target(gt_masks)
# compute loss
loss_mask = self.mask_head.loss(mask_pred, mask_targets)
# update loss
losses.update(loss_mask)
# For End-to-End training,
# Text Recognition part is unavailable due to the confidential policy
        # [SUSPENDED] Hopefully this part will be released later in the future.
# roi_features = self.shape_transform_module(mask_pred)
# recog_features = self.recog_backbone(roi_features)
return losses
def forward_dummy(self, img):
"""
Description:
dummy forward pass (mainly for FLOPS calculation)
"""
x = self.extract_feat(img)
outs = self.mask_head(x)
return outs
def simple_test(self, img, img_meta, **kwargs):
"""
Description:
forward inference (for test)
Args:
img: input image
img_meta: image meta-info
Returns:
results: predicted results, { points:[x1, y1, x2, y2, ..., xn, yn]}
"""
x = self.extract_feat(img)
mask_pred = self.mask_head(x)
points = self.shape_transform_module(mask_pred, img_meta)
# For End-to-End testing
# Text Recognition part is unavailable due to the confidential policy
        # [SUSPENDED] Hopefully this part will be released later in the future.
# points, crop_regions = self.shape_transform_module(mask_pred, img_meta)
return points
def aug_test(self, img, img_meta):
raise NotImplementedError | 31.848315 | 112 | 0.566414 |
7947c93e64a4fbfdadefedc2346f85578e6c8d12 | 549 | py | Python | dia_2/simple_flask.py | mariogen/curso_python | 767512edd07ee02f72293f539aa61b73e1dfe78d | ["MIT"] | null | null | null | dia_2/simple_flask.py | mariogen/curso_python | 767512edd07ee02f72293f539aa61b73e1dfe78d | ["MIT"] | null | null | null | dia_2/simple_flask.py | mariogen/curso_python | 767512edd07ee02f72293f539aa61b73e1dfe78d | ["MIT"] | null | null | null |
from flask import Flask, request, render_template
import json
app = Flask(__name__)
@app.route("/hello")
def hello():
return "Hello World!"
@app.route("/sum/<a>/<b>")
def sum(a,b):
return str(int(a)+int(b))
@app.route("/operations",methods=['POST'])
def operations():
a,b = request.json['a'], request.json['b']
return json.dumps({"mais": a+b,
"menos":a-b,
"vezes":a*b,
"dividido":a/b})
if __name__ == '__main__':
app.run(port=5000)
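# Example request against the /operations endpoint defined above (added for
# illustration; assumes the server is running locally on port 5000):
#
#   curl -X POST http://localhost:5000/operations \
#        -H "Content-Type: application/json" \
#        -d '{"a": 6, "b": 3}'
#
# Expected response: {"mais": 9, "menos": 3, "vezes": 18, "dividido": 2.0}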
| 22.875 | 50 | 0.52459 |
7947cad8dbd8fa7159010bdd0bb66eb3dddd9601 | 216 | py | Python | app/defaults/AB_Test/set_reward.py | bartfrenk/streamingbandit | 4237a05b439c2c12912e813f0b76ccf8af382aef | [
"MIT"
] | 64 | 2017-05-21T06:08:57.000Z | 2022-01-25T14:44:54.000Z | app/defaults/AB_Test/set_reward.py | bartfrenk/streamingbandit | 4237a05b439c2c12912e813f0b76ccf8af382aef | [
"MIT"
] | 76 | 2017-05-04T10:30:59.000Z | 2020-05-07T06:43:03.000Z | app/defaults/AB_Test/set_reward.py | bartfrenk/streamingbandit | 4237a05b439c2c12912e813f0b76ccf8af382aef | [
"MIT"
] | 12 | 2017-05-04T13:10:23.000Z | 2020-02-22T17:12:49.000Z | # -*- coding: utf-8 -*-
prop = base.Proportion(self.get_theta(key="treatment", value=self.action["treatment"]))
prop.update(self.reward["value"])
self.set_theta(prop, key="treatment", value=self.action["treatment"])
| 43.2 | 87 | 0.717593 |
7947cb61c910e429036f18962a447e9df0ae9ab2 | 549 | py | Python | data_types.py | natewachter/astr-119-session-3 | 0c26350d543195c6a593eef1a6f590ca35644ca2 | ["MIT"] | null | null | null | data_types.py | natewachter/astr-119-session-3 | 0c26350d543195c6a593eef1a6f590ca35644ca2 | ["MIT"] | null | null | null | data_types.py | natewachter/astr-119-session-3 | 0c26350d543195c6a593eef1a6f590ca35644ca2 | ["MIT"] | null | null | null |
import numpy as np
#integers
i = 10 # integer
print("The data type of i is ",type(i)) # integer
a_i = np.zeros(i,dtype=int) # declare an array of zeros
print("The data type of a_i is ",type(a_i)) #np.ndarray
print("The data type of a_i[0] is", type(a_i[0])) #int64
#floats
x = 119.0
print("The data type of x is ",type(x))
y = 1.19e2 # floating point number in sci notation
print("The data type of y is ",type(y))
z = np.zeros(i, dtype=float)
print("The data type of z is ",type(z))
print("The data type of z[0] is ",type(z[0])) | 22.875 | 58 | 0.648452 |
7947cc082d1881f0e8a0f7ca90ba880f1f37a60d | 88 | py | Python | baekjoon/math/5596-total-score.py | honux77/algorithm | 2ed8cef1fbee7ad96d8f2ae583666d52bd8892ee | [
"MIT"
] | 2 | 2019-02-08T01:23:07.000Z | 2020-11-19T12:23:52.000Z | baekjoon/math/5596-total-score.py | honux77/algorithm | 2ed8cef1fbee7ad96d8f2ae583666d52bd8892ee | [
"MIT"
] | null | null | null | baekjoon/math/5596-total-score.py | honux77/algorithm | 2ed8cef1fbee7ad96d8f2ae583666d52bd8892ee | [
"MIT"
] | null | null | null | a = sum(map(int, input().split()))
b = sum(map(int, input().split()))
print (max(a, b))
| 22 | 34 | 0.568182 |
7947cc15cdd371fc15a7a007d64c9bf4eeadf3f8 | 3,409 | py | Python | xrpl/asyncio/transaction/reliable_submission.py | SubCODERS/xrpl-py | 24a02d099002625794f5b6491ec2cafd872cc721 | [
"ISC"
] | 1 | 2022-03-11T07:01:02.000Z | 2022-03-11T07:01:02.000Z | xrpl/asyncio/transaction/reliable_submission.py | SubCODERS/xrpl-py | 24a02d099002625794f5b6491ec2cafd872cc721 | [
"ISC"
] | null | null | null | xrpl/asyncio/transaction/reliable_submission.py | SubCODERS/xrpl-py | 24a02d099002625794f5b6491ec2cafd872cc721 | [
"ISC"
] | 1 | 2022-01-28T14:12:57.000Z | 2022-01-28T14:12:57.000Z | """High-level reliable submission methods with XRPL transactions."""
import asyncio
from typing import Any, Dict, cast
from typing_extensions import Final
from xrpl.asyncio.clients import Client
from xrpl.asyncio.ledger import get_latest_validated_ledger_sequence
from xrpl.asyncio.transaction.ledger import get_transaction_from_hash
from xrpl.asyncio.transaction.main import submit_transaction
from xrpl.constants import XRPLException
from xrpl.models.response import Response
from xrpl.models.transactions.transaction import Transaction
_LEDGER_CLOSE_TIME: Final[int] = 4
class XRPLReliableSubmissionException(XRPLException):
"""General XRPL Reliable Submission Exception."""
pass
async def _wait_for_final_transaction_outcome(
transaction_hash: str, client: Client
) -> Response:
"""
The core logic of reliable submission. Polls the ledger until the result of the
transaction can be considered final, meaning it has either been included in a
validated ledger, or the transaction's lastLedgerSequence has been surpassed by the
latest ledger sequence (meaning it will never be included in a validated ledger).
"""
await asyncio.sleep(_LEDGER_CLOSE_TIME)
# new persisted transaction
# query transaction by hash
transaction_response = await get_transaction_from_hash(transaction_hash, client)
result = cast(Dict[str, Any], transaction_response.result)
if "validated" in result and result["validated"]:
# result is in a validated ledger, outcome is final
return transaction_response
last_ledger_sequence = result["LastLedgerSequence"]
latest_ledger_sequence = await get_latest_validated_ledger_sequence(client)
if last_ledger_sequence > latest_ledger_sequence:
# outcome is not yet final
return await _wait_for_final_transaction_outcome(transaction_hash, client)
raise XRPLReliableSubmissionException(
f"The latest ledger sequence {latest_ledger_sequence} is greater than the "
f"last ledger sequence {last_ledger_sequence} in the transaction."
)
async def send_reliable_submission(
transaction: Transaction, client: Client
) -> Response:
"""
Asynchronously submits a transaction and verifies that it has been included in a
validated ledger (or has errored/will not be included for some reason).
`See Reliable Transaction Submission
<https://xrpl.org/reliable-transaction-submission.html>`_
Args:
transaction: the signed transaction to submit to the ledger. Requires a
`last_ledger_sequence` param.
client: the network client used to submit the transaction to a rippled node.
Returns:
The response from a validated ledger.
Raises:
XRPLReliableSubmissionException: if the transaction fails or is missing a
`last_ledger_sequence` param.
"""
transaction_hash = transaction.get_hash()
submit_response = await submit_transaction(transaction, client)
result = cast(Dict[str, Any], submit_response.result)
if result["engine_result"] != "tesSUCCESS":
result_code = result["engine_result"]
result_message = result["engine_result_message"]
raise XRPLReliableSubmissionException(
f"Transaction failed, {result_code}: {result_message}"
)
return await _wait_for_final_transaction_outcome(transaction_hash, client)
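# Illustrative usage sketch (added; not part of the original module). It
# assumes `signed_tx` is an already-signed Transaction that carries a
# `last_ledger_sequence`, and `client` is an async xrpl client instance:
#
#   async def submit_and_wait(signed_tx, client):
#       response = await send_reliable_submission(signed_tx, client)
#       return response.result
#
# If the transaction is never validated before its last_ledger_sequence is
# surpassed, XRPLReliableSubmissionException is raised instead.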
| 37.461538 | 87 | 0.75418 |
7947cc7a7beeb68ddeedac6b2c608e61277ceacf | 6,727 | py | Python | ansible/venv/lib/python2.7/site-packages/ansible/modules/network/avi/avi_applicationpersistenceprofile.py | gvashchenkolineate/gvashchenkolineate_infra_trytravis | 0fb18850afe0d8609693ba4b23f29c7cda17d97f | [
"MIT"
] | 17 | 2017-06-07T23:15:01.000Z | 2021-08-30T14:32:36.000Z | ansible/venv/lib/python2.7/site-packages/ansible/modules/network/avi/avi_applicationpersistenceprofile.py | gvashchenkolineate/gvashchenkolineate_infra_trytravis | 0fb18850afe0d8609693ba4b23f29c7cda17d97f | [
"MIT"
] | 9 | 2017-06-25T03:31:52.000Z | 2021-05-17T23:43:12.000Z | ansible/venv/lib/python2.7/site-packages/ansible/modules/network/avi/avi_applicationpersistenceprofile.py | gvashchenkolineate/gvashchenkolineate_infra_trytravis | 0fb18850afe0d8609693ba4b23f29c7cda17d97f | [
"MIT"
] | 3 | 2018-05-26T21:31:22.000Z | 2019-09-28T17:00:45.000Z | #!/usr/bin/python
#
# @author: Gaurav Rastogi ([email protected])
# Eric Anderson ([email protected])
# module_check: supported
# Avi Version: 17.1.1
#
# Copyright: (c) 2017 Gaurav Rastogi, <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: avi_applicationpersistenceprofile
author: Gaurav Rastogi (@grastogi23) <[email protected]>
short_description: Module for setup of ApplicationPersistenceProfile Avi RESTful Object
description:
- This module is used to configure ApplicationPersistenceProfile object
- more examples at U(https://github.com/avinetworks/devops)
requirements: [ avisdk ]
version_added: "2.3"
options:
state:
description:
- The state that should be applied on the entity.
default: present
choices: ["absent", "present"]
avi_api_update_method:
description:
- Default method for object update is HTTP PUT.
- Setting to patch will override that behavior to use HTTP PATCH.
version_added: "2.5"
default: put
choices: ["put", "patch"]
avi_api_patch_op:
description:
- Patch operation to use when using avi_api_update_method as patch.
version_added: "2.5"
choices: ["add", "replace", "delete"]
app_cookie_persistence_profile:
description:
- Specifies the application cookie persistence profile parameters.
description:
description:
- User defined description for the object.
hdr_persistence_profile:
description:
- Specifies the custom http header persistence profile parameters.
http_cookie_persistence_profile:
description:
- Specifies the http cookie persistence profile parameters.
ip_persistence_profile:
description:
- Specifies the client ip persistence profile parameters.
is_federated:
description:
- This field describes the object's replication scope.
- If the field is set to false, then the object is visible within the controller-cluster and its associated service-engines.
- If the field is set to true, then the object is replicated across the federation.
- Field introduced in 17.1.3.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
version_added: "2.4"
type: bool
name:
description:
- A user-friendly name for the persistence profile.
required: true
persistence_type:
description:
- Method used to persist clients to the same server for a duration of time or a session.
- Enum options - PERSISTENCE_TYPE_CLIENT_IP_ADDRESS, PERSISTENCE_TYPE_HTTP_COOKIE, PERSISTENCE_TYPE_TLS, PERSISTENCE_TYPE_CLIENT_IPV6_ADDRESS,
- PERSISTENCE_TYPE_CUSTOM_HTTP_HEADER, PERSISTENCE_TYPE_APP_COOKIE, PERSISTENCE_TYPE_GSLB_SITE.
- Default value when not specified in API or module is interpreted by Avi Controller as PERSISTENCE_TYPE_CLIENT_IP_ADDRESS.
required: true
server_hm_down_recovery:
description:
- Specifies behavior when a persistent server has been marked down by a health monitor.
- Enum options - HM_DOWN_PICK_NEW_SERVER, HM_DOWN_ABORT_CONNECTION, HM_DOWN_CONTINUE_PERSISTENT_SERVER.
- Default value when not specified in API or module is interpreted by Avi Controller as HM_DOWN_PICK_NEW_SERVER.
tenant_ref:
description:
- It is a reference to an object of type tenant.
url:
description:
- Avi controller URL of the object.
uuid:
description:
- Uuid of the persistence profile.
extends_documentation_fragment:
- avi
'''
EXAMPLES = """
- name: Create an Application Persistence setting using http cookie.
avi_applicationpersistenceprofile:
controller: '{{ controller }}'
username: '{{ username }}'
password: '{{ password }}'
http_cookie_persistence_profile:
always_send_cookie: false
cookie_name: My-HTTP
key:
- aes_key: ShYGZdMks8j6Bpvm2sCvaXWzvXms2Z9ob+TTjRy46lQ=
name: c1276819-550c-4adf-912d-59efa5fd7269
- aes_key: OGsyVk84VCtyMENFOW0rMnRXVnNrb0RzdG5mT29oamJRb0dlbHZVSjR1az0=
name: a080de57-77c3-4580-a3ea-e7a6493c14fd
- aes_key: UVN0cU9HWmFUM2xOUzBVcmVXaHFXbnBLVUUxMU1VSktSVU5HWjJOWmVFMTBUMUV4UmxsNk4xQmFZejA9
name: 60478846-33c6-484d-868d-bbc324fce4a5
timeout: 15
name: My-HTTP-Cookie
persistence_type: PERSISTENCE_TYPE_HTTP_COOKIE
server_hm_down_recovery: HM_DOWN_PICK_NEW_SERVER
tenant_ref: Demo
"""
RETURN = '''
obj:
description: ApplicationPersistenceProfile (api/applicationpersistenceprofile) object
returned: success, changed
type: dict
'''
from ansible.module_utils.basic import AnsibleModule
try:
from ansible.module_utils.network.avi.avi import (
avi_common_argument_spec, avi_ansible_api, HAS_AVI)
except ImportError:
HAS_AVI = False
def main():
argument_specs = dict(
state=dict(default='present',
choices=['absent', 'present']),
avi_api_update_method=dict(default='put',
choices=['put', 'patch']),
avi_api_patch_op=dict(choices=['add', 'replace', 'delete']),
app_cookie_persistence_profile=dict(type='dict',),
description=dict(type='str',),
hdr_persistence_profile=dict(type='dict',),
http_cookie_persistence_profile=dict(type='dict',),
ip_persistence_profile=dict(type='dict',),
is_federated=dict(type='bool',),
name=dict(type='str', required=True),
persistence_type=dict(type='str', required=True),
server_hm_down_recovery=dict(type='str',),
tenant_ref=dict(type='str',),
url=dict(type='str',),
uuid=dict(type='str',),
)
argument_specs.update(avi_common_argument_spec())
module = AnsibleModule(
argument_spec=argument_specs, supports_check_mode=True)
if not HAS_AVI:
return module.fail_json(msg=(
'Avi python API SDK (avisdk>=17.1) or requests is not installed. '
'For more details visit https://github.com/avinetworks/sdk.'))
return avi_ansible_api(module, 'applicationpersistenceprofile',
set([]))
if __name__ == '__main__':
main()
| 39.804734 | 154 | 0.673554 |